]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-2.9.1-3.9.7-201306220939.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.9.1-3.9.7-201306220939.patch
CommitLineData
c4e57112
PK
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index b89a739..79768fb 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -2,9 +2,11 @@
6 *.aux
7 *.bin
8 *.bz2
9+*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17@@ -14,6 +16,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -48,14 +51,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *.xz
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38-.*
39+.[^g]*
40+.gen*
41 .*.d
42 .mm
43 53c700_d.h
44@@ -69,9 +75,11 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48+PERF*
49 SCCS
50 System.map*
51 TAGS
52+TRACEEVENT-CFLAGS
53 aconf
54 af_names.h
55 aic7*reg.h*
56@@ -80,6 +88,7 @@ aic7*seq.h*
57 aicasm
58 aicdb.h*
59 altivec*.c
60+ashldi3.S
61 asm-offsets.h
62 asm_offsets.h
63 autoconf.h*
64@@ -92,19 +101,24 @@ bounds.h
65 bsetup
66 btfixupprep
67 build
68+builtin-policy.h
69 bvmlinux
70 bzImage*
71 capability_names.h
72 capflags.c
73 classlist.h*
74+clut_vga16.c
75+common-cmds.h
76 comp*.log
77 compile.h*
78 conf
79 config
80 config-*
81 config_data.h*
82+config.c
83 config.mak
84 config.mak.autogen
85+config.tmp
86 conmakehash
87 consolemap_deftbl.c*
88 cpustr.h
89@@ -115,9 +129,11 @@ devlist.h*
90 dnotify_test
91 docproc
92 dslm
93+dtc-lexer.lex.c
94 elf2ecoff
95 elfconfig.h*
96 evergreen_reg_safe.h
97+exception_policy.conf
98 fixdep
99 flask.h
100 fore200e_mkfirm
101@@ -125,12 +141,15 @@ fore200e_pca_fw.c*
102 gconf
103 gconf.glade.h
104 gen-devlist
105+gen-kdb_cmds.c
106 gen_crc32table
107 gen_init_cpio
108 generated
109 genheaders
110 genksyms
111 *_gray256.c
112+hash
113+hid-example
114 hpet_example
115 hugepage-mmap
116 hugepage-shm
117@@ -145,14 +164,14 @@ int32.c
118 int4.c
119 int8.c
120 kallsyms
121-kconfig
122+kern_constants.h
123 keywords.c
124 ksym.c*
125 ksym.h*
126 kxgettext
127 lex.c
128 lex.*.c
129-linux
130+lib1funcs.S
131 logo_*.c
132 logo_*_clut224.c
133 logo_*_mono.c
134@@ -162,14 +181,15 @@ mach-types.h
135 machtypes.h
136 map
137 map_hugetlb
138-media
139 mconf
140+mdp
141 miboot*
142 mk_elfconfig
143 mkboot
144 mkbugboot
145 mkcpustr
146 mkdep
147+mkpiggy
148 mkprep
149 mkregtable
150 mktables
151@@ -185,6 +205,8 @@ oui.c*
152 page-types
153 parse.c
154 parse.h
155+parse-events*
156+pasyms.h
157 patches*
158 pca200e.bin
159 pca200e_ecd.bin2
160@@ -194,6 +216,7 @@ perf-archive
161 piggyback
162 piggy.gzip
163 piggy.S
164+pmu-*
165 pnmtologo
166 ppc_defs.h*
167 pss_boot.h
168@@ -203,7 +226,10 @@ r200_reg_safe.h
169 r300_reg_safe.h
170 r420_reg_safe.h
171 r600_reg_safe.h
172+realmode.lds
173+realmode.relocs
174 recordmcount
175+regdb.c
176 relocs
177 rlim_names.h
178 rn50_reg_safe.h
179@@ -213,8 +239,12 @@ series
180 setup
181 setup.bin
182 setup.elf
183+signing_key*
184+size_overflow_hash.h
185 sImage
186+slabinfo
187 sm_tbl*
188+sortextable
189 split-include
190 syscalltab.h
191 tables.c
192@@ -224,6 +254,7 @@ tftpboot.img
193 timeconst.h
194 times.h*
195 trix_boot.h
196+user_constants.h
197 utsrelease.h*
198 vdso-syms.lds
199 vdso.lds
200@@ -235,13 +266,17 @@ vdso32.lds
201 vdso32.so.dbg
202 vdso64.lds
203 vdso64.so.dbg
204+vdsox32.lds
205+vdsox32-syms.lds
206 version.h*
207 vmImage
208 vmlinux
209 vmlinux-*
210 vmlinux.aout
211 vmlinux.bin.all
212+vmlinux.bin.bz2
213 vmlinux.lds
214+vmlinux.relocs
215 vmlinuz
216 voffset.h
217 vsyscall.lds
218@@ -249,9 +284,12 @@ vsyscall_32.lds
219 wanxlfw.inc
220 uImage
221 unifdef
222+utsrelease.h
223 wakeup.bin
224 wakeup.elf
225 wakeup.lds
226+x509*
227 zImage*
228 zconf.hash.c
229+zconf.lex.c
230 zoffset.h
231diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
232index 8ccbf27..afffeb4 100644
233--- a/Documentation/kernel-parameters.txt
234+++ b/Documentation/kernel-parameters.txt
235@@ -948,6 +948,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
236 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
237 Default: 1024
238
239+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
240+ ignore grsecurity's /proc restrictions
241+
242+
243 hashdist= [KNL,NUMA] Large hashes allocated during boot
244 are distributed across NUMA nodes. Defaults on
245 for 64-bit NUMA, off otherwise.
246@@ -2147,6 +2151,18 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
247 the specified number of seconds. This is to be used if
248 your oopses keep scrolling off the screen.
249
250+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
251+ virtualization environments that don't cope well with the
252+ expand down segment used by UDEREF on X86-32 or the frequent
253+ page table updates on X86-64.
254+
255+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
256+
257+ pax_extra_latent_entropy
258+ Enable a very simple form of latent entropy extraction
259+ from the first 4GB of memory as the bootmem allocator
260+ passes the memory pages to the buddy allocator.
261+
262 pcbit= [HW,ISDN]
263
264 pcd. [PARIDE]
265diff --git a/Makefile b/Makefile
266index a129b15..548231d 100644
267--- a/Makefile
268+++ b/Makefile
269@@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
270
271 HOSTCC = gcc
272 HOSTCXX = g++
273-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
274-HOSTCXXFLAGS = -O2
275+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
276+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
277+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
278
279 # Decide whether to build built-in, modular, or both.
280 # Normally, just do built-in.
281@@ -414,8 +415,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
282 # Rules shared between *config targets and build targets
283
284 # Basic helpers built in scripts/
285-PHONY += scripts_basic
286-scripts_basic:
287+PHONY += scripts_basic gcc-plugins
288+scripts_basic: gcc-plugins
289 $(Q)$(MAKE) $(build)=scripts/basic
290 $(Q)rm -f .tmp_quiet_recordmcount
291
292@@ -576,6 +577,65 @@ else
293 KBUILD_CFLAGS += -O2
294 endif
295
296+ifndef DISABLE_PAX_PLUGINS
297+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
298+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
299+else
300+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
301+endif
302+ifneq ($(PLUGINCC),)
303+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
304+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
305+endif
306+ifdef CONFIG_PAX_MEMORY_STACKLEAK
307+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
308+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
309+endif
310+ifdef CONFIG_KALLOCSTAT_PLUGIN
311+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
312+endif
313+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
314+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
315+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
316+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
317+endif
318+ifdef CONFIG_CHECKER_PLUGIN
319+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
320+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
321+endif
322+endif
323+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
324+ifdef CONFIG_PAX_SIZE_OVERFLOW
325+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
326+endif
327+ifdef CONFIG_PAX_LATENT_ENTROPY
328+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
329+endif
330+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
331+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
332+endif
333+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
334+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
335+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
336+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
337+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN
338+ifeq ($(KBUILD_EXTMOD),)
339+gcc-plugins:
340+ $(Q)$(MAKE) $(build)=tools/gcc
341+else
342+gcc-plugins: ;
343+endif
344+else
345+gcc-plugins:
346+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
347+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
348+else
349+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
350+endif
351+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
352+endif
353+endif
354+
355 include $(srctree)/arch/$(SRCARCH)/Makefile
356
357 ifdef CONFIG_READABLE_ASM
358@@ -733,7 +793,7 @@ export mod_sign_cmd
359
360
361 ifeq ($(KBUILD_EXTMOD),)
362-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
363+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
364
365 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
366 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
367@@ -780,6 +840,8 @@ endif
368
369 # The actual objects are generated when descending,
370 # make sure no implicit rule kicks in
371+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
372+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
373 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
374
375 # Handle descending into subdirectories listed in $(vmlinux-dirs)
376@@ -789,7 +851,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
377 # Error messages still appears in the original language
378
379 PHONY += $(vmlinux-dirs)
380-$(vmlinux-dirs): prepare scripts
381+$(vmlinux-dirs): gcc-plugins prepare scripts
382 $(Q)$(MAKE) $(build)=$@
383
384 # Store (new) KERNELRELASE string in include/config/kernel.release
385@@ -833,6 +895,7 @@ prepare0: archprepare FORCE
386 $(Q)$(MAKE) $(build)=.
387
388 # All the preparing..
389+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
390 prepare: prepare0
391
392 # Generate some files
393@@ -940,6 +1003,8 @@ all: modules
394 # using awk while concatenating to the final file.
395
396 PHONY += modules
397+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
398+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
399 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
400 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
401 @$(kecho) ' Building modules, stage 2.';
402@@ -955,7 +1020,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
403
404 # Target to prepare building external modules
405 PHONY += modules_prepare
406-modules_prepare: prepare scripts
407+modules_prepare: gcc-plugins prepare scripts
408
409 # Target to install modules
410 PHONY += modules_install
411@@ -1021,7 +1086,7 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
412 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
413 signing_key.priv signing_key.x509 x509.genkey \
414 extra_certificates signing_key.x509.keyid \
415- signing_key.x509.signer
416+ signing_key.x509.signer tools/gcc/size_overflow_hash.h
417
418 # clean - Delete most, but leave enough to build external modules
419 #
420@@ -1061,6 +1126,7 @@ distclean: mrproper
421 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
422 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
423 -o -name '.*.rej' \
424+ -o -name '.*.rej' -o -name '*.so' \
425 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
426 -type f -print | xargs rm -f
427
428@@ -1221,6 +1287,8 @@ PHONY += $(module-dirs) modules
429 $(module-dirs): crmodverdir $(objtree)/Module.symvers
430 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
431
432+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
433+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
434 modules: $(module-dirs)
435 @$(kecho) ' Building modules, stage 2.';
436 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
437@@ -1357,17 +1425,21 @@ else
438 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
439 endif
440
441-%.s: %.c prepare scripts FORCE
442+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
443+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
444+%.s: %.c gcc-plugins prepare scripts FORCE
445 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
446 %.i: %.c prepare scripts FORCE
447 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
448-%.o: %.c prepare scripts FORCE
449+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
450+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
451+%.o: %.c gcc-plugins prepare scripts FORCE
452 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
453 %.lst: %.c prepare scripts FORCE
454 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
455-%.s: %.S prepare scripts FORCE
456+%.s: %.S gcc-plugins prepare scripts FORCE
457 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
458-%.o: %.S prepare scripts FORCE
459+%.o: %.S gcc-plugins prepare scripts FORCE
460 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
461 %.symtypes: %.c prepare scripts FORCE
462 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
463@@ -1377,11 +1449,15 @@ endif
464 $(cmd_crmodverdir)
465 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
466 $(build)=$(build-dir)
467-%/: prepare scripts FORCE
468+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
469+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
470+%/: gcc-plugins prepare scripts FORCE
471 $(cmd_crmodverdir)
472 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
473 $(build)=$(build-dir)
474-%.ko: prepare scripts FORCE
475+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
476+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
477+%.ko: gcc-plugins prepare scripts FORCE
478 $(cmd_crmodverdir)
479 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
480 $(build)=$(build-dir) $(@:.ko=.o)
481diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
482index c2cbe4f..f7264b4 100644
483--- a/arch/alpha/include/asm/atomic.h
484+++ b/arch/alpha/include/asm/atomic.h
485@@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
486 #define atomic_dec(v) atomic_sub(1,(v))
487 #define atomic64_dec(v) atomic64_sub(1,(v))
488
489+#define atomic64_read_unchecked(v) atomic64_read(v)
490+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
491+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
492+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
493+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
494+#define atomic64_inc_unchecked(v) atomic64_inc(v)
495+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
496+#define atomic64_dec_unchecked(v) atomic64_dec(v)
497+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
498+
499 #define smp_mb__before_atomic_dec() smp_mb()
500 #define smp_mb__after_atomic_dec() smp_mb()
501 #define smp_mb__before_atomic_inc() smp_mb()
502diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
503index ad368a9..fbe0f25 100644
504--- a/arch/alpha/include/asm/cache.h
505+++ b/arch/alpha/include/asm/cache.h
506@@ -4,19 +4,19 @@
507 #ifndef __ARCH_ALPHA_CACHE_H
508 #define __ARCH_ALPHA_CACHE_H
509
510+#include <linux/const.h>
511
512 /* Bytes per L1 (data) cache line. */
513 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
514-# define L1_CACHE_BYTES 64
515 # define L1_CACHE_SHIFT 6
516 #else
517 /* Both EV4 and EV5 are write-through, read-allocate,
518 direct-mapped, physical.
519 */
520-# define L1_CACHE_BYTES 32
521 # define L1_CACHE_SHIFT 5
522 #endif
523
524+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
525 #define SMP_CACHE_BYTES L1_CACHE_BYTES
526
527 #endif
528diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
529index 968d999..d36b2df 100644
530--- a/arch/alpha/include/asm/elf.h
531+++ b/arch/alpha/include/asm/elf.h
532@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
533
534 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
535
536+#ifdef CONFIG_PAX_ASLR
537+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
538+
539+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
540+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
541+#endif
542+
543 /* $0 is set by ld.so to a pointer to a function which might be
544 registered using atexit. This provides a mean for the dynamic
545 linker to call DT_FINI functions for shared libraries that have
546diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
547index bc2a0da..8ad11ee 100644
548--- a/arch/alpha/include/asm/pgalloc.h
549+++ b/arch/alpha/include/asm/pgalloc.h
550@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
551 pgd_set(pgd, pmd);
552 }
553
554+static inline void
555+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
556+{
557+ pgd_populate(mm, pgd, pmd);
558+}
559+
560 extern pgd_t *pgd_alloc(struct mm_struct *mm);
561
562 static inline void
563diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
564index 81a4342..348b927 100644
565--- a/arch/alpha/include/asm/pgtable.h
566+++ b/arch/alpha/include/asm/pgtable.h
567@@ -102,6 +102,17 @@ struct vm_area_struct;
568 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
569 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
570 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
571+
572+#ifdef CONFIG_PAX_PAGEEXEC
573+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
574+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
575+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
576+#else
577+# define PAGE_SHARED_NOEXEC PAGE_SHARED
578+# define PAGE_COPY_NOEXEC PAGE_COPY
579+# define PAGE_READONLY_NOEXEC PAGE_READONLY
580+#endif
581+
582 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
583
584 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
585diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
586index 2fd00b7..cfd5069 100644
587--- a/arch/alpha/kernel/module.c
588+++ b/arch/alpha/kernel/module.c
589@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
590
591 /* The small sections were sorted to the end of the segment.
592 The following should definitely cover them. */
593- gp = (u64)me->module_core + me->core_size - 0x8000;
594+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
595 got = sechdrs[me->arch.gotsecindex].sh_addr;
596
597 for (i = 0; i < n; i++) {
598diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
599index b9e37ad..44c24e7 100644
600--- a/arch/alpha/kernel/osf_sys.c
601+++ b/arch/alpha/kernel/osf_sys.c
602@@ -1297,10 +1297,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
603 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
604
605 static unsigned long
606-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
607- unsigned long limit)
608+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
609+ unsigned long limit, unsigned long flags)
610 {
611 struct vm_unmapped_area_info info;
612+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
613
614 info.flags = 0;
615 info.length = len;
616@@ -1308,6 +1309,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
617 info.high_limit = limit;
618 info.align_mask = 0;
619 info.align_offset = 0;
620+ info.threadstack_offset = offset;
621 return vm_unmapped_area(&info);
622 }
623
624@@ -1340,20 +1342,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
625 merely specific addresses, but regions of memory -- perhaps
626 this feature should be incorporated into all ports? */
627
628+#ifdef CONFIG_PAX_RANDMMAP
629+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
630+#endif
631+
632 if (addr) {
633- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
634+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
635 if (addr != (unsigned long) -ENOMEM)
636 return addr;
637 }
638
639 /* Next, try allocating at TASK_UNMAPPED_BASE. */
640- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
641- len, limit);
642+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
643+
644 if (addr != (unsigned long) -ENOMEM)
645 return addr;
646
647 /* Finally, try allocating in low memory. */
648- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
649+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
650
651 return addr;
652 }
653diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
654index 0c4132d..88f0d53 100644
655--- a/arch/alpha/mm/fault.c
656+++ b/arch/alpha/mm/fault.c
657@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
658 __reload_thread(pcb);
659 }
660
661+#ifdef CONFIG_PAX_PAGEEXEC
662+/*
663+ * PaX: decide what to do with offenders (regs->pc = fault address)
664+ *
665+ * returns 1 when task should be killed
666+ * 2 when patched PLT trampoline was detected
667+ * 3 when unpatched PLT trampoline was detected
668+ */
669+static int pax_handle_fetch_fault(struct pt_regs *regs)
670+{
671+
672+#ifdef CONFIG_PAX_EMUPLT
673+ int err;
674+
675+ do { /* PaX: patched PLT emulation #1 */
676+ unsigned int ldah, ldq, jmp;
677+
678+ err = get_user(ldah, (unsigned int *)regs->pc);
679+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
680+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
681+
682+ if (err)
683+ break;
684+
685+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
686+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
687+ jmp == 0x6BFB0000U)
688+ {
689+ unsigned long r27, addr;
690+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
691+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
692+
693+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
694+ err = get_user(r27, (unsigned long *)addr);
695+ if (err)
696+ break;
697+
698+ regs->r27 = r27;
699+ regs->pc = r27;
700+ return 2;
701+ }
702+ } while (0);
703+
704+ do { /* PaX: patched PLT emulation #2 */
705+ unsigned int ldah, lda, br;
706+
707+ err = get_user(ldah, (unsigned int *)regs->pc);
708+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
709+ err |= get_user(br, (unsigned int *)(regs->pc+8));
710+
711+ if (err)
712+ break;
713+
714+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
715+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
716+ (br & 0xFFE00000U) == 0xC3E00000U)
717+ {
718+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
719+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
720+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
721+
722+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
723+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
724+ return 2;
725+ }
726+ } while (0);
727+
728+ do { /* PaX: unpatched PLT emulation */
729+ unsigned int br;
730+
731+ err = get_user(br, (unsigned int *)regs->pc);
732+
733+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
734+ unsigned int br2, ldq, nop, jmp;
735+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
736+
737+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
738+ err = get_user(br2, (unsigned int *)addr);
739+ err |= get_user(ldq, (unsigned int *)(addr+4));
740+ err |= get_user(nop, (unsigned int *)(addr+8));
741+ err |= get_user(jmp, (unsigned int *)(addr+12));
742+ err |= get_user(resolver, (unsigned long *)(addr+16));
743+
744+ if (err)
745+ break;
746+
747+ if (br2 == 0xC3600000U &&
748+ ldq == 0xA77B000CU &&
749+ nop == 0x47FF041FU &&
750+ jmp == 0x6B7B0000U)
751+ {
752+ regs->r28 = regs->pc+4;
753+ regs->r27 = addr+16;
754+ regs->pc = resolver;
755+ return 3;
756+ }
757+ }
758+ } while (0);
759+#endif
760+
761+ return 1;
762+}
763+
764+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
765+{
766+ unsigned long i;
767+
768+ printk(KERN_ERR "PAX: bytes at PC: ");
769+ for (i = 0; i < 5; i++) {
770+ unsigned int c;
771+ if (get_user(c, (unsigned int *)pc+i))
772+ printk(KERN_CONT "???????? ");
773+ else
774+ printk(KERN_CONT "%08x ", c);
775+ }
776+ printk("\n");
777+}
778+#endif
779
780 /*
781 * This routine handles page faults. It determines the address,
782@@ -133,8 +251,29 @@ retry:
783 good_area:
784 si_code = SEGV_ACCERR;
785 if (cause < 0) {
786- if (!(vma->vm_flags & VM_EXEC))
787+ if (!(vma->vm_flags & VM_EXEC)) {
788+
789+#ifdef CONFIG_PAX_PAGEEXEC
790+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
791+ goto bad_area;
792+
793+ up_read(&mm->mmap_sem);
794+ switch (pax_handle_fetch_fault(regs)) {
795+
796+#ifdef CONFIG_PAX_EMUPLT
797+ case 2:
798+ case 3:
799+ return;
800+#endif
801+
802+ }
803+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
804+ do_group_exit(SIGKILL);
805+#else
806 goto bad_area;
807+#endif
808+
809+ }
810 } else if (!cause) {
811 /* Allow reads even for write-only mappings */
812 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
813diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
814index 1cacda4..2cef624 100644
815--- a/arch/arm/Kconfig
816+++ b/arch/arm/Kconfig
817@@ -1850,7 +1850,7 @@ config ALIGNMENT_TRAP
818
819 config UACCESS_WITH_MEMCPY
820 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
821- depends on MMU
822+ depends on MMU && !PAX_MEMORY_UDEREF
823 default y if CPU_FEROCEON
824 help
825 Implement faster copy_to_user and clear_user methods for CPU
826diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
827index c79f61f..9ac0642 100644
828--- a/arch/arm/include/asm/atomic.h
829+++ b/arch/arm/include/asm/atomic.h
830@@ -17,17 +17,35 @@
831 #include <asm/barrier.h>
832 #include <asm/cmpxchg.h>
833
834+#ifdef CONFIG_GENERIC_ATOMIC64
835+#include <asm-generic/atomic64.h>
836+#endif
837+
838 #define ATOMIC_INIT(i) { (i) }
839
840 #ifdef __KERNEL__
841
842+#define _ASM_EXTABLE(from, to) \
843+" .pushsection __ex_table,\"a\"\n"\
844+" .align 3\n" \
845+" .long " #from ", " #to"\n" \
846+" .popsection"
847+
848 /*
849 * On ARM, ordinary assignment (str instruction) doesn't clear the local
850 * strex/ldrex monitor on some implementations. The reason we can use it for
851 * atomic_set() is the clrex or dummy strex done on every exception return.
852 */
853 #define atomic_read(v) (*(volatile int *)&(v)->counter)
854+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
855+{
856+ return v->counter;
857+}
858 #define atomic_set(v,i) (((v)->counter) = (i))
859+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
860+{
861+ v->counter = i;
862+}
863
864 #if __LINUX_ARM_ARCH__ >= 6
865
866@@ -42,6 +60,35 @@ static inline void atomic_add(int i, atomic_t *v)
867 int result;
868
869 __asm__ __volatile__("@ atomic_add\n"
870+"1: ldrex %1, [%3]\n"
871+" adds %0, %1, %4\n"
872+
873+#ifdef CONFIG_PAX_REFCOUNT
874+" bvc 3f\n"
875+"2: bkpt 0xf103\n"
876+"3:\n"
877+#endif
878+
879+" strex %1, %0, [%3]\n"
880+" teq %1, #0\n"
881+" bne 1b"
882+
883+#ifdef CONFIG_PAX_REFCOUNT
884+"\n4:\n"
885+ _ASM_EXTABLE(2b, 4b)
886+#endif
887+
888+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
889+ : "r" (&v->counter), "Ir" (i)
890+ : "cc");
891+}
892+
893+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
894+{
895+ unsigned long tmp;
896+ int result;
897+
898+ __asm__ __volatile__("@ atomic_add_unchecked\n"
899 "1: ldrex %0, [%3]\n"
900 " add %0, %0, %4\n"
901 " strex %1, %0, [%3]\n"
902@@ -60,6 +107,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
903 smp_mb();
904
905 __asm__ __volatile__("@ atomic_add_return\n"
906+"1: ldrex %1, [%3]\n"
907+" adds %0, %1, %4\n"
908+
909+#ifdef CONFIG_PAX_REFCOUNT
910+" bvc 3f\n"
911+" mov %0, %1\n"
912+"2: bkpt 0xf103\n"
913+"3:\n"
914+#endif
915+
916+" strex %1, %0, [%3]\n"
917+" teq %1, #0\n"
918+" bne 1b"
919+
920+#ifdef CONFIG_PAX_REFCOUNT
921+"\n4:\n"
922+ _ASM_EXTABLE(2b, 4b)
923+#endif
924+
925+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
926+ : "r" (&v->counter), "Ir" (i)
927+ : "cc");
928+
929+ smp_mb();
930+
931+ return result;
932+}
933+
934+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
935+{
936+ unsigned long tmp;
937+ int result;
938+
939+ smp_mb();
940+
941+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
942 "1: ldrex %0, [%3]\n"
943 " add %0, %0, %4\n"
944 " strex %1, %0, [%3]\n"
945@@ -80,6 +163,35 @@ static inline void atomic_sub(int i, atomic_t *v)
946 int result;
947
948 __asm__ __volatile__("@ atomic_sub\n"
949+"1: ldrex %1, [%3]\n"
950+" subs %0, %1, %4\n"
951+
952+#ifdef CONFIG_PAX_REFCOUNT
953+" bvc 3f\n"
954+"2: bkpt 0xf103\n"
955+"3:\n"
956+#endif
957+
958+" strex %1, %0, [%3]\n"
959+" teq %1, #0\n"
960+" bne 1b"
961+
962+#ifdef CONFIG_PAX_REFCOUNT
963+"\n4:\n"
964+ _ASM_EXTABLE(2b, 4b)
965+#endif
966+
967+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
968+ : "r" (&v->counter), "Ir" (i)
969+ : "cc");
970+}
971+
972+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
973+{
974+ unsigned long tmp;
975+ int result;
976+
977+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
978 "1: ldrex %0, [%3]\n"
979 " sub %0, %0, %4\n"
980 " strex %1, %0, [%3]\n"
981@@ -98,11 +210,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
982 smp_mb();
983
984 __asm__ __volatile__("@ atomic_sub_return\n"
985-"1: ldrex %0, [%3]\n"
986-" sub %0, %0, %4\n"
987+"1: ldrex %1, [%3]\n"
988+" subs %0, %1, %4\n"
989+
990+#ifdef CONFIG_PAX_REFCOUNT
991+" bvc 3f\n"
992+" mov %0, %1\n"
993+"2: bkpt 0xf103\n"
994+"3:\n"
995+#endif
996+
997 " strex %1, %0, [%3]\n"
998 " teq %1, #0\n"
999 " bne 1b"
1000+
1001+#ifdef CONFIG_PAX_REFCOUNT
1002+"\n4:\n"
1003+ _ASM_EXTABLE(2b, 4b)
1004+#endif
1005+
1006 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1007 : "r" (&v->counter), "Ir" (i)
1008 : "cc");
1009@@ -134,6 +260,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
1010 return oldval;
1011 }
1012
1013+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
1014+{
1015+ unsigned long oldval, res;
1016+
1017+ smp_mb();
1018+
1019+ do {
1020+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
1021+ "ldrex %1, [%3]\n"
1022+ "mov %0, #0\n"
1023+ "teq %1, %4\n"
1024+ "strexeq %0, %5, [%3]\n"
1025+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1026+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
1027+ : "cc");
1028+ } while (res);
1029+
1030+ smp_mb();
1031+
1032+ return oldval;
1033+}
1034+
1035 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1036 {
1037 unsigned long tmp, tmp2;
1038@@ -167,7 +315,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
1039
1040 return val;
1041 }
1042+
1043+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
1044+{
1045+ return atomic_add_return(i, v);
1046+}
1047+
1048 #define atomic_add(i, v) (void) atomic_add_return(i, v)
1049+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
1050+{
1051+ (void) atomic_add_return(i, v);
1052+}
1053
1054 static inline int atomic_sub_return(int i, atomic_t *v)
1055 {
1056@@ -182,6 +340,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1057 return val;
1058 }
1059 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
1060+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1061+{
1062+ (void) atomic_sub_return(i, v);
1063+}
1064
1065 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1066 {
1067@@ -197,6 +359,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1068 return ret;
1069 }
1070
1071+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1072+{
1073+ return atomic_cmpxchg(v, old, new);
1074+}
1075+
1076 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1077 {
1078 unsigned long flags;
1079@@ -209,6 +376,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1080 #endif /* __LINUX_ARM_ARCH__ */
1081
1082 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1083+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1084+{
1085+ return xchg(&v->counter, new);
1086+}
1087
1088 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1089 {
1090@@ -221,11 +392,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1091 }
1092
1093 #define atomic_inc(v) atomic_add(1, v)
1094+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1095+{
1096+ atomic_add_unchecked(1, v);
1097+}
1098 #define atomic_dec(v) atomic_sub(1, v)
1099+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1100+{
1101+ atomic_sub_unchecked(1, v);
1102+}
1103
1104 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1105+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1106+{
1107+ return atomic_add_return_unchecked(1, v) == 0;
1108+}
1109 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1110 #define atomic_inc_return(v) (atomic_add_return(1, v))
1111+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1112+{
1113+ return atomic_add_return_unchecked(1, v);
1114+}
1115 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1116 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1117
1118@@ -241,6 +428,14 @@ typedef struct {
1119 u64 __aligned(8) counter;
1120 } atomic64_t;
1121
1122+#ifdef CONFIG_PAX_REFCOUNT
1123+typedef struct {
1124+ u64 __aligned(8) counter;
1125+} atomic64_unchecked_t;
1126+#else
1127+typedef atomic64_t atomic64_unchecked_t;
1128+#endif
1129+
1130 #define ATOMIC64_INIT(i) { (i) }
1131
1132 static inline u64 atomic64_read(const atomic64_t *v)
1133@@ -256,6 +451,19 @@ static inline u64 atomic64_read(const atomic64_t *v)
1134 return result;
1135 }
1136
1137+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
1138+{
1139+ u64 result;
1140+
1141+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1142+" ldrexd %0, %H0, [%1]"
1143+ : "=&r" (result)
1144+ : "r" (&v->counter), "Qo" (v->counter)
1145+ );
1146+
1147+ return result;
1148+}
1149+
1150 static inline void atomic64_set(atomic64_t *v, u64 i)
1151 {
1152 u64 tmp;
1153@@ -270,6 +478,20 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1154 : "cc");
1155 }
1156
1157+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1158+{
1159+ u64 tmp;
1160+
1161+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1162+"1: ldrexd %0, %H0, [%2]\n"
1163+" strexd %0, %3, %H3, [%2]\n"
1164+" teq %0, #0\n"
1165+" bne 1b"
1166+ : "=&r" (tmp), "=Qo" (v->counter)
1167+ : "r" (&v->counter), "r" (i)
1168+ : "cc");
1169+}
1170+
1171 static inline void atomic64_add(u64 i, atomic64_t *v)
1172 {
1173 u64 result;
1174@@ -278,6 +500,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1175 __asm__ __volatile__("@ atomic64_add\n"
1176 "1: ldrexd %0, %H0, [%3]\n"
1177 " adds %0, %0, %4\n"
1178+" adcs %H0, %H0, %H4\n"
1179+
1180+#ifdef CONFIG_PAX_REFCOUNT
1181+" bvc 3f\n"
1182+"2: bkpt 0xf103\n"
1183+"3:\n"
1184+#endif
1185+
1186+" strexd %1, %0, %H0, [%3]\n"
1187+" teq %1, #0\n"
1188+" bne 1b"
1189+
1190+#ifdef CONFIG_PAX_REFCOUNT
1191+"\n4:\n"
1192+ _ASM_EXTABLE(2b, 4b)
1193+#endif
1194+
1195+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1196+ : "r" (&v->counter), "r" (i)
1197+ : "cc");
1198+}
1199+
1200+static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
1201+{
1202+ u64 result;
1203+ unsigned long tmp;
1204+
1205+ __asm__ __volatile__("@ atomic64_add_unchecked\n"
1206+"1: ldrexd %0, %H0, [%3]\n"
1207+" adds %0, %0, %4\n"
1208 " adc %H0, %H0, %H4\n"
1209 " strexd %1, %0, %H0, [%3]\n"
1210 " teq %1, #0\n"
1211@@ -289,12 +541,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1212
1213 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
1214 {
1215- u64 result;
1216- unsigned long tmp;
1217+ u64 result, tmp;
1218
1219 smp_mb();
1220
1221 __asm__ __volatile__("@ atomic64_add_return\n"
1222+"1: ldrexd %1, %H1, [%3]\n"
1223+" adds %0, %1, %4\n"
1224+" adcs %H0, %H1, %H4\n"
1225+
1226+#ifdef CONFIG_PAX_REFCOUNT
1227+" bvc 3f\n"
1228+" mov %0, %1\n"
1229+" mov %H0, %H1\n"
1230+"2: bkpt 0xf103\n"
1231+"3:\n"
1232+#endif
1233+
1234+" strexd %1, %0, %H0, [%3]\n"
1235+" teq %1, #0\n"
1236+" bne 1b"
1237+
1238+#ifdef CONFIG_PAX_REFCOUNT
1239+"\n4:\n"
1240+ _ASM_EXTABLE(2b, 4b)
1241+#endif
1242+
1243+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1244+ : "r" (&v->counter), "r" (i)
1245+ : "cc");
1246+
1247+ smp_mb();
1248+
1249+ return result;
1250+}
1251+
1252+static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
1253+{
1254+ u64 result;
1255+ unsigned long tmp;
1256+
1257+ smp_mb();
1258+
1259+ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1260 "1: ldrexd %0, %H0, [%3]\n"
1261 " adds %0, %0, %4\n"
1262 " adc %H0, %H0, %H4\n"
1263@@ -318,6 +607,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1264 __asm__ __volatile__("@ atomic64_sub\n"
1265 "1: ldrexd %0, %H0, [%3]\n"
1266 " subs %0, %0, %4\n"
1267+" sbcs %H0, %H0, %H4\n"
1268+
1269+#ifdef CONFIG_PAX_REFCOUNT
1270+" bvc 3f\n"
1271+"2: bkpt 0xf103\n"
1272+"3:\n"
1273+#endif
1274+
1275+" strexd %1, %0, %H0, [%3]\n"
1276+" teq %1, #0\n"
1277+" bne 1b"
1278+
1279+#ifdef CONFIG_PAX_REFCOUNT
1280+"\n4:\n"
1281+ _ASM_EXTABLE(2b, 4b)
1282+#endif
1283+
1284+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1285+ : "r" (&v->counter), "r" (i)
1286+ : "cc");
1287+}
1288+
1289+static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
1290+{
1291+ u64 result;
1292+ unsigned long tmp;
1293+
1294+ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1295+"1: ldrexd %0, %H0, [%3]\n"
1296+" subs %0, %0, %4\n"
1297 " sbc %H0, %H0, %H4\n"
1298 " strexd %1, %0, %H0, [%3]\n"
1299 " teq %1, #0\n"
1300@@ -329,18 +648,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1301
1302 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
1303 {
1304- u64 result;
1305- unsigned long tmp;
1306+ u64 result, tmp;
1307
1308 smp_mb();
1309
1310 __asm__ __volatile__("@ atomic64_sub_return\n"
1311-"1: ldrexd %0, %H0, [%3]\n"
1312-" subs %0, %0, %4\n"
1313-" sbc %H0, %H0, %H4\n"
1314+"1: ldrexd %1, %H1, [%3]\n"
1315+" subs %0, %1, %4\n"
1316+" sbcs %H0, %H1, %H4\n"
1317+
1318+#ifdef CONFIG_PAX_REFCOUNT
1319+" bvc 3f\n"
1320+" mov %0, %1\n"
1321+" mov %H0, %H1\n"
1322+"2: bkpt 0xf103\n"
1323+"3:\n"
1324+#endif
1325+
1326 " strexd %1, %0, %H0, [%3]\n"
1327 " teq %1, #0\n"
1328 " bne 1b"
1329+
1330+#ifdef CONFIG_PAX_REFCOUNT
1331+"\n4:\n"
1332+ _ASM_EXTABLE(2b, 4b)
1333+#endif
1334+
1335 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1336 : "r" (&v->counter), "r" (i)
1337 : "cc");
1338@@ -374,6 +707,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
1339 return oldval;
1340 }
1341
1342+static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
1343+{
1344+ u64 oldval;
1345+ unsigned long res;
1346+
1347+ smp_mb();
1348+
1349+ do {
1350+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1351+ "ldrexd %1, %H1, [%3]\n"
1352+ "mov %0, #0\n"
1353+ "teq %1, %4\n"
1354+ "teqeq %H1, %H4\n"
1355+ "strexdeq %0, %5, %H5, [%3]"
1356+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1357+ : "r" (&ptr->counter), "r" (old), "r" (new)
1358+ : "cc");
1359+ } while (res);
1360+
1361+ smp_mb();
1362+
1363+ return oldval;
1364+}
1365+
1366 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1367 {
1368 u64 result;
1369@@ -397,21 +754,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1370
1371 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
1372 {
1373- u64 result;
1374- unsigned long tmp;
1375+ u64 result, tmp;
1376
1377 smp_mb();
1378
1379 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1380-"1: ldrexd %0, %H0, [%3]\n"
1381-" subs %0, %0, #1\n"
1382-" sbc %H0, %H0, #0\n"
1383+"1: ldrexd %1, %H1, [%3]\n"
1384+" subs %0, %1, #1\n"
1385+" sbcs %H0, %H1, #0\n"
1386+
1387+#ifdef CONFIG_PAX_REFCOUNT
1388+" bvc 3f\n"
1389+" mov %0, %1\n"
1390+" mov %H0, %H1\n"
1391+"2: bkpt 0xf103\n"
1392+"3:\n"
1393+#endif
1394+
1395 " teq %H0, #0\n"
1396-" bmi 2f\n"
1397+" bmi 4f\n"
1398 " strexd %1, %0, %H0, [%3]\n"
1399 " teq %1, #0\n"
1400 " bne 1b\n"
1401-"2:"
1402+"4:\n"
1403+
1404+#ifdef CONFIG_PAX_REFCOUNT
1405+ _ASM_EXTABLE(2b, 4b)
1406+#endif
1407+
1408 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1409 : "r" (&v->counter)
1410 : "cc");
1411@@ -434,13 +804,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1412 " teq %0, %5\n"
1413 " teqeq %H0, %H5\n"
1414 " moveq %1, #0\n"
1415-" beq 2f\n"
1416+" beq 4f\n"
1417 " adds %0, %0, %6\n"
1418-" adc %H0, %H0, %H6\n"
1419+" adcs %H0, %H0, %H6\n"
1420+
1421+#ifdef CONFIG_PAX_REFCOUNT
1422+" bvc 3f\n"
1423+"2: bkpt 0xf103\n"
1424+"3:\n"
1425+#endif
1426+
1427 " strexd %2, %0, %H0, [%4]\n"
1428 " teq %2, #0\n"
1429 " bne 1b\n"
1430-"2:"
1431+"4:\n"
1432+
1433+#ifdef CONFIG_PAX_REFCOUNT
1434+ _ASM_EXTABLE(2b, 4b)
1435+#endif
1436+
1437 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1438 : "r" (&v->counter), "r" (u), "r" (a)
1439 : "cc");
1440@@ -453,10 +835,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1441
1442 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1443 #define atomic64_inc(v) atomic64_add(1LL, (v))
1444+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1445 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1446+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1447 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1448 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1449 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1450+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1451 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1452 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1453 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1454diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1455index 75fe66b..ba3dee4 100644
1456--- a/arch/arm/include/asm/cache.h
1457+++ b/arch/arm/include/asm/cache.h
1458@@ -4,8 +4,10 @@
1459 #ifndef __ASMARM_CACHE_H
1460 #define __ASMARM_CACHE_H
1461
1462+#include <linux/const.h>
1463+
1464 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1465-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1466+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1467
1468 /*
1469 * Memory returned by kmalloc() may be used for DMA, so we must make
1470@@ -24,5 +26,6 @@
1471 #endif
1472
1473 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
1474+#define __read_only __attribute__ ((__section__(".data..read_only")))
1475
1476 #endif
1477diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1478index e1489c5..d418304 100644
1479--- a/arch/arm/include/asm/cacheflush.h
1480+++ b/arch/arm/include/asm/cacheflush.h
1481@@ -116,7 +116,7 @@ struct cpu_cache_fns {
1482 void (*dma_unmap_area)(const void *, size_t, int);
1483
1484 void (*dma_flush_range)(const void *, const void *);
1485-};
1486+} __no_const;
1487
1488 /*
1489 * Select the calling method
1490diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
1491index 6dcc164..b14d917 100644
1492--- a/arch/arm/include/asm/checksum.h
1493+++ b/arch/arm/include/asm/checksum.h
1494@@ -37,7 +37,19 @@ __wsum
1495 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
1496
1497 __wsum
1498-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1499+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1500+
1501+static inline __wsum
1502+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
1503+{
1504+ __wsum ret;
1505+ pax_open_userland();
1506+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
1507+ pax_close_userland();
1508+ return ret;
1509+}
1510+
1511+
1512
1513 /*
1514 * Fold a partial checksum without adding pseudo headers
1515diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1516index 4f009c1..466c59b 100644
1517--- a/arch/arm/include/asm/cmpxchg.h
1518+++ b/arch/arm/include/asm/cmpxchg.h
1519@@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1520
1521 #define xchg(ptr,x) \
1522 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1523+#define xchg_unchecked(ptr,x) \
1524+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1525
1526 #include <asm-generic/cmpxchg-local.h>
1527
1528diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
1529index 6ddbe44..b5e38b1 100644
1530--- a/arch/arm/include/asm/domain.h
1531+++ b/arch/arm/include/asm/domain.h
1532@@ -48,18 +48,37 @@
1533 * Domain types
1534 */
1535 #define DOMAIN_NOACCESS 0
1536-#define DOMAIN_CLIENT 1
1537 #ifdef CONFIG_CPU_USE_DOMAINS
1538+#define DOMAIN_USERCLIENT 1
1539+#define DOMAIN_KERNELCLIENT 1
1540 #define DOMAIN_MANAGER 3
1541+#define DOMAIN_VECTORS DOMAIN_USER
1542 #else
1543+
1544+#ifdef CONFIG_PAX_KERNEXEC
1545 #define DOMAIN_MANAGER 1
1546+#define DOMAIN_KERNEXEC 3
1547+#else
1548+#define DOMAIN_MANAGER 1
1549+#endif
1550+
1551+#ifdef CONFIG_PAX_MEMORY_UDEREF
1552+#define DOMAIN_USERCLIENT 0
1553+#define DOMAIN_UDEREF 1
1554+#define DOMAIN_VECTORS DOMAIN_KERNEL
1555+#else
1556+#define DOMAIN_USERCLIENT 1
1557+#define DOMAIN_VECTORS DOMAIN_USER
1558+#endif
1559+#define DOMAIN_KERNELCLIENT 1
1560+
1561 #endif
1562
1563 #define domain_val(dom,type) ((type) << (2*(dom)))
1564
1565 #ifndef __ASSEMBLY__
1566
1567-#ifdef CONFIG_CPU_USE_DOMAINS
1568+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1569 static inline void set_domain(unsigned val)
1570 {
1571 asm volatile(
1572@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
1573 isb();
1574 }
1575
1576-#define modify_domain(dom,type) \
1577- do { \
1578- struct thread_info *thread = current_thread_info(); \
1579- unsigned int domain = thread->cpu_domain; \
1580- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
1581- thread->cpu_domain = domain | domain_val(dom, type); \
1582- set_domain(thread->cpu_domain); \
1583- } while (0)
1584-
1585+extern void modify_domain(unsigned int dom, unsigned int type);
1586 #else
1587 static inline void set_domain(unsigned val) { }
1588 static inline void modify_domain(unsigned dom, unsigned type) { }
1589diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1590index 38050b1..9d90e8b 100644
1591--- a/arch/arm/include/asm/elf.h
1592+++ b/arch/arm/include/asm/elf.h
1593@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1594 the loader. We need to make sure that it is out of the way of the program
1595 that it will "exec", and that there is sufficient room for the brk. */
1596
1597-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1598+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1599+
1600+#ifdef CONFIG_PAX_ASLR
1601+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1602+
1603+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1604+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1605+#endif
1606
1607 /* When the program starts, a1 contains a pointer to a function to be
1608 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1609@@ -126,8 +133,4 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1610 extern void elf_set_personality(const struct elf32_hdr *);
1611 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1612
1613-struct mm_struct;
1614-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1615-#define arch_randomize_brk arch_randomize_brk
1616-
1617 #endif
1618diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
1619index de53547..52b9a28 100644
1620--- a/arch/arm/include/asm/fncpy.h
1621+++ b/arch/arm/include/asm/fncpy.h
1622@@ -81,7 +81,9 @@
1623 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
1624 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
1625 \
1626+ pax_open_kernel(); \
1627 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
1628+ pax_close_kernel(); \
1629 flush_icache_range((unsigned long)(dest_buf), \
1630 (unsigned long)(dest_buf) + (size)); \
1631 \
1632diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
1633index e42cf59..7b94b8f 100644
1634--- a/arch/arm/include/asm/futex.h
1635+++ b/arch/arm/include/asm/futex.h
1636@@ -50,6 +50,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1637 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1638 return -EFAULT;
1639
1640+ pax_open_userland();
1641+
1642 smp_mb();
1643 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1644 "1: ldrex %1, [%4]\n"
1645@@ -65,6 +67,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1646 : "cc", "memory");
1647 smp_mb();
1648
1649+ pax_close_userland();
1650+
1651 *uval = val;
1652 return ret;
1653 }
1654@@ -95,6 +99,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1655 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1656 return -EFAULT;
1657
1658+ pax_open_userland();
1659+
1660 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1661 "1: " TUSER(ldr) " %1, [%4]\n"
1662 " teq %1, %2\n"
1663@@ -105,6 +111,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1664 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
1665 : "cc", "memory");
1666
1667+ pax_close_userland();
1668+
1669 *uval = val;
1670 return ret;
1671 }
1672@@ -127,6 +135,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1673 return -EFAULT;
1674
1675 pagefault_disable(); /* implies preempt_disable() */
1676+ pax_open_userland();
1677
1678 switch (op) {
1679 case FUTEX_OP_SET:
1680@@ -148,6 +157,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1681 ret = -ENOSYS;
1682 }
1683
1684+ pax_close_userland();
1685 pagefault_enable(); /* subsumes preempt_enable() */
1686
1687 if (!ret) {
1688diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1689index 83eb2f7..ed77159 100644
1690--- a/arch/arm/include/asm/kmap_types.h
1691+++ b/arch/arm/include/asm/kmap_types.h
1692@@ -4,6 +4,6 @@
1693 /*
1694 * This is the "bare minimum". AIO seems to require this.
1695 */
1696-#define KM_TYPE_NR 16
1697+#define KM_TYPE_NR 17
1698
1699 #endif
1700diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
1701index 9e614a1..3302cca 100644
1702--- a/arch/arm/include/asm/mach/dma.h
1703+++ b/arch/arm/include/asm/mach/dma.h
1704@@ -22,7 +22,7 @@ struct dma_ops {
1705 int (*residue)(unsigned int, dma_t *); /* optional */
1706 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
1707 const char *type;
1708-};
1709+} __do_const;
1710
1711 struct dma_struct {
1712 void *addr; /* single DMA address */
1713diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1714index 2fe141f..192dc01 100644
1715--- a/arch/arm/include/asm/mach/map.h
1716+++ b/arch/arm/include/asm/mach/map.h
1717@@ -27,13 +27,16 @@ struct map_desc {
1718 #define MT_MINICLEAN 6
1719 #define MT_LOW_VECTORS 7
1720 #define MT_HIGH_VECTORS 8
1721-#define MT_MEMORY 9
1722+#define MT_MEMORY_RWX 9
1723 #define MT_ROM 10
1724-#define MT_MEMORY_NONCACHED 11
1725+#define MT_MEMORY_NONCACHED_RX 11
1726 #define MT_MEMORY_DTCM 12
1727 #define MT_MEMORY_ITCM 13
1728 #define MT_MEMORY_SO 14
1729 #define MT_MEMORY_DMA_READY 15
1730+#define MT_MEMORY_RW 16
1731+#define MT_MEMORY_RX 17
1732+#define MT_MEMORY_NONCACHED_RW 18
1733
1734 #ifdef CONFIG_MMU
1735 extern void iotable_init(struct map_desc *, int);
1736diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1737index 12f71a1..04e063c 100644
1738--- a/arch/arm/include/asm/outercache.h
1739+++ b/arch/arm/include/asm/outercache.h
1740@@ -35,7 +35,7 @@ struct outer_cache_fns {
1741 #endif
1742 void (*set_debug)(unsigned long);
1743 void (*resume)(void);
1744-};
1745+} __no_const;
1746
1747 #ifdef CONFIG_OUTER_CACHE
1748
1749diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1750index 812a494..71fc0b6 100644
1751--- a/arch/arm/include/asm/page.h
1752+++ b/arch/arm/include/asm/page.h
1753@@ -114,7 +114,7 @@ struct cpu_user_fns {
1754 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1755 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1756 unsigned long vaddr, struct vm_area_struct *vma);
1757-};
1758+} __no_const;
1759
1760 #ifdef MULTI_USER
1761 extern struct cpu_user_fns cpu_user;
1762diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1763index 943504f..c37a730 100644
1764--- a/arch/arm/include/asm/pgalloc.h
1765+++ b/arch/arm/include/asm/pgalloc.h
1766@@ -17,6 +17,7 @@
1767 #include <asm/processor.h>
1768 #include <asm/cacheflush.h>
1769 #include <asm/tlbflush.h>
1770+#include <asm/system_info.h>
1771
1772 #define check_pgt_cache() do { } while (0)
1773
1774@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1775 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1776 }
1777
1778+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1779+{
1780+ pud_populate(mm, pud, pmd);
1781+}
1782+
1783 #else /* !CONFIG_ARM_LPAE */
1784
1785 /*
1786@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1787 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1788 #define pmd_free(mm, pmd) do { } while (0)
1789 #define pud_populate(mm,pmd,pte) BUG()
1790+#define pud_populate_kernel(mm,pmd,pte) BUG()
1791
1792 #endif /* CONFIG_ARM_LPAE */
1793
1794@@ -126,6 +133,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1795 __free_page(pte);
1796 }
1797
1798+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
1799+{
1800+#ifdef CONFIG_ARM_LPAE
1801+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1802+#else
1803+ if (addr & SECTION_SIZE)
1804+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
1805+ else
1806+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1807+#endif
1808+ flush_pmd_entry(pmdp);
1809+}
1810+
1811 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1812 pmdval_t prot)
1813 {
1814@@ -155,7 +175,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
1815 static inline void
1816 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
1817 {
1818- __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
1819+ __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE | __supported_pmd_mask);
1820 }
1821 #define pmd_pgtable(pmd) pmd_page(pmd)
1822
1823diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1824index 5cfba15..f415e1a 100644
1825--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1826+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1827@@ -20,12 +20,15 @@
1828 #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
1829 #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
1830 #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
1831+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
1832 #define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
1833 #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
1834 #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
1835+
1836 /*
1837 * - section
1838 */
1839+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1840 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1841 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1842 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1843@@ -37,6 +40,7 @@
1844 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1845 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1846 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1847+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
1848
1849 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1850 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1851@@ -66,6 +70,7 @@
1852 * - extended small page/tiny page
1853 */
1854 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
1855+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
1856 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
1857 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
1858 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
1859diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
1860index f97ee02..07f1be5 100644
1861--- a/arch/arm/include/asm/pgtable-2level.h
1862+++ b/arch/arm/include/asm/pgtable-2level.h
1863@@ -125,6 +125,7 @@
1864 #define L_PTE_XN (_AT(pteval_t, 1) << 9)
1865 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
1866 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
1867+#define L_PTE_PXN (_AT(pteval_t, 1) << 12) /* v7*/
1868
1869 /*
1870 * These are the memory types, defined to be compatible with
1871diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
1872index 18f5cef..25b8f43 100644
1873--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
1874+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
1875@@ -41,6 +41,7 @@
1876 */
1877 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1878 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1879+#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7)
1880 #define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
1881 #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
1882 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 11)
1883@@ -71,6 +72,7 @@
1884 #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1885 #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
1886 #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
1887+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1888 #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
1889
1890 /*
1891diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
1892index 86b8fe3..e25f975 100644
1893--- a/arch/arm/include/asm/pgtable-3level.h
1894+++ b/arch/arm/include/asm/pgtable-3level.h
1895@@ -74,6 +74,7 @@
1896 #define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
1897 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1898 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
1899+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1900 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
1901 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */
1902 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
1903@@ -82,6 +83,7 @@
1904 /*
1905 * To be used in assembly code with the upper page attributes.
1906 */
1907+#define L_PTE_PXN_HIGH (1 << (53 - 32))
1908 #define L_PTE_XN_HIGH (1 << (54 - 32))
1909 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
1910
1911diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
1912index 9bcd262..fba731c 100644
1913--- a/arch/arm/include/asm/pgtable.h
1914+++ b/arch/arm/include/asm/pgtable.h
1915@@ -30,6 +30,9 @@
1916 #include <asm/pgtable-2level.h>
1917 #endif
1918
1919+#define ktla_ktva(addr) (addr)
1920+#define ktva_ktla(addr) (addr)
1921+
1922 /*
1923 * Just any arbitrary offset to the start of the vmalloc VM area: the
1924 * current 8MB value just means that there will be a 8MB "hole" after the
1925@@ -45,6 +48,9 @@
1926 #define LIBRARY_TEXT_START 0x0c000000
1927
1928 #ifndef __ASSEMBLY__
1929+extern pteval_t __supported_pte_mask;
1930+extern pmdval_t __supported_pmd_mask;
1931+
1932 extern void __pte_error(const char *file, int line, pte_t);
1933 extern void __pmd_error(const char *file, int line, pmd_t);
1934 extern void __pgd_error(const char *file, int line, pgd_t);
1935@@ -53,6 +59,50 @@ extern void __pgd_error(const char *file, int line, pgd_t);
1936 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
1937 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
1938
1939+#define __HAVE_ARCH_PAX_OPEN_KERNEL
1940+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
1941+
1942+#ifdef CONFIG_PAX_KERNEXEC
1943+#include <asm/domain.h>
1944+#include <linux/thread_info.h>
1945+#include <linux/preempt.h>
1946+#endif
1947+
1948+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1949+static inline int test_domain(int domain, int domaintype)
1950+{
1951+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
1952+}
1953+#endif
1954+
1955+#ifdef CONFIG_PAX_KERNEXEC
1956+static inline unsigned long pax_open_kernel(void) {
1957+#ifdef CONFIG_ARM_LPAE
1958+ /* TODO */
1959+#else
1960+ preempt_disable();
1961+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
1962+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
1963+#endif
1964+ return 0;
1965+}
1966+
1967+static inline unsigned long pax_close_kernel(void) {
1968+#ifdef CONFIG_ARM_LPAE
1969+ /* TODO */
1970+#else
1971+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
1972+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
1973+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
1974+ preempt_enable_no_resched();
1975+#endif
1976+ return 0;
1977+}
1978+#else
1979+static inline unsigned long pax_open_kernel(void) { return 0; }
1980+static inline unsigned long pax_close_kernel(void) { return 0; }
1981+#endif
1982+
1983 /*
1984 * This is the lowest virtual address we can permit any user space
1985 * mapping to be mapped at. This is particularly important for
1986@@ -72,8 +122,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
1987 /*
1988 * The pgprot_* and protection_map entries will be fixed up in runtime
1989 * to include the cachable and bufferable bits based on memory policy,
1990- * as well as any architecture dependent bits like global/ASID and SMP
1991- * shared mapping bits.
1992+ * as well as any architecture dependent bits like global/ASID, PXN,
1993+ * and SMP shared mapping bits.
1994 */
1995 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
1996
1997@@ -257,7 +307,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
1998 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
1999 {
2000 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2001- L_PTE_NONE | L_PTE_VALID;
2002+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2003 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2004 return pte;
2005 }
2006diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
2007index f3628fb..a0672dd 100644
2008--- a/arch/arm/include/asm/proc-fns.h
2009+++ b/arch/arm/include/asm/proc-fns.h
2010@@ -75,7 +75,7 @@ extern struct processor {
2011 unsigned int suspend_size;
2012 void (*do_suspend)(void *);
2013 void (*do_resume)(void *);
2014-} processor;
2015+} __do_const processor;
2016
2017 #ifndef MULTI_CPU
2018 extern void cpu_proc_init(void);
2019diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
2020index 06e7d50..8a8e251 100644
2021--- a/arch/arm/include/asm/processor.h
2022+++ b/arch/arm/include/asm/processor.h
2023@@ -65,9 +65,8 @@ struct thread_struct {
2024 regs->ARM_cpsr |= PSR_ENDSTATE; \
2025 regs->ARM_pc = pc & ~1; /* pc */ \
2026 regs->ARM_sp = sp; /* sp */ \
2027- regs->ARM_r2 = stack[2]; /* r2 (envp) */ \
2028- regs->ARM_r1 = stack[1]; /* r1 (argv) */ \
2029- regs->ARM_r0 = stack[0]; /* r0 (argc) */ \
2030+ /* r2 (envp), r1 (argv), r0 (argc) */ \
2031+ (void)copy_from_user(&regs->ARM_r0, (const char __user *)stack, 3 * sizeof(unsigned long)); \
2032 nommu_start_thread(regs); \
2033 })
2034
2035diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
2036index ce0dbe7..c085b6f 100644
2037--- a/arch/arm/include/asm/psci.h
2038+++ b/arch/arm/include/asm/psci.h
2039@@ -29,7 +29,7 @@ struct psci_operations {
2040 int (*cpu_off)(struct psci_power_state state);
2041 int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
2042 int (*migrate)(unsigned long cpuid);
2043-};
2044+} __no_const;
2045
2046 extern struct psci_operations psci_ops;
2047
2048diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2049index d3a22be..3a69ad5 100644
2050--- a/arch/arm/include/asm/smp.h
2051+++ b/arch/arm/include/asm/smp.h
2052@@ -107,7 +107,7 @@ struct smp_operations {
2053 int (*cpu_disable)(unsigned int cpu);
2054 #endif
2055 #endif
2056-};
2057+} __no_const;
2058
2059 /*
2060 * set platform specific SMP operations
2061diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2062index cddda1f..ff357f7 100644
2063--- a/arch/arm/include/asm/thread_info.h
2064+++ b/arch/arm/include/asm/thread_info.h
2065@@ -77,9 +77,9 @@ struct thread_info {
2066 .flags = 0, \
2067 .preempt_count = INIT_PREEMPT_COUNT, \
2068 .addr_limit = KERNEL_DS, \
2069- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2070- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2071- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2072+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2073+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2074+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2075 .restart_block = { \
2076 .fn = do_no_restart_syscall, \
2077 }, \
2078@@ -152,6 +152,12 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2079 #define TIF_SYSCALL_AUDIT 9
2080 #define TIF_SYSCALL_TRACEPOINT 10
2081 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2082+
2083+/* within 8 bits of TIF_SYSCALL_TRACE
2084+ * to meet flexible second operand requirements
2085+ */
2086+#define TIF_GRSEC_SETXID 12
2087+
2088 #define TIF_USING_IWMMXT 17
2089 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2090 #define TIF_RESTORE_SIGMASK 20
2091@@ -165,10 +171,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2092 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2093 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2094 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2095+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2096
2097 /* Checks for any syscall work in entry-common.S */
2098 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2099- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2100+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2101
2102 /*
2103 * Change these and you break ASM code in entry-common.S
2104diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2105index 7e1f760..752fcb7 100644
2106--- a/arch/arm/include/asm/uaccess.h
2107+++ b/arch/arm/include/asm/uaccess.h
2108@@ -18,6 +18,7 @@
2109 #include <asm/domain.h>
2110 #include <asm/unified.h>
2111 #include <asm/compiler.h>
2112+#include <asm/pgtable.h>
2113
2114 #define VERIFY_READ 0
2115 #define VERIFY_WRITE 1
2116@@ -60,10 +61,34 @@ extern int __put_user_bad(void);
2117 #define USER_DS TASK_SIZE
2118 #define get_fs() (current_thread_info()->addr_limit)
2119
2120+static inline void pax_open_userland(void)
2121+{
2122+
2123+#ifdef CONFIG_PAX_MEMORY_UDEREF
2124+ if (get_fs() == USER_DS) {
2125+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2126+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2127+ }
2128+#endif
2129+
2130+}
2131+
2132+static inline void pax_close_userland(void)
2133+{
2134+
2135+#ifdef CONFIG_PAX_MEMORY_UDEREF
2136+ if (get_fs() == USER_DS) {
2137+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2138+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2139+ }
2140+#endif
2141+
2142+}
2143+
2144 static inline void set_fs(mm_segment_t fs)
2145 {
2146 current_thread_info()->addr_limit = fs;
2147- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2148+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2149 }
2150
2151 #define segment_eq(a,b) ((a) == (b))
2152@@ -143,8 +168,12 @@ extern int __get_user_4(void *);
2153
2154 #define get_user(x,p) \
2155 ({ \
2156+ int __e; \
2157 might_fault(); \
2158- __get_user_check(x,p); \
2159+ pax_open_userland(); \
2160+ __e = __get_user_check(x,p); \
2161+ pax_close_userland(); \
2162+ __e; \
2163 })
2164
2165 extern int __put_user_1(void *, unsigned int);
2166@@ -188,8 +217,12 @@ extern int __put_user_8(void *, unsigned long long);
2167
2168 #define put_user(x,p) \
2169 ({ \
2170+ int __e; \
2171 might_fault(); \
2172- __put_user_check(x,p); \
2173+ pax_open_userland(); \
2174+ __e = __put_user_check(x,p); \
2175+ pax_close_userland(); \
2176+ __e; \
2177 })
2178
2179 #else /* CONFIG_MMU */
2180@@ -230,13 +263,17 @@ static inline void set_fs(mm_segment_t fs)
2181 #define __get_user(x,ptr) \
2182 ({ \
2183 long __gu_err = 0; \
2184+ pax_open_userland(); \
2185 __get_user_err((x),(ptr),__gu_err); \
2186+ pax_close_userland(); \
2187 __gu_err; \
2188 })
2189
2190 #define __get_user_error(x,ptr,err) \
2191 ({ \
2192+ pax_open_userland(); \
2193 __get_user_err((x),(ptr),err); \
2194+ pax_close_userland(); \
2195 (void) 0; \
2196 })
2197
2198@@ -312,13 +349,17 @@ do { \
2199 #define __put_user(x,ptr) \
2200 ({ \
2201 long __pu_err = 0; \
2202+ pax_open_userland(); \
2203 __put_user_err((x),(ptr),__pu_err); \
2204+ pax_close_userland(); \
2205 __pu_err; \
2206 })
2207
2208 #define __put_user_error(x,ptr,err) \
2209 ({ \
2210+ pax_open_userland(); \
2211 __put_user_err((x),(ptr),err); \
2212+ pax_close_userland(); \
2213 (void) 0; \
2214 })
2215
2216@@ -418,11 +459,44 @@ do { \
2217
2218
2219 #ifdef CONFIG_MMU
2220-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2221-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2222+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2223+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2224+
2225+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2226+{
2227+ unsigned long ret;
2228+
2229+ check_object_size(to, n, false);
2230+ pax_open_userland();
2231+ ret = ___copy_from_user(to, from, n);
2232+ pax_close_userland();
2233+ return ret;
2234+}
2235+
2236+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2237+{
2238+ unsigned long ret;
2239+
2240+ check_object_size(from, n, true);
2241+ pax_open_userland();
2242+ ret = ___copy_to_user(to, from, n);
2243+ pax_close_userland();
2244+ return ret;
2245+}
2246+
2247 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2248-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2249+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2250 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2251+
2252+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2253+{
2254+ unsigned long ret;
2255+ pax_open_userland();
2256+ ret = ___clear_user(addr, n);
2257+ pax_close_userland();
2258+ return ret;
2259+}
2260+
2261 #else
2262 #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
2263 #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
2264@@ -431,6 +505,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2265
2266 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2267 {
2268+ if ((long)n < 0)
2269+ return n;
2270+
2271 if (access_ok(VERIFY_READ, from, n))
2272 n = __copy_from_user(to, from, n);
2273 else /* security hole - plug it */
2274@@ -440,6 +517,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2275
2276 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2277 {
2278+ if ((long)n < 0)
2279+ return n;
2280+
2281 if (access_ok(VERIFY_WRITE, to, n))
2282 n = __copy_to_user(to, from, n);
2283 return n;
2284diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2285index 96ee092..37f1844 100644
2286--- a/arch/arm/include/uapi/asm/ptrace.h
2287+++ b/arch/arm/include/uapi/asm/ptrace.h
2288@@ -73,7 +73,7 @@
2289 * ARMv7 groups of PSR bits
2290 */
2291 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2292-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2293+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2294 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2295 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2296
2297diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2298index 60d3b73..d27ee09 100644
2299--- a/arch/arm/kernel/armksyms.c
2300+++ b/arch/arm/kernel/armksyms.c
2301@@ -89,9 +89,9 @@ EXPORT_SYMBOL(__memzero);
2302 #ifdef CONFIG_MMU
2303 EXPORT_SYMBOL(copy_page);
2304
2305-EXPORT_SYMBOL(__copy_from_user);
2306-EXPORT_SYMBOL(__copy_to_user);
2307-EXPORT_SYMBOL(__clear_user);
2308+EXPORT_SYMBOL(___copy_from_user);
2309+EXPORT_SYMBOL(___copy_to_user);
2310+EXPORT_SYMBOL(___clear_user);
2311
2312 EXPORT_SYMBOL(__get_user_1);
2313 EXPORT_SYMBOL(__get_user_2);
2314diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2315index 0f82098..3dbd3ee 100644
2316--- a/arch/arm/kernel/entry-armv.S
2317+++ b/arch/arm/kernel/entry-armv.S
2318@@ -47,6 +47,87 @@
2319 9997:
2320 .endm
2321
2322+ .macro pax_enter_kernel
2323+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2324+ @ make aligned space for saved DACR
2325+ sub sp, sp, #8
2326+ @ save regs
2327+ stmdb sp!, {r1, r2}
2328+ @ read DACR from cpu_domain into r1
2329+ mov r2, sp
2330+ @ assume 8K pages, since we have to split the immediate in two
2331+ bic r2, r2, #(0x1fc0)
2332+ bic r2, r2, #(0x3f)
2333+ ldr r1, [r2, #TI_CPU_DOMAIN]
2334+ @ store old DACR on stack
2335+ str r1, [sp, #8]
2336+#ifdef CONFIG_PAX_KERNEXEC
2337+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2338+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2339+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2340+#endif
2341+#ifdef CONFIG_PAX_MEMORY_UDEREF
2342+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2343+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2344+#endif
2345+ @ write r1 to current_thread_info()->cpu_domain
2346+ str r1, [r2, #TI_CPU_DOMAIN]
2347+ @ write r1 to DACR
2348+ mcr p15, 0, r1, c3, c0, 0
2349+ @ instruction sync
2350+ instr_sync
2351+ @ restore regs
2352+ ldmia sp!, {r1, r2}
2353+#endif
2354+ .endm
2355+
2356+ .macro pax_open_userland
2357+#ifdef CONFIG_PAX_MEMORY_UDEREF
2358+ @ save regs
2359+ stmdb sp!, {r0, r1}
2360+ @ read DACR from cpu_domain into r1
2361+ mov r0, sp
2362+ @ assume 8K pages, since we have to split the immediate in two
2363+ bic r0, r0, #(0x1fc0)
2364+ bic r0, r0, #(0x3f)
2365+ ldr r1, [r0, #TI_CPU_DOMAIN]
2366+ @ set current DOMAIN_USER to DOMAIN_CLIENT
2367+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2368+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2369+ @ write r1 to current_thread_info()->cpu_domain
2370+ str r1, [r0, #TI_CPU_DOMAIN]
2371+ @ write r1 to DACR
2372+ mcr p15, 0, r1, c3, c0, 0
2373+ @ instruction sync
2374+ instr_sync
2375+ @ restore regs
2376+ ldmia sp!, {r0, r1}
2377+#endif
2378+ .endm
2379+
2380+ .macro pax_close_userland
2381+#ifdef CONFIG_PAX_MEMORY_UDEREF
2382+ @ save regs
2383+ stmdb sp!, {r0, r1}
2384+ @ read DACR from cpu_domain into r1
2385+ mov r0, sp
2386+ @ assume 8K pages, since we have to split the immediate in two
2387+ bic r0, r0, #(0x1fc0)
2388+ bic r0, r0, #(0x3f)
2389+ ldr r1, [r0, #TI_CPU_DOMAIN]
2390+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2391+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2392+ @ write r1 to current_thread_info()->cpu_domain
2393+ str r1, [r0, #TI_CPU_DOMAIN]
2394+ @ write r1 to DACR
2395+ mcr p15, 0, r1, c3, c0, 0
2396+ @ instruction sync
2397+ instr_sync
2398+ @ restore regs
2399+ ldmia sp!, {r0, r1}
2400+#endif
2401+ .endm
2402+
2403 .macro pabt_helper
2404 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2405 #ifdef MULTI_PABORT
2406@@ -89,11 +170,15 @@
2407 * Invalid mode handlers
2408 */
2409 .macro inv_entry, reason
2410+
2411+ pax_enter_kernel
2412+
2413 sub sp, sp, #S_FRAME_SIZE
2414 ARM( stmib sp, {r1 - lr} )
2415 THUMB( stmia sp, {r0 - r12} )
2416 THUMB( str sp, [sp, #S_SP] )
2417 THUMB( str lr, [sp, #S_LR] )
2418+
2419 mov r1, #\reason
2420 .endm
2421
2422@@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
2423 .macro svc_entry, stack_hole=0
2424 UNWIND(.fnstart )
2425 UNWIND(.save {r0 - pc} )
2426+
2427+ pax_enter_kernel
2428+
2429 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2430+
2431 #ifdef CONFIG_THUMB2_KERNEL
2432 SPFIX( str r0, [sp] ) @ temporarily saved
2433 SPFIX( mov r0, sp )
2434@@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
2435 ldmia r0, {r3 - r5}
2436 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2437 mov r6, #-1 @ "" "" "" ""
2438+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2439+ @ offset sp by 8 as done in pax_enter_kernel
2440+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2441+#else
2442 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2443+#endif
2444 SPFIX( addeq r2, r2, #4 )
2445 str r3, [sp, #-4]! @ save the "real" r0 copied
2446 @ from the exception stack
2447@@ -359,6 +453,9 @@ ENDPROC(__pabt_svc)
2448 .macro usr_entry
2449 UNWIND(.fnstart )
2450 UNWIND(.cantunwind ) @ don't unwind the user space
2451+
2452+ pax_enter_kernel_user
2453+
2454 sub sp, sp, #S_FRAME_SIZE
2455 ARM( stmib sp, {r1 - r12} )
2456 THUMB( stmia sp, {r0 - r12} )
2457@@ -456,7 +553,9 @@ __und_usr:
2458 tst r3, #PSR_T_BIT @ Thumb mode?
2459 bne __und_usr_thumb
2460 sub r4, r2, #4 @ ARM instr at LR - 4
2461+ pax_open_userland
2462 1: ldrt r0, [r4]
2463+ pax_close_userland
2464 #ifdef CONFIG_CPU_ENDIAN_BE8
2465 rev r0, r0 @ little endian instruction
2466 #endif
2467@@ -491,10 +590,14 @@ __und_usr_thumb:
2468 */
2469 .arch armv6t2
2470 #endif
2471+ pax_open_userland
2472 2: ldrht r5, [r4]
2473+ pax_close_userland
2474 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2475 blo __und_usr_fault_16 @ 16bit undefined instruction
2476+ pax_open_userland
2477 3: ldrht r0, [r2]
2478+ pax_close_userland
2479 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2480 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2481 orr r0, r0, r5, lsl #16
2482@@ -733,7 +836,7 @@ ENTRY(__switch_to)
2483 THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack
2484 THUMB( str sp, [ip], #4 )
2485 THUMB( str lr, [ip], #4 )
2486-#ifdef CONFIG_CPU_USE_DOMAINS
2487+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC)
2488 ldr r6, [r2, #TI_CPU_DOMAIN]
2489 #endif
2490 set_tls r3, r4, r5
2491@@ -742,7 +845,7 @@ ENTRY(__switch_to)
2492 ldr r8, =__stack_chk_guard
2493 ldr r7, [r7, #TSK_STACK_CANARY]
2494 #endif
2495-#ifdef CONFIG_CPU_USE_DOMAINS
2496+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC)
2497 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2498 #endif
2499 mov r5, r0
2500diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2501index fefd7f9..e6f250e 100644
2502--- a/arch/arm/kernel/entry-common.S
2503+++ b/arch/arm/kernel/entry-common.S
2504@@ -10,18 +10,46 @@
2505
2506 #include <asm/unistd.h>
2507 #include <asm/ftrace.h>
2508+#include <asm/domain.h>
2509 #include <asm/unwind.h>
2510
2511+#include "entry-header.S"
2512+
2513 #ifdef CONFIG_NEED_RET_TO_USER
2514 #include <mach/entry-macro.S>
2515 #else
2516 .macro arch_ret_to_user, tmp1, tmp2
2517+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2518+ @ save regs
2519+ stmdb sp!, {r1, r2}
2520+ @ read DACR from cpu_domain into r1
2521+ mov r2, sp
2522+ @ assume 8K pages, since we have to split the immediate in two
2523+ bic r2, r2, #(0x1fc0)
2524+ bic r2, r2, #(0x3f)
2525+ ldr r1, [r2, #TI_CPU_DOMAIN]
2526+#ifdef CONFIG_PAX_KERNEXEC
2527+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2528+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2529+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2530+#endif
2531+#ifdef CONFIG_PAX_MEMORY_UDEREF
2532+ @ set current DOMAIN_USER to DOMAIN_UDEREF
2533+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2534+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2535+#endif
2536+ @ write r1 to current_thread_info()->cpu_domain
2537+ str r1, [r2, #TI_CPU_DOMAIN]
2538+ @ write r1 to DACR
2539+ mcr p15, 0, r1, c3, c0, 0
2540+ @ instruction sync
2541+ instr_sync
2542+ @ restore regs
2543+ ldmia sp!, {r1, r2}
2544+#endif
2545 .endm
2546 #endif
2547
2548-#include "entry-header.S"
2549-
2550-
2551 .align 5
2552 /*
2553 * This is the fast syscall return path. We do as little as
2554@@ -351,6 +379,7 @@ ENDPROC(ftrace_stub)
2555
2556 .align 5
2557 ENTRY(vector_swi)
2558+
2559 sub sp, sp, #S_FRAME_SIZE
2560 stmia sp, {r0 - r12} @ Calling r0 - r12
2561 ARM( add r8, sp, #S_PC )
2562@@ -400,6 +429,12 @@ ENTRY(vector_swi)
2563 ldr scno, [lr, #-4] @ get SWI instruction
2564 #endif
2565
2566+ /*
2567+ * do this here to avoid a performance hit of wrapping the code above
2568+ * that directly dereferences userland to parse the SWI instruction
2569+ */
2570+ pax_enter_kernel_user
2571+
2572 #ifdef CONFIG_ALIGNMENT_TRAP
2573 ldr ip, __cr_alignment
2574 ldr ip, [ip]
2575diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2576index 9a8531e..812e287 100644
2577--- a/arch/arm/kernel/entry-header.S
2578+++ b/arch/arm/kernel/entry-header.S
2579@@ -73,9 +73,66 @@
2580 msr cpsr_c, \rtemp @ switch back to the SVC mode
2581 .endm
2582
2583+ .macro pax_enter_kernel_user
2584+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2585+ @ save regs
2586+ stmdb sp!, {r0, r1}
2587+ @ read DACR from cpu_domain into r1
2588+ mov r0, sp
2589+ @ assume 8K pages, since we have to split the immediate in two
2590+ bic r0, r0, #(0x1fc0)
2591+ bic r0, r0, #(0x3f)
2592+ ldr r1, [r0, #TI_CPU_DOMAIN]
2593+#ifdef CONFIG_PAX_MEMORY_UDEREF
2594+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2595+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2596+#endif
2597+#ifdef CONFIG_PAX_KERNEXEC
2598+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2599+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2600+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2601+#endif
2602+ @ write r1 to current_thread_info()->cpu_domain
2603+ str r1, [r0, #TI_CPU_DOMAIN]
2604+ @ write r1 to DACR
2605+ mcr p15, 0, r1, c3, c0, 0
2606+ @ instruction sync
2607+ instr_sync
2608+ @ restore regs
2609+ ldmia sp!, {r0, r1}
2610+#endif
2611+ .endm
2612+
2613+ .macro pax_exit_kernel
2614+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2615+ @ save regs
2616+ stmdb sp!, {r0, r1}
2617+ @ read old DACR from stack into r1
2618+ ldr r1, [sp, #(8 + S_SP)]
2619+ sub r1, r1, #8
2620+ ldr r1, [r1]
2621+
2622+ @ write r1 to current_thread_info()->cpu_domain
2623+ mov r0, sp
2624+ @ assume 8K pages, since we have to split the immediate in two
2625+ bic r0, r0, #(0x1fc0)
2626+ bic r0, r0, #(0x3f)
2627+ str r1, [r0, #TI_CPU_DOMAIN]
2628+ @ write r1 to DACR
2629+ mcr p15, 0, r1, c3, c0, 0
2630+ @ instruction sync
2631+ instr_sync
2632+ @ restore regs
2633+ ldmia sp!, {r0, r1}
2634+#endif
2635+ .endm
2636+
2637 #ifndef CONFIG_THUMB2_KERNEL
2638 .macro svc_exit, rpsr
2639 msr spsr_cxsf, \rpsr
2640+
2641+ pax_exit_kernel
2642+
2643 #if defined(CONFIG_CPU_V6)
2644 ldr r0, [sp]
2645 strex r1, r2, [sp] @ clear the exclusive monitor
2646@@ -121,6 +178,9 @@
2647 .endm
2648 #else /* CONFIG_THUMB2_KERNEL */
2649 .macro svc_exit, rpsr
2650+
2651+ pax_exit_kernel
2652+
2653 ldr lr, [sp, #S_SP] @ top of the stack
2654 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2655 clrex @ clear the exclusive monitor
2656diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2657index 2adda11..7fbe958 100644
2658--- a/arch/arm/kernel/fiq.c
2659+++ b/arch/arm/kernel/fiq.c
2660@@ -82,7 +82,9 @@ void set_fiq_handler(void *start, unsigned int length)
2661 #if defined(CONFIG_CPU_USE_DOMAINS)
2662 memcpy((void *)0xffff001c, start, length);
2663 #else
2664+ pax_open_kernel();
2665 memcpy(vectors_page + 0x1c, start, length);
2666+ pax_close_kernel();
2667 #endif
2668 flush_icache_range(0xffff001c, 0xffff001c + length);
2669 if (!vectors_high())
2670diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2671index 8bac553..caee108 100644
2672--- a/arch/arm/kernel/head.S
2673+++ b/arch/arm/kernel/head.S
2674@@ -52,7 +52,9 @@
2675 .equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
2676
2677 .macro pgtbl, rd, phys
2678- add \rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE
2679+ mov \rd, #TEXT_OFFSET
2680+ sub \rd, #PG_DIR_SIZE
2681+ add \rd, \rd, \phys
2682 .endm
2683
2684 /*
2685@@ -434,7 +436,7 @@ __enable_mmu:
2686 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2687 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2688 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2689- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2690+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2691 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2692 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2693 #endif
2694diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
2695index 1fd749e..47adb08 100644
2696--- a/arch/arm/kernel/hw_breakpoint.c
2697+++ b/arch/arm/kernel/hw_breakpoint.c
2698@@ -1029,7 +1029,7 @@ static int __cpuinit dbg_reset_notify(struct notifier_block *self,
2699 return NOTIFY_OK;
2700 }
2701
2702-static struct notifier_block __cpuinitdata dbg_reset_nb = {
2703+static struct notifier_block dbg_reset_nb = {
2704 .notifier_call = dbg_reset_notify,
2705 };
2706
2707diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2708index 1e9be5d..03edbc2 100644
2709--- a/arch/arm/kernel/module.c
2710+++ b/arch/arm/kernel/module.c
2711@@ -37,12 +37,37 @@
2712 #endif
2713
2714 #ifdef CONFIG_MMU
2715-void *module_alloc(unsigned long size)
2716+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2717 {
2718+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2719+ return NULL;
2720 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2721- GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
2722+ GFP_KERNEL, prot, -1,
2723 __builtin_return_address(0));
2724 }
2725+
2726+void *module_alloc(unsigned long size)
2727+{
2728+
2729+#ifdef CONFIG_PAX_KERNEXEC
2730+ return __module_alloc(size, PAGE_KERNEL);
2731+#else
2732+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2733+#endif
2734+
2735+}
2736+
2737+#ifdef CONFIG_PAX_KERNEXEC
2738+void module_free_exec(struct module *mod, void *module_region)
2739+{
2740+ module_free(mod, module_region);
2741+}
2742+
2743+void *module_alloc_exec(unsigned long size)
2744+{
2745+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2746+}
2747+#endif
2748 #endif
2749
2750 int
2751diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2752index 07314af..c46655c 100644
2753--- a/arch/arm/kernel/patch.c
2754+++ b/arch/arm/kernel/patch.c
2755@@ -18,6 +18,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2756 bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
2757 int size;
2758
2759+ pax_open_kernel();
2760 if (thumb2 && __opcode_is_thumb16(insn)) {
2761 *(u16 *)addr = __opcode_to_mem_thumb16(insn);
2762 size = sizeof(u16);
2763@@ -39,6 +40,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2764 *(u32 *)addr = insn;
2765 size = sizeof(u32);
2766 }
2767+ pax_close_kernel();
2768
2769 flush_icache_range((uintptr_t)(addr),
2770 (uintptr_t)(addr) + size);
2771diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
2772index 1f2740e..b36e225 100644
2773--- a/arch/arm/kernel/perf_event_cpu.c
2774+++ b/arch/arm/kernel/perf_event_cpu.c
2775@@ -171,7 +171,7 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
2776 return NOTIFY_OK;
2777 }
2778
2779-static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = {
2780+static struct notifier_block cpu_pmu_hotplug_notifier = {
2781 .notifier_call = cpu_pmu_notify,
2782 };
2783
2784diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2785index 047d3e4..7e96107 100644
2786--- a/arch/arm/kernel/process.c
2787+++ b/arch/arm/kernel/process.c
2788@@ -28,7 +28,6 @@
2789 #include <linux/tick.h>
2790 #include <linux/utsname.h>
2791 #include <linux/uaccess.h>
2792-#include <linux/random.h>
2793 #include <linux/hw_breakpoint.h>
2794 #include <linux/cpuidle.h>
2795 #include <linux/leds.h>
2796@@ -251,9 +250,10 @@ void machine_power_off(void)
2797 machine_shutdown();
2798 if (pm_power_off)
2799 pm_power_off();
2800+ BUG();
2801 }
2802
2803-void machine_restart(char *cmd)
2804+__noreturn void machine_restart(char *cmd)
2805 {
2806 machine_shutdown();
2807
2808@@ -278,8 +278,8 @@ void __show_regs(struct pt_regs *regs)
2809 init_utsname()->release,
2810 (int)strcspn(init_utsname()->version, " "),
2811 init_utsname()->version);
2812- print_symbol("PC is at %s\n", instruction_pointer(regs));
2813- print_symbol("LR is at %s\n", regs->ARM_lr);
2814+ printk("PC is at %pA\n", instruction_pointer(regs));
2815+ printk("LR is at %pA\n", regs->ARM_lr);
2816 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2817 "sp : %08lx ip : %08lx fp : %08lx\n",
2818 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2819@@ -447,12 +447,6 @@ unsigned long get_wchan(struct task_struct *p)
2820 return 0;
2821 }
2822
2823-unsigned long arch_randomize_brk(struct mm_struct *mm)
2824-{
2825- unsigned long range_end = mm->brk + 0x02000000;
2826- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2827-}
2828-
2829 #ifdef CONFIG_MMU
2830 /*
2831 * The vectors page is always readable from user space for the
2832@@ -465,9 +459,8 @@ static int __init gate_vma_init(void)
2833 {
2834 gate_vma.vm_start = 0xffff0000;
2835 gate_vma.vm_end = 0xffff0000 + PAGE_SIZE;
2836- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
2837- gate_vma.vm_flags = VM_READ | VM_EXEC |
2838- VM_MAYREAD | VM_MAYEXEC;
2839+ gate_vma.vm_flags = VM_NONE;
2840+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
2841 return 0;
2842 }
2843 arch_initcall(gate_vma_init);
2844diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
2845index 3653164..d83e55d 100644
2846--- a/arch/arm/kernel/psci.c
2847+++ b/arch/arm/kernel/psci.c
2848@@ -24,7 +24,7 @@
2849 #include <asm/opcodes-virt.h>
2850 #include <asm/psci.h>
2851
2852-struct psci_operations psci_ops;
2853+struct psci_operations psci_ops __read_only;
2854
2855 static int (*invoke_psci_fn)(u32, u32, u32, u32);
2856
2857diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
2858index 03deeff..741ce88 100644
2859--- a/arch/arm/kernel/ptrace.c
2860+++ b/arch/arm/kernel/ptrace.c
2861@@ -937,10 +937,19 @@ static int tracehook_report_syscall(struct pt_regs *regs,
2862 return current_thread_info()->syscall;
2863 }
2864
2865+#ifdef CONFIG_GRKERNSEC_SETXID
2866+extern void gr_delayed_cred_worker(void);
2867+#endif
2868+
2869 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
2870 {
2871 current_thread_info()->syscall = scno;
2872
2873+#ifdef CONFIG_GRKERNSEC_SETXID
2874+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
2875+ gr_delayed_cred_worker();
2876+#endif
2877+
2878 /* Do the secure computing check first; failures should be fast. */
2879 if (secure_computing(scno) == -1)
2880 return -1;
2881diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
2882index 234e339..81264a1 100644
2883--- a/arch/arm/kernel/setup.c
2884+++ b/arch/arm/kernel/setup.c
2885@@ -96,21 +96,23 @@ EXPORT_SYMBOL(system_serial_high);
2886 unsigned int elf_hwcap __read_mostly;
2887 EXPORT_SYMBOL(elf_hwcap);
2888
2889+pteval_t __supported_pte_mask __read_only;
2890+pmdval_t __supported_pmd_mask __read_only;
2891
2892 #ifdef MULTI_CPU
2893-struct processor processor __read_mostly;
2894+struct processor processor;
2895 #endif
2896 #ifdef MULTI_TLB
2897-struct cpu_tlb_fns cpu_tlb __read_mostly;
2898+struct cpu_tlb_fns cpu_tlb __read_only;
2899 #endif
2900 #ifdef MULTI_USER
2901-struct cpu_user_fns cpu_user __read_mostly;
2902+struct cpu_user_fns cpu_user __read_only;
2903 #endif
2904 #ifdef MULTI_CACHE
2905-struct cpu_cache_fns cpu_cache __read_mostly;
2906+struct cpu_cache_fns cpu_cache __read_only;
2907 #endif
2908 #ifdef CONFIG_OUTER_CACHE
2909-struct outer_cache_fns outer_cache __read_mostly;
2910+struct outer_cache_fns outer_cache __read_only;
2911 EXPORT_SYMBOL(outer_cache);
2912 #endif
2913
2914@@ -235,9 +237,13 @@ static int __get_cpu_architecture(void)
2915 asm("mrc p15, 0, %0, c0, c1, 4"
2916 : "=r" (mmfr0));
2917 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
2918- (mmfr0 & 0x000000f0) >= 0x00000030)
2919+ (mmfr0 & 0x000000f0) >= 0x00000030) {
2920 cpu_arch = CPU_ARCH_ARMv7;
2921- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
2922+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
2923+ __supported_pte_mask |= L_PTE_PXN;
2924+ __supported_pmd_mask |= PMD_PXNTABLE;
2925+ }
2926+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
2927 (mmfr0 & 0x000000f0) == 0x00000020)
2928 cpu_arch = CPU_ARCH_ARMv6;
2929 else
2930@@ -478,7 +484,7 @@ static void __init setup_processor(void)
2931 __cpu_architecture = __get_cpu_architecture();
2932
2933 #ifdef MULTI_CPU
2934- processor = *list->proc;
2935+ memcpy((void *)&processor, list->proc, sizeof processor);
2936 #endif
2937 #ifdef MULTI_TLB
2938 cpu_tlb = *list->tlb;
2939diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
2940index 296786b..a8d4dd5 100644
2941--- a/arch/arm/kernel/signal.c
2942+++ b/arch/arm/kernel/signal.c
2943@@ -396,22 +396,14 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
2944 __put_user(sigreturn_codes[idx+1], rc+1))
2945 return 1;
2946
2947- if (cpsr & MODE32_BIT) {
2948- /*
2949- * 32-bit code can use the new high-page
2950- * signal return code support.
2951- */
2952- retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb;
2953- } else {
2954- /*
2955- * Ensure that the instruction cache sees
2956- * the return code written onto the stack.
2957- */
2958- flush_icache_range((unsigned long)rc,
2959- (unsigned long)(rc + 2));
2960+ /*
2961+ * Ensure that the instruction cache sees
2962+ * the return code written onto the stack.
2963+ */
2964+ flush_icache_range((unsigned long)rc,
2965+ (unsigned long)(rc + 2));
2966
2967- retcode = ((unsigned long)rc) + thumb;
2968- }
2969+ retcode = ((unsigned long)rc) + thumb;
2970 }
2971
2972 regs->ARM_r0 = map_sig(ksig->sig);
2973diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
2974index 1f2cccc..f40c02e 100644
2975--- a/arch/arm/kernel/smp.c
2976+++ b/arch/arm/kernel/smp.c
2977@@ -70,7 +70,7 @@ enum ipi_msg_type {
2978
2979 static DECLARE_COMPLETION(cpu_running);
2980
2981-static struct smp_operations smp_ops;
2982+static struct smp_operations smp_ops __read_only;
2983
2984 void __init smp_set_ops(struct smp_operations *ops)
2985 {
2986diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
2987index 1c08911..264f009 100644
2988--- a/arch/arm/kernel/traps.c
2989+++ b/arch/arm/kernel/traps.c
2990@@ -57,7 +57,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
2991 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
2992 {
2993 #ifdef CONFIG_KALLSYMS
2994- printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
2995+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
2996 #else
2997 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
2998 #endif
2999@@ -266,6 +266,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3000 static int die_owner = -1;
3001 static unsigned int die_nest_count;
3002
3003+extern void gr_handle_kernel_exploit(void);
3004+
3005 static unsigned long oops_begin(void)
3006 {
3007 int cpu;
3008@@ -308,6 +310,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3009 panic("Fatal exception in interrupt");
3010 if (panic_on_oops)
3011 panic("Fatal exception");
3012+
3013+ gr_handle_kernel_exploit();
3014+
3015 if (signr)
3016 do_exit(signr);
3017 }
3018@@ -601,7 +606,9 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
3019 * The user helper at 0xffff0fe0 must be used instead.
3020 * (see entry-armv.S for details)
3021 */
3022+ pax_open_kernel();
3023 *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
3024+ pax_close_kernel();
3025 }
3026 return 0;
3027
3028@@ -841,13 +848,10 @@ void __init early_trap_init(void *vectors_base)
3029 */
3030 kuser_get_tls_init(vectors);
3031
3032- /*
3033- * Copy signal return handlers into the vector page, and
3034- * set sigreturn to be a pointer to these.
3035- */
3036- memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
3037- sigreturn_codes, sizeof(sigreturn_codes));
3038-
3039 flush_icache_range(vectors, vectors + PAGE_SIZE);
3040- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3041+
3042+#ifndef CONFIG_PAX_MEMORY_UDEREF
3043+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3044+#endif
3045+
3046 }
3047diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3048index b571484..4b2fc9b 100644
3049--- a/arch/arm/kernel/vmlinux.lds.S
3050+++ b/arch/arm/kernel/vmlinux.lds.S
3051@@ -8,7 +8,11 @@
3052 #include <asm/thread_info.h>
3053 #include <asm/memory.h>
3054 #include <asm/page.h>
3055-
3056+
3057+#ifdef CONFIG_PAX_KERNEXEC
3058+#include <asm/pgtable.h>
3059+#endif
3060+
3061 #define PROC_INFO \
3062 . = ALIGN(4); \
3063 VMLINUX_SYMBOL(__proc_info_begin) = .; \
3064@@ -94,6 +98,11 @@ SECTIONS
3065 _text = .;
3066 HEAD_TEXT
3067 }
3068+
3069+#ifdef CONFIG_PAX_KERNEXEC
3070+ . = ALIGN(1<<SECTION_SHIFT);
3071+#endif
3072+
3073 .text : { /* Real text segment */
3074 _stext = .; /* Text and read-only data */
3075 __exception_text_start = .;
3076@@ -116,6 +125,8 @@ SECTIONS
3077 ARM_CPU_KEEP(PROC_INFO)
3078 }
3079
3080+ _etext = .; /* End of text section */
3081+
3082 RO_DATA(PAGE_SIZE)
3083
3084 . = ALIGN(4);
3085@@ -146,7 +157,9 @@ SECTIONS
3086
3087 NOTES
3088
3089- _etext = .; /* End of text and rodata section */
3090+#ifdef CONFIG_PAX_KERNEXEC
3091+ . = ALIGN(1<<SECTION_SHIFT);
3092+#endif
3093
3094 #ifndef CONFIG_XIP_KERNEL
3095 . = ALIGN(PAGE_SIZE);
3096@@ -207,6 +220,11 @@ SECTIONS
3097 . = PAGE_OFFSET + TEXT_OFFSET;
3098 #else
3099 __init_end = .;
3100+
3101+#ifdef CONFIG_PAX_KERNEXEC
3102+ . = ALIGN(1<<SECTION_SHIFT);
3103+#endif
3104+
3105 . = ALIGN(THREAD_SIZE);
3106 __data_loc = .;
3107 #endif
3108diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3109index 14a0d98..7771a7d 100644
3110--- a/arch/arm/lib/clear_user.S
3111+++ b/arch/arm/lib/clear_user.S
3112@@ -12,14 +12,14 @@
3113
3114 .text
3115
3116-/* Prototype: int __clear_user(void *addr, size_t sz)
3117+/* Prototype: int ___clear_user(void *addr, size_t sz)
3118 * Purpose : clear some user memory
3119 * Params : addr - user memory address to clear
3120 * : sz - number of bytes to clear
3121 * Returns : number of bytes NOT cleared
3122 */
3123 ENTRY(__clear_user_std)
3124-WEAK(__clear_user)
3125+WEAK(___clear_user)
3126 stmfd sp!, {r1, lr}
3127 mov r2, #0
3128 cmp r1, #4
3129@@ -44,7 +44,7 @@ WEAK(__clear_user)
3130 USER( strnebt r2, [r0])
3131 mov r0, #0
3132 ldmfd sp!, {r1, pc}
3133-ENDPROC(__clear_user)
3134+ENDPROC(___clear_user)
3135 ENDPROC(__clear_user_std)
3136
3137 .pushsection .fixup,"ax"
3138diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3139index 66a477a..bee61d3 100644
3140--- a/arch/arm/lib/copy_from_user.S
3141+++ b/arch/arm/lib/copy_from_user.S
3142@@ -16,7 +16,7 @@
3143 /*
3144 * Prototype:
3145 *
3146- * size_t __copy_from_user(void *to, const void *from, size_t n)
3147+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3148 *
3149 * Purpose:
3150 *
3151@@ -84,11 +84,11 @@
3152
3153 .text
3154
3155-ENTRY(__copy_from_user)
3156+ENTRY(___copy_from_user)
3157
3158 #include "copy_template.S"
3159
3160-ENDPROC(__copy_from_user)
3161+ENDPROC(___copy_from_user)
3162
3163 .pushsection .fixup,"ax"
3164 .align 0
3165diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3166index 6ee2f67..d1cce76 100644
3167--- a/arch/arm/lib/copy_page.S
3168+++ b/arch/arm/lib/copy_page.S
3169@@ -10,6 +10,7 @@
3170 * ASM optimised string functions
3171 */
3172 #include <linux/linkage.h>
3173+#include <linux/const.h>
3174 #include <asm/assembler.h>
3175 #include <asm/asm-offsets.h>
3176 #include <asm/cache.h>
3177diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3178index d066df6..df28194 100644
3179--- a/arch/arm/lib/copy_to_user.S
3180+++ b/arch/arm/lib/copy_to_user.S
3181@@ -16,7 +16,7 @@
3182 /*
3183 * Prototype:
3184 *
3185- * size_t __copy_to_user(void *to, const void *from, size_t n)
3186+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3187 *
3188 * Purpose:
3189 *
3190@@ -88,11 +88,11 @@
3191 .text
3192
3193 ENTRY(__copy_to_user_std)
3194-WEAK(__copy_to_user)
3195+WEAK(___copy_to_user)
3196
3197 #include "copy_template.S"
3198
3199-ENDPROC(__copy_to_user)
3200+ENDPROC(___copy_to_user)
3201 ENDPROC(__copy_to_user_std)
3202
3203 .pushsection .fixup,"ax"
3204diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3205index 7d08b43..f7ca7ea 100644
3206--- a/arch/arm/lib/csumpartialcopyuser.S
3207+++ b/arch/arm/lib/csumpartialcopyuser.S
3208@@ -57,8 +57,8 @@
3209 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3210 */
3211
3212-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3213-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3214+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3215+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3216
3217 #include "csumpartialcopygeneric.S"
3218
3219diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3220index 64dbfa5..84a3fd9 100644
3221--- a/arch/arm/lib/delay.c
3222+++ b/arch/arm/lib/delay.c
3223@@ -28,7 +28,7 @@
3224 /*
3225 * Default to the loop-based delay implementation.
3226 */
3227-struct arm_delay_ops arm_delay_ops = {
3228+struct arm_delay_ops arm_delay_ops __read_only = {
3229 .delay = __loop_delay,
3230 .const_udelay = __loop_const_udelay,
3231 .udelay = __loop_udelay,
3232diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3233index 025f742..8432b08 100644
3234--- a/arch/arm/lib/uaccess_with_memcpy.c
3235+++ b/arch/arm/lib/uaccess_with_memcpy.c
3236@@ -104,7 +104,7 @@ out:
3237 }
3238
3239 unsigned long
3240-__copy_to_user(void __user *to, const void *from, unsigned long n)
3241+___copy_to_user(void __user *to, const void *from, unsigned long n)
3242 {
3243 /*
3244 * This test is stubbed out of the main function above to keep
3245diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
3246index 49792a0..f192052 100644
3247--- a/arch/arm/mach-kirkwood/common.c
3248+++ b/arch/arm/mach-kirkwood/common.c
3249@@ -150,7 +150,16 @@ static void clk_gate_fn_disable(struct clk_hw *hw)
3250 clk_gate_ops.disable(hw);
3251 }
3252
3253-static struct clk_ops clk_gate_fn_ops;
3254+static int clk_gate_fn_is_enabled(struct clk_hw *hw)
3255+{
3256+ return clk_gate_ops.is_enabled(hw);
3257+}
3258+
3259+static struct clk_ops clk_gate_fn_ops = {
3260+ .enable = clk_gate_fn_enable,
3261+ .disable = clk_gate_fn_disable,
3262+ .is_enabled = clk_gate_fn_is_enabled,
3263+};
3264
3265 static struct clk __init *clk_register_gate_fn(struct device *dev,
3266 const char *name,
3267@@ -184,14 +193,6 @@ static struct clk __init *clk_register_gate_fn(struct device *dev,
3268 gate_fn->fn_en = fn_en;
3269 gate_fn->fn_dis = fn_dis;
3270
3271- /* ops is the gate ops, but with our enable/disable functions */
3272- if (clk_gate_fn_ops.enable != clk_gate_fn_enable ||
3273- clk_gate_fn_ops.disable != clk_gate_fn_disable) {
3274- clk_gate_fn_ops = clk_gate_ops;
3275- clk_gate_fn_ops.enable = clk_gate_fn_enable;
3276- clk_gate_fn_ops.disable = clk_gate_fn_disable;
3277- }
3278-
3279 clk = clk_register(dev, &gate_fn->gate.hw);
3280
3281 if (IS_ERR(clk))
3282diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3283index f6eeb87..cc90868 100644
3284--- a/arch/arm/mach-omap2/board-n8x0.c
3285+++ b/arch/arm/mach-omap2/board-n8x0.c
3286@@ -631,7 +631,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3287 }
3288 #endif
3289
3290-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3291+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3292 .late_init = n8x0_menelaus_late_init,
3293 };
3294
3295diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
3296index 410e1ba..1d2dd59 100644
3297--- a/arch/arm/mach-omap2/gpmc.c
3298+++ b/arch/arm/mach-omap2/gpmc.c
3299@@ -145,7 +145,6 @@ struct omap3_gpmc_regs {
3300 };
3301
3302 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
3303-static struct irq_chip gpmc_irq_chip;
3304 static unsigned gpmc_irq_start;
3305
3306 static struct resource gpmc_mem_root;
3307@@ -707,6 +706,18 @@ static void gpmc_irq_noop(struct irq_data *data) { }
3308
3309 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
3310
3311+static struct irq_chip gpmc_irq_chip = {
3312+ .name = "gpmc",
3313+ .irq_startup = gpmc_irq_noop_ret,
3314+ .irq_enable = gpmc_irq_enable,
3315+ .irq_disable = gpmc_irq_disable,
3316+ .irq_shutdown = gpmc_irq_noop,
3317+ .irq_ack = gpmc_irq_noop,
3318+ .irq_mask = gpmc_irq_noop,
3319+ .irq_unmask = gpmc_irq_noop,
3320+
3321+};
3322+
3323 static int gpmc_setup_irq(void)
3324 {
3325 int i;
3326@@ -721,15 +732,6 @@ static int gpmc_setup_irq(void)
3327 return gpmc_irq_start;
3328 }
3329
3330- gpmc_irq_chip.name = "gpmc";
3331- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
3332- gpmc_irq_chip.irq_enable = gpmc_irq_enable;
3333- gpmc_irq_chip.irq_disable = gpmc_irq_disable;
3334- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
3335- gpmc_irq_chip.irq_ack = gpmc_irq_noop;
3336- gpmc_irq_chip.irq_mask = gpmc_irq_noop;
3337- gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
3338-
3339 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
3340 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
3341
3342diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3343index f8bb3b9..831e7b8 100644
3344--- a/arch/arm/mach-omap2/omap-wakeupgen.c
3345+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3346@@ -339,7 +339,7 @@ static int __cpuinit irq_cpu_hotplug_notify(struct notifier_block *self,
3347 return NOTIFY_OK;
3348 }
3349
3350-static struct notifier_block __refdata irq_hotplug_notifier = {
3351+static struct notifier_block irq_hotplug_notifier = {
3352 .notifier_call = irq_cpu_hotplug_notify,
3353 };
3354
3355diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3356index 381be7a..89b9c7e 100644
3357--- a/arch/arm/mach-omap2/omap_device.c
3358+++ b/arch/arm/mach-omap2/omap_device.c
3359@@ -499,7 +499,7 @@ void omap_device_delete(struct omap_device *od)
3360 struct platform_device __init *omap_device_build(const char *pdev_name,
3361 int pdev_id,
3362 struct omap_hwmod *oh,
3363- void *pdata, int pdata_len)
3364+ const void *pdata, int pdata_len)
3365 {
3366 struct omap_hwmod *ohs[] = { oh };
3367
3368@@ -527,7 +527,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
3369 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
3370 int pdev_id,
3371 struct omap_hwmod **ohs,
3372- int oh_cnt, void *pdata,
3373+ int oh_cnt, const void *pdata,
3374 int pdata_len)
3375 {
3376 int ret = -ENOMEM;
3377diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
3378index 044c31d..2ee0861 100644
3379--- a/arch/arm/mach-omap2/omap_device.h
3380+++ b/arch/arm/mach-omap2/omap_device.h
3381@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
3382 /* Core code interface */
3383
3384 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
3385- struct omap_hwmod *oh, void *pdata,
3386+ struct omap_hwmod *oh, const void *pdata,
3387 int pdata_len);
3388
3389 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
3390 struct omap_hwmod **oh, int oh_cnt,
3391- void *pdata, int pdata_len);
3392+ const void *pdata, int pdata_len);
3393
3394 struct omap_device *omap_device_alloc(struct platform_device *pdev,
3395 struct omap_hwmod **ohs, int oh_cnt);
3396diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3397index 3a750de..4c9b88f 100644
3398--- a/arch/arm/mach-omap2/omap_hwmod.c
3399+++ b/arch/arm/mach-omap2/omap_hwmod.c
3400@@ -191,10 +191,10 @@ struct omap_hwmod_soc_ops {
3401 int (*init_clkdm)(struct omap_hwmod *oh);
3402 void (*update_context_lost)(struct omap_hwmod *oh);
3403 int (*get_context_lost)(struct omap_hwmod *oh);
3404-};
3405+} __no_const;
3406
3407 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3408-static struct omap_hwmod_soc_ops soc_ops;
3409+static struct omap_hwmod_soc_ops soc_ops __read_only;
3410
3411 /* omap_hwmod_list contains all registered struct omap_hwmods */
3412 static LIST_HEAD(omap_hwmod_list);
3413diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3414index d15c7bb..b2d1f0c 100644
3415--- a/arch/arm/mach-omap2/wd_timer.c
3416+++ b/arch/arm/mach-omap2/wd_timer.c
3417@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3418 struct omap_hwmod *oh;
3419 char *oh_name = "wd_timer2";
3420 char *dev_name = "omap_wdt";
3421- struct omap_wd_timer_platform_data pdata;
3422+ static struct omap_wd_timer_platform_data pdata = {
3423+ .read_reset_sources = prm_read_reset_sources
3424+ };
3425
3426 if (!cpu_class_is_omap2() || of_have_populated_dt())
3427 return 0;
3428@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3429 return -EINVAL;
3430 }
3431
3432- pdata.read_reset_sources = prm_read_reset_sources;
3433-
3434 pdev = omap_device_build(dev_name, id, oh, &pdata,
3435 sizeof(struct omap_wd_timer_platform_data));
3436 WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
3437diff --git a/arch/arm/mach-ux500/include/mach/setup.h b/arch/arm/mach-ux500/include/mach/setup.h
3438index bddce2b..3eb04e2 100644
3439--- a/arch/arm/mach-ux500/include/mach/setup.h
3440+++ b/arch/arm/mach-ux500/include/mach/setup.h
3441@@ -37,13 +37,6 @@ extern void ux500_timer_init(void);
3442 .type = MT_DEVICE, \
3443 }
3444
3445-#define __MEM_DEV_DESC(x, sz) { \
3446- .virtual = IO_ADDRESS(x), \
3447- .pfn = __phys_to_pfn(x), \
3448- .length = sz, \
3449- .type = MT_MEMORY, \
3450-}
3451-
3452 extern struct smp_operations ux500_smp_ops;
3453 extern void ux500_cpu_die(unsigned int cpu);
3454
3455diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3456index 4045c49..4e26c79 100644
3457--- a/arch/arm/mm/Kconfig
3458+++ b/arch/arm/mm/Kconfig
3459@@ -425,7 +425,7 @@ config CPU_32v5
3460
3461 config CPU_32v6
3462 bool
3463- select CPU_USE_DOMAINS if CPU_V6 && MMU
3464+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC
3465 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3466
3467 config CPU_32v6K
3468@@ -574,6 +574,7 @@ config CPU_CP15_MPU
3469
3470 config CPU_USE_DOMAINS
3471 bool
3472+ depends on !ARM_LPAE && !PAX_KERNEXEC
3473 help
3474 This option enables or disables the use of domain switching
3475 via the set_fs() function.
3476diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
3477index db26e2e..ee44569 100644
3478--- a/arch/arm/mm/alignment.c
3479+++ b/arch/arm/mm/alignment.c
3480@@ -211,10 +211,12 @@ union offset_union {
3481 #define __get16_unaligned_check(ins,val,addr) \
3482 do { \
3483 unsigned int err = 0, v, a = addr; \
3484+ pax_open_userland(); \
3485 __get8_unaligned_check(ins,v,a,err); \
3486 val = v << ((BE) ? 8 : 0); \
3487 __get8_unaligned_check(ins,v,a,err); \
3488 val |= v << ((BE) ? 0 : 8); \
3489+ pax_close_userland(); \
3490 if (err) \
3491 goto fault; \
3492 } while (0)
3493@@ -228,6 +230,7 @@ union offset_union {
3494 #define __get32_unaligned_check(ins,val,addr) \
3495 do { \
3496 unsigned int err = 0, v, a = addr; \
3497+ pax_open_userland(); \
3498 __get8_unaligned_check(ins,v,a,err); \
3499 val = v << ((BE) ? 24 : 0); \
3500 __get8_unaligned_check(ins,v,a,err); \
3501@@ -236,6 +239,7 @@ union offset_union {
3502 val |= v << ((BE) ? 8 : 16); \
3503 __get8_unaligned_check(ins,v,a,err); \
3504 val |= v << ((BE) ? 0 : 24); \
3505+ pax_close_userland(); \
3506 if (err) \
3507 goto fault; \
3508 } while (0)
3509@@ -249,6 +253,7 @@ union offset_union {
3510 #define __put16_unaligned_check(ins,val,addr) \
3511 do { \
3512 unsigned int err = 0, v = val, a = addr; \
3513+ pax_open_userland(); \
3514 __asm__( FIRST_BYTE_16 \
3515 ARM( "1: "ins" %1, [%2], #1\n" ) \
3516 THUMB( "1: "ins" %1, [%2]\n" ) \
3517@@ -268,6 +273,7 @@ union offset_union {
3518 " .popsection\n" \
3519 : "=r" (err), "=&r" (v), "=&r" (a) \
3520 : "0" (err), "1" (v), "2" (a)); \
3521+ pax_close_userland(); \
3522 if (err) \
3523 goto fault; \
3524 } while (0)
3525@@ -281,6 +287,7 @@ union offset_union {
3526 #define __put32_unaligned_check(ins,val,addr) \
3527 do { \
3528 unsigned int err = 0, v = val, a = addr; \
3529+ pax_open_userland(); \
3530 __asm__( FIRST_BYTE_32 \
3531 ARM( "1: "ins" %1, [%2], #1\n" ) \
3532 THUMB( "1: "ins" %1, [%2]\n" ) \
3533@@ -310,6 +317,7 @@ union offset_union {
3534 " .popsection\n" \
3535 : "=r" (err), "=&r" (v), "=&r" (a) \
3536 : "0" (err), "1" (v), "2" (a)); \
3537+ pax_close_userland(); \
3538 if (err) \
3539 goto fault; \
3540 } while (0)
3541diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
3542index 5dbf13f..1a60561 100644
3543--- a/arch/arm/mm/fault.c
3544+++ b/arch/arm/mm/fault.c
3545@@ -25,6 +25,7 @@
3546 #include <asm/system_misc.h>
3547 #include <asm/system_info.h>
3548 #include <asm/tlbflush.h>
3549+#include <asm/sections.h>
3550
3551 #include "fault.h"
3552
3553@@ -138,6 +139,20 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
3554 if (fixup_exception(regs))
3555 return;
3556
3557+#ifdef CONFIG_PAX_KERNEXEC
3558+ if ((fsr & FSR_WRITE) &&
3559+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
3560+ (MODULES_VADDR <= addr && addr < MODULES_END)))
3561+ {
3562+ if (current->signal->curr_ip)
3563+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3564+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3565+ else
3566+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
3567+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3568+ }
3569+#endif
3570+
3571 /*
3572 * No handler, we'll have to terminate things with extreme prejudice.
3573 */
3574@@ -174,6 +189,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
3575 }
3576 #endif
3577
3578+#ifdef CONFIG_PAX_PAGEEXEC
3579+ if (fsr & FSR_LNX_PF) {
3580+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
3581+ do_group_exit(SIGKILL);
3582+ }
3583+#endif
3584+
3585 tsk->thread.address = addr;
3586 tsk->thread.error_code = fsr;
3587 tsk->thread.trap_no = 14;
3588@@ -398,6 +420,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3589 }
3590 #endif /* CONFIG_MMU */
3591
3592+#ifdef CONFIG_PAX_PAGEEXEC
3593+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3594+{
3595+ long i;
3596+
3597+ printk(KERN_ERR "PAX: bytes at PC: ");
3598+ for (i = 0; i < 20; i++) {
3599+ unsigned char c;
3600+ if (get_user(c, (__force unsigned char __user *)pc+i))
3601+ printk(KERN_CONT "?? ");
3602+ else
3603+ printk(KERN_CONT "%02x ", c);
3604+ }
3605+ printk("\n");
3606+
3607+ printk(KERN_ERR "PAX: bytes at SP-4: ");
3608+ for (i = -1; i < 20; i++) {
3609+ unsigned long c;
3610+ if (get_user(c, (__force unsigned long __user *)sp+i))
3611+ printk(KERN_CONT "???????? ");
3612+ else
3613+ printk(KERN_CONT "%08lx ", c);
3614+ }
3615+ printk("\n");
3616+}
3617+#endif
3618+
3619 /*
3620 * First Level Translation Fault Handler
3621 *
3622@@ -543,9 +592,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3623 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
3624 struct siginfo info;
3625
3626+#ifdef CONFIG_PAX_MEMORY_UDEREF
3627+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
3628+ if (current->signal->curr_ip)
3629+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3630+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3631+ else
3632+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3633+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3634+ goto die;
3635+ }
3636+#endif
3637+
3638 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
3639 return;
3640
3641+die:
3642 printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
3643 inf->name, fsr, addr);
3644
3645@@ -575,9 +637,49 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
3646 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
3647 struct siginfo info;
3648
3649+ if (user_mode(regs)) {
3650+ if (addr == 0xffff0fe0UL) {
3651+ /*
3652+ * PaX: __kuser_get_tls emulation
3653+ */
3654+ regs->ARM_r0 = current_thread_info()->tp_value;
3655+ regs->ARM_pc = regs->ARM_lr;
3656+ return;
3657+ }
3658+ }
3659+
3660+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3661+ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
3662+ if (current->signal->curr_ip)
3663+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3664+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
3665+ addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr);
3666+ else
3667+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
3668+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
3669+ addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr);
3670+ goto die;
3671+ }
3672+#endif
3673+
3674+#ifdef CONFIG_PAX_REFCOUNT
3675+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
3676+ unsigned int bkpt;
3677+
3678+ if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
3679+ current->thread.error_code = ifsr;
3680+ current->thread.trap_no = 0;
3681+ pax_report_refcount_overflow(regs);
3682+ fixup_exception(regs);
3683+ return;
3684+ }
3685+ }
3686+#endif
3687+
3688 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
3689 return;
3690
3691+die:
3692 printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
3693 inf->name, ifsr, addr);
3694
3695diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
3696index cf08bdf..772656c 100644
3697--- a/arch/arm/mm/fault.h
3698+++ b/arch/arm/mm/fault.h
3699@@ -3,6 +3,7 @@
3700
3701 /*
3702 * Fault status register encodings. We steal bit 31 for our own purposes.
3703+ * Set when the FSR value is from an instruction fault.
3704 */
3705 #define FSR_LNX_PF (1 << 31)
3706 #define FSR_WRITE (1 << 11)
3707@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
3708 }
3709 #endif
3710
3711+/* valid for LPAE and !LPAE */
3712+static inline int is_xn_fault(unsigned int fsr)
3713+{
3714+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
3715+}
3716+
3717+static inline int is_domain_fault(unsigned int fsr)
3718+{
3719+ return ((fsr_fs(fsr) & 0xD) == 0x9);
3720+}
3721+
3722 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
3723 unsigned long search_exception_table(unsigned long addr);
3724
3725diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
3726index ad722f1..763fdd3 100644
3727--- a/arch/arm/mm/init.c
3728+++ b/arch/arm/mm/init.c
3729@@ -30,6 +30,8 @@
3730 #include <asm/setup.h>
3731 #include <asm/tlb.h>
3732 #include <asm/fixmap.h>
3733+#include <asm/system_info.h>
3734+#include <asm/cp15.h>
3735
3736 #include <asm/mach/arch.h>
3737 #include <asm/mach/map.h>
3738@@ -736,7 +738,46 @@ void free_initmem(void)
3739 {
3740 #ifdef CONFIG_HAVE_TCM
3741 extern char __tcm_start, __tcm_end;
3742+#endif
3743
3744+#ifdef CONFIG_PAX_KERNEXEC
3745+ unsigned long addr;
3746+ pgd_t *pgd;
3747+ pud_t *pud;
3748+ pmd_t *pmd;
3749+ int cpu_arch = cpu_architecture();
3750+ unsigned int cr = get_cr();
3751+
3752+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
3753+ /* make pages tables, etc before .text NX */
3754+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
3755+ pgd = pgd_offset_k(addr);
3756+ pud = pud_offset(pgd, addr);
3757+ pmd = pmd_offset(pud, addr);
3758+ __section_update(pmd, addr, PMD_SECT_XN);
3759+ }
3760+ /* make init NX */
3761+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
3762+ pgd = pgd_offset_k(addr);
3763+ pud = pud_offset(pgd, addr);
3764+ pmd = pmd_offset(pud, addr);
3765+ __section_update(pmd, addr, PMD_SECT_XN);
3766+ }
3767+ /* make kernel code/rodata RX */
3768+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
3769+ pgd = pgd_offset_k(addr);
3770+ pud = pud_offset(pgd, addr);
3771+ pmd = pmd_offset(pud, addr);
3772+#ifdef CONFIG_ARM_LPAE
3773+ __section_update(pmd, addr, PMD_SECT_RDONLY);
3774+#else
3775+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
3776+#endif
3777+ }
3778+ }
3779+#endif
3780+
3781+#ifdef CONFIG_HAVE_TCM
3782 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
3783 totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
3784 __phys_to_pfn(__pa(&__tcm_end)),
3785diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
3786index 04d9006..c547d85 100644
3787--- a/arch/arm/mm/ioremap.c
3788+++ b/arch/arm/mm/ioremap.c
3789@@ -392,9 +392,9 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
3790 unsigned int mtype;
3791
3792 if (cached)
3793- mtype = MT_MEMORY;
3794+ mtype = MT_MEMORY_RX;
3795 else
3796- mtype = MT_MEMORY_NONCACHED;
3797+ mtype = MT_MEMORY_NONCACHED_RX;
3798
3799 return __arm_ioremap_caller(phys_addr, size, mtype,
3800 __builtin_return_address(0));
3801diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
3802index 10062ce..cd34fb9 100644
3803--- a/arch/arm/mm/mmap.c
3804+++ b/arch/arm/mm/mmap.c
3805@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
3806 struct vm_area_struct *vma;
3807 int do_align = 0;
3808 int aliasing = cache_is_vipt_aliasing();
3809+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
3810 struct vm_unmapped_area_info info;
3811
3812 /*
3813@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
3814 if (len > TASK_SIZE)
3815 return -ENOMEM;
3816
3817+#ifdef CONFIG_PAX_RANDMMAP
3818+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3819+#endif
3820+
3821 if (addr) {
3822 if (do_align)
3823 addr = COLOUR_ALIGN(addr, pgoff);
3824@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
3825 addr = PAGE_ALIGN(addr);
3826
3827 vma = find_vma(mm, addr);
3828- if (TASK_SIZE - len >= addr &&
3829- (!vma || addr + len <= vma->vm_start))
3830+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
3831 return addr;
3832 }
3833
3834@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
3835 info.high_limit = TASK_SIZE;
3836 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
3837 info.align_offset = pgoff << PAGE_SHIFT;
3838+ info.threadstack_offset = offset;
3839 return vm_unmapped_area(&info);
3840 }
3841
3842@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3843 unsigned long addr = addr0;
3844 int do_align = 0;
3845 int aliasing = cache_is_vipt_aliasing();
3846+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
3847 struct vm_unmapped_area_info info;
3848
3849 /*
3850@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3851 return addr;
3852 }
3853
3854+#ifdef CONFIG_PAX_RANDMMAP
3855+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3856+#endif
3857+
3858 /* requesting a specific address */
3859 if (addr) {
3860 if (do_align)
3861@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3862 else
3863 addr = PAGE_ALIGN(addr);
3864 vma = find_vma(mm, addr);
3865- if (TASK_SIZE - len >= addr &&
3866- (!vma || addr + len <= vma->vm_start))
3867+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
3868 return addr;
3869 }
3870
3871@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3872 info.high_limit = mm->mmap_base;
3873 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
3874 info.align_offset = pgoff << PAGE_SHIFT;
3875+ info.threadstack_offset = offset;
3876 addr = vm_unmapped_area(&info);
3877
3878 /*
3879@@ -162,6 +172,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3880 VM_BUG_ON(addr != -ENOMEM);
3881 info.flags = 0;
3882 info.low_limit = mm->mmap_base;
3883+
3884+#ifdef CONFIG_PAX_RANDMMAP
3885+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3886+ info.low_limit += mm->delta_mmap;
3887+#endif
3888+
3889 info.high_limit = TASK_SIZE;
3890 addr = vm_unmapped_area(&info);
3891 }
3892@@ -173,6 +189,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3893 {
3894 unsigned long random_factor = 0UL;
3895
3896+#ifdef CONFIG_PAX_RANDMMAP
3897+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3898+#endif
3899+
3900 /* 8 bits of randomness in 20 address space bits */
3901 if ((current->flags & PF_RANDOMIZE) &&
3902 !(current->personality & ADDR_NO_RANDOMIZE))
3903@@ -180,10 +200,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3904
3905 if (mmap_is_legacy()) {
3906 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
3907+
3908+#ifdef CONFIG_PAX_RANDMMAP
3909+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3910+ mm->mmap_base += mm->delta_mmap;
3911+#endif
3912+
3913 mm->get_unmapped_area = arch_get_unmapped_area;
3914 mm->unmap_area = arch_unmap_area;
3915 } else {
3916 mm->mmap_base = mmap_base(random_factor);
3917+
3918+#ifdef CONFIG_PAX_RANDMMAP
3919+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3920+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3921+#endif
3922+
3923 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3924 mm->unmap_area = arch_unmap_area_topdown;
3925 }
3926diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
3927index a84ff76..f221c1d 100644
3928--- a/arch/arm/mm/mmu.c
3929+++ b/arch/arm/mm/mmu.c
3930@@ -36,6 +36,22 @@
3931 #include "mm.h"
3932 #include "tcm.h"
3933
3934+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3935+void modify_domain(unsigned int dom, unsigned int type)
3936+{
3937+ struct thread_info *thread = current_thread_info();
3938+ unsigned int domain = thread->cpu_domain;
3939+ /*
3940+ * DOMAIN_MANAGER might be defined to some other value,
3941+ * use the arch-defined constant
3942+ */
3943+ domain &= ~domain_val(dom, 3);
3944+ thread->cpu_domain = domain | domain_val(dom, type);
3945+ set_domain(thread->cpu_domain);
3946+}
3947+EXPORT_SYMBOL(modify_domain);
3948+#endif
3949+
3950 /*
3951 * empty_zero_page is a special page that is used for
3952 * zero-initialized data and COW.
3953@@ -211,10 +227,18 @@ void adjust_cr(unsigned long mask, unsigned long set)
3954 }
3955 #endif
3956
3957-#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
3958+#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY
3959 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
3960
3961-static struct mem_type mem_types[] = {
3962+#ifdef CONFIG_PAX_KERNEXEC
3963+#define L_PTE_KERNEXEC L_PTE_RDONLY
3964+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
3965+#else
3966+#define L_PTE_KERNEXEC L_PTE_DIRTY
3967+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
3968+#endif
3969+
3970+static struct mem_type mem_types[] __read_only = {
3971 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
3972 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
3973 L_PTE_SHARED,
3974@@ -243,16 +267,16 @@ static struct mem_type mem_types[] = {
3975 [MT_UNCACHED] = {
3976 .prot_pte = PROT_PTE_DEVICE,
3977 .prot_l1 = PMD_TYPE_TABLE,
3978- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
3979+ .prot_sect = PROT_SECT_DEVICE,
3980 .domain = DOMAIN_IO,
3981 },
3982 [MT_CACHECLEAN] = {
3983- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
3984+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
3985 .domain = DOMAIN_KERNEL,
3986 },
3987 #ifndef CONFIG_ARM_LPAE
3988 [MT_MINICLEAN] = {
3989- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
3990+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_RDONLY,
3991 .domain = DOMAIN_KERNEL,
3992 },
3993 #endif
3994@@ -260,36 +284,54 @@ static struct mem_type mem_types[] = {
3995 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
3996 L_PTE_RDONLY,
3997 .prot_l1 = PMD_TYPE_TABLE,
3998- .domain = DOMAIN_USER,
3999+ .domain = DOMAIN_VECTORS,
4000 },
4001 [MT_HIGH_VECTORS] = {
4002 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4003- L_PTE_USER | L_PTE_RDONLY,
4004+ L_PTE_RDONLY,
4005 .prot_l1 = PMD_TYPE_TABLE,
4006- .domain = DOMAIN_USER,
4007+ .domain = DOMAIN_VECTORS,
4008 },
4009- [MT_MEMORY] = {
4010+ [MT_MEMORY_RWX] = {
4011 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4012 .prot_l1 = PMD_TYPE_TABLE,
4013 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4014 .domain = DOMAIN_KERNEL,
4015 },
4016+ [MT_MEMORY_RW] = {
4017+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4018+ .prot_l1 = PMD_TYPE_TABLE,
4019+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4020+ .domain = DOMAIN_KERNEL,
4021+ },
4022+ [MT_MEMORY_RX] = {
4023+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4024+ .prot_l1 = PMD_TYPE_TABLE,
4025+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4026+ .domain = DOMAIN_KERNEL,
4027+ },
4028 [MT_ROM] = {
4029- .prot_sect = PMD_TYPE_SECT,
4030+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4031 .domain = DOMAIN_KERNEL,
4032 },
4033- [MT_MEMORY_NONCACHED] = {
4034+ [MT_MEMORY_NONCACHED_RW] = {
4035 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4036 L_PTE_MT_BUFFERABLE,
4037 .prot_l1 = PMD_TYPE_TABLE,
4038 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4039 .domain = DOMAIN_KERNEL,
4040 },
4041+ [MT_MEMORY_NONCACHED_RX] = {
4042+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4043+ L_PTE_MT_BUFFERABLE,
4044+ .prot_l1 = PMD_TYPE_TABLE,
4045+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4046+ .domain = DOMAIN_KERNEL,
4047+ },
4048 [MT_MEMORY_DTCM] = {
4049- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4050- L_PTE_XN,
4051+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4052 .prot_l1 = PMD_TYPE_TABLE,
4053- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4054+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4055 .domain = DOMAIN_KERNEL,
4056 },
4057 [MT_MEMORY_ITCM] = {
4058@@ -299,10 +341,10 @@ static struct mem_type mem_types[] = {
4059 },
4060 [MT_MEMORY_SO] = {
4061 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4062- L_PTE_MT_UNCACHED | L_PTE_XN,
4063+ L_PTE_MT_UNCACHED,
4064 .prot_l1 = PMD_TYPE_TABLE,
4065 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
4066- PMD_SECT_UNCACHED | PMD_SECT_XN,
4067+ PMD_SECT_UNCACHED,
4068 .domain = DOMAIN_KERNEL,
4069 },
4070 [MT_MEMORY_DMA_READY] = {
4071@@ -388,9 +430,35 @@ static void __init build_mem_type_table(void)
4072 * to prevent speculative instruction fetches.
4073 */
4074 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
4075+ mem_types[MT_DEVICE].prot_pte |= L_PTE_XN;
4076 mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
4077+ mem_types[MT_DEVICE_NONSHARED].prot_pte |= L_PTE_XN;
4078 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
4079+ mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_XN;
4080 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
4081+ mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_XN;
4082+
4083+ /* Mark other regions on ARMv6+ as execute-never */
4084+
4085+#ifdef CONFIG_PAX_KERNEXEC
4086+ mem_types[MT_UNCACHED].prot_sect |= PMD_SECT_XN;
4087+ mem_types[MT_UNCACHED].prot_pte |= L_PTE_XN;
4088+ mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_XN;
4089+ mem_types[MT_CACHECLEAN].prot_pte |= L_PTE_XN;
4090+#ifndef CONFIG_ARM_LPAE
4091+ mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_XN;
4092+ mem_types[MT_MINICLEAN].prot_pte |= L_PTE_XN;
4093+#endif
4094+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
4095+ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_XN;
4096+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_XN;
4097+ mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= PMD_SECT_XN;
4098+ mem_types[MT_MEMORY_DTCM].prot_sect |= PMD_SECT_XN;
4099+ mem_types[MT_MEMORY_DTCM].prot_pte |= L_PTE_XN;
4100+#endif
4101+
4102+ mem_types[MT_MEMORY_SO].prot_sect |= PMD_SECT_XN;
4103+ mem_types[MT_MEMORY_SO].prot_pte |= L_PTE_XN;
4104 }
4105 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4106 /*
4107@@ -451,6 +519,9 @@ static void __init build_mem_type_table(void)
4108 * from SVC mode and no access from userspace.
4109 */
4110 mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4111+#ifdef CONFIG_PAX_KERNEXEC
4112+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4113+#endif
4114 mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4115 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4116 #endif
4117@@ -468,11 +539,17 @@ static void __init build_mem_type_table(void)
4118 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
4119 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
4120 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
4121- mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
4122- mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
4123+ mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4124+ mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4125+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
4126+ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
4127+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
4128+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
4129 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
4130- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
4131- mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
4132+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_S;
4133+ mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= L_PTE_SHARED;
4134+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_S;
4135+ mem_types[MT_MEMORY_NONCACHED_RX].prot_pte |= L_PTE_SHARED;
4136 }
4137 }
4138
4139@@ -483,15 +560,20 @@ static void __init build_mem_type_table(void)
4140 if (cpu_arch >= CPU_ARCH_ARMv6) {
4141 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4142 /* Non-cacheable Normal is XCB = 001 */
4143- mem_types[MT_MEMORY_NONCACHED].prot_sect |=
4144+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
4145+ PMD_SECT_BUFFERED;
4146+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
4147 PMD_SECT_BUFFERED;
4148 } else {
4149 /* For both ARMv6 and non-TEX-remapping ARMv7 */
4150- mem_types[MT_MEMORY_NONCACHED].prot_sect |=
4151+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
4152+ PMD_SECT_TEX(1);
4153+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
4154 PMD_SECT_TEX(1);
4155 }
4156 } else {
4157- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4158+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_BUFFERABLE;
4159+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_BUFFERABLE;
4160 }
4161
4162 #ifdef CONFIG_ARM_LPAE
4163@@ -507,6 +589,8 @@ static void __init build_mem_type_table(void)
4164 vecs_pgprot |= PTE_EXT_AF;
4165 #endif
4166
4167+ user_pgprot |= __supported_pte_mask;
4168+
4169 for (i = 0; i < 16; i++) {
4170 pteval_t v = pgprot_val(protection_map[i]);
4171 protection_map[i] = __pgprot(v | user_pgprot);
4172@@ -524,10 +608,15 @@ static void __init build_mem_type_table(void)
4173
4174 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
4175 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
4176- mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
4177- mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
4178+ mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4179+ mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4180+ mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
4181+ mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
4182+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
4183+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
4184 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
4185- mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
4186+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= ecc_mask;
4187+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= ecc_mask;
4188 mem_types[MT_ROM].prot_sect |= cp->pmd;
4189
4190 switch (cp->pmd) {
4191@@ -1147,18 +1236,15 @@ void __init arm_mm_memblock_reserve(void)
4192 * called function. This means you can't use any function or debugging
4193 * method which may touch any device, otherwise the kernel _will_ crash.
4194 */
4195+
4196+static char vectors[PAGE_SIZE] __read_only __aligned(PAGE_SIZE);
4197+
4198 static void __init devicemaps_init(struct machine_desc *mdesc)
4199 {
4200 struct map_desc map;
4201 unsigned long addr;
4202- void *vectors;
4203
4204- /*
4205- * Allocate the vector page early.
4206- */
4207- vectors = early_alloc(PAGE_SIZE);
4208-
4209- early_trap_init(vectors);
4210+ early_trap_init(&vectors);
4211
4212 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4213 pmd_clear(pmd_off_k(addr));
4214@@ -1198,7 +1284,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
4215 * location (0xffff0000). If we aren't using high-vectors, also
4216 * create a mapping at the low-vectors virtual address.
4217 */
4218- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4219+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4220 map.virtual = 0xffff0000;
4221 map.length = PAGE_SIZE;
4222 map.type = MT_HIGH_VECTORS;
4223@@ -1256,8 +1342,39 @@ static void __init map_lowmem(void)
4224 map.pfn = __phys_to_pfn(start);
4225 map.virtual = __phys_to_virt(start);
4226 map.length = end - start;
4227- map.type = MT_MEMORY;
4228
4229+#ifdef CONFIG_PAX_KERNEXEC
4230+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4231+ struct map_desc kernel;
4232+ struct map_desc initmap;
4233+
4234+ /* when freeing initmem we will make this RW */
4235+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4236+ initmap.virtual = (unsigned long)__init_begin;
4237+ initmap.length = _sdata - __init_begin;
4238+ initmap.type = MT_MEMORY_RWX;
4239+ create_mapping(&initmap);
4240+
4241+ /* when freeing initmem we will make this RX */
4242+ kernel.pfn = __phys_to_pfn(__pa(_stext));
4243+ kernel.virtual = (unsigned long)_stext;
4244+ kernel.length = __init_begin - _stext;
4245+ kernel.type = MT_MEMORY_RWX;
4246+ create_mapping(&kernel);
4247+
4248+ if (map.virtual < (unsigned long)_stext) {
4249+ map.length = (unsigned long)_stext - map.virtual;
4250+ map.type = MT_MEMORY_RWX;
4251+ create_mapping(&map);
4252+ }
4253+
4254+ map.pfn = __phys_to_pfn(__pa(_sdata));
4255+ map.virtual = (unsigned long)_sdata;
4256+ map.length = end - __pa(_sdata);
4257+ }
4258+#endif
4259+
4260+ map.type = MT_MEMORY_RW;
4261 create_mapping(&map);
4262 }
4263 }
4264diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
4265index 78f520b..31f0cb6 100644
4266--- a/arch/arm/mm/proc-v7-2level.S
4267+++ b/arch/arm/mm/proc-v7-2level.S
4268@@ -99,6 +99,9 @@ ENTRY(cpu_v7_set_pte_ext)
4269 tst r1, #L_PTE_XN
4270 orrne r3, r3, #PTE_EXT_XN
4271
4272+ tst r1, #L_PTE_PXN
4273+ orrne r3, r3, #PTE_EXT_PXN
4274+
4275 tst r1, #L_PTE_YOUNG
4276 tstne r1, #L_PTE_VALID
4277 #ifndef CONFIG_CPU_USE_DOMAINS
4278diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4279index a5bc92d..0bb4730 100644
4280--- a/arch/arm/plat-omap/sram.c
4281+++ b/arch/arm/plat-omap/sram.c
4282@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
4283 * Looks like we need to preserve some bootloader code at the
4284 * beginning of SRAM for jumping to flash for reboot to work...
4285 */
4286+ pax_open_kernel();
4287 memset_io(omap_sram_base + omap_sram_skip, 0,
4288 omap_sram_size - omap_sram_skip);
4289+ pax_close_kernel();
4290 }
4291diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
4292index 1141782..0959d64 100644
4293--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
4294+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
4295@@ -48,7 +48,7 @@ struct samsung_dma_ops {
4296 int (*started)(unsigned ch);
4297 int (*flush)(unsigned ch);
4298 int (*stop)(unsigned ch);
4299-};
4300+} __no_const;
4301
4302 extern void *samsung_dmadev_get_ops(void);
4303 extern void *s3c_dma_get_ops(void);
4304diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
4305index f4726dc..39ed646 100644
4306--- a/arch/arm64/kernel/debug-monitors.c
4307+++ b/arch/arm64/kernel/debug-monitors.c
4308@@ -149,7 +149,7 @@ static int __cpuinit os_lock_notify(struct notifier_block *self,
4309 return NOTIFY_OK;
4310 }
4311
4312-static struct notifier_block __cpuinitdata os_lock_nb = {
4313+static struct notifier_block os_lock_nb = {
4314 .notifier_call = os_lock_notify,
4315 };
4316
4317diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
4318index 5ab825c..96aaec8 100644
4319--- a/arch/arm64/kernel/hw_breakpoint.c
4320+++ b/arch/arm64/kernel/hw_breakpoint.c
4321@@ -831,7 +831,7 @@ static int __cpuinit hw_breakpoint_reset_notify(struct notifier_block *self,
4322 return NOTIFY_OK;
4323 }
4324
4325-static struct notifier_block __cpuinitdata hw_breakpoint_reset_nb = {
4326+static struct notifier_block hw_breakpoint_reset_nb = {
4327 .notifier_call = hw_breakpoint_reset_notify,
4328 };
4329
4330diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
4331index c3a58a1..78fbf54 100644
4332--- a/arch/avr32/include/asm/cache.h
4333+++ b/arch/avr32/include/asm/cache.h
4334@@ -1,8 +1,10 @@
4335 #ifndef __ASM_AVR32_CACHE_H
4336 #define __ASM_AVR32_CACHE_H
4337
4338+#include <linux/const.h>
4339+
4340 #define L1_CACHE_SHIFT 5
4341-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4342+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4343
4344 /*
4345 * Memory returned by kmalloc() may be used for DMA, so we must make
4346diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
4347index d232888..87c8df1 100644
4348--- a/arch/avr32/include/asm/elf.h
4349+++ b/arch/avr32/include/asm/elf.h
4350@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
4351 the loader. We need to make sure that it is out of the way of the program
4352 that it will "exec", and that there is sufficient room for the brk. */
4353
4354-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
4355+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
4356
4357+#ifdef CONFIG_PAX_ASLR
4358+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
4359+
4360+#define PAX_DELTA_MMAP_LEN 15
4361+#define PAX_DELTA_STACK_LEN 15
4362+#endif
4363
4364 /* This yields a mask that user programs can use to figure out what
4365 instruction set this CPU supports. This could be done in user space,
4366diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
4367index 479330b..53717a8 100644
4368--- a/arch/avr32/include/asm/kmap_types.h
4369+++ b/arch/avr32/include/asm/kmap_types.h
4370@@ -2,9 +2,9 @@
4371 #define __ASM_AVR32_KMAP_TYPES_H
4372
4373 #ifdef CONFIG_DEBUG_HIGHMEM
4374-# define KM_TYPE_NR 29
4375+# define KM_TYPE_NR 30
4376 #else
4377-# define KM_TYPE_NR 14
4378+# define KM_TYPE_NR 15
4379 #endif
4380
4381 #endif /* __ASM_AVR32_KMAP_TYPES_H */
4382diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
4383index b2f2d2d..d1c85cb 100644
4384--- a/arch/avr32/mm/fault.c
4385+++ b/arch/avr32/mm/fault.c
4386@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
4387
4388 int exception_trace = 1;
4389
4390+#ifdef CONFIG_PAX_PAGEEXEC
4391+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4392+{
4393+ unsigned long i;
4394+
4395+ printk(KERN_ERR "PAX: bytes at PC: ");
4396+ for (i = 0; i < 20; i++) {
4397+ unsigned char c;
4398+ if (get_user(c, (unsigned char *)pc+i))
4399+ printk(KERN_CONT "???????? ");
4400+ else
4401+ printk(KERN_CONT "%02x ", c);
4402+ }
4403+ printk("\n");
4404+}
4405+#endif
4406+
4407 /*
4408 * This routine handles page faults. It determines the address and the
4409 * problem, and then passes it off to one of the appropriate routines.
4410@@ -174,6 +191,16 @@ bad_area:
4411 up_read(&mm->mmap_sem);
4412
4413 if (user_mode(regs)) {
4414+
4415+#ifdef CONFIG_PAX_PAGEEXEC
4416+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4417+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
4418+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
4419+ do_group_exit(SIGKILL);
4420+ }
4421+ }
4422+#endif
4423+
4424 if (exception_trace && printk_ratelimit())
4425 printk("%s%s[%d]: segfault at %08lx pc %08lx "
4426 "sp %08lx ecr %lu\n",
4427diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
4428index 568885a..f8008df 100644
4429--- a/arch/blackfin/include/asm/cache.h
4430+++ b/arch/blackfin/include/asm/cache.h
4431@@ -7,6 +7,7 @@
4432 #ifndef __ARCH_BLACKFIN_CACHE_H
4433 #define __ARCH_BLACKFIN_CACHE_H
4434
4435+#include <linux/const.h>
4436 #include <linux/linkage.h> /* for asmlinkage */
4437
4438 /*
4439@@ -14,7 +15,7 @@
4440 * Blackfin loads 32 bytes for cache
4441 */
4442 #define L1_CACHE_SHIFT 5
4443-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4444+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4445 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4446
4447 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
4448diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
4449index aea2718..3639a60 100644
4450--- a/arch/cris/include/arch-v10/arch/cache.h
4451+++ b/arch/cris/include/arch-v10/arch/cache.h
4452@@ -1,8 +1,9 @@
4453 #ifndef _ASM_ARCH_CACHE_H
4454 #define _ASM_ARCH_CACHE_H
4455
4456+#include <linux/const.h>
4457 /* Etrax 100LX have 32-byte cache-lines. */
4458-#define L1_CACHE_BYTES 32
4459 #define L1_CACHE_SHIFT 5
4460+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4461
4462 #endif /* _ASM_ARCH_CACHE_H */
4463diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
4464index 7caf25d..ee65ac5 100644
4465--- a/arch/cris/include/arch-v32/arch/cache.h
4466+++ b/arch/cris/include/arch-v32/arch/cache.h
4467@@ -1,11 +1,12 @@
4468 #ifndef _ASM_CRIS_ARCH_CACHE_H
4469 #define _ASM_CRIS_ARCH_CACHE_H
4470
4471+#include <linux/const.h>
4472 #include <arch/hwregs/dma.h>
4473
4474 /* A cache-line is 32 bytes. */
4475-#define L1_CACHE_BYTES 32
4476 #define L1_CACHE_SHIFT 5
4477+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4478
4479 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4480
4481diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
4482index b86329d..6709906 100644
4483--- a/arch/frv/include/asm/atomic.h
4484+++ b/arch/frv/include/asm/atomic.h
4485@@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v)
4486 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
4487 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
4488
4489+#define atomic64_read_unchecked(v) atomic64_read(v)
4490+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4491+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4492+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4493+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4494+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4495+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4496+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4497+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4498+
4499 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
4500 {
4501 int c, old;
4502diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
4503index 2797163..c2a401d 100644
4504--- a/arch/frv/include/asm/cache.h
4505+++ b/arch/frv/include/asm/cache.h
4506@@ -12,10 +12,11 @@
4507 #ifndef __ASM_CACHE_H
4508 #define __ASM_CACHE_H
4509
4510+#include <linux/const.h>
4511
4512 /* bytes per L1 cache line */
4513 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
4514-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4515+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4516
4517 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4518 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4519diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
4520index 43901f2..0d8b865 100644
4521--- a/arch/frv/include/asm/kmap_types.h
4522+++ b/arch/frv/include/asm/kmap_types.h
4523@@ -2,6 +2,6 @@
4524 #ifndef _ASM_KMAP_TYPES_H
4525 #define _ASM_KMAP_TYPES_H
4526
4527-#define KM_TYPE_NR 17
4528+#define KM_TYPE_NR 18
4529
4530 #endif
4531diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
4532index 836f147..4cf23f5 100644
4533--- a/arch/frv/mm/elf-fdpic.c
4534+++ b/arch/frv/mm/elf-fdpic.c
4535@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4536 {
4537 struct vm_area_struct *vma;
4538 struct vm_unmapped_area_info info;
4539+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
4540
4541 if (len > TASK_SIZE)
4542 return -ENOMEM;
4543@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4544 if (addr) {
4545 addr = PAGE_ALIGN(addr);
4546 vma = find_vma(current->mm, addr);
4547- if (TASK_SIZE - len >= addr &&
4548- (!vma || addr + len <= vma->vm_start))
4549+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4550 goto success;
4551 }
4552
4553@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4554 info.high_limit = (current->mm->start_stack - 0x00200000);
4555 info.align_mask = 0;
4556 info.align_offset = 0;
4557+ info.threadstack_offset = offset;
4558 addr = vm_unmapped_area(&info);
4559 if (!(addr & ~PAGE_MASK))
4560 goto success;
4561diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
4562index f4ca594..adc72fd6 100644
4563--- a/arch/hexagon/include/asm/cache.h
4564+++ b/arch/hexagon/include/asm/cache.h
4565@@ -21,9 +21,11 @@
4566 #ifndef __ASM_CACHE_H
4567 #define __ASM_CACHE_H
4568
4569+#include <linux/const.h>
4570+
4571 /* Bytes per L1 cache line */
4572-#define L1_CACHE_SHIFT (5)
4573-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4574+#define L1_CACHE_SHIFT 5
4575+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4576
4577 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
4578 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
4579diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
4580index 6e6fe18..a6ae668 100644
4581--- a/arch/ia64/include/asm/atomic.h
4582+++ b/arch/ia64/include/asm/atomic.h
4583@@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
4584 #define atomic64_inc(v) atomic64_add(1, (v))
4585 #define atomic64_dec(v) atomic64_sub(1, (v))
4586
4587+#define atomic64_read_unchecked(v) atomic64_read(v)
4588+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4589+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4590+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4591+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4592+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4593+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4594+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4595+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4596+
4597 /* Atomic operations are already serializing */
4598 #define smp_mb__before_atomic_dec() barrier()
4599 #define smp_mb__after_atomic_dec() barrier()
4600diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
4601index 988254a..e1ee885 100644
4602--- a/arch/ia64/include/asm/cache.h
4603+++ b/arch/ia64/include/asm/cache.h
4604@@ -1,6 +1,7 @@
4605 #ifndef _ASM_IA64_CACHE_H
4606 #define _ASM_IA64_CACHE_H
4607
4608+#include <linux/const.h>
4609
4610 /*
4611 * Copyright (C) 1998-2000 Hewlett-Packard Co
4612@@ -9,7 +10,7 @@
4613
4614 /* Bytes per L1 (data) cache line. */
4615 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
4616-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4617+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4618
4619 #ifdef CONFIG_SMP
4620 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
4621diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
4622index 5a83c5c..4d7f553 100644
4623--- a/arch/ia64/include/asm/elf.h
4624+++ b/arch/ia64/include/asm/elf.h
4625@@ -42,6 +42,13 @@
4626 */
4627 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
4628
4629+#ifdef CONFIG_PAX_ASLR
4630+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
4631+
4632+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4633+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4634+#endif
4635+
4636 #define PT_IA_64_UNWIND 0x70000001
4637
4638 /* IA-64 relocations: */
4639diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
4640index 96a8d92..617a1cf 100644
4641--- a/arch/ia64/include/asm/pgalloc.h
4642+++ b/arch/ia64/include/asm/pgalloc.h
4643@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4644 pgd_val(*pgd_entry) = __pa(pud);
4645 }
4646
4647+static inline void
4648+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4649+{
4650+ pgd_populate(mm, pgd_entry, pud);
4651+}
4652+
4653 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
4654 {
4655 return quicklist_alloc(0, GFP_KERNEL, NULL);
4656@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
4657 pud_val(*pud_entry) = __pa(pmd);
4658 }
4659
4660+static inline void
4661+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
4662+{
4663+ pud_populate(mm, pud_entry, pmd);
4664+}
4665+
4666 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
4667 {
4668 return quicklist_alloc(0, GFP_KERNEL, NULL);
4669diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
4670index 815810c..d60bd4c 100644
4671--- a/arch/ia64/include/asm/pgtable.h
4672+++ b/arch/ia64/include/asm/pgtable.h
4673@@ -12,7 +12,7 @@
4674 * David Mosberger-Tang <davidm@hpl.hp.com>
4675 */
4676
4677-
4678+#include <linux/const.h>
4679 #include <asm/mman.h>
4680 #include <asm/page.h>
4681 #include <asm/processor.h>
4682@@ -142,6 +142,17 @@
4683 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4684 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4685 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
4686+
4687+#ifdef CONFIG_PAX_PAGEEXEC
4688+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
4689+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4690+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4691+#else
4692+# define PAGE_SHARED_NOEXEC PAGE_SHARED
4693+# define PAGE_READONLY_NOEXEC PAGE_READONLY
4694+# define PAGE_COPY_NOEXEC PAGE_COPY
4695+#endif
4696+
4697 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
4698 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
4699 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
4700diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
4701index 54ff557..70c88b7 100644
4702--- a/arch/ia64/include/asm/spinlock.h
4703+++ b/arch/ia64/include/asm/spinlock.h
4704@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
4705 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
4706
4707 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
4708- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
4709+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
4710 }
4711
4712 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
4713diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
4714index 449c8c0..18965fb 100644
4715--- a/arch/ia64/include/asm/uaccess.h
4716+++ b/arch/ia64/include/asm/uaccess.h
4717@@ -240,12 +240,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
4718 static inline unsigned long
4719 __copy_to_user (void __user *to, const void *from, unsigned long count)
4720 {
4721+ if (count > INT_MAX)
4722+ return count;
4723+
4724+ if (!__builtin_constant_p(count))
4725+ check_object_size(from, count, true);
4726+
4727 return __copy_user(to, (__force void __user *) from, count);
4728 }
4729
4730 static inline unsigned long
4731 __copy_from_user (void *to, const void __user *from, unsigned long count)
4732 {
4733+ if (count > INT_MAX)
4734+ return count;
4735+
4736+ if (!__builtin_constant_p(count))
4737+ check_object_size(to, count, false);
4738+
4739 return __copy_user((__force void __user *) to, from, count);
4740 }
4741
4742@@ -255,10 +267,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
4743 ({ \
4744 void __user *__cu_to = (to); \
4745 const void *__cu_from = (from); \
4746- long __cu_len = (n); \
4747+ unsigned long __cu_len = (n); \
4748 \
4749- if (__access_ok(__cu_to, __cu_len, get_fs())) \
4750+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
4751+ if (!__builtin_constant_p(n)) \
4752+ check_object_size(__cu_from, __cu_len, true); \
4753 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
4754+ } \
4755 __cu_len; \
4756 })
4757
4758@@ -266,11 +281,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
4759 ({ \
4760 void *__cu_to = (to); \
4761 const void __user *__cu_from = (from); \
4762- long __cu_len = (n); \
4763+ unsigned long __cu_len = (n); \
4764 \
4765 __chk_user_ptr(__cu_from); \
4766- if (__access_ok(__cu_from, __cu_len, get_fs())) \
4767+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
4768+ if (!__builtin_constant_p(n)) \
4769+ check_object_size(__cu_to, __cu_len, false); \
4770 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
4771+ } \
4772 __cu_len; \
4773 })
4774
4775diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
4776index 2d67317..07d8bfa 100644
4777--- a/arch/ia64/kernel/err_inject.c
4778+++ b/arch/ia64/kernel/err_inject.c
4779@@ -256,7 +256,7 @@ static int __cpuinit err_inject_cpu_callback(struct notifier_block *nfb,
4780 return NOTIFY_OK;
4781 }
4782
4783-static struct notifier_block __cpuinitdata err_inject_cpu_notifier =
4784+static struct notifier_block err_inject_cpu_notifier =
4785 {
4786 .notifier_call = err_inject_cpu_callback,
4787 };
4788diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
4789index d7396db..b33e873 100644
4790--- a/arch/ia64/kernel/mca.c
4791+++ b/arch/ia64/kernel/mca.c
4792@@ -1922,7 +1922,7 @@ static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
4793 return NOTIFY_OK;
4794 }
4795
4796-static struct notifier_block mca_cpu_notifier __cpuinitdata = {
4797+static struct notifier_block mca_cpu_notifier = {
4798 .notifier_call = mca_cpu_callback
4799 };
4800
4801diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
4802index 24603be..948052d 100644
4803--- a/arch/ia64/kernel/module.c
4804+++ b/arch/ia64/kernel/module.c
4805@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
4806 void
4807 module_free (struct module *mod, void *module_region)
4808 {
4809- if (mod && mod->arch.init_unw_table &&
4810- module_region == mod->module_init) {
4811+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
4812 unw_remove_unwind_table(mod->arch.init_unw_table);
4813 mod->arch.init_unw_table = NULL;
4814 }
4815@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
4816 }
4817
4818 static inline int
4819+in_init_rx (const struct module *mod, uint64_t addr)
4820+{
4821+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
4822+}
4823+
4824+static inline int
4825+in_init_rw (const struct module *mod, uint64_t addr)
4826+{
4827+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
4828+}
4829+
4830+static inline int
4831 in_init (const struct module *mod, uint64_t addr)
4832 {
4833- return addr - (uint64_t) mod->module_init < mod->init_size;
4834+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
4835+}
4836+
4837+static inline int
4838+in_core_rx (const struct module *mod, uint64_t addr)
4839+{
4840+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
4841+}
4842+
4843+static inline int
4844+in_core_rw (const struct module *mod, uint64_t addr)
4845+{
4846+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
4847 }
4848
4849 static inline int
4850 in_core (const struct module *mod, uint64_t addr)
4851 {
4852- return addr - (uint64_t) mod->module_core < mod->core_size;
4853+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
4854 }
4855
4856 static inline int
4857@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
4858 break;
4859
4860 case RV_BDREL:
4861- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
4862+ if (in_init_rx(mod, val))
4863+ val -= (uint64_t) mod->module_init_rx;
4864+ else if (in_init_rw(mod, val))
4865+ val -= (uint64_t) mod->module_init_rw;
4866+ else if (in_core_rx(mod, val))
4867+ val -= (uint64_t) mod->module_core_rx;
4868+ else if (in_core_rw(mod, val))
4869+ val -= (uint64_t) mod->module_core_rw;
4870 break;
4871
4872 case RV_LTV:
4873@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
4874 * addresses have been selected...
4875 */
4876 uint64_t gp;
4877- if (mod->core_size > MAX_LTOFF)
4878+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
4879 /*
4880 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
4881 * at the end of the module.
4882 */
4883- gp = mod->core_size - MAX_LTOFF / 2;
4884+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
4885 else
4886- gp = mod->core_size / 2;
4887- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
4888+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
4889+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
4890 mod->arch.gp = gp;
4891 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
4892 }
4893diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
4894index 79521d5..43dddff 100644
4895--- a/arch/ia64/kernel/palinfo.c
4896+++ b/arch/ia64/kernel/palinfo.c
4897@@ -1006,7 +1006,7 @@ static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
4898 return NOTIFY_OK;
4899 }
4900
4901-static struct notifier_block __refdata palinfo_cpu_notifier =
4902+static struct notifier_block palinfo_cpu_notifier =
4903 {
4904 .notifier_call = palinfo_cpu_callback,
4905 .priority = 0,
4906diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
4907index aa527d7..f237752 100644
4908--- a/arch/ia64/kernel/salinfo.c
4909+++ b/arch/ia64/kernel/salinfo.c
4910@@ -616,7 +616,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu
4911 return NOTIFY_OK;
4912 }
4913
4914-static struct notifier_block salinfo_cpu_notifier __cpuinitdata =
4915+static struct notifier_block salinfo_cpu_notifier =
4916 {
4917 .notifier_call = salinfo_cpu_callback,
4918 .priority = 0,
4919diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
4920index 41e33f8..65180b2 100644
4921--- a/arch/ia64/kernel/sys_ia64.c
4922+++ b/arch/ia64/kernel/sys_ia64.c
4923@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
4924 unsigned long align_mask = 0;
4925 struct mm_struct *mm = current->mm;
4926 struct vm_unmapped_area_info info;
4927+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4928
4929 if (len > RGN_MAP_LIMIT)
4930 return -ENOMEM;
4931@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
4932 if (REGION_NUMBER(addr) == RGN_HPAGE)
4933 addr = 0;
4934 #endif
4935+
4936+#ifdef CONFIG_PAX_RANDMMAP
4937+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4938+ addr = mm->free_area_cache;
4939+ else
4940+#endif
4941+
4942 if (!addr)
4943 addr = TASK_UNMAPPED_BASE;
4944
4945@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
4946 info.high_limit = TASK_SIZE;
4947 info.align_mask = align_mask;
4948 info.align_offset = 0;
4949+ info.threadstack_offset = offset;
4950 return vm_unmapped_area(&info);
4951 }
4952
4953diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
4954index dc00b2c..cce53c2 100644
4955--- a/arch/ia64/kernel/topology.c
4956+++ b/arch/ia64/kernel/topology.c
4957@@ -445,7 +445,7 @@ static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
4958 return NOTIFY_OK;
4959 }
4960
4961-static struct notifier_block __cpuinitdata cache_cpu_notifier =
4962+static struct notifier_block cache_cpu_notifier =
4963 {
4964 .notifier_call = cache_cpu_callback
4965 };
4966diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
4967index 0ccb28f..8992469 100644
4968--- a/arch/ia64/kernel/vmlinux.lds.S
4969+++ b/arch/ia64/kernel/vmlinux.lds.S
4970@@ -198,7 +198,7 @@ SECTIONS {
4971 /* Per-cpu data: */
4972 . = ALIGN(PERCPU_PAGE_SIZE);
4973 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
4974- __phys_per_cpu_start = __per_cpu_load;
4975+ __phys_per_cpu_start = per_cpu_load;
4976 /*
4977 * ensure percpu data fits
4978 * into percpu page size
4979diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
4980index 6cf0341..d352594 100644
4981--- a/arch/ia64/mm/fault.c
4982+++ b/arch/ia64/mm/fault.c
4983@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
4984 return pte_present(pte);
4985 }
4986
4987+#ifdef CONFIG_PAX_PAGEEXEC
4988+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4989+{
4990+ unsigned long i;
4991+
4992+ printk(KERN_ERR "PAX: bytes at PC: ");
4993+ for (i = 0; i < 8; i++) {
4994+ unsigned int c;
4995+ if (get_user(c, (unsigned int *)pc+i))
4996+ printk(KERN_CONT "???????? ");
4997+ else
4998+ printk(KERN_CONT "%08x ", c);
4999+ }
5000+ printk("\n");
5001+}
5002+#endif
5003+
5004 # define VM_READ_BIT 0
5005 # define VM_WRITE_BIT 1
5006 # define VM_EXEC_BIT 2
5007@@ -149,8 +166,21 @@ retry:
5008 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5009 goto bad_area;
5010
5011- if ((vma->vm_flags & mask) != mask)
5012+ if ((vma->vm_flags & mask) != mask) {
5013+
5014+#ifdef CONFIG_PAX_PAGEEXEC
5015+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5016+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5017+ goto bad_area;
5018+
5019+ up_read(&mm->mmap_sem);
5020+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5021+ do_group_exit(SIGKILL);
5022+ }
5023+#endif
5024+
5025 goto bad_area;
5026+ }
5027
5028 /*
5029 * If for any reason at all we couldn't handle the fault, make
5030diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5031index 76069c1..c2aa816 100644
5032--- a/arch/ia64/mm/hugetlbpage.c
5033+++ b/arch/ia64/mm/hugetlbpage.c
5034@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5035 unsigned long pgoff, unsigned long flags)
5036 {
5037 struct vm_unmapped_area_info info;
5038+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
5039
5040 if (len > RGN_MAP_LIMIT)
5041 return -ENOMEM;
5042@@ -172,6 +173,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5043 info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
5044 info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
5045 info.align_offset = 0;
5046+ info.threadstack_offset = offset;
5047 return vm_unmapped_area(&info);
5048 }
5049
5050diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
5051index 20bc967..a26993e 100644
5052--- a/arch/ia64/mm/init.c
5053+++ b/arch/ia64/mm/init.c
5054@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
5055 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
5056 vma->vm_end = vma->vm_start + PAGE_SIZE;
5057 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
5058+
5059+#ifdef CONFIG_PAX_PAGEEXEC
5060+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
5061+ vma->vm_flags &= ~VM_EXEC;
5062+
5063+#ifdef CONFIG_PAX_MPROTECT
5064+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
5065+ vma->vm_flags &= ~VM_MAYEXEC;
5066+#endif
5067+
5068+ }
5069+#endif
5070+
5071 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5072 down_write(&current->mm->mmap_sem);
5073 if (insert_vm_struct(current->mm, vma)) {
5074diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
5075index 40b3ee9..8c2c112 100644
5076--- a/arch/m32r/include/asm/cache.h
5077+++ b/arch/m32r/include/asm/cache.h
5078@@ -1,8 +1,10 @@
5079 #ifndef _ASM_M32R_CACHE_H
5080 #define _ASM_M32R_CACHE_H
5081
5082+#include <linux/const.h>
5083+
5084 /* L1 cache line size */
5085 #define L1_CACHE_SHIFT 4
5086-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5087+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5088
5089 #endif /* _ASM_M32R_CACHE_H */
5090diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
5091index 82abd15..d95ae5d 100644
5092--- a/arch/m32r/lib/usercopy.c
5093+++ b/arch/m32r/lib/usercopy.c
5094@@ -14,6 +14,9 @@
5095 unsigned long
5096 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5097 {
5098+ if ((long)n < 0)
5099+ return n;
5100+
5101 prefetch(from);
5102 if (access_ok(VERIFY_WRITE, to, n))
5103 __copy_user(to,from,n);
5104@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5105 unsigned long
5106 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
5107 {
5108+ if ((long)n < 0)
5109+ return n;
5110+
5111 prefetchw(to);
5112 if (access_ok(VERIFY_READ, from, n))
5113 __copy_user_zeroing(to,from,n);
5114diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
5115index 0395c51..5f26031 100644
5116--- a/arch/m68k/include/asm/cache.h
5117+++ b/arch/m68k/include/asm/cache.h
5118@@ -4,9 +4,11 @@
5119 #ifndef __ARCH_M68K_CACHE_H
5120 #define __ARCH_M68K_CACHE_H
5121
5122+#include <linux/const.h>
5123+
5124 /* bytes per L1 cache line */
5125 #define L1_CACHE_SHIFT 4
5126-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
5127+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5128
5129 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5130
5131diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
5132index 3c52fa6..11b2ad8 100644
5133--- a/arch/metag/mm/hugetlbpage.c
5134+++ b/arch/metag/mm/hugetlbpage.c
5135@@ -200,6 +200,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
5136 info.high_limit = TASK_SIZE;
5137 info.align_mask = PAGE_MASK & HUGEPT_MASK;
5138 info.align_offset = 0;
5139+ info.threadstack_offset = 0;
5140 return vm_unmapped_area(&info);
5141 }
5142
5143diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5144index 4efe96a..60e8699 100644
5145--- a/arch/microblaze/include/asm/cache.h
5146+++ b/arch/microblaze/include/asm/cache.h
5147@@ -13,11 +13,12 @@
5148 #ifndef _ASM_MICROBLAZE_CACHE_H
5149 #define _ASM_MICROBLAZE_CACHE_H
5150
5151+#include <linux/const.h>
5152 #include <asm/registers.h>
5153
5154 #define L1_CACHE_SHIFT 5
5155 /* word-granular cache in microblaze */
5156-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5157+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5158
5159 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5160
5161diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
5162index 08b6079..eb272cf 100644
5163--- a/arch/mips/include/asm/atomic.h
5164+++ b/arch/mips/include/asm/atomic.h
5165@@ -21,6 +21,10 @@
5166 #include <asm/cmpxchg.h>
5167 #include <asm/war.h>
5168
5169+#ifdef CONFIG_GENERIC_ATOMIC64
5170+#include <asm-generic/atomic64.h>
5171+#endif
5172+
5173 #define ATOMIC_INIT(i) { (i) }
5174
5175 /*
5176@@ -759,6 +763,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
5177 */
5178 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
5179
5180+#define atomic64_read_unchecked(v) atomic64_read(v)
5181+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5182+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5183+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5184+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5185+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5186+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5187+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5188+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5189+
5190 #endif /* CONFIG_64BIT */
5191
5192 /*
5193diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
5194index b4db69f..8f3b093 100644
5195--- a/arch/mips/include/asm/cache.h
5196+++ b/arch/mips/include/asm/cache.h
5197@@ -9,10 +9,11 @@
5198 #ifndef _ASM_CACHE_H
5199 #define _ASM_CACHE_H
5200
5201+#include <linux/const.h>
5202 #include <kmalloc.h>
5203
5204 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
5205-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5206+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5207
5208 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
5209 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5210diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
5211index cf3ae24..238d22f 100644
5212--- a/arch/mips/include/asm/elf.h
5213+++ b/arch/mips/include/asm/elf.h
5214@@ -372,13 +372,16 @@ extern const char *__elf_platform;
5215 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
5216 #endif
5217
5218+#ifdef CONFIG_PAX_ASLR
5219+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
5220+
5221+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5222+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5223+#endif
5224+
5225 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
5226 struct linux_binprm;
5227 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
5228 int uses_interp);
5229
5230-struct mm_struct;
5231-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
5232-#define arch_randomize_brk arch_randomize_brk
5233-
5234 #endif /* _ASM_ELF_H */
5235diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
5236index c1f6afa..38cc6e9 100644
5237--- a/arch/mips/include/asm/exec.h
5238+++ b/arch/mips/include/asm/exec.h
5239@@ -12,6 +12,6 @@
5240 #ifndef _ASM_EXEC_H
5241 #define _ASM_EXEC_H
5242
5243-extern unsigned long arch_align_stack(unsigned long sp);
5244+#define arch_align_stack(x) ((x) & ~0xfUL)
5245
5246 #endif /* _ASM_EXEC_H */
5247diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
5248index eab99e5..607c98e 100644
5249--- a/arch/mips/include/asm/page.h
5250+++ b/arch/mips/include/asm/page.h
5251@@ -96,7 +96,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
5252 #ifdef CONFIG_CPU_MIPS32
5253 typedef struct { unsigned long pte_low, pte_high; } pte_t;
5254 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
5255- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
5256+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
5257 #else
5258 typedef struct { unsigned long long pte; } pte_t;
5259 #define pte_val(x) ((x).pte)
5260diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
5261index 881d18b..cea38bc 100644
5262--- a/arch/mips/include/asm/pgalloc.h
5263+++ b/arch/mips/include/asm/pgalloc.h
5264@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
5265 {
5266 set_pud(pud, __pud((unsigned long)pmd));
5267 }
5268+
5269+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
5270+{
5271+ pud_populate(mm, pud, pmd);
5272+}
5273 #endif
5274
5275 /*
5276diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
5277index 178f792..8ebc510 100644
5278--- a/arch/mips/include/asm/thread_info.h
5279+++ b/arch/mips/include/asm/thread_info.h
5280@@ -111,6 +111,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
5281 #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
5282 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
5283 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
5284+/* li takes a 32bit immediate */
5285+#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
5286 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
5287
5288 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
5289@@ -126,15 +128,18 @@ register struct thread_info *__current_thread_info __asm__("$28");
5290 #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
5291 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
5292 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
5293+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
5294+
5295+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
5296
5297 /* work to do in syscall_trace_leave() */
5298-#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
5299+#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
5300
5301 /* work to do on interrupt/exception return */
5302 #define _TIF_WORK_MASK \
5303 (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME)
5304 /* work to do on any return to u-space */
5305-#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT)
5306+#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT | _TIF_GRSEC_SETXID)
5307
5308 #endif /* __KERNEL__ */
5309
5310diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
5311index e06f777..3244284 100644
5312--- a/arch/mips/kernel/binfmt_elfn32.c
5313+++ b/arch/mips/kernel/binfmt_elfn32.c
5314@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
5315 #undef ELF_ET_DYN_BASE
5316 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
5317
5318+#ifdef CONFIG_PAX_ASLR
5319+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
5320+
5321+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5322+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5323+#endif
5324+
5325 #include <asm/processor.h>
5326 #include <linux/module.h>
5327 #include <linux/elfcore.h>
5328diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
5329index 556a435..b4fd2e3 100644
5330--- a/arch/mips/kernel/binfmt_elfo32.c
5331+++ b/arch/mips/kernel/binfmt_elfo32.c
5332@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
5333 #undef ELF_ET_DYN_BASE
5334 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
5335
5336+#ifdef CONFIG_PAX_ASLR
5337+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
5338+
5339+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5340+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5341+#endif
5342+
5343 #include <asm/processor.h>
5344
5345 /*
5346diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
5347index 3be4405..a799827 100644
5348--- a/arch/mips/kernel/process.c
5349+++ b/arch/mips/kernel/process.c
5350@@ -461,15 +461,3 @@ unsigned long get_wchan(struct task_struct *task)
5351 out:
5352 return pc;
5353 }
5354-
5355-/*
5356- * Don't forget that the stack pointer must be aligned on a 8 bytes
5357- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
5358- */
5359-unsigned long arch_align_stack(unsigned long sp)
5360-{
5361- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5362- sp -= get_random_int() & ~PAGE_MASK;
5363-
5364- return sp & ALMASK;
5365-}
5366diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
5367index 9c6299c..2fb4c22 100644
5368--- a/arch/mips/kernel/ptrace.c
5369+++ b/arch/mips/kernel/ptrace.c
5370@@ -528,6 +528,10 @@ static inline int audit_arch(void)
5371 return arch;
5372 }
5373
5374+#ifdef CONFIG_GRKERNSEC_SETXID
5375+extern void gr_delayed_cred_worker(void);
5376+#endif
5377+
5378 /*
5379 * Notification of system call entry/exit
5380 * - triggered by current->work.syscall_trace
5381@@ -537,6 +541,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
5382 /* do the secure computing check first */
5383 secure_computing_strict(regs->regs[2]);
5384
5385+#ifdef CONFIG_GRKERNSEC_SETXID
5386+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5387+ gr_delayed_cred_worker();
5388+#endif
5389+
5390 if (!(current->ptrace & PT_PTRACED))
5391 goto out;
5392
5393diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
5394index 9ea2964..c4329c3 100644
5395--- a/arch/mips/kernel/scall32-o32.S
5396+++ b/arch/mips/kernel/scall32-o32.S
5397@@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
5398
5399 stack_done:
5400 lw t0, TI_FLAGS($28) # syscall tracing enabled?
5401- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5402+ li t1, _TIF_SYSCALL_WORK
5403 and t0, t1
5404 bnez t0, syscall_trace_entry # -> yes
5405
5406diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
5407index 36cfd40..b1436e0 100644
5408--- a/arch/mips/kernel/scall64-64.S
5409+++ b/arch/mips/kernel/scall64-64.S
5410@@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
5411
5412 sd a3, PT_R26(sp) # save a3 for syscall restarting
5413
5414- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5415+ li t1, _TIF_SYSCALL_WORK
5416 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
5417 and t0, t1, t0
5418 bnez t0, syscall_trace_entry
5419diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
5420index 693d60b..ae0ba75 100644
5421--- a/arch/mips/kernel/scall64-n32.S
5422+++ b/arch/mips/kernel/scall64-n32.S
5423@@ -47,7 +47,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
5424
5425 sd a3, PT_R26(sp) # save a3 for syscall restarting
5426
5427- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5428+ li t1, _TIF_SYSCALL_WORK
5429 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
5430 and t0, t1, t0
5431 bnez t0, n32_syscall_trace_entry
5432diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
5433index af8887f..611ccb6 100644
5434--- a/arch/mips/kernel/scall64-o32.S
5435+++ b/arch/mips/kernel/scall64-o32.S
5436@@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
5437 PTR 4b, bad_stack
5438 .previous
5439
5440- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5441+ li t1, _TIF_SYSCALL_WORK
5442 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
5443 and t0, t1, t0
5444 bnez t0, trace_a_syscall
5445diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
5446index 0fead53..a2c0fb5 100644
5447--- a/arch/mips/mm/fault.c
5448+++ b/arch/mips/mm/fault.c
5449@@ -27,6 +27,23 @@
5450 #include <asm/highmem.h> /* For VMALLOC_END */
5451 #include <linux/kdebug.h>
5452
5453+#ifdef CONFIG_PAX_PAGEEXEC
5454+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5455+{
5456+ unsigned long i;
5457+
5458+ printk(KERN_ERR "PAX: bytes at PC: ");
5459+ for (i = 0; i < 5; i++) {
5460+ unsigned int c;
5461+ if (get_user(c, (unsigned int *)pc+i))
5462+ printk(KERN_CONT "???????? ");
5463+ else
5464+ printk(KERN_CONT "%08x ", c);
5465+ }
5466+ printk("\n");
5467+}
5468+#endif
5469+
5470 /*
5471 * This routine handles page faults. It determines the address,
5472 * and the problem, and then passes it off to one of the appropriate
5473diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
5474index 7e5fe27..9656513 100644
5475--- a/arch/mips/mm/mmap.c
5476+++ b/arch/mips/mm/mmap.c
5477@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
5478 struct vm_area_struct *vma;
5479 unsigned long addr = addr0;
5480 int do_color_align;
5481+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5482 struct vm_unmapped_area_info info;
5483
5484 if (unlikely(len > TASK_SIZE))
5485@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
5486 do_color_align = 1;
5487
5488 /* requesting a specific address */
5489+
5490+#ifdef CONFIG_PAX_RANDMMAP
5491+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
5492+#endif
5493+
5494 if (addr) {
5495 if (do_color_align)
5496 addr = COLOUR_ALIGN(addr, pgoff);
5497@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
5498 addr = PAGE_ALIGN(addr);
5499
5500 vma = find_vma(mm, addr);
5501- if (TASK_SIZE - len >= addr &&
5502- (!vma || addr + len <= vma->vm_start))
5503+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len, offset))
5504 return addr;
5505 }
5506
5507 info.length = len;
5508 info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
5509 info.align_offset = pgoff << PAGE_SHIFT;
5510+ info.threadstack_offset = offset;
5511
5512 if (dir == DOWN) {
5513 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
5514@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5515 {
5516 unsigned long random_factor = 0UL;
5517
5518+#ifdef CONFIG_PAX_RANDMMAP
5519+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5520+#endif
5521+
5522 if (current->flags & PF_RANDOMIZE) {
5523 random_factor = get_random_int();
5524 random_factor = random_factor << PAGE_SHIFT;
5525@@ -157,42 +167,27 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5526
5527 if (mmap_is_legacy()) {
5528 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5529+
5530+#ifdef CONFIG_PAX_RANDMMAP
5531+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5532+ mm->mmap_base += mm->delta_mmap;
5533+#endif
5534+
5535 mm->get_unmapped_area = arch_get_unmapped_area;
5536 mm->unmap_area = arch_unmap_area;
5537 } else {
5538 mm->mmap_base = mmap_base(random_factor);
5539+
5540+#ifdef CONFIG_PAX_RANDMMAP
5541+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5542+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5543+#endif
5544+
5545 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5546 mm->unmap_area = arch_unmap_area_topdown;
5547 }
5548 }
5549
5550-static inline unsigned long brk_rnd(void)
5551-{
5552- unsigned long rnd = get_random_int();
5553-
5554- rnd = rnd << PAGE_SHIFT;
5555- /* 8MB for 32bit, 256MB for 64bit */
5556- if (TASK_IS_32BIT_ADDR)
5557- rnd = rnd & 0x7ffffful;
5558- else
5559- rnd = rnd & 0xffffffful;
5560-
5561- return rnd;
5562-}
5563-
5564-unsigned long arch_randomize_brk(struct mm_struct *mm)
5565-{
5566- unsigned long base = mm->brk;
5567- unsigned long ret;
5568-
5569- ret = PAGE_ALIGN(base + brk_rnd());
5570-
5571- if (ret < mm->brk)
5572- return mm->brk;
5573-
5574- return ret;
5575-}
5576-
5577 int __virt_addr_valid(const volatile void *kaddr)
5578 {
5579 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
5580diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
5581index 967d144..db12197 100644
5582--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
5583+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
5584@@ -11,12 +11,14 @@
5585 #ifndef _ASM_PROC_CACHE_H
5586 #define _ASM_PROC_CACHE_H
5587
5588+#include <linux/const.h>
5589+
5590 /* L1 cache */
5591
5592 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
5593 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
5594-#define L1_CACHE_BYTES 16 /* bytes per entry */
5595 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
5596+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
5597 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
5598
5599 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
5600diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
5601index bcb5df2..84fabd2 100644
5602--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
5603+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
5604@@ -16,13 +16,15 @@
5605 #ifndef _ASM_PROC_CACHE_H
5606 #define _ASM_PROC_CACHE_H
5607
5608+#include <linux/const.h>
5609+
5610 /*
5611 * L1 cache
5612 */
5613 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
5614 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
5615-#define L1_CACHE_BYTES 32 /* bytes per entry */
5616 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
5617+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
5618 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
5619
5620 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
5621diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
5622index 4ce7a01..449202a 100644
5623--- a/arch/openrisc/include/asm/cache.h
5624+++ b/arch/openrisc/include/asm/cache.h
5625@@ -19,11 +19,13 @@
5626 #ifndef __ASM_OPENRISC_CACHE_H
5627 #define __ASM_OPENRISC_CACHE_H
5628
5629+#include <linux/const.h>
5630+
5631 /* FIXME: How can we replace these with values from the CPU...
5632 * they shouldn't be hard-coded!
5633 */
5634
5635-#define L1_CACHE_BYTES 16
5636 #define L1_CACHE_SHIFT 4
5637+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5638
5639 #endif /* __ASM_OPENRISC_CACHE_H */
5640diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
5641index f38e198..4179e38 100644
5642--- a/arch/parisc/include/asm/atomic.h
5643+++ b/arch/parisc/include/asm/atomic.h
5644@@ -229,6 +229,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
5645
5646 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
5647
5648+#define atomic64_read_unchecked(v) atomic64_read(v)
5649+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5650+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5651+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5652+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5653+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5654+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5655+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5656+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5657+
5658 #endif /* !CONFIG_64BIT */
5659
5660
5661diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
5662index 47f11c7..3420df2 100644
5663--- a/arch/parisc/include/asm/cache.h
5664+++ b/arch/parisc/include/asm/cache.h
5665@@ -5,6 +5,7 @@
5666 #ifndef __ARCH_PARISC_CACHE_H
5667 #define __ARCH_PARISC_CACHE_H
5668
5669+#include <linux/const.h>
5670
5671 /*
5672 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
5673@@ -15,13 +16,13 @@
5674 * just ruin performance.
5675 */
5676 #ifdef CONFIG_PA20
5677-#define L1_CACHE_BYTES 64
5678 #define L1_CACHE_SHIFT 6
5679 #else
5680-#define L1_CACHE_BYTES 32
5681 #define L1_CACHE_SHIFT 5
5682 #endif
5683
5684+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5685+
5686 #ifndef __ASSEMBLY__
5687
5688 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5689diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
5690index ad2b503..bdf1651 100644
5691--- a/arch/parisc/include/asm/elf.h
5692+++ b/arch/parisc/include/asm/elf.h
5693@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
5694
5695 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
5696
5697+#ifdef CONFIG_PAX_ASLR
5698+#define PAX_ELF_ET_DYN_BASE 0x10000UL
5699+
5700+#define PAX_DELTA_MMAP_LEN 16
5701+#define PAX_DELTA_STACK_LEN 16
5702+#endif
5703+
5704 /* This yields a mask that user programs can use to figure out what
5705 instruction set this CPU supports. This could be done in user space,
5706 but it's not easy, and we've already done it here. */
5707diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
5708index fc987a1..6e068ef 100644
5709--- a/arch/parisc/include/asm/pgalloc.h
5710+++ b/arch/parisc/include/asm/pgalloc.h
5711@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
5712 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
5713 }
5714
5715+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
5716+{
5717+ pgd_populate(mm, pgd, pmd);
5718+}
5719+
5720 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
5721 {
5722 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
5723@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
5724 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
5725 #define pmd_free(mm, x) do { } while (0)
5726 #define pgd_populate(mm, pmd, pte) BUG()
5727+#define pgd_populate_kernel(mm, pmd, pte) BUG()
5728
5729 #endif
5730
5731diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
5732index 1e40d7f..a3eb445 100644
5733--- a/arch/parisc/include/asm/pgtable.h
5734+++ b/arch/parisc/include/asm/pgtable.h
5735@@ -223,6 +223,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
5736 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
5737 #define PAGE_COPY PAGE_EXECREAD
5738 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
5739+
5740+#ifdef CONFIG_PAX_PAGEEXEC
5741+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
5742+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
5743+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
5744+#else
5745+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5746+# define PAGE_COPY_NOEXEC PAGE_COPY
5747+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5748+#endif
5749+
5750 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
5751 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
5752 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
5753diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
5754index e0a8235..ce2f1e1 100644
5755--- a/arch/parisc/include/asm/uaccess.h
5756+++ b/arch/parisc/include/asm/uaccess.h
5757@@ -245,10 +245,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
5758 const void __user *from,
5759 unsigned long n)
5760 {
5761- int sz = __compiletime_object_size(to);
5762+ size_t sz = __compiletime_object_size(to);
5763 int ret = -EFAULT;
5764
5765- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
5766+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
5767 ret = __copy_from_user(to, from, n);
5768 else
5769 copy_from_user_overflow();
5770diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
5771index 5709c5e..14285ca 100644
5772--- a/arch/parisc/kernel/drivers.c
5773+++ b/arch/parisc/kernel/drivers.c
5774@@ -394,7 +394,7 @@ EXPORT_SYMBOL(print_pci_hwpath);
5775 static void setup_bus_id(struct parisc_device *padev)
5776 {
5777 struct hardware_path path;
5778- char name[20];
5779+ char name[28];
5780 char *output = name;
5781 int i;
5782
5783diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
5784index 2a625fb..9908930 100644
5785--- a/arch/parisc/kernel/module.c
5786+++ b/arch/parisc/kernel/module.c
5787@@ -98,16 +98,38 @@
5788
5789 /* three functions to determine where in the module core
5790 * or init pieces the location is */
5791+static inline int in_init_rx(struct module *me, void *loc)
5792+{
5793+ return (loc >= me->module_init_rx &&
5794+ loc < (me->module_init_rx + me->init_size_rx));
5795+}
5796+
5797+static inline int in_init_rw(struct module *me, void *loc)
5798+{
5799+ return (loc >= me->module_init_rw &&
5800+ loc < (me->module_init_rw + me->init_size_rw));
5801+}
5802+
5803 static inline int in_init(struct module *me, void *loc)
5804 {
5805- return (loc >= me->module_init &&
5806- loc <= (me->module_init + me->init_size));
5807+ return in_init_rx(me, loc) || in_init_rw(me, loc);
5808+}
5809+
5810+static inline int in_core_rx(struct module *me, void *loc)
5811+{
5812+ return (loc >= me->module_core_rx &&
5813+ loc < (me->module_core_rx + me->core_size_rx));
5814+}
5815+
5816+static inline int in_core_rw(struct module *me, void *loc)
5817+{
5818+ return (loc >= me->module_core_rw &&
5819+ loc < (me->module_core_rw + me->core_size_rw));
5820 }
5821
5822 static inline int in_core(struct module *me, void *loc)
5823 {
5824- return (loc >= me->module_core &&
5825- loc <= (me->module_core + me->core_size));
5826+ return in_core_rx(me, loc) || in_core_rw(me, loc);
5827 }
5828
5829 static inline int in_local(struct module *me, void *loc)
5830@@ -371,13 +393,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
5831 }
5832
5833 /* align things a bit */
5834- me->core_size = ALIGN(me->core_size, 16);
5835- me->arch.got_offset = me->core_size;
5836- me->core_size += gots * sizeof(struct got_entry);
5837+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
5838+ me->arch.got_offset = me->core_size_rw;
5839+ me->core_size_rw += gots * sizeof(struct got_entry);
5840
5841- me->core_size = ALIGN(me->core_size, 16);
5842- me->arch.fdesc_offset = me->core_size;
5843- me->core_size += fdescs * sizeof(Elf_Fdesc);
5844+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
5845+ me->arch.fdesc_offset = me->core_size_rw;
5846+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
5847
5848 me->arch.got_max = gots;
5849 me->arch.fdesc_max = fdescs;
5850@@ -395,7 +417,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
5851
5852 BUG_ON(value == 0);
5853
5854- got = me->module_core + me->arch.got_offset;
5855+ got = me->module_core_rw + me->arch.got_offset;
5856 for (i = 0; got[i].addr; i++)
5857 if (got[i].addr == value)
5858 goto out;
5859@@ -413,7 +435,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
5860 #ifdef CONFIG_64BIT
5861 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
5862 {
5863- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
5864+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
5865
5866 if (!value) {
5867 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
5868@@ -431,7 +453,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
5869
5870 /* Create new one */
5871 fdesc->addr = value;
5872- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
5873+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
5874 return (Elf_Addr)fdesc;
5875 }
5876 #endif /* CONFIG_64BIT */
5877@@ -843,7 +865,7 @@ register_unwind_table(struct module *me,
5878
5879 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
5880 end = table + sechdrs[me->arch.unwind_section].sh_size;
5881- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
5882+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
5883
5884 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
5885 me->arch.unwind_section, table, end, gp);
5886diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
5887index a3328c2..3b812eb 100644
5888--- a/arch/parisc/kernel/setup.c
5889+++ b/arch/parisc/kernel/setup.c
5890@@ -69,7 +69,8 @@ void __init setup_cmdline(char **cmdline_p)
5891 /* called from hpux boot loader */
5892 boot_command_line[0] = '\0';
5893 } else {
5894- strcpy(boot_command_line, (char *)__va(boot_args[1]));
5895+ strlcpy(boot_command_line, (char *)__va(boot_args[1]),
5896+ COMMAND_LINE_SIZE);
5897
5898 #ifdef CONFIG_BLK_DEV_INITRD
5899 if (boot_args[2] != 0) /* did palo pass us a ramdisk? */
5900diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
5901index 5dfd248..64914ac 100644
5902--- a/arch/parisc/kernel/sys_parisc.c
5903+++ b/arch/parisc/kernel/sys_parisc.c
5904@@ -33,9 +33,11 @@
5905 #include <linux/utsname.h>
5906 #include <linux/personality.h>
5907
5908-static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
5909+static unsigned long get_unshared_area(struct file *filp, unsigned long addr, unsigned long len,
5910+ unsigned long flags)
5911 {
5912 struct vm_unmapped_area_info info;
5913+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
5914
5915 info.flags = 0;
5916 info.length = len;
5917@@ -43,6 +45,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
5918 info.high_limit = TASK_SIZE;
5919 info.align_mask = 0;
5920 info.align_offset = 0;
5921+ info.threadstack_offset = offset;
5922 return vm_unmapped_area(&info);
5923 }
5924
5925@@ -61,10 +64,11 @@ static int get_offset(struct address_space *mapping)
5926 return (unsigned long) mapping >> 8;
5927 }
5928
5929-static unsigned long get_shared_area(struct address_space *mapping,
5930- unsigned long addr, unsigned long len, unsigned long pgoff)
5931+static unsigned long get_shared_area(struct file *filp, struct address_space *mapping,
5932+ unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
5933 {
5934 struct vm_unmapped_area_info info;
5935+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
5936
5937 info.flags = 0;
5938 info.length = len;
5939@@ -72,6 +76,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
5940 info.high_limit = TASK_SIZE;
5941 info.align_mask = PAGE_MASK & (SHMLBA - 1);
5942 info.align_offset = (get_offset(mapping) + pgoff) << PAGE_SHIFT;
5943+ info.threadstack_offset = offset;
5944 return vm_unmapped_area(&info);
5945 }
5946
5947@@ -86,15 +91,22 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
5948 return -EINVAL;
5949 return addr;
5950 }
5951- if (!addr)
5952+ if (!addr) {
5953 addr = TASK_UNMAPPED_BASE;
5954
5955+#ifdef CONFIG_PAX_RANDMMAP
5956+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
5957+ addr += current->mm->delta_mmap;
5958+#endif
5959+
5960+ }
5961+
5962 if (filp) {
5963- addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
5964+ addr = get_shared_area(filp, filp->f_mapping, addr, len, pgoff, flags);
5965 } else if(flags & MAP_SHARED) {
5966- addr = get_shared_area(NULL, addr, len, pgoff);
5967+ addr = get_shared_area(filp, NULL, addr, len, pgoff, flags);
5968 } else {
5969- addr = get_unshared_area(addr, len);
5970+ addr = get_unshared_area(filp, addr, len, flags);
5971 }
5972 return addr;
5973 }
5974diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
5975index aeb8f8f..27a6c2f 100644
5976--- a/arch/parisc/kernel/traps.c
5977+++ b/arch/parisc/kernel/traps.c
5978@@ -732,9 +732,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
5979
5980 down_read(&current->mm->mmap_sem);
5981 vma = find_vma(current->mm,regs->iaoq[0]);
5982- if (vma && (regs->iaoq[0] >= vma->vm_start)
5983- && (vma->vm_flags & VM_EXEC)) {
5984-
5985+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
5986 fault_address = regs->iaoq[0];
5987 fault_space = regs->iasq[0];
5988
5989diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
5990index f247a34..dc0f219 100644
5991--- a/arch/parisc/mm/fault.c
5992+++ b/arch/parisc/mm/fault.c
5993@@ -15,6 +15,7 @@
5994 #include <linux/sched.h>
5995 #include <linux/interrupt.h>
5996 #include <linux/module.h>
5997+#include <linux/unistd.h>
5998
5999 #include <asm/uaccess.h>
6000 #include <asm/traps.h>
6001@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
6002 static unsigned long
6003 parisc_acctyp(unsigned long code, unsigned int inst)
6004 {
6005- if (code == 6 || code == 16)
6006+ if (code == 6 || code == 7 || code == 16)
6007 return VM_EXEC;
6008
6009 switch (inst & 0xf0000000) {
6010@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
6011 }
6012 #endif
6013
6014+#ifdef CONFIG_PAX_PAGEEXEC
6015+/*
6016+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
6017+ *
6018+ * returns 1 when task should be killed
6019+ * 2 when rt_sigreturn trampoline was detected
6020+ * 3 when unpatched PLT trampoline was detected
6021+ */
6022+static int pax_handle_fetch_fault(struct pt_regs *regs)
6023+{
6024+
6025+#ifdef CONFIG_PAX_EMUPLT
6026+ int err;
6027+
6028+ do { /* PaX: unpatched PLT emulation */
6029+ unsigned int bl, depwi;
6030+
6031+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
6032+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
6033+
6034+ if (err)
6035+ break;
6036+
6037+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
6038+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
6039+
6040+ err = get_user(ldw, (unsigned int *)addr);
6041+ err |= get_user(bv, (unsigned int *)(addr+4));
6042+ err |= get_user(ldw2, (unsigned int *)(addr+8));
6043+
6044+ if (err)
6045+ break;
6046+
6047+ if (ldw == 0x0E801096U &&
6048+ bv == 0xEAC0C000U &&
6049+ ldw2 == 0x0E881095U)
6050+ {
6051+ unsigned int resolver, map;
6052+
6053+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
6054+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
6055+ if (err)
6056+ break;
6057+
6058+ regs->gr[20] = instruction_pointer(regs)+8;
6059+ regs->gr[21] = map;
6060+ regs->gr[22] = resolver;
6061+ regs->iaoq[0] = resolver | 3UL;
6062+ regs->iaoq[1] = regs->iaoq[0] + 4;
6063+ return 3;
6064+ }
6065+ }
6066+ } while (0);
6067+#endif
6068+
6069+#ifdef CONFIG_PAX_EMUTRAMP
6070+
6071+#ifndef CONFIG_PAX_EMUSIGRT
6072+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
6073+ return 1;
6074+#endif
6075+
6076+ do { /* PaX: rt_sigreturn emulation */
6077+ unsigned int ldi1, ldi2, bel, nop;
6078+
6079+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
6080+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
6081+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
6082+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
6083+
6084+ if (err)
6085+ break;
6086+
6087+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
6088+ ldi2 == 0x3414015AU &&
6089+ bel == 0xE4008200U &&
6090+ nop == 0x08000240U)
6091+ {
6092+ regs->gr[25] = (ldi1 & 2) >> 1;
6093+ regs->gr[20] = __NR_rt_sigreturn;
6094+ regs->gr[31] = regs->iaoq[1] + 16;
6095+ regs->sr[0] = regs->iasq[1];
6096+ regs->iaoq[0] = 0x100UL;
6097+ regs->iaoq[1] = regs->iaoq[0] + 4;
6098+ regs->iasq[0] = regs->sr[2];
6099+ regs->iasq[1] = regs->sr[2];
6100+ return 2;
6101+ }
6102+ } while (0);
6103+#endif
6104+
6105+ return 1;
6106+}
6107+
6108+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6109+{
6110+ unsigned long i;
6111+
6112+ printk(KERN_ERR "PAX: bytes at PC: ");
6113+ for (i = 0; i < 5; i++) {
6114+ unsigned int c;
6115+ if (get_user(c, (unsigned int *)pc+i))
6116+ printk(KERN_CONT "???????? ");
6117+ else
6118+ printk(KERN_CONT "%08x ", c);
6119+ }
6120+ printk("\n");
6121+}
6122+#endif
6123+
6124 int fixup_exception(struct pt_regs *regs)
6125 {
6126 const struct exception_table_entry *fix;
6127@@ -194,8 +305,33 @@ good_area:
6128
6129 acc_type = parisc_acctyp(code,regs->iir);
6130
6131- if ((vma->vm_flags & acc_type) != acc_type)
6132+ if ((vma->vm_flags & acc_type) != acc_type) {
6133+
6134+#ifdef CONFIG_PAX_PAGEEXEC
6135+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
6136+ (address & ~3UL) == instruction_pointer(regs))
6137+ {
6138+ up_read(&mm->mmap_sem);
6139+ switch (pax_handle_fetch_fault(regs)) {
6140+
6141+#ifdef CONFIG_PAX_EMUPLT
6142+ case 3:
6143+ return;
6144+#endif
6145+
6146+#ifdef CONFIG_PAX_EMUTRAMP
6147+ case 2:
6148+ return;
6149+#endif
6150+
6151+ }
6152+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
6153+ do_group_exit(SIGKILL);
6154+ }
6155+#endif
6156+
6157 goto bad_area;
6158+ }
6159
6160 /*
6161 * If for any reason at all we couldn't handle the fault, make
6162diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
6163index e3b1d41..8e81edf 100644
6164--- a/arch/powerpc/include/asm/atomic.h
6165+++ b/arch/powerpc/include/asm/atomic.h
6166@@ -523,6 +523,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
6167 return t1;
6168 }
6169
6170+#define atomic64_read_unchecked(v) atomic64_read(v)
6171+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
6172+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
6173+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
6174+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
6175+#define atomic64_inc_unchecked(v) atomic64_inc(v)
6176+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
6177+#define atomic64_dec_unchecked(v) atomic64_dec(v)
6178+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
6179+
6180 #endif /* __powerpc64__ */
6181
6182 #endif /* __KERNEL__ */
6183diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
6184index 9e495c9..b6878e5 100644
6185--- a/arch/powerpc/include/asm/cache.h
6186+++ b/arch/powerpc/include/asm/cache.h
6187@@ -3,6 +3,7 @@
6188
6189 #ifdef __KERNEL__
6190
6191+#include <linux/const.h>
6192
6193 /* bytes per L1 cache line */
6194 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
6195@@ -22,7 +23,7 @@
6196 #define L1_CACHE_SHIFT 7
6197 #endif
6198
6199-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6200+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6201
6202 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6203
6204diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
6205index ac9790f..6d30741 100644
6206--- a/arch/powerpc/include/asm/elf.h
6207+++ b/arch/powerpc/include/asm/elf.h
6208@@ -28,8 +28,19 @@
6209 the loader. We need to make sure that it is out of the way of the program
6210 that it will "exec", and that there is sufficient room for the brk. */
6211
6212-extern unsigned long randomize_et_dyn(unsigned long base);
6213-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
6214+#define ELF_ET_DYN_BASE (0x20000000)
6215+
6216+#ifdef CONFIG_PAX_ASLR
6217+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
6218+
6219+#ifdef __powerpc64__
6220+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
6221+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
6222+#else
6223+#define PAX_DELTA_MMAP_LEN 15
6224+#define PAX_DELTA_STACK_LEN 15
6225+#endif
6226+#endif
6227
6228 /*
6229 * Our registers are always unsigned longs, whether we're a 32 bit
6230@@ -122,10 +133,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6231 (0x7ff >> (PAGE_SHIFT - 12)) : \
6232 (0x3ffff >> (PAGE_SHIFT - 12)))
6233
6234-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
6235-#define arch_randomize_brk arch_randomize_brk
6236-
6237-
6238 #ifdef CONFIG_SPU_BASE
6239 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
6240 #define NT_SPU 1
6241diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
6242index 8196e9c..d83a9f3 100644
6243--- a/arch/powerpc/include/asm/exec.h
6244+++ b/arch/powerpc/include/asm/exec.h
6245@@ -4,6 +4,6 @@
6246 #ifndef _ASM_POWERPC_EXEC_H
6247 #define _ASM_POWERPC_EXEC_H
6248
6249-extern unsigned long arch_align_stack(unsigned long sp);
6250+#define arch_align_stack(x) ((x) & ~0xfUL)
6251
6252 #endif /* _ASM_POWERPC_EXEC_H */
6253diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
6254index 5acabbd..7ea14fa 100644
6255--- a/arch/powerpc/include/asm/kmap_types.h
6256+++ b/arch/powerpc/include/asm/kmap_types.h
6257@@ -10,7 +10,7 @@
6258 * 2 of the License, or (at your option) any later version.
6259 */
6260
6261-#define KM_TYPE_NR 16
6262+#define KM_TYPE_NR 17
6263
6264 #endif /* __KERNEL__ */
6265 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
6266diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
6267index 8565c25..2865190 100644
6268--- a/arch/powerpc/include/asm/mman.h
6269+++ b/arch/powerpc/include/asm/mman.h
6270@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
6271 }
6272 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
6273
6274-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
6275+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
6276 {
6277 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
6278 }
6279diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
6280index f072e97..b436dee 100644
6281--- a/arch/powerpc/include/asm/page.h
6282+++ b/arch/powerpc/include/asm/page.h
6283@@ -220,8 +220,9 @@ extern long long virt_phys_offset;
6284 * and needs to be executable. This means the whole heap ends
6285 * up being executable.
6286 */
6287-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
6288- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6289+#define VM_DATA_DEFAULT_FLAGS32 \
6290+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
6291+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6292
6293 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
6294 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6295@@ -249,6 +250,9 @@ extern long long virt_phys_offset;
6296 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
6297 #endif
6298
6299+#define ktla_ktva(addr) (addr)
6300+#define ktva_ktla(addr) (addr)
6301+
6302 /*
6303 * Use the top bit of the higher-level page table entries to indicate whether
6304 * the entries we point to contain hugepages. This works because we know that
6305diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
6306index cd915d6..c10cee8 100644
6307--- a/arch/powerpc/include/asm/page_64.h
6308+++ b/arch/powerpc/include/asm/page_64.h
6309@@ -154,15 +154,18 @@ do { \
6310 * stack by default, so in the absence of a PT_GNU_STACK program header
6311 * we turn execute permission off.
6312 */
6313-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
6314- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6315+#define VM_STACK_DEFAULT_FLAGS32 \
6316+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
6317+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6318
6319 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
6320 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6321
6322+#ifndef CONFIG_PAX_PAGEEXEC
6323 #define VM_STACK_DEFAULT_FLAGS \
6324 (is_32bit_task() ? \
6325 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
6326+#endif
6327
6328 #include <asm-generic/getorder.h>
6329
6330diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
6331index 292725c..f87ae14 100644
6332--- a/arch/powerpc/include/asm/pgalloc-64.h
6333+++ b/arch/powerpc/include/asm/pgalloc-64.h
6334@@ -50,6 +50,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
6335 #ifndef CONFIG_PPC_64K_PAGES
6336
6337 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
6338+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
6339
6340 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
6341 {
6342@@ -67,6 +68,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6343 pud_set(pud, (unsigned long)pmd);
6344 }
6345
6346+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6347+{
6348+ pud_populate(mm, pud, pmd);
6349+}
6350+
6351 #define pmd_populate(mm, pmd, pte_page) \
6352 pmd_populate_kernel(mm, pmd, page_address(pte_page))
6353 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
6354@@ -76,6 +82,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6355 #else /* CONFIG_PPC_64K_PAGES */
6356
6357 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
6358+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
6359
6360 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
6361 pte_t *pte)
6362diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
6363index a9cbd3b..3b67efa 100644
6364--- a/arch/powerpc/include/asm/pgtable.h
6365+++ b/arch/powerpc/include/asm/pgtable.h
6366@@ -2,6 +2,7 @@
6367 #define _ASM_POWERPC_PGTABLE_H
6368 #ifdef __KERNEL__
6369
6370+#include <linux/const.h>
6371 #ifndef __ASSEMBLY__
6372 #include <asm/processor.h> /* For TASK_SIZE */
6373 #include <asm/mmu.h>
6374diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
6375index 4aad413..85d86bf 100644
6376--- a/arch/powerpc/include/asm/pte-hash32.h
6377+++ b/arch/powerpc/include/asm/pte-hash32.h
6378@@ -21,6 +21,7 @@
6379 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
6380 #define _PAGE_USER 0x004 /* usermode access allowed */
6381 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
6382+#define _PAGE_EXEC _PAGE_GUARDED
6383 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
6384 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
6385 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
6386diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
6387index 3b097a8..8f8c774 100644
6388--- a/arch/powerpc/include/asm/reg.h
6389+++ b/arch/powerpc/include/asm/reg.h
6390@@ -234,6 +234,7 @@
6391 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
6392 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
6393 #define DSISR_NOHPTE 0x40000000 /* no translation found */
6394+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
6395 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
6396 #define DSISR_ISSTORE 0x02000000 /* access was a store */
6397 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
6398diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
6399index 195ce2a..ab5c614 100644
6400--- a/arch/powerpc/include/asm/smp.h
6401+++ b/arch/powerpc/include/asm/smp.h
6402@@ -50,7 +50,7 @@ struct smp_ops_t {
6403 int (*cpu_disable)(void);
6404 void (*cpu_die)(unsigned int nr);
6405 int (*cpu_bootable)(unsigned int nr);
6406-};
6407+} __no_const;
6408
6409 extern void smp_send_debugger_break(void);
6410 extern void start_secondary_resume(void);
6411diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
6412index 406b7b9..af63426 100644
6413--- a/arch/powerpc/include/asm/thread_info.h
6414+++ b/arch/powerpc/include/asm/thread_info.h
6415@@ -97,7 +97,6 @@ static inline struct thread_info *current_thread_info(void)
6416 #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
6417 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
6418 #define TIF_SINGLESTEP 8 /* singlestepping active */
6419-#define TIF_MEMDIE 9 /* is terminating due to OOM killer */
6420 #define TIF_SECCOMP 10 /* secure computing */
6421 #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
6422 #define TIF_NOERROR 12 /* Force successful syscall return */
6423@@ -106,6 +105,9 @@ static inline struct thread_info *current_thread_info(void)
6424 #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
6425 #define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation
6426 for stack store? */
6427+#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
6428+/* mask must be expressable within 16 bits to satisfy 'andi' instruction reqs */
6429+#define TIF_GRSEC_SETXID 9 /* update credentials on syscall entry/exit */
6430
6431 /* as above, but as bit values */
6432 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
6433@@ -124,8 +126,10 @@ static inline struct thread_info *current_thread_info(void)
6434 #define _TIF_UPROBE (1<<TIF_UPROBE)
6435 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6436 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
6437+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6438 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
6439- _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
6440+ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
6441+ _TIF_GRSEC_SETXID)
6442
6443 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
6444 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
6445diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
6446index 4db4959..aba5c41 100644
6447--- a/arch/powerpc/include/asm/uaccess.h
6448+++ b/arch/powerpc/include/asm/uaccess.h
6449@@ -318,52 +318,6 @@ do { \
6450 extern unsigned long __copy_tofrom_user(void __user *to,
6451 const void __user *from, unsigned long size);
6452
6453-#ifndef __powerpc64__
6454-
6455-static inline unsigned long copy_from_user(void *to,
6456- const void __user *from, unsigned long n)
6457-{
6458- unsigned long over;
6459-
6460- if (access_ok(VERIFY_READ, from, n))
6461- return __copy_tofrom_user((__force void __user *)to, from, n);
6462- if ((unsigned long)from < TASK_SIZE) {
6463- over = (unsigned long)from + n - TASK_SIZE;
6464- return __copy_tofrom_user((__force void __user *)to, from,
6465- n - over) + over;
6466- }
6467- return n;
6468-}
6469-
6470-static inline unsigned long copy_to_user(void __user *to,
6471- const void *from, unsigned long n)
6472-{
6473- unsigned long over;
6474-
6475- if (access_ok(VERIFY_WRITE, to, n))
6476- return __copy_tofrom_user(to, (__force void __user *)from, n);
6477- if ((unsigned long)to < TASK_SIZE) {
6478- over = (unsigned long)to + n - TASK_SIZE;
6479- return __copy_tofrom_user(to, (__force void __user *)from,
6480- n - over) + over;
6481- }
6482- return n;
6483-}
6484-
6485-#else /* __powerpc64__ */
6486-
6487-#define __copy_in_user(to, from, size) \
6488- __copy_tofrom_user((to), (from), (size))
6489-
6490-extern unsigned long copy_from_user(void *to, const void __user *from,
6491- unsigned long n);
6492-extern unsigned long copy_to_user(void __user *to, const void *from,
6493- unsigned long n);
6494-extern unsigned long copy_in_user(void __user *to, const void __user *from,
6495- unsigned long n);
6496-
6497-#endif /* __powerpc64__ */
6498-
6499 static inline unsigned long __copy_from_user_inatomic(void *to,
6500 const void __user *from, unsigned long n)
6501 {
6502@@ -387,6 +341,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
6503 if (ret == 0)
6504 return 0;
6505 }
6506+
6507+ if (!__builtin_constant_p(n))
6508+ check_object_size(to, n, false);
6509+
6510 return __copy_tofrom_user((__force void __user *)to, from, n);
6511 }
6512
6513@@ -413,6 +371,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
6514 if (ret == 0)
6515 return 0;
6516 }
6517+
6518+ if (!__builtin_constant_p(n))
6519+ check_object_size(from, n, true);
6520+
6521 return __copy_tofrom_user(to, (__force const void __user *)from, n);
6522 }
6523
6524@@ -430,6 +392,92 @@ static inline unsigned long __copy_to_user(void __user *to,
6525 return __copy_to_user_inatomic(to, from, size);
6526 }
6527
6528+#ifndef __powerpc64__
6529+
6530+static inline unsigned long __must_check copy_from_user(void *to,
6531+ const void __user *from, unsigned long n)
6532+{
6533+ unsigned long over;
6534+
6535+ if ((long)n < 0)
6536+ return n;
6537+
6538+ if (access_ok(VERIFY_READ, from, n)) {
6539+ if (!__builtin_constant_p(n))
6540+ check_object_size(to, n, false);
6541+ return __copy_tofrom_user((__force void __user *)to, from, n);
6542+ }
6543+ if ((unsigned long)from < TASK_SIZE) {
6544+ over = (unsigned long)from + n - TASK_SIZE;
6545+ if (!__builtin_constant_p(n - over))
6546+ check_object_size(to, n - over, false);
6547+ return __copy_tofrom_user((__force void __user *)to, from,
6548+ n - over) + over;
6549+ }
6550+ return n;
6551+}
6552+
6553+static inline unsigned long __must_check copy_to_user(void __user *to,
6554+ const void *from, unsigned long n)
6555+{
6556+ unsigned long over;
6557+
6558+ if ((long)n < 0)
6559+ return n;
6560+
6561+ if (access_ok(VERIFY_WRITE, to, n)) {
6562+ if (!__builtin_constant_p(n))
6563+ check_object_size(from, n, true);
6564+ return __copy_tofrom_user(to, (__force void __user *)from, n);
6565+ }
6566+ if ((unsigned long)to < TASK_SIZE) {
6567+ over = (unsigned long)to + n - TASK_SIZE;
6568+ if (!__builtin_constant_p(n))
6569+ check_object_size(from, n - over, true);
6570+ return __copy_tofrom_user(to, (__force void __user *)from,
6571+ n - over) + over;
6572+ }
6573+ return n;
6574+}
6575+
6576+#else /* __powerpc64__ */
6577+
6578+#define __copy_in_user(to, from, size) \
6579+ __copy_tofrom_user((to), (from), (size))
6580+
6581+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
6582+{
6583+ if ((long)n < 0 || n > INT_MAX)
6584+ return n;
6585+
6586+ if (!__builtin_constant_p(n))
6587+ check_object_size(to, n, false);
6588+
6589+ if (likely(access_ok(VERIFY_READ, from, n)))
6590+ n = __copy_from_user(to, from, n);
6591+ else
6592+ memset(to, 0, n);
6593+ return n;
6594+}
6595+
6596+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
6597+{
6598+ if ((long)n < 0 || n > INT_MAX)
6599+ return n;
6600+
6601+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
6602+ if (!__builtin_constant_p(n))
6603+ check_object_size(from, n, true);
6604+ n = __copy_to_user(to, from, n);
6605+ }
6606+ return n;
6607+}
6608+
6609+extern unsigned long copy_in_user(void __user *to, const void __user *from,
6610+ unsigned long n);
6611+
6612+#endif /* __powerpc64__ */
6613+
6614 extern unsigned long __clear_user(void __user *addr, unsigned long size);
6615
6616 static inline unsigned long clear_user(void __user *addr, unsigned long size)
6617diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
6618index ae54553..cf2184d 100644
6619--- a/arch/powerpc/kernel/exceptions-64e.S
6620+++ b/arch/powerpc/kernel/exceptions-64e.S
6621@@ -716,6 +716,7 @@ storage_fault_common:
6622 std r14,_DAR(r1)
6623 std r15,_DSISR(r1)
6624 addi r3,r1,STACK_FRAME_OVERHEAD
6625+ bl .save_nvgprs
6626 mr r4,r14
6627 mr r5,r15
6628 ld r14,PACA_EXGEN+EX_R14(r13)
6629@@ -724,8 +725,7 @@ storage_fault_common:
6630 cmpdi r3,0
6631 bne- 1f
6632 b .ret_from_except_lite
6633-1: bl .save_nvgprs
6634- mr r5,r3
6635+1: mr r5,r3
6636 addi r3,r1,STACK_FRAME_OVERHEAD
6637 ld r4,_DAR(r1)
6638 bl .bad_page_fault
6639diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
6640index 644378e..b6f2c26 100644
6641--- a/arch/powerpc/kernel/exceptions-64s.S
6642+++ b/arch/powerpc/kernel/exceptions-64s.S
6643@@ -1390,10 +1390,10 @@ handle_page_fault:
6644 11: ld r4,_DAR(r1)
6645 ld r5,_DSISR(r1)
6646 addi r3,r1,STACK_FRAME_OVERHEAD
6647+ bl .save_nvgprs
6648 bl .do_page_fault
6649 cmpdi r3,0
6650 beq+ 12f
6651- bl .save_nvgprs
6652 mr r5,r3
6653 addi r3,r1,STACK_FRAME_OVERHEAD
6654 lwz r4,_DAR(r1)
6655diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
6656index 2e3200c..72095ce 100644
6657--- a/arch/powerpc/kernel/module_32.c
6658+++ b/arch/powerpc/kernel/module_32.c
6659@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
6660 me->arch.core_plt_section = i;
6661 }
6662 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
6663- printk("Module doesn't contain .plt or .init.plt sections.\n");
6664+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
6665 return -ENOEXEC;
6666 }
6667
6668@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
6669
6670 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
6671 /* Init, or core PLT? */
6672- if (location >= mod->module_core
6673- && location < mod->module_core + mod->core_size)
6674+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
6675+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
6676 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
6677- else
6678+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
6679+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
6680 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
6681+ else {
6682+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
6683+ return ~0UL;
6684+ }
6685
6686 /* Find this entry, or if that fails, the next avail. entry */
6687 while (entry->jump[0]) {
6688diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
6689index 0d86c8a..df4c5f2 100644
6690--- a/arch/powerpc/kernel/process.c
6691+++ b/arch/powerpc/kernel/process.c
6692@@ -871,8 +871,8 @@ void show_regs(struct pt_regs * regs)
6693 * Lookup NIP late so we have the best change of getting the
6694 * above info out without failing
6695 */
6696- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
6697- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
6698+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
6699+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
6700 #endif
6701 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
6702 printk("PACATMSCRATCH [%llx]\n", get_paca()->tm_scratch);
6703@@ -1331,10 +1331,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
6704 newsp = stack[0];
6705 ip = stack[STACK_FRAME_LR_SAVE];
6706 if (!firstframe || ip != lr) {
6707- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
6708+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
6709 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6710 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
6711- printk(" (%pS)",
6712+ printk(" (%pA)",
6713 (void *)current->ret_stack[curr_frame].ret);
6714 curr_frame--;
6715 }
6716@@ -1354,7 +1354,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
6717 struct pt_regs *regs = (struct pt_regs *)
6718 (sp + STACK_FRAME_OVERHEAD);
6719 lr = regs->link;
6720- printk("--- Exception: %lx at %pS\n LR = %pS\n",
6721+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
6722 regs->trap, (void *)regs->nip, (void *)lr);
6723 firstframe = 1;
6724 }
6725@@ -1396,58 +1396,3 @@ void notrace __ppc64_runlatch_off(void)
6726 mtspr(SPRN_CTRLT, ctrl);
6727 }
6728 #endif /* CONFIG_PPC64 */
6729-
6730-unsigned long arch_align_stack(unsigned long sp)
6731-{
6732- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6733- sp -= get_random_int() & ~PAGE_MASK;
6734- return sp & ~0xf;
6735-}
6736-
6737-static inline unsigned long brk_rnd(void)
6738-{
6739- unsigned long rnd = 0;
6740-
6741- /* 8MB for 32bit, 1GB for 64bit */
6742- if (is_32bit_task())
6743- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
6744- else
6745- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
6746-
6747- return rnd << PAGE_SHIFT;
6748-}
6749-
6750-unsigned long arch_randomize_brk(struct mm_struct *mm)
6751-{
6752- unsigned long base = mm->brk;
6753- unsigned long ret;
6754-
6755-#ifdef CONFIG_PPC_STD_MMU_64
6756- /*
6757- * If we are using 1TB segments and we are allowed to randomise
6758- * the heap, we can put it above 1TB so it is backed by a 1TB
6759- * segment. Otherwise the heap will be in the bottom 1TB
6760- * which always uses 256MB segments and this may result in a
6761- * performance penalty.
6762- */
6763- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
6764- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
6765-#endif
6766-
6767- ret = PAGE_ALIGN(base + brk_rnd());
6768-
6769- if (ret < mm->brk)
6770- return mm->brk;
6771-
6772- return ret;
6773-}
6774-
6775-unsigned long randomize_et_dyn(unsigned long base)
6776-{
6777- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
6778-
6779- if (ret < base)
6780- return base;
6781-
6782- return ret;
6783-}
6784diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
6785index f9b30c6..d72e7a3 100644
6786--- a/arch/powerpc/kernel/ptrace.c
6787+++ b/arch/powerpc/kernel/ptrace.c
6788@@ -1771,6 +1771,10 @@ long arch_ptrace(struct task_struct *child, long request,
6789 return ret;
6790 }
6791
6792+#ifdef CONFIG_GRKERNSEC_SETXID
6793+extern void gr_delayed_cred_worker(void);
6794+#endif
6795+
6796 /*
6797 * We must return the syscall number to actually look up in the table.
6798 * This can be -1L to skip running any syscall at all.
6799@@ -1781,6 +1785,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
6800
6801 secure_computing_strict(regs->gpr[0]);
6802
6803+#ifdef CONFIG_GRKERNSEC_SETXID
6804+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6805+ gr_delayed_cred_worker();
6806+#endif
6807+
6808 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
6809 tracehook_report_syscall_entry(regs))
6810 /*
6811@@ -1815,6 +1824,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
6812 {
6813 int step;
6814
6815+#ifdef CONFIG_GRKERNSEC_SETXID
6816+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6817+ gr_delayed_cred_worker();
6818+#endif
6819+
6820 audit_syscall_exit(regs);
6821
6822 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
6823diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
6824index 201385c..0f01828 100644
6825--- a/arch/powerpc/kernel/signal_32.c
6826+++ b/arch/powerpc/kernel/signal_32.c
6827@@ -976,7 +976,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
6828 /* Save user registers on the stack */
6829 frame = &rt_sf->uc.uc_mcontext;
6830 addr = frame;
6831- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
6832+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
6833 sigret = 0;
6834 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
6835 } else {
6836diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
6837index 3459473..2d40783 100644
6838--- a/arch/powerpc/kernel/signal_64.c
6839+++ b/arch/powerpc/kernel/signal_64.c
6840@@ -749,7 +749,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
6841 #endif
6842
6843 /* Set up to return from userspace. */
6844- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
6845+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
6846 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
6847 } else {
6848 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
6849diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
6850index 3ce1f86..c30e629 100644
6851--- a/arch/powerpc/kernel/sysfs.c
6852+++ b/arch/powerpc/kernel/sysfs.c
6853@@ -522,7 +522,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
6854 return NOTIFY_OK;
6855 }
6856
6857-static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
6858+static struct notifier_block sysfs_cpu_nb = {
6859 .notifier_call = sysfs_cpu_notify,
6860 };
6861
6862diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
6863index bf33ace..e836d8b 100644
6864--- a/arch/powerpc/kernel/traps.c
6865+++ b/arch/powerpc/kernel/traps.c
6866@@ -142,6 +142,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
6867 return flags;
6868 }
6869
6870+extern void gr_handle_kernel_exploit(void);
6871+
6872 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
6873 int signr)
6874 {
6875@@ -191,6 +193,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
6876 panic("Fatal exception in interrupt");
6877 if (panic_on_oops)
6878 panic("Fatal exception");
6879+
6880+ gr_handle_kernel_exploit();
6881+
6882 do_exit(signr);
6883 }
6884
6885diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
6886index 1b2076f..835e4be 100644
6887--- a/arch/powerpc/kernel/vdso.c
6888+++ b/arch/powerpc/kernel/vdso.c
6889@@ -34,6 +34,7 @@
6890 #include <asm/firmware.h>
6891 #include <asm/vdso.h>
6892 #include <asm/vdso_datapage.h>
6893+#include <asm/mman.h>
6894
6895 #include "setup.h"
6896
6897@@ -218,7 +219,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
6898 vdso_base = VDSO32_MBASE;
6899 #endif
6900
6901- current->mm->context.vdso_base = 0;
6902+ current->mm->context.vdso_base = ~0UL;
6903
6904 /* vDSO has a problem and was disabled, just don't "enable" it for the
6905 * process
6906@@ -238,7 +239,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
6907 vdso_base = get_unmapped_area(NULL, vdso_base,
6908 (vdso_pages << PAGE_SHIFT) +
6909 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
6910- 0, 0);
6911+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
6912 if (IS_ERR_VALUE(vdso_base)) {
6913 rc = vdso_base;
6914 goto fail_mmapsem;
6915diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
6916index 5eea6f3..5d10396 100644
6917--- a/arch/powerpc/lib/usercopy_64.c
6918+++ b/arch/powerpc/lib/usercopy_64.c
6919@@ -9,22 +9,6 @@
6920 #include <linux/module.h>
6921 #include <asm/uaccess.h>
6922
6923-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
6924-{
6925- if (likely(access_ok(VERIFY_READ, from, n)))
6926- n = __copy_from_user(to, from, n);
6927- else
6928- memset(to, 0, n);
6929- return n;
6930-}
6931-
6932-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
6933-{
6934- if (likely(access_ok(VERIFY_WRITE, to, n)))
6935- n = __copy_to_user(to, from, n);
6936- return n;
6937-}
6938-
6939 unsigned long copy_in_user(void __user *to, const void __user *from,
6940 unsigned long n)
6941 {
6942@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
6943 return n;
6944 }
6945
6946-EXPORT_SYMBOL(copy_from_user);
6947-EXPORT_SYMBOL(copy_to_user);
6948 EXPORT_SYMBOL(copy_in_user);
6949
6950diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
6951index 229951f..cdeca42 100644
6952--- a/arch/powerpc/mm/fault.c
6953+++ b/arch/powerpc/mm/fault.c
6954@@ -32,6 +32,10 @@
6955 #include <linux/perf_event.h>
6956 #include <linux/magic.h>
6957 #include <linux/ratelimit.h>
6958+#include <linux/slab.h>
6959+#include <linux/pagemap.h>
6960+#include <linux/compiler.h>
6961+#include <linux/unistd.h>
6962
6963 #include <asm/firmware.h>
6964 #include <asm/page.h>
6965@@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
6966 }
6967 #endif
6968
6969+#ifdef CONFIG_PAX_PAGEEXEC
6970+/*
6971+ * PaX: decide what to do with offenders (regs->nip = fault address)
6972+ *
6973+ * returns 1 when task should be killed
6974+ */
6975+static int pax_handle_fetch_fault(struct pt_regs *regs)
6976+{
6977+ return 1;
6978+}
6979+
6980+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6981+{
6982+ unsigned long i;
6983+
6984+ printk(KERN_ERR "PAX: bytes at PC: ");
6985+ for (i = 0; i < 5; i++) {
6986+ unsigned int c;
6987+ if (get_user(c, (unsigned int __user *)pc+i))
6988+ printk(KERN_CONT "???????? ");
6989+ else
6990+ printk(KERN_CONT "%08x ", c);
6991+ }
6992+ printk("\n");
6993+}
6994+#endif
6995+
6996 /*
6997 * Check whether the instruction at regs->nip is a store using
6998 * an update addressing form which will update r1.
6999@@ -213,7 +244,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
7000 * indicate errors in DSISR but can validly be set in SRR1.
7001 */
7002 if (trap == 0x400)
7003- error_code &= 0x48200000;
7004+ error_code &= 0x58200000;
7005 else
7006 is_write = error_code & DSISR_ISSTORE;
7007 #else
7008@@ -364,7 +395,7 @@ good_area:
7009 * "undefined". Of those that can be set, this is the only
7010 * one which seems bad.
7011 */
7012- if (error_code & 0x10000000)
7013+ if (error_code & DSISR_GUARDED)
7014 /* Guarded storage error. */
7015 goto bad_area;
7016 #endif /* CONFIG_8xx */
7017@@ -379,7 +410,7 @@ good_area:
7018 * processors use the same I/D cache coherency mechanism
7019 * as embedded.
7020 */
7021- if (error_code & DSISR_PROTFAULT)
7022+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
7023 goto bad_area;
7024 #endif /* CONFIG_PPC_STD_MMU */
7025
7026@@ -462,6 +493,23 @@ bad_area:
7027 bad_area_nosemaphore:
7028 /* User mode accesses cause a SIGSEGV */
7029 if (user_mode(regs)) {
7030+
7031+#ifdef CONFIG_PAX_PAGEEXEC
7032+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
7033+#ifdef CONFIG_PPC_STD_MMU
7034+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
7035+#else
7036+ if (is_exec && regs->nip == address) {
7037+#endif
7038+ switch (pax_handle_fetch_fault(regs)) {
7039+ }
7040+
7041+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
7042+ do_group_exit(SIGKILL);
7043+ }
7044+ }
7045+#endif
7046+
7047 _exception(SIGSEGV, regs, code, address);
7048 return 0;
7049 }
7050diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
7051index 67a42ed..cd463e0 100644
7052--- a/arch/powerpc/mm/mmap_64.c
7053+++ b/arch/powerpc/mm/mmap_64.c
7054@@ -57,6 +57,10 @@ static unsigned long mmap_rnd(void)
7055 {
7056 unsigned long rnd = 0;
7057
7058+#ifdef CONFIG_PAX_RANDMMAP
7059+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7060+#endif
7061+
7062 if (current->flags & PF_RANDOMIZE) {
7063 /* 8MB for 32bit, 1GB for 64bit */
7064 if (is_32bit_task())
7065@@ -91,10 +95,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7066 */
7067 if (mmap_is_legacy()) {
7068 mm->mmap_base = TASK_UNMAPPED_BASE;
7069+
7070+#ifdef CONFIG_PAX_RANDMMAP
7071+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7072+ mm->mmap_base += mm->delta_mmap;
7073+#endif
7074+
7075 mm->get_unmapped_area = arch_get_unmapped_area;
7076 mm->unmap_area = arch_unmap_area;
7077 } else {
7078 mm->mmap_base = mmap_base();
7079+
7080+#ifdef CONFIG_PAX_RANDMMAP
7081+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7082+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7083+#endif
7084+
7085 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7086 mm->unmap_area = arch_unmap_area_topdown;
7087 }
7088diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
7089index e779642..e5bb889 100644
7090--- a/arch/powerpc/mm/mmu_context_nohash.c
7091+++ b/arch/powerpc/mm/mmu_context_nohash.c
7092@@ -363,7 +363,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
7093 return NOTIFY_OK;
7094 }
7095
7096-static struct notifier_block __cpuinitdata mmu_context_cpu_nb = {
7097+static struct notifier_block mmu_context_cpu_nb = {
7098 .notifier_call = mmu_context_cpu_notify,
7099 };
7100
7101diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
7102index 6a252c4..3024d81 100644
7103--- a/arch/powerpc/mm/numa.c
7104+++ b/arch/powerpc/mm/numa.c
7105@@ -932,7 +932,7 @@ static void __init *careful_zallocation(int nid, unsigned long size,
7106 return ret;
7107 }
7108
7109-static struct notifier_block __cpuinitdata ppc64_numa_nb = {
7110+static struct notifier_block ppc64_numa_nb = {
7111 .notifier_call = cpu_numa_callback,
7112 .priority = 1 /* Must run before sched domains notifier. */
7113 };
7114diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
7115index cf9dada..241529f 100644
7116--- a/arch/powerpc/mm/slice.c
7117+++ b/arch/powerpc/mm/slice.c
7118@@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
7119 if ((mm->task_size - len) < addr)
7120 return 0;
7121 vma = find_vma(mm, addr);
7122- return (!vma || (addr + len) <= vma->vm_start);
7123+ return check_heap_stack_gap(vma, addr, len, 0);
7124 }
7125
7126 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
7127@@ -272,7 +272,7 @@ full_search:
7128 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
7129 continue;
7130 }
7131- if (!vma || addr + len <= vma->vm_start) {
7132+ if (check_heap_stack_gap(vma, addr, len, 0)) {
7133 /*
7134 * Remember the place where we stopped the search:
7135 */
7136@@ -329,10 +329,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
7137 }
7138 }
7139
7140- addr = mm->mmap_base;
7141- while (addr > len) {
7142+ if (mm->mmap_base < len)
7143+ addr = -ENOMEM;
7144+ else
7145+ addr = mm->mmap_base - len;
7146+
7147+ while (!IS_ERR_VALUE(addr)) {
7148 /* Go down by chunk size */
7149- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
7150+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
7151
7152 /* Check for hit with different page size */
7153 mask = slice_range_to_mask(addr, len);
7154@@ -352,7 +356,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
7155 * return with success:
7156 */
7157 vma = find_vma(mm, addr);
7158- if (!vma || (addr + len) <= vma->vm_start) {
7159+ if (check_heap_stack_gap(vma, addr, len, 0)) {
7160 /* remember the address as a hint for next time */
7161 if (use_cache)
7162 mm->free_area_cache = addr;
7163@@ -364,7 +368,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
7164 mm->cached_hole_size = vma->vm_start - addr;
7165
7166 /* try just below the current vma->vm_start */
7167- addr = vma->vm_start;
7168+ addr = skip_heap_stack_gap(vma, len, 0);
7169 }
7170
7171 /*
7172@@ -442,6 +446,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
7173 if (fixed && addr > (mm->task_size - len))
7174 return -EINVAL;
7175
7176+#ifdef CONFIG_PAX_RANDMMAP
7177+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
7178+ addr = 0;
7179+#endif
7180+
7181 /* If hint, make sure it matches our alignment restrictions */
7182 if (!fixed && addr) {
7183 addr = _ALIGN_UP(addr, 1ul << pshift);
7184diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
7185index 68c57d3..1fdcfb2 100644
7186--- a/arch/powerpc/platforms/cell/spufs/file.c
7187+++ b/arch/powerpc/platforms/cell/spufs/file.c
7188@@ -281,9 +281,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
7189 return VM_FAULT_NOPAGE;
7190 }
7191
7192-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
7193+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
7194 unsigned long address,
7195- void *buf, int len, int write)
7196+ void *buf, size_t len, int write)
7197 {
7198 struct spu_context *ctx = vma->vm_file->private_data;
7199 unsigned long offset = address - vma->vm_start;
7200diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
7201index bdb738a..49c9f95 100644
7202--- a/arch/powerpc/platforms/powermac/smp.c
7203+++ b/arch/powerpc/platforms/powermac/smp.c
7204@@ -885,7 +885,7 @@ static int smp_core99_cpu_notify(struct notifier_block *self,
7205 return NOTIFY_OK;
7206 }
7207
7208-static struct notifier_block __cpuinitdata smp_core99_cpu_nb = {
7209+static struct notifier_block smp_core99_cpu_nb = {
7210 .notifier_call = smp_core99_cpu_notify,
7211 };
7212 #endif /* CONFIG_HOTPLUG_CPU */
7213diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
7214index c797832..ce575c8 100644
7215--- a/arch/s390/include/asm/atomic.h
7216+++ b/arch/s390/include/asm/atomic.h
7217@@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
7218 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
7219 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
7220
7221+#define atomic64_read_unchecked(v) atomic64_read(v)
7222+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7223+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7224+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7225+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7226+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7227+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7228+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7229+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7230+
7231 #define smp_mb__before_atomic_dec() smp_mb()
7232 #define smp_mb__after_atomic_dec() smp_mb()
7233 #define smp_mb__before_atomic_inc() smp_mb()
7234diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
7235index 4d7ccac..d03d0ad 100644
7236--- a/arch/s390/include/asm/cache.h
7237+++ b/arch/s390/include/asm/cache.h
7238@@ -9,8 +9,10 @@
7239 #ifndef __ARCH_S390_CACHE_H
7240 #define __ARCH_S390_CACHE_H
7241
7242-#define L1_CACHE_BYTES 256
7243+#include <linux/const.h>
7244+
7245 #define L1_CACHE_SHIFT 8
7246+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7247 #define NET_SKB_PAD 32
7248
7249 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
7250diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
7251index 1bfdf24..9c9ab2e 100644
7252--- a/arch/s390/include/asm/elf.h
7253+++ b/arch/s390/include/asm/elf.h
7254@@ -160,8 +160,14 @@ extern unsigned int vdso_enabled;
7255 the loader. We need to make sure that it is out of the way of the program
7256 that it will "exec", and that there is sufficient room for the brk. */
7257
7258-extern unsigned long randomize_et_dyn(unsigned long base);
7259-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
7260+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
7261+
7262+#ifdef CONFIG_PAX_ASLR
7263+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
7264+
7265+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
7266+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
7267+#endif
7268
7269 /* This yields a mask that user programs can use to figure out what
7270 instruction set this CPU supports. */
7271@@ -207,9 +213,6 @@ struct linux_binprm;
7272 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
7273 int arch_setup_additional_pages(struct linux_binprm *, int);
7274
7275-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7276-#define arch_randomize_brk arch_randomize_brk
7277-
7278 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
7279
7280 #endif
7281diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
7282index c4a93d6..4d2a9b4 100644
7283--- a/arch/s390/include/asm/exec.h
7284+++ b/arch/s390/include/asm/exec.h
7285@@ -7,6 +7,6 @@
7286 #ifndef __ASM_EXEC_H
7287 #define __ASM_EXEC_H
7288
7289-extern unsigned long arch_align_stack(unsigned long sp);
7290+#define arch_align_stack(x) ((x) & ~0xfUL)
7291
7292 #endif /* __ASM_EXEC_H */
7293diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
7294index 9c33ed4..e40cbef 100644
7295--- a/arch/s390/include/asm/uaccess.h
7296+++ b/arch/s390/include/asm/uaccess.h
7297@@ -252,6 +252,10 @@ static inline unsigned long __must_check
7298 copy_to_user(void __user *to, const void *from, unsigned long n)
7299 {
7300 might_fault();
7301+
7302+ if ((long)n < 0)
7303+ return n;
7304+
7305 return __copy_to_user(to, from, n);
7306 }
7307
7308@@ -275,6 +279,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
7309 static inline unsigned long __must_check
7310 __copy_from_user(void *to, const void __user *from, unsigned long n)
7311 {
7312+ if ((long)n < 0)
7313+ return n;
7314+
7315 if (__builtin_constant_p(n) && (n <= 256))
7316 return uaccess.copy_from_user_small(n, from, to);
7317 else
7318@@ -306,10 +313,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
7319 static inline unsigned long __must_check
7320 copy_from_user(void *to, const void __user *from, unsigned long n)
7321 {
7322- unsigned int sz = __compiletime_object_size(to);
7323+ size_t sz = __compiletime_object_size(to);
7324
7325 might_fault();
7326- if (unlikely(sz != -1 && sz < n)) {
7327+
7328+ if ((long)n < 0)
7329+ return n;
7330+
7331+ if (unlikely(sz != (size_t)-1 && sz < n)) {
7332 copy_from_user_overflow();
7333 return n;
7334 }
7335diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
7336index 7845e15..59c4353 100644
7337--- a/arch/s390/kernel/module.c
7338+++ b/arch/s390/kernel/module.c
7339@@ -169,11 +169,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
7340
7341 /* Increase core size by size of got & plt and set start
7342 offsets for got and plt. */
7343- me->core_size = ALIGN(me->core_size, 4);
7344- me->arch.got_offset = me->core_size;
7345- me->core_size += me->arch.got_size;
7346- me->arch.plt_offset = me->core_size;
7347- me->core_size += me->arch.plt_size;
7348+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
7349+ me->arch.got_offset = me->core_size_rw;
7350+ me->core_size_rw += me->arch.got_size;
7351+ me->arch.plt_offset = me->core_size_rx;
7352+ me->core_size_rx += me->arch.plt_size;
7353 return 0;
7354 }
7355
7356@@ -289,7 +289,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7357 if (info->got_initialized == 0) {
7358 Elf_Addr *gotent;
7359
7360- gotent = me->module_core + me->arch.got_offset +
7361+ gotent = me->module_core_rw + me->arch.got_offset +
7362 info->got_offset;
7363 *gotent = val;
7364 info->got_initialized = 1;
7365@@ -312,7 +312,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7366 rc = apply_rela_bits(loc, val, 0, 64, 0);
7367 else if (r_type == R_390_GOTENT ||
7368 r_type == R_390_GOTPLTENT) {
7369- val += (Elf_Addr) me->module_core - loc;
7370+ val += (Elf_Addr) me->module_core_rw - loc;
7371 rc = apply_rela_bits(loc, val, 1, 32, 1);
7372 }
7373 break;
7374@@ -325,7 +325,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7375 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
7376 if (info->plt_initialized == 0) {
7377 unsigned int *ip;
7378- ip = me->module_core + me->arch.plt_offset +
7379+ ip = me->module_core_rx + me->arch.plt_offset +
7380 info->plt_offset;
7381 #ifndef CONFIG_64BIT
7382 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
7383@@ -350,7 +350,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7384 val - loc + 0xffffUL < 0x1ffffeUL) ||
7385 (r_type == R_390_PLT32DBL &&
7386 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
7387- val = (Elf_Addr) me->module_core +
7388+ val = (Elf_Addr) me->module_core_rx +
7389 me->arch.plt_offset +
7390 info->plt_offset;
7391 val += rela->r_addend - loc;
7392@@ -372,7 +372,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7393 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
7394 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
7395 val = val + rela->r_addend -
7396- ((Elf_Addr) me->module_core + me->arch.got_offset);
7397+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
7398 if (r_type == R_390_GOTOFF16)
7399 rc = apply_rela_bits(loc, val, 0, 16, 0);
7400 else if (r_type == R_390_GOTOFF32)
7401@@ -382,7 +382,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7402 break;
7403 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
7404 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
7405- val = (Elf_Addr) me->module_core + me->arch.got_offset +
7406+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
7407 rela->r_addend - loc;
7408 if (r_type == R_390_GOTPC)
7409 rc = apply_rela_bits(loc, val, 1, 32, 0);
7410diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
7411index 536d645..4a5bd9e 100644
7412--- a/arch/s390/kernel/process.c
7413+++ b/arch/s390/kernel/process.c
7414@@ -250,39 +250,3 @@ unsigned long get_wchan(struct task_struct *p)
7415 }
7416 return 0;
7417 }
7418-
7419-unsigned long arch_align_stack(unsigned long sp)
7420-{
7421- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7422- sp -= get_random_int() & ~PAGE_MASK;
7423- return sp & ~0xf;
7424-}
7425-
7426-static inline unsigned long brk_rnd(void)
7427-{
7428- /* 8MB for 32bit, 1GB for 64bit */
7429- if (is_32bit_task())
7430- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
7431- else
7432- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
7433-}
7434-
7435-unsigned long arch_randomize_brk(struct mm_struct *mm)
7436-{
7437- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
7438-
7439- if (ret < mm->brk)
7440- return mm->brk;
7441- return ret;
7442-}
7443-
7444-unsigned long randomize_et_dyn(unsigned long base)
7445-{
7446- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
7447-
7448- if (!(current->flags & PF_RANDOMIZE))
7449- return base;
7450- if (ret < base)
7451- return base;
7452- return ret;
7453-}
7454diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
7455index 06bafec..2bca531 100644
7456--- a/arch/s390/mm/mmap.c
7457+++ b/arch/s390/mm/mmap.c
7458@@ -90,10 +90,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7459 */
7460 if (mmap_is_legacy()) {
7461 mm->mmap_base = TASK_UNMAPPED_BASE;
7462+
7463+#ifdef CONFIG_PAX_RANDMMAP
7464+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7465+ mm->mmap_base += mm->delta_mmap;
7466+#endif
7467+
7468 mm->get_unmapped_area = arch_get_unmapped_area;
7469 mm->unmap_area = arch_unmap_area;
7470 } else {
7471 mm->mmap_base = mmap_base();
7472+
7473+#ifdef CONFIG_PAX_RANDMMAP
7474+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7475+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7476+#endif
7477+
7478 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7479 mm->unmap_area = arch_unmap_area_topdown;
7480 }
7481@@ -175,10 +187,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7482 */
7483 if (mmap_is_legacy()) {
7484 mm->mmap_base = TASK_UNMAPPED_BASE;
7485+
7486+#ifdef CONFIG_PAX_RANDMMAP
7487+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7488+ mm->mmap_base += mm->delta_mmap;
7489+#endif
7490+
7491 mm->get_unmapped_area = s390_get_unmapped_area;
7492 mm->unmap_area = arch_unmap_area;
7493 } else {
7494 mm->mmap_base = mmap_base();
7495+
7496+#ifdef CONFIG_PAX_RANDMMAP
7497+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7498+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7499+#endif
7500+
7501 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
7502 mm->unmap_area = arch_unmap_area_topdown;
7503 }
7504diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
7505index ae3d59f..f65f075 100644
7506--- a/arch/score/include/asm/cache.h
7507+++ b/arch/score/include/asm/cache.h
7508@@ -1,7 +1,9 @@
7509 #ifndef _ASM_SCORE_CACHE_H
7510 #define _ASM_SCORE_CACHE_H
7511
7512+#include <linux/const.h>
7513+
7514 #define L1_CACHE_SHIFT 4
7515-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7516+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7517
7518 #endif /* _ASM_SCORE_CACHE_H */
7519diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
7520index f9f3cd5..58ff438 100644
7521--- a/arch/score/include/asm/exec.h
7522+++ b/arch/score/include/asm/exec.h
7523@@ -1,6 +1,6 @@
7524 #ifndef _ASM_SCORE_EXEC_H
7525 #define _ASM_SCORE_EXEC_H
7526
7527-extern unsigned long arch_align_stack(unsigned long sp);
7528+#define arch_align_stack(x) (x)
7529
7530 #endif /* _ASM_SCORE_EXEC_H */
7531diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
7532index 7956846..5f37677 100644
7533--- a/arch/score/kernel/process.c
7534+++ b/arch/score/kernel/process.c
7535@@ -134,8 +134,3 @@ unsigned long get_wchan(struct task_struct *task)
7536
7537 return task_pt_regs(task)->cp0_epc;
7538 }
7539-
7540-unsigned long arch_align_stack(unsigned long sp)
7541-{
7542- return sp;
7543-}
7544diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
7545index ef9e555..331bd29 100644
7546--- a/arch/sh/include/asm/cache.h
7547+++ b/arch/sh/include/asm/cache.h
7548@@ -9,10 +9,11 @@
7549 #define __ASM_SH_CACHE_H
7550 #ifdef __KERNEL__
7551
7552+#include <linux/const.h>
7553 #include <linux/init.h>
7554 #include <cpu/cache.h>
7555
7556-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7557+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7558
7559 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
7560
7561diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
7562index 03f2b55..b0270327 100644
7563--- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
7564+++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
7565@@ -143,7 +143,7 @@ shx3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
7566 return NOTIFY_OK;
7567 }
7568
7569-static struct notifier_block __cpuinitdata shx3_cpu_notifier = {
7570+static struct notifier_block shx3_cpu_notifier = {
7571 .notifier_call = shx3_cpu_callback,
7572 };
7573
7574diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
7575index 6777177..cb5e44f 100644
7576--- a/arch/sh/mm/mmap.c
7577+++ b/arch/sh/mm/mmap.c
7578@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7579 struct mm_struct *mm = current->mm;
7580 struct vm_area_struct *vma;
7581 int do_colour_align;
7582+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
7583 struct vm_unmapped_area_info info;
7584
7585 if (flags & MAP_FIXED) {
7586@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7587 if (filp || (flags & MAP_SHARED))
7588 do_colour_align = 1;
7589
7590+#ifdef CONFIG_PAX_RANDMMAP
7591+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7592+#endif
7593+
7594 if (addr) {
7595 if (do_colour_align)
7596 addr = COLOUR_ALIGN(addr, pgoff);
7597@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7598 addr = PAGE_ALIGN(addr);
7599
7600 vma = find_vma(mm, addr);
7601- if (TASK_SIZE - len >= addr &&
7602- (!vma || addr + len <= vma->vm_start))
7603+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
7604 return addr;
7605 }
7606
7607 info.flags = 0;
7608 info.length = len;
7609- info.low_limit = TASK_UNMAPPED_BASE;
7610+ info.low_limit = mm->mmap_base;
7611 info.high_limit = TASK_SIZE;
7612 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
7613 info.align_offset = pgoff << PAGE_SHIFT;
7614@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7615 struct mm_struct *mm = current->mm;
7616 unsigned long addr = addr0;
7617 int do_colour_align;
7618+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
7619 struct vm_unmapped_area_info info;
7620
7621 if (flags & MAP_FIXED) {
7622@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7623 if (filp || (flags & MAP_SHARED))
7624 do_colour_align = 1;
7625
7626+#ifdef CONFIG_PAX_RANDMMAP
7627+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7628+#endif
7629+
7630 /* requesting a specific address */
7631 if (addr) {
7632 if (do_colour_align)
7633@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7634 addr = PAGE_ALIGN(addr);
7635
7636 vma = find_vma(mm, addr);
7637- if (TASK_SIZE - len >= addr &&
7638- (!vma || addr + len <= vma->vm_start))
7639+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
7640 return addr;
7641 }
7642
7643@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7644 VM_BUG_ON(addr != -ENOMEM);
7645 info.flags = 0;
7646 info.low_limit = TASK_UNMAPPED_BASE;
7647+
7648+#ifdef CONFIG_PAX_RANDMMAP
7649+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7650+ info.low_limit += mm->delta_mmap;
7651+#endif
7652+
7653 info.high_limit = TASK_SIZE;
7654 addr = vm_unmapped_area(&info);
7655 }
7656diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
7657index be56a24..443328f 100644
7658--- a/arch/sparc/include/asm/atomic_64.h
7659+++ b/arch/sparc/include/asm/atomic_64.h
7660@@ -14,18 +14,40 @@
7661 #define ATOMIC64_INIT(i) { (i) }
7662
7663 #define atomic_read(v) (*(volatile int *)&(v)->counter)
7664+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7665+{
7666+ return v->counter;
7667+}
7668 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
7669+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7670+{
7671+ return v->counter;
7672+}
7673
7674 #define atomic_set(v, i) (((v)->counter) = i)
7675+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7676+{
7677+ v->counter = i;
7678+}
7679 #define atomic64_set(v, i) (((v)->counter) = i)
7680+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7681+{
7682+ v->counter = i;
7683+}
7684
7685 extern void atomic_add(int, atomic_t *);
7686+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
7687 extern void atomic64_add(long, atomic64_t *);
7688+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
7689 extern void atomic_sub(int, atomic_t *);
7690+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
7691 extern void atomic64_sub(long, atomic64_t *);
7692+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
7693
7694 extern int atomic_add_ret(int, atomic_t *);
7695+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
7696 extern long atomic64_add_ret(long, atomic64_t *);
7697+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
7698 extern int atomic_sub_ret(int, atomic_t *);
7699 extern long atomic64_sub_ret(long, atomic64_t *);
7700
7701@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
7702 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
7703
7704 #define atomic_inc_return(v) atomic_add_ret(1, v)
7705+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7706+{
7707+ return atomic_add_ret_unchecked(1, v);
7708+}
7709 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
7710+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7711+{
7712+ return atomic64_add_ret_unchecked(1, v);
7713+}
7714
7715 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
7716 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
7717
7718 #define atomic_add_return(i, v) atomic_add_ret(i, v)
7719+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7720+{
7721+ return atomic_add_ret_unchecked(i, v);
7722+}
7723 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
7724+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
7725+{
7726+ return atomic64_add_ret_unchecked(i, v);
7727+}
7728
7729 /*
7730 * atomic_inc_and_test - increment and test
7731@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
7732 * other cases.
7733 */
7734 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
7735+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7736+{
7737+ return atomic_inc_return_unchecked(v) == 0;
7738+}
7739 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
7740
7741 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
7742@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
7743 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
7744
7745 #define atomic_inc(v) atomic_add(1, v)
7746+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7747+{
7748+ atomic_add_unchecked(1, v);
7749+}
7750 #define atomic64_inc(v) atomic64_add(1, v)
7751+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7752+{
7753+ atomic64_add_unchecked(1, v);
7754+}
7755
7756 #define atomic_dec(v) atomic_sub(1, v)
7757+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7758+{
7759+ atomic_sub_unchecked(1, v);
7760+}
7761 #define atomic64_dec(v) atomic64_sub(1, v)
7762+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7763+{
7764+ atomic64_sub_unchecked(1, v);
7765+}
7766
7767 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
7768 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
7769
7770 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
7771+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7772+{
7773+ return cmpxchg(&v->counter, old, new);
7774+}
7775 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
7776+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7777+{
7778+ return xchg(&v->counter, new);
7779+}
7780
7781 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7782 {
7783- int c, old;
7784+ int c, old, new;
7785 c = atomic_read(v);
7786 for (;;) {
7787- if (unlikely(c == (u)))
7788+ if (unlikely(c == u))
7789 break;
7790- old = atomic_cmpxchg((v), c, c + (a));
7791+
7792+ asm volatile("addcc %2, %0, %0\n"
7793+
7794+#ifdef CONFIG_PAX_REFCOUNT
7795+ "tvs %%icc, 6\n"
7796+#endif
7797+
7798+ : "=r" (new)
7799+ : "0" (c), "ir" (a)
7800+ : "cc");
7801+
7802+ old = atomic_cmpxchg(v, c, new);
7803 if (likely(old == c))
7804 break;
7805 c = old;
7806@@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7807 #define atomic64_cmpxchg(v, o, n) \
7808 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
7809 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
7810+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
7811+{
7812+ return xchg(&v->counter, new);
7813+}
7814
7815 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
7816 {
7817- long c, old;
7818+ long c, old, new;
7819 c = atomic64_read(v);
7820 for (;;) {
7821- if (unlikely(c == (u)))
7822+ if (unlikely(c == u))
7823 break;
7824- old = atomic64_cmpxchg((v), c, c + (a));
7825+
7826+ asm volatile("addcc %2, %0, %0\n"
7827+
7828+#ifdef CONFIG_PAX_REFCOUNT
7829+ "tvs %%xcc, 6\n"
7830+#endif
7831+
7832+ : "=r" (new)
7833+ : "0" (c), "ir" (a)
7834+ : "cc");
7835+
7836+ old = atomic64_cmpxchg(v, c, new);
7837 if (likely(old == c))
7838 break;
7839 c = old;
7840 }
7841- return c != (u);
7842+ return c != u;
7843 }
7844
7845 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
7846diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
7847index 5bb6991..5c2132e 100644
7848--- a/arch/sparc/include/asm/cache.h
7849+++ b/arch/sparc/include/asm/cache.h
7850@@ -7,10 +7,12 @@
7851 #ifndef _SPARC_CACHE_H
7852 #define _SPARC_CACHE_H
7853
7854+#include <linux/const.h>
7855+
7856 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
7857
7858 #define L1_CACHE_SHIFT 5
7859-#define L1_CACHE_BYTES 32
7860+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7861
7862 #ifdef CONFIG_SPARC32
7863 #define SMP_CACHE_BYTES_SHIFT 5
7864diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
7865index a24e41f..47677ff 100644
7866--- a/arch/sparc/include/asm/elf_32.h
7867+++ b/arch/sparc/include/asm/elf_32.h
7868@@ -114,6 +114,13 @@ typedef struct {
7869
7870 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
7871
7872+#ifdef CONFIG_PAX_ASLR
7873+#define PAX_ELF_ET_DYN_BASE 0x10000UL
7874+
7875+#define PAX_DELTA_MMAP_LEN 16
7876+#define PAX_DELTA_STACK_LEN 16
7877+#endif
7878+
7879 /* This yields a mask that user programs can use to figure out what
7880 instruction set this cpu supports. This can NOT be done in userspace
7881 on Sparc. */
7882diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
7883index 370ca1e..d4f4a98 100644
7884--- a/arch/sparc/include/asm/elf_64.h
7885+++ b/arch/sparc/include/asm/elf_64.h
7886@@ -189,6 +189,13 @@ typedef struct {
7887 #define ELF_ET_DYN_BASE 0x0000010000000000UL
7888 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
7889
7890+#ifdef CONFIG_PAX_ASLR
7891+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
7892+
7893+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
7894+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
7895+#endif
7896+
7897 extern unsigned long sparc64_elf_hwcap;
7898 #define ELF_HWCAP sparc64_elf_hwcap
7899
7900diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
7901index 9b1c36d..209298b 100644
7902--- a/arch/sparc/include/asm/pgalloc_32.h
7903+++ b/arch/sparc/include/asm/pgalloc_32.h
7904@@ -33,6 +33,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
7905 }
7906
7907 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
7908+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
7909
7910 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
7911 unsigned long address)
7912diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
7913index bcfe063..b333142 100644
7914--- a/arch/sparc/include/asm/pgalloc_64.h
7915+++ b/arch/sparc/include/asm/pgalloc_64.h
7916@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
7917 }
7918
7919 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
7920+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
7921
7922 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
7923 {
7924diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
7925index 6fc1348..390c50a 100644
7926--- a/arch/sparc/include/asm/pgtable_32.h
7927+++ b/arch/sparc/include/asm/pgtable_32.h
7928@@ -50,6 +50,9 @@ extern unsigned long calc_highpages(void);
7929 #define PAGE_SHARED SRMMU_PAGE_SHARED
7930 #define PAGE_COPY SRMMU_PAGE_COPY
7931 #define PAGE_READONLY SRMMU_PAGE_RDONLY
7932+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
7933+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
7934+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
7935 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
7936
7937 /* Top-level page directory - dummy used by init-mm.
7938@@ -62,18 +65,18 @@ extern unsigned long ptr_in_current_pgd;
7939
7940 /* xwr */
7941 #define __P000 PAGE_NONE
7942-#define __P001 PAGE_READONLY
7943-#define __P010 PAGE_COPY
7944-#define __P011 PAGE_COPY
7945+#define __P001 PAGE_READONLY_NOEXEC
7946+#define __P010 PAGE_COPY_NOEXEC
7947+#define __P011 PAGE_COPY_NOEXEC
7948 #define __P100 PAGE_READONLY
7949 #define __P101 PAGE_READONLY
7950 #define __P110 PAGE_COPY
7951 #define __P111 PAGE_COPY
7952
7953 #define __S000 PAGE_NONE
7954-#define __S001 PAGE_READONLY
7955-#define __S010 PAGE_SHARED
7956-#define __S011 PAGE_SHARED
7957+#define __S001 PAGE_READONLY_NOEXEC
7958+#define __S010 PAGE_SHARED_NOEXEC
7959+#define __S011 PAGE_SHARED_NOEXEC
7960 #define __S100 PAGE_READONLY
7961 #define __S101 PAGE_READONLY
7962 #define __S110 PAGE_SHARED
7963diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
7964index 79da178..c2eede8 100644
7965--- a/arch/sparc/include/asm/pgtsrmmu.h
7966+++ b/arch/sparc/include/asm/pgtsrmmu.h
7967@@ -115,6 +115,11 @@
7968 SRMMU_EXEC | SRMMU_REF)
7969 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
7970 SRMMU_EXEC | SRMMU_REF)
7971+
7972+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
7973+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
7974+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
7975+
7976 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
7977 SRMMU_DIRTY | SRMMU_REF)
7978
7979diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
7980index 9689176..63c18ea 100644
7981--- a/arch/sparc/include/asm/spinlock_64.h
7982+++ b/arch/sparc/include/asm/spinlock_64.h
7983@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
7984
7985 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
7986
7987-static void inline arch_read_lock(arch_rwlock_t *lock)
7988+static inline void arch_read_lock(arch_rwlock_t *lock)
7989 {
7990 unsigned long tmp1, tmp2;
7991
7992 __asm__ __volatile__ (
7993 "1: ldsw [%2], %0\n"
7994 " brlz,pn %0, 2f\n"
7995-"4: add %0, 1, %1\n"
7996+"4: addcc %0, 1, %1\n"
7997+
7998+#ifdef CONFIG_PAX_REFCOUNT
7999+" tvs %%icc, 6\n"
8000+#endif
8001+
8002 " cas [%2], %0, %1\n"
8003 " cmp %0, %1\n"
8004 " bne,pn %%icc, 1b\n"
8005@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
8006 " .previous"
8007 : "=&r" (tmp1), "=&r" (tmp2)
8008 : "r" (lock)
8009- : "memory");
8010+ : "memory", "cc");
8011 }
8012
8013-static int inline arch_read_trylock(arch_rwlock_t *lock)
8014+static inline int arch_read_trylock(arch_rwlock_t *lock)
8015 {
8016 int tmp1, tmp2;
8017
8018@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
8019 "1: ldsw [%2], %0\n"
8020 " brlz,a,pn %0, 2f\n"
8021 " mov 0, %0\n"
8022-" add %0, 1, %1\n"
8023+" addcc %0, 1, %1\n"
8024+
8025+#ifdef CONFIG_PAX_REFCOUNT
8026+" tvs %%icc, 6\n"
8027+#endif
8028+
8029 " cas [%2], %0, %1\n"
8030 " cmp %0, %1\n"
8031 " bne,pn %%icc, 1b\n"
8032@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
8033 return tmp1;
8034 }
8035
8036-static void inline arch_read_unlock(arch_rwlock_t *lock)
8037+static inline void arch_read_unlock(arch_rwlock_t *lock)
8038 {
8039 unsigned long tmp1, tmp2;
8040
8041 __asm__ __volatile__(
8042 "1: lduw [%2], %0\n"
8043-" sub %0, 1, %1\n"
8044+" subcc %0, 1, %1\n"
8045+
8046+#ifdef CONFIG_PAX_REFCOUNT
8047+" tvs %%icc, 6\n"
8048+#endif
8049+
8050 " cas [%2], %0, %1\n"
8051 " cmp %0, %1\n"
8052 " bne,pn %%xcc, 1b\n"
8053@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
8054 : "memory");
8055 }
8056
8057-static void inline arch_write_lock(arch_rwlock_t *lock)
8058+static inline void arch_write_lock(arch_rwlock_t *lock)
8059 {
8060 unsigned long mask, tmp1, tmp2;
8061
8062@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
8063 : "memory");
8064 }
8065
8066-static void inline arch_write_unlock(arch_rwlock_t *lock)
8067+static inline void arch_write_unlock(arch_rwlock_t *lock)
8068 {
8069 __asm__ __volatile__(
8070 " stw %%g0, [%0]"
8071@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
8072 : "memory");
8073 }
8074
8075-static int inline arch_write_trylock(arch_rwlock_t *lock)
8076+static inline int arch_write_trylock(arch_rwlock_t *lock)
8077 {
8078 unsigned long mask, tmp1, tmp2, result;
8079
8080diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
8081index 25849ae..924c54b 100644
8082--- a/arch/sparc/include/asm/thread_info_32.h
8083+++ b/arch/sparc/include/asm/thread_info_32.h
8084@@ -49,6 +49,8 @@ struct thread_info {
8085 unsigned long w_saved;
8086
8087 struct restart_block restart_block;
8088+
8089+ unsigned long lowest_stack;
8090 };
8091
8092 /*
8093diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
8094index 269bd92..e46a9b8 100644
8095--- a/arch/sparc/include/asm/thread_info_64.h
8096+++ b/arch/sparc/include/asm/thread_info_64.h
8097@@ -63,6 +63,8 @@ struct thread_info {
8098 struct pt_regs *kern_una_regs;
8099 unsigned int kern_una_insn;
8100
8101+ unsigned long lowest_stack;
8102+
8103 unsigned long fpregs[0] __attribute__ ((aligned(64)));
8104 };
8105
8106@@ -192,10 +194,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
8107 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
8108 /* flag bit 6 is available */
8109 #define TIF_32BIT 7 /* 32-bit binary */
8110-/* flag bit 8 is available */
8111+#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */
8112 #define TIF_SECCOMP 9 /* secure computing */
8113 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
8114 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
8115+
8116 /* NOTE: Thread flags >= 12 should be ones we have no interest
8117 * in using in assembly, else we can't use the mask as
8118 * an immediate value in instructions such as andcc.
8119@@ -214,12 +217,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
8120 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
8121 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
8122 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
8123+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
8124
8125 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
8126 _TIF_DO_NOTIFY_RESUME_MASK | \
8127 _TIF_NEED_RESCHED)
8128 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
8129
8130+#define _TIF_WORK_SYSCALL \
8131+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
8132+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
8133+
8134+
8135 /*
8136 * Thread-synchronous status.
8137 *
8138diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
8139index 0167d26..767bb0c 100644
8140--- a/arch/sparc/include/asm/uaccess.h
8141+++ b/arch/sparc/include/asm/uaccess.h
8142@@ -1,5 +1,6 @@
8143 #ifndef ___ASM_SPARC_UACCESS_H
8144 #define ___ASM_SPARC_UACCESS_H
8145+
8146 #if defined(__sparc__) && defined(__arch64__)
8147 #include <asm/uaccess_64.h>
8148 #else
8149diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
8150index 53a28dd..50c38c3 100644
8151--- a/arch/sparc/include/asm/uaccess_32.h
8152+++ b/arch/sparc/include/asm/uaccess_32.h
8153@@ -250,27 +250,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
8154
8155 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
8156 {
8157- if (n && __access_ok((unsigned long) to, n))
8158+ if ((long)n < 0)
8159+ return n;
8160+
8161+ if (n && __access_ok((unsigned long) to, n)) {
8162+ if (!__builtin_constant_p(n))
8163+ check_object_size(from, n, true);
8164 return __copy_user(to, (__force void __user *) from, n);
8165- else
8166+ } else
8167 return n;
8168 }
8169
8170 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
8171 {
8172+ if ((long)n < 0)
8173+ return n;
8174+
8175+ if (!__builtin_constant_p(n))
8176+ check_object_size(from, n, true);
8177+
8178 return __copy_user(to, (__force void __user *) from, n);
8179 }
8180
8181 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
8182 {
8183- if (n && __access_ok((unsigned long) from, n))
8184+ if ((long)n < 0)
8185+ return n;
8186+
8187+ if (n && __access_ok((unsigned long) from, n)) {
8188+ if (!__builtin_constant_p(n))
8189+ check_object_size(to, n, false);
8190 return __copy_user((__force void __user *) to, from, n);
8191- else
8192+ } else
8193 return n;
8194 }
8195
8196 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
8197 {
8198+ if ((long)n < 0)
8199+ return n;
8200+
8201 return __copy_user((__force void __user *) to, from, n);
8202 }
8203
8204diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
8205index e562d3c..191f176 100644
8206--- a/arch/sparc/include/asm/uaccess_64.h
8207+++ b/arch/sparc/include/asm/uaccess_64.h
8208@@ -10,6 +10,7 @@
8209 #include <linux/compiler.h>
8210 #include <linux/string.h>
8211 #include <linux/thread_info.h>
8212+#include <linux/kernel.h>
8213 #include <asm/asi.h>
8214 #include <asm/spitfire.h>
8215 #include <asm-generic/uaccess-unaligned.h>
8216@@ -214,8 +215,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
8217 static inline unsigned long __must_check
8218 copy_from_user(void *to, const void __user *from, unsigned long size)
8219 {
8220- unsigned long ret = ___copy_from_user(to, from, size);
8221+ unsigned long ret;
8222
8223+ if ((long)size < 0 || size > INT_MAX)
8224+ return size;
8225+
8226+ if (!__builtin_constant_p(size))
8227+ check_object_size(to, size, false);
8228+
8229+ ret = ___copy_from_user(to, from, size);
8230 if (unlikely(ret))
8231 ret = copy_from_user_fixup(to, from, size);
8232
8233@@ -231,8 +239,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
8234 static inline unsigned long __must_check
8235 copy_to_user(void __user *to, const void *from, unsigned long size)
8236 {
8237- unsigned long ret = ___copy_to_user(to, from, size);
8238+ unsigned long ret;
8239
8240+ if ((long)size < 0 || size > INT_MAX)
8241+ return size;
8242+
8243+ if (!__builtin_constant_p(size))
8244+ check_object_size(from, size, true);
8245+
8246+ ret = ___copy_to_user(to, from, size);
8247 if (unlikely(ret))
8248 ret = copy_to_user_fixup(to, from, size);
8249 return ret;
8250diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
8251index 6cf591b..b49e65a 100644
8252--- a/arch/sparc/kernel/Makefile
8253+++ b/arch/sparc/kernel/Makefile
8254@@ -3,7 +3,7 @@
8255 #
8256
8257 asflags-y := -ansi
8258-ccflags-y := -Werror
8259+#ccflags-y := -Werror
8260
8261 extra-y := head_$(BITS).o
8262
8263diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
8264index 62eede1..9c5b904 100644
8265--- a/arch/sparc/kernel/process_32.c
8266+++ b/arch/sparc/kernel/process_32.c
8267@@ -125,14 +125,14 @@ void show_regs(struct pt_regs *r)
8268
8269 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
8270 r->psr, r->pc, r->npc, r->y, print_tainted());
8271- printk("PC: <%pS>\n", (void *) r->pc);
8272+ printk("PC: <%pA>\n", (void *) r->pc);
8273 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
8274 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
8275 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
8276 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
8277 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
8278 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
8279- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
8280+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
8281
8282 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
8283 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
8284@@ -167,7 +167,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
8285 rw = (struct reg_window32 *) fp;
8286 pc = rw->ins[7];
8287 printk("[%08lx : ", pc);
8288- printk("%pS ] ", (void *) pc);
8289+ printk("%pA ] ", (void *) pc);
8290 fp = rw->ins[6];
8291 } while (++count < 16);
8292 printk("\n");
8293diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
8294index cdb80b2..5ca141d 100644
8295--- a/arch/sparc/kernel/process_64.c
8296+++ b/arch/sparc/kernel/process_64.c
8297@@ -181,14 +181,14 @@ static void show_regwindow(struct pt_regs *regs)
8298 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
8299 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
8300 if (regs->tstate & TSTATE_PRIV)
8301- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
8302+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
8303 }
8304
8305 void show_regs(struct pt_regs *regs)
8306 {
8307 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
8308 regs->tpc, regs->tnpc, regs->y, print_tainted());
8309- printk("TPC: <%pS>\n", (void *) regs->tpc);
8310+ printk("TPC: <%pA>\n", (void *) regs->tpc);
8311 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
8312 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
8313 regs->u_regs[3]);
8314@@ -201,7 +201,7 @@ void show_regs(struct pt_regs *regs)
8315 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
8316 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
8317 regs->u_regs[15]);
8318- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
8319+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
8320 show_regwindow(regs);
8321 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
8322 }
8323@@ -290,7 +290,7 @@ void arch_trigger_all_cpu_backtrace(void)
8324 ((tp && tp->task) ? tp->task->pid : -1));
8325
8326 if (gp->tstate & TSTATE_PRIV) {
8327- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
8328+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
8329 (void *) gp->tpc,
8330 (void *) gp->o7,
8331 (void *) gp->i7,
8332diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
8333index 9f20566..67eb41b 100644
8334--- a/arch/sparc/kernel/prom_common.c
8335+++ b/arch/sparc/kernel/prom_common.c
8336@@ -143,7 +143,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
8337
8338 unsigned int prom_early_allocated __initdata;
8339
8340-static struct of_pdt_ops prom_sparc_ops __initdata = {
8341+static struct of_pdt_ops prom_sparc_ops __initconst = {
8342 .nextprop = prom_common_nextprop,
8343 .getproplen = prom_getproplen,
8344 .getproperty = prom_getproperty,
8345diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
8346index 7ff45e4..a58f271 100644
8347--- a/arch/sparc/kernel/ptrace_64.c
8348+++ b/arch/sparc/kernel/ptrace_64.c
8349@@ -1057,6 +1057,10 @@ long arch_ptrace(struct task_struct *child, long request,
8350 return ret;
8351 }
8352
8353+#ifdef CONFIG_GRKERNSEC_SETXID
8354+extern void gr_delayed_cred_worker(void);
8355+#endif
8356+
8357 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
8358 {
8359 int ret = 0;
8360@@ -1064,6 +1068,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
8361 /* do the secure computing check first */
8362 secure_computing_strict(regs->u_regs[UREG_G1]);
8363
8364+#ifdef CONFIG_GRKERNSEC_SETXID
8365+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8366+ gr_delayed_cred_worker();
8367+#endif
8368+
8369 if (test_thread_flag(TIF_SYSCALL_TRACE))
8370 ret = tracehook_report_syscall_entry(regs);
8371
8372@@ -1084,6 +1093,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
8373
8374 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
8375 {
8376+#ifdef CONFIG_GRKERNSEC_SETXID
8377+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8378+ gr_delayed_cred_worker();
8379+#endif
8380+
8381 audit_syscall_exit(regs);
8382
8383 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
8384diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
8385index 3a8d184..49498a8 100644
8386--- a/arch/sparc/kernel/sys_sparc_32.c
8387+++ b/arch/sparc/kernel/sys_sparc_32.c
8388@@ -52,7 +52,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8389 if (len > TASK_SIZE - PAGE_SIZE)
8390 return -ENOMEM;
8391 if (!addr)
8392- addr = TASK_UNMAPPED_BASE;
8393+ addr = current->mm->mmap_base;
8394
8395 info.flags = 0;
8396 info.length = len;
8397diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
8398index 708bc29..6bfdfad 100644
8399--- a/arch/sparc/kernel/sys_sparc_64.c
8400+++ b/arch/sparc/kernel/sys_sparc_64.c
8401@@ -90,13 +90,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8402 struct vm_area_struct * vma;
8403 unsigned long task_size = TASK_SIZE;
8404 int do_color_align;
8405+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8406 struct vm_unmapped_area_info info;
8407
8408 if (flags & MAP_FIXED) {
8409 /* We do not accept a shared mapping if it would violate
8410 * cache aliasing constraints.
8411 */
8412- if ((flags & MAP_SHARED) &&
8413+ if ((filp || (flags & MAP_SHARED)) &&
8414 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
8415 return -EINVAL;
8416 return addr;
8417@@ -111,6 +112,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8418 if (filp || (flags & MAP_SHARED))
8419 do_color_align = 1;
8420
8421+#ifdef CONFIG_PAX_RANDMMAP
8422+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8423+#endif
8424+
8425 if (addr) {
8426 if (do_color_align)
8427 addr = COLOR_ALIGN(addr, pgoff);
8428@@ -118,22 +123,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8429 addr = PAGE_ALIGN(addr);
8430
8431 vma = find_vma(mm, addr);
8432- if (task_size - len >= addr &&
8433- (!vma || addr + len <= vma->vm_start))
8434+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8435 return addr;
8436 }
8437
8438 info.flags = 0;
8439 info.length = len;
8440- info.low_limit = TASK_UNMAPPED_BASE;
8441+ info.low_limit = mm->mmap_base;
8442 info.high_limit = min(task_size, VA_EXCLUDE_START);
8443 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
8444 info.align_offset = pgoff << PAGE_SHIFT;
8445+ info.threadstack_offset = offset;
8446 addr = vm_unmapped_area(&info);
8447
8448 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
8449 VM_BUG_ON(addr != -ENOMEM);
8450 info.low_limit = VA_EXCLUDE_END;
8451+
8452+#ifdef CONFIG_PAX_RANDMMAP
8453+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8454+ info.low_limit += mm->delta_mmap;
8455+#endif
8456+
8457 info.high_limit = task_size;
8458 addr = vm_unmapped_area(&info);
8459 }
8460@@ -151,6 +162,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8461 unsigned long task_size = STACK_TOP32;
8462 unsigned long addr = addr0;
8463 int do_color_align;
8464+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8465 struct vm_unmapped_area_info info;
8466
8467 /* This should only ever run for 32-bit processes. */
8468@@ -160,7 +172,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8469 /* We do not accept a shared mapping if it would violate
8470 * cache aliasing constraints.
8471 */
8472- if ((flags & MAP_SHARED) &&
8473+ if ((filp || (flags & MAP_SHARED)) &&
8474 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
8475 return -EINVAL;
8476 return addr;
8477@@ -173,6 +185,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8478 if (filp || (flags & MAP_SHARED))
8479 do_color_align = 1;
8480
8481+#ifdef CONFIG_PAX_RANDMMAP
8482+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8483+#endif
8484+
8485 /* requesting a specific address */
8486 if (addr) {
8487 if (do_color_align)
8488@@ -181,8 +197,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8489 addr = PAGE_ALIGN(addr);
8490
8491 vma = find_vma(mm, addr);
8492- if (task_size - len >= addr &&
8493- (!vma || addr + len <= vma->vm_start))
8494+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8495 return addr;
8496 }
8497
8498@@ -192,6 +207,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8499 info.high_limit = mm->mmap_base;
8500 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
8501 info.align_offset = pgoff << PAGE_SHIFT;
8502+ info.threadstack_offset = offset;
8503 addr = vm_unmapped_area(&info);
8504
8505 /*
8506@@ -204,6 +220,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8507 VM_BUG_ON(addr != -ENOMEM);
8508 info.flags = 0;
8509 info.low_limit = TASK_UNMAPPED_BASE;
8510+
8511+#ifdef CONFIG_PAX_RANDMMAP
8512+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8513+ info.low_limit += mm->delta_mmap;
8514+#endif
8515+
8516 info.high_limit = STACK_TOP32;
8517 addr = vm_unmapped_area(&info);
8518 }
8519@@ -264,6 +286,10 @@ static unsigned long mmap_rnd(void)
8520 {
8521 unsigned long rnd = 0UL;
8522
8523+#ifdef CONFIG_PAX_RANDMMAP
8524+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8525+#endif
8526+
8527 if (current->flags & PF_RANDOMIZE) {
8528 unsigned long val = get_random_int();
8529 if (test_thread_flag(TIF_32BIT))
8530@@ -289,6 +315,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8531 gap == RLIM_INFINITY ||
8532 sysctl_legacy_va_layout) {
8533 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
8534+
8535+#ifdef CONFIG_PAX_RANDMMAP
8536+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8537+ mm->mmap_base += mm->delta_mmap;
8538+#endif
8539+
8540 mm->get_unmapped_area = arch_get_unmapped_area;
8541 mm->unmap_area = arch_unmap_area;
8542 } else {
8543@@ -301,6 +333,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8544 gap = (task_size / 6 * 5);
8545
8546 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
8547+
8548+#ifdef CONFIG_PAX_RANDMMAP
8549+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8550+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
8551+#endif
8552+
8553 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
8554 mm->unmap_area = arch_unmap_area_topdown;
8555 }
8556diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
8557index 22a1098..6255eb9 100644
8558--- a/arch/sparc/kernel/syscalls.S
8559+++ b/arch/sparc/kernel/syscalls.S
8560@@ -52,7 +52,7 @@ sys32_rt_sigreturn:
8561 #endif
8562 .align 32
8563 1: ldx [%g6 + TI_FLAGS], %l5
8564- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8565+ andcc %l5, _TIF_WORK_SYSCALL, %g0
8566 be,pt %icc, rtrap
8567 nop
8568 call syscall_trace_leave
8569@@ -184,7 +184,7 @@ linux_sparc_syscall32:
8570
8571 srl %i5, 0, %o5 ! IEU1
8572 srl %i2, 0, %o2 ! IEU0 Group
8573- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8574+ andcc %l0, _TIF_WORK_SYSCALL, %g0
8575 bne,pn %icc, linux_syscall_trace32 ! CTI
8576 mov %i0, %l5 ! IEU1
8577 call %l7 ! CTI Group brk forced
8578@@ -207,7 +207,7 @@ linux_sparc_syscall:
8579
8580 mov %i3, %o3 ! IEU1
8581 mov %i4, %o4 ! IEU0 Group
8582- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8583+ andcc %l0, _TIF_WORK_SYSCALL, %g0
8584 bne,pn %icc, linux_syscall_trace ! CTI Group
8585 mov %i0, %l5 ! IEU0
8586 2: call %l7 ! CTI Group brk forced
8587@@ -223,7 +223,7 @@ ret_sys_call:
8588
8589 cmp %o0, -ERESTART_RESTARTBLOCK
8590 bgeu,pn %xcc, 1f
8591- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8592+ andcc %l0, _TIF_WORK_SYSCALL, %g0
8593 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
8594
8595 2:
8596diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
8597index 654e8aa..45f431b 100644
8598--- a/arch/sparc/kernel/sysfs.c
8599+++ b/arch/sparc/kernel/sysfs.c
8600@@ -266,7 +266,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
8601 return NOTIFY_OK;
8602 }
8603
8604-static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
8605+static struct notifier_block sysfs_cpu_nb = {
8606 .notifier_call = sysfs_cpu_notify,
8607 };
8608
8609diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
8610index 6629829..036032d 100644
8611--- a/arch/sparc/kernel/traps_32.c
8612+++ b/arch/sparc/kernel/traps_32.c
8613@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
8614 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
8615 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
8616
8617+extern void gr_handle_kernel_exploit(void);
8618+
8619 void die_if_kernel(char *str, struct pt_regs *regs)
8620 {
8621 static int die_counter;
8622@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
8623 count++ < 30 &&
8624 (((unsigned long) rw) >= PAGE_OFFSET) &&
8625 !(((unsigned long) rw) & 0x7)) {
8626- printk("Caller[%08lx]: %pS\n", rw->ins[7],
8627+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
8628 (void *) rw->ins[7]);
8629 rw = (struct reg_window32 *)rw->ins[6];
8630 }
8631 }
8632 printk("Instruction DUMP:");
8633 instruction_dump ((unsigned long *) regs->pc);
8634- if(regs->psr & PSR_PS)
8635+ if(regs->psr & PSR_PS) {
8636+ gr_handle_kernel_exploit();
8637 do_exit(SIGKILL);
8638+ }
8639 do_exit(SIGSEGV);
8640 }
8641
8642diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
8643index 8d38ca9..845b1d6 100644
8644--- a/arch/sparc/kernel/traps_64.c
8645+++ b/arch/sparc/kernel/traps_64.c
8646@@ -76,7 +76,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
8647 i + 1,
8648 p->trapstack[i].tstate, p->trapstack[i].tpc,
8649 p->trapstack[i].tnpc, p->trapstack[i].tt);
8650- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
8651+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
8652 }
8653 }
8654
8655@@ -96,6 +96,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
8656
8657 lvl -= 0x100;
8658 if (regs->tstate & TSTATE_PRIV) {
8659+
8660+#ifdef CONFIG_PAX_REFCOUNT
8661+ if (lvl == 6)
8662+ pax_report_refcount_overflow(regs);
8663+#endif
8664+
8665 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
8666 die_if_kernel(buffer, regs);
8667 }
8668@@ -114,11 +120,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
8669 void bad_trap_tl1(struct pt_regs *regs, long lvl)
8670 {
8671 char buffer[32];
8672-
8673+
8674 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
8675 0, lvl, SIGTRAP) == NOTIFY_STOP)
8676 return;
8677
8678+#ifdef CONFIG_PAX_REFCOUNT
8679+ if (lvl == 6)
8680+ pax_report_refcount_overflow(regs);
8681+#endif
8682+
8683 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
8684
8685 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
8686@@ -1142,7 +1153,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
8687 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
8688 printk("%s" "ERROR(%d): ",
8689 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
8690- printk("TPC<%pS>\n", (void *) regs->tpc);
8691+ printk("TPC<%pA>\n", (void *) regs->tpc);
8692 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
8693 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
8694 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
8695@@ -1749,7 +1760,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
8696 smp_processor_id(),
8697 (type & 0x1) ? 'I' : 'D',
8698 regs->tpc);
8699- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
8700+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
8701 panic("Irrecoverable Cheetah+ parity error.");
8702 }
8703
8704@@ -1757,7 +1768,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
8705 smp_processor_id(),
8706 (type & 0x1) ? 'I' : 'D',
8707 regs->tpc);
8708- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
8709+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
8710 }
8711
8712 struct sun4v_error_entry {
8713@@ -2104,9 +2115,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
8714
8715 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
8716 regs->tpc, tl);
8717- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
8718+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
8719 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
8720- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
8721+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
8722 (void *) regs->u_regs[UREG_I7]);
8723 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
8724 "pte[%lx] error[%lx]\n",
8725@@ -2128,9 +2139,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
8726
8727 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
8728 regs->tpc, tl);
8729- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
8730+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
8731 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
8732- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
8733+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
8734 (void *) regs->u_regs[UREG_I7]);
8735 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
8736 "pte[%lx] error[%lx]\n",
8737@@ -2336,13 +2347,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
8738 fp = (unsigned long)sf->fp + STACK_BIAS;
8739 }
8740
8741- printk(" [%016lx] %pS\n", pc, (void *) pc);
8742+ printk(" [%016lx] %pA\n", pc, (void *) pc);
8743 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
8744 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
8745 int index = tsk->curr_ret_stack;
8746 if (tsk->ret_stack && index >= graph) {
8747 pc = tsk->ret_stack[index - graph].ret;
8748- printk(" [%016lx] %pS\n", pc, (void *) pc);
8749+ printk(" [%016lx] %pA\n", pc, (void *) pc);
8750 graph++;
8751 }
8752 }
8753@@ -2367,6 +2378,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
8754 return (struct reg_window *) (fp + STACK_BIAS);
8755 }
8756
8757+extern void gr_handle_kernel_exploit(void);
8758+
8759 void die_if_kernel(char *str, struct pt_regs *regs)
8760 {
8761 static int die_counter;
8762@@ -2395,7 +2408,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
8763 while (rw &&
8764 count++ < 30 &&
8765 kstack_valid(tp, (unsigned long) rw)) {
8766- printk("Caller[%016lx]: %pS\n", rw->ins[7],
8767+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
8768 (void *) rw->ins[7]);
8769
8770 rw = kernel_stack_up(rw);
8771@@ -2408,8 +2421,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
8772 }
8773 user_instruction_dump ((unsigned int __user *) regs->tpc);
8774 }
8775- if (regs->tstate & TSTATE_PRIV)
8776+ if (regs->tstate & TSTATE_PRIV) {
8777+ gr_handle_kernel_exploit();
8778 do_exit(SIGKILL);
8779+ }
8780 do_exit(SIGSEGV);
8781 }
8782 EXPORT_SYMBOL(die_if_kernel);
8783diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
8784index 8201c25e..072a2a7 100644
8785--- a/arch/sparc/kernel/unaligned_64.c
8786+++ b/arch/sparc/kernel/unaligned_64.c
8787@@ -286,7 +286,7 @@ static void log_unaligned(struct pt_regs *regs)
8788 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
8789
8790 if (__ratelimit(&ratelimit)) {
8791- printk("Kernel unaligned access at TPC[%lx] %pS\n",
8792+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
8793 regs->tpc, (void *) regs->tpc);
8794 }
8795 }
8796diff --git a/arch/sparc/kernel/us3_cpufreq.c b/arch/sparc/kernel/us3_cpufreq.c
8797index eb1624b..55100de 100644
8798--- a/arch/sparc/kernel/us3_cpufreq.c
8799+++ b/arch/sparc/kernel/us3_cpufreq.c
8800@@ -18,14 +18,12 @@
8801 #include <asm/head.h>
8802 #include <asm/timer.h>
8803
8804-static struct cpufreq_driver *cpufreq_us3_driver;
8805-
8806 struct us3_freq_percpu_info {
8807 struct cpufreq_frequency_table table[4];
8808 };
8809
8810 /* Indexed by cpu number. */
8811-static struct us3_freq_percpu_info *us3_freq_table;
8812+static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
8813
8814 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
8815 * in the Safari config register.
8816@@ -191,12 +189,25 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
8817
8818 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
8819 {
8820- if (cpufreq_us3_driver)
8821- us3_set_cpu_divider_index(policy->cpu, 0);
8822+ us3_set_cpu_divider_index(policy->cpu, 0);
8823
8824 return 0;
8825 }
8826
8827+static int __init us3_freq_init(void);
8828+static void __exit us3_freq_exit(void);
8829+
8830+static struct cpufreq_driver cpufreq_us3_driver = {
8831+ .init = us3_freq_cpu_init,
8832+ .verify = us3_freq_verify,
8833+ .target = us3_freq_target,
8834+ .get = us3_freq_get,
8835+ .exit = us3_freq_cpu_exit,
8836+ .owner = THIS_MODULE,
8837+ .name = "UltraSPARC-III",
8838+
8839+};
8840+
8841 static int __init us3_freq_init(void)
8842 {
8843 unsigned long manuf, impl, ver;
8844@@ -213,57 +224,15 @@ static int __init us3_freq_init(void)
8845 (impl == CHEETAH_IMPL ||
8846 impl == CHEETAH_PLUS_IMPL ||
8847 impl == JAGUAR_IMPL ||
8848- impl == PANTHER_IMPL)) {
8849- struct cpufreq_driver *driver;
8850-
8851- ret = -ENOMEM;
8852- driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
8853- if (!driver)
8854- goto err_out;
8855-
8856- us3_freq_table = kzalloc(
8857- (NR_CPUS * sizeof(struct us3_freq_percpu_info)),
8858- GFP_KERNEL);
8859- if (!us3_freq_table)
8860- goto err_out;
8861-
8862- driver->init = us3_freq_cpu_init;
8863- driver->verify = us3_freq_verify;
8864- driver->target = us3_freq_target;
8865- driver->get = us3_freq_get;
8866- driver->exit = us3_freq_cpu_exit;
8867- driver->owner = THIS_MODULE,
8868- strcpy(driver->name, "UltraSPARC-III");
8869-
8870- cpufreq_us3_driver = driver;
8871- ret = cpufreq_register_driver(driver);
8872- if (ret)
8873- goto err_out;
8874-
8875- return 0;
8876-
8877-err_out:
8878- if (driver) {
8879- kfree(driver);
8880- cpufreq_us3_driver = NULL;
8881- }
8882- kfree(us3_freq_table);
8883- us3_freq_table = NULL;
8884- return ret;
8885- }
8886+ impl == PANTHER_IMPL))
8887+ return cpufreq_register_driver(&cpufreq_us3_driver);
8888
8889 return -ENODEV;
8890 }
8891
8892 static void __exit us3_freq_exit(void)
8893 {
8894- if (cpufreq_us3_driver) {
8895- cpufreq_unregister_driver(cpufreq_us3_driver);
8896- kfree(cpufreq_us3_driver);
8897- cpufreq_us3_driver = NULL;
8898- kfree(us3_freq_table);
8899- us3_freq_table = NULL;
8900- }
8901+ cpufreq_unregister_driver(&cpufreq_us3_driver);
8902 }
8903
8904 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
8905diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
8906index 8410065f2..4fd4ca22 100644
8907--- a/arch/sparc/lib/Makefile
8908+++ b/arch/sparc/lib/Makefile
8909@@ -2,7 +2,7 @@
8910 #
8911
8912 asflags-y := -ansi -DST_DIV0=0x02
8913-ccflags-y := -Werror
8914+#ccflags-y := -Werror
8915
8916 lib-$(CONFIG_SPARC32) += ashrdi3.o
8917 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
8918diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
8919index 85c233d..68500e0 100644
8920--- a/arch/sparc/lib/atomic_64.S
8921+++ b/arch/sparc/lib/atomic_64.S
8922@@ -17,7 +17,12 @@
8923 ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
8924 BACKOFF_SETUP(%o2)
8925 1: lduw [%o1], %g1
8926- add %g1, %o0, %g7
8927+ addcc %g1, %o0, %g7
8928+
8929+#ifdef CONFIG_PAX_REFCOUNT
8930+ tvs %icc, 6
8931+#endif
8932+
8933 cas [%o1], %g1, %g7
8934 cmp %g1, %g7
8935 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
8936@@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
8937 2: BACKOFF_SPIN(%o2, %o3, 1b)
8938 ENDPROC(atomic_add)
8939
8940+ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
8941+ BACKOFF_SETUP(%o2)
8942+1: lduw [%o1], %g1
8943+ add %g1, %o0, %g7
8944+ cas [%o1], %g1, %g7
8945+ cmp %g1, %g7
8946+ bne,pn %icc, 2f
8947+ nop
8948+ retl
8949+ nop
8950+2: BACKOFF_SPIN(%o2, %o3, 1b)
8951+ENDPROC(atomic_add_unchecked)
8952+
8953 ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
8954 BACKOFF_SETUP(%o2)
8955 1: lduw [%o1], %g1
8956- sub %g1, %o0, %g7
8957+ subcc %g1, %o0, %g7
8958+
8959+#ifdef CONFIG_PAX_REFCOUNT
8960+ tvs %icc, 6
8961+#endif
8962+
8963 cas [%o1], %g1, %g7
8964 cmp %g1, %g7
8965 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
8966@@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
8967 2: BACKOFF_SPIN(%o2, %o3, 1b)
8968 ENDPROC(atomic_sub)
8969
8970+ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
8971+ BACKOFF_SETUP(%o2)
8972+1: lduw [%o1], %g1
8973+ sub %g1, %o0, %g7
8974+ cas [%o1], %g1, %g7
8975+ cmp %g1, %g7
8976+ bne,pn %icc, 2f
8977+ nop
8978+ retl
8979+ nop
8980+2: BACKOFF_SPIN(%o2, %o3, 1b)
8981+ENDPROC(atomic_sub_unchecked)
8982+
8983 ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
8984 BACKOFF_SETUP(%o2)
8985 1: lduw [%o1], %g1
8986- add %g1, %o0, %g7
8987+ addcc %g1, %o0, %g7
8988+
8989+#ifdef CONFIG_PAX_REFCOUNT
8990+ tvs %icc, 6
8991+#endif
8992+
8993 cas [%o1], %g1, %g7
8994 cmp %g1, %g7
8995 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
8996@@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
8997 2: BACKOFF_SPIN(%o2, %o3, 1b)
8998 ENDPROC(atomic_add_ret)
8999
9000+ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
9001+ BACKOFF_SETUP(%o2)
9002+1: lduw [%o1], %g1
9003+ addcc %g1, %o0, %g7
9004+ cas [%o1], %g1, %g7
9005+ cmp %g1, %g7
9006+ bne,pn %icc, 2f
9007+ add %g7, %o0, %g7
9008+ sra %g7, 0, %o0
9009+ retl
9010+ nop
9011+2: BACKOFF_SPIN(%o2, %o3, 1b)
9012+ENDPROC(atomic_add_ret_unchecked)
9013+
9014 ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
9015 BACKOFF_SETUP(%o2)
9016 1: lduw [%o1], %g1
9017- sub %g1, %o0, %g7
9018+ subcc %g1, %o0, %g7
9019+
9020+#ifdef CONFIG_PAX_REFCOUNT
9021+ tvs %icc, 6
9022+#endif
9023+
9024 cas [%o1], %g1, %g7
9025 cmp %g1, %g7
9026 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
9027@@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret)
9028 ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
9029 BACKOFF_SETUP(%o2)
9030 1: ldx [%o1], %g1
9031- add %g1, %o0, %g7
9032+ addcc %g1, %o0, %g7
9033+
9034+#ifdef CONFIG_PAX_REFCOUNT
9035+ tvs %xcc, 6
9036+#endif
9037+
9038 casx [%o1], %g1, %g7
9039 cmp %g1, %g7
9040 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
9041@@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
9042 2: BACKOFF_SPIN(%o2, %o3, 1b)
9043 ENDPROC(atomic64_add)
9044
9045+ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
9046+ BACKOFF_SETUP(%o2)
9047+1: ldx [%o1], %g1
9048+ addcc %g1, %o0, %g7
9049+ casx [%o1], %g1, %g7
9050+ cmp %g1, %g7
9051+ bne,pn %xcc, 2f
9052+ nop
9053+ retl
9054+ nop
9055+2: BACKOFF_SPIN(%o2, %o3, 1b)
9056+ENDPROC(atomic64_add_unchecked)
9057+
9058 ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
9059 BACKOFF_SETUP(%o2)
9060 1: ldx [%o1], %g1
9061- sub %g1, %o0, %g7
9062+ subcc %g1, %o0, %g7
9063+
9064+#ifdef CONFIG_PAX_REFCOUNT
9065+ tvs %xcc, 6
9066+#endif
9067+
9068 casx [%o1], %g1, %g7
9069 cmp %g1, %g7
9070 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
9071@@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
9072 2: BACKOFF_SPIN(%o2, %o3, 1b)
9073 ENDPROC(atomic64_sub)
9074
9075+ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
9076+ BACKOFF_SETUP(%o2)
9077+1: ldx [%o1], %g1
9078+ subcc %g1, %o0, %g7
9079+ casx [%o1], %g1, %g7
9080+ cmp %g1, %g7
9081+ bne,pn %xcc, 2f
9082+ nop
9083+ retl
9084+ nop
9085+2: BACKOFF_SPIN(%o2, %o3, 1b)
9086+ENDPROC(atomic64_sub_unchecked)
9087+
9088 ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
9089 BACKOFF_SETUP(%o2)
9090 1: ldx [%o1], %g1
9091- add %g1, %o0, %g7
9092+ addcc %g1, %o0, %g7
9093+
9094+#ifdef CONFIG_PAX_REFCOUNT
9095+ tvs %xcc, 6
9096+#endif
9097+
9098 casx [%o1], %g1, %g7
9099 cmp %g1, %g7
9100 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
9101@@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
9102 2: BACKOFF_SPIN(%o2, %o3, 1b)
9103 ENDPROC(atomic64_add_ret)
9104
9105+ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
9106+ BACKOFF_SETUP(%o2)
9107+1: ldx [%o1], %g1
9108+ addcc %g1, %o0, %g7
9109+ casx [%o1], %g1, %g7
9110+ cmp %g1, %g7
9111+ bne,pn %xcc, 2f
9112+ add %g7, %o0, %g7
9113+ mov %g7, %o0
9114+ retl
9115+ nop
9116+2: BACKOFF_SPIN(%o2, %o3, 1b)
9117+ENDPROC(atomic64_add_ret_unchecked)
9118+
9119 ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
9120 BACKOFF_SETUP(%o2)
9121 1: ldx [%o1], %g1
9122- sub %g1, %o0, %g7
9123+ subcc %g1, %o0, %g7
9124+
9125+#ifdef CONFIG_PAX_REFCOUNT
9126+ tvs %xcc, 6
9127+#endif
9128+
9129 casx [%o1], %g1, %g7
9130 cmp %g1, %g7
9131 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
9132diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
9133index 0c4e35e..745d3e4 100644
9134--- a/arch/sparc/lib/ksyms.c
9135+++ b/arch/sparc/lib/ksyms.c
9136@@ -109,12 +109,18 @@ EXPORT_SYMBOL(__downgrade_write);
9137
9138 /* Atomic counter implementation. */
9139 EXPORT_SYMBOL(atomic_add);
9140+EXPORT_SYMBOL(atomic_add_unchecked);
9141 EXPORT_SYMBOL(atomic_add_ret);
9142+EXPORT_SYMBOL(atomic_add_ret_unchecked);
9143 EXPORT_SYMBOL(atomic_sub);
9144+EXPORT_SYMBOL(atomic_sub_unchecked);
9145 EXPORT_SYMBOL(atomic_sub_ret);
9146 EXPORT_SYMBOL(atomic64_add);
9147+EXPORT_SYMBOL(atomic64_add_unchecked);
9148 EXPORT_SYMBOL(atomic64_add_ret);
9149+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
9150 EXPORT_SYMBOL(atomic64_sub);
9151+EXPORT_SYMBOL(atomic64_sub_unchecked);
9152 EXPORT_SYMBOL(atomic64_sub_ret);
9153 EXPORT_SYMBOL(atomic64_dec_if_positive);
9154
9155diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
9156index 30c3ecc..736f015 100644
9157--- a/arch/sparc/mm/Makefile
9158+++ b/arch/sparc/mm/Makefile
9159@@ -2,7 +2,7 @@
9160 #
9161
9162 asflags-y := -ansi
9163-ccflags-y := -Werror
9164+#ccflags-y := -Werror
9165
9166 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
9167 obj-y += fault_$(BITS).o
9168diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
9169index e98bfda..ea8d221 100644
9170--- a/arch/sparc/mm/fault_32.c
9171+++ b/arch/sparc/mm/fault_32.c
9172@@ -21,6 +21,9 @@
9173 #include <linux/perf_event.h>
9174 #include <linux/interrupt.h>
9175 #include <linux/kdebug.h>
9176+#include <linux/slab.h>
9177+#include <linux/pagemap.h>
9178+#include <linux/compiler.h>
9179
9180 #include <asm/page.h>
9181 #include <asm/pgtable.h>
9182@@ -159,6 +162,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
9183 return safe_compute_effective_address(regs, insn);
9184 }
9185
9186+#ifdef CONFIG_PAX_PAGEEXEC
9187+#ifdef CONFIG_PAX_DLRESOLVE
9188+static void pax_emuplt_close(struct vm_area_struct *vma)
9189+{
9190+ vma->vm_mm->call_dl_resolve = 0UL;
9191+}
9192+
9193+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9194+{
9195+ unsigned int *kaddr;
9196+
9197+ vmf->page = alloc_page(GFP_HIGHUSER);
9198+ if (!vmf->page)
9199+ return VM_FAULT_OOM;
9200+
9201+ kaddr = kmap(vmf->page);
9202+ memset(kaddr, 0, PAGE_SIZE);
9203+ kaddr[0] = 0x9DE3BFA8U; /* save */
9204+ flush_dcache_page(vmf->page);
9205+ kunmap(vmf->page);
9206+ return VM_FAULT_MAJOR;
9207+}
9208+
9209+static const struct vm_operations_struct pax_vm_ops = {
9210+ .close = pax_emuplt_close,
9211+ .fault = pax_emuplt_fault
9212+};
9213+
9214+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
9215+{
9216+ int ret;
9217+
9218+ INIT_LIST_HEAD(&vma->anon_vma_chain);
9219+ vma->vm_mm = current->mm;
9220+ vma->vm_start = addr;
9221+ vma->vm_end = addr + PAGE_SIZE;
9222+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
9223+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
9224+ vma->vm_ops = &pax_vm_ops;
9225+
9226+ ret = insert_vm_struct(current->mm, vma);
9227+ if (ret)
9228+ return ret;
9229+
9230+ ++current->mm->total_vm;
9231+ return 0;
9232+}
9233+#endif
9234+
9235+/*
9236+ * PaX: decide what to do with offenders (regs->pc = fault address)
9237+ *
9238+ * returns 1 when task should be killed
9239+ * 2 when patched PLT trampoline was detected
9240+ * 3 when unpatched PLT trampoline was detected
9241+ */
9242+static int pax_handle_fetch_fault(struct pt_regs *regs)
9243+{
9244+
9245+#ifdef CONFIG_PAX_EMUPLT
9246+ int err;
9247+
9248+ do { /* PaX: patched PLT emulation #1 */
9249+ unsigned int sethi1, sethi2, jmpl;
9250+
9251+ err = get_user(sethi1, (unsigned int *)regs->pc);
9252+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
9253+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
9254+
9255+ if (err)
9256+ break;
9257+
9258+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
9259+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
9260+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
9261+ {
9262+ unsigned int addr;
9263+
9264+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
9265+ addr = regs->u_regs[UREG_G1];
9266+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
9267+ regs->pc = addr;
9268+ regs->npc = addr+4;
9269+ return 2;
9270+ }
9271+ } while (0);
9272+
9273+ do { /* PaX: patched PLT emulation #2 */
9274+ unsigned int ba;
9275+
9276+ err = get_user(ba, (unsigned int *)regs->pc);
9277+
9278+ if (err)
9279+ break;
9280+
9281+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
9282+ unsigned int addr;
9283+
9284+ if ((ba & 0xFFC00000U) == 0x30800000U)
9285+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
9286+ else
9287+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
9288+ regs->pc = addr;
9289+ regs->npc = addr+4;
9290+ return 2;
9291+ }
9292+ } while (0);
9293+
9294+ do { /* PaX: patched PLT emulation #3 */
9295+ unsigned int sethi, bajmpl, nop;
9296+
9297+ err = get_user(sethi, (unsigned int *)regs->pc);
9298+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
9299+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
9300+
9301+ if (err)
9302+ break;
9303+
9304+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9305+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
9306+ nop == 0x01000000U)
9307+ {
9308+ unsigned int addr;
9309+
9310+ addr = (sethi & 0x003FFFFFU) << 10;
9311+ regs->u_regs[UREG_G1] = addr;
9312+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
9313+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
9314+ else
9315+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
9316+ regs->pc = addr;
9317+ regs->npc = addr+4;
9318+ return 2;
9319+ }
9320+ } while (0);
9321+
9322+ do { /* PaX: unpatched PLT emulation step 1 */
9323+ unsigned int sethi, ba, nop;
9324+
9325+ err = get_user(sethi, (unsigned int *)regs->pc);
9326+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
9327+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
9328+
9329+ if (err)
9330+ break;
9331+
9332+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9333+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
9334+ nop == 0x01000000U)
9335+ {
9336+ unsigned int addr, save, call;
9337+
9338+ if ((ba & 0xFFC00000U) == 0x30800000U)
9339+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
9340+ else
9341+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
9342+
9343+ err = get_user(save, (unsigned int *)addr);
9344+ err |= get_user(call, (unsigned int *)(addr+4));
9345+ err |= get_user(nop, (unsigned int *)(addr+8));
9346+ if (err)
9347+ break;
9348+
9349+#ifdef CONFIG_PAX_DLRESOLVE
9350+ if (save == 0x9DE3BFA8U &&
9351+ (call & 0xC0000000U) == 0x40000000U &&
9352+ nop == 0x01000000U)
9353+ {
9354+ struct vm_area_struct *vma;
9355+ unsigned long call_dl_resolve;
9356+
9357+ down_read(&current->mm->mmap_sem);
9358+ call_dl_resolve = current->mm->call_dl_resolve;
9359+ up_read(&current->mm->mmap_sem);
9360+ if (likely(call_dl_resolve))
9361+ goto emulate;
9362+
9363+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
9364+
9365+ down_write(&current->mm->mmap_sem);
9366+ if (current->mm->call_dl_resolve) {
9367+ call_dl_resolve = current->mm->call_dl_resolve;
9368+ up_write(&current->mm->mmap_sem);
9369+ if (vma)
9370+ kmem_cache_free(vm_area_cachep, vma);
9371+ goto emulate;
9372+ }
9373+
9374+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
9375+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
9376+ up_write(&current->mm->mmap_sem);
9377+ if (vma)
9378+ kmem_cache_free(vm_area_cachep, vma);
9379+ return 1;
9380+ }
9381+
9382+ if (pax_insert_vma(vma, call_dl_resolve)) {
9383+ up_write(&current->mm->mmap_sem);
9384+ kmem_cache_free(vm_area_cachep, vma);
9385+ return 1;
9386+ }
9387+
9388+ current->mm->call_dl_resolve = call_dl_resolve;
9389+ up_write(&current->mm->mmap_sem);
9390+
9391+emulate:
9392+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9393+ regs->pc = call_dl_resolve;
9394+ regs->npc = addr+4;
9395+ return 3;
9396+ }
9397+#endif
9398+
9399+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
9400+ if ((save & 0xFFC00000U) == 0x05000000U &&
9401+ (call & 0xFFFFE000U) == 0x85C0A000U &&
9402+ nop == 0x01000000U)
9403+ {
9404+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9405+ regs->u_regs[UREG_G2] = addr + 4;
9406+ addr = (save & 0x003FFFFFU) << 10;
9407+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
9408+ regs->pc = addr;
9409+ regs->npc = addr+4;
9410+ return 3;
9411+ }
9412+ }
9413+ } while (0);
9414+
9415+ do { /* PaX: unpatched PLT emulation step 2 */
9416+ unsigned int save, call, nop;
9417+
9418+ err = get_user(save, (unsigned int *)(regs->pc-4));
9419+ err |= get_user(call, (unsigned int *)regs->pc);
9420+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
9421+ if (err)
9422+ break;
9423+
9424+ if (save == 0x9DE3BFA8U &&
9425+ (call & 0xC0000000U) == 0x40000000U &&
9426+ nop == 0x01000000U)
9427+ {
9428+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
9429+
9430+ regs->u_regs[UREG_RETPC] = regs->pc;
9431+ regs->pc = dl_resolve;
9432+ regs->npc = dl_resolve+4;
9433+ return 3;
9434+ }
9435+ } while (0);
9436+#endif
9437+
9438+ return 1;
9439+}
9440+
9441+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9442+{
9443+ unsigned long i;
9444+
9445+ printk(KERN_ERR "PAX: bytes at PC: ");
9446+ for (i = 0; i < 8; i++) {
9447+ unsigned int c;
9448+ if (get_user(c, (unsigned int *)pc+i))
9449+ printk(KERN_CONT "???????? ");
9450+ else
9451+ printk(KERN_CONT "%08x ", c);
9452+ }
9453+ printk("\n");
9454+}
9455+#endif
9456+
9457 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
9458 int text_fault)
9459 {
9460@@ -230,6 +504,24 @@ good_area:
9461 if (!(vma->vm_flags & VM_WRITE))
9462 goto bad_area;
9463 } else {
9464+
9465+#ifdef CONFIG_PAX_PAGEEXEC
9466+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
9467+ up_read(&mm->mmap_sem);
9468+ switch (pax_handle_fetch_fault(regs)) {
9469+
9470+#ifdef CONFIG_PAX_EMUPLT
9471+ case 2:
9472+ case 3:
9473+ return;
9474+#endif
9475+
9476+ }
9477+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
9478+ do_group_exit(SIGKILL);
9479+ }
9480+#endif
9481+
9482 /* Allow reads even for write-only mappings */
9483 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
9484 goto bad_area;
9485diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
9486index 5062ff3..e0b75f3 100644
9487--- a/arch/sparc/mm/fault_64.c
9488+++ b/arch/sparc/mm/fault_64.c
9489@@ -21,6 +21,9 @@
9490 #include <linux/kprobes.h>
9491 #include <linux/kdebug.h>
9492 #include <linux/percpu.h>
9493+#include <linux/slab.h>
9494+#include <linux/pagemap.h>
9495+#include <linux/compiler.h>
9496
9497 #include <asm/page.h>
9498 #include <asm/pgtable.h>
9499@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
9500 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
9501 regs->tpc);
9502 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
9503- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
9504+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
9505 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
9506 dump_stack();
9507 unhandled_fault(regs->tpc, current, regs);
9508@@ -270,6 +273,466 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
9509 show_regs(regs);
9510 }
9511
9512+#ifdef CONFIG_PAX_PAGEEXEC
9513+#ifdef CONFIG_PAX_DLRESOLVE
9514+static void pax_emuplt_close(struct vm_area_struct *vma)
9515+{
9516+ vma->vm_mm->call_dl_resolve = 0UL;
9517+}
9518+
9519+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9520+{
9521+ unsigned int *kaddr;
9522+
9523+ vmf->page = alloc_page(GFP_HIGHUSER);
9524+ if (!vmf->page)
9525+ return VM_FAULT_OOM;
9526+
9527+ kaddr = kmap(vmf->page);
9528+ memset(kaddr, 0, PAGE_SIZE);
9529+ kaddr[0] = 0x9DE3BFA8U; /* save */
9530+ flush_dcache_page(vmf->page);
9531+ kunmap(vmf->page);
9532+ return VM_FAULT_MAJOR;
9533+}
9534+
9535+static const struct vm_operations_struct pax_vm_ops = {
9536+ .close = pax_emuplt_close,
9537+ .fault = pax_emuplt_fault
9538+};
9539+
9540+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
9541+{
9542+ int ret;
9543+
9544+ INIT_LIST_HEAD(&vma->anon_vma_chain);
9545+ vma->vm_mm = current->mm;
9546+ vma->vm_start = addr;
9547+ vma->vm_end = addr + PAGE_SIZE;
9548+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
9549+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
9550+ vma->vm_ops = &pax_vm_ops;
9551+
9552+ ret = insert_vm_struct(current->mm, vma);
9553+ if (ret)
9554+ return ret;
9555+
9556+ ++current->mm->total_vm;
9557+ return 0;
9558+}
9559+#endif
9560+
9561+/*
9562+ * PaX: decide what to do with offenders (regs->tpc = fault address)
9563+ *
9564+ * returns 1 when task should be killed
9565+ * 2 when patched PLT trampoline was detected
9566+ * 3 when unpatched PLT trampoline was detected
9567+ */
9568+static int pax_handle_fetch_fault(struct pt_regs *regs)
9569+{
9570+
9571+#ifdef CONFIG_PAX_EMUPLT
9572+ int err;
9573+
9574+ do { /* PaX: patched PLT emulation #1 */
9575+ unsigned int sethi1, sethi2, jmpl;
9576+
9577+ err = get_user(sethi1, (unsigned int *)regs->tpc);
9578+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
9579+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
9580+
9581+ if (err)
9582+ break;
9583+
9584+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
9585+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
9586+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
9587+ {
9588+ unsigned long addr;
9589+
9590+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
9591+ addr = regs->u_regs[UREG_G1];
9592+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
9593+
9594+ if (test_thread_flag(TIF_32BIT))
9595+ addr &= 0xFFFFFFFFUL;
9596+
9597+ regs->tpc = addr;
9598+ regs->tnpc = addr+4;
9599+ return 2;
9600+ }
9601+ } while (0);
9602+
9603+ do { /* PaX: patched PLT emulation #2 */
9604+ unsigned int ba;
9605+
9606+ err = get_user(ba, (unsigned int *)regs->tpc);
9607+
9608+ if (err)
9609+ break;
9610+
9611+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
9612+ unsigned long addr;
9613+
9614+ if ((ba & 0xFFC00000U) == 0x30800000U)
9615+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
9616+ else
9617+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
9618+
9619+ if (test_thread_flag(TIF_32BIT))
9620+ addr &= 0xFFFFFFFFUL;
9621+
9622+ regs->tpc = addr;
9623+ regs->tnpc = addr+4;
9624+ return 2;
9625+ }
9626+ } while (0);
9627+
9628+ do { /* PaX: patched PLT emulation #3 */
9629+ unsigned int sethi, bajmpl, nop;
9630+
9631+ err = get_user(sethi, (unsigned int *)regs->tpc);
9632+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
9633+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
9634+
9635+ if (err)
9636+ break;
9637+
9638+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9639+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
9640+ nop == 0x01000000U)
9641+ {
9642+ unsigned long addr;
9643+
9644+ addr = (sethi & 0x003FFFFFU) << 10;
9645+ regs->u_regs[UREG_G1] = addr;
9646+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
9647+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
9648+ else
9649+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
9650+
9651+ if (test_thread_flag(TIF_32BIT))
9652+ addr &= 0xFFFFFFFFUL;
9653+
9654+ regs->tpc = addr;
9655+ regs->tnpc = addr+4;
9656+ return 2;
9657+ }
9658+ } while (0);
9659+
9660+ do { /* PaX: patched PLT emulation #4 */
9661+ unsigned int sethi, mov1, call, mov2;
9662+
9663+ err = get_user(sethi, (unsigned int *)regs->tpc);
9664+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
9665+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
9666+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
9667+
9668+ if (err)
9669+ break;
9670+
9671+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9672+ mov1 == 0x8210000FU &&
9673+ (call & 0xC0000000U) == 0x40000000U &&
9674+ mov2 == 0x9E100001U)
9675+ {
9676+ unsigned long addr;
9677+
9678+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
9679+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
9680+
9681+ if (test_thread_flag(TIF_32BIT))
9682+ addr &= 0xFFFFFFFFUL;
9683+
9684+ regs->tpc = addr;
9685+ regs->tnpc = addr+4;
9686+ return 2;
9687+ }
9688+ } while (0);
9689+
9690+ do { /* PaX: patched PLT emulation #5 */
9691+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
9692+
9693+ err = get_user(sethi, (unsigned int *)regs->tpc);
9694+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
9695+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
9696+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
9697+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
9698+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
9699+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
9700+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
9701+
9702+ if (err)
9703+ break;
9704+
9705+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9706+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
9707+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
9708+ (or1 & 0xFFFFE000U) == 0x82106000U &&
9709+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
9710+ sllx == 0x83287020U &&
9711+ jmpl == 0x81C04005U &&
9712+ nop == 0x01000000U)
9713+ {
9714+ unsigned long addr;
9715+
9716+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
9717+ regs->u_regs[UREG_G1] <<= 32;
9718+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
9719+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
9720+ regs->tpc = addr;
9721+ regs->tnpc = addr+4;
9722+ return 2;
9723+ }
9724+ } while (0);
9725+
9726+ do { /* PaX: patched PLT emulation #6 */
9727+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
9728+
9729+ err = get_user(sethi, (unsigned int *)regs->tpc);
9730+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
9731+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
9732+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
9733+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
9734+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
9735+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
9736+
9737+ if (err)
9738+ break;
9739+
9740+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9741+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
9742+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
9743+ sllx == 0x83287020U &&
9744+ (or & 0xFFFFE000U) == 0x8A116000U &&
9745+ jmpl == 0x81C04005U &&
9746+ nop == 0x01000000U)
9747+ {
9748+ unsigned long addr;
9749+
9750+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
9751+ regs->u_regs[UREG_G1] <<= 32;
9752+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
9753+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
9754+ regs->tpc = addr;
9755+ regs->tnpc = addr+4;
9756+ return 2;
9757+ }
9758+ } while (0);
9759+
9760+ do { /* PaX: unpatched PLT emulation step 1 */
9761+ unsigned int sethi, ba, nop;
9762+
9763+ err = get_user(sethi, (unsigned int *)regs->tpc);
9764+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
9765+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
9766+
9767+ if (err)
9768+ break;
9769+
9770+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9771+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
9772+ nop == 0x01000000U)
9773+ {
9774+ unsigned long addr;
9775+ unsigned int save, call;
9776+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
9777+
9778+ if ((ba & 0xFFC00000U) == 0x30800000U)
9779+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
9780+ else
9781+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
9782+
9783+ if (test_thread_flag(TIF_32BIT))
9784+ addr &= 0xFFFFFFFFUL;
9785+
9786+ err = get_user(save, (unsigned int *)addr);
9787+ err |= get_user(call, (unsigned int *)(addr+4));
9788+ err |= get_user(nop, (unsigned int *)(addr+8));
9789+ if (err)
9790+ break;
9791+
9792+#ifdef CONFIG_PAX_DLRESOLVE
9793+ if (save == 0x9DE3BFA8U &&
9794+ (call & 0xC0000000U) == 0x40000000U &&
9795+ nop == 0x01000000U)
9796+ {
9797+ struct vm_area_struct *vma;
9798+ unsigned long call_dl_resolve;
9799+
9800+ down_read(&current->mm->mmap_sem);
9801+ call_dl_resolve = current->mm->call_dl_resolve;
9802+ up_read(&current->mm->mmap_sem);
9803+ if (likely(call_dl_resolve))
9804+ goto emulate;
9805+
9806+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
9807+
9808+ down_write(&current->mm->mmap_sem);
9809+ if (current->mm->call_dl_resolve) {
9810+ call_dl_resolve = current->mm->call_dl_resolve;
9811+ up_write(&current->mm->mmap_sem);
9812+ if (vma)
9813+ kmem_cache_free(vm_area_cachep, vma);
9814+ goto emulate;
9815+ }
9816+
9817+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
9818+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
9819+ up_write(&current->mm->mmap_sem);
9820+ if (vma)
9821+ kmem_cache_free(vm_area_cachep, vma);
9822+ return 1;
9823+ }
9824+
9825+ if (pax_insert_vma(vma, call_dl_resolve)) {
9826+ up_write(&current->mm->mmap_sem);
9827+ kmem_cache_free(vm_area_cachep, vma);
9828+ return 1;
9829+ }
9830+
9831+ current->mm->call_dl_resolve = call_dl_resolve;
9832+ up_write(&current->mm->mmap_sem);
9833+
9834+emulate:
9835+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9836+ regs->tpc = call_dl_resolve;
9837+ regs->tnpc = addr+4;
9838+ return 3;
9839+ }
9840+#endif
9841+
9842+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
9843+ if ((save & 0xFFC00000U) == 0x05000000U &&
9844+ (call & 0xFFFFE000U) == 0x85C0A000U &&
9845+ nop == 0x01000000U)
9846+ {
9847+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9848+ regs->u_regs[UREG_G2] = addr + 4;
9849+ addr = (save & 0x003FFFFFU) << 10;
9850+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
9851+
9852+ if (test_thread_flag(TIF_32BIT))
9853+ addr &= 0xFFFFFFFFUL;
9854+
9855+ regs->tpc = addr;
9856+ regs->tnpc = addr+4;
9857+ return 3;
9858+ }
9859+
9860+ /* PaX: 64-bit PLT stub */
9861+ err = get_user(sethi1, (unsigned int *)addr);
9862+ err |= get_user(sethi2, (unsigned int *)(addr+4));
9863+ err |= get_user(or1, (unsigned int *)(addr+8));
9864+ err |= get_user(or2, (unsigned int *)(addr+12));
9865+ err |= get_user(sllx, (unsigned int *)(addr+16));
9866+ err |= get_user(add, (unsigned int *)(addr+20));
9867+ err |= get_user(jmpl, (unsigned int *)(addr+24));
9868+ err |= get_user(nop, (unsigned int *)(addr+28));
9869+ if (err)
9870+ break;
9871+
9872+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
9873+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
9874+ (or1 & 0xFFFFE000U) == 0x88112000U &&
9875+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
9876+ sllx == 0x89293020U &&
9877+ add == 0x8A010005U &&
9878+ jmpl == 0x89C14000U &&
9879+ nop == 0x01000000U)
9880+ {
9881+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9882+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
9883+ regs->u_regs[UREG_G4] <<= 32;
9884+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
9885+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
9886+ regs->u_regs[UREG_G4] = addr + 24;
9887+ addr = regs->u_regs[UREG_G5];
9888+ regs->tpc = addr;
9889+ regs->tnpc = addr+4;
9890+ return 3;
9891+ }
9892+ }
9893+ } while (0);
9894+
9895+#ifdef CONFIG_PAX_DLRESOLVE
9896+ do { /* PaX: unpatched PLT emulation step 2 */
9897+ unsigned int save, call, nop;
9898+
9899+ err = get_user(save, (unsigned int *)(regs->tpc-4));
9900+ err |= get_user(call, (unsigned int *)regs->tpc);
9901+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
9902+ if (err)
9903+ break;
9904+
9905+ if (save == 0x9DE3BFA8U &&
9906+ (call & 0xC0000000U) == 0x40000000U &&
9907+ nop == 0x01000000U)
9908+ {
9909+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
9910+
9911+ if (test_thread_flag(TIF_32BIT))
9912+ dl_resolve &= 0xFFFFFFFFUL;
9913+
9914+ regs->u_regs[UREG_RETPC] = regs->tpc;
9915+ regs->tpc = dl_resolve;
9916+ regs->tnpc = dl_resolve+4;
9917+ return 3;
9918+ }
9919+ } while (0);
9920+#endif
9921+
9922+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
9923+ unsigned int sethi, ba, nop;
9924+
9925+ err = get_user(sethi, (unsigned int *)regs->tpc);
9926+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
9927+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
9928+
9929+ if (err)
9930+ break;
9931+
9932+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9933+ (ba & 0xFFF00000U) == 0x30600000U &&
9934+ nop == 0x01000000U)
9935+ {
9936+ unsigned long addr;
9937+
9938+ addr = (sethi & 0x003FFFFFU) << 10;
9939+ regs->u_regs[UREG_G1] = addr;
9940+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
9941+
9942+ if (test_thread_flag(TIF_32BIT))
9943+ addr &= 0xFFFFFFFFUL;
9944+
9945+ regs->tpc = addr;
9946+ regs->tnpc = addr+4;
9947+ return 2;
9948+ }
9949+ } while (0);
9950+
9951+#endif
9952+
9953+ return 1;
9954+}
9955+
9956+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9957+{
9958+ unsigned long i;
9959+
9960+ printk(KERN_ERR "PAX: bytes at PC: ");
9961+ for (i = 0; i < 8; i++) {
9962+ unsigned int c;
9963+ if (get_user(c, (unsigned int *)pc+i))
9964+ printk(KERN_CONT "???????? ");
9965+ else
9966+ printk(KERN_CONT "%08x ", c);
9967+ }
9968+ printk("\n");
9969+}
9970+#endif
9971+
9972 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
9973 {
9974 struct mm_struct *mm = current->mm;
9975@@ -341,6 +804,29 @@ retry:
9976 if (!vma)
9977 goto bad_area;
9978
9979+#ifdef CONFIG_PAX_PAGEEXEC
9980+ /* PaX: detect ITLB misses on non-exec pages */
9981+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
9982+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
9983+ {
9984+ if (address != regs->tpc)
9985+ goto good_area;
9986+
9987+ up_read(&mm->mmap_sem);
9988+ switch (pax_handle_fetch_fault(regs)) {
9989+
9990+#ifdef CONFIG_PAX_EMUPLT
9991+ case 2:
9992+ case 3:
9993+ return;
9994+#endif
9995+
9996+ }
9997+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
9998+ do_group_exit(SIGKILL);
9999+ }
10000+#endif
10001+
10002 /* Pure DTLB misses do not tell us whether the fault causing
10003 * load/store/atomic was a write or not, it only says that there
10004 * was no match. So in such a case we (carefully) read the
10005diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
10006index d2b5944..bd813f2 100644
10007--- a/arch/sparc/mm/hugetlbpage.c
10008+++ b/arch/sparc/mm/hugetlbpage.c
10009@@ -38,7 +38,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
10010
10011 info.flags = 0;
10012 info.length = len;
10013- info.low_limit = TASK_UNMAPPED_BASE;
10014+ info.low_limit = mm->mmap_base;
10015 info.high_limit = min(task_size, VA_EXCLUDE_START);
10016 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
10017 info.align_offset = 0;
10018@@ -47,6 +47,12 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
10019 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
10020 VM_BUG_ON(addr != -ENOMEM);
10021 info.low_limit = VA_EXCLUDE_END;
10022+
10023+#ifdef CONFIG_PAX_RANDMMAP
10024+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10025+ info.low_limit += mm->delta_mmap;
10026+#endif
10027+
10028 info.high_limit = task_size;
10029 addr = vm_unmapped_area(&info);
10030 }
10031@@ -85,6 +91,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10032 VM_BUG_ON(addr != -ENOMEM);
10033 info.flags = 0;
10034 info.low_limit = TASK_UNMAPPED_BASE;
10035+
10036+#ifdef CONFIG_PAX_RANDMMAP
10037+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10038+ info.low_limit += mm->delta_mmap;
10039+#endif
10040+
10041 info.high_limit = STACK_TOP32;
10042 addr = vm_unmapped_area(&info);
10043 }
10044@@ -99,6 +111,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
10045 struct mm_struct *mm = current->mm;
10046 struct vm_area_struct *vma;
10047 unsigned long task_size = TASK_SIZE;
10048+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
10049
10050 if (test_thread_flag(TIF_32BIT))
10051 task_size = STACK_TOP32;
10052@@ -114,11 +127,14 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
10053 return addr;
10054 }
10055
10056+#ifdef CONFIG_PAX_RANDMMAP
10057+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10058+#endif
10059+
10060 if (addr) {
10061 addr = ALIGN(addr, HPAGE_SIZE);
10062 vma = find_vma(mm, addr);
10063- if (task_size - len >= addr &&
10064- (!vma || addr + len <= vma->vm_start))
10065+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10066 return addr;
10067 }
10068 if (mm->get_unmapped_area == arch_get_unmapped_area)
10069diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
10070index 83d89bc..37e7bc4 100644
10071--- a/arch/sparc/mm/tlb.c
10072+++ b/arch/sparc/mm/tlb.c
10073@@ -85,8 +85,8 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
10074 }
10075
10076 if (!tb->active) {
10077- global_flush_tlb_page(mm, vaddr);
10078 flush_tsb_user_page(mm, vaddr);
10079+ global_flush_tlb_page(mm, vaddr);
10080 goto out;
10081 }
10082
10083diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
10084index f4500c6..889656c 100644
10085--- a/arch/tile/include/asm/atomic_64.h
10086+++ b/arch/tile/include/asm/atomic_64.h
10087@@ -143,6 +143,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
10088
10089 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
10090
10091+#define atomic64_read_unchecked(v) atomic64_read(v)
10092+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
10093+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
10094+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
10095+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
10096+#define atomic64_inc_unchecked(v) atomic64_inc(v)
10097+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
10098+#define atomic64_dec_unchecked(v) atomic64_dec(v)
10099+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
10100+
10101 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
10102 #define smp_mb__before_atomic_dec() smp_mb()
10103 #define smp_mb__after_atomic_dec() smp_mb()
10104diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
10105index a9a5299..0fce79e 100644
10106--- a/arch/tile/include/asm/cache.h
10107+++ b/arch/tile/include/asm/cache.h
10108@@ -15,11 +15,12 @@
10109 #ifndef _ASM_TILE_CACHE_H
10110 #define _ASM_TILE_CACHE_H
10111
10112+#include <linux/const.h>
10113 #include <arch/chip.h>
10114
10115 /* bytes per L1 data cache line */
10116 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
10117-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10118+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10119
10120 /* bytes per L2 cache line */
10121 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
10122diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
10123index 9ab078a..d6635c2 100644
10124--- a/arch/tile/include/asm/uaccess.h
10125+++ b/arch/tile/include/asm/uaccess.h
10126@@ -403,9 +403,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
10127 const void __user *from,
10128 unsigned long n)
10129 {
10130- int sz = __compiletime_object_size(to);
10131+ size_t sz = __compiletime_object_size(to);
10132
10133- if (likely(sz == -1 || sz >= n))
10134+ if (likely(sz == (size_t)-1 || sz >= n))
10135 n = _copy_from_user(to, from, n);
10136 else
10137 copy_from_user_overflow();
10138diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
10139index 650ccff..45fe2d6 100644
10140--- a/arch/tile/mm/hugetlbpage.c
10141+++ b/arch/tile/mm/hugetlbpage.c
10142@@ -239,6 +239,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
10143 info.high_limit = TASK_SIZE;
10144 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
10145 info.align_offset = 0;
10146+ info.threadstack_offset = 0;
10147 return vm_unmapped_area(&info);
10148 }
10149
10150@@ -256,6 +257,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
10151 info.high_limit = current->mm->mmap_base;
10152 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
10153 info.align_offset = 0;
10154+ info.threadstack_offset = 0;
10155 addr = vm_unmapped_area(&info);
10156
10157 /*
10158diff --git a/arch/um/Makefile b/arch/um/Makefile
10159index 133f7de..1d6f2f1 100644
10160--- a/arch/um/Makefile
10161+++ b/arch/um/Makefile
10162@@ -62,6 +62,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
10163 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
10164 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
10165
10166+ifdef CONSTIFY_PLUGIN
10167+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
10168+endif
10169+
10170 #This will adjust *FLAGS accordingly to the platform.
10171 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
10172
10173diff --git a/arch/um/defconfig b/arch/um/defconfig
10174index 08107a7..ab22afe 100644
10175--- a/arch/um/defconfig
10176+++ b/arch/um/defconfig
10177@@ -51,7 +51,6 @@ CONFIG_X86_CMPXCHG=y
10178 CONFIG_X86_L1_CACHE_SHIFT=5
10179 CONFIG_X86_XADD=y
10180 CONFIG_X86_PPRO_FENCE=y
10181-CONFIG_X86_WP_WORKS_OK=y
10182 CONFIG_X86_INVLPG=y
10183 CONFIG_X86_BSWAP=y
10184 CONFIG_X86_POPAD_OK=y
10185diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
10186index 19e1bdd..3665b77 100644
10187--- a/arch/um/include/asm/cache.h
10188+++ b/arch/um/include/asm/cache.h
10189@@ -1,6 +1,7 @@
10190 #ifndef __UM_CACHE_H
10191 #define __UM_CACHE_H
10192
10193+#include <linux/const.h>
10194
10195 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
10196 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
10197@@ -12,6 +13,6 @@
10198 # define L1_CACHE_SHIFT 5
10199 #endif
10200
10201-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10202+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10203
10204 #endif
10205diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
10206index 2e0a6b1..a64d0f5 100644
10207--- a/arch/um/include/asm/kmap_types.h
10208+++ b/arch/um/include/asm/kmap_types.h
10209@@ -8,6 +8,6 @@
10210
10211 /* No more #include "asm/arch/kmap_types.h" ! */
10212
10213-#define KM_TYPE_NR 14
10214+#define KM_TYPE_NR 15
10215
10216 #endif
10217diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
10218index 5ff53d9..5850cdf 100644
10219--- a/arch/um/include/asm/page.h
10220+++ b/arch/um/include/asm/page.h
10221@@ -14,6 +14,9 @@
10222 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
10223 #define PAGE_MASK (~(PAGE_SIZE-1))
10224
10225+#define ktla_ktva(addr) (addr)
10226+#define ktva_ktla(addr) (addr)
10227+
10228 #ifndef __ASSEMBLY__
10229
10230 struct page;
10231diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
10232index 0032f92..cd151e0 100644
10233--- a/arch/um/include/asm/pgtable-3level.h
10234+++ b/arch/um/include/asm/pgtable-3level.h
10235@@ -58,6 +58,7 @@
10236 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
10237 #define pud_populate(mm, pud, pmd) \
10238 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
10239+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
10240
10241 #ifdef CONFIG_64BIT
10242 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
10243diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
10244index b462b13..e7a19aa 100644
10245--- a/arch/um/kernel/process.c
10246+++ b/arch/um/kernel/process.c
10247@@ -386,22 +386,6 @@ int singlestepping(void * t)
10248 return 2;
10249 }
10250
10251-/*
10252- * Only x86 and x86_64 have an arch_align_stack().
10253- * All other arches have "#define arch_align_stack(x) (x)"
10254- * in their asm/system.h
10255- * As this is included in UML from asm-um/system-generic.h,
10256- * we can use it to behave as the subarch does.
10257- */
10258-#ifndef arch_align_stack
10259-unsigned long arch_align_stack(unsigned long sp)
10260-{
10261- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
10262- sp -= get_random_int() % 8192;
10263- return sp & ~0xf;
10264-}
10265-#endif
10266-
10267 unsigned long get_wchan(struct task_struct *p)
10268 {
10269 unsigned long stack_page, sp, ip;
10270diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
10271index ad8f795..2c7eec6 100644
10272--- a/arch/unicore32/include/asm/cache.h
10273+++ b/arch/unicore32/include/asm/cache.h
10274@@ -12,8 +12,10 @@
10275 #ifndef __UNICORE_CACHE_H__
10276 #define __UNICORE_CACHE_H__
10277
10278-#define L1_CACHE_SHIFT (5)
10279-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10280+#include <linux/const.h>
10281+
10282+#define L1_CACHE_SHIFT 5
10283+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10284
10285 /*
10286 * Memory returned by kmalloc() may be used for DMA, so we must make
10287diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
10288index 6ef2a37..74ad6ad 100644
10289--- a/arch/x86/Kconfig
10290+++ b/arch/x86/Kconfig
10291@@ -243,7 +243,7 @@ config X86_HT
10292
10293 config X86_32_LAZY_GS
10294 def_bool y
10295- depends on X86_32 && !CC_STACKPROTECTOR
10296+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10297
10298 config ARCH_HWEIGHT_CFLAGS
10299 string
10300@@ -1076,6 +1076,7 @@ config MICROCODE_EARLY
10301
10302 config X86_MSR
10303 tristate "/dev/cpu/*/msr - Model-specific register support"
10304+ depends on !GRKERNSEC_KMEM
10305 ---help---
10306 This device gives privileged processes access to the x86
10307 Model-Specific Registers (MSRs). It is a character device with
10308@@ -1099,7 +1100,7 @@ choice
10309
10310 config NOHIGHMEM
10311 bool "off"
10312- depends on !X86_NUMAQ
10313+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10314 ---help---
10315 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
10316 However, the address space of 32-bit x86 processors is only 4
10317@@ -1136,7 +1137,7 @@ config NOHIGHMEM
10318
10319 config HIGHMEM4G
10320 bool "4GB"
10321- depends on !X86_NUMAQ
10322+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10323 ---help---
10324 Select this if you have a 32-bit processor and between 1 and 4
10325 gigabytes of physical RAM.
10326@@ -1189,7 +1190,7 @@ config PAGE_OFFSET
10327 hex
10328 default 0xB0000000 if VMSPLIT_3G_OPT
10329 default 0x80000000 if VMSPLIT_2G
10330- default 0x78000000 if VMSPLIT_2G_OPT
10331+ default 0x70000000 if VMSPLIT_2G_OPT
10332 default 0x40000000 if VMSPLIT_1G
10333 default 0xC0000000
10334 depends on X86_32
10335@@ -1587,6 +1588,7 @@ config SECCOMP
10336
10337 config CC_STACKPROTECTOR
10338 bool "Enable -fstack-protector buffer overflow detection"
10339+ depends on X86_64 || !PAX_MEMORY_UDEREF
10340 ---help---
10341 This option turns on the -fstack-protector GCC feature. This
10342 feature puts, at the beginning of functions, a canary value on
10343@@ -1706,6 +1708,8 @@ config X86_NEED_RELOCS
10344 config PHYSICAL_ALIGN
10345 hex "Alignment value to which kernel should be aligned" if X86_32
10346 default "0x1000000"
10347+ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
10348+ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
10349 range 0x2000 0x1000000
10350 ---help---
10351 This value puts the alignment restrictions on physical address
10352@@ -1781,9 +1785,10 @@ config DEBUG_HOTPLUG_CPU0
10353 If unsure, say N.
10354
10355 config COMPAT_VDSO
10356- def_bool y
10357+ def_bool n
10358 prompt "Compat VDSO support"
10359 depends on X86_32 || IA32_EMULATION
10360+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
10361 ---help---
10362 Map the 32-bit VDSO to the predictable old-style address too.
10363
10364diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
10365index c026cca..14657ae 100644
10366--- a/arch/x86/Kconfig.cpu
10367+++ b/arch/x86/Kconfig.cpu
10368@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
10369
10370 config X86_F00F_BUG
10371 def_bool y
10372- depends on M586MMX || M586TSC || M586 || M486
10373+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
10374
10375 config X86_INVD_BUG
10376 def_bool y
10377@@ -327,7 +327,7 @@ config X86_INVD_BUG
10378
10379 config X86_ALIGNMENT_16
10380 def_bool y
10381- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10382+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10383
10384 config X86_INTEL_USERCOPY
10385 def_bool y
10386@@ -373,7 +373,7 @@ config X86_CMPXCHG64
10387 # generates cmov.
10388 config X86_CMOV
10389 def_bool y
10390- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10391+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10392
10393 config X86_MINIMUM_CPU_FAMILY
10394 int
10395diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
10396index b322f12..652d0d9 100644
10397--- a/arch/x86/Kconfig.debug
10398+++ b/arch/x86/Kconfig.debug
10399@@ -84,7 +84,7 @@ config X86_PTDUMP
10400 config DEBUG_RODATA
10401 bool "Write protect kernel read-only data structures"
10402 default y
10403- depends on DEBUG_KERNEL
10404+ depends on DEBUG_KERNEL && BROKEN
10405 ---help---
10406 Mark the kernel read-only data as write-protected in the pagetables,
10407 in order to catch accidental (and incorrect) writes to such const
10408@@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
10409
10410 config DEBUG_SET_MODULE_RONX
10411 bool "Set loadable kernel module data as NX and text as RO"
10412- depends on MODULES
10413+ depends on MODULES && BROKEN
10414 ---help---
10415 This option helps catch unintended modifications to loadable
10416 kernel module's text and read-only data. It also prevents execution
10417@@ -294,7 +294,7 @@ config OPTIMIZE_INLINING
10418
10419 config DEBUG_STRICT_USER_COPY_CHECKS
10420 bool "Strict copy size checks"
10421- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
10422+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
10423 ---help---
10424 Enabling this option turns a certain set of sanity checks for user
10425 copy operations into compile time failures.
10426diff --git a/arch/x86/Makefile b/arch/x86/Makefile
10427index 5c47726..8c4fa67 100644
10428--- a/arch/x86/Makefile
10429+++ b/arch/x86/Makefile
10430@@ -54,6 +54,7 @@ else
10431 UTS_MACHINE := x86_64
10432 CHECKFLAGS += -D__x86_64__ -m64
10433
10434+ biarch := $(call cc-option,-m64)
10435 KBUILD_AFLAGS += -m64
10436 KBUILD_CFLAGS += -m64
10437
10438@@ -234,3 +235,12 @@ define archhelp
10439 echo ' FDARGS="..." arguments for the booted kernel'
10440 echo ' FDINITRD=file initrd for the booted kernel'
10441 endef
10442+
10443+define OLD_LD
10444+
10445+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
10446+*** Please upgrade your binutils to 2.18 or newer
10447+endef
10448+
10449+archprepare:
10450+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
10451diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
10452index 379814b..add62ce 100644
10453--- a/arch/x86/boot/Makefile
10454+++ b/arch/x86/boot/Makefile
10455@@ -65,6 +65,9 @@ KBUILD_CFLAGS := $(USERINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
10456 $(call cc-option, -fno-stack-protector) \
10457 $(call cc-option, -mpreferred-stack-boundary=2)
10458 KBUILD_CFLAGS += $(call cc-option, -m32)
10459+ifdef CONSTIFY_PLUGIN
10460+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
10461+endif
10462 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10463 GCOV_PROFILE := n
10464
10465diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
10466index 878e4b9..20537ab 100644
10467--- a/arch/x86/boot/bitops.h
10468+++ b/arch/x86/boot/bitops.h
10469@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
10470 u8 v;
10471 const u32 *p = (const u32 *)addr;
10472
10473- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
10474+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
10475 return v;
10476 }
10477
10478@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
10479
10480 static inline void set_bit(int nr, void *addr)
10481 {
10482- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
10483+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
10484 }
10485
10486 #endif /* BOOT_BITOPS_H */
10487diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
10488index 5b75319..331a4ca 100644
10489--- a/arch/x86/boot/boot.h
10490+++ b/arch/x86/boot/boot.h
10491@@ -85,7 +85,7 @@ static inline void io_delay(void)
10492 static inline u16 ds(void)
10493 {
10494 u16 seg;
10495- asm("movw %%ds,%0" : "=rm" (seg));
10496+ asm volatile("movw %%ds,%0" : "=rm" (seg));
10497 return seg;
10498 }
10499
10500@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
10501 static inline int memcmp(const void *s1, const void *s2, size_t len)
10502 {
10503 u8 diff;
10504- asm("repe; cmpsb; setnz %0"
10505+ asm volatile("repe; cmpsb; setnz %0"
10506 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
10507 return diff;
10508 }
10509diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
10510index 5ef205c..342191d 100644
10511--- a/arch/x86/boot/compressed/Makefile
10512+++ b/arch/x86/boot/compressed/Makefile
10513@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
10514 KBUILD_CFLAGS += $(cflags-y)
10515 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
10516 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
10517+ifdef CONSTIFY_PLUGIN
10518+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
10519+endif
10520
10521 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10522 GCOV_PROFILE := n
10523diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
10524index c205035..5853587 100644
10525--- a/arch/x86/boot/compressed/eboot.c
10526+++ b/arch/x86/boot/compressed/eboot.c
10527@@ -150,7 +150,6 @@ again:
10528 *addr = max_addr;
10529 }
10530
10531-free_pool:
10532 efi_call_phys1(sys_table->boottime->free_pool, map);
10533
10534 fail:
10535@@ -214,7 +213,6 @@ static efi_status_t low_alloc(unsigned long size, unsigned long align,
10536 if (i == map_size / desc_size)
10537 status = EFI_NOT_FOUND;
10538
10539-free_pool:
10540 efi_call_phys1(sys_table->boottime->free_pool, map);
10541 fail:
10542 return status;
10543diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
10544index a53440e..c3dbf1e 100644
10545--- a/arch/x86/boot/compressed/efi_stub_32.S
10546+++ b/arch/x86/boot/compressed/efi_stub_32.S
10547@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
10548 * parameter 2, ..., param n. To make things easy, we save the return
10549 * address of efi_call_phys in a global variable.
10550 */
10551- popl %ecx
10552- movl %ecx, saved_return_addr(%edx)
10553- /* get the function pointer into ECX*/
10554- popl %ecx
10555- movl %ecx, efi_rt_function_ptr(%edx)
10556+ popl saved_return_addr(%edx)
10557+ popl efi_rt_function_ptr(%edx)
10558
10559 /*
10560 * 3. Call the physical function.
10561 */
10562- call *%ecx
10563+ call *efi_rt_function_ptr(%edx)
10564
10565 /*
10566 * 4. Balance the stack. And because EAX contain the return value,
10567@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
10568 1: popl %edx
10569 subl $1b, %edx
10570
10571- movl efi_rt_function_ptr(%edx), %ecx
10572- pushl %ecx
10573+ pushl efi_rt_function_ptr(%edx)
10574
10575 /*
10576 * 10. Push the saved return address onto the stack and return.
10577 */
10578- movl saved_return_addr(%edx), %ecx
10579- pushl %ecx
10580- ret
10581+ jmpl *saved_return_addr(%edx)
10582 ENDPROC(efi_call_phys)
10583 .previous
10584
10585diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
10586index 1e3184f..0d11e2e 100644
10587--- a/arch/x86/boot/compressed/head_32.S
10588+++ b/arch/x86/boot/compressed/head_32.S
10589@@ -118,7 +118,7 @@ preferred_addr:
10590 notl %eax
10591 andl %eax, %ebx
10592 #else
10593- movl $LOAD_PHYSICAL_ADDR, %ebx
10594+ movl $____LOAD_PHYSICAL_ADDR, %ebx
10595 #endif
10596
10597 /* Target address to relocate to for decompression */
10598@@ -204,7 +204,7 @@ relocated:
10599 * and where it was actually loaded.
10600 */
10601 movl %ebp, %ebx
10602- subl $LOAD_PHYSICAL_ADDR, %ebx
10603+ subl $____LOAD_PHYSICAL_ADDR, %ebx
10604 jz 2f /* Nothing to be done if loaded at compiled addr. */
10605 /*
10606 * Process relocations.
10607@@ -212,8 +212,7 @@ relocated:
10608
10609 1: subl $4, %edi
10610 movl (%edi), %ecx
10611- testl %ecx, %ecx
10612- jz 2f
10613+ jecxz 2f
10614 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
10615 jmp 1b
10616 2:
10617diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
10618index c1d383d..57ab51c 100644
10619--- a/arch/x86/boot/compressed/head_64.S
10620+++ b/arch/x86/boot/compressed/head_64.S
10621@@ -97,7 +97,7 @@ ENTRY(startup_32)
10622 notl %eax
10623 andl %eax, %ebx
10624 #else
10625- movl $LOAD_PHYSICAL_ADDR, %ebx
10626+ movl $____LOAD_PHYSICAL_ADDR, %ebx
10627 #endif
10628
10629 /* Target address to relocate to for decompression */
10630@@ -272,7 +272,7 @@ preferred_addr:
10631 notq %rax
10632 andq %rax, %rbp
10633 #else
10634- movq $LOAD_PHYSICAL_ADDR, %rbp
10635+ movq $____LOAD_PHYSICAL_ADDR, %rbp
10636 #endif
10637
10638 /* Target address to relocate to for decompression */
10639@@ -363,8 +363,8 @@ gdt:
10640 .long gdt
10641 .word 0
10642 .quad 0x0000000000000000 /* NULL descriptor */
10643- .quad 0x00af9a000000ffff /* __KERNEL_CS */
10644- .quad 0x00cf92000000ffff /* __KERNEL_DS */
10645+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
10646+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
10647 .quad 0x0080890000000000 /* TS descriptor */
10648 .quad 0x0000000000000000 /* TS continued */
10649 gdt_end:
10650diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
10651index 7cb56c6..d382d84 100644
10652--- a/arch/x86/boot/compressed/misc.c
10653+++ b/arch/x86/boot/compressed/misc.c
10654@@ -303,7 +303,7 @@ static void parse_elf(void *output)
10655 case PT_LOAD:
10656 #ifdef CONFIG_RELOCATABLE
10657 dest = output;
10658- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
10659+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
10660 #else
10661 dest = (void *)(phdr->p_paddr);
10662 #endif
10663@@ -354,7 +354,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
10664 error("Destination address too large");
10665 #endif
10666 #ifndef CONFIG_RELOCATABLE
10667- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
10668+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
10669 error("Wrong destination address");
10670 #endif
10671
10672diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
10673index 4d3ff03..e4972ff 100644
10674--- a/arch/x86/boot/cpucheck.c
10675+++ b/arch/x86/boot/cpucheck.c
10676@@ -74,7 +74,7 @@ static int has_fpu(void)
10677 u16 fcw = -1, fsw = -1;
10678 u32 cr0;
10679
10680- asm("movl %%cr0,%0" : "=r" (cr0));
10681+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
10682 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
10683 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
10684 asm volatile("movl %0,%%cr0" : : "r" (cr0));
10685@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
10686 {
10687 u32 f0, f1;
10688
10689- asm("pushfl ; "
10690+ asm volatile("pushfl ; "
10691 "pushfl ; "
10692 "popl %0 ; "
10693 "movl %0,%1 ; "
10694@@ -115,7 +115,7 @@ static void get_flags(void)
10695 set_bit(X86_FEATURE_FPU, cpu.flags);
10696
10697 if (has_eflag(X86_EFLAGS_ID)) {
10698- asm("cpuid"
10699+ asm volatile("cpuid"
10700 : "=a" (max_intel_level),
10701 "=b" (cpu_vendor[0]),
10702 "=d" (cpu_vendor[1]),
10703@@ -124,7 +124,7 @@ static void get_flags(void)
10704
10705 if (max_intel_level >= 0x00000001 &&
10706 max_intel_level <= 0x0000ffff) {
10707- asm("cpuid"
10708+ asm volatile("cpuid"
10709 : "=a" (tfms),
10710 "=c" (cpu.flags[4]),
10711 "=d" (cpu.flags[0])
10712@@ -136,7 +136,7 @@ static void get_flags(void)
10713 cpu.model += ((tfms >> 16) & 0xf) << 4;
10714 }
10715
10716- asm("cpuid"
10717+ asm volatile("cpuid"
10718 : "=a" (max_amd_level)
10719 : "a" (0x80000000)
10720 : "ebx", "ecx", "edx");
10721@@ -144,7 +144,7 @@ static void get_flags(void)
10722 if (max_amd_level >= 0x80000001 &&
10723 max_amd_level <= 0x8000ffff) {
10724 u32 eax = 0x80000001;
10725- asm("cpuid"
10726+ asm volatile("cpuid"
10727 : "+a" (eax),
10728 "=c" (cpu.flags[6]),
10729 "=d" (cpu.flags[1])
10730@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
10731 u32 ecx = MSR_K7_HWCR;
10732 u32 eax, edx;
10733
10734- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10735+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10736 eax &= ~(1 << 15);
10737- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10738+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10739
10740 get_flags(); /* Make sure it really did something */
10741 err = check_flags();
10742@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
10743 u32 ecx = MSR_VIA_FCR;
10744 u32 eax, edx;
10745
10746- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10747+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10748 eax |= (1<<1)|(1<<7);
10749- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10750+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10751
10752 set_bit(X86_FEATURE_CX8, cpu.flags);
10753 err = check_flags();
10754@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
10755 u32 eax, edx;
10756 u32 level = 1;
10757
10758- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10759- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
10760- asm("cpuid"
10761+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10762+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
10763+ asm volatile("cpuid"
10764 : "+a" (level), "=d" (cpu.flags[0])
10765 : : "ecx", "ebx");
10766- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10767+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10768
10769 err = check_flags();
10770 }
10771diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
10772index 9ec06a1..2c25e79 100644
10773--- a/arch/x86/boot/header.S
10774+++ b/arch/x86/boot/header.S
10775@@ -409,10 +409,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
10776 # single linked list of
10777 # struct setup_data
10778
10779-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
10780+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
10781
10782 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
10783+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
10784+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
10785+#else
10786 #define VO_INIT_SIZE (VO__end - VO__text)
10787+#endif
10788 #if ZO_INIT_SIZE > VO_INIT_SIZE
10789 #define INIT_SIZE ZO_INIT_SIZE
10790 #else
10791diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
10792index db75d07..8e6d0af 100644
10793--- a/arch/x86/boot/memory.c
10794+++ b/arch/x86/boot/memory.c
10795@@ -19,7 +19,7 @@
10796
10797 static int detect_memory_e820(void)
10798 {
10799- int count = 0;
10800+ unsigned int count = 0;
10801 struct biosregs ireg, oreg;
10802 struct e820entry *desc = boot_params.e820_map;
10803 static struct e820entry buf; /* static so it is zeroed */
10804diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
10805index 11e8c6e..fdbb1ed 100644
10806--- a/arch/x86/boot/video-vesa.c
10807+++ b/arch/x86/boot/video-vesa.c
10808@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
10809
10810 boot_params.screen_info.vesapm_seg = oreg.es;
10811 boot_params.screen_info.vesapm_off = oreg.di;
10812+ boot_params.screen_info.vesapm_size = oreg.cx;
10813 }
10814
10815 /*
10816diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
10817index 43eda28..5ab5fdb 100644
10818--- a/arch/x86/boot/video.c
10819+++ b/arch/x86/boot/video.c
10820@@ -96,7 +96,7 @@ static void store_mode_params(void)
10821 static unsigned int get_entry(void)
10822 {
10823 char entry_buf[4];
10824- int i, len = 0;
10825+ unsigned int i, len = 0;
10826 int key;
10827 unsigned int v;
10828
10829diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
10830index 9105655..5e37f27 100644
10831--- a/arch/x86/crypto/aes-x86_64-asm_64.S
10832+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
10833@@ -8,6 +8,8 @@
10834 * including this sentence is retained in full.
10835 */
10836
10837+#include <asm/alternative-asm.h>
10838+
10839 .extern crypto_ft_tab
10840 .extern crypto_it_tab
10841 .extern crypto_fl_tab
10842@@ -70,6 +72,8 @@
10843 je B192; \
10844 leaq 32(r9),r9;
10845
10846+#define ret pax_force_retaddr 0, 1; ret
10847+
10848 #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
10849 movq r1,r2; \
10850 movq r3,r4; \
10851diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
10852index 04b7977..402f223 100644
10853--- a/arch/x86/crypto/aesni-intel_asm.S
10854+++ b/arch/x86/crypto/aesni-intel_asm.S
10855@@ -31,6 +31,7 @@
10856
10857 #include <linux/linkage.h>
10858 #include <asm/inst.h>
10859+#include <asm/alternative-asm.h>
10860
10861 #ifdef __x86_64__
10862 .data
10863@@ -1435,6 +1436,7 @@ _return_T_done_decrypt:
10864 pop %r14
10865 pop %r13
10866 pop %r12
10867+ pax_force_retaddr 0, 1
10868 ret
10869 ENDPROC(aesni_gcm_dec)
10870
10871@@ -1699,6 +1701,7 @@ _return_T_done_encrypt:
10872 pop %r14
10873 pop %r13
10874 pop %r12
10875+ pax_force_retaddr 0, 1
10876 ret
10877 ENDPROC(aesni_gcm_enc)
10878
10879@@ -1716,6 +1719,7 @@ _key_expansion_256a:
10880 pxor %xmm1, %xmm0
10881 movaps %xmm0, (TKEYP)
10882 add $0x10, TKEYP
10883+ pax_force_retaddr_bts
10884 ret
10885 ENDPROC(_key_expansion_128)
10886 ENDPROC(_key_expansion_256a)
10887@@ -1742,6 +1746,7 @@ _key_expansion_192a:
10888 shufps $0b01001110, %xmm2, %xmm1
10889 movaps %xmm1, 0x10(TKEYP)
10890 add $0x20, TKEYP
10891+ pax_force_retaddr_bts
10892 ret
10893 ENDPROC(_key_expansion_192a)
10894
10895@@ -1762,6 +1767,7 @@ _key_expansion_192b:
10896
10897 movaps %xmm0, (TKEYP)
10898 add $0x10, TKEYP
10899+ pax_force_retaddr_bts
10900 ret
10901 ENDPROC(_key_expansion_192b)
10902
10903@@ -1775,6 +1781,7 @@ _key_expansion_256b:
10904 pxor %xmm1, %xmm2
10905 movaps %xmm2, (TKEYP)
10906 add $0x10, TKEYP
10907+ pax_force_retaddr_bts
10908 ret
10909 ENDPROC(_key_expansion_256b)
10910
10911@@ -1888,6 +1895,7 @@ ENTRY(aesni_set_key)
10912 #ifndef __x86_64__
10913 popl KEYP
10914 #endif
10915+ pax_force_retaddr 0, 1
10916 ret
10917 ENDPROC(aesni_set_key)
10918
10919@@ -1910,6 +1918,7 @@ ENTRY(aesni_enc)
10920 popl KLEN
10921 popl KEYP
10922 #endif
10923+ pax_force_retaddr 0, 1
10924 ret
10925 ENDPROC(aesni_enc)
10926
10927@@ -1968,6 +1977,7 @@ _aesni_enc1:
10928 AESENC KEY STATE
10929 movaps 0x70(TKEYP), KEY
10930 AESENCLAST KEY STATE
10931+ pax_force_retaddr_bts
10932 ret
10933 ENDPROC(_aesni_enc1)
10934
10935@@ -2077,6 +2087,7 @@ _aesni_enc4:
10936 AESENCLAST KEY STATE2
10937 AESENCLAST KEY STATE3
10938 AESENCLAST KEY STATE4
10939+ pax_force_retaddr_bts
10940 ret
10941 ENDPROC(_aesni_enc4)
10942
10943@@ -2100,6 +2111,7 @@ ENTRY(aesni_dec)
10944 popl KLEN
10945 popl KEYP
10946 #endif
10947+ pax_force_retaddr 0, 1
10948 ret
10949 ENDPROC(aesni_dec)
10950
10951@@ -2158,6 +2170,7 @@ _aesni_dec1:
10952 AESDEC KEY STATE
10953 movaps 0x70(TKEYP), KEY
10954 AESDECLAST KEY STATE
10955+ pax_force_retaddr_bts
10956 ret
10957 ENDPROC(_aesni_dec1)
10958
10959@@ -2267,6 +2280,7 @@ _aesni_dec4:
10960 AESDECLAST KEY STATE2
10961 AESDECLAST KEY STATE3
10962 AESDECLAST KEY STATE4
10963+ pax_force_retaddr_bts
10964 ret
10965 ENDPROC(_aesni_dec4)
10966
10967@@ -2325,6 +2339,7 @@ ENTRY(aesni_ecb_enc)
10968 popl KEYP
10969 popl LEN
10970 #endif
10971+ pax_force_retaddr 0, 1
10972 ret
10973 ENDPROC(aesni_ecb_enc)
10974
10975@@ -2384,6 +2399,7 @@ ENTRY(aesni_ecb_dec)
10976 popl KEYP
10977 popl LEN
10978 #endif
10979+ pax_force_retaddr 0, 1
10980 ret
10981 ENDPROC(aesni_ecb_dec)
10982
10983@@ -2426,6 +2442,7 @@ ENTRY(aesni_cbc_enc)
10984 popl LEN
10985 popl IVP
10986 #endif
10987+ pax_force_retaddr 0, 1
10988 ret
10989 ENDPROC(aesni_cbc_enc)
10990
10991@@ -2517,6 +2534,7 @@ ENTRY(aesni_cbc_dec)
10992 popl LEN
10993 popl IVP
10994 #endif
10995+ pax_force_retaddr 0, 1
10996 ret
10997 ENDPROC(aesni_cbc_dec)
10998
10999@@ -2544,6 +2562,7 @@ _aesni_inc_init:
11000 mov $1, TCTR_LOW
11001 MOVQ_R64_XMM TCTR_LOW INC
11002 MOVQ_R64_XMM CTR TCTR_LOW
11003+ pax_force_retaddr_bts
11004 ret
11005 ENDPROC(_aesni_inc_init)
11006
11007@@ -2573,6 +2592,7 @@ _aesni_inc:
11008 .Linc_low:
11009 movaps CTR, IV
11010 PSHUFB_XMM BSWAP_MASK IV
11011+ pax_force_retaddr_bts
11012 ret
11013 ENDPROC(_aesni_inc)
11014
11015@@ -2634,6 +2654,7 @@ ENTRY(aesni_ctr_enc)
11016 .Lctr_enc_ret:
11017 movups IV, (IVP)
11018 .Lctr_enc_just_ret:
11019+ pax_force_retaddr 0, 1
11020 ret
11021 ENDPROC(aesni_ctr_enc)
11022 #endif
11023diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
11024index 246c670..4d1ed00 100644
11025--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
11026+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
11027@@ -21,6 +21,7 @@
11028 */
11029
11030 #include <linux/linkage.h>
11031+#include <asm/alternative-asm.h>
11032
11033 .file "blowfish-x86_64-asm.S"
11034 .text
11035@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
11036 jnz .L__enc_xor;
11037
11038 write_block();
11039+ pax_force_retaddr 0, 1
11040 ret;
11041 .L__enc_xor:
11042 xor_block();
11043+ pax_force_retaddr 0, 1
11044 ret;
11045 ENDPROC(__blowfish_enc_blk)
11046
11047@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
11048
11049 movq %r11, %rbp;
11050
11051+ pax_force_retaddr 0, 1
11052 ret;
11053 ENDPROC(blowfish_dec_blk)
11054
11055@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
11056
11057 popq %rbx;
11058 popq %rbp;
11059+ pax_force_retaddr 0, 1
11060 ret;
11061
11062 .L__enc_xor4:
11063@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
11064
11065 popq %rbx;
11066 popq %rbp;
11067+ pax_force_retaddr 0, 1
11068 ret;
11069 ENDPROC(__blowfish_enc_blk_4way)
11070
11071@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
11072 popq %rbx;
11073 popq %rbp;
11074
11075+ pax_force_retaddr 0, 1
11076 ret;
11077 ENDPROC(blowfish_dec_blk_4way)
11078diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
11079index 310319c..ce174a4 100644
11080--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
11081+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
11082@@ -21,6 +21,7 @@
11083 */
11084
11085 #include <linux/linkage.h>
11086+#include <asm/alternative-asm.h>
11087
11088 .file "camellia-x86_64-asm_64.S"
11089 .text
11090@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
11091 enc_outunpack(mov, RT1);
11092
11093 movq RRBP, %rbp;
11094+ pax_force_retaddr 0, 1
11095 ret;
11096
11097 .L__enc_xor:
11098 enc_outunpack(xor, RT1);
11099
11100 movq RRBP, %rbp;
11101+ pax_force_retaddr 0, 1
11102 ret;
11103 ENDPROC(__camellia_enc_blk)
11104
11105@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
11106 dec_outunpack();
11107
11108 movq RRBP, %rbp;
11109+ pax_force_retaddr 0, 1
11110 ret;
11111 ENDPROC(camellia_dec_blk)
11112
11113@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
11114
11115 movq RRBP, %rbp;
11116 popq %rbx;
11117+ pax_force_retaddr 0, 1
11118 ret;
11119
11120 .L__enc2_xor:
11121@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
11122
11123 movq RRBP, %rbp;
11124 popq %rbx;
11125+ pax_force_retaddr 0, 1
11126 ret;
11127 ENDPROC(__camellia_enc_blk_2way)
11128
11129@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
11130
11131 movq RRBP, %rbp;
11132 movq RXOR, %rbx;
11133+ pax_force_retaddr 0, 1
11134 ret;
11135 ENDPROC(camellia_dec_blk_2way)
11136diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
11137index c35fd5d..c1ee236 100644
11138--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
11139+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
11140@@ -24,6 +24,7 @@
11141 */
11142
11143 #include <linux/linkage.h>
11144+#include <asm/alternative-asm.h>
11145
11146 .file "cast5-avx-x86_64-asm_64.S"
11147
11148@@ -281,6 +282,7 @@ __cast5_enc_blk16:
11149 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
11150 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
11151
11152+ pax_force_retaddr 0, 1
11153 ret;
11154 ENDPROC(__cast5_enc_blk16)
11155
11156@@ -352,6 +354,7 @@ __cast5_dec_blk16:
11157 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
11158 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
11159
11160+ pax_force_retaddr 0, 1
11161 ret;
11162
11163 .L__skip_dec:
11164@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
11165 vmovdqu RR4, (6*4*4)(%r11);
11166 vmovdqu RL4, (7*4*4)(%r11);
11167
11168+ pax_force_retaddr
11169 ret;
11170 ENDPROC(cast5_ecb_enc_16way)
11171
11172@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
11173 vmovdqu RR4, (6*4*4)(%r11);
11174 vmovdqu RL4, (7*4*4)(%r11);
11175
11176+ pax_force_retaddr
11177 ret;
11178 ENDPROC(cast5_ecb_dec_16way)
11179
11180@@ -469,6 +474,7 @@ ENTRY(cast5_cbc_dec_16way)
11181
11182 popq %r12;
11183
11184+ pax_force_retaddr
11185 ret;
11186 ENDPROC(cast5_cbc_dec_16way)
11187
11188@@ -542,5 +548,6 @@ ENTRY(cast5_ctr_16way)
11189
11190 popq %r12;
11191
11192+ pax_force_retaddr
11193 ret;
11194 ENDPROC(cast5_ctr_16way)
11195diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
11196index f93b610..c09bf40 100644
11197--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
11198+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
11199@@ -24,6 +24,7 @@
11200 */
11201
11202 #include <linux/linkage.h>
11203+#include <asm/alternative-asm.h>
11204 #include "glue_helper-asm-avx.S"
11205
11206 .file "cast6-avx-x86_64-asm_64.S"
11207@@ -293,6 +294,7 @@ __cast6_enc_blk8:
11208 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
11209 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
11210
11211+ pax_force_retaddr 0, 1
11212 ret;
11213 ENDPROC(__cast6_enc_blk8)
11214
11215@@ -338,6 +340,7 @@ __cast6_dec_blk8:
11216 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
11217 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
11218
11219+ pax_force_retaddr 0, 1
11220 ret;
11221 ENDPROC(__cast6_dec_blk8)
11222
11223@@ -356,6 +359,7 @@ ENTRY(cast6_ecb_enc_8way)
11224
11225 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11226
11227+ pax_force_retaddr
11228 ret;
11229 ENDPROC(cast6_ecb_enc_8way)
11230
11231@@ -374,6 +378,7 @@ ENTRY(cast6_ecb_dec_8way)
11232
11233 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11234
11235+ pax_force_retaddr
11236 ret;
11237 ENDPROC(cast6_ecb_dec_8way)
11238
11239@@ -397,6 +402,7 @@ ENTRY(cast6_cbc_dec_8way)
11240
11241 popq %r12;
11242
11243+ pax_force_retaddr
11244 ret;
11245 ENDPROC(cast6_cbc_dec_8way)
11246
11247@@ -422,5 +428,6 @@ ENTRY(cast6_ctr_8way)
11248
11249 popq %r12;
11250
11251+ pax_force_retaddr
11252 ret;
11253 ENDPROC(cast6_ctr_8way)
11254diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
11255index 9279e0b..9270820 100644
11256--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
11257+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
11258@@ -1,4 +1,5 @@
11259 #include <linux/linkage.h>
11260+#include <asm/alternative-asm.h>
11261
11262 # enter salsa20_encrypt_bytes
11263 ENTRY(salsa20_encrypt_bytes)
11264@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
11265 add %r11,%rsp
11266 mov %rdi,%rax
11267 mov %rsi,%rdx
11268+ pax_force_retaddr 0, 1
11269 ret
11270 # bytesatleast65:
11271 ._bytesatleast65:
11272@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
11273 add %r11,%rsp
11274 mov %rdi,%rax
11275 mov %rsi,%rdx
11276+ pax_force_retaddr
11277 ret
11278 ENDPROC(salsa20_keysetup)
11279
11280@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
11281 add %r11,%rsp
11282 mov %rdi,%rax
11283 mov %rsi,%rdx
11284+ pax_force_retaddr
11285 ret
11286 ENDPROC(salsa20_ivsetup)
11287diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
11288index 43c9386..a0e2d60 100644
11289--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
11290+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
11291@@ -25,6 +25,7 @@
11292 */
11293
11294 #include <linux/linkage.h>
11295+#include <asm/alternative-asm.h>
11296 #include "glue_helper-asm-avx.S"
11297
11298 .file "serpent-avx-x86_64-asm_64.S"
11299@@ -617,6 +618,7 @@ __serpent_enc_blk8_avx:
11300 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
11301 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
11302
11303+ pax_force_retaddr
11304 ret;
11305 ENDPROC(__serpent_enc_blk8_avx)
11306
11307@@ -671,6 +673,7 @@ __serpent_dec_blk8_avx:
11308 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
11309 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
11310
11311+ pax_force_retaddr
11312 ret;
11313 ENDPROC(__serpent_dec_blk8_avx)
11314
11315@@ -687,6 +690,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
11316
11317 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11318
11319+ pax_force_retaddr
11320 ret;
11321 ENDPROC(serpent_ecb_enc_8way_avx)
11322
11323@@ -703,6 +707,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
11324
11325 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
11326
11327+ pax_force_retaddr
11328 ret;
11329 ENDPROC(serpent_ecb_dec_8way_avx)
11330
11331@@ -719,6 +724,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
11332
11333 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
11334
11335+ pax_force_retaddr
11336 ret;
11337 ENDPROC(serpent_cbc_dec_8way_avx)
11338
11339@@ -737,5 +743,6 @@ ENTRY(serpent_ctr_8way_avx)
11340
11341 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11342
11343+ pax_force_retaddr
11344 ret;
11345 ENDPROC(serpent_ctr_8way_avx)
11346diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
11347index acc066c..1559cc4 100644
11348--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
11349+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
11350@@ -25,6 +25,7 @@
11351 */
11352
11353 #include <linux/linkage.h>
11354+#include <asm/alternative-asm.h>
11355
11356 .file "serpent-sse2-x86_64-asm_64.S"
11357 .text
11358@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
11359 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
11360 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
11361
11362+ pax_force_retaddr
11363 ret;
11364
11365 .L__enc_xor8:
11366 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
11367 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
11368
11369+ pax_force_retaddr
11370 ret;
11371 ENDPROC(__serpent_enc_blk_8way)
11372
11373@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
11374 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
11375 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
11376
11377+ pax_force_retaddr
11378 ret;
11379 ENDPROC(serpent_dec_blk_8way)
11380diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
11381index a410950..3356d42 100644
11382--- a/arch/x86/crypto/sha1_ssse3_asm.S
11383+++ b/arch/x86/crypto/sha1_ssse3_asm.S
11384@@ -29,6 +29,7 @@
11385 */
11386
11387 #include <linux/linkage.h>
11388+#include <asm/alternative-asm.h>
11389
11390 #define CTX %rdi // arg1
11391 #define BUF %rsi // arg2
11392@@ -104,6 +105,7 @@
11393 pop %r12
11394 pop %rbp
11395 pop %rbx
11396+ pax_force_retaddr 0, 1
11397 ret
11398
11399 ENDPROC(\name)
11400diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
11401index 8d3e113..898b161 100644
11402--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
11403+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
11404@@ -24,6 +24,7 @@
11405 */
11406
11407 #include <linux/linkage.h>
11408+#include <asm/alternative-asm.h>
11409 #include "glue_helper-asm-avx.S"
11410
11411 .file "twofish-avx-x86_64-asm_64.S"
11412@@ -282,6 +283,7 @@ __twofish_enc_blk8:
11413 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
11414 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
11415
11416+ pax_force_retaddr 0, 1
11417 ret;
11418 ENDPROC(__twofish_enc_blk8)
11419
11420@@ -322,6 +324,7 @@ __twofish_dec_blk8:
11421 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
11422 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
11423
11424+ pax_force_retaddr 0, 1
11425 ret;
11426 ENDPROC(__twofish_dec_blk8)
11427
11428@@ -340,6 +343,7 @@ ENTRY(twofish_ecb_enc_8way)
11429
11430 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
11431
11432+ pax_force_retaddr 0, 1
11433 ret;
11434 ENDPROC(twofish_ecb_enc_8way)
11435
11436@@ -358,6 +362,7 @@ ENTRY(twofish_ecb_dec_8way)
11437
11438 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11439
11440+ pax_force_retaddr 0, 1
11441 ret;
11442 ENDPROC(twofish_ecb_dec_8way)
11443
11444@@ -381,6 +386,7 @@ ENTRY(twofish_cbc_dec_8way)
11445
11446 popq %r12;
11447
11448+ pax_force_retaddr 0, 1
11449 ret;
11450 ENDPROC(twofish_cbc_dec_8way)
11451
11452@@ -406,5 +412,6 @@ ENTRY(twofish_ctr_8way)
11453
11454 popq %r12;
11455
11456+ pax_force_retaddr 0, 1
11457 ret;
11458 ENDPROC(twofish_ctr_8way)
11459diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
11460index 1c3b7ce..b365c5e 100644
11461--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
11462+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
11463@@ -21,6 +21,7 @@
11464 */
11465
11466 #include <linux/linkage.h>
11467+#include <asm/alternative-asm.h>
11468
11469 .file "twofish-x86_64-asm-3way.S"
11470 .text
11471@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
11472 popq %r13;
11473 popq %r14;
11474 popq %r15;
11475+ pax_force_retaddr 0, 1
11476 ret;
11477
11478 .L__enc_xor3:
11479@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
11480 popq %r13;
11481 popq %r14;
11482 popq %r15;
11483+ pax_force_retaddr 0, 1
11484 ret;
11485 ENDPROC(__twofish_enc_blk_3way)
11486
11487@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
11488 popq %r13;
11489 popq %r14;
11490 popq %r15;
11491+ pax_force_retaddr 0, 1
11492 ret;
11493 ENDPROC(twofish_dec_blk_3way)
11494diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
11495index a039d21..29e7615 100644
11496--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
11497+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
11498@@ -22,6 +22,7 @@
11499
11500 #include <linux/linkage.h>
11501 #include <asm/asm-offsets.h>
11502+#include <asm/alternative-asm.h>
11503
11504 #define a_offset 0
11505 #define b_offset 4
11506@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
11507
11508 popq R1
11509 movq $1,%rax
11510+ pax_force_retaddr 0, 1
11511 ret
11512 ENDPROC(twofish_enc_blk)
11513
11514@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
11515
11516 popq R1
11517 movq $1,%rax
11518+ pax_force_retaddr 0, 1
11519 ret
11520 ENDPROC(twofish_dec_blk)
11521diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
11522index 03abf9b..a42ba29 100644
11523--- a/arch/x86/ia32/ia32_aout.c
11524+++ b/arch/x86/ia32/ia32_aout.c
11525@@ -159,6 +159,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
11526 unsigned long dump_start, dump_size;
11527 struct user32 dump;
11528
11529+ memset(&dump, 0, sizeof(dump));
11530+
11531 fs = get_fs();
11532 set_fs(KERNEL_DS);
11533 has_dumped = 1;
11534diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
11535index cf1a471..3bc4cf8 100644
11536--- a/arch/x86/ia32/ia32_signal.c
11537+++ b/arch/x86/ia32/ia32_signal.c
11538@@ -340,7 +340,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
11539 sp -= frame_size;
11540 /* Align the stack pointer according to the i386 ABI,
11541 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
11542- sp = ((sp + 4) & -16ul) - 4;
11543+ sp = ((sp - 12) & -16ul) - 4;
11544 return (void __user *) sp;
11545 }
11546
11547@@ -398,7 +398,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
11548 * These are actually not used anymore, but left because some
11549 * gdb versions depend on them as a marker.
11550 */
11551- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
11552+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
11553 } put_user_catch(err);
11554
11555 if (err)
11556@@ -440,7 +440,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
11557 0xb8,
11558 __NR_ia32_rt_sigreturn,
11559 0x80cd,
11560- 0,
11561+ 0
11562 };
11563
11564 frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
11565@@ -463,16 +463,18 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
11566
11567 if (ksig->ka.sa.sa_flags & SA_RESTORER)
11568 restorer = ksig->ka.sa.sa_restorer;
11569+ else if (current->mm->context.vdso)
11570+ /* Return stub is in 32bit vsyscall page */
11571+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
11572 else
11573- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
11574- rt_sigreturn);
11575+ restorer = &frame->retcode;
11576 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
11577
11578 /*
11579 * Not actually used anymore, but left because some gdb
11580 * versions need it.
11581 */
11582- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
11583+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
11584 } put_user_catch(err);
11585
11586 err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
11587diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
11588index 474dc1b..be7bff5 100644
11589--- a/arch/x86/ia32/ia32entry.S
11590+++ b/arch/x86/ia32/ia32entry.S
11591@@ -15,8 +15,10 @@
11592 #include <asm/irqflags.h>
11593 #include <asm/asm.h>
11594 #include <asm/smap.h>
11595+#include <asm/pgtable.h>
11596 #include <linux/linkage.h>
11597 #include <linux/err.h>
11598+#include <asm/alternative-asm.h>
11599
11600 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
11601 #include <linux/elf-em.h>
11602@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
11603 ENDPROC(native_irq_enable_sysexit)
11604 #endif
11605
11606+ .macro pax_enter_kernel_user
11607+ pax_set_fptr_mask
11608+#ifdef CONFIG_PAX_MEMORY_UDEREF
11609+ call pax_enter_kernel_user
11610+#endif
11611+ .endm
11612+
11613+ .macro pax_exit_kernel_user
11614+#ifdef CONFIG_PAX_MEMORY_UDEREF
11615+ call pax_exit_kernel_user
11616+#endif
11617+#ifdef CONFIG_PAX_RANDKSTACK
11618+ pushq %rax
11619+ pushq %r11
11620+ call pax_randomize_kstack
11621+ popq %r11
11622+ popq %rax
11623+#endif
11624+ .endm
11625+
11626+.macro pax_erase_kstack
11627+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11628+ call pax_erase_kstack
11629+#endif
11630+.endm
11631+
11632 /*
11633 * 32bit SYSENTER instruction entry.
11634 *
11635@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
11636 CFI_REGISTER rsp,rbp
11637 SWAPGS_UNSAFE_STACK
11638 movq PER_CPU_VAR(kernel_stack), %rsp
11639- addq $(KERNEL_STACK_OFFSET),%rsp
11640- /*
11641- * No need to follow this irqs on/off section: the syscall
11642- * disabled irqs, here we enable it straight after entry:
11643- */
11644- ENABLE_INTERRUPTS(CLBR_NONE)
11645 movl %ebp,%ebp /* zero extension */
11646 pushq_cfi $__USER32_DS
11647 /*CFI_REL_OFFSET ss,0*/
11648@@ -135,24 +157,44 @@ ENTRY(ia32_sysenter_target)
11649 CFI_REL_OFFSET rsp,0
11650 pushfq_cfi
11651 /*CFI_REL_OFFSET rflags,0*/
11652- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
11653- CFI_REGISTER rip,r10
11654+ orl $X86_EFLAGS_IF,(%rsp)
11655+ GET_THREAD_INFO(%r11)
11656+ movl TI_sysenter_return(%r11), %r11d
11657+ CFI_REGISTER rip,r11
11658 pushq_cfi $__USER32_CS
11659 /*CFI_REL_OFFSET cs,0*/
11660 movl %eax, %eax
11661- pushq_cfi %r10
11662+ pushq_cfi %r11
11663 CFI_REL_OFFSET rip,0
11664 pushq_cfi %rax
11665 cld
11666 SAVE_ARGS 0,1,0
11667+ pax_enter_kernel_user
11668+
11669+#ifdef CONFIG_PAX_RANDKSTACK
11670+ pax_erase_kstack
11671+#endif
11672+
11673+ /*
11674+ * No need to follow this irqs on/off section: the syscall
11675+ * disabled irqs, here we enable it straight after entry:
11676+ */
11677+ ENABLE_INTERRUPTS(CLBR_NONE)
11678 /* no need to do an access_ok check here because rbp has been
11679 32bit zero extended */
11680+
11681+#ifdef CONFIG_PAX_MEMORY_UDEREF
11682+ mov pax_user_shadow_base,%r11
11683+ add %r11,%rbp
11684+#endif
11685+
11686 ASM_STAC
11687 1: movl (%rbp),%ebp
11688 _ASM_EXTABLE(1b,ia32_badarg)
11689 ASM_CLAC
11690- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11691- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11692+ GET_THREAD_INFO(%r11)
11693+ orl $TS_COMPAT,TI_status(%r11)
11694+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
11695 CFI_REMEMBER_STATE
11696 jnz sysenter_tracesys
11697 cmpq $(IA32_NR_syscalls-1),%rax
11698@@ -162,12 +204,15 @@ sysenter_do_call:
11699 sysenter_dispatch:
11700 call *ia32_sys_call_table(,%rax,8)
11701 movq %rax,RAX-ARGOFFSET(%rsp)
11702+ GET_THREAD_INFO(%r11)
11703 DISABLE_INTERRUPTS(CLBR_NONE)
11704 TRACE_IRQS_OFF
11705- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11706+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
11707 jnz sysexit_audit
11708 sysexit_from_sys_call:
11709- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11710+ pax_exit_kernel_user
11711+ pax_erase_kstack
11712+ andl $~TS_COMPAT,TI_status(%r11)
11713 /* clear IF, that popfq doesn't enable interrupts early */
11714 andl $~0x200,EFLAGS-R11(%rsp)
11715 movl RIP-R11(%rsp),%edx /* User %eip */
11716@@ -193,6 +238,9 @@ sysexit_from_sys_call:
11717 movl %eax,%esi /* 2nd arg: syscall number */
11718 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
11719 call __audit_syscall_entry
11720+
11721+ pax_erase_kstack
11722+
11723 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
11724 cmpq $(IA32_NR_syscalls-1),%rax
11725 ja ia32_badsys
11726@@ -204,7 +252,7 @@ sysexit_from_sys_call:
11727 .endm
11728
11729 .macro auditsys_exit exit
11730- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11731+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
11732 jnz ia32_ret_from_sys_call
11733 TRACE_IRQS_ON
11734 ENABLE_INTERRUPTS(CLBR_NONE)
11735@@ -215,11 +263,12 @@ sysexit_from_sys_call:
11736 1: setbe %al /* 1 if error, 0 if not */
11737 movzbl %al,%edi /* zero-extend that into %edi */
11738 call __audit_syscall_exit
11739+ GET_THREAD_INFO(%r11)
11740 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
11741 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
11742 DISABLE_INTERRUPTS(CLBR_NONE)
11743 TRACE_IRQS_OFF
11744- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11745+ testl %edi,TI_flags(%r11)
11746 jz \exit
11747 CLEAR_RREGS -ARGOFFSET
11748 jmp int_with_check
11749@@ -237,7 +286,7 @@ sysexit_audit:
11750
11751 sysenter_tracesys:
11752 #ifdef CONFIG_AUDITSYSCALL
11753- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11754+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
11755 jz sysenter_auditsys
11756 #endif
11757 SAVE_REST
11758@@ -249,6 +298,9 @@ sysenter_tracesys:
11759 RESTORE_REST
11760 cmpq $(IA32_NR_syscalls-1),%rax
11761 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
11762+
11763+ pax_erase_kstack
11764+
11765 jmp sysenter_do_call
11766 CFI_ENDPROC
11767 ENDPROC(ia32_sysenter_target)
11768@@ -276,19 +328,25 @@ ENDPROC(ia32_sysenter_target)
11769 ENTRY(ia32_cstar_target)
11770 CFI_STARTPROC32 simple
11771 CFI_SIGNAL_FRAME
11772- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
11773+ CFI_DEF_CFA rsp,0
11774 CFI_REGISTER rip,rcx
11775 /*CFI_REGISTER rflags,r11*/
11776 SWAPGS_UNSAFE_STACK
11777 movl %esp,%r8d
11778 CFI_REGISTER rsp,r8
11779 movq PER_CPU_VAR(kernel_stack),%rsp
11780+ SAVE_ARGS 8*6,0,0
11781+ pax_enter_kernel_user
11782+
11783+#ifdef CONFIG_PAX_RANDKSTACK
11784+ pax_erase_kstack
11785+#endif
11786+
11787 /*
11788 * No need to follow this irqs on/off section: the syscall
11789 * disabled irqs and here we enable it straight after entry:
11790 */
11791 ENABLE_INTERRUPTS(CLBR_NONE)
11792- SAVE_ARGS 8,0,0
11793 movl %eax,%eax /* zero extension */
11794 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
11795 movq %rcx,RIP-ARGOFFSET(%rsp)
11796@@ -304,12 +362,19 @@ ENTRY(ia32_cstar_target)
11797 /* no need to do an access_ok check here because r8 has been
11798 32bit zero extended */
11799 /* hardware stack frame is complete now */
11800+
11801+#ifdef CONFIG_PAX_MEMORY_UDEREF
11802+ mov pax_user_shadow_base,%r11
11803+ add %r11,%r8
11804+#endif
11805+
11806 ASM_STAC
11807 1: movl (%r8),%r9d
11808 _ASM_EXTABLE(1b,ia32_badarg)
11809 ASM_CLAC
11810- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11811- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11812+ GET_THREAD_INFO(%r11)
11813+ orl $TS_COMPAT,TI_status(%r11)
11814+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
11815 CFI_REMEMBER_STATE
11816 jnz cstar_tracesys
11817 cmpq $IA32_NR_syscalls-1,%rax
11818@@ -319,12 +384,15 @@ cstar_do_call:
11819 cstar_dispatch:
11820 call *ia32_sys_call_table(,%rax,8)
11821 movq %rax,RAX-ARGOFFSET(%rsp)
11822+ GET_THREAD_INFO(%r11)
11823 DISABLE_INTERRUPTS(CLBR_NONE)
11824 TRACE_IRQS_OFF
11825- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11826+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
11827 jnz sysretl_audit
11828 sysretl_from_sys_call:
11829- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11830+ pax_exit_kernel_user
11831+ pax_erase_kstack
11832+ andl $~TS_COMPAT,TI_status(%r11)
11833 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
11834 movl RIP-ARGOFFSET(%rsp),%ecx
11835 CFI_REGISTER rip,rcx
11836@@ -352,7 +420,7 @@ sysretl_audit:
11837
11838 cstar_tracesys:
11839 #ifdef CONFIG_AUDITSYSCALL
11840- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11841+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
11842 jz cstar_auditsys
11843 #endif
11844 xchgl %r9d,%ebp
11845@@ -366,6 +434,9 @@ cstar_tracesys:
11846 xchgl %ebp,%r9d
11847 cmpq $(IA32_NR_syscalls-1),%rax
11848 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
11849+
11850+ pax_erase_kstack
11851+
11852 jmp cstar_do_call
11853 END(ia32_cstar_target)
11854
11855@@ -407,19 +478,26 @@ ENTRY(ia32_syscall)
11856 CFI_REL_OFFSET rip,RIP-RIP
11857 PARAVIRT_ADJUST_EXCEPTION_FRAME
11858 SWAPGS
11859- /*
11860- * No need to follow this irqs on/off section: the syscall
11861- * disabled irqs and here we enable it straight after entry:
11862- */
11863- ENABLE_INTERRUPTS(CLBR_NONE)
11864 movl %eax,%eax
11865 pushq_cfi %rax
11866 cld
11867 /* note the registers are not zero extended to the sf.
11868 this could be a problem. */
11869 SAVE_ARGS 0,1,0
11870- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11871- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11872+ pax_enter_kernel_user
11873+
11874+#ifdef CONFIG_PAX_RANDKSTACK
11875+ pax_erase_kstack
11876+#endif
11877+
11878+ /*
11879+ * No need to follow this irqs on/off section: the syscall
11880+ * disabled irqs and here we enable it straight after entry:
11881+ */
11882+ ENABLE_INTERRUPTS(CLBR_NONE)
11883+ GET_THREAD_INFO(%r11)
11884+ orl $TS_COMPAT,TI_status(%r11)
11885+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
11886 jnz ia32_tracesys
11887 cmpq $(IA32_NR_syscalls-1),%rax
11888 ja ia32_badsys
11889@@ -442,6 +520,9 @@ ia32_tracesys:
11890 RESTORE_REST
11891 cmpq $(IA32_NR_syscalls-1),%rax
11892 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
11893+
11894+ pax_erase_kstack
11895+
11896 jmp ia32_do_call
11897 END(ia32_syscall)
11898
11899diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
11900index ad7a20c..1ffa3c1 100644
11901--- a/arch/x86/ia32/sys_ia32.c
11902+++ b/arch/x86/ia32/sys_ia32.c
11903@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
11904 */
11905 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
11906 {
11907- typeof(ubuf->st_uid) uid = 0;
11908- typeof(ubuf->st_gid) gid = 0;
11909+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
11910+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
11911 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
11912 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
11913 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
11914@@ -205,7 +205,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
11915 return -EFAULT;
11916
11917 set_fs(KERNEL_DS);
11918- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
11919+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
11920 count);
11921 set_fs(old_fs);
11922
11923diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
11924index 372231c..a5aa1a1 100644
11925--- a/arch/x86/include/asm/alternative-asm.h
11926+++ b/arch/x86/include/asm/alternative-asm.h
11927@@ -18,6 +18,45 @@
11928 .endm
11929 #endif
11930
11931+#ifdef KERNEXEC_PLUGIN
11932+ .macro pax_force_retaddr_bts rip=0
11933+ btsq $63,\rip(%rsp)
11934+ .endm
11935+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
11936+ .macro pax_force_retaddr rip=0, reload=0
11937+ btsq $63,\rip(%rsp)
11938+ .endm
11939+ .macro pax_force_fptr ptr
11940+ btsq $63,\ptr
11941+ .endm
11942+ .macro pax_set_fptr_mask
11943+ .endm
11944+#endif
11945+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
11946+ .macro pax_force_retaddr rip=0, reload=0
11947+ .if \reload
11948+ pax_set_fptr_mask
11949+ .endif
11950+ orq %r10,\rip(%rsp)
11951+ .endm
11952+ .macro pax_force_fptr ptr
11953+ orq %r10,\ptr
11954+ .endm
11955+ .macro pax_set_fptr_mask
11956+ movabs $0x8000000000000000,%r10
11957+ .endm
11958+#endif
11959+#else
11960+ .macro pax_force_retaddr rip=0, reload=0
11961+ .endm
11962+ .macro pax_force_fptr ptr
11963+ .endm
11964+ .macro pax_force_retaddr_bts rip=0
11965+ .endm
11966+ .macro pax_set_fptr_mask
11967+ .endm
11968+#endif
11969+
11970 .macro altinstruction_entry orig alt feature orig_len alt_len
11971 .long \orig - .
11972 .long \alt - .
11973diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
11974index 58ed6d9..f1cbe58 100644
11975--- a/arch/x86/include/asm/alternative.h
11976+++ b/arch/x86/include/asm/alternative.h
11977@@ -105,7 +105,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
11978 ".pushsection .discard,\"aw\",@progbits\n" \
11979 DISCARD_ENTRY(1) \
11980 ".popsection\n" \
11981- ".pushsection .altinstr_replacement, \"ax\"\n" \
11982+ ".pushsection .altinstr_replacement, \"a\"\n" \
11983 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
11984 ".popsection"
11985
11986@@ -119,7 +119,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
11987 DISCARD_ENTRY(1) \
11988 DISCARD_ENTRY(2) \
11989 ".popsection\n" \
11990- ".pushsection .altinstr_replacement, \"ax\"\n" \
11991+ ".pushsection .altinstr_replacement, \"a\"\n" \
11992 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
11993 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
11994 ".popsection"
11995diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
11996index 3388034..050f0b9 100644
11997--- a/arch/x86/include/asm/apic.h
11998+++ b/arch/x86/include/asm/apic.h
11999@@ -44,7 +44,7 @@ static inline void generic_apic_probe(void)
12000
12001 #ifdef CONFIG_X86_LOCAL_APIC
12002
12003-extern unsigned int apic_verbosity;
12004+extern int apic_verbosity;
12005 extern int local_apic_timer_c2_ok;
12006
12007 extern int disable_apic;
12008diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
12009index 20370c6..a2eb9b0 100644
12010--- a/arch/x86/include/asm/apm.h
12011+++ b/arch/x86/include/asm/apm.h
12012@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
12013 __asm__ __volatile__(APM_DO_ZERO_SEGS
12014 "pushl %%edi\n\t"
12015 "pushl %%ebp\n\t"
12016- "lcall *%%cs:apm_bios_entry\n\t"
12017+ "lcall *%%ss:apm_bios_entry\n\t"
12018 "setc %%al\n\t"
12019 "popl %%ebp\n\t"
12020 "popl %%edi\n\t"
12021@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
12022 __asm__ __volatile__(APM_DO_ZERO_SEGS
12023 "pushl %%edi\n\t"
12024 "pushl %%ebp\n\t"
12025- "lcall *%%cs:apm_bios_entry\n\t"
12026+ "lcall *%%ss:apm_bios_entry\n\t"
12027 "setc %%bl\n\t"
12028 "popl %%ebp\n\t"
12029 "popl %%edi\n\t"
12030diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
12031index 722aa3b..3a0bb27 100644
12032--- a/arch/x86/include/asm/atomic.h
12033+++ b/arch/x86/include/asm/atomic.h
12034@@ -22,7 +22,18 @@
12035 */
12036 static inline int atomic_read(const atomic_t *v)
12037 {
12038- return (*(volatile int *)&(v)->counter);
12039+ return (*(volatile const int *)&(v)->counter);
12040+}
12041+
12042+/**
12043+ * atomic_read_unchecked - read atomic variable
12044+ * @v: pointer of type atomic_unchecked_t
12045+ *
12046+ * Atomically reads the value of @v.
12047+ */
12048+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
12049+{
12050+ return (*(volatile const int *)&(v)->counter);
12051 }
12052
12053 /**
12054@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
12055 }
12056
12057 /**
12058+ * atomic_set_unchecked - set atomic variable
12059+ * @v: pointer of type atomic_unchecked_t
12060+ * @i: required value
12061+ *
12062+ * Atomically sets the value of @v to @i.
12063+ */
12064+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
12065+{
12066+ v->counter = i;
12067+}
12068+
12069+/**
12070 * atomic_add - add integer to atomic variable
12071 * @i: integer value to add
12072 * @v: pointer of type atomic_t
12073@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
12074 */
12075 static inline void atomic_add(int i, atomic_t *v)
12076 {
12077- asm volatile(LOCK_PREFIX "addl %1,%0"
12078+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
12079+
12080+#ifdef CONFIG_PAX_REFCOUNT
12081+ "jno 0f\n"
12082+ LOCK_PREFIX "subl %1,%0\n"
12083+ "int $4\n0:\n"
12084+ _ASM_EXTABLE(0b, 0b)
12085+#endif
12086+
12087+ : "+m" (v->counter)
12088+ : "ir" (i));
12089+}
12090+
12091+/**
12092+ * atomic_add_unchecked - add integer to atomic variable
12093+ * @i: integer value to add
12094+ * @v: pointer of type atomic_unchecked_t
12095+ *
12096+ * Atomically adds @i to @v.
12097+ */
12098+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
12099+{
12100+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
12101 : "+m" (v->counter)
12102 : "ir" (i));
12103 }
12104@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
12105 */
12106 static inline void atomic_sub(int i, atomic_t *v)
12107 {
12108- asm volatile(LOCK_PREFIX "subl %1,%0"
12109+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
12110+
12111+#ifdef CONFIG_PAX_REFCOUNT
12112+ "jno 0f\n"
12113+ LOCK_PREFIX "addl %1,%0\n"
12114+ "int $4\n0:\n"
12115+ _ASM_EXTABLE(0b, 0b)
12116+#endif
12117+
12118+ : "+m" (v->counter)
12119+ : "ir" (i));
12120+}
12121+
12122+/**
12123+ * atomic_sub_unchecked - subtract integer from atomic variable
12124+ * @i: integer value to subtract
12125+ * @v: pointer of type atomic_unchecked_t
12126+ *
12127+ * Atomically subtracts @i from @v.
12128+ */
12129+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
12130+{
12131+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
12132 : "+m" (v->counter)
12133 : "ir" (i));
12134 }
12135@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
12136 {
12137 unsigned char c;
12138
12139- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
12140+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
12141+
12142+#ifdef CONFIG_PAX_REFCOUNT
12143+ "jno 0f\n"
12144+ LOCK_PREFIX "addl %2,%0\n"
12145+ "int $4\n0:\n"
12146+ _ASM_EXTABLE(0b, 0b)
12147+#endif
12148+
12149+ "sete %1\n"
12150 : "+m" (v->counter), "=qm" (c)
12151 : "ir" (i) : "memory");
12152 return c;
12153@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
12154 */
12155 static inline void atomic_inc(atomic_t *v)
12156 {
12157- asm volatile(LOCK_PREFIX "incl %0"
12158+ asm volatile(LOCK_PREFIX "incl %0\n"
12159+
12160+#ifdef CONFIG_PAX_REFCOUNT
12161+ "jno 0f\n"
12162+ LOCK_PREFIX "decl %0\n"
12163+ "int $4\n0:\n"
12164+ _ASM_EXTABLE(0b, 0b)
12165+#endif
12166+
12167+ : "+m" (v->counter));
12168+}
12169+
12170+/**
12171+ * atomic_inc_unchecked - increment atomic variable
12172+ * @v: pointer of type atomic_unchecked_t
12173+ *
12174+ * Atomically increments @v by 1.
12175+ */
12176+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
12177+{
12178+ asm volatile(LOCK_PREFIX "incl %0\n"
12179 : "+m" (v->counter));
12180 }
12181
12182@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
12183 */
12184 static inline void atomic_dec(atomic_t *v)
12185 {
12186- asm volatile(LOCK_PREFIX "decl %0"
12187+ asm volatile(LOCK_PREFIX "decl %0\n"
12188+
12189+#ifdef CONFIG_PAX_REFCOUNT
12190+ "jno 0f\n"
12191+ LOCK_PREFIX "incl %0\n"
12192+ "int $4\n0:\n"
12193+ _ASM_EXTABLE(0b, 0b)
12194+#endif
12195+
12196+ : "+m" (v->counter));
12197+}
12198+
12199+/**
12200+ * atomic_dec_unchecked - decrement atomic variable
12201+ * @v: pointer of type atomic_unchecked_t
12202+ *
12203+ * Atomically decrements @v by 1.
12204+ */
12205+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
12206+{
12207+ asm volatile(LOCK_PREFIX "decl %0\n"
12208 : "+m" (v->counter));
12209 }
12210
12211@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
12212 {
12213 unsigned char c;
12214
12215- asm volatile(LOCK_PREFIX "decl %0; sete %1"
12216+ asm volatile(LOCK_PREFIX "decl %0\n"
12217+
12218+#ifdef CONFIG_PAX_REFCOUNT
12219+ "jno 0f\n"
12220+ LOCK_PREFIX "incl %0\n"
12221+ "int $4\n0:\n"
12222+ _ASM_EXTABLE(0b, 0b)
12223+#endif
12224+
12225+ "sete %1\n"
12226 : "+m" (v->counter), "=qm" (c)
12227 : : "memory");
12228 return c != 0;
12229@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
12230 {
12231 unsigned char c;
12232
12233- asm volatile(LOCK_PREFIX "incl %0; sete %1"
12234+ asm volatile(LOCK_PREFIX "incl %0\n"
12235+
12236+#ifdef CONFIG_PAX_REFCOUNT
12237+ "jno 0f\n"
12238+ LOCK_PREFIX "decl %0\n"
12239+ "int $4\n0:\n"
12240+ _ASM_EXTABLE(0b, 0b)
12241+#endif
12242+
12243+ "sete %1\n"
12244+ : "+m" (v->counter), "=qm" (c)
12245+ : : "memory");
12246+ return c != 0;
12247+}
12248+
12249+/**
12250+ * atomic_inc_and_test_unchecked - increment and test
12251+ * @v: pointer of type atomic_unchecked_t
12252+ *
12253+ * Atomically increments @v by 1
12254+ * and returns true if the result is zero, or false for all
12255+ * other cases.
12256+ */
12257+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
12258+{
12259+ unsigned char c;
12260+
12261+ asm volatile(LOCK_PREFIX "incl %0\n"
12262+ "sete %1\n"
12263 : "+m" (v->counter), "=qm" (c)
12264 : : "memory");
12265 return c != 0;
12266@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
12267 {
12268 unsigned char c;
12269
12270- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
12271+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
12272+
12273+#ifdef CONFIG_PAX_REFCOUNT
12274+ "jno 0f\n"
12275+ LOCK_PREFIX "subl %2,%0\n"
12276+ "int $4\n0:\n"
12277+ _ASM_EXTABLE(0b, 0b)
12278+#endif
12279+
12280+ "sets %1\n"
12281 : "+m" (v->counter), "=qm" (c)
12282 : "ir" (i) : "memory");
12283 return c;
12284@@ -172,6 +334,18 @@ static inline int atomic_add_negative(int i, atomic_t *v)
12285 */
12286 static inline int atomic_add_return(int i, atomic_t *v)
12287 {
12288+ return i + xadd_check_overflow(&v->counter, i);
12289+}
12290+
12291+/**
12292+ * atomic_add_return_unchecked - add integer and return
12293+ * @i: integer value to add
12294+ * @v: pointer of type atomic_unchecked_t
12295+ *
12296+ * Atomically adds @i to @v and returns @i + @v
12297+ */
12298+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
12299+{
12300 return i + xadd(&v->counter, i);
12301 }
12302
12303@@ -188,6 +362,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
12304 }
12305
12306 #define atomic_inc_return(v) (atomic_add_return(1, v))
12307+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
12308+{
12309+ return atomic_add_return_unchecked(1, v);
12310+}
12311 #define atomic_dec_return(v) (atomic_sub_return(1, v))
12312
12313 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
12314@@ -195,11 +373,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
12315 return cmpxchg(&v->counter, old, new);
12316 }
12317
12318+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
12319+{
12320+ return cmpxchg(&v->counter, old, new);
12321+}
12322+
12323 static inline int atomic_xchg(atomic_t *v, int new)
12324 {
12325 return xchg(&v->counter, new);
12326 }
12327
12328+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
12329+{
12330+ return xchg(&v->counter, new);
12331+}
12332+
12333 /**
12334 * __atomic_add_unless - add unless the number is already a given value
12335 * @v: pointer of type atomic_t
12336@@ -211,12 +399,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
12337 */
12338 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
12339 {
12340- int c, old;
12341+ int c, old, new;
12342 c = atomic_read(v);
12343 for (;;) {
12344- if (unlikely(c == (u)))
12345+ if (unlikely(c == u))
12346 break;
12347- old = atomic_cmpxchg((v), c, c + (a));
12348+
12349+ asm volatile("addl %2,%0\n"
12350+
12351+#ifdef CONFIG_PAX_REFCOUNT
12352+ "jno 0f\n"
12353+ "subl %2,%0\n"
12354+ "int $4\n0:\n"
12355+ _ASM_EXTABLE(0b, 0b)
12356+#endif
12357+
12358+ : "=r" (new)
12359+ : "0" (c), "ir" (a));
12360+
12361+ old = atomic_cmpxchg(v, c, new);
12362 if (likely(old == c))
12363 break;
12364 c = old;
12365@@ -225,6 +426,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
12366 }
12367
12368 /**
12369+ * atomic_inc_not_zero_hint - increment if not null
12370+ * @v: pointer of type atomic_t
12371+ * @hint: probable value of the atomic before the increment
12372+ *
12373+ * This version of atomic_inc_not_zero() gives a hint of probable
12374+ * value of the atomic. This helps processor to not read the memory
12375+ * before doing the atomic read/modify/write cycle, lowering
12376+ * number of bus transactions on some arches.
12377+ *
12378+ * Returns: 0 if increment was not done, 1 otherwise.
12379+ */
12380+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
12381+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
12382+{
12383+ int val, c = hint, new;
12384+
12385+ /* sanity test, should be removed by compiler if hint is a constant */
12386+ if (!hint)
12387+ return __atomic_add_unless(v, 1, 0);
12388+
12389+ do {
12390+ asm volatile("incl %0\n"
12391+
12392+#ifdef CONFIG_PAX_REFCOUNT
12393+ "jno 0f\n"
12394+ "decl %0\n"
12395+ "int $4\n0:\n"
12396+ _ASM_EXTABLE(0b, 0b)
12397+#endif
12398+
12399+ : "=r" (new)
12400+ : "0" (c));
12401+
12402+ val = atomic_cmpxchg(v, c, new);
12403+ if (val == c)
12404+ return 1;
12405+ c = val;
12406+ } while (c);
12407+
12408+ return 0;
12409+}
12410+
12411+/**
12412 * atomic_inc_short - increment of a short integer
12413 * @v: pointer to type int
12414 *
12415@@ -253,14 +497,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
12416 #endif
12417
12418 /* These are x86-specific, used by some header files */
12419-#define atomic_clear_mask(mask, addr) \
12420- asm volatile(LOCK_PREFIX "andl %0,%1" \
12421- : : "r" (~(mask)), "m" (*(addr)) : "memory")
12422+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
12423+{
12424+ asm volatile(LOCK_PREFIX "andl %1,%0"
12425+ : "+m" (v->counter)
12426+ : "r" (~(mask))
12427+ : "memory");
12428+}
12429
12430-#define atomic_set_mask(mask, addr) \
12431- asm volatile(LOCK_PREFIX "orl %0,%1" \
12432- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
12433- : "memory")
12434+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
12435+{
12436+ asm volatile(LOCK_PREFIX "andl %1,%0"
12437+ : "+m" (v->counter)
12438+ : "r" (~(mask))
12439+ : "memory");
12440+}
12441+
12442+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
12443+{
12444+ asm volatile(LOCK_PREFIX "orl %1,%0"
12445+ : "+m" (v->counter)
12446+ : "r" (mask)
12447+ : "memory");
12448+}
12449+
12450+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
12451+{
12452+ asm volatile(LOCK_PREFIX "orl %1,%0"
12453+ : "+m" (v->counter)
12454+ : "r" (mask)
12455+ : "memory");
12456+}
12457
12458 /* Atomic operations are already serializing on x86 */
12459 #define smp_mb__before_atomic_dec() barrier()
12460diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
12461index b154de7..aadebd8 100644
12462--- a/arch/x86/include/asm/atomic64_32.h
12463+++ b/arch/x86/include/asm/atomic64_32.h
12464@@ -12,6 +12,14 @@ typedef struct {
12465 u64 __aligned(8) counter;
12466 } atomic64_t;
12467
12468+#ifdef CONFIG_PAX_REFCOUNT
12469+typedef struct {
12470+ u64 __aligned(8) counter;
12471+} atomic64_unchecked_t;
12472+#else
12473+typedef atomic64_t atomic64_unchecked_t;
12474+#endif
12475+
12476 #define ATOMIC64_INIT(val) { (val) }
12477
12478 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
12479@@ -37,21 +45,31 @@ typedef struct {
12480 ATOMIC64_DECL_ONE(sym##_386)
12481
12482 ATOMIC64_DECL_ONE(add_386);
12483+ATOMIC64_DECL_ONE(add_unchecked_386);
12484 ATOMIC64_DECL_ONE(sub_386);
12485+ATOMIC64_DECL_ONE(sub_unchecked_386);
12486 ATOMIC64_DECL_ONE(inc_386);
12487+ATOMIC64_DECL_ONE(inc_unchecked_386);
12488 ATOMIC64_DECL_ONE(dec_386);
12489+ATOMIC64_DECL_ONE(dec_unchecked_386);
12490 #endif
12491
12492 #define alternative_atomic64(f, out, in...) \
12493 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
12494
12495 ATOMIC64_DECL(read);
12496+ATOMIC64_DECL(read_unchecked);
12497 ATOMIC64_DECL(set);
12498+ATOMIC64_DECL(set_unchecked);
12499 ATOMIC64_DECL(xchg);
12500 ATOMIC64_DECL(add_return);
12501+ATOMIC64_DECL(add_return_unchecked);
12502 ATOMIC64_DECL(sub_return);
12503+ATOMIC64_DECL(sub_return_unchecked);
12504 ATOMIC64_DECL(inc_return);
12505+ATOMIC64_DECL(inc_return_unchecked);
12506 ATOMIC64_DECL(dec_return);
12507+ATOMIC64_DECL(dec_return_unchecked);
12508 ATOMIC64_DECL(dec_if_positive);
12509 ATOMIC64_DECL(inc_not_zero);
12510 ATOMIC64_DECL(add_unless);
12511@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
12512 }
12513
12514 /**
12515+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
12516+ * @p: pointer to type atomic64_unchecked_t
12517+ * @o: expected value
12518+ * @n: new value
12519+ *
12520+ * Atomically sets @v to @n if it was equal to @o and returns
12521+ * the old value.
12522+ */
12523+
12524+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
12525+{
12526+ return cmpxchg64(&v->counter, o, n);
12527+}
12528+
12529+/**
12530 * atomic64_xchg - xchg atomic64 variable
12531 * @v: pointer to type atomic64_t
12532 * @n: value to assign
12533@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
12534 }
12535
12536 /**
12537+ * atomic64_set_unchecked - set atomic64 variable
12538+ * @v: pointer to type atomic64_unchecked_t
12539+ * @n: value to assign
12540+ *
12541+ * Atomically sets the value of @v to @n.
12542+ */
12543+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
12544+{
12545+ unsigned high = (unsigned)(i >> 32);
12546+ unsigned low = (unsigned)i;
12547+ alternative_atomic64(set, /* no output */,
12548+ "S" (v), "b" (low), "c" (high)
12549+ : "eax", "edx", "memory");
12550+}
12551+
12552+/**
12553 * atomic64_read - read atomic64 variable
12554 * @v: pointer to type atomic64_t
12555 *
12556@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
12557 }
12558
12559 /**
12560+ * atomic64_read_unchecked - read atomic64 variable
12561+ * @v: pointer to type atomic64_unchecked_t
12562+ *
12563+ * Atomically reads the value of @v and returns it.
12564+ */
12565+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
12566+{
12567+ long long r;
12568+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
12569+ return r;
12570+ }
12571+
12572+/**
12573 * atomic64_add_return - add and return
12574 * @i: integer value to add
12575 * @v: pointer to type atomic64_t
12576@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
12577 return i;
12578 }
12579
12580+/**
12581+ * atomic64_add_return_unchecked - add and return
12582+ * @i: integer value to add
12583+ * @v: pointer to type atomic64_unchecked_t
12584+ *
12585+ * Atomically adds @i to @v and returns @i + *@v
12586+ */
12587+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
12588+{
12589+ alternative_atomic64(add_return_unchecked,
12590+ ASM_OUTPUT2("+A" (i), "+c" (v)),
12591+ ASM_NO_INPUT_CLOBBER("memory"));
12592+ return i;
12593+}
12594+
12595 /*
12596 * Other variants with different arithmetic operators:
12597 */
12598@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
12599 return a;
12600 }
12601
12602+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
12603+{
12604+ long long a;
12605+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
12606+ "S" (v) : "memory", "ecx");
12607+ return a;
12608+}
12609+
12610 static inline long long atomic64_dec_return(atomic64_t *v)
12611 {
12612 long long a;
12613@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
12614 }
12615
12616 /**
12617+ * atomic64_add_unchecked - add integer to atomic64 variable
12618+ * @i: integer value to add
12619+ * @v: pointer to type atomic64_unchecked_t
12620+ *
12621+ * Atomically adds @i to @v.
12622+ */
12623+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
12624+{
12625+ __alternative_atomic64(add_unchecked, add_return_unchecked,
12626+ ASM_OUTPUT2("+A" (i), "+c" (v)),
12627+ ASM_NO_INPUT_CLOBBER("memory"));
12628+ return i;
12629+}
12630+
12631+/**
12632 * atomic64_sub - subtract the atomic64 variable
12633 * @i: integer value to subtract
12634 * @v: pointer to type atomic64_t
12635diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
12636index 0e1cbfc..5623683 100644
12637--- a/arch/x86/include/asm/atomic64_64.h
12638+++ b/arch/x86/include/asm/atomic64_64.h
12639@@ -18,7 +18,19 @@
12640 */
12641 static inline long atomic64_read(const atomic64_t *v)
12642 {
12643- return (*(volatile long *)&(v)->counter);
12644+ return (*(volatile const long *)&(v)->counter);
12645+}
12646+
12647+/**
12648+ * atomic64_read_unchecked - read atomic64 variable
12649+ * @v: pointer of type atomic64_unchecked_t
12650+ *
12651+ * Atomically reads the value of @v.
12652+ * Doesn't imply a read memory barrier.
12653+ */
12654+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
12655+{
12656+ return (*(volatile const long *)&(v)->counter);
12657 }
12658
12659 /**
12660@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
12661 }
12662
12663 /**
12664+ * atomic64_set_unchecked - set atomic64 variable
12665+ * @v: pointer to type atomic64_unchecked_t
12666+ * @i: required value
12667+ *
12668+ * Atomically sets the value of @v to @i.
12669+ */
12670+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
12671+{
12672+ v->counter = i;
12673+}
12674+
12675+/**
12676 * atomic64_add - add integer to atomic64 variable
12677 * @i: integer value to add
12678 * @v: pointer to type atomic64_t
12679@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
12680 */
12681 static inline void atomic64_add(long i, atomic64_t *v)
12682 {
12683+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
12684+
12685+#ifdef CONFIG_PAX_REFCOUNT
12686+ "jno 0f\n"
12687+ LOCK_PREFIX "subq %1,%0\n"
12688+ "int $4\n0:\n"
12689+ _ASM_EXTABLE(0b, 0b)
12690+#endif
12691+
12692+ : "=m" (v->counter)
12693+ : "er" (i), "m" (v->counter));
12694+}
12695+
12696+/**
12697+ * atomic64_add_unchecked - add integer to atomic64 variable
12698+ * @i: integer value to add
12699+ * @v: pointer to type atomic64_unchecked_t
12700+ *
12701+ * Atomically adds @i to @v.
12702+ */
12703+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
12704+{
12705 asm volatile(LOCK_PREFIX "addq %1,%0"
12706 : "=m" (v->counter)
12707 : "er" (i), "m" (v->counter));
12708@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
12709 */
12710 static inline void atomic64_sub(long i, atomic64_t *v)
12711 {
12712- asm volatile(LOCK_PREFIX "subq %1,%0"
12713+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
12714+
12715+#ifdef CONFIG_PAX_REFCOUNT
12716+ "jno 0f\n"
12717+ LOCK_PREFIX "addq %1,%0\n"
12718+ "int $4\n0:\n"
12719+ _ASM_EXTABLE(0b, 0b)
12720+#endif
12721+
12722+ : "=m" (v->counter)
12723+ : "er" (i), "m" (v->counter));
12724+}
12725+
12726+/**
12727+ * atomic64_sub_unchecked - subtract the atomic64 variable
12728+ * @i: integer value to subtract
12729+ * @v: pointer to type atomic64_unchecked_t
12730+ *
12731+ * Atomically subtracts @i from @v.
12732+ */
12733+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
12734+{
12735+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
12736 : "=m" (v->counter)
12737 : "er" (i), "m" (v->counter));
12738 }
12739@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
12740 {
12741 unsigned char c;
12742
12743- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
12744+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
12745+
12746+#ifdef CONFIG_PAX_REFCOUNT
12747+ "jno 0f\n"
12748+ LOCK_PREFIX "addq %2,%0\n"
12749+ "int $4\n0:\n"
12750+ _ASM_EXTABLE(0b, 0b)
12751+#endif
12752+
12753+ "sete %1\n"
12754 : "=m" (v->counter), "=qm" (c)
12755 : "er" (i), "m" (v->counter) : "memory");
12756 return c;
12757@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
12758 */
12759 static inline void atomic64_inc(atomic64_t *v)
12760 {
12761+ asm volatile(LOCK_PREFIX "incq %0\n"
12762+
12763+#ifdef CONFIG_PAX_REFCOUNT
12764+ "jno 0f\n"
12765+ LOCK_PREFIX "decq %0\n"
12766+ "int $4\n0:\n"
12767+ _ASM_EXTABLE(0b, 0b)
12768+#endif
12769+
12770+ : "=m" (v->counter)
12771+ : "m" (v->counter));
12772+}
12773+
12774+/**
12775+ * atomic64_inc_unchecked - increment atomic64 variable
12776+ * @v: pointer to type atomic64_unchecked_t
12777+ *
12778+ * Atomically increments @v by 1.
12779+ */
12780+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
12781+{
12782 asm volatile(LOCK_PREFIX "incq %0"
12783 : "=m" (v->counter)
12784 : "m" (v->counter));
12785@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
12786 */
12787 static inline void atomic64_dec(atomic64_t *v)
12788 {
12789- asm volatile(LOCK_PREFIX "decq %0"
12790+ asm volatile(LOCK_PREFIX "decq %0\n"
12791+
12792+#ifdef CONFIG_PAX_REFCOUNT
12793+ "jno 0f\n"
12794+ LOCK_PREFIX "incq %0\n"
12795+ "int $4\n0:\n"
12796+ _ASM_EXTABLE(0b, 0b)
12797+#endif
12798+
12799+ : "=m" (v->counter)
12800+ : "m" (v->counter));
12801+}
12802+
12803+/**
12804+ * atomic64_dec_unchecked - decrement atomic64 variable
12805+ * @v: pointer to type atomic64_t
12806+ *
12807+ * Atomically decrements @v by 1.
12808+ */
12809+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
12810+{
12811+ asm volatile(LOCK_PREFIX "decq %0\n"
12812 : "=m" (v->counter)
12813 : "m" (v->counter));
12814 }
12815@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
12816 {
12817 unsigned char c;
12818
12819- asm volatile(LOCK_PREFIX "decq %0; sete %1"
12820+ asm volatile(LOCK_PREFIX "decq %0\n"
12821+
12822+#ifdef CONFIG_PAX_REFCOUNT
12823+ "jno 0f\n"
12824+ LOCK_PREFIX "incq %0\n"
12825+ "int $4\n0:\n"
12826+ _ASM_EXTABLE(0b, 0b)
12827+#endif
12828+
12829+ "sete %1\n"
12830 : "=m" (v->counter), "=qm" (c)
12831 : "m" (v->counter) : "memory");
12832 return c != 0;
12833@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
12834 {
12835 unsigned char c;
12836
12837- asm volatile(LOCK_PREFIX "incq %0; sete %1"
12838+ asm volatile(LOCK_PREFIX "incq %0\n"
12839+
12840+#ifdef CONFIG_PAX_REFCOUNT
12841+ "jno 0f\n"
12842+ LOCK_PREFIX "decq %0\n"
12843+ "int $4\n0:\n"
12844+ _ASM_EXTABLE(0b, 0b)
12845+#endif
12846+
12847+ "sete %1\n"
12848 : "=m" (v->counter), "=qm" (c)
12849 : "m" (v->counter) : "memory");
12850 return c != 0;
12851@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
12852 {
12853 unsigned char c;
12854
12855- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
12856+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
12857+
12858+#ifdef CONFIG_PAX_REFCOUNT
12859+ "jno 0f\n"
12860+ LOCK_PREFIX "subq %2,%0\n"
12861+ "int $4\n0:\n"
12862+ _ASM_EXTABLE(0b, 0b)
12863+#endif
12864+
12865+ "sets %1\n"
12866 : "=m" (v->counter), "=qm" (c)
12867 : "er" (i), "m" (v->counter) : "memory");
12868 return c;
12869@@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
12870 */
12871 static inline long atomic64_add_return(long i, atomic64_t *v)
12872 {
12873+ return i + xadd_check_overflow(&v->counter, i);
12874+}
12875+
12876+/**
12877+ * atomic64_add_return_unchecked - add and return
12878+ * @i: integer value to add
12879+ * @v: pointer to type atomic64_unchecked_t
12880+ *
12881+ * Atomically adds @i to @v and returns @i + @v
12882+ */
12883+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
12884+{
12885 return i + xadd(&v->counter, i);
12886 }
12887
12888@@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
12889 }
12890
12891 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
12892+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
12893+{
12894+ return atomic64_add_return_unchecked(1, v);
12895+}
12896 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
12897
12898 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
12899@@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
12900 return cmpxchg(&v->counter, old, new);
12901 }
12902
12903+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
12904+{
12905+ return cmpxchg(&v->counter, old, new);
12906+}
12907+
12908 static inline long atomic64_xchg(atomic64_t *v, long new)
12909 {
12910 return xchg(&v->counter, new);
12911@@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
12912 */
12913 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
12914 {
12915- long c, old;
12916+ long c, old, new;
12917 c = atomic64_read(v);
12918 for (;;) {
12919- if (unlikely(c == (u)))
12920+ if (unlikely(c == u))
12921 break;
12922- old = atomic64_cmpxchg((v), c, c + (a));
12923+
12924+ asm volatile("add %2,%0\n"
12925+
12926+#ifdef CONFIG_PAX_REFCOUNT
12927+ "jno 0f\n"
12928+ "sub %2,%0\n"
12929+ "int $4\n0:\n"
12930+ _ASM_EXTABLE(0b, 0b)
12931+#endif
12932+
12933+ : "=r" (new)
12934+ : "0" (c), "ir" (a));
12935+
12936+ old = atomic64_cmpxchg(v, c, new);
12937 if (likely(old == c))
12938 break;
12939 c = old;
12940 }
12941- return c != (u);
12942+ return c != u;
12943 }
12944
12945 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
12946diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
12947index 6dfd019..28e188d 100644
12948--- a/arch/x86/include/asm/bitops.h
12949+++ b/arch/x86/include/asm/bitops.h
12950@@ -40,7 +40,7 @@
12951 * a mask operation on a byte.
12952 */
12953 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
12954-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
12955+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
12956 #define CONST_MASK(nr) (1 << ((nr) & 7))
12957
12958 /**
12959@@ -486,7 +486,7 @@ static inline int fls(int x)
12960 * at position 64.
12961 */
12962 #ifdef CONFIG_X86_64
12963-static __always_inline int fls64(__u64 x)
12964+static __always_inline long fls64(__u64 x)
12965 {
12966 int bitpos = -1;
12967 /*
12968diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
12969index 4fa687a..60f2d39 100644
12970--- a/arch/x86/include/asm/boot.h
12971+++ b/arch/x86/include/asm/boot.h
12972@@ -6,10 +6,15 @@
12973 #include <uapi/asm/boot.h>
12974
12975 /* Physical address where kernel should be loaded. */
12976-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
12977+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
12978 + (CONFIG_PHYSICAL_ALIGN - 1)) \
12979 & ~(CONFIG_PHYSICAL_ALIGN - 1))
12980
12981+#ifndef __ASSEMBLY__
12982+extern unsigned char __LOAD_PHYSICAL_ADDR[];
12983+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
12984+#endif
12985+
12986 /* Minimum kernel alignment, as a power of two */
12987 #ifdef CONFIG_X86_64
12988 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
12989diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
12990index 48f99f1..d78ebf9 100644
12991--- a/arch/x86/include/asm/cache.h
12992+++ b/arch/x86/include/asm/cache.h
12993@@ -5,12 +5,13 @@
12994
12995 /* L1 cache line size */
12996 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
12997-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12998+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12999
13000 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
13001+#define __read_only __attribute__((__section__(".data..read_only")))
13002
13003 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
13004-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
13005+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
13006
13007 #ifdef CONFIG_X86_VSMP
13008 #ifdef CONFIG_SMP
13009diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
13010index 9863ee3..4a1f8e1 100644
13011--- a/arch/x86/include/asm/cacheflush.h
13012+++ b/arch/x86/include/asm/cacheflush.h
13013@@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
13014 unsigned long pg_flags = pg->flags & _PGMT_MASK;
13015
13016 if (pg_flags == _PGMT_DEFAULT)
13017- return -1;
13018+ return ~0UL;
13019 else if (pg_flags == _PGMT_WC)
13020 return _PAGE_CACHE_WC;
13021 else if (pg_flags == _PGMT_UC_MINUS)
13022diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
13023index 46fc474..b02b0f9 100644
13024--- a/arch/x86/include/asm/checksum_32.h
13025+++ b/arch/x86/include/asm/checksum_32.h
13026@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
13027 int len, __wsum sum,
13028 int *src_err_ptr, int *dst_err_ptr);
13029
13030+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
13031+ int len, __wsum sum,
13032+ int *src_err_ptr, int *dst_err_ptr);
13033+
13034+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
13035+ int len, __wsum sum,
13036+ int *src_err_ptr, int *dst_err_ptr);
13037+
13038 /*
13039 * Note: when you get a NULL pointer exception here this means someone
13040 * passed in an incorrect kernel address to one of these functions.
13041@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
13042 int *err_ptr)
13043 {
13044 might_sleep();
13045- return csum_partial_copy_generic((__force void *)src, dst,
13046+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
13047 len, sum, err_ptr, NULL);
13048 }
13049
13050@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
13051 {
13052 might_sleep();
13053 if (access_ok(VERIFY_WRITE, dst, len))
13054- return csum_partial_copy_generic(src, (__force void *)dst,
13055+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
13056 len, sum, NULL, err_ptr);
13057
13058 if (len)
13059diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
13060index 8d871ea..c1a0dc9 100644
13061--- a/arch/x86/include/asm/cmpxchg.h
13062+++ b/arch/x86/include/asm/cmpxchg.h
13063@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
13064 __compiletime_error("Bad argument size for cmpxchg");
13065 extern void __xadd_wrong_size(void)
13066 __compiletime_error("Bad argument size for xadd");
13067+extern void __xadd_check_overflow_wrong_size(void)
13068+ __compiletime_error("Bad argument size for xadd_check_overflow");
13069 extern void __add_wrong_size(void)
13070 __compiletime_error("Bad argument size for add");
13071+extern void __add_check_overflow_wrong_size(void)
13072+ __compiletime_error("Bad argument size for add_check_overflow");
13073
13074 /*
13075 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
13076@@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
13077 __ret; \
13078 })
13079
13080+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
13081+ ({ \
13082+ __typeof__ (*(ptr)) __ret = (arg); \
13083+ switch (sizeof(*(ptr))) { \
13084+ case __X86_CASE_L: \
13085+ asm volatile (lock #op "l %0, %1\n" \
13086+ "jno 0f\n" \
13087+ "mov %0,%1\n" \
13088+ "int $4\n0:\n" \
13089+ _ASM_EXTABLE(0b, 0b) \
13090+ : "+r" (__ret), "+m" (*(ptr)) \
13091+ : : "memory", "cc"); \
13092+ break; \
13093+ case __X86_CASE_Q: \
13094+ asm volatile (lock #op "q %q0, %1\n" \
13095+ "jno 0f\n" \
13096+ "mov %0,%1\n" \
13097+ "int $4\n0:\n" \
13098+ _ASM_EXTABLE(0b, 0b) \
13099+ : "+r" (__ret), "+m" (*(ptr)) \
13100+ : : "memory", "cc"); \
13101+ break; \
13102+ default: \
13103+ __ ## op ## _check_overflow_wrong_size(); \
13104+ } \
13105+ __ret; \
13106+ })
13107+
13108 /*
13109 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
13110 * Since this is generally used to protect other memory information, we
13111@@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
13112 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
13113 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
13114
13115+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
13116+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
13117+
13118 #define __add(ptr, inc, lock) \
13119 ({ \
13120 __typeof__ (*(ptr)) __ret = (inc); \
13121diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
13122index 59c6c40..5e0b22c 100644
13123--- a/arch/x86/include/asm/compat.h
13124+++ b/arch/x86/include/asm/compat.h
13125@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
13126 typedef u32 compat_uint_t;
13127 typedef u32 compat_ulong_t;
13128 typedef u64 __attribute__((aligned(4))) compat_u64;
13129-typedef u32 compat_uptr_t;
13130+typedef u32 __user compat_uptr_t;
13131
13132 struct compat_timespec {
13133 compat_time_t tv_sec;
13134diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
13135index 93fe929..90858b7 100644
13136--- a/arch/x86/include/asm/cpufeature.h
13137+++ b/arch/x86/include/asm/cpufeature.h
13138@@ -207,7 +207,7 @@
13139 #define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
13140 #define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
13141 #define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
13142-#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
13143+#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Prevention */
13144 #define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
13145 #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
13146 #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
13147@@ -377,7 +377,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
13148 ".section .discard,\"aw\",@progbits\n"
13149 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
13150 ".previous\n"
13151- ".section .altinstr_replacement,\"ax\"\n"
13152+ ".section .altinstr_replacement,\"a\"\n"
13153 "3: movb $1,%0\n"
13154 "4:\n"
13155 ".previous\n"
13156diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
13157index 8bf1c06..b6ae785 100644
13158--- a/arch/x86/include/asm/desc.h
13159+++ b/arch/x86/include/asm/desc.h
13160@@ -4,6 +4,7 @@
13161 #include <asm/desc_defs.h>
13162 #include <asm/ldt.h>
13163 #include <asm/mmu.h>
13164+#include <asm/pgtable.h>
13165
13166 #include <linux/smp.h>
13167 #include <linux/percpu.h>
13168@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
13169
13170 desc->type = (info->read_exec_only ^ 1) << 1;
13171 desc->type |= info->contents << 2;
13172+ desc->type |= info->seg_not_present ^ 1;
13173
13174 desc->s = 1;
13175 desc->dpl = 0x3;
13176@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
13177 }
13178
13179 extern struct desc_ptr idt_descr;
13180-extern gate_desc idt_table[];
13181 extern struct desc_ptr nmi_idt_descr;
13182-extern gate_desc nmi_idt_table[];
13183-
13184-struct gdt_page {
13185- struct desc_struct gdt[GDT_ENTRIES];
13186-} __attribute__((aligned(PAGE_SIZE)));
13187-
13188-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
13189+extern gate_desc idt_table[256];
13190+extern gate_desc nmi_idt_table[256];
13191
13192+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
13193 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
13194 {
13195- return per_cpu(gdt_page, cpu).gdt;
13196+ return cpu_gdt_table[cpu];
13197 }
13198
13199 #ifdef CONFIG_X86_64
13200@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
13201 unsigned long base, unsigned dpl, unsigned flags,
13202 unsigned short seg)
13203 {
13204- gate->a = (seg << 16) | (base & 0xffff);
13205- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
13206+ gate->gate.offset_low = base;
13207+ gate->gate.seg = seg;
13208+ gate->gate.reserved = 0;
13209+ gate->gate.type = type;
13210+ gate->gate.s = 0;
13211+ gate->gate.dpl = dpl;
13212+ gate->gate.p = 1;
13213+ gate->gate.offset_high = base >> 16;
13214 }
13215
13216 #endif
13217@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
13218
13219 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
13220 {
13221+ pax_open_kernel();
13222 memcpy(&idt[entry], gate, sizeof(*gate));
13223+ pax_close_kernel();
13224 }
13225
13226 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
13227 {
13228+ pax_open_kernel();
13229 memcpy(&ldt[entry], desc, 8);
13230+ pax_close_kernel();
13231 }
13232
13233 static inline void
13234@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
13235 default: size = sizeof(*gdt); break;
13236 }
13237
13238+ pax_open_kernel();
13239 memcpy(&gdt[entry], desc, size);
13240+ pax_close_kernel();
13241 }
13242
13243 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
13244@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
13245
13246 static inline void native_load_tr_desc(void)
13247 {
13248+ pax_open_kernel();
13249 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
13250+ pax_close_kernel();
13251 }
13252
13253 static inline void native_load_gdt(const struct desc_ptr *dtr)
13254@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
13255 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
13256 unsigned int i;
13257
13258+ pax_open_kernel();
13259 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
13260 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
13261+ pax_close_kernel();
13262 }
13263
13264 #define _LDT_empty(info) \
13265@@ -287,7 +300,7 @@ static inline void load_LDT(mm_context_t *pc)
13266 preempt_enable();
13267 }
13268
13269-static inline unsigned long get_desc_base(const struct desc_struct *desc)
13270+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
13271 {
13272 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
13273 }
13274@@ -311,7 +324,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
13275 }
13276
13277 #ifdef CONFIG_X86_64
13278-static inline void set_nmi_gate(int gate, void *addr)
13279+static inline void set_nmi_gate(int gate, const void *addr)
13280 {
13281 gate_desc s;
13282
13283@@ -320,7 +333,7 @@ static inline void set_nmi_gate(int gate, void *addr)
13284 }
13285 #endif
13286
13287-static inline void _set_gate(int gate, unsigned type, void *addr,
13288+static inline void _set_gate(int gate, unsigned type, const void *addr,
13289 unsigned dpl, unsigned ist, unsigned seg)
13290 {
13291 gate_desc s;
13292@@ -339,7 +352,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
13293 * Pentium F0 0F bugfix can have resulted in the mapped
13294 * IDT being write-protected.
13295 */
13296-static inline void set_intr_gate(unsigned int n, void *addr)
13297+static inline void set_intr_gate(unsigned int n, const void *addr)
13298 {
13299 BUG_ON((unsigned)n > 0xFF);
13300 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
13301@@ -369,19 +382,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
13302 /*
13303 * This routine sets up an interrupt gate at directory privilege level 3.
13304 */
13305-static inline void set_system_intr_gate(unsigned int n, void *addr)
13306+static inline void set_system_intr_gate(unsigned int n, const void *addr)
13307 {
13308 BUG_ON((unsigned)n > 0xFF);
13309 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
13310 }
13311
13312-static inline void set_system_trap_gate(unsigned int n, void *addr)
13313+static inline void set_system_trap_gate(unsigned int n, const void *addr)
13314 {
13315 BUG_ON((unsigned)n > 0xFF);
13316 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
13317 }
13318
13319-static inline void set_trap_gate(unsigned int n, void *addr)
13320+static inline void set_trap_gate(unsigned int n, const void *addr)
13321 {
13322 BUG_ON((unsigned)n > 0xFF);
13323 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
13324@@ -390,19 +403,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
13325 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
13326 {
13327 BUG_ON((unsigned)n > 0xFF);
13328- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
13329+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
13330 }
13331
13332-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
13333+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
13334 {
13335 BUG_ON((unsigned)n > 0xFF);
13336 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
13337 }
13338
13339-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
13340+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
13341 {
13342 BUG_ON((unsigned)n > 0xFF);
13343 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
13344 }
13345
13346+#ifdef CONFIG_X86_32
13347+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
13348+{
13349+ struct desc_struct d;
13350+
13351+ if (likely(limit))
13352+ limit = (limit - 1UL) >> PAGE_SHIFT;
13353+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
13354+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
13355+}
13356+#endif
13357+
13358 #endif /* _ASM_X86_DESC_H */
13359diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
13360index 278441f..b95a174 100644
13361--- a/arch/x86/include/asm/desc_defs.h
13362+++ b/arch/x86/include/asm/desc_defs.h
13363@@ -31,6 +31,12 @@ struct desc_struct {
13364 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
13365 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
13366 };
13367+ struct {
13368+ u16 offset_low;
13369+ u16 seg;
13370+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
13371+ unsigned offset_high: 16;
13372+ } gate;
13373 };
13374 } __attribute__((packed));
13375
13376diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
13377index ced283a..ffe04cc 100644
13378--- a/arch/x86/include/asm/div64.h
13379+++ b/arch/x86/include/asm/div64.h
13380@@ -39,7 +39,7 @@
13381 __mod; \
13382 })
13383
13384-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
13385+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
13386 {
13387 union {
13388 u64 v64;
13389diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
13390index 9c999c1..3860cb8 100644
13391--- a/arch/x86/include/asm/elf.h
13392+++ b/arch/x86/include/asm/elf.h
13393@@ -243,7 +243,25 @@ extern int force_personality32;
13394 the loader. We need to make sure that it is out of the way of the program
13395 that it will "exec", and that there is sufficient room for the brk. */
13396
13397+#ifdef CONFIG_PAX_SEGMEXEC
13398+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
13399+#else
13400 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
13401+#endif
13402+
13403+#ifdef CONFIG_PAX_ASLR
13404+#ifdef CONFIG_X86_32
13405+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
13406+
13407+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
13408+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
13409+#else
13410+#define PAX_ELF_ET_DYN_BASE 0x400000UL
13411+
13412+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
13413+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
13414+#endif
13415+#endif
13416
13417 /* This yields a mask that user programs can use to figure out what
13418 instruction set this CPU supports. This could be done in user space,
13419@@ -296,16 +314,12 @@ do { \
13420
13421 #define ARCH_DLINFO \
13422 do { \
13423- if (vdso_enabled) \
13424- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
13425- (unsigned long)current->mm->context.vdso); \
13426+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
13427 } while (0)
13428
13429 #define ARCH_DLINFO_X32 \
13430 do { \
13431- if (vdso_enabled) \
13432- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
13433- (unsigned long)current->mm->context.vdso); \
13434+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
13435 } while (0)
13436
13437 #define AT_SYSINFO 32
13438@@ -320,7 +334,7 @@ else \
13439
13440 #endif /* !CONFIG_X86_32 */
13441
13442-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
13443+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
13444
13445 #define VDSO_ENTRY \
13446 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
13447@@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
13448 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
13449 #define compat_arch_setup_additional_pages syscall32_setup_pages
13450
13451-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
13452-#define arch_randomize_brk arch_randomize_brk
13453-
13454 /*
13455 * True on X86_32 or when emulating IA32 on X86_64
13456 */
13457diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
13458index 75ce3f4..882e801 100644
13459--- a/arch/x86/include/asm/emergency-restart.h
13460+++ b/arch/x86/include/asm/emergency-restart.h
13461@@ -13,6 +13,6 @@ enum reboot_type {
13462
13463 extern enum reboot_type reboot_type;
13464
13465-extern void machine_emergency_restart(void);
13466+extern void machine_emergency_restart(void) __noreturn;
13467
13468 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
13469diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
13470index e25cc33..425d099 100644
13471--- a/arch/x86/include/asm/fpu-internal.h
13472+++ b/arch/x86/include/asm/fpu-internal.h
13473@@ -127,7 +127,9 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
13474 ({ \
13475 int err; \
13476 asm volatile(ASM_STAC "\n" \
13477- "1:" #insn "\n\t" \
13478+ "1:" \
13479+ __copyuser_seg \
13480+ #insn "\n\t" \
13481 "2: " ASM_CLAC "\n" \
13482 ".section .fixup,\"ax\"\n" \
13483 "3: movl $-1,%[err]\n" \
13484@@ -300,7 +302,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
13485 "emms\n\t" /* clear stack tags */
13486 "fildl %P[addr]", /* set F?P to defined value */
13487 X86_FEATURE_FXSAVE_LEAK,
13488- [addr] "m" (tsk->thread.fpu.has_fpu));
13489+ [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
13490
13491 return fpu_restore_checking(&tsk->thread.fpu);
13492 }
13493diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
13494index be27ba1..8f13ff9 100644
13495--- a/arch/x86/include/asm/futex.h
13496+++ b/arch/x86/include/asm/futex.h
13497@@ -12,6 +12,7 @@
13498 #include <asm/smap.h>
13499
13500 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
13501+ typecheck(u32 __user *, uaddr); \
13502 asm volatile("\t" ASM_STAC "\n" \
13503 "1:\t" insn "\n" \
13504 "2:\t" ASM_CLAC "\n" \
13505@@ -20,15 +21,16 @@
13506 "\tjmp\t2b\n" \
13507 "\t.previous\n" \
13508 _ASM_EXTABLE(1b, 3b) \
13509- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
13510+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
13511 : "i" (-EFAULT), "0" (oparg), "1" (0))
13512
13513 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
13514+ typecheck(u32 __user *, uaddr); \
13515 asm volatile("\t" ASM_STAC "\n" \
13516 "1:\tmovl %2, %0\n" \
13517 "\tmovl\t%0, %3\n" \
13518 "\t" insn "\n" \
13519- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
13520+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
13521 "\tjnz\t1b\n" \
13522 "3:\t" ASM_CLAC "\n" \
13523 "\t.section .fixup,\"ax\"\n" \
13524@@ -38,7 +40,7 @@
13525 _ASM_EXTABLE(1b, 4b) \
13526 _ASM_EXTABLE(2b, 4b) \
13527 : "=&a" (oldval), "=&r" (ret), \
13528- "+m" (*uaddr), "=&r" (tem) \
13529+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
13530 : "r" (oparg), "i" (-EFAULT), "1" (0))
13531
13532 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
13533@@ -59,10 +61,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
13534
13535 switch (op) {
13536 case FUTEX_OP_SET:
13537- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
13538+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
13539 break;
13540 case FUTEX_OP_ADD:
13541- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
13542+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
13543 uaddr, oparg);
13544 break;
13545 case FUTEX_OP_OR:
13546@@ -116,14 +118,14 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
13547 return -EFAULT;
13548
13549 asm volatile("\t" ASM_STAC "\n"
13550- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
13551+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
13552 "2:\t" ASM_CLAC "\n"
13553 "\t.section .fixup, \"ax\"\n"
13554 "3:\tmov %3, %0\n"
13555 "\tjmp 2b\n"
13556 "\t.previous\n"
13557 _ASM_EXTABLE(1b, 3b)
13558- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
13559+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
13560 : "i" (-EFAULT), "r" (newval), "1" (oldval)
13561 : "memory"
13562 );
13563diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
13564index 10a78c3..cc77143 100644
13565--- a/arch/x86/include/asm/hw_irq.h
13566+++ b/arch/x86/include/asm/hw_irq.h
13567@@ -147,8 +147,8 @@ extern void setup_ioapic_dest(void);
13568 extern void enable_IO_APIC(void);
13569
13570 /* Statistics */
13571-extern atomic_t irq_err_count;
13572-extern atomic_t irq_mis_count;
13573+extern atomic_unchecked_t irq_err_count;
13574+extern atomic_unchecked_t irq_mis_count;
13575
13576 /* EISA */
13577 extern void eisa_set_level_irq(unsigned int irq);
13578diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
13579index a203659..9889f1c 100644
13580--- a/arch/x86/include/asm/i8259.h
13581+++ b/arch/x86/include/asm/i8259.h
13582@@ -62,7 +62,7 @@ struct legacy_pic {
13583 void (*init)(int auto_eoi);
13584 int (*irq_pending)(unsigned int irq);
13585 void (*make_irq)(unsigned int irq);
13586-};
13587+} __do_const;
13588
13589 extern struct legacy_pic *legacy_pic;
13590 extern struct legacy_pic null_legacy_pic;
13591diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
13592index d8e8eef..1765f78 100644
13593--- a/arch/x86/include/asm/io.h
13594+++ b/arch/x86/include/asm/io.h
13595@@ -51,12 +51,12 @@ static inline void name(type val, volatile void __iomem *addr) \
13596 "m" (*(volatile type __force *)addr) barrier); }
13597
13598 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
13599-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
13600-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
13601+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
13602+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
13603
13604 build_mmio_read(__readb, "b", unsigned char, "=q", )
13605-build_mmio_read(__readw, "w", unsigned short, "=r", )
13606-build_mmio_read(__readl, "l", unsigned int, "=r", )
13607+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
13608+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
13609
13610 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
13611 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
13612@@ -184,7 +184,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
13613 return ioremap_nocache(offset, size);
13614 }
13615
13616-extern void iounmap(volatile void __iomem *addr);
13617+extern void iounmap(const volatile void __iomem *addr);
13618
13619 extern void set_iounmap_nonlazy(void);
13620
13621@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
13622
13623 #include <linux/vmalloc.h>
13624
13625+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
13626+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
13627+{
13628+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
13629+}
13630+
13631+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
13632+{
13633+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
13634+}
13635+
13636 /*
13637 * Convert a virtual cached pointer to an uncached pointer
13638 */
13639diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
13640index bba3cf8..06bc8da 100644
13641--- a/arch/x86/include/asm/irqflags.h
13642+++ b/arch/x86/include/asm/irqflags.h
13643@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
13644 sti; \
13645 sysexit
13646
13647+#define GET_CR0_INTO_RDI mov %cr0, %rdi
13648+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
13649+#define GET_CR3_INTO_RDI mov %cr3, %rdi
13650+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
13651+
13652 #else
13653 #define INTERRUPT_RETURN iret
13654 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
13655diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
13656index 5a6d287..f815789 100644
13657--- a/arch/x86/include/asm/kprobes.h
13658+++ b/arch/x86/include/asm/kprobes.h
13659@@ -38,13 +38,8 @@ typedef u8 kprobe_opcode_t;
13660 #define RELATIVEJUMP_SIZE 5
13661 #define RELATIVECALL_OPCODE 0xe8
13662 #define RELATIVE_ADDR_SIZE 4
13663-#define MAX_STACK_SIZE 64
13664-#define MIN_STACK_SIZE(ADDR) \
13665- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
13666- THREAD_SIZE - (unsigned long)(ADDR))) \
13667- ? (MAX_STACK_SIZE) \
13668- : (((unsigned long)current_thread_info()) + \
13669- THREAD_SIZE - (unsigned long)(ADDR)))
13670+#define MAX_STACK_SIZE 64UL
13671+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
13672
13673 #define flush_insn_slot(p) do { } while (0)
13674
13675diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
13676index 2d89e39..baee879 100644
13677--- a/arch/x86/include/asm/local.h
13678+++ b/arch/x86/include/asm/local.h
13679@@ -10,33 +10,97 @@ typedef struct {
13680 atomic_long_t a;
13681 } local_t;
13682
13683+typedef struct {
13684+ atomic_long_unchecked_t a;
13685+} local_unchecked_t;
13686+
13687 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
13688
13689 #define local_read(l) atomic_long_read(&(l)->a)
13690+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
13691 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
13692+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
13693
13694 static inline void local_inc(local_t *l)
13695 {
13696- asm volatile(_ASM_INC "%0"
13697+ asm volatile(_ASM_INC "%0\n"
13698+
13699+#ifdef CONFIG_PAX_REFCOUNT
13700+ "jno 0f\n"
13701+ _ASM_DEC "%0\n"
13702+ "int $4\n0:\n"
13703+ _ASM_EXTABLE(0b, 0b)
13704+#endif
13705+
13706+ : "+m" (l->a.counter));
13707+}
13708+
13709+static inline void local_inc_unchecked(local_unchecked_t *l)
13710+{
13711+ asm volatile(_ASM_INC "%0\n"
13712 : "+m" (l->a.counter));
13713 }
13714
13715 static inline void local_dec(local_t *l)
13716 {
13717- asm volatile(_ASM_DEC "%0"
13718+ asm volatile(_ASM_DEC "%0\n"
13719+
13720+#ifdef CONFIG_PAX_REFCOUNT
13721+ "jno 0f\n"
13722+ _ASM_INC "%0\n"
13723+ "int $4\n0:\n"
13724+ _ASM_EXTABLE(0b, 0b)
13725+#endif
13726+
13727+ : "+m" (l->a.counter));
13728+}
13729+
13730+static inline void local_dec_unchecked(local_unchecked_t *l)
13731+{
13732+ asm volatile(_ASM_DEC "%0\n"
13733 : "+m" (l->a.counter));
13734 }
13735
13736 static inline void local_add(long i, local_t *l)
13737 {
13738- asm volatile(_ASM_ADD "%1,%0"
13739+ asm volatile(_ASM_ADD "%1,%0\n"
13740+
13741+#ifdef CONFIG_PAX_REFCOUNT
13742+ "jno 0f\n"
13743+ _ASM_SUB "%1,%0\n"
13744+ "int $4\n0:\n"
13745+ _ASM_EXTABLE(0b, 0b)
13746+#endif
13747+
13748+ : "+m" (l->a.counter)
13749+ : "ir" (i));
13750+}
13751+
13752+static inline void local_add_unchecked(long i, local_unchecked_t *l)
13753+{
13754+ asm volatile(_ASM_ADD "%1,%0\n"
13755 : "+m" (l->a.counter)
13756 : "ir" (i));
13757 }
13758
13759 static inline void local_sub(long i, local_t *l)
13760 {
13761- asm volatile(_ASM_SUB "%1,%0"
13762+ asm volatile(_ASM_SUB "%1,%0\n"
13763+
13764+#ifdef CONFIG_PAX_REFCOUNT
13765+ "jno 0f\n"
13766+ _ASM_ADD "%1,%0\n"
13767+ "int $4\n0:\n"
13768+ _ASM_EXTABLE(0b, 0b)
13769+#endif
13770+
13771+ : "+m" (l->a.counter)
13772+ : "ir" (i));
13773+}
13774+
13775+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
13776+{
13777+ asm volatile(_ASM_SUB "%1,%0\n"
13778 : "+m" (l->a.counter)
13779 : "ir" (i));
13780 }
13781@@ -54,7 +118,16 @@ static inline int local_sub_and_test(long i, local_t *l)
13782 {
13783 unsigned char c;
13784
13785- asm volatile(_ASM_SUB "%2,%0; sete %1"
13786+ asm volatile(_ASM_SUB "%2,%0\n"
13787+
13788+#ifdef CONFIG_PAX_REFCOUNT
13789+ "jno 0f\n"
13790+ _ASM_ADD "%2,%0\n"
13791+ "int $4\n0:\n"
13792+ _ASM_EXTABLE(0b, 0b)
13793+#endif
13794+
13795+ "sete %1\n"
13796 : "+m" (l->a.counter), "=qm" (c)
13797 : "ir" (i) : "memory");
13798 return c;
13799@@ -72,7 +145,16 @@ static inline int local_dec_and_test(local_t *l)
13800 {
13801 unsigned char c;
13802
13803- asm volatile(_ASM_DEC "%0; sete %1"
13804+ asm volatile(_ASM_DEC "%0\n"
13805+
13806+#ifdef CONFIG_PAX_REFCOUNT
13807+ "jno 0f\n"
13808+ _ASM_INC "%0\n"
13809+ "int $4\n0:\n"
13810+ _ASM_EXTABLE(0b, 0b)
13811+#endif
13812+
13813+ "sete %1\n"
13814 : "+m" (l->a.counter), "=qm" (c)
13815 : : "memory");
13816 return c != 0;
13817@@ -90,7 +172,16 @@ static inline int local_inc_and_test(local_t *l)
13818 {
13819 unsigned char c;
13820
13821- asm volatile(_ASM_INC "%0; sete %1"
13822+ asm volatile(_ASM_INC "%0\n"
13823+
13824+#ifdef CONFIG_PAX_REFCOUNT
13825+ "jno 0f\n"
13826+ _ASM_DEC "%0\n"
13827+ "int $4\n0:\n"
13828+ _ASM_EXTABLE(0b, 0b)
13829+#endif
13830+
13831+ "sete %1\n"
13832 : "+m" (l->a.counter), "=qm" (c)
13833 : : "memory");
13834 return c != 0;
13835@@ -109,7 +200,16 @@ static inline int local_add_negative(long i, local_t *l)
13836 {
13837 unsigned char c;
13838
13839- asm volatile(_ASM_ADD "%2,%0; sets %1"
13840+ asm volatile(_ASM_ADD "%2,%0\n"
13841+
13842+#ifdef CONFIG_PAX_REFCOUNT
13843+ "jno 0f\n"
13844+ _ASM_SUB "%2,%0\n"
13845+ "int $4\n0:\n"
13846+ _ASM_EXTABLE(0b, 0b)
13847+#endif
13848+
13849+ "sets %1\n"
13850 : "+m" (l->a.counter), "=qm" (c)
13851 : "ir" (i) : "memory");
13852 return c;
13853@@ -125,6 +225,30 @@ static inline int local_add_negative(long i, local_t *l)
13854 static inline long local_add_return(long i, local_t *l)
13855 {
13856 long __i = i;
13857+ asm volatile(_ASM_XADD "%0, %1\n"
13858+
13859+#ifdef CONFIG_PAX_REFCOUNT
13860+ "jno 0f\n"
13861+ _ASM_MOV "%0,%1\n"
13862+ "int $4\n0:\n"
13863+ _ASM_EXTABLE(0b, 0b)
13864+#endif
13865+
13866+ : "+r" (i), "+m" (l->a.counter)
13867+ : : "memory");
13868+ return i + __i;
13869+}
13870+
13871+/**
13872+ * local_add_return_unchecked - add and return
13873+ * @i: integer value to add
13874+ * @l: pointer to type local_unchecked_t
13875+ *
13876+ * Atomically adds @i to @l and returns @i + @l
13877+ */
13878+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
13879+{
13880+ long __i = i;
13881 asm volatile(_ASM_XADD "%0, %1;"
13882 : "+r" (i), "+m" (l->a.counter)
13883 : : "memory");
13884@@ -141,6 +265,8 @@ static inline long local_sub_return(long i, local_t *l)
13885
13886 #define local_cmpxchg(l, o, n) \
13887 (cmpxchg_local(&((l)->a.counter), (o), (n)))
13888+#define local_cmpxchg_unchecked(l, o, n) \
13889+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
13890 /* Always has a lock prefix */
13891 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
13892
13893diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
13894new file mode 100644
13895index 0000000..2bfd3ba
13896--- /dev/null
13897+++ b/arch/x86/include/asm/mman.h
13898@@ -0,0 +1,15 @@
13899+#ifndef _X86_MMAN_H
13900+#define _X86_MMAN_H
13901+
13902+#include <uapi/asm/mman.h>
13903+
13904+#ifdef __KERNEL__
13905+#ifndef __ASSEMBLY__
13906+#ifdef CONFIG_X86_32
13907+#define arch_mmap_check i386_mmap_check
13908+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
13909+#endif
13910+#endif
13911+#endif
13912+
13913+#endif /* X86_MMAN_H */
13914diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
13915index 5f55e69..e20bfb1 100644
13916--- a/arch/x86/include/asm/mmu.h
13917+++ b/arch/x86/include/asm/mmu.h
13918@@ -9,7 +9,7 @@
13919 * we put the segment information here.
13920 */
13921 typedef struct {
13922- void *ldt;
13923+ struct desc_struct *ldt;
13924 int size;
13925
13926 #ifdef CONFIG_X86_64
13927@@ -18,7 +18,19 @@ typedef struct {
13928 #endif
13929
13930 struct mutex lock;
13931- void *vdso;
13932+ unsigned long vdso;
13933+
13934+#ifdef CONFIG_X86_32
13935+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
13936+ unsigned long user_cs_base;
13937+ unsigned long user_cs_limit;
13938+
13939+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
13940+ cpumask_t cpu_user_cs_mask;
13941+#endif
13942+
13943+#endif
13944+#endif
13945 } mm_context_t;
13946
13947 #ifdef CONFIG_SMP
13948diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
13949index cdbf367..adb37ac 100644
13950--- a/arch/x86/include/asm/mmu_context.h
13951+++ b/arch/x86/include/asm/mmu_context.h
13952@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
13953
13954 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
13955 {
13956+
13957+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13958+ unsigned int i;
13959+ pgd_t *pgd;
13960+
13961+ pax_open_kernel();
13962+ pgd = get_cpu_pgd(smp_processor_id());
13963+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
13964+ set_pgd_batched(pgd+i, native_make_pgd(0));
13965+ pax_close_kernel();
13966+#endif
13967+
13968 #ifdef CONFIG_SMP
13969 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
13970 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
13971@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
13972 struct task_struct *tsk)
13973 {
13974 unsigned cpu = smp_processor_id();
13975+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
13976+ int tlbstate = TLBSTATE_OK;
13977+#endif
13978
13979 if (likely(prev != next)) {
13980 #ifdef CONFIG_SMP
13981+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
13982+ tlbstate = this_cpu_read(cpu_tlbstate.state);
13983+#endif
13984 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
13985 this_cpu_write(cpu_tlbstate.active_mm, next);
13986 #endif
13987 cpumask_set_cpu(cpu, mm_cpumask(next));
13988
13989 /* Re-load page tables */
13990+#ifdef CONFIG_PAX_PER_CPU_PGD
13991+ pax_open_kernel();
13992+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
13993+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
13994+ pax_close_kernel();
13995+ load_cr3(get_cpu_pgd(cpu));
13996+#else
13997 load_cr3(next->pgd);
13998+#endif
13999
14000 /* stop flush ipis for the previous mm */
14001 cpumask_clear_cpu(cpu, mm_cpumask(prev));
14002@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
14003 */
14004 if (unlikely(prev->context.ldt != next->context.ldt))
14005 load_LDT_nolock(&next->context);
14006- }
14007+
14008+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
14009+ if (!(__supported_pte_mask & _PAGE_NX)) {
14010+ smp_mb__before_clear_bit();
14011+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
14012+ smp_mb__after_clear_bit();
14013+ cpu_set(cpu, next->context.cpu_user_cs_mask);
14014+ }
14015+#endif
14016+
14017+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
14018+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
14019+ prev->context.user_cs_limit != next->context.user_cs_limit))
14020+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
14021 #ifdef CONFIG_SMP
14022+ else if (unlikely(tlbstate != TLBSTATE_OK))
14023+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
14024+#endif
14025+#endif
14026+
14027+ }
14028 else {
14029+
14030+#ifdef CONFIG_PAX_PER_CPU_PGD
14031+ pax_open_kernel();
14032+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
14033+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
14034+ pax_close_kernel();
14035+ load_cr3(get_cpu_pgd(cpu));
14036+#endif
14037+
14038+#ifdef CONFIG_SMP
14039 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
14040 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
14041
14042@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
14043 * tlb flush IPI delivery. We must reload CR3
14044 * to make sure to use no freed page tables.
14045 */
14046+
14047+#ifndef CONFIG_PAX_PER_CPU_PGD
14048 load_cr3(next->pgd);
14049+#endif
14050+
14051 load_LDT_nolock(&next->context);
14052+
14053+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
14054+ if (!(__supported_pte_mask & _PAGE_NX))
14055+ cpu_set(cpu, next->context.cpu_user_cs_mask);
14056+#endif
14057+
14058+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
14059+#ifdef CONFIG_PAX_PAGEEXEC
14060+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
14061+#endif
14062+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
14063+#endif
14064+
14065 }
14066+#endif
14067 }
14068-#endif
14069 }
14070
14071 #define activate_mm(prev, next) \
14072diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
14073index e3b7819..b257c64 100644
14074--- a/arch/x86/include/asm/module.h
14075+++ b/arch/x86/include/asm/module.h
14076@@ -5,6 +5,7 @@
14077
14078 #ifdef CONFIG_X86_64
14079 /* X86_64 does not define MODULE_PROC_FAMILY */
14080+#define MODULE_PROC_FAMILY ""
14081 #elif defined CONFIG_M486
14082 #define MODULE_PROC_FAMILY "486 "
14083 #elif defined CONFIG_M586
14084@@ -57,8 +58,20 @@
14085 #error unknown processor family
14086 #endif
14087
14088-#ifdef CONFIG_X86_32
14089-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
14090+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
14091+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
14092+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
14093+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
14094+#else
14095+#define MODULE_PAX_KERNEXEC ""
14096 #endif
14097
14098+#ifdef CONFIG_PAX_MEMORY_UDEREF
14099+#define MODULE_PAX_UDEREF "UDEREF "
14100+#else
14101+#define MODULE_PAX_UDEREF ""
14102+#endif
14103+
14104+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
14105+
14106 #endif /* _ASM_X86_MODULE_H */
14107diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
14108index c0fa356..07a498a 100644
14109--- a/arch/x86/include/asm/nmi.h
14110+++ b/arch/x86/include/asm/nmi.h
14111@@ -42,11 +42,11 @@ struct nmiaction {
14112 nmi_handler_t handler;
14113 unsigned long flags;
14114 const char *name;
14115-};
14116+} __do_const;
14117
14118 #define register_nmi_handler(t, fn, fg, n, init...) \
14119 ({ \
14120- static struct nmiaction init fn##_na = { \
14121+ static const struct nmiaction init fn##_na = { \
14122 .handler = (fn), \
14123 .name = (n), \
14124 .flags = (fg), \
14125@@ -54,7 +54,7 @@ struct nmiaction {
14126 __register_nmi_handler((t), &fn##_na); \
14127 })
14128
14129-int __register_nmi_handler(unsigned int, struct nmiaction *);
14130+int __register_nmi_handler(unsigned int, const struct nmiaction *);
14131
14132 void unregister_nmi_handler(unsigned int, const char *);
14133
14134diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
14135index 0f1ddee..e2fc3d1 100644
14136--- a/arch/x86/include/asm/page_64.h
14137+++ b/arch/x86/include/asm/page_64.h
14138@@ -7,9 +7,9 @@
14139
14140 /* duplicated to the one in bootmem.h */
14141 extern unsigned long max_pfn;
14142-extern unsigned long phys_base;
14143+extern const unsigned long phys_base;
14144
14145-static inline unsigned long __phys_addr_nodebug(unsigned long x)
14146+static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
14147 {
14148 unsigned long y = x - __START_KERNEL_map;
14149
14150diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
14151index 7361e47..16dc226 100644
14152--- a/arch/x86/include/asm/paravirt.h
14153+++ b/arch/x86/include/asm/paravirt.h
14154@@ -564,7 +564,7 @@ static inline pmd_t __pmd(pmdval_t val)
14155 return (pmd_t) { ret };
14156 }
14157
14158-static inline pmdval_t pmd_val(pmd_t pmd)
14159+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
14160 {
14161 pmdval_t ret;
14162
14163@@ -630,6 +630,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
14164 val);
14165 }
14166
14167+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
14168+{
14169+ pgdval_t val = native_pgd_val(pgd);
14170+
14171+ if (sizeof(pgdval_t) > sizeof(long))
14172+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
14173+ val, (u64)val >> 32);
14174+ else
14175+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
14176+ val);
14177+}
14178+
14179 static inline void pgd_clear(pgd_t *pgdp)
14180 {
14181 set_pgd(pgdp, __pgd(0));
14182@@ -714,6 +726,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
14183 pv_mmu_ops.set_fixmap(idx, phys, flags);
14184 }
14185
14186+#ifdef CONFIG_PAX_KERNEXEC
14187+static inline unsigned long pax_open_kernel(void)
14188+{
14189+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
14190+}
14191+
14192+static inline unsigned long pax_close_kernel(void)
14193+{
14194+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
14195+}
14196+#else
14197+static inline unsigned long pax_open_kernel(void) { return 0; }
14198+static inline unsigned long pax_close_kernel(void) { return 0; }
14199+#endif
14200+
14201 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
14202
14203 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
14204@@ -930,7 +957,7 @@ extern void default_banner(void);
14205
14206 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
14207 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
14208-#define PARA_INDIRECT(addr) *%cs:addr
14209+#define PARA_INDIRECT(addr) *%ss:addr
14210 #endif
14211
14212 #define INTERRUPT_RETURN \
14213@@ -1005,6 +1032,21 @@ extern void default_banner(void);
14214 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
14215 CLBR_NONE, \
14216 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
14217+
14218+#define GET_CR0_INTO_RDI \
14219+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
14220+ mov %rax,%rdi
14221+
14222+#define SET_RDI_INTO_CR0 \
14223+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
14224+
14225+#define GET_CR3_INTO_RDI \
14226+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
14227+ mov %rax,%rdi
14228+
14229+#define SET_RDI_INTO_CR3 \
14230+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
14231+
14232 #endif /* CONFIG_X86_32 */
14233
14234 #endif /* __ASSEMBLY__ */
14235diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
14236index b3b0ec1..b1cd3eb 100644
14237--- a/arch/x86/include/asm/paravirt_types.h
14238+++ b/arch/x86/include/asm/paravirt_types.h
14239@@ -84,7 +84,7 @@ struct pv_init_ops {
14240 */
14241 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
14242 unsigned long addr, unsigned len);
14243-};
14244+} __no_const;
14245
14246
14247 struct pv_lazy_ops {
14248@@ -98,7 +98,7 @@ struct pv_time_ops {
14249 unsigned long long (*sched_clock)(void);
14250 unsigned long long (*steal_clock)(int cpu);
14251 unsigned long (*get_tsc_khz)(void);
14252-};
14253+} __no_const;
14254
14255 struct pv_cpu_ops {
14256 /* hooks for various privileged instructions */
14257@@ -192,7 +192,7 @@ struct pv_cpu_ops {
14258
14259 void (*start_context_switch)(struct task_struct *prev);
14260 void (*end_context_switch)(struct task_struct *next);
14261-};
14262+} __no_const;
14263
14264 struct pv_irq_ops {
14265 /*
14266@@ -223,7 +223,7 @@ struct pv_apic_ops {
14267 unsigned long start_eip,
14268 unsigned long start_esp);
14269 #endif
14270-};
14271+} __no_const;
14272
14273 struct pv_mmu_ops {
14274 unsigned long (*read_cr2)(void);
14275@@ -313,6 +313,7 @@ struct pv_mmu_ops {
14276 struct paravirt_callee_save make_pud;
14277
14278 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
14279+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
14280 #endif /* PAGETABLE_LEVELS == 4 */
14281 #endif /* PAGETABLE_LEVELS >= 3 */
14282
14283@@ -324,6 +325,12 @@ struct pv_mmu_ops {
14284 an mfn. We can tell which is which from the index. */
14285 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
14286 phys_addr_t phys, pgprot_t flags);
14287+
14288+#ifdef CONFIG_PAX_KERNEXEC
14289+ unsigned long (*pax_open_kernel)(void);
14290+ unsigned long (*pax_close_kernel)(void);
14291+#endif
14292+
14293 };
14294
14295 struct arch_spinlock;
14296@@ -334,7 +341,7 @@ struct pv_lock_ops {
14297 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
14298 int (*spin_trylock)(struct arch_spinlock *lock);
14299 void (*spin_unlock)(struct arch_spinlock *lock);
14300-};
14301+} __no_const;
14302
14303 /* This contains all the paravirt structures: we get a convenient
14304 * number for each function using the offset which we use to indicate
14305diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
14306index b4389a4..7024269 100644
14307--- a/arch/x86/include/asm/pgalloc.h
14308+++ b/arch/x86/include/asm/pgalloc.h
14309@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
14310 pmd_t *pmd, pte_t *pte)
14311 {
14312 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
14313+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
14314+}
14315+
14316+static inline void pmd_populate_user(struct mm_struct *mm,
14317+ pmd_t *pmd, pte_t *pte)
14318+{
14319+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
14320 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
14321 }
14322
14323@@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
14324
14325 #ifdef CONFIG_X86_PAE
14326 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
14327+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
14328+{
14329+ pud_populate(mm, pudp, pmd);
14330+}
14331 #else /* !CONFIG_X86_PAE */
14332 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
14333 {
14334 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
14335 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
14336 }
14337+
14338+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
14339+{
14340+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
14341+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
14342+}
14343 #endif /* CONFIG_X86_PAE */
14344
14345 #if PAGETABLE_LEVELS > 3
14346@@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
14347 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
14348 }
14349
14350+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
14351+{
14352+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
14353+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
14354+}
14355+
14356 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
14357 {
14358 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
14359diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
14360index f2b489c..4f7e2e5 100644
14361--- a/arch/x86/include/asm/pgtable-2level.h
14362+++ b/arch/x86/include/asm/pgtable-2level.h
14363@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
14364
14365 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
14366 {
14367+ pax_open_kernel();
14368 *pmdp = pmd;
14369+ pax_close_kernel();
14370 }
14371
14372 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
14373diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
14374index 4cc9f2b..5fd9226 100644
14375--- a/arch/x86/include/asm/pgtable-3level.h
14376+++ b/arch/x86/include/asm/pgtable-3level.h
14377@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
14378
14379 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
14380 {
14381+ pax_open_kernel();
14382 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
14383+ pax_close_kernel();
14384 }
14385
14386 static inline void native_set_pud(pud_t *pudp, pud_t pud)
14387 {
14388+ pax_open_kernel();
14389 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
14390+ pax_close_kernel();
14391 }
14392
14393 /*
14394diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
14395index 1e67223..dd6e7ea 100644
14396--- a/arch/x86/include/asm/pgtable.h
14397+++ b/arch/x86/include/asm/pgtable.h
14398@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
14399
14400 #ifndef __PAGETABLE_PUD_FOLDED
14401 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
14402+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
14403 #define pgd_clear(pgd) native_pgd_clear(pgd)
14404 #endif
14405
14406@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
14407
14408 #define arch_end_context_switch(prev) do {} while(0)
14409
14410+#define pax_open_kernel() native_pax_open_kernel()
14411+#define pax_close_kernel() native_pax_close_kernel()
14412 #endif /* CONFIG_PARAVIRT */
14413
14414+#define __HAVE_ARCH_PAX_OPEN_KERNEL
14415+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
14416+
14417+#ifdef CONFIG_PAX_KERNEXEC
14418+static inline unsigned long native_pax_open_kernel(void)
14419+{
14420+ unsigned long cr0;
14421+
14422+ preempt_disable();
14423+ barrier();
14424+ cr0 = read_cr0() ^ X86_CR0_WP;
14425+ BUG_ON(cr0 & X86_CR0_WP);
14426+ write_cr0(cr0);
14427+ return cr0 ^ X86_CR0_WP;
14428+}
14429+
14430+static inline unsigned long native_pax_close_kernel(void)
14431+{
14432+ unsigned long cr0;
14433+
14434+ cr0 = read_cr0() ^ X86_CR0_WP;
14435+ BUG_ON(!(cr0 & X86_CR0_WP));
14436+ write_cr0(cr0);
14437+ barrier();
14438+ preempt_enable_no_resched();
14439+ return cr0 ^ X86_CR0_WP;
14440+}
14441+#else
14442+static inline unsigned long native_pax_open_kernel(void) { return 0; }
14443+static inline unsigned long native_pax_close_kernel(void) { return 0; }
14444+#endif
14445+
14446 /*
14447 * The following only work if pte_present() is true.
14448 * Undefined behaviour if not..
14449 */
14450+static inline int pte_user(pte_t pte)
14451+{
14452+ return pte_val(pte) & _PAGE_USER;
14453+}
14454+
14455 static inline int pte_dirty(pte_t pte)
14456 {
14457 return pte_flags(pte) & _PAGE_DIRTY;
14458@@ -147,6 +187,11 @@ static inline unsigned long pud_pfn(pud_t pud)
14459 return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
14460 }
14461
14462+static inline unsigned long pgd_pfn(pgd_t pgd)
14463+{
14464+ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
14465+}
14466+
14467 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
14468
14469 static inline int pmd_large(pmd_t pte)
14470@@ -200,9 +245,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
14471 return pte_clear_flags(pte, _PAGE_RW);
14472 }
14473
14474+static inline pte_t pte_mkread(pte_t pte)
14475+{
14476+ return __pte(pte_val(pte) | _PAGE_USER);
14477+}
14478+
14479 static inline pte_t pte_mkexec(pte_t pte)
14480 {
14481- return pte_clear_flags(pte, _PAGE_NX);
14482+#ifdef CONFIG_X86_PAE
14483+ if (__supported_pte_mask & _PAGE_NX)
14484+ return pte_clear_flags(pte, _PAGE_NX);
14485+ else
14486+#endif
14487+ return pte_set_flags(pte, _PAGE_USER);
14488+}
14489+
14490+static inline pte_t pte_exprotect(pte_t pte)
14491+{
14492+#ifdef CONFIG_X86_PAE
14493+ if (__supported_pte_mask & _PAGE_NX)
14494+ return pte_set_flags(pte, _PAGE_NX);
14495+ else
14496+#endif
14497+ return pte_clear_flags(pte, _PAGE_USER);
14498 }
14499
14500 static inline pte_t pte_mkdirty(pte_t pte)
14501@@ -394,6 +459,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
14502 #endif
14503
14504 #ifndef __ASSEMBLY__
14505+
14506+#ifdef CONFIG_PAX_PER_CPU_PGD
14507+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
14508+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
14509+{
14510+ return cpu_pgd[cpu];
14511+}
14512+#endif
14513+
14514 #include <linux/mm_types.h>
14515 #include <linux/log2.h>
14516
14517@@ -529,7 +603,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
14518 * Currently stuck as a macro due to indirect forward reference to
14519 * linux/mmzone.h's __section_mem_map_addr() definition:
14520 */
14521-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
14522+#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
14523
14524 /* Find an entry in the second-level page table.. */
14525 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
14526@@ -569,7 +643,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
14527 * Currently stuck as a macro due to indirect forward reference to
14528 * linux/mmzone.h's __section_mem_map_addr() definition:
14529 */
14530-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
14531+#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
14532
14533 /* to find an entry in a page-table-directory. */
14534 static inline unsigned long pud_index(unsigned long address)
14535@@ -584,7 +658,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
14536
14537 static inline int pgd_bad(pgd_t pgd)
14538 {
14539- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
14540+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
14541 }
14542
14543 static inline int pgd_none(pgd_t pgd)
14544@@ -607,7 +681,12 @@ static inline int pgd_none(pgd_t pgd)
14545 * pgd_offset() returns a (pgd_t *)
14546 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
14547 */
14548-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
14549+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
14550+
14551+#ifdef CONFIG_PAX_PER_CPU_PGD
14552+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
14553+#endif
14554+
14555 /*
14556 * a shortcut which implies the use of the kernel's pgd, instead
14557 * of a process's
14558@@ -618,6 +697,22 @@ static inline int pgd_none(pgd_t pgd)
14559 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
14560 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
14561
14562+#ifdef CONFIG_X86_32
14563+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
14564+#else
14565+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
14566+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
14567+
14568+#ifdef CONFIG_PAX_MEMORY_UDEREF
14569+#ifdef __ASSEMBLY__
14570+#define pax_user_shadow_base pax_user_shadow_base(%rip)
14571+#else
14572+extern unsigned long pax_user_shadow_base;
14573+#endif
14574+#endif
14575+
14576+#endif
14577+
14578 #ifndef __ASSEMBLY__
14579
14580 extern int direct_gbpages;
14581@@ -784,11 +879,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
14582 * dst and src can be on the same page, but the range must not overlap,
14583 * and must not cross a page boundary.
14584 */
14585-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
14586+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
14587 {
14588- memcpy(dst, src, count * sizeof(pgd_t));
14589+ pax_open_kernel();
14590+ while (count--)
14591+ *dst++ = *src++;
14592+ pax_close_kernel();
14593 }
14594
14595+#ifdef CONFIG_PAX_PER_CPU_PGD
14596+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
14597+#endif
14598+
14599+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
14600+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
14601+#else
14602+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
14603+#endif
14604+
14605 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
14606 static inline int page_level_shift(enum pg_level level)
14607 {
14608diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
14609index 9ee3221..b979c6b 100644
14610--- a/arch/x86/include/asm/pgtable_32.h
14611+++ b/arch/x86/include/asm/pgtable_32.h
14612@@ -25,9 +25,6 @@
14613 struct mm_struct;
14614 struct vm_area_struct;
14615
14616-extern pgd_t swapper_pg_dir[1024];
14617-extern pgd_t initial_page_table[1024];
14618-
14619 static inline void pgtable_cache_init(void) { }
14620 static inline void check_pgt_cache(void) { }
14621 void paging_init(void);
14622@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
14623 # include <asm/pgtable-2level.h>
14624 #endif
14625
14626+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
14627+extern pgd_t initial_page_table[PTRS_PER_PGD];
14628+#ifdef CONFIG_X86_PAE
14629+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
14630+#endif
14631+
14632 #if defined(CONFIG_HIGHPTE)
14633 #define pte_offset_map(dir, address) \
14634 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
14635@@ -62,12 +65,17 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
14636 /* Clear a kernel PTE and flush it from the TLB */
14637 #define kpte_clear_flush(ptep, vaddr) \
14638 do { \
14639+ pax_open_kernel(); \
14640 pte_clear(&init_mm, (vaddr), (ptep)); \
14641+ pax_close_kernel(); \
14642 __flush_tlb_one((vaddr)); \
14643 } while (0)
14644
14645 #endif /* !__ASSEMBLY__ */
14646
14647+#define HAVE_ARCH_UNMAPPED_AREA
14648+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
14649+
14650 /*
14651 * kern_addr_valid() is (1) for FLATMEM and (0) for
14652 * SPARSEMEM and DISCONTIGMEM
14653diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
14654index ed5903b..c7fe163 100644
14655--- a/arch/x86/include/asm/pgtable_32_types.h
14656+++ b/arch/x86/include/asm/pgtable_32_types.h
14657@@ -8,7 +8,7 @@
14658 */
14659 #ifdef CONFIG_X86_PAE
14660 # include <asm/pgtable-3level_types.h>
14661-# define PMD_SIZE (1UL << PMD_SHIFT)
14662+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
14663 # define PMD_MASK (~(PMD_SIZE - 1))
14664 #else
14665 # include <asm/pgtable-2level_types.h>
14666@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
14667 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
14668 #endif
14669
14670+#ifdef CONFIG_PAX_KERNEXEC
14671+#ifndef __ASSEMBLY__
14672+extern unsigned char MODULES_EXEC_VADDR[];
14673+extern unsigned char MODULES_EXEC_END[];
14674+#endif
14675+#include <asm/boot.h>
14676+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
14677+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
14678+#else
14679+#define ktla_ktva(addr) (addr)
14680+#define ktva_ktla(addr) (addr)
14681+#endif
14682+
14683 #define MODULES_VADDR VMALLOC_START
14684 #define MODULES_END VMALLOC_END
14685 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
14686diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
14687index e22c1db..23a625a 100644
14688--- a/arch/x86/include/asm/pgtable_64.h
14689+++ b/arch/x86/include/asm/pgtable_64.h
14690@@ -16,10 +16,14 @@
14691
14692 extern pud_t level3_kernel_pgt[512];
14693 extern pud_t level3_ident_pgt[512];
14694+extern pud_t level3_vmalloc_start_pgt[512];
14695+extern pud_t level3_vmalloc_end_pgt[512];
14696+extern pud_t level3_vmemmap_pgt[512];
14697+extern pud_t level2_vmemmap_pgt[512];
14698 extern pmd_t level2_kernel_pgt[512];
14699 extern pmd_t level2_fixmap_pgt[512];
14700-extern pmd_t level2_ident_pgt[512];
14701-extern pgd_t init_level4_pgt[];
14702+extern pmd_t level2_ident_pgt[512*2];
14703+extern pgd_t init_level4_pgt[512];
14704
14705 #define swapper_pg_dir init_level4_pgt
14706
14707@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
14708
14709 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
14710 {
14711+ pax_open_kernel();
14712 *pmdp = pmd;
14713+ pax_close_kernel();
14714 }
14715
14716 static inline void native_pmd_clear(pmd_t *pmd)
14717@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
14718
14719 static inline void native_set_pud(pud_t *pudp, pud_t pud)
14720 {
14721+ pax_open_kernel();
14722 *pudp = pud;
14723+ pax_close_kernel();
14724 }
14725
14726 static inline void native_pud_clear(pud_t *pud)
14727@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
14728
14729 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
14730 {
14731+ pax_open_kernel();
14732+ *pgdp = pgd;
14733+ pax_close_kernel();
14734+}
14735+
14736+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
14737+{
14738 *pgdp = pgd;
14739 }
14740
14741diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
14742index 2d88344..4679fc3 100644
14743--- a/arch/x86/include/asm/pgtable_64_types.h
14744+++ b/arch/x86/include/asm/pgtable_64_types.h
14745@@ -61,6 +61,11 @@ typedef struct { pteval_t pte; } pte_t;
14746 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
14747 #define MODULES_END _AC(0xffffffffff000000, UL)
14748 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
14749+#define MODULES_EXEC_VADDR MODULES_VADDR
14750+#define MODULES_EXEC_END MODULES_END
14751+
14752+#define ktla_ktva(addr) (addr)
14753+#define ktva_ktla(addr) (addr)
14754
14755 #define EARLY_DYNAMIC_PAGE_TABLES 64
14756
14757diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
14758index 567b5d0..bd91d64 100644
14759--- a/arch/x86/include/asm/pgtable_types.h
14760+++ b/arch/x86/include/asm/pgtable_types.h
14761@@ -16,13 +16,12 @@
14762 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
14763 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
14764 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
14765-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
14766+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
14767 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
14768 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
14769 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
14770-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
14771-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
14772-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
14773+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
14774+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
14775 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
14776
14777 /* If _PAGE_BIT_PRESENT is clear, we use these: */
14778@@ -40,7 +39,6 @@
14779 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
14780 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
14781 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
14782-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
14783 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
14784 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
14785 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
14786@@ -57,8 +55,10 @@
14787
14788 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
14789 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
14790-#else
14791+#elif defined(CONFIG_KMEMCHECK)
14792 #define _PAGE_NX (_AT(pteval_t, 0))
14793+#else
14794+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
14795 #endif
14796
14797 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
14798@@ -116,6 +116,9 @@
14799 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
14800 _PAGE_ACCESSED)
14801
14802+#define PAGE_READONLY_NOEXEC PAGE_READONLY
14803+#define PAGE_SHARED_NOEXEC PAGE_SHARED
14804+
14805 #define __PAGE_KERNEL_EXEC \
14806 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
14807 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
14808@@ -126,7 +129,7 @@
14809 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
14810 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
14811 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
14812-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
14813+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
14814 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
14815 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
14816 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
14817@@ -188,8 +191,8 @@
14818 * bits are combined, this will alow user to access the high address mapped
14819 * VDSO in the presence of CONFIG_COMPAT_VDSO
14820 */
14821-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
14822-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
14823+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
14824+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
14825 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
14826 #endif
14827
14828@@ -227,7 +230,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
14829 {
14830 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
14831 }
14832+#endif
14833
14834+#if PAGETABLE_LEVELS == 3
14835+#include <asm-generic/pgtable-nopud.h>
14836+#endif
14837+
14838+#if PAGETABLE_LEVELS == 2
14839+#include <asm-generic/pgtable-nopmd.h>
14840+#endif
14841+
14842+#ifndef __ASSEMBLY__
14843 #if PAGETABLE_LEVELS > 3
14844 typedef struct { pudval_t pud; } pud_t;
14845
14846@@ -241,8 +254,6 @@ static inline pudval_t native_pud_val(pud_t pud)
14847 return pud.pud;
14848 }
14849 #else
14850-#include <asm-generic/pgtable-nopud.h>
14851-
14852 static inline pudval_t native_pud_val(pud_t pud)
14853 {
14854 return native_pgd_val(pud.pgd);
14855@@ -262,8 +273,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
14856 return pmd.pmd;
14857 }
14858 #else
14859-#include <asm-generic/pgtable-nopmd.h>
14860-
14861 static inline pmdval_t native_pmd_val(pmd_t pmd)
14862 {
14863 return native_pgd_val(pmd.pud.pgd);
14864@@ -303,7 +312,6 @@ typedef struct page *pgtable_t;
14865
14866 extern pteval_t __supported_pte_mask;
14867 extern void set_nx(void);
14868-extern int nx_enabled;
14869
14870 #define pgprot_writecombine pgprot_writecombine
14871 extern pgprot_t pgprot_writecombine(pgprot_t prot);
14872diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
14873index 3270116..8d99d82 100644
14874--- a/arch/x86/include/asm/processor.h
14875+++ b/arch/x86/include/asm/processor.h
14876@@ -285,7 +285,7 @@ struct tss_struct {
14877
14878 } ____cacheline_aligned;
14879
14880-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
14881+extern struct tss_struct init_tss[NR_CPUS];
14882
14883 /*
14884 * Save the original ist values for checking stack pointers during debugging
14885@@ -826,11 +826,18 @@ static inline void spin_lock_prefetch(const void *x)
14886 */
14887 #define TASK_SIZE PAGE_OFFSET
14888 #define TASK_SIZE_MAX TASK_SIZE
14889+
14890+#ifdef CONFIG_PAX_SEGMEXEC
14891+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
14892+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
14893+#else
14894 #define STACK_TOP TASK_SIZE
14895-#define STACK_TOP_MAX STACK_TOP
14896+#endif
14897+
14898+#define STACK_TOP_MAX TASK_SIZE
14899
14900 #define INIT_THREAD { \
14901- .sp0 = sizeof(init_stack) + (long)&init_stack, \
14902+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
14903 .vm86_info = NULL, \
14904 .sysenter_cs = __KERNEL_CS, \
14905 .io_bitmap_ptr = NULL, \
14906@@ -844,7 +851,7 @@ static inline void spin_lock_prefetch(const void *x)
14907 */
14908 #define INIT_TSS { \
14909 .x86_tss = { \
14910- .sp0 = sizeof(init_stack) + (long)&init_stack, \
14911+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
14912 .ss0 = __KERNEL_DS, \
14913 .ss1 = __KERNEL_CS, \
14914 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
14915@@ -855,11 +862,7 @@ static inline void spin_lock_prefetch(const void *x)
14916 extern unsigned long thread_saved_pc(struct task_struct *tsk);
14917
14918 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
14919-#define KSTK_TOP(info) \
14920-({ \
14921- unsigned long *__ptr = (unsigned long *)(info); \
14922- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
14923-})
14924+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
14925
14926 /*
14927 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
14928@@ -874,7 +877,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
14929 #define task_pt_regs(task) \
14930 ({ \
14931 struct pt_regs *__regs__; \
14932- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
14933+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
14934 __regs__ - 1; \
14935 })
14936
14937@@ -884,13 +887,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
14938 /*
14939 * User space process size. 47bits minus one guard page.
14940 */
14941-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
14942+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
14943
14944 /* This decides where the kernel will search for a free chunk of vm
14945 * space during mmap's.
14946 */
14947 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
14948- 0xc0000000 : 0xFFFFe000)
14949+ 0xc0000000 : 0xFFFFf000)
14950
14951 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
14952 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
14953@@ -901,11 +904,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
14954 #define STACK_TOP_MAX TASK_SIZE_MAX
14955
14956 #define INIT_THREAD { \
14957- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
14958+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
14959 }
14960
14961 #define INIT_TSS { \
14962- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
14963+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
14964 }
14965
14966 /*
14967@@ -933,6 +936,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
14968 */
14969 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
14970
14971+#ifdef CONFIG_PAX_SEGMEXEC
14972+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
14973+#endif
14974+
14975 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
14976
14977 /* Get/set a process' ability to use the timestamp counter instruction */
14978@@ -993,7 +1000,7 @@ extern bool cpu_has_amd_erratum(const int *);
14979 #define cpu_has_amd_erratum(x) (false)
14980 #endif /* CONFIG_CPU_SUP_AMD */
14981
14982-extern unsigned long arch_align_stack(unsigned long sp);
14983+#define arch_align_stack(x) ((x) & ~0xfUL)
14984 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
14985
14986 void default_idle(void);
14987@@ -1003,6 +1010,6 @@ bool xen_set_default_idle(void);
14988 #define xen_set_default_idle 0
14989 #endif
14990
14991-void stop_this_cpu(void *dummy);
14992+void stop_this_cpu(void *dummy) __noreturn;
14993
14994 #endif /* _ASM_X86_PROCESSOR_H */
14995diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
14996index 942a086..6c26446 100644
14997--- a/arch/x86/include/asm/ptrace.h
14998+++ b/arch/x86/include/asm/ptrace.h
14999@@ -85,28 +85,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
15000 }
15001
15002 /*
15003- * user_mode_vm(regs) determines whether a register set came from user mode.
15004+ * user_mode(regs) determines whether a register set came from user mode.
15005 * This is true if V8086 mode was enabled OR if the register set was from
15006 * protected mode with RPL-3 CS value. This tricky test checks that with
15007 * one comparison. Many places in the kernel can bypass this full check
15008- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
15009+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
15010+ * be used.
15011 */
15012-static inline int user_mode(struct pt_regs *regs)
15013+static inline int user_mode_novm(struct pt_regs *regs)
15014 {
15015 #ifdef CONFIG_X86_32
15016 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
15017 #else
15018- return !!(regs->cs & 3);
15019+ return !!(regs->cs & SEGMENT_RPL_MASK);
15020 #endif
15021 }
15022
15023-static inline int user_mode_vm(struct pt_regs *regs)
15024+static inline int user_mode(struct pt_regs *regs)
15025 {
15026 #ifdef CONFIG_X86_32
15027 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
15028 USER_RPL;
15029 #else
15030- return user_mode(regs);
15031+ return user_mode_novm(regs);
15032 #endif
15033 }
15034
15035@@ -122,15 +123,16 @@ static inline int v8086_mode(struct pt_regs *regs)
15036 #ifdef CONFIG_X86_64
15037 static inline bool user_64bit_mode(struct pt_regs *regs)
15038 {
15039+ unsigned long cs = regs->cs & 0xffff;
15040 #ifndef CONFIG_PARAVIRT
15041 /*
15042 * On non-paravirt systems, this is the only long mode CPL 3
15043 * selector. We do not allow long mode selectors in the LDT.
15044 */
15045- return regs->cs == __USER_CS;
15046+ return cs == __USER_CS;
15047 #else
15048 /* Headers are too twisted for this to go in paravirt.h. */
15049- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
15050+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
15051 #endif
15052 }
15053
15054@@ -181,9 +183,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
15055 * Traps from the kernel do not save sp and ss.
15056 * Use the helper function to retrieve sp.
15057 */
15058- if (offset == offsetof(struct pt_regs, sp) &&
15059- regs->cs == __KERNEL_CS)
15060- return kernel_stack_pointer(regs);
15061+ if (offset == offsetof(struct pt_regs, sp)) {
15062+ unsigned long cs = regs->cs & 0xffff;
15063+ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
15064+ return kernel_stack_pointer(regs);
15065+ }
15066 #endif
15067 return *(unsigned long *)((unsigned long)regs + offset);
15068 }
15069diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
15070index 9c6b890..5305f53 100644
15071--- a/arch/x86/include/asm/realmode.h
15072+++ b/arch/x86/include/asm/realmode.h
15073@@ -22,16 +22,14 @@ struct real_mode_header {
15074 #endif
15075 /* APM/BIOS reboot */
15076 u32 machine_real_restart_asm;
15077-#ifdef CONFIG_X86_64
15078 u32 machine_real_restart_seg;
15079-#endif
15080 };
15081
15082 /* This must match data at trampoline_32/64.S */
15083 struct trampoline_header {
15084 #ifdef CONFIG_X86_32
15085 u32 start;
15086- u16 gdt_pad;
15087+ u16 boot_cs;
15088 u16 gdt_limit;
15089 u32 gdt_base;
15090 #else
15091diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
15092index a82c4f1..ac45053 100644
15093--- a/arch/x86/include/asm/reboot.h
15094+++ b/arch/x86/include/asm/reboot.h
15095@@ -6,13 +6,13 @@
15096 struct pt_regs;
15097
15098 struct machine_ops {
15099- void (*restart)(char *cmd);
15100- void (*halt)(void);
15101- void (*power_off)(void);
15102+ void (* __noreturn restart)(char *cmd);
15103+ void (* __noreturn halt)(void);
15104+ void (* __noreturn power_off)(void);
15105 void (*shutdown)(void);
15106 void (*crash_shutdown)(struct pt_regs *);
15107- void (*emergency_restart)(void);
15108-};
15109+ void (* __noreturn emergency_restart)(void);
15110+} __no_const;
15111
15112 extern struct machine_ops machine_ops;
15113
15114diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
15115index 2dbe4a7..ce1db00 100644
15116--- a/arch/x86/include/asm/rwsem.h
15117+++ b/arch/x86/include/asm/rwsem.h
15118@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
15119 {
15120 asm volatile("# beginning down_read\n\t"
15121 LOCK_PREFIX _ASM_INC "(%1)\n\t"
15122+
15123+#ifdef CONFIG_PAX_REFCOUNT
15124+ "jno 0f\n"
15125+ LOCK_PREFIX _ASM_DEC "(%1)\n"
15126+ "int $4\n0:\n"
15127+ _ASM_EXTABLE(0b, 0b)
15128+#endif
15129+
15130 /* adds 0x00000001 */
15131 " jns 1f\n"
15132 " call call_rwsem_down_read_failed\n"
15133@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
15134 "1:\n\t"
15135 " mov %1,%2\n\t"
15136 " add %3,%2\n\t"
15137+
15138+#ifdef CONFIG_PAX_REFCOUNT
15139+ "jno 0f\n"
15140+ "sub %3,%2\n"
15141+ "int $4\n0:\n"
15142+ _ASM_EXTABLE(0b, 0b)
15143+#endif
15144+
15145 " jle 2f\n\t"
15146 LOCK_PREFIX " cmpxchg %2,%0\n\t"
15147 " jnz 1b\n\t"
15148@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
15149 long tmp;
15150 asm volatile("# beginning down_write\n\t"
15151 LOCK_PREFIX " xadd %1,(%2)\n\t"
15152+
15153+#ifdef CONFIG_PAX_REFCOUNT
15154+ "jno 0f\n"
15155+ "mov %1,(%2)\n"
15156+ "int $4\n0:\n"
15157+ _ASM_EXTABLE(0b, 0b)
15158+#endif
15159+
15160 /* adds 0xffff0001, returns the old value */
15161 " test %1,%1\n\t"
15162 /* was the count 0 before? */
15163@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
15164 long tmp;
15165 asm volatile("# beginning __up_read\n\t"
15166 LOCK_PREFIX " xadd %1,(%2)\n\t"
15167+
15168+#ifdef CONFIG_PAX_REFCOUNT
15169+ "jno 0f\n"
15170+ "mov %1,(%2)\n"
15171+ "int $4\n0:\n"
15172+ _ASM_EXTABLE(0b, 0b)
15173+#endif
15174+
15175 /* subtracts 1, returns the old value */
15176 " jns 1f\n\t"
15177 " call call_rwsem_wake\n" /* expects old value in %edx */
15178@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
15179 long tmp;
15180 asm volatile("# beginning __up_write\n\t"
15181 LOCK_PREFIX " xadd %1,(%2)\n\t"
15182+
15183+#ifdef CONFIG_PAX_REFCOUNT
15184+ "jno 0f\n"
15185+ "mov %1,(%2)\n"
15186+ "int $4\n0:\n"
15187+ _ASM_EXTABLE(0b, 0b)
15188+#endif
15189+
15190 /* subtracts 0xffff0001, returns the old value */
15191 " jns 1f\n\t"
15192 " call call_rwsem_wake\n" /* expects old value in %edx */
15193@@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
15194 {
15195 asm volatile("# beginning __downgrade_write\n\t"
15196 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
15197+
15198+#ifdef CONFIG_PAX_REFCOUNT
15199+ "jno 0f\n"
15200+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
15201+ "int $4\n0:\n"
15202+ _ASM_EXTABLE(0b, 0b)
15203+#endif
15204+
15205 /*
15206 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
15207 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
15208@@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
15209 */
15210 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
15211 {
15212- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
15213+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
15214+
15215+#ifdef CONFIG_PAX_REFCOUNT
15216+ "jno 0f\n"
15217+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
15218+ "int $4\n0:\n"
15219+ _ASM_EXTABLE(0b, 0b)
15220+#endif
15221+
15222 : "+m" (sem->count)
15223 : "er" (delta));
15224 }
15225@@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
15226 */
15227 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
15228 {
15229- return delta + xadd(&sem->count, delta);
15230+ return delta + xadd_check_overflow(&sem->count, delta);
15231 }
15232
15233 #endif /* __KERNEL__ */
15234diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
15235index c48a950..c6d7468 100644
15236--- a/arch/x86/include/asm/segment.h
15237+++ b/arch/x86/include/asm/segment.h
15238@@ -64,10 +64,15 @@
15239 * 26 - ESPFIX small SS
15240 * 27 - per-cpu [ offset to per-cpu data area ]
15241 * 28 - stack_canary-20 [ for stack protector ]
15242- * 29 - unused
15243- * 30 - unused
15244+ * 29 - PCI BIOS CS
15245+ * 30 - PCI BIOS DS
15246 * 31 - TSS for double fault handler
15247 */
15248+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
15249+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
15250+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
15251+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
15252+
15253 #define GDT_ENTRY_TLS_MIN 6
15254 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
15255
15256@@ -79,6 +84,8 @@
15257
15258 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
15259
15260+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
15261+
15262 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
15263
15264 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
15265@@ -104,6 +111,12 @@
15266 #define __KERNEL_STACK_CANARY 0
15267 #endif
15268
15269+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
15270+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
15271+
15272+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
15273+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
15274+
15275 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
15276
15277 /*
15278@@ -141,7 +154,7 @@
15279 */
15280
15281 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
15282-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
15283+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
15284
15285
15286 #else
15287@@ -165,6 +178,8 @@
15288 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
15289 #define __USER32_DS __USER_DS
15290
15291+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
15292+
15293 #define GDT_ENTRY_TSS 8 /* needs two entries */
15294 #define GDT_ENTRY_LDT 10 /* needs two entries */
15295 #define GDT_ENTRY_TLS_MIN 12
15296@@ -185,6 +200,7 @@
15297 #endif
15298
15299 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
15300+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
15301 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
15302 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
15303 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
15304@@ -265,7 +281,7 @@ static inline unsigned long get_limit(unsigned long segment)
15305 {
15306 unsigned long __limit;
15307 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
15308- return __limit + 1;
15309+ return __limit;
15310 }
15311
15312 #endif /* !__ASSEMBLY__ */
15313diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
15314index b073aae..39f9bdd 100644
15315--- a/arch/x86/include/asm/smp.h
15316+++ b/arch/x86/include/asm/smp.h
15317@@ -36,7 +36,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
15318 /* cpus sharing the last level cache: */
15319 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
15320 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
15321-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
15322+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
15323
15324 static inline struct cpumask *cpu_sibling_mask(int cpu)
15325 {
15326@@ -79,7 +79,7 @@ struct smp_ops {
15327
15328 void (*send_call_func_ipi)(const struct cpumask *mask);
15329 void (*send_call_func_single_ipi)(int cpu);
15330-};
15331+} __no_const;
15332
15333 /* Globals due to paravirt */
15334 extern void set_cpu_sibling_map(int cpu);
15335@@ -191,14 +191,8 @@ extern unsigned disabled_cpus __cpuinitdata;
15336 extern int safe_smp_processor_id(void);
15337
15338 #elif defined(CONFIG_X86_64_SMP)
15339-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
15340-
15341-#define stack_smp_processor_id() \
15342-({ \
15343- struct thread_info *ti; \
15344- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
15345- ti->cpu; \
15346-})
15347+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
15348+#define stack_smp_processor_id() raw_smp_processor_id()
15349 #define safe_smp_processor_id() smp_processor_id()
15350
15351 #endif
15352diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
15353index 33692ea..350a534 100644
15354--- a/arch/x86/include/asm/spinlock.h
15355+++ b/arch/x86/include/asm/spinlock.h
15356@@ -172,6 +172,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
15357 static inline void arch_read_lock(arch_rwlock_t *rw)
15358 {
15359 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
15360+
15361+#ifdef CONFIG_PAX_REFCOUNT
15362+ "jno 0f\n"
15363+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
15364+ "int $4\n0:\n"
15365+ _ASM_EXTABLE(0b, 0b)
15366+#endif
15367+
15368 "jns 1f\n"
15369 "call __read_lock_failed\n\t"
15370 "1:\n"
15371@@ -181,6 +189,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
15372 static inline void arch_write_lock(arch_rwlock_t *rw)
15373 {
15374 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
15375+
15376+#ifdef CONFIG_PAX_REFCOUNT
15377+ "jno 0f\n"
15378+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
15379+ "int $4\n0:\n"
15380+ _ASM_EXTABLE(0b, 0b)
15381+#endif
15382+
15383 "jz 1f\n"
15384 "call __write_lock_failed\n\t"
15385 "1:\n"
15386@@ -210,13 +226,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
15387
15388 static inline void arch_read_unlock(arch_rwlock_t *rw)
15389 {
15390- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
15391+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
15392+
15393+#ifdef CONFIG_PAX_REFCOUNT
15394+ "jno 0f\n"
15395+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
15396+ "int $4\n0:\n"
15397+ _ASM_EXTABLE(0b, 0b)
15398+#endif
15399+
15400 :"+m" (rw->lock) : : "memory");
15401 }
15402
15403 static inline void arch_write_unlock(arch_rwlock_t *rw)
15404 {
15405- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
15406+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
15407+
15408+#ifdef CONFIG_PAX_REFCOUNT
15409+ "jno 0f\n"
15410+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
15411+ "int $4\n0:\n"
15412+ _ASM_EXTABLE(0b, 0b)
15413+#endif
15414+
15415 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
15416 }
15417
15418diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
15419index 6a99859..03cb807 100644
15420--- a/arch/x86/include/asm/stackprotector.h
15421+++ b/arch/x86/include/asm/stackprotector.h
15422@@ -47,7 +47,7 @@
15423 * head_32 for boot CPU and setup_per_cpu_areas() for others.
15424 */
15425 #define GDT_STACK_CANARY_INIT \
15426- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
15427+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
15428
15429 /*
15430 * Initialize the stackprotector canary value.
15431@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
15432
15433 static inline void load_stack_canary_segment(void)
15434 {
15435-#ifdef CONFIG_X86_32
15436+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
15437 asm volatile ("mov %0, %%gs" : : "r" (0));
15438 #endif
15439 }
15440diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
15441index 70bbe39..4ae2bd4 100644
15442--- a/arch/x86/include/asm/stacktrace.h
15443+++ b/arch/x86/include/asm/stacktrace.h
15444@@ -11,28 +11,20 @@
15445
15446 extern int kstack_depth_to_print;
15447
15448-struct thread_info;
15449+struct task_struct;
15450 struct stacktrace_ops;
15451
15452-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
15453- unsigned long *stack,
15454- unsigned long bp,
15455- const struct stacktrace_ops *ops,
15456- void *data,
15457- unsigned long *end,
15458- int *graph);
15459+typedef unsigned long walk_stack_t(struct task_struct *task,
15460+ void *stack_start,
15461+ unsigned long *stack,
15462+ unsigned long bp,
15463+ const struct stacktrace_ops *ops,
15464+ void *data,
15465+ unsigned long *end,
15466+ int *graph);
15467
15468-extern unsigned long
15469-print_context_stack(struct thread_info *tinfo,
15470- unsigned long *stack, unsigned long bp,
15471- const struct stacktrace_ops *ops, void *data,
15472- unsigned long *end, int *graph);
15473-
15474-extern unsigned long
15475-print_context_stack_bp(struct thread_info *tinfo,
15476- unsigned long *stack, unsigned long bp,
15477- const struct stacktrace_ops *ops, void *data,
15478- unsigned long *end, int *graph);
15479+extern walk_stack_t print_context_stack;
15480+extern walk_stack_t print_context_stack_bp;
15481
15482 /* Generic stack tracer with callbacks */
15483
15484@@ -40,7 +32,7 @@ struct stacktrace_ops {
15485 void (*address)(void *data, unsigned long address, int reliable);
15486 /* On negative return stop dumping */
15487 int (*stack)(void *data, char *name);
15488- walk_stack_t walk_stack;
15489+ walk_stack_t *walk_stack;
15490 };
15491
15492 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
15493diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
15494index 4ec45b3..a4f0a8a 100644
15495--- a/arch/x86/include/asm/switch_to.h
15496+++ b/arch/x86/include/asm/switch_to.h
15497@@ -108,7 +108,7 @@ do { \
15498 "call __switch_to\n\t" \
15499 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
15500 __switch_canary \
15501- "movq %P[thread_info](%%rsi),%%r8\n\t" \
15502+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
15503 "movq %%rax,%%rdi\n\t" \
15504 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
15505 "jnz ret_from_fork\n\t" \
15506@@ -119,7 +119,7 @@ do { \
15507 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
15508 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
15509 [_tif_fork] "i" (_TIF_FORK), \
15510- [thread_info] "i" (offsetof(struct task_struct, stack)), \
15511+ [thread_info] "m" (current_tinfo), \
15512 [current_task] "m" (current_task) \
15513 __switch_canary_iparam \
15514 : "memory", "cc" __EXTRA_CLOBBER)
15515diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
15516index 2cd056e..0224df8 100644
15517--- a/arch/x86/include/asm/thread_info.h
15518+++ b/arch/x86/include/asm/thread_info.h
15519@@ -10,6 +10,7 @@
15520 #include <linux/compiler.h>
15521 #include <asm/page.h>
15522 #include <asm/types.h>
15523+#include <asm/percpu.h>
15524
15525 /*
15526 * low level task data that entry.S needs immediate access to
15527@@ -23,7 +24,6 @@ struct exec_domain;
15528 #include <linux/atomic.h>
15529
15530 struct thread_info {
15531- struct task_struct *task; /* main task structure */
15532 struct exec_domain *exec_domain; /* execution domain */
15533 __u32 flags; /* low level flags */
15534 __u32 status; /* thread synchronous flags */
15535@@ -33,19 +33,13 @@ struct thread_info {
15536 mm_segment_t addr_limit;
15537 struct restart_block restart_block;
15538 void __user *sysenter_return;
15539-#ifdef CONFIG_X86_32
15540- unsigned long previous_esp; /* ESP of the previous stack in
15541- case of nested (IRQ) stacks
15542- */
15543- __u8 supervisor_stack[0];
15544-#endif
15545+ unsigned long lowest_stack;
15546 unsigned int sig_on_uaccess_error:1;
15547 unsigned int uaccess_err:1; /* uaccess failed */
15548 };
15549
15550-#define INIT_THREAD_INFO(tsk) \
15551+#define INIT_THREAD_INFO \
15552 { \
15553- .task = &tsk, \
15554 .exec_domain = &default_exec_domain, \
15555 .flags = 0, \
15556 .cpu = 0, \
15557@@ -56,7 +50,7 @@ struct thread_info {
15558 }, \
15559 }
15560
15561-#define init_thread_info (init_thread_union.thread_info)
15562+#define init_thread_info (init_thread_union.stack)
15563 #define init_stack (init_thread_union.stack)
15564
15565 #else /* !__ASSEMBLY__ */
15566@@ -97,6 +91,7 @@ struct thread_info {
15567 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
15568 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
15569 #define TIF_X32 30 /* 32-bit native x86-64 binary */
15570+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
15571
15572 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
15573 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
15574@@ -121,17 +116,18 @@ struct thread_info {
15575 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
15576 #define _TIF_ADDR32 (1 << TIF_ADDR32)
15577 #define _TIF_X32 (1 << TIF_X32)
15578+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
15579
15580 /* work to do in syscall_trace_enter() */
15581 #define _TIF_WORK_SYSCALL_ENTRY \
15582 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
15583 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
15584- _TIF_NOHZ)
15585+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
15586
15587 /* work to do in syscall_trace_leave() */
15588 #define _TIF_WORK_SYSCALL_EXIT \
15589 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
15590- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
15591+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
15592
15593 /* work to do on interrupt/exception return */
15594 #define _TIF_WORK_MASK \
15595@@ -142,7 +138,7 @@ struct thread_info {
15596 /* work to do on any return to user space */
15597 #define _TIF_ALLWORK_MASK \
15598 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
15599- _TIF_NOHZ)
15600+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
15601
15602 /* Only used for 64 bit */
15603 #define _TIF_DO_NOTIFY_MASK \
15604@@ -158,45 +154,40 @@ struct thread_info {
15605
15606 #define PREEMPT_ACTIVE 0x10000000
15607
15608-#ifdef CONFIG_X86_32
15609-
15610-#define STACK_WARN (THREAD_SIZE/8)
15611-/*
15612- * macros/functions for gaining access to the thread information structure
15613- *
15614- * preempt_count needs to be 1 initially, until the scheduler is functional.
15615- */
15616-#ifndef __ASSEMBLY__
15617-
15618-
15619-/* how to get the current stack pointer from C */
15620-register unsigned long current_stack_pointer asm("esp") __used;
15621-
15622-/* how to get the thread information struct from C */
15623-static inline struct thread_info *current_thread_info(void)
15624-{
15625- return (struct thread_info *)
15626- (current_stack_pointer & ~(THREAD_SIZE - 1));
15627-}
15628-
15629-#else /* !__ASSEMBLY__ */
15630-
15631+#ifdef __ASSEMBLY__
15632 /* how to get the thread information struct from ASM */
15633 #define GET_THREAD_INFO(reg) \
15634- movl $-THREAD_SIZE, reg; \
15635- andl %esp, reg
15636+ mov PER_CPU_VAR(current_tinfo), reg
15637
15638 /* use this one if reg already contains %esp */
15639-#define GET_THREAD_INFO_WITH_ESP(reg) \
15640- andl $-THREAD_SIZE, reg
15641+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
15642+#else
15643+/* how to get the thread information struct from C */
15644+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
15645+
15646+static __always_inline struct thread_info *current_thread_info(void)
15647+{
15648+ return this_cpu_read_stable(current_tinfo);
15649+}
15650+#endif
15651+
15652+#ifdef CONFIG_X86_32
15653+
15654+#define STACK_WARN (THREAD_SIZE/8)
15655+/*
15656+ * macros/functions for gaining access to the thread information structure
15657+ *
15658+ * preempt_count needs to be 1 initially, until the scheduler is functional.
15659+ */
15660+#ifndef __ASSEMBLY__
15661+
15662+/* how to get the current stack pointer from C */
15663+register unsigned long current_stack_pointer asm("esp") __used;
15664
15665 #endif
15666
15667 #else /* X86_32 */
15668
15669-#include <asm/percpu.h>
15670-#define KERNEL_STACK_OFFSET (5*8)
15671-
15672 /*
15673 * macros/functions for gaining access to the thread information structure
15674 * preempt_count needs to be 1 initially, until the scheduler is functional.
15675@@ -204,27 +195,8 @@ static inline struct thread_info *current_thread_info(void)
15676 #ifndef __ASSEMBLY__
15677 DECLARE_PER_CPU(unsigned long, kernel_stack);
15678
15679-static inline struct thread_info *current_thread_info(void)
15680-{
15681- struct thread_info *ti;
15682- ti = (void *)(this_cpu_read_stable(kernel_stack) +
15683- KERNEL_STACK_OFFSET - THREAD_SIZE);
15684- return ti;
15685-}
15686-
15687-#else /* !__ASSEMBLY__ */
15688-
15689-/* how to get the thread information struct from ASM */
15690-#define GET_THREAD_INFO(reg) \
15691- movq PER_CPU_VAR(kernel_stack),reg ; \
15692- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
15693-
15694-/*
15695- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
15696- * a certain register (to be used in assembler memory operands).
15697- */
15698-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
15699-
15700+/* how to get the current stack pointer from C */
15701+register unsigned long current_stack_pointer asm("rsp") __used;
15702 #endif
15703
15704 #endif /* !X86_32 */
15705@@ -285,5 +257,12 @@ static inline bool is_ia32_task(void)
15706 extern void arch_task_cache_init(void);
15707 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
15708 extern void arch_release_task_struct(struct task_struct *tsk);
15709+
15710+#define __HAVE_THREAD_FUNCTIONS
15711+#define task_thread_info(task) (&(task)->tinfo)
15712+#define task_stack_page(task) ((task)->stack)
15713+#define setup_thread_stack(p, org) do {} while (0)
15714+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
15715+
15716 #endif
15717 #endif /* _ASM_X86_THREAD_INFO_H */
15718diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
15719index 5ee2687..70d5895 100644
15720--- a/arch/x86/include/asm/uaccess.h
15721+++ b/arch/x86/include/asm/uaccess.h
15722@@ -7,6 +7,7 @@
15723 #include <linux/compiler.h>
15724 #include <linux/thread_info.h>
15725 #include <linux/string.h>
15726+#include <linux/sched.h>
15727 #include <asm/asm.h>
15728 #include <asm/page.h>
15729 #include <asm/smap.h>
15730@@ -29,7 +30,12 @@
15731
15732 #define get_ds() (KERNEL_DS)
15733 #define get_fs() (current_thread_info()->addr_limit)
15734+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15735+void __set_fs(mm_segment_t x);
15736+void set_fs(mm_segment_t x);
15737+#else
15738 #define set_fs(x) (current_thread_info()->addr_limit = (x))
15739+#endif
15740
15741 #define segment_eq(a, b) ((a).seg == (b).seg)
15742
15743@@ -77,8 +83,33 @@
15744 * checks that the pointer is in the user space range - after calling
15745 * this function, memory access functions may still return -EFAULT.
15746 */
15747-#define access_ok(type, addr, size) \
15748- (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
15749+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
15750+#define access_ok(type, addr, size) \
15751+({ \
15752+ long __size = size; \
15753+ unsigned long __addr = (unsigned long)addr; \
15754+ unsigned long __addr_ao = __addr & PAGE_MASK; \
15755+ unsigned long __end_ao = __addr + __size - 1; \
15756+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
15757+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
15758+ while(__addr_ao <= __end_ao) { \
15759+ char __c_ao; \
15760+ __addr_ao += PAGE_SIZE; \
15761+ if (__size > PAGE_SIZE) \
15762+ cond_resched(); \
15763+ if (__get_user(__c_ao, (char __user *)__addr)) \
15764+ break; \
15765+ if (type != VERIFY_WRITE) { \
15766+ __addr = __addr_ao; \
15767+ continue; \
15768+ } \
15769+ if (__put_user(__c_ao, (char __user *)__addr)) \
15770+ break; \
15771+ __addr = __addr_ao; \
15772+ } \
15773+ } \
15774+ __ret_ao; \
15775+})
15776
15777 /*
15778 * The exception table consists of pairs of addresses relative to the
15779@@ -176,13 +207,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
15780 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
15781 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
15782
15783-
15784+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15785+#define __copyuser_seg "gs;"
15786+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
15787+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
15788+#else
15789+#define __copyuser_seg
15790+#define __COPYUSER_SET_ES
15791+#define __COPYUSER_RESTORE_ES
15792+#endif
15793
15794 #ifdef CONFIG_X86_32
15795 #define __put_user_asm_u64(x, addr, err, errret) \
15796 asm volatile(ASM_STAC "\n" \
15797- "1: movl %%eax,0(%2)\n" \
15798- "2: movl %%edx,4(%2)\n" \
15799+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
15800+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
15801 "3: " ASM_CLAC "\n" \
15802 ".section .fixup,\"ax\"\n" \
15803 "4: movl %3,%0\n" \
15804@@ -195,8 +234,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
15805
15806 #define __put_user_asm_ex_u64(x, addr) \
15807 asm volatile(ASM_STAC "\n" \
15808- "1: movl %%eax,0(%1)\n" \
15809- "2: movl %%edx,4(%1)\n" \
15810+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
15811+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
15812 "3: " ASM_CLAC "\n" \
15813 _ASM_EXTABLE_EX(1b, 2b) \
15814 _ASM_EXTABLE_EX(2b, 3b) \
15815@@ -246,7 +285,7 @@ extern void __put_user_8(void);
15816 __typeof__(*(ptr)) __pu_val; \
15817 __chk_user_ptr(ptr); \
15818 might_fault(); \
15819- __pu_val = x; \
15820+ __pu_val = (x); \
15821 switch (sizeof(*(ptr))) { \
15822 case 1: \
15823 __put_user_x(1, __pu_val, ptr, __ret_pu); \
15824@@ -345,7 +384,7 @@ do { \
15825
15826 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
15827 asm volatile(ASM_STAC "\n" \
15828- "1: mov"itype" %2,%"rtype"1\n" \
15829+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
15830 "2: " ASM_CLAC "\n" \
15831 ".section .fixup,\"ax\"\n" \
15832 "3: mov %3,%0\n" \
15833@@ -353,7 +392,7 @@ do { \
15834 " jmp 2b\n" \
15835 ".previous\n" \
15836 _ASM_EXTABLE(1b, 3b) \
15837- : "=r" (err), ltype(x) \
15838+ : "=r" (err), ltype (x) \
15839 : "m" (__m(addr)), "i" (errret), "0" (err))
15840
15841 #define __get_user_size_ex(x, ptr, size) \
15842@@ -378,7 +417,7 @@ do { \
15843 } while (0)
15844
15845 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
15846- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
15847+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
15848 "2:\n" \
15849 _ASM_EXTABLE_EX(1b, 2b) \
15850 : ltype(x) : "m" (__m(addr)))
15851@@ -395,13 +434,24 @@ do { \
15852 int __gu_err; \
15853 unsigned long __gu_val; \
15854 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
15855- (x) = (__force __typeof__(*(ptr)))__gu_val; \
15856+ (x) = (__typeof__(*(ptr)))__gu_val; \
15857 __gu_err; \
15858 })
15859
15860 /* FIXME: this hack is definitely wrong -AK */
15861 struct __large_struct { unsigned long buf[100]; };
15862-#define __m(x) (*(struct __large_struct __user *)(x))
15863+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15864+#define ____m(x) \
15865+({ \
15866+ unsigned long ____x = (unsigned long)(x); \
15867+ if (____x < pax_user_shadow_base) \
15868+ ____x += pax_user_shadow_base; \
15869+ (typeof(x))____x; \
15870+})
15871+#else
15872+#define ____m(x) (x)
15873+#endif
15874+#define __m(x) (*(struct __large_struct __user *)____m(x))
15875
15876 /*
15877 * Tell gcc we read from memory instead of writing: this is because
15878@@ -410,7 +460,7 @@ struct __large_struct { unsigned long buf[100]; };
15879 */
15880 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
15881 asm volatile(ASM_STAC "\n" \
15882- "1: mov"itype" %"rtype"1,%2\n" \
15883+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
15884 "2: " ASM_CLAC "\n" \
15885 ".section .fixup,\"ax\"\n" \
15886 "3: mov %3,%0\n" \
15887@@ -418,10 +468,10 @@ struct __large_struct { unsigned long buf[100]; };
15888 ".previous\n" \
15889 _ASM_EXTABLE(1b, 3b) \
15890 : "=r"(err) \
15891- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
15892+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
15893
15894 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
15895- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
15896+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
15897 "2:\n" \
15898 _ASM_EXTABLE_EX(1b, 2b) \
15899 : : ltype(x), "m" (__m(addr)))
15900@@ -460,8 +510,12 @@ struct __large_struct { unsigned long buf[100]; };
15901 * On error, the variable @x is set to zero.
15902 */
15903
15904+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15905+#define __get_user(x, ptr) get_user((x), (ptr))
15906+#else
15907 #define __get_user(x, ptr) \
15908 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
15909+#endif
15910
15911 /**
15912 * __put_user: - Write a simple value into user space, with less checking.
15913@@ -483,8 +537,12 @@ struct __large_struct { unsigned long buf[100]; };
15914 * Returns zero on success, or -EFAULT on error.
15915 */
15916
15917+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15918+#define __put_user(x, ptr) put_user((x), (ptr))
15919+#else
15920 #define __put_user(x, ptr) \
15921 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
15922+#endif
15923
15924 #define __get_user_unaligned __get_user
15925 #define __put_user_unaligned __put_user
15926@@ -502,7 +560,7 @@ struct __large_struct { unsigned long buf[100]; };
15927 #define get_user_ex(x, ptr) do { \
15928 unsigned long __gue_val; \
15929 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
15930- (x) = (__force __typeof__(*(ptr)))__gue_val; \
15931+ (x) = (__typeof__(*(ptr)))__gue_val; \
15932 } while (0)
15933
15934 #define put_user_try uaccess_try
15935@@ -519,8 +577,8 @@ strncpy_from_user(char *dst, const char __user *src, long count);
15936 extern __must_check long strlen_user(const char __user *str);
15937 extern __must_check long strnlen_user(const char __user *str, long n);
15938
15939-unsigned long __must_check clear_user(void __user *mem, unsigned long len);
15940-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
15941+unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
15942+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
15943
15944 /*
15945 * movsl can be slow when source and dest are not both 8-byte aligned
15946diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
15947index 7f760a9..04b1c65 100644
15948--- a/arch/x86/include/asm/uaccess_32.h
15949+++ b/arch/x86/include/asm/uaccess_32.h
15950@@ -11,15 +11,15 @@
15951 #include <asm/page.h>
15952
15953 unsigned long __must_check __copy_to_user_ll
15954- (void __user *to, const void *from, unsigned long n);
15955+ (void __user *to, const void *from, unsigned long n) __size_overflow(3);
15956 unsigned long __must_check __copy_from_user_ll
15957- (void *to, const void __user *from, unsigned long n);
15958+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
15959 unsigned long __must_check __copy_from_user_ll_nozero
15960- (void *to, const void __user *from, unsigned long n);
15961+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
15962 unsigned long __must_check __copy_from_user_ll_nocache
15963- (void *to, const void __user *from, unsigned long n);
15964+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
15965 unsigned long __must_check __copy_from_user_ll_nocache_nozero
15966- (void *to, const void __user *from, unsigned long n);
15967+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
15968
15969 /**
15970 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
15971@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
15972 static __always_inline unsigned long __must_check
15973 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
15974 {
15975+ if ((long)n < 0)
15976+ return n;
15977+
15978+ check_object_size(from, n, true);
15979+
15980 if (__builtin_constant_p(n)) {
15981 unsigned long ret;
15982
15983@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
15984 __copy_to_user(void __user *to, const void *from, unsigned long n)
15985 {
15986 might_fault();
15987+
15988 return __copy_to_user_inatomic(to, from, n);
15989 }
15990
15991 static __always_inline unsigned long
15992 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
15993 {
15994+ if ((long)n < 0)
15995+ return n;
15996+
15997 /* Avoid zeroing the tail if the copy fails..
15998 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
15999 * but as the zeroing behaviour is only significant when n is not
16000@@ -137,6 +146,12 @@ static __always_inline unsigned long
16001 __copy_from_user(void *to, const void __user *from, unsigned long n)
16002 {
16003 might_fault();
16004+
16005+ if ((long)n < 0)
16006+ return n;
16007+
16008+ check_object_size(to, n, false);
16009+
16010 if (__builtin_constant_p(n)) {
16011 unsigned long ret;
16012
16013@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
16014 const void __user *from, unsigned long n)
16015 {
16016 might_fault();
16017+
16018+ if ((long)n < 0)
16019+ return n;
16020+
16021 if (__builtin_constant_p(n)) {
16022 unsigned long ret;
16023
16024@@ -181,15 +200,19 @@ static __always_inline unsigned long
16025 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
16026 unsigned long n)
16027 {
16028- return __copy_from_user_ll_nocache_nozero(to, from, n);
16029+ if ((long)n < 0)
16030+ return n;
16031+
16032+ return __copy_from_user_ll_nocache_nozero(to, from, n);
16033 }
16034
16035-unsigned long __must_check copy_to_user(void __user *to,
16036- const void *from, unsigned long n);
16037-unsigned long __must_check _copy_from_user(void *to,
16038- const void __user *from,
16039- unsigned long n);
16040-
16041+extern void copy_to_user_overflow(void)
16042+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
16043+ __compiletime_error("copy_to_user() buffer size is not provably correct")
16044+#else
16045+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
16046+#endif
16047+;
16048
16049 extern void copy_from_user_overflow(void)
16050 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
16051@@ -199,17 +222,60 @@ extern void copy_from_user_overflow(void)
16052 #endif
16053 ;
16054
16055-static inline unsigned long __must_check copy_from_user(void *to,
16056- const void __user *from,
16057- unsigned long n)
16058+/**
16059+ * copy_to_user: - Copy a block of data into user space.
16060+ * @to: Destination address, in user space.
16061+ * @from: Source address, in kernel space.
16062+ * @n: Number of bytes to copy.
16063+ *
16064+ * Context: User context only. This function may sleep.
16065+ *
16066+ * Copy data from kernel space to user space.
16067+ *
16068+ * Returns number of bytes that could not be copied.
16069+ * On success, this will be zero.
16070+ */
16071+static inline unsigned long __must_check
16072+copy_to_user(void __user *to, const void *from, unsigned long n)
16073 {
16074- int sz = __compiletime_object_size(to);
16075+ size_t sz = __compiletime_object_size(from);
16076
16077- if (likely(sz == -1 || sz >= n))
16078- n = _copy_from_user(to, from, n);
16079- else
16080+ if (unlikely(sz != (size_t)-1 && sz < n))
16081+ copy_to_user_overflow();
16082+ else if (access_ok(VERIFY_WRITE, to, n))
16083+ n = __copy_to_user(to, from, n);
16084+ return n;
16085+}
16086+
16087+/**
16088+ * copy_from_user: - Copy a block of data from user space.
16089+ * @to: Destination address, in kernel space.
16090+ * @from: Source address, in user space.
16091+ * @n: Number of bytes to copy.
16092+ *
16093+ * Context: User context only. This function may sleep.
16094+ *
16095+ * Copy data from user space to kernel space.
16096+ *
16097+ * Returns number of bytes that could not be copied.
16098+ * On success, this will be zero.
16099+ *
16100+ * If some data could not be copied, this function will pad the copied
16101+ * data to the requested size using zero bytes.
16102+ */
16103+static inline unsigned long __must_check
16104+copy_from_user(void *to, const void __user *from, unsigned long n)
16105+{
16106+ size_t sz = __compiletime_object_size(to);
16107+
16108+ check_object_size(to, n, false);
16109+
16110+ if (unlikely(sz != (size_t)-1 && sz < n))
16111 copy_from_user_overflow();
16112-
16113+ else if (access_ok(VERIFY_READ, from, n))
16114+ n = __copy_from_user(to, from, n);
16115+ else if ((long)n > 0)
16116+ memset(to, 0, n);
16117 return n;
16118 }
16119
16120diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
16121index 142810c..1f2a0a7 100644
16122--- a/arch/x86/include/asm/uaccess_64.h
16123+++ b/arch/x86/include/asm/uaccess_64.h
16124@@ -10,6 +10,9 @@
16125 #include <asm/alternative.h>
16126 #include <asm/cpufeature.h>
16127 #include <asm/page.h>
16128+#include <asm/pgtable.h>
16129+
16130+#define set_fs(x) (current_thread_info()->addr_limit = (x))
16131
16132 /*
16133 * Copy To/From Userspace
16134@@ -17,13 +20,13 @@
16135
16136 /* Handles exceptions in both to and from, but doesn't do access_ok */
16137 __must_check unsigned long
16138-copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
16139+copy_user_enhanced_fast_string(void *to, const void *from, unsigned len) __size_overflow(3);
16140 __must_check unsigned long
16141-copy_user_generic_string(void *to, const void *from, unsigned len);
16142+copy_user_generic_string(void *to, const void *from, unsigned len) __size_overflow(3);
16143 __must_check unsigned long
16144-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
16145+copy_user_generic_unrolled(void *to, const void *from, unsigned len) __size_overflow(3);
16146
16147-static __always_inline __must_check unsigned long
16148+static __always_inline __must_check __size_overflow(3) unsigned long
16149 copy_user_generic(void *to, const void *from, unsigned len)
16150 {
16151 unsigned ret;
16152@@ -41,142 +44,204 @@ copy_user_generic(void *to, const void *from, unsigned len)
16153 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
16154 "=d" (len)),
16155 "1" (to), "2" (from), "3" (len)
16156- : "memory", "rcx", "r8", "r9", "r10", "r11");
16157+ : "memory", "rcx", "r8", "r9", "r11");
16158 return ret;
16159 }
16160
16161+static __always_inline __must_check unsigned long
16162+__copy_to_user(void __user *to, const void *from, unsigned long len);
16163+static __always_inline __must_check unsigned long
16164+__copy_from_user(void *to, const void __user *from, unsigned long len);
16165 __must_check unsigned long
16166-_copy_to_user(void __user *to, const void *from, unsigned len);
16167-__must_check unsigned long
16168-_copy_from_user(void *to, const void __user *from, unsigned len);
16169-__must_check unsigned long
16170-copy_in_user(void __user *to, const void __user *from, unsigned len);
16171+copy_in_user(void __user *to, const void __user *from, unsigned long len);
16172+
16173+extern void copy_to_user_overflow(void)
16174+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
16175+ __compiletime_error("copy_to_user() buffer size is not provably correct")
16176+#else
16177+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
16178+#endif
16179+;
16180+
16181+extern void copy_from_user_overflow(void)
16182+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
16183+ __compiletime_error("copy_from_user() buffer size is not provably correct")
16184+#else
16185+ __compiletime_warning("copy_from_user() buffer size is not provably correct")
16186+#endif
16187+;
16188
16189 static inline unsigned long __must_check copy_from_user(void *to,
16190 const void __user *from,
16191 unsigned long n)
16192 {
16193- int sz = __compiletime_object_size(to);
16194-
16195 might_fault();
16196- if (likely(sz == -1 || sz >= n))
16197- n = _copy_from_user(to, from, n);
16198-#ifdef CONFIG_DEBUG_VM
16199- else
16200- WARN(1, "Buffer overflow detected!\n");
16201-#endif
16202+
16203+ check_object_size(to, n, false);
16204+
16205+ if (access_ok(VERIFY_READ, from, n))
16206+ n = __copy_from_user(to, from, n);
16207+ else if (n < INT_MAX)
16208+ memset(to, 0, n);
16209 return n;
16210 }
16211
16212 static __always_inline __must_check
16213-int copy_to_user(void __user *dst, const void *src, unsigned size)
16214+int copy_to_user(void __user *dst, const void *src, unsigned long size)
16215 {
16216 might_fault();
16217
16218- return _copy_to_user(dst, src, size);
16219+ if (access_ok(VERIFY_WRITE, dst, size))
16220+ size = __copy_to_user(dst, src, size);
16221+ return size;
16222 }
16223
16224 static __always_inline __must_check
16225-int __copy_from_user(void *dst, const void __user *src, unsigned size)
16226+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
16227 {
16228- int ret = 0;
16229+ size_t sz = __compiletime_object_size(dst);
16230+ unsigned ret = 0;
16231
16232 might_fault();
16233+
16234+ if (size > INT_MAX)
16235+ return size;
16236+
16237+ check_object_size(dst, size, false);
16238+
16239+#ifdef CONFIG_PAX_MEMORY_UDEREF
16240+ if (!__access_ok(VERIFY_READ, src, size))
16241+ return size;
16242+#endif
16243+
16244+ if (unlikely(sz != (size_t)-1 && sz < size)) {
16245+ copy_from_user_overflow();
16246+ return size;
16247+ }
16248+
16249 if (!__builtin_constant_p(size))
16250- return copy_user_generic(dst, (__force void *)src, size);
16251+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
16252 switch (size) {
16253- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
16254+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
16255 ret, "b", "b", "=q", 1);
16256 return ret;
16257- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
16258+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
16259 ret, "w", "w", "=r", 2);
16260 return ret;
16261- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
16262+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
16263 ret, "l", "k", "=r", 4);
16264 return ret;
16265- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
16266+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
16267 ret, "q", "", "=r", 8);
16268 return ret;
16269 case 10:
16270- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
16271+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
16272 ret, "q", "", "=r", 10);
16273 if (unlikely(ret))
16274 return ret;
16275 __get_user_asm(*(u16 *)(8 + (char *)dst),
16276- (u16 __user *)(8 + (char __user *)src),
16277+ (const u16 __user *)(8 + (const char __user *)src),
16278 ret, "w", "w", "=r", 2);
16279 return ret;
16280 case 16:
16281- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
16282+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
16283 ret, "q", "", "=r", 16);
16284 if (unlikely(ret))
16285 return ret;
16286 __get_user_asm(*(u64 *)(8 + (char *)dst),
16287- (u64 __user *)(8 + (char __user *)src),
16288+ (const u64 __user *)(8 + (const char __user *)src),
16289 ret, "q", "", "=r", 8);
16290 return ret;
16291 default:
16292- return copy_user_generic(dst, (__force void *)src, size);
16293+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
16294 }
16295 }
16296
16297 static __always_inline __must_check
16298-int __copy_to_user(void __user *dst, const void *src, unsigned size)
16299+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
16300 {
16301- int ret = 0;
16302+ size_t sz = __compiletime_object_size(src);
16303+ unsigned ret = 0;
16304
16305 might_fault();
16306+
16307+ if (size > INT_MAX)
16308+ return size;
16309+
16310+ check_object_size(src, size, true);
16311+
16312+#ifdef CONFIG_PAX_MEMORY_UDEREF
16313+ if (!__access_ok(VERIFY_WRITE, dst, size))
16314+ return size;
16315+#endif
16316+
16317+ if (unlikely(sz != (size_t)-1 && sz < size)) {
16318+ copy_to_user_overflow();
16319+ return size;
16320+ }
16321+
16322 if (!__builtin_constant_p(size))
16323- return copy_user_generic((__force void *)dst, src, size);
16324+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
16325 switch (size) {
16326- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
16327+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
16328 ret, "b", "b", "iq", 1);
16329 return ret;
16330- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
16331+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
16332 ret, "w", "w", "ir", 2);
16333 return ret;
16334- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
16335+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
16336 ret, "l", "k", "ir", 4);
16337 return ret;
16338- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
16339+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
16340 ret, "q", "", "er", 8);
16341 return ret;
16342 case 10:
16343- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
16344+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
16345 ret, "q", "", "er", 10);
16346 if (unlikely(ret))
16347 return ret;
16348 asm("":::"memory");
16349- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
16350+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
16351 ret, "w", "w", "ir", 2);
16352 return ret;
16353 case 16:
16354- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
16355+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
16356 ret, "q", "", "er", 16);
16357 if (unlikely(ret))
16358 return ret;
16359 asm("":::"memory");
16360- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
16361+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
16362 ret, "q", "", "er", 8);
16363 return ret;
16364 default:
16365- return copy_user_generic((__force void *)dst, src, size);
16366+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
16367 }
16368 }
16369
16370 static __always_inline __must_check
16371-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
16372+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
16373 {
16374- int ret = 0;
16375+ unsigned ret = 0;
16376
16377 might_fault();
16378+
16379+ if (size > INT_MAX)
16380+ return size;
16381+
16382+#ifdef CONFIG_PAX_MEMORY_UDEREF
16383+ if (!__access_ok(VERIFY_READ, src, size))
16384+ return size;
16385+ if (!__access_ok(VERIFY_WRITE, dst, size))
16386+ return size;
16387+#endif
16388+
16389 if (!__builtin_constant_p(size))
16390- return copy_user_generic((__force void *)dst,
16391- (__force void *)src, size);
16392+ return copy_user_generic((__force_kernel void *)____m(dst),
16393+ (__force_kernel const void *)____m(src), size);
16394 switch (size) {
16395 case 1: {
16396 u8 tmp;
16397- __get_user_asm(tmp, (u8 __user *)src,
16398+ __get_user_asm(tmp, (const u8 __user *)src,
16399 ret, "b", "b", "=q", 1);
16400 if (likely(!ret))
16401 __put_user_asm(tmp, (u8 __user *)dst,
16402@@ -185,7 +250,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
16403 }
16404 case 2: {
16405 u16 tmp;
16406- __get_user_asm(tmp, (u16 __user *)src,
16407+ __get_user_asm(tmp, (const u16 __user *)src,
16408 ret, "w", "w", "=r", 2);
16409 if (likely(!ret))
16410 __put_user_asm(tmp, (u16 __user *)dst,
16411@@ -195,7 +260,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
16412
16413 case 4: {
16414 u32 tmp;
16415- __get_user_asm(tmp, (u32 __user *)src,
16416+ __get_user_asm(tmp, (const u32 __user *)src,
16417 ret, "l", "k", "=r", 4);
16418 if (likely(!ret))
16419 __put_user_asm(tmp, (u32 __user *)dst,
16420@@ -204,7 +269,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
16421 }
16422 case 8: {
16423 u64 tmp;
16424- __get_user_asm(tmp, (u64 __user *)src,
16425+ __get_user_asm(tmp, (const u64 __user *)src,
16426 ret, "q", "", "=r", 8);
16427 if (likely(!ret))
16428 __put_user_asm(tmp, (u64 __user *)dst,
16429@@ -212,41 +277,72 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
16430 return ret;
16431 }
16432 default:
16433- return copy_user_generic((__force void *)dst,
16434- (__force void *)src, size);
16435+ return copy_user_generic((__force_kernel void *)____m(dst),
16436+ (__force_kernel const void *)____m(src), size);
16437 }
16438 }
16439
16440 static __must_check __always_inline int
16441-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
16442+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
16443 {
16444- return copy_user_generic(dst, (__force const void *)src, size);
16445+ if (size > INT_MAX)
16446+ return size;
16447+
16448+#ifdef CONFIG_PAX_MEMORY_UDEREF
16449+ if (!__access_ok(VERIFY_READ, src, size))
16450+ return size;
16451+#endif
16452+
16453+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
16454 }
16455
16456-static __must_check __always_inline int
16457-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
16458+static __must_check __always_inline unsigned long
16459+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
16460 {
16461- return copy_user_generic((__force void *)dst, src, size);
16462+ if (size > INT_MAX)
16463+ return size;
16464+
16465+#ifdef CONFIG_PAX_MEMORY_UDEREF
16466+ if (!__access_ok(VERIFY_WRITE, dst, size))
16467+ return size;
16468+#endif
16469+
16470+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
16471 }
16472
16473-extern long __copy_user_nocache(void *dst, const void __user *src,
16474- unsigned size, int zerorest);
16475+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
16476+ unsigned long size, int zerorest) __size_overflow(3);
16477
16478-static inline int
16479-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
16480+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
16481 {
16482 might_sleep();
16483+
16484+ if (size > INT_MAX)
16485+ return size;
16486+
16487+#ifdef CONFIG_PAX_MEMORY_UDEREF
16488+ if (!__access_ok(VERIFY_READ, src, size))
16489+ return size;
16490+#endif
16491+
16492 return __copy_user_nocache(dst, src, size, 1);
16493 }
16494
16495-static inline int
16496-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
16497- unsigned size)
16498+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
16499+ unsigned long size)
16500 {
16501+ if (size > INT_MAX)
16502+ return size;
16503+
16504+#ifdef CONFIG_PAX_MEMORY_UDEREF
16505+ if (!__access_ok(VERIFY_READ, src, size))
16506+ return size;
16507+#endif
16508+
16509 return __copy_user_nocache(dst, src, size, 0);
16510 }
16511
16512-unsigned long
16513-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
16514+extern unsigned long
16515+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
16516
16517 #endif /* _ASM_X86_UACCESS_64_H */
16518diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
16519index 5b238981..77fdd78 100644
16520--- a/arch/x86/include/asm/word-at-a-time.h
16521+++ b/arch/x86/include/asm/word-at-a-time.h
16522@@ -11,7 +11,7 @@
16523 * and shift, for example.
16524 */
16525 struct word_at_a_time {
16526- const unsigned long one_bits, high_bits;
16527+ unsigned long one_bits, high_bits;
16528 };
16529
16530 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
16531diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
16532index d8d9922..bf6cecb 100644
16533--- a/arch/x86/include/asm/x86_init.h
16534+++ b/arch/x86/include/asm/x86_init.h
16535@@ -129,7 +129,7 @@ struct x86_init_ops {
16536 struct x86_init_timers timers;
16537 struct x86_init_iommu iommu;
16538 struct x86_init_pci pci;
16539-};
16540+} __no_const;
16541
16542 /**
16543 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
16544@@ -140,7 +140,7 @@ struct x86_cpuinit_ops {
16545 void (*setup_percpu_clockev)(void);
16546 void (*early_percpu_clock_init)(void);
16547 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
16548-};
16549+} __no_const;
16550
16551 /**
16552 * struct x86_platform_ops - platform specific runtime functions
16553@@ -166,7 +166,7 @@ struct x86_platform_ops {
16554 void (*save_sched_clock_state)(void);
16555 void (*restore_sched_clock_state)(void);
16556 void (*apic_post_init)(void);
16557-};
16558+} __no_const;
16559
16560 struct pci_dev;
16561 struct msi_msg;
16562@@ -180,7 +180,7 @@ struct x86_msi_ops {
16563 void (*teardown_msi_irqs)(struct pci_dev *dev);
16564 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
16565 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
16566-};
16567+} __no_const;
16568
16569 struct IO_APIC_route_entry;
16570 struct io_apic_irq_attr;
16571@@ -201,7 +201,7 @@ struct x86_io_apic_ops {
16572 unsigned int destination, int vector,
16573 struct io_apic_irq_attr *attr);
16574 void (*eoi_ioapic_pin)(int apic, int pin, int vector);
16575-};
16576+} __no_const;
16577
16578 extern struct x86_init_ops x86_init;
16579 extern struct x86_cpuinit_ops x86_cpuinit;
16580diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
16581index 0415cda..b43d877 100644
16582--- a/arch/x86/include/asm/xsave.h
16583+++ b/arch/x86/include/asm/xsave.h
16584@@ -71,7 +71,9 @@ static inline int xsave_user(struct xsave_struct __user *buf)
16585 return -EFAULT;
16586
16587 __asm__ __volatile__(ASM_STAC "\n"
16588- "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
16589+ "1:"
16590+ __copyuser_seg
16591+ ".byte " REX_PREFIX "0x0f,0xae,0x27\n"
16592 "2: " ASM_CLAC "\n"
16593 ".section .fixup,\"ax\"\n"
16594 "3: movl $-1,%[err]\n"
16595@@ -87,12 +89,14 @@ static inline int xsave_user(struct xsave_struct __user *buf)
16596 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
16597 {
16598 int err;
16599- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
16600+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
16601 u32 lmask = mask;
16602 u32 hmask = mask >> 32;
16603
16604 __asm__ __volatile__(ASM_STAC "\n"
16605- "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
16606+ "1:"
16607+ __copyuser_seg
16608+ ".byte " REX_PREFIX "0x0f,0xae,0x2f\n"
16609 "2: " ASM_CLAC "\n"
16610 ".section .fixup,\"ax\"\n"
16611 "3: movl $-1,%[err]\n"
16612diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
16613index bbae024..e1528f9 100644
16614--- a/arch/x86/include/uapi/asm/e820.h
16615+++ b/arch/x86/include/uapi/asm/e820.h
16616@@ -63,7 +63,7 @@ struct e820map {
16617 #define ISA_START_ADDRESS 0xa0000
16618 #define ISA_END_ADDRESS 0x100000
16619
16620-#define BIOS_BEGIN 0x000a0000
16621+#define BIOS_BEGIN 0x000c0000
16622 #define BIOS_END 0x00100000
16623
16624 #define BIOS_ROM_BASE 0xffe00000
16625diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
16626index 7bd3bd3..5dac791 100644
16627--- a/arch/x86/kernel/Makefile
16628+++ b/arch/x86/kernel/Makefile
16629@@ -22,7 +22,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
16630 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
16631 obj-$(CONFIG_IRQ_WORK) += irq_work.o
16632 obj-y += probe_roms.o
16633-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
16634+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
16635 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
16636 obj-y += syscall_$(BITS).o
16637 obj-$(CONFIG_X86_64) += vsyscall_64.o
16638diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
16639index 230c8ea..f915130 100644
16640--- a/arch/x86/kernel/acpi/boot.c
16641+++ b/arch/x86/kernel/acpi/boot.c
16642@@ -1361,7 +1361,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
16643 * If your system is blacklisted here, but you find that acpi=force
16644 * works for you, please contact linux-acpi@vger.kernel.org
16645 */
16646-static struct dmi_system_id __initdata acpi_dmi_table[] = {
16647+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
16648 /*
16649 * Boxes that need ACPI disabled
16650 */
16651@@ -1436,7 +1436,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
16652 };
16653
16654 /* second table for DMI checks that should run after early-quirks */
16655-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
16656+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
16657 /*
16658 * HP laptops which use a DSDT reporting as HP/SB400/10000,
16659 * which includes some code which overrides all temperature
16660diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
16661index 0532f5d..36afc0a 100644
16662--- a/arch/x86/kernel/acpi/sleep.c
16663+++ b/arch/x86/kernel/acpi/sleep.c
16664@@ -74,8 +74,12 @@ int acpi_suspend_lowlevel(void)
16665 #else /* CONFIG_64BIT */
16666 #ifdef CONFIG_SMP
16667 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
16668+
16669+ pax_open_kernel();
16670 early_gdt_descr.address =
16671 (unsigned long)get_cpu_gdt_table(smp_processor_id());
16672+ pax_close_kernel();
16673+
16674 initial_gs = per_cpu_offset(smp_processor_id());
16675 #endif
16676 initial_code = (unsigned long)wakeup_long64;
16677diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
16678index 13ab720..95d5442 100644
16679--- a/arch/x86/kernel/acpi/wakeup_32.S
16680+++ b/arch/x86/kernel/acpi/wakeup_32.S
16681@@ -30,13 +30,11 @@ wakeup_pmode_return:
16682 # and restore the stack ... but you need gdt for this to work
16683 movl saved_context_esp, %esp
16684
16685- movl %cs:saved_magic, %eax
16686- cmpl $0x12345678, %eax
16687+ cmpl $0x12345678, saved_magic
16688 jne bogus_magic
16689
16690 # jump to place where we left off
16691- movl saved_eip, %eax
16692- jmp *%eax
16693+ jmp *(saved_eip)
16694
16695 bogus_magic:
16696 jmp bogus_magic
16697diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
16698index ef5ccca..bd83949 100644
16699--- a/arch/x86/kernel/alternative.c
16700+++ b/arch/x86/kernel/alternative.c
16701@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
16702 */
16703 for (a = start; a < end; a++) {
16704 instr = (u8 *)&a->instr_offset + a->instr_offset;
16705+
16706+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16707+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16708+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
16709+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16710+#endif
16711+
16712 replacement = (u8 *)&a->repl_offset + a->repl_offset;
16713 BUG_ON(a->replacementlen > a->instrlen);
16714 BUG_ON(a->instrlen > sizeof(insnbuf));
16715@@ -299,10 +306,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
16716 for (poff = start; poff < end; poff++) {
16717 u8 *ptr = (u8 *)poff + *poff;
16718
16719+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16720+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16721+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
16722+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16723+#endif
16724+
16725 if (!*poff || ptr < text || ptr >= text_end)
16726 continue;
16727 /* turn DS segment override prefix into lock prefix */
16728- if (*ptr == 0x3e)
16729+ if (*ktla_ktva(ptr) == 0x3e)
16730 text_poke(ptr, ((unsigned char []){0xf0}), 1);
16731 }
16732 mutex_unlock(&text_mutex);
16733@@ -317,10 +330,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
16734 for (poff = start; poff < end; poff++) {
16735 u8 *ptr = (u8 *)poff + *poff;
16736
16737+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16738+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16739+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
16740+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16741+#endif
16742+
16743 if (!*poff || ptr < text || ptr >= text_end)
16744 continue;
16745 /* turn lock prefix into DS segment override prefix */
16746- if (*ptr == 0xf0)
16747+ if (*ktla_ktva(ptr) == 0xf0)
16748 text_poke(ptr, ((unsigned char []){0x3E}), 1);
16749 }
16750 mutex_unlock(&text_mutex);
16751@@ -468,7 +487,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
16752
16753 BUG_ON(p->len > MAX_PATCH_LEN);
16754 /* prep the buffer with the original instructions */
16755- memcpy(insnbuf, p->instr, p->len);
16756+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
16757 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
16758 (unsigned long)p->instr, p->len);
16759
16760@@ -515,7 +534,7 @@ void __init alternative_instructions(void)
16761 if (!uniproc_patched || num_possible_cpus() == 1)
16762 free_init_pages("SMP alternatives",
16763 (unsigned long)__smp_locks,
16764- (unsigned long)__smp_locks_end);
16765+ PAGE_ALIGN((unsigned long)__smp_locks_end));
16766 #endif
16767
16768 apply_paravirt(__parainstructions, __parainstructions_end);
16769@@ -535,13 +554,17 @@ void __init alternative_instructions(void)
16770 * instructions. And on the local CPU you need to be protected again NMI or MCE
16771 * handlers seeing an inconsistent instruction while you patch.
16772 */
16773-void *__init_or_module text_poke_early(void *addr, const void *opcode,
16774+void *__kprobes text_poke_early(void *addr, const void *opcode,
16775 size_t len)
16776 {
16777 unsigned long flags;
16778 local_irq_save(flags);
16779- memcpy(addr, opcode, len);
16780+
16781+ pax_open_kernel();
16782+ memcpy(ktla_ktva(addr), opcode, len);
16783 sync_core();
16784+ pax_close_kernel();
16785+
16786 local_irq_restore(flags);
16787 /* Could also do a CLFLUSH here to speed up CPU recovery; but
16788 that causes hangs on some VIA CPUs. */
16789@@ -563,36 +586,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
16790 */
16791 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
16792 {
16793- unsigned long flags;
16794- char *vaddr;
16795+ unsigned char *vaddr = ktla_ktva(addr);
16796 struct page *pages[2];
16797- int i;
16798+ size_t i;
16799
16800 if (!core_kernel_text((unsigned long)addr)) {
16801- pages[0] = vmalloc_to_page(addr);
16802- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
16803+ pages[0] = vmalloc_to_page(vaddr);
16804+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
16805 } else {
16806- pages[0] = virt_to_page(addr);
16807+ pages[0] = virt_to_page(vaddr);
16808 WARN_ON(!PageReserved(pages[0]));
16809- pages[1] = virt_to_page(addr + PAGE_SIZE);
16810+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
16811 }
16812 BUG_ON(!pages[0]);
16813- local_irq_save(flags);
16814- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
16815- if (pages[1])
16816- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
16817- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
16818- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
16819- clear_fixmap(FIX_TEXT_POKE0);
16820- if (pages[1])
16821- clear_fixmap(FIX_TEXT_POKE1);
16822- local_flush_tlb();
16823- sync_core();
16824- /* Could also do a CLFLUSH here to speed up CPU recovery; but
16825- that causes hangs on some VIA CPUs. */
16826+ text_poke_early(addr, opcode, len);
16827 for (i = 0; i < len; i++)
16828- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
16829- local_irq_restore(flags);
16830+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
16831 return addr;
16832 }
16833
16834diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
16835index 904611b..004dde6 100644
16836--- a/arch/x86/kernel/apic/apic.c
16837+++ b/arch/x86/kernel/apic/apic.c
16838@@ -189,7 +189,7 @@ int first_system_vector = 0xfe;
16839 /*
16840 * Debug level, exported for io_apic.c
16841 */
16842-unsigned int apic_verbosity;
16843+int apic_verbosity;
16844
16845 int pic_mode;
16846
16847@@ -1955,7 +1955,7 @@ void smp_error_interrupt(struct pt_regs *regs)
16848 apic_write(APIC_ESR, 0);
16849 v1 = apic_read(APIC_ESR);
16850 ack_APIC_irq();
16851- atomic_inc(&irq_err_count);
16852+ atomic_inc_unchecked(&irq_err_count);
16853
16854 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
16855 smp_processor_id(), v0 , v1);
16856diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
16857index 00c77cf..2dc6a2d 100644
16858--- a/arch/x86/kernel/apic/apic_flat_64.c
16859+++ b/arch/x86/kernel/apic/apic_flat_64.c
16860@@ -157,7 +157,7 @@ static int flat_probe(void)
16861 return 1;
16862 }
16863
16864-static struct apic apic_flat = {
16865+static struct apic apic_flat __read_only = {
16866 .name = "flat",
16867 .probe = flat_probe,
16868 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
16869@@ -271,7 +271,7 @@ static int physflat_probe(void)
16870 return 0;
16871 }
16872
16873-static struct apic apic_physflat = {
16874+static struct apic apic_physflat __read_only = {
16875
16876 .name = "physical flat",
16877 .probe = physflat_probe,
16878diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
16879index e145f28..2752888 100644
16880--- a/arch/x86/kernel/apic/apic_noop.c
16881+++ b/arch/x86/kernel/apic/apic_noop.c
16882@@ -119,7 +119,7 @@ static void noop_apic_write(u32 reg, u32 v)
16883 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
16884 }
16885
16886-struct apic apic_noop = {
16887+struct apic apic_noop __read_only = {
16888 .name = "noop",
16889 .probe = noop_probe,
16890 .acpi_madt_oem_check = NULL,
16891diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
16892index d50e364..543bee3 100644
16893--- a/arch/x86/kernel/apic/bigsmp_32.c
16894+++ b/arch/x86/kernel/apic/bigsmp_32.c
16895@@ -152,7 +152,7 @@ static int probe_bigsmp(void)
16896 return dmi_bigsmp;
16897 }
16898
16899-static struct apic apic_bigsmp = {
16900+static struct apic apic_bigsmp __read_only = {
16901
16902 .name = "bigsmp",
16903 .probe = probe_bigsmp,
16904diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
16905index 0874799..a7a7892 100644
16906--- a/arch/x86/kernel/apic/es7000_32.c
16907+++ b/arch/x86/kernel/apic/es7000_32.c
16908@@ -608,8 +608,7 @@ static int es7000_mps_oem_check_cluster(struct mpc_table *mpc, char *oem,
16909 return ret && es7000_apic_is_cluster();
16910 }
16911
16912-/* We've been warned by a false positive warning.Use __refdata to keep calm. */
16913-static struct apic __refdata apic_es7000_cluster = {
16914+static struct apic apic_es7000_cluster __read_only = {
16915
16916 .name = "es7000",
16917 .probe = probe_es7000,
16918@@ -675,7 +674,7 @@ static struct apic __refdata apic_es7000_cluster = {
16919 .x86_32_early_logical_apicid = es7000_early_logical_apicid,
16920 };
16921
16922-static struct apic __refdata apic_es7000 = {
16923+static struct apic apic_es7000 __read_only = {
16924
16925 .name = "es7000",
16926 .probe = probe_es7000,
16927diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
16928index 9ed796c..e930fe4 100644
16929--- a/arch/x86/kernel/apic/io_apic.c
16930+++ b/arch/x86/kernel/apic/io_apic.c
16931@@ -1060,7 +1060,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
16932 }
16933 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
16934
16935-void lock_vector_lock(void)
16936+void lock_vector_lock(void) __acquires(vector_lock)
16937 {
16938 /* Used to the online set of cpus does not change
16939 * during assign_irq_vector.
16940@@ -1068,7 +1068,7 @@ void lock_vector_lock(void)
16941 raw_spin_lock(&vector_lock);
16942 }
16943
16944-void unlock_vector_lock(void)
16945+void unlock_vector_lock(void) __releases(vector_lock)
16946 {
16947 raw_spin_unlock(&vector_lock);
16948 }
16949@@ -2362,7 +2362,7 @@ static void ack_apic_edge(struct irq_data *data)
16950 ack_APIC_irq();
16951 }
16952
16953-atomic_t irq_mis_count;
16954+atomic_unchecked_t irq_mis_count;
16955
16956 #ifdef CONFIG_GENERIC_PENDING_IRQ
16957 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
16958@@ -2503,7 +2503,7 @@ static void ack_apic_level(struct irq_data *data)
16959 * at the cpu.
16960 */
16961 if (!(v & (1 << (i & 0x1f)))) {
16962- atomic_inc(&irq_mis_count);
16963+ atomic_inc_unchecked(&irq_mis_count);
16964
16965 eoi_ioapic_irq(irq, cfg);
16966 }
16967diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
16968index d661ee9..791fd33 100644
16969--- a/arch/x86/kernel/apic/numaq_32.c
16970+++ b/arch/x86/kernel/apic/numaq_32.c
16971@@ -455,8 +455,7 @@ static void numaq_setup_portio_remap(void)
16972 (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
16973 }
16974
16975-/* Use __refdata to keep false positive warning calm. */
16976-static struct apic __refdata apic_numaq = {
16977+static struct apic apic_numaq __read_only = {
16978
16979 .name = "NUMAQ",
16980 .probe = probe_numaq,
16981diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
16982index eb35ef9..f184a21 100644
16983--- a/arch/x86/kernel/apic/probe_32.c
16984+++ b/arch/x86/kernel/apic/probe_32.c
16985@@ -72,7 +72,7 @@ static int probe_default(void)
16986 return 1;
16987 }
16988
16989-static struct apic apic_default = {
16990+static struct apic apic_default __read_only = {
16991
16992 .name = "default",
16993 .probe = probe_default,
16994diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
16995index 77c95c0..434f8a4 100644
16996--- a/arch/x86/kernel/apic/summit_32.c
16997+++ b/arch/x86/kernel/apic/summit_32.c
16998@@ -486,7 +486,7 @@ void setup_summit(void)
16999 }
17000 #endif
17001
17002-static struct apic apic_summit = {
17003+static struct apic apic_summit __read_only = {
17004
17005 .name = "summit",
17006 .probe = probe_summit,
17007diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
17008index c88baa4..757aee1 100644
17009--- a/arch/x86/kernel/apic/x2apic_cluster.c
17010+++ b/arch/x86/kernel/apic/x2apic_cluster.c
17011@@ -183,7 +183,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
17012 return notifier_from_errno(err);
17013 }
17014
17015-static struct notifier_block __refdata x2apic_cpu_notifier = {
17016+static struct notifier_block x2apic_cpu_notifier = {
17017 .notifier_call = update_clusterinfo,
17018 };
17019
17020@@ -235,7 +235,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
17021 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
17022 }
17023
17024-static struct apic apic_x2apic_cluster = {
17025+static struct apic apic_x2apic_cluster __read_only = {
17026
17027 .name = "cluster x2apic",
17028 .probe = x2apic_cluster_probe,
17029diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
17030index 562a76d..a003c0f 100644
17031--- a/arch/x86/kernel/apic/x2apic_phys.c
17032+++ b/arch/x86/kernel/apic/x2apic_phys.c
17033@@ -89,7 +89,7 @@ static int x2apic_phys_probe(void)
17034 return apic == &apic_x2apic_phys;
17035 }
17036
17037-static struct apic apic_x2apic_phys = {
17038+static struct apic apic_x2apic_phys __read_only = {
17039
17040 .name = "physical x2apic",
17041 .probe = x2apic_phys_probe,
17042diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
17043index 794f6eb..67e1db2 100644
17044--- a/arch/x86/kernel/apic/x2apic_uv_x.c
17045+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
17046@@ -342,7 +342,7 @@ static int uv_probe(void)
17047 return apic == &apic_x2apic_uv_x;
17048 }
17049
17050-static struct apic __refdata apic_x2apic_uv_x = {
17051+static struct apic apic_x2apic_uv_x __read_only = {
17052
17053 .name = "UV large system",
17054 .probe = uv_probe,
17055diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
17056index 66b5faf..3442423 100644
17057--- a/arch/x86/kernel/apm_32.c
17058+++ b/arch/x86/kernel/apm_32.c
17059@@ -434,7 +434,7 @@ static DEFINE_MUTEX(apm_mutex);
17060 * This is for buggy BIOS's that refer to (real mode) segment 0x40
17061 * even though they are called in protected mode.
17062 */
17063-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
17064+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
17065 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
17066
17067 static const char driver_version[] = "1.16ac"; /* no spaces */
17068@@ -612,7 +612,10 @@ static long __apm_bios_call(void *_call)
17069 BUG_ON(cpu != 0);
17070 gdt = get_cpu_gdt_table(cpu);
17071 save_desc_40 = gdt[0x40 / 8];
17072+
17073+ pax_open_kernel();
17074 gdt[0x40 / 8] = bad_bios_desc;
17075+ pax_close_kernel();
17076
17077 apm_irq_save(flags);
17078 APM_DO_SAVE_SEGS;
17079@@ -621,7 +624,11 @@ static long __apm_bios_call(void *_call)
17080 &call->esi);
17081 APM_DO_RESTORE_SEGS;
17082 apm_irq_restore(flags);
17083+
17084+ pax_open_kernel();
17085 gdt[0x40 / 8] = save_desc_40;
17086+ pax_close_kernel();
17087+
17088 put_cpu();
17089
17090 return call->eax & 0xff;
17091@@ -688,7 +695,10 @@ static long __apm_bios_call_simple(void *_call)
17092 BUG_ON(cpu != 0);
17093 gdt = get_cpu_gdt_table(cpu);
17094 save_desc_40 = gdt[0x40 / 8];
17095+
17096+ pax_open_kernel();
17097 gdt[0x40 / 8] = bad_bios_desc;
17098+ pax_close_kernel();
17099
17100 apm_irq_save(flags);
17101 APM_DO_SAVE_SEGS;
17102@@ -696,7 +706,11 @@ static long __apm_bios_call_simple(void *_call)
17103 &call->eax);
17104 APM_DO_RESTORE_SEGS;
17105 apm_irq_restore(flags);
17106+
17107+ pax_open_kernel();
17108 gdt[0x40 / 8] = save_desc_40;
17109+ pax_close_kernel();
17110+
17111 put_cpu();
17112 return error;
17113 }
17114@@ -2363,12 +2377,15 @@ static int __init apm_init(void)
17115 * code to that CPU.
17116 */
17117 gdt = get_cpu_gdt_table(0);
17118+
17119+ pax_open_kernel();
17120 set_desc_base(&gdt[APM_CS >> 3],
17121 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
17122 set_desc_base(&gdt[APM_CS_16 >> 3],
17123 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
17124 set_desc_base(&gdt[APM_DS >> 3],
17125 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
17126+ pax_close_kernel();
17127
17128 proc_create("apm", 0, NULL, &apm_file_ops);
17129
17130diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
17131index 2861082..6d4718e 100644
17132--- a/arch/x86/kernel/asm-offsets.c
17133+++ b/arch/x86/kernel/asm-offsets.c
17134@@ -33,6 +33,8 @@ void common(void) {
17135 OFFSET(TI_status, thread_info, status);
17136 OFFSET(TI_addr_limit, thread_info, addr_limit);
17137 OFFSET(TI_preempt_count, thread_info, preempt_count);
17138+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
17139+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
17140
17141 BLANK();
17142 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
17143@@ -53,8 +55,26 @@ void common(void) {
17144 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
17145 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
17146 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
17147+
17148+#ifdef CONFIG_PAX_KERNEXEC
17149+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
17150 #endif
17151
17152+#ifdef CONFIG_PAX_MEMORY_UDEREF
17153+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
17154+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
17155+#ifdef CONFIG_X86_64
17156+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
17157+#endif
17158+#endif
17159+
17160+#endif
17161+
17162+ BLANK();
17163+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
17164+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
17165+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
17166+
17167 #ifdef CONFIG_XEN
17168 BLANK();
17169 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
17170diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
17171index 1b4754f..fbb4227 100644
17172--- a/arch/x86/kernel/asm-offsets_64.c
17173+++ b/arch/x86/kernel/asm-offsets_64.c
17174@@ -76,6 +76,7 @@ int main(void)
17175 BLANK();
17176 #undef ENTRY
17177
17178+ DEFINE(TSS_size, sizeof(struct tss_struct));
17179 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
17180 BLANK();
17181
17182diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
17183index a0e067d..9c7db16 100644
17184--- a/arch/x86/kernel/cpu/Makefile
17185+++ b/arch/x86/kernel/cpu/Makefile
17186@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
17187 CFLAGS_REMOVE_perf_event.o = -pg
17188 endif
17189
17190-# Make sure load_percpu_segment has no stackprotector
17191-nostackp := $(call cc-option, -fno-stack-protector)
17192-CFLAGS_common.o := $(nostackp)
17193-
17194 obj-y := intel_cacheinfo.o scattered.o topology.o
17195 obj-y += proc.o capflags.o powerflags.o common.o
17196 obj-y += vmware.o hypervisor.o mshyperv.o
17197diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
17198index fa96eb0..03efe73 100644
17199--- a/arch/x86/kernel/cpu/amd.c
17200+++ b/arch/x86/kernel/cpu/amd.c
17201@@ -737,7 +737,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
17202 unsigned int size)
17203 {
17204 /* AMD errata T13 (order #21922) */
17205- if ((c->x86 == 6)) {
17206+ if (c->x86 == 6) {
17207 /* Duron Rev A0 */
17208 if (c->x86_model == 3 && c->x86_mask == 0)
17209 size = 64;
17210diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
17211index d814772..c615653 100644
17212--- a/arch/x86/kernel/cpu/common.c
17213+++ b/arch/x86/kernel/cpu/common.c
17214@@ -88,60 +88,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
17215
17216 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
17217
17218-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
17219-#ifdef CONFIG_X86_64
17220- /*
17221- * We need valid kernel segments for data and code in long mode too
17222- * IRET will check the segment types kkeil 2000/10/28
17223- * Also sysret mandates a special GDT layout
17224- *
17225- * TLS descriptors are currently at a different place compared to i386.
17226- * Hopefully nobody expects them at a fixed place (Wine?)
17227- */
17228- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
17229- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
17230- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
17231- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
17232- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
17233- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
17234-#else
17235- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
17236- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
17237- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
17238- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
17239- /*
17240- * Segments used for calling PnP BIOS have byte granularity.
17241- * They code segments and data segments have fixed 64k limits,
17242- * the transfer segment sizes are set at run time.
17243- */
17244- /* 32-bit code */
17245- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
17246- /* 16-bit code */
17247- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
17248- /* 16-bit data */
17249- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
17250- /* 16-bit data */
17251- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
17252- /* 16-bit data */
17253- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
17254- /*
17255- * The APM segments have byte granularity and their bases
17256- * are set at run time. All have 64k limits.
17257- */
17258- /* 32-bit code */
17259- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
17260- /* 16-bit code */
17261- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
17262- /* data */
17263- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
17264-
17265- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
17266- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
17267- GDT_STACK_CANARY_INIT
17268-#endif
17269-} };
17270-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
17271-
17272 static int __init x86_xsave_setup(char *s)
17273 {
17274 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
17275@@ -386,7 +332,7 @@ void switch_to_new_gdt(int cpu)
17276 {
17277 struct desc_ptr gdt_descr;
17278
17279- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
17280+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
17281 gdt_descr.size = GDT_SIZE - 1;
17282 load_gdt(&gdt_descr);
17283 /* Reload the per-cpu base */
17284@@ -882,6 +828,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
17285 /* Filter out anything that depends on CPUID levels we don't have */
17286 filter_cpuid_features(c, true);
17287
17288+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
17289+ setup_clear_cpu_cap(X86_FEATURE_SEP);
17290+#endif
17291+
17292 /* If the model name is still unset, do table lookup. */
17293 if (!c->x86_model_id[0]) {
17294 const char *p;
17295@@ -1065,10 +1015,12 @@ static __init int setup_disablecpuid(char *arg)
17296 }
17297 __setup("clearcpuid=", setup_disablecpuid);
17298
17299+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
17300+EXPORT_PER_CPU_SYMBOL(current_tinfo);
17301+
17302 #ifdef CONFIG_X86_64
17303 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
17304-struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
17305- (unsigned long) nmi_idt_table };
17306+struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
17307
17308 DEFINE_PER_CPU_FIRST(union irq_stack_union,
17309 irq_stack_union) __aligned(PAGE_SIZE);
17310@@ -1082,7 +1034,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
17311 EXPORT_PER_CPU_SYMBOL(current_task);
17312
17313 DEFINE_PER_CPU(unsigned long, kernel_stack) =
17314- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
17315+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
17316 EXPORT_PER_CPU_SYMBOL(kernel_stack);
17317
17318 DEFINE_PER_CPU(char *, irq_stack_ptr) =
17319@@ -1227,7 +1179,7 @@ void __cpuinit cpu_init(void)
17320 load_ucode_ap();
17321
17322 cpu = stack_smp_processor_id();
17323- t = &per_cpu(init_tss, cpu);
17324+ t = init_tss + cpu;
17325 oist = &per_cpu(orig_ist, cpu);
17326
17327 #ifdef CONFIG_NUMA
17328@@ -1253,7 +1205,7 @@ void __cpuinit cpu_init(void)
17329 switch_to_new_gdt(cpu);
17330 loadsegment(fs, 0);
17331
17332- load_idt((const struct desc_ptr *)&idt_descr);
17333+ load_idt(&idt_descr);
17334
17335 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
17336 syscall_init();
17337@@ -1262,7 +1214,6 @@ void __cpuinit cpu_init(void)
17338 wrmsrl(MSR_KERNEL_GS_BASE, 0);
17339 barrier();
17340
17341- x86_configure_nx();
17342 enable_x2apic();
17343
17344 /*
17345@@ -1314,7 +1265,7 @@ void __cpuinit cpu_init(void)
17346 {
17347 int cpu = smp_processor_id();
17348 struct task_struct *curr = current;
17349- struct tss_struct *t = &per_cpu(init_tss, cpu);
17350+ struct tss_struct *t = init_tss + cpu;
17351 struct thread_struct *thread = &curr->thread;
17352
17353 show_ucode_info_early();
17354diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
17355index 1905ce9..a7ac587 100644
17356--- a/arch/x86/kernel/cpu/intel.c
17357+++ b/arch/x86/kernel/cpu/intel.c
17358@@ -173,7 +173,7 @@ static void __cpuinit trap_init_f00f_bug(void)
17359 * Update the IDT descriptor and reload the IDT so that
17360 * it uses the read-only mapped virtual address.
17361 */
17362- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
17363+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
17364 load_idt(&idt_descr);
17365 }
17366 #endif
17367diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
17368index 7c6f7d5..8cac382 100644
17369--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
17370+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
17371@@ -1017,6 +1017,22 @@ static struct attribute *default_attrs[] = {
17372 };
17373
17374 #ifdef CONFIG_AMD_NB
17375+static struct attribute *default_attrs_amd_nb[] = {
17376+ &type.attr,
17377+ &level.attr,
17378+ &coherency_line_size.attr,
17379+ &physical_line_partition.attr,
17380+ &ways_of_associativity.attr,
17381+ &number_of_sets.attr,
17382+ &size.attr,
17383+ &shared_cpu_map.attr,
17384+ &shared_cpu_list.attr,
17385+ NULL,
17386+ NULL,
17387+ NULL,
17388+ NULL
17389+};
17390+
17391 static struct attribute ** __cpuinit amd_l3_attrs(void)
17392 {
17393 static struct attribute **attrs;
17394@@ -1027,18 +1043,7 @@ static struct attribute ** __cpuinit amd_l3_attrs(void)
17395
17396 n = ARRAY_SIZE(default_attrs);
17397
17398- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
17399- n += 2;
17400-
17401- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
17402- n += 1;
17403-
17404- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
17405- if (attrs == NULL)
17406- return attrs = default_attrs;
17407-
17408- for (n = 0; default_attrs[n]; n++)
17409- attrs[n] = default_attrs[n];
17410+ attrs = default_attrs_amd_nb;
17411
17412 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
17413 attrs[n++] = &cache_disable_0.attr;
17414@@ -1089,6 +1094,13 @@ static struct kobj_type ktype_cache = {
17415 .default_attrs = default_attrs,
17416 };
17417
17418+#ifdef CONFIG_AMD_NB
17419+static struct kobj_type ktype_cache_amd_nb = {
17420+ .sysfs_ops = &sysfs_ops,
17421+ .default_attrs = default_attrs_amd_nb,
17422+};
17423+#endif
17424+
17425 static struct kobj_type ktype_percpu_entry = {
17426 .sysfs_ops = &sysfs_ops,
17427 };
17428@@ -1154,20 +1166,26 @@ static int __cpuinit cache_add_dev(struct device *dev)
17429 return retval;
17430 }
17431
17432+#ifdef CONFIG_AMD_NB
17433+ amd_l3_attrs();
17434+#endif
17435+
17436 for (i = 0; i < num_cache_leaves; i++) {
17437+ struct kobj_type *ktype;
17438+
17439 this_object = INDEX_KOBJECT_PTR(cpu, i);
17440 this_object->cpu = cpu;
17441 this_object->index = i;
17442
17443 this_leaf = CPUID4_INFO_IDX(cpu, i);
17444
17445- ktype_cache.default_attrs = default_attrs;
17446+ ktype = &ktype_cache;
17447 #ifdef CONFIG_AMD_NB
17448 if (this_leaf->base.nb)
17449- ktype_cache.default_attrs = amd_l3_attrs();
17450+ ktype = &ktype_cache_amd_nb;
17451 #endif
17452 retval = kobject_init_and_add(&(this_object->kobj),
17453- &ktype_cache,
17454+ ktype,
17455 per_cpu(ici_cache_kobject, cpu),
17456 "index%1lu", i);
17457 if (unlikely(retval)) {
17458@@ -1222,7 +1240,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
17459 return NOTIFY_OK;
17460 }
17461
17462-static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
17463+static struct notifier_block cacheinfo_cpu_notifier = {
17464 .notifier_call = cacheinfo_cpu_callback,
17465 };
17466
17467diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
17468index 7bc1263..bff5686 100644
17469--- a/arch/x86/kernel/cpu/mcheck/mce.c
17470+++ b/arch/x86/kernel/cpu/mcheck/mce.c
17471@@ -45,6 +45,7 @@
17472 #include <asm/processor.h>
17473 #include <asm/mce.h>
17474 #include <asm/msr.h>
17475+#include <asm/local.h>
17476
17477 #include "mce-internal.h"
17478
17479@@ -246,7 +247,7 @@ static void print_mce(struct mce *m)
17480 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
17481 m->cs, m->ip);
17482
17483- if (m->cs == __KERNEL_CS)
17484+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
17485 print_symbol("{%s}", m->ip);
17486 pr_cont("\n");
17487 }
17488@@ -279,10 +280,10 @@ static void print_mce(struct mce *m)
17489
17490 #define PANIC_TIMEOUT 5 /* 5 seconds */
17491
17492-static atomic_t mce_paniced;
17493+static atomic_unchecked_t mce_paniced;
17494
17495 static int fake_panic;
17496-static atomic_t mce_fake_paniced;
17497+static atomic_unchecked_t mce_fake_paniced;
17498
17499 /* Panic in progress. Enable interrupts and wait for final IPI */
17500 static void wait_for_panic(void)
17501@@ -306,7 +307,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
17502 /*
17503 * Make sure only one CPU runs in machine check panic
17504 */
17505- if (atomic_inc_return(&mce_paniced) > 1)
17506+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
17507 wait_for_panic();
17508 barrier();
17509
17510@@ -314,7 +315,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
17511 console_verbose();
17512 } else {
17513 /* Don't log too much for fake panic */
17514- if (atomic_inc_return(&mce_fake_paniced) > 1)
17515+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
17516 return;
17517 }
17518 /* First print corrected ones that are still unlogged */
17519@@ -353,7 +354,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
17520 if (!fake_panic) {
17521 if (panic_timeout == 0)
17522 panic_timeout = mca_cfg.panic_timeout;
17523- panic(msg);
17524+ panic("%s", msg);
17525 } else
17526 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
17527 }
17528@@ -683,7 +684,7 @@ static int mce_timed_out(u64 *t)
17529 * might have been modified by someone else.
17530 */
17531 rmb();
17532- if (atomic_read(&mce_paniced))
17533+ if (atomic_read_unchecked(&mce_paniced))
17534 wait_for_panic();
17535 if (!mca_cfg.monarch_timeout)
17536 goto out;
17537@@ -1654,7 +1655,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
17538 }
17539
17540 /* Call the installed machine check handler for this CPU setup. */
17541-void (*machine_check_vector)(struct pt_regs *, long error_code) =
17542+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
17543 unexpected_machine_check;
17544
17545 /*
17546@@ -1677,7 +1678,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
17547 return;
17548 }
17549
17550+ pax_open_kernel();
17551 machine_check_vector = do_machine_check;
17552+ pax_close_kernel();
17553
17554 __mcheck_cpu_init_generic();
17555 __mcheck_cpu_init_vendor(c);
17556@@ -1691,7 +1694,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
17557 */
17558
17559 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
17560-static int mce_chrdev_open_count; /* #times opened */
17561+static local_t mce_chrdev_open_count; /* #times opened */
17562 static int mce_chrdev_open_exclu; /* already open exclusive? */
17563
17564 static int mce_chrdev_open(struct inode *inode, struct file *file)
17565@@ -1699,7 +1702,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
17566 spin_lock(&mce_chrdev_state_lock);
17567
17568 if (mce_chrdev_open_exclu ||
17569- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
17570+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
17571 spin_unlock(&mce_chrdev_state_lock);
17572
17573 return -EBUSY;
17574@@ -1707,7 +1710,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
17575
17576 if (file->f_flags & O_EXCL)
17577 mce_chrdev_open_exclu = 1;
17578- mce_chrdev_open_count++;
17579+ local_inc(&mce_chrdev_open_count);
17580
17581 spin_unlock(&mce_chrdev_state_lock);
17582
17583@@ -1718,7 +1721,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
17584 {
17585 spin_lock(&mce_chrdev_state_lock);
17586
17587- mce_chrdev_open_count--;
17588+ local_dec(&mce_chrdev_open_count);
17589 mce_chrdev_open_exclu = 0;
17590
17591 spin_unlock(&mce_chrdev_state_lock);
17592@@ -2364,7 +2367,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
17593 return NOTIFY_OK;
17594 }
17595
17596-static struct notifier_block mce_cpu_notifier __cpuinitdata = {
17597+static struct notifier_block mce_cpu_notifier = {
17598 .notifier_call = mce_cpu_callback,
17599 };
17600
17601@@ -2374,7 +2377,7 @@ static __init void mce_init_banks(void)
17602
17603 for (i = 0; i < mca_cfg.banks; i++) {
17604 struct mce_bank *b = &mce_banks[i];
17605- struct device_attribute *a = &b->attr;
17606+ device_attribute_no_const *a = &b->attr;
17607
17608 sysfs_attr_init(&a->attr);
17609 a->attr.name = b->attrname;
17610@@ -2442,7 +2445,7 @@ struct dentry *mce_get_debugfs_dir(void)
17611 static void mce_reset(void)
17612 {
17613 cpu_missing = 0;
17614- atomic_set(&mce_fake_paniced, 0);
17615+ atomic_set_unchecked(&mce_fake_paniced, 0);
17616 atomic_set(&mce_executing, 0);
17617 atomic_set(&mce_callin, 0);
17618 atomic_set(&global_nwo, 0);
17619diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
17620index 1c044b1..37a2a43 100644
17621--- a/arch/x86/kernel/cpu/mcheck/p5.c
17622+++ b/arch/x86/kernel/cpu/mcheck/p5.c
17623@@ -11,6 +11,7 @@
17624 #include <asm/processor.h>
17625 #include <asm/mce.h>
17626 #include <asm/msr.h>
17627+#include <asm/pgtable.h>
17628
17629 /* By default disabled */
17630 int mce_p5_enabled __read_mostly;
17631@@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
17632 if (!cpu_has(c, X86_FEATURE_MCE))
17633 return;
17634
17635+ pax_open_kernel();
17636 machine_check_vector = pentium_machine_check;
17637+ pax_close_kernel();
17638 /* Make sure the vector pointer is visible before we enable MCEs: */
17639 wmb();
17640
17641diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
17642index 47a1870..8c019a7 100644
17643--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
17644+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
17645@@ -288,7 +288,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
17646 return notifier_from_errno(err);
17647 }
17648
17649-static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
17650+static struct notifier_block thermal_throttle_cpu_notifier =
17651 {
17652 .notifier_call = thermal_throttle_cpu_callback,
17653 };
17654diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
17655index e9a701a..35317d6 100644
17656--- a/arch/x86/kernel/cpu/mcheck/winchip.c
17657+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
17658@@ -10,6 +10,7 @@
17659 #include <asm/processor.h>
17660 #include <asm/mce.h>
17661 #include <asm/msr.h>
17662+#include <asm/pgtable.h>
17663
17664 /* Machine check handler for WinChip C6: */
17665 static void winchip_machine_check(struct pt_regs *regs, long error_code)
17666@@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
17667 {
17668 u32 lo, hi;
17669
17670+ pax_open_kernel();
17671 machine_check_vector = winchip_machine_check;
17672+ pax_close_kernel();
17673 /* Make sure the vector pointer is visible before we enable MCEs: */
17674 wmb();
17675
17676diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
17677index 726bf96..81f0526 100644
17678--- a/arch/x86/kernel/cpu/mtrr/main.c
17679+++ b/arch/x86/kernel/cpu/mtrr/main.c
17680@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
17681 u64 size_or_mask, size_and_mask;
17682 static bool mtrr_aps_delayed_init;
17683
17684-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
17685+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
17686
17687 const struct mtrr_ops *mtrr_if;
17688
17689diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
17690index df5e41f..816c719 100644
17691--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
17692+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
17693@@ -25,7 +25,7 @@ struct mtrr_ops {
17694 int (*validate_add_page)(unsigned long base, unsigned long size,
17695 unsigned int type);
17696 int (*have_wrcomb)(void);
17697-};
17698+} __do_const;
17699
17700 extern int generic_get_free_region(unsigned long base, unsigned long size,
17701 int replace_reg);
17702diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
17703index bf0f01a..9adfee1 100644
17704--- a/arch/x86/kernel/cpu/perf_event.c
17705+++ b/arch/x86/kernel/cpu/perf_event.c
17706@@ -1305,7 +1305,7 @@ static void __init pmu_check_apic(void)
17707 pr_info("no hardware sampling interrupt available.\n");
17708 }
17709
17710-static struct attribute_group x86_pmu_format_group = {
17711+static attribute_group_no_const x86_pmu_format_group = {
17712 .name = "format",
17713 .attrs = NULL,
17714 };
17715@@ -1374,7 +1374,7 @@ static struct attribute *events_attr[] = {
17716 NULL,
17717 };
17718
17719-static struct attribute_group x86_pmu_events_group = {
17720+static attribute_group_no_const x86_pmu_events_group = {
17721 .name = "events",
17722 .attrs = events_attr,
17723 };
17724@@ -1873,7 +1873,7 @@ static unsigned long get_segment_base(unsigned int segment)
17725 if (idx > GDT_ENTRIES)
17726 return 0;
17727
17728- desc = __this_cpu_ptr(&gdt_page.gdt[0]);
17729+ desc = get_cpu_gdt_table(smp_processor_id());
17730 }
17731
17732 return get_desc_base(desc + idx);
17733@@ -1963,7 +1963,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
17734 break;
17735
17736 perf_callchain_store(entry, frame.return_address);
17737- fp = frame.next_frame;
17738+ fp = (const void __force_user *)frame.next_frame;
17739 }
17740 }
17741
17742diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
17743index 4a0a462..be3b204 100644
17744--- a/arch/x86/kernel/cpu/perf_event_intel.c
17745+++ b/arch/x86/kernel/cpu/perf_event_intel.c
17746@@ -1994,10 +1994,10 @@ __init int intel_pmu_init(void)
17747 * v2 and above have a perf capabilities MSR
17748 */
17749 if (version > 1) {
17750- u64 capabilities;
17751+ u64 capabilities = x86_pmu.intel_cap.capabilities;
17752
17753- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
17754- x86_pmu.intel_cap.capabilities = capabilities;
17755+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
17756+ x86_pmu.intel_cap.capabilities = capabilities;
17757 }
17758
17759 intel_ds_init();
17760diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
17761index 3e091f0..d2dc8d6 100644
17762--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
17763+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
17764@@ -2428,7 +2428,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
17765 static int __init uncore_type_init(struct intel_uncore_type *type)
17766 {
17767 struct intel_uncore_pmu *pmus;
17768- struct attribute_group *attr_group;
17769+ attribute_group_no_const *attr_group;
17770 struct attribute **attrs;
17771 int i, j;
17772
17773@@ -2826,7 +2826,7 @@ static int
17774 return NOTIFY_OK;
17775 }
17776
17777-static struct notifier_block uncore_cpu_nb __cpuinitdata = {
17778+static struct notifier_block uncore_cpu_nb = {
17779 .notifier_call = uncore_cpu_notifier,
17780 /*
17781 * to migrate uncore events, our notifier should be executed
17782diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
17783index e68a455..975a932 100644
17784--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
17785+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
17786@@ -428,7 +428,7 @@ struct intel_uncore_box {
17787 struct uncore_event_desc {
17788 struct kobj_attribute attr;
17789 const char *config;
17790-};
17791+} __do_const;
17792
17793 #define INTEL_UNCORE_EVENT_DESC(_name, _config) \
17794 { \
17795diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
17796index 1e4dbcf..b9a34c2 100644
17797--- a/arch/x86/kernel/cpuid.c
17798+++ b/arch/x86/kernel/cpuid.c
17799@@ -171,7 +171,7 @@ static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
17800 return notifier_from_errno(err);
17801 }
17802
17803-static struct notifier_block __refdata cpuid_class_cpu_notifier =
17804+static struct notifier_block cpuid_class_cpu_notifier =
17805 {
17806 .notifier_call = cpuid_class_cpu_callback,
17807 };
17808diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
17809index 74467fe..18793d5 100644
17810--- a/arch/x86/kernel/crash.c
17811+++ b/arch/x86/kernel/crash.c
17812@@ -58,10 +58,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
17813 {
17814 #ifdef CONFIG_X86_32
17815 struct pt_regs fixed_regs;
17816-#endif
17817
17818-#ifdef CONFIG_X86_32
17819- if (!user_mode_vm(regs)) {
17820+ if (!user_mode(regs)) {
17821 crash_fixup_ss_esp(&fixed_regs, regs);
17822 regs = &fixed_regs;
17823 }
17824diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
17825index afa64ad..dce67dd 100644
17826--- a/arch/x86/kernel/crash_dump_64.c
17827+++ b/arch/x86/kernel/crash_dump_64.c
17828@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
17829 return -ENOMEM;
17830
17831 if (userbuf) {
17832- if (copy_to_user(buf, vaddr + offset, csize)) {
17833+ if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
17834 iounmap(vaddr);
17835 return -EFAULT;
17836 }
17837diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
17838index 37250fe..bf2ec74 100644
17839--- a/arch/x86/kernel/doublefault_32.c
17840+++ b/arch/x86/kernel/doublefault_32.c
17841@@ -11,7 +11,7 @@
17842
17843 #define DOUBLEFAULT_STACKSIZE (1024)
17844 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
17845-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
17846+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
17847
17848 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
17849
17850@@ -21,7 +21,7 @@ static void doublefault_fn(void)
17851 unsigned long gdt, tss;
17852
17853 store_gdt(&gdt_desc);
17854- gdt = gdt_desc.address;
17855+ gdt = (unsigned long)gdt_desc.address;
17856
17857 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
17858
17859@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
17860 /* 0x2 bit is always set */
17861 .flags = X86_EFLAGS_SF | 0x2,
17862 .sp = STACK_START,
17863- .es = __USER_DS,
17864+ .es = __KERNEL_DS,
17865 .cs = __KERNEL_CS,
17866 .ss = __KERNEL_DS,
17867- .ds = __USER_DS,
17868+ .ds = __KERNEL_DS,
17869 .fs = __KERNEL_PERCPU,
17870
17871 .__cr3 = __pa_nodebug(swapper_pg_dir),
17872diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
17873index c8797d5..c605e53 100644
17874--- a/arch/x86/kernel/dumpstack.c
17875+++ b/arch/x86/kernel/dumpstack.c
17876@@ -2,6 +2,9 @@
17877 * Copyright (C) 1991, 1992 Linus Torvalds
17878 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
17879 */
17880+#ifdef CONFIG_GRKERNSEC_HIDESYM
17881+#define __INCLUDED_BY_HIDESYM 1
17882+#endif
17883 #include <linux/kallsyms.h>
17884 #include <linux/kprobes.h>
17885 #include <linux/uaccess.h>
17886@@ -35,16 +38,14 @@ void printk_address(unsigned long address, int reliable)
17887 static void
17888 print_ftrace_graph_addr(unsigned long addr, void *data,
17889 const struct stacktrace_ops *ops,
17890- struct thread_info *tinfo, int *graph)
17891+ struct task_struct *task, int *graph)
17892 {
17893- struct task_struct *task;
17894 unsigned long ret_addr;
17895 int index;
17896
17897 if (addr != (unsigned long)return_to_handler)
17898 return;
17899
17900- task = tinfo->task;
17901 index = task->curr_ret_stack;
17902
17903 if (!task->ret_stack || index < *graph)
17904@@ -61,7 +62,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
17905 static inline void
17906 print_ftrace_graph_addr(unsigned long addr, void *data,
17907 const struct stacktrace_ops *ops,
17908- struct thread_info *tinfo, int *graph)
17909+ struct task_struct *task, int *graph)
17910 { }
17911 #endif
17912
17913@@ -72,10 +73,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
17914 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
17915 */
17916
17917-static inline int valid_stack_ptr(struct thread_info *tinfo,
17918- void *p, unsigned int size, void *end)
17919+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
17920 {
17921- void *t = tinfo;
17922 if (end) {
17923 if (p < end && p >= (end-THREAD_SIZE))
17924 return 1;
17925@@ -86,14 +85,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
17926 }
17927
17928 unsigned long
17929-print_context_stack(struct thread_info *tinfo,
17930+print_context_stack(struct task_struct *task, void *stack_start,
17931 unsigned long *stack, unsigned long bp,
17932 const struct stacktrace_ops *ops, void *data,
17933 unsigned long *end, int *graph)
17934 {
17935 struct stack_frame *frame = (struct stack_frame *)bp;
17936
17937- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
17938+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
17939 unsigned long addr;
17940
17941 addr = *stack;
17942@@ -105,7 +104,7 @@ print_context_stack(struct thread_info *tinfo,
17943 } else {
17944 ops->address(data, addr, 0);
17945 }
17946- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
17947+ print_ftrace_graph_addr(addr, data, ops, task, graph);
17948 }
17949 stack++;
17950 }
17951@@ -114,7 +113,7 @@ print_context_stack(struct thread_info *tinfo,
17952 EXPORT_SYMBOL_GPL(print_context_stack);
17953
17954 unsigned long
17955-print_context_stack_bp(struct thread_info *tinfo,
17956+print_context_stack_bp(struct task_struct *task, void *stack_start,
17957 unsigned long *stack, unsigned long bp,
17958 const struct stacktrace_ops *ops, void *data,
17959 unsigned long *end, int *graph)
17960@@ -122,7 +121,7 @@ print_context_stack_bp(struct thread_info *tinfo,
17961 struct stack_frame *frame = (struct stack_frame *)bp;
17962 unsigned long *ret_addr = &frame->return_address;
17963
17964- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
17965+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
17966 unsigned long addr = *ret_addr;
17967
17968 if (!__kernel_text_address(addr))
17969@@ -131,7 +130,7 @@ print_context_stack_bp(struct thread_info *tinfo,
17970 ops->address(data, addr, 1);
17971 frame = frame->next_frame;
17972 ret_addr = &frame->return_address;
17973- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
17974+ print_ftrace_graph_addr(addr, data, ops, task, graph);
17975 }
17976
17977 return (unsigned long)frame;
17978@@ -189,7 +188,7 @@ void dump_stack(void)
17979
17980 bp = stack_frame(current, NULL);
17981 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
17982- current->pid, current->comm, print_tainted(),
17983+ task_pid_nr(current), current->comm, print_tainted(),
17984 init_utsname()->release,
17985 (int)strcspn(init_utsname()->version, " "),
17986 init_utsname()->version);
17987@@ -225,6 +224,8 @@ unsigned __kprobes long oops_begin(void)
17988 }
17989 EXPORT_SYMBOL_GPL(oops_begin);
17990
17991+extern void gr_handle_kernel_exploit(void);
17992+
17993 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
17994 {
17995 if (regs && kexec_should_crash(current))
17996@@ -246,7 +247,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
17997 panic("Fatal exception in interrupt");
17998 if (panic_on_oops)
17999 panic("Fatal exception");
18000- do_exit(signr);
18001+
18002+ gr_handle_kernel_exploit();
18003+
18004+ do_group_exit(signr);
18005 }
18006
18007 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
18008@@ -274,7 +278,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
18009 print_modules();
18010 show_regs(regs);
18011 #ifdef CONFIG_X86_32
18012- if (user_mode_vm(regs)) {
18013+ if (user_mode(regs)) {
18014 sp = regs->sp;
18015 ss = regs->ss & 0xffff;
18016 } else {
18017@@ -302,7 +306,7 @@ void die(const char *str, struct pt_regs *regs, long err)
18018 unsigned long flags = oops_begin();
18019 int sig = SIGSEGV;
18020
18021- if (!user_mode_vm(regs))
18022+ if (!user_mode(regs))
18023 report_bug(regs->ip, regs);
18024
18025 if (__die(str, regs, err))
18026diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
18027index 1038a41..db2c12b 100644
18028--- a/arch/x86/kernel/dumpstack_32.c
18029+++ b/arch/x86/kernel/dumpstack_32.c
18030@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18031 bp = stack_frame(task, regs);
18032
18033 for (;;) {
18034- struct thread_info *context;
18035+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
18036
18037- context = (struct thread_info *)
18038- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
18039- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
18040+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
18041
18042- stack = (unsigned long *)context->previous_esp;
18043- if (!stack)
18044+ if (stack_start == task_stack_page(task))
18045 break;
18046+ stack = *(unsigned long **)stack_start;
18047 if (ops->stack(data, "IRQ") < 0)
18048 break;
18049 touch_nmi_watchdog();
18050@@ -86,7 +84,7 @@ void show_regs(struct pt_regs *regs)
18051 {
18052 int i;
18053
18054- __show_regs(regs, !user_mode_vm(regs));
18055+ __show_regs(regs, !user_mode(regs));
18056
18057 pr_emerg("Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n",
18058 TASK_COMM_LEN, current->comm, task_pid_nr(current),
18059@@ -95,21 +93,22 @@ void show_regs(struct pt_regs *regs)
18060 * When in-kernel, we also print out the stack and code at the
18061 * time of the fault..
18062 */
18063- if (!user_mode_vm(regs)) {
18064+ if (!user_mode(regs)) {
18065 unsigned int code_prologue = code_bytes * 43 / 64;
18066 unsigned int code_len = code_bytes;
18067 unsigned char c;
18068 u8 *ip;
18069+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
18070
18071 pr_emerg("Stack:\n");
18072 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
18073
18074 pr_emerg("Code:");
18075
18076- ip = (u8 *)regs->ip - code_prologue;
18077+ ip = (u8 *)regs->ip - code_prologue + cs_base;
18078 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
18079 /* try starting at IP */
18080- ip = (u8 *)regs->ip;
18081+ ip = (u8 *)regs->ip + cs_base;
18082 code_len = code_len - code_prologue + 1;
18083 }
18084 for (i = 0; i < code_len; i++, ip++) {
18085@@ -118,7 +117,7 @@ void show_regs(struct pt_regs *regs)
18086 pr_cont(" Bad EIP value.");
18087 break;
18088 }
18089- if (ip == (u8 *)regs->ip)
18090+ if (ip == (u8 *)regs->ip + cs_base)
18091 pr_cont(" <%02x>", c);
18092 else
18093 pr_cont(" %02x", c);
18094@@ -131,6 +130,7 @@ int is_valid_bugaddr(unsigned long ip)
18095 {
18096 unsigned short ud2;
18097
18098+ ip = ktla_ktva(ip);
18099 if (ip < PAGE_OFFSET)
18100 return 0;
18101 if (probe_kernel_address((unsigned short *)ip, ud2))
18102@@ -138,3 +138,15 @@ int is_valid_bugaddr(unsigned long ip)
18103
18104 return ud2 == 0x0b0f;
18105 }
18106+
18107+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
18108+void pax_check_alloca(unsigned long size)
18109+{
18110+ unsigned long sp = (unsigned long)&sp, stack_left;
18111+
18112+ /* all kernel stacks are of the same size */
18113+ stack_left = sp & (THREAD_SIZE - 1);
18114+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
18115+}
18116+EXPORT_SYMBOL(pax_check_alloca);
18117+#endif
18118diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
18119index b653675..51cc8c0 100644
18120--- a/arch/x86/kernel/dumpstack_64.c
18121+++ b/arch/x86/kernel/dumpstack_64.c
18122@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18123 unsigned long *irq_stack_end =
18124 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
18125 unsigned used = 0;
18126- struct thread_info *tinfo;
18127 int graph = 0;
18128 unsigned long dummy;
18129+ void *stack_start;
18130
18131 if (!task)
18132 task = current;
18133@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18134 * current stack address. If the stacks consist of nested
18135 * exceptions
18136 */
18137- tinfo = task_thread_info(task);
18138 for (;;) {
18139 char *id;
18140 unsigned long *estack_end;
18141+
18142 estack_end = in_exception_stack(cpu, (unsigned long)stack,
18143 &used, &id);
18144
18145@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18146 if (ops->stack(data, id) < 0)
18147 break;
18148
18149- bp = ops->walk_stack(tinfo, stack, bp, ops,
18150+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
18151 data, estack_end, &graph);
18152 ops->stack(data, "<EOE>");
18153 /*
18154@@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18155 * second-to-last pointer (index -2 to end) in the
18156 * exception stack:
18157 */
18158+ if ((u16)estack_end[-1] != __KERNEL_DS)
18159+ goto out;
18160 stack = (unsigned long *) estack_end[-2];
18161 continue;
18162 }
18163@@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18164 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
18165 if (ops->stack(data, "IRQ") < 0)
18166 break;
18167- bp = ops->walk_stack(tinfo, stack, bp,
18168+ bp = ops->walk_stack(task, irq_stack, stack, bp,
18169 ops, data, irq_stack_end, &graph);
18170 /*
18171 * We link to the next stack (which would be
18172@@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18173 /*
18174 * This handles the process stack:
18175 */
18176- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
18177+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
18178+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
18179+out:
18180 put_cpu();
18181 }
18182 EXPORT_SYMBOL(dump_trace);
18183@@ -249,7 +253,7 @@ void show_regs(struct pt_regs *regs)
18184 {
18185 int i;
18186 unsigned long sp;
18187- const int cpu = smp_processor_id();
18188+ const int cpu = raw_smp_processor_id();
18189 struct task_struct *cur = current;
18190
18191 sp = regs->sp;
18192@@ -304,3 +308,50 @@ int is_valid_bugaddr(unsigned long ip)
18193
18194 return ud2 == 0x0b0f;
18195 }
18196+
18197+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
18198+void pax_check_alloca(unsigned long size)
18199+{
18200+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
18201+ unsigned cpu, used;
18202+ char *id;
18203+
18204+ /* check the process stack first */
18205+ stack_start = (unsigned long)task_stack_page(current);
18206+ stack_end = stack_start + THREAD_SIZE;
18207+ if (likely(stack_start <= sp && sp < stack_end)) {
18208+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
18209+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
18210+ return;
18211+ }
18212+
18213+ cpu = get_cpu();
18214+
18215+ /* check the irq stacks */
18216+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
18217+ stack_start = stack_end - IRQ_STACK_SIZE;
18218+ if (stack_start <= sp && sp < stack_end) {
18219+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
18220+ put_cpu();
18221+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
18222+ return;
18223+ }
18224+
18225+ /* check the exception stacks */
18226+ used = 0;
18227+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
18228+ stack_start = stack_end - EXCEPTION_STKSZ;
18229+ if (stack_end && stack_start <= sp && sp < stack_end) {
18230+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
18231+ put_cpu();
18232+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
18233+ return;
18234+ }
18235+
18236+ put_cpu();
18237+
18238+ /* unknown stack */
18239+ BUG();
18240+}
18241+EXPORT_SYMBOL(pax_check_alloca);
18242+#endif
18243diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
18244index d32abea..74daf4f 100644
18245--- a/arch/x86/kernel/e820.c
18246+++ b/arch/x86/kernel/e820.c
18247@@ -800,8 +800,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
18248
18249 static void early_panic(char *msg)
18250 {
18251- early_printk(msg);
18252- panic(msg);
18253+ early_printk("%s", msg);
18254+ panic("%s", msg);
18255 }
18256
18257 static int userdef __initdata;
18258diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
18259index 9b9f18b..9fcaa04 100644
18260--- a/arch/x86/kernel/early_printk.c
18261+++ b/arch/x86/kernel/early_printk.c
18262@@ -7,6 +7,7 @@
18263 #include <linux/pci_regs.h>
18264 #include <linux/pci_ids.h>
18265 #include <linux/errno.h>
18266+#include <linux/sched.h>
18267 #include <asm/io.h>
18268 #include <asm/processor.h>
18269 #include <asm/fcntl.h>
18270diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
18271index 8f3e2de..934870f 100644
18272--- a/arch/x86/kernel/entry_32.S
18273+++ b/arch/x86/kernel/entry_32.S
18274@@ -177,13 +177,153 @@
18275 /*CFI_REL_OFFSET gs, PT_GS*/
18276 .endm
18277 .macro SET_KERNEL_GS reg
18278+
18279+#ifdef CONFIG_CC_STACKPROTECTOR
18280 movl $(__KERNEL_STACK_CANARY), \reg
18281+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
18282+ movl $(__USER_DS), \reg
18283+#else
18284+ xorl \reg, \reg
18285+#endif
18286+
18287 movl \reg, %gs
18288 .endm
18289
18290 #endif /* CONFIG_X86_32_LAZY_GS */
18291
18292-.macro SAVE_ALL
18293+.macro pax_enter_kernel
18294+#ifdef CONFIG_PAX_KERNEXEC
18295+ call pax_enter_kernel
18296+#endif
18297+.endm
18298+
18299+.macro pax_exit_kernel
18300+#ifdef CONFIG_PAX_KERNEXEC
18301+ call pax_exit_kernel
18302+#endif
18303+.endm
18304+
18305+#ifdef CONFIG_PAX_KERNEXEC
18306+ENTRY(pax_enter_kernel)
18307+#ifdef CONFIG_PARAVIRT
18308+ pushl %eax
18309+ pushl %ecx
18310+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
18311+ mov %eax, %esi
18312+#else
18313+ mov %cr0, %esi
18314+#endif
18315+ bts $16, %esi
18316+ jnc 1f
18317+ mov %cs, %esi
18318+ cmp $__KERNEL_CS, %esi
18319+ jz 3f
18320+ ljmp $__KERNEL_CS, $3f
18321+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
18322+2:
18323+#ifdef CONFIG_PARAVIRT
18324+ mov %esi, %eax
18325+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
18326+#else
18327+ mov %esi, %cr0
18328+#endif
18329+3:
18330+#ifdef CONFIG_PARAVIRT
18331+ popl %ecx
18332+ popl %eax
18333+#endif
18334+ ret
18335+ENDPROC(pax_enter_kernel)
18336+
18337+ENTRY(pax_exit_kernel)
18338+#ifdef CONFIG_PARAVIRT
18339+ pushl %eax
18340+ pushl %ecx
18341+#endif
18342+ mov %cs, %esi
18343+ cmp $__KERNEXEC_KERNEL_CS, %esi
18344+ jnz 2f
18345+#ifdef CONFIG_PARAVIRT
18346+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
18347+ mov %eax, %esi
18348+#else
18349+ mov %cr0, %esi
18350+#endif
18351+ btr $16, %esi
18352+ ljmp $__KERNEL_CS, $1f
18353+1:
18354+#ifdef CONFIG_PARAVIRT
18355+ mov %esi, %eax
18356+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
18357+#else
18358+ mov %esi, %cr0
18359+#endif
18360+2:
18361+#ifdef CONFIG_PARAVIRT
18362+ popl %ecx
18363+ popl %eax
18364+#endif
18365+ ret
18366+ENDPROC(pax_exit_kernel)
18367+#endif
18368+
18369+.macro pax_erase_kstack
18370+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
18371+ call pax_erase_kstack
18372+#endif
18373+.endm
18374+
18375+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
18376+/*
18377+ * ebp: thread_info
18378+ */
18379+ENTRY(pax_erase_kstack)
18380+ pushl %edi
18381+ pushl %ecx
18382+ pushl %eax
18383+
18384+ mov TI_lowest_stack(%ebp), %edi
18385+ mov $-0xBEEF, %eax
18386+ std
18387+
18388+1: mov %edi, %ecx
18389+ and $THREAD_SIZE_asm - 1, %ecx
18390+ shr $2, %ecx
18391+ repne scasl
18392+ jecxz 2f
18393+
18394+ cmp $2*16, %ecx
18395+ jc 2f
18396+
18397+ mov $2*16, %ecx
18398+ repe scasl
18399+ jecxz 2f
18400+ jne 1b
18401+
18402+2: cld
18403+ mov %esp, %ecx
18404+ sub %edi, %ecx
18405+
18406+ cmp $THREAD_SIZE_asm, %ecx
18407+ jb 3f
18408+ ud2
18409+3:
18410+
18411+ shr $2, %ecx
18412+ rep stosl
18413+
18414+ mov TI_task_thread_sp0(%ebp), %edi
18415+ sub $128, %edi
18416+ mov %edi, TI_lowest_stack(%ebp)
18417+
18418+ popl %eax
18419+ popl %ecx
18420+ popl %edi
18421+ ret
18422+ENDPROC(pax_erase_kstack)
18423+#endif
18424+
18425+.macro __SAVE_ALL _DS
18426 cld
18427 PUSH_GS
18428 pushl_cfi %fs
18429@@ -206,7 +346,7 @@
18430 CFI_REL_OFFSET ecx, 0
18431 pushl_cfi %ebx
18432 CFI_REL_OFFSET ebx, 0
18433- movl $(__USER_DS), %edx
18434+ movl $\_DS, %edx
18435 movl %edx, %ds
18436 movl %edx, %es
18437 movl $(__KERNEL_PERCPU), %edx
18438@@ -214,6 +354,15 @@
18439 SET_KERNEL_GS %edx
18440 .endm
18441
18442+.macro SAVE_ALL
18443+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
18444+ __SAVE_ALL __KERNEL_DS
18445+ pax_enter_kernel
18446+#else
18447+ __SAVE_ALL __USER_DS
18448+#endif
18449+.endm
18450+
18451 .macro RESTORE_INT_REGS
18452 popl_cfi %ebx
18453 CFI_RESTORE ebx
18454@@ -297,7 +446,7 @@ ENTRY(ret_from_fork)
18455 popfl_cfi
18456 jmp syscall_exit
18457 CFI_ENDPROC
18458-END(ret_from_fork)
18459+ENDPROC(ret_from_fork)
18460
18461 ENTRY(ret_from_kernel_thread)
18462 CFI_STARTPROC
18463@@ -344,7 +493,15 @@ ret_from_intr:
18464 andl $SEGMENT_RPL_MASK, %eax
18465 #endif
18466 cmpl $USER_RPL, %eax
18467+
18468+#ifdef CONFIG_PAX_KERNEXEC
18469+ jae resume_userspace
18470+
18471+ pax_exit_kernel
18472+ jmp resume_kernel
18473+#else
18474 jb resume_kernel # not returning to v8086 or userspace
18475+#endif
18476
18477 ENTRY(resume_userspace)
18478 LOCKDEP_SYS_EXIT
18479@@ -356,8 +513,8 @@ ENTRY(resume_userspace)
18480 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
18481 # int/exception return?
18482 jne work_pending
18483- jmp restore_all
18484-END(ret_from_exception)
18485+ jmp restore_all_pax
18486+ENDPROC(ret_from_exception)
18487
18488 #ifdef CONFIG_PREEMPT
18489 ENTRY(resume_kernel)
18490@@ -372,7 +529,7 @@ need_resched:
18491 jz restore_all
18492 call preempt_schedule_irq
18493 jmp need_resched
18494-END(resume_kernel)
18495+ENDPROC(resume_kernel)
18496 #endif
18497 CFI_ENDPROC
18498 /*
18499@@ -406,30 +563,45 @@ sysenter_past_esp:
18500 /*CFI_REL_OFFSET cs, 0*/
18501 /*
18502 * Push current_thread_info()->sysenter_return to the stack.
18503- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
18504- * pushed above; +8 corresponds to copy_thread's esp0 setting.
18505 */
18506- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
18507+ pushl_cfi $0
18508 CFI_REL_OFFSET eip, 0
18509
18510 pushl_cfi %eax
18511 SAVE_ALL
18512+ GET_THREAD_INFO(%ebp)
18513+ movl TI_sysenter_return(%ebp),%ebp
18514+ movl %ebp,PT_EIP(%esp)
18515 ENABLE_INTERRUPTS(CLBR_NONE)
18516
18517 /*
18518 * Load the potential sixth argument from user stack.
18519 * Careful about security.
18520 */
18521+ movl PT_OLDESP(%esp),%ebp
18522+
18523+#ifdef CONFIG_PAX_MEMORY_UDEREF
18524+ mov PT_OLDSS(%esp),%ds
18525+1: movl %ds:(%ebp),%ebp
18526+ push %ss
18527+ pop %ds
18528+#else
18529 cmpl $__PAGE_OFFSET-3,%ebp
18530 jae syscall_fault
18531 ASM_STAC
18532 1: movl (%ebp),%ebp
18533 ASM_CLAC
18534+#endif
18535+
18536 movl %ebp,PT_EBP(%esp)
18537 _ASM_EXTABLE(1b,syscall_fault)
18538
18539 GET_THREAD_INFO(%ebp)
18540
18541+#ifdef CONFIG_PAX_RANDKSTACK
18542+ pax_erase_kstack
18543+#endif
18544+
18545 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
18546 jnz sysenter_audit
18547 sysenter_do_call:
18548@@ -444,12 +616,24 @@ sysenter_do_call:
18549 testl $_TIF_ALLWORK_MASK, %ecx
18550 jne sysexit_audit
18551 sysenter_exit:
18552+
18553+#ifdef CONFIG_PAX_RANDKSTACK
18554+ pushl_cfi %eax
18555+ movl %esp, %eax
18556+ call pax_randomize_kstack
18557+ popl_cfi %eax
18558+#endif
18559+
18560+ pax_erase_kstack
18561+
18562 /* if something modifies registers it must also disable sysexit */
18563 movl PT_EIP(%esp), %edx
18564 movl PT_OLDESP(%esp), %ecx
18565 xorl %ebp,%ebp
18566 TRACE_IRQS_ON
18567 1: mov PT_FS(%esp), %fs
18568+2: mov PT_DS(%esp), %ds
18569+3: mov PT_ES(%esp), %es
18570 PTGS_TO_GS
18571 ENABLE_INTERRUPTS_SYSEXIT
18572
18573@@ -466,6 +650,9 @@ sysenter_audit:
18574 movl %eax,%edx /* 2nd arg: syscall number */
18575 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
18576 call __audit_syscall_entry
18577+
18578+ pax_erase_kstack
18579+
18580 pushl_cfi %ebx
18581 movl PT_EAX(%esp),%eax /* reload syscall number */
18582 jmp sysenter_do_call
18583@@ -491,10 +678,16 @@ sysexit_audit:
18584
18585 CFI_ENDPROC
18586 .pushsection .fixup,"ax"
18587-2: movl $0,PT_FS(%esp)
18588+4: movl $0,PT_FS(%esp)
18589+ jmp 1b
18590+5: movl $0,PT_DS(%esp)
18591+ jmp 1b
18592+6: movl $0,PT_ES(%esp)
18593 jmp 1b
18594 .popsection
18595- _ASM_EXTABLE(1b,2b)
18596+ _ASM_EXTABLE(1b,4b)
18597+ _ASM_EXTABLE(2b,5b)
18598+ _ASM_EXTABLE(3b,6b)
18599 PTGS_TO_GS_EX
18600 ENDPROC(ia32_sysenter_target)
18601
18602@@ -509,6 +702,11 @@ ENTRY(system_call)
18603 pushl_cfi %eax # save orig_eax
18604 SAVE_ALL
18605 GET_THREAD_INFO(%ebp)
18606+
18607+#ifdef CONFIG_PAX_RANDKSTACK
18608+ pax_erase_kstack
18609+#endif
18610+
18611 # system call tracing in operation / emulation
18612 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
18613 jnz syscall_trace_entry
18614@@ -527,6 +725,15 @@ syscall_exit:
18615 testl $_TIF_ALLWORK_MASK, %ecx # current->work
18616 jne syscall_exit_work
18617
18618+restore_all_pax:
18619+
18620+#ifdef CONFIG_PAX_RANDKSTACK
18621+ movl %esp, %eax
18622+ call pax_randomize_kstack
18623+#endif
18624+
18625+ pax_erase_kstack
18626+
18627 restore_all:
18628 TRACE_IRQS_IRET
18629 restore_all_notrace:
18630@@ -583,14 +790,34 @@ ldt_ss:
18631 * compensating for the offset by changing to the ESPFIX segment with
18632 * a base address that matches for the difference.
18633 */
18634-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
18635+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
18636 mov %esp, %edx /* load kernel esp */
18637 mov PT_OLDESP(%esp), %eax /* load userspace esp */
18638 mov %dx, %ax /* eax: new kernel esp */
18639 sub %eax, %edx /* offset (low word is 0) */
18640+#ifdef CONFIG_SMP
18641+ movl PER_CPU_VAR(cpu_number), %ebx
18642+ shll $PAGE_SHIFT_asm, %ebx
18643+ addl $cpu_gdt_table, %ebx
18644+#else
18645+ movl $cpu_gdt_table, %ebx
18646+#endif
18647 shr $16, %edx
18648- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
18649- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
18650+
18651+#ifdef CONFIG_PAX_KERNEXEC
18652+ mov %cr0, %esi
18653+ btr $16, %esi
18654+ mov %esi, %cr0
18655+#endif
18656+
18657+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
18658+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
18659+
18660+#ifdef CONFIG_PAX_KERNEXEC
18661+ bts $16, %esi
18662+ mov %esi, %cr0
18663+#endif
18664+
18665 pushl_cfi $__ESPFIX_SS
18666 pushl_cfi %eax /* new kernel esp */
18667 /* Disable interrupts, but do not irqtrace this section: we
18668@@ -619,20 +846,18 @@ work_resched:
18669 movl TI_flags(%ebp), %ecx
18670 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
18671 # than syscall tracing?
18672- jz restore_all
18673+ jz restore_all_pax
18674 testb $_TIF_NEED_RESCHED, %cl
18675 jnz work_resched
18676
18677 work_notifysig: # deal with pending signals and
18678 # notify-resume requests
18679+ movl %esp, %eax
18680 #ifdef CONFIG_VM86
18681 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
18682- movl %esp, %eax
18683 jne work_notifysig_v86 # returning to kernel-space or
18684 # vm86-space
18685 1:
18686-#else
18687- movl %esp, %eax
18688 #endif
18689 TRACE_IRQS_ON
18690 ENABLE_INTERRUPTS(CLBR_NONE)
18691@@ -653,7 +878,7 @@ work_notifysig_v86:
18692 movl %eax, %esp
18693 jmp 1b
18694 #endif
18695-END(work_pending)
18696+ENDPROC(work_pending)
18697
18698 # perform syscall exit tracing
18699 ALIGN
18700@@ -661,11 +886,14 @@ syscall_trace_entry:
18701 movl $-ENOSYS,PT_EAX(%esp)
18702 movl %esp, %eax
18703 call syscall_trace_enter
18704+
18705+ pax_erase_kstack
18706+
18707 /* What it returned is what we'll actually use. */
18708 cmpl $(NR_syscalls), %eax
18709 jnae syscall_call
18710 jmp syscall_exit
18711-END(syscall_trace_entry)
18712+ENDPROC(syscall_trace_entry)
18713
18714 # perform syscall exit tracing
18715 ALIGN
18716@@ -678,21 +906,25 @@ syscall_exit_work:
18717 movl %esp, %eax
18718 call syscall_trace_leave
18719 jmp resume_userspace
18720-END(syscall_exit_work)
18721+ENDPROC(syscall_exit_work)
18722 CFI_ENDPROC
18723
18724 RING0_INT_FRAME # can't unwind into user space anyway
18725 syscall_fault:
18726+#ifdef CONFIG_PAX_MEMORY_UDEREF
18727+ push %ss
18728+ pop %ds
18729+#endif
18730 ASM_CLAC
18731 GET_THREAD_INFO(%ebp)
18732 movl $-EFAULT,PT_EAX(%esp)
18733 jmp resume_userspace
18734-END(syscall_fault)
18735+ENDPROC(syscall_fault)
18736
18737 syscall_badsys:
18738 movl $-ENOSYS,PT_EAX(%esp)
18739 jmp resume_userspace
18740-END(syscall_badsys)
18741+ENDPROC(syscall_badsys)
18742 CFI_ENDPROC
18743 /*
18744 * End of kprobes section
18745@@ -708,8 +940,15 @@ END(syscall_badsys)
18746 * normal stack and adjusts ESP with the matching offset.
18747 */
18748 /* fixup the stack */
18749- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
18750- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
18751+#ifdef CONFIG_SMP
18752+ movl PER_CPU_VAR(cpu_number), %ebx
18753+ shll $PAGE_SHIFT_asm, %ebx
18754+ addl $cpu_gdt_table, %ebx
18755+#else
18756+ movl $cpu_gdt_table, %ebx
18757+#endif
18758+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
18759+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
18760 shl $16, %eax
18761 addl %esp, %eax /* the adjusted stack pointer */
18762 pushl_cfi $__KERNEL_DS
18763@@ -762,7 +1001,7 @@ vector=vector+1
18764 .endr
18765 2: jmp common_interrupt
18766 .endr
18767-END(irq_entries_start)
18768+ENDPROC(irq_entries_start)
18769
18770 .previous
18771 END(interrupt)
18772@@ -813,7 +1052,7 @@ ENTRY(coprocessor_error)
18773 pushl_cfi $do_coprocessor_error
18774 jmp error_code
18775 CFI_ENDPROC
18776-END(coprocessor_error)
18777+ENDPROC(coprocessor_error)
18778
18779 ENTRY(simd_coprocessor_error)
18780 RING0_INT_FRAME
18781@@ -835,7 +1074,7 @@ ENTRY(simd_coprocessor_error)
18782 #endif
18783 jmp error_code
18784 CFI_ENDPROC
18785-END(simd_coprocessor_error)
18786+ENDPROC(simd_coprocessor_error)
18787
18788 ENTRY(device_not_available)
18789 RING0_INT_FRAME
18790@@ -844,18 +1083,18 @@ ENTRY(device_not_available)
18791 pushl_cfi $do_device_not_available
18792 jmp error_code
18793 CFI_ENDPROC
18794-END(device_not_available)
18795+ENDPROC(device_not_available)
18796
18797 #ifdef CONFIG_PARAVIRT
18798 ENTRY(native_iret)
18799 iret
18800 _ASM_EXTABLE(native_iret, iret_exc)
18801-END(native_iret)
18802+ENDPROC(native_iret)
18803
18804 ENTRY(native_irq_enable_sysexit)
18805 sti
18806 sysexit
18807-END(native_irq_enable_sysexit)
18808+ENDPROC(native_irq_enable_sysexit)
18809 #endif
18810
18811 ENTRY(overflow)
18812@@ -865,7 +1104,7 @@ ENTRY(overflow)
18813 pushl_cfi $do_overflow
18814 jmp error_code
18815 CFI_ENDPROC
18816-END(overflow)
18817+ENDPROC(overflow)
18818
18819 ENTRY(bounds)
18820 RING0_INT_FRAME
18821@@ -874,7 +1113,7 @@ ENTRY(bounds)
18822 pushl_cfi $do_bounds
18823 jmp error_code
18824 CFI_ENDPROC
18825-END(bounds)
18826+ENDPROC(bounds)
18827
18828 ENTRY(invalid_op)
18829 RING0_INT_FRAME
18830@@ -883,7 +1122,7 @@ ENTRY(invalid_op)
18831 pushl_cfi $do_invalid_op
18832 jmp error_code
18833 CFI_ENDPROC
18834-END(invalid_op)
18835+ENDPROC(invalid_op)
18836
18837 ENTRY(coprocessor_segment_overrun)
18838 RING0_INT_FRAME
18839@@ -892,7 +1131,7 @@ ENTRY(coprocessor_segment_overrun)
18840 pushl_cfi $do_coprocessor_segment_overrun
18841 jmp error_code
18842 CFI_ENDPROC
18843-END(coprocessor_segment_overrun)
18844+ENDPROC(coprocessor_segment_overrun)
18845
18846 ENTRY(invalid_TSS)
18847 RING0_EC_FRAME
18848@@ -900,7 +1139,7 @@ ENTRY(invalid_TSS)
18849 pushl_cfi $do_invalid_TSS
18850 jmp error_code
18851 CFI_ENDPROC
18852-END(invalid_TSS)
18853+ENDPROC(invalid_TSS)
18854
18855 ENTRY(segment_not_present)
18856 RING0_EC_FRAME
18857@@ -908,7 +1147,7 @@ ENTRY(segment_not_present)
18858 pushl_cfi $do_segment_not_present
18859 jmp error_code
18860 CFI_ENDPROC
18861-END(segment_not_present)
18862+ENDPROC(segment_not_present)
18863
18864 ENTRY(stack_segment)
18865 RING0_EC_FRAME
18866@@ -916,7 +1155,7 @@ ENTRY(stack_segment)
18867 pushl_cfi $do_stack_segment
18868 jmp error_code
18869 CFI_ENDPROC
18870-END(stack_segment)
18871+ENDPROC(stack_segment)
18872
18873 ENTRY(alignment_check)
18874 RING0_EC_FRAME
18875@@ -924,7 +1163,7 @@ ENTRY(alignment_check)
18876 pushl_cfi $do_alignment_check
18877 jmp error_code
18878 CFI_ENDPROC
18879-END(alignment_check)
18880+ENDPROC(alignment_check)
18881
18882 ENTRY(divide_error)
18883 RING0_INT_FRAME
18884@@ -933,7 +1172,7 @@ ENTRY(divide_error)
18885 pushl_cfi $do_divide_error
18886 jmp error_code
18887 CFI_ENDPROC
18888-END(divide_error)
18889+ENDPROC(divide_error)
18890
18891 #ifdef CONFIG_X86_MCE
18892 ENTRY(machine_check)
18893@@ -943,7 +1182,7 @@ ENTRY(machine_check)
18894 pushl_cfi machine_check_vector
18895 jmp error_code
18896 CFI_ENDPROC
18897-END(machine_check)
18898+ENDPROC(machine_check)
18899 #endif
18900
18901 ENTRY(spurious_interrupt_bug)
18902@@ -953,7 +1192,7 @@ ENTRY(spurious_interrupt_bug)
18903 pushl_cfi $do_spurious_interrupt_bug
18904 jmp error_code
18905 CFI_ENDPROC
18906-END(spurious_interrupt_bug)
18907+ENDPROC(spurious_interrupt_bug)
18908 /*
18909 * End of kprobes section
18910 */
18911@@ -1063,7 +1302,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
18912
18913 ENTRY(mcount)
18914 ret
18915-END(mcount)
18916+ENDPROC(mcount)
18917
18918 ENTRY(ftrace_caller)
18919 cmpl $0, function_trace_stop
18920@@ -1096,7 +1335,7 @@ ftrace_graph_call:
18921 .globl ftrace_stub
18922 ftrace_stub:
18923 ret
18924-END(ftrace_caller)
18925+ENDPROC(ftrace_caller)
18926
18927 ENTRY(ftrace_regs_caller)
18928 pushf /* push flags before compare (in cs location) */
18929@@ -1197,7 +1436,7 @@ trace:
18930 popl %ecx
18931 popl %eax
18932 jmp ftrace_stub
18933-END(mcount)
18934+ENDPROC(mcount)
18935 #endif /* CONFIG_DYNAMIC_FTRACE */
18936 #endif /* CONFIG_FUNCTION_TRACER */
18937
18938@@ -1215,7 +1454,7 @@ ENTRY(ftrace_graph_caller)
18939 popl %ecx
18940 popl %eax
18941 ret
18942-END(ftrace_graph_caller)
18943+ENDPROC(ftrace_graph_caller)
18944
18945 .globl return_to_handler
18946 return_to_handler:
18947@@ -1271,15 +1510,18 @@ error_code:
18948 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
18949 REG_TO_PTGS %ecx
18950 SET_KERNEL_GS %ecx
18951- movl $(__USER_DS), %ecx
18952+ movl $(__KERNEL_DS), %ecx
18953 movl %ecx, %ds
18954 movl %ecx, %es
18955+
18956+ pax_enter_kernel
18957+
18958 TRACE_IRQS_OFF
18959 movl %esp,%eax # pt_regs pointer
18960 call *%edi
18961 jmp ret_from_exception
18962 CFI_ENDPROC
18963-END(page_fault)
18964+ENDPROC(page_fault)
18965
18966 /*
18967 * Debug traps and NMI can happen at the one SYSENTER instruction
18968@@ -1322,7 +1564,7 @@ debug_stack_correct:
18969 call do_debug
18970 jmp ret_from_exception
18971 CFI_ENDPROC
18972-END(debug)
18973+ENDPROC(debug)
18974
18975 /*
18976 * NMI is doubly nasty. It can happen _while_ we're handling
18977@@ -1360,6 +1602,9 @@ nmi_stack_correct:
18978 xorl %edx,%edx # zero error code
18979 movl %esp,%eax # pt_regs pointer
18980 call do_nmi
18981+
18982+ pax_exit_kernel
18983+
18984 jmp restore_all_notrace
18985 CFI_ENDPROC
18986
18987@@ -1396,12 +1641,15 @@ nmi_espfix_stack:
18988 FIXUP_ESPFIX_STACK # %eax == %esp
18989 xorl %edx,%edx # zero error code
18990 call do_nmi
18991+
18992+ pax_exit_kernel
18993+
18994 RESTORE_REGS
18995 lss 12+4(%esp), %esp # back to espfix stack
18996 CFI_ADJUST_CFA_OFFSET -24
18997 jmp irq_return
18998 CFI_ENDPROC
18999-END(nmi)
19000+ENDPROC(nmi)
19001
19002 ENTRY(int3)
19003 RING0_INT_FRAME
19004@@ -1414,14 +1662,14 @@ ENTRY(int3)
19005 call do_int3
19006 jmp ret_from_exception
19007 CFI_ENDPROC
19008-END(int3)
19009+ENDPROC(int3)
19010
19011 ENTRY(general_protection)
19012 RING0_EC_FRAME
19013 pushl_cfi $do_general_protection
19014 jmp error_code
19015 CFI_ENDPROC
19016-END(general_protection)
19017+ENDPROC(general_protection)
19018
19019 #ifdef CONFIG_KVM_GUEST
19020 ENTRY(async_page_fault)
19021@@ -1430,7 +1678,7 @@ ENTRY(async_page_fault)
19022 pushl_cfi $do_async_page_fault
19023 jmp error_code
19024 CFI_ENDPROC
19025-END(async_page_fault)
19026+ENDPROC(async_page_fault)
19027 #endif
19028
19029 /*
19030diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
19031index c1d01e6..1bef85a 100644
19032--- a/arch/x86/kernel/entry_64.S
19033+++ b/arch/x86/kernel/entry_64.S
19034@@ -59,6 +59,8 @@
19035 #include <asm/context_tracking.h>
19036 #include <asm/smap.h>
19037 #include <linux/err.h>
19038+#include <asm/pgtable.h>
19039+#include <asm/alternative-asm.h>
19040
19041 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
19042 #include <linux/elf-em.h>
19043@@ -80,8 +82,9 @@
19044 #ifdef CONFIG_DYNAMIC_FTRACE
19045
19046 ENTRY(function_hook)
19047+ pax_force_retaddr
19048 retq
19049-END(function_hook)
19050+ENDPROC(function_hook)
19051
19052 /* skip is set if stack has been adjusted */
19053 .macro ftrace_caller_setup skip=0
19054@@ -122,8 +125,9 @@ GLOBAL(ftrace_graph_call)
19055 #endif
19056
19057 GLOBAL(ftrace_stub)
19058+ pax_force_retaddr
19059 retq
19060-END(ftrace_caller)
19061+ENDPROC(ftrace_caller)
19062
19063 ENTRY(ftrace_regs_caller)
19064 /* Save the current flags before compare (in SS location)*/
19065@@ -191,7 +195,7 @@ ftrace_restore_flags:
19066 popfq
19067 jmp ftrace_stub
19068
19069-END(ftrace_regs_caller)
19070+ENDPROC(ftrace_regs_caller)
19071
19072
19073 #else /* ! CONFIG_DYNAMIC_FTRACE */
19074@@ -212,6 +216,7 @@ ENTRY(function_hook)
19075 #endif
19076
19077 GLOBAL(ftrace_stub)
19078+ pax_force_retaddr
19079 retq
19080
19081 trace:
19082@@ -225,12 +230,13 @@ trace:
19083 #endif
19084 subq $MCOUNT_INSN_SIZE, %rdi
19085
19086+ pax_force_fptr ftrace_trace_function
19087 call *ftrace_trace_function
19088
19089 MCOUNT_RESTORE_FRAME
19090
19091 jmp ftrace_stub
19092-END(function_hook)
19093+ENDPROC(function_hook)
19094 #endif /* CONFIG_DYNAMIC_FTRACE */
19095 #endif /* CONFIG_FUNCTION_TRACER */
19096
19097@@ -252,8 +258,9 @@ ENTRY(ftrace_graph_caller)
19098
19099 MCOUNT_RESTORE_FRAME
19100
19101+ pax_force_retaddr
19102 retq
19103-END(ftrace_graph_caller)
19104+ENDPROC(ftrace_graph_caller)
19105
19106 GLOBAL(return_to_handler)
19107 subq $24, %rsp
19108@@ -269,7 +276,9 @@ GLOBAL(return_to_handler)
19109 movq 8(%rsp), %rdx
19110 movq (%rsp), %rax
19111 addq $24, %rsp
19112+ pax_force_fptr %rdi
19113 jmp *%rdi
19114+ENDPROC(return_to_handler)
19115 #endif
19116
19117
19118@@ -284,6 +293,311 @@ ENTRY(native_usergs_sysret64)
19119 ENDPROC(native_usergs_sysret64)
19120 #endif /* CONFIG_PARAVIRT */
19121
19122+ .macro ljmpq sel, off
19123+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
19124+ .byte 0x48; ljmp *1234f(%rip)
19125+ .pushsection .rodata
19126+ .align 16
19127+ 1234: .quad \off; .word \sel
19128+ .popsection
19129+#else
19130+ pushq $\sel
19131+ pushq $\off
19132+ lretq
19133+#endif
19134+ .endm
19135+
19136+ .macro pax_enter_kernel
19137+ pax_set_fptr_mask
19138+#ifdef CONFIG_PAX_KERNEXEC
19139+ call pax_enter_kernel
19140+#endif
19141+ .endm
19142+
19143+ .macro pax_exit_kernel
19144+#ifdef CONFIG_PAX_KERNEXEC
19145+ call pax_exit_kernel
19146+#endif
19147+ .endm
19148+
19149+#ifdef CONFIG_PAX_KERNEXEC
19150+ENTRY(pax_enter_kernel)
19151+ pushq %rdi
19152+
19153+#ifdef CONFIG_PARAVIRT
19154+ PV_SAVE_REGS(CLBR_RDI)
19155+#endif
19156+
19157+ GET_CR0_INTO_RDI
19158+ bts $16,%rdi
19159+ jnc 3f
19160+ mov %cs,%edi
19161+ cmp $__KERNEL_CS,%edi
19162+ jnz 2f
19163+1:
19164+
19165+#ifdef CONFIG_PARAVIRT
19166+ PV_RESTORE_REGS(CLBR_RDI)
19167+#endif
19168+
19169+ popq %rdi
19170+ pax_force_retaddr
19171+ retq
19172+
19173+2: ljmpq __KERNEL_CS,1b
19174+3: ljmpq __KERNEXEC_KERNEL_CS,4f
19175+4: SET_RDI_INTO_CR0
19176+ jmp 1b
19177+ENDPROC(pax_enter_kernel)
19178+
19179+ENTRY(pax_exit_kernel)
19180+ pushq %rdi
19181+
19182+#ifdef CONFIG_PARAVIRT
19183+ PV_SAVE_REGS(CLBR_RDI)
19184+#endif
19185+
19186+ mov %cs,%rdi
19187+ cmp $__KERNEXEC_KERNEL_CS,%edi
19188+ jz 2f
19189+ GET_CR0_INTO_RDI
19190+ bts $16,%rdi
19191+ jnc 4f
19192+1:
19193+
19194+#ifdef CONFIG_PARAVIRT
19195+ PV_RESTORE_REGS(CLBR_RDI);
19196+#endif
19197+
19198+ popq %rdi
19199+ pax_force_retaddr
19200+ retq
19201+
19202+2: GET_CR0_INTO_RDI
19203+ btr $16,%rdi
19204+ jnc 4f
19205+ ljmpq __KERNEL_CS,3f
19206+3: SET_RDI_INTO_CR0
19207+ jmp 1b
19208+4: ud2
19209+ jmp 4b
19210+ENDPROC(pax_exit_kernel)
19211+#endif
19212+
19213+ .macro pax_enter_kernel_user
19214+ pax_set_fptr_mask
19215+#ifdef CONFIG_PAX_MEMORY_UDEREF
19216+ call pax_enter_kernel_user
19217+#endif
19218+ .endm
19219+
19220+ .macro pax_exit_kernel_user
19221+#ifdef CONFIG_PAX_MEMORY_UDEREF
19222+ call pax_exit_kernel_user
19223+#endif
19224+#ifdef CONFIG_PAX_RANDKSTACK
19225+ pushq %rax
19226+ pushq %r11
19227+ call pax_randomize_kstack
19228+ popq %r11
19229+ popq %rax
19230+#endif
19231+ .endm
19232+
19233+#ifdef CONFIG_PAX_MEMORY_UDEREF
19234+ENTRY(pax_enter_kernel_user)
19235+ pushq %rdi
19236+ pushq %rbx
19237+
19238+#ifdef CONFIG_PARAVIRT
19239+ PV_SAVE_REGS(CLBR_RDI)
19240+#endif
19241+
19242+ GET_CR3_INTO_RDI
19243+ mov %rdi,%rbx
19244+ add $__START_KERNEL_map,%rbx
19245+ sub phys_base(%rip),%rbx
19246+
19247+#ifdef CONFIG_PARAVIRT
19248+ pushq %rdi
19249+ cmpl $0, pv_info+PARAVIRT_enabled
19250+ jz 1f
19251+ i = 0
19252+ .rept USER_PGD_PTRS
19253+ mov i*8(%rbx),%rsi
19254+ mov $0,%sil
19255+ lea i*8(%rbx),%rdi
19256+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
19257+ i = i + 1
19258+ .endr
19259+ jmp 2f
19260+1:
19261+#endif
19262+
19263+ i = 0
19264+ .rept USER_PGD_PTRS
19265+ movb $0,i*8(%rbx)
19266+ i = i + 1
19267+ .endr
19268+
19269+#ifdef CONFIG_PARAVIRT
19270+2: popq %rdi
19271+#endif
19272+ SET_RDI_INTO_CR3
19273+
19274+#ifdef CONFIG_PAX_KERNEXEC
19275+ GET_CR0_INTO_RDI
19276+ bts $16,%rdi
19277+ SET_RDI_INTO_CR0
19278+#endif
19279+
19280+#ifdef CONFIG_PARAVIRT
19281+ PV_RESTORE_REGS(CLBR_RDI)
19282+#endif
19283+
19284+ popq %rbx
19285+ popq %rdi
19286+ pax_force_retaddr
19287+ retq
19288+ENDPROC(pax_enter_kernel_user)
19289+
19290+ENTRY(pax_exit_kernel_user)
19291+ pushq %rdi
19292+ pushq %rbx
19293+
19294+#ifdef CONFIG_PARAVIRT
19295+ PV_SAVE_REGS(CLBR_RDI)
19296+#endif
19297+
19298+#ifdef CONFIG_PAX_KERNEXEC
19299+ GET_CR0_INTO_RDI
19300+ btr $16,%rdi
19301+ jnc 3f
19302+ SET_RDI_INTO_CR0
19303+#endif
19304+
19305+ GET_CR3_INTO_RDI
19306+ mov %rdi,%rbx
19307+ add $__START_KERNEL_map,%rbx
19308+ sub phys_base(%rip),%rbx
19309+
19310+#ifdef CONFIG_PARAVIRT
19311+ pushq %rdi
19312+ cmpl $0, pv_info+PARAVIRT_enabled
19313+ jz 1f
19314+ i = 0
19315+ .rept USER_PGD_PTRS
19316+ mov i*8(%rbx),%rsi
19317+ mov $0x67,%sil
19318+ lea i*8(%rbx),%rdi
19319+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
19320+ i = i + 1
19321+ .endr
19322+ popq %rdi
19323+ PV_RESTORE_REGS(CLBR_RDI)
19324+ jmp 2f
19325+1:
19326+#endif
19327+
19328+ i = 0
19329+ .rept USER_PGD_PTRS
19330+ movb $0x67,i*8(%rbx)
19331+ i = i + 1
19332+ .endr
19333+
19334+#ifdef CONFIG_PARAVIRT
19335+2:
19336+#endif
19337+
19338+ popq %rbx
19339+ popq %rdi
19340+ pax_force_retaddr
19341+ retq
19342+3: ud2
19343+ jmp 3b
19344+ENDPROC(pax_exit_kernel_user)
19345+#endif
19346+
19347+ .macro pax_enter_kernel_nmi
19348+ pax_set_fptr_mask
19349+
19350+#ifdef CONFIG_PAX_KERNEXEC
19351+ GET_CR0_INTO_RDI
19352+ bts $16,%rdi
19353+ SET_RDI_INTO_CR0
19354+ jc 110f
19355+ or $2,%ebx
19356+110:
19357+#endif
19358+ .endm
19359+
19360+ .macro pax_exit_kernel_nmi
19361+#ifdef CONFIG_PAX_KERNEXEC
19362+ test $2,%ebx
19363+ jz 110f
19364+ GET_CR0_INTO_RDI
19365+ btr $16,%rdi
19366+ SET_RDI_INTO_CR0
19367+110:
19368+#endif
19369+ .endm
19370+
19371+.macro pax_erase_kstack
19372+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
19373+ call pax_erase_kstack
19374+#endif
19375+.endm
19376+
19377+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
19378+ENTRY(pax_erase_kstack)
19379+ pushq %rdi
19380+ pushq %rcx
19381+ pushq %rax
19382+ pushq %r11
19383+
19384+ GET_THREAD_INFO(%r11)
19385+ mov TI_lowest_stack(%r11), %rdi
19386+ mov $-0xBEEF, %rax
19387+ std
19388+
19389+1: mov %edi, %ecx
19390+ and $THREAD_SIZE_asm - 1, %ecx
19391+ shr $3, %ecx
19392+ repne scasq
19393+ jecxz 2f
19394+
19395+ cmp $2*8, %ecx
19396+ jc 2f
19397+
19398+ mov $2*8, %ecx
19399+ repe scasq
19400+ jecxz 2f
19401+ jne 1b
19402+
19403+2: cld
19404+ mov %esp, %ecx
19405+ sub %edi, %ecx
19406+
19407+ cmp $THREAD_SIZE_asm, %rcx
19408+ jb 3f
19409+ ud2
19410+3:
19411+
19412+ shr $3, %ecx
19413+ rep stosq
19414+
19415+ mov TI_task_thread_sp0(%r11), %rdi
19416+ sub $256, %rdi
19417+ mov %rdi, TI_lowest_stack(%r11)
19418+
19419+ popq %r11
19420+ popq %rax
19421+ popq %rcx
19422+ popq %rdi
19423+ pax_force_retaddr
19424+ ret
19425+ENDPROC(pax_erase_kstack)
19426+#endif
19427
19428 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
19429 #ifdef CONFIG_TRACE_IRQFLAGS
19430@@ -375,8 +689,8 @@ ENDPROC(native_usergs_sysret64)
19431 .endm
19432
19433 .macro UNFAKE_STACK_FRAME
19434- addq $8*6, %rsp
19435- CFI_ADJUST_CFA_OFFSET -(6*8)
19436+ addq $8*6 + ARG_SKIP, %rsp
19437+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
19438 .endm
19439
19440 /*
19441@@ -463,7 +777,7 @@ ENDPROC(native_usergs_sysret64)
19442 movq %rsp, %rsi
19443
19444 leaq -RBP(%rsp),%rdi /* arg1 for handler */
19445- testl $3, CS-RBP(%rsi)
19446+ testb $3, CS-RBP(%rsi)
19447 je 1f
19448 SWAPGS
19449 /*
19450@@ -498,9 +812,10 @@ ENTRY(save_rest)
19451 movq_cfi r15, R15+16
19452 movq %r11, 8(%rsp) /* return address */
19453 FIXUP_TOP_OF_STACK %r11, 16
19454+ pax_force_retaddr
19455 ret
19456 CFI_ENDPROC
19457-END(save_rest)
19458+ENDPROC(save_rest)
19459
19460 /* save complete stack frame */
19461 .pushsection .kprobes.text, "ax"
19462@@ -529,9 +844,10 @@ ENTRY(save_paranoid)
19463 js 1f /* negative -> in kernel */
19464 SWAPGS
19465 xorl %ebx,%ebx
19466-1: ret
19467+1: pax_force_retaddr_bts
19468+ ret
19469 CFI_ENDPROC
19470-END(save_paranoid)
19471+ENDPROC(save_paranoid)
19472 .popsection
19473
19474 /*
19475@@ -553,7 +869,7 @@ ENTRY(ret_from_fork)
19476
19477 RESTORE_REST
19478
19479- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
19480+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
19481 jz 1f
19482
19483 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
19484@@ -571,7 +887,7 @@ ENTRY(ret_from_fork)
19485 RESTORE_REST
19486 jmp int_ret_from_sys_call
19487 CFI_ENDPROC
19488-END(ret_from_fork)
19489+ENDPROC(ret_from_fork)
19490
19491 /*
19492 * System call entry. Up to 6 arguments in registers are supported.
19493@@ -608,7 +924,7 @@ END(ret_from_fork)
19494 ENTRY(system_call)
19495 CFI_STARTPROC simple
19496 CFI_SIGNAL_FRAME
19497- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
19498+ CFI_DEF_CFA rsp,0
19499 CFI_REGISTER rip,rcx
19500 /*CFI_REGISTER rflags,r11*/
19501 SWAPGS_UNSAFE_STACK
19502@@ -621,16 +937,23 @@ GLOBAL(system_call_after_swapgs)
19503
19504 movq %rsp,PER_CPU_VAR(old_rsp)
19505 movq PER_CPU_VAR(kernel_stack),%rsp
19506+ SAVE_ARGS 8*6,0
19507+ pax_enter_kernel_user
19508+
19509+#ifdef CONFIG_PAX_RANDKSTACK
19510+ pax_erase_kstack
19511+#endif
19512+
19513 /*
19514 * No need to follow this irqs off/on section - it's straight
19515 * and short:
19516 */
19517 ENABLE_INTERRUPTS(CLBR_NONE)
19518- SAVE_ARGS 8,0
19519 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
19520 movq %rcx,RIP-ARGOFFSET(%rsp)
19521 CFI_REL_OFFSET rip,RIP-ARGOFFSET
19522- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
19523+ GET_THREAD_INFO(%rcx)
19524+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
19525 jnz tracesys
19526 system_call_fastpath:
19527 #if __SYSCALL_MASK == ~0
19528@@ -640,7 +963,7 @@ system_call_fastpath:
19529 cmpl $__NR_syscall_max,%eax
19530 #endif
19531 ja badsys
19532- movq %r10,%rcx
19533+ movq R10-ARGOFFSET(%rsp),%rcx
19534 call *sys_call_table(,%rax,8) # XXX: rip relative
19535 movq %rax,RAX-ARGOFFSET(%rsp)
19536 /*
19537@@ -654,10 +977,13 @@ sysret_check:
19538 LOCKDEP_SYS_EXIT
19539 DISABLE_INTERRUPTS(CLBR_NONE)
19540 TRACE_IRQS_OFF
19541- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
19542+ GET_THREAD_INFO(%rcx)
19543+ movl TI_flags(%rcx),%edx
19544 andl %edi,%edx
19545 jnz sysret_careful
19546 CFI_REMEMBER_STATE
19547+ pax_exit_kernel_user
19548+ pax_erase_kstack
19549 /*
19550 * sysretq will re-enable interrupts:
19551 */
19552@@ -709,14 +1035,18 @@ badsys:
19553 * jump back to the normal fast path.
19554 */
19555 auditsys:
19556- movq %r10,%r9 /* 6th arg: 4th syscall arg */
19557+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
19558 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
19559 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
19560 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
19561 movq %rax,%rsi /* 2nd arg: syscall number */
19562 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
19563 call __audit_syscall_entry
19564+
19565+ pax_erase_kstack
19566+
19567 LOAD_ARGS 0 /* reload call-clobbered registers */
19568+ pax_set_fptr_mask
19569 jmp system_call_fastpath
19570
19571 /*
19572@@ -737,7 +1067,7 @@ sysret_audit:
19573 /* Do syscall tracing */
19574 tracesys:
19575 #ifdef CONFIG_AUDITSYSCALL
19576- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
19577+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
19578 jz auditsys
19579 #endif
19580 SAVE_REST
19581@@ -745,12 +1075,16 @@ tracesys:
19582 FIXUP_TOP_OF_STACK %rdi
19583 movq %rsp,%rdi
19584 call syscall_trace_enter
19585+
19586+ pax_erase_kstack
19587+
19588 /*
19589 * Reload arg registers from stack in case ptrace changed them.
19590 * We don't reload %rax because syscall_trace_enter() returned
19591 * the value it wants us to use in the table lookup.
19592 */
19593 LOAD_ARGS ARGOFFSET, 1
19594+ pax_set_fptr_mask
19595 RESTORE_REST
19596 #if __SYSCALL_MASK == ~0
19597 cmpq $__NR_syscall_max,%rax
19598@@ -759,7 +1093,7 @@ tracesys:
19599 cmpl $__NR_syscall_max,%eax
19600 #endif
19601 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
19602- movq %r10,%rcx /* fixup for C */
19603+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
19604 call *sys_call_table(,%rax,8)
19605 movq %rax,RAX-ARGOFFSET(%rsp)
19606 /* Use IRET because user could have changed frame */
19607@@ -780,7 +1114,9 @@ GLOBAL(int_with_check)
19608 andl %edi,%edx
19609 jnz int_careful
19610 andl $~TS_COMPAT,TI_status(%rcx)
19611- jmp retint_swapgs
19612+ pax_exit_kernel_user
19613+ pax_erase_kstack
19614+ jmp retint_swapgs_pax
19615
19616 /* Either reschedule or signal or syscall exit tracking needed. */
19617 /* First do a reschedule test. */
19618@@ -826,7 +1162,7 @@ int_restore_rest:
19619 TRACE_IRQS_OFF
19620 jmp int_with_check
19621 CFI_ENDPROC
19622-END(system_call)
19623+ENDPROC(system_call)
19624
19625 .macro FORK_LIKE func
19626 ENTRY(stub_\func)
19627@@ -839,9 +1175,10 @@ ENTRY(stub_\func)
19628 DEFAULT_FRAME 0 8 /* offset 8: return address */
19629 call sys_\func
19630 RESTORE_TOP_OF_STACK %r11, 8
19631+ pax_force_retaddr
19632 ret $REST_SKIP /* pop extended registers */
19633 CFI_ENDPROC
19634-END(stub_\func)
19635+ENDPROC(stub_\func)
19636 .endm
19637
19638 .macro FIXED_FRAME label,func
19639@@ -851,9 +1188,10 @@ ENTRY(\label)
19640 FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
19641 call \func
19642 RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
19643+ pax_force_retaddr
19644 ret
19645 CFI_ENDPROC
19646-END(\label)
19647+ENDPROC(\label)
19648 .endm
19649
19650 FORK_LIKE clone
19651@@ -870,9 +1208,10 @@ ENTRY(ptregscall_common)
19652 movq_cfi_restore R12+8, r12
19653 movq_cfi_restore RBP+8, rbp
19654 movq_cfi_restore RBX+8, rbx
19655+ pax_force_retaddr
19656 ret $REST_SKIP /* pop extended registers */
19657 CFI_ENDPROC
19658-END(ptregscall_common)
19659+ENDPROC(ptregscall_common)
19660
19661 ENTRY(stub_execve)
19662 CFI_STARTPROC
19663@@ -885,7 +1224,7 @@ ENTRY(stub_execve)
19664 RESTORE_REST
19665 jmp int_ret_from_sys_call
19666 CFI_ENDPROC
19667-END(stub_execve)
19668+ENDPROC(stub_execve)
19669
19670 /*
19671 * sigreturn is special because it needs to restore all registers on return.
19672@@ -902,7 +1241,7 @@ ENTRY(stub_rt_sigreturn)
19673 RESTORE_REST
19674 jmp int_ret_from_sys_call
19675 CFI_ENDPROC
19676-END(stub_rt_sigreturn)
19677+ENDPROC(stub_rt_sigreturn)
19678
19679 #ifdef CONFIG_X86_X32_ABI
19680 ENTRY(stub_x32_rt_sigreturn)
19681@@ -916,7 +1255,7 @@ ENTRY(stub_x32_rt_sigreturn)
19682 RESTORE_REST
19683 jmp int_ret_from_sys_call
19684 CFI_ENDPROC
19685-END(stub_x32_rt_sigreturn)
19686+ENDPROC(stub_x32_rt_sigreturn)
19687
19688 ENTRY(stub_x32_execve)
19689 CFI_STARTPROC
19690@@ -930,7 +1269,7 @@ ENTRY(stub_x32_execve)
19691 RESTORE_REST
19692 jmp int_ret_from_sys_call
19693 CFI_ENDPROC
19694-END(stub_x32_execve)
19695+ENDPROC(stub_x32_execve)
19696
19697 #endif
19698
19699@@ -967,7 +1306,7 @@ vector=vector+1
19700 2: jmp common_interrupt
19701 .endr
19702 CFI_ENDPROC
19703-END(irq_entries_start)
19704+ENDPROC(irq_entries_start)
19705
19706 .previous
19707 END(interrupt)
19708@@ -987,6 +1326,16 @@ END(interrupt)
19709 subq $ORIG_RAX-RBP, %rsp
19710 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
19711 SAVE_ARGS_IRQ
19712+#ifdef CONFIG_PAX_MEMORY_UDEREF
19713+ testb $3, CS(%rdi)
19714+ jnz 1f
19715+ pax_enter_kernel
19716+ jmp 2f
19717+1: pax_enter_kernel_user
19718+2:
19719+#else
19720+ pax_enter_kernel
19721+#endif
19722 call \func
19723 .endm
19724
19725@@ -1019,7 +1368,7 @@ ret_from_intr:
19726
19727 exit_intr:
19728 GET_THREAD_INFO(%rcx)
19729- testl $3,CS-ARGOFFSET(%rsp)
19730+ testb $3,CS-ARGOFFSET(%rsp)
19731 je retint_kernel
19732
19733 /* Interrupt came from user space */
19734@@ -1041,12 +1390,16 @@ retint_swapgs: /* return to user-space */
19735 * The iretq could re-enable interrupts:
19736 */
19737 DISABLE_INTERRUPTS(CLBR_ANY)
19738+ pax_exit_kernel_user
19739+retint_swapgs_pax:
19740 TRACE_IRQS_IRETQ
19741 SWAPGS
19742 jmp restore_args
19743
19744 retint_restore_args: /* return to kernel space */
19745 DISABLE_INTERRUPTS(CLBR_ANY)
19746+ pax_exit_kernel
19747+ pax_force_retaddr (RIP-ARGOFFSET)
19748 /*
19749 * The iretq could re-enable interrupts:
19750 */
19751@@ -1129,7 +1482,7 @@ ENTRY(retint_kernel)
19752 #endif
19753
19754 CFI_ENDPROC
19755-END(common_interrupt)
19756+ENDPROC(common_interrupt)
19757 /*
19758 * End of kprobes section
19759 */
19760@@ -1147,7 +1500,7 @@ ENTRY(\sym)
19761 interrupt \do_sym
19762 jmp ret_from_intr
19763 CFI_ENDPROC
19764-END(\sym)
19765+ENDPROC(\sym)
19766 .endm
19767
19768 #ifdef CONFIG_SMP
19769@@ -1203,12 +1556,22 @@ ENTRY(\sym)
19770 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
19771 call error_entry
19772 DEFAULT_FRAME 0
19773+#ifdef CONFIG_PAX_MEMORY_UDEREF
19774+ testb $3, CS(%rsp)
19775+ jnz 1f
19776+ pax_enter_kernel
19777+ jmp 2f
19778+1: pax_enter_kernel_user
19779+2:
19780+#else
19781+ pax_enter_kernel
19782+#endif
19783 movq %rsp,%rdi /* pt_regs pointer */
19784 xorl %esi,%esi /* no error code */
19785 call \do_sym
19786 jmp error_exit /* %ebx: no swapgs flag */
19787 CFI_ENDPROC
19788-END(\sym)
19789+ENDPROC(\sym)
19790 .endm
19791
19792 .macro paranoidzeroentry sym do_sym
19793@@ -1221,15 +1584,25 @@ ENTRY(\sym)
19794 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
19795 call save_paranoid
19796 TRACE_IRQS_OFF
19797+#ifdef CONFIG_PAX_MEMORY_UDEREF
19798+ testb $3, CS(%rsp)
19799+ jnz 1f
19800+ pax_enter_kernel
19801+ jmp 2f
19802+1: pax_enter_kernel_user
19803+2:
19804+#else
19805+ pax_enter_kernel
19806+#endif
19807 movq %rsp,%rdi /* pt_regs pointer */
19808 xorl %esi,%esi /* no error code */
19809 call \do_sym
19810 jmp paranoid_exit /* %ebx: no swapgs flag */
19811 CFI_ENDPROC
19812-END(\sym)
19813+ENDPROC(\sym)
19814 .endm
19815
19816-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
19817+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
19818 .macro paranoidzeroentry_ist sym do_sym ist
19819 ENTRY(\sym)
19820 INTR_FRAME
19821@@ -1240,14 +1613,30 @@ ENTRY(\sym)
19822 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
19823 call save_paranoid
19824 TRACE_IRQS_OFF_DEBUG
19825+#ifdef CONFIG_PAX_MEMORY_UDEREF
19826+ testb $3, CS(%rsp)
19827+ jnz 1f
19828+ pax_enter_kernel
19829+ jmp 2f
19830+1: pax_enter_kernel_user
19831+2:
19832+#else
19833+ pax_enter_kernel
19834+#endif
19835 movq %rsp,%rdi /* pt_regs pointer */
19836 xorl %esi,%esi /* no error code */
19837+#ifdef CONFIG_SMP
19838+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
19839+ lea init_tss(%r12), %r12
19840+#else
19841+ lea init_tss(%rip), %r12
19842+#endif
19843 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
19844 call \do_sym
19845 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
19846 jmp paranoid_exit /* %ebx: no swapgs flag */
19847 CFI_ENDPROC
19848-END(\sym)
19849+ENDPROC(\sym)
19850 .endm
19851
19852 .macro errorentry sym do_sym
19853@@ -1259,13 +1648,23 @@ ENTRY(\sym)
19854 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
19855 call error_entry
19856 DEFAULT_FRAME 0
19857+#ifdef CONFIG_PAX_MEMORY_UDEREF
19858+ testb $3, CS(%rsp)
19859+ jnz 1f
19860+ pax_enter_kernel
19861+ jmp 2f
19862+1: pax_enter_kernel_user
19863+2:
19864+#else
19865+ pax_enter_kernel
19866+#endif
19867 movq %rsp,%rdi /* pt_regs pointer */
19868 movq ORIG_RAX(%rsp),%rsi /* get error code */
19869 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
19870 call \do_sym
19871 jmp error_exit /* %ebx: no swapgs flag */
19872 CFI_ENDPROC
19873-END(\sym)
19874+ENDPROC(\sym)
19875 .endm
19876
19877 /* error code is on the stack already */
19878@@ -1279,13 +1678,23 @@ ENTRY(\sym)
19879 call save_paranoid
19880 DEFAULT_FRAME 0
19881 TRACE_IRQS_OFF
19882+#ifdef CONFIG_PAX_MEMORY_UDEREF
19883+ testb $3, CS(%rsp)
19884+ jnz 1f
19885+ pax_enter_kernel
19886+ jmp 2f
19887+1: pax_enter_kernel_user
19888+2:
19889+#else
19890+ pax_enter_kernel
19891+#endif
19892 movq %rsp,%rdi /* pt_regs pointer */
19893 movq ORIG_RAX(%rsp),%rsi /* get error code */
19894 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
19895 call \do_sym
19896 jmp paranoid_exit /* %ebx: no swapgs flag */
19897 CFI_ENDPROC
19898-END(\sym)
19899+ENDPROC(\sym)
19900 .endm
19901
19902 zeroentry divide_error do_divide_error
19903@@ -1315,9 +1724,10 @@ gs_change:
19904 2: mfence /* workaround */
19905 SWAPGS
19906 popfq_cfi
19907+ pax_force_retaddr
19908 ret
19909 CFI_ENDPROC
19910-END(native_load_gs_index)
19911+ENDPROC(native_load_gs_index)
19912
19913 _ASM_EXTABLE(gs_change,bad_gs)
19914 .section .fixup,"ax"
19915@@ -1345,9 +1755,10 @@ ENTRY(call_softirq)
19916 CFI_DEF_CFA_REGISTER rsp
19917 CFI_ADJUST_CFA_OFFSET -8
19918 decl PER_CPU_VAR(irq_count)
19919+ pax_force_retaddr
19920 ret
19921 CFI_ENDPROC
19922-END(call_softirq)
19923+ENDPROC(call_softirq)
19924
19925 #ifdef CONFIG_XEN
19926 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
19927@@ -1385,7 +1796,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
19928 decl PER_CPU_VAR(irq_count)
19929 jmp error_exit
19930 CFI_ENDPROC
19931-END(xen_do_hypervisor_callback)
19932+ENDPROC(xen_do_hypervisor_callback)
19933
19934 /*
19935 * Hypervisor uses this for application faults while it executes.
19936@@ -1444,7 +1855,7 @@ ENTRY(xen_failsafe_callback)
19937 SAVE_ALL
19938 jmp error_exit
19939 CFI_ENDPROC
19940-END(xen_failsafe_callback)
19941+ENDPROC(xen_failsafe_callback)
19942
19943 apicinterrupt HYPERVISOR_CALLBACK_VECTOR \
19944 xen_hvm_callback_vector xen_evtchn_do_upcall
19945@@ -1498,16 +1909,31 @@ ENTRY(paranoid_exit)
19946 TRACE_IRQS_OFF_DEBUG
19947 testl %ebx,%ebx /* swapgs needed? */
19948 jnz paranoid_restore
19949- testl $3,CS(%rsp)
19950+ testb $3,CS(%rsp)
19951 jnz paranoid_userspace
19952+#ifdef CONFIG_PAX_MEMORY_UDEREF
19953+ pax_exit_kernel
19954+ TRACE_IRQS_IRETQ 0
19955+ SWAPGS_UNSAFE_STACK
19956+ RESTORE_ALL 8
19957+ pax_force_retaddr_bts
19958+ jmp irq_return
19959+#endif
19960 paranoid_swapgs:
19961+#ifdef CONFIG_PAX_MEMORY_UDEREF
19962+ pax_exit_kernel_user
19963+#else
19964+ pax_exit_kernel
19965+#endif
19966 TRACE_IRQS_IRETQ 0
19967 SWAPGS_UNSAFE_STACK
19968 RESTORE_ALL 8
19969 jmp irq_return
19970 paranoid_restore:
19971+ pax_exit_kernel
19972 TRACE_IRQS_IRETQ_DEBUG 0
19973 RESTORE_ALL 8
19974+ pax_force_retaddr_bts
19975 jmp irq_return
19976 paranoid_userspace:
19977 GET_THREAD_INFO(%rcx)
19978@@ -1536,7 +1962,7 @@ paranoid_schedule:
19979 TRACE_IRQS_OFF
19980 jmp paranoid_userspace
19981 CFI_ENDPROC
19982-END(paranoid_exit)
19983+ENDPROC(paranoid_exit)
19984
19985 /*
19986 * Exception entry point. This expects an error code/orig_rax on the stack.
19987@@ -1563,12 +1989,13 @@ ENTRY(error_entry)
19988 movq_cfi r14, R14+8
19989 movq_cfi r15, R15+8
19990 xorl %ebx,%ebx
19991- testl $3,CS+8(%rsp)
19992+ testb $3,CS+8(%rsp)
19993 je error_kernelspace
19994 error_swapgs:
19995 SWAPGS
19996 error_sti:
19997 TRACE_IRQS_OFF
19998+ pax_force_retaddr_bts
19999 ret
20000
20001 /*
20002@@ -1595,7 +2022,7 @@ bstep_iret:
20003 movq %rcx,RIP+8(%rsp)
20004 jmp error_swapgs
20005 CFI_ENDPROC
20006-END(error_entry)
20007+ENDPROC(error_entry)
20008
20009
20010 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
20011@@ -1615,7 +2042,7 @@ ENTRY(error_exit)
20012 jnz retint_careful
20013 jmp retint_swapgs
20014 CFI_ENDPROC
20015-END(error_exit)
20016+ENDPROC(error_exit)
20017
20018 /*
20019 * Test if a given stack is an NMI stack or not.
20020@@ -1673,9 +2100,11 @@ ENTRY(nmi)
20021 * If %cs was not the kernel segment, then the NMI triggered in user
20022 * space, which means it is definitely not nested.
20023 */
20024+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
20025+ je 1f
20026 cmpl $__KERNEL_CS, 16(%rsp)
20027 jne first_nmi
20028-
20029+1:
20030 /*
20031 * Check the special variable on the stack to see if NMIs are
20032 * executing.
20033@@ -1709,8 +2138,7 @@ nested_nmi:
20034
20035 1:
20036 /* Set up the interrupted NMIs stack to jump to repeat_nmi */
20037- leaq -1*8(%rsp), %rdx
20038- movq %rdx, %rsp
20039+ subq $8, %rsp
20040 CFI_ADJUST_CFA_OFFSET 1*8
20041 leaq -10*8(%rsp), %rdx
20042 pushq_cfi $__KERNEL_DS
20043@@ -1728,6 +2156,7 @@ nested_nmi_out:
20044 CFI_RESTORE rdx
20045
20046 /* No need to check faults here */
20047+# pax_force_retaddr_bts
20048 INTERRUPT_RETURN
20049
20050 CFI_RESTORE_STATE
20051@@ -1844,6 +2273,8 @@ end_repeat_nmi:
20052 */
20053 movq %cr2, %r12
20054
20055+ pax_enter_kernel_nmi
20056+
20057 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
20058 movq %rsp,%rdi
20059 movq $-1,%rsi
20060@@ -1856,26 +2287,31 @@ end_repeat_nmi:
20061 movq %r12, %cr2
20062 1:
20063
20064- testl %ebx,%ebx /* swapgs needed? */
20065+ testl $1,%ebx /* swapgs needed? */
20066 jnz nmi_restore
20067 nmi_swapgs:
20068 SWAPGS_UNSAFE_STACK
20069 nmi_restore:
20070+ pax_exit_kernel_nmi
20071 /* Pop the extra iret frame at once */
20072 RESTORE_ALL 6*8
20073+ testb $3, 8(%rsp)
20074+ jnz 1f
20075+ pax_force_retaddr_bts
20076+1:
20077
20078 /* Clear the NMI executing stack variable */
20079 movq $0, 5*8(%rsp)
20080 jmp irq_return
20081 CFI_ENDPROC
20082-END(nmi)
20083+ENDPROC(nmi)
20084
20085 ENTRY(ignore_sysret)
20086 CFI_STARTPROC
20087 mov $-ENOSYS,%eax
20088 sysret
20089 CFI_ENDPROC
20090-END(ignore_sysret)
20091+ENDPROC(ignore_sysret)
20092
20093 /*
20094 * End of kprobes section
20095diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
20096index 42a392a..fbbd930 100644
20097--- a/arch/x86/kernel/ftrace.c
20098+++ b/arch/x86/kernel/ftrace.c
20099@@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
20100 {
20101 unsigned char replaced[MCOUNT_INSN_SIZE];
20102
20103+ ip = ktla_ktva(ip);
20104+
20105 /*
20106 * Note: Due to modules and __init, code can
20107 * disappear and change, we need to protect against faulting
20108@@ -227,7 +229,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
20109 unsigned char old[MCOUNT_INSN_SIZE], *new;
20110 int ret;
20111
20112- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
20113+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
20114 new = ftrace_call_replace(ip, (unsigned long)func);
20115
20116 /* See comment above by declaration of modifying_ftrace_code */
20117@@ -238,7 +240,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
20118 /* Also update the regs callback function */
20119 if (!ret) {
20120 ip = (unsigned long)(&ftrace_regs_call);
20121- memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE);
20122+ memcpy(old, ktla_ktva((void *)&ftrace_regs_call), MCOUNT_INSN_SIZE);
20123 new = ftrace_call_replace(ip, (unsigned long)func);
20124 ret = ftrace_modify_code(ip, old, new);
20125 }
20126@@ -279,7 +281,7 @@ static int ftrace_write(unsigned long ip, const char *val, int size)
20127 * kernel identity mapping to modify code.
20128 */
20129 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
20130- ip = (unsigned long)__va(__pa_symbol(ip));
20131+ ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
20132
20133 return probe_kernel_write((void *)ip, val, size);
20134 }
20135@@ -289,7 +291,7 @@ static int add_break(unsigned long ip, const char *old)
20136 unsigned char replaced[MCOUNT_INSN_SIZE];
20137 unsigned char brk = BREAKPOINT_INSTRUCTION;
20138
20139- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
20140+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
20141 return -EFAULT;
20142
20143 /* Make sure it is what we expect it to be */
20144@@ -637,7 +639,7 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
20145 return ret;
20146
20147 fail_update:
20148- probe_kernel_write((void *)ip, &old_code[0], 1);
20149+ probe_kernel_write((void *)ktla_ktva(ip), &old_code[0], 1);
20150 goto out;
20151 }
20152
20153@@ -670,6 +672,8 @@ static int ftrace_mod_jmp(unsigned long ip,
20154 {
20155 unsigned char code[MCOUNT_INSN_SIZE];
20156
20157+ ip = ktla_ktva(ip);
20158+
20159 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
20160 return -EFAULT;
20161
20162diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
20163index 8f3201d..aa860bf 100644
20164--- a/arch/x86/kernel/head64.c
20165+++ b/arch/x86/kernel/head64.c
20166@@ -175,7 +175,6 @@ void __init x86_64_start_kernel(char * real_mode_data)
20167 if (console_loglevel == 10)
20168 early_printk("Kernel alive\n");
20169
20170- clear_page(init_level4_pgt);
20171 /* set init_level4_pgt kernel high mapping*/
20172 init_level4_pgt[511] = early_level4_pgt[511];
20173
20174diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
20175index 73afd11..d1670f5 100644
20176--- a/arch/x86/kernel/head_32.S
20177+++ b/arch/x86/kernel/head_32.S
20178@@ -26,6 +26,12 @@
20179 /* Physical address */
20180 #define pa(X) ((X) - __PAGE_OFFSET)
20181
20182+#ifdef CONFIG_PAX_KERNEXEC
20183+#define ta(X) (X)
20184+#else
20185+#define ta(X) ((X) - __PAGE_OFFSET)
20186+#endif
20187+
20188 /*
20189 * References to members of the new_cpu_data structure.
20190 */
20191@@ -55,11 +61,7 @@
20192 * and small than max_low_pfn, otherwise will waste some page table entries
20193 */
20194
20195-#if PTRS_PER_PMD > 1
20196-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
20197-#else
20198-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
20199-#endif
20200+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
20201
20202 /* Number of possible pages in the lowmem region */
20203 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
20204@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
20205 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
20206
20207 /*
20208+ * Real beginning of normal "text" segment
20209+ */
20210+ENTRY(stext)
20211+ENTRY(_stext)
20212+
20213+/*
20214 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
20215 * %esi points to the real-mode code as a 32-bit pointer.
20216 * CS and DS must be 4 GB flat segments, but we don't depend on
20217@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
20218 * can.
20219 */
20220 __HEAD
20221+
20222+#ifdef CONFIG_PAX_KERNEXEC
20223+ jmp startup_32
20224+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
20225+.fill PAGE_SIZE-5,1,0xcc
20226+#endif
20227+
20228 ENTRY(startup_32)
20229 movl pa(stack_start),%ecx
20230
20231@@ -106,6 +121,59 @@ ENTRY(startup_32)
20232 2:
20233 leal -__PAGE_OFFSET(%ecx),%esp
20234
20235+#ifdef CONFIG_SMP
20236+ movl $pa(cpu_gdt_table),%edi
20237+ movl $__per_cpu_load,%eax
20238+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
20239+ rorl $16,%eax
20240+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
20241+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
20242+ movl $__per_cpu_end - 1,%eax
20243+ subl $__per_cpu_start,%eax
20244+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
20245+#endif
20246+
20247+#ifdef CONFIG_PAX_MEMORY_UDEREF
20248+ movl $NR_CPUS,%ecx
20249+ movl $pa(cpu_gdt_table),%edi
20250+1:
20251+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
20252+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
20253+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
20254+ addl $PAGE_SIZE_asm,%edi
20255+ loop 1b
20256+#endif
20257+
20258+#ifdef CONFIG_PAX_KERNEXEC
20259+ movl $pa(boot_gdt),%edi
20260+ movl $__LOAD_PHYSICAL_ADDR,%eax
20261+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
20262+ rorl $16,%eax
20263+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
20264+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
20265+ rorl $16,%eax
20266+
20267+ ljmp $(__BOOT_CS),$1f
20268+1:
20269+
20270+ movl $NR_CPUS,%ecx
20271+ movl $pa(cpu_gdt_table),%edi
20272+ addl $__PAGE_OFFSET,%eax
20273+1:
20274+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
20275+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
20276+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
20277+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
20278+ rorl $16,%eax
20279+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
20280+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
20281+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
20282+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
20283+ rorl $16,%eax
20284+ addl $PAGE_SIZE_asm,%edi
20285+ loop 1b
20286+#endif
20287+
20288 /*
20289 * Clear BSS first so that there are no surprises...
20290 */
20291@@ -201,8 +269,11 @@ ENTRY(startup_32)
20292 movl %eax, pa(max_pfn_mapped)
20293
20294 /* Do early initialization of the fixmap area */
20295- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
20296- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
20297+#ifdef CONFIG_COMPAT_VDSO
20298+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
20299+#else
20300+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
20301+#endif
20302 #else /* Not PAE */
20303
20304 page_pde_offset = (__PAGE_OFFSET >> 20);
20305@@ -232,8 +303,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
20306 movl %eax, pa(max_pfn_mapped)
20307
20308 /* Do early initialization of the fixmap area */
20309- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
20310- movl %eax,pa(initial_page_table+0xffc)
20311+#ifdef CONFIG_COMPAT_VDSO
20312+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
20313+#else
20314+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
20315+#endif
20316 #endif
20317
20318 #ifdef CONFIG_PARAVIRT
20319@@ -247,9 +321,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
20320 cmpl $num_subarch_entries, %eax
20321 jae bad_subarch
20322
20323- movl pa(subarch_entries)(,%eax,4), %eax
20324- subl $__PAGE_OFFSET, %eax
20325- jmp *%eax
20326+ jmp *pa(subarch_entries)(,%eax,4)
20327
20328 bad_subarch:
20329 WEAK(lguest_entry)
20330@@ -261,10 +333,10 @@ WEAK(xen_entry)
20331 __INITDATA
20332
20333 subarch_entries:
20334- .long default_entry /* normal x86/PC */
20335- .long lguest_entry /* lguest hypervisor */
20336- .long xen_entry /* Xen hypervisor */
20337- .long default_entry /* Moorestown MID */
20338+ .long ta(default_entry) /* normal x86/PC */
20339+ .long ta(lguest_entry) /* lguest hypervisor */
20340+ .long ta(xen_entry) /* Xen hypervisor */
20341+ .long ta(default_entry) /* Moorestown MID */
20342 num_subarch_entries = (. - subarch_entries) / 4
20343 .previous
20344 #else
20345@@ -355,6 +427,7 @@ default_entry:
20346 movl pa(mmu_cr4_features),%eax
20347 movl %eax,%cr4
20348
20349+#ifdef CONFIG_X86_PAE
20350 testb $X86_CR4_PAE, %al # check if PAE is enabled
20351 jz enable_paging
20352
20353@@ -383,6 +456,9 @@ default_entry:
20354 /* Make changes effective */
20355 wrmsr
20356
20357+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
20358+#endif
20359+
20360 enable_paging:
20361
20362 /*
20363@@ -451,14 +527,20 @@ is486:
20364 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
20365 movl %eax,%ss # after changing gdt.
20366
20367- movl $(__USER_DS),%eax # DS/ES contains default USER segment
20368+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
20369 movl %eax,%ds
20370 movl %eax,%es
20371
20372 movl $(__KERNEL_PERCPU), %eax
20373 movl %eax,%fs # set this cpu's percpu
20374
20375+#ifdef CONFIG_CC_STACKPROTECTOR
20376 movl $(__KERNEL_STACK_CANARY),%eax
20377+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
20378+ movl $(__USER_DS),%eax
20379+#else
20380+ xorl %eax,%eax
20381+#endif
20382 movl %eax,%gs
20383
20384 xorl %eax,%eax # Clear LDT
20385@@ -534,8 +616,11 @@ setup_once:
20386 * relocation. Manually set base address in stack canary
20387 * segment descriptor.
20388 */
20389- movl $gdt_page,%eax
20390+ movl $cpu_gdt_table,%eax
20391 movl $stack_canary,%ecx
20392+#ifdef CONFIG_SMP
20393+ addl $__per_cpu_load,%ecx
20394+#endif
20395 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
20396 shrl $16, %ecx
20397 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
20398@@ -566,7 +651,7 @@ ENDPROC(early_idt_handlers)
20399 /* This is global to keep gas from relaxing the jumps */
20400 ENTRY(early_idt_handler)
20401 cld
20402- cmpl $2,%ss:early_recursion_flag
20403+ cmpl $1,%ss:early_recursion_flag
20404 je hlt_loop
20405 incl %ss:early_recursion_flag
20406
20407@@ -604,8 +689,8 @@ ENTRY(early_idt_handler)
20408 pushl (20+6*4)(%esp) /* trapno */
20409 pushl $fault_msg
20410 call printk
20411-#endif
20412 call dump_stack
20413+#endif
20414 hlt_loop:
20415 hlt
20416 jmp hlt_loop
20417@@ -624,8 +709,11 @@ ENDPROC(early_idt_handler)
20418 /* This is the default interrupt "handler" :-) */
20419 ALIGN
20420 ignore_int:
20421- cld
20422 #ifdef CONFIG_PRINTK
20423+ cmpl $2,%ss:early_recursion_flag
20424+ je hlt_loop
20425+ incl %ss:early_recursion_flag
20426+ cld
20427 pushl %eax
20428 pushl %ecx
20429 pushl %edx
20430@@ -634,9 +722,6 @@ ignore_int:
20431 movl $(__KERNEL_DS),%eax
20432 movl %eax,%ds
20433 movl %eax,%es
20434- cmpl $2,early_recursion_flag
20435- je hlt_loop
20436- incl early_recursion_flag
20437 pushl 16(%esp)
20438 pushl 24(%esp)
20439 pushl 32(%esp)
20440@@ -670,29 +755,43 @@ ENTRY(setup_once_ref)
20441 /*
20442 * BSS section
20443 */
20444-__PAGE_ALIGNED_BSS
20445- .align PAGE_SIZE
20446 #ifdef CONFIG_X86_PAE
20447+.section .initial_pg_pmd,"a",@progbits
20448 initial_pg_pmd:
20449 .fill 1024*KPMDS,4,0
20450 #else
20451+.section .initial_page_table,"a",@progbits
20452 ENTRY(initial_page_table)
20453 .fill 1024,4,0
20454 #endif
20455+.section .initial_pg_fixmap,"a",@progbits
20456 initial_pg_fixmap:
20457 .fill 1024,4,0
20458+.section .empty_zero_page,"a",@progbits
20459 ENTRY(empty_zero_page)
20460 .fill 4096,1,0
20461+.section .swapper_pg_dir,"a",@progbits
20462 ENTRY(swapper_pg_dir)
20463+#ifdef CONFIG_X86_PAE
20464+ .fill 4,8,0
20465+#else
20466 .fill 1024,4,0
20467+#endif
20468+
20469+/*
20470+ * The IDT has to be page-aligned to simplify the Pentium
20471+ * F0 0F bug workaround.. We have a special link segment
20472+ * for this.
20473+ */
20474+.section .idt,"a",@progbits
20475+ENTRY(idt_table)
20476+ .fill 256,8,0
20477
20478 /*
20479 * This starts the data section.
20480 */
20481 #ifdef CONFIG_X86_PAE
20482-__PAGE_ALIGNED_DATA
20483- /* Page-aligned for the benefit of paravirt? */
20484- .align PAGE_SIZE
20485+.section .initial_page_table,"a",@progbits
20486 ENTRY(initial_page_table)
20487 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
20488 # if KPMDS == 3
20489@@ -711,12 +810,20 @@ ENTRY(initial_page_table)
20490 # error "Kernel PMDs should be 1, 2 or 3"
20491 # endif
20492 .align PAGE_SIZE /* needs to be page-sized too */
20493+
20494+#ifdef CONFIG_PAX_PER_CPU_PGD
20495+ENTRY(cpu_pgd)
20496+ .rept NR_CPUS
20497+ .fill 4,8,0
20498+ .endr
20499+#endif
20500+
20501 #endif
20502
20503 .data
20504 .balign 4
20505 ENTRY(stack_start)
20506- .long init_thread_union+THREAD_SIZE
20507+ .long init_thread_union+THREAD_SIZE-8
20508
20509 __INITRODATA
20510 int_msg:
20511@@ -744,7 +851,7 @@ fault_msg:
20512 * segment size, and 32-bit linear address value:
20513 */
20514
20515- .data
20516+.section .rodata,"a",@progbits
20517 .globl boot_gdt_descr
20518 .globl idt_descr
20519
20520@@ -753,7 +860,7 @@ fault_msg:
20521 .word 0 # 32 bit align gdt_desc.address
20522 boot_gdt_descr:
20523 .word __BOOT_DS+7
20524- .long boot_gdt - __PAGE_OFFSET
20525+ .long pa(boot_gdt)
20526
20527 .word 0 # 32-bit align idt_desc.address
20528 idt_descr:
20529@@ -764,7 +871,7 @@ idt_descr:
20530 .word 0 # 32 bit align gdt_desc.address
20531 ENTRY(early_gdt_descr)
20532 .word GDT_ENTRIES*8-1
20533- .long gdt_page /* Overwritten for secondary CPUs */
20534+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
20535
20536 /*
20537 * The boot_gdt must mirror the equivalent in setup.S and is
20538@@ -773,5 +880,65 @@ ENTRY(early_gdt_descr)
20539 .align L1_CACHE_BYTES
20540 ENTRY(boot_gdt)
20541 .fill GDT_ENTRY_BOOT_CS,8,0
20542- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
20543- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
20544+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
20545+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
20546+
20547+ .align PAGE_SIZE_asm
20548+ENTRY(cpu_gdt_table)
20549+ .rept NR_CPUS
20550+ .quad 0x0000000000000000 /* NULL descriptor */
20551+ .quad 0x0000000000000000 /* 0x0b reserved */
20552+ .quad 0x0000000000000000 /* 0x13 reserved */
20553+ .quad 0x0000000000000000 /* 0x1b reserved */
20554+
20555+#ifdef CONFIG_PAX_KERNEXEC
20556+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
20557+#else
20558+ .quad 0x0000000000000000 /* 0x20 unused */
20559+#endif
20560+
20561+ .quad 0x0000000000000000 /* 0x28 unused */
20562+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
20563+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
20564+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
20565+ .quad 0x0000000000000000 /* 0x4b reserved */
20566+ .quad 0x0000000000000000 /* 0x53 reserved */
20567+ .quad 0x0000000000000000 /* 0x5b reserved */
20568+
20569+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
20570+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
20571+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
20572+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
20573+
20574+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
20575+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
20576+
20577+ /*
20578+ * Segments used for calling PnP BIOS have byte granularity.
20579+ * The code segments and data segments have fixed 64k limits,
20580+ * the transfer segment sizes are set at run time.
20581+ */
20582+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
20583+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
20584+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
20585+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
20586+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
20587+
20588+ /*
20589+ * The APM segments have byte granularity and their bases
20590+ * are set at run time. All have 64k limits.
20591+ */
20592+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
20593+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
20594+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
20595+
20596+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
20597+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
20598+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
20599+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
20600+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
20601+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
20602+
20603+ /* Be sure this is zeroed to avoid false validations in Xen */
20604+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
20605+ .endr
20606diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
20607index 321d65e..e9437f7 100644
20608--- a/arch/x86/kernel/head_64.S
20609+++ b/arch/x86/kernel/head_64.S
20610@@ -20,6 +20,8 @@
20611 #include <asm/processor-flags.h>
20612 #include <asm/percpu.h>
20613 #include <asm/nops.h>
20614+#include <asm/cpufeature.h>
20615+#include <asm/alternative-asm.h>
20616
20617 #ifdef CONFIG_PARAVIRT
20618 #include <asm/asm-offsets.h>
20619@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
20620 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
20621 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
20622 L3_START_KERNEL = pud_index(__START_KERNEL_map)
20623+L4_VMALLOC_START = pgd_index(VMALLOC_START)
20624+L3_VMALLOC_START = pud_index(VMALLOC_START)
20625+L4_VMALLOC_END = pgd_index(VMALLOC_END)
20626+L3_VMALLOC_END = pud_index(VMALLOC_END)
20627+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
20628+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
20629
20630 .text
20631 __HEAD
20632@@ -89,11 +97,15 @@ startup_64:
20633 * Fixup the physical addresses in the page table
20634 */
20635 addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
20636+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
20637+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
20638+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
20639
20640 addq %rbp, level3_kernel_pgt + (510*8)(%rip)
20641 addq %rbp, level3_kernel_pgt + (511*8)(%rip)
20642
20643 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
20644+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
20645
20646 /*
20647 * Set up the identity mapping for the switchover. These
20648@@ -177,8 +189,8 @@ ENTRY(secondary_startup_64)
20649 movq $(init_level4_pgt - __START_KERNEL_map), %rax
20650 1:
20651
20652- /* Enable PAE mode and PGE */
20653- movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx
20654+ /* Enable PAE mode and PSE/PGE */
20655+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
20656 movq %rcx, %cr4
20657
20658 /* Setup early boot stage 4 level pagetables. */
20659@@ -199,10 +211,18 @@ ENTRY(secondary_startup_64)
20660 movl $MSR_EFER, %ecx
20661 rdmsr
20662 btsl $_EFER_SCE, %eax /* Enable System Call */
20663- btl $20,%edi /* No Execute supported? */
20664+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
20665 jnc 1f
20666 btsl $_EFER_NX, %eax
20667 btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
20668+ leaq init_level4_pgt(%rip), %rdi
20669+#ifndef CONFIG_EFI
20670+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
20671+#endif
20672+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
20673+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
20674+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
20675+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
20676 1: wrmsr /* Make changes effective */
20677
20678 /* Setup cr0 */
20679@@ -282,6 +302,7 @@ ENTRY(secondary_startup_64)
20680 * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
20681 * address given in m16:64.
20682 */
20683+ pax_set_fptr_mask
20684 movq initial_code(%rip),%rax
20685 pushq $0 # fake return address to stop unwinder
20686 pushq $__KERNEL_CS # set correct cs
20687@@ -388,7 +409,7 @@ ENTRY(early_idt_handler)
20688 call dump_stack
20689 #ifdef CONFIG_KALLSYMS
20690 leaq early_idt_ripmsg(%rip),%rdi
20691- movq 40(%rsp),%rsi # %rip again
20692+ movq 88(%rsp),%rsi # %rip again
20693 call __print_symbol
20694 #endif
20695 #endif /* EARLY_PRINTK */
20696@@ -416,6 +437,7 @@ ENDPROC(early_idt_handler)
20697 early_recursion_flag:
20698 .long 0
20699
20700+ .section .rodata,"a",@progbits
20701 #ifdef CONFIG_EARLY_PRINTK
20702 early_idt_msg:
20703 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
20704@@ -445,27 +467,50 @@ NEXT_PAGE(early_dynamic_pgts)
20705
20706 .data
20707
20708-#ifndef CONFIG_XEN
20709 NEXT_PAGE(init_level4_pgt)
20710- .fill 512,8,0
20711-#else
20712-NEXT_PAGE(init_level4_pgt)
20713- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
20714 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
20715 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
20716+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
20717+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
20718+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
20719+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
20720+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
20721+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
20722 .org init_level4_pgt + L4_START_KERNEL*8, 0
20723 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
20724 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
20725
20726+#ifdef CONFIG_PAX_PER_CPU_PGD
20727+NEXT_PAGE(cpu_pgd)
20728+ .rept NR_CPUS
20729+ .fill 512,8,0
20730+ .endr
20731+#endif
20732+
20733 NEXT_PAGE(level3_ident_pgt)
20734 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
20735+#ifdef CONFIG_XEN
20736 .fill 511, 8, 0
20737+#else
20738+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
20739+ .fill 510,8,0
20740+#endif
20741+
20742+NEXT_PAGE(level3_vmalloc_start_pgt)
20743+ .fill 512,8,0
20744+
20745+NEXT_PAGE(level3_vmalloc_end_pgt)
20746+ .fill 512,8,0
20747+
20748+NEXT_PAGE(level3_vmemmap_pgt)
20749+ .fill L3_VMEMMAP_START,8,0
20750+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
20751+
20752 NEXT_PAGE(level2_ident_pgt)
20753- /* Since I easily can, map the first 1G.
20754+ /* Since I easily can, map the first 2G.
20755 * Don't set NX because code runs from these pages.
20756 */
20757- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
20758-#endif
20759+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
20760
20761 NEXT_PAGE(level3_kernel_pgt)
20762 .fill L3_START_KERNEL,8,0
20763@@ -473,6 +518,9 @@ NEXT_PAGE(level3_kernel_pgt)
20764 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
20765 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
20766
20767+NEXT_PAGE(level2_vmemmap_pgt)
20768+ .fill 512,8,0
20769+
20770 NEXT_PAGE(level2_kernel_pgt)
20771 /*
20772 * 512 MB kernel mapping. We spend a full page on this pagetable
20773@@ -488,38 +536,64 @@ NEXT_PAGE(level2_kernel_pgt)
20774 KERNEL_IMAGE_SIZE/PMD_SIZE)
20775
20776 NEXT_PAGE(level2_fixmap_pgt)
20777- .fill 506,8,0
20778- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
20779- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
20780- .fill 5,8,0
20781+ .fill 507,8,0
20782+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
20783+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
20784+ .fill 4,8,0
20785
20786-NEXT_PAGE(level1_fixmap_pgt)
20787+NEXT_PAGE(level1_vsyscall_pgt)
20788 .fill 512,8,0
20789
20790 #undef PMDS
20791
20792- .data
20793+ .align PAGE_SIZE
20794+ENTRY(cpu_gdt_table)
20795+ .rept NR_CPUS
20796+ .quad 0x0000000000000000 /* NULL descriptor */
20797+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
20798+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
20799+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
20800+ .quad 0x00cffb000000ffff /* __USER32_CS */
20801+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
20802+ .quad 0x00affb000000ffff /* __USER_CS */
20803+
20804+#ifdef CONFIG_PAX_KERNEXEC
20805+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
20806+#else
20807+ .quad 0x0 /* unused */
20808+#endif
20809+
20810+ .quad 0,0 /* TSS */
20811+ .quad 0,0 /* LDT */
20812+ .quad 0,0,0 /* three TLS descriptors */
20813+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
20814+ /* asm/segment.h:GDT_ENTRIES must match this */
20815+
20816+ /* zero the remaining page */
20817+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
20818+ .endr
20819+
20820 .align 16
20821 .globl early_gdt_descr
20822 early_gdt_descr:
20823 .word GDT_ENTRIES*8-1
20824 early_gdt_descr_base:
20825- .quad INIT_PER_CPU_VAR(gdt_page)
20826+ .quad cpu_gdt_table
20827
20828 ENTRY(phys_base)
20829 /* This must match the first entry in level2_kernel_pgt */
20830 .quad 0x0000000000000000
20831
20832 #include "../../x86/xen/xen-head.S"
20833-
20834- .section .bss, "aw", @nobits
20835+
20836+ .section .rodata,"a",@progbits
20837 .align L1_CACHE_BYTES
20838 ENTRY(idt_table)
20839- .skip IDT_ENTRIES * 16
20840+ .fill 512,8,0
20841
20842 .align L1_CACHE_BYTES
20843 ENTRY(nmi_idt_table)
20844- .skip IDT_ENTRIES * 16
20845+ .fill 512,8,0
20846
20847 __PAGE_ALIGNED_BSS
20848 NEXT_PAGE(empty_zero_page)
20849diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
20850index 0fa6912..37fce70 100644
20851--- a/arch/x86/kernel/i386_ksyms_32.c
20852+++ b/arch/x86/kernel/i386_ksyms_32.c
20853@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
20854 EXPORT_SYMBOL(cmpxchg8b_emu);
20855 #endif
20856
20857+EXPORT_SYMBOL_GPL(cpu_gdt_table);
20858+
20859 /* Networking helper routines. */
20860 EXPORT_SYMBOL(csum_partial_copy_generic);
20861+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
20862+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
20863
20864 EXPORT_SYMBOL(__get_user_1);
20865 EXPORT_SYMBOL(__get_user_2);
20866@@ -37,3 +41,7 @@ EXPORT_SYMBOL(strstr);
20867
20868 EXPORT_SYMBOL(csum_partial);
20869 EXPORT_SYMBOL(empty_zero_page);
20870+
20871+#ifdef CONFIG_PAX_KERNEXEC
20872+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
20873+#endif
20874diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
20875index cb33909..1163b40 100644
20876--- a/arch/x86/kernel/i387.c
20877+++ b/arch/x86/kernel/i387.c
20878@@ -51,7 +51,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
20879 static inline bool interrupted_user_mode(void)
20880 {
20881 struct pt_regs *regs = get_irq_regs();
20882- return regs && user_mode_vm(regs);
20883+ return regs && user_mode(regs);
20884 }
20885
20886 /*
20887diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
20888index 9a5c460..84868423 100644
20889--- a/arch/x86/kernel/i8259.c
20890+++ b/arch/x86/kernel/i8259.c
20891@@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
20892 static void make_8259A_irq(unsigned int irq)
20893 {
20894 disable_irq_nosync(irq);
20895- io_apic_irqs &= ~(1<<irq);
20896+ io_apic_irqs &= ~(1UL<<irq);
20897 irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
20898 i8259A_chip.name);
20899 enable_irq(irq);
20900@@ -209,7 +209,7 @@ spurious_8259A_irq:
20901 "spurious 8259A interrupt: IRQ%d.\n", irq);
20902 spurious_irq_mask |= irqmask;
20903 }
20904- atomic_inc(&irq_err_count);
20905+ atomic_inc_unchecked(&irq_err_count);
20906 /*
20907 * Theoretically we do not have to handle this IRQ,
20908 * but in Linux this does not cause problems and is
20909@@ -333,14 +333,16 @@ static void init_8259A(int auto_eoi)
20910 /* (slave's support for AEOI in flat mode is to be investigated) */
20911 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
20912
20913+ pax_open_kernel();
20914 if (auto_eoi)
20915 /*
20916 * In AEOI mode we just have to mask the interrupt
20917 * when acking.
20918 */
20919- i8259A_chip.irq_mask_ack = disable_8259A_irq;
20920+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
20921 else
20922- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
20923+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
20924+ pax_close_kernel();
20925
20926 udelay(100); /* wait for 8259A to initialize */
20927
20928diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
20929index a979b5b..1d6db75 100644
20930--- a/arch/x86/kernel/io_delay.c
20931+++ b/arch/x86/kernel/io_delay.c
20932@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
20933 * Quirk table for systems that misbehave (lock up, etc.) if port
20934 * 0x80 is used:
20935 */
20936-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
20937+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
20938 {
20939 .callback = dmi_io_delay_0xed_port,
20940 .ident = "Compaq Presario V6000",
20941diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
20942index 4ddaf66..6292f4e 100644
20943--- a/arch/x86/kernel/ioport.c
20944+++ b/arch/x86/kernel/ioport.c
20945@@ -6,6 +6,7 @@
20946 #include <linux/sched.h>
20947 #include <linux/kernel.h>
20948 #include <linux/capability.h>
20949+#include <linux/security.h>
20950 #include <linux/errno.h>
20951 #include <linux/types.h>
20952 #include <linux/ioport.h>
20953@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
20954
20955 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
20956 return -EINVAL;
20957+#ifdef CONFIG_GRKERNSEC_IO
20958+ if (turn_on && grsec_disable_privio) {
20959+ gr_handle_ioperm();
20960+ return -EPERM;
20961+ }
20962+#endif
20963 if (turn_on && !capable(CAP_SYS_RAWIO))
20964 return -EPERM;
20965
20966@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
20967 * because the ->io_bitmap_max value must match the bitmap
20968 * contents:
20969 */
20970- tss = &per_cpu(init_tss, get_cpu());
20971+ tss = init_tss + get_cpu();
20972
20973 if (turn_on)
20974 bitmap_clear(t->io_bitmap_ptr, from, num);
20975@@ -103,6 +110,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
20976 return -EINVAL;
20977 /* Trying to gain more privileges? */
20978 if (level > old) {
20979+#ifdef CONFIG_GRKERNSEC_IO
20980+ if (grsec_disable_privio) {
20981+ gr_handle_iopl();
20982+ return -EPERM;
20983+ }
20984+#endif
20985 if (!capable(CAP_SYS_RAWIO))
20986 return -EPERM;
20987 }
20988diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
20989index 84b7789..e65e8be 100644
20990--- a/arch/x86/kernel/irq.c
20991+++ b/arch/x86/kernel/irq.c
20992@@ -18,7 +18,7 @@
20993 #include <asm/mce.h>
20994 #include <asm/hw_irq.h>
20995
20996-atomic_t irq_err_count;
20997+atomic_unchecked_t irq_err_count;
20998
20999 /* Function pointer for generic interrupt vector handling */
21000 void (*x86_platform_ipi_callback)(void) = NULL;
21001@@ -122,9 +122,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
21002 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
21003 seq_printf(p, " Machine check polls\n");
21004 #endif
21005- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
21006+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
21007 #if defined(CONFIG_X86_IO_APIC)
21008- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
21009+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
21010 #endif
21011 return 0;
21012 }
21013@@ -164,7 +164,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
21014
21015 u64 arch_irq_stat(void)
21016 {
21017- u64 sum = atomic_read(&irq_err_count);
21018+ u64 sum = atomic_read_unchecked(&irq_err_count);
21019 return sum;
21020 }
21021
21022diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
21023index 344faf8..355f60d 100644
21024--- a/arch/x86/kernel/irq_32.c
21025+++ b/arch/x86/kernel/irq_32.c
21026@@ -39,7 +39,7 @@ static int check_stack_overflow(void)
21027 __asm__ __volatile__("andl %%esp,%0" :
21028 "=r" (sp) : "0" (THREAD_SIZE - 1));
21029
21030- return sp < (sizeof(struct thread_info) + STACK_WARN);
21031+ return sp < STACK_WARN;
21032 }
21033
21034 static void print_stack_overflow(void)
21035@@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
21036 * per-CPU IRQ handling contexts (thread information and stack)
21037 */
21038 union irq_ctx {
21039- struct thread_info tinfo;
21040- u32 stack[THREAD_SIZE/sizeof(u32)];
21041+ unsigned long previous_esp;
21042+ u32 stack[THREAD_SIZE/sizeof(u32)];
21043 } __attribute__((aligned(THREAD_SIZE)));
21044
21045 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
21046@@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
21047 static inline int
21048 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
21049 {
21050- union irq_ctx *curctx, *irqctx;
21051+ union irq_ctx *irqctx;
21052 u32 *isp, arg1, arg2;
21053
21054- curctx = (union irq_ctx *) current_thread_info();
21055 irqctx = __this_cpu_read(hardirq_ctx);
21056
21057 /*
21058@@ -92,16 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
21059 * handler) we can't do that and just have to keep using the
21060 * current stack (which is the irq stack already after all)
21061 */
21062- if (unlikely(curctx == irqctx))
21063+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
21064 return 0;
21065
21066 /* build the stack frame on the IRQ stack */
21067- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
21068- irqctx->tinfo.task = curctx->tinfo.task;
21069- irqctx->tinfo.previous_esp = current_stack_pointer;
21070+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
21071+ irqctx->previous_esp = current_stack_pointer;
21072
21073- /* Copy the preempt_count so that the [soft]irq checks work. */
21074- irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
21075+#ifdef CONFIG_PAX_MEMORY_UDEREF
21076+ __set_fs(MAKE_MM_SEG(0));
21077+#endif
21078
21079 if (unlikely(overflow))
21080 call_on_stack(print_stack_overflow, isp);
21081@@ -113,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
21082 : "0" (irq), "1" (desc), "2" (isp),
21083 "D" (desc->handle_irq)
21084 : "memory", "cc", "ecx");
21085+
21086+#ifdef CONFIG_PAX_MEMORY_UDEREF
21087+ __set_fs(current_thread_info()->addr_limit);
21088+#endif
21089+
21090 return 1;
21091 }
21092
21093@@ -121,29 +125,14 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
21094 */
21095 void __cpuinit irq_ctx_init(int cpu)
21096 {
21097- union irq_ctx *irqctx;
21098-
21099 if (per_cpu(hardirq_ctx, cpu))
21100 return;
21101
21102- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
21103- THREADINFO_GFP,
21104- THREAD_SIZE_ORDER));
21105- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
21106- irqctx->tinfo.cpu = cpu;
21107- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
21108- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
21109-
21110- per_cpu(hardirq_ctx, cpu) = irqctx;
21111-
21112- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
21113- THREADINFO_GFP,
21114- THREAD_SIZE_ORDER));
21115- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
21116- irqctx->tinfo.cpu = cpu;
21117- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
21118-
21119- per_cpu(softirq_ctx, cpu) = irqctx;
21120+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
21121+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
21122+
21123+ printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
21124+ cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
21125
21126 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
21127 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
21128@@ -152,7 +141,6 @@ void __cpuinit irq_ctx_init(int cpu)
21129 asmlinkage void do_softirq(void)
21130 {
21131 unsigned long flags;
21132- struct thread_info *curctx;
21133 union irq_ctx *irqctx;
21134 u32 *isp;
21135
21136@@ -162,15 +150,22 @@ asmlinkage void do_softirq(void)
21137 local_irq_save(flags);
21138
21139 if (local_softirq_pending()) {
21140- curctx = current_thread_info();
21141 irqctx = __this_cpu_read(softirq_ctx);
21142- irqctx->tinfo.task = curctx->task;
21143- irqctx->tinfo.previous_esp = current_stack_pointer;
21144+ irqctx->previous_esp = current_stack_pointer;
21145
21146 /* build the stack frame on the softirq stack */
21147- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
21148+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
21149+
21150+#ifdef CONFIG_PAX_MEMORY_UDEREF
21151+ __set_fs(MAKE_MM_SEG(0));
21152+#endif
21153
21154 call_on_stack(__do_softirq, isp);
21155+
21156+#ifdef CONFIG_PAX_MEMORY_UDEREF
21157+ __set_fs(current_thread_info()->addr_limit);
21158+#endif
21159+
21160 /*
21161 * Shouldn't happen, we returned above if in_interrupt():
21162 */
21163@@ -191,7 +186,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
21164 if (unlikely(!desc))
21165 return false;
21166
21167- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
21168+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
21169 if (unlikely(overflow))
21170 print_stack_overflow();
21171 desc->handle_irq(irq, desc);
21172diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
21173index d04d3ec..ea4b374 100644
21174--- a/arch/x86/kernel/irq_64.c
21175+++ b/arch/x86/kernel/irq_64.c
21176@@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
21177 u64 estack_top, estack_bottom;
21178 u64 curbase = (u64)task_stack_page(current);
21179
21180- if (user_mode_vm(regs))
21181+ if (user_mode(regs))
21182 return;
21183
21184 if (regs->sp >= curbase + sizeof(struct thread_info) +
21185diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
21186index dc1404b..bbc43e7 100644
21187--- a/arch/x86/kernel/kdebugfs.c
21188+++ b/arch/x86/kernel/kdebugfs.c
21189@@ -27,7 +27,7 @@ struct setup_data_node {
21190 u32 len;
21191 };
21192
21193-static ssize_t setup_data_read(struct file *file, char __user *user_buf,
21194+static ssize_t __size_overflow(3) setup_data_read(struct file *file, char __user *user_buf,
21195 size_t count, loff_t *ppos)
21196 {
21197 struct setup_data_node *node = file->private_data;
21198diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
21199index 836f832..a8bda67 100644
21200--- a/arch/x86/kernel/kgdb.c
21201+++ b/arch/x86/kernel/kgdb.c
21202@@ -127,11 +127,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
21203 #ifdef CONFIG_X86_32
21204 switch (regno) {
21205 case GDB_SS:
21206- if (!user_mode_vm(regs))
21207+ if (!user_mode(regs))
21208 *(unsigned long *)mem = __KERNEL_DS;
21209 break;
21210 case GDB_SP:
21211- if (!user_mode_vm(regs))
21212+ if (!user_mode(regs))
21213 *(unsigned long *)mem = kernel_stack_pointer(regs);
21214 break;
21215 case GDB_GS:
21216@@ -229,7 +229,10 @@ static void kgdb_correct_hw_break(void)
21217 bp->attr.bp_addr = breakinfo[breakno].addr;
21218 bp->attr.bp_len = breakinfo[breakno].len;
21219 bp->attr.bp_type = breakinfo[breakno].type;
21220- info->address = breakinfo[breakno].addr;
21221+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
21222+ info->address = ktla_ktva(breakinfo[breakno].addr);
21223+ else
21224+ info->address = breakinfo[breakno].addr;
21225 info->len = breakinfo[breakno].len;
21226 info->type = breakinfo[breakno].type;
21227 val = arch_install_hw_breakpoint(bp);
21228@@ -476,12 +479,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
21229 case 'k':
21230 /* clear the trace bit */
21231 linux_regs->flags &= ~X86_EFLAGS_TF;
21232- atomic_set(&kgdb_cpu_doing_single_step, -1);
21233+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
21234
21235 /* set the trace bit if we're stepping */
21236 if (remcomInBuffer[0] == 's') {
21237 linux_regs->flags |= X86_EFLAGS_TF;
21238- atomic_set(&kgdb_cpu_doing_single_step,
21239+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
21240 raw_smp_processor_id());
21241 }
21242
21243@@ -546,7 +549,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
21244
21245 switch (cmd) {
21246 case DIE_DEBUG:
21247- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
21248+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
21249 if (user_mode(regs))
21250 return single_step_cont(regs, args);
21251 break;
21252@@ -751,11 +754,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
21253 #endif /* CONFIG_DEBUG_RODATA */
21254
21255 bpt->type = BP_BREAKPOINT;
21256- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
21257+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
21258 BREAK_INSTR_SIZE);
21259 if (err)
21260 return err;
21261- err = probe_kernel_write((char *)bpt->bpt_addr,
21262+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
21263 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
21264 #ifdef CONFIG_DEBUG_RODATA
21265 if (!err)
21266@@ -768,7 +771,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
21267 return -EBUSY;
21268 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
21269 BREAK_INSTR_SIZE);
21270- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
21271+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
21272 if (err)
21273 return err;
21274 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
21275@@ -793,13 +796,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
21276 if (mutex_is_locked(&text_mutex))
21277 goto knl_write;
21278 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
21279- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
21280+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
21281 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
21282 goto knl_write;
21283 return err;
21284 knl_write:
21285 #endif /* CONFIG_DEBUG_RODATA */
21286- return probe_kernel_write((char *)bpt->bpt_addr,
21287+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
21288 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
21289 }
21290
21291diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
21292index 7bfe318..383d238 100644
21293--- a/arch/x86/kernel/kprobes/core.c
21294+++ b/arch/x86/kernel/kprobes/core.c
21295@@ -119,9 +119,12 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
21296 s32 raddr;
21297 } __packed *insn;
21298
21299- insn = (struct __arch_relative_insn *)from;
21300+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
21301+
21302+ pax_open_kernel();
21303 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
21304 insn->op = op;
21305+ pax_close_kernel();
21306 }
21307
21308 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
21309@@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t *opcodes)
21310 kprobe_opcode_t opcode;
21311 kprobe_opcode_t *orig_opcodes = opcodes;
21312
21313- if (search_exception_tables((unsigned long)opcodes))
21314+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
21315 return 0; /* Page fault may occur on this address. */
21316
21317 retry:
21318@@ -238,9 +241,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
21319 * for the first byte, we can recover the original instruction
21320 * from it and kp->opcode.
21321 */
21322- memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
21323+ memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
21324 buf[0] = kp->opcode;
21325- return (unsigned long)buf;
21326+ return ktva_ktla((unsigned long)buf);
21327 }
21328
21329 /*
21330@@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
21331 /* Another subsystem puts a breakpoint, failed to recover */
21332 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
21333 return 0;
21334+ pax_open_kernel();
21335 memcpy(dest, insn.kaddr, insn.length);
21336+ pax_close_kernel();
21337
21338 #ifdef CONFIG_X86_64
21339 if (insn_rip_relative(&insn)) {
21340@@ -355,7 +360,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
21341 newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
21342 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
21343 disp = (u8 *) dest + insn_offset_displacement(&insn);
21344+ pax_open_kernel();
21345 *(s32 *) disp = (s32) newdisp;
21346+ pax_close_kernel();
21347 }
21348 #endif
21349 return insn.length;
21350@@ -488,7 +495,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
21351 * nor set current_kprobe, because it doesn't use single
21352 * stepping.
21353 */
21354- regs->ip = (unsigned long)p->ainsn.insn;
21355+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
21356 preempt_enable_no_resched();
21357 return;
21358 }
21359@@ -505,9 +512,9 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
21360 regs->flags &= ~X86_EFLAGS_IF;
21361 /* single step inline if the instruction is an int3 */
21362 if (p->opcode == BREAKPOINT_INSTRUCTION)
21363- regs->ip = (unsigned long)p->addr;
21364+ regs->ip = ktla_ktva((unsigned long)p->addr);
21365 else
21366- regs->ip = (unsigned long)p->ainsn.insn;
21367+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
21368 }
21369
21370 /*
21371@@ -586,7 +593,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
21372 setup_singlestep(p, regs, kcb, 0);
21373 return 1;
21374 }
21375- } else if (*addr != BREAKPOINT_INSTRUCTION) {
21376+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
21377 /*
21378 * The breakpoint instruction was removed right
21379 * after we hit it. Another cpu has removed
21380@@ -632,6 +639,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
21381 " movq %rax, 152(%rsp)\n"
21382 RESTORE_REGS_STRING
21383 " popfq\n"
21384+#ifdef KERNEXEC_PLUGIN
21385+ " btsq $63,(%rsp)\n"
21386+#endif
21387 #else
21388 " pushf\n"
21389 SAVE_REGS_STRING
21390@@ -769,7 +779,7 @@ static void __kprobes
21391 resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
21392 {
21393 unsigned long *tos = stack_addr(regs);
21394- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
21395+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
21396 unsigned long orig_ip = (unsigned long)p->addr;
21397 kprobe_opcode_t *insn = p->ainsn.insn;
21398
21399@@ -951,7 +961,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
21400 struct die_args *args = data;
21401 int ret = NOTIFY_DONE;
21402
21403- if (args->regs && user_mode_vm(args->regs))
21404+ if (args->regs && user_mode(args->regs))
21405 return ret;
21406
21407 switch (val) {
21408diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
21409index 76dc6f0..66bdfc3 100644
21410--- a/arch/x86/kernel/kprobes/opt.c
21411+++ b/arch/x86/kernel/kprobes/opt.c
21412@@ -79,6 +79,7 @@ found:
21413 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
21414 static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
21415 {
21416+ pax_open_kernel();
21417 #ifdef CONFIG_X86_64
21418 *addr++ = 0x48;
21419 *addr++ = 0xbf;
21420@@ -86,6 +87,7 @@ static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long v
21421 *addr++ = 0xb8;
21422 #endif
21423 *(unsigned long *)addr = val;
21424+ pax_close_kernel();
21425 }
21426
21427 static void __used __kprobes kprobes_optinsn_template_holder(void)
21428@@ -338,7 +340,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
21429 * Verify if the address gap is in 2GB range, because this uses
21430 * a relative jump.
21431 */
21432- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
21433+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
21434 if (abs(rel) > 0x7fffffff)
21435 return -ERANGE;
21436
21437@@ -353,16 +355,18 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
21438 op->optinsn.size = ret;
21439
21440 /* Copy arch-dep-instance from template */
21441- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
21442+ pax_open_kernel();
21443+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
21444+ pax_close_kernel();
21445
21446 /* Set probe information */
21447 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
21448
21449 /* Set probe function call */
21450- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
21451+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
21452
21453 /* Set returning jmp instruction at the tail of out-of-line buffer */
21454- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
21455+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
21456 (u8 *)op->kp.addr + op->optinsn.size);
21457
21458 flush_icache_range((unsigned long) buf,
21459@@ -385,7 +389,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
21460 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
21461
21462 /* Backup instructions which will be replaced by jump address */
21463- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
21464+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
21465 RELATIVE_ADDR_SIZE);
21466
21467 insn_buf[0] = RELATIVEJUMP_OPCODE;
21468@@ -483,7 +487,7 @@ setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
21469 /* This kprobe is really able to run optimized path. */
21470 op = container_of(p, struct optimized_kprobe, kp);
21471 /* Detour through copied instructions */
21472- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
21473+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
21474 if (!reenter)
21475 reset_current_kprobe();
21476 preempt_enable_no_resched();
21477diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
21478index b686a90..60d36fb 100644
21479--- a/arch/x86/kernel/kvm.c
21480+++ b/arch/x86/kernel/kvm.c
21481@@ -453,7 +453,7 @@ static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
21482 return NOTIFY_OK;
21483 }
21484
21485-static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
21486+static struct notifier_block kvm_cpu_notifier = {
21487 .notifier_call = kvm_cpu_notify,
21488 };
21489 #endif
21490diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
21491index ebc9873..1b9724b 100644
21492--- a/arch/x86/kernel/ldt.c
21493+++ b/arch/x86/kernel/ldt.c
21494@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
21495 if (reload) {
21496 #ifdef CONFIG_SMP
21497 preempt_disable();
21498- load_LDT(pc);
21499+ load_LDT_nolock(pc);
21500 if (!cpumask_equal(mm_cpumask(current->mm),
21501 cpumask_of(smp_processor_id())))
21502 smp_call_function(flush_ldt, current->mm, 1);
21503 preempt_enable();
21504 #else
21505- load_LDT(pc);
21506+ load_LDT_nolock(pc);
21507 #endif
21508 }
21509 if (oldsize) {
21510@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
21511 return err;
21512
21513 for (i = 0; i < old->size; i++)
21514- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
21515+ write_ldt_entry(new->ldt, i, old->ldt + i);
21516 return 0;
21517 }
21518
21519@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
21520 retval = copy_ldt(&mm->context, &old_mm->context);
21521 mutex_unlock(&old_mm->context.lock);
21522 }
21523+
21524+ if (tsk == current) {
21525+ mm->context.vdso = 0;
21526+
21527+#ifdef CONFIG_X86_32
21528+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21529+ mm->context.user_cs_base = 0UL;
21530+ mm->context.user_cs_limit = ~0UL;
21531+
21532+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
21533+ cpus_clear(mm->context.cpu_user_cs_mask);
21534+#endif
21535+
21536+#endif
21537+#endif
21538+
21539+ }
21540+
21541 return retval;
21542 }
21543
21544@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
21545 }
21546 }
21547
21548+#ifdef CONFIG_PAX_SEGMEXEC
21549+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
21550+ error = -EINVAL;
21551+ goto out_unlock;
21552+ }
21553+#endif
21554+
21555 fill_ldt(&ldt, &ldt_info);
21556 if (oldmode)
21557 ldt.avl = 0;
21558diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
21559index 5b19e4d..6476a76 100644
21560--- a/arch/x86/kernel/machine_kexec_32.c
21561+++ b/arch/x86/kernel/machine_kexec_32.c
21562@@ -26,7 +26,7 @@
21563 #include <asm/cacheflush.h>
21564 #include <asm/debugreg.h>
21565
21566-static void set_idt(void *newidt, __u16 limit)
21567+static void set_idt(struct desc_struct *newidt, __u16 limit)
21568 {
21569 struct desc_ptr curidt;
21570
21571@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
21572 }
21573
21574
21575-static void set_gdt(void *newgdt, __u16 limit)
21576+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
21577 {
21578 struct desc_ptr curgdt;
21579
21580@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
21581 }
21582
21583 control_page = page_address(image->control_code_page);
21584- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
21585+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
21586
21587 relocate_kernel_ptr = control_page;
21588 page_list[PA_CONTROL_PAGE] = __pa(control_page);
21589diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
21590index 22db92b..d546bec 100644
21591--- a/arch/x86/kernel/microcode_core.c
21592+++ b/arch/x86/kernel/microcode_core.c
21593@@ -513,7 +513,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
21594 return NOTIFY_OK;
21595 }
21596
21597-static struct notifier_block __refdata mc_cpu_notifier = {
21598+static struct notifier_block mc_cpu_notifier = {
21599 .notifier_call = mc_cpu_callback,
21600 };
21601
21602diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
21603index 5fb2ceb..3ae90bb 100644
21604--- a/arch/x86/kernel/microcode_intel.c
21605+++ b/arch/x86/kernel/microcode_intel.c
21606@@ -293,13 +293,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
21607
21608 static int get_ucode_user(void *to, const void *from, size_t n)
21609 {
21610- return copy_from_user(to, from, n);
21611+ return copy_from_user(to, (const void __force_user *)from, n);
21612 }
21613
21614 static enum ucode_state
21615 request_microcode_user(int cpu, const void __user *buf, size_t size)
21616 {
21617- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
21618+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
21619 }
21620
21621 static void microcode_fini_cpu(int cpu)
21622diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
21623index 216a4d7..228255a 100644
21624--- a/arch/x86/kernel/module.c
21625+++ b/arch/x86/kernel/module.c
21626@@ -43,15 +43,60 @@ do { \
21627 } while (0)
21628 #endif
21629
21630-void *module_alloc(unsigned long size)
21631+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
21632 {
21633- if (PAGE_ALIGN(size) > MODULES_LEN)
21634+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
21635 return NULL;
21636 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
21637- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
21638+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
21639 -1, __builtin_return_address(0));
21640 }
21641
21642+void *module_alloc(unsigned long size)
21643+{
21644+
21645+#ifdef CONFIG_PAX_KERNEXEC
21646+ return __module_alloc(size, PAGE_KERNEL);
21647+#else
21648+ return __module_alloc(size, PAGE_KERNEL_EXEC);
21649+#endif
21650+
21651+}
21652+
21653+#ifdef CONFIG_PAX_KERNEXEC
21654+#ifdef CONFIG_X86_32
21655+void *module_alloc_exec(unsigned long size)
21656+{
21657+ struct vm_struct *area;
21658+
21659+ if (size == 0)
21660+ return NULL;
21661+
21662+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
21663+ return area ? area->addr : NULL;
21664+}
21665+EXPORT_SYMBOL(module_alloc_exec);
21666+
21667+void module_free_exec(struct module *mod, void *module_region)
21668+{
21669+ vunmap(module_region);
21670+}
21671+EXPORT_SYMBOL(module_free_exec);
21672+#else
21673+void module_free_exec(struct module *mod, void *module_region)
21674+{
21675+ module_free(mod, module_region);
21676+}
21677+EXPORT_SYMBOL(module_free_exec);
21678+
21679+void *module_alloc_exec(unsigned long size)
21680+{
21681+ return __module_alloc(size, PAGE_KERNEL_RX);
21682+}
21683+EXPORT_SYMBOL(module_alloc_exec);
21684+#endif
21685+#endif
21686+
21687 #ifdef CONFIG_X86_32
21688 int apply_relocate(Elf32_Shdr *sechdrs,
21689 const char *strtab,
21690@@ -62,14 +107,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
21691 unsigned int i;
21692 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
21693 Elf32_Sym *sym;
21694- uint32_t *location;
21695+ uint32_t *plocation, location;
21696
21697 DEBUGP("Applying relocate section %u to %u\n",
21698 relsec, sechdrs[relsec].sh_info);
21699 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
21700 /* This is where to make the change */
21701- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
21702- + rel[i].r_offset;
21703+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
21704+ location = (uint32_t)plocation;
21705+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
21706+ plocation = ktla_ktva((void *)plocation);
21707 /* This is the symbol it is referring to. Note that all
21708 undefined symbols have been resolved. */
21709 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
21710@@ -78,11 +125,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
21711 switch (ELF32_R_TYPE(rel[i].r_info)) {
21712 case R_386_32:
21713 /* We add the value into the location given */
21714- *location += sym->st_value;
21715+ pax_open_kernel();
21716+ *plocation += sym->st_value;
21717+ pax_close_kernel();
21718 break;
21719 case R_386_PC32:
21720 /* Add the value, subtract its position */
21721- *location += sym->st_value - (uint32_t)location;
21722+ pax_open_kernel();
21723+ *plocation += sym->st_value - location;
21724+ pax_close_kernel();
21725 break;
21726 default:
21727 pr_err("%s: Unknown relocation: %u\n",
21728@@ -127,21 +178,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
21729 case R_X86_64_NONE:
21730 break;
21731 case R_X86_64_64:
21732+ pax_open_kernel();
21733 *(u64 *)loc = val;
21734+ pax_close_kernel();
21735 break;
21736 case R_X86_64_32:
21737+ pax_open_kernel();
21738 *(u32 *)loc = val;
21739+ pax_close_kernel();
21740 if (val != *(u32 *)loc)
21741 goto overflow;
21742 break;
21743 case R_X86_64_32S:
21744+ pax_open_kernel();
21745 *(s32 *)loc = val;
21746+ pax_close_kernel();
21747 if ((s64)val != *(s32 *)loc)
21748 goto overflow;
21749 break;
21750 case R_X86_64_PC32:
21751 val -= (u64)loc;
21752+ pax_open_kernel();
21753 *(u32 *)loc = val;
21754+ pax_close_kernel();
21755+
21756 #if 0
21757 if ((s64)val != *(s32 *)loc)
21758 goto overflow;
21759diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
21760index ce13049..e2e9c3c 100644
21761--- a/arch/x86/kernel/msr.c
21762+++ b/arch/x86/kernel/msr.c
21763@@ -233,7 +233,7 @@ static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb,
21764 return notifier_from_errno(err);
21765 }
21766
21767-static struct notifier_block __refdata msr_class_cpu_notifier = {
21768+static struct notifier_block msr_class_cpu_notifier = {
21769 .notifier_call = msr_class_cpu_callback,
21770 };
21771
21772diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
21773index 6030805..2d33f21 100644
21774--- a/arch/x86/kernel/nmi.c
21775+++ b/arch/x86/kernel/nmi.c
21776@@ -105,7 +105,7 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2
21777 return handled;
21778 }
21779
21780-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
21781+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
21782 {
21783 struct nmi_desc *desc = nmi_to_desc(type);
21784 unsigned long flags;
21785@@ -129,9 +129,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
21786 * event confuses some handlers (kdump uses this flag)
21787 */
21788 if (action->flags & NMI_FLAG_FIRST)
21789- list_add_rcu(&action->list, &desc->head);
21790+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
21791 else
21792- list_add_tail_rcu(&action->list, &desc->head);
21793+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
21794
21795 spin_unlock_irqrestore(&desc->lock, flags);
21796 return 0;
21797@@ -154,7 +154,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
21798 if (!strcmp(n->name, name)) {
21799 WARN(in_nmi(),
21800 "Trying to free NMI (%s) from NMI context!\n", n->name);
21801- list_del_rcu(&n->list);
21802+ pax_list_del_rcu((struct list_head *)&n->list);
21803 break;
21804 }
21805 }
21806@@ -479,6 +479,17 @@ static inline void nmi_nesting_postprocess(void)
21807 dotraplinkage notrace __kprobes void
21808 do_nmi(struct pt_regs *regs, long error_code)
21809 {
21810+
21811+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21812+ if (!user_mode(regs)) {
21813+ unsigned long cs = regs->cs & 0xFFFF;
21814+ unsigned long ip = ktva_ktla(regs->ip);
21815+
21816+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
21817+ regs->ip = ip;
21818+ }
21819+#endif
21820+
21821 nmi_nesting_preprocess(regs);
21822
21823 nmi_enter();
21824diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
21825index 6d9582e..f746287 100644
21826--- a/arch/x86/kernel/nmi_selftest.c
21827+++ b/arch/x86/kernel/nmi_selftest.c
21828@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
21829 {
21830 /* trap all the unknown NMIs we may generate */
21831 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
21832- __initdata);
21833+ __initconst);
21834 }
21835
21836 static void __init cleanup_nmi_testsuite(void)
21837@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
21838 unsigned long timeout;
21839
21840 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
21841- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
21842+ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
21843 nmi_fail = FAILURE;
21844 return;
21845 }
21846diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
21847index 676b8c7..870ba04 100644
21848--- a/arch/x86/kernel/paravirt-spinlocks.c
21849+++ b/arch/x86/kernel/paravirt-spinlocks.c
21850@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
21851 arch_spin_lock(lock);
21852 }
21853
21854-struct pv_lock_ops pv_lock_ops = {
21855+struct pv_lock_ops pv_lock_ops __read_only = {
21856 #ifdef CONFIG_SMP
21857 .spin_is_locked = __ticket_spin_is_locked,
21858 .spin_is_contended = __ticket_spin_is_contended,
21859diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
21860index 8bfb335..c1463c6 100644
21861--- a/arch/x86/kernel/paravirt.c
21862+++ b/arch/x86/kernel/paravirt.c
21863@@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
21864 {
21865 return x;
21866 }
21867+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
21868+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
21869+#endif
21870
21871 void __init default_banner(void)
21872 {
21873@@ -147,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
21874 if (opfunc == NULL)
21875 /* If there's no function, patch it with a ud2a (BUG) */
21876 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
21877- else if (opfunc == _paravirt_nop)
21878+ else if (opfunc == (void *)_paravirt_nop)
21879 /* If the operation is a nop, then nop the callsite */
21880 ret = paravirt_patch_nop();
21881
21882 /* identity functions just return their single argument */
21883- else if (opfunc == _paravirt_ident_32)
21884+ else if (opfunc == (void *)_paravirt_ident_32)
21885 ret = paravirt_patch_ident_32(insnbuf, len);
21886- else if (opfunc == _paravirt_ident_64)
21887+ else if (opfunc == (void *)_paravirt_ident_64)
21888 ret = paravirt_patch_ident_64(insnbuf, len);
21889+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
21890+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
21891+ ret = paravirt_patch_ident_64(insnbuf, len);
21892+#endif
21893
21894 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
21895 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
21896@@ -180,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
21897 if (insn_len > len || start == NULL)
21898 insn_len = len;
21899 else
21900- memcpy(insnbuf, start, insn_len);
21901+ memcpy(insnbuf, ktla_ktva(start), insn_len);
21902
21903 return insn_len;
21904 }
21905@@ -304,7 +311,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
21906 return this_cpu_read(paravirt_lazy_mode);
21907 }
21908
21909-struct pv_info pv_info = {
21910+struct pv_info pv_info __read_only = {
21911 .name = "bare hardware",
21912 .paravirt_enabled = 0,
21913 .kernel_rpl = 0,
21914@@ -315,16 +322,16 @@ struct pv_info pv_info = {
21915 #endif
21916 };
21917
21918-struct pv_init_ops pv_init_ops = {
21919+struct pv_init_ops pv_init_ops __read_only = {
21920 .patch = native_patch,
21921 };
21922
21923-struct pv_time_ops pv_time_ops = {
21924+struct pv_time_ops pv_time_ops __read_only = {
21925 .sched_clock = native_sched_clock,
21926 .steal_clock = native_steal_clock,
21927 };
21928
21929-struct pv_irq_ops pv_irq_ops = {
21930+struct pv_irq_ops pv_irq_ops __read_only = {
21931 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
21932 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
21933 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
21934@@ -336,7 +343,7 @@ struct pv_irq_ops pv_irq_ops = {
21935 #endif
21936 };
21937
21938-struct pv_cpu_ops pv_cpu_ops = {
21939+struct pv_cpu_ops pv_cpu_ops __read_only = {
21940 .cpuid = native_cpuid,
21941 .get_debugreg = native_get_debugreg,
21942 .set_debugreg = native_set_debugreg,
21943@@ -395,21 +402,26 @@ struct pv_cpu_ops pv_cpu_ops = {
21944 .end_context_switch = paravirt_nop,
21945 };
21946
21947-struct pv_apic_ops pv_apic_ops = {
21948+struct pv_apic_ops pv_apic_ops __read_only= {
21949 #ifdef CONFIG_X86_LOCAL_APIC
21950 .startup_ipi_hook = paravirt_nop,
21951 #endif
21952 };
21953
21954-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
21955+#ifdef CONFIG_X86_32
21956+#ifdef CONFIG_X86_PAE
21957+/* 64-bit pagetable entries */
21958+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
21959+#else
21960 /* 32-bit pagetable entries */
21961 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
21962+#endif
21963 #else
21964 /* 64-bit pagetable entries */
21965 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
21966 #endif
21967
21968-struct pv_mmu_ops pv_mmu_ops = {
21969+struct pv_mmu_ops pv_mmu_ops __read_only = {
21970
21971 .read_cr2 = native_read_cr2,
21972 .write_cr2 = native_write_cr2,
21973@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
21974 .make_pud = PTE_IDENT,
21975
21976 .set_pgd = native_set_pgd,
21977+ .set_pgd_batched = native_set_pgd_batched,
21978 #endif
21979 #endif /* PAGETABLE_LEVELS >= 3 */
21980
21981@@ -479,6 +492,12 @@ struct pv_mmu_ops pv_mmu_ops = {
21982 },
21983
21984 .set_fixmap = native_set_fixmap,
21985+
21986+#ifdef CONFIG_PAX_KERNEXEC
21987+ .pax_open_kernel = native_pax_open_kernel,
21988+ .pax_close_kernel = native_pax_close_kernel,
21989+#endif
21990+
21991 };
21992
21993 EXPORT_SYMBOL_GPL(pv_time_ops);
21994diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
21995index 299d493..2ccb0ee 100644
21996--- a/arch/x86/kernel/pci-calgary_64.c
21997+++ b/arch/x86/kernel/pci-calgary_64.c
21998@@ -1339,7 +1339,7 @@ static void __init get_tce_space_from_tar(void)
21999 tce_space = be64_to_cpu(readq(target));
22000 tce_space = tce_space & TAR_SW_BITS;
22001
22002- tce_space = tce_space & (~specified_table_size);
22003+ tce_space = tce_space & (~(unsigned long)specified_table_size);
22004 info->tce_space = (u64 *)__va(tce_space);
22005 }
22006 }
22007diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
22008index 35ccf75..7a15747 100644
22009--- a/arch/x86/kernel/pci-iommu_table.c
22010+++ b/arch/x86/kernel/pci-iommu_table.c
22011@@ -2,7 +2,7 @@
22012 #include <asm/iommu_table.h>
22013 #include <linux/string.h>
22014 #include <linux/kallsyms.h>
22015-
22016+#include <linux/sched.h>
22017
22018 #define DEBUG 1
22019
22020diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
22021index 6c483ba..d10ce2f 100644
22022--- a/arch/x86/kernel/pci-swiotlb.c
22023+++ b/arch/x86/kernel/pci-swiotlb.c
22024@@ -32,7 +32,7 @@ static void x86_swiotlb_free_coherent(struct device *dev, size_t size,
22025 void *vaddr, dma_addr_t dma_addr,
22026 struct dma_attrs *attrs)
22027 {
22028- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
22029+ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
22030 }
22031
22032 static struct dma_map_ops swiotlb_dma_ops = {
22033diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
22034index 14ae100..752a4f6 100644
22035--- a/arch/x86/kernel/process.c
22036+++ b/arch/x86/kernel/process.c
22037@@ -36,7 +36,8 @@
22038 * section. Since TSS's are completely CPU-local, we want them
22039 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
22040 */
22041-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
22042+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
22043+EXPORT_SYMBOL(init_tss);
22044
22045 #ifdef CONFIG_X86_64
22046 static DEFINE_PER_CPU(unsigned char, is_idle);
22047@@ -92,7 +93,7 @@ void arch_task_cache_init(void)
22048 task_xstate_cachep =
22049 kmem_cache_create("task_xstate", xstate_size,
22050 __alignof__(union thread_xstate),
22051- SLAB_PANIC | SLAB_NOTRACK, NULL);
22052+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
22053 }
22054
22055 /*
22056@@ -105,7 +106,7 @@ void exit_thread(void)
22057 unsigned long *bp = t->io_bitmap_ptr;
22058
22059 if (bp) {
22060- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
22061+ struct tss_struct *tss = init_tss + get_cpu();
22062
22063 t->io_bitmap_ptr = NULL;
22064 clear_thread_flag(TIF_IO_BITMAP);
22065@@ -136,7 +137,7 @@ void show_regs_common(void)
22066 board = dmi_get_system_info(DMI_BOARD_NAME);
22067
22068 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s %s%s%s\n",
22069- current->pid, current->comm, print_tainted(),
22070+ task_pid_nr(current), current->comm, print_tainted(),
22071 init_utsname()->release,
22072 (int)strcspn(init_utsname()->version, " "),
22073 init_utsname()->version,
22074@@ -149,6 +150,9 @@ void flush_thread(void)
22075 {
22076 struct task_struct *tsk = current;
22077
22078+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
22079+ loadsegment(gs, 0);
22080+#endif
22081 flush_ptrace_hw_breakpoint(tsk);
22082 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
22083 drop_init_fpu(tsk);
22084@@ -295,7 +299,7 @@ static void __exit_idle(void)
22085 void exit_idle(void)
22086 {
22087 /* idle loop has pid 0 */
22088- if (current->pid)
22089+ if (task_pid_nr(current))
22090 return;
22091 __exit_idle();
22092 }
22093@@ -398,7 +402,7 @@ bool xen_set_default_idle(void)
22094 return ret;
22095 }
22096 #endif
22097-void stop_this_cpu(void *dummy)
22098+__noreturn void stop_this_cpu(void *dummy)
22099 {
22100 local_irq_disable();
22101 /*
22102@@ -544,16 +548,37 @@ static int __init idle_setup(char *str)
22103 }
22104 early_param("idle", idle_setup);
22105
22106-unsigned long arch_align_stack(unsigned long sp)
22107+#ifdef CONFIG_PAX_RANDKSTACK
22108+void pax_randomize_kstack(struct pt_regs *regs)
22109 {
22110- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
22111- sp -= get_random_int() % 8192;
22112- return sp & ~0xf;
22113-}
22114+ struct thread_struct *thread = &current->thread;
22115+ unsigned long time;
22116
22117-unsigned long arch_randomize_brk(struct mm_struct *mm)
22118-{
22119- unsigned long range_end = mm->brk + 0x02000000;
22120- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
22121-}
22122+ if (!randomize_va_space)
22123+ return;
22124+
22125+ if (v8086_mode(regs))
22126+ return;
22127
22128+ rdtscl(time);
22129+
22130+ /* P4 seems to return a 0 LSB, ignore it */
22131+#ifdef CONFIG_MPENTIUM4
22132+ time &= 0x3EUL;
22133+ time <<= 2;
22134+#elif defined(CONFIG_X86_64)
22135+ time &= 0xFUL;
22136+ time <<= 4;
22137+#else
22138+ time &= 0x1FUL;
22139+ time <<= 3;
22140+#endif
22141+
22142+ thread->sp0 ^= time;
22143+ load_sp0(init_tss + smp_processor_id(), thread);
22144+
22145+#ifdef CONFIG_X86_64
22146+ this_cpu_write(kernel_stack, thread->sp0);
22147+#endif
22148+}
22149+#endif
22150diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
22151index b5a8905..d9cacac 100644
22152--- a/arch/x86/kernel/process_32.c
22153+++ b/arch/x86/kernel/process_32.c
22154@@ -65,6 +65,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
22155 unsigned long thread_saved_pc(struct task_struct *tsk)
22156 {
22157 return ((unsigned long *)tsk->thread.sp)[3];
22158+//XXX return tsk->thread.eip;
22159 }
22160
22161 void __show_regs(struct pt_regs *regs, int all)
22162@@ -74,21 +75,20 @@ void __show_regs(struct pt_regs *regs, int all)
22163 unsigned long sp;
22164 unsigned short ss, gs;
22165
22166- if (user_mode_vm(regs)) {
22167+ if (user_mode(regs)) {
22168 sp = regs->sp;
22169 ss = regs->ss & 0xffff;
22170- gs = get_user_gs(regs);
22171 } else {
22172 sp = kernel_stack_pointer(regs);
22173 savesegment(ss, ss);
22174- savesegment(gs, gs);
22175 }
22176+ gs = get_user_gs(regs);
22177
22178 show_regs_common();
22179
22180 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
22181 (u16)regs->cs, regs->ip, regs->flags,
22182- smp_processor_id());
22183+ raw_smp_processor_id());
22184 print_symbol("EIP is at %s\n", regs->ip);
22185
22186 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
22187@@ -130,20 +130,21 @@ void release_thread(struct task_struct *dead_task)
22188 int copy_thread(unsigned long clone_flags, unsigned long sp,
22189 unsigned long arg, struct task_struct *p)
22190 {
22191- struct pt_regs *childregs = task_pt_regs(p);
22192+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
22193 struct task_struct *tsk;
22194 int err;
22195
22196 p->thread.sp = (unsigned long) childregs;
22197 p->thread.sp0 = (unsigned long) (childregs+1);
22198+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
22199
22200 if (unlikely(p->flags & PF_KTHREAD)) {
22201 /* kernel thread */
22202 memset(childregs, 0, sizeof(struct pt_regs));
22203 p->thread.ip = (unsigned long) ret_from_kernel_thread;
22204- task_user_gs(p) = __KERNEL_STACK_CANARY;
22205- childregs->ds = __USER_DS;
22206- childregs->es = __USER_DS;
22207+ savesegment(gs, childregs->gs);
22208+ childregs->ds = __KERNEL_DS;
22209+ childregs->es = __KERNEL_DS;
22210 childregs->fs = __KERNEL_PERCPU;
22211 childregs->bx = sp; /* function */
22212 childregs->bp = arg;
22213@@ -250,7 +251,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22214 struct thread_struct *prev = &prev_p->thread,
22215 *next = &next_p->thread;
22216 int cpu = smp_processor_id();
22217- struct tss_struct *tss = &per_cpu(init_tss, cpu);
22218+ struct tss_struct *tss = init_tss + cpu;
22219 fpu_switch_t fpu;
22220
22221 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
22222@@ -274,6 +275,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22223 */
22224 lazy_save_gs(prev->gs);
22225
22226+#ifdef CONFIG_PAX_MEMORY_UDEREF
22227+ __set_fs(task_thread_info(next_p)->addr_limit);
22228+#endif
22229+
22230 /*
22231 * Load the per-thread Thread-Local Storage descriptor.
22232 */
22233@@ -304,6 +309,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22234 */
22235 arch_end_context_switch(next_p);
22236
22237+ this_cpu_write(current_task, next_p);
22238+ this_cpu_write(current_tinfo, &next_p->tinfo);
22239+
22240 /*
22241 * Restore %gs if needed (which is common)
22242 */
22243@@ -312,8 +320,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22244
22245 switch_fpu_finish(next_p, fpu);
22246
22247- this_cpu_write(current_task, next_p);
22248-
22249 return prev_p;
22250 }
22251
22252@@ -343,4 +349,3 @@ unsigned long get_wchan(struct task_struct *p)
22253 } while (count++ < 16);
22254 return 0;
22255 }
22256-
22257diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
22258index 0f49677..fcbf88c 100644
22259--- a/arch/x86/kernel/process_64.c
22260+++ b/arch/x86/kernel/process_64.c
22261@@ -152,10 +152,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
22262 struct pt_regs *childregs;
22263 struct task_struct *me = current;
22264
22265- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
22266+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
22267 childregs = task_pt_regs(p);
22268 p->thread.sp = (unsigned long) childregs;
22269 p->thread.usersp = me->thread.usersp;
22270+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
22271 set_tsk_thread_flag(p, TIF_FORK);
22272 p->fpu_counter = 0;
22273 p->thread.io_bitmap_ptr = NULL;
22274@@ -274,7 +275,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22275 struct thread_struct *prev = &prev_p->thread;
22276 struct thread_struct *next = &next_p->thread;
22277 int cpu = smp_processor_id();
22278- struct tss_struct *tss = &per_cpu(init_tss, cpu);
22279+ struct tss_struct *tss = init_tss + cpu;
22280 unsigned fsindex, gsindex;
22281 fpu_switch_t fpu;
22282
22283@@ -356,10 +357,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22284 prev->usersp = this_cpu_read(old_rsp);
22285 this_cpu_write(old_rsp, next->usersp);
22286 this_cpu_write(current_task, next_p);
22287+ this_cpu_write(current_tinfo, &next_p->tinfo);
22288
22289- this_cpu_write(kernel_stack,
22290- (unsigned long)task_stack_page(next_p) +
22291- THREAD_SIZE - KERNEL_STACK_OFFSET);
22292+ this_cpu_write(kernel_stack, next->sp0);
22293
22294 /*
22295 * Now maybe reload the debug registers and handle I/O bitmaps
22296@@ -428,12 +428,11 @@ unsigned long get_wchan(struct task_struct *p)
22297 if (!p || p == current || p->state == TASK_RUNNING)
22298 return 0;
22299 stack = (unsigned long)task_stack_page(p);
22300- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
22301+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
22302 return 0;
22303 fp = *(u64 *)(p->thread.sp);
22304 do {
22305- if (fp < (unsigned long)stack ||
22306- fp >= (unsigned long)stack+THREAD_SIZE)
22307+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
22308 return 0;
22309 ip = *(u64 *)(fp+8);
22310 if (!in_sched_functions(ip))
22311diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
22312index 29a8120..a50b5ee 100644
22313--- a/arch/x86/kernel/ptrace.c
22314+++ b/arch/x86/kernel/ptrace.c
22315@@ -184,14 +184,13 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
22316 {
22317 unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
22318 unsigned long sp = (unsigned long)&regs->sp;
22319- struct thread_info *tinfo;
22320
22321- if (context == (sp & ~(THREAD_SIZE - 1)))
22322+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
22323 return sp;
22324
22325- tinfo = (struct thread_info *)context;
22326- if (tinfo->previous_esp)
22327- return tinfo->previous_esp;
22328+ sp = *(unsigned long *)context;
22329+ if (sp)
22330+ return sp;
22331
22332 return (unsigned long)regs;
22333 }
22334@@ -588,7 +587,7 @@ static void ptrace_triggered(struct perf_event *bp,
22335 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
22336 {
22337 int i;
22338- int dr7 = 0;
22339+ unsigned long dr7 = 0;
22340 struct arch_hw_breakpoint *info;
22341
22342 for (i = 0; i < HBP_NUM; i++) {
22343@@ -856,7 +855,7 @@ long arch_ptrace(struct task_struct *child, long request,
22344 unsigned long addr, unsigned long data)
22345 {
22346 int ret;
22347- unsigned long __user *datap = (unsigned long __user *)data;
22348+ unsigned long __user *datap = (__force unsigned long __user *)data;
22349
22350 switch (request) {
22351 /* read the word at location addr in the USER area. */
22352@@ -941,14 +940,14 @@ long arch_ptrace(struct task_struct *child, long request,
22353 if ((int) addr < 0)
22354 return -EIO;
22355 ret = do_get_thread_area(child, addr,
22356- (struct user_desc __user *)data);
22357+ (__force struct user_desc __user *) data);
22358 break;
22359
22360 case PTRACE_SET_THREAD_AREA:
22361 if ((int) addr < 0)
22362 return -EIO;
22363 ret = do_set_thread_area(child, addr,
22364- (struct user_desc __user *)data, 0);
22365+ (__force struct user_desc __user *) data, 0);
22366 break;
22367 #endif
22368
22369@@ -1326,7 +1325,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
22370
22371 #ifdef CONFIG_X86_64
22372
22373-static struct user_regset x86_64_regsets[] __read_mostly = {
22374+static user_regset_no_const x86_64_regsets[] __read_only = {
22375 [REGSET_GENERAL] = {
22376 .core_note_type = NT_PRSTATUS,
22377 .n = sizeof(struct user_regs_struct) / sizeof(long),
22378@@ -1367,7 +1366,7 @@ static const struct user_regset_view user_x86_64_view = {
22379 #endif /* CONFIG_X86_64 */
22380
22381 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
22382-static struct user_regset x86_32_regsets[] __read_mostly = {
22383+static user_regset_no_const x86_32_regsets[] __read_only = {
22384 [REGSET_GENERAL] = {
22385 .core_note_type = NT_PRSTATUS,
22386 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
22387@@ -1420,7 +1419,7 @@ static const struct user_regset_view user_x86_32_view = {
22388 */
22389 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
22390
22391-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
22392+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
22393 {
22394 #ifdef CONFIG_X86_64
22395 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
22396@@ -1455,7 +1454,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
22397 memset(info, 0, sizeof(*info));
22398 info->si_signo = SIGTRAP;
22399 info->si_code = si_code;
22400- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
22401+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
22402 }
22403
22404 void user_single_step_siginfo(struct task_struct *tsk,
22405@@ -1484,6 +1483,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
22406 # define IS_IA32 0
22407 #endif
22408
22409+#ifdef CONFIG_GRKERNSEC_SETXID
22410+extern void gr_delayed_cred_worker(void);
22411+#endif
22412+
22413 /*
22414 * We must return the syscall number to actually look up in the table.
22415 * This can be -1L to skip running any syscall at all.
22416@@ -1494,6 +1497,11 @@ long syscall_trace_enter(struct pt_regs *regs)
22417
22418 user_exit();
22419
22420+#ifdef CONFIG_GRKERNSEC_SETXID
22421+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
22422+ gr_delayed_cred_worker();
22423+#endif
22424+
22425 /*
22426 * If we stepped into a sysenter/syscall insn, it trapped in
22427 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
22428@@ -1549,6 +1557,11 @@ void syscall_trace_leave(struct pt_regs *regs)
22429 */
22430 user_exit();
22431
22432+#ifdef CONFIG_GRKERNSEC_SETXID
22433+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
22434+ gr_delayed_cred_worker();
22435+#endif
22436+
22437 audit_syscall_exit(regs);
22438
22439 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
22440diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
22441index 2cb9470..ff1fd80 100644
22442--- a/arch/x86/kernel/pvclock.c
22443+++ b/arch/x86/kernel/pvclock.c
22444@@ -43,11 +43,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
22445 return pv_tsc_khz;
22446 }
22447
22448-static atomic64_t last_value = ATOMIC64_INIT(0);
22449+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
22450
22451 void pvclock_resume(void)
22452 {
22453- atomic64_set(&last_value, 0);
22454+ atomic64_set_unchecked(&last_value, 0);
22455 }
22456
22457 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
22458@@ -92,11 +92,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
22459 * updating at the same time, and one of them could be slightly behind,
22460 * making the assumption that last_value always go forward fail to hold.
22461 */
22462- last = atomic64_read(&last_value);
22463+ last = atomic64_read_unchecked(&last_value);
22464 do {
22465 if (ret < last)
22466 return last;
22467- last = atomic64_cmpxchg(&last_value, last, ret);
22468+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
22469 } while (unlikely(last != ret));
22470
22471 return ret;
22472diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
22473index 76fa1e9..abf09ea 100644
22474--- a/arch/x86/kernel/reboot.c
22475+++ b/arch/x86/kernel/reboot.c
22476@@ -36,7 +36,7 @@ void (*pm_power_off)(void);
22477 EXPORT_SYMBOL(pm_power_off);
22478
22479 static const struct desc_ptr no_idt = {};
22480-static int reboot_mode;
22481+static unsigned short reboot_mode;
22482 enum reboot_type reboot_type = BOOT_ACPI;
22483 int reboot_force;
22484
22485@@ -157,6 +157,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
22486
22487 void __noreturn machine_real_restart(unsigned int type)
22488 {
22489+
22490+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
22491+ struct desc_struct *gdt;
22492+#endif
22493+
22494 local_irq_disable();
22495
22496 /*
22497@@ -184,7 +189,29 @@ void __noreturn machine_real_restart(unsigned int type)
22498
22499 /* Jump to the identity-mapped low memory code */
22500 #ifdef CONFIG_X86_32
22501- asm volatile("jmpl *%0" : :
22502+
22503+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22504+ gdt = get_cpu_gdt_table(smp_processor_id());
22505+ pax_open_kernel();
22506+#ifdef CONFIG_PAX_MEMORY_UDEREF
22507+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
22508+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
22509+ loadsegment(ds, __KERNEL_DS);
22510+ loadsegment(es, __KERNEL_DS);
22511+ loadsegment(ss, __KERNEL_DS);
22512+#endif
22513+#ifdef CONFIG_PAX_KERNEXEC
22514+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
22515+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
22516+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
22517+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
22518+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
22519+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
22520+#endif
22521+ pax_close_kernel();
22522+#endif
22523+
22524+ asm volatile("ljmpl *%0" : :
22525 "rm" (real_mode_header->machine_real_restart_asm),
22526 "a" (type));
22527 #else
22528@@ -531,7 +558,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
22529 * try to force a triple fault and then cycle between hitting the keyboard
22530 * controller and doing that
22531 */
22532-static void native_machine_emergency_restart(void)
22533+static void __noreturn native_machine_emergency_restart(void)
22534 {
22535 int i;
22536 int attempt = 0;
22537@@ -654,13 +681,13 @@ void native_machine_shutdown(void)
22538 #endif
22539 }
22540
22541-static void __machine_emergency_restart(int emergency)
22542+static void __noreturn __machine_emergency_restart(int emergency)
22543 {
22544 reboot_emergency = emergency;
22545 machine_ops.emergency_restart();
22546 }
22547
22548-static void native_machine_restart(char *__unused)
22549+static void __noreturn native_machine_restart(char *__unused)
22550 {
22551 pr_notice("machine restart\n");
22552
22553@@ -669,7 +696,7 @@ static void native_machine_restart(char *__unused)
22554 __machine_emergency_restart(0);
22555 }
22556
22557-static void native_machine_halt(void)
22558+static void __noreturn native_machine_halt(void)
22559 {
22560 /* Stop other cpus and apics */
22561 machine_shutdown();
22562@@ -679,7 +706,7 @@ static void native_machine_halt(void)
22563 stop_this_cpu(NULL);
22564 }
22565
22566-static void native_machine_power_off(void)
22567+static void __noreturn native_machine_power_off(void)
22568 {
22569 if (pm_power_off) {
22570 if (!reboot_force)
22571@@ -688,9 +715,10 @@ static void native_machine_power_off(void)
22572 }
22573 /* A fallback in case there is no PM info available */
22574 tboot_shutdown(TB_SHUTDOWN_HALT);
22575+ unreachable();
22576 }
22577
22578-struct machine_ops machine_ops = {
22579+struct machine_ops machine_ops __read_only = {
22580 .power_off = native_machine_power_off,
22581 .shutdown = native_machine_shutdown,
22582 .emergency_restart = native_machine_emergency_restart,
22583diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
22584index f2bb9c9..bed145d7 100644
22585--- a/arch/x86/kernel/relocate_kernel_64.S
22586+++ b/arch/x86/kernel/relocate_kernel_64.S
22587@@ -11,6 +11,7 @@
22588 #include <asm/kexec.h>
22589 #include <asm/processor-flags.h>
22590 #include <asm/pgtable_types.h>
22591+#include <asm/alternative-asm.h>
22592
22593 /*
22594 * Must be relocatable PIC code callable as a C function
22595@@ -167,6 +168,7 @@ identity_mapped:
22596 xorq %r14, %r14
22597 xorq %r15, %r15
22598
22599+ pax_force_retaddr 0, 1
22600 ret
22601
22602 1:
22603diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
22604index fae9134..f8e4a47 100644
22605--- a/arch/x86/kernel/setup.c
22606+++ b/arch/x86/kernel/setup.c
22607@@ -111,6 +111,7 @@
22608 #include <asm/mce.h>
22609 #include <asm/alternative.h>
22610 #include <asm/prom.h>
22611+#include <asm/boot.h>
22612
22613 /*
22614 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
22615@@ -447,7 +448,7 @@ static void __init parse_setup_data(void)
22616
22617 switch (data->type) {
22618 case SETUP_E820_EXT:
22619- parse_e820_ext(data);
22620+ parse_e820_ext((struct setup_data __force_kernel *)data);
22621 break;
22622 case SETUP_DTB:
22623 add_dtb(pa_data);
22624@@ -774,7 +775,7 @@ static void __init trim_bios_range(void)
22625 * area (640->1Mb) as ram even though it is not.
22626 * take them out.
22627 */
22628- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
22629+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
22630
22631 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
22632 }
22633@@ -782,7 +783,7 @@ static void __init trim_bios_range(void)
22634 /* called before trim_bios_range() to spare extra sanitize */
22635 static void __init e820_add_kernel_range(void)
22636 {
22637- u64 start = __pa_symbol(_text);
22638+ u64 start = __pa_symbol(ktla_ktva(_text));
22639 u64 size = __pa_symbol(_end) - start;
22640
22641 /*
22642@@ -844,8 +845,12 @@ static void __init trim_low_memory_range(void)
22643
22644 void __init setup_arch(char **cmdline_p)
22645 {
22646+#ifdef CONFIG_X86_32
22647+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - ____LOAD_PHYSICAL_ADDR);
22648+#else
22649 memblock_reserve(__pa_symbol(_text),
22650 (unsigned long)__bss_stop - (unsigned long)_text);
22651+#endif
22652
22653 early_reserve_initrd();
22654
22655@@ -937,14 +942,14 @@ void __init setup_arch(char **cmdline_p)
22656
22657 if (!boot_params.hdr.root_flags)
22658 root_mountflags &= ~MS_RDONLY;
22659- init_mm.start_code = (unsigned long) _text;
22660- init_mm.end_code = (unsigned long) _etext;
22661+ init_mm.start_code = ktla_ktva((unsigned long) _text);
22662+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
22663 init_mm.end_data = (unsigned long) _edata;
22664 init_mm.brk = _brk_end;
22665
22666- code_resource.start = __pa_symbol(_text);
22667- code_resource.end = __pa_symbol(_etext)-1;
22668- data_resource.start = __pa_symbol(_etext);
22669+ code_resource.start = __pa_symbol(ktla_ktva(_text));
22670+ code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
22671+ data_resource.start = __pa_symbol(_sdata);
22672 data_resource.end = __pa_symbol(_edata)-1;
22673 bss_resource.start = __pa_symbol(__bss_start);
22674 bss_resource.end = __pa_symbol(__bss_stop)-1;
22675diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
22676index 5cdff03..80fa283 100644
22677--- a/arch/x86/kernel/setup_percpu.c
22678+++ b/arch/x86/kernel/setup_percpu.c
22679@@ -21,19 +21,17 @@
22680 #include <asm/cpu.h>
22681 #include <asm/stackprotector.h>
22682
22683-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
22684+#ifdef CONFIG_SMP
22685+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
22686 EXPORT_PER_CPU_SYMBOL(cpu_number);
22687+#endif
22688
22689-#ifdef CONFIG_X86_64
22690 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
22691-#else
22692-#define BOOT_PERCPU_OFFSET 0
22693-#endif
22694
22695 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
22696 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
22697
22698-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
22699+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
22700 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
22701 };
22702 EXPORT_SYMBOL(__per_cpu_offset);
22703@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
22704 {
22705 #ifdef CONFIG_NEED_MULTIPLE_NODES
22706 pg_data_t *last = NULL;
22707- unsigned int cpu;
22708+ int cpu;
22709
22710 for_each_possible_cpu(cpu) {
22711 int node = early_cpu_to_node(cpu);
22712@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
22713 {
22714 #ifdef CONFIG_X86_32
22715 struct desc_struct gdt;
22716+ unsigned long base = per_cpu_offset(cpu);
22717
22718- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
22719- 0x2 | DESCTYPE_S, 0x8);
22720- gdt.s = 1;
22721+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
22722+ 0x83 | DESCTYPE_S, 0xC);
22723 write_gdt_entry(get_cpu_gdt_table(cpu),
22724 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
22725 #endif
22726@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
22727 /* alrighty, percpu areas up and running */
22728 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
22729 for_each_possible_cpu(cpu) {
22730+#ifdef CONFIG_CC_STACKPROTECTOR
22731+#ifdef CONFIG_X86_32
22732+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
22733+#endif
22734+#endif
22735 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
22736 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
22737 per_cpu(cpu_number, cpu) = cpu;
22738@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
22739 */
22740 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
22741 #endif
22742+#ifdef CONFIG_CC_STACKPROTECTOR
22743+#ifdef CONFIG_X86_32
22744+ if (!cpu)
22745+ per_cpu(stack_canary.canary, cpu) = canary;
22746+#endif
22747+#endif
22748 /*
22749 * Up to this point, the boot CPU has been using .init.data
22750 * area. Reload any changed state for the boot CPU.
22751diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
22752index 6956299..f20beae 100644
22753--- a/arch/x86/kernel/signal.c
22754+++ b/arch/x86/kernel/signal.c
22755@@ -196,7 +196,7 @@ static unsigned long align_sigframe(unsigned long sp)
22756 * Align the stack pointer according to the i386 ABI,
22757 * i.e. so that on function entry ((sp + 4) & 15) == 0.
22758 */
22759- sp = ((sp + 4) & -16ul) - 4;
22760+ sp = ((sp - 12) & -16ul) - 4;
22761 #else /* !CONFIG_X86_32 */
22762 sp = round_down(sp, 16) - 8;
22763 #endif
22764@@ -304,9 +304,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
22765 }
22766
22767 if (current->mm->context.vdso)
22768- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
22769+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
22770 else
22771- restorer = &frame->retcode;
22772+ restorer = (void __user *)&frame->retcode;
22773 if (ksig->ka.sa.sa_flags & SA_RESTORER)
22774 restorer = ksig->ka.sa.sa_restorer;
22775
22776@@ -320,7 +320,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
22777 * reasons and because gdb uses it as a signature to notice
22778 * signal handler stack frames.
22779 */
22780- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
22781+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
22782
22783 if (err)
22784 return -EFAULT;
22785@@ -367,7 +367,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
22786 err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
22787
22788 /* Set up to return from userspace. */
22789- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
22790+ if (current->mm->context.vdso)
22791+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
22792+ else
22793+ restorer = (void __user *)&frame->retcode;
22794 if (ksig->ka.sa.sa_flags & SA_RESTORER)
22795 restorer = ksig->ka.sa.sa_restorer;
22796 put_user_ex(restorer, &frame->pretcode);
22797@@ -379,7 +382,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
22798 * reasons and because gdb uses it as a signature to notice
22799 * signal handler stack frames.
22800 */
22801- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
22802+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
22803 } put_user_catch(err);
22804
22805 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
22806@@ -615,7 +618,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
22807 {
22808 int usig = signr_convert(ksig->sig);
22809 sigset_t *set = sigmask_to_save();
22810- compat_sigset_t *cset = (compat_sigset_t *) set;
22811+ sigset_t sigcopy;
22812+ compat_sigset_t *cset;
22813+
22814+ sigcopy = *set;
22815+
22816+ cset = (compat_sigset_t *) &sigcopy;
22817
22818 /* Set up the stack frame */
22819 if (is_ia32_frame()) {
22820@@ -626,7 +634,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
22821 } else if (is_x32_frame()) {
22822 return x32_setup_rt_frame(ksig, cset, regs);
22823 } else {
22824- return __setup_rt_frame(ksig->sig, ksig, set, regs);
22825+ return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs);
22826 }
22827 }
22828
22829diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
22830index 48d2b7d..90d328a 100644
22831--- a/arch/x86/kernel/smp.c
22832+++ b/arch/x86/kernel/smp.c
22833@@ -285,7 +285,7 @@ static int __init nonmi_ipi_setup(char *str)
22834
22835 __setup("nonmi_ipi", nonmi_ipi_setup);
22836
22837-struct smp_ops smp_ops = {
22838+struct smp_ops smp_ops __read_only = {
22839 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
22840 .smp_prepare_cpus = native_smp_prepare_cpus,
22841 .smp_cpus_done = native_smp_cpus_done,
22842diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
22843index 9f190a2..90a0688 100644
22844--- a/arch/x86/kernel/smpboot.c
22845+++ b/arch/x86/kernel/smpboot.c
22846@@ -748,6 +748,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
22847 idle->thread.sp = (unsigned long) (((struct pt_regs *)
22848 (THREAD_SIZE + task_stack_page(idle))) - 1);
22849 per_cpu(current_task, cpu) = idle;
22850+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
22851
22852 #ifdef CONFIG_X86_32
22853 /* Stack for startup_32 can be just as for start_secondary onwards */
22854@@ -755,11 +756,13 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
22855 #else
22856 clear_tsk_thread_flag(idle, TIF_FORK);
22857 initial_gs = per_cpu_offset(cpu);
22858- per_cpu(kernel_stack, cpu) =
22859- (unsigned long)task_stack_page(idle) -
22860- KERNEL_STACK_OFFSET + THREAD_SIZE;
22861+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
22862 #endif
22863+
22864+ pax_open_kernel();
22865 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
22866+ pax_close_kernel();
22867+
22868 initial_code = (unsigned long)start_secondary;
22869 stack_start = idle->thread.sp;
22870
22871@@ -908,6 +911,15 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
22872 /* the FPU context is blank, nobody can own it */
22873 __cpu_disable_lazy_restore(cpu);
22874
22875+#ifdef CONFIG_PAX_PER_CPU_PGD
22876+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
22877+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22878+ KERNEL_PGD_PTRS);
22879+#endif
22880+
22881+ /* the FPU context is blank, nobody can own it */
22882+ __cpu_disable_lazy_restore(cpu);
22883+
22884 err = do_boot_cpu(apicid, cpu, tidle);
22885 if (err) {
22886 pr_debug("do_boot_cpu failed %d\n", err);
22887diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
22888index 9b4d51d..5d28b58 100644
22889--- a/arch/x86/kernel/step.c
22890+++ b/arch/x86/kernel/step.c
22891@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
22892 struct desc_struct *desc;
22893 unsigned long base;
22894
22895- seg &= ~7UL;
22896+ seg >>= 3;
22897
22898 mutex_lock(&child->mm->context.lock);
22899- if (unlikely((seg >> 3) >= child->mm->context.size))
22900+ if (unlikely(seg >= child->mm->context.size))
22901 addr = -1L; /* bogus selector, access would fault */
22902 else {
22903 desc = child->mm->context.ldt + seg;
22904@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
22905 addr += base;
22906 }
22907 mutex_unlock(&child->mm->context.lock);
22908- }
22909+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
22910+ addr = ktla_ktva(addr);
22911
22912 return addr;
22913 }
22914@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
22915 unsigned char opcode[15];
22916 unsigned long addr = convert_ip_to_linear(child, regs);
22917
22918+ if (addr == -EINVAL)
22919+ return 0;
22920+
22921 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
22922 for (i = 0; i < copied; i++) {
22923 switch (opcode[i]) {
22924diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
22925new file mode 100644
22926index 0000000..207bec6
22927--- /dev/null
22928+++ b/arch/x86/kernel/sys_i386_32.c
22929@@ -0,0 +1,250 @@
22930+/*
22931+ * This file contains various random system calls that
22932+ * have a non-standard calling sequence on the Linux/i386
22933+ * platform.
22934+ */
22935+
22936+#include <linux/errno.h>
22937+#include <linux/sched.h>
22938+#include <linux/mm.h>
22939+#include <linux/fs.h>
22940+#include <linux/smp.h>
22941+#include <linux/sem.h>
22942+#include <linux/msg.h>
22943+#include <linux/shm.h>
22944+#include <linux/stat.h>
22945+#include <linux/syscalls.h>
22946+#include <linux/mman.h>
22947+#include <linux/file.h>
22948+#include <linux/utsname.h>
22949+#include <linux/ipc.h>
22950+
22951+#include <linux/uaccess.h>
22952+#include <linux/unistd.h>
22953+
22954+#include <asm/syscalls.h>
22955+
22956+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
22957+{
22958+ unsigned long pax_task_size = TASK_SIZE;
22959+
22960+#ifdef CONFIG_PAX_SEGMEXEC
22961+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
22962+ pax_task_size = SEGMEXEC_TASK_SIZE;
22963+#endif
22964+
22965+ if (flags & MAP_FIXED)
22966+ if (len > pax_task_size || addr > pax_task_size - len)
22967+ return -EINVAL;
22968+
22969+ return 0;
22970+}
22971+
22972+unsigned long
22973+arch_get_unmapped_area(struct file *filp, unsigned long addr,
22974+ unsigned long len, unsigned long pgoff, unsigned long flags)
22975+{
22976+ struct mm_struct *mm = current->mm;
22977+ struct vm_area_struct *vma;
22978+ unsigned long start_addr, pax_task_size = TASK_SIZE;
22979+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
22980+
22981+#ifdef CONFIG_PAX_SEGMEXEC
22982+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22983+ pax_task_size = SEGMEXEC_TASK_SIZE;
22984+#endif
22985+
22986+ pax_task_size -= PAGE_SIZE;
22987+
22988+ if (len > pax_task_size)
22989+ return -ENOMEM;
22990+
22991+ if (flags & MAP_FIXED)
22992+ return addr;
22993+
22994+#ifdef CONFIG_PAX_RANDMMAP
22995+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
22996+#endif
22997+
22998+ if (addr) {
22999+ addr = PAGE_ALIGN(addr);
23000+ if (pax_task_size - len >= addr) {
23001+ vma = find_vma(mm, addr);
23002+ if (check_heap_stack_gap(vma, addr, len, offset))
23003+ return addr;
23004+ }
23005+ }
23006+ if (len > mm->cached_hole_size) {
23007+ start_addr = addr = mm->free_area_cache;
23008+ } else {
23009+ start_addr = addr = mm->mmap_base;
23010+ mm->cached_hole_size = 0;
23011+ }
23012+
23013+#ifdef CONFIG_PAX_PAGEEXEC
23014+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
23015+ start_addr = 0x00110000UL;
23016+
23017+#ifdef CONFIG_PAX_RANDMMAP
23018+ if (mm->pax_flags & MF_PAX_RANDMMAP)
23019+ start_addr += mm->delta_mmap & 0x03FFF000UL;
23020+#endif
23021+
23022+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
23023+ start_addr = addr = mm->mmap_base;
23024+ else
23025+ addr = start_addr;
23026+ }
23027+#endif
23028+
23029+full_search:
23030+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
23031+ /* At this point: (!vma || addr < vma->vm_end). */
23032+ if (pax_task_size - len < addr) {
23033+ /*
23034+ * Start a new search - just in case we missed
23035+ * some holes.
23036+ */
23037+ if (start_addr != mm->mmap_base) {
23038+ start_addr = addr = mm->mmap_base;
23039+ mm->cached_hole_size = 0;
23040+ goto full_search;
23041+ }
23042+ return -ENOMEM;
23043+ }
23044+ if (check_heap_stack_gap(vma, addr, len, offset))
23045+ break;
23046+ if (addr + mm->cached_hole_size < vma->vm_start)
23047+ mm->cached_hole_size = vma->vm_start - addr;
23048+ addr = vma->vm_end;
23049+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
23050+ start_addr = addr = mm->mmap_base;
23051+ mm->cached_hole_size = 0;
23052+ goto full_search;
23053+ }
23054+ }
23055+
23056+ /*
23057+ * Remember the place where we stopped the search:
23058+ */
23059+ mm->free_area_cache = addr + len;
23060+ return addr;
23061+}
23062+
23063+unsigned long
23064+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
23065+ const unsigned long len, const unsigned long pgoff,
23066+ const unsigned long flags)
23067+{
23068+ struct vm_area_struct *vma;
23069+ struct mm_struct *mm = current->mm;
23070+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
23071+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
23072+
23073+#ifdef CONFIG_PAX_SEGMEXEC
23074+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
23075+ pax_task_size = SEGMEXEC_TASK_SIZE;
23076+#endif
23077+
23078+ pax_task_size -= PAGE_SIZE;
23079+
23080+ /* requested length too big for entire address space */
23081+ if (len > pax_task_size)
23082+ return -ENOMEM;
23083+
23084+ if (flags & MAP_FIXED)
23085+ return addr;
23086+
23087+#ifdef CONFIG_PAX_PAGEEXEC
23088+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
23089+ goto bottomup;
23090+#endif
23091+
23092+#ifdef CONFIG_PAX_RANDMMAP
23093+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
23094+#endif
23095+
23096+ /* requesting a specific address */
23097+ if (addr) {
23098+ addr = PAGE_ALIGN(addr);
23099+ if (pax_task_size - len >= addr) {
23100+ vma = find_vma(mm, addr);
23101+ if (check_heap_stack_gap(vma, addr, len, offset))
23102+ return addr;
23103+ }
23104+ }
23105+
23106+ /* check if free_area_cache is useful for us */
23107+ if (len <= mm->cached_hole_size) {
23108+ mm->cached_hole_size = 0;
23109+ mm->free_area_cache = mm->mmap_base;
23110+ }
23111+
23112+ /* either no address requested or can't fit in requested address hole */
23113+ addr = mm->free_area_cache;
23114+
23115+ /* make sure it can fit in the remaining address space */
23116+ if (addr > len) {
23117+ vma = find_vma(mm, addr-len);
23118+ if (check_heap_stack_gap(vma, addr - len, len, offset))
23119+ /* remember the address as a hint for next time */
23120+ return (mm->free_area_cache = addr-len);
23121+ }
23122+
23123+ if (mm->mmap_base < len)
23124+ goto bottomup;
23125+
23126+ addr = mm->mmap_base-len;
23127+
23128+ do {
23129+ /*
23130+ * Lookup failure means no vma is above this address,
23131+ * else if new region fits below vma->vm_start,
23132+ * return with success:
23133+ */
23134+ vma = find_vma(mm, addr);
23135+ if (check_heap_stack_gap(vma, addr, len, offset))
23136+ /* remember the address as a hint for next time */
23137+ return (mm->free_area_cache = addr);
23138+
23139+ /* remember the largest hole we saw so far */
23140+ if (addr + mm->cached_hole_size < vma->vm_start)
23141+ mm->cached_hole_size = vma->vm_start - addr;
23142+
23143+ /* try just below the current vma->vm_start */
23144+ addr = skip_heap_stack_gap(vma, len, offset);
23145+ } while (!IS_ERR_VALUE(addr));
23146+
23147+bottomup:
23148+ /*
23149+ * A failed mmap() very likely causes application failure,
23150+ * so fall back to the bottom-up function here. This scenario
23151+ * can happen with large stack limits and large mmap()
23152+ * allocations.
23153+ */
23154+
23155+#ifdef CONFIG_PAX_SEGMEXEC
23156+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
23157+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
23158+ else
23159+#endif
23160+
23161+ mm->mmap_base = TASK_UNMAPPED_BASE;
23162+
23163+#ifdef CONFIG_PAX_RANDMMAP
23164+ if (mm->pax_flags & MF_PAX_RANDMMAP)
23165+ mm->mmap_base += mm->delta_mmap;
23166+#endif
23167+
23168+ mm->free_area_cache = mm->mmap_base;
23169+ mm->cached_hole_size = ~0UL;
23170+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
23171+ /*
23172+ * Restore the topdown base:
23173+ */
23174+ mm->mmap_base = base;
23175+ mm->free_area_cache = base;
23176+ mm->cached_hole_size = ~0UL;
23177+
23178+ return addr;
23179+}
23180diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
23181index dbded5a..ace2781 100644
23182--- a/arch/x86/kernel/sys_x86_64.c
23183+++ b/arch/x86/kernel/sys_x86_64.c
23184@@ -81,8 +81,8 @@ out:
23185 return error;
23186 }
23187
23188-static void find_start_end(unsigned long flags, unsigned long *begin,
23189- unsigned long *end)
23190+static void find_start_end(struct mm_struct *mm, unsigned long flags,
23191+ unsigned long *begin, unsigned long *end)
23192 {
23193 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
23194 unsigned long new_begin;
23195@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
23196 *begin = new_begin;
23197 }
23198 } else {
23199- *begin = TASK_UNMAPPED_BASE;
23200+ *begin = mm->mmap_base;
23201 *end = TASK_SIZE;
23202 }
23203 }
23204@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
23205 struct vm_area_struct *vma;
23206 struct vm_unmapped_area_info info;
23207 unsigned long begin, end;
23208+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
23209
23210 if (flags & MAP_FIXED)
23211 return addr;
23212
23213- find_start_end(flags, &begin, &end);
23214+ find_start_end(mm, flags, &begin, &end);
23215
23216 if (len > end)
23217 return -ENOMEM;
23218
23219+#ifdef CONFIG_PAX_RANDMMAP
23220+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
23221+#endif
23222+
23223 if (addr) {
23224 addr = PAGE_ALIGN(addr);
23225 vma = find_vma(mm, addr);
23226- if (end - len >= addr &&
23227- (!vma || addr + len <= vma->vm_start))
23228+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
23229 return addr;
23230 }
23231
23232@@ -137,6 +141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
23233 info.high_limit = end;
23234 info.align_mask = filp ? get_align_mask() : 0;
23235 info.align_offset = pgoff << PAGE_SHIFT;
23236+ info.threadstack_offset = offset;
23237 return vm_unmapped_area(&info);
23238 }
23239
23240@@ -149,6 +154,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
23241 struct mm_struct *mm = current->mm;
23242 unsigned long addr = addr0;
23243 struct vm_unmapped_area_info info;
23244+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
23245
23246 /* requested length too big for entire address space */
23247 if (len > TASK_SIZE)
23248@@ -161,12 +167,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
23249 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
23250 goto bottomup;
23251
23252+#ifdef CONFIG_PAX_RANDMMAP
23253+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
23254+#endif
23255+
23256 /* requesting a specific address */
23257 if (addr) {
23258 addr = PAGE_ALIGN(addr);
23259 vma = find_vma(mm, addr);
23260- if (TASK_SIZE - len >= addr &&
23261- (!vma || addr + len <= vma->vm_start))
23262+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
23263 return addr;
23264 }
23265
23266@@ -176,6 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
23267 info.high_limit = mm->mmap_base;
23268 info.align_mask = filp ? get_align_mask() : 0;
23269 info.align_offset = pgoff << PAGE_SHIFT;
23270+ info.threadstack_offset = offset;
23271 addr = vm_unmapped_area(&info);
23272 if (!(addr & ~PAGE_MASK))
23273 return addr;
23274diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
23275index f84fe00..f41d9f1 100644
23276--- a/arch/x86/kernel/tboot.c
23277+++ b/arch/x86/kernel/tboot.c
23278@@ -220,7 +220,7 @@ static int tboot_setup_sleep(void)
23279
23280 void tboot_shutdown(u32 shutdown_type)
23281 {
23282- void (*shutdown)(void);
23283+ void (* __noreturn shutdown)(void);
23284
23285 if (!tboot_enabled())
23286 return;
23287@@ -242,7 +242,7 @@ void tboot_shutdown(u32 shutdown_type)
23288
23289 switch_to_tboot_pt();
23290
23291- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
23292+ shutdown = (void *)tboot->shutdown_entry;
23293 shutdown();
23294
23295 /* should not reach here */
23296@@ -300,7 +300,7 @@ static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
23297 return 0;
23298 }
23299
23300-static atomic_t ap_wfs_count;
23301+static atomic_unchecked_t ap_wfs_count;
23302
23303 static int tboot_wait_for_aps(int num_aps)
23304 {
23305@@ -324,16 +324,16 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
23306 {
23307 switch (action) {
23308 case CPU_DYING:
23309- atomic_inc(&ap_wfs_count);
23310+ atomic_inc_unchecked(&ap_wfs_count);
23311 if (num_online_cpus() == 1)
23312- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
23313+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
23314 return NOTIFY_BAD;
23315 break;
23316 }
23317 return NOTIFY_OK;
23318 }
23319
23320-static struct notifier_block tboot_cpu_notifier __cpuinitdata =
23321+static struct notifier_block tboot_cpu_notifier =
23322 {
23323 .notifier_call = tboot_cpu_callback,
23324 };
23325@@ -345,7 +345,7 @@ static __init int tboot_late_init(void)
23326
23327 tboot_create_trampoline();
23328
23329- atomic_set(&ap_wfs_count, 0);
23330+ atomic_set_unchecked(&ap_wfs_count, 0);
23331 register_hotcpu_notifier(&tboot_cpu_notifier);
23332
23333 acpi_os_set_prepare_sleep(&tboot_sleep);
23334diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
23335index 24d3c91..d06b473 100644
23336--- a/arch/x86/kernel/time.c
23337+++ b/arch/x86/kernel/time.c
23338@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
23339 {
23340 unsigned long pc = instruction_pointer(regs);
23341
23342- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
23343+ if (!user_mode(regs) && in_lock_functions(pc)) {
23344 #ifdef CONFIG_FRAME_POINTER
23345- return *(unsigned long *)(regs->bp + sizeof(long));
23346+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
23347 #else
23348 unsigned long *sp =
23349 (unsigned long *)kernel_stack_pointer(regs);
23350@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
23351 * or above a saved flags. Eflags has bits 22-31 zero,
23352 * kernel addresses don't.
23353 */
23354+
23355+#ifdef CONFIG_PAX_KERNEXEC
23356+ return ktla_ktva(sp[0]);
23357+#else
23358 if (sp[0] >> 22)
23359 return sp[0];
23360 if (sp[1] >> 22)
23361 return sp[1];
23362 #endif
23363+
23364+#endif
23365 }
23366 return pc;
23367 }
23368diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
23369index 9d9d2f9..cad418a 100644
23370--- a/arch/x86/kernel/tls.c
23371+++ b/arch/x86/kernel/tls.c
23372@@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
23373 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
23374 return -EINVAL;
23375
23376+#ifdef CONFIG_PAX_SEGMEXEC
23377+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
23378+ return -EINVAL;
23379+#endif
23380+
23381 set_tls_desc(p, idx, &info, 1);
23382
23383 return 0;
23384@@ -204,7 +209,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
23385
23386 if (kbuf)
23387 info = kbuf;
23388- else if (__copy_from_user(infobuf, ubuf, count))
23389+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
23390 return -EFAULT;
23391 else
23392 info = infobuf;
23393diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
23394index 68bda7a..3ec7bb7 100644
23395--- a/arch/x86/kernel/traps.c
23396+++ b/arch/x86/kernel/traps.c
23397@@ -68,12 +68,6 @@
23398 #include <asm/setup.h>
23399
23400 asmlinkage int system_call(void);
23401-
23402-/*
23403- * The IDT has to be page-aligned to simplify the Pentium
23404- * F0 0F bug workaround.
23405- */
23406-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
23407 #endif
23408
23409 DECLARE_BITMAP(used_vectors, NR_VECTORS);
23410@@ -106,11 +100,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
23411 }
23412
23413 static int __kprobes
23414-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
23415+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
23416 struct pt_regs *regs, long error_code)
23417 {
23418 #ifdef CONFIG_X86_32
23419- if (regs->flags & X86_VM_MASK) {
23420+ if (v8086_mode(regs)) {
23421 /*
23422 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
23423 * On nmi (interrupt 2), do_trap should not be called.
23424@@ -123,12 +117,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
23425 return -1;
23426 }
23427 #endif
23428- if (!user_mode(regs)) {
23429+ if (!user_mode_novm(regs)) {
23430 if (!fixup_exception(regs)) {
23431 tsk->thread.error_code = error_code;
23432 tsk->thread.trap_nr = trapnr;
23433+
23434+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23435+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
23436+ str = "PAX: suspicious stack segment fault";
23437+#endif
23438+
23439 die(str, regs, error_code);
23440 }
23441+
23442+#ifdef CONFIG_PAX_REFCOUNT
23443+ if (trapnr == 4)
23444+ pax_report_refcount_overflow(regs);
23445+#endif
23446+
23447 return 0;
23448 }
23449
23450@@ -136,7 +142,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
23451 }
23452
23453 static void __kprobes
23454-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
23455+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
23456 long error_code, siginfo_t *info)
23457 {
23458 struct task_struct *tsk = current;
23459@@ -160,7 +166,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
23460 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
23461 printk_ratelimit()) {
23462 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
23463- tsk->comm, tsk->pid, str,
23464+ tsk->comm, task_pid_nr(tsk), str,
23465 regs->ip, regs->sp, error_code);
23466 print_vma_addr(" in ", regs->ip);
23467 pr_cont("\n");
23468@@ -266,7 +272,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
23469 conditional_sti(regs);
23470
23471 #ifdef CONFIG_X86_32
23472- if (regs->flags & X86_VM_MASK) {
23473+ if (v8086_mode(regs)) {
23474 local_irq_enable();
23475 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
23476 goto exit;
23477@@ -274,18 +280,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
23478 #endif
23479
23480 tsk = current;
23481- if (!user_mode(regs)) {
23482+ if (!user_mode_novm(regs)) {
23483 if (fixup_exception(regs))
23484 goto exit;
23485
23486 tsk->thread.error_code = error_code;
23487 tsk->thread.trap_nr = X86_TRAP_GP;
23488 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
23489- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
23490+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
23491+
23492+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23493+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
23494+ die("PAX: suspicious general protection fault", regs, error_code);
23495+ else
23496+#endif
23497+
23498 die("general protection fault", regs, error_code);
23499+ }
23500 goto exit;
23501 }
23502
23503+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23504+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
23505+ struct mm_struct *mm = tsk->mm;
23506+ unsigned long limit;
23507+
23508+ down_write(&mm->mmap_sem);
23509+ limit = mm->context.user_cs_limit;
23510+ if (limit < TASK_SIZE) {
23511+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
23512+ up_write(&mm->mmap_sem);
23513+ return;
23514+ }
23515+ up_write(&mm->mmap_sem);
23516+ }
23517+#endif
23518+
23519 tsk->thread.error_code = error_code;
23520 tsk->thread.trap_nr = X86_TRAP_GP;
23521
23522@@ -440,7 +470,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
23523 /* It's safe to allow irq's after DR6 has been saved */
23524 preempt_conditional_sti(regs);
23525
23526- if (regs->flags & X86_VM_MASK) {
23527+ if (v8086_mode(regs)) {
23528 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
23529 X86_TRAP_DB);
23530 preempt_conditional_cli(regs);
23531@@ -455,7 +485,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
23532 * We already checked v86 mode above, so we can check for kernel mode
23533 * by just checking the CPL of CS.
23534 */
23535- if ((dr6 & DR_STEP) && !user_mode(regs)) {
23536+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
23537 tsk->thread.debugreg6 &= ~DR_STEP;
23538 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
23539 regs->flags &= ~X86_EFLAGS_TF;
23540@@ -487,7 +517,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
23541 return;
23542 conditional_sti(regs);
23543
23544- if (!user_mode_vm(regs))
23545+ if (!user_mode(regs))
23546 {
23547 if (!fixup_exception(regs)) {
23548 task->thread.error_code = error_code;
23549diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
23550index 0ba4cfb..4596bec 100644
23551--- a/arch/x86/kernel/uprobes.c
23552+++ b/arch/x86/kernel/uprobes.c
23553@@ -629,7 +629,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
23554 int ret = NOTIFY_DONE;
23555
23556 /* We are only interested in userspace traps */
23557- if (regs && !user_mode_vm(regs))
23558+ if (regs && !user_mode(regs))
23559 return NOTIFY_DONE;
23560
23561 switch (val) {
23562diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
23563index b9242ba..50c5edd 100644
23564--- a/arch/x86/kernel/verify_cpu.S
23565+++ b/arch/x86/kernel/verify_cpu.S
23566@@ -20,6 +20,7 @@
23567 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
23568 * arch/x86/kernel/trampoline_64.S: secondary processor verification
23569 * arch/x86/kernel/head_32.S: processor startup
23570+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
23571 *
23572 * verify_cpu, returns the status of longmode and SSE in register %eax.
23573 * 0: Success 1: Failure
23574diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
23575index 3dbdd9c..888b14e 100644
23576--- a/arch/x86/kernel/vm86_32.c
23577+++ b/arch/x86/kernel/vm86_32.c
23578@@ -44,6 +44,7 @@
23579 #include <linux/ptrace.h>
23580 #include <linux/audit.h>
23581 #include <linux/stddef.h>
23582+#include <linux/grsecurity.h>
23583
23584 #include <asm/uaccess.h>
23585 #include <asm/io.h>
23586@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
23587 do_exit(SIGSEGV);
23588 }
23589
23590- tss = &per_cpu(init_tss, get_cpu());
23591+ tss = init_tss + get_cpu();
23592 current->thread.sp0 = current->thread.saved_sp0;
23593 current->thread.sysenter_cs = __KERNEL_CS;
23594 load_sp0(tss, &current->thread);
23595@@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
23596
23597 if (tsk->thread.saved_sp0)
23598 goto out;
23599+
23600+#ifdef CONFIG_GRKERNSEC_VM86
23601+ if (!capable(CAP_SYS_RAWIO)) {
23602+ gr_handle_vm86();
23603+ goto out;
23604+ }
23605+#endif
23606+
23607 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
23608 offsetof(struct kernel_vm86_struct, vm86plus) -
23609 sizeof(info.regs));
23610@@ -242,6 +251,14 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
23611 int tmp, ret;
23612 struct vm86plus_struct __user *v86;
23613
23614+#ifdef CONFIG_GRKERNSEC_VM86
23615+ if (!capable(CAP_SYS_RAWIO)) {
23616+ gr_handle_vm86();
23617+ ret = -EPERM;
23618+ goto out;
23619+ }
23620+#endif
23621+
23622 tsk = current;
23623 switch (cmd) {
23624 case VM86_REQUEST_IRQ:
23625@@ -329,7 +346,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
23626 tsk->thread.saved_fs = info->regs32->fs;
23627 tsk->thread.saved_gs = get_user_gs(info->regs32);
23628
23629- tss = &per_cpu(init_tss, get_cpu());
23630+ tss = init_tss + get_cpu();
23631 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
23632 if (cpu_has_sep)
23633 tsk->thread.sysenter_cs = 0;
23634@@ -536,7 +553,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
23635 goto cannot_handle;
23636 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
23637 goto cannot_handle;
23638- intr_ptr = (unsigned long __user *) (i << 2);
23639+ intr_ptr = (__force unsigned long __user *) (i << 2);
23640 if (get_user(segoffs, intr_ptr))
23641 goto cannot_handle;
23642 if ((segoffs >> 16) == BIOSSEG)
23643diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
23644index 22a1530..5efafbf 100644
23645--- a/arch/x86/kernel/vmlinux.lds.S
23646+++ b/arch/x86/kernel/vmlinux.lds.S
23647@@ -26,6 +26,13 @@
23648 #include <asm/page_types.h>
23649 #include <asm/cache.h>
23650 #include <asm/boot.h>
23651+#include <asm/segment.h>
23652+
23653+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23654+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
23655+#else
23656+#define __KERNEL_TEXT_OFFSET 0
23657+#endif
23658
23659 #undef i386 /* in case the preprocessor is a 32bit one */
23660
23661@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
23662
23663 PHDRS {
23664 text PT_LOAD FLAGS(5); /* R_E */
23665+#ifdef CONFIG_X86_32
23666+ module PT_LOAD FLAGS(5); /* R_E */
23667+#endif
23668+#ifdef CONFIG_XEN
23669+ rodata PT_LOAD FLAGS(5); /* R_E */
23670+#else
23671+ rodata PT_LOAD FLAGS(4); /* R__ */
23672+#endif
23673 data PT_LOAD FLAGS(6); /* RW_ */
23674-#ifdef CONFIG_X86_64
23675+ init.begin PT_LOAD FLAGS(6); /* RW_ */
23676 #ifdef CONFIG_SMP
23677 percpu PT_LOAD FLAGS(6); /* RW_ */
23678 #endif
23679+ text.init PT_LOAD FLAGS(5); /* R_E */
23680+ text.exit PT_LOAD FLAGS(5); /* R_E */
23681 init PT_LOAD FLAGS(7); /* RWE */
23682-#endif
23683 note PT_NOTE FLAGS(0); /* ___ */
23684 }
23685
23686 SECTIONS
23687 {
23688 #ifdef CONFIG_X86_32
23689- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
23690- phys_startup_32 = startup_32 - LOAD_OFFSET;
23691+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
23692 #else
23693- . = __START_KERNEL;
23694- phys_startup_64 = startup_64 - LOAD_OFFSET;
23695+ . = __START_KERNEL;
23696 #endif
23697
23698 /* Text and read-only data */
23699- .text : AT(ADDR(.text) - LOAD_OFFSET) {
23700- _text = .;
23701+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
23702 /* bootstrapping code */
23703+#ifdef CONFIG_X86_32
23704+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
23705+#else
23706+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
23707+#endif
23708+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
23709+ _text = .;
23710 HEAD_TEXT
23711 #ifdef CONFIG_X86_32
23712 . = ALIGN(PAGE_SIZE);
23713@@ -108,13 +128,48 @@ SECTIONS
23714 IRQENTRY_TEXT
23715 *(.fixup)
23716 *(.gnu.warning)
23717- /* End of text section */
23718- _etext = .;
23719 } :text = 0x9090
23720
23721- NOTES :text :note
23722+ . += __KERNEL_TEXT_OFFSET;
23723
23724- EXCEPTION_TABLE(16) :text = 0x9090
23725+#ifdef CONFIG_X86_32
23726+ . = ALIGN(PAGE_SIZE);
23727+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
23728+
23729+#ifdef CONFIG_PAX_KERNEXEC
23730+ MODULES_EXEC_VADDR = .;
23731+ BYTE(0)
23732+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
23733+ . = ALIGN(HPAGE_SIZE) - 1;
23734+ MODULES_EXEC_END = .;
23735+#endif
23736+
23737+ } :module
23738+#endif
23739+
23740+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
23741+ /* End of text section */
23742+ BYTE(0)
23743+ _etext = . - __KERNEL_TEXT_OFFSET;
23744+ }
23745+
23746+#ifdef CONFIG_X86_32
23747+ . = ALIGN(PAGE_SIZE);
23748+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
23749+ *(.idt)
23750+ . = ALIGN(PAGE_SIZE);
23751+ *(.empty_zero_page)
23752+ *(.initial_pg_fixmap)
23753+ *(.initial_pg_pmd)
23754+ *(.initial_page_table)
23755+ *(.swapper_pg_dir)
23756+ } :rodata
23757+#endif
23758+
23759+ . = ALIGN(PAGE_SIZE);
23760+ NOTES :rodata :note
23761+
23762+ EXCEPTION_TABLE(16) :rodata
23763
23764 #if defined(CONFIG_DEBUG_RODATA)
23765 /* .text should occupy whole number of pages */
23766@@ -126,16 +181,20 @@ SECTIONS
23767
23768 /* Data */
23769 .data : AT(ADDR(.data) - LOAD_OFFSET) {
23770+
23771+#ifdef CONFIG_PAX_KERNEXEC
23772+ . = ALIGN(HPAGE_SIZE);
23773+#else
23774+ . = ALIGN(PAGE_SIZE);
23775+#endif
23776+
23777 /* Start of data section */
23778 _sdata = .;
23779
23780 /* init_task */
23781 INIT_TASK_DATA(THREAD_SIZE)
23782
23783-#ifdef CONFIG_X86_32
23784- /* 32 bit has nosave before _edata */
23785 NOSAVE_DATA
23786-#endif
23787
23788 PAGE_ALIGNED_DATA(PAGE_SIZE)
23789
23790@@ -176,12 +235,19 @@ SECTIONS
23791 #endif /* CONFIG_X86_64 */
23792
23793 /* Init code and data - will be freed after init */
23794- . = ALIGN(PAGE_SIZE);
23795 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
23796+ BYTE(0)
23797+
23798+#ifdef CONFIG_PAX_KERNEXEC
23799+ . = ALIGN(HPAGE_SIZE);
23800+#else
23801+ . = ALIGN(PAGE_SIZE);
23802+#endif
23803+
23804 __init_begin = .; /* paired with __init_end */
23805- }
23806+ } :init.begin
23807
23808-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
23809+#ifdef CONFIG_SMP
23810 /*
23811 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
23812 * output PHDR, so the next output section - .init.text - should
23813@@ -190,12 +256,27 @@ SECTIONS
23814 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
23815 #endif
23816
23817- INIT_TEXT_SECTION(PAGE_SIZE)
23818-#ifdef CONFIG_X86_64
23819- :init
23820-#endif
23821+ . = ALIGN(PAGE_SIZE);
23822+ init_begin = .;
23823+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
23824+ VMLINUX_SYMBOL(_sinittext) = .;
23825+ INIT_TEXT
23826+ VMLINUX_SYMBOL(_einittext) = .;
23827+ . = ALIGN(PAGE_SIZE);
23828+ } :text.init
23829
23830- INIT_DATA_SECTION(16)
23831+ /*
23832+ * .exit.text is discard at runtime, not link time, to deal with
23833+ * references from .altinstructions and .eh_frame
23834+ */
23835+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
23836+ EXIT_TEXT
23837+ . = ALIGN(16);
23838+ } :text.exit
23839+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
23840+
23841+ . = ALIGN(PAGE_SIZE);
23842+ INIT_DATA_SECTION(16) :init
23843
23844 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
23845 __x86_cpu_dev_start = .;
23846@@ -257,19 +338,12 @@ SECTIONS
23847 }
23848
23849 . = ALIGN(8);
23850- /*
23851- * .exit.text is discard at runtime, not link time, to deal with
23852- * references from .altinstructions and .eh_frame
23853- */
23854- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
23855- EXIT_TEXT
23856- }
23857
23858 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
23859 EXIT_DATA
23860 }
23861
23862-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
23863+#ifndef CONFIG_SMP
23864 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
23865 #endif
23866
23867@@ -288,16 +362,10 @@ SECTIONS
23868 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
23869 __smp_locks = .;
23870 *(.smp_locks)
23871- . = ALIGN(PAGE_SIZE);
23872 __smp_locks_end = .;
23873+ . = ALIGN(PAGE_SIZE);
23874 }
23875
23876-#ifdef CONFIG_X86_64
23877- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
23878- NOSAVE_DATA
23879- }
23880-#endif
23881-
23882 /* BSS */
23883 . = ALIGN(PAGE_SIZE);
23884 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
23885@@ -313,6 +381,7 @@ SECTIONS
23886 __brk_base = .;
23887 . += 64 * 1024; /* 64k alignment slop space */
23888 *(.brk_reservation) /* areas brk users have reserved */
23889+ . = ALIGN(HPAGE_SIZE);
23890 __brk_limit = .;
23891 }
23892
23893@@ -339,13 +408,12 @@ SECTIONS
23894 * for the boot processor.
23895 */
23896 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
23897-INIT_PER_CPU(gdt_page);
23898 INIT_PER_CPU(irq_stack_union);
23899
23900 /*
23901 * Build-time check on the image size:
23902 */
23903-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
23904+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
23905 "kernel image bigger than KERNEL_IMAGE_SIZE");
23906
23907 #ifdef CONFIG_SMP
23908diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
23909index 9a907a6..f83f921 100644
23910--- a/arch/x86/kernel/vsyscall_64.c
23911+++ b/arch/x86/kernel/vsyscall_64.c
23912@@ -56,15 +56,13 @@
23913 DEFINE_VVAR(int, vgetcpu_mode);
23914 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
23915
23916-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
23917+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
23918
23919 static int __init vsyscall_setup(char *str)
23920 {
23921 if (str) {
23922 if (!strcmp("emulate", str))
23923 vsyscall_mode = EMULATE;
23924- else if (!strcmp("native", str))
23925- vsyscall_mode = NATIVE;
23926 else if (!strcmp("none", str))
23927 vsyscall_mode = NONE;
23928 else
23929@@ -323,8 +321,7 @@ do_ret:
23930 return true;
23931
23932 sigsegv:
23933- force_sig(SIGSEGV, current);
23934- return true;
23935+ do_group_exit(SIGKILL);
23936 }
23937
23938 /*
23939@@ -377,10 +374,7 @@ void __init map_vsyscall(void)
23940 extern char __vvar_page;
23941 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
23942
23943- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
23944- vsyscall_mode == NATIVE
23945- ? PAGE_KERNEL_VSYSCALL
23946- : PAGE_KERNEL_VVAR);
23947+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
23948 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
23949 (unsigned long)VSYSCALL_START);
23950
23951diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
23952index b014d94..6d6ca7b 100644
23953--- a/arch/x86/kernel/x8664_ksyms_64.c
23954+++ b/arch/x86/kernel/x8664_ksyms_64.c
23955@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
23956 EXPORT_SYMBOL(copy_user_generic_unrolled);
23957 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
23958 EXPORT_SYMBOL(__copy_user_nocache);
23959-EXPORT_SYMBOL(_copy_from_user);
23960-EXPORT_SYMBOL(_copy_to_user);
23961
23962 EXPORT_SYMBOL(copy_page);
23963 EXPORT_SYMBOL(clear_page);
23964diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
23965index 45a14db..075bb9b 100644
23966--- a/arch/x86/kernel/x86_init.c
23967+++ b/arch/x86/kernel/x86_init.c
23968@@ -85,7 +85,7 @@ struct x86_init_ops x86_init __initdata = {
23969 },
23970 };
23971
23972-struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
23973+struct x86_cpuinit_ops x86_cpuinit __cpuinitconst = {
23974 .early_percpu_clock_init = x86_init_noop,
23975 .setup_percpu_clockev = setup_secondary_APIC_clock,
23976 };
23977@@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
23978 static void default_nmi_init(void) { };
23979 static int default_i8042_detect(void) { return 1; };
23980
23981-struct x86_platform_ops x86_platform = {
23982+struct x86_platform_ops x86_platform __read_only = {
23983 .calibrate_tsc = native_calibrate_tsc,
23984 .get_wallclock = mach_get_cmos_time,
23985 .set_wallclock = mach_set_rtc_mmss,
23986@@ -107,7 +107,7 @@ struct x86_platform_ops x86_platform = {
23987 };
23988
23989 EXPORT_SYMBOL_GPL(x86_platform);
23990-struct x86_msi_ops x86_msi = {
23991+struct x86_msi_ops x86_msi __read_only = {
23992 .setup_msi_irqs = native_setup_msi_irqs,
23993 .compose_msi_msg = native_compose_msi_msg,
23994 .teardown_msi_irq = native_teardown_msi_irq,
23995@@ -116,7 +116,7 @@ struct x86_msi_ops x86_msi = {
23996 .setup_hpet_msi = default_setup_hpet_msi,
23997 };
23998
23999-struct x86_io_apic_ops x86_io_apic_ops = {
24000+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
24001 .init = native_io_apic_init_mappings,
24002 .read = native_io_apic_read,
24003 .write = native_io_apic_write,
24004diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
24005index ada87a3..afea76d 100644
24006--- a/arch/x86/kernel/xsave.c
24007+++ b/arch/x86/kernel/xsave.c
24008@@ -199,6 +199,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
24009 {
24010 int err;
24011
24012+ buf = (struct xsave_struct __user *)____m(buf);
24013 if (use_xsave())
24014 err = xsave_user(buf);
24015 else if (use_fxsr())
24016@@ -311,6 +312,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
24017 */
24018 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
24019 {
24020+ buf = (void __user *)____m(buf);
24021 if (use_xsave()) {
24022 if ((unsigned long)buf % 64 || fx_only) {
24023 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
24024diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
24025index a20ecb5..d0e2194 100644
24026--- a/arch/x86/kvm/cpuid.c
24027+++ b/arch/x86/kvm/cpuid.c
24028@@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
24029 struct kvm_cpuid2 *cpuid,
24030 struct kvm_cpuid_entry2 __user *entries)
24031 {
24032- int r;
24033+ int r, i;
24034
24035 r = -E2BIG;
24036 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
24037 goto out;
24038 r = -EFAULT;
24039- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
24040- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
24041+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
24042 goto out;
24043+ for (i = 0; i < cpuid->nent; ++i) {
24044+ struct kvm_cpuid_entry2 cpuid_entry;
24045+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
24046+ goto out;
24047+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
24048+ }
24049 vcpu->arch.cpuid_nent = cpuid->nent;
24050 kvm_apic_set_version(vcpu);
24051 kvm_x86_ops->cpuid_update(vcpu);
24052@@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
24053 struct kvm_cpuid2 *cpuid,
24054 struct kvm_cpuid_entry2 __user *entries)
24055 {
24056- int r;
24057+ int r, i;
24058
24059 r = -E2BIG;
24060 if (cpuid->nent < vcpu->arch.cpuid_nent)
24061 goto out;
24062 r = -EFAULT;
24063- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
24064- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
24065+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
24066 goto out;
24067+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
24068+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
24069+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
24070+ goto out;
24071+ }
24072 return 0;
24073
24074 out:
24075diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
24076index 698eece..776b682 100644
24077--- a/arch/x86/kvm/emulate.c
24078+++ b/arch/x86/kvm/emulate.c
24079@@ -328,6 +328,7 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
24080
24081 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
24082 do { \
24083+ unsigned long _tmp; \
24084 __asm__ __volatile__ ( \
24085 _PRE_EFLAGS("0", "4", "2") \
24086 _op _suffix " %"_x"3,%1; " \
24087@@ -342,8 +343,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
24088 /* Raw emulation: instruction has two explicit operands. */
24089 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
24090 do { \
24091- unsigned long _tmp; \
24092- \
24093 switch ((ctxt)->dst.bytes) { \
24094 case 2: \
24095 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
24096@@ -359,7 +358,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
24097
24098 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
24099 do { \
24100- unsigned long _tmp; \
24101 switch ((ctxt)->dst.bytes) { \
24102 case 1: \
24103 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
24104diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
24105index f77df1c..6f20690 100644
24106--- a/arch/x86/kvm/lapic.c
24107+++ b/arch/x86/kvm/lapic.c
24108@@ -55,7 +55,7 @@
24109 #define APIC_BUS_CYCLE_NS 1
24110
24111 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
24112-#define apic_debug(fmt, arg...)
24113+#define apic_debug(fmt, arg...) do {} while (0)
24114
24115 #define APIC_LVT_NUM 6
24116 /* 14 is the version for Xeon and Pentium 8.4.8*/
24117diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
24118index 105dd5b..1b0ccc2 100644
24119--- a/arch/x86/kvm/paging_tmpl.h
24120+++ b/arch/x86/kvm/paging_tmpl.h
24121@@ -208,7 +208,7 @@ retry_walk:
24122 if (unlikely(kvm_is_error_hva(host_addr)))
24123 goto error;
24124
24125- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
24126+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
24127 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
24128 goto error;
24129 walker->ptep_user[walker->level - 1] = ptep_user;
24130diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
24131index e1b1ce2..f7b4b43 100644
24132--- a/arch/x86/kvm/svm.c
24133+++ b/arch/x86/kvm/svm.c
24134@@ -3507,7 +3507,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
24135 int cpu = raw_smp_processor_id();
24136
24137 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
24138+
24139+ pax_open_kernel();
24140 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
24141+ pax_close_kernel();
24142+
24143 load_TR_desc();
24144 }
24145
24146@@ -3901,6 +3905,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
24147 #endif
24148 #endif
24149
24150+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
24151+ __set_fs(current_thread_info()->addr_limit);
24152+#endif
24153+
24154 reload_tss(vcpu);
24155
24156 local_irq_disable();
24157diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
24158index 0af1807..06912bb 100644
24159--- a/arch/x86/kvm/vmx.c
24160+++ b/arch/x86/kvm/vmx.c
24161@@ -1184,12 +1184,12 @@ static void vmcs_write64(unsigned long field, u64 value)
24162 #endif
24163 }
24164
24165-static void vmcs_clear_bits(unsigned long field, u32 mask)
24166+static void vmcs_clear_bits(unsigned long field, unsigned long mask)
24167 {
24168 vmcs_writel(field, vmcs_readl(field) & ~mask);
24169 }
24170
24171-static void vmcs_set_bits(unsigned long field, u32 mask)
24172+static void vmcs_set_bits(unsigned long field, unsigned long mask)
24173 {
24174 vmcs_writel(field, vmcs_readl(field) | mask);
24175 }
24176@@ -1390,7 +1390,11 @@ static void reload_tss(void)
24177 struct desc_struct *descs;
24178
24179 descs = (void *)gdt->address;
24180+
24181+ pax_open_kernel();
24182 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
24183+ pax_close_kernel();
24184+
24185 load_TR_desc();
24186 }
24187
24188@@ -1614,6 +1618,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
24189 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
24190 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
24191
24192+#ifdef CONFIG_PAX_PER_CPU_PGD
24193+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
24194+#endif
24195+
24196 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
24197 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
24198 vmx->loaded_vmcs->cpu = cpu;
24199@@ -2779,8 +2787,11 @@ static __init int hardware_setup(void)
24200 if (!cpu_has_vmx_flexpriority())
24201 flexpriority_enabled = 0;
24202
24203- if (!cpu_has_vmx_tpr_shadow())
24204- kvm_x86_ops->update_cr8_intercept = NULL;
24205+ if (!cpu_has_vmx_tpr_shadow()) {
24206+ pax_open_kernel();
24207+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
24208+ pax_close_kernel();
24209+ }
24210
24211 if (enable_ept && !cpu_has_vmx_ept_2m_page())
24212 kvm_disable_largepages();
24213@@ -2792,10 +2803,12 @@ static __init int hardware_setup(void)
24214 !cpu_has_vmx_virtual_intr_delivery())
24215 enable_apicv_reg_vid = 0;
24216
24217+ pax_open_kernel();
24218 if (enable_apicv_reg_vid)
24219- kvm_x86_ops->update_cr8_intercept = NULL;
24220+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
24221 else
24222- kvm_x86_ops->hwapic_irr_update = NULL;
24223+ *(void **)&kvm_x86_ops->hwapic_irr_update = NULL;
24224+ pax_close_kernel();
24225
24226 if (nested)
24227 nested_vmx_setup_ctls_msrs();
24228@@ -3883,7 +3896,10 @@ static void vmx_set_constant_host_state(void)
24229
24230 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
24231 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
24232+
24233+#ifndef CONFIG_PAX_PER_CPU_PGD
24234 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
24235+#endif
24236
24237 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
24238 #ifdef CONFIG_X86_64
24239@@ -3904,7 +3920,7 @@ static void vmx_set_constant_host_state(void)
24240 native_store_idt(&dt);
24241 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
24242
24243- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
24244+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
24245
24246 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
24247 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
24248@@ -6580,6 +6596,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
24249 "jmp 2f \n\t"
24250 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
24251 "2: "
24252+
24253+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
24254+ "ljmp %[cs],$3f\n\t"
24255+ "3: "
24256+#endif
24257+
24258 /* Save guest registers, load host registers, keep flags */
24259 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
24260 "pop %0 \n\t"
24261@@ -6632,6 +6654,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
24262 #endif
24263 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
24264 [wordsize]"i"(sizeof(ulong))
24265+
24266+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
24267+ ,[cs]"i"(__KERNEL_CS)
24268+#endif
24269+
24270 : "cc", "memory"
24271 #ifdef CONFIG_X86_64
24272 , "rax", "rbx", "rdi", "rsi"
24273@@ -6645,7 +6672,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
24274 if (debugctlmsr)
24275 update_debugctlmsr(debugctlmsr);
24276
24277-#ifndef CONFIG_X86_64
24278+#ifdef CONFIG_X86_32
24279 /*
24280 * The sysexit path does not restore ds/es, so we must set them to
24281 * a reasonable value ourselves.
24282@@ -6654,8 +6681,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
24283 * may be executed in interrupt context, which saves and restore segments
24284 * around it, nullifying its effect.
24285 */
24286- loadsegment(ds, __USER_DS);
24287- loadsegment(es, __USER_DS);
24288+ loadsegment(ds, __KERNEL_DS);
24289+ loadsegment(es, __KERNEL_DS);
24290+ loadsegment(ss, __KERNEL_DS);
24291+
24292+#ifdef CONFIG_PAX_KERNEXEC
24293+ loadsegment(fs, __KERNEL_PERCPU);
24294+#endif
24295+
24296+#ifdef CONFIG_PAX_MEMORY_UDEREF
24297+ __set_fs(current_thread_info()->addr_limit);
24298+#endif
24299+
24300 #endif
24301
24302 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
24303diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
24304index e172132..c3d3e27 100644
24305--- a/arch/x86/kvm/x86.c
24306+++ b/arch/x86/kvm/x86.c
24307@@ -1686,8 +1686,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
24308 {
24309 struct kvm *kvm = vcpu->kvm;
24310 int lm = is_long_mode(vcpu);
24311- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
24312- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
24313+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
24314+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
24315 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
24316 : kvm->arch.xen_hvm_config.blob_size_32;
24317 u32 page_num = data & ~PAGE_MASK;
24318@@ -2567,6 +2567,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
24319 if (n < msr_list.nmsrs)
24320 goto out;
24321 r = -EFAULT;
24322+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
24323+ goto out;
24324 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
24325 num_msrs_to_save * sizeof(u32)))
24326 goto out;
24327@@ -2696,7 +2698,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
24328 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
24329 struct kvm_interrupt *irq)
24330 {
24331- if (irq->irq < 0 || irq->irq >= KVM_NR_INTERRUPTS)
24332+ if (irq->irq >= KVM_NR_INTERRUPTS)
24333 return -EINVAL;
24334 if (irqchip_in_kernel(vcpu->kvm))
24335 return -ENXIO;
24336@@ -5247,7 +5249,7 @@ static struct notifier_block pvclock_gtod_notifier = {
24337 };
24338 #endif
24339
24340-int kvm_arch_init(void *opaque)
24341+int kvm_arch_init(const void *opaque)
24342 {
24343 int r;
24344 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
24345diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
24346index 7114c63..a1018fc 100644
24347--- a/arch/x86/lguest/boot.c
24348+++ b/arch/x86/lguest/boot.c
24349@@ -1201,9 +1201,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
24350 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
24351 * Launcher to reboot us.
24352 */
24353-static void lguest_restart(char *reason)
24354+static __noreturn void lguest_restart(char *reason)
24355 {
24356 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
24357+ BUG();
24358 }
24359
24360 /*G:050
24361diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
24362index 00933d5..3a64af9 100644
24363--- a/arch/x86/lib/atomic64_386_32.S
24364+++ b/arch/x86/lib/atomic64_386_32.S
24365@@ -48,6 +48,10 @@ BEGIN(read)
24366 movl (v), %eax
24367 movl 4(v), %edx
24368 RET_ENDP
24369+BEGIN(read_unchecked)
24370+ movl (v), %eax
24371+ movl 4(v), %edx
24372+RET_ENDP
24373 #undef v
24374
24375 #define v %esi
24376@@ -55,6 +59,10 @@ BEGIN(set)
24377 movl %ebx, (v)
24378 movl %ecx, 4(v)
24379 RET_ENDP
24380+BEGIN(set_unchecked)
24381+ movl %ebx, (v)
24382+ movl %ecx, 4(v)
24383+RET_ENDP
24384 #undef v
24385
24386 #define v %esi
24387@@ -70,6 +78,20 @@ RET_ENDP
24388 BEGIN(add)
24389 addl %eax, (v)
24390 adcl %edx, 4(v)
24391+
24392+#ifdef CONFIG_PAX_REFCOUNT
24393+ jno 0f
24394+ subl %eax, (v)
24395+ sbbl %edx, 4(v)
24396+ int $4
24397+0:
24398+ _ASM_EXTABLE(0b, 0b)
24399+#endif
24400+
24401+RET_ENDP
24402+BEGIN(add_unchecked)
24403+ addl %eax, (v)
24404+ adcl %edx, 4(v)
24405 RET_ENDP
24406 #undef v
24407
24408@@ -77,6 +99,24 @@ RET_ENDP
24409 BEGIN(add_return)
24410 addl (v), %eax
24411 adcl 4(v), %edx
24412+
24413+#ifdef CONFIG_PAX_REFCOUNT
24414+ into
24415+1234:
24416+ _ASM_EXTABLE(1234b, 2f)
24417+#endif
24418+
24419+ movl %eax, (v)
24420+ movl %edx, 4(v)
24421+
24422+#ifdef CONFIG_PAX_REFCOUNT
24423+2:
24424+#endif
24425+
24426+RET_ENDP
24427+BEGIN(add_return_unchecked)
24428+ addl (v), %eax
24429+ adcl 4(v), %edx
24430 movl %eax, (v)
24431 movl %edx, 4(v)
24432 RET_ENDP
24433@@ -86,6 +126,20 @@ RET_ENDP
24434 BEGIN(sub)
24435 subl %eax, (v)
24436 sbbl %edx, 4(v)
24437+
24438+#ifdef CONFIG_PAX_REFCOUNT
24439+ jno 0f
24440+ addl %eax, (v)
24441+ adcl %edx, 4(v)
24442+ int $4
24443+0:
24444+ _ASM_EXTABLE(0b, 0b)
24445+#endif
24446+
24447+RET_ENDP
24448+BEGIN(sub_unchecked)
24449+ subl %eax, (v)
24450+ sbbl %edx, 4(v)
24451 RET_ENDP
24452 #undef v
24453
24454@@ -96,6 +150,27 @@ BEGIN(sub_return)
24455 sbbl $0, %edx
24456 addl (v), %eax
24457 adcl 4(v), %edx
24458+
24459+#ifdef CONFIG_PAX_REFCOUNT
24460+ into
24461+1234:
24462+ _ASM_EXTABLE(1234b, 2f)
24463+#endif
24464+
24465+ movl %eax, (v)
24466+ movl %edx, 4(v)
24467+
24468+#ifdef CONFIG_PAX_REFCOUNT
24469+2:
24470+#endif
24471+
24472+RET_ENDP
24473+BEGIN(sub_return_unchecked)
24474+ negl %edx
24475+ negl %eax
24476+ sbbl $0, %edx
24477+ addl (v), %eax
24478+ adcl 4(v), %edx
24479 movl %eax, (v)
24480 movl %edx, 4(v)
24481 RET_ENDP
24482@@ -105,6 +180,20 @@ RET_ENDP
24483 BEGIN(inc)
24484 addl $1, (v)
24485 adcl $0, 4(v)
24486+
24487+#ifdef CONFIG_PAX_REFCOUNT
24488+ jno 0f
24489+ subl $1, (v)
24490+ sbbl $0, 4(v)
24491+ int $4
24492+0:
24493+ _ASM_EXTABLE(0b, 0b)
24494+#endif
24495+
24496+RET_ENDP
24497+BEGIN(inc_unchecked)
24498+ addl $1, (v)
24499+ adcl $0, 4(v)
24500 RET_ENDP
24501 #undef v
24502
24503@@ -114,6 +203,26 @@ BEGIN(inc_return)
24504 movl 4(v), %edx
24505 addl $1, %eax
24506 adcl $0, %edx
24507+
24508+#ifdef CONFIG_PAX_REFCOUNT
24509+ into
24510+1234:
24511+ _ASM_EXTABLE(1234b, 2f)
24512+#endif
24513+
24514+ movl %eax, (v)
24515+ movl %edx, 4(v)
24516+
24517+#ifdef CONFIG_PAX_REFCOUNT
24518+2:
24519+#endif
24520+
24521+RET_ENDP
24522+BEGIN(inc_return_unchecked)
24523+ movl (v), %eax
24524+ movl 4(v), %edx
24525+ addl $1, %eax
24526+ adcl $0, %edx
24527 movl %eax, (v)
24528 movl %edx, 4(v)
24529 RET_ENDP
24530@@ -123,6 +232,20 @@ RET_ENDP
24531 BEGIN(dec)
24532 subl $1, (v)
24533 sbbl $0, 4(v)
24534+
24535+#ifdef CONFIG_PAX_REFCOUNT
24536+ jno 0f
24537+ addl $1, (v)
24538+ adcl $0, 4(v)
24539+ int $4
24540+0:
24541+ _ASM_EXTABLE(0b, 0b)
24542+#endif
24543+
24544+RET_ENDP
24545+BEGIN(dec_unchecked)
24546+ subl $1, (v)
24547+ sbbl $0, 4(v)
24548 RET_ENDP
24549 #undef v
24550
24551@@ -132,6 +255,26 @@ BEGIN(dec_return)
24552 movl 4(v), %edx
24553 subl $1, %eax
24554 sbbl $0, %edx
24555+
24556+#ifdef CONFIG_PAX_REFCOUNT
24557+ into
24558+1234:
24559+ _ASM_EXTABLE(1234b, 2f)
24560+#endif
24561+
24562+ movl %eax, (v)
24563+ movl %edx, 4(v)
24564+
24565+#ifdef CONFIG_PAX_REFCOUNT
24566+2:
24567+#endif
24568+
24569+RET_ENDP
24570+BEGIN(dec_return_unchecked)
24571+ movl (v), %eax
24572+ movl 4(v), %edx
24573+ subl $1, %eax
24574+ sbbl $0, %edx
24575 movl %eax, (v)
24576 movl %edx, 4(v)
24577 RET_ENDP
24578@@ -143,6 +286,13 @@ BEGIN(add_unless)
24579 adcl %edx, %edi
24580 addl (v), %eax
24581 adcl 4(v), %edx
24582+
24583+#ifdef CONFIG_PAX_REFCOUNT
24584+ into
24585+1234:
24586+ _ASM_EXTABLE(1234b, 2f)
24587+#endif
24588+
24589 cmpl %eax, %ecx
24590 je 3f
24591 1:
24592@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
24593 1:
24594 addl $1, %eax
24595 adcl $0, %edx
24596+
24597+#ifdef CONFIG_PAX_REFCOUNT
24598+ into
24599+1234:
24600+ _ASM_EXTABLE(1234b, 2f)
24601+#endif
24602+
24603 movl %eax, (v)
24604 movl %edx, 4(v)
24605 movl $1, %eax
24606@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
24607 movl 4(v), %edx
24608 subl $1, %eax
24609 sbbl $0, %edx
24610+
24611+#ifdef CONFIG_PAX_REFCOUNT
24612+ into
24613+1234:
24614+ _ASM_EXTABLE(1234b, 1f)
24615+#endif
24616+
24617 js 1f
24618 movl %eax, (v)
24619 movl %edx, 4(v)
24620diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
24621index f5cc9eb..51fa319 100644
24622--- a/arch/x86/lib/atomic64_cx8_32.S
24623+++ b/arch/x86/lib/atomic64_cx8_32.S
24624@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
24625 CFI_STARTPROC
24626
24627 read64 %ecx
24628+ pax_force_retaddr
24629 ret
24630 CFI_ENDPROC
24631 ENDPROC(atomic64_read_cx8)
24632
24633+ENTRY(atomic64_read_unchecked_cx8)
24634+ CFI_STARTPROC
24635+
24636+ read64 %ecx
24637+ pax_force_retaddr
24638+ ret
24639+ CFI_ENDPROC
24640+ENDPROC(atomic64_read_unchecked_cx8)
24641+
24642 ENTRY(atomic64_set_cx8)
24643 CFI_STARTPROC
24644
24645@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
24646 cmpxchg8b (%esi)
24647 jne 1b
24648
24649+ pax_force_retaddr
24650 ret
24651 CFI_ENDPROC
24652 ENDPROC(atomic64_set_cx8)
24653
24654+ENTRY(atomic64_set_unchecked_cx8)
24655+ CFI_STARTPROC
24656+
24657+1:
24658+/* we don't need LOCK_PREFIX since aligned 64-bit writes
24659+ * are atomic on 586 and newer */
24660+ cmpxchg8b (%esi)
24661+ jne 1b
24662+
24663+ pax_force_retaddr
24664+ ret
24665+ CFI_ENDPROC
24666+ENDPROC(atomic64_set_unchecked_cx8)
24667+
24668 ENTRY(atomic64_xchg_cx8)
24669 CFI_STARTPROC
24670
24671@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
24672 cmpxchg8b (%esi)
24673 jne 1b
24674
24675+ pax_force_retaddr
24676 ret
24677 CFI_ENDPROC
24678 ENDPROC(atomic64_xchg_cx8)
24679
24680-.macro addsub_return func ins insc
24681-ENTRY(atomic64_\func\()_return_cx8)
24682+.macro addsub_return func ins insc unchecked=""
24683+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
24684 CFI_STARTPROC
24685 SAVE ebp
24686 SAVE ebx
24687@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
24688 movl %edx, %ecx
24689 \ins\()l %esi, %ebx
24690 \insc\()l %edi, %ecx
24691+
24692+.ifb \unchecked
24693+#ifdef CONFIG_PAX_REFCOUNT
24694+ into
24695+2:
24696+ _ASM_EXTABLE(2b, 3f)
24697+#endif
24698+.endif
24699+
24700 LOCK_PREFIX
24701 cmpxchg8b (%ebp)
24702 jne 1b
24703-
24704-10:
24705 movl %ebx, %eax
24706 movl %ecx, %edx
24707+
24708+.ifb \unchecked
24709+#ifdef CONFIG_PAX_REFCOUNT
24710+3:
24711+#endif
24712+.endif
24713+
24714 RESTORE edi
24715 RESTORE esi
24716 RESTORE ebx
24717 RESTORE ebp
24718+ pax_force_retaddr
24719 ret
24720 CFI_ENDPROC
24721-ENDPROC(atomic64_\func\()_return_cx8)
24722+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
24723 .endm
24724
24725 addsub_return add add adc
24726 addsub_return sub sub sbb
24727+addsub_return add add adc _unchecked
24728+addsub_return sub sub sbb _unchecked
24729
24730-.macro incdec_return func ins insc
24731-ENTRY(atomic64_\func\()_return_cx8)
24732+.macro incdec_return func ins insc unchecked=""
24733+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
24734 CFI_STARTPROC
24735 SAVE ebx
24736
24737@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
24738 movl %edx, %ecx
24739 \ins\()l $1, %ebx
24740 \insc\()l $0, %ecx
24741+
24742+.ifb \unchecked
24743+#ifdef CONFIG_PAX_REFCOUNT
24744+ into
24745+2:
24746+ _ASM_EXTABLE(2b, 3f)
24747+#endif
24748+.endif
24749+
24750 LOCK_PREFIX
24751 cmpxchg8b (%esi)
24752 jne 1b
24753
24754-10:
24755 movl %ebx, %eax
24756 movl %ecx, %edx
24757+
24758+.ifb \unchecked
24759+#ifdef CONFIG_PAX_REFCOUNT
24760+3:
24761+#endif
24762+.endif
24763+
24764 RESTORE ebx
24765+ pax_force_retaddr
24766 ret
24767 CFI_ENDPROC
24768-ENDPROC(atomic64_\func\()_return_cx8)
24769+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
24770 .endm
24771
24772 incdec_return inc add adc
24773 incdec_return dec sub sbb
24774+incdec_return inc add adc _unchecked
24775+incdec_return dec sub sbb _unchecked
24776
24777 ENTRY(atomic64_dec_if_positive_cx8)
24778 CFI_STARTPROC
24779@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
24780 movl %edx, %ecx
24781 subl $1, %ebx
24782 sbb $0, %ecx
24783+
24784+#ifdef CONFIG_PAX_REFCOUNT
24785+ into
24786+1234:
24787+ _ASM_EXTABLE(1234b, 2f)
24788+#endif
24789+
24790 js 2f
24791 LOCK_PREFIX
24792 cmpxchg8b (%esi)
24793@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
24794 movl %ebx, %eax
24795 movl %ecx, %edx
24796 RESTORE ebx
24797+ pax_force_retaddr
24798 ret
24799 CFI_ENDPROC
24800 ENDPROC(atomic64_dec_if_positive_cx8)
24801@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
24802 movl %edx, %ecx
24803 addl %ebp, %ebx
24804 adcl %edi, %ecx
24805+
24806+#ifdef CONFIG_PAX_REFCOUNT
24807+ into
24808+1234:
24809+ _ASM_EXTABLE(1234b, 3f)
24810+#endif
24811+
24812 LOCK_PREFIX
24813 cmpxchg8b (%esi)
24814 jne 1b
24815@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
24816 CFI_ADJUST_CFA_OFFSET -8
24817 RESTORE ebx
24818 RESTORE ebp
24819+ pax_force_retaddr
24820 ret
24821 4:
24822 cmpl %edx, 4(%esp)
24823@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
24824 xorl %ecx, %ecx
24825 addl $1, %ebx
24826 adcl %edx, %ecx
24827+
24828+#ifdef CONFIG_PAX_REFCOUNT
24829+ into
24830+1234:
24831+ _ASM_EXTABLE(1234b, 3f)
24832+#endif
24833+
24834 LOCK_PREFIX
24835 cmpxchg8b (%esi)
24836 jne 1b
24837@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
24838 movl $1, %eax
24839 3:
24840 RESTORE ebx
24841+ pax_force_retaddr
24842 ret
24843 CFI_ENDPROC
24844 ENDPROC(atomic64_inc_not_zero_cx8)
24845diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
24846index 2af5df3..62b1a5a 100644
24847--- a/arch/x86/lib/checksum_32.S
24848+++ b/arch/x86/lib/checksum_32.S
24849@@ -29,7 +29,8 @@
24850 #include <asm/dwarf2.h>
24851 #include <asm/errno.h>
24852 #include <asm/asm.h>
24853-
24854+#include <asm/segment.h>
24855+
24856 /*
24857 * computes a partial checksum, e.g. for TCP/UDP fragments
24858 */
24859@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
24860
24861 #define ARGBASE 16
24862 #define FP 12
24863-
24864-ENTRY(csum_partial_copy_generic)
24865+
24866+ENTRY(csum_partial_copy_generic_to_user)
24867 CFI_STARTPROC
24868+
24869+#ifdef CONFIG_PAX_MEMORY_UDEREF
24870+ pushl_cfi %gs
24871+ popl_cfi %es
24872+ jmp csum_partial_copy_generic
24873+#endif
24874+
24875+ENTRY(csum_partial_copy_generic_from_user)
24876+
24877+#ifdef CONFIG_PAX_MEMORY_UDEREF
24878+ pushl_cfi %gs
24879+ popl_cfi %ds
24880+#endif
24881+
24882+ENTRY(csum_partial_copy_generic)
24883 subl $4,%esp
24884 CFI_ADJUST_CFA_OFFSET 4
24885 pushl_cfi %edi
24886@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
24887 jmp 4f
24888 SRC(1: movw (%esi), %bx )
24889 addl $2, %esi
24890-DST( movw %bx, (%edi) )
24891+DST( movw %bx, %es:(%edi) )
24892 addl $2, %edi
24893 addw %bx, %ax
24894 adcl $0, %eax
24895@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
24896 SRC(1: movl (%esi), %ebx )
24897 SRC( movl 4(%esi), %edx )
24898 adcl %ebx, %eax
24899-DST( movl %ebx, (%edi) )
24900+DST( movl %ebx, %es:(%edi) )
24901 adcl %edx, %eax
24902-DST( movl %edx, 4(%edi) )
24903+DST( movl %edx, %es:4(%edi) )
24904
24905 SRC( movl 8(%esi), %ebx )
24906 SRC( movl 12(%esi), %edx )
24907 adcl %ebx, %eax
24908-DST( movl %ebx, 8(%edi) )
24909+DST( movl %ebx, %es:8(%edi) )
24910 adcl %edx, %eax
24911-DST( movl %edx, 12(%edi) )
24912+DST( movl %edx, %es:12(%edi) )
24913
24914 SRC( movl 16(%esi), %ebx )
24915 SRC( movl 20(%esi), %edx )
24916 adcl %ebx, %eax
24917-DST( movl %ebx, 16(%edi) )
24918+DST( movl %ebx, %es:16(%edi) )
24919 adcl %edx, %eax
24920-DST( movl %edx, 20(%edi) )
24921+DST( movl %edx, %es:20(%edi) )
24922
24923 SRC( movl 24(%esi), %ebx )
24924 SRC( movl 28(%esi), %edx )
24925 adcl %ebx, %eax
24926-DST( movl %ebx, 24(%edi) )
24927+DST( movl %ebx, %es:24(%edi) )
24928 adcl %edx, %eax
24929-DST( movl %edx, 28(%edi) )
24930+DST( movl %edx, %es:28(%edi) )
24931
24932 lea 32(%esi), %esi
24933 lea 32(%edi), %edi
24934@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
24935 shrl $2, %edx # This clears CF
24936 SRC(3: movl (%esi), %ebx )
24937 adcl %ebx, %eax
24938-DST( movl %ebx, (%edi) )
24939+DST( movl %ebx, %es:(%edi) )
24940 lea 4(%esi), %esi
24941 lea 4(%edi), %edi
24942 dec %edx
24943@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
24944 jb 5f
24945 SRC( movw (%esi), %cx )
24946 leal 2(%esi), %esi
24947-DST( movw %cx, (%edi) )
24948+DST( movw %cx, %es:(%edi) )
24949 leal 2(%edi), %edi
24950 je 6f
24951 shll $16,%ecx
24952 SRC(5: movb (%esi), %cl )
24953-DST( movb %cl, (%edi) )
24954+DST( movb %cl, %es:(%edi) )
24955 6: addl %ecx, %eax
24956 adcl $0, %eax
24957 7:
24958@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
24959
24960 6001:
24961 movl ARGBASE+20(%esp), %ebx # src_err_ptr
24962- movl $-EFAULT, (%ebx)
24963+ movl $-EFAULT, %ss:(%ebx)
24964
24965 # zero the complete destination - computing the rest
24966 # is too much work
24967@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
24968
24969 6002:
24970 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
24971- movl $-EFAULT,(%ebx)
24972+ movl $-EFAULT,%ss:(%ebx)
24973 jmp 5000b
24974
24975 .previous
24976
24977+ pushl_cfi %ss
24978+ popl_cfi %ds
24979+ pushl_cfi %ss
24980+ popl_cfi %es
24981 popl_cfi %ebx
24982 CFI_RESTORE ebx
24983 popl_cfi %esi
24984@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
24985 popl_cfi %ecx # equivalent to addl $4,%esp
24986 ret
24987 CFI_ENDPROC
24988-ENDPROC(csum_partial_copy_generic)
24989+ENDPROC(csum_partial_copy_generic_to_user)
24990
24991 #else
24992
24993 /* Version for PentiumII/PPro */
24994
24995 #define ROUND1(x) \
24996+ nop; nop; nop; \
24997 SRC(movl x(%esi), %ebx ) ; \
24998 addl %ebx, %eax ; \
24999- DST(movl %ebx, x(%edi) ) ;
25000+ DST(movl %ebx, %es:x(%edi)) ;
25001
25002 #define ROUND(x) \
25003+ nop; nop; nop; \
25004 SRC(movl x(%esi), %ebx ) ; \
25005 adcl %ebx, %eax ; \
25006- DST(movl %ebx, x(%edi) ) ;
25007+ DST(movl %ebx, %es:x(%edi)) ;
25008
25009 #define ARGBASE 12
25010-
25011-ENTRY(csum_partial_copy_generic)
25012+
25013+ENTRY(csum_partial_copy_generic_to_user)
25014 CFI_STARTPROC
25015+
25016+#ifdef CONFIG_PAX_MEMORY_UDEREF
25017+ pushl_cfi %gs
25018+ popl_cfi %es
25019+ jmp csum_partial_copy_generic
25020+#endif
25021+
25022+ENTRY(csum_partial_copy_generic_from_user)
25023+
25024+#ifdef CONFIG_PAX_MEMORY_UDEREF
25025+ pushl_cfi %gs
25026+ popl_cfi %ds
25027+#endif
25028+
25029+ENTRY(csum_partial_copy_generic)
25030 pushl_cfi %ebx
25031 CFI_REL_OFFSET ebx, 0
25032 pushl_cfi %edi
25033@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
25034 subl %ebx, %edi
25035 lea -1(%esi),%edx
25036 andl $-32,%edx
25037- lea 3f(%ebx,%ebx), %ebx
25038+ lea 3f(%ebx,%ebx,2), %ebx
25039 testl %esi, %esi
25040 jmp *%ebx
25041 1: addl $64,%esi
25042@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
25043 jb 5f
25044 SRC( movw (%esi), %dx )
25045 leal 2(%esi), %esi
25046-DST( movw %dx, (%edi) )
25047+DST( movw %dx, %es:(%edi) )
25048 leal 2(%edi), %edi
25049 je 6f
25050 shll $16,%edx
25051 5:
25052 SRC( movb (%esi), %dl )
25053-DST( movb %dl, (%edi) )
25054+DST( movb %dl, %es:(%edi) )
25055 6: addl %edx, %eax
25056 adcl $0, %eax
25057 7:
25058 .section .fixup, "ax"
25059 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
25060- movl $-EFAULT, (%ebx)
25061+ movl $-EFAULT, %ss:(%ebx)
25062 # zero the complete destination (computing the rest is too much work)
25063 movl ARGBASE+8(%esp),%edi # dst
25064 movl ARGBASE+12(%esp),%ecx # len
25065@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
25066 rep; stosb
25067 jmp 7b
25068 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
25069- movl $-EFAULT, (%ebx)
25070+ movl $-EFAULT, %ss:(%ebx)
25071 jmp 7b
25072 .previous
25073
25074+#ifdef CONFIG_PAX_MEMORY_UDEREF
25075+ pushl_cfi %ss
25076+ popl_cfi %ds
25077+ pushl_cfi %ss
25078+ popl_cfi %es
25079+#endif
25080+
25081 popl_cfi %esi
25082 CFI_RESTORE esi
25083 popl_cfi %edi
25084@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
25085 CFI_RESTORE ebx
25086 ret
25087 CFI_ENDPROC
25088-ENDPROC(csum_partial_copy_generic)
25089+ENDPROC(csum_partial_copy_generic_to_user)
25090
25091 #undef ROUND
25092 #undef ROUND1
25093diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
25094index f2145cf..cea889d 100644
25095--- a/arch/x86/lib/clear_page_64.S
25096+++ b/arch/x86/lib/clear_page_64.S
25097@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
25098 movl $4096/8,%ecx
25099 xorl %eax,%eax
25100 rep stosq
25101+ pax_force_retaddr
25102 ret
25103 CFI_ENDPROC
25104 ENDPROC(clear_page_c)
25105@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
25106 movl $4096,%ecx
25107 xorl %eax,%eax
25108 rep stosb
25109+ pax_force_retaddr
25110 ret
25111 CFI_ENDPROC
25112 ENDPROC(clear_page_c_e)
25113@@ -43,6 +45,7 @@ ENTRY(clear_page)
25114 leaq 64(%rdi),%rdi
25115 jnz .Lloop
25116 nop
25117+ pax_force_retaddr
25118 ret
25119 CFI_ENDPROC
25120 .Lclear_page_end:
25121@@ -58,7 +61,7 @@ ENDPROC(clear_page)
25122
25123 #include <asm/cpufeature.h>
25124
25125- .section .altinstr_replacement,"ax"
25126+ .section .altinstr_replacement,"a"
25127 1: .byte 0xeb /* jmp <disp8> */
25128 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
25129 2: .byte 0xeb /* jmp <disp8> */
25130diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
25131index 1e572c5..2a162cd 100644
25132--- a/arch/x86/lib/cmpxchg16b_emu.S
25133+++ b/arch/x86/lib/cmpxchg16b_emu.S
25134@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
25135
25136 popf
25137 mov $1, %al
25138+ pax_force_retaddr
25139 ret
25140
25141 not_same:
25142 popf
25143 xor %al,%al
25144+ pax_force_retaddr
25145 ret
25146
25147 CFI_ENDPROC
25148diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
25149index 176cca6..1166c50 100644
25150--- a/arch/x86/lib/copy_page_64.S
25151+++ b/arch/x86/lib/copy_page_64.S
25152@@ -9,6 +9,7 @@ copy_page_rep:
25153 CFI_STARTPROC
25154 movl $4096/8, %ecx
25155 rep movsq
25156+ pax_force_retaddr
25157 ret
25158 CFI_ENDPROC
25159 ENDPROC(copy_page_rep)
25160@@ -20,12 +21,14 @@ ENDPROC(copy_page_rep)
25161
25162 ENTRY(copy_page)
25163 CFI_STARTPROC
25164- subq $2*8, %rsp
25165- CFI_ADJUST_CFA_OFFSET 2*8
25166+ subq $3*8, %rsp
25167+ CFI_ADJUST_CFA_OFFSET 3*8
25168 movq %rbx, (%rsp)
25169 CFI_REL_OFFSET rbx, 0
25170 movq %r12, 1*8(%rsp)
25171 CFI_REL_OFFSET r12, 1*8
25172+ movq %r13, 2*8(%rsp)
25173+ CFI_REL_OFFSET r13, 2*8
25174
25175 movl $(4096/64)-5, %ecx
25176 .p2align 4
25177@@ -36,7 +39,7 @@ ENTRY(copy_page)
25178 movq 0x8*2(%rsi), %rdx
25179 movq 0x8*3(%rsi), %r8
25180 movq 0x8*4(%rsi), %r9
25181- movq 0x8*5(%rsi), %r10
25182+ movq 0x8*5(%rsi), %r13
25183 movq 0x8*6(%rsi), %r11
25184 movq 0x8*7(%rsi), %r12
25185
25186@@ -47,7 +50,7 @@ ENTRY(copy_page)
25187 movq %rdx, 0x8*2(%rdi)
25188 movq %r8, 0x8*3(%rdi)
25189 movq %r9, 0x8*4(%rdi)
25190- movq %r10, 0x8*5(%rdi)
25191+ movq %r13, 0x8*5(%rdi)
25192 movq %r11, 0x8*6(%rdi)
25193 movq %r12, 0x8*7(%rdi)
25194
25195@@ -66,7 +69,7 @@ ENTRY(copy_page)
25196 movq 0x8*2(%rsi), %rdx
25197 movq 0x8*3(%rsi), %r8
25198 movq 0x8*4(%rsi), %r9
25199- movq 0x8*5(%rsi), %r10
25200+ movq 0x8*5(%rsi), %r13
25201 movq 0x8*6(%rsi), %r11
25202 movq 0x8*7(%rsi), %r12
25203
25204@@ -75,7 +78,7 @@ ENTRY(copy_page)
25205 movq %rdx, 0x8*2(%rdi)
25206 movq %r8, 0x8*3(%rdi)
25207 movq %r9, 0x8*4(%rdi)
25208- movq %r10, 0x8*5(%rdi)
25209+ movq %r13, 0x8*5(%rdi)
25210 movq %r11, 0x8*6(%rdi)
25211 movq %r12, 0x8*7(%rdi)
25212
25213@@ -87,8 +90,11 @@ ENTRY(copy_page)
25214 CFI_RESTORE rbx
25215 movq 1*8(%rsp), %r12
25216 CFI_RESTORE r12
25217- addq $2*8, %rsp
25218- CFI_ADJUST_CFA_OFFSET -2*8
25219+ movq 2*8(%rsp), %r13
25220+ CFI_RESTORE r13
25221+ addq $3*8, %rsp
25222+ CFI_ADJUST_CFA_OFFSET -3*8
25223+ pax_force_retaddr
25224 ret
25225 .Lcopy_page_end:
25226 CFI_ENDPROC
25227@@ -99,7 +105,7 @@ ENDPROC(copy_page)
25228
25229 #include <asm/cpufeature.h>
25230
25231- .section .altinstr_replacement,"ax"
25232+ .section .altinstr_replacement,"a"
25233 1: .byte 0xeb /* jmp <disp8> */
25234 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
25235 2:
25236diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
25237index a30ca15..d25fab6 100644
25238--- a/arch/x86/lib/copy_user_64.S
25239+++ b/arch/x86/lib/copy_user_64.S
25240@@ -18,6 +18,7 @@
25241 #include <asm/alternative-asm.h>
25242 #include <asm/asm.h>
25243 #include <asm/smap.h>
25244+#include <asm/pgtable.h>
25245
25246 /*
25247 * By placing feature2 after feature1 in altinstructions section, we logically
25248@@ -31,7 +32,7 @@
25249 .byte 0xe9 /* 32bit jump */
25250 .long \orig-1f /* by default jump to orig */
25251 1:
25252- .section .altinstr_replacement,"ax"
25253+ .section .altinstr_replacement,"a"
25254 2: .byte 0xe9 /* near jump with 32bit immediate */
25255 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
25256 3: .byte 0xe9 /* near jump with 32bit immediate */
25257@@ -70,47 +71,20 @@
25258 #endif
25259 .endm
25260
25261-/* Standard copy_to_user with segment limit checking */
25262-ENTRY(_copy_to_user)
25263- CFI_STARTPROC
25264- GET_THREAD_INFO(%rax)
25265- movq %rdi,%rcx
25266- addq %rdx,%rcx
25267- jc bad_to_user
25268- cmpq TI_addr_limit(%rax),%rcx
25269- ja bad_to_user
25270- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
25271- copy_user_generic_unrolled,copy_user_generic_string, \
25272- copy_user_enhanced_fast_string
25273- CFI_ENDPROC
25274-ENDPROC(_copy_to_user)
25275-
25276-/* Standard copy_from_user with segment limit checking */
25277-ENTRY(_copy_from_user)
25278- CFI_STARTPROC
25279- GET_THREAD_INFO(%rax)
25280- movq %rsi,%rcx
25281- addq %rdx,%rcx
25282- jc bad_from_user
25283- cmpq TI_addr_limit(%rax),%rcx
25284- ja bad_from_user
25285- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
25286- copy_user_generic_unrolled,copy_user_generic_string, \
25287- copy_user_enhanced_fast_string
25288- CFI_ENDPROC
25289-ENDPROC(_copy_from_user)
25290-
25291 .section .fixup,"ax"
25292 /* must zero dest */
25293 ENTRY(bad_from_user)
25294 bad_from_user:
25295 CFI_STARTPROC
25296+ testl %edx,%edx
25297+ js bad_to_user
25298 movl %edx,%ecx
25299 xorl %eax,%eax
25300 rep
25301 stosb
25302 bad_to_user:
25303 movl %edx,%eax
25304+ pax_force_retaddr
25305 ret
25306 CFI_ENDPROC
25307 ENDPROC(bad_from_user)
25308@@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
25309 jz 17f
25310 1: movq (%rsi),%r8
25311 2: movq 1*8(%rsi),%r9
25312-3: movq 2*8(%rsi),%r10
25313+3: movq 2*8(%rsi),%rax
25314 4: movq 3*8(%rsi),%r11
25315 5: movq %r8,(%rdi)
25316 6: movq %r9,1*8(%rdi)
25317-7: movq %r10,2*8(%rdi)
25318+7: movq %rax,2*8(%rdi)
25319 8: movq %r11,3*8(%rdi)
25320 9: movq 4*8(%rsi),%r8
25321 10: movq 5*8(%rsi),%r9
25322-11: movq 6*8(%rsi),%r10
25323+11: movq 6*8(%rsi),%rax
25324 12: movq 7*8(%rsi),%r11
25325 13: movq %r8,4*8(%rdi)
25326 14: movq %r9,5*8(%rdi)
25327-15: movq %r10,6*8(%rdi)
25328+15: movq %rax,6*8(%rdi)
25329 16: movq %r11,7*8(%rdi)
25330 leaq 64(%rsi),%rsi
25331 leaq 64(%rdi),%rdi
25332@@ -180,6 +154,7 @@ ENTRY(copy_user_generic_unrolled)
25333 jnz 21b
25334 23: xor %eax,%eax
25335 ASM_CLAC
25336+ pax_force_retaddr
25337 ret
25338
25339 .section .fixup,"ax"
25340@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
25341 movsb
25342 4: xorl %eax,%eax
25343 ASM_CLAC
25344+ pax_force_retaddr
25345 ret
25346
25347 .section .fixup,"ax"
25348@@ -286,6 +262,7 @@ ENTRY(copy_user_enhanced_fast_string)
25349 movsb
25350 2: xorl %eax,%eax
25351 ASM_CLAC
25352+ pax_force_retaddr
25353 ret
25354
25355 .section .fixup,"ax"
25356diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
25357index 6a4f43c..f08b4a2 100644
25358--- a/arch/x86/lib/copy_user_nocache_64.S
25359+++ b/arch/x86/lib/copy_user_nocache_64.S
25360@@ -8,6 +8,7 @@
25361
25362 #include <linux/linkage.h>
25363 #include <asm/dwarf2.h>
25364+#include <asm/alternative-asm.h>
25365
25366 #define FIX_ALIGNMENT 1
25367
25368@@ -16,6 +17,7 @@
25369 #include <asm/thread_info.h>
25370 #include <asm/asm.h>
25371 #include <asm/smap.h>
25372+#include <asm/pgtable.h>
25373
25374 .macro ALIGN_DESTINATION
25375 #ifdef FIX_ALIGNMENT
25376@@ -49,6 +51,15 @@
25377 */
25378 ENTRY(__copy_user_nocache)
25379 CFI_STARTPROC
25380+
25381+#ifdef CONFIG_PAX_MEMORY_UDEREF
25382+ mov pax_user_shadow_base,%rcx
25383+ cmp %rcx,%rsi
25384+ jae 1f
25385+ add %rcx,%rsi
25386+1:
25387+#endif
25388+
25389 ASM_STAC
25390 cmpl $8,%edx
25391 jb 20f /* less then 8 bytes, go to byte copy loop */
25392@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
25393 jz 17f
25394 1: movq (%rsi),%r8
25395 2: movq 1*8(%rsi),%r9
25396-3: movq 2*8(%rsi),%r10
25397+3: movq 2*8(%rsi),%rax
25398 4: movq 3*8(%rsi),%r11
25399 5: movnti %r8,(%rdi)
25400 6: movnti %r9,1*8(%rdi)
25401-7: movnti %r10,2*8(%rdi)
25402+7: movnti %rax,2*8(%rdi)
25403 8: movnti %r11,3*8(%rdi)
25404 9: movq 4*8(%rsi),%r8
25405 10: movq 5*8(%rsi),%r9
25406-11: movq 6*8(%rsi),%r10
25407+11: movq 6*8(%rsi),%rax
25408 12: movq 7*8(%rsi),%r11
25409 13: movnti %r8,4*8(%rdi)
25410 14: movnti %r9,5*8(%rdi)
25411-15: movnti %r10,6*8(%rdi)
25412+15: movnti %rax,6*8(%rdi)
25413 16: movnti %r11,7*8(%rdi)
25414 leaq 64(%rsi),%rsi
25415 leaq 64(%rdi),%rdi
25416@@ -99,6 +110,7 @@ ENTRY(__copy_user_nocache)
25417 23: xorl %eax,%eax
25418 ASM_CLAC
25419 sfence
25420+ pax_force_retaddr
25421 ret
25422
25423 .section .fixup,"ax"
25424diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
25425index 2419d5f..953ee51 100644
25426--- a/arch/x86/lib/csum-copy_64.S
25427+++ b/arch/x86/lib/csum-copy_64.S
25428@@ -9,6 +9,7 @@
25429 #include <asm/dwarf2.h>
25430 #include <asm/errno.h>
25431 #include <asm/asm.h>
25432+#include <asm/alternative-asm.h>
25433
25434 /*
25435 * Checksum copy with exception handling.
25436@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
25437 CFI_RESTORE rbp
25438 addq $7*8, %rsp
25439 CFI_ADJUST_CFA_OFFSET -7*8
25440+ pax_force_retaddr 0, 1
25441 ret
25442 CFI_RESTORE_STATE
25443
25444diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
25445index 25b7ae8..169fafc 100644
25446--- a/arch/x86/lib/csum-wrappers_64.c
25447+++ b/arch/x86/lib/csum-wrappers_64.c
25448@@ -52,7 +52,7 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
25449 len -= 2;
25450 }
25451 }
25452- isum = csum_partial_copy_generic((__force const void *)src,
25453+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
25454 dst, len, isum, errp, NULL);
25455 if (unlikely(*errp))
25456 goto out_err;
25457@@ -105,7 +105,7 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
25458 }
25459
25460 *errp = 0;
25461- return csum_partial_copy_generic(src, (void __force *)dst,
25462+ return csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
25463 len, isum, NULL, errp);
25464 }
25465 EXPORT_SYMBOL(csum_partial_copy_to_user);
25466diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
25467index a451235..79fb5cf 100644
25468--- a/arch/x86/lib/getuser.S
25469+++ b/arch/x86/lib/getuser.S
25470@@ -33,17 +33,40 @@
25471 #include <asm/thread_info.h>
25472 #include <asm/asm.h>
25473 #include <asm/smap.h>
25474+#include <asm/segment.h>
25475+#include <asm/pgtable.h>
25476+#include <asm/alternative-asm.h>
25477+
25478+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
25479+#define __copyuser_seg gs;
25480+#else
25481+#define __copyuser_seg
25482+#endif
25483
25484 .text
25485 ENTRY(__get_user_1)
25486 CFI_STARTPROC
25487+
25488+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
25489 GET_THREAD_INFO(%_ASM_DX)
25490 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
25491 jae bad_get_user
25492 ASM_STAC
25493-1: movzbl (%_ASM_AX),%edx
25494+
25495+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25496+ mov pax_user_shadow_base,%_ASM_DX
25497+ cmp %_ASM_DX,%_ASM_AX
25498+ jae 1234f
25499+ add %_ASM_DX,%_ASM_AX
25500+1234:
25501+#endif
25502+
25503+#endif
25504+
25505+1: __copyuser_seg movzbl (%_ASM_AX),%edx
25506 xor %eax,%eax
25507 ASM_CLAC
25508+ pax_force_retaddr
25509 ret
25510 CFI_ENDPROC
25511 ENDPROC(__get_user_1)
25512@@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
25513 ENTRY(__get_user_2)
25514 CFI_STARTPROC
25515 add $1,%_ASM_AX
25516+
25517+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
25518 jc bad_get_user
25519 GET_THREAD_INFO(%_ASM_DX)
25520 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
25521 jae bad_get_user
25522 ASM_STAC
25523-2: movzwl -1(%_ASM_AX),%edx
25524+
25525+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25526+ mov pax_user_shadow_base,%_ASM_DX
25527+ cmp %_ASM_DX,%_ASM_AX
25528+ jae 1234f
25529+ add %_ASM_DX,%_ASM_AX
25530+1234:
25531+#endif
25532+
25533+#endif
25534+
25535+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
25536 xor %eax,%eax
25537 ASM_CLAC
25538+ pax_force_retaddr
25539 ret
25540 CFI_ENDPROC
25541 ENDPROC(__get_user_2)
25542@@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
25543 ENTRY(__get_user_4)
25544 CFI_STARTPROC
25545 add $3,%_ASM_AX
25546+
25547+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
25548 jc bad_get_user
25549 GET_THREAD_INFO(%_ASM_DX)
25550 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
25551 jae bad_get_user
25552 ASM_STAC
25553-3: movl -3(%_ASM_AX),%edx
25554+
25555+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25556+ mov pax_user_shadow_base,%_ASM_DX
25557+ cmp %_ASM_DX,%_ASM_AX
25558+ jae 1234f
25559+ add %_ASM_DX,%_ASM_AX
25560+1234:
25561+#endif
25562+
25563+#endif
25564+
25565+3: __copyuser_seg movl -3(%_ASM_AX),%edx
25566 xor %eax,%eax
25567 ASM_CLAC
25568+ pax_force_retaddr
25569 ret
25570 CFI_ENDPROC
25571 ENDPROC(__get_user_4)
25572@@ -86,10 +137,20 @@ ENTRY(__get_user_8)
25573 GET_THREAD_INFO(%_ASM_DX)
25574 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
25575 jae bad_get_user
25576+
25577+#ifdef CONFIG_PAX_MEMORY_UDEREF
25578+ mov pax_user_shadow_base,%_ASM_DX
25579+ cmp %_ASM_DX,%_ASM_AX
25580+ jae 1234f
25581+ add %_ASM_DX,%_ASM_AX
25582+1234:
25583+#endif
25584+
25585 ASM_STAC
25586 4: movq -7(%_ASM_AX),%rdx
25587 xor %eax,%eax
25588 ASM_CLAC
25589+ pax_force_retaddr
25590 ret
25591 #else
25592 add $7,%_ASM_AX
25593@@ -102,6 +163,7 @@ ENTRY(__get_user_8)
25594 5: movl -3(%_ASM_AX),%ecx
25595 xor %eax,%eax
25596 ASM_CLAC
25597+ pax_force_retaddr
25598 ret
25599 #endif
25600 CFI_ENDPROC
25601@@ -113,6 +175,7 @@ bad_get_user:
25602 xor %edx,%edx
25603 mov $(-EFAULT),%_ASM_AX
25604 ASM_CLAC
25605+ pax_force_retaddr
25606 ret
25607 CFI_ENDPROC
25608 END(bad_get_user)
25609@@ -124,6 +187,7 @@ bad_get_user_8:
25610 xor %ecx,%ecx
25611 mov $(-EFAULT),%_ASM_AX
25612 ASM_CLAC
25613+ pax_force_retaddr
25614 ret
25615 CFI_ENDPROC
25616 END(bad_get_user_8)
25617diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
25618index 54fcffe..7be149e 100644
25619--- a/arch/x86/lib/insn.c
25620+++ b/arch/x86/lib/insn.c
25621@@ -20,8 +20,10 @@
25622
25623 #ifdef __KERNEL__
25624 #include <linux/string.h>
25625+#include <asm/pgtable_types.h>
25626 #else
25627 #include <string.h>
25628+#define ktla_ktva(addr) addr
25629 #endif
25630 #include <asm/inat.h>
25631 #include <asm/insn.h>
25632@@ -53,8 +55,8 @@
25633 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
25634 {
25635 memset(insn, 0, sizeof(*insn));
25636- insn->kaddr = kaddr;
25637- insn->next_byte = kaddr;
25638+ insn->kaddr = ktla_ktva(kaddr);
25639+ insn->next_byte = ktla_ktva(kaddr);
25640 insn->x86_64 = x86_64 ? 1 : 0;
25641 insn->opnd_bytes = 4;
25642 if (x86_64)
25643diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
25644index 05a95e7..326f2fa 100644
25645--- a/arch/x86/lib/iomap_copy_64.S
25646+++ b/arch/x86/lib/iomap_copy_64.S
25647@@ -17,6 +17,7 @@
25648
25649 #include <linux/linkage.h>
25650 #include <asm/dwarf2.h>
25651+#include <asm/alternative-asm.h>
25652
25653 /*
25654 * override generic version in lib/iomap_copy.c
25655@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
25656 CFI_STARTPROC
25657 movl %edx,%ecx
25658 rep movsd
25659+ pax_force_retaddr
25660 ret
25661 CFI_ENDPROC
25662 ENDPROC(__iowrite32_copy)
25663diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
25664index 1c273be..da9cc0e 100644
25665--- a/arch/x86/lib/memcpy_64.S
25666+++ b/arch/x86/lib/memcpy_64.S
25667@@ -33,6 +33,7 @@
25668 rep movsq
25669 movl %edx, %ecx
25670 rep movsb
25671+ pax_force_retaddr
25672 ret
25673 .Lmemcpy_e:
25674 .previous
25675@@ -49,6 +50,7 @@
25676 movq %rdi, %rax
25677 movq %rdx, %rcx
25678 rep movsb
25679+ pax_force_retaddr
25680 ret
25681 .Lmemcpy_e_e:
25682 .previous
25683@@ -76,13 +78,13 @@ ENTRY(memcpy)
25684 */
25685 movq 0*8(%rsi), %r8
25686 movq 1*8(%rsi), %r9
25687- movq 2*8(%rsi), %r10
25688+ movq 2*8(%rsi), %rcx
25689 movq 3*8(%rsi), %r11
25690 leaq 4*8(%rsi), %rsi
25691
25692 movq %r8, 0*8(%rdi)
25693 movq %r9, 1*8(%rdi)
25694- movq %r10, 2*8(%rdi)
25695+ movq %rcx, 2*8(%rdi)
25696 movq %r11, 3*8(%rdi)
25697 leaq 4*8(%rdi), %rdi
25698 jae .Lcopy_forward_loop
25699@@ -105,12 +107,12 @@ ENTRY(memcpy)
25700 subq $0x20, %rdx
25701 movq -1*8(%rsi), %r8
25702 movq -2*8(%rsi), %r9
25703- movq -3*8(%rsi), %r10
25704+ movq -3*8(%rsi), %rcx
25705 movq -4*8(%rsi), %r11
25706 leaq -4*8(%rsi), %rsi
25707 movq %r8, -1*8(%rdi)
25708 movq %r9, -2*8(%rdi)
25709- movq %r10, -3*8(%rdi)
25710+ movq %rcx, -3*8(%rdi)
25711 movq %r11, -4*8(%rdi)
25712 leaq -4*8(%rdi), %rdi
25713 jae .Lcopy_backward_loop
25714@@ -130,12 +132,13 @@ ENTRY(memcpy)
25715 */
25716 movq 0*8(%rsi), %r8
25717 movq 1*8(%rsi), %r9
25718- movq -2*8(%rsi, %rdx), %r10
25719+ movq -2*8(%rsi, %rdx), %rcx
25720 movq -1*8(%rsi, %rdx), %r11
25721 movq %r8, 0*8(%rdi)
25722 movq %r9, 1*8(%rdi)
25723- movq %r10, -2*8(%rdi, %rdx)
25724+ movq %rcx, -2*8(%rdi, %rdx)
25725 movq %r11, -1*8(%rdi, %rdx)
25726+ pax_force_retaddr
25727 retq
25728 .p2align 4
25729 .Lless_16bytes:
25730@@ -148,6 +151,7 @@ ENTRY(memcpy)
25731 movq -1*8(%rsi, %rdx), %r9
25732 movq %r8, 0*8(%rdi)
25733 movq %r9, -1*8(%rdi, %rdx)
25734+ pax_force_retaddr
25735 retq
25736 .p2align 4
25737 .Lless_8bytes:
25738@@ -161,6 +165,7 @@ ENTRY(memcpy)
25739 movl -4(%rsi, %rdx), %r8d
25740 movl %ecx, (%rdi)
25741 movl %r8d, -4(%rdi, %rdx)
25742+ pax_force_retaddr
25743 retq
25744 .p2align 4
25745 .Lless_3bytes:
25746@@ -179,6 +184,7 @@ ENTRY(memcpy)
25747 movb %cl, (%rdi)
25748
25749 .Lend:
25750+ pax_force_retaddr
25751 retq
25752 CFI_ENDPROC
25753 ENDPROC(memcpy)
25754diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
25755index ee16461..c39c199 100644
25756--- a/arch/x86/lib/memmove_64.S
25757+++ b/arch/x86/lib/memmove_64.S
25758@@ -61,13 +61,13 @@ ENTRY(memmove)
25759 5:
25760 sub $0x20, %rdx
25761 movq 0*8(%rsi), %r11
25762- movq 1*8(%rsi), %r10
25763+ movq 1*8(%rsi), %rcx
25764 movq 2*8(%rsi), %r9
25765 movq 3*8(%rsi), %r8
25766 leaq 4*8(%rsi), %rsi
25767
25768 movq %r11, 0*8(%rdi)
25769- movq %r10, 1*8(%rdi)
25770+ movq %rcx, 1*8(%rdi)
25771 movq %r9, 2*8(%rdi)
25772 movq %r8, 3*8(%rdi)
25773 leaq 4*8(%rdi), %rdi
25774@@ -81,10 +81,10 @@ ENTRY(memmove)
25775 4:
25776 movq %rdx, %rcx
25777 movq -8(%rsi, %rdx), %r11
25778- lea -8(%rdi, %rdx), %r10
25779+ lea -8(%rdi, %rdx), %r9
25780 shrq $3, %rcx
25781 rep movsq
25782- movq %r11, (%r10)
25783+ movq %r11, (%r9)
25784 jmp 13f
25785 .Lmemmove_end_forward:
25786
25787@@ -95,14 +95,14 @@ ENTRY(memmove)
25788 7:
25789 movq %rdx, %rcx
25790 movq (%rsi), %r11
25791- movq %rdi, %r10
25792+ movq %rdi, %r9
25793 leaq -8(%rsi, %rdx), %rsi
25794 leaq -8(%rdi, %rdx), %rdi
25795 shrq $3, %rcx
25796 std
25797 rep movsq
25798 cld
25799- movq %r11, (%r10)
25800+ movq %r11, (%r9)
25801 jmp 13f
25802
25803 /*
25804@@ -127,13 +127,13 @@ ENTRY(memmove)
25805 8:
25806 subq $0x20, %rdx
25807 movq -1*8(%rsi), %r11
25808- movq -2*8(%rsi), %r10
25809+ movq -2*8(%rsi), %rcx
25810 movq -3*8(%rsi), %r9
25811 movq -4*8(%rsi), %r8
25812 leaq -4*8(%rsi), %rsi
25813
25814 movq %r11, -1*8(%rdi)
25815- movq %r10, -2*8(%rdi)
25816+ movq %rcx, -2*8(%rdi)
25817 movq %r9, -3*8(%rdi)
25818 movq %r8, -4*8(%rdi)
25819 leaq -4*8(%rdi), %rdi
25820@@ -151,11 +151,11 @@ ENTRY(memmove)
25821 * Move data from 16 bytes to 31 bytes.
25822 */
25823 movq 0*8(%rsi), %r11
25824- movq 1*8(%rsi), %r10
25825+ movq 1*8(%rsi), %rcx
25826 movq -2*8(%rsi, %rdx), %r9
25827 movq -1*8(%rsi, %rdx), %r8
25828 movq %r11, 0*8(%rdi)
25829- movq %r10, 1*8(%rdi)
25830+ movq %rcx, 1*8(%rdi)
25831 movq %r9, -2*8(%rdi, %rdx)
25832 movq %r8, -1*8(%rdi, %rdx)
25833 jmp 13f
25834@@ -167,9 +167,9 @@ ENTRY(memmove)
25835 * Move data from 8 bytes to 15 bytes.
25836 */
25837 movq 0*8(%rsi), %r11
25838- movq -1*8(%rsi, %rdx), %r10
25839+ movq -1*8(%rsi, %rdx), %r9
25840 movq %r11, 0*8(%rdi)
25841- movq %r10, -1*8(%rdi, %rdx)
25842+ movq %r9, -1*8(%rdi, %rdx)
25843 jmp 13f
25844 10:
25845 cmpq $4, %rdx
25846@@ -178,9 +178,9 @@ ENTRY(memmove)
25847 * Move data from 4 bytes to 7 bytes.
25848 */
25849 movl (%rsi), %r11d
25850- movl -4(%rsi, %rdx), %r10d
25851+ movl -4(%rsi, %rdx), %r9d
25852 movl %r11d, (%rdi)
25853- movl %r10d, -4(%rdi, %rdx)
25854+ movl %r9d, -4(%rdi, %rdx)
25855 jmp 13f
25856 11:
25857 cmp $2, %rdx
25858@@ -189,9 +189,9 @@ ENTRY(memmove)
25859 * Move data from 2 bytes to 3 bytes.
25860 */
25861 movw (%rsi), %r11w
25862- movw -2(%rsi, %rdx), %r10w
25863+ movw -2(%rsi, %rdx), %r9w
25864 movw %r11w, (%rdi)
25865- movw %r10w, -2(%rdi, %rdx)
25866+ movw %r9w, -2(%rdi, %rdx)
25867 jmp 13f
25868 12:
25869 cmp $1, %rdx
25870@@ -202,6 +202,7 @@ ENTRY(memmove)
25871 movb (%rsi), %r11b
25872 movb %r11b, (%rdi)
25873 13:
25874+ pax_force_retaddr
25875 retq
25876 CFI_ENDPROC
25877
25878@@ -210,6 +211,7 @@ ENTRY(memmove)
25879 /* Forward moving data. */
25880 movq %rdx, %rcx
25881 rep movsb
25882+ pax_force_retaddr
25883 retq
25884 .Lmemmove_end_forward_efs:
25885 .previous
25886diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
25887index 2dcb380..963660a 100644
25888--- a/arch/x86/lib/memset_64.S
25889+++ b/arch/x86/lib/memset_64.S
25890@@ -30,6 +30,7 @@
25891 movl %edx,%ecx
25892 rep stosb
25893 movq %r9,%rax
25894+ pax_force_retaddr
25895 ret
25896 .Lmemset_e:
25897 .previous
25898@@ -52,6 +53,7 @@
25899 movq %rdx,%rcx
25900 rep stosb
25901 movq %r9,%rax
25902+ pax_force_retaddr
25903 ret
25904 .Lmemset_e_e:
25905 .previous
25906@@ -59,7 +61,7 @@
25907 ENTRY(memset)
25908 ENTRY(__memset)
25909 CFI_STARTPROC
25910- movq %rdi,%r10
25911+ movq %rdi,%r11
25912
25913 /* expand byte value */
25914 movzbl %sil,%ecx
25915@@ -117,7 +119,8 @@ ENTRY(__memset)
25916 jnz .Lloop_1
25917
25918 .Lende:
25919- movq %r10,%rax
25920+ movq %r11,%rax
25921+ pax_force_retaddr
25922 ret
25923
25924 CFI_RESTORE_STATE
25925diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
25926index c9f2d9b..e7fd2c0 100644
25927--- a/arch/x86/lib/mmx_32.c
25928+++ b/arch/x86/lib/mmx_32.c
25929@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
25930 {
25931 void *p;
25932 int i;
25933+ unsigned long cr0;
25934
25935 if (unlikely(in_interrupt()))
25936 return __memcpy(to, from, len);
25937@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
25938 kernel_fpu_begin();
25939
25940 __asm__ __volatile__ (
25941- "1: prefetch (%0)\n" /* This set is 28 bytes */
25942- " prefetch 64(%0)\n"
25943- " prefetch 128(%0)\n"
25944- " prefetch 192(%0)\n"
25945- " prefetch 256(%0)\n"
25946+ "1: prefetch (%1)\n" /* This set is 28 bytes */
25947+ " prefetch 64(%1)\n"
25948+ " prefetch 128(%1)\n"
25949+ " prefetch 192(%1)\n"
25950+ " prefetch 256(%1)\n"
25951 "2: \n"
25952 ".section .fixup, \"ax\"\n"
25953- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
25954+ "3: \n"
25955+
25956+#ifdef CONFIG_PAX_KERNEXEC
25957+ " movl %%cr0, %0\n"
25958+ " movl %0, %%eax\n"
25959+ " andl $0xFFFEFFFF, %%eax\n"
25960+ " movl %%eax, %%cr0\n"
25961+#endif
25962+
25963+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
25964+
25965+#ifdef CONFIG_PAX_KERNEXEC
25966+ " movl %0, %%cr0\n"
25967+#endif
25968+
25969 " jmp 2b\n"
25970 ".previous\n"
25971 _ASM_EXTABLE(1b, 3b)
25972- : : "r" (from));
25973+ : "=&r" (cr0) : "r" (from) : "ax");
25974
25975 for ( ; i > 5; i--) {
25976 __asm__ __volatile__ (
25977- "1: prefetch 320(%0)\n"
25978- "2: movq (%0), %%mm0\n"
25979- " movq 8(%0), %%mm1\n"
25980- " movq 16(%0), %%mm2\n"
25981- " movq 24(%0), %%mm3\n"
25982- " movq %%mm0, (%1)\n"
25983- " movq %%mm1, 8(%1)\n"
25984- " movq %%mm2, 16(%1)\n"
25985- " movq %%mm3, 24(%1)\n"
25986- " movq 32(%0), %%mm0\n"
25987- " movq 40(%0), %%mm1\n"
25988- " movq 48(%0), %%mm2\n"
25989- " movq 56(%0), %%mm3\n"
25990- " movq %%mm0, 32(%1)\n"
25991- " movq %%mm1, 40(%1)\n"
25992- " movq %%mm2, 48(%1)\n"
25993- " movq %%mm3, 56(%1)\n"
25994+ "1: prefetch 320(%1)\n"
25995+ "2: movq (%1), %%mm0\n"
25996+ " movq 8(%1), %%mm1\n"
25997+ " movq 16(%1), %%mm2\n"
25998+ " movq 24(%1), %%mm3\n"
25999+ " movq %%mm0, (%2)\n"
26000+ " movq %%mm1, 8(%2)\n"
26001+ " movq %%mm2, 16(%2)\n"
26002+ " movq %%mm3, 24(%2)\n"
26003+ " movq 32(%1), %%mm0\n"
26004+ " movq 40(%1), %%mm1\n"
26005+ " movq 48(%1), %%mm2\n"
26006+ " movq 56(%1), %%mm3\n"
26007+ " movq %%mm0, 32(%2)\n"
26008+ " movq %%mm1, 40(%2)\n"
26009+ " movq %%mm2, 48(%2)\n"
26010+ " movq %%mm3, 56(%2)\n"
26011 ".section .fixup, \"ax\"\n"
26012- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
26013+ "3:\n"
26014+
26015+#ifdef CONFIG_PAX_KERNEXEC
26016+ " movl %%cr0, %0\n"
26017+ " movl %0, %%eax\n"
26018+ " andl $0xFFFEFFFF, %%eax\n"
26019+ " movl %%eax, %%cr0\n"
26020+#endif
26021+
26022+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
26023+
26024+#ifdef CONFIG_PAX_KERNEXEC
26025+ " movl %0, %%cr0\n"
26026+#endif
26027+
26028 " jmp 2b\n"
26029 ".previous\n"
26030 _ASM_EXTABLE(1b, 3b)
26031- : : "r" (from), "r" (to) : "memory");
26032+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
26033
26034 from += 64;
26035 to += 64;
26036@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
26037 static void fast_copy_page(void *to, void *from)
26038 {
26039 int i;
26040+ unsigned long cr0;
26041
26042 kernel_fpu_begin();
26043
26044@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
26045 * but that is for later. -AV
26046 */
26047 __asm__ __volatile__(
26048- "1: prefetch (%0)\n"
26049- " prefetch 64(%0)\n"
26050- " prefetch 128(%0)\n"
26051- " prefetch 192(%0)\n"
26052- " prefetch 256(%0)\n"
26053+ "1: prefetch (%1)\n"
26054+ " prefetch 64(%1)\n"
26055+ " prefetch 128(%1)\n"
26056+ " prefetch 192(%1)\n"
26057+ " prefetch 256(%1)\n"
26058 "2: \n"
26059 ".section .fixup, \"ax\"\n"
26060- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
26061+ "3: \n"
26062+
26063+#ifdef CONFIG_PAX_KERNEXEC
26064+ " movl %%cr0, %0\n"
26065+ " movl %0, %%eax\n"
26066+ " andl $0xFFFEFFFF, %%eax\n"
26067+ " movl %%eax, %%cr0\n"
26068+#endif
26069+
26070+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
26071+
26072+#ifdef CONFIG_PAX_KERNEXEC
26073+ " movl %0, %%cr0\n"
26074+#endif
26075+
26076 " jmp 2b\n"
26077 ".previous\n"
26078- _ASM_EXTABLE(1b, 3b) : : "r" (from));
26079+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
26080
26081 for (i = 0; i < (4096-320)/64; i++) {
26082 __asm__ __volatile__ (
26083- "1: prefetch 320(%0)\n"
26084- "2: movq (%0), %%mm0\n"
26085- " movntq %%mm0, (%1)\n"
26086- " movq 8(%0), %%mm1\n"
26087- " movntq %%mm1, 8(%1)\n"
26088- " movq 16(%0), %%mm2\n"
26089- " movntq %%mm2, 16(%1)\n"
26090- " movq 24(%0), %%mm3\n"
26091- " movntq %%mm3, 24(%1)\n"
26092- " movq 32(%0), %%mm4\n"
26093- " movntq %%mm4, 32(%1)\n"
26094- " movq 40(%0), %%mm5\n"
26095- " movntq %%mm5, 40(%1)\n"
26096- " movq 48(%0), %%mm6\n"
26097- " movntq %%mm6, 48(%1)\n"
26098- " movq 56(%0), %%mm7\n"
26099- " movntq %%mm7, 56(%1)\n"
26100+ "1: prefetch 320(%1)\n"
26101+ "2: movq (%1), %%mm0\n"
26102+ " movntq %%mm0, (%2)\n"
26103+ " movq 8(%1), %%mm1\n"
26104+ " movntq %%mm1, 8(%2)\n"
26105+ " movq 16(%1), %%mm2\n"
26106+ " movntq %%mm2, 16(%2)\n"
26107+ " movq 24(%1), %%mm3\n"
26108+ " movntq %%mm3, 24(%2)\n"
26109+ " movq 32(%1), %%mm4\n"
26110+ " movntq %%mm4, 32(%2)\n"
26111+ " movq 40(%1), %%mm5\n"
26112+ " movntq %%mm5, 40(%2)\n"
26113+ " movq 48(%1), %%mm6\n"
26114+ " movntq %%mm6, 48(%2)\n"
26115+ " movq 56(%1), %%mm7\n"
26116+ " movntq %%mm7, 56(%2)\n"
26117 ".section .fixup, \"ax\"\n"
26118- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
26119+ "3:\n"
26120+
26121+#ifdef CONFIG_PAX_KERNEXEC
26122+ " movl %%cr0, %0\n"
26123+ " movl %0, %%eax\n"
26124+ " andl $0xFFFEFFFF, %%eax\n"
26125+ " movl %%eax, %%cr0\n"
26126+#endif
26127+
26128+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
26129+
26130+#ifdef CONFIG_PAX_KERNEXEC
26131+ " movl %0, %%cr0\n"
26132+#endif
26133+
26134 " jmp 2b\n"
26135 ".previous\n"
26136- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
26137+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
26138
26139 from += 64;
26140 to += 64;
26141@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
26142 static void fast_copy_page(void *to, void *from)
26143 {
26144 int i;
26145+ unsigned long cr0;
26146
26147 kernel_fpu_begin();
26148
26149 __asm__ __volatile__ (
26150- "1: prefetch (%0)\n"
26151- " prefetch 64(%0)\n"
26152- " prefetch 128(%0)\n"
26153- " prefetch 192(%0)\n"
26154- " prefetch 256(%0)\n"
26155+ "1: prefetch (%1)\n"
26156+ " prefetch 64(%1)\n"
26157+ " prefetch 128(%1)\n"
26158+ " prefetch 192(%1)\n"
26159+ " prefetch 256(%1)\n"
26160 "2: \n"
26161 ".section .fixup, \"ax\"\n"
26162- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
26163+ "3: \n"
26164+
26165+#ifdef CONFIG_PAX_KERNEXEC
26166+ " movl %%cr0, %0\n"
26167+ " movl %0, %%eax\n"
26168+ " andl $0xFFFEFFFF, %%eax\n"
26169+ " movl %%eax, %%cr0\n"
26170+#endif
26171+
26172+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
26173+
26174+#ifdef CONFIG_PAX_KERNEXEC
26175+ " movl %0, %%cr0\n"
26176+#endif
26177+
26178 " jmp 2b\n"
26179 ".previous\n"
26180- _ASM_EXTABLE(1b, 3b) : : "r" (from));
26181+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
26182
26183 for (i = 0; i < 4096/64; i++) {
26184 __asm__ __volatile__ (
26185- "1: prefetch 320(%0)\n"
26186- "2: movq (%0), %%mm0\n"
26187- " movq 8(%0), %%mm1\n"
26188- " movq 16(%0), %%mm2\n"
26189- " movq 24(%0), %%mm3\n"
26190- " movq %%mm0, (%1)\n"
26191- " movq %%mm1, 8(%1)\n"
26192- " movq %%mm2, 16(%1)\n"
26193- " movq %%mm3, 24(%1)\n"
26194- " movq 32(%0), %%mm0\n"
26195- " movq 40(%0), %%mm1\n"
26196- " movq 48(%0), %%mm2\n"
26197- " movq 56(%0), %%mm3\n"
26198- " movq %%mm0, 32(%1)\n"
26199- " movq %%mm1, 40(%1)\n"
26200- " movq %%mm2, 48(%1)\n"
26201- " movq %%mm3, 56(%1)\n"
26202+ "1: prefetch 320(%1)\n"
26203+ "2: movq (%1), %%mm0\n"
26204+ " movq 8(%1), %%mm1\n"
26205+ " movq 16(%1), %%mm2\n"
26206+ " movq 24(%1), %%mm3\n"
26207+ " movq %%mm0, (%2)\n"
26208+ " movq %%mm1, 8(%2)\n"
26209+ " movq %%mm2, 16(%2)\n"
26210+ " movq %%mm3, 24(%2)\n"
26211+ " movq 32(%1), %%mm0\n"
26212+ " movq 40(%1), %%mm1\n"
26213+ " movq 48(%1), %%mm2\n"
26214+ " movq 56(%1), %%mm3\n"
26215+ " movq %%mm0, 32(%2)\n"
26216+ " movq %%mm1, 40(%2)\n"
26217+ " movq %%mm2, 48(%2)\n"
26218+ " movq %%mm3, 56(%2)\n"
26219 ".section .fixup, \"ax\"\n"
26220- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
26221+ "3:\n"
26222+
26223+#ifdef CONFIG_PAX_KERNEXEC
26224+ " movl %%cr0, %0\n"
26225+ " movl %0, %%eax\n"
26226+ " andl $0xFFFEFFFF, %%eax\n"
26227+ " movl %%eax, %%cr0\n"
26228+#endif
26229+
26230+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
26231+
26232+#ifdef CONFIG_PAX_KERNEXEC
26233+ " movl %0, %%cr0\n"
26234+#endif
26235+
26236 " jmp 2b\n"
26237 ".previous\n"
26238 _ASM_EXTABLE(1b, 3b)
26239- : : "r" (from), "r" (to) : "memory");
26240+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
26241
26242 from += 64;
26243 to += 64;
26244diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
26245index f6d13ee..aca5f0b 100644
26246--- a/arch/x86/lib/msr-reg.S
26247+++ b/arch/x86/lib/msr-reg.S
26248@@ -3,6 +3,7 @@
26249 #include <asm/dwarf2.h>
26250 #include <asm/asm.h>
26251 #include <asm/msr.h>
26252+#include <asm/alternative-asm.h>
26253
26254 #ifdef CONFIG_X86_64
26255 /*
26256@@ -16,7 +17,7 @@ ENTRY(\op\()_safe_regs)
26257 CFI_STARTPROC
26258 pushq_cfi %rbx
26259 pushq_cfi %rbp
26260- movq %rdi, %r10 /* Save pointer */
26261+ movq %rdi, %r9 /* Save pointer */
26262 xorl %r11d, %r11d /* Return value */
26263 movl (%rdi), %eax
26264 movl 4(%rdi), %ecx
26265@@ -27,16 +28,17 @@ ENTRY(\op\()_safe_regs)
26266 movl 28(%rdi), %edi
26267 CFI_REMEMBER_STATE
26268 1: \op
26269-2: movl %eax, (%r10)
26270+2: movl %eax, (%r9)
26271 movl %r11d, %eax /* Return value */
26272- movl %ecx, 4(%r10)
26273- movl %edx, 8(%r10)
26274- movl %ebx, 12(%r10)
26275- movl %ebp, 20(%r10)
26276- movl %esi, 24(%r10)
26277- movl %edi, 28(%r10)
26278+ movl %ecx, 4(%r9)
26279+ movl %edx, 8(%r9)
26280+ movl %ebx, 12(%r9)
26281+ movl %ebp, 20(%r9)
26282+ movl %esi, 24(%r9)
26283+ movl %edi, 28(%r9)
26284 popq_cfi %rbp
26285 popq_cfi %rbx
26286+ pax_force_retaddr
26287 ret
26288 3:
26289 CFI_RESTORE_STATE
26290diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
26291index fc6ba17..d4d989d 100644
26292--- a/arch/x86/lib/putuser.S
26293+++ b/arch/x86/lib/putuser.S
26294@@ -16,7 +16,9 @@
26295 #include <asm/errno.h>
26296 #include <asm/asm.h>
26297 #include <asm/smap.h>
26298-
26299+#include <asm/segment.h>
26300+#include <asm/pgtable.h>
26301+#include <asm/alternative-asm.h>
26302
26303 /*
26304 * __put_user_X
26305@@ -30,57 +32,125 @@
26306 * as they get called from within inline assembly.
26307 */
26308
26309-#define ENTER CFI_STARTPROC ; \
26310- GET_THREAD_INFO(%_ASM_BX)
26311-#define EXIT ASM_CLAC ; \
26312- ret ; \
26313+#define ENTER CFI_STARTPROC
26314+#define EXIT ASM_CLAC ; \
26315+ pax_force_retaddr ; \
26316+ ret ; \
26317 CFI_ENDPROC
26318
26319+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26320+#define _DEST %_ASM_CX,%_ASM_BX
26321+#else
26322+#define _DEST %_ASM_CX
26323+#endif
26324+
26325+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
26326+#define __copyuser_seg gs;
26327+#else
26328+#define __copyuser_seg
26329+#endif
26330+
26331 .text
26332 ENTRY(__put_user_1)
26333 ENTER
26334+
26335+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
26336+ GET_THREAD_INFO(%_ASM_BX)
26337 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
26338 jae bad_put_user
26339 ASM_STAC
26340-1: movb %al,(%_ASM_CX)
26341+
26342+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26343+ mov pax_user_shadow_base,%_ASM_BX
26344+ cmp %_ASM_BX,%_ASM_CX
26345+ jb 1234f
26346+ xor %ebx,%ebx
26347+1234:
26348+#endif
26349+
26350+#endif
26351+
26352+1: __copyuser_seg movb %al,(_DEST)
26353 xor %eax,%eax
26354 EXIT
26355 ENDPROC(__put_user_1)
26356
26357 ENTRY(__put_user_2)
26358 ENTER
26359+
26360+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
26361+ GET_THREAD_INFO(%_ASM_BX)
26362 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
26363 sub $1,%_ASM_BX
26364 cmp %_ASM_BX,%_ASM_CX
26365 jae bad_put_user
26366 ASM_STAC
26367-2: movw %ax,(%_ASM_CX)
26368+
26369+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26370+ mov pax_user_shadow_base,%_ASM_BX
26371+ cmp %_ASM_BX,%_ASM_CX
26372+ jb 1234f
26373+ xor %ebx,%ebx
26374+1234:
26375+#endif
26376+
26377+#endif
26378+
26379+2: __copyuser_seg movw %ax,(_DEST)
26380 xor %eax,%eax
26381 EXIT
26382 ENDPROC(__put_user_2)
26383
26384 ENTRY(__put_user_4)
26385 ENTER
26386+
26387+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
26388+ GET_THREAD_INFO(%_ASM_BX)
26389 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
26390 sub $3,%_ASM_BX
26391 cmp %_ASM_BX,%_ASM_CX
26392 jae bad_put_user
26393 ASM_STAC
26394-3: movl %eax,(%_ASM_CX)
26395+
26396+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26397+ mov pax_user_shadow_base,%_ASM_BX
26398+ cmp %_ASM_BX,%_ASM_CX
26399+ jb 1234f
26400+ xor %ebx,%ebx
26401+1234:
26402+#endif
26403+
26404+#endif
26405+
26406+3: __copyuser_seg movl %eax,(_DEST)
26407 xor %eax,%eax
26408 EXIT
26409 ENDPROC(__put_user_4)
26410
26411 ENTRY(__put_user_8)
26412 ENTER
26413+
26414+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
26415+ GET_THREAD_INFO(%_ASM_BX)
26416 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
26417 sub $7,%_ASM_BX
26418 cmp %_ASM_BX,%_ASM_CX
26419 jae bad_put_user
26420 ASM_STAC
26421-4: mov %_ASM_AX,(%_ASM_CX)
26422+
26423+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26424+ mov pax_user_shadow_base,%_ASM_BX
26425+ cmp %_ASM_BX,%_ASM_CX
26426+ jb 1234f
26427+ xor %ebx,%ebx
26428+1234:
26429+#endif
26430+
26431+#endif
26432+
26433+4: __copyuser_seg mov %_ASM_AX,(_DEST)
26434 #ifdef CONFIG_X86_32
26435-5: movl %edx,4(%_ASM_CX)
26436+5: __copyuser_seg movl %edx,4(_DEST)
26437 #endif
26438 xor %eax,%eax
26439 EXIT
26440diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
26441index 1cad221..de671ee 100644
26442--- a/arch/x86/lib/rwlock.S
26443+++ b/arch/x86/lib/rwlock.S
26444@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
26445 FRAME
26446 0: LOCK_PREFIX
26447 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
26448+
26449+#ifdef CONFIG_PAX_REFCOUNT
26450+ jno 1234f
26451+ LOCK_PREFIX
26452+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
26453+ int $4
26454+1234:
26455+ _ASM_EXTABLE(1234b, 1234b)
26456+#endif
26457+
26458 1: rep; nop
26459 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
26460 jne 1b
26461 LOCK_PREFIX
26462 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
26463+
26464+#ifdef CONFIG_PAX_REFCOUNT
26465+ jno 1234f
26466+ LOCK_PREFIX
26467+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
26468+ int $4
26469+1234:
26470+ _ASM_EXTABLE(1234b, 1234b)
26471+#endif
26472+
26473 jnz 0b
26474 ENDFRAME
26475+ pax_force_retaddr
26476 ret
26477 CFI_ENDPROC
26478 END(__write_lock_failed)
26479@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
26480 FRAME
26481 0: LOCK_PREFIX
26482 READ_LOCK_SIZE(inc) (%__lock_ptr)
26483+
26484+#ifdef CONFIG_PAX_REFCOUNT
26485+ jno 1234f
26486+ LOCK_PREFIX
26487+ READ_LOCK_SIZE(dec) (%__lock_ptr)
26488+ int $4
26489+1234:
26490+ _ASM_EXTABLE(1234b, 1234b)
26491+#endif
26492+
26493 1: rep; nop
26494 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
26495 js 1b
26496 LOCK_PREFIX
26497 READ_LOCK_SIZE(dec) (%__lock_ptr)
26498+
26499+#ifdef CONFIG_PAX_REFCOUNT
26500+ jno 1234f
26501+ LOCK_PREFIX
26502+ READ_LOCK_SIZE(inc) (%__lock_ptr)
26503+ int $4
26504+1234:
26505+ _ASM_EXTABLE(1234b, 1234b)
26506+#endif
26507+
26508 js 0b
26509 ENDFRAME
26510+ pax_force_retaddr
26511 ret
26512 CFI_ENDPROC
26513 END(__read_lock_failed)
26514diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
26515index 5dff5f0..cadebf4 100644
26516--- a/arch/x86/lib/rwsem.S
26517+++ b/arch/x86/lib/rwsem.S
26518@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
26519 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
26520 CFI_RESTORE __ASM_REG(dx)
26521 restore_common_regs
26522+ pax_force_retaddr
26523 ret
26524 CFI_ENDPROC
26525 ENDPROC(call_rwsem_down_read_failed)
26526@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
26527 movq %rax,%rdi
26528 call rwsem_down_write_failed
26529 restore_common_regs
26530+ pax_force_retaddr
26531 ret
26532 CFI_ENDPROC
26533 ENDPROC(call_rwsem_down_write_failed)
26534@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
26535 movq %rax,%rdi
26536 call rwsem_wake
26537 restore_common_regs
26538-1: ret
26539+1: pax_force_retaddr
26540+ ret
26541 CFI_ENDPROC
26542 ENDPROC(call_rwsem_wake)
26543
26544@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
26545 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
26546 CFI_RESTORE __ASM_REG(dx)
26547 restore_common_regs
26548+ pax_force_retaddr
26549 ret
26550 CFI_ENDPROC
26551 ENDPROC(call_rwsem_downgrade_wake)
26552diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
26553index a63efd6..ccecad8 100644
26554--- a/arch/x86/lib/thunk_64.S
26555+++ b/arch/x86/lib/thunk_64.S
26556@@ -8,6 +8,7 @@
26557 #include <linux/linkage.h>
26558 #include <asm/dwarf2.h>
26559 #include <asm/calling.h>
26560+#include <asm/alternative-asm.h>
26561
26562 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
26563 .macro THUNK name, func, put_ret_addr_in_rdi=0
26564@@ -41,5 +42,6 @@
26565 SAVE_ARGS
26566 restore:
26567 RESTORE_ARGS
26568+ pax_force_retaddr
26569 ret
26570 CFI_ENDPROC
26571diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
26572index f0312d7..9c39d63 100644
26573--- a/arch/x86/lib/usercopy_32.c
26574+++ b/arch/x86/lib/usercopy_32.c
26575@@ -42,11 +42,13 @@ do { \
26576 int __d0; \
26577 might_fault(); \
26578 __asm__ __volatile__( \
26579+ __COPYUSER_SET_ES \
26580 ASM_STAC "\n" \
26581 "0: rep; stosl\n" \
26582 " movl %2,%0\n" \
26583 "1: rep; stosb\n" \
26584 "2: " ASM_CLAC "\n" \
26585+ __COPYUSER_RESTORE_ES \
26586 ".section .fixup,\"ax\"\n" \
26587 "3: lea 0(%2,%0,4),%0\n" \
26588 " jmp 2b\n" \
26589@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
26590
26591 #ifdef CONFIG_X86_INTEL_USERCOPY
26592 static unsigned long
26593-__copy_user_intel(void __user *to, const void *from, unsigned long size)
26594+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
26595 {
26596 int d0, d1;
26597 __asm__ __volatile__(
26598@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
26599 " .align 2,0x90\n"
26600 "3: movl 0(%4), %%eax\n"
26601 "4: movl 4(%4), %%edx\n"
26602- "5: movl %%eax, 0(%3)\n"
26603- "6: movl %%edx, 4(%3)\n"
26604+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
26605+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
26606 "7: movl 8(%4), %%eax\n"
26607 "8: movl 12(%4),%%edx\n"
26608- "9: movl %%eax, 8(%3)\n"
26609- "10: movl %%edx, 12(%3)\n"
26610+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
26611+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
26612 "11: movl 16(%4), %%eax\n"
26613 "12: movl 20(%4), %%edx\n"
26614- "13: movl %%eax, 16(%3)\n"
26615- "14: movl %%edx, 20(%3)\n"
26616+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
26617+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
26618 "15: movl 24(%4), %%eax\n"
26619 "16: movl 28(%4), %%edx\n"
26620- "17: movl %%eax, 24(%3)\n"
26621- "18: movl %%edx, 28(%3)\n"
26622+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
26623+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
26624 "19: movl 32(%4), %%eax\n"
26625 "20: movl 36(%4), %%edx\n"
26626- "21: movl %%eax, 32(%3)\n"
26627- "22: movl %%edx, 36(%3)\n"
26628+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
26629+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
26630 "23: movl 40(%4), %%eax\n"
26631 "24: movl 44(%4), %%edx\n"
26632- "25: movl %%eax, 40(%3)\n"
26633- "26: movl %%edx, 44(%3)\n"
26634+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
26635+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
26636 "27: movl 48(%4), %%eax\n"
26637 "28: movl 52(%4), %%edx\n"
26638- "29: movl %%eax, 48(%3)\n"
26639- "30: movl %%edx, 52(%3)\n"
26640+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
26641+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
26642 "31: movl 56(%4), %%eax\n"
26643 "32: movl 60(%4), %%edx\n"
26644- "33: movl %%eax, 56(%3)\n"
26645- "34: movl %%edx, 60(%3)\n"
26646+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
26647+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
26648 " addl $-64, %0\n"
26649 " addl $64, %4\n"
26650 " addl $64, %3\n"
26651@@ -149,10 +151,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
26652 " shrl $2, %0\n"
26653 " andl $3, %%eax\n"
26654 " cld\n"
26655+ __COPYUSER_SET_ES
26656 "99: rep; movsl\n"
26657 "36: movl %%eax, %0\n"
26658 "37: rep; movsb\n"
26659 "100:\n"
26660+ __COPYUSER_RESTORE_ES
26661 ".section .fixup,\"ax\"\n"
26662 "101: lea 0(%%eax,%0,4),%0\n"
26663 " jmp 100b\n"
26664@@ -202,46 +206,150 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
26665 }
26666
26667 static unsigned long
26668+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
26669+{
26670+ int d0, d1;
26671+ __asm__ __volatile__(
26672+ " .align 2,0x90\n"
26673+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
26674+ " cmpl $67, %0\n"
26675+ " jbe 3f\n"
26676+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
26677+ " .align 2,0x90\n"
26678+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
26679+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
26680+ "5: movl %%eax, 0(%3)\n"
26681+ "6: movl %%edx, 4(%3)\n"
26682+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
26683+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
26684+ "9: movl %%eax, 8(%3)\n"
26685+ "10: movl %%edx, 12(%3)\n"
26686+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
26687+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
26688+ "13: movl %%eax, 16(%3)\n"
26689+ "14: movl %%edx, 20(%3)\n"
26690+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
26691+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
26692+ "17: movl %%eax, 24(%3)\n"
26693+ "18: movl %%edx, 28(%3)\n"
26694+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
26695+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
26696+ "21: movl %%eax, 32(%3)\n"
26697+ "22: movl %%edx, 36(%3)\n"
26698+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
26699+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
26700+ "25: movl %%eax, 40(%3)\n"
26701+ "26: movl %%edx, 44(%3)\n"
26702+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
26703+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
26704+ "29: movl %%eax, 48(%3)\n"
26705+ "30: movl %%edx, 52(%3)\n"
26706+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
26707+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
26708+ "33: movl %%eax, 56(%3)\n"
26709+ "34: movl %%edx, 60(%3)\n"
26710+ " addl $-64, %0\n"
26711+ " addl $64, %4\n"
26712+ " addl $64, %3\n"
26713+ " cmpl $63, %0\n"
26714+ " ja 1b\n"
26715+ "35: movl %0, %%eax\n"
26716+ " shrl $2, %0\n"
26717+ " andl $3, %%eax\n"
26718+ " cld\n"
26719+ "99: rep; "__copyuser_seg" movsl\n"
26720+ "36: movl %%eax, %0\n"
26721+ "37: rep; "__copyuser_seg" movsb\n"
26722+ "100:\n"
26723+ ".section .fixup,\"ax\"\n"
26724+ "101: lea 0(%%eax,%0,4),%0\n"
26725+ " jmp 100b\n"
26726+ ".previous\n"
26727+ _ASM_EXTABLE(1b,100b)
26728+ _ASM_EXTABLE(2b,100b)
26729+ _ASM_EXTABLE(3b,100b)
26730+ _ASM_EXTABLE(4b,100b)
26731+ _ASM_EXTABLE(5b,100b)
26732+ _ASM_EXTABLE(6b,100b)
26733+ _ASM_EXTABLE(7b,100b)
26734+ _ASM_EXTABLE(8b,100b)
26735+ _ASM_EXTABLE(9b,100b)
26736+ _ASM_EXTABLE(10b,100b)
26737+ _ASM_EXTABLE(11b,100b)
26738+ _ASM_EXTABLE(12b,100b)
26739+ _ASM_EXTABLE(13b,100b)
26740+ _ASM_EXTABLE(14b,100b)
26741+ _ASM_EXTABLE(15b,100b)
26742+ _ASM_EXTABLE(16b,100b)
26743+ _ASM_EXTABLE(17b,100b)
26744+ _ASM_EXTABLE(18b,100b)
26745+ _ASM_EXTABLE(19b,100b)
26746+ _ASM_EXTABLE(20b,100b)
26747+ _ASM_EXTABLE(21b,100b)
26748+ _ASM_EXTABLE(22b,100b)
26749+ _ASM_EXTABLE(23b,100b)
26750+ _ASM_EXTABLE(24b,100b)
26751+ _ASM_EXTABLE(25b,100b)
26752+ _ASM_EXTABLE(26b,100b)
26753+ _ASM_EXTABLE(27b,100b)
26754+ _ASM_EXTABLE(28b,100b)
26755+ _ASM_EXTABLE(29b,100b)
26756+ _ASM_EXTABLE(30b,100b)
26757+ _ASM_EXTABLE(31b,100b)
26758+ _ASM_EXTABLE(32b,100b)
26759+ _ASM_EXTABLE(33b,100b)
26760+ _ASM_EXTABLE(34b,100b)
26761+ _ASM_EXTABLE(35b,100b)
26762+ _ASM_EXTABLE(36b,100b)
26763+ _ASM_EXTABLE(37b,100b)
26764+ _ASM_EXTABLE(99b,101b)
26765+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
26766+ : "1"(to), "2"(from), "0"(size)
26767+ : "eax", "edx", "memory");
26768+ return size;
26769+}
26770+
26771+static unsigned long __size_overflow(3)
26772 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
26773 {
26774 int d0, d1;
26775 __asm__ __volatile__(
26776 " .align 2,0x90\n"
26777- "0: movl 32(%4), %%eax\n"
26778+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
26779 " cmpl $67, %0\n"
26780 " jbe 2f\n"
26781- "1: movl 64(%4), %%eax\n"
26782+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
26783 " .align 2,0x90\n"
26784- "2: movl 0(%4), %%eax\n"
26785- "21: movl 4(%4), %%edx\n"
26786+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
26787+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
26788 " movl %%eax, 0(%3)\n"
26789 " movl %%edx, 4(%3)\n"
26790- "3: movl 8(%4), %%eax\n"
26791- "31: movl 12(%4),%%edx\n"
26792+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
26793+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
26794 " movl %%eax, 8(%3)\n"
26795 " movl %%edx, 12(%3)\n"
26796- "4: movl 16(%4), %%eax\n"
26797- "41: movl 20(%4), %%edx\n"
26798+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
26799+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
26800 " movl %%eax, 16(%3)\n"
26801 " movl %%edx, 20(%3)\n"
26802- "10: movl 24(%4), %%eax\n"
26803- "51: movl 28(%4), %%edx\n"
26804+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
26805+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
26806 " movl %%eax, 24(%3)\n"
26807 " movl %%edx, 28(%3)\n"
26808- "11: movl 32(%4), %%eax\n"
26809- "61: movl 36(%4), %%edx\n"
26810+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
26811+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
26812 " movl %%eax, 32(%3)\n"
26813 " movl %%edx, 36(%3)\n"
26814- "12: movl 40(%4), %%eax\n"
26815- "71: movl 44(%4), %%edx\n"
26816+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
26817+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
26818 " movl %%eax, 40(%3)\n"
26819 " movl %%edx, 44(%3)\n"
26820- "13: movl 48(%4), %%eax\n"
26821- "81: movl 52(%4), %%edx\n"
26822+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
26823+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
26824 " movl %%eax, 48(%3)\n"
26825 " movl %%edx, 52(%3)\n"
26826- "14: movl 56(%4), %%eax\n"
26827- "91: movl 60(%4), %%edx\n"
26828+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
26829+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
26830 " movl %%eax, 56(%3)\n"
26831 " movl %%edx, 60(%3)\n"
26832 " addl $-64, %0\n"
26833@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
26834 " shrl $2, %0\n"
26835 " andl $3, %%eax\n"
26836 " cld\n"
26837- "6: rep; movsl\n"
26838+ "6: rep; "__copyuser_seg" movsl\n"
26839 " movl %%eax,%0\n"
26840- "7: rep; movsb\n"
26841+ "7: rep; "__copyuser_seg" movsb\n"
26842 "8:\n"
26843 ".section .fixup,\"ax\"\n"
26844 "9: lea 0(%%eax,%0,4),%0\n"
26845@@ -298,48 +406,48 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
26846 * hyoshiok@miraclelinux.com
26847 */
26848
26849-static unsigned long __copy_user_zeroing_intel_nocache(void *to,
26850+static unsigned long __size_overflow(3) __copy_user_zeroing_intel_nocache(void *to,
26851 const void __user *from, unsigned long size)
26852 {
26853 int d0, d1;
26854
26855 __asm__ __volatile__(
26856 " .align 2,0x90\n"
26857- "0: movl 32(%4), %%eax\n"
26858+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
26859 " cmpl $67, %0\n"
26860 " jbe 2f\n"
26861- "1: movl 64(%4), %%eax\n"
26862+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
26863 " .align 2,0x90\n"
26864- "2: movl 0(%4), %%eax\n"
26865- "21: movl 4(%4), %%edx\n"
26866+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
26867+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
26868 " movnti %%eax, 0(%3)\n"
26869 " movnti %%edx, 4(%3)\n"
26870- "3: movl 8(%4), %%eax\n"
26871- "31: movl 12(%4),%%edx\n"
26872+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
26873+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
26874 " movnti %%eax, 8(%3)\n"
26875 " movnti %%edx, 12(%3)\n"
26876- "4: movl 16(%4), %%eax\n"
26877- "41: movl 20(%4), %%edx\n"
26878+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
26879+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
26880 " movnti %%eax, 16(%3)\n"
26881 " movnti %%edx, 20(%3)\n"
26882- "10: movl 24(%4), %%eax\n"
26883- "51: movl 28(%4), %%edx\n"
26884+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
26885+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
26886 " movnti %%eax, 24(%3)\n"
26887 " movnti %%edx, 28(%3)\n"
26888- "11: movl 32(%4), %%eax\n"
26889- "61: movl 36(%4), %%edx\n"
26890+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
26891+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
26892 " movnti %%eax, 32(%3)\n"
26893 " movnti %%edx, 36(%3)\n"
26894- "12: movl 40(%4), %%eax\n"
26895- "71: movl 44(%4), %%edx\n"
26896+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
26897+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
26898 " movnti %%eax, 40(%3)\n"
26899 " movnti %%edx, 44(%3)\n"
26900- "13: movl 48(%4), %%eax\n"
26901- "81: movl 52(%4), %%edx\n"
26902+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
26903+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
26904 " movnti %%eax, 48(%3)\n"
26905 " movnti %%edx, 52(%3)\n"
26906- "14: movl 56(%4), %%eax\n"
26907- "91: movl 60(%4), %%edx\n"
26908+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
26909+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
26910 " movnti %%eax, 56(%3)\n"
26911 " movnti %%edx, 60(%3)\n"
26912 " addl $-64, %0\n"
26913@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
26914 " shrl $2, %0\n"
26915 " andl $3, %%eax\n"
26916 " cld\n"
26917- "6: rep; movsl\n"
26918+ "6: rep; "__copyuser_seg" movsl\n"
26919 " movl %%eax,%0\n"
26920- "7: rep; movsb\n"
26921+ "7: rep; "__copyuser_seg" movsb\n"
26922 "8:\n"
26923 ".section .fixup,\"ax\"\n"
26924 "9: lea 0(%%eax,%0,4),%0\n"
26925@@ -392,48 +500,48 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
26926 return size;
26927 }
26928
26929-static unsigned long __copy_user_intel_nocache(void *to,
26930+static unsigned long __size_overflow(3) __copy_user_intel_nocache(void *to,
26931 const void __user *from, unsigned long size)
26932 {
26933 int d0, d1;
26934
26935 __asm__ __volatile__(
26936 " .align 2,0x90\n"
26937- "0: movl 32(%4), %%eax\n"
26938+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
26939 " cmpl $67, %0\n"
26940 " jbe 2f\n"
26941- "1: movl 64(%4), %%eax\n"
26942+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
26943 " .align 2,0x90\n"
26944- "2: movl 0(%4), %%eax\n"
26945- "21: movl 4(%4), %%edx\n"
26946+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
26947+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
26948 " movnti %%eax, 0(%3)\n"
26949 " movnti %%edx, 4(%3)\n"
26950- "3: movl 8(%4), %%eax\n"
26951- "31: movl 12(%4),%%edx\n"
26952+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
26953+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
26954 " movnti %%eax, 8(%3)\n"
26955 " movnti %%edx, 12(%3)\n"
26956- "4: movl 16(%4), %%eax\n"
26957- "41: movl 20(%4), %%edx\n"
26958+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
26959+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
26960 " movnti %%eax, 16(%3)\n"
26961 " movnti %%edx, 20(%3)\n"
26962- "10: movl 24(%4), %%eax\n"
26963- "51: movl 28(%4), %%edx\n"
26964+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
26965+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
26966 " movnti %%eax, 24(%3)\n"
26967 " movnti %%edx, 28(%3)\n"
26968- "11: movl 32(%4), %%eax\n"
26969- "61: movl 36(%4), %%edx\n"
26970+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
26971+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
26972 " movnti %%eax, 32(%3)\n"
26973 " movnti %%edx, 36(%3)\n"
26974- "12: movl 40(%4), %%eax\n"
26975- "71: movl 44(%4), %%edx\n"
26976+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
26977+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
26978 " movnti %%eax, 40(%3)\n"
26979 " movnti %%edx, 44(%3)\n"
26980- "13: movl 48(%4), %%eax\n"
26981- "81: movl 52(%4), %%edx\n"
26982+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
26983+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
26984 " movnti %%eax, 48(%3)\n"
26985 " movnti %%edx, 52(%3)\n"
26986- "14: movl 56(%4), %%eax\n"
26987- "91: movl 60(%4), %%edx\n"
26988+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
26989+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
26990 " movnti %%eax, 56(%3)\n"
26991 " movnti %%edx, 60(%3)\n"
26992 " addl $-64, %0\n"
26993@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
26994 " shrl $2, %0\n"
26995 " andl $3, %%eax\n"
26996 " cld\n"
26997- "6: rep; movsl\n"
26998+ "6: rep; "__copyuser_seg" movsl\n"
26999 " movl %%eax,%0\n"
27000- "7: rep; movsb\n"
27001+ "7: rep; "__copyuser_seg" movsb\n"
27002 "8:\n"
27003 ".section .fixup,\"ax\"\n"
27004 "9: lea 0(%%eax,%0,4),%0\n"
27005@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
27006 */
27007 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
27008 unsigned long size);
27009-unsigned long __copy_user_intel(void __user *to, const void *from,
27010+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
27011+ unsigned long size);
27012+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
27013 unsigned long size);
27014 unsigned long __copy_user_zeroing_intel_nocache(void *to,
27015 const void __user *from, unsigned long size);
27016 #endif /* CONFIG_X86_INTEL_USERCOPY */
27017
27018 /* Generic arbitrary sized copy. */
27019-#define __copy_user(to, from, size) \
27020+#define __copy_user(to, from, size, prefix, set, restore) \
27021 do { \
27022 int __d0, __d1, __d2; \
27023 __asm__ __volatile__( \
27024+ set \
27025 " cmp $7,%0\n" \
27026 " jbe 1f\n" \
27027 " movl %1,%0\n" \
27028 " negl %0\n" \
27029 " andl $7,%0\n" \
27030 " subl %0,%3\n" \
27031- "4: rep; movsb\n" \
27032+ "4: rep; "prefix"movsb\n" \
27033 " movl %3,%0\n" \
27034 " shrl $2,%0\n" \
27035 " andl $3,%3\n" \
27036 " .align 2,0x90\n" \
27037- "0: rep; movsl\n" \
27038+ "0: rep; "prefix"movsl\n" \
27039 " movl %3,%0\n" \
27040- "1: rep; movsb\n" \
27041+ "1: rep; "prefix"movsb\n" \
27042 "2:\n" \
27043+ restore \
27044 ".section .fixup,\"ax\"\n" \
27045 "5: addl %3,%0\n" \
27046 " jmp 2b\n" \
27047@@ -538,14 +650,14 @@ do { \
27048 " negl %0\n" \
27049 " andl $7,%0\n" \
27050 " subl %0,%3\n" \
27051- "4: rep; movsb\n" \
27052+ "4: rep; "__copyuser_seg"movsb\n" \
27053 " movl %3,%0\n" \
27054 " shrl $2,%0\n" \
27055 " andl $3,%3\n" \
27056 " .align 2,0x90\n" \
27057- "0: rep; movsl\n" \
27058+ "0: rep; "__copyuser_seg"movsl\n" \
27059 " movl %3,%0\n" \
27060- "1: rep; movsb\n" \
27061+ "1: rep; "__copyuser_seg"movsb\n" \
27062 "2:\n" \
27063 ".section .fixup,\"ax\"\n" \
27064 "5: addl %3,%0\n" \
27065@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
27066 {
27067 stac();
27068 if (movsl_is_ok(to, from, n))
27069- __copy_user(to, from, n);
27070+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
27071 else
27072- n = __copy_user_intel(to, from, n);
27073+ n = __generic_copy_to_user_intel(to, from, n);
27074 clac();
27075 return n;
27076 }
27077@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
27078 {
27079 stac();
27080 if (movsl_is_ok(to, from, n))
27081- __copy_user(to, from, n);
27082+ __copy_user(to, from, n, __copyuser_seg, "", "");
27083 else
27084- n = __copy_user_intel((void __user *)to,
27085- (const void *)from, n);
27086+ n = __generic_copy_from_user_intel(to, from, n);
27087 clac();
27088 return n;
27089 }
27090@@ -632,66 +743,51 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
27091 if (n > 64 && cpu_has_xmm2)
27092 n = __copy_user_intel_nocache(to, from, n);
27093 else
27094- __copy_user(to, from, n);
27095+ __copy_user(to, from, n, __copyuser_seg, "", "");
27096 #else
27097- __copy_user(to, from, n);
27098+ __copy_user(to, from, n, __copyuser_seg, "", "");
27099 #endif
27100 clac();
27101 return n;
27102 }
27103 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
27104
27105-/**
27106- * copy_to_user: - Copy a block of data into user space.
27107- * @to: Destination address, in user space.
27108- * @from: Source address, in kernel space.
27109- * @n: Number of bytes to copy.
27110- *
27111- * Context: User context only. This function may sleep.
27112- *
27113- * Copy data from kernel space to user space.
27114- *
27115- * Returns number of bytes that could not be copied.
27116- * On success, this will be zero.
27117- */
27118-unsigned long
27119-copy_to_user(void __user *to, const void *from, unsigned long n)
27120-{
27121- if (access_ok(VERIFY_WRITE, to, n))
27122- n = __copy_to_user(to, from, n);
27123- return n;
27124-}
27125-EXPORT_SYMBOL(copy_to_user);
27126-
27127-/**
27128- * copy_from_user: - Copy a block of data from user space.
27129- * @to: Destination address, in kernel space.
27130- * @from: Source address, in user space.
27131- * @n: Number of bytes to copy.
27132- *
27133- * Context: User context only. This function may sleep.
27134- *
27135- * Copy data from user space to kernel space.
27136- *
27137- * Returns number of bytes that could not be copied.
27138- * On success, this will be zero.
27139- *
27140- * If some data could not be copied, this function will pad the copied
27141- * data to the requested size using zero bytes.
27142- */
27143-unsigned long
27144-_copy_from_user(void *to, const void __user *from, unsigned long n)
27145-{
27146- if (access_ok(VERIFY_READ, from, n))
27147- n = __copy_from_user(to, from, n);
27148- else
27149- memset(to, 0, n);
27150- return n;
27151-}
27152-EXPORT_SYMBOL(_copy_from_user);
27153-
27154 void copy_from_user_overflow(void)
27155 {
27156 WARN(1, "Buffer overflow detected!\n");
27157 }
27158 EXPORT_SYMBOL(copy_from_user_overflow);
27159+
27160+void copy_to_user_overflow(void)
27161+{
27162+ WARN(1, "Buffer overflow detected!\n");
27163+}
27164+EXPORT_SYMBOL(copy_to_user_overflow);
27165+
27166+#ifdef CONFIG_PAX_MEMORY_UDEREF
27167+void __set_fs(mm_segment_t x)
27168+{
27169+ switch (x.seg) {
27170+ case 0:
27171+ loadsegment(gs, 0);
27172+ break;
27173+ case TASK_SIZE_MAX:
27174+ loadsegment(gs, __USER_DS);
27175+ break;
27176+ case -1UL:
27177+ loadsegment(gs, __KERNEL_DS);
27178+ break;
27179+ default:
27180+ BUG();
27181+ }
27182+ return;
27183+}
27184+EXPORT_SYMBOL(__set_fs);
27185+
27186+void set_fs(mm_segment_t x)
27187+{
27188+ current_thread_info()->addr_limit = x;
27189+ __set_fs(x);
27190+}
27191+EXPORT_SYMBOL(set_fs);
27192+#endif
27193diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
27194index 906fea3..ee8a097 100644
27195--- a/arch/x86/lib/usercopy_64.c
27196+++ b/arch/x86/lib/usercopy_64.c
27197@@ -39,7 +39,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
27198 _ASM_EXTABLE(0b,3b)
27199 _ASM_EXTABLE(1b,2b)
27200 : [size8] "=&c"(size), [dst] "=&D" (__d0)
27201- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
27202+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
27203 [zero] "r" (0UL), [eight] "r" (8UL));
27204 clac();
27205 return size;
27206@@ -54,12 +54,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
27207 }
27208 EXPORT_SYMBOL(clear_user);
27209
27210-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
27211+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
27212 {
27213- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
27214- return copy_user_generic((__force void *)to, (__force void *)from, len);
27215- }
27216- return len;
27217+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
27218+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
27219+ return len;
27220 }
27221 EXPORT_SYMBOL(copy_in_user);
27222
27223@@ -69,7 +68,7 @@ EXPORT_SYMBOL(copy_in_user);
27224 * it is not necessary to optimize tail handling.
27225 */
27226 unsigned long
27227-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
27228+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
27229 {
27230 char c;
27231 unsigned zero_len;
27232@@ -87,3 +86,15 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
27233 clac();
27234 return len;
27235 }
27236+
27237+void copy_from_user_overflow(void)
27238+{
27239+ WARN(1, "Buffer overflow detected!\n");
27240+}
27241+EXPORT_SYMBOL(copy_from_user_overflow);
27242+
27243+void copy_to_user_overflow(void)
27244+{
27245+ WARN(1, "Buffer overflow detected!\n");
27246+}
27247+EXPORT_SYMBOL(copy_to_user_overflow);
27248diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
27249index 903ec1e..c4166b2 100644
27250--- a/arch/x86/mm/extable.c
27251+++ b/arch/x86/mm/extable.c
27252@@ -6,12 +6,24 @@
27253 static inline unsigned long
27254 ex_insn_addr(const struct exception_table_entry *x)
27255 {
27256- return (unsigned long)&x->insn + x->insn;
27257+ unsigned long reloc = 0;
27258+
27259+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27260+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
27261+#endif
27262+
27263+ return (unsigned long)&x->insn + x->insn + reloc;
27264 }
27265 static inline unsigned long
27266 ex_fixup_addr(const struct exception_table_entry *x)
27267 {
27268- return (unsigned long)&x->fixup + x->fixup;
27269+ unsigned long reloc = 0;
27270+
27271+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27272+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
27273+#endif
27274+
27275+ return (unsigned long)&x->fixup + x->fixup + reloc;
27276 }
27277
27278 int fixup_exception(struct pt_regs *regs)
27279@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
27280 unsigned long new_ip;
27281
27282 #ifdef CONFIG_PNPBIOS
27283- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
27284+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
27285 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
27286 extern u32 pnp_bios_is_utter_crap;
27287 pnp_bios_is_utter_crap = 1;
27288@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
27289 i += 4;
27290 p->fixup -= i;
27291 i += 4;
27292+
27293+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27294+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
27295+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
27296+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
27297+#endif
27298+
27299 }
27300 }
27301
27302diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
27303index 0e88336..2bb9777 100644
27304--- a/arch/x86/mm/fault.c
27305+++ b/arch/x86/mm/fault.c
27306@@ -13,12 +13,19 @@
27307 #include <linux/perf_event.h> /* perf_sw_event */
27308 #include <linux/hugetlb.h> /* hstate_index_to_shift */
27309 #include <linux/prefetch.h> /* prefetchw */
27310+#include <linux/unistd.h>
27311+#include <linux/compiler.h>
27312
27313 #include <asm/traps.h> /* dotraplinkage, ... */
27314 #include <asm/pgalloc.h> /* pgd_*(), ... */
27315 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
27316 #include <asm/fixmap.h> /* VSYSCALL_START */
27317 #include <asm/context_tracking.h> /* exception_enter(), ... */
27318+#include <asm/tlbflush.h>
27319+
27320+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
27321+#include <asm/stacktrace.h>
27322+#endif
27323
27324 /*
27325 * Page fault error code bits:
27326@@ -56,7 +63,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
27327 int ret = 0;
27328
27329 /* kprobe_running() needs smp_processor_id() */
27330- if (kprobes_built_in() && !user_mode_vm(regs)) {
27331+ if (kprobes_built_in() && !user_mode(regs)) {
27332 preempt_disable();
27333 if (kprobe_running() && kprobe_fault_handler(regs, 14))
27334 ret = 1;
27335@@ -117,7 +124,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
27336 return !instr_lo || (instr_lo>>1) == 1;
27337 case 0x00:
27338 /* Prefetch instruction is 0x0F0D or 0x0F18 */
27339- if (probe_kernel_address(instr, opcode))
27340+ if (user_mode(regs)) {
27341+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
27342+ return 0;
27343+ } else if (probe_kernel_address(instr, opcode))
27344 return 0;
27345
27346 *prefetch = (instr_lo == 0xF) &&
27347@@ -151,7 +161,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
27348 while (instr < max_instr) {
27349 unsigned char opcode;
27350
27351- if (probe_kernel_address(instr, opcode))
27352+ if (user_mode(regs)) {
27353+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
27354+ break;
27355+ } else if (probe_kernel_address(instr, opcode))
27356 break;
27357
27358 instr++;
27359@@ -182,6 +195,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
27360 force_sig_info(si_signo, &info, tsk);
27361 }
27362
27363+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
27364+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
27365+#endif
27366+
27367+#ifdef CONFIG_PAX_EMUTRAMP
27368+static int pax_handle_fetch_fault(struct pt_regs *regs);
27369+#endif
27370+
27371+#ifdef CONFIG_PAX_PAGEEXEC
27372+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
27373+{
27374+ pgd_t *pgd;
27375+ pud_t *pud;
27376+ pmd_t *pmd;
27377+
27378+ pgd = pgd_offset(mm, address);
27379+ if (!pgd_present(*pgd))
27380+ return NULL;
27381+ pud = pud_offset(pgd, address);
27382+ if (!pud_present(*pud))
27383+ return NULL;
27384+ pmd = pmd_offset(pud, address);
27385+ if (!pmd_present(*pmd))
27386+ return NULL;
27387+ return pmd;
27388+}
27389+#endif
27390+
27391 DEFINE_SPINLOCK(pgd_lock);
27392 LIST_HEAD(pgd_list);
27393
27394@@ -232,10 +273,22 @@ void vmalloc_sync_all(void)
27395 for (address = VMALLOC_START & PMD_MASK;
27396 address >= TASK_SIZE && address < FIXADDR_TOP;
27397 address += PMD_SIZE) {
27398+
27399+#ifdef CONFIG_PAX_PER_CPU_PGD
27400+ unsigned long cpu;
27401+#else
27402 struct page *page;
27403+#endif
27404
27405 spin_lock(&pgd_lock);
27406+
27407+#ifdef CONFIG_PAX_PER_CPU_PGD
27408+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
27409+ pgd_t *pgd = get_cpu_pgd(cpu);
27410+ pmd_t *ret;
27411+#else
27412 list_for_each_entry(page, &pgd_list, lru) {
27413+ pgd_t *pgd;
27414 spinlock_t *pgt_lock;
27415 pmd_t *ret;
27416
27417@@ -243,8 +296,14 @@ void vmalloc_sync_all(void)
27418 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
27419
27420 spin_lock(pgt_lock);
27421- ret = vmalloc_sync_one(page_address(page), address);
27422+ pgd = page_address(page);
27423+#endif
27424+
27425+ ret = vmalloc_sync_one(pgd, address);
27426+
27427+#ifndef CONFIG_PAX_PER_CPU_PGD
27428 spin_unlock(pgt_lock);
27429+#endif
27430
27431 if (!ret)
27432 break;
27433@@ -278,6 +337,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
27434 * an interrupt in the middle of a task switch..
27435 */
27436 pgd_paddr = read_cr3();
27437+
27438+#ifdef CONFIG_PAX_PER_CPU_PGD
27439+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
27440+#endif
27441+
27442 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
27443 if (!pmd_k)
27444 return -1;
27445@@ -373,7 +437,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
27446 * happen within a race in page table update. In the later
27447 * case just flush:
27448 */
27449+
27450+#ifdef CONFIG_PAX_PER_CPU_PGD
27451+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
27452+ pgd = pgd_offset_cpu(smp_processor_id(), address);
27453+#else
27454 pgd = pgd_offset(current->active_mm, address);
27455+#endif
27456+
27457 pgd_ref = pgd_offset_k(address);
27458 if (pgd_none(*pgd_ref))
27459 return -1;
27460@@ -543,7 +614,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
27461 static int is_errata100(struct pt_regs *regs, unsigned long address)
27462 {
27463 #ifdef CONFIG_X86_64
27464- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
27465+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
27466 return 1;
27467 #endif
27468 return 0;
27469@@ -570,7 +641,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
27470 }
27471
27472 static const char nx_warning[] = KERN_CRIT
27473-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
27474+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
27475
27476 static void
27477 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
27478@@ -579,15 +650,27 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
27479 if (!oops_may_print())
27480 return;
27481
27482- if (error_code & PF_INSTR) {
27483+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
27484 unsigned int level;
27485
27486 pte_t *pte = lookup_address(address, &level);
27487
27488 if (pte && pte_present(*pte) && !pte_exec(*pte))
27489- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
27490+ printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
27491 }
27492
27493+#ifdef CONFIG_PAX_KERNEXEC
27494+ if (init_mm.start_code <= address && address < init_mm.end_code) {
27495+ if (current->signal->curr_ip)
27496+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
27497+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
27498+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
27499+ else
27500+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
27501+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
27502+ }
27503+#endif
27504+
27505 printk(KERN_ALERT "BUG: unable to handle kernel ");
27506 if (address < PAGE_SIZE)
27507 printk(KERN_CONT "NULL pointer dereference");
27508@@ -750,6 +833,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
27509 return;
27510 }
27511 #endif
27512+
27513+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
27514+ if (pax_is_fetch_fault(regs, error_code, address)) {
27515+
27516+#ifdef CONFIG_PAX_EMUTRAMP
27517+ switch (pax_handle_fetch_fault(regs)) {
27518+ case 2:
27519+ return;
27520+ }
27521+#endif
27522+
27523+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
27524+ do_group_exit(SIGKILL);
27525+ }
27526+#endif
27527+
27528 /* Kernel addresses are always protection faults: */
27529 if (address >= TASK_SIZE)
27530 error_code |= PF_PROT;
27531@@ -835,7 +934,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
27532 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
27533 printk(KERN_ERR
27534 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
27535- tsk->comm, tsk->pid, address);
27536+ tsk->comm, task_pid_nr(tsk), address);
27537 code = BUS_MCEERR_AR;
27538 }
27539 #endif
27540@@ -898,6 +997,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
27541 return 1;
27542 }
27543
27544+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
27545+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
27546+{
27547+ pte_t *pte;
27548+ pmd_t *pmd;
27549+ spinlock_t *ptl;
27550+ unsigned char pte_mask;
27551+
27552+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
27553+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
27554+ return 0;
27555+
27556+ /* PaX: it's our fault, let's handle it if we can */
27557+
27558+ /* PaX: take a look at read faults before acquiring any locks */
27559+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
27560+ /* instruction fetch attempt from a protected page in user mode */
27561+ up_read(&mm->mmap_sem);
27562+
27563+#ifdef CONFIG_PAX_EMUTRAMP
27564+ switch (pax_handle_fetch_fault(regs)) {
27565+ case 2:
27566+ return 1;
27567+ }
27568+#endif
27569+
27570+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
27571+ do_group_exit(SIGKILL);
27572+ }
27573+
27574+ pmd = pax_get_pmd(mm, address);
27575+ if (unlikely(!pmd))
27576+ return 0;
27577+
27578+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
27579+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
27580+ pte_unmap_unlock(pte, ptl);
27581+ return 0;
27582+ }
27583+
27584+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
27585+ /* write attempt to a protected page in user mode */
27586+ pte_unmap_unlock(pte, ptl);
27587+ return 0;
27588+ }
27589+
27590+#ifdef CONFIG_SMP
27591+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
27592+#else
27593+ if (likely(address > get_limit(regs->cs)))
27594+#endif
27595+ {
27596+ set_pte(pte, pte_mkread(*pte));
27597+ __flush_tlb_one(address);
27598+ pte_unmap_unlock(pte, ptl);
27599+ up_read(&mm->mmap_sem);
27600+ return 1;
27601+ }
27602+
27603+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
27604+
27605+ /*
27606+ * PaX: fill DTLB with user rights and retry
27607+ */
27608+ __asm__ __volatile__ (
27609+ "orb %2,(%1)\n"
27610+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
27611+/*
27612+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
27613+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
27614+ * page fault when examined during a TLB load attempt. this is true not only
27615+ * for PTEs holding a non-present entry but also present entries that will
27616+ * raise a page fault (such as those set up by PaX, or the copy-on-write
27617+ * mechanism). in effect it means that we do *not* need to flush the TLBs
27618+ * for our target pages since their PTEs are simply not in the TLBs at all.
27619+
27620+ * the best thing in omitting it is that we gain around 15-20% speed in the
27621+ * fast path of the page fault handler and can get rid of tracing since we
27622+ * can no longer flush unintended entries.
27623+ */
27624+ "invlpg (%0)\n"
27625+#endif
27626+ __copyuser_seg"testb $0,(%0)\n"
27627+ "xorb %3,(%1)\n"
27628+ :
27629+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
27630+ : "memory", "cc");
27631+ pte_unmap_unlock(pte, ptl);
27632+ up_read(&mm->mmap_sem);
27633+ return 1;
27634+}
27635+#endif
27636+
27637 /*
27638 * Handle a spurious fault caused by a stale TLB entry.
27639 *
27640@@ -964,6 +1156,9 @@ int show_unhandled_signals = 1;
27641 static inline int
27642 access_error(unsigned long error_code, struct vm_area_struct *vma)
27643 {
27644+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
27645+ return 1;
27646+
27647 if (error_code & PF_WRITE) {
27648 /* write, present and write, not present: */
27649 if (unlikely(!(vma->vm_flags & VM_WRITE)))
27650@@ -992,7 +1187,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
27651 if (error_code & PF_USER)
27652 return false;
27653
27654- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
27655+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
27656 return false;
27657
27658 return true;
27659@@ -1008,18 +1203,33 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
27660 {
27661 struct vm_area_struct *vma;
27662 struct task_struct *tsk;
27663- unsigned long address;
27664 struct mm_struct *mm;
27665 int fault;
27666 int write = error_code & PF_WRITE;
27667 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
27668 (write ? FAULT_FLAG_WRITE : 0);
27669
27670- tsk = current;
27671- mm = tsk->mm;
27672-
27673 /* Get the faulting address: */
27674- address = read_cr2();
27675+ unsigned long address = read_cr2();
27676+
27677+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
27678+ if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
27679+ if (!search_exception_tables(regs->ip)) {
27680+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
27681+ bad_area_nosemaphore(regs, error_code, address);
27682+ return;
27683+ }
27684+ if (address < pax_user_shadow_base) {
27685+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
27686+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
27687+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
27688+ } else
27689+ address -= pax_user_shadow_base;
27690+ }
27691+#endif
27692+
27693+ tsk = current;
27694+ mm = tsk->mm;
27695
27696 /*
27697 * Detect and handle instructions that would cause a page fault for
27698@@ -1080,7 +1290,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
27699 * User-mode registers count as a user access even for any
27700 * potential system fault or CPU buglet:
27701 */
27702- if (user_mode_vm(regs)) {
27703+ if (user_mode(regs)) {
27704 local_irq_enable();
27705 error_code |= PF_USER;
27706 } else {
27707@@ -1142,6 +1352,11 @@ retry:
27708 might_sleep();
27709 }
27710
27711+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
27712+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
27713+ return;
27714+#endif
27715+
27716 vma = find_vma(mm, address);
27717 if (unlikely(!vma)) {
27718 bad_area(regs, error_code, address);
27719@@ -1153,18 +1368,24 @@ retry:
27720 bad_area(regs, error_code, address);
27721 return;
27722 }
27723- if (error_code & PF_USER) {
27724- /*
27725- * Accessing the stack below %sp is always a bug.
27726- * The large cushion allows instructions like enter
27727- * and pusha to work. ("enter $65535, $31" pushes
27728- * 32 pointers and then decrements %sp by 65535.)
27729- */
27730- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
27731- bad_area(regs, error_code, address);
27732- return;
27733- }
27734+ /*
27735+ * Accessing the stack below %sp is always a bug.
27736+ * The large cushion allows instructions like enter
27737+ * and pusha to work. ("enter $65535, $31" pushes
27738+ * 32 pointers and then decrements %sp by 65535.)
27739+ */
27740+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
27741+ bad_area(regs, error_code, address);
27742+ return;
27743 }
27744+
27745+#ifdef CONFIG_PAX_SEGMEXEC
27746+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
27747+ bad_area(regs, error_code, address);
27748+ return;
27749+ }
27750+#endif
27751+
27752 if (unlikely(expand_stack(vma, address))) {
27753 bad_area(regs, error_code, address);
27754 return;
27755@@ -1228,3 +1449,292 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
27756 __do_page_fault(regs, error_code);
27757 exception_exit(regs);
27758 }
27759+
27760+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
27761+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
27762+{
27763+ struct mm_struct *mm = current->mm;
27764+ unsigned long ip = regs->ip;
27765+
27766+ if (v8086_mode(regs))
27767+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
27768+
27769+#ifdef CONFIG_PAX_PAGEEXEC
27770+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
27771+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
27772+ return true;
27773+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
27774+ return true;
27775+ return false;
27776+ }
27777+#endif
27778+
27779+#ifdef CONFIG_PAX_SEGMEXEC
27780+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
27781+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
27782+ return true;
27783+ return false;
27784+ }
27785+#endif
27786+
27787+ return false;
27788+}
27789+#endif
27790+
27791+#ifdef CONFIG_PAX_EMUTRAMP
27792+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
27793+{
27794+ int err;
27795+
27796+ do { /* PaX: libffi trampoline emulation */
27797+ unsigned char mov, jmp;
27798+ unsigned int addr1, addr2;
27799+
27800+#ifdef CONFIG_X86_64
27801+ if ((regs->ip + 9) >> 32)
27802+ break;
27803+#endif
27804+
27805+ err = get_user(mov, (unsigned char __user *)regs->ip);
27806+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
27807+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
27808+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
27809+
27810+ if (err)
27811+ break;
27812+
27813+ if (mov == 0xB8 && jmp == 0xE9) {
27814+ regs->ax = addr1;
27815+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
27816+ return 2;
27817+ }
27818+ } while (0);
27819+
27820+ do { /* PaX: gcc trampoline emulation #1 */
27821+ unsigned char mov1, mov2;
27822+ unsigned short jmp;
27823+ unsigned int addr1, addr2;
27824+
27825+#ifdef CONFIG_X86_64
27826+ if ((regs->ip + 11) >> 32)
27827+ break;
27828+#endif
27829+
27830+ err = get_user(mov1, (unsigned char __user *)regs->ip);
27831+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
27832+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
27833+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
27834+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
27835+
27836+ if (err)
27837+ break;
27838+
27839+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
27840+ regs->cx = addr1;
27841+ regs->ax = addr2;
27842+ regs->ip = addr2;
27843+ return 2;
27844+ }
27845+ } while (0);
27846+
27847+ do { /* PaX: gcc trampoline emulation #2 */
27848+ unsigned char mov, jmp;
27849+ unsigned int addr1, addr2;
27850+
27851+#ifdef CONFIG_X86_64
27852+ if ((regs->ip + 9) >> 32)
27853+ break;
27854+#endif
27855+
27856+ err = get_user(mov, (unsigned char __user *)regs->ip);
27857+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
27858+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
27859+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
27860+
27861+ if (err)
27862+ break;
27863+
27864+ if (mov == 0xB9 && jmp == 0xE9) {
27865+ regs->cx = addr1;
27866+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
27867+ return 2;
27868+ }
27869+ } while (0);
27870+
27871+ return 1; /* PaX in action */
27872+}
27873+
27874+#ifdef CONFIG_X86_64
27875+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
27876+{
27877+ int err;
27878+
27879+ do { /* PaX: libffi trampoline emulation */
27880+ unsigned short mov1, mov2, jmp1;
27881+ unsigned char stcclc, jmp2;
27882+ unsigned long addr1, addr2;
27883+
27884+ err = get_user(mov1, (unsigned short __user *)regs->ip);
27885+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
27886+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
27887+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
27888+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
27889+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
27890+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
27891+
27892+ if (err)
27893+ break;
27894+
27895+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
27896+ regs->r11 = addr1;
27897+ regs->r10 = addr2;
27898+ if (stcclc == 0xF8)
27899+ regs->flags &= ~X86_EFLAGS_CF;
27900+ else
27901+ regs->flags |= X86_EFLAGS_CF;
27902+ regs->ip = addr1;
27903+ return 2;
27904+ }
27905+ } while (0);
27906+
27907+ do { /* PaX: gcc trampoline emulation #1 */
27908+ unsigned short mov1, mov2, jmp1;
27909+ unsigned char jmp2;
27910+ unsigned int addr1;
27911+ unsigned long addr2;
27912+
27913+ err = get_user(mov1, (unsigned short __user *)regs->ip);
27914+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
27915+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
27916+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
27917+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
27918+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
27919+
27920+ if (err)
27921+ break;
27922+
27923+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
27924+ regs->r11 = addr1;
27925+ regs->r10 = addr2;
27926+ regs->ip = addr1;
27927+ return 2;
27928+ }
27929+ } while (0);
27930+
27931+ do { /* PaX: gcc trampoline emulation #2 */
27932+ unsigned short mov1, mov2, jmp1;
27933+ unsigned char jmp2;
27934+ unsigned long addr1, addr2;
27935+
27936+ err = get_user(mov1, (unsigned short __user *)regs->ip);
27937+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
27938+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
27939+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
27940+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
27941+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
27942+
27943+ if (err)
27944+ break;
27945+
27946+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
27947+ regs->r11 = addr1;
27948+ regs->r10 = addr2;
27949+ regs->ip = addr1;
27950+ return 2;
27951+ }
27952+ } while (0);
27953+
27954+ return 1; /* PaX in action */
27955+}
27956+#endif
27957+
27958+/*
27959+ * PaX: decide what to do with offenders (regs->ip = fault address)
27960+ *
27961+ * returns 1 when task should be killed
27962+ * 2 when gcc trampoline was detected
27963+ */
27964+static int pax_handle_fetch_fault(struct pt_regs *regs)
27965+{
27966+ if (v8086_mode(regs))
27967+ return 1;
27968+
27969+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
27970+ return 1;
27971+
27972+#ifdef CONFIG_X86_32
27973+ return pax_handle_fetch_fault_32(regs);
27974+#else
27975+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
27976+ return pax_handle_fetch_fault_32(regs);
27977+ else
27978+ return pax_handle_fetch_fault_64(regs);
27979+#endif
27980+}
27981+#endif
27982+
27983+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
27984+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
27985+{
27986+ long i;
27987+
27988+ printk(KERN_ERR "PAX: bytes at PC: ");
27989+ for (i = 0; i < 20; i++) {
27990+ unsigned char c;
27991+ if (get_user(c, (unsigned char __force_user *)pc+i))
27992+ printk(KERN_CONT "?? ");
27993+ else
27994+ printk(KERN_CONT "%02x ", c);
27995+ }
27996+ printk("\n");
27997+
27998+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
27999+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
28000+ unsigned long c;
28001+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
28002+#ifdef CONFIG_X86_32
28003+ printk(KERN_CONT "???????? ");
28004+#else
28005+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
28006+ printk(KERN_CONT "???????? ???????? ");
28007+ else
28008+ printk(KERN_CONT "???????????????? ");
28009+#endif
28010+ } else {
28011+#ifdef CONFIG_X86_64
28012+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
28013+ printk(KERN_CONT "%08x ", (unsigned int)c);
28014+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
28015+ } else
28016+#endif
28017+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
28018+ }
28019+ }
28020+ printk("\n");
28021+}
28022+#endif
28023+
28024+/**
28025+ * probe_kernel_write(): safely attempt to write to a location
28026+ * @dst: address to write to
28027+ * @src: pointer to the data that shall be written
28028+ * @size: size of the data chunk
28029+ *
28030+ * Safely write to address @dst from the buffer at @src. If a kernel fault
28031+ * happens, handle that and return -EFAULT.
28032+ */
28033+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
28034+{
28035+ long ret;
28036+ mm_segment_t old_fs = get_fs();
28037+
28038+ set_fs(KERNEL_DS);
28039+ pagefault_disable();
28040+ pax_open_kernel();
28041+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
28042+ pax_close_kernel();
28043+ pagefault_enable();
28044+ set_fs(old_fs);
28045+
28046+ return ret ? -EFAULT : 0;
28047+}
28048diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
28049index dd74e46..7d26398 100644
28050--- a/arch/x86/mm/gup.c
28051+++ b/arch/x86/mm/gup.c
28052@@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
28053 addr = start;
28054 len = (unsigned long) nr_pages << PAGE_SHIFT;
28055 end = start + len;
28056- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
28057+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
28058 (void __user *)start, len)))
28059 return 0;
28060
28061diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
28062index 6f31ee5..8ee4164 100644
28063--- a/arch/x86/mm/highmem_32.c
28064+++ b/arch/x86/mm/highmem_32.c
28065@@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
28066 idx = type + KM_TYPE_NR*smp_processor_id();
28067 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
28068 BUG_ON(!pte_none(*(kmap_pte-idx)));
28069+
28070+ pax_open_kernel();
28071 set_pte(kmap_pte-idx, mk_pte(page, prot));
28072+ pax_close_kernel();
28073+
28074 arch_flush_lazy_mmu_mode();
28075
28076 return (void *)vaddr;
28077diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
28078index ae1aa71..d9bea75 100644
28079--- a/arch/x86/mm/hugetlbpage.c
28080+++ b/arch/x86/mm/hugetlbpage.c
28081@@ -271,23 +271,30 @@ follow_huge_pud(struct mm_struct *mm, unsigned long address,
28082 #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
28083 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
28084 unsigned long addr, unsigned long len,
28085- unsigned long pgoff, unsigned long flags)
28086+ unsigned long pgoff, unsigned long flags, unsigned long offset)
28087 {
28088 struct hstate *h = hstate_file(file);
28089 struct vm_unmapped_area_info info;
28090-
28091+
28092 info.flags = 0;
28093 info.length = len;
28094 info.low_limit = TASK_UNMAPPED_BASE;
28095+
28096+#ifdef CONFIG_PAX_RANDMMAP
28097+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
28098+ info.low_limit += current->mm->delta_mmap;
28099+#endif
28100+
28101 info.high_limit = TASK_SIZE;
28102 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
28103 info.align_offset = 0;
28104+ info.threadstack_offset = offset;
28105 return vm_unmapped_area(&info);
28106 }
28107
28108 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
28109 unsigned long addr0, unsigned long len,
28110- unsigned long pgoff, unsigned long flags)
28111+ unsigned long pgoff, unsigned long flags, unsigned long offset)
28112 {
28113 struct hstate *h = hstate_file(file);
28114 struct vm_unmapped_area_info info;
28115@@ -299,6 +306,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
28116 info.high_limit = current->mm->mmap_base;
28117 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
28118 info.align_offset = 0;
28119+ info.threadstack_offset = offset;
28120 addr = vm_unmapped_area(&info);
28121
28122 /*
28123@@ -311,6 +319,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
28124 VM_BUG_ON(addr != -ENOMEM);
28125 info.flags = 0;
28126 info.low_limit = TASK_UNMAPPED_BASE;
28127+
28128+#ifdef CONFIG_PAX_RANDMMAP
28129+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
28130+ info.low_limit += current->mm->delta_mmap;
28131+#endif
28132+
28133 info.high_limit = TASK_SIZE;
28134 addr = vm_unmapped_area(&info);
28135 }
28136@@ -325,10 +339,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
28137 struct hstate *h = hstate_file(file);
28138 struct mm_struct *mm = current->mm;
28139 struct vm_area_struct *vma;
28140+ unsigned long pax_task_size = TASK_SIZE;
28141+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
28142
28143 if (len & ~huge_page_mask(h))
28144 return -EINVAL;
28145- if (len > TASK_SIZE)
28146+
28147+#ifdef CONFIG_PAX_SEGMEXEC
28148+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
28149+ pax_task_size = SEGMEXEC_TASK_SIZE;
28150+#endif
28151+
28152+ pax_task_size -= PAGE_SIZE;
28153+
28154+ if (len > pax_task_size)
28155 return -ENOMEM;
28156
28157 if (flags & MAP_FIXED) {
28158@@ -337,19 +361,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
28159 return addr;
28160 }
28161
28162+#ifdef CONFIG_PAX_RANDMMAP
28163+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
28164+#endif
28165+
28166 if (addr) {
28167 addr = ALIGN(addr, huge_page_size(h));
28168 vma = find_vma(mm, addr);
28169- if (TASK_SIZE - len >= addr &&
28170- (!vma || addr + len <= vma->vm_start))
28171+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
28172 return addr;
28173 }
28174 if (mm->get_unmapped_area == arch_get_unmapped_area)
28175 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
28176- pgoff, flags);
28177+ pgoff, flags, offset);
28178 else
28179 return hugetlb_get_unmapped_area_topdown(file, addr, len,
28180- pgoff, flags);
28181+ pgoff, flags, offset);
28182 }
28183
28184 #endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
28185diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
28186index 0c13708..689fe7f 100644
28187--- a/arch/x86/mm/init.c
28188+++ b/arch/x86/mm/init.c
28189@@ -4,6 +4,7 @@
28190 #include <linux/swap.h>
28191 #include <linux/memblock.h>
28192 #include <linux/bootmem.h> /* for max_low_pfn */
28193+#include <linux/tboot.h>
28194
28195 #include <asm/cacheflush.h>
28196 #include <asm/e820.h>
28197@@ -17,6 +18,8 @@
28198 #include <asm/proto.h>
28199 #include <asm/dma.h> /* for MAX_DMA_PFN */
28200 #include <asm/microcode.h>
28201+#include <asm/desc.h>
28202+#include <asm/bios_ebda.h>
28203
28204 #include "mm_internal.h"
28205
28206@@ -464,10 +467,40 @@ void __init init_mem_mapping(void)
28207 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
28208 * mmio resources as well as potential bios/acpi data regions.
28209 */
28210+
28211+#ifdef CONFIG_GRKERNSEC_KMEM
28212+static unsigned int ebda_start __read_only;
28213+static unsigned int ebda_end __read_only;
28214+#endif
28215+
28216 int devmem_is_allowed(unsigned long pagenr)
28217 {
28218- if (pagenr < 256)
28219+#ifdef CONFIG_GRKERNSEC_KMEM
28220+ /* allow BDA */
28221+ if (!pagenr)
28222 return 1;
28223+ /* allow EBDA */
28224+ if (pagenr >= ebda_start && pagenr < ebda_end)
28225+ return 1;
28226+ /* if tboot is in use, allow access to its hardcoded serial log range */
28227+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
28228+ return 1;
28229+#else
28230+ if (!pagenr)
28231+ return 1;
28232+#ifdef CONFIG_VM86
28233+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
28234+ return 1;
28235+#endif
28236+#endif
28237+
28238+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
28239+ return 1;
28240+#ifdef CONFIG_GRKERNSEC_KMEM
28241+ /* throw out everything else below 1MB */
28242+ if (pagenr <= 256)
28243+ return 0;
28244+#endif
28245 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
28246 return 0;
28247 if (!page_is_ram(pagenr))
28248@@ -524,8 +557,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
28249 #endif
28250 }
28251
28252+#ifdef CONFIG_GRKERNSEC_KMEM
28253+static inline void gr_init_ebda(void)
28254+{
28255+ unsigned int ebda_addr;
28256+ unsigned int ebda_size = 0;
28257+
28258+ ebda_addr = get_bios_ebda();
28259+ if (ebda_addr) {
28260+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
28261+ ebda_size <<= 10;
28262+ }
28263+ if (ebda_addr && ebda_size) {
28264+ ebda_start = ebda_addr >> PAGE_SHIFT;
28265+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
28266+ } else {
28267+ ebda_start = 0x9f000 >> PAGE_SHIFT;
28268+ ebda_end = 0xa0000 >> PAGE_SHIFT;
28269+ }
28270+}
28271+#else
28272+static inline void gr_init_ebda(void) { }
28273+#endif
28274+
28275 void free_initmem(void)
28276 {
28277+#ifdef CONFIG_PAX_KERNEXEC
28278+#ifdef CONFIG_X86_32
28279+ /* PaX: limit KERNEL_CS to actual size */
28280+ unsigned long addr, limit;
28281+ struct desc_struct d;
28282+ int cpu;
28283+#else
28284+ pgd_t *pgd;
28285+ pud_t *pud;
28286+ pmd_t *pmd;
28287+ unsigned long addr, end;
28288+#endif
28289+#endif
28290+
28291+ gr_init_ebda();
28292+
28293+#ifdef CONFIG_PAX_KERNEXEC
28294+#ifdef CONFIG_X86_32
28295+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
28296+ limit = (limit - 1UL) >> PAGE_SHIFT;
28297+
28298+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
28299+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
28300+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
28301+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
28302+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
28303+ }
28304+
28305+ /* PaX: make KERNEL_CS read-only */
28306+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
28307+ if (!paravirt_enabled())
28308+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
28309+/*
28310+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
28311+ pgd = pgd_offset_k(addr);
28312+ pud = pud_offset(pgd, addr);
28313+ pmd = pmd_offset(pud, addr);
28314+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
28315+ }
28316+*/
28317+#ifdef CONFIG_X86_PAE
28318+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
28319+/*
28320+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
28321+ pgd = pgd_offset_k(addr);
28322+ pud = pud_offset(pgd, addr);
28323+ pmd = pmd_offset(pud, addr);
28324+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
28325+ }
28326+*/
28327+#endif
28328+
28329+#ifdef CONFIG_MODULES
28330+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
28331+#endif
28332+
28333+#else
28334+ /* PaX: make kernel code/rodata read-only, rest non-executable */
28335+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
28336+ pgd = pgd_offset_k(addr);
28337+ pud = pud_offset(pgd, addr);
28338+ pmd = pmd_offset(pud, addr);
28339+ if (!pmd_present(*pmd))
28340+ continue;
28341+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
28342+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
28343+ else
28344+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
28345+ }
28346+
28347+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
28348+ end = addr + KERNEL_IMAGE_SIZE;
28349+ for (; addr < end; addr += PMD_SIZE) {
28350+ pgd = pgd_offset_k(addr);
28351+ pud = pud_offset(pgd, addr);
28352+ pmd = pmd_offset(pud, addr);
28353+ if (!pmd_present(*pmd))
28354+ continue;
28355+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
28356+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
28357+ }
28358+#endif
28359+
28360+ flush_tlb_all();
28361+#endif
28362+
28363 free_init_pages("unused kernel memory",
28364 (unsigned long)(&__init_begin),
28365 (unsigned long)(&__init_end));
28366diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
28367index 2d19001..6a1046c 100644
28368--- a/arch/x86/mm/init_32.c
28369+++ b/arch/x86/mm/init_32.c
28370@@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
28371 bool __read_mostly __vmalloc_start_set = false;
28372
28373 /*
28374- * Creates a middle page table and puts a pointer to it in the
28375- * given global directory entry. This only returns the gd entry
28376- * in non-PAE compilation mode, since the middle layer is folded.
28377- */
28378-static pmd_t * __init one_md_table_init(pgd_t *pgd)
28379-{
28380- pud_t *pud;
28381- pmd_t *pmd_table;
28382-
28383-#ifdef CONFIG_X86_PAE
28384- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
28385- pmd_table = (pmd_t *)alloc_low_page();
28386- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
28387- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
28388- pud = pud_offset(pgd, 0);
28389- BUG_ON(pmd_table != pmd_offset(pud, 0));
28390-
28391- return pmd_table;
28392- }
28393-#endif
28394- pud = pud_offset(pgd, 0);
28395- pmd_table = pmd_offset(pud, 0);
28396-
28397- return pmd_table;
28398-}
28399-
28400-/*
28401 * Create a page table and place a pointer to it in a middle page
28402 * directory entry:
28403 */
28404@@ -98,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
28405 pte_t *page_table = (pte_t *)alloc_low_page();
28406
28407 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
28408+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
28409+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
28410+#else
28411 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
28412+#endif
28413 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
28414 }
28415
28416 return pte_offset_kernel(pmd, 0);
28417 }
28418
28419+static pmd_t * __init one_md_table_init(pgd_t *pgd)
28420+{
28421+ pud_t *pud;
28422+ pmd_t *pmd_table;
28423+
28424+ pud = pud_offset(pgd, 0);
28425+ pmd_table = pmd_offset(pud, 0);
28426+
28427+ return pmd_table;
28428+}
28429+
28430 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
28431 {
28432 int pgd_idx = pgd_index(vaddr);
28433@@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
28434 int pgd_idx, pmd_idx;
28435 unsigned long vaddr;
28436 pgd_t *pgd;
28437+ pud_t *pud;
28438 pmd_t *pmd;
28439 pte_t *pte = NULL;
28440 unsigned long count = page_table_range_init_count(start, end);
28441@@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
28442 pgd = pgd_base + pgd_idx;
28443
28444 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
28445- pmd = one_md_table_init(pgd);
28446- pmd = pmd + pmd_index(vaddr);
28447+ pud = pud_offset(pgd, vaddr);
28448+ pmd = pmd_offset(pud, vaddr);
28449+
28450+#ifdef CONFIG_X86_PAE
28451+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
28452+#endif
28453+
28454 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
28455 pmd++, pmd_idx++) {
28456 pte = page_table_kmap_check(one_page_table_init(pmd),
28457@@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
28458 }
28459 }
28460
28461-static inline int is_kernel_text(unsigned long addr)
28462+static inline int is_kernel_text(unsigned long start, unsigned long end)
28463 {
28464- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
28465- return 1;
28466- return 0;
28467+ if ((start > ktla_ktva((unsigned long)_etext) ||
28468+ end <= ktla_ktva((unsigned long)_stext)) &&
28469+ (start > ktla_ktva((unsigned long)_einittext) ||
28470+ end <= ktla_ktva((unsigned long)_sinittext)) &&
28471+
28472+#ifdef CONFIG_ACPI_SLEEP
28473+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
28474+#endif
28475+
28476+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
28477+ return 0;
28478+ return 1;
28479 }
28480
28481 /*
28482@@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start,
28483 unsigned long last_map_addr = end;
28484 unsigned long start_pfn, end_pfn;
28485 pgd_t *pgd_base = swapper_pg_dir;
28486- int pgd_idx, pmd_idx, pte_ofs;
28487+ unsigned int pgd_idx, pmd_idx, pte_ofs;
28488 unsigned long pfn;
28489 pgd_t *pgd;
28490+ pud_t *pud;
28491 pmd_t *pmd;
28492 pte_t *pte;
28493 unsigned pages_2m, pages_4k;
28494@@ -291,8 +295,13 @@ repeat:
28495 pfn = start_pfn;
28496 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
28497 pgd = pgd_base + pgd_idx;
28498- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
28499- pmd = one_md_table_init(pgd);
28500+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
28501+ pud = pud_offset(pgd, 0);
28502+ pmd = pmd_offset(pud, 0);
28503+
28504+#ifdef CONFIG_X86_PAE
28505+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
28506+#endif
28507
28508 if (pfn >= end_pfn)
28509 continue;
28510@@ -304,14 +313,13 @@ repeat:
28511 #endif
28512 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
28513 pmd++, pmd_idx++) {
28514- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
28515+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
28516
28517 /*
28518 * Map with big pages if possible, otherwise
28519 * create normal page tables:
28520 */
28521 if (use_pse) {
28522- unsigned int addr2;
28523 pgprot_t prot = PAGE_KERNEL_LARGE;
28524 /*
28525 * first pass will use the same initial
28526@@ -322,11 +330,7 @@ repeat:
28527 _PAGE_PSE);
28528
28529 pfn &= PMD_MASK >> PAGE_SHIFT;
28530- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
28531- PAGE_OFFSET + PAGE_SIZE-1;
28532-
28533- if (is_kernel_text(addr) ||
28534- is_kernel_text(addr2))
28535+ if (is_kernel_text(address, address + PMD_SIZE))
28536 prot = PAGE_KERNEL_LARGE_EXEC;
28537
28538 pages_2m++;
28539@@ -343,7 +347,7 @@ repeat:
28540 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
28541 pte += pte_ofs;
28542 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
28543- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
28544+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
28545 pgprot_t prot = PAGE_KERNEL;
28546 /*
28547 * first pass will use the same initial
28548@@ -351,7 +355,7 @@ repeat:
28549 */
28550 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
28551
28552- if (is_kernel_text(addr))
28553+ if (is_kernel_text(address, address + PAGE_SIZE))
28554 prot = PAGE_KERNEL_EXEC;
28555
28556 pages_4k++;
28557@@ -482,7 +486,7 @@ void __init native_pagetable_init(void)
28558
28559 pud = pud_offset(pgd, va);
28560 pmd = pmd_offset(pud, va);
28561- if (!pmd_present(*pmd))
28562+ if (!pmd_present(*pmd)) // PAX TODO || pmd_large(*pmd))
28563 break;
28564
28565 /* should not be large page here */
28566@@ -540,12 +544,10 @@ void __init early_ioremap_page_table_range_init(void)
28567
28568 static void __init pagetable_init(void)
28569 {
28570- pgd_t *pgd_base = swapper_pg_dir;
28571-
28572- permanent_kmaps_init(pgd_base);
28573+ permanent_kmaps_init(swapper_pg_dir);
28574 }
28575
28576-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
28577+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
28578 EXPORT_SYMBOL_GPL(__supported_pte_mask);
28579
28580 /* user-defined highmem size */
28581@@ -752,6 +754,12 @@ void __init mem_init(void)
28582
28583 pci_iommu_alloc();
28584
28585+#ifdef CONFIG_PAX_PER_CPU_PGD
28586+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
28587+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
28588+ KERNEL_PGD_PTRS);
28589+#endif
28590+
28591 #ifdef CONFIG_FLATMEM
28592 BUG_ON(!mem_map);
28593 #endif
28594@@ -780,7 +788,7 @@ void __init mem_init(void)
28595 after_bootmem = 1;
28596
28597 codesize = (unsigned long) &_etext - (unsigned long) &_text;
28598- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
28599+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
28600 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
28601
28602 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
28603@@ -821,10 +829,10 @@ void __init mem_init(void)
28604 ((unsigned long)&__init_end -
28605 (unsigned long)&__init_begin) >> 10,
28606
28607- (unsigned long)&_etext, (unsigned long)&_edata,
28608- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
28609+ (unsigned long)&_sdata, (unsigned long)&_edata,
28610+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
28611
28612- (unsigned long)&_text, (unsigned long)&_etext,
28613+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
28614 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
28615
28616 /*
28617@@ -914,6 +922,7 @@ void set_kernel_text_rw(void)
28618 if (!kernel_set_to_readonly)
28619 return;
28620
28621+ start = ktla_ktva(start);
28622 pr_debug("Set kernel text: %lx - %lx for read write\n",
28623 start, start+size);
28624
28625@@ -928,6 +937,7 @@ void set_kernel_text_ro(void)
28626 if (!kernel_set_to_readonly)
28627 return;
28628
28629+ start = ktla_ktva(start);
28630 pr_debug("Set kernel text: %lx - %lx for read only\n",
28631 start, start+size);
28632
28633@@ -956,6 +966,7 @@ void mark_rodata_ro(void)
28634 unsigned long start = PFN_ALIGN(_text);
28635 unsigned long size = PFN_ALIGN(_etext) - start;
28636
28637+ start = ktla_ktva(start);
28638 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
28639 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
28640 size >> 10);
28641diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
28642index 474e28f..647dd12 100644
28643--- a/arch/x86/mm/init_64.c
28644+++ b/arch/x86/mm/init_64.c
28645@@ -150,7 +150,7 @@ early_param("gbpages", parse_direct_gbpages_on);
28646 * around without checking the pgd every time.
28647 */
28648
28649-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
28650+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
28651 EXPORT_SYMBOL_GPL(__supported_pte_mask);
28652
28653 int force_personality32;
28654@@ -183,12 +183,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
28655
28656 for (address = start; address <= end; address += PGDIR_SIZE) {
28657 const pgd_t *pgd_ref = pgd_offset_k(address);
28658+
28659+#ifdef CONFIG_PAX_PER_CPU_PGD
28660+ unsigned long cpu;
28661+#else
28662 struct page *page;
28663+#endif
28664
28665 if (pgd_none(*pgd_ref))
28666 continue;
28667
28668 spin_lock(&pgd_lock);
28669+
28670+#ifdef CONFIG_PAX_PER_CPU_PGD
28671+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
28672+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
28673+#else
28674 list_for_each_entry(page, &pgd_list, lru) {
28675 pgd_t *pgd;
28676 spinlock_t *pgt_lock;
28677@@ -197,6 +207,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
28678 /* the pgt_lock only for Xen */
28679 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
28680 spin_lock(pgt_lock);
28681+#endif
28682
28683 if (pgd_none(*pgd))
28684 set_pgd(pgd, *pgd_ref);
28685@@ -204,7 +215,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
28686 BUG_ON(pgd_page_vaddr(*pgd)
28687 != pgd_page_vaddr(*pgd_ref));
28688
28689+#ifndef CONFIG_PAX_PER_CPU_PGD
28690 spin_unlock(pgt_lock);
28691+#endif
28692+
28693 }
28694 spin_unlock(&pgd_lock);
28695 }
28696@@ -237,7 +251,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
28697 {
28698 if (pgd_none(*pgd)) {
28699 pud_t *pud = (pud_t *)spp_getpage();
28700- pgd_populate(&init_mm, pgd, pud);
28701+ pgd_populate_kernel(&init_mm, pgd, pud);
28702 if (pud != pud_offset(pgd, 0))
28703 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
28704 pud, pud_offset(pgd, 0));
28705@@ -249,7 +263,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
28706 {
28707 if (pud_none(*pud)) {
28708 pmd_t *pmd = (pmd_t *) spp_getpage();
28709- pud_populate(&init_mm, pud, pmd);
28710+ pud_populate_kernel(&init_mm, pud, pmd);
28711 if (pmd != pmd_offset(pud, 0))
28712 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
28713 pmd, pmd_offset(pud, 0));
28714@@ -278,7 +292,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
28715 pmd = fill_pmd(pud, vaddr);
28716 pte = fill_pte(pmd, vaddr);
28717
28718+ pax_open_kernel();
28719 set_pte(pte, new_pte);
28720+ pax_close_kernel();
28721
28722 /*
28723 * It's enough to flush this one mapping.
28724@@ -337,14 +353,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
28725 pgd = pgd_offset_k((unsigned long)__va(phys));
28726 if (pgd_none(*pgd)) {
28727 pud = (pud_t *) spp_getpage();
28728- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
28729- _PAGE_USER));
28730+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
28731 }
28732 pud = pud_offset(pgd, (unsigned long)__va(phys));
28733 if (pud_none(*pud)) {
28734 pmd = (pmd_t *) spp_getpage();
28735- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
28736- _PAGE_USER));
28737+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
28738 }
28739 pmd = pmd_offset(pud, phys);
28740 BUG_ON(!pmd_none(*pmd));
28741@@ -585,7 +599,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
28742 prot);
28743
28744 spin_lock(&init_mm.page_table_lock);
28745- pud_populate(&init_mm, pud, pmd);
28746+ pud_populate_kernel(&init_mm, pud, pmd);
28747 spin_unlock(&init_mm.page_table_lock);
28748 }
28749 __flush_tlb_all();
28750@@ -626,7 +640,7 @@ kernel_physical_mapping_init(unsigned long start,
28751 page_size_mask);
28752
28753 spin_lock(&init_mm.page_table_lock);
28754- pgd_populate(&init_mm, pgd, pud);
28755+ pgd_populate_kernel(&init_mm, pgd, pud);
28756 spin_unlock(&init_mm.page_table_lock);
28757 pgd_changed = true;
28758 }
28759@@ -1065,6 +1079,12 @@ void __init mem_init(void)
28760
28761 pci_iommu_alloc();
28762
28763+#ifdef CONFIG_PAX_PER_CPU_PGD
28764+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
28765+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
28766+ KERNEL_PGD_PTRS);
28767+#endif
28768+
28769 /* clear_bss() already clear the empty_zero_page */
28770
28771 reservedpages = 0;
28772@@ -1224,8 +1244,8 @@ int kern_addr_valid(unsigned long addr)
28773 static struct vm_area_struct gate_vma = {
28774 .vm_start = VSYSCALL_START,
28775 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
28776- .vm_page_prot = PAGE_READONLY_EXEC,
28777- .vm_flags = VM_READ | VM_EXEC
28778+ .vm_page_prot = PAGE_READONLY,
28779+ .vm_flags = VM_READ
28780 };
28781
28782 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
28783@@ -1259,7 +1279,7 @@ int in_gate_area_no_mm(unsigned long addr)
28784
28785 const char *arch_vma_name(struct vm_area_struct *vma)
28786 {
28787- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
28788+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
28789 return "[vdso]";
28790 if (vma == &gate_vma)
28791 return "[vsyscall]";
28792diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
28793index 7b179b4..6bd17777 100644
28794--- a/arch/x86/mm/iomap_32.c
28795+++ b/arch/x86/mm/iomap_32.c
28796@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
28797 type = kmap_atomic_idx_push();
28798 idx = type + KM_TYPE_NR * smp_processor_id();
28799 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
28800+
28801+ pax_open_kernel();
28802 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
28803+ pax_close_kernel();
28804+
28805 arch_flush_lazy_mmu_mode();
28806
28807 return (void *)vaddr;
28808diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
28809index 78fe3f1..73b95e2 100644
28810--- a/arch/x86/mm/ioremap.c
28811+++ b/arch/x86/mm/ioremap.c
28812@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
28813 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
28814 int is_ram = page_is_ram(pfn);
28815
28816- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
28817+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
28818 return NULL;
28819 WARN_ON_ONCE(is_ram);
28820 }
28821@@ -256,7 +256,7 @@ EXPORT_SYMBOL(ioremap_prot);
28822 *
28823 * Caller must ensure there is only one unmapping for the same pointer.
28824 */
28825-void iounmap(volatile void __iomem *addr)
28826+void iounmap(const volatile void __iomem *addr)
28827 {
28828 struct vm_struct *p, *o;
28829
28830@@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
28831
28832 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
28833 if (page_is_ram(start >> PAGE_SHIFT))
28834+#ifdef CONFIG_HIGHMEM
28835+ if ((start >> PAGE_SHIFT) < max_low_pfn)
28836+#endif
28837 return __va(phys);
28838
28839 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
28840@@ -327,6 +330,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
28841 void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
28842 {
28843 if (page_is_ram(phys >> PAGE_SHIFT))
28844+#ifdef CONFIG_HIGHMEM
28845+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
28846+#endif
28847 return;
28848
28849 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
28850@@ -344,7 +350,7 @@ static int __init early_ioremap_debug_setup(char *str)
28851 early_param("early_ioremap_debug", early_ioremap_debug_setup);
28852
28853 static __initdata int after_paging_init;
28854-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
28855+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
28856
28857 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
28858 {
28859@@ -381,8 +387,7 @@ void __init early_ioremap_init(void)
28860 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
28861
28862 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
28863- memset(bm_pte, 0, sizeof(bm_pte));
28864- pmd_populate_kernel(&init_mm, pmd, bm_pte);
28865+ pmd_populate_user(&init_mm, pmd, bm_pte);
28866
28867 /*
28868 * The boot-ioremap range spans multiple pmds, for which
28869diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
28870index d87dd6d..bf3fa66 100644
28871--- a/arch/x86/mm/kmemcheck/kmemcheck.c
28872+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
28873@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
28874 * memory (e.g. tracked pages)? For now, we need this to avoid
28875 * invoking kmemcheck for PnP BIOS calls.
28876 */
28877- if (regs->flags & X86_VM_MASK)
28878+ if (v8086_mode(regs))
28879 return false;
28880- if (regs->cs != __KERNEL_CS)
28881+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
28882 return false;
28883
28884 pte = kmemcheck_pte_lookup(address);
28885diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
28886index 845df68..1d8d29f 100644
28887--- a/arch/x86/mm/mmap.c
28888+++ b/arch/x86/mm/mmap.c
28889@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
28890 * Leave an at least ~128 MB hole with possible stack randomization.
28891 */
28892 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
28893-#define MAX_GAP (TASK_SIZE/6*5)
28894+#define MAX_GAP (pax_task_size/6*5)
28895
28896 static int mmap_is_legacy(void)
28897 {
28898@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
28899 return rnd << PAGE_SHIFT;
28900 }
28901
28902-static unsigned long mmap_base(void)
28903+static unsigned long mmap_base(struct mm_struct *mm)
28904 {
28905 unsigned long gap = rlimit(RLIMIT_STACK);
28906+ unsigned long pax_task_size = TASK_SIZE;
28907+
28908+#ifdef CONFIG_PAX_SEGMEXEC
28909+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
28910+ pax_task_size = SEGMEXEC_TASK_SIZE;
28911+#endif
28912
28913 if (gap < MIN_GAP)
28914 gap = MIN_GAP;
28915 else if (gap > MAX_GAP)
28916 gap = MAX_GAP;
28917
28918- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
28919+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
28920 }
28921
28922 /*
28923 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
28924 * does, but not when emulating X86_32
28925 */
28926-static unsigned long mmap_legacy_base(void)
28927+static unsigned long mmap_legacy_base(struct mm_struct *mm)
28928 {
28929- if (mmap_is_ia32())
28930+ if (mmap_is_ia32()) {
28931+
28932+#ifdef CONFIG_PAX_SEGMEXEC
28933+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
28934+ return SEGMEXEC_TASK_UNMAPPED_BASE;
28935+ else
28936+#endif
28937+
28938 return TASK_UNMAPPED_BASE;
28939- else
28940+ } else
28941 return TASK_UNMAPPED_BASE + mmap_rnd();
28942 }
28943
28944@@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
28945 void arch_pick_mmap_layout(struct mm_struct *mm)
28946 {
28947 if (mmap_is_legacy()) {
28948- mm->mmap_base = mmap_legacy_base();
28949+ mm->mmap_base = mmap_legacy_base(mm);
28950+
28951+#ifdef CONFIG_PAX_RANDMMAP
28952+ if (mm->pax_flags & MF_PAX_RANDMMAP)
28953+ mm->mmap_base += mm->delta_mmap;
28954+#endif
28955+
28956 mm->get_unmapped_area = arch_get_unmapped_area;
28957 mm->unmap_area = arch_unmap_area;
28958 } else {
28959- mm->mmap_base = mmap_base();
28960+ mm->mmap_base = mmap_base(mm);
28961+
28962+#ifdef CONFIG_PAX_RANDMMAP
28963+ if (mm->pax_flags & MF_PAX_RANDMMAP)
28964+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
28965+#endif
28966+
28967 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
28968 mm->unmap_area = arch_unmap_area_topdown;
28969 }
28970diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
28971index dc0b727..f612039 100644
28972--- a/arch/x86/mm/mmio-mod.c
28973+++ b/arch/x86/mm/mmio-mod.c
28974@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
28975 break;
28976 default:
28977 {
28978- unsigned char *ip = (unsigned char *)instptr;
28979+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
28980 my_trace->opcode = MMIO_UNKNOWN_OP;
28981 my_trace->width = 0;
28982 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
28983@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
28984 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
28985 void __iomem *addr)
28986 {
28987- static atomic_t next_id;
28988+ static atomic_unchecked_t next_id;
28989 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
28990 /* These are page-unaligned. */
28991 struct mmiotrace_map map = {
28992@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
28993 .private = trace
28994 },
28995 .phys = offset,
28996- .id = atomic_inc_return(&next_id)
28997+ .id = atomic_inc_return_unchecked(&next_id)
28998 };
28999 map.map_id = trace->id;
29000
29001@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
29002 ioremap_trace_core(offset, size, addr);
29003 }
29004
29005-static void iounmap_trace_core(volatile void __iomem *addr)
29006+static void iounmap_trace_core(const volatile void __iomem *addr)
29007 {
29008 struct mmiotrace_map map = {
29009 .phys = 0,
29010@@ -328,7 +328,7 @@ not_enabled:
29011 }
29012 }
29013
29014-void mmiotrace_iounmap(volatile void __iomem *addr)
29015+void mmiotrace_iounmap(const volatile void __iomem *addr)
29016 {
29017 might_sleep();
29018 if (is_enabled()) /* recheck and proper locking in *_core() */
29019diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
29020index 72fe01e..f1a8daa 100644
29021--- a/arch/x86/mm/numa.c
29022+++ b/arch/x86/mm/numa.c
29023@@ -477,7 +477,7 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
29024 return true;
29025 }
29026
29027-static int __init numa_register_memblks(struct numa_meminfo *mi)
29028+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
29029 {
29030 unsigned long uninitialized_var(pfn_align);
29031 int i, nid;
29032diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
29033index 0e38951..4ca8458 100644
29034--- a/arch/x86/mm/pageattr-test.c
29035+++ b/arch/x86/mm/pageattr-test.c
29036@@ -36,7 +36,7 @@ enum {
29037
29038 static int pte_testbit(pte_t pte)
29039 {
29040- return pte_flags(pte) & _PAGE_UNUSED1;
29041+ return pte_flags(pte) & _PAGE_CPA_TEST;
29042 }
29043
29044 struct split_state {
29045diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
29046index fb4e73e..43f7238 100644
29047--- a/arch/x86/mm/pageattr.c
29048+++ b/arch/x86/mm/pageattr.c
29049@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
29050 */
29051 #ifdef CONFIG_PCI_BIOS
29052 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
29053- pgprot_val(forbidden) |= _PAGE_NX;
29054+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
29055 #endif
29056
29057 /*
29058@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
29059 * Does not cover __inittext since that is gone later on. On
29060 * 64bit we do not enforce !NX on the low mapping
29061 */
29062- if (within(address, (unsigned long)_text, (unsigned long)_etext))
29063- pgprot_val(forbidden) |= _PAGE_NX;
29064+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
29065+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
29066
29067+#ifdef CONFIG_DEBUG_RODATA
29068 /*
29069 * The .rodata section needs to be read-only. Using the pfn
29070 * catches all aliases.
29071@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
29072 if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
29073 __pa_symbol(__end_rodata) >> PAGE_SHIFT))
29074 pgprot_val(forbidden) |= _PAGE_RW;
29075+#endif
29076
29077 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
29078 /*
29079@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
29080 }
29081 #endif
29082
29083+#ifdef CONFIG_PAX_KERNEXEC
29084+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
29085+ pgprot_val(forbidden) |= _PAGE_RW;
29086+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
29087+ }
29088+#endif
29089+
29090 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
29091
29092 return prot;
29093@@ -400,23 +409,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
29094 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
29095 {
29096 /* change init_mm */
29097+ pax_open_kernel();
29098 set_pte_atomic(kpte, pte);
29099+
29100 #ifdef CONFIG_X86_32
29101 if (!SHARED_KERNEL_PMD) {
29102+
29103+#ifdef CONFIG_PAX_PER_CPU_PGD
29104+ unsigned long cpu;
29105+#else
29106 struct page *page;
29107+#endif
29108
29109+#ifdef CONFIG_PAX_PER_CPU_PGD
29110+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
29111+ pgd_t *pgd = get_cpu_pgd(cpu);
29112+#else
29113 list_for_each_entry(page, &pgd_list, lru) {
29114- pgd_t *pgd;
29115+ pgd_t *pgd = (pgd_t *)page_address(page);
29116+#endif
29117+
29118 pud_t *pud;
29119 pmd_t *pmd;
29120
29121- pgd = (pgd_t *)page_address(page) + pgd_index(address);
29122+ pgd += pgd_index(address);
29123 pud = pud_offset(pgd, address);
29124 pmd = pmd_offset(pud, address);
29125 set_pte_atomic((pte_t *)pmd, pte);
29126 }
29127 }
29128 #endif
29129+ pax_close_kernel();
29130 }
29131
29132 static int
29133diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
29134index 6574388..87e9bef 100644
29135--- a/arch/x86/mm/pat.c
29136+++ b/arch/x86/mm/pat.c
29137@@ -376,7 +376,7 @@ int free_memtype(u64 start, u64 end)
29138
29139 if (!entry) {
29140 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
29141- current->comm, current->pid, start, end - 1);
29142+ current->comm, task_pid_nr(current), start, end - 1);
29143 return -EINVAL;
29144 }
29145
29146@@ -506,8 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29147
29148 while (cursor < to) {
29149 if (!devmem_is_allowed(pfn)) {
29150- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
29151- current->comm, from, to - 1);
29152+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
29153+ current->comm, from, to - 1, cursor);
29154 return 0;
29155 }
29156 cursor += PAGE_SIZE;
29157@@ -577,7 +577,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
29158 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
29159 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
29160 "for [mem %#010Lx-%#010Lx]\n",
29161- current->comm, current->pid,
29162+ current->comm, task_pid_nr(current),
29163 cattr_name(flags),
29164 base, (unsigned long long)(base + size-1));
29165 return -EINVAL;
29166@@ -612,7 +612,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
29167 flags = lookup_memtype(paddr);
29168 if (want_flags != flags) {
29169 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
29170- current->comm, current->pid,
29171+ current->comm, task_pid_nr(current),
29172 cattr_name(want_flags),
29173 (unsigned long long)paddr,
29174 (unsigned long long)(paddr + size - 1),
29175@@ -634,7 +634,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
29176 free_memtype(paddr, paddr + size);
29177 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
29178 " for [mem %#010Lx-%#010Lx], got %s\n",
29179- current->comm, current->pid,
29180+ current->comm, task_pid_nr(current),
29181 cattr_name(want_flags),
29182 (unsigned long long)paddr,
29183 (unsigned long long)(paddr + size - 1),
29184diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
29185index 9f0614d..92ae64a 100644
29186--- a/arch/x86/mm/pf_in.c
29187+++ b/arch/x86/mm/pf_in.c
29188@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
29189 int i;
29190 enum reason_type rv = OTHERS;
29191
29192- p = (unsigned char *)ins_addr;
29193+ p = (unsigned char *)ktla_ktva(ins_addr);
29194 p += skip_prefix(p, &prf);
29195 p += get_opcode(p, &opcode);
29196
29197@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
29198 struct prefix_bits prf;
29199 int i;
29200
29201- p = (unsigned char *)ins_addr;
29202+ p = (unsigned char *)ktla_ktva(ins_addr);
29203 p += skip_prefix(p, &prf);
29204 p += get_opcode(p, &opcode);
29205
29206@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
29207 struct prefix_bits prf;
29208 int i;
29209
29210- p = (unsigned char *)ins_addr;
29211+ p = (unsigned char *)ktla_ktva(ins_addr);
29212 p += skip_prefix(p, &prf);
29213 p += get_opcode(p, &opcode);
29214
29215@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
29216 struct prefix_bits prf;
29217 int i;
29218
29219- p = (unsigned char *)ins_addr;
29220+ p = (unsigned char *)ktla_ktva(ins_addr);
29221 p += skip_prefix(p, &prf);
29222 p += get_opcode(p, &opcode);
29223 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
29224@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
29225 struct prefix_bits prf;
29226 int i;
29227
29228- p = (unsigned char *)ins_addr;
29229+ p = (unsigned char *)ktla_ktva(ins_addr);
29230 p += skip_prefix(p, &prf);
29231 p += get_opcode(p, &opcode);
29232 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
29233diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
29234index 17fda6a..489c74a 100644
29235--- a/arch/x86/mm/pgtable.c
29236+++ b/arch/x86/mm/pgtable.c
29237@@ -91,10 +91,64 @@ static inline void pgd_list_del(pgd_t *pgd)
29238 list_del(&page->lru);
29239 }
29240
29241-#define UNSHARED_PTRS_PER_PGD \
29242- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
29243+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29244+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
29245
29246+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
29247+{
29248+ unsigned int count = USER_PGD_PTRS;
29249
29250+ while (count--)
29251+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
29252+}
29253+#endif
29254+
29255+#ifdef CONFIG_PAX_PER_CPU_PGD
29256+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
29257+{
29258+ unsigned int count = USER_PGD_PTRS;
29259+
29260+ while (count--) {
29261+ pgd_t pgd;
29262+
29263+#ifdef CONFIG_X86_64
29264+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
29265+#else
29266+ pgd = *src++;
29267+#endif
29268+
29269+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29270+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
29271+#endif
29272+
29273+ *dst++ = pgd;
29274+ }
29275+
29276+}
29277+#endif
29278+
29279+#ifdef CONFIG_X86_64
29280+#define pxd_t pud_t
29281+#define pyd_t pgd_t
29282+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
29283+#define pxd_free(mm, pud) pud_free((mm), (pud))
29284+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
29285+#define pyd_offset(mm, address) pgd_offset((mm), (address))
29286+#define PYD_SIZE PGDIR_SIZE
29287+#else
29288+#define pxd_t pmd_t
29289+#define pyd_t pud_t
29290+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
29291+#define pxd_free(mm, pud) pmd_free((mm), (pud))
29292+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
29293+#define pyd_offset(mm, address) pud_offset((mm), (address))
29294+#define PYD_SIZE PUD_SIZE
29295+#endif
29296+
29297+#ifdef CONFIG_PAX_PER_CPU_PGD
29298+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
29299+static inline void pgd_dtor(pgd_t *pgd) {}
29300+#else
29301 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
29302 {
29303 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
29304@@ -135,6 +189,7 @@ static void pgd_dtor(pgd_t *pgd)
29305 pgd_list_del(pgd);
29306 spin_unlock(&pgd_lock);
29307 }
29308+#endif
29309
29310 /*
29311 * List of all pgd's needed for non-PAE so it can invalidate entries
29312@@ -147,7 +202,7 @@ static void pgd_dtor(pgd_t *pgd)
29313 * -- nyc
29314 */
29315
29316-#ifdef CONFIG_X86_PAE
29317+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
29318 /*
29319 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
29320 * updating the top-level pagetable entries to guarantee the
29321@@ -159,7 +214,7 @@ static void pgd_dtor(pgd_t *pgd)
29322 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
29323 * and initialize the kernel pmds here.
29324 */
29325-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
29326+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
29327
29328 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
29329 {
29330@@ -177,36 +232,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
29331 */
29332 flush_tlb_mm(mm);
29333 }
29334+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
29335+#define PREALLOCATED_PXDS USER_PGD_PTRS
29336 #else /* !CONFIG_X86_PAE */
29337
29338 /* No need to prepopulate any pagetable entries in non-PAE modes. */
29339-#define PREALLOCATED_PMDS 0
29340+#define PREALLOCATED_PXDS 0
29341
29342 #endif /* CONFIG_X86_PAE */
29343
29344-static void free_pmds(pmd_t *pmds[])
29345+static void free_pxds(pxd_t *pxds[])
29346 {
29347 int i;
29348
29349- for(i = 0; i < PREALLOCATED_PMDS; i++)
29350- if (pmds[i])
29351- free_page((unsigned long)pmds[i]);
29352+ for(i = 0; i < PREALLOCATED_PXDS; i++)
29353+ if (pxds[i])
29354+ free_page((unsigned long)pxds[i]);
29355 }
29356
29357-static int preallocate_pmds(pmd_t *pmds[])
29358+static int preallocate_pxds(pxd_t *pxds[])
29359 {
29360 int i;
29361 bool failed = false;
29362
29363- for(i = 0; i < PREALLOCATED_PMDS; i++) {
29364- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
29365- if (pmd == NULL)
29366+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
29367+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
29368+ if (pxd == NULL)
29369 failed = true;
29370- pmds[i] = pmd;
29371+ pxds[i] = pxd;
29372 }
29373
29374 if (failed) {
29375- free_pmds(pmds);
29376+ free_pxds(pxds);
29377 return -ENOMEM;
29378 }
29379
29380@@ -219,51 +276,55 @@ static int preallocate_pmds(pmd_t *pmds[])
29381 * preallocate which never got a corresponding vma will need to be
29382 * freed manually.
29383 */
29384-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
29385+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
29386 {
29387 int i;
29388
29389- for(i = 0; i < PREALLOCATED_PMDS; i++) {
29390+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
29391 pgd_t pgd = pgdp[i];
29392
29393 if (pgd_val(pgd) != 0) {
29394- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
29395+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
29396
29397- pgdp[i] = native_make_pgd(0);
29398+ set_pgd(pgdp + i, native_make_pgd(0));
29399
29400- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
29401- pmd_free(mm, pmd);
29402+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
29403+ pxd_free(mm, pxd);
29404 }
29405 }
29406 }
29407
29408-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
29409+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
29410 {
29411- pud_t *pud;
29412+ pyd_t *pyd;
29413 unsigned long addr;
29414 int i;
29415
29416- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
29417+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
29418 return;
29419
29420- pud = pud_offset(pgd, 0);
29421+#ifdef CONFIG_X86_64
29422+ pyd = pyd_offset(mm, 0L);
29423+#else
29424+ pyd = pyd_offset(pgd, 0L);
29425+#endif
29426
29427- for (addr = i = 0; i < PREALLOCATED_PMDS;
29428- i++, pud++, addr += PUD_SIZE) {
29429- pmd_t *pmd = pmds[i];
29430+ for (addr = i = 0; i < PREALLOCATED_PXDS;
29431+ i++, pyd++, addr += PYD_SIZE) {
29432+ pxd_t *pxd = pxds[i];
29433
29434 if (i >= KERNEL_PGD_BOUNDARY)
29435- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
29436- sizeof(pmd_t) * PTRS_PER_PMD);
29437+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
29438+ sizeof(pxd_t) * PTRS_PER_PMD);
29439
29440- pud_populate(mm, pud, pmd);
29441+ pyd_populate(mm, pyd, pxd);
29442 }
29443 }
29444
29445 pgd_t *pgd_alloc(struct mm_struct *mm)
29446 {
29447 pgd_t *pgd;
29448- pmd_t *pmds[PREALLOCATED_PMDS];
29449+ pxd_t *pxds[PREALLOCATED_PXDS];
29450
29451 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
29452
29453@@ -272,11 +333,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
29454
29455 mm->pgd = pgd;
29456
29457- if (preallocate_pmds(pmds) != 0)
29458+ if (preallocate_pxds(pxds) != 0)
29459 goto out_free_pgd;
29460
29461 if (paravirt_pgd_alloc(mm) != 0)
29462- goto out_free_pmds;
29463+ goto out_free_pxds;
29464
29465 /*
29466 * Make sure that pre-populating the pmds is atomic with
29467@@ -286,14 +347,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
29468 spin_lock(&pgd_lock);
29469
29470 pgd_ctor(mm, pgd);
29471- pgd_prepopulate_pmd(mm, pgd, pmds);
29472+ pgd_prepopulate_pxd(mm, pgd, pxds);
29473
29474 spin_unlock(&pgd_lock);
29475
29476 return pgd;
29477
29478-out_free_pmds:
29479- free_pmds(pmds);
29480+out_free_pxds:
29481+ free_pxds(pxds);
29482 out_free_pgd:
29483 free_page((unsigned long)pgd);
29484 out:
29485@@ -302,7 +363,7 @@ out:
29486
29487 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
29488 {
29489- pgd_mop_up_pmds(mm, pgd);
29490+ pgd_mop_up_pxds(mm, pgd);
29491 pgd_dtor(pgd);
29492 paravirt_pgd_free(mm, pgd);
29493 free_page((unsigned long)pgd);
29494diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
29495index a69bcb8..19068ab 100644
29496--- a/arch/x86/mm/pgtable_32.c
29497+++ b/arch/x86/mm/pgtable_32.c
29498@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
29499 return;
29500 }
29501 pte = pte_offset_kernel(pmd, vaddr);
29502+
29503+ pax_open_kernel();
29504 if (pte_val(pteval))
29505 set_pte_at(&init_mm, vaddr, pte, pteval);
29506 else
29507 pte_clear(&init_mm, vaddr, pte);
29508+ pax_close_kernel();
29509
29510 /*
29511 * It's enough to flush this one mapping.
29512diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
29513index e666cbb..61788c45 100644
29514--- a/arch/x86/mm/physaddr.c
29515+++ b/arch/x86/mm/physaddr.c
29516@@ -10,7 +10,7 @@
29517 #ifdef CONFIG_X86_64
29518
29519 #ifdef CONFIG_DEBUG_VIRTUAL
29520-unsigned long __phys_addr(unsigned long x)
29521+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
29522 {
29523 unsigned long y = x - __START_KERNEL_map;
29524
29525@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
29526 #else
29527
29528 #ifdef CONFIG_DEBUG_VIRTUAL
29529-unsigned long __phys_addr(unsigned long x)
29530+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
29531 {
29532 unsigned long phys_addr = x - PAGE_OFFSET;
29533 /* VMALLOC_* aren't constants */
29534diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
29535index 410531d..0f16030 100644
29536--- a/arch/x86/mm/setup_nx.c
29537+++ b/arch/x86/mm/setup_nx.c
29538@@ -5,8 +5,10 @@
29539 #include <asm/pgtable.h>
29540 #include <asm/proto.h>
29541
29542+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
29543 static int disable_nx __cpuinitdata;
29544
29545+#ifndef CONFIG_PAX_PAGEEXEC
29546 /*
29547 * noexec = on|off
29548 *
29549@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
29550 return 0;
29551 }
29552 early_param("noexec", noexec_setup);
29553+#endif
29554+
29555+#endif
29556
29557 void __cpuinit x86_configure_nx(void)
29558 {
29559+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
29560 if (cpu_has_nx && !disable_nx)
29561 __supported_pte_mask |= _PAGE_NX;
29562 else
29563+#endif
29564 __supported_pte_mask &= ~_PAGE_NX;
29565 }
29566
29567diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
29568index 282375f..e03a98f 100644
29569--- a/arch/x86/mm/tlb.c
29570+++ b/arch/x86/mm/tlb.c
29571@@ -48,7 +48,11 @@ void leave_mm(int cpu)
29572 BUG();
29573 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
29574 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
29575+
29576+#ifndef CONFIG_PAX_PER_CPU_PGD
29577 load_cr3(swapper_pg_dir);
29578+#endif
29579+
29580 }
29581 }
29582 EXPORT_SYMBOL_GPL(leave_mm);
29583diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
29584index 877b9a1..a8ecf42 100644
29585--- a/arch/x86/net/bpf_jit.S
29586+++ b/arch/x86/net/bpf_jit.S
29587@@ -9,6 +9,7 @@
29588 */
29589 #include <linux/linkage.h>
29590 #include <asm/dwarf2.h>
29591+#include <asm/alternative-asm.h>
29592
29593 /*
29594 * Calling convention :
29595@@ -35,6 +36,7 @@ sk_load_word_positive_offset:
29596 jle bpf_slow_path_word
29597 mov (SKBDATA,%rsi),%eax
29598 bswap %eax /* ntohl() */
29599+ pax_force_retaddr
29600 ret
29601
29602 sk_load_half:
29603@@ -52,6 +54,7 @@ sk_load_half_positive_offset:
29604 jle bpf_slow_path_half
29605 movzwl (SKBDATA,%rsi),%eax
29606 rol $8,%ax # ntohs()
29607+ pax_force_retaddr
29608 ret
29609
29610 sk_load_byte:
29611@@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
29612 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
29613 jle bpf_slow_path_byte
29614 movzbl (SKBDATA,%rsi),%eax
29615+ pax_force_retaddr
29616 ret
29617
29618 /**
29619@@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
29620 movzbl (SKBDATA,%rsi),%ebx
29621 and $15,%bl
29622 shl $2,%bl
29623+ pax_force_retaddr
29624 ret
29625
29626 /* rsi contains offset and can be scratched */
29627@@ -109,6 +114,7 @@ bpf_slow_path_word:
29628 js bpf_error
29629 mov -12(%rbp),%eax
29630 bswap %eax
29631+ pax_force_retaddr
29632 ret
29633
29634 bpf_slow_path_half:
29635@@ -117,12 +123,14 @@ bpf_slow_path_half:
29636 mov -12(%rbp),%ax
29637 rol $8,%ax
29638 movzwl %ax,%eax
29639+ pax_force_retaddr
29640 ret
29641
29642 bpf_slow_path_byte:
29643 bpf_slow_path_common(1)
29644 js bpf_error
29645 movzbl -12(%rbp),%eax
29646+ pax_force_retaddr
29647 ret
29648
29649 bpf_slow_path_byte_msh:
29650@@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
29651 and $15,%al
29652 shl $2,%al
29653 xchg %eax,%ebx
29654+ pax_force_retaddr
29655 ret
29656
29657 #define sk_negative_common(SIZE) \
29658@@ -157,6 +166,7 @@ sk_load_word_negative_offset:
29659 sk_negative_common(4)
29660 mov (%rax), %eax
29661 bswap %eax
29662+ pax_force_retaddr
29663 ret
29664
29665 bpf_slow_path_half_neg:
29666@@ -168,6 +178,7 @@ sk_load_half_negative_offset:
29667 mov (%rax),%ax
29668 rol $8,%ax
29669 movzwl %ax,%eax
29670+ pax_force_retaddr
29671 ret
29672
29673 bpf_slow_path_byte_neg:
29674@@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
29675 .globl sk_load_byte_negative_offset
29676 sk_negative_common(1)
29677 movzbl (%rax), %eax
29678+ pax_force_retaddr
29679 ret
29680
29681 bpf_slow_path_byte_msh_neg:
29682@@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
29683 and $15,%al
29684 shl $2,%al
29685 xchg %eax,%ebx
29686+ pax_force_retaddr
29687 ret
29688
29689 bpf_error:
29690@@ -197,4 +210,5 @@ bpf_error:
29691 xor %eax,%eax
29692 mov -8(%rbp),%rbx
29693 leaveq
29694+ pax_force_retaddr
29695 ret
29696diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
29697index 3cbe4538..003d011 100644
29698--- a/arch/x86/net/bpf_jit_comp.c
29699+++ b/arch/x86/net/bpf_jit_comp.c
29700@@ -12,6 +12,7 @@
29701 #include <linux/netdevice.h>
29702 #include <linux/filter.h>
29703 #include <linux/if_vlan.h>
29704+#include <linux/random.h>
29705
29706 /*
29707 * Conventions :
29708@@ -49,13 +50,90 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
29709 return ptr + len;
29710 }
29711
29712+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29713+#define MAX_INSTR_CODE_SIZE 96
29714+#else
29715+#define MAX_INSTR_CODE_SIZE 64
29716+#endif
29717+
29718 #define EMIT(bytes, len) do { prog = emit_code(prog, bytes, len); } while (0)
29719
29720 #define EMIT1(b1) EMIT(b1, 1)
29721 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
29722 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
29723 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
29724+
29725+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29726+/* original constant will appear in ecx */
29727+#define DILUTE_CONST_SEQUENCE(_off, _key) \
29728+do { \
29729+ /* mov ecx, randkey */ \
29730+ EMIT1(0xb9); \
29731+ EMIT(_key, 4); \
29732+ /* xor ecx, randkey ^ off */ \
29733+ EMIT2(0x81, 0xf1); \
29734+ EMIT((_key) ^ (_off), 4); \
29735+} while (0)
29736+
29737+#define EMIT1_off32(b1, _off) \
29738+do { \
29739+ switch (b1) { \
29740+ case 0x05: /* add eax, imm32 */ \
29741+ case 0x2d: /* sub eax, imm32 */ \
29742+ case 0x25: /* and eax, imm32 */ \
29743+ case 0x0d: /* or eax, imm32 */ \
29744+ case 0xb8: /* mov eax, imm32 */ \
29745+ case 0x35: /* xor eax, imm32 */ \
29746+ case 0x3d: /* cmp eax, imm32 */ \
29747+ case 0xa9: /* test eax, imm32 */ \
29748+ DILUTE_CONST_SEQUENCE(_off, randkey); \
29749+ EMIT2((b1) - 4, 0xc8); /* convert imm instruction to eax, ecx */\
29750+ break; \
29751+ case 0xbb: /* mov ebx, imm32 */ \
29752+ DILUTE_CONST_SEQUENCE(_off, randkey); \
29753+ /* mov ebx, ecx */ \
29754+ EMIT2(0x89, 0xcb); \
29755+ break; \
29756+ case 0xbe: /* mov esi, imm32 */ \
29757+ DILUTE_CONST_SEQUENCE(_off, randkey); \
29758+ /* mov esi, ecx */ \
29759+ EMIT2(0x89, 0xce); \
29760+ break; \
29761+ case 0xe8: /* call rel imm32, always to known funcs */ \
29762+ EMIT1(b1); \
29763+ EMIT(_off, 4); \
29764+ break; \
29765+ case 0xe9: /* jmp rel imm32 */ \
29766+ EMIT1(b1); \
29767+ EMIT(_off, 4); \
29768+ /* prevent fall-through, we're not called if off = 0 */ \
29769+ EMIT(0xcccccccc, 4); \
29770+ EMIT(0xcccccccc, 4); \
29771+ break; \
29772+ default: \
29773+ BUILD_BUG(); \
29774+ } \
29775+} while (0)
29776+
29777+#define EMIT2_off32(b1, b2, _off) \
29778+do { \
29779+ if ((b1) == 0x8d && (b2) == 0xb3) { /* lea esi, [rbx+imm32] */ \
29780+ EMIT2(0x8d, 0xb3); /* lea esi, [rbx+randkey] */ \
29781+ EMIT(randkey, 4); \
29782+ EMIT2(0x8d, 0xb6); /* lea esi, [esi+off-randkey] */ \
29783+ EMIT((_off) - randkey, 4); \
29784+ } else if ((b1) == 0x69 && (b2) == 0xc0) { /* imul eax, imm32 */\
29785+ DILUTE_CONST_SEQUENCE(_off, randkey); \
29786+ /* imul eax, ecx */ \
29787+ EMIT3(0x0f, 0xaf, 0xc1); \
29788+ } else { \
29789+ BUILD_BUG(); \
29790+ } \
29791+} while (0)
29792+#else
29793 #define EMIT1_off32(b1, off) do { EMIT1(b1); EMIT(off, 4);} while (0)
29794+#define EMIT2_off32(b1, b2, off) do { EMIT2(b1, b2); EMIT(off, 4);} while (0)
29795+#endif
29796
29797 #define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
29798 #define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
29799@@ -90,6 +168,24 @@ do { \
29800 #define X86_JBE 0x76
29801 #define X86_JA 0x77
29802
29803+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29804+#define APPEND_FLOW_VERIFY() \
29805+do { \
29806+ /* mov ecx, randkey */ \
29807+ EMIT1(0xb9); \
29808+ EMIT(randkey, 4); \
29809+ /* cmp ecx, randkey */ \
29810+ EMIT2(0x81, 0xf9); \
29811+ EMIT(randkey, 4); \
29812+ /* jz after 8 int 3s */ \
29813+ EMIT2(0x74, 0x08); \
29814+ EMIT(0xcccccccc, 4); \
29815+ EMIT(0xcccccccc, 4); \
29816+} while (0)
29817+#else
29818+#define APPEND_FLOW_VERIFY() do { } while (0)
29819+#endif
29820+
29821 #define EMIT_COND_JMP(op, offset) \
29822 do { \
29823 if (is_near(offset)) \
29824@@ -97,6 +193,7 @@ do { \
29825 else { \
29826 EMIT2(0x0f, op + 0x10); \
29827 EMIT(offset, 4); /* jxx .+off32 */ \
29828+ APPEND_FLOW_VERIFY(); \
29829 } \
29830 } while (0)
29831
29832@@ -121,6 +218,11 @@ static inline void bpf_flush_icache(void *start, void *end)
29833 set_fs(old_fs);
29834 }
29835
29836+struct bpf_jit_work {
29837+ struct work_struct work;
29838+ void *image;
29839+};
29840+
29841 #define CHOOSE_LOAD_FUNC(K, func) \
29842 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
29843
29844@@ -146,7 +248,7 @@ static int pkt_type_offset(void)
29845
29846 void bpf_jit_compile(struct sk_filter *fp)
29847 {
29848- u8 temp[64];
29849+ u8 temp[MAX_INSTR_CODE_SIZE];
29850 u8 *prog;
29851 unsigned int proglen, oldproglen = 0;
29852 int ilen, i;
29853@@ -159,6 +261,9 @@ void bpf_jit_compile(struct sk_filter *fp)
29854 unsigned int *addrs;
29855 const struct sock_filter *filter = fp->insns;
29856 int flen = fp->len;
29857+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29858+ unsigned int randkey;
29859+#endif
29860
29861 if (!bpf_jit_enable)
29862 return;
29863@@ -167,11 +272,19 @@ void bpf_jit_compile(struct sk_filter *fp)
29864 if (addrs == NULL)
29865 return;
29866
29867+ fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
29868+ if (!fp->work)
29869+ goto out;
29870+
29871+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29872+ randkey = get_random_int();
29873+#endif
29874+
29875 /* Before first pass, make a rough estimation of addrs[]
29876- * each bpf instruction is translated to less than 64 bytes
29877+ * each bpf instruction is translated to less than MAX_INSTR_CODE_SIZE bytes
29878 */
29879 for (proglen = 0, i = 0; i < flen; i++) {
29880- proglen += 64;
29881+ proglen += MAX_INSTR_CODE_SIZE;
29882 addrs[i] = proglen;
29883 }
29884 cleanup_addr = proglen; /* epilogue address */
29885@@ -282,10 +395,8 @@ void bpf_jit_compile(struct sk_filter *fp)
29886 case BPF_S_ALU_MUL_K: /* A *= K */
29887 if (is_imm8(K))
29888 EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
29889- else {
29890- EMIT2(0x69, 0xc0); /* imul imm32,%eax */
29891- EMIT(K, 4);
29892- }
29893+ else
29894+ EMIT2_off32(0x69, 0xc0, K); /* imul imm32,%eax */
29895 break;
29896 case BPF_S_ALU_DIV_X: /* A /= X; */
29897 seen |= SEEN_XREG;
29898@@ -325,13 +436,23 @@ void bpf_jit_compile(struct sk_filter *fp)
29899 break;
29900 case BPF_S_ALU_MOD_K: /* A %= K; */
29901 EMIT2(0x31, 0xd2); /* xor %edx,%edx */
29902+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29903+ DILUTE_CONST_SEQUENCE(K, randkey);
29904+#else
29905 EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
29906+#endif
29907 EMIT2(0xf7, 0xf1); /* div %ecx */
29908 EMIT2(0x89, 0xd0); /* mov %edx,%eax */
29909 break;
29910 case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
29911+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29912+ DILUTE_CONST_SEQUENCE(K, randkey);
29913+ // imul rax, rcx
29914+ EMIT4(0x48, 0x0f, 0xaf, 0xc1);
29915+#else
29916 EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
29917 EMIT(K, 4);
29918+#endif
29919 EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */
29920 break;
29921 case BPF_S_ALU_AND_X:
29922@@ -602,8 +723,7 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
29923 if (is_imm8(K)) {
29924 EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
29925 } else {
29926- EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
29927- EMIT(K, 4);
29928+ EMIT2_off32(0x8d, 0xb3, K); /* lea imm32(%rbx),%esi */
29929 }
29930 } else {
29931 EMIT2(0x89,0xde); /* mov %ebx,%esi */
29932@@ -686,17 +806,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
29933 break;
29934 default:
29935 /* hmm, too complex filter, give up with jit compiler */
29936- goto out;
29937+ goto error;
29938 }
29939 ilen = prog - temp;
29940 if (image) {
29941 if (unlikely(proglen + ilen > oldproglen)) {
29942 pr_err("bpb_jit_compile fatal error\n");
29943- kfree(addrs);
29944- module_free(NULL, image);
29945- return;
29946+ module_free_exec(NULL, image);
29947+ goto error;
29948 }
29949+ pax_open_kernel();
29950 memcpy(image + proglen, temp, ilen);
29951+ pax_close_kernel();
29952 }
29953 proglen += ilen;
29954 addrs[i] = proglen;
29955@@ -717,11 +838,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
29956 break;
29957 }
29958 if (proglen == oldproglen) {
29959- image = module_alloc(max_t(unsigned int,
29960- proglen,
29961- sizeof(struct work_struct)));
29962+ image = module_alloc_exec(proglen);
29963 if (!image)
29964- goto out;
29965+ goto error;
29966 }
29967 oldproglen = proglen;
29968 }
29969@@ -737,7 +856,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
29970 bpf_flush_icache(image, image + proglen);
29971
29972 fp->bpf_func = (void *)image;
29973- }
29974+ } else
29975+error:
29976+ kfree(fp->work);
29977+
29978 out:
29979 kfree(addrs);
29980 return;
29981@@ -745,18 +867,20 @@ out:
29982
29983 static void jit_free_defer(struct work_struct *arg)
29984 {
29985- module_free(NULL, arg);
29986+ module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
29987+ kfree(arg);
29988 }
29989
29990 /* run from softirq, we must use a work_struct to call
29991- * module_free() from process context
29992+ * module_free_exec() from process context
29993 */
29994 void bpf_jit_free(struct sk_filter *fp)
29995 {
29996 if (fp->bpf_func != sk_run_filter) {
29997- struct work_struct *work = (struct work_struct *)fp->bpf_func;
29998+ struct work_struct *work = &fp->work->work;
29999
30000 INIT_WORK(work, jit_free_defer);
30001+ fp->work->image = fp->bpf_func;
30002 schedule_work(work);
30003 }
30004 }
30005diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
30006index d6aa6e8..266395a 100644
30007--- a/arch/x86/oprofile/backtrace.c
30008+++ b/arch/x86/oprofile/backtrace.c
30009@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
30010 struct stack_frame_ia32 *fp;
30011 unsigned long bytes;
30012
30013- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
30014+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
30015 if (bytes != sizeof(bufhead))
30016 return NULL;
30017
30018- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
30019+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
30020
30021 oprofile_add_trace(bufhead[0].return_address);
30022
30023@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
30024 struct stack_frame bufhead[2];
30025 unsigned long bytes;
30026
30027- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
30028+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
30029 if (bytes != sizeof(bufhead))
30030 return NULL;
30031
30032@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
30033 {
30034 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
30035
30036- if (!user_mode_vm(regs)) {
30037+ if (!user_mode(regs)) {
30038 unsigned long stack = kernel_stack_pointer(regs);
30039 if (depth)
30040 dump_trace(NULL, regs, (unsigned long *)stack, 0,
30041diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
30042index 48768df..ba9143c 100644
30043--- a/arch/x86/oprofile/nmi_int.c
30044+++ b/arch/x86/oprofile/nmi_int.c
30045@@ -23,6 +23,7 @@
30046 #include <asm/nmi.h>
30047 #include <asm/msr.h>
30048 #include <asm/apic.h>
30049+#include <asm/pgtable.h>
30050
30051 #include "op_counter.h"
30052 #include "op_x86_model.h"
30053@@ -774,8 +775,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
30054 if (ret)
30055 return ret;
30056
30057- if (!model->num_virt_counters)
30058- model->num_virt_counters = model->num_counters;
30059+ if (!model->num_virt_counters) {
30060+ pax_open_kernel();
30061+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
30062+ pax_close_kernel();
30063+ }
30064
30065 mux_init(ops);
30066
30067diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
30068index b2b9443..be58856 100644
30069--- a/arch/x86/oprofile/op_model_amd.c
30070+++ b/arch/x86/oprofile/op_model_amd.c
30071@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
30072 num_counters = AMD64_NUM_COUNTERS;
30073 }
30074
30075- op_amd_spec.num_counters = num_counters;
30076- op_amd_spec.num_controls = num_counters;
30077- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
30078+ pax_open_kernel();
30079+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
30080+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
30081+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
30082+ pax_close_kernel();
30083
30084 return 0;
30085 }
30086diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
30087index d90528e..0127e2b 100644
30088--- a/arch/x86/oprofile/op_model_ppro.c
30089+++ b/arch/x86/oprofile/op_model_ppro.c
30090@@ -19,6 +19,7 @@
30091 #include <asm/msr.h>
30092 #include <asm/apic.h>
30093 #include <asm/nmi.h>
30094+#include <asm/pgtable.h>
30095
30096 #include "op_x86_model.h"
30097 #include "op_counter.h"
30098@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
30099
30100 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
30101
30102- op_arch_perfmon_spec.num_counters = num_counters;
30103- op_arch_perfmon_spec.num_controls = num_counters;
30104+ pax_open_kernel();
30105+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
30106+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
30107+ pax_close_kernel();
30108 }
30109
30110 static int arch_perfmon_init(struct oprofile_operations *ignore)
30111diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
30112index 71e8a67..6a313bb 100644
30113--- a/arch/x86/oprofile/op_x86_model.h
30114+++ b/arch/x86/oprofile/op_x86_model.h
30115@@ -52,7 +52,7 @@ struct op_x86_model_spec {
30116 void (*switch_ctrl)(struct op_x86_model_spec const *model,
30117 struct op_msrs const * const msrs);
30118 #endif
30119-};
30120+} __do_const;
30121
30122 struct op_counter_config;
30123
30124diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
30125index e9e6ed5..e47ae67 100644
30126--- a/arch/x86/pci/amd_bus.c
30127+++ b/arch/x86/pci/amd_bus.c
30128@@ -337,7 +337,7 @@ static int __cpuinit amd_cpu_notify(struct notifier_block *self,
30129 return NOTIFY_OK;
30130 }
30131
30132-static struct notifier_block __cpuinitdata amd_cpu_notifier = {
30133+static struct notifier_block amd_cpu_notifier = {
30134 .notifier_call = amd_cpu_notify,
30135 };
30136
30137diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
30138index 372e9b8..e775a6c 100644
30139--- a/arch/x86/pci/irq.c
30140+++ b/arch/x86/pci/irq.c
30141@@ -50,7 +50,7 @@ struct irq_router {
30142 struct irq_router_handler {
30143 u16 vendor;
30144 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
30145-};
30146+} __do_const;
30147
30148 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
30149 void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL;
30150@@ -794,7 +794,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
30151 return 0;
30152 }
30153
30154-static __initdata struct irq_router_handler pirq_routers[] = {
30155+static __initconst const struct irq_router_handler pirq_routers[] = {
30156 { PCI_VENDOR_ID_INTEL, intel_router_probe },
30157 { PCI_VENDOR_ID_AL, ali_router_probe },
30158 { PCI_VENDOR_ID_ITE, ite_router_probe },
30159@@ -821,7 +821,7 @@ static struct pci_dev *pirq_router_dev;
30160 static void __init pirq_find_router(struct irq_router *r)
30161 {
30162 struct irq_routing_table *rt = pirq_table;
30163- struct irq_router_handler *h;
30164+ const struct irq_router_handler *h;
30165
30166 #ifdef CONFIG_PCI_BIOS
30167 if (!rt->signature) {
30168@@ -1094,7 +1094,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
30169 return 0;
30170 }
30171
30172-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
30173+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
30174 {
30175 .callback = fix_broken_hp_bios_irq9,
30176 .ident = "HP Pavilion N5400 Series Laptop",
30177diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
30178index 6eb18c4..20d83de 100644
30179--- a/arch/x86/pci/mrst.c
30180+++ b/arch/x86/pci/mrst.c
30181@@ -238,7 +238,9 @@ int __init pci_mrst_init(void)
30182 printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n");
30183 pci_mmcfg_late_init();
30184 pcibios_enable_irq = mrst_pci_irq_enable;
30185- pci_root_ops = pci_mrst_ops;
30186+ pax_open_kernel();
30187+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
30188+ pax_close_kernel();
30189 pci_soc_mode = 1;
30190 /* Continue with standard init */
30191 return 1;
30192diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
30193index c77b24a..c979855 100644
30194--- a/arch/x86/pci/pcbios.c
30195+++ b/arch/x86/pci/pcbios.c
30196@@ -79,7 +79,7 @@ union bios32 {
30197 static struct {
30198 unsigned long address;
30199 unsigned short segment;
30200-} bios32_indirect = { 0, __KERNEL_CS };
30201+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
30202
30203 /*
30204 * Returns the entry point for the given service, NULL on error
30205@@ -92,37 +92,80 @@ static unsigned long bios32_service(unsigned long service)
30206 unsigned long length; /* %ecx */
30207 unsigned long entry; /* %edx */
30208 unsigned long flags;
30209+ struct desc_struct d, *gdt;
30210
30211 local_irq_save(flags);
30212- __asm__("lcall *(%%edi); cld"
30213+
30214+ gdt = get_cpu_gdt_table(smp_processor_id());
30215+
30216+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
30217+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
30218+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
30219+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
30220+
30221+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
30222 : "=a" (return_code),
30223 "=b" (address),
30224 "=c" (length),
30225 "=d" (entry)
30226 : "0" (service),
30227 "1" (0),
30228- "D" (&bios32_indirect));
30229+ "D" (&bios32_indirect),
30230+ "r"(__PCIBIOS_DS)
30231+ : "memory");
30232+
30233+ pax_open_kernel();
30234+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
30235+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
30236+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
30237+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
30238+ pax_close_kernel();
30239+
30240 local_irq_restore(flags);
30241
30242 switch (return_code) {
30243- case 0:
30244- return address + entry;
30245- case 0x80: /* Not present */
30246- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
30247- return 0;
30248- default: /* Shouldn't happen */
30249- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
30250- service, return_code);
30251+ case 0: {
30252+ int cpu;
30253+ unsigned char flags;
30254+
30255+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
30256+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
30257+ printk(KERN_WARNING "bios32_service: not valid\n");
30258 return 0;
30259+ }
30260+ address = address + PAGE_OFFSET;
30261+ length += 16UL; /* some BIOSs underreport this... */
30262+ flags = 4;
30263+ if (length >= 64*1024*1024) {
30264+ length >>= PAGE_SHIFT;
30265+ flags |= 8;
30266+ }
30267+
30268+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
30269+ gdt = get_cpu_gdt_table(cpu);
30270+ pack_descriptor(&d, address, length, 0x9b, flags);
30271+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
30272+ pack_descriptor(&d, address, length, 0x93, flags);
30273+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
30274+ }
30275+ return entry;
30276+ }
30277+ case 0x80: /* Not present */
30278+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
30279+ return 0;
30280+ default: /* Shouldn't happen */
30281+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
30282+ service, return_code);
30283+ return 0;
30284 }
30285 }
30286
30287 static struct {
30288 unsigned long address;
30289 unsigned short segment;
30290-} pci_indirect = { 0, __KERNEL_CS };
30291+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
30292
30293-static int pci_bios_present;
30294+static int pci_bios_present __read_only;
30295
30296 static int check_pcibios(void)
30297 {
30298@@ -131,11 +174,13 @@ static int check_pcibios(void)
30299 unsigned long flags, pcibios_entry;
30300
30301 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
30302- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
30303+ pci_indirect.address = pcibios_entry;
30304
30305 local_irq_save(flags);
30306- __asm__(
30307- "lcall *(%%edi); cld\n\t"
30308+ __asm__("movw %w6, %%ds\n\t"
30309+ "lcall *%%ss:(%%edi); cld\n\t"
30310+ "push %%ss\n\t"
30311+ "pop %%ds\n\t"
30312 "jc 1f\n\t"
30313 "xor %%ah, %%ah\n"
30314 "1:"
30315@@ -144,7 +189,8 @@ static int check_pcibios(void)
30316 "=b" (ebx),
30317 "=c" (ecx)
30318 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
30319- "D" (&pci_indirect)
30320+ "D" (&pci_indirect),
30321+ "r" (__PCIBIOS_DS)
30322 : "memory");
30323 local_irq_restore(flags);
30324
30325@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30326
30327 switch (len) {
30328 case 1:
30329- __asm__("lcall *(%%esi); cld\n\t"
30330+ __asm__("movw %w6, %%ds\n\t"
30331+ "lcall *%%ss:(%%esi); cld\n\t"
30332+ "push %%ss\n\t"
30333+ "pop %%ds\n\t"
30334 "jc 1f\n\t"
30335 "xor %%ah, %%ah\n"
30336 "1:"
30337@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30338 : "1" (PCIBIOS_READ_CONFIG_BYTE),
30339 "b" (bx),
30340 "D" ((long)reg),
30341- "S" (&pci_indirect));
30342+ "S" (&pci_indirect),
30343+ "r" (__PCIBIOS_DS));
30344 /*
30345 * Zero-extend the result beyond 8 bits, do not trust the
30346 * BIOS having done it:
30347@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30348 *value &= 0xff;
30349 break;
30350 case 2:
30351- __asm__("lcall *(%%esi); cld\n\t"
30352+ __asm__("movw %w6, %%ds\n\t"
30353+ "lcall *%%ss:(%%esi); cld\n\t"
30354+ "push %%ss\n\t"
30355+ "pop %%ds\n\t"
30356 "jc 1f\n\t"
30357 "xor %%ah, %%ah\n"
30358 "1:"
30359@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30360 : "1" (PCIBIOS_READ_CONFIG_WORD),
30361 "b" (bx),
30362 "D" ((long)reg),
30363- "S" (&pci_indirect));
30364+ "S" (&pci_indirect),
30365+ "r" (__PCIBIOS_DS));
30366 /*
30367 * Zero-extend the result beyond 16 bits, do not trust the
30368 * BIOS having done it:
30369@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30370 *value &= 0xffff;
30371 break;
30372 case 4:
30373- __asm__("lcall *(%%esi); cld\n\t"
30374+ __asm__("movw %w6, %%ds\n\t"
30375+ "lcall *%%ss:(%%esi); cld\n\t"
30376+ "push %%ss\n\t"
30377+ "pop %%ds\n\t"
30378 "jc 1f\n\t"
30379 "xor %%ah, %%ah\n"
30380 "1:"
30381@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30382 : "1" (PCIBIOS_READ_CONFIG_DWORD),
30383 "b" (bx),
30384 "D" ((long)reg),
30385- "S" (&pci_indirect));
30386+ "S" (&pci_indirect),
30387+ "r" (__PCIBIOS_DS));
30388 break;
30389 }
30390
30391@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
30392
30393 switch (len) {
30394 case 1:
30395- __asm__("lcall *(%%esi); cld\n\t"
30396+ __asm__("movw %w6, %%ds\n\t"
30397+ "lcall *%%ss:(%%esi); cld\n\t"
30398+ "push %%ss\n\t"
30399+ "pop %%ds\n\t"
30400 "jc 1f\n\t"
30401 "xor %%ah, %%ah\n"
30402 "1:"
30403@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
30404 "c" (value),
30405 "b" (bx),
30406 "D" ((long)reg),
30407- "S" (&pci_indirect));
30408+ "S" (&pci_indirect),
30409+ "r" (__PCIBIOS_DS));
30410 break;
30411 case 2:
30412- __asm__("lcall *(%%esi); cld\n\t"
30413+ __asm__("movw %w6, %%ds\n\t"
30414+ "lcall *%%ss:(%%esi); cld\n\t"
30415+ "push %%ss\n\t"
30416+ "pop %%ds\n\t"
30417 "jc 1f\n\t"
30418 "xor %%ah, %%ah\n"
30419 "1:"
30420@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
30421 "c" (value),
30422 "b" (bx),
30423 "D" ((long)reg),
30424- "S" (&pci_indirect));
30425+ "S" (&pci_indirect),
30426+ "r" (__PCIBIOS_DS));
30427 break;
30428 case 4:
30429- __asm__("lcall *(%%esi); cld\n\t"
30430+ __asm__("movw %w6, %%ds\n\t"
30431+ "lcall *%%ss:(%%esi); cld\n\t"
30432+ "push %%ss\n\t"
30433+ "pop %%ds\n\t"
30434 "jc 1f\n\t"
30435 "xor %%ah, %%ah\n"
30436 "1:"
30437@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
30438 "c" (value),
30439 "b" (bx),
30440 "D" ((long)reg),
30441- "S" (&pci_indirect));
30442+ "S" (&pci_indirect),
30443+ "r" (__PCIBIOS_DS));
30444 break;
30445 }
30446
30447@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
30448
30449 DBG("PCI: Fetching IRQ routing table... ");
30450 __asm__("push %%es\n\t"
30451+ "movw %w8, %%ds\n\t"
30452 "push %%ds\n\t"
30453 "pop %%es\n\t"
30454- "lcall *(%%esi); cld\n\t"
30455+ "lcall *%%ss:(%%esi); cld\n\t"
30456 "pop %%es\n\t"
30457+ "push %%ss\n\t"
30458+ "pop %%ds\n"
30459 "jc 1f\n\t"
30460 "xor %%ah, %%ah\n"
30461 "1:"
30462@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
30463 "1" (0),
30464 "D" ((long) &opt),
30465 "S" (&pci_indirect),
30466- "m" (opt)
30467+ "m" (opt),
30468+ "r" (__PCIBIOS_DS)
30469 : "memory");
30470 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
30471 if (ret & 0xff00)
30472@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
30473 {
30474 int ret;
30475
30476- __asm__("lcall *(%%esi); cld\n\t"
30477+ __asm__("movw %w5, %%ds\n\t"
30478+ "lcall *%%ss:(%%esi); cld\n\t"
30479+ "push %%ss\n\t"
30480+ "pop %%ds\n"
30481 "jc 1f\n\t"
30482 "xor %%ah, %%ah\n"
30483 "1:"
30484@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
30485 : "0" (PCIBIOS_SET_PCI_HW_INT),
30486 "b" ((dev->bus->number << 8) | dev->devfn),
30487 "c" ((irq << 8) | (pin + 10)),
30488- "S" (&pci_indirect));
30489+ "S" (&pci_indirect),
30490+ "r" (__PCIBIOS_DS));
30491 return !(ret & 0xff00);
30492 }
30493 EXPORT_SYMBOL(pcibios_set_irq_routing);
30494diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
30495index 90f3a52..714e825 100644
30496--- a/arch/x86/platform/efi/efi.c
30497+++ b/arch/x86/platform/efi/efi.c
30498@@ -1059,7 +1059,10 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
30499 * that by attempting to use more space than is available.
30500 */
30501 unsigned long dummy_size = remaining_size + 1024;
30502- void *dummy = kmalloc(dummy_size, GFP_ATOMIC);
30503+ void *dummy = kzalloc(dummy_size, GFP_ATOMIC);
30504+
30505+ if (!dummy)
30506+ return EFI_OUT_OF_RESOURCES;
30507
30508 status = efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID,
30509 EFI_VARIABLE_NON_VOLATILE |
30510@@ -1079,6 +1082,8 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
30511 0, dummy);
30512 }
30513
30514+ kfree(dummy);
30515+
30516 /*
30517 * The runtime code may now have triggered a garbage collection
30518 * run, so check the variable info again
30519diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
30520index 40e4469..1ab536e 100644
30521--- a/arch/x86/platform/efi/efi_32.c
30522+++ b/arch/x86/platform/efi/efi_32.c
30523@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
30524 {
30525 struct desc_ptr gdt_descr;
30526
30527+#ifdef CONFIG_PAX_KERNEXEC
30528+ struct desc_struct d;
30529+#endif
30530+
30531 local_irq_save(efi_rt_eflags);
30532
30533 load_cr3(initial_page_table);
30534 __flush_tlb_all();
30535
30536+#ifdef CONFIG_PAX_KERNEXEC
30537+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
30538+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
30539+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
30540+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
30541+#endif
30542+
30543 gdt_descr.address = __pa(get_cpu_gdt_table(0));
30544 gdt_descr.size = GDT_SIZE - 1;
30545 load_gdt(&gdt_descr);
30546@@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
30547 {
30548 struct desc_ptr gdt_descr;
30549
30550+#ifdef CONFIG_PAX_KERNEXEC
30551+ struct desc_struct d;
30552+
30553+ memset(&d, 0, sizeof d);
30554+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
30555+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
30556+#endif
30557+
30558 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
30559 gdt_descr.size = GDT_SIZE - 1;
30560 load_gdt(&gdt_descr);
30561diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
30562index fbe66e6..eae5e38 100644
30563--- a/arch/x86/platform/efi/efi_stub_32.S
30564+++ b/arch/x86/platform/efi/efi_stub_32.S
30565@@ -6,7 +6,9 @@
30566 */
30567
30568 #include <linux/linkage.h>
30569+#include <linux/init.h>
30570 #include <asm/page_types.h>
30571+#include <asm/segment.h>
30572
30573 /*
30574 * efi_call_phys(void *, ...) is a function with variable parameters.
30575@@ -20,7 +22,7 @@
30576 * service functions will comply with gcc calling convention, too.
30577 */
30578
30579-.text
30580+__INIT
30581 ENTRY(efi_call_phys)
30582 /*
30583 * 0. The function can only be called in Linux kernel. So CS has been
30584@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
30585 * The mapping of lower virtual memory has been created in prelog and
30586 * epilog.
30587 */
30588- movl $1f, %edx
30589- subl $__PAGE_OFFSET, %edx
30590- jmp *%edx
30591+#ifdef CONFIG_PAX_KERNEXEC
30592+ movl $(__KERNEXEC_EFI_DS), %edx
30593+ mov %edx, %ds
30594+ mov %edx, %es
30595+ mov %edx, %ss
30596+ addl $2f,(1f)
30597+ ljmp *(1f)
30598+
30599+__INITDATA
30600+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
30601+.previous
30602+
30603+2:
30604+ subl $2b,(1b)
30605+#else
30606+ jmp 1f-__PAGE_OFFSET
30607 1:
30608+#endif
30609
30610 /*
30611 * 2. Now on the top of stack is the return
30612@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
30613 * parameter 2, ..., param n. To make things easy, we save the return
30614 * address of efi_call_phys in a global variable.
30615 */
30616- popl %edx
30617- movl %edx, saved_return_addr
30618- /* get the function pointer into ECX*/
30619- popl %ecx
30620- movl %ecx, efi_rt_function_ptr
30621- movl $2f, %edx
30622- subl $__PAGE_OFFSET, %edx
30623- pushl %edx
30624+ popl (saved_return_addr)
30625+ popl (efi_rt_function_ptr)
30626
30627 /*
30628 * 3. Clear PG bit in %CR0.
30629@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
30630 /*
30631 * 5. Call the physical function.
30632 */
30633- jmp *%ecx
30634+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
30635
30636-2:
30637 /*
30638 * 6. After EFI runtime service returns, control will return to
30639 * following instruction. We'd better readjust stack pointer first.
30640@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
30641 movl %cr0, %edx
30642 orl $0x80000000, %edx
30643 movl %edx, %cr0
30644- jmp 1f
30645-1:
30646+
30647 /*
30648 * 8. Now restore the virtual mode from flat mode by
30649 * adding EIP with PAGE_OFFSET.
30650 */
30651- movl $1f, %edx
30652- jmp *%edx
30653+#ifdef CONFIG_PAX_KERNEXEC
30654+ movl $(__KERNEL_DS), %edx
30655+ mov %edx, %ds
30656+ mov %edx, %es
30657+ mov %edx, %ss
30658+ ljmp $(__KERNEL_CS),$1f
30659+#else
30660+ jmp 1f+__PAGE_OFFSET
30661+#endif
30662 1:
30663
30664 /*
30665 * 9. Balance the stack. And because EAX contain the return value,
30666 * we'd better not clobber it.
30667 */
30668- leal efi_rt_function_ptr, %edx
30669- movl (%edx), %ecx
30670- pushl %ecx
30671+ pushl (efi_rt_function_ptr)
30672
30673 /*
30674- * 10. Push the saved return address onto the stack and return.
30675+ * 10. Return to the saved return address.
30676 */
30677- leal saved_return_addr, %edx
30678- movl (%edx), %ecx
30679- pushl %ecx
30680- ret
30681+ jmpl *(saved_return_addr)
30682 ENDPROC(efi_call_phys)
30683 .previous
30684
30685-.data
30686+__INITDATA
30687 saved_return_addr:
30688 .long 0
30689 efi_rt_function_ptr:
30690diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
30691index 4c07cca..2c8427d 100644
30692--- a/arch/x86/platform/efi/efi_stub_64.S
30693+++ b/arch/x86/platform/efi/efi_stub_64.S
30694@@ -7,6 +7,7 @@
30695 */
30696
30697 #include <linux/linkage.h>
30698+#include <asm/alternative-asm.h>
30699
30700 #define SAVE_XMM \
30701 mov %rsp, %rax; \
30702@@ -40,6 +41,7 @@ ENTRY(efi_call0)
30703 call *%rdi
30704 addq $32, %rsp
30705 RESTORE_XMM
30706+ pax_force_retaddr 0, 1
30707 ret
30708 ENDPROC(efi_call0)
30709
30710@@ -50,6 +52,7 @@ ENTRY(efi_call1)
30711 call *%rdi
30712 addq $32, %rsp
30713 RESTORE_XMM
30714+ pax_force_retaddr 0, 1
30715 ret
30716 ENDPROC(efi_call1)
30717
30718@@ -60,6 +63,7 @@ ENTRY(efi_call2)
30719 call *%rdi
30720 addq $32, %rsp
30721 RESTORE_XMM
30722+ pax_force_retaddr 0, 1
30723 ret
30724 ENDPROC(efi_call2)
30725
30726@@ -71,6 +75,7 @@ ENTRY(efi_call3)
30727 call *%rdi
30728 addq $32, %rsp
30729 RESTORE_XMM
30730+ pax_force_retaddr 0, 1
30731 ret
30732 ENDPROC(efi_call3)
30733
30734@@ -83,6 +88,7 @@ ENTRY(efi_call4)
30735 call *%rdi
30736 addq $32, %rsp
30737 RESTORE_XMM
30738+ pax_force_retaddr 0, 1
30739 ret
30740 ENDPROC(efi_call4)
30741
30742@@ -96,6 +102,7 @@ ENTRY(efi_call5)
30743 call *%rdi
30744 addq $48, %rsp
30745 RESTORE_XMM
30746+ pax_force_retaddr 0, 1
30747 ret
30748 ENDPROC(efi_call5)
30749
30750@@ -112,5 +119,6 @@ ENTRY(efi_call6)
30751 call *%rdi
30752 addq $48, %rsp
30753 RESTORE_XMM
30754+ pax_force_retaddr 0, 1
30755 ret
30756 ENDPROC(efi_call6)
30757diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
30758index e31bcd8..f12dc46 100644
30759--- a/arch/x86/platform/mrst/mrst.c
30760+++ b/arch/x86/platform/mrst/mrst.c
30761@@ -78,13 +78,15 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
30762 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
30763 int sfi_mrtc_num;
30764
30765-static void mrst_power_off(void)
30766+static __noreturn void mrst_power_off(void)
30767 {
30768+ BUG();
30769 }
30770
30771-static void mrst_reboot(void)
30772+static __noreturn void mrst_reboot(void)
30773 {
30774 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
30775+ BUG();
30776 }
30777
30778 /* parse all the mtimer info to a static mtimer array */
30779diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
30780index d6ee929..3637cb5 100644
30781--- a/arch/x86/platform/olpc/olpc_dt.c
30782+++ b/arch/x86/platform/olpc/olpc_dt.c
30783@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
30784 return res;
30785 }
30786
30787-static struct of_pdt_ops prom_olpc_ops __initdata = {
30788+static struct of_pdt_ops prom_olpc_ops __initconst = {
30789 .nextprop = olpc_dt_nextprop,
30790 .getproplen = olpc_dt_getproplen,
30791 .getproperty = olpc_dt_getproperty,
30792diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
30793index 3c68768..07e82b8 100644
30794--- a/arch/x86/power/cpu.c
30795+++ b/arch/x86/power/cpu.c
30796@@ -134,7 +134,7 @@ static void do_fpu_end(void)
30797 static void fix_processor_context(void)
30798 {
30799 int cpu = smp_processor_id();
30800- struct tss_struct *t = &per_cpu(init_tss, cpu);
30801+ struct tss_struct *t = init_tss + cpu;
30802
30803 set_tss_desc(cpu, t); /*
30804 * This just modifies memory; should not be
30805@@ -144,8 +144,6 @@ static void fix_processor_context(void)
30806 */
30807
30808 #ifdef CONFIG_X86_64
30809- get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
30810-
30811 syscall_init(); /* This sets MSR_*STAR and related */
30812 #endif
30813 load_TR_desc(); /* This does ltr */
30814diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
30815index a44f457..9140171 100644
30816--- a/arch/x86/realmode/init.c
30817+++ b/arch/x86/realmode/init.c
30818@@ -70,7 +70,13 @@ void __init setup_real_mode(void)
30819 __va(real_mode_header->trampoline_header);
30820
30821 #ifdef CONFIG_X86_32
30822- trampoline_header->start = __pa_symbol(startup_32_smp);
30823+ trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
30824+
30825+#ifdef CONFIG_PAX_KERNEXEC
30826+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
30827+#endif
30828+
30829+ trampoline_header->boot_cs = __BOOT_CS;
30830 trampoline_header->gdt_limit = __BOOT_DS + 7;
30831 trampoline_header->gdt_base = __pa_symbol(boot_gdt);
30832 #else
30833@@ -86,7 +92,7 @@ void __init setup_real_mode(void)
30834 *trampoline_cr4_features = read_cr4();
30835
30836 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
30837- trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
30838+ trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
30839 trampoline_pgd[511] = init_level4_pgt[511].pgd;
30840 #endif
30841 }
30842diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
30843index 8869287..d577672 100644
30844--- a/arch/x86/realmode/rm/Makefile
30845+++ b/arch/x86/realmode/rm/Makefile
30846@@ -78,5 +78,8 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
30847 $(call cc-option, -fno-unit-at-a-time)) \
30848 $(call cc-option, -fno-stack-protector) \
30849 $(call cc-option, -mpreferred-stack-boundary=2)
30850+ifdef CONSTIFY_PLUGIN
30851+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
30852+endif
30853 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
30854 GCOV_PROFILE := n
30855diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
30856index a28221d..93c40f1 100644
30857--- a/arch/x86/realmode/rm/header.S
30858+++ b/arch/x86/realmode/rm/header.S
30859@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
30860 #endif
30861 /* APM/BIOS reboot */
30862 .long pa_machine_real_restart_asm
30863-#ifdef CONFIG_X86_64
30864+#ifdef CONFIG_X86_32
30865+ .long __KERNEL_CS
30866+#else
30867 .long __KERNEL32_CS
30868 #endif
30869 END(real_mode_header)
30870diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
30871index c1b2791..f9e31c7 100644
30872--- a/arch/x86/realmode/rm/trampoline_32.S
30873+++ b/arch/x86/realmode/rm/trampoline_32.S
30874@@ -25,6 +25,12 @@
30875 #include <asm/page_types.h>
30876 #include "realmode.h"
30877
30878+#ifdef CONFIG_PAX_KERNEXEC
30879+#define ta(X) (X)
30880+#else
30881+#define ta(X) (pa_ ## X)
30882+#endif
30883+
30884 .text
30885 .code16
30886
30887@@ -39,8 +45,6 @@ ENTRY(trampoline_start)
30888
30889 cli # We should be safe anyway
30890
30891- movl tr_start, %eax # where we need to go
30892-
30893 movl $0xA5A5A5A5, trampoline_status
30894 # write marker for master knows we're running
30895
30896@@ -56,7 +60,7 @@ ENTRY(trampoline_start)
30897 movw $1, %dx # protected mode (PE) bit
30898 lmsw %dx # into protected mode
30899
30900- ljmpl $__BOOT_CS, $pa_startup_32
30901+ ljmpl *(trampoline_header)
30902
30903 .section ".text32","ax"
30904 .code32
30905@@ -67,7 +71,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
30906 .balign 8
30907 GLOBAL(trampoline_header)
30908 tr_start: .space 4
30909- tr_gdt_pad: .space 2
30910+ tr_boot_cs: .space 2
30911 tr_gdt: .space 6
30912 END(trampoline_header)
30913
30914diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
30915index bb360dc..3e5945f 100644
30916--- a/arch/x86/realmode/rm/trampoline_64.S
30917+++ b/arch/x86/realmode/rm/trampoline_64.S
30918@@ -107,7 +107,7 @@ ENTRY(startup_32)
30919 wrmsr
30920
30921 # Enable paging and in turn activate Long Mode
30922- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
30923+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
30924 movl %eax, %cr0
30925
30926 /*
30927diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
30928index 79d67bd..c7e1b90 100644
30929--- a/arch/x86/tools/relocs.c
30930+++ b/arch/x86/tools/relocs.c
30931@@ -12,10 +12,13 @@
30932 #include <regex.h>
30933 #include <tools/le_byteshift.h>
30934
30935+#include "../../../include/generated/autoconf.h"
30936+
30937 static void die(char *fmt, ...);
30938
30939 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
30940 static Elf32_Ehdr ehdr;
30941+static Elf32_Phdr *phdr;
30942 static unsigned long reloc_count, reloc_idx;
30943 static unsigned long *relocs;
30944 static unsigned long reloc16_count, reloc16_idx;
30945@@ -330,9 +333,39 @@ static void read_ehdr(FILE *fp)
30946 }
30947 }
30948
30949+static void read_phdrs(FILE *fp)
30950+{
30951+ unsigned int i;
30952+
30953+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
30954+ if (!phdr) {
30955+ die("Unable to allocate %d program headers\n",
30956+ ehdr.e_phnum);
30957+ }
30958+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
30959+ die("Seek to %d failed: %s\n",
30960+ ehdr.e_phoff, strerror(errno));
30961+ }
30962+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
30963+ die("Cannot read ELF program headers: %s\n",
30964+ strerror(errno));
30965+ }
30966+ for(i = 0; i < ehdr.e_phnum; i++) {
30967+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
30968+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
30969+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
30970+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
30971+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
30972+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
30973+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
30974+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
30975+ }
30976+
30977+}
30978+
30979 static void read_shdrs(FILE *fp)
30980 {
30981- int i;
30982+ unsigned int i;
30983 Elf32_Shdr shdr;
30984
30985 secs = calloc(ehdr.e_shnum, sizeof(struct section));
30986@@ -367,7 +400,7 @@ static void read_shdrs(FILE *fp)
30987
30988 static void read_strtabs(FILE *fp)
30989 {
30990- int i;
30991+ unsigned int i;
30992 for (i = 0; i < ehdr.e_shnum; i++) {
30993 struct section *sec = &secs[i];
30994 if (sec->shdr.sh_type != SHT_STRTAB) {
30995@@ -392,7 +425,7 @@ static void read_strtabs(FILE *fp)
30996
30997 static void read_symtabs(FILE *fp)
30998 {
30999- int i,j;
31000+ unsigned int i,j;
31001 for (i = 0; i < ehdr.e_shnum; i++) {
31002 struct section *sec = &secs[i];
31003 if (sec->shdr.sh_type != SHT_SYMTAB) {
31004@@ -423,9 +456,11 @@ static void read_symtabs(FILE *fp)
31005 }
31006
31007
31008-static void read_relocs(FILE *fp)
31009+static void read_relocs(FILE *fp, int use_real_mode)
31010 {
31011- int i,j;
31012+ unsigned int i,j;
31013+ uint32_t base;
31014+
31015 for (i = 0; i < ehdr.e_shnum; i++) {
31016 struct section *sec = &secs[i];
31017 if (sec->shdr.sh_type != SHT_REL) {
31018@@ -445,9 +480,22 @@ static void read_relocs(FILE *fp)
31019 die("Cannot read symbol table: %s\n",
31020 strerror(errno));
31021 }
31022+ base = 0;
31023+
31024+#ifdef CONFIG_X86_32
31025+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
31026+ if (phdr[j].p_type != PT_LOAD )
31027+ continue;
31028+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
31029+ continue;
31030+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
31031+ break;
31032+ }
31033+#endif
31034+
31035 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
31036 Elf32_Rel *rel = &sec->reltab[j];
31037- rel->r_offset = elf32_to_cpu(rel->r_offset);
31038+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
31039 rel->r_info = elf32_to_cpu(rel->r_info);
31040 }
31041 }
31042@@ -456,13 +504,13 @@ static void read_relocs(FILE *fp)
31043
31044 static void print_absolute_symbols(void)
31045 {
31046- int i;
31047+ unsigned int i;
31048 printf("Absolute symbols\n");
31049 printf(" Num: Value Size Type Bind Visibility Name\n");
31050 for (i = 0; i < ehdr.e_shnum; i++) {
31051 struct section *sec = &secs[i];
31052 char *sym_strtab;
31053- int j;
31054+ unsigned int j;
31055
31056 if (sec->shdr.sh_type != SHT_SYMTAB) {
31057 continue;
31058@@ -489,14 +537,14 @@ static void print_absolute_symbols(void)
31059
31060 static void print_absolute_relocs(void)
31061 {
31062- int i, printed = 0;
31063+ unsigned int i, printed = 0;
31064
31065 for (i = 0; i < ehdr.e_shnum; i++) {
31066 struct section *sec = &secs[i];
31067 struct section *sec_applies, *sec_symtab;
31068 char *sym_strtab;
31069 Elf32_Sym *sh_symtab;
31070- int j;
31071+ unsigned int j;
31072 if (sec->shdr.sh_type != SHT_REL) {
31073 continue;
31074 }
31075@@ -558,13 +606,13 @@ static void print_absolute_relocs(void)
31076 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
31077 int use_real_mode)
31078 {
31079- int i;
31080+ unsigned int i;
31081 /* Walk through the relocations */
31082 for (i = 0; i < ehdr.e_shnum; i++) {
31083 char *sym_strtab;
31084 Elf32_Sym *sh_symtab;
31085 struct section *sec_applies, *sec_symtab;
31086- int j;
31087+ unsigned int j;
31088 struct section *sec = &secs[i];
31089
31090 if (sec->shdr.sh_type != SHT_REL) {
31091@@ -588,6 +636,24 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
31092 sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
31093 r_type = ELF32_R_TYPE(rel->r_info);
31094
31095+ if (!use_real_mode) {
31096+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
31097+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
31098+ continue;
31099+
31100+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
31101+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
31102+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
31103+ continue;
31104+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
31105+ continue;
31106+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
31107+ continue;
31108+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
31109+ continue;
31110+#endif
31111+ }
31112+
31113 shn_abs = sym->st_shndx == SHN_ABS;
31114
31115 switch (r_type) {
31116@@ -681,7 +747,7 @@ static int write32(unsigned int v, FILE *f)
31117
31118 static void emit_relocs(int as_text, int use_real_mode)
31119 {
31120- int i;
31121+ unsigned int i;
31122 /* Count how many relocations I have and allocate space for them. */
31123 reloc_count = 0;
31124 walk_relocs(count_reloc, use_real_mode);
31125@@ -808,10 +874,11 @@ int main(int argc, char **argv)
31126 fname, strerror(errno));
31127 }
31128 read_ehdr(fp);
31129+ read_phdrs(fp);
31130 read_shdrs(fp);
31131 read_strtabs(fp);
31132 read_symtabs(fp);
31133- read_relocs(fp);
31134+ read_relocs(fp, use_real_mode);
31135 if (show_absolute_syms) {
31136 print_absolute_symbols();
31137 goto out;
31138diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
31139index fd14be1..e3c79c0 100644
31140--- a/arch/x86/vdso/Makefile
31141+++ b/arch/x86/vdso/Makefile
31142@@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO $@
31143 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
31144 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
31145
31146-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
31147+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
31148 GCOV_PROFILE := n
31149
31150 #
31151diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
31152index 0faad64..39ef157 100644
31153--- a/arch/x86/vdso/vdso32-setup.c
31154+++ b/arch/x86/vdso/vdso32-setup.c
31155@@ -25,6 +25,7 @@
31156 #include <asm/tlbflush.h>
31157 #include <asm/vdso.h>
31158 #include <asm/proto.h>
31159+#include <asm/mman.h>
31160
31161 enum {
31162 VDSO_DISABLED = 0,
31163@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
31164 void enable_sep_cpu(void)
31165 {
31166 int cpu = get_cpu();
31167- struct tss_struct *tss = &per_cpu(init_tss, cpu);
31168+ struct tss_struct *tss = init_tss + cpu;
31169
31170 if (!boot_cpu_has(X86_FEATURE_SEP)) {
31171 put_cpu();
31172@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
31173 gate_vma.vm_start = FIXADDR_USER_START;
31174 gate_vma.vm_end = FIXADDR_USER_END;
31175 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
31176- gate_vma.vm_page_prot = __P101;
31177+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
31178
31179 return 0;
31180 }
31181@@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
31182 if (compat)
31183 addr = VDSO_HIGH_BASE;
31184 else {
31185- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
31186+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
31187 if (IS_ERR_VALUE(addr)) {
31188 ret = addr;
31189 goto up_fail;
31190 }
31191 }
31192
31193- current->mm->context.vdso = (void *)addr;
31194+ current->mm->context.vdso = addr;
31195
31196 if (compat_uses_vma || !compat) {
31197 /*
31198@@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
31199 }
31200
31201 current_thread_info()->sysenter_return =
31202- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
31203+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
31204
31205 up_fail:
31206 if (ret)
31207- current->mm->context.vdso = NULL;
31208+ current->mm->context.vdso = 0;
31209
31210 up_write(&mm->mmap_sem);
31211
31212@@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init);
31213
31214 const char *arch_vma_name(struct vm_area_struct *vma)
31215 {
31216- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
31217+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
31218 return "[vdso]";
31219+
31220+#ifdef CONFIG_PAX_SEGMEXEC
31221+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
31222+ return "[vdso]";
31223+#endif
31224+
31225 return NULL;
31226 }
31227
31228@@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
31229 * Check to see if the corresponding task was created in compat vdso
31230 * mode.
31231 */
31232- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
31233+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
31234 return &gate_vma;
31235 return NULL;
31236 }
31237diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
31238index 431e875..cbb23f3 100644
31239--- a/arch/x86/vdso/vma.c
31240+++ b/arch/x86/vdso/vma.c
31241@@ -16,8 +16,6 @@
31242 #include <asm/vdso.h>
31243 #include <asm/page.h>
31244
31245-unsigned int __read_mostly vdso_enabled = 1;
31246-
31247 extern char vdso_start[], vdso_end[];
31248 extern unsigned short vdso_sync_cpuid;
31249
31250@@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
31251 * unaligned here as a result of stack start randomization.
31252 */
31253 addr = PAGE_ALIGN(addr);
31254- addr = align_vdso_addr(addr);
31255
31256 return addr;
31257 }
31258@@ -154,30 +151,31 @@ static int setup_additional_pages(struct linux_binprm *bprm,
31259 unsigned size)
31260 {
31261 struct mm_struct *mm = current->mm;
31262- unsigned long addr;
31263+ unsigned long addr = 0;
31264 int ret;
31265
31266- if (!vdso_enabled)
31267- return 0;
31268-
31269 down_write(&mm->mmap_sem);
31270+
31271+#ifdef CONFIG_PAX_RANDMMAP
31272+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
31273+#endif
31274+
31275 addr = vdso_addr(mm->start_stack, size);
31276+ addr = align_vdso_addr(addr);
31277 addr = get_unmapped_area(NULL, addr, size, 0, 0);
31278 if (IS_ERR_VALUE(addr)) {
31279 ret = addr;
31280 goto up_fail;
31281 }
31282
31283- current->mm->context.vdso = (void *)addr;
31284+ mm->context.vdso = addr;
31285
31286 ret = install_special_mapping(mm, addr, size,
31287 VM_READ|VM_EXEC|
31288 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
31289 pages);
31290- if (ret) {
31291- current->mm->context.vdso = NULL;
31292- goto up_fail;
31293- }
31294+ if (ret)
31295+ mm->context.vdso = 0;
31296
31297 up_fail:
31298 up_write(&mm->mmap_sem);
31299@@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
31300 vdsox32_size);
31301 }
31302 #endif
31303-
31304-static __init int vdso_setup(char *s)
31305-{
31306- vdso_enabled = simple_strtoul(s, NULL, 0);
31307- return 0;
31308-}
31309-__setup("vdso=", vdso_setup);
31310diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
31311index cf95e19..17e9f50 100644
31312--- a/arch/x86/xen/enlighten.c
31313+++ b/arch/x86/xen/enlighten.c
31314@@ -100,8 +100,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
31315
31316 struct shared_info xen_dummy_shared_info;
31317
31318-void *xen_initial_gdt;
31319-
31320 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
31321 __read_mostly int xen_have_vector_callback;
31322 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
31323@@ -511,8 +509,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
31324 {
31325 unsigned long va = dtr->address;
31326 unsigned int size = dtr->size + 1;
31327- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
31328- unsigned long frames[pages];
31329+ unsigned long frames[65536 / PAGE_SIZE];
31330 int f;
31331
31332 /*
31333@@ -560,8 +557,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
31334 {
31335 unsigned long va = dtr->address;
31336 unsigned int size = dtr->size + 1;
31337- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
31338- unsigned long frames[pages];
31339+ unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
31340 int f;
31341
31342 /*
31343@@ -569,7 +565,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
31344 * 8-byte entries, or 16 4k pages..
31345 */
31346
31347- BUG_ON(size > 65536);
31348+ BUG_ON(size > GDT_SIZE);
31349 BUG_ON(va & ~PAGE_MASK);
31350
31351 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
31352@@ -954,7 +950,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
31353 return 0;
31354 }
31355
31356-static void set_xen_basic_apic_ops(void)
31357+static void __init set_xen_basic_apic_ops(void)
31358 {
31359 apic->read = xen_apic_read;
31360 apic->write = xen_apic_write;
31361@@ -1260,30 +1256,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
31362 #endif
31363 };
31364
31365-static void xen_reboot(int reason)
31366+static __noreturn void xen_reboot(int reason)
31367 {
31368 struct sched_shutdown r = { .reason = reason };
31369
31370- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
31371- BUG();
31372+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
31373+ BUG();
31374 }
31375
31376-static void xen_restart(char *msg)
31377+static __noreturn void xen_restart(char *msg)
31378 {
31379 xen_reboot(SHUTDOWN_reboot);
31380 }
31381
31382-static void xen_emergency_restart(void)
31383+static __noreturn void xen_emergency_restart(void)
31384 {
31385 xen_reboot(SHUTDOWN_reboot);
31386 }
31387
31388-static void xen_machine_halt(void)
31389+static __noreturn void xen_machine_halt(void)
31390 {
31391 xen_reboot(SHUTDOWN_poweroff);
31392 }
31393
31394-static void xen_machine_power_off(void)
31395+static __noreturn void xen_machine_power_off(void)
31396 {
31397 if (pm_power_off)
31398 pm_power_off();
31399@@ -1385,7 +1381,17 @@ asmlinkage void __init xen_start_kernel(void)
31400 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
31401
31402 /* Work out if we support NX */
31403- x86_configure_nx();
31404+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
31405+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
31406+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
31407+ unsigned l, h;
31408+
31409+ __supported_pte_mask |= _PAGE_NX;
31410+ rdmsr(MSR_EFER, l, h);
31411+ l |= EFER_NX;
31412+ wrmsr(MSR_EFER, l, h);
31413+ }
31414+#endif
31415
31416 xen_setup_features();
31417
31418@@ -1416,13 +1422,6 @@ asmlinkage void __init xen_start_kernel(void)
31419
31420 machine_ops = xen_machine_ops;
31421
31422- /*
31423- * The only reliable way to retain the initial address of the
31424- * percpu gdt_page is to remember it here, so we can go and
31425- * mark it RW later, when the initial percpu area is freed.
31426- */
31427- xen_initial_gdt = &per_cpu(gdt_page, 0);
31428-
31429 xen_smp_init();
31430
31431 #ifdef CONFIG_ACPI_NUMA
31432@@ -1616,7 +1615,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
31433 return NOTIFY_OK;
31434 }
31435
31436-static struct notifier_block xen_hvm_cpu_notifier __cpuinitdata = {
31437+static struct notifier_block xen_hvm_cpu_notifier = {
31438 .notifier_call = xen_hvm_cpu_notify,
31439 };
31440
31441diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
31442index e006c18..b9a7d6c 100644
31443--- a/arch/x86/xen/mmu.c
31444+++ b/arch/x86/xen/mmu.c
31445@@ -1894,6 +1894,9 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
31446 /* L3_k[510] -> level2_kernel_pgt
31447 * L3_i[511] -> level2_fixmap_pgt */
31448 convert_pfn_mfn(level3_kernel_pgt);
31449+ convert_pfn_mfn(level3_vmalloc_start_pgt);
31450+ convert_pfn_mfn(level3_vmalloc_end_pgt);
31451+ convert_pfn_mfn(level3_vmemmap_pgt);
31452
31453 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
31454 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
31455@@ -1923,8 +1926,12 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
31456 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
31457 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
31458 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
31459+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
31460+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
31461+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
31462 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
31463 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
31464+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
31465 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
31466 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
31467
31468@@ -2110,6 +2117,7 @@ static void __init xen_post_allocator_init(void)
31469 pv_mmu_ops.set_pud = xen_set_pud;
31470 #if PAGETABLE_LEVELS == 4
31471 pv_mmu_ops.set_pgd = xen_set_pgd;
31472+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
31473 #endif
31474
31475 /* This will work as long as patching hasn't happened yet
31476@@ -2188,6 +2196,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
31477 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
31478 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
31479 .set_pgd = xen_set_pgd_hyper,
31480+ .set_pgd_batched = xen_set_pgd_hyper,
31481
31482 .alloc_pud = xen_alloc_pmd_init,
31483 .release_pud = xen_release_pmd_init,
31484diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
31485index 96c4e85..284fded 100644
31486--- a/arch/x86/xen/smp.c
31487+++ b/arch/x86/xen/smp.c
31488@@ -230,11 +230,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
31489 {
31490 BUG_ON(smp_processor_id() != 0);
31491 native_smp_prepare_boot_cpu();
31492-
31493- /* We've switched to the "real" per-cpu gdt, so make sure the
31494- old memory can be recycled */
31495- make_lowmem_page_readwrite(xen_initial_gdt);
31496-
31497 xen_filter_cpu_maps();
31498 xen_setup_vcpu_info_placement();
31499 }
31500@@ -304,7 +299,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
31501 ctxt->user_regs.ss = __KERNEL_DS;
31502 #ifdef CONFIG_X86_32
31503 ctxt->user_regs.fs = __KERNEL_PERCPU;
31504- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
31505+ savesegment(gs, ctxt->user_regs.gs);
31506 #else
31507 ctxt->gs_base_kernel = per_cpu_offset(cpu);
31508 #endif
31509@@ -314,8 +309,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
31510
31511 {
31512 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
31513- ctxt->user_regs.ds = __USER_DS;
31514- ctxt->user_regs.es = __USER_DS;
31515+ ctxt->user_regs.ds = __KERNEL_DS;
31516+ ctxt->user_regs.es = __KERNEL_DS;
31517
31518 xen_copy_trap_info(ctxt->trap_ctxt);
31519
31520@@ -360,13 +355,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle)
31521 int rc;
31522
31523 per_cpu(current_task, cpu) = idle;
31524+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
31525 #ifdef CONFIG_X86_32
31526 irq_ctx_init(cpu);
31527 #else
31528 clear_tsk_thread_flag(idle, TIF_FORK);
31529- per_cpu(kernel_stack, cpu) =
31530- (unsigned long)task_stack_page(idle) -
31531- KERNEL_STACK_OFFSET + THREAD_SIZE;
31532+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
31533 #endif
31534 xen_setup_runstate_info(cpu);
31535 xen_setup_timer(cpu);
31536@@ -642,7 +636,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
31537
31538 void __init xen_smp_init(void)
31539 {
31540- smp_ops = xen_smp_ops;
31541+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
31542 xen_fill_possible_map();
31543 xen_init_spinlocks();
31544 }
31545diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
31546index 33ca6e4..0ded929 100644
31547--- a/arch/x86/xen/xen-asm_32.S
31548+++ b/arch/x86/xen/xen-asm_32.S
31549@@ -84,14 +84,14 @@ ENTRY(xen_iret)
31550 ESP_OFFSET=4 # bytes pushed onto stack
31551
31552 /*
31553- * Store vcpu_info pointer for easy access. Do it this way to
31554- * avoid having to reload %fs
31555+ * Store vcpu_info pointer for easy access.
31556 */
31557 #ifdef CONFIG_SMP
31558- GET_THREAD_INFO(%eax)
31559- movl %ss:TI_cpu(%eax), %eax
31560- movl %ss:__per_cpu_offset(,%eax,4), %eax
31561- mov %ss:xen_vcpu(%eax), %eax
31562+ push %fs
31563+ mov $(__KERNEL_PERCPU), %eax
31564+ mov %eax, %fs
31565+ mov PER_CPU_VAR(xen_vcpu), %eax
31566+ pop %fs
31567 #else
31568 movl %ss:xen_vcpu, %eax
31569 #endif
31570diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
31571index 7faed58..ba4427c 100644
31572--- a/arch/x86/xen/xen-head.S
31573+++ b/arch/x86/xen/xen-head.S
31574@@ -19,6 +19,17 @@ ENTRY(startup_xen)
31575 #ifdef CONFIG_X86_32
31576 mov %esi,xen_start_info
31577 mov $init_thread_union+THREAD_SIZE,%esp
31578+#ifdef CONFIG_SMP
31579+ movl $cpu_gdt_table,%edi
31580+ movl $__per_cpu_load,%eax
31581+ movw %ax,__KERNEL_PERCPU + 2(%edi)
31582+ rorl $16,%eax
31583+ movb %al,__KERNEL_PERCPU + 4(%edi)
31584+ movb %ah,__KERNEL_PERCPU + 7(%edi)
31585+ movl $__per_cpu_end - 1,%eax
31586+ subl $__per_cpu_start,%eax
31587+ movw %ax,__KERNEL_PERCPU + 0(%edi)
31588+#endif
31589 #else
31590 mov %rsi,xen_start_info
31591 mov $init_thread_union+THREAD_SIZE,%rsp
31592diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
31593index a95b417..b6dbd0b 100644
31594--- a/arch/x86/xen/xen-ops.h
31595+++ b/arch/x86/xen/xen-ops.h
31596@@ -10,8 +10,6 @@
31597 extern const char xen_hypervisor_callback[];
31598 extern const char xen_failsafe_callback[];
31599
31600-extern void *xen_initial_gdt;
31601-
31602 struct trap_info;
31603 void xen_copy_trap_info(struct trap_info *traps);
31604
31605diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
31606index 525bd3d..ef888b1 100644
31607--- a/arch/xtensa/variants/dc232b/include/variant/core.h
31608+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
31609@@ -119,9 +119,9 @@
31610 ----------------------------------------------------------------------*/
31611
31612 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
31613-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
31614 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
31615 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
31616+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
31617
31618 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
31619 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
31620diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
31621index 2f33760..835e50a 100644
31622--- a/arch/xtensa/variants/fsf/include/variant/core.h
31623+++ b/arch/xtensa/variants/fsf/include/variant/core.h
31624@@ -11,6 +11,7 @@
31625 #ifndef _XTENSA_CORE_H
31626 #define _XTENSA_CORE_H
31627
31628+#include <linux/const.h>
31629
31630 /****************************************************************************
31631 Parameters Useful for Any Code, USER or PRIVILEGED
31632@@ -112,9 +113,9 @@
31633 ----------------------------------------------------------------------*/
31634
31635 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
31636-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
31637 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
31638 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
31639+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
31640
31641 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
31642 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
31643diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
31644index af00795..2bb8105 100644
31645--- a/arch/xtensa/variants/s6000/include/variant/core.h
31646+++ b/arch/xtensa/variants/s6000/include/variant/core.h
31647@@ -11,6 +11,7 @@
31648 #ifndef _XTENSA_CORE_CONFIGURATION_H
31649 #define _XTENSA_CORE_CONFIGURATION_H
31650
31651+#include <linux/const.h>
31652
31653 /****************************************************************************
31654 Parameters Useful for Any Code, USER or PRIVILEGED
31655@@ -118,9 +119,9 @@
31656 ----------------------------------------------------------------------*/
31657
31658 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
31659-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
31660 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
31661 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
31662+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
31663
31664 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
31665 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
31666diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
31667index 58916af..eb9dbcf6 100644
31668--- a/block/blk-iopoll.c
31669+++ b/block/blk-iopoll.c
31670@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
31671 }
31672 EXPORT_SYMBOL(blk_iopoll_complete);
31673
31674-static void blk_iopoll_softirq(struct softirq_action *h)
31675+static void blk_iopoll_softirq(void)
31676 {
31677 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
31678 int rearm = 0, budget = blk_iopoll_budget;
31679@@ -209,7 +209,7 @@ static int __cpuinit blk_iopoll_cpu_notify(struct notifier_block *self,
31680 return NOTIFY_OK;
31681 }
31682
31683-static struct notifier_block __cpuinitdata blk_iopoll_cpu_notifier = {
31684+static struct notifier_block blk_iopoll_cpu_notifier = {
31685 .notifier_call = blk_iopoll_cpu_notify,
31686 };
31687
31688diff --git a/block/blk-map.c b/block/blk-map.c
31689index 623e1cd..ca1e109 100644
31690--- a/block/blk-map.c
31691+++ b/block/blk-map.c
31692@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
31693 if (!len || !kbuf)
31694 return -EINVAL;
31695
31696- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
31697+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
31698 if (do_copy)
31699 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
31700 else
31701diff --git a/block/blk-softirq.c b/block/blk-softirq.c
31702index 467c8de..f3628c5 100644
31703--- a/block/blk-softirq.c
31704+++ b/block/blk-softirq.c
31705@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
31706 * Softirq action handler - move entries to local list and loop over them
31707 * while passing them to the queue registered handler.
31708 */
31709-static void blk_done_softirq(struct softirq_action *h)
31710+static void blk_done_softirq(void)
31711 {
31712 struct list_head *cpu_list, local_list;
31713
31714@@ -98,7 +98,7 @@ static int __cpuinit blk_cpu_notify(struct notifier_block *self,
31715 return NOTIFY_OK;
31716 }
31717
31718-static struct notifier_block __cpuinitdata blk_cpu_notifier = {
31719+static struct notifier_block blk_cpu_notifier = {
31720 .notifier_call = blk_cpu_notify,
31721 };
31722
31723diff --git a/block/bsg.c b/block/bsg.c
31724index 420a5a9..23834aa 100644
31725--- a/block/bsg.c
31726+++ b/block/bsg.c
31727@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
31728 struct sg_io_v4 *hdr, struct bsg_device *bd,
31729 fmode_t has_write_perm)
31730 {
31731+ unsigned char tmpcmd[sizeof(rq->__cmd)];
31732+ unsigned char *cmdptr;
31733+
31734 if (hdr->request_len > BLK_MAX_CDB) {
31735 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
31736 if (!rq->cmd)
31737 return -ENOMEM;
31738- }
31739+ cmdptr = rq->cmd;
31740+ } else
31741+ cmdptr = tmpcmd;
31742
31743- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
31744+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
31745 hdr->request_len))
31746 return -EFAULT;
31747
31748+ if (cmdptr != rq->cmd)
31749+ memcpy(rq->cmd, cmdptr, hdr->request_len);
31750+
31751 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
31752 if (blk_verify_command(rq->cmd, has_write_perm))
31753 return -EPERM;
31754diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
31755index 7c668c8..db3521c 100644
31756--- a/block/compat_ioctl.c
31757+++ b/block/compat_ioctl.c
31758@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
31759 err |= __get_user(f->spec1, &uf->spec1);
31760 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
31761 err |= __get_user(name, &uf->name);
31762- f->name = compat_ptr(name);
31763+ f->name = (void __force_kernel *)compat_ptr(name);
31764 if (err) {
31765 err = -EFAULT;
31766 goto out;
31767diff --git a/block/genhd.c b/block/genhd.c
31768index 3c001fb..d15a9e8 100644
31769--- a/block/genhd.c
31770+++ b/block/genhd.c
31771@@ -467,21 +467,24 @@ static char *bdevt_str(dev_t devt, char *buf)
31772
31773 /*
31774 * Register device numbers dev..(dev+range-1)
31775- * range must be nonzero
31776+ * Noop if @range is zero.
31777 * The hash chain is sorted on range, so that subranges can override.
31778 */
31779 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
31780 struct kobject *(*probe)(dev_t, int *, void *),
31781 int (*lock)(dev_t, void *), void *data)
31782 {
31783- kobj_map(bdev_map, devt, range, module, probe, lock, data);
31784+ if (range)
31785+ kobj_map(bdev_map, devt, range, module, probe, lock, data);
31786 }
31787
31788 EXPORT_SYMBOL(blk_register_region);
31789
31790+/* undo blk_register_region(), noop if @range is zero */
31791 void blk_unregister_region(dev_t devt, unsigned long range)
31792 {
31793- kobj_unmap(bdev_map, devt, range);
31794+ if (range)
31795+ kobj_unmap(bdev_map, devt, range);
31796 }
31797
31798 EXPORT_SYMBOL(blk_unregister_region);
31799@@ -512,7 +515,7 @@ static void register_disk(struct gendisk *disk)
31800
31801 ddev->parent = disk->driverfs_dev;
31802
31803- dev_set_name(ddev, disk->disk_name);
31804+ dev_set_name(ddev, "%s", disk->disk_name);
31805
31806 /* delay uevents, until we scanned partition table */
31807 dev_set_uevent_suppress(ddev, 1);
31808diff --git a/block/partitions/efi.c b/block/partitions/efi.c
31809index ff5804e..a88acad 100644
31810--- a/block/partitions/efi.c
31811+++ b/block/partitions/efi.c
31812@@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
31813 if (!gpt)
31814 return NULL;
31815
31816+ if (!le32_to_cpu(gpt->num_partition_entries))
31817+ return NULL;
31818+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
31819+ if (!pte)
31820+ return NULL;
31821+
31822 count = le32_to_cpu(gpt->num_partition_entries) *
31823 le32_to_cpu(gpt->sizeof_partition_entry);
31824- if (!count)
31825- return NULL;
31826- pte = kzalloc(count, GFP_KERNEL);
31827- if (!pte)
31828- return NULL;
31829-
31830 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
31831 (u8 *) pte,
31832 count) < count) {
31833diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
31834index 9a87daa..fb17486 100644
31835--- a/block/scsi_ioctl.c
31836+++ b/block/scsi_ioctl.c
31837@@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
31838 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
31839 struct sg_io_hdr *hdr, fmode_t mode)
31840 {
31841- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
31842+ unsigned char tmpcmd[sizeof(rq->__cmd)];
31843+ unsigned char *cmdptr;
31844+
31845+ if (rq->cmd != rq->__cmd)
31846+ cmdptr = rq->cmd;
31847+ else
31848+ cmdptr = tmpcmd;
31849+
31850+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
31851 return -EFAULT;
31852+
31853+ if (cmdptr != rq->cmd)
31854+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
31855+
31856 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
31857 return -EPERM;
31858
31859@@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
31860 int err;
31861 unsigned int in_len, out_len, bytes, opcode, cmdlen;
31862 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
31863+ unsigned char tmpcmd[sizeof(rq->__cmd)];
31864+ unsigned char *cmdptr;
31865
31866 if (!sic)
31867 return -EINVAL;
31868@@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
31869 */
31870 err = -EFAULT;
31871 rq->cmd_len = cmdlen;
31872- if (copy_from_user(rq->cmd, sic->data, cmdlen))
31873+
31874+ if (rq->cmd != rq->__cmd)
31875+ cmdptr = rq->cmd;
31876+ else
31877+ cmdptr = tmpcmd;
31878+
31879+ if (copy_from_user(cmdptr, sic->data, cmdlen))
31880 goto error;
31881
31882+ if (rq->cmd != cmdptr)
31883+ memcpy(rq->cmd, cmdptr, cmdlen);
31884+
31885 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
31886 goto error;
31887
31888diff --git a/crypto/algapi.c b/crypto/algapi.c
31889index 6149a6e..55ed50d 100644
31890--- a/crypto/algapi.c
31891+++ b/crypto/algapi.c
31892@@ -495,7 +495,7 @@ static struct crypto_template *__crypto_lookup_template(const char *name)
31893
31894 struct crypto_template *crypto_lookup_template(const char *name)
31895 {
31896- return try_then_request_module(__crypto_lookup_template(name), name);
31897+ return try_then_request_module(__crypto_lookup_template(name), "%s", name);
31898 }
31899 EXPORT_SYMBOL_GPL(crypto_lookup_template);
31900
31901diff --git a/crypto/cryptd.c b/crypto/cryptd.c
31902index 7bdd61b..afec999 100644
31903--- a/crypto/cryptd.c
31904+++ b/crypto/cryptd.c
31905@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
31906
31907 struct cryptd_blkcipher_request_ctx {
31908 crypto_completion_t complete;
31909-};
31910+} __no_const;
31911
31912 struct cryptd_hash_ctx {
31913 struct crypto_shash *child;
31914@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
31915
31916 struct cryptd_aead_request_ctx {
31917 crypto_completion_t complete;
31918-};
31919+} __no_const;
31920
31921 static void cryptd_queue_worker(struct work_struct *work);
31922
31923diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
31924index b2c99dc..476c9fb 100644
31925--- a/crypto/pcrypt.c
31926+++ b/crypto/pcrypt.c
31927@@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
31928 int ret;
31929
31930 pinst->kobj.kset = pcrypt_kset;
31931- ret = kobject_add(&pinst->kobj, NULL, name);
31932+ ret = kobject_add(&pinst->kobj, NULL, "%s", name);
31933 if (!ret)
31934 kobject_uevent(&pinst->kobj, KOBJ_ADD);
31935
31936@@ -455,8 +455,8 @@ static int pcrypt_init_padata(struct padata_pcrypt *pcrypt,
31937
31938 get_online_cpus();
31939
31940- pcrypt->wq = alloc_workqueue(name,
31941- WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
31942+ pcrypt->wq = alloc_workqueue("%s",
31943+ WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1, name);
31944 if (!pcrypt->wq)
31945 goto err;
31946
31947diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
31948index f220d64..d359ad6 100644
31949--- a/drivers/acpi/apei/apei-internal.h
31950+++ b/drivers/acpi/apei/apei-internal.h
31951@@ -20,7 +20,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
31952 struct apei_exec_ins_type {
31953 u32 flags;
31954 apei_exec_ins_func_t run;
31955-};
31956+} __do_const;
31957
31958 struct apei_exec_context {
31959 u32 ip;
31960diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
31961index fefc2ca..12a535d 100644
31962--- a/drivers/acpi/apei/cper.c
31963+++ b/drivers/acpi/apei/cper.c
31964@@ -39,12 +39,12 @@
31965 */
31966 u64 cper_next_record_id(void)
31967 {
31968- static atomic64_t seq;
31969+ static atomic64_unchecked_t seq;
31970
31971- if (!atomic64_read(&seq))
31972- atomic64_set(&seq, ((u64)get_seconds()) << 32);
31973+ if (!atomic64_read_unchecked(&seq))
31974+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
31975
31976- return atomic64_inc_return(&seq);
31977+ return atomic64_inc_return_unchecked(&seq);
31978 }
31979 EXPORT_SYMBOL_GPL(cper_next_record_id);
31980
31981diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
31982index be60399..778b33e8 100644
31983--- a/drivers/acpi/bgrt.c
31984+++ b/drivers/acpi/bgrt.c
31985@@ -87,8 +87,10 @@ static int __init bgrt_init(void)
31986 return -ENODEV;
31987
31988 sysfs_bin_attr_init(&image_attr);
31989- image_attr.private = bgrt_image;
31990- image_attr.size = bgrt_image_size;
31991+ pax_open_kernel();
31992+ *(void **)&image_attr.private = bgrt_image;
31993+ *(size_t *)&image_attr.size = bgrt_image_size;
31994+ pax_close_kernel();
31995
31996 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
31997 if (!bgrt_kobj)
31998diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
31999index cb96296..b81293b 100644
32000--- a/drivers/acpi/blacklist.c
32001+++ b/drivers/acpi/blacklist.c
32002@@ -52,7 +52,7 @@ struct acpi_blacklist_item {
32003 u32 is_critical_error;
32004 };
32005
32006-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
32007+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
32008
32009 /*
32010 * POLICY: If *anything* doesn't work, put it on the blacklist.
32011@@ -193,7 +193,7 @@ static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
32012 return 0;
32013 }
32014
32015-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
32016+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
32017 {
32018 .callback = dmi_disable_osi_vista,
32019 .ident = "Fujitsu Siemens",
32020diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
32021index 7586544..636a2f0 100644
32022--- a/drivers/acpi/ec_sys.c
32023+++ b/drivers/acpi/ec_sys.c
32024@@ -12,6 +12,7 @@
32025 #include <linux/acpi.h>
32026 #include <linux/debugfs.h>
32027 #include <linux/module.h>
32028+#include <linux/uaccess.h>
32029 #include "internal.h"
32030
32031 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
32032@@ -34,7 +35,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
32033 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
32034 */
32035 unsigned int size = EC_SPACE_SIZE;
32036- u8 *data = (u8 *) buf;
32037+ u8 data;
32038 loff_t init_off = *off;
32039 int err = 0;
32040
32041@@ -47,9 +48,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
32042 size = count;
32043
32044 while (size) {
32045- err = ec_read(*off, &data[*off - init_off]);
32046+ err = ec_read(*off, &data);
32047 if (err)
32048 return err;
32049+ if (put_user(data, &buf[*off - init_off]))
32050+ return -EFAULT;
32051 *off += 1;
32052 size--;
32053 }
32054@@ -65,7 +68,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
32055
32056 unsigned int size = count;
32057 loff_t init_off = *off;
32058- u8 *data = (u8 *) buf;
32059 int err = 0;
32060
32061 if (*off >= EC_SPACE_SIZE)
32062@@ -76,7 +78,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
32063 }
32064
32065 while (size) {
32066- u8 byte_write = data[*off - init_off];
32067+ u8 byte_write;
32068+ if (get_user(byte_write, &buf[*off - init_off]))
32069+ return -EFAULT;
32070 err = ec_write(*off, byte_write);
32071 if (err)
32072 return err;
32073diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
32074index ee255c6..747c68b 100644
32075--- a/drivers/acpi/processor_idle.c
32076+++ b/drivers/acpi/processor_idle.c
32077@@ -986,7 +986,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
32078 {
32079 int i, count = CPUIDLE_DRIVER_STATE_START;
32080 struct acpi_processor_cx *cx;
32081- struct cpuidle_state *state;
32082+ cpuidle_state_no_const *state;
32083 struct cpuidle_driver *drv = &acpi_idle_driver;
32084
32085 if (!pr->flags.power_setup_done)
32086diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
32087index 41c0504..f8c0836 100644
32088--- a/drivers/acpi/sysfs.c
32089+++ b/drivers/acpi/sysfs.c
32090@@ -420,11 +420,11 @@ static u32 num_counters;
32091 static struct attribute **all_attrs;
32092 static u32 acpi_gpe_count;
32093
32094-static struct attribute_group interrupt_stats_attr_group = {
32095+static attribute_group_no_const interrupt_stats_attr_group = {
32096 .name = "interrupts",
32097 };
32098
32099-static struct kobj_attribute *counter_attrs;
32100+static kobj_attribute_no_const *counter_attrs;
32101
32102 static void delete_gpe_attr_array(void)
32103 {
32104diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
32105index 34c8216..f56c828 100644
32106--- a/drivers/ata/libahci.c
32107+++ b/drivers/ata/libahci.c
32108@@ -1230,7 +1230,7 @@ int ahci_kick_engine(struct ata_port *ap)
32109 }
32110 EXPORT_SYMBOL_GPL(ahci_kick_engine);
32111
32112-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
32113+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
32114 struct ata_taskfile *tf, int is_cmd, u16 flags,
32115 unsigned long timeout_msec)
32116 {
32117diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
32118index cf15aee..e0b7078 100644
32119--- a/drivers/ata/libata-core.c
32120+++ b/drivers/ata/libata-core.c
32121@@ -4792,7 +4792,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
32122 struct ata_port *ap;
32123 unsigned int tag;
32124
32125- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
32126+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
32127 ap = qc->ap;
32128
32129 qc->flags = 0;
32130@@ -4808,7 +4808,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
32131 struct ata_port *ap;
32132 struct ata_link *link;
32133
32134- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
32135+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
32136 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
32137 ap = qc->ap;
32138 link = qc->dev->link;
32139@@ -5926,6 +5926,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
32140 return;
32141
32142 spin_lock(&lock);
32143+ pax_open_kernel();
32144
32145 for (cur = ops->inherits; cur; cur = cur->inherits) {
32146 void **inherit = (void **)cur;
32147@@ -5939,8 +5940,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
32148 if (IS_ERR(*pp))
32149 *pp = NULL;
32150
32151- ops->inherits = NULL;
32152+ *(struct ata_port_operations **)&ops->inherits = NULL;
32153
32154+ pax_close_kernel();
32155 spin_unlock(&lock);
32156 }
32157
32158diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
32159index 405022d..fb70e53 100644
32160--- a/drivers/ata/pata_arasan_cf.c
32161+++ b/drivers/ata/pata_arasan_cf.c
32162@@ -864,7 +864,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
32163 /* Handle platform specific quirks */
32164 if (pdata->quirk) {
32165 if (pdata->quirk & CF_BROKEN_PIO) {
32166- ap->ops->set_piomode = NULL;
32167+ pax_open_kernel();
32168+ *(void **)&ap->ops->set_piomode = NULL;
32169+ pax_close_kernel();
32170 ap->pio_mask = 0;
32171 }
32172 if (pdata->quirk & CF_BROKEN_MWDMA)
32173diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
32174index f9b983a..887b9d8 100644
32175--- a/drivers/atm/adummy.c
32176+++ b/drivers/atm/adummy.c
32177@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
32178 vcc->pop(vcc, skb);
32179 else
32180 dev_kfree_skb_any(skb);
32181- atomic_inc(&vcc->stats->tx);
32182+ atomic_inc_unchecked(&vcc->stats->tx);
32183
32184 return 0;
32185 }
32186diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
32187index 77a7480d..05cde58 100644
32188--- a/drivers/atm/ambassador.c
32189+++ b/drivers/atm/ambassador.c
32190@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
32191 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
32192
32193 // VC layer stats
32194- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
32195+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
32196
32197 // free the descriptor
32198 kfree (tx_descr);
32199@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
32200 dump_skb ("<<<", vc, skb);
32201
32202 // VC layer stats
32203- atomic_inc(&atm_vcc->stats->rx);
32204+ atomic_inc_unchecked(&atm_vcc->stats->rx);
32205 __net_timestamp(skb);
32206 // end of our responsibility
32207 atm_vcc->push (atm_vcc, skb);
32208@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
32209 } else {
32210 PRINTK (KERN_INFO, "dropped over-size frame");
32211 // should we count this?
32212- atomic_inc(&atm_vcc->stats->rx_drop);
32213+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
32214 }
32215
32216 } else {
32217@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
32218 }
32219
32220 if (check_area (skb->data, skb->len)) {
32221- atomic_inc(&atm_vcc->stats->tx_err);
32222+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
32223 return -ENOMEM; // ?
32224 }
32225
32226diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
32227index 0e3f8f9..765a7a5 100644
32228--- a/drivers/atm/atmtcp.c
32229+++ b/drivers/atm/atmtcp.c
32230@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
32231 if (vcc->pop) vcc->pop(vcc,skb);
32232 else dev_kfree_skb(skb);
32233 if (dev_data) return 0;
32234- atomic_inc(&vcc->stats->tx_err);
32235+ atomic_inc_unchecked(&vcc->stats->tx_err);
32236 return -ENOLINK;
32237 }
32238 size = skb->len+sizeof(struct atmtcp_hdr);
32239@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
32240 if (!new_skb) {
32241 if (vcc->pop) vcc->pop(vcc,skb);
32242 else dev_kfree_skb(skb);
32243- atomic_inc(&vcc->stats->tx_err);
32244+ atomic_inc_unchecked(&vcc->stats->tx_err);
32245 return -ENOBUFS;
32246 }
32247 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
32248@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
32249 if (vcc->pop) vcc->pop(vcc,skb);
32250 else dev_kfree_skb(skb);
32251 out_vcc->push(out_vcc,new_skb);
32252- atomic_inc(&vcc->stats->tx);
32253- atomic_inc(&out_vcc->stats->rx);
32254+ atomic_inc_unchecked(&vcc->stats->tx);
32255+ atomic_inc_unchecked(&out_vcc->stats->rx);
32256 return 0;
32257 }
32258
32259@@ -299,7 +299,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
32260 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
32261 read_unlock(&vcc_sklist_lock);
32262 if (!out_vcc) {
32263- atomic_inc(&vcc->stats->tx_err);
32264+ atomic_inc_unchecked(&vcc->stats->tx_err);
32265 goto done;
32266 }
32267 skb_pull(skb,sizeof(struct atmtcp_hdr));
32268@@ -311,8 +311,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
32269 __net_timestamp(new_skb);
32270 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
32271 out_vcc->push(out_vcc,new_skb);
32272- atomic_inc(&vcc->stats->tx);
32273- atomic_inc(&out_vcc->stats->rx);
32274+ atomic_inc_unchecked(&vcc->stats->tx);
32275+ atomic_inc_unchecked(&out_vcc->stats->rx);
32276 done:
32277 if (vcc->pop) vcc->pop(vcc,skb);
32278 else dev_kfree_skb(skb);
32279diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
32280index b1955ba..b179940 100644
32281--- a/drivers/atm/eni.c
32282+++ b/drivers/atm/eni.c
32283@@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
32284 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
32285 vcc->dev->number);
32286 length = 0;
32287- atomic_inc(&vcc->stats->rx_err);
32288+ atomic_inc_unchecked(&vcc->stats->rx_err);
32289 }
32290 else {
32291 length = ATM_CELL_SIZE-1; /* no HEC */
32292@@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
32293 size);
32294 }
32295 eff = length = 0;
32296- atomic_inc(&vcc->stats->rx_err);
32297+ atomic_inc_unchecked(&vcc->stats->rx_err);
32298 }
32299 else {
32300 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
32301@@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
32302 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
32303 vcc->dev->number,vcc->vci,length,size << 2,descr);
32304 length = eff = 0;
32305- atomic_inc(&vcc->stats->rx_err);
32306+ atomic_inc_unchecked(&vcc->stats->rx_err);
32307 }
32308 }
32309 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
32310@@ -767,7 +767,7 @@ rx_dequeued++;
32311 vcc->push(vcc,skb);
32312 pushed++;
32313 }
32314- atomic_inc(&vcc->stats->rx);
32315+ atomic_inc_unchecked(&vcc->stats->rx);
32316 }
32317 wake_up(&eni_dev->rx_wait);
32318 }
32319@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
32320 PCI_DMA_TODEVICE);
32321 if (vcc->pop) vcc->pop(vcc,skb);
32322 else dev_kfree_skb_irq(skb);
32323- atomic_inc(&vcc->stats->tx);
32324+ atomic_inc_unchecked(&vcc->stats->tx);
32325 wake_up(&eni_dev->tx_wait);
32326 dma_complete++;
32327 }
32328diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
32329index b41c948..a002b17 100644
32330--- a/drivers/atm/firestream.c
32331+++ b/drivers/atm/firestream.c
32332@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
32333 }
32334 }
32335
32336- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
32337+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
32338
32339 fs_dprintk (FS_DEBUG_TXMEM, "i");
32340 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
32341@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
32342 #endif
32343 skb_put (skb, qe->p1 & 0xffff);
32344 ATM_SKB(skb)->vcc = atm_vcc;
32345- atomic_inc(&atm_vcc->stats->rx);
32346+ atomic_inc_unchecked(&atm_vcc->stats->rx);
32347 __net_timestamp(skb);
32348 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
32349 atm_vcc->push (atm_vcc, skb);
32350@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
32351 kfree (pe);
32352 }
32353 if (atm_vcc)
32354- atomic_inc(&atm_vcc->stats->rx_drop);
32355+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
32356 break;
32357 case 0x1f: /* Reassembly abort: no buffers. */
32358 /* Silently increment error counter. */
32359 if (atm_vcc)
32360- atomic_inc(&atm_vcc->stats->rx_drop);
32361+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
32362 break;
32363 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
32364 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
32365diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
32366index 204814e..cede831 100644
32367--- a/drivers/atm/fore200e.c
32368+++ b/drivers/atm/fore200e.c
32369@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
32370 #endif
32371 /* check error condition */
32372 if (*entry->status & STATUS_ERROR)
32373- atomic_inc(&vcc->stats->tx_err);
32374+ atomic_inc_unchecked(&vcc->stats->tx_err);
32375 else
32376- atomic_inc(&vcc->stats->tx);
32377+ atomic_inc_unchecked(&vcc->stats->tx);
32378 }
32379 }
32380
32381@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
32382 if (skb == NULL) {
32383 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
32384
32385- atomic_inc(&vcc->stats->rx_drop);
32386+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32387 return -ENOMEM;
32388 }
32389
32390@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
32391
32392 dev_kfree_skb_any(skb);
32393
32394- atomic_inc(&vcc->stats->rx_drop);
32395+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32396 return -ENOMEM;
32397 }
32398
32399 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
32400
32401 vcc->push(vcc, skb);
32402- atomic_inc(&vcc->stats->rx);
32403+ atomic_inc_unchecked(&vcc->stats->rx);
32404
32405 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
32406
32407@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
32408 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
32409 fore200e->atm_dev->number,
32410 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
32411- atomic_inc(&vcc->stats->rx_err);
32412+ atomic_inc_unchecked(&vcc->stats->rx_err);
32413 }
32414 }
32415
32416@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
32417 goto retry_here;
32418 }
32419
32420- atomic_inc(&vcc->stats->tx_err);
32421+ atomic_inc_unchecked(&vcc->stats->tx_err);
32422
32423 fore200e->tx_sat++;
32424 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
32425diff --git a/drivers/atm/he.c b/drivers/atm/he.c
32426index d689126..e78e412 100644
32427--- a/drivers/atm/he.c
32428+++ b/drivers/atm/he.c
32429@@ -1698,7 +1698,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
32430
32431 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
32432 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
32433- atomic_inc(&vcc->stats->rx_drop);
32434+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32435 goto return_host_buffers;
32436 }
32437
32438@@ -1725,7 +1725,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
32439 RBRQ_LEN_ERR(he_dev->rbrq_head)
32440 ? "LEN_ERR" : "",
32441 vcc->vpi, vcc->vci);
32442- atomic_inc(&vcc->stats->rx_err);
32443+ atomic_inc_unchecked(&vcc->stats->rx_err);
32444 goto return_host_buffers;
32445 }
32446
32447@@ -1777,7 +1777,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
32448 vcc->push(vcc, skb);
32449 spin_lock(&he_dev->global_lock);
32450
32451- atomic_inc(&vcc->stats->rx);
32452+ atomic_inc_unchecked(&vcc->stats->rx);
32453
32454 return_host_buffers:
32455 ++pdus_assembled;
32456@@ -2103,7 +2103,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
32457 tpd->vcc->pop(tpd->vcc, tpd->skb);
32458 else
32459 dev_kfree_skb_any(tpd->skb);
32460- atomic_inc(&tpd->vcc->stats->tx_err);
32461+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
32462 }
32463 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
32464 return;
32465@@ -2515,7 +2515,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
32466 vcc->pop(vcc, skb);
32467 else
32468 dev_kfree_skb_any(skb);
32469- atomic_inc(&vcc->stats->tx_err);
32470+ atomic_inc_unchecked(&vcc->stats->tx_err);
32471 return -EINVAL;
32472 }
32473
32474@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
32475 vcc->pop(vcc, skb);
32476 else
32477 dev_kfree_skb_any(skb);
32478- atomic_inc(&vcc->stats->tx_err);
32479+ atomic_inc_unchecked(&vcc->stats->tx_err);
32480 return -EINVAL;
32481 }
32482 #endif
32483@@ -2538,7 +2538,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
32484 vcc->pop(vcc, skb);
32485 else
32486 dev_kfree_skb_any(skb);
32487- atomic_inc(&vcc->stats->tx_err);
32488+ atomic_inc_unchecked(&vcc->stats->tx_err);
32489 spin_unlock_irqrestore(&he_dev->global_lock, flags);
32490 return -ENOMEM;
32491 }
32492@@ -2580,7 +2580,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
32493 vcc->pop(vcc, skb);
32494 else
32495 dev_kfree_skb_any(skb);
32496- atomic_inc(&vcc->stats->tx_err);
32497+ atomic_inc_unchecked(&vcc->stats->tx_err);
32498 spin_unlock_irqrestore(&he_dev->global_lock, flags);
32499 return -ENOMEM;
32500 }
32501@@ -2611,7 +2611,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
32502 __enqueue_tpd(he_dev, tpd, cid);
32503 spin_unlock_irqrestore(&he_dev->global_lock, flags);
32504
32505- atomic_inc(&vcc->stats->tx);
32506+ atomic_inc_unchecked(&vcc->stats->tx);
32507
32508 return 0;
32509 }
32510diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
32511index 1dc0519..1aadaf7 100644
32512--- a/drivers/atm/horizon.c
32513+++ b/drivers/atm/horizon.c
32514@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
32515 {
32516 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
32517 // VC layer stats
32518- atomic_inc(&vcc->stats->rx);
32519+ atomic_inc_unchecked(&vcc->stats->rx);
32520 __net_timestamp(skb);
32521 // end of our responsibility
32522 vcc->push (vcc, skb);
32523@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
32524 dev->tx_iovec = NULL;
32525
32526 // VC layer stats
32527- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
32528+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
32529
32530 // free the skb
32531 hrz_kfree_skb (skb);
32532diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
32533index 272f009..a18ba55 100644
32534--- a/drivers/atm/idt77252.c
32535+++ b/drivers/atm/idt77252.c
32536@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
32537 else
32538 dev_kfree_skb(skb);
32539
32540- atomic_inc(&vcc->stats->tx);
32541+ atomic_inc_unchecked(&vcc->stats->tx);
32542 }
32543
32544 atomic_dec(&scq->used);
32545@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32546 if ((sb = dev_alloc_skb(64)) == NULL) {
32547 printk("%s: Can't allocate buffers for aal0.\n",
32548 card->name);
32549- atomic_add(i, &vcc->stats->rx_drop);
32550+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
32551 break;
32552 }
32553 if (!atm_charge(vcc, sb->truesize)) {
32554 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
32555 card->name);
32556- atomic_add(i - 1, &vcc->stats->rx_drop);
32557+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
32558 dev_kfree_skb(sb);
32559 break;
32560 }
32561@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32562 ATM_SKB(sb)->vcc = vcc;
32563 __net_timestamp(sb);
32564 vcc->push(vcc, sb);
32565- atomic_inc(&vcc->stats->rx);
32566+ atomic_inc_unchecked(&vcc->stats->rx);
32567
32568 cell += ATM_CELL_PAYLOAD;
32569 }
32570@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32571 "(CDC: %08x)\n",
32572 card->name, len, rpp->len, readl(SAR_REG_CDC));
32573 recycle_rx_pool_skb(card, rpp);
32574- atomic_inc(&vcc->stats->rx_err);
32575+ atomic_inc_unchecked(&vcc->stats->rx_err);
32576 return;
32577 }
32578 if (stat & SAR_RSQE_CRC) {
32579 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
32580 recycle_rx_pool_skb(card, rpp);
32581- atomic_inc(&vcc->stats->rx_err);
32582+ atomic_inc_unchecked(&vcc->stats->rx_err);
32583 return;
32584 }
32585 if (skb_queue_len(&rpp->queue) > 1) {
32586@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32587 RXPRINTK("%s: Can't alloc RX skb.\n",
32588 card->name);
32589 recycle_rx_pool_skb(card, rpp);
32590- atomic_inc(&vcc->stats->rx_err);
32591+ atomic_inc_unchecked(&vcc->stats->rx_err);
32592 return;
32593 }
32594 if (!atm_charge(vcc, skb->truesize)) {
32595@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32596 __net_timestamp(skb);
32597
32598 vcc->push(vcc, skb);
32599- atomic_inc(&vcc->stats->rx);
32600+ atomic_inc_unchecked(&vcc->stats->rx);
32601
32602 return;
32603 }
32604@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32605 __net_timestamp(skb);
32606
32607 vcc->push(vcc, skb);
32608- atomic_inc(&vcc->stats->rx);
32609+ atomic_inc_unchecked(&vcc->stats->rx);
32610
32611 if (skb->truesize > SAR_FB_SIZE_3)
32612 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
32613@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
32614 if (vcc->qos.aal != ATM_AAL0) {
32615 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
32616 card->name, vpi, vci);
32617- atomic_inc(&vcc->stats->rx_drop);
32618+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32619 goto drop;
32620 }
32621
32622 if ((sb = dev_alloc_skb(64)) == NULL) {
32623 printk("%s: Can't allocate buffers for AAL0.\n",
32624 card->name);
32625- atomic_inc(&vcc->stats->rx_err);
32626+ atomic_inc_unchecked(&vcc->stats->rx_err);
32627 goto drop;
32628 }
32629
32630@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
32631 ATM_SKB(sb)->vcc = vcc;
32632 __net_timestamp(sb);
32633 vcc->push(vcc, sb);
32634- atomic_inc(&vcc->stats->rx);
32635+ atomic_inc_unchecked(&vcc->stats->rx);
32636
32637 drop:
32638 skb_pull(queue, 64);
32639@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
32640
32641 if (vc == NULL) {
32642 printk("%s: NULL connection in send().\n", card->name);
32643- atomic_inc(&vcc->stats->tx_err);
32644+ atomic_inc_unchecked(&vcc->stats->tx_err);
32645 dev_kfree_skb(skb);
32646 return -EINVAL;
32647 }
32648 if (!test_bit(VCF_TX, &vc->flags)) {
32649 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
32650- atomic_inc(&vcc->stats->tx_err);
32651+ atomic_inc_unchecked(&vcc->stats->tx_err);
32652 dev_kfree_skb(skb);
32653 return -EINVAL;
32654 }
32655@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
32656 break;
32657 default:
32658 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
32659- atomic_inc(&vcc->stats->tx_err);
32660+ atomic_inc_unchecked(&vcc->stats->tx_err);
32661 dev_kfree_skb(skb);
32662 return -EINVAL;
32663 }
32664
32665 if (skb_shinfo(skb)->nr_frags != 0) {
32666 printk("%s: No scatter-gather yet.\n", card->name);
32667- atomic_inc(&vcc->stats->tx_err);
32668+ atomic_inc_unchecked(&vcc->stats->tx_err);
32669 dev_kfree_skb(skb);
32670 return -EINVAL;
32671 }
32672@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
32673
32674 err = queue_skb(card, vc, skb, oam);
32675 if (err) {
32676- atomic_inc(&vcc->stats->tx_err);
32677+ atomic_inc_unchecked(&vcc->stats->tx_err);
32678 dev_kfree_skb(skb);
32679 return err;
32680 }
32681@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
32682 skb = dev_alloc_skb(64);
32683 if (!skb) {
32684 printk("%s: Out of memory in send_oam().\n", card->name);
32685- atomic_inc(&vcc->stats->tx_err);
32686+ atomic_inc_unchecked(&vcc->stats->tx_err);
32687 return -ENOMEM;
32688 }
32689 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
32690diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
32691index 4217f29..88f547a 100644
32692--- a/drivers/atm/iphase.c
32693+++ b/drivers/atm/iphase.c
32694@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
32695 status = (u_short) (buf_desc_ptr->desc_mode);
32696 if (status & (RX_CER | RX_PTE | RX_OFL))
32697 {
32698- atomic_inc(&vcc->stats->rx_err);
32699+ atomic_inc_unchecked(&vcc->stats->rx_err);
32700 IF_ERR(printk("IA: bad packet, dropping it");)
32701 if (status & RX_CER) {
32702 IF_ERR(printk(" cause: packet CRC error\n");)
32703@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
32704 len = dma_addr - buf_addr;
32705 if (len > iadev->rx_buf_sz) {
32706 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
32707- atomic_inc(&vcc->stats->rx_err);
32708+ atomic_inc_unchecked(&vcc->stats->rx_err);
32709 goto out_free_desc;
32710 }
32711
32712@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
32713 ia_vcc = INPH_IA_VCC(vcc);
32714 if (ia_vcc == NULL)
32715 {
32716- atomic_inc(&vcc->stats->rx_err);
32717+ atomic_inc_unchecked(&vcc->stats->rx_err);
32718 atm_return(vcc, skb->truesize);
32719 dev_kfree_skb_any(skb);
32720 goto INCR_DLE;
32721@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
32722 if ((length > iadev->rx_buf_sz) || (length >
32723 (skb->len - sizeof(struct cpcs_trailer))))
32724 {
32725- atomic_inc(&vcc->stats->rx_err);
32726+ atomic_inc_unchecked(&vcc->stats->rx_err);
32727 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
32728 length, skb->len);)
32729 atm_return(vcc, skb->truesize);
32730@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
32731
32732 IF_RX(printk("rx_dle_intr: skb push");)
32733 vcc->push(vcc,skb);
32734- atomic_inc(&vcc->stats->rx);
32735+ atomic_inc_unchecked(&vcc->stats->rx);
32736 iadev->rx_pkt_cnt++;
32737 }
32738 INCR_DLE:
32739@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
32740 {
32741 struct k_sonet_stats *stats;
32742 stats = &PRIV(_ia_dev[board])->sonet_stats;
32743- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
32744- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
32745- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
32746- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
32747- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
32748- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
32749- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
32750- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
32751- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
32752+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
32753+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
32754+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
32755+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
32756+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
32757+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
32758+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
32759+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
32760+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
32761 }
32762 ia_cmds.status = 0;
32763 break;
32764@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
32765 if ((desc == 0) || (desc > iadev->num_tx_desc))
32766 {
32767 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
32768- atomic_inc(&vcc->stats->tx);
32769+ atomic_inc_unchecked(&vcc->stats->tx);
32770 if (vcc->pop)
32771 vcc->pop(vcc, skb);
32772 else
32773@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
32774 ATM_DESC(skb) = vcc->vci;
32775 skb_queue_tail(&iadev->tx_dma_q, skb);
32776
32777- atomic_inc(&vcc->stats->tx);
32778+ atomic_inc_unchecked(&vcc->stats->tx);
32779 iadev->tx_pkt_cnt++;
32780 /* Increment transaction counter */
32781 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
32782
32783 #if 0
32784 /* add flow control logic */
32785- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
32786+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
32787 if (iavcc->vc_desc_cnt > 10) {
32788 vcc->tx_quota = vcc->tx_quota * 3 / 4;
32789 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
32790diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
32791index fa7d701..1e404c7 100644
32792--- a/drivers/atm/lanai.c
32793+++ b/drivers/atm/lanai.c
32794@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
32795 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
32796 lanai_endtx(lanai, lvcc);
32797 lanai_free_skb(lvcc->tx.atmvcc, skb);
32798- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
32799+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
32800 }
32801
32802 /* Try to fill the buffer - don't call unless there is backlog */
32803@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
32804 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
32805 __net_timestamp(skb);
32806 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
32807- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
32808+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
32809 out:
32810 lvcc->rx.buf.ptr = end;
32811 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
32812@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
32813 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
32814 "vcc %d\n", lanai->number, (unsigned int) s, vci);
32815 lanai->stats.service_rxnotaal5++;
32816- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
32817+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
32818 return 0;
32819 }
32820 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
32821@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
32822 int bytes;
32823 read_unlock(&vcc_sklist_lock);
32824 DPRINTK("got trashed rx pdu on vci %d\n", vci);
32825- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
32826+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
32827 lvcc->stats.x.aal5.service_trash++;
32828 bytes = (SERVICE_GET_END(s) * 16) -
32829 (((unsigned long) lvcc->rx.buf.ptr) -
32830@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
32831 }
32832 if (s & SERVICE_STREAM) {
32833 read_unlock(&vcc_sklist_lock);
32834- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
32835+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
32836 lvcc->stats.x.aal5.service_stream++;
32837 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
32838 "PDU on VCI %d!\n", lanai->number, vci);
32839@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
32840 return 0;
32841 }
32842 DPRINTK("got rx crc error on vci %d\n", vci);
32843- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
32844+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
32845 lvcc->stats.x.aal5.service_rxcrc++;
32846 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
32847 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
32848diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
32849index 6587dc2..149833d 100644
32850--- a/drivers/atm/nicstar.c
32851+++ b/drivers/atm/nicstar.c
32852@@ -1641,7 +1641,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
32853 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
32854 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
32855 card->index);
32856- atomic_inc(&vcc->stats->tx_err);
32857+ atomic_inc_unchecked(&vcc->stats->tx_err);
32858 dev_kfree_skb_any(skb);
32859 return -EINVAL;
32860 }
32861@@ -1649,7 +1649,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
32862 if (!vc->tx) {
32863 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
32864 card->index);
32865- atomic_inc(&vcc->stats->tx_err);
32866+ atomic_inc_unchecked(&vcc->stats->tx_err);
32867 dev_kfree_skb_any(skb);
32868 return -EINVAL;
32869 }
32870@@ -1657,14 +1657,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
32871 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
32872 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
32873 card->index);
32874- atomic_inc(&vcc->stats->tx_err);
32875+ atomic_inc_unchecked(&vcc->stats->tx_err);
32876 dev_kfree_skb_any(skb);
32877 return -EINVAL;
32878 }
32879
32880 if (skb_shinfo(skb)->nr_frags != 0) {
32881 printk("nicstar%d: No scatter-gather yet.\n", card->index);
32882- atomic_inc(&vcc->stats->tx_err);
32883+ atomic_inc_unchecked(&vcc->stats->tx_err);
32884 dev_kfree_skb_any(skb);
32885 return -EINVAL;
32886 }
32887@@ -1712,11 +1712,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
32888 }
32889
32890 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
32891- atomic_inc(&vcc->stats->tx_err);
32892+ atomic_inc_unchecked(&vcc->stats->tx_err);
32893 dev_kfree_skb_any(skb);
32894 return -EIO;
32895 }
32896- atomic_inc(&vcc->stats->tx);
32897+ atomic_inc_unchecked(&vcc->stats->tx);
32898
32899 return 0;
32900 }
32901@@ -2033,14 +2033,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32902 printk
32903 ("nicstar%d: Can't allocate buffers for aal0.\n",
32904 card->index);
32905- atomic_add(i, &vcc->stats->rx_drop);
32906+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
32907 break;
32908 }
32909 if (!atm_charge(vcc, sb->truesize)) {
32910 RXPRINTK
32911 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
32912 card->index);
32913- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
32914+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
32915 dev_kfree_skb_any(sb);
32916 break;
32917 }
32918@@ -2055,7 +2055,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32919 ATM_SKB(sb)->vcc = vcc;
32920 __net_timestamp(sb);
32921 vcc->push(vcc, sb);
32922- atomic_inc(&vcc->stats->rx);
32923+ atomic_inc_unchecked(&vcc->stats->rx);
32924 cell += ATM_CELL_PAYLOAD;
32925 }
32926
32927@@ -2072,7 +2072,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32928 if (iovb == NULL) {
32929 printk("nicstar%d: Out of iovec buffers.\n",
32930 card->index);
32931- atomic_inc(&vcc->stats->rx_drop);
32932+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32933 recycle_rx_buf(card, skb);
32934 return;
32935 }
32936@@ -2096,7 +2096,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32937 small or large buffer itself. */
32938 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
32939 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
32940- atomic_inc(&vcc->stats->rx_err);
32941+ atomic_inc_unchecked(&vcc->stats->rx_err);
32942 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
32943 NS_MAX_IOVECS);
32944 NS_PRV_IOVCNT(iovb) = 0;
32945@@ -2116,7 +2116,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32946 ("nicstar%d: Expected a small buffer, and this is not one.\n",
32947 card->index);
32948 which_list(card, skb);
32949- atomic_inc(&vcc->stats->rx_err);
32950+ atomic_inc_unchecked(&vcc->stats->rx_err);
32951 recycle_rx_buf(card, skb);
32952 vc->rx_iov = NULL;
32953 recycle_iov_buf(card, iovb);
32954@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32955 ("nicstar%d: Expected a large buffer, and this is not one.\n",
32956 card->index);
32957 which_list(card, skb);
32958- atomic_inc(&vcc->stats->rx_err);
32959+ atomic_inc_unchecked(&vcc->stats->rx_err);
32960 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
32961 NS_PRV_IOVCNT(iovb));
32962 vc->rx_iov = NULL;
32963@@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32964 printk(" - PDU size mismatch.\n");
32965 else
32966 printk(".\n");
32967- atomic_inc(&vcc->stats->rx_err);
32968+ atomic_inc_unchecked(&vcc->stats->rx_err);
32969 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
32970 NS_PRV_IOVCNT(iovb));
32971 vc->rx_iov = NULL;
32972@@ -2166,7 +2166,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32973 /* skb points to a small buffer */
32974 if (!atm_charge(vcc, skb->truesize)) {
32975 push_rxbufs(card, skb);
32976- atomic_inc(&vcc->stats->rx_drop);
32977+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32978 } else {
32979 skb_put(skb, len);
32980 dequeue_sm_buf(card, skb);
32981@@ -2176,7 +2176,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32982 ATM_SKB(skb)->vcc = vcc;
32983 __net_timestamp(skb);
32984 vcc->push(vcc, skb);
32985- atomic_inc(&vcc->stats->rx);
32986+ atomic_inc_unchecked(&vcc->stats->rx);
32987 }
32988 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
32989 struct sk_buff *sb;
32990@@ -2187,7 +2187,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32991 if (len <= NS_SMBUFSIZE) {
32992 if (!atm_charge(vcc, sb->truesize)) {
32993 push_rxbufs(card, sb);
32994- atomic_inc(&vcc->stats->rx_drop);
32995+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32996 } else {
32997 skb_put(sb, len);
32998 dequeue_sm_buf(card, sb);
32999@@ -2197,7 +2197,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33000 ATM_SKB(sb)->vcc = vcc;
33001 __net_timestamp(sb);
33002 vcc->push(vcc, sb);
33003- atomic_inc(&vcc->stats->rx);
33004+ atomic_inc_unchecked(&vcc->stats->rx);
33005 }
33006
33007 push_rxbufs(card, skb);
33008@@ -2206,7 +2206,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33009
33010 if (!atm_charge(vcc, skb->truesize)) {
33011 push_rxbufs(card, skb);
33012- atomic_inc(&vcc->stats->rx_drop);
33013+ atomic_inc_unchecked(&vcc->stats->rx_drop);
33014 } else {
33015 dequeue_lg_buf(card, skb);
33016 #ifdef NS_USE_DESTRUCTORS
33017@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33018 ATM_SKB(skb)->vcc = vcc;
33019 __net_timestamp(skb);
33020 vcc->push(vcc, skb);
33021- atomic_inc(&vcc->stats->rx);
33022+ atomic_inc_unchecked(&vcc->stats->rx);
33023 }
33024
33025 push_rxbufs(card, sb);
33026@@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33027 printk
33028 ("nicstar%d: Out of huge buffers.\n",
33029 card->index);
33030- atomic_inc(&vcc->stats->rx_drop);
33031+ atomic_inc_unchecked(&vcc->stats->rx_drop);
33032 recycle_iovec_rx_bufs(card,
33033 (struct iovec *)
33034 iovb->data,
33035@@ -2291,7 +2291,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33036 card->hbpool.count++;
33037 } else
33038 dev_kfree_skb_any(hb);
33039- atomic_inc(&vcc->stats->rx_drop);
33040+ atomic_inc_unchecked(&vcc->stats->rx_drop);
33041 } else {
33042 /* Copy the small buffer to the huge buffer */
33043 sb = (struct sk_buff *)iov->iov_base;
33044@@ -2328,7 +2328,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33045 #endif /* NS_USE_DESTRUCTORS */
33046 __net_timestamp(hb);
33047 vcc->push(vcc, hb);
33048- atomic_inc(&vcc->stats->rx);
33049+ atomic_inc_unchecked(&vcc->stats->rx);
33050 }
33051 }
33052
33053diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
33054index 32784d1..4a8434a 100644
33055--- a/drivers/atm/solos-pci.c
33056+++ b/drivers/atm/solos-pci.c
33057@@ -838,7 +838,7 @@ void solos_bh(unsigned long card_arg)
33058 }
33059 atm_charge(vcc, skb->truesize);
33060 vcc->push(vcc, skb);
33061- atomic_inc(&vcc->stats->rx);
33062+ atomic_inc_unchecked(&vcc->stats->rx);
33063 break;
33064
33065 case PKT_STATUS:
33066@@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card)
33067 vcc = SKB_CB(oldskb)->vcc;
33068
33069 if (vcc) {
33070- atomic_inc(&vcc->stats->tx);
33071+ atomic_inc_unchecked(&vcc->stats->tx);
33072 solos_pop(vcc, oldskb);
33073 } else {
33074 dev_kfree_skb_irq(oldskb);
33075diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
33076index 0215934..ce9f5b1 100644
33077--- a/drivers/atm/suni.c
33078+++ b/drivers/atm/suni.c
33079@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
33080
33081
33082 #define ADD_LIMITED(s,v) \
33083- atomic_add((v),&stats->s); \
33084- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
33085+ atomic_add_unchecked((v),&stats->s); \
33086+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
33087
33088
33089 static void suni_hz(unsigned long from_timer)
33090diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
33091index 5120a96..e2572bd 100644
33092--- a/drivers/atm/uPD98402.c
33093+++ b/drivers/atm/uPD98402.c
33094@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
33095 struct sonet_stats tmp;
33096 int error = 0;
33097
33098- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
33099+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
33100 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
33101 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
33102 if (zero && !error) {
33103@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
33104
33105
33106 #define ADD_LIMITED(s,v) \
33107- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
33108- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
33109- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
33110+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
33111+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
33112+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
33113
33114
33115 static void stat_event(struct atm_dev *dev)
33116@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
33117 if (reason & uPD98402_INT_PFM) stat_event(dev);
33118 if (reason & uPD98402_INT_PCO) {
33119 (void) GET(PCOCR); /* clear interrupt cause */
33120- atomic_add(GET(HECCT),
33121+ atomic_add_unchecked(GET(HECCT),
33122 &PRIV(dev)->sonet_stats.uncorr_hcs);
33123 }
33124 if ((reason & uPD98402_INT_RFO) &&
33125@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
33126 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
33127 uPD98402_INT_LOS),PIMR); /* enable them */
33128 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
33129- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
33130- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
33131- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
33132+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
33133+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
33134+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
33135 return 0;
33136 }
33137
33138diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
33139index 969c3c2..9b72956 100644
33140--- a/drivers/atm/zatm.c
33141+++ b/drivers/atm/zatm.c
33142@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
33143 }
33144 if (!size) {
33145 dev_kfree_skb_irq(skb);
33146- if (vcc) atomic_inc(&vcc->stats->rx_err);
33147+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
33148 continue;
33149 }
33150 if (!atm_charge(vcc,skb->truesize)) {
33151@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
33152 skb->len = size;
33153 ATM_SKB(skb)->vcc = vcc;
33154 vcc->push(vcc,skb);
33155- atomic_inc(&vcc->stats->rx);
33156+ atomic_inc_unchecked(&vcc->stats->rx);
33157 }
33158 zout(pos & 0xffff,MTA(mbx));
33159 #if 0 /* probably a stupid idea */
33160@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
33161 skb_queue_head(&zatm_vcc->backlog,skb);
33162 break;
33163 }
33164- atomic_inc(&vcc->stats->tx);
33165+ atomic_inc_unchecked(&vcc->stats->tx);
33166 wake_up(&zatm_vcc->tx_wait);
33167 }
33168
33169diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
33170index d78b204..ecc1929 100644
33171--- a/drivers/base/attribute_container.c
33172+++ b/drivers/base/attribute_container.c
33173@@ -167,7 +167,7 @@ attribute_container_add_device(struct device *dev,
33174 ic->classdev.parent = get_device(dev);
33175 ic->classdev.class = cont->class;
33176 cont->class->dev_release = attribute_container_release;
33177- dev_set_name(&ic->classdev, dev_name(dev));
33178+ dev_set_name(&ic->classdev, "%s", dev_name(dev));
33179 if (fn)
33180 fn(cont, dev, &ic->classdev);
33181 else
33182diff --git a/drivers/base/bus.c b/drivers/base/bus.c
33183index 519865b..e540db3 100644
33184--- a/drivers/base/bus.c
33185+++ b/drivers/base/bus.c
33186@@ -1163,7 +1163,7 @@ int subsys_interface_register(struct subsys_interface *sif)
33187 return -EINVAL;
33188
33189 mutex_lock(&subsys->p->mutex);
33190- list_add_tail(&sif->node, &subsys->p->interfaces);
33191+ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
33192 if (sif->add_dev) {
33193 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
33194 while ((dev = subsys_dev_iter_next(&iter)))
33195@@ -1188,7 +1188,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
33196 subsys = sif->subsys;
33197
33198 mutex_lock(&subsys->p->mutex);
33199- list_del_init(&sif->node);
33200+ pax_list_del_init((struct list_head *)&sif->node);
33201 if (sif->remove_dev) {
33202 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
33203 while ((dev = subsys_dev_iter_next(&iter)))
33204diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
33205index 01fc5b0..917801f 100644
33206--- a/drivers/base/devtmpfs.c
33207+++ b/drivers/base/devtmpfs.c
33208@@ -348,7 +348,7 @@ int devtmpfs_mount(const char *mntdir)
33209 if (!thread)
33210 return 0;
33211
33212- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
33213+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
33214 if (err)
33215 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
33216 else
33217@@ -373,11 +373,11 @@ static int devtmpfsd(void *p)
33218 *err = sys_unshare(CLONE_NEWNS);
33219 if (*err)
33220 goto out;
33221- *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
33222+ *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
33223 if (*err)
33224 goto out;
33225- sys_chdir("/.."); /* will traverse into overmounted root */
33226- sys_chroot(".");
33227+ sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
33228+ sys_chroot((char __force_user *)".");
33229 complete(&setup_done);
33230 while (1) {
33231 spin_lock(&req_lock);
33232diff --git a/drivers/base/node.c b/drivers/base/node.c
33233index fac124a..66bd4ab 100644
33234--- a/drivers/base/node.c
33235+++ b/drivers/base/node.c
33236@@ -625,7 +625,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
33237 struct node_attr {
33238 struct device_attribute attr;
33239 enum node_states state;
33240-};
33241+} __do_const;
33242
33243 static ssize_t show_node_state(struct device *dev,
33244 struct device_attribute *attr, char *buf)
33245diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
33246index 9a6b05a..2fc8fb9 100644
33247--- a/drivers/base/power/domain.c
33248+++ b/drivers/base/power/domain.c
33249@@ -1850,7 +1850,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
33250 {
33251 struct cpuidle_driver *cpuidle_drv;
33252 struct gpd_cpu_data *cpu_data;
33253- struct cpuidle_state *idle_state;
33254+ cpuidle_state_no_const *idle_state;
33255 int ret = 0;
33256
33257 if (IS_ERR_OR_NULL(genpd) || state < 0)
33258@@ -1918,7 +1918,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
33259 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
33260 {
33261 struct gpd_cpu_data *cpu_data;
33262- struct cpuidle_state *idle_state;
33263+ cpuidle_state_no_const *idle_state;
33264 int ret = 0;
33265
33266 if (IS_ERR_OR_NULL(genpd))
33267diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
33268index a53ebd2..8f73eeb 100644
33269--- a/drivers/base/power/sysfs.c
33270+++ b/drivers/base/power/sysfs.c
33271@@ -185,7 +185,7 @@ static ssize_t rtpm_status_show(struct device *dev,
33272 return -EIO;
33273 }
33274 }
33275- return sprintf(buf, p);
33276+ return sprintf(buf, "%s", p);
33277 }
33278
33279 static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
33280diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
33281index 79715e7..df06b3b 100644
33282--- a/drivers/base/power/wakeup.c
33283+++ b/drivers/base/power/wakeup.c
33284@@ -29,14 +29,14 @@ bool events_check_enabled __read_mostly;
33285 * They need to be modified together atomically, so it's better to use one
33286 * atomic variable to hold them both.
33287 */
33288-static atomic_t combined_event_count = ATOMIC_INIT(0);
33289+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
33290
33291 #define IN_PROGRESS_BITS (sizeof(int) * 4)
33292 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
33293
33294 static void split_counters(unsigned int *cnt, unsigned int *inpr)
33295 {
33296- unsigned int comb = atomic_read(&combined_event_count);
33297+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
33298
33299 *cnt = (comb >> IN_PROGRESS_BITS);
33300 *inpr = comb & MAX_IN_PROGRESS;
33301@@ -395,7 +395,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
33302 ws->start_prevent_time = ws->last_time;
33303
33304 /* Increment the counter of events in progress. */
33305- cec = atomic_inc_return(&combined_event_count);
33306+ cec = atomic_inc_return_unchecked(&combined_event_count);
33307
33308 trace_wakeup_source_activate(ws->name, cec);
33309 }
33310@@ -521,7 +521,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
33311 * Increment the counter of registered wakeup events and decrement the
33312 * couter of wakeup events in progress simultaneously.
33313 */
33314- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
33315+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
33316 trace_wakeup_source_deactivate(ws->name, cec);
33317
33318 split_counters(&cnt, &inpr);
33319diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
33320index e8d11b6..7b1b36f 100644
33321--- a/drivers/base/syscore.c
33322+++ b/drivers/base/syscore.c
33323@@ -21,7 +21,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
33324 void register_syscore_ops(struct syscore_ops *ops)
33325 {
33326 mutex_lock(&syscore_ops_lock);
33327- list_add_tail(&ops->node, &syscore_ops_list);
33328+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
33329 mutex_unlock(&syscore_ops_lock);
33330 }
33331 EXPORT_SYMBOL_GPL(register_syscore_ops);
33332@@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
33333 void unregister_syscore_ops(struct syscore_ops *ops)
33334 {
33335 mutex_lock(&syscore_ops_lock);
33336- list_del(&ops->node);
33337+ pax_list_del((struct list_head *)&ops->node);
33338 mutex_unlock(&syscore_ops_lock);
33339 }
33340 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
33341diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
33342index dadea48..a1f3835 100644
33343--- a/drivers/block/cciss.c
33344+++ b/drivers/block/cciss.c
33345@@ -1184,6 +1184,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
33346 int err;
33347 u32 cp;
33348
33349+ memset(&arg64, 0, sizeof(arg64));
33350+
33351 err = 0;
33352 err |=
33353 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
33354@@ -3005,7 +3007,7 @@ static void start_io(ctlr_info_t *h)
33355 while (!list_empty(&h->reqQ)) {
33356 c = list_entry(h->reqQ.next, CommandList_struct, list);
33357 /* can't do anything if fifo is full */
33358- if ((h->access.fifo_full(h))) {
33359+ if ((h->access->fifo_full(h))) {
33360 dev_warn(&h->pdev->dev, "fifo full\n");
33361 break;
33362 }
33363@@ -3015,7 +3017,7 @@ static void start_io(ctlr_info_t *h)
33364 h->Qdepth--;
33365
33366 /* Tell the controller execute command */
33367- h->access.submit_command(h, c);
33368+ h->access->submit_command(h, c);
33369
33370 /* Put job onto the completed Q */
33371 addQ(&h->cmpQ, c);
33372@@ -3441,17 +3443,17 @@ startio:
33373
33374 static inline unsigned long get_next_completion(ctlr_info_t *h)
33375 {
33376- return h->access.command_completed(h);
33377+ return h->access->command_completed(h);
33378 }
33379
33380 static inline int interrupt_pending(ctlr_info_t *h)
33381 {
33382- return h->access.intr_pending(h);
33383+ return h->access->intr_pending(h);
33384 }
33385
33386 static inline long interrupt_not_for_us(ctlr_info_t *h)
33387 {
33388- return ((h->access.intr_pending(h) == 0) ||
33389+ return ((h->access->intr_pending(h) == 0) ||
33390 (h->interrupts_enabled == 0));
33391 }
33392
33393@@ -3484,7 +3486,7 @@ static inline u32 next_command(ctlr_info_t *h)
33394 u32 a;
33395
33396 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
33397- return h->access.command_completed(h);
33398+ return h->access->command_completed(h);
33399
33400 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
33401 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
33402@@ -4041,7 +4043,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
33403 trans_support & CFGTBL_Trans_use_short_tags);
33404
33405 /* Change the access methods to the performant access methods */
33406- h->access = SA5_performant_access;
33407+ h->access = &SA5_performant_access;
33408 h->transMethod = CFGTBL_Trans_Performant;
33409
33410 return;
33411@@ -4310,7 +4312,7 @@ static int cciss_pci_init(ctlr_info_t *h)
33412 if (prod_index < 0)
33413 return -ENODEV;
33414 h->product_name = products[prod_index].product_name;
33415- h->access = *(products[prod_index].access);
33416+ h->access = products[prod_index].access;
33417
33418 if (cciss_board_disabled(h)) {
33419 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
33420@@ -5032,7 +5034,7 @@ reinit_after_soft_reset:
33421 }
33422
33423 /* make sure the board interrupts are off */
33424- h->access.set_intr_mask(h, CCISS_INTR_OFF);
33425+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
33426 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
33427 if (rc)
33428 goto clean2;
33429@@ -5082,7 +5084,7 @@ reinit_after_soft_reset:
33430 * fake ones to scoop up any residual completions.
33431 */
33432 spin_lock_irqsave(&h->lock, flags);
33433- h->access.set_intr_mask(h, CCISS_INTR_OFF);
33434+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
33435 spin_unlock_irqrestore(&h->lock, flags);
33436 free_irq(h->intr[h->intr_mode], h);
33437 rc = cciss_request_irq(h, cciss_msix_discard_completions,
33438@@ -5102,9 +5104,9 @@ reinit_after_soft_reset:
33439 dev_info(&h->pdev->dev, "Board READY.\n");
33440 dev_info(&h->pdev->dev,
33441 "Waiting for stale completions to drain.\n");
33442- h->access.set_intr_mask(h, CCISS_INTR_ON);
33443+ h->access->set_intr_mask(h, CCISS_INTR_ON);
33444 msleep(10000);
33445- h->access.set_intr_mask(h, CCISS_INTR_OFF);
33446+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
33447
33448 rc = controller_reset_failed(h->cfgtable);
33449 if (rc)
33450@@ -5127,7 +5129,7 @@ reinit_after_soft_reset:
33451 cciss_scsi_setup(h);
33452
33453 /* Turn the interrupts on so we can service requests */
33454- h->access.set_intr_mask(h, CCISS_INTR_ON);
33455+ h->access->set_intr_mask(h, CCISS_INTR_ON);
33456
33457 /* Get the firmware version */
33458 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
33459@@ -5199,7 +5201,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
33460 kfree(flush_buf);
33461 if (return_code != IO_OK)
33462 dev_warn(&h->pdev->dev, "Error flushing cache\n");
33463- h->access.set_intr_mask(h, CCISS_INTR_OFF);
33464+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
33465 free_irq(h->intr[h->intr_mode], h);
33466 }
33467
33468diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
33469index 7fda30e..eb5dfe0 100644
33470--- a/drivers/block/cciss.h
33471+++ b/drivers/block/cciss.h
33472@@ -101,7 +101,7 @@ struct ctlr_info
33473 /* information about each logical volume */
33474 drive_info_struct *drv[CISS_MAX_LUN];
33475
33476- struct access_method access;
33477+ struct access_method *access;
33478
33479 /* queue and queue Info */
33480 struct list_head reqQ;
33481diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
33482index 3f08713..87d4b4a 100644
33483--- a/drivers/block/cpqarray.c
33484+++ b/drivers/block/cpqarray.c
33485@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
33486 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
33487 goto Enomem4;
33488 }
33489- hba[i]->access.set_intr_mask(hba[i], 0);
33490+ hba[i]->access->set_intr_mask(hba[i], 0);
33491 if (request_irq(hba[i]->intr, do_ida_intr,
33492 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
33493 {
33494@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
33495 add_timer(&hba[i]->timer);
33496
33497 /* Enable IRQ now that spinlock and rate limit timer are set up */
33498- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
33499+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
33500
33501 for(j=0; j<NWD; j++) {
33502 struct gendisk *disk = ida_gendisk[i][j];
33503@@ -694,7 +694,7 @@ DBGINFO(
33504 for(i=0; i<NR_PRODUCTS; i++) {
33505 if (board_id == products[i].board_id) {
33506 c->product_name = products[i].product_name;
33507- c->access = *(products[i].access);
33508+ c->access = products[i].access;
33509 break;
33510 }
33511 }
33512@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
33513 hba[ctlr]->intr = intr;
33514 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
33515 hba[ctlr]->product_name = products[j].product_name;
33516- hba[ctlr]->access = *(products[j].access);
33517+ hba[ctlr]->access = products[j].access;
33518 hba[ctlr]->ctlr = ctlr;
33519 hba[ctlr]->board_id = board_id;
33520 hba[ctlr]->pci_dev = NULL; /* not PCI */
33521@@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
33522
33523 while((c = h->reqQ) != NULL) {
33524 /* Can't do anything if we're busy */
33525- if (h->access.fifo_full(h) == 0)
33526+ if (h->access->fifo_full(h) == 0)
33527 return;
33528
33529 /* Get the first entry from the request Q */
33530@@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
33531 h->Qdepth--;
33532
33533 /* Tell the controller to do our bidding */
33534- h->access.submit_command(h, c);
33535+ h->access->submit_command(h, c);
33536
33537 /* Get onto the completion Q */
33538 addQ(&h->cmpQ, c);
33539@@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
33540 unsigned long flags;
33541 __u32 a,a1;
33542
33543- istat = h->access.intr_pending(h);
33544+ istat = h->access->intr_pending(h);
33545 /* Is this interrupt for us? */
33546 if (istat == 0)
33547 return IRQ_NONE;
33548@@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
33549 */
33550 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
33551 if (istat & FIFO_NOT_EMPTY) {
33552- while((a = h->access.command_completed(h))) {
33553+ while((a = h->access->command_completed(h))) {
33554 a1 = a; a &= ~3;
33555 if ((c = h->cmpQ) == NULL)
33556 {
33557@@ -1195,6 +1195,7 @@ out_passthru:
33558 ida_pci_info_struct pciinfo;
33559
33560 if (!arg) return -EINVAL;
33561+ memset(&pciinfo, 0, sizeof(pciinfo));
33562 pciinfo.bus = host->pci_dev->bus->number;
33563 pciinfo.dev_fn = host->pci_dev->devfn;
33564 pciinfo.board_id = host->board_id;
33565@@ -1449,11 +1450,11 @@ static int sendcmd(
33566 /*
33567 * Disable interrupt
33568 */
33569- info_p->access.set_intr_mask(info_p, 0);
33570+ info_p->access->set_intr_mask(info_p, 0);
33571 /* Make sure there is room in the command FIFO */
33572 /* Actually it should be completely empty at this time. */
33573 for (i = 200000; i > 0; i--) {
33574- temp = info_p->access.fifo_full(info_p);
33575+ temp = info_p->access->fifo_full(info_p);
33576 if (temp != 0) {
33577 break;
33578 }
33579@@ -1466,7 +1467,7 @@ DBG(
33580 /*
33581 * Send the cmd
33582 */
33583- info_p->access.submit_command(info_p, c);
33584+ info_p->access->submit_command(info_p, c);
33585 complete = pollcomplete(ctlr);
33586
33587 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
33588@@ -1549,9 +1550,9 @@ static int revalidate_allvol(ctlr_info_t *host)
33589 * we check the new geometry. Then turn interrupts back on when
33590 * we're done.
33591 */
33592- host->access.set_intr_mask(host, 0);
33593+ host->access->set_intr_mask(host, 0);
33594 getgeometry(ctlr);
33595- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
33596+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
33597
33598 for(i=0; i<NWD; i++) {
33599 struct gendisk *disk = ida_gendisk[ctlr][i];
33600@@ -1591,7 +1592,7 @@ static int pollcomplete(int ctlr)
33601 /* Wait (up to 2 seconds) for a command to complete */
33602
33603 for (i = 200000; i > 0; i--) {
33604- done = hba[ctlr]->access.command_completed(hba[ctlr]);
33605+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
33606 if (done == 0) {
33607 udelay(10); /* a short fixed delay */
33608 } else
33609diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
33610index be73e9d..7fbf140 100644
33611--- a/drivers/block/cpqarray.h
33612+++ b/drivers/block/cpqarray.h
33613@@ -99,7 +99,7 @@ struct ctlr_info {
33614 drv_info_t drv[NWD];
33615 struct proc_dir_entry *proc;
33616
33617- struct access_method access;
33618+ struct access_method *access;
33619
33620 cmdlist_t *reqQ;
33621 cmdlist_t *cmpQ;
33622diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
33623index 6b51afa..17e1191 100644
33624--- a/drivers/block/drbd/drbd_int.h
33625+++ b/drivers/block/drbd/drbd_int.h
33626@@ -582,7 +582,7 @@ struct drbd_epoch {
33627 struct drbd_tconn *tconn;
33628 struct list_head list;
33629 unsigned int barrier_nr;
33630- atomic_t epoch_size; /* increased on every request added. */
33631+ atomic_unchecked_t epoch_size; /* increased on every request added. */
33632 atomic_t active; /* increased on every req. added, and dec on every finished. */
33633 unsigned long flags;
33634 };
33635@@ -1011,7 +1011,7 @@ struct drbd_conf {
33636 int al_tr_cycle;
33637 int al_tr_pos; /* position of the next transaction in the journal */
33638 wait_queue_head_t seq_wait;
33639- atomic_t packet_seq;
33640+ atomic_unchecked_t packet_seq;
33641 unsigned int peer_seq;
33642 spinlock_t peer_seq_lock;
33643 unsigned int minor;
33644@@ -1527,7 +1527,7 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
33645 char __user *uoptval;
33646 int err;
33647
33648- uoptval = (char __user __force *)optval;
33649+ uoptval = (char __force_user *)optval;
33650
33651 set_fs(KERNEL_DS);
33652 if (level == SOL_SOCKET)
33653diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
33654index 54d03d4..332f311 100644
33655--- a/drivers/block/drbd/drbd_main.c
33656+++ b/drivers/block/drbd/drbd_main.c
33657@@ -1317,7 +1317,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
33658 p->sector = sector;
33659 p->block_id = block_id;
33660 p->blksize = blksize;
33661- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
33662+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
33663 return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
33664 }
33665
33666@@ -1619,7 +1619,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
33667 return -EIO;
33668 p->sector = cpu_to_be64(req->i.sector);
33669 p->block_id = (unsigned long)req;
33670- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
33671+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
33672 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
33673 if (mdev->state.conn >= C_SYNC_SOURCE &&
33674 mdev->state.conn <= C_PAUSED_SYNC_T)
33675@@ -2574,8 +2574,8 @@ void conn_destroy(struct kref *kref)
33676 {
33677 struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);
33678
33679- if (atomic_read(&tconn->current_epoch->epoch_size) != 0)
33680- conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
33681+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size) != 0)
33682+ conn_err(tconn, "epoch_size:%d\n", atomic_read_unchecked(&tconn->current_epoch->epoch_size));
33683 kfree(tconn->current_epoch);
33684
33685 idr_destroy(&tconn->volumes);
33686diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
33687index 2f5fffd..b22a1ae 100644
33688--- a/drivers/block/drbd/drbd_receiver.c
33689+++ b/drivers/block/drbd/drbd_receiver.c
33690@@ -833,7 +833,7 @@ int drbd_connected(struct drbd_conf *mdev)
33691 {
33692 int err;
33693
33694- atomic_set(&mdev->packet_seq, 0);
33695+ atomic_set_unchecked(&mdev->packet_seq, 0);
33696 mdev->peer_seq = 0;
33697
33698 mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
33699@@ -1191,7 +1191,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
33700 do {
33701 next_epoch = NULL;
33702
33703- epoch_size = atomic_read(&epoch->epoch_size);
33704+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
33705
33706 switch (ev & ~EV_CLEANUP) {
33707 case EV_PUT:
33708@@ -1231,7 +1231,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
33709 rv = FE_DESTROYED;
33710 } else {
33711 epoch->flags = 0;
33712- atomic_set(&epoch->epoch_size, 0);
33713+ atomic_set_unchecked(&epoch->epoch_size, 0);
33714 /* atomic_set(&epoch->active, 0); is already zero */
33715 if (rv == FE_STILL_LIVE)
33716 rv = FE_RECYCLED;
33717@@ -1449,7 +1449,7 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
33718 conn_wait_active_ee_empty(tconn);
33719 drbd_flush(tconn);
33720
33721- if (atomic_read(&tconn->current_epoch->epoch_size)) {
33722+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
33723 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
33724 if (epoch)
33725 break;
33726@@ -1462,11 +1462,11 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
33727 }
33728
33729 epoch->flags = 0;
33730- atomic_set(&epoch->epoch_size, 0);
33731+ atomic_set_unchecked(&epoch->epoch_size, 0);
33732 atomic_set(&epoch->active, 0);
33733
33734 spin_lock(&tconn->epoch_lock);
33735- if (atomic_read(&tconn->current_epoch->epoch_size)) {
33736+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
33737 list_add(&epoch->list, &tconn->current_epoch->list);
33738 tconn->current_epoch = epoch;
33739 tconn->epochs++;
33740@@ -2170,7 +2170,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
33741
33742 err = wait_for_and_update_peer_seq(mdev, peer_seq);
33743 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
33744- atomic_inc(&tconn->current_epoch->epoch_size);
33745+ atomic_inc_unchecked(&tconn->current_epoch->epoch_size);
33746 err2 = drbd_drain_block(mdev, pi->size);
33747 if (!err)
33748 err = err2;
33749@@ -2204,7 +2204,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
33750
33751 spin_lock(&tconn->epoch_lock);
33752 peer_req->epoch = tconn->current_epoch;
33753- atomic_inc(&peer_req->epoch->epoch_size);
33754+ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
33755 atomic_inc(&peer_req->epoch->active);
33756 spin_unlock(&tconn->epoch_lock);
33757
33758@@ -4345,7 +4345,7 @@ struct data_cmd {
33759 int expect_payload;
33760 size_t pkt_size;
33761 int (*fn)(struct drbd_tconn *, struct packet_info *);
33762-};
33763+} __do_const;
33764
33765 static struct data_cmd drbd_cmd_handler[] = {
33766 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
33767@@ -4465,7 +4465,7 @@ static void conn_disconnect(struct drbd_tconn *tconn)
33768 if (!list_empty(&tconn->current_epoch->list))
33769 conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
33770 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
33771- atomic_set(&tconn->current_epoch->epoch_size, 0);
33772+ atomic_set_unchecked(&tconn->current_epoch->epoch_size, 0);
33773 tconn->send.seen_any_write_yet = false;
33774
33775 conn_info(tconn, "Connection closed\n");
33776@@ -5221,7 +5221,7 @@ static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
33777 struct asender_cmd {
33778 size_t pkt_size;
33779 int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
33780-};
33781+} __do_const;
33782
33783 static struct asender_cmd asender_tbl[] = {
33784 [P_PING] = { 0, got_Ping },
33785diff --git a/drivers/block/loop.c b/drivers/block/loop.c
33786index dfe7583..83768bb 100644
33787--- a/drivers/block/loop.c
33788+++ b/drivers/block/loop.c
33789@@ -231,7 +231,7 @@ static int __do_lo_send_write(struct file *file,
33790 mm_segment_t old_fs = get_fs();
33791
33792 set_fs(get_ds());
33793- bw = file->f_op->write(file, buf, len, &pos);
33794+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
33795 set_fs(old_fs);
33796 if (likely(bw == len))
33797 return 0;
33798diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
33799index 7fecc78..84d217c 100644
33800--- a/drivers/block/nbd.c
33801+++ b/drivers/block/nbd.c
33802@@ -714,7 +714,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
33803 else
33804 blk_queue_flush(nbd->disk->queue, 0);
33805
33806- thread = kthread_create(nbd_thread, nbd, nbd->disk->disk_name);
33807+ thread = kthread_create(nbd_thread, nbd, "%s", nbd->disk->disk_name);
33808 if (IS_ERR(thread)) {
33809 mutex_lock(&nbd->tx_lock);
33810 return PTR_ERR(thread);
33811diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
33812index 2e7de7a..ed86dc0 100644
33813--- a/drivers/block/pktcdvd.c
33814+++ b/drivers/block/pktcdvd.c
33815@@ -83,7 +83,7 @@
33816
33817 #define MAX_SPEED 0xffff
33818
33819-#define ZONE(sector, pd) (((sector) + (pd)->offset) & ~((pd)->settings.size - 1))
33820+#define ZONE(sector, pd) (((sector) + (pd)->offset) & ~((pd)->settings.size - 1UL))
33821
33822 static DEFINE_MUTEX(pktcdvd_mutex);
33823 static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
33824diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
33825index d620b44..d7538c2 100644
33826--- a/drivers/cdrom/cdrom.c
33827+++ b/drivers/cdrom/cdrom.c
33828@@ -416,7 +416,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
33829 ENSURE(reset, CDC_RESET);
33830 ENSURE(generic_packet, CDC_GENERIC_PACKET);
33831 cdi->mc_flags = 0;
33832- cdo->n_minors = 0;
33833 cdi->options = CDO_USE_FFLAGS;
33834
33835 if (autoclose==1 && CDROM_CAN(CDC_CLOSE_TRAY))
33836@@ -436,8 +435,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
33837 else
33838 cdi->cdda_method = CDDA_OLD;
33839
33840- if (!cdo->generic_packet)
33841- cdo->generic_packet = cdrom_dummy_generic_packet;
33842+ if (!cdo->generic_packet) {
33843+ pax_open_kernel();
33844+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
33845+ pax_close_kernel();
33846+ }
33847
33848 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
33849 mutex_lock(&cdrom_mutex);
33850@@ -458,7 +460,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
33851 if (cdi->exit)
33852 cdi->exit(cdi);
33853
33854- cdi->ops->n_minors--;
33855 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
33856 }
33857
33858@@ -2107,7 +2108,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
33859 */
33860 nr = nframes;
33861 do {
33862- cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
33863+ cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
33864 if (cgc.buffer)
33865 break;
33866
33867@@ -2882,7 +2883,7 @@ static noinline int mmc_ioctl_cdrom_read_data(struct cdrom_device_info *cdi,
33868 if (lba < 0)
33869 return -EINVAL;
33870
33871- cgc->buffer = kmalloc(blocksize, GFP_KERNEL);
33872+ cgc->buffer = kzalloc(blocksize, GFP_KERNEL);
33873 if (cgc->buffer == NULL)
33874 return -ENOMEM;
33875
33876@@ -3429,7 +3430,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
33877 struct cdrom_device_info *cdi;
33878 int ret;
33879
33880- ret = scnprintf(info + *pos, max_size - *pos, header);
33881+ ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
33882 if (!ret)
33883 return 1;
33884
33885diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
33886index d59cdcb..11afddf 100644
33887--- a/drivers/cdrom/gdrom.c
33888+++ b/drivers/cdrom/gdrom.c
33889@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
33890 .audio_ioctl = gdrom_audio_ioctl,
33891 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
33892 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
33893- .n_minors = 1,
33894 };
33895
33896 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
33897diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
33898index 3bb6fa3..34013fb 100644
33899--- a/drivers/char/Kconfig
33900+++ b/drivers/char/Kconfig
33901@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
33902
33903 config DEVKMEM
33904 bool "/dev/kmem virtual device support"
33905- default y
33906+ default n
33907+ depends on !GRKERNSEC_KMEM
33908 help
33909 Say Y here if you want to support the /dev/kmem device. The
33910 /dev/kmem device is rarely used, but can be used for certain
33911@@ -582,6 +583,7 @@ config DEVPORT
33912 bool
33913 depends on !M68K
33914 depends on ISA || PCI
33915+ depends on !GRKERNSEC_KMEM
33916 default y
33917
33918 source "drivers/s390/char/Kconfig"
33919diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
33920index a48e05b..6bac831 100644
33921--- a/drivers/char/agp/compat_ioctl.c
33922+++ b/drivers/char/agp/compat_ioctl.c
33923@@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
33924 return -ENOMEM;
33925 }
33926
33927- if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
33928+ if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
33929 sizeof(*usegment) * ureserve.seg_count)) {
33930 kfree(usegment);
33931 kfree(ksegment);
33932diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
33933index 2e04433..771f2cc 100644
33934--- a/drivers/char/agp/frontend.c
33935+++ b/drivers/char/agp/frontend.c
33936@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
33937 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
33938 return -EFAULT;
33939
33940- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
33941+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
33942 return -EFAULT;
33943
33944 client = agp_find_client_by_pid(reserve.pid);
33945@@ -847,7 +847,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
33946 if (segment == NULL)
33947 return -ENOMEM;
33948
33949- if (copy_from_user(segment, (void __user *) reserve.seg_list,
33950+ if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
33951 sizeof(struct agp_segment) * reserve.seg_count)) {
33952 kfree(segment);
33953 return -EFAULT;
33954diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
33955index 21cb980..f15107c 100644
33956--- a/drivers/char/genrtc.c
33957+++ b/drivers/char/genrtc.c
33958@@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct file *file,
33959 switch (cmd) {
33960
33961 case RTC_PLL_GET:
33962+ memset(&pll, 0, sizeof(pll));
33963 if (get_rtc_pll(&pll))
33964 return -EINVAL;
33965 else
33966diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
33967index d784650..e8bfd69 100644
33968--- a/drivers/char/hpet.c
33969+++ b/drivers/char/hpet.c
33970@@ -559,7 +559,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
33971 }
33972
33973 static int
33974-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
33975+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
33976 struct hpet_info *info)
33977 {
33978 struct hpet_timer __iomem *timer;
33979diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
33980index 86fe45c..c0ea948 100644
33981--- a/drivers/char/hw_random/intel-rng.c
33982+++ b/drivers/char/hw_random/intel-rng.c
33983@@ -314,7 +314,7 @@ PFX "RNG, try using the 'no_fwh_detect' option.\n";
33984
33985 if (no_fwh_detect)
33986 return -ENODEV;
33987- printk(warning);
33988+ printk("%s", warning);
33989 return -EBUSY;
33990 }
33991
33992diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
33993index 053201b0..8335cce 100644
33994--- a/drivers/char/ipmi/ipmi_msghandler.c
33995+++ b/drivers/char/ipmi/ipmi_msghandler.c
33996@@ -420,7 +420,7 @@ struct ipmi_smi {
33997 struct proc_dir_entry *proc_dir;
33998 char proc_dir_name[10];
33999
34000- atomic_t stats[IPMI_NUM_STATS];
34001+ atomic_unchecked_t stats[IPMI_NUM_STATS];
34002
34003 /*
34004 * run_to_completion duplicate of smb_info, smi_info
34005@@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
34006
34007
34008 #define ipmi_inc_stat(intf, stat) \
34009- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
34010+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
34011 #define ipmi_get_stat(intf, stat) \
34012- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
34013+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
34014
34015 static int is_lan_addr(struct ipmi_addr *addr)
34016 {
34017@@ -2884,7 +2884,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
34018 INIT_LIST_HEAD(&intf->cmd_rcvrs);
34019 init_waitqueue_head(&intf->waitq);
34020 for (i = 0; i < IPMI_NUM_STATS; i++)
34021- atomic_set(&intf->stats[i], 0);
34022+ atomic_set_unchecked(&intf->stats[i], 0);
34023
34024 intf->proc_dir = NULL;
34025
34026diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
34027index 0ac9b45..6179fb5 100644
34028--- a/drivers/char/ipmi/ipmi_si_intf.c
34029+++ b/drivers/char/ipmi/ipmi_si_intf.c
34030@@ -275,7 +275,7 @@ struct smi_info {
34031 unsigned char slave_addr;
34032
34033 /* Counters and things for the proc filesystem. */
34034- atomic_t stats[SI_NUM_STATS];
34035+ atomic_unchecked_t stats[SI_NUM_STATS];
34036
34037 struct task_struct *thread;
34038
34039@@ -284,9 +284,9 @@ struct smi_info {
34040 };
34041
34042 #define smi_inc_stat(smi, stat) \
34043- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
34044+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
34045 #define smi_get_stat(smi, stat) \
34046- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
34047+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
34048
34049 #define SI_MAX_PARMS 4
34050
34051@@ -3254,7 +3254,7 @@ static int try_smi_init(struct smi_info *new_smi)
34052 atomic_set(&new_smi->req_events, 0);
34053 new_smi->run_to_completion = 0;
34054 for (i = 0; i < SI_NUM_STATS; i++)
34055- atomic_set(&new_smi->stats[i], 0);
34056+ atomic_set_unchecked(&new_smi->stats[i], 0);
34057
34058 new_smi->interrupt_disabled = 1;
34059 atomic_set(&new_smi->stop_operation, 0);
34060diff --git a/drivers/char/mem.c b/drivers/char/mem.c
34061index 2c644af..4b7aede 100644
34062--- a/drivers/char/mem.c
34063+++ b/drivers/char/mem.c
34064@@ -18,6 +18,7 @@
34065 #include <linux/raw.h>
34066 #include <linux/tty.h>
34067 #include <linux/capability.h>
34068+#include <linux/security.h>
34069 #include <linux/ptrace.h>
34070 #include <linux/device.h>
34071 #include <linux/highmem.h>
34072@@ -37,6 +38,10 @@
34073
34074 #define DEVPORT_MINOR 4
34075
34076+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
34077+extern const struct file_operations grsec_fops;
34078+#endif
34079+
34080 static inline unsigned long size_inside_page(unsigned long start,
34081 unsigned long size)
34082 {
34083@@ -68,9 +73,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
34084
34085 while (cursor < to) {
34086 if (!devmem_is_allowed(pfn)) {
34087+#ifdef CONFIG_GRKERNSEC_KMEM
34088+ gr_handle_mem_readwrite(from, to);
34089+#else
34090 printk(KERN_INFO
34091 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
34092 current->comm, from, to);
34093+#endif
34094 return 0;
34095 }
34096 cursor += PAGE_SIZE;
34097@@ -78,6 +87,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
34098 }
34099 return 1;
34100 }
34101+#elif defined(CONFIG_GRKERNSEC_KMEM)
34102+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
34103+{
34104+ return 0;
34105+}
34106 #else
34107 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
34108 {
34109@@ -120,6 +134,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
34110
34111 while (count > 0) {
34112 unsigned long remaining;
34113+ char *temp;
34114
34115 sz = size_inside_page(p, count);
34116
34117@@ -135,7 +150,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
34118 if (!ptr)
34119 return -EFAULT;
34120
34121- remaining = copy_to_user(buf, ptr, sz);
34122+#ifdef CONFIG_PAX_USERCOPY
34123+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
34124+ if (!temp) {
34125+ unxlate_dev_mem_ptr(p, ptr);
34126+ return -ENOMEM;
34127+ }
34128+ memcpy(temp, ptr, sz);
34129+#else
34130+ temp = ptr;
34131+#endif
34132+
34133+ remaining = copy_to_user(buf, temp, sz);
34134+
34135+#ifdef CONFIG_PAX_USERCOPY
34136+ kfree(temp);
34137+#endif
34138+
34139 unxlate_dev_mem_ptr(p, ptr);
34140 if (remaining)
34141 return -EFAULT;
34142@@ -378,7 +409,7 @@ static ssize_t read_oldmem(struct file *file, char __user *buf,
34143 else
34144 csize = count;
34145
34146- rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
34147+ rc = copy_oldmem_page(pfn, (char __force_kernel *)buf, csize, offset, 1);
34148 if (rc < 0)
34149 return rc;
34150 buf += csize;
34151@@ -398,9 +429,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
34152 size_t count, loff_t *ppos)
34153 {
34154 unsigned long p = *ppos;
34155- ssize_t low_count, read, sz;
34156+ ssize_t low_count, read, sz, err = 0;
34157 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
34158- int err = 0;
34159
34160 read = 0;
34161 if (p < (unsigned long) high_memory) {
34162@@ -422,6 +452,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
34163 }
34164 #endif
34165 while (low_count > 0) {
34166+ char *temp;
34167+
34168 sz = size_inside_page(p, low_count);
34169
34170 /*
34171@@ -431,7 +463,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
34172 */
34173 kbuf = xlate_dev_kmem_ptr((char *)p);
34174
34175- if (copy_to_user(buf, kbuf, sz))
34176+#ifdef CONFIG_PAX_USERCOPY
34177+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
34178+ if (!temp)
34179+ return -ENOMEM;
34180+ memcpy(temp, kbuf, sz);
34181+#else
34182+ temp = kbuf;
34183+#endif
34184+
34185+ err = copy_to_user(buf, temp, sz);
34186+
34187+#ifdef CONFIG_PAX_USERCOPY
34188+ kfree(temp);
34189+#endif
34190+
34191+ if (err)
34192 return -EFAULT;
34193 buf += sz;
34194 p += sz;
34195@@ -833,6 +880,9 @@ static const struct memdev {
34196 #ifdef CONFIG_CRASH_DUMP
34197 [12] = { "oldmem", 0, &oldmem_fops, NULL },
34198 #endif
34199+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
34200+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
34201+#endif
34202 };
34203
34204 static int memory_open(struct inode *inode, struct file *filp)
34205@@ -904,7 +954,7 @@ static int __init chr_dev_init(void)
34206 continue;
34207
34208 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
34209- NULL, devlist[minor].name);
34210+ NULL, "%s", devlist[minor].name);
34211 }
34212
34213 return tty_init();
34214diff --git a/drivers/char/mwave/tp3780i.c b/drivers/char/mwave/tp3780i.c
34215index c689697..04e6d6a2 100644
34216--- a/drivers/char/mwave/tp3780i.c
34217+++ b/drivers/char/mwave/tp3780i.c
34218@@ -479,6 +479,7 @@ int tp3780I_QueryAbilities(THINKPAD_BD_DATA * pBDData, MW_ABILITIES * pAbilities
34219 PRINTK_2(TRACE_TP3780I,
34220 "tp3780i::tp3780I_QueryAbilities entry pBDData %p\n", pBDData);
34221
34222+ memset(pAbilities, 0, sizeof(*pAbilities));
34223 /* fill out standard constant fields */
34224 pAbilities->instr_per_sec = pBDData->rDspSettings.uIps;
34225 pAbilities->data_size = pBDData->rDspSettings.uDStoreSize;
34226diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
34227index 9df78e2..01ba9ae 100644
34228--- a/drivers/char/nvram.c
34229+++ b/drivers/char/nvram.c
34230@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
34231
34232 spin_unlock_irq(&rtc_lock);
34233
34234- if (copy_to_user(buf, contents, tmp - contents))
34235+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
34236 return -EFAULT;
34237
34238 *ppos = i;
34239diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
34240index 5c5cc00..ac9edb7 100644
34241--- a/drivers/char/pcmcia/synclink_cs.c
34242+++ b/drivers/char/pcmcia/synclink_cs.c
34243@@ -2345,9 +2345,9 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
34244
34245 if (debug_level >= DEBUG_LEVEL_INFO)
34246 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
34247- __FILE__, __LINE__, info->device_name, port->count);
34248+ __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
34249
34250- WARN_ON(!port->count);
34251+ WARN_ON(!atomic_read(&port->count));
34252
34253 if (tty_port_close_start(port, tty, filp) == 0)
34254 goto cleanup;
34255@@ -2365,7 +2365,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
34256 cleanup:
34257 if (debug_level >= DEBUG_LEVEL_INFO)
34258 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
34259- tty->driver->name, port->count);
34260+ tty->driver->name, atomic_read(&port->count));
34261 }
34262
34263 /* Wait until the transmitter is empty.
34264@@ -2507,7 +2507,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
34265
34266 if (debug_level >= DEBUG_LEVEL_INFO)
34267 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
34268- __FILE__, __LINE__, tty->driver->name, port->count);
34269+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
34270
34271 /* If port is closing, signal caller to try again */
34272 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
34273@@ -2527,11 +2527,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
34274 goto cleanup;
34275 }
34276 spin_lock(&port->lock);
34277- port->count++;
34278+ atomic_inc(&port->count);
34279 spin_unlock(&port->lock);
34280 spin_unlock_irqrestore(&info->netlock, flags);
34281
34282- if (port->count == 1) {
34283+ if (atomic_read(&port->count) == 1) {
34284 /* 1st open on this device, init hardware */
34285 retval = startup(info, tty);
34286 if (retval < 0)
34287@@ -3920,7 +3920,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
34288 unsigned short new_crctype;
34289
34290 /* return error if TTY interface open */
34291- if (info->port.count)
34292+ if (atomic_read(&info->port.count))
34293 return -EBUSY;
34294
34295 switch (encoding)
34296@@ -4024,7 +4024,7 @@ static int hdlcdev_open(struct net_device *dev)
34297
34298 /* arbitrate between network and tty opens */
34299 spin_lock_irqsave(&info->netlock, flags);
34300- if (info->port.count != 0 || info->netcount != 0) {
34301+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
34302 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
34303 spin_unlock_irqrestore(&info->netlock, flags);
34304 return -EBUSY;
34305@@ -4114,7 +4114,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
34306 printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
34307
34308 /* return error if TTY interface open */
34309- if (info->port.count)
34310+ if (atomic_read(&info->port.count))
34311 return -EBUSY;
34312
34313 if (cmd != SIOCWANDEV)
34314diff --git a/drivers/char/random.c b/drivers/char/random.c
34315index eccd7cc..98038d5 100644
34316--- a/drivers/char/random.c
34317+++ b/drivers/char/random.c
34318@@ -272,8 +272,13 @@
34319 /*
34320 * Configuration information
34321 */
34322+#ifdef CONFIG_GRKERNSEC_RANDNET
34323+#define INPUT_POOL_WORDS 512
34324+#define OUTPUT_POOL_WORDS 128
34325+#else
34326 #define INPUT_POOL_WORDS 128
34327 #define OUTPUT_POOL_WORDS 32
34328+#endif
34329 #define SEC_XFER_SIZE 512
34330 #define EXTRACT_SIZE 10
34331
34332@@ -313,10 +318,17 @@ static struct poolinfo {
34333 int poolwords;
34334 int tap1, tap2, tap3, tap4, tap5;
34335 } poolinfo_table[] = {
34336+#ifdef CONFIG_GRKERNSEC_RANDNET
34337+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
34338+ { 512, 411, 308, 208, 104, 1 },
34339+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
34340+ { 128, 103, 76, 51, 25, 1 },
34341+#else
34342 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
34343 { 128, 103, 76, 51, 25, 1 },
34344 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
34345 { 32, 26, 20, 14, 7, 1 },
34346+#endif
34347 #if 0
34348 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
34349 { 2048, 1638, 1231, 819, 411, 1 },
34350@@ -524,8 +536,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
34351 input_rotate += i ? 7 : 14;
34352 }
34353
34354- ACCESS_ONCE(r->input_rotate) = input_rotate;
34355- ACCESS_ONCE(r->add_ptr) = i;
34356+ ACCESS_ONCE_RW(r->input_rotate) = input_rotate;
34357+ ACCESS_ONCE_RW(r->add_ptr) = i;
34358 smp_wmb();
34359
34360 if (out)
34361@@ -1032,7 +1044,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
34362
34363 extract_buf(r, tmp);
34364 i = min_t(int, nbytes, EXTRACT_SIZE);
34365- if (copy_to_user(buf, tmp, i)) {
34366+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
34367 ret = -EFAULT;
34368 break;
34369 }
34370@@ -1368,7 +1380,7 @@ EXPORT_SYMBOL(generate_random_uuid);
34371 #include <linux/sysctl.h>
34372
34373 static int min_read_thresh = 8, min_write_thresh;
34374-static int max_read_thresh = INPUT_POOL_WORDS * 32;
34375+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
34376 static int max_write_thresh = INPUT_POOL_WORDS * 32;
34377 static char sysctl_bootid[16];
34378
34379@@ -1384,7 +1396,7 @@ static char sysctl_bootid[16];
34380 static int proc_do_uuid(ctl_table *table, int write,
34381 void __user *buffer, size_t *lenp, loff_t *ppos)
34382 {
34383- ctl_table fake_table;
34384+ ctl_table_no_const fake_table;
34385 unsigned char buf[64], tmp_uuid[16], *uuid;
34386
34387 uuid = table->data;
34388diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
34389index bf2349db..5456d53 100644
34390--- a/drivers/char/sonypi.c
34391+++ b/drivers/char/sonypi.c
34392@@ -54,6 +54,7 @@
34393
34394 #include <asm/uaccess.h>
34395 #include <asm/io.h>
34396+#include <asm/local.h>
34397
34398 #include <linux/sonypi.h>
34399
34400@@ -490,7 +491,7 @@ static struct sonypi_device {
34401 spinlock_t fifo_lock;
34402 wait_queue_head_t fifo_proc_list;
34403 struct fasync_struct *fifo_async;
34404- int open_count;
34405+ local_t open_count;
34406 int model;
34407 struct input_dev *input_jog_dev;
34408 struct input_dev *input_key_dev;
34409@@ -897,7 +898,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
34410 static int sonypi_misc_release(struct inode *inode, struct file *file)
34411 {
34412 mutex_lock(&sonypi_device.lock);
34413- sonypi_device.open_count--;
34414+ local_dec(&sonypi_device.open_count);
34415 mutex_unlock(&sonypi_device.lock);
34416 return 0;
34417 }
34418@@ -906,9 +907,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
34419 {
34420 mutex_lock(&sonypi_device.lock);
34421 /* Flush input queue on first open */
34422- if (!sonypi_device.open_count)
34423+ if (!local_read(&sonypi_device.open_count))
34424 kfifo_reset(&sonypi_device.fifo);
34425- sonypi_device.open_count++;
34426+ local_inc(&sonypi_device.open_count);
34427 mutex_unlock(&sonypi_device.lock);
34428
34429 return 0;
34430diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
34431index 64420b3..5c40b56 100644
34432--- a/drivers/char/tpm/tpm_acpi.c
34433+++ b/drivers/char/tpm/tpm_acpi.c
34434@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
34435 virt = acpi_os_map_memory(start, len);
34436 if (!virt) {
34437 kfree(log->bios_event_log);
34438+ log->bios_event_log = NULL;
34439 printk("%s: ERROR - Unable to map memory\n", __func__);
34440 return -EIO;
34441 }
34442
34443- memcpy_fromio(log->bios_event_log, virt, len);
34444+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
34445
34446 acpi_os_unmap_memory(virt, len);
34447 return 0;
34448diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
34449index 84ddc55..1d32f1e 100644
34450--- a/drivers/char/tpm/tpm_eventlog.c
34451+++ b/drivers/char/tpm/tpm_eventlog.c
34452@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
34453 event = addr;
34454
34455 if ((event->event_type == 0 && event->event_size == 0) ||
34456- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
34457+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
34458 return NULL;
34459
34460 return addr;
34461@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
34462 return NULL;
34463
34464 if ((event->event_type == 0 && event->event_size == 0) ||
34465- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
34466+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
34467 return NULL;
34468
34469 (*pos)++;
34470@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
34471 int i;
34472
34473 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
34474- seq_putc(m, data[i]);
34475+ if (!seq_putc(m, data[i]))
34476+ return -EFAULT;
34477
34478 return 0;
34479 }
34480diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
34481index ce5f3fc..e2d3e55 100644
34482--- a/drivers/char/virtio_console.c
34483+++ b/drivers/char/virtio_console.c
34484@@ -679,7 +679,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
34485 if (to_user) {
34486 ssize_t ret;
34487
34488- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
34489+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
34490 if (ret)
34491 return -EFAULT;
34492 } else {
34493@@ -778,7 +778,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
34494 if (!port_has_data(port) && !port->host_connected)
34495 return 0;
34496
34497- return fill_readbuf(port, ubuf, count, true);
34498+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
34499 }
34500
34501 static int wait_port_writable(struct port *port, bool nonblock)
34502diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
34503index d7ad425..3e3f81f 100644
34504--- a/drivers/clocksource/arm_arch_timer.c
34505+++ b/drivers/clocksource/arm_arch_timer.c
34506@@ -262,7 +262,7 @@ static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
34507 return NOTIFY_OK;
34508 }
34509
34510-static struct notifier_block arch_timer_cpu_nb __cpuinitdata = {
34511+static struct notifier_block arch_timer_cpu_nb = {
34512 .notifier_call = arch_timer_cpu_notify,
34513 };
34514
34515diff --git a/drivers/clocksource/metag_generic.c b/drivers/clocksource/metag_generic.c
34516index ade7513..069445f 100644
34517--- a/drivers/clocksource/metag_generic.c
34518+++ b/drivers/clocksource/metag_generic.c
34519@@ -169,7 +169,7 @@ static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
34520 return NOTIFY_OK;
34521 }
34522
34523-static struct notifier_block __cpuinitdata arch_timer_cpu_nb = {
34524+static struct notifier_block arch_timer_cpu_nb = {
34525 .notifier_call = arch_timer_cpu_notify,
34526 };
34527
34528diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
34529index bb5939b..d9accb7 100644
34530--- a/drivers/cpufreq/acpi-cpufreq.c
34531+++ b/drivers/cpufreq/acpi-cpufreq.c
34532@@ -172,7 +172,7 @@ static ssize_t show_global_boost(struct kobject *kobj,
34533 return sprintf(buf, "%u\n", boost_enabled);
34534 }
34535
34536-static struct global_attr global_boost = __ATTR(boost, 0644,
34537+static global_attr_no_const global_boost = __ATTR(boost, 0644,
34538 show_global_boost,
34539 store_global_boost);
34540
34541@@ -712,8 +712,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
34542 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
34543 per_cpu(acfreq_data, cpu) = data;
34544
34545- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
34546- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
34547+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
34548+ pax_open_kernel();
34549+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
34550+ pax_close_kernel();
34551+ }
34552
34553 result = acpi_processor_register_performance(data->acpi_data, cpu);
34554 if (result)
34555@@ -839,7 +842,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
34556 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
34557 break;
34558 case ACPI_ADR_SPACE_FIXED_HARDWARE:
34559- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
34560+ pax_open_kernel();
34561+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
34562+ pax_close_kernel();
34563 policy->cur = get_cur_freq_on_cpu(cpu);
34564 break;
34565 default:
34566@@ -850,8 +855,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
34567 acpi_processor_notify_smm(THIS_MODULE);
34568
34569 /* Check for APERF/MPERF support in hardware */
34570- if (boot_cpu_has(X86_FEATURE_APERFMPERF))
34571- acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
34572+ if (boot_cpu_has(X86_FEATURE_APERFMPERF)) {
34573+ pax_open_kernel();
34574+ *(void **)&acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
34575+ pax_close_kernel();
34576+ }
34577
34578 pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
34579 for (i = 0; i < perf->state_count; i++)
34580diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
34581index b02824d..51e44aa 100644
34582--- a/drivers/cpufreq/cpufreq.c
34583+++ b/drivers/cpufreq/cpufreq.c
34584@@ -1813,7 +1813,7 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
34585 return NOTIFY_OK;
34586 }
34587
34588-static struct notifier_block __refdata cpufreq_cpu_notifier = {
34589+static struct notifier_block cpufreq_cpu_notifier = {
34590 .notifier_call = cpufreq_cpu_callback,
34591 };
34592
34593@@ -1845,8 +1845,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
34594
34595 pr_debug("trying to register driver %s\n", driver_data->name);
34596
34597- if (driver_data->setpolicy)
34598- driver_data->flags |= CPUFREQ_CONST_LOOPS;
34599+ if (driver_data->setpolicy) {
34600+ pax_open_kernel();
34601+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
34602+ pax_close_kernel();
34603+ }
34604
34605 spin_lock_irqsave(&cpufreq_driver_lock, flags);
34606 if (cpufreq_driver) {
34607diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
34608index 5a76086..0f4d394 100644
34609--- a/drivers/cpufreq/cpufreq_governor.c
34610+++ b/drivers/cpufreq/cpufreq_governor.c
34611@@ -201,8 +201,8 @@ int cpufreq_governor_dbs(struct dbs_data *dbs_data,
34612 {
34613 struct od_cpu_dbs_info_s *od_dbs_info = NULL;
34614 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
34615- struct cs_ops *cs_ops = NULL;
34616- struct od_ops *od_ops = NULL;
34617+ const struct cs_ops *cs_ops = NULL;
34618+ const struct od_ops *od_ops = NULL;
34619 struct od_dbs_tuners *od_tuners = dbs_data->tuners;
34620 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
34621 struct cpu_dbs_common_info *cpu_cdbs;
34622diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
34623index cc4bd2f..ad142bc 100644
34624--- a/drivers/cpufreq/cpufreq_governor.h
34625+++ b/drivers/cpufreq/cpufreq_governor.h
34626@@ -142,7 +142,7 @@ struct dbs_data {
34627 void (*gov_check_cpu)(int cpu, unsigned int load);
34628
34629 /* Governor specific ops, see below */
34630- void *gov_ops;
34631+ const void *gov_ops;
34632 };
34633
34634 /* Governor specific ops, will be passed to dbs_data->gov_ops */
34635diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
34636index bfd6273..e39dd63 100644
34637--- a/drivers/cpufreq/cpufreq_stats.c
34638+++ b/drivers/cpufreq/cpufreq_stats.c
34639@@ -365,7 +365,7 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
34640 }
34641
34642 /* priority=1 so this will get called before cpufreq_remove_dev */
34643-static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
34644+static struct notifier_block cpufreq_stat_cpu_notifier = {
34645 .notifier_call = cpufreq_stat_cpu_callback,
34646 .priority = 1,
34647 };
34648diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
34649index 827629c9..0bc6a03 100644
34650--- a/drivers/cpufreq/p4-clockmod.c
34651+++ b/drivers/cpufreq/p4-clockmod.c
34652@@ -167,10 +167,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
34653 case 0x0F: /* Core Duo */
34654 case 0x16: /* Celeron Core */
34655 case 0x1C: /* Atom */
34656- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34657+ pax_open_kernel();
34658+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34659+ pax_close_kernel();
34660 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
34661 case 0x0D: /* Pentium M (Dothan) */
34662- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34663+ pax_open_kernel();
34664+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34665+ pax_close_kernel();
34666 /* fall through */
34667 case 0x09: /* Pentium M (Banias) */
34668 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
34669@@ -182,7 +186,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
34670
34671 /* on P-4s, the TSC runs with constant frequency independent whether
34672 * throttling is active or not. */
34673- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34674+ pax_open_kernel();
34675+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34676+ pax_close_kernel();
34677
34678 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
34679 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
34680diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
34681index 3a953d5..f5993f6 100644
34682--- a/drivers/cpufreq/speedstep-centrino.c
34683+++ b/drivers/cpufreq/speedstep-centrino.c
34684@@ -353,8 +353,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
34685 !cpu_has(cpu, X86_FEATURE_EST))
34686 return -ENODEV;
34687
34688- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
34689- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
34690+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
34691+ pax_open_kernel();
34692+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
34693+ pax_close_kernel();
34694+ }
34695
34696 if (policy->cpu != 0)
34697 return -ENODEV;
34698diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
34699index eba6929..0f53baf 100644
34700--- a/drivers/cpuidle/cpuidle.c
34701+++ b/drivers/cpuidle/cpuidle.c
34702@@ -277,7 +277,7 @@ static int poll_idle(struct cpuidle_device *dev,
34703
34704 static void poll_idle_init(struct cpuidle_driver *drv)
34705 {
34706- struct cpuidle_state *state = &drv->states[0];
34707+ cpuidle_state_no_const *state = &drv->states[0];
34708
34709 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
34710 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
34711diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
34712index ea2f8e7..70ac501 100644
34713--- a/drivers/cpuidle/governor.c
34714+++ b/drivers/cpuidle/governor.c
34715@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
34716 mutex_lock(&cpuidle_lock);
34717 if (__cpuidle_find_governor(gov->name) == NULL) {
34718 ret = 0;
34719- list_add_tail(&gov->governor_list, &cpuidle_governors);
34720+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
34721 if (!cpuidle_curr_governor ||
34722 cpuidle_curr_governor->rating < gov->rating)
34723 cpuidle_switch_governor(gov);
34724@@ -135,7 +135,7 @@ void cpuidle_unregister_governor(struct cpuidle_governor *gov)
34725 new_gov = cpuidle_replace_governor(gov->rating);
34726 cpuidle_switch_governor(new_gov);
34727 }
34728- list_del(&gov->governor_list);
34729+ pax_list_del((struct list_head *)&gov->governor_list);
34730 mutex_unlock(&cpuidle_lock);
34731 }
34732
34733diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
34734index 428754a..8bdf9cc 100644
34735--- a/drivers/cpuidle/sysfs.c
34736+++ b/drivers/cpuidle/sysfs.c
34737@@ -131,7 +131,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
34738 NULL
34739 };
34740
34741-static struct attribute_group cpuidle_attr_group = {
34742+static attribute_group_no_const cpuidle_attr_group = {
34743 .attrs = cpuidle_default_attrs,
34744 .name = "cpuidle",
34745 };
34746diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
34747index 3b36797..db0b0c0 100644
34748--- a/drivers/devfreq/devfreq.c
34749+++ b/drivers/devfreq/devfreq.c
34750@@ -477,7 +477,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
34751 GFP_KERNEL);
34752 devfreq->last_stat_updated = jiffies;
34753
34754- dev_set_name(&devfreq->dev, dev_name(dev));
34755+ dev_set_name(&devfreq->dev, "%s", dev_name(dev));
34756 err = device_register(&devfreq->dev);
34757 if (err) {
34758 put_device(&devfreq->dev);
34759@@ -588,7 +588,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
34760 goto err_out;
34761 }
34762
34763- list_add(&governor->node, &devfreq_governor_list);
34764+ pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
34765
34766 list_for_each_entry(devfreq, &devfreq_list, node) {
34767 int ret = 0;
34768@@ -676,7 +676,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
34769 }
34770 }
34771
34772- list_del(&governor->node);
34773+ pax_list_del((struct list_head *)&governor->node);
34774 err_out:
34775 mutex_unlock(&devfreq_list_lock);
34776
34777diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c
34778index b70709b..1d8d02a 100644
34779--- a/drivers/dma/sh/shdma.c
34780+++ b/drivers/dma/sh/shdma.c
34781@@ -476,7 +476,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
34782 return ret;
34783 }
34784
34785-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
34786+static struct notifier_block sh_dmae_nmi_notifier = {
34787 .notifier_call = sh_dmae_nmi_handler,
34788
34789 /* Run before NMI debug handler and KGDB */
34790diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
34791index 769d92e..a3dcc1e 100644
34792--- a/drivers/edac/edac_mc_sysfs.c
34793+++ b/drivers/edac/edac_mc_sysfs.c
34794@@ -148,7 +148,7 @@ static const char *edac_caps[] = {
34795 struct dev_ch_attribute {
34796 struct device_attribute attr;
34797 int channel;
34798-};
34799+} __do_const;
34800
34801 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
34802 struct dev_ch_attribute dev_attr_legacy_##_name = \
34803@@ -1003,14 +1003,16 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
34804 }
34805
34806 if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
34807+ pax_open_kernel();
34808 if (mci->get_sdram_scrub_rate) {
34809- dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
34810- dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
34811+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
34812+ *(void **)&dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
34813 }
34814 if (mci->set_sdram_scrub_rate) {
34815- dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
34816- dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
34817+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
34818+ *(void **)&dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
34819 }
34820+ pax_close_kernel();
34821 err = device_create_file(&mci->dev,
34822 &dev_attr_sdram_scrub_rate);
34823 if (err) {
34824diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
34825index e8658e4..22746d6 100644
34826--- a/drivers/edac/edac_pci_sysfs.c
34827+++ b/drivers/edac/edac_pci_sysfs.c
34828@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
34829 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
34830 static int edac_pci_poll_msec = 1000; /* one second workq period */
34831
34832-static atomic_t pci_parity_count = ATOMIC_INIT(0);
34833-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
34834+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
34835+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
34836
34837 static struct kobject *edac_pci_top_main_kobj;
34838 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
34839@@ -235,7 +235,7 @@ struct edac_pci_dev_attribute {
34840 void *value;
34841 ssize_t(*show) (void *, char *);
34842 ssize_t(*store) (void *, const char *, size_t);
34843-};
34844+} __do_const;
34845
34846 /* Set of show/store abstract level functions for PCI Parity object */
34847 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
34848@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34849 edac_printk(KERN_CRIT, EDAC_PCI,
34850 "Signaled System Error on %s\n",
34851 pci_name(dev));
34852- atomic_inc(&pci_nonparity_count);
34853+ atomic_inc_unchecked(&pci_nonparity_count);
34854 }
34855
34856 if (status & (PCI_STATUS_PARITY)) {
34857@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34858 "Master Data Parity Error on %s\n",
34859 pci_name(dev));
34860
34861- atomic_inc(&pci_parity_count);
34862+ atomic_inc_unchecked(&pci_parity_count);
34863 }
34864
34865 if (status & (PCI_STATUS_DETECTED_PARITY)) {
34866@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34867 "Detected Parity Error on %s\n",
34868 pci_name(dev));
34869
34870- atomic_inc(&pci_parity_count);
34871+ atomic_inc_unchecked(&pci_parity_count);
34872 }
34873 }
34874
34875@@ -618,7 +618,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34876 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
34877 "Signaled System Error on %s\n",
34878 pci_name(dev));
34879- atomic_inc(&pci_nonparity_count);
34880+ atomic_inc_unchecked(&pci_nonparity_count);
34881 }
34882
34883 if (status & (PCI_STATUS_PARITY)) {
34884@@ -626,7 +626,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34885 "Master Data Parity Error on "
34886 "%s\n", pci_name(dev));
34887
34888- atomic_inc(&pci_parity_count);
34889+ atomic_inc_unchecked(&pci_parity_count);
34890 }
34891
34892 if (status & (PCI_STATUS_DETECTED_PARITY)) {
34893@@ -634,7 +634,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34894 "Detected Parity Error on %s\n",
34895 pci_name(dev));
34896
34897- atomic_inc(&pci_parity_count);
34898+ atomic_inc_unchecked(&pci_parity_count);
34899 }
34900 }
34901 }
34902@@ -672,7 +672,7 @@ void edac_pci_do_parity_check(void)
34903 if (!check_pci_errors)
34904 return;
34905
34906- before_count = atomic_read(&pci_parity_count);
34907+ before_count = atomic_read_unchecked(&pci_parity_count);
34908
34909 /* scan all PCI devices looking for a Parity Error on devices and
34910 * bridges.
34911@@ -684,7 +684,7 @@ void edac_pci_do_parity_check(void)
34912 /* Only if operator has selected panic on PCI Error */
34913 if (edac_pci_get_panic_on_pe()) {
34914 /* If the count is different 'after' from 'before' */
34915- if (before_count != atomic_read(&pci_parity_count))
34916+ if (before_count != atomic_read_unchecked(&pci_parity_count))
34917 panic("EDAC: PCI Parity Error");
34918 }
34919 }
34920diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
34921index 51b7e3a..aa8a3e8 100644
34922--- a/drivers/edac/mce_amd.h
34923+++ b/drivers/edac/mce_amd.h
34924@@ -77,7 +77,7 @@ struct amd_decoder_ops {
34925 bool (*mc0_mce)(u16, u8);
34926 bool (*mc1_mce)(u16, u8);
34927 bool (*mc2_mce)(u16, u8);
34928-};
34929+} __no_const;
34930
34931 void amd_report_gart_errors(bool);
34932 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
34933diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
34934index 57ea7f4..789e3c3 100644
34935--- a/drivers/firewire/core-card.c
34936+++ b/drivers/firewire/core-card.c
34937@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
34938
34939 void fw_core_remove_card(struct fw_card *card)
34940 {
34941- struct fw_card_driver dummy_driver = dummy_driver_template;
34942+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
34943
34944 card->driver->update_phy_reg(card, 4,
34945 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
34946diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
34947index 27ac423..13573e8 100644
34948--- a/drivers/firewire/core-cdev.c
34949+++ b/drivers/firewire/core-cdev.c
34950@@ -1366,8 +1366,7 @@ static int init_iso_resource(struct client *client,
34951 int ret;
34952
34953 if ((request->channels == 0 && request->bandwidth == 0) ||
34954- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
34955- request->bandwidth < 0)
34956+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
34957 return -EINVAL;
34958
34959 r = kmalloc(sizeof(*r), GFP_KERNEL);
34960diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
34961index 03ce7d9..b70f5da 100644
34962--- a/drivers/firewire/core-device.c
34963+++ b/drivers/firewire/core-device.c
34964@@ -232,7 +232,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
34965 struct config_rom_attribute {
34966 struct device_attribute attr;
34967 u32 key;
34968-};
34969+} __do_const;
34970
34971 static ssize_t show_immediate(struct device *dev,
34972 struct device_attribute *dattr, char *buf)
34973diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
34974index 28a94c7..58da63a 100644
34975--- a/drivers/firewire/core-transaction.c
34976+++ b/drivers/firewire/core-transaction.c
34977@@ -38,6 +38,7 @@
34978 #include <linux/timer.h>
34979 #include <linux/types.h>
34980 #include <linux/workqueue.h>
34981+#include <linux/sched.h>
34982
34983 #include <asm/byteorder.h>
34984
34985diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
34986index 515a42c..5ecf3ba 100644
34987--- a/drivers/firewire/core.h
34988+++ b/drivers/firewire/core.h
34989@@ -111,6 +111,7 @@ struct fw_card_driver {
34990
34991 int (*stop_iso)(struct fw_iso_context *ctx);
34992 };
34993+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
34994
34995 void fw_card_initialize(struct fw_card *card,
34996 const struct fw_card_driver *driver, struct device *device);
34997diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
34998index 94a58a0..f5eba42 100644
34999--- a/drivers/firmware/dmi-id.c
35000+++ b/drivers/firmware/dmi-id.c
35001@@ -16,7 +16,7 @@
35002 struct dmi_device_attribute{
35003 struct device_attribute dev_attr;
35004 int field;
35005-};
35006+} __do_const;
35007 #define to_dmi_dev_attr(_dev_attr) \
35008 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
35009
35010diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
35011index 4cd392d..4b629e1 100644
35012--- a/drivers/firmware/dmi_scan.c
35013+++ b/drivers/firmware/dmi_scan.c
35014@@ -490,11 +490,6 @@ void __init dmi_scan_machine(void)
35015 }
35016 }
35017 else {
35018- /*
35019- * no iounmap() for that ioremap(); it would be a no-op, but
35020- * it's so early in setup that sucker gets confused into doing
35021- * what it shouldn't if we actually call it.
35022- */
35023 p = dmi_ioremap(0xF0000, 0x10000);
35024 if (p == NULL)
35025 goto error;
35026@@ -769,7 +764,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
35027 if (buf == NULL)
35028 return -1;
35029
35030- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
35031+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
35032
35033 iounmap(buf);
35034 return 0;
35035diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
35036index f4baa11..7970c3a 100644
35037--- a/drivers/firmware/efivars.c
35038+++ b/drivers/firmware/efivars.c
35039@@ -139,7 +139,7 @@ struct efivar_attribute {
35040 };
35041
35042 static struct efivars __efivars;
35043-static struct efivar_operations ops;
35044+static efivar_operations_no_const ops __read_only;
35045
35046 #define PSTORE_EFI_ATTRIBUTES \
35047 (EFI_VARIABLE_NON_VOLATILE | \
35048@@ -1844,7 +1844,7 @@ efivar_create_sysfs_entry(struct efivars *efivars,
35049 static int
35050 create_efivars_bin_attributes(struct efivars *efivars)
35051 {
35052- struct bin_attribute *attr;
35053+ bin_attribute_no_const *attr;
35054 int error;
35055
35056 /* new_var */
35057diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
35058index 2a90ba6..07f3733 100644
35059--- a/drivers/firmware/google/memconsole.c
35060+++ b/drivers/firmware/google/memconsole.c
35061@@ -147,7 +147,9 @@ static int __init memconsole_init(void)
35062 if (!found_memconsole())
35063 return -ENODEV;
35064
35065- memconsole_bin_attr.size = memconsole_length;
35066+ pax_open_kernel();
35067+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
35068+ pax_close_kernel();
35069
35070 ret = sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
35071
35072diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
35073index de3c317..b7cd029 100644
35074--- a/drivers/gpio/gpio-ich.c
35075+++ b/drivers/gpio/gpio-ich.c
35076@@ -69,7 +69,7 @@ struct ichx_desc {
35077 /* Some chipsets have quirks, let these use their own request/get */
35078 int (*request)(struct gpio_chip *chip, unsigned offset);
35079 int (*get)(struct gpio_chip *chip, unsigned offset);
35080-};
35081+} __do_const;
35082
35083 static struct {
35084 spinlock_t lock;
35085diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
35086index 9902732..64b62dd 100644
35087--- a/drivers/gpio/gpio-vr41xx.c
35088+++ b/drivers/gpio/gpio-vr41xx.c
35089@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
35090 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
35091 maskl, pendl, maskh, pendh);
35092
35093- atomic_inc(&irq_err_count);
35094+ atomic_inc_unchecked(&irq_err_count);
35095
35096 return -EINVAL;
35097 }
35098diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
35099index 7b2d378..cc947ea 100644
35100--- a/drivers/gpu/drm/drm_crtc_helper.c
35101+++ b/drivers/gpu/drm/drm_crtc_helper.c
35102@@ -319,7 +319,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
35103 struct drm_crtc *tmp;
35104 int crtc_mask = 1;
35105
35106- WARN(!crtc, "checking null crtc?\n");
35107+ BUG_ON(!crtc);
35108
35109 dev = crtc->dev;
35110
35111diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
35112index 25f91cd..a376f55 100644
35113--- a/drivers/gpu/drm/drm_drv.c
35114+++ b/drivers/gpu/drm/drm_drv.c
35115@@ -306,7 +306,7 @@ module_exit(drm_core_exit);
35116 /**
35117 * Copy and IOCTL return string to user space
35118 */
35119-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
35120+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
35121 {
35122 int len;
35123
35124@@ -376,7 +376,7 @@ long drm_ioctl(struct file *filp,
35125 struct drm_file *file_priv = filp->private_data;
35126 struct drm_device *dev;
35127 struct drm_ioctl_desc *ioctl;
35128- drm_ioctl_t *func;
35129+ drm_ioctl_no_const_t func;
35130 unsigned int nr = DRM_IOCTL_NR(cmd);
35131 int retcode = -EINVAL;
35132 char stack_kdata[128];
35133@@ -389,7 +389,7 @@ long drm_ioctl(struct file *filp,
35134 return -ENODEV;
35135
35136 atomic_inc(&dev->ioctl_count);
35137- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
35138+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
35139 ++file_priv->ioctl_count;
35140
35141 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
35142diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c
35143index 48c52f7..0cfb60f 100644
35144--- a/drivers/gpu/drm/drm_encoder_slave.c
35145+++ b/drivers/gpu/drm/drm_encoder_slave.c
35146@@ -54,16 +54,12 @@ int drm_i2c_encoder_init(struct drm_device *dev,
35147 struct i2c_adapter *adap,
35148 const struct i2c_board_info *info)
35149 {
35150- char modalias[sizeof(I2C_MODULE_PREFIX)
35151- + I2C_NAME_SIZE];
35152 struct module *module = NULL;
35153 struct i2c_client *client;
35154 struct drm_i2c_encoder_driver *encoder_drv;
35155 int err = 0;
35156
35157- snprintf(modalias, sizeof(modalias),
35158- "%s%s", I2C_MODULE_PREFIX, info->type);
35159- request_module(modalias);
35160+ request_module("%s%s", I2C_MODULE_PREFIX, info->type);
35161
35162 client = i2c_new_device(adap, info);
35163 if (!client) {
35164diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
35165index 429e07d..e681a2c 100644
35166--- a/drivers/gpu/drm/drm_fops.c
35167+++ b/drivers/gpu/drm/drm_fops.c
35168@@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
35169 }
35170
35171 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
35172- atomic_set(&dev->counts[i], 0);
35173+ atomic_set_unchecked(&dev->counts[i], 0);
35174
35175 dev->sigdata.lock = NULL;
35176
35177@@ -135,7 +135,7 @@ int drm_open(struct inode *inode, struct file *filp)
35178 if (drm_device_is_unplugged(dev))
35179 return -ENODEV;
35180
35181- if (!dev->open_count++)
35182+ if (local_inc_return(&dev->open_count) == 1)
35183 need_setup = 1;
35184 mutex_lock(&dev->struct_mutex);
35185 old_imapping = inode->i_mapping;
35186@@ -151,7 +151,7 @@ int drm_open(struct inode *inode, struct file *filp)
35187 retcode = drm_open_helper(inode, filp, dev);
35188 if (retcode)
35189 goto err_undo;
35190- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
35191+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
35192 if (need_setup) {
35193 retcode = drm_setup(dev);
35194 if (retcode)
35195@@ -166,7 +166,7 @@ err_undo:
35196 iput(container_of(dev->dev_mapping, struct inode, i_data));
35197 dev->dev_mapping = old_mapping;
35198 mutex_unlock(&dev->struct_mutex);
35199- dev->open_count--;
35200+ local_dec(&dev->open_count);
35201 return retcode;
35202 }
35203 EXPORT_SYMBOL(drm_open);
35204@@ -441,7 +441,7 @@ int drm_release(struct inode *inode, struct file *filp)
35205
35206 mutex_lock(&drm_global_mutex);
35207
35208- DRM_DEBUG("open_count = %d\n", dev->open_count);
35209+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
35210
35211 if (dev->driver->preclose)
35212 dev->driver->preclose(dev, file_priv);
35213@@ -450,10 +450,10 @@ int drm_release(struct inode *inode, struct file *filp)
35214 * Begin inline drm_release
35215 */
35216
35217- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
35218+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
35219 task_pid_nr(current),
35220 (long)old_encode_dev(file_priv->minor->device),
35221- dev->open_count);
35222+ local_read(&dev->open_count));
35223
35224 /* Release any auth tokens that might point to this file_priv,
35225 (do that under the drm_global_mutex) */
35226@@ -550,8 +550,8 @@ int drm_release(struct inode *inode, struct file *filp)
35227 * End inline drm_release
35228 */
35229
35230- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
35231- if (!--dev->open_count) {
35232+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
35233+ if (local_dec_and_test(&dev->open_count)) {
35234 if (atomic_read(&dev->ioctl_count)) {
35235 DRM_ERROR("Device busy: %d\n",
35236 atomic_read(&dev->ioctl_count));
35237diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
35238index f731116..629842c 100644
35239--- a/drivers/gpu/drm/drm_global.c
35240+++ b/drivers/gpu/drm/drm_global.c
35241@@ -36,7 +36,7 @@
35242 struct drm_global_item {
35243 struct mutex mutex;
35244 void *object;
35245- int refcount;
35246+ atomic_t refcount;
35247 };
35248
35249 static struct drm_global_item glob[DRM_GLOBAL_NUM];
35250@@ -49,7 +49,7 @@ void drm_global_init(void)
35251 struct drm_global_item *item = &glob[i];
35252 mutex_init(&item->mutex);
35253 item->object = NULL;
35254- item->refcount = 0;
35255+ atomic_set(&item->refcount, 0);
35256 }
35257 }
35258
35259@@ -59,7 +59,7 @@ void drm_global_release(void)
35260 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
35261 struct drm_global_item *item = &glob[i];
35262 BUG_ON(item->object != NULL);
35263- BUG_ON(item->refcount != 0);
35264+ BUG_ON(atomic_read(&item->refcount) != 0);
35265 }
35266 }
35267
35268@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
35269 void *object;
35270
35271 mutex_lock(&item->mutex);
35272- if (item->refcount == 0) {
35273+ if (atomic_read(&item->refcount) == 0) {
35274 item->object = kzalloc(ref->size, GFP_KERNEL);
35275 if (unlikely(item->object == NULL)) {
35276 ret = -ENOMEM;
35277@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
35278 goto out_err;
35279
35280 }
35281- ++item->refcount;
35282+ atomic_inc(&item->refcount);
35283 ref->object = item->object;
35284 object = item->object;
35285 mutex_unlock(&item->mutex);
35286@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
35287 struct drm_global_item *item = &glob[ref->global_type];
35288
35289 mutex_lock(&item->mutex);
35290- BUG_ON(item->refcount == 0);
35291+ BUG_ON(atomic_read(&item->refcount) == 0);
35292 BUG_ON(ref->object != item->object);
35293- if (--item->refcount == 0) {
35294+ if (atomic_dec_and_test(&item->refcount)) {
35295 ref->release(ref);
35296 item->object = NULL;
35297 }
35298diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
35299index d4b20ce..77a8d41 100644
35300--- a/drivers/gpu/drm/drm_info.c
35301+++ b/drivers/gpu/drm/drm_info.c
35302@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
35303 struct drm_local_map *map;
35304 struct drm_map_list *r_list;
35305
35306- /* Hardcoded from _DRM_FRAME_BUFFER,
35307- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
35308- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
35309- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
35310+ static const char * const types[] = {
35311+ [_DRM_FRAME_BUFFER] = "FB",
35312+ [_DRM_REGISTERS] = "REG",
35313+ [_DRM_SHM] = "SHM",
35314+ [_DRM_AGP] = "AGP",
35315+ [_DRM_SCATTER_GATHER] = "SG",
35316+ [_DRM_CONSISTENT] = "PCI",
35317+ [_DRM_GEM] = "GEM" };
35318 const char *type;
35319 int i;
35320
35321@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
35322 map = r_list->map;
35323 if (!map)
35324 continue;
35325- if (map->type < 0 || map->type > 5)
35326+ if (map->type >= ARRAY_SIZE(types))
35327 type = "??";
35328 else
35329 type = types[map->type];
35330@@ -253,7 +257,11 @@ int drm_vma_info(struct seq_file *m, void *data)
35331 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
35332 vma->vm_flags & VM_LOCKED ? 'l' : '-',
35333 vma->vm_flags & VM_IO ? 'i' : '-',
35334+#ifdef CONFIG_GRKERNSEC_HIDESYM
35335+ 0);
35336+#else
35337 vma->vm_pgoff);
35338+#endif
35339
35340 #if defined(__i386__)
35341 pgprot = pgprot_val(vma->vm_page_prot);
35342diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
35343index 2f4c434..dd12cd2 100644
35344--- a/drivers/gpu/drm/drm_ioc32.c
35345+++ b/drivers/gpu/drm/drm_ioc32.c
35346@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
35347 request = compat_alloc_user_space(nbytes);
35348 if (!access_ok(VERIFY_WRITE, request, nbytes))
35349 return -EFAULT;
35350- list = (struct drm_buf_desc *) (request + 1);
35351+ list = (struct drm_buf_desc __user *) (request + 1);
35352
35353 if (__put_user(count, &request->count)
35354 || __put_user(list, &request->list))
35355@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
35356 request = compat_alloc_user_space(nbytes);
35357 if (!access_ok(VERIFY_WRITE, request, nbytes))
35358 return -EFAULT;
35359- list = (struct drm_buf_pub *) (request + 1);
35360+ list = (struct drm_buf_pub __user *) (request + 1);
35361
35362 if (__put_user(count, &request->count)
35363 || __put_user(list, &request->list))
35364@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
35365 return 0;
35366 }
35367
35368-drm_ioctl_compat_t *drm_compat_ioctls[] = {
35369+drm_ioctl_compat_t drm_compat_ioctls[] = {
35370 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
35371 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
35372 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
35373@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
35374 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35375 {
35376 unsigned int nr = DRM_IOCTL_NR(cmd);
35377- drm_ioctl_compat_t *fn;
35378 int ret;
35379
35380 /* Assume that ioctls without an explicit compat routine will just
35381@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35382 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
35383 return drm_ioctl(filp, cmd, arg);
35384
35385- fn = drm_compat_ioctls[nr];
35386-
35387- if (fn != NULL)
35388- ret = (*fn) (filp, cmd, arg);
35389+ if (drm_compat_ioctls[nr] != NULL)
35390+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
35391 else
35392 ret = drm_ioctl(filp, cmd, arg);
35393
35394diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
35395index e77bd8b..1571b85 100644
35396--- a/drivers/gpu/drm/drm_ioctl.c
35397+++ b/drivers/gpu/drm/drm_ioctl.c
35398@@ -252,7 +252,7 @@ int drm_getstats(struct drm_device *dev, void *data,
35399 stats->data[i].value =
35400 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
35401 else
35402- stats->data[i].value = atomic_read(&dev->counts[i]);
35403+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
35404 stats->data[i].type = dev->types[i];
35405 }
35406
35407diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
35408index d752c96..fe08455 100644
35409--- a/drivers/gpu/drm/drm_lock.c
35410+++ b/drivers/gpu/drm/drm_lock.c
35411@@ -86,7 +86,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
35412 if (drm_lock_take(&master->lock, lock->context)) {
35413 master->lock.file_priv = file_priv;
35414 master->lock.lock_time = jiffies;
35415- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
35416+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
35417 break; /* Got lock */
35418 }
35419
35420@@ -157,7 +157,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
35421 return -EINVAL;
35422 }
35423
35424- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
35425+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
35426
35427 if (drm_lock_free(&master->lock, lock->context)) {
35428 /* FIXME: Should really bail out here. */
35429diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
35430index 7d30802..42c6cbb 100644
35431--- a/drivers/gpu/drm/drm_stub.c
35432+++ b/drivers/gpu/drm/drm_stub.c
35433@@ -501,7 +501,7 @@ void drm_unplug_dev(struct drm_device *dev)
35434
35435 drm_device_set_unplugged(dev);
35436
35437- if (dev->open_count == 0) {
35438+ if (local_read(&dev->open_count) == 0) {
35439 drm_put_dev(dev);
35440 }
35441 mutex_unlock(&drm_global_mutex);
35442diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
35443index 0229665..f61329c 100644
35444--- a/drivers/gpu/drm/drm_sysfs.c
35445+++ b/drivers/gpu/drm/drm_sysfs.c
35446@@ -499,7 +499,7 @@ EXPORT_SYMBOL(drm_sysfs_hotplug_event);
35447 int drm_sysfs_device_add(struct drm_minor *minor)
35448 {
35449 int err;
35450- char *minor_str;
35451+ const char *minor_str;
35452
35453 minor->kdev.parent = minor->dev->dev;
35454
35455diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
35456index 004ecdf..db1f6e0 100644
35457--- a/drivers/gpu/drm/i810/i810_dma.c
35458+++ b/drivers/gpu/drm/i810/i810_dma.c
35459@@ -945,8 +945,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
35460 dma->buflist[vertex->idx],
35461 vertex->discard, vertex->used);
35462
35463- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
35464- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
35465+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
35466+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
35467 sarea_priv->last_enqueue = dev_priv->counter - 1;
35468 sarea_priv->last_dispatch = (int)hw_status[5];
35469
35470@@ -1106,8 +1106,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
35471 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
35472 mc->last_render);
35473
35474- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
35475- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
35476+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
35477+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
35478 sarea_priv->last_enqueue = dev_priv->counter - 1;
35479 sarea_priv->last_dispatch = (int)hw_status[5];
35480
35481diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
35482index 6e0acad..93c8289 100644
35483--- a/drivers/gpu/drm/i810/i810_drv.h
35484+++ b/drivers/gpu/drm/i810/i810_drv.h
35485@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
35486 int page_flipping;
35487
35488 wait_queue_head_t irq_queue;
35489- atomic_t irq_received;
35490- atomic_t irq_emitted;
35491+ atomic_unchecked_t irq_received;
35492+ atomic_unchecked_t irq_emitted;
35493
35494 int front_offset;
35495 } drm_i810_private_t;
35496diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
35497index 7299ea4..5314487 100644
35498--- a/drivers/gpu/drm/i915/i915_debugfs.c
35499+++ b/drivers/gpu/drm/i915/i915_debugfs.c
35500@@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
35501 I915_READ(GTIMR));
35502 }
35503 seq_printf(m, "Interrupts received: %d\n",
35504- atomic_read(&dev_priv->irq_received));
35505+ atomic_read_unchecked(&dev_priv->irq_received));
35506 for_each_ring(ring, dev_priv, i) {
35507 if (IS_GEN6(dev) || IS_GEN7(dev)) {
35508 seq_printf(m,
35509diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
35510index 4fa6beb..f930fec 100644
35511--- a/drivers/gpu/drm/i915/i915_dma.c
35512+++ b/drivers/gpu/drm/i915/i915_dma.c
35513@@ -1259,7 +1259,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
35514 bool can_switch;
35515
35516 spin_lock(&dev->count_lock);
35517- can_switch = (dev->open_count == 0);
35518+ can_switch = (local_read(&dev->open_count) == 0);
35519 spin_unlock(&dev->count_lock);
35520 return can_switch;
35521 }
35522diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
35523index ef99b1c..09ce7fb 100644
35524--- a/drivers/gpu/drm/i915/i915_drv.h
35525+++ b/drivers/gpu/drm/i915/i915_drv.h
35526@@ -893,7 +893,7 @@ typedef struct drm_i915_private {
35527 drm_dma_handle_t *status_page_dmah;
35528 struct resource mch_res;
35529
35530- atomic_t irq_received;
35531+ atomic_unchecked_t irq_received;
35532
35533 /* protects the irq masks */
35534 spinlock_t irq_lock;
35535@@ -1775,7 +1775,7 @@ extern struct i2c_adapter *intel_gmbus_get_adapter(
35536 struct drm_i915_private *dev_priv, unsigned port);
35537 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
35538 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
35539-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
35540+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
35541 {
35542 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
35543 }
35544diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
35545index 9a48e1a..f0cbc3e 100644
35546--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
35547+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
35548@@ -729,9 +729,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
35549
35550 static int
35551 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
35552- int count)
35553+ unsigned int count)
35554 {
35555- int i;
35556+ unsigned int i;
35557 int relocs_total = 0;
35558 int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
35559
35560@@ -1195,7 +1195,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
35561 return -ENOMEM;
35562 }
35563 ret = copy_from_user(exec2_list,
35564- (struct drm_i915_relocation_entry __user *)
35565+ (struct drm_i915_gem_exec_object2 __user *)
35566 (uintptr_t) args->buffers_ptr,
35567 sizeof(*exec2_list) * args->buffer_count);
35568 if (ret != 0) {
35569diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
35570index 3c59584..500f2e9 100644
35571--- a/drivers/gpu/drm/i915/i915_ioc32.c
35572+++ b/drivers/gpu/drm/i915/i915_ioc32.c
35573@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
35574 (unsigned long)request);
35575 }
35576
35577-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
35578+static drm_ioctl_compat_t i915_compat_ioctls[] = {
35579 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
35580 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
35581 [DRM_I915_GETPARAM] = compat_i915_getparam,
35582@@ -202,18 +202,15 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
35583 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35584 {
35585 unsigned int nr = DRM_IOCTL_NR(cmd);
35586- drm_ioctl_compat_t *fn = NULL;
35587 int ret;
35588
35589 if (nr < DRM_COMMAND_BASE)
35590 return drm_compat_ioctl(filp, cmd, arg);
35591
35592- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
35593- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
35594-
35595- if (fn != NULL)
35596+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls)) {
35597+ drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
35598 ret = (*fn) (filp, cmd, arg);
35599- else
35600+ } else
35601 ret = drm_ioctl(filp, cmd, arg);
35602
35603 return ret;
35604diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
35605index 3c7bb04..182e049 100644
35606--- a/drivers/gpu/drm/i915/i915_irq.c
35607+++ b/drivers/gpu/drm/i915/i915_irq.c
35608@@ -549,7 +549,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
35609 int pipe;
35610 u32 pipe_stats[I915_MAX_PIPES];
35611
35612- atomic_inc(&dev_priv->irq_received);
35613+ atomic_inc_unchecked(&dev_priv->irq_received);
35614
35615 while (true) {
35616 iir = I915_READ(VLV_IIR);
35617@@ -705,7 +705,7 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
35618 irqreturn_t ret = IRQ_NONE;
35619 int i;
35620
35621- atomic_inc(&dev_priv->irq_received);
35622+ atomic_inc_unchecked(&dev_priv->irq_received);
35623
35624 /* disable master interrupt before clearing iir */
35625 de_ier = I915_READ(DEIER);
35626@@ -791,7 +791,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
35627 int ret = IRQ_NONE;
35628 u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
35629
35630- atomic_inc(&dev_priv->irq_received);
35631+ atomic_inc_unchecked(&dev_priv->irq_received);
35632
35633 /* disable master interrupt before clearing iir */
35634 de_ier = I915_READ(DEIER);
35635@@ -1886,7 +1886,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
35636 {
35637 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
35638
35639- atomic_set(&dev_priv->irq_received, 0);
35640+ atomic_set_unchecked(&dev_priv->irq_received, 0);
35641
35642 I915_WRITE(HWSTAM, 0xeffe);
35643
35644@@ -1912,7 +1912,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
35645 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
35646 int pipe;
35647
35648- atomic_set(&dev_priv->irq_received, 0);
35649+ atomic_set_unchecked(&dev_priv->irq_received, 0);
35650
35651 /* VLV magic */
35652 I915_WRITE(VLV_IMR, 0);
35653@@ -2208,7 +2208,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
35654 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
35655 int pipe;
35656
35657- atomic_set(&dev_priv->irq_received, 0);
35658+ atomic_set_unchecked(&dev_priv->irq_received, 0);
35659
35660 for_each_pipe(pipe)
35661 I915_WRITE(PIPESTAT(pipe), 0);
35662@@ -2259,7 +2259,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
35663 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
35664 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
35665
35666- atomic_inc(&dev_priv->irq_received);
35667+ atomic_inc_unchecked(&dev_priv->irq_received);
35668
35669 iir = I915_READ16(IIR);
35670 if (iir == 0)
35671@@ -2344,7 +2344,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
35672 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
35673 int pipe;
35674
35675- atomic_set(&dev_priv->irq_received, 0);
35676+ atomic_set_unchecked(&dev_priv->irq_received, 0);
35677
35678 if (I915_HAS_HOTPLUG(dev)) {
35679 I915_WRITE(PORT_HOTPLUG_EN, 0);
35680@@ -2448,7 +2448,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
35681 };
35682 int pipe, ret = IRQ_NONE;
35683
35684- atomic_inc(&dev_priv->irq_received);
35685+ atomic_inc_unchecked(&dev_priv->irq_received);
35686
35687 iir = I915_READ(IIR);
35688 do {
35689@@ -2574,7 +2574,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
35690 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
35691 int pipe;
35692
35693- atomic_set(&dev_priv->irq_received, 0);
35694+ atomic_set_unchecked(&dev_priv->irq_received, 0);
35695
35696 I915_WRITE(PORT_HOTPLUG_EN, 0);
35697 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
35698@@ -2690,7 +2690,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
35699 int irq_received;
35700 int ret = IRQ_NONE, pipe;
35701
35702- atomic_inc(&dev_priv->irq_received);
35703+ atomic_inc_unchecked(&dev_priv->irq_received);
35704
35705 iir = I915_READ(IIR);
35706
35707diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
35708index 2ab65b4..acbd821 100644
35709--- a/drivers/gpu/drm/i915/intel_display.c
35710+++ b/drivers/gpu/drm/i915/intel_display.c
35711@@ -8742,13 +8742,13 @@ struct intel_quirk {
35712 int subsystem_vendor;
35713 int subsystem_device;
35714 void (*hook)(struct drm_device *dev);
35715-};
35716+} __do_const;
35717
35718 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
35719 struct intel_dmi_quirk {
35720 void (*hook)(struct drm_device *dev);
35721 const struct dmi_system_id (*dmi_id_list)[];
35722-};
35723+} __do_const;
35724
35725 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
35726 {
35727@@ -8756,18 +8756,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
35728 return 1;
35729 }
35730
35731-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
35732+static const struct dmi_system_id intel_dmi_quirks_table[] = {
35733 {
35734- .dmi_id_list = &(const struct dmi_system_id[]) {
35735- {
35736- .callback = intel_dmi_reverse_brightness,
35737- .ident = "NCR Corporation",
35738- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
35739- DMI_MATCH(DMI_PRODUCT_NAME, ""),
35740- },
35741- },
35742- { } /* terminating entry */
35743+ .callback = intel_dmi_reverse_brightness,
35744+ .ident = "NCR Corporation",
35745+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
35746+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
35747 },
35748+ },
35749+ { } /* terminating entry */
35750+};
35751+
35752+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
35753+ {
35754+ .dmi_id_list = &intel_dmi_quirks_table,
35755 .hook = quirk_invert_brightness,
35756 },
35757 };
35758diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
35759index 54558a0..2d97005 100644
35760--- a/drivers/gpu/drm/mga/mga_drv.h
35761+++ b/drivers/gpu/drm/mga/mga_drv.h
35762@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
35763 u32 clear_cmd;
35764 u32 maccess;
35765
35766- atomic_t vbl_received; /**< Number of vblanks received. */
35767+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
35768 wait_queue_head_t fence_queue;
35769- atomic_t last_fence_retired;
35770+ atomic_unchecked_t last_fence_retired;
35771 u32 next_fence_to_post;
35772
35773 unsigned int fb_cpp;
35774diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
35775index 709e90d..89a1c0d 100644
35776--- a/drivers/gpu/drm/mga/mga_ioc32.c
35777+++ b/drivers/gpu/drm/mga/mga_ioc32.c
35778@@ -189,7 +189,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
35779 return 0;
35780 }
35781
35782-drm_ioctl_compat_t *mga_compat_ioctls[] = {
35783+drm_ioctl_compat_t mga_compat_ioctls[] = {
35784 [DRM_MGA_INIT] = compat_mga_init,
35785 [DRM_MGA_GETPARAM] = compat_mga_getparam,
35786 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
35787@@ -207,18 +207,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
35788 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35789 {
35790 unsigned int nr = DRM_IOCTL_NR(cmd);
35791- drm_ioctl_compat_t *fn = NULL;
35792 int ret;
35793
35794 if (nr < DRM_COMMAND_BASE)
35795 return drm_compat_ioctl(filp, cmd, arg);
35796
35797- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
35798- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
35799-
35800- if (fn != NULL)
35801+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) {
35802+ drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
35803 ret = (*fn) (filp, cmd, arg);
35804- else
35805+ } else
35806 ret = drm_ioctl(filp, cmd, arg);
35807
35808 return ret;
35809diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
35810index 598c281..60d590e 100644
35811--- a/drivers/gpu/drm/mga/mga_irq.c
35812+++ b/drivers/gpu/drm/mga/mga_irq.c
35813@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
35814 if (crtc != 0)
35815 return 0;
35816
35817- return atomic_read(&dev_priv->vbl_received);
35818+ return atomic_read_unchecked(&dev_priv->vbl_received);
35819 }
35820
35821
35822@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
35823 /* VBLANK interrupt */
35824 if (status & MGA_VLINEPEN) {
35825 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
35826- atomic_inc(&dev_priv->vbl_received);
35827+ atomic_inc_unchecked(&dev_priv->vbl_received);
35828 drm_handle_vblank(dev, 0);
35829 handled = 1;
35830 }
35831@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
35832 if ((prim_start & ~0x03) != (prim_end & ~0x03))
35833 MGA_WRITE(MGA_PRIMEND, prim_end);
35834
35835- atomic_inc(&dev_priv->last_fence_retired);
35836+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
35837 DRM_WAKEUP(&dev_priv->fence_queue);
35838 handled = 1;
35839 }
35840@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
35841 * using fences.
35842 */
35843 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
35844- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
35845+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
35846 - *sequence) <= (1 << 23)));
35847
35848 *sequence = cur_fence;
35849diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
35850index 50a6dd0..ea66ed8 100644
35851--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
35852+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
35853@@ -965,7 +965,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
35854 struct bit_table {
35855 const char id;
35856 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
35857-};
35858+} __no_const;
35859
35860 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
35861
35862diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
35863index 9c39baf..30a22be 100644
35864--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
35865+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
35866@@ -81,7 +81,7 @@ struct nouveau_drm {
35867 struct drm_global_reference mem_global_ref;
35868 struct ttm_bo_global_ref bo_global_ref;
35869 struct ttm_bo_device bdev;
35870- atomic_t validate_sequence;
35871+ atomic_unchecked_t validate_sequence;
35872 int (*move)(struct nouveau_channel *,
35873 struct ttm_buffer_object *,
35874 struct ttm_mem_reg *, struct ttm_mem_reg *);
35875diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
35876index b4b4d0c..b7edc15 100644
35877--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
35878+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
35879@@ -322,7 +322,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
35880 int ret, i;
35881 struct nouveau_bo *res_bo = NULL;
35882
35883- sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
35884+ sequence = atomic_add_return_unchecked(1, &drm->ttm.validate_sequence);
35885 retry:
35886 if (++trycnt > 100000) {
35887 NV_ERROR(cli, "%s failed and gave up.\n", __func__);
35888@@ -359,7 +359,7 @@ retry:
35889 if (ret) {
35890 validate_fini(op, NULL);
35891 if (unlikely(ret == -EAGAIN)) {
35892- sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
35893+ sequence = atomic_add_return_unchecked(1, &drm->ttm.validate_sequence);
35894 ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
35895 sequence);
35896 if (!ret)
35897diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
35898index 08214bc..9208577 100644
35899--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
35900+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
35901@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
35902 unsigned long arg)
35903 {
35904 unsigned int nr = DRM_IOCTL_NR(cmd);
35905- drm_ioctl_compat_t *fn = NULL;
35906+ drm_ioctl_compat_t fn = NULL;
35907 int ret;
35908
35909 if (nr < DRM_COMMAND_BASE)
35910diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
35911index 25d3495..d81aaf6 100644
35912--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
35913+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
35914@@ -62,7 +62,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
35915 bool can_switch;
35916
35917 spin_lock(&dev->count_lock);
35918- can_switch = (dev->open_count == 0);
35919+ can_switch = (local_read(&dev->open_count) == 0);
35920 spin_unlock(&dev->count_lock);
35921 return can_switch;
35922 }
35923diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
35924index d4660cf..70dbe65 100644
35925--- a/drivers/gpu/drm/r128/r128_cce.c
35926+++ b/drivers/gpu/drm/r128/r128_cce.c
35927@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
35928
35929 /* GH: Simple idle check.
35930 */
35931- atomic_set(&dev_priv->idle_count, 0);
35932+ atomic_set_unchecked(&dev_priv->idle_count, 0);
35933
35934 /* We don't support anything other than bus-mastering ring mode,
35935 * but the ring can be in either AGP or PCI space for the ring
35936diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
35937index 930c71b..499aded 100644
35938--- a/drivers/gpu/drm/r128/r128_drv.h
35939+++ b/drivers/gpu/drm/r128/r128_drv.h
35940@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
35941 int is_pci;
35942 unsigned long cce_buffers_offset;
35943
35944- atomic_t idle_count;
35945+ atomic_unchecked_t idle_count;
35946
35947 int page_flipping;
35948 int current_page;
35949 u32 crtc_offset;
35950 u32 crtc_offset_cntl;
35951
35952- atomic_t vbl_received;
35953+ atomic_unchecked_t vbl_received;
35954
35955 u32 color_fmt;
35956 unsigned int front_offset;
35957diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
35958index a954c54..9cc595c 100644
35959--- a/drivers/gpu/drm/r128/r128_ioc32.c
35960+++ b/drivers/gpu/drm/r128/r128_ioc32.c
35961@@ -177,7 +177,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
35962 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
35963 }
35964
35965-drm_ioctl_compat_t *r128_compat_ioctls[] = {
35966+drm_ioctl_compat_t r128_compat_ioctls[] = {
35967 [DRM_R128_INIT] = compat_r128_init,
35968 [DRM_R128_DEPTH] = compat_r128_depth,
35969 [DRM_R128_STIPPLE] = compat_r128_stipple,
35970@@ -196,18 +196,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
35971 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35972 {
35973 unsigned int nr = DRM_IOCTL_NR(cmd);
35974- drm_ioctl_compat_t *fn = NULL;
35975 int ret;
35976
35977 if (nr < DRM_COMMAND_BASE)
35978 return drm_compat_ioctl(filp, cmd, arg);
35979
35980- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls))
35981- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
35982-
35983- if (fn != NULL)
35984+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls)) {
35985+ drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
35986 ret = (*fn) (filp, cmd, arg);
35987- else
35988+ } else
35989 ret = drm_ioctl(filp, cmd, arg);
35990
35991 return ret;
35992diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
35993index 2ea4f09..d391371 100644
35994--- a/drivers/gpu/drm/r128/r128_irq.c
35995+++ b/drivers/gpu/drm/r128/r128_irq.c
35996@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
35997 if (crtc != 0)
35998 return 0;
35999
36000- return atomic_read(&dev_priv->vbl_received);
36001+ return atomic_read_unchecked(&dev_priv->vbl_received);
36002 }
36003
36004 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
36005@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
36006 /* VBLANK interrupt */
36007 if (status & R128_CRTC_VBLANK_INT) {
36008 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
36009- atomic_inc(&dev_priv->vbl_received);
36010+ atomic_inc_unchecked(&dev_priv->vbl_received);
36011 drm_handle_vblank(dev, 0);
36012 return IRQ_HANDLED;
36013 }
36014diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
36015index 19bb7e6..de7e2a2 100644
36016--- a/drivers/gpu/drm/r128/r128_state.c
36017+++ b/drivers/gpu/drm/r128/r128_state.c
36018@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
36019
36020 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
36021 {
36022- if (atomic_read(&dev_priv->idle_count) == 0)
36023+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
36024 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
36025 else
36026- atomic_set(&dev_priv->idle_count, 0);
36027+ atomic_set_unchecked(&dev_priv->idle_count, 0);
36028 }
36029
36030 #endif
36031diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
36032index 5a82b6b..9e69c73 100644
36033--- a/drivers/gpu/drm/radeon/mkregtable.c
36034+++ b/drivers/gpu/drm/radeon/mkregtable.c
36035@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
36036 regex_t mask_rex;
36037 regmatch_t match[4];
36038 char buf[1024];
36039- size_t end;
36040+ long end;
36041 int len;
36042 int done = 0;
36043 int r;
36044 unsigned o;
36045 struct offset *offset;
36046 char last_reg_s[10];
36047- int last_reg;
36048+ unsigned long last_reg;
36049
36050 if (regcomp
36051 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
36052diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
36053index 5073665..31d15a6 100644
36054--- a/drivers/gpu/drm/radeon/radeon_device.c
36055+++ b/drivers/gpu/drm/radeon/radeon_device.c
36056@@ -976,7 +976,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
36057 bool can_switch;
36058
36059 spin_lock(&dev->count_lock);
36060- can_switch = (dev->open_count == 0);
36061+ can_switch = (local_read(&dev->open_count) == 0);
36062 spin_unlock(&dev->count_lock);
36063 return can_switch;
36064 }
36065diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
36066index b369d42..8dd04eb 100644
36067--- a/drivers/gpu/drm/radeon/radeon_drv.h
36068+++ b/drivers/gpu/drm/radeon/radeon_drv.h
36069@@ -258,7 +258,7 @@ typedef struct drm_radeon_private {
36070
36071 /* SW interrupt */
36072 wait_queue_head_t swi_queue;
36073- atomic_t swi_emitted;
36074+ atomic_unchecked_t swi_emitted;
36075 int vblank_crtc;
36076 uint32_t irq_enable_reg;
36077 uint32_t r500_disp_irq_reg;
36078diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
36079index c180df8..5fd8186 100644
36080--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
36081+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
36082@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
36083 request = compat_alloc_user_space(sizeof(*request));
36084 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
36085 || __put_user(req32.param, &request->param)
36086- || __put_user((void __user *)(unsigned long)req32.value,
36087+ || __put_user((unsigned long)req32.value,
36088 &request->value))
36089 return -EFAULT;
36090
36091@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
36092 #define compat_radeon_cp_setparam NULL
36093 #endif /* X86_64 || IA64 */
36094
36095-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
36096+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
36097 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
36098 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
36099 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
36100@@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
36101 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
36102 {
36103 unsigned int nr = DRM_IOCTL_NR(cmd);
36104- drm_ioctl_compat_t *fn = NULL;
36105 int ret;
36106
36107 if (nr < DRM_COMMAND_BASE)
36108 return drm_compat_ioctl(filp, cmd, arg);
36109
36110- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
36111- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
36112-
36113- if (fn != NULL)
36114+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls)) {
36115+ drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
36116 ret = (*fn) (filp, cmd, arg);
36117- else
36118+ } else
36119 ret = drm_ioctl(filp, cmd, arg);
36120
36121 return ret;
36122diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
36123index 8d68e97..9dcfed8 100644
36124--- a/drivers/gpu/drm/radeon/radeon_irq.c
36125+++ b/drivers/gpu/drm/radeon/radeon_irq.c
36126@@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_device * dev)
36127 unsigned int ret;
36128 RING_LOCALS;
36129
36130- atomic_inc(&dev_priv->swi_emitted);
36131- ret = atomic_read(&dev_priv->swi_emitted);
36132+ atomic_inc_unchecked(&dev_priv->swi_emitted);
36133+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
36134
36135 BEGIN_RING(4);
36136 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
36137@@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
36138 drm_radeon_private_t *dev_priv =
36139 (drm_radeon_private_t *) dev->dev_private;
36140
36141- atomic_set(&dev_priv->swi_emitted, 0);
36142+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
36143 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
36144
36145 dev->max_vblank_count = 0x001fffff;
36146diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
36147index 4d20910..6726b6d 100644
36148--- a/drivers/gpu/drm/radeon/radeon_state.c
36149+++ b/drivers/gpu/drm/radeon/radeon_state.c
36150@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
36151 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
36152 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
36153
36154- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
36155+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
36156 sarea_priv->nbox * sizeof(depth_boxes[0])))
36157 return -EFAULT;
36158
36159@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
36160 {
36161 drm_radeon_private_t *dev_priv = dev->dev_private;
36162 drm_radeon_getparam_t *param = data;
36163- int value;
36164+ int value = 0;
36165
36166 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
36167
36168diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
36169index 6c0ce89..66f6d65 100644
36170--- a/drivers/gpu/drm/radeon/radeon_ttm.c
36171+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
36172@@ -782,7 +782,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
36173 man->size = size >> PAGE_SHIFT;
36174 }
36175
36176-static struct vm_operations_struct radeon_ttm_vm_ops;
36177+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
36178 static const struct vm_operations_struct *ttm_vm_ops = NULL;
36179
36180 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
36181@@ -823,8 +823,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
36182 }
36183 if (unlikely(ttm_vm_ops == NULL)) {
36184 ttm_vm_ops = vma->vm_ops;
36185+ pax_open_kernel();
36186 radeon_ttm_vm_ops = *ttm_vm_ops;
36187 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
36188+ pax_close_kernel();
36189 }
36190 vma->vm_ops = &radeon_ttm_vm_ops;
36191 return 0;
36192@@ -862,28 +864,33 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
36193 sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
36194 else
36195 sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
36196- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
36197- radeon_mem_types_list[i].show = &radeon_mm_dump_table;
36198- radeon_mem_types_list[i].driver_features = 0;
36199+ pax_open_kernel();
36200+ *(const char **)&radeon_mem_types_list[i].name = radeon_mem_types_names[i];
36201+ *(void **)&radeon_mem_types_list[i].show = &radeon_mm_dump_table;
36202+ *(u32 *)&radeon_mem_types_list[i].driver_features = 0;
36203 if (i == 0)
36204- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
36205+ *(void **)&radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
36206 else
36207- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
36208-
36209+ *(void **)&radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
36210+ pax_close_kernel();
36211 }
36212 /* Add ttm page pool to debugfs */
36213 sprintf(radeon_mem_types_names[i], "ttm_page_pool");
36214- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
36215- radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
36216- radeon_mem_types_list[i].driver_features = 0;
36217- radeon_mem_types_list[i++].data = NULL;
36218+ pax_open_kernel();
36219+ *(const char **)&radeon_mem_types_list[i].name = radeon_mem_types_names[i];
36220+ *(void **)&radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
36221+ *(u32 *)&radeon_mem_types_list[i].driver_features = 0;
36222+ *(void **)&radeon_mem_types_list[i++].data = NULL;
36223+ pax_close_kernel();
36224 #ifdef CONFIG_SWIOTLB
36225 if (swiotlb_nr_tbl()) {
36226 sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
36227- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
36228- radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
36229- radeon_mem_types_list[i].driver_features = 0;
36230- radeon_mem_types_list[i++].data = NULL;
36231+ pax_open_kernel();
36232+ *(const char **)&radeon_mem_types_list[i].name = radeon_mem_types_names[i];
36233+ *(void **)&radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
36234+ *(u32 *)&radeon_mem_types_list[i].driver_features = 0;
36235+ *(void **)&radeon_mem_types_list[i++].data = NULL;
36236+ pax_close_kernel();
36237 }
36238 #endif
36239 return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);
36240diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
36241index fad6633..4ff94de 100644
36242--- a/drivers/gpu/drm/radeon/rs690.c
36243+++ b/drivers/gpu/drm/radeon/rs690.c
36244@@ -304,9 +304,11 @@ static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
36245 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
36246 rdev->pm.sideport_bandwidth.full)
36247 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
36248- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
36249+ read_delay_latency.full = dfixed_const(800 * 1000);
36250 read_delay_latency.full = dfixed_div(read_delay_latency,
36251 rdev->pm.igp_sideport_mclk);
36252+ a.full = dfixed_const(370);
36253+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
36254 } else {
36255 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
36256 rdev->pm.k8_bandwidth.full)
36257diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
36258index dbc2def..0a9f710 100644
36259--- a/drivers/gpu/drm/ttm/ttm_memory.c
36260+++ b/drivers/gpu/drm/ttm/ttm_memory.c
36261@@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
36262 zone->glob = glob;
36263 glob->zone_kernel = zone;
36264 ret = kobject_init_and_add(
36265- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
36266+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
36267 if (unlikely(ret != 0)) {
36268 kobject_put(&zone->kobj);
36269 return ret;
36270@@ -347,7 +347,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
36271 zone->glob = glob;
36272 glob->zone_dma32 = zone;
36273 ret = kobject_init_and_add(
36274- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
36275+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
36276 if (unlikely(ret != 0)) {
36277 kobject_put(&zone->kobj);
36278 return ret;
36279diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
36280index bd2a3b4..122d9ad 100644
36281--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
36282+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
36283@@ -394,9 +394,9 @@ static int ttm_pool_get_num_unused_pages(void)
36284 static int ttm_pool_mm_shrink(struct shrinker *shrink,
36285 struct shrink_control *sc)
36286 {
36287- static atomic_t start_pool = ATOMIC_INIT(0);
36288+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
36289 unsigned i;
36290- unsigned pool_offset = atomic_add_return(1, &start_pool);
36291+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
36292 struct ttm_page_pool *pool;
36293 int shrink_pages = sc->nr_to_scan;
36294
36295diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
36296index 9f4be3d..cbc9fcc 100644
36297--- a/drivers/gpu/drm/udl/udl_fb.c
36298+++ b/drivers/gpu/drm/udl/udl_fb.c
36299@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
36300 fb_deferred_io_cleanup(info);
36301 kfree(info->fbdefio);
36302 info->fbdefio = NULL;
36303- info->fbops->fb_mmap = udl_fb_mmap;
36304 }
36305
36306 pr_warn("released /dev/fb%d user=%d count=%d\n",
36307diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
36308index 893a650..6190d3b 100644
36309--- a/drivers/gpu/drm/via/via_drv.h
36310+++ b/drivers/gpu/drm/via/via_drv.h
36311@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
36312 typedef uint32_t maskarray_t[5];
36313
36314 typedef struct drm_via_irq {
36315- atomic_t irq_received;
36316+ atomic_unchecked_t irq_received;
36317 uint32_t pending_mask;
36318 uint32_t enable_mask;
36319 wait_queue_head_t irq_queue;
36320@@ -75,7 +75,7 @@ typedef struct drm_via_private {
36321 struct timeval last_vblank;
36322 int last_vblank_valid;
36323 unsigned usec_per_vblank;
36324- atomic_t vbl_received;
36325+ atomic_unchecked_t vbl_received;
36326 drm_via_state_t hc_state;
36327 char pci_buf[VIA_PCI_BUF_SIZE];
36328 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
36329diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
36330index ac98964..5dbf512 100644
36331--- a/drivers/gpu/drm/via/via_irq.c
36332+++ b/drivers/gpu/drm/via/via_irq.c
36333@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
36334 if (crtc != 0)
36335 return 0;
36336
36337- return atomic_read(&dev_priv->vbl_received);
36338+ return atomic_read_unchecked(&dev_priv->vbl_received);
36339 }
36340
36341 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
36342@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
36343
36344 status = VIA_READ(VIA_REG_INTERRUPT);
36345 if (status & VIA_IRQ_VBLANK_PENDING) {
36346- atomic_inc(&dev_priv->vbl_received);
36347- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
36348+ atomic_inc_unchecked(&dev_priv->vbl_received);
36349+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
36350 do_gettimeofday(&cur_vblank);
36351 if (dev_priv->last_vblank_valid) {
36352 dev_priv->usec_per_vblank =
36353@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
36354 dev_priv->last_vblank = cur_vblank;
36355 dev_priv->last_vblank_valid = 1;
36356 }
36357- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
36358+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
36359 DRM_DEBUG("US per vblank is: %u\n",
36360 dev_priv->usec_per_vblank);
36361 }
36362@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
36363
36364 for (i = 0; i < dev_priv->num_irqs; ++i) {
36365 if (status & cur_irq->pending_mask) {
36366- atomic_inc(&cur_irq->irq_received);
36367+ atomic_inc_unchecked(&cur_irq->irq_received);
36368 DRM_WAKEUP(&cur_irq->irq_queue);
36369 handled = 1;
36370 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
36371@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
36372 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
36373 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
36374 masks[irq][4]));
36375- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
36376+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
36377 } else {
36378 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
36379 (((cur_irq_sequence =
36380- atomic_read(&cur_irq->irq_received)) -
36381+ atomic_read_unchecked(&cur_irq->irq_received)) -
36382 *sequence) <= (1 << 23)));
36383 }
36384 *sequence = cur_irq_sequence;
36385@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
36386 }
36387
36388 for (i = 0; i < dev_priv->num_irqs; ++i) {
36389- atomic_set(&cur_irq->irq_received, 0);
36390+ atomic_set_unchecked(&cur_irq->irq_received, 0);
36391 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
36392 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
36393 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
36394@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
36395 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
36396 case VIA_IRQ_RELATIVE:
36397 irqwait->request.sequence +=
36398- atomic_read(&cur_irq->irq_received);
36399+ atomic_read_unchecked(&cur_irq->irq_received);
36400 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
36401 case VIA_IRQ_ABSOLUTE:
36402 break;
36403diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
36404index 13aeda7..4a952d1 100644
36405--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
36406+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
36407@@ -290,7 +290,7 @@ struct vmw_private {
36408 * Fencing and IRQs.
36409 */
36410
36411- atomic_t marker_seq;
36412+ atomic_unchecked_t marker_seq;
36413 wait_queue_head_t fence_queue;
36414 wait_queue_head_t fifo_queue;
36415 int fence_queue_waiters; /* Protected by hw_mutex */
36416diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
36417index 3eb1486..0a47ee9 100644
36418--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
36419+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
36420@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
36421 (unsigned int) min,
36422 (unsigned int) fifo->capabilities);
36423
36424- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
36425+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
36426 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
36427 vmw_marker_queue_init(&fifo->marker_queue);
36428 return vmw_fifo_send_fence(dev_priv, &dummy);
36429@@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
36430 if (reserveable)
36431 iowrite32(bytes, fifo_mem +
36432 SVGA_FIFO_RESERVED);
36433- return fifo_mem + (next_cmd >> 2);
36434+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
36435 } else {
36436 need_bounce = true;
36437 }
36438@@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
36439
36440 fm = vmw_fifo_reserve(dev_priv, bytes);
36441 if (unlikely(fm == NULL)) {
36442- *seqno = atomic_read(&dev_priv->marker_seq);
36443+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
36444 ret = -ENOMEM;
36445 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
36446 false, 3*HZ);
36447@@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
36448 }
36449
36450 do {
36451- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
36452+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
36453 } while (*seqno == 0);
36454
36455 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
36456diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
36457index c509d40..3b640c3 100644
36458--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
36459+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
36460@@ -138,7 +138,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
36461 int ret;
36462
36463 num_clips = arg->num_clips;
36464- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
36465+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
36466
36467 if (unlikely(num_clips == 0))
36468 return 0;
36469@@ -222,7 +222,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
36470 int ret;
36471
36472 num_clips = arg->num_clips;
36473- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
36474+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
36475
36476 if (unlikely(num_clips == 0))
36477 return 0;
36478diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
36479index 4640adb..e1384ed 100644
36480--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
36481+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
36482@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
36483 * emitted. Then the fence is stale and signaled.
36484 */
36485
36486- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
36487+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
36488 > VMW_FENCE_WRAP);
36489
36490 return ret;
36491@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
36492
36493 if (fifo_idle)
36494 down_read(&fifo_state->rwsem);
36495- signal_seq = atomic_read(&dev_priv->marker_seq);
36496+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
36497 ret = 0;
36498
36499 for (;;) {
36500diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
36501index 8a8725c2..afed796 100644
36502--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
36503+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
36504@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
36505 while (!vmw_lag_lt(queue, us)) {
36506 spin_lock(&queue->lock);
36507 if (list_empty(&queue->head))
36508- seqno = atomic_read(&dev_priv->marker_seq);
36509+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
36510 else {
36511 marker = list_first_entry(&queue->head,
36512 struct vmw_marker, head);
36513diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
36514index e6dbf09..3dd2540 100644
36515--- a/drivers/hid/hid-core.c
36516+++ b/drivers/hid/hid-core.c
36517@@ -2268,7 +2268,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
36518
36519 int hid_add_device(struct hid_device *hdev)
36520 {
36521- static atomic_t id = ATOMIC_INIT(0);
36522+ static atomic_unchecked_t id = ATOMIC_INIT(0);
36523 int ret;
36524
36525 if (WARN_ON(hdev->status & HID_STAT_ADDED))
36526@@ -2302,7 +2302,7 @@ int hid_add_device(struct hid_device *hdev)
36527 /* XXX hack, any other cleaner solution after the driver core
36528 * is converted to allow more than 20 bytes as the device name? */
36529 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
36530- hdev->vendor, hdev->product, atomic_inc_return(&id));
36531+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
36532
36533 hid_debug_register(hdev, dev_name(&hdev->dev));
36534 ret = device_add(&hdev->dev);
36535diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
36536index 90124ff..3761764 100644
36537--- a/drivers/hid/hid-wiimote-debug.c
36538+++ b/drivers/hid/hid-wiimote-debug.c
36539@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
36540 else if (size == 0)
36541 return -EIO;
36542
36543- if (copy_to_user(u, buf, size))
36544+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
36545 return -EFAULT;
36546
36547 *off += size;
36548diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
36549index 0b122f8..b1d8160 100644
36550--- a/drivers/hv/channel.c
36551+++ b/drivers/hv/channel.c
36552@@ -394,8 +394,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
36553 int ret = 0;
36554 int t;
36555
36556- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
36557- atomic_inc(&vmbus_connection.next_gpadl_handle);
36558+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
36559+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
36560
36561 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
36562 if (ret)
36563diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
36564index 7311589..861e9ef 100644
36565--- a/drivers/hv/hv.c
36566+++ b/drivers/hv/hv.c
36567@@ -112,7 +112,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
36568 u64 output_address = (output) ? virt_to_phys(output) : 0;
36569 u32 output_address_hi = output_address >> 32;
36570 u32 output_address_lo = output_address & 0xFFFFFFFF;
36571- void *hypercall_page = hv_context.hypercall_page;
36572+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
36573
36574 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
36575 "=a"(hv_status_lo) : "d" (control_hi),
36576diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
36577index 12f2f9e..679603c 100644
36578--- a/drivers/hv/hyperv_vmbus.h
36579+++ b/drivers/hv/hyperv_vmbus.h
36580@@ -591,7 +591,7 @@ enum vmbus_connect_state {
36581 struct vmbus_connection {
36582 enum vmbus_connect_state conn_state;
36583
36584- atomic_t next_gpadl_handle;
36585+ atomic_unchecked_t next_gpadl_handle;
36586
36587 /*
36588 * Represents channel interrupts. Each bit position represents a
36589diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
36590index bf421e0..ce2c897 100644
36591--- a/drivers/hv/vmbus_drv.c
36592+++ b/drivers/hv/vmbus_drv.c
36593@@ -668,10 +668,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
36594 {
36595 int ret = 0;
36596
36597- static atomic_t device_num = ATOMIC_INIT(0);
36598+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
36599
36600 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
36601- atomic_inc_return(&device_num));
36602+ atomic_inc_return_unchecked(&device_num));
36603
36604 child_device_obj->device.bus = &hv_bus;
36605 child_device_obj->device.parent = &hv_acpi_dev->dev;
36606diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
36607index 6351aba..dc4aaf4 100644
36608--- a/drivers/hwmon/acpi_power_meter.c
36609+++ b/drivers/hwmon/acpi_power_meter.c
36610@@ -117,7 +117,7 @@ struct sensor_template {
36611 struct device_attribute *devattr,
36612 const char *buf, size_t count);
36613 int index;
36614-};
36615+} __do_const;
36616
36617 /* Averaging interval */
36618 static int update_avg_interval(struct acpi_power_meter_resource *resource)
36619@@ -629,7 +629,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
36620 struct sensor_template *attrs)
36621 {
36622 struct device *dev = &resource->acpi_dev->dev;
36623- struct sensor_device_attribute *sensors =
36624+ sensor_device_attribute_no_const *sensors =
36625 &resource->sensors[resource->num_sensors];
36626 int res = 0;
36627
36628diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
36629index b41baff..4953e4d 100644
36630--- a/drivers/hwmon/applesmc.c
36631+++ b/drivers/hwmon/applesmc.c
36632@@ -1084,7 +1084,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
36633 {
36634 struct applesmc_node_group *grp;
36635 struct applesmc_dev_attr *node;
36636- struct attribute *attr;
36637+ attribute_no_const *attr;
36638 int ret, i;
36639
36640 for (grp = groups; grp->format; grp++) {
36641diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
36642index b25c643..a13460d 100644
36643--- a/drivers/hwmon/asus_atk0110.c
36644+++ b/drivers/hwmon/asus_atk0110.c
36645@@ -152,10 +152,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
36646 struct atk_sensor_data {
36647 struct list_head list;
36648 struct atk_data *data;
36649- struct device_attribute label_attr;
36650- struct device_attribute input_attr;
36651- struct device_attribute limit1_attr;
36652- struct device_attribute limit2_attr;
36653+ device_attribute_no_const label_attr;
36654+ device_attribute_no_const input_attr;
36655+ device_attribute_no_const limit1_attr;
36656+ device_attribute_no_const limit2_attr;
36657 char label_attr_name[ATTR_NAME_SIZE];
36658 char input_attr_name[ATTR_NAME_SIZE];
36659 char limit1_attr_name[ATTR_NAME_SIZE];
36660@@ -275,7 +275,7 @@ static ssize_t atk_name_show(struct device *dev,
36661 static struct device_attribute atk_name_attr =
36662 __ATTR(name, 0444, atk_name_show, NULL);
36663
36664-static void atk_init_attribute(struct device_attribute *attr, char *name,
36665+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
36666 sysfs_show_func show)
36667 {
36668 sysfs_attr_init(&attr->attr);
36669diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
36670index 3f1e297..a6cafb5 100644
36671--- a/drivers/hwmon/coretemp.c
36672+++ b/drivers/hwmon/coretemp.c
36673@@ -791,7 +791,7 @@ static int __cpuinit coretemp_cpu_callback(struct notifier_block *nfb,
36674 return NOTIFY_OK;
36675 }
36676
36677-static struct notifier_block coretemp_cpu_notifier __refdata = {
36678+static struct notifier_block coretemp_cpu_notifier = {
36679 .notifier_call = coretemp_cpu_callback,
36680 };
36681
36682diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
36683index a14f634..2916ee2 100644
36684--- a/drivers/hwmon/ibmaem.c
36685+++ b/drivers/hwmon/ibmaem.c
36686@@ -925,7 +925,7 @@ static int aem_register_sensors(struct aem_data *data,
36687 struct aem_rw_sensor_template *rw)
36688 {
36689 struct device *dev = &data->pdev->dev;
36690- struct sensor_device_attribute *sensors = data->sensors;
36691+ sensor_device_attribute_no_const *sensors = data->sensors;
36692 int err;
36693
36694 /* Set up read-only sensors */
36695diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
36696index 9add6092..ee7ba3f 100644
36697--- a/drivers/hwmon/pmbus/pmbus_core.c
36698+++ b/drivers/hwmon/pmbus/pmbus_core.c
36699@@ -781,7 +781,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
36700 return 0;
36701 }
36702
36703-static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
36704+static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
36705 const char *name,
36706 umode_t mode,
36707 ssize_t (*show)(struct device *dev,
36708@@ -798,7 +798,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
36709 dev_attr->store = store;
36710 }
36711
36712-static void pmbus_attr_init(struct sensor_device_attribute *a,
36713+static void pmbus_attr_init(sensor_device_attribute_no_const *a,
36714 const char *name,
36715 umode_t mode,
36716 ssize_t (*show)(struct device *dev,
36717@@ -820,7 +820,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
36718 u16 reg, u8 mask)
36719 {
36720 struct pmbus_boolean *boolean;
36721- struct sensor_device_attribute *a;
36722+ sensor_device_attribute_no_const *a;
36723
36724 boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
36725 if (!boolean)
36726@@ -845,7 +845,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
36727 bool update, bool readonly)
36728 {
36729 struct pmbus_sensor *sensor;
36730- struct device_attribute *a;
36731+ device_attribute_no_const *a;
36732
36733 sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
36734 if (!sensor)
36735@@ -876,7 +876,7 @@ static int pmbus_add_label(struct pmbus_data *data,
36736 const char *lstring, int index)
36737 {
36738 struct pmbus_label *label;
36739- struct device_attribute *a;
36740+ device_attribute_no_const *a;
36741
36742 label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
36743 if (!label)
36744diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
36745index 2507f90..1645765 100644
36746--- a/drivers/hwmon/sht15.c
36747+++ b/drivers/hwmon/sht15.c
36748@@ -169,7 +169,7 @@ struct sht15_data {
36749 int supply_uv;
36750 bool supply_uv_valid;
36751 struct work_struct update_supply_work;
36752- atomic_t interrupt_handled;
36753+ atomic_unchecked_t interrupt_handled;
36754 };
36755
36756 /**
36757@@ -542,13 +542,13 @@ static int sht15_measurement(struct sht15_data *data,
36758 ret = gpio_direction_input(data->pdata->gpio_data);
36759 if (ret)
36760 return ret;
36761- atomic_set(&data->interrupt_handled, 0);
36762+ atomic_set_unchecked(&data->interrupt_handled, 0);
36763
36764 enable_irq(gpio_to_irq(data->pdata->gpio_data));
36765 if (gpio_get_value(data->pdata->gpio_data) == 0) {
36766 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
36767 /* Only relevant if the interrupt hasn't occurred. */
36768- if (!atomic_read(&data->interrupt_handled))
36769+ if (!atomic_read_unchecked(&data->interrupt_handled))
36770 schedule_work(&data->read_work);
36771 }
36772 ret = wait_event_timeout(data->wait_queue,
36773@@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
36774
36775 /* First disable the interrupt */
36776 disable_irq_nosync(irq);
36777- atomic_inc(&data->interrupt_handled);
36778+ atomic_inc_unchecked(&data->interrupt_handled);
36779 /* Then schedule a reading work struct */
36780 if (data->state != SHT15_READING_NOTHING)
36781 schedule_work(&data->read_work);
36782@@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
36783 * If not, then start the interrupt again - care here as could
36784 * have gone low in meantime so verify it hasn't!
36785 */
36786- atomic_set(&data->interrupt_handled, 0);
36787+ atomic_set_unchecked(&data->interrupt_handled, 0);
36788 enable_irq(gpio_to_irq(data->pdata->gpio_data));
36789 /* If still not occurred or another handler was scheduled */
36790 if (gpio_get_value(data->pdata->gpio_data)
36791- || atomic_read(&data->interrupt_handled))
36792+ || atomic_read_unchecked(&data->interrupt_handled))
36793 return;
36794 }
36795
36796diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
36797index 76f157b..9c0db1b 100644
36798--- a/drivers/hwmon/via-cputemp.c
36799+++ b/drivers/hwmon/via-cputemp.c
36800@@ -296,7 +296,7 @@ static int __cpuinit via_cputemp_cpu_callback(struct notifier_block *nfb,
36801 return NOTIFY_OK;
36802 }
36803
36804-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
36805+static struct notifier_block via_cputemp_cpu_notifier = {
36806 .notifier_call = via_cputemp_cpu_callback,
36807 };
36808
36809diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
36810index 378fcb5..5e91fa8 100644
36811--- a/drivers/i2c/busses/i2c-amd756-s4882.c
36812+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
36813@@ -43,7 +43,7 @@
36814 extern struct i2c_adapter amd756_smbus;
36815
36816 static struct i2c_adapter *s4882_adapter;
36817-static struct i2c_algorithm *s4882_algo;
36818+static i2c_algorithm_no_const *s4882_algo;
36819
36820 /* Wrapper access functions for multiplexed SMBus */
36821 static DEFINE_MUTEX(amd756_lock);
36822diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
36823index 29015eb..af2d8e9 100644
36824--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
36825+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
36826@@ -41,7 +41,7 @@
36827 extern struct i2c_adapter *nforce2_smbus;
36828
36829 static struct i2c_adapter *s4985_adapter;
36830-static struct i2c_algorithm *s4985_algo;
36831+static i2c_algorithm_no_const *s4985_algo;
36832
36833 /* Wrapper access functions for multiplexed SMBus */
36834 static DEFINE_MUTEX(nforce2_lock);
36835diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
36836index c3ccdea..5b3dc1a 100644
36837--- a/drivers/i2c/i2c-dev.c
36838+++ b/drivers/i2c/i2c-dev.c
36839@@ -271,7 +271,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
36840 break;
36841 }
36842
36843- data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
36844+ data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
36845 rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
36846 if (IS_ERR(rdwr_pa[i].buf)) {
36847 res = PTR_ERR(rdwr_pa[i].buf);
36848diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
36849index 8126824..55a2798 100644
36850--- a/drivers/ide/ide-cd.c
36851+++ b/drivers/ide/ide-cd.c
36852@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
36853 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
36854 if ((unsigned long)buf & alignment
36855 || blk_rq_bytes(rq) & q->dma_pad_mask
36856- || object_is_on_stack(buf))
36857+ || object_starts_on_stack(buf))
36858 drive->dma = 0;
36859 }
36860 }
36861diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
36862index 8848f16..f8e6dd8 100644
36863--- a/drivers/iio/industrialio-core.c
36864+++ b/drivers/iio/industrialio-core.c
36865@@ -506,7 +506,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
36866 }
36867
36868 static
36869-int __iio_device_attr_init(struct device_attribute *dev_attr,
36870+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
36871 const char *postfix,
36872 struct iio_chan_spec const *chan,
36873 ssize_t (*readfunc)(struct device *dev,
36874diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
36875index 784b97c..c9ceadf 100644
36876--- a/drivers/infiniband/core/cm.c
36877+++ b/drivers/infiniband/core/cm.c
36878@@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
36879
36880 struct cm_counter_group {
36881 struct kobject obj;
36882- atomic_long_t counter[CM_ATTR_COUNT];
36883+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
36884 };
36885
36886 struct cm_counter_attribute {
36887@@ -1395,7 +1395,7 @@ static void cm_dup_req_handler(struct cm_work *work,
36888 struct ib_mad_send_buf *msg = NULL;
36889 int ret;
36890
36891- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36892+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36893 counter[CM_REQ_COUNTER]);
36894
36895 /* Quick state check to discard duplicate REQs. */
36896@@ -1779,7 +1779,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
36897 if (!cm_id_priv)
36898 return;
36899
36900- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36901+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36902 counter[CM_REP_COUNTER]);
36903 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
36904 if (ret)
36905@@ -1946,7 +1946,7 @@ static int cm_rtu_handler(struct cm_work *work)
36906 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
36907 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
36908 spin_unlock_irq(&cm_id_priv->lock);
36909- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36910+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36911 counter[CM_RTU_COUNTER]);
36912 goto out;
36913 }
36914@@ -2129,7 +2129,7 @@ static int cm_dreq_handler(struct cm_work *work)
36915 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
36916 dreq_msg->local_comm_id);
36917 if (!cm_id_priv) {
36918- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36919+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36920 counter[CM_DREQ_COUNTER]);
36921 cm_issue_drep(work->port, work->mad_recv_wc);
36922 return -EINVAL;
36923@@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_work *work)
36924 case IB_CM_MRA_REP_RCVD:
36925 break;
36926 case IB_CM_TIMEWAIT:
36927- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36928+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36929 counter[CM_DREQ_COUNTER]);
36930 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
36931 goto unlock;
36932@@ -2168,7 +2168,7 @@ static int cm_dreq_handler(struct cm_work *work)
36933 cm_free_msg(msg);
36934 goto deref;
36935 case IB_CM_DREQ_RCVD:
36936- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36937+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36938 counter[CM_DREQ_COUNTER]);
36939 goto unlock;
36940 default:
36941@@ -2535,7 +2535,7 @@ static int cm_mra_handler(struct cm_work *work)
36942 ib_modify_mad(cm_id_priv->av.port->mad_agent,
36943 cm_id_priv->msg, timeout)) {
36944 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
36945- atomic_long_inc(&work->port->
36946+ atomic_long_inc_unchecked(&work->port->
36947 counter_group[CM_RECV_DUPLICATES].
36948 counter[CM_MRA_COUNTER]);
36949 goto out;
36950@@ -2544,7 +2544,7 @@ static int cm_mra_handler(struct cm_work *work)
36951 break;
36952 case IB_CM_MRA_REQ_RCVD:
36953 case IB_CM_MRA_REP_RCVD:
36954- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36955+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36956 counter[CM_MRA_COUNTER]);
36957 /* fall through */
36958 default:
36959@@ -2706,7 +2706,7 @@ static int cm_lap_handler(struct cm_work *work)
36960 case IB_CM_LAP_IDLE:
36961 break;
36962 case IB_CM_MRA_LAP_SENT:
36963- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36964+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36965 counter[CM_LAP_COUNTER]);
36966 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
36967 goto unlock;
36968@@ -2722,7 +2722,7 @@ static int cm_lap_handler(struct cm_work *work)
36969 cm_free_msg(msg);
36970 goto deref;
36971 case IB_CM_LAP_RCVD:
36972- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36973+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36974 counter[CM_LAP_COUNTER]);
36975 goto unlock;
36976 default:
36977@@ -3006,7 +3006,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
36978 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
36979 if (cur_cm_id_priv) {
36980 spin_unlock_irq(&cm.lock);
36981- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36982+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36983 counter[CM_SIDR_REQ_COUNTER]);
36984 goto out; /* Duplicate message. */
36985 }
36986@@ -3218,10 +3218,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
36987 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
36988 msg->retries = 1;
36989
36990- atomic_long_add(1 + msg->retries,
36991+ atomic_long_add_unchecked(1 + msg->retries,
36992 &port->counter_group[CM_XMIT].counter[attr_index]);
36993 if (msg->retries)
36994- atomic_long_add(msg->retries,
36995+ atomic_long_add_unchecked(msg->retries,
36996 &port->counter_group[CM_XMIT_RETRIES].
36997 counter[attr_index]);
36998
36999@@ -3431,7 +3431,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
37000 }
37001
37002 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
37003- atomic_long_inc(&port->counter_group[CM_RECV].
37004+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
37005 counter[attr_id - CM_ATTR_ID_OFFSET]);
37006
37007 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
37008@@ -3636,7 +3636,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
37009 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
37010
37011 return sprintf(buf, "%ld\n",
37012- atomic_long_read(&group->counter[cm_attr->index]));
37013+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
37014 }
37015
37016 static const struct sysfs_ops cm_counter_ops = {
37017diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
37018index 9f5ad7c..588cd84 100644
37019--- a/drivers/infiniband/core/fmr_pool.c
37020+++ b/drivers/infiniband/core/fmr_pool.c
37021@@ -98,8 +98,8 @@ struct ib_fmr_pool {
37022
37023 struct task_struct *thread;
37024
37025- atomic_t req_ser;
37026- atomic_t flush_ser;
37027+ atomic_unchecked_t req_ser;
37028+ atomic_unchecked_t flush_ser;
37029
37030 wait_queue_head_t force_wait;
37031 };
37032@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
37033 struct ib_fmr_pool *pool = pool_ptr;
37034
37035 do {
37036- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
37037+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
37038 ib_fmr_batch_release(pool);
37039
37040- atomic_inc(&pool->flush_ser);
37041+ atomic_inc_unchecked(&pool->flush_ser);
37042 wake_up_interruptible(&pool->force_wait);
37043
37044 if (pool->flush_function)
37045@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
37046 }
37047
37048 set_current_state(TASK_INTERRUPTIBLE);
37049- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
37050+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
37051 !kthread_should_stop())
37052 schedule();
37053 __set_current_state(TASK_RUNNING);
37054@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
37055 pool->dirty_watermark = params->dirty_watermark;
37056 pool->dirty_len = 0;
37057 spin_lock_init(&pool->pool_lock);
37058- atomic_set(&pool->req_ser, 0);
37059- atomic_set(&pool->flush_ser, 0);
37060+ atomic_set_unchecked(&pool->req_ser, 0);
37061+ atomic_set_unchecked(&pool->flush_ser, 0);
37062 init_waitqueue_head(&pool->force_wait);
37063
37064 pool->thread = kthread_run(ib_fmr_cleanup_thread,
37065@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
37066 }
37067 spin_unlock_irq(&pool->pool_lock);
37068
37069- serial = atomic_inc_return(&pool->req_ser);
37070+ serial = atomic_inc_return_unchecked(&pool->req_ser);
37071 wake_up_process(pool->thread);
37072
37073 if (wait_event_interruptible(pool->force_wait,
37074- atomic_read(&pool->flush_ser) - serial >= 0))
37075+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
37076 return -EINTR;
37077
37078 return 0;
37079@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
37080 } else {
37081 list_add_tail(&fmr->list, &pool->dirty_list);
37082 if (++pool->dirty_len >= pool->dirty_watermark) {
37083- atomic_inc(&pool->req_ser);
37084+ atomic_inc_unchecked(&pool->req_ser);
37085 wake_up_process(pool->thread);
37086 }
37087 }
37088diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
37089index 903a92d..9262548 100644
37090--- a/drivers/infiniband/hw/cxgb4/mem.c
37091+++ b/drivers/infiniband/hw/cxgb4/mem.c
37092@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
37093 int err;
37094 struct fw_ri_tpte tpt;
37095 u32 stag_idx;
37096- static atomic_t key;
37097+ static atomic_unchecked_t key;
37098
37099 if (c4iw_fatal_error(rdev))
37100 return -EIO;
37101@@ -139,7 +139,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
37102 if (rdev->stats.stag.cur > rdev->stats.stag.max)
37103 rdev->stats.stag.max = rdev->stats.stag.cur;
37104 mutex_unlock(&rdev->stats.lock);
37105- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
37106+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
37107 }
37108 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
37109 __func__, stag_state, type, pdid, stag_idx);
37110diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
37111index 79b3dbc..96e5fcc 100644
37112--- a/drivers/infiniband/hw/ipath/ipath_rc.c
37113+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
37114@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
37115 struct ib_atomic_eth *ateth;
37116 struct ipath_ack_entry *e;
37117 u64 vaddr;
37118- atomic64_t *maddr;
37119+ atomic64_unchecked_t *maddr;
37120 u64 sdata;
37121 u32 rkey;
37122 u8 next;
37123@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
37124 IB_ACCESS_REMOTE_ATOMIC)))
37125 goto nack_acc_unlck;
37126 /* Perform atomic OP and save result. */
37127- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
37128+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
37129 sdata = be64_to_cpu(ateth->swap_data);
37130 e = &qp->s_ack_queue[qp->r_head_ack_queue];
37131 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
37132- (u64) atomic64_add_return(sdata, maddr) - sdata :
37133+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
37134 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
37135 be64_to_cpu(ateth->compare_data),
37136 sdata);
37137diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
37138index 1f95bba..9530f87 100644
37139--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
37140+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
37141@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
37142 unsigned long flags;
37143 struct ib_wc wc;
37144 u64 sdata;
37145- atomic64_t *maddr;
37146+ atomic64_unchecked_t *maddr;
37147 enum ib_wc_status send_status;
37148
37149 /*
37150@@ -382,11 +382,11 @@ again:
37151 IB_ACCESS_REMOTE_ATOMIC)))
37152 goto acc_err;
37153 /* Perform atomic OP and save result. */
37154- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
37155+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
37156 sdata = wqe->wr.wr.atomic.compare_add;
37157 *(u64 *) sqp->s_sge.sge.vaddr =
37158 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
37159- (u64) atomic64_add_return(sdata, maddr) - sdata :
37160+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
37161 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
37162 sdata, wqe->wr.wr.atomic.swap);
37163 goto send_comp;
37164diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
37165index 9d3e5c1..d9afe4a 100644
37166--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
37167+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
37168@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
37169 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
37170 }
37171
37172-int mthca_QUERY_FW(struct mthca_dev *dev)
37173+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
37174 {
37175 struct mthca_mailbox *mailbox;
37176 u32 *outbox;
37177diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
37178index ed9a989..e0c5871 100644
37179--- a/drivers/infiniband/hw/mthca/mthca_mr.c
37180+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
37181@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
37182 return key;
37183 }
37184
37185-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
37186+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
37187 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
37188 {
37189 struct mthca_mailbox *mailbox;
37190diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
37191index 4291410..d2ab1fb 100644
37192--- a/drivers/infiniband/hw/nes/nes.c
37193+++ b/drivers/infiniband/hw/nes/nes.c
37194@@ -98,7 +98,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
37195 LIST_HEAD(nes_adapter_list);
37196 static LIST_HEAD(nes_dev_list);
37197
37198-atomic_t qps_destroyed;
37199+atomic_unchecked_t qps_destroyed;
37200
37201 static unsigned int ee_flsh_adapter;
37202 static unsigned int sysfs_nonidx_addr;
37203@@ -269,7 +269,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
37204 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
37205 struct nes_adapter *nesadapter = nesdev->nesadapter;
37206
37207- atomic_inc(&qps_destroyed);
37208+ atomic_inc_unchecked(&qps_destroyed);
37209
37210 /* Free the control structures */
37211
37212diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
37213index 33cc589..3bd6538 100644
37214--- a/drivers/infiniband/hw/nes/nes.h
37215+++ b/drivers/infiniband/hw/nes/nes.h
37216@@ -177,17 +177,17 @@ extern unsigned int nes_debug_level;
37217 extern unsigned int wqm_quanta;
37218 extern struct list_head nes_adapter_list;
37219
37220-extern atomic_t cm_connects;
37221-extern atomic_t cm_accepts;
37222-extern atomic_t cm_disconnects;
37223-extern atomic_t cm_closes;
37224-extern atomic_t cm_connecteds;
37225-extern atomic_t cm_connect_reqs;
37226-extern atomic_t cm_rejects;
37227-extern atomic_t mod_qp_timouts;
37228-extern atomic_t qps_created;
37229-extern atomic_t qps_destroyed;
37230-extern atomic_t sw_qps_destroyed;
37231+extern atomic_unchecked_t cm_connects;
37232+extern atomic_unchecked_t cm_accepts;
37233+extern atomic_unchecked_t cm_disconnects;
37234+extern atomic_unchecked_t cm_closes;
37235+extern atomic_unchecked_t cm_connecteds;
37236+extern atomic_unchecked_t cm_connect_reqs;
37237+extern atomic_unchecked_t cm_rejects;
37238+extern atomic_unchecked_t mod_qp_timouts;
37239+extern atomic_unchecked_t qps_created;
37240+extern atomic_unchecked_t qps_destroyed;
37241+extern atomic_unchecked_t sw_qps_destroyed;
37242 extern u32 mh_detected;
37243 extern u32 mh_pauses_sent;
37244 extern u32 cm_packets_sent;
37245@@ -196,16 +196,16 @@ extern u32 cm_packets_created;
37246 extern u32 cm_packets_received;
37247 extern u32 cm_packets_dropped;
37248 extern u32 cm_packets_retrans;
37249-extern atomic_t cm_listens_created;
37250-extern atomic_t cm_listens_destroyed;
37251+extern atomic_unchecked_t cm_listens_created;
37252+extern atomic_unchecked_t cm_listens_destroyed;
37253 extern u32 cm_backlog_drops;
37254-extern atomic_t cm_loopbacks;
37255-extern atomic_t cm_nodes_created;
37256-extern atomic_t cm_nodes_destroyed;
37257-extern atomic_t cm_accel_dropped_pkts;
37258-extern atomic_t cm_resets_recvd;
37259-extern atomic_t pau_qps_created;
37260-extern atomic_t pau_qps_destroyed;
37261+extern atomic_unchecked_t cm_loopbacks;
37262+extern atomic_unchecked_t cm_nodes_created;
37263+extern atomic_unchecked_t cm_nodes_destroyed;
37264+extern atomic_unchecked_t cm_accel_dropped_pkts;
37265+extern atomic_unchecked_t cm_resets_recvd;
37266+extern atomic_unchecked_t pau_qps_created;
37267+extern atomic_unchecked_t pau_qps_destroyed;
37268
37269 extern u32 int_mod_timer_init;
37270 extern u32 int_mod_cq_depth_256;
37271diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
37272index 24b9f1a..00fd004 100644
37273--- a/drivers/infiniband/hw/nes/nes_cm.c
37274+++ b/drivers/infiniband/hw/nes/nes_cm.c
37275@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
37276 u32 cm_packets_retrans;
37277 u32 cm_packets_created;
37278 u32 cm_packets_received;
37279-atomic_t cm_listens_created;
37280-atomic_t cm_listens_destroyed;
37281+atomic_unchecked_t cm_listens_created;
37282+atomic_unchecked_t cm_listens_destroyed;
37283 u32 cm_backlog_drops;
37284-atomic_t cm_loopbacks;
37285-atomic_t cm_nodes_created;
37286-atomic_t cm_nodes_destroyed;
37287-atomic_t cm_accel_dropped_pkts;
37288-atomic_t cm_resets_recvd;
37289+atomic_unchecked_t cm_loopbacks;
37290+atomic_unchecked_t cm_nodes_created;
37291+atomic_unchecked_t cm_nodes_destroyed;
37292+atomic_unchecked_t cm_accel_dropped_pkts;
37293+atomic_unchecked_t cm_resets_recvd;
37294
37295 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
37296 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
37297@@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
37298
37299 static struct nes_cm_core *g_cm_core;
37300
37301-atomic_t cm_connects;
37302-atomic_t cm_accepts;
37303-atomic_t cm_disconnects;
37304-atomic_t cm_closes;
37305-atomic_t cm_connecteds;
37306-atomic_t cm_connect_reqs;
37307-atomic_t cm_rejects;
37308+atomic_unchecked_t cm_connects;
37309+atomic_unchecked_t cm_accepts;
37310+atomic_unchecked_t cm_disconnects;
37311+atomic_unchecked_t cm_closes;
37312+atomic_unchecked_t cm_connecteds;
37313+atomic_unchecked_t cm_connect_reqs;
37314+atomic_unchecked_t cm_rejects;
37315
37316 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
37317 {
37318@@ -1272,7 +1272,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
37319 kfree(listener);
37320 listener = NULL;
37321 ret = 0;
37322- atomic_inc(&cm_listens_destroyed);
37323+ atomic_inc_unchecked(&cm_listens_destroyed);
37324 } else {
37325 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
37326 }
37327@@ -1466,7 +1466,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
37328 cm_node->rem_mac);
37329
37330 add_hte_node(cm_core, cm_node);
37331- atomic_inc(&cm_nodes_created);
37332+ atomic_inc_unchecked(&cm_nodes_created);
37333
37334 return cm_node;
37335 }
37336@@ -1524,7 +1524,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
37337 }
37338
37339 atomic_dec(&cm_core->node_cnt);
37340- atomic_inc(&cm_nodes_destroyed);
37341+ atomic_inc_unchecked(&cm_nodes_destroyed);
37342 nesqp = cm_node->nesqp;
37343 if (nesqp) {
37344 nesqp->cm_node = NULL;
37345@@ -1588,7 +1588,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
37346
37347 static void drop_packet(struct sk_buff *skb)
37348 {
37349- atomic_inc(&cm_accel_dropped_pkts);
37350+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
37351 dev_kfree_skb_any(skb);
37352 }
37353
37354@@ -1651,7 +1651,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
37355 {
37356
37357 int reset = 0; /* whether to send reset in case of err.. */
37358- atomic_inc(&cm_resets_recvd);
37359+ atomic_inc_unchecked(&cm_resets_recvd);
37360 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
37361 " refcnt=%d\n", cm_node, cm_node->state,
37362 atomic_read(&cm_node->ref_count));
37363@@ -2292,7 +2292,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
37364 rem_ref_cm_node(cm_node->cm_core, cm_node);
37365 return NULL;
37366 }
37367- atomic_inc(&cm_loopbacks);
37368+ atomic_inc_unchecked(&cm_loopbacks);
37369 loopbackremotenode->loopbackpartner = cm_node;
37370 loopbackremotenode->tcp_cntxt.rcv_wscale =
37371 NES_CM_DEFAULT_RCV_WND_SCALE;
37372@@ -2567,7 +2567,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
37373 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
37374 else {
37375 rem_ref_cm_node(cm_core, cm_node);
37376- atomic_inc(&cm_accel_dropped_pkts);
37377+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
37378 dev_kfree_skb_any(skb);
37379 }
37380 break;
37381@@ -2875,7 +2875,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
37382
37383 if ((cm_id) && (cm_id->event_handler)) {
37384 if (issue_disconn) {
37385- atomic_inc(&cm_disconnects);
37386+ atomic_inc_unchecked(&cm_disconnects);
37387 cm_event.event = IW_CM_EVENT_DISCONNECT;
37388 cm_event.status = disconn_status;
37389 cm_event.local_addr = cm_id->local_addr;
37390@@ -2897,7 +2897,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
37391 }
37392
37393 if (issue_close) {
37394- atomic_inc(&cm_closes);
37395+ atomic_inc_unchecked(&cm_closes);
37396 nes_disconnect(nesqp, 1);
37397
37398 cm_id->provider_data = nesqp;
37399@@ -3033,7 +3033,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
37400
37401 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
37402 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
37403- atomic_inc(&cm_accepts);
37404+ atomic_inc_unchecked(&cm_accepts);
37405
37406 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
37407 netdev_refcnt_read(nesvnic->netdev));
37408@@ -3228,7 +3228,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
37409 struct nes_cm_core *cm_core;
37410 u8 *start_buff;
37411
37412- atomic_inc(&cm_rejects);
37413+ atomic_inc_unchecked(&cm_rejects);
37414 cm_node = (struct nes_cm_node *)cm_id->provider_data;
37415 loopback = cm_node->loopbackpartner;
37416 cm_core = cm_node->cm_core;
37417@@ -3288,7 +3288,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
37418 ntohl(cm_id->local_addr.sin_addr.s_addr),
37419 ntohs(cm_id->local_addr.sin_port));
37420
37421- atomic_inc(&cm_connects);
37422+ atomic_inc_unchecked(&cm_connects);
37423 nesqp->active_conn = 1;
37424
37425 /* cache the cm_id in the qp */
37426@@ -3398,7 +3398,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
37427 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
37428 return err;
37429 }
37430- atomic_inc(&cm_listens_created);
37431+ atomic_inc_unchecked(&cm_listens_created);
37432 }
37433
37434 cm_id->add_ref(cm_id);
37435@@ -3499,7 +3499,7 @@ static void cm_event_connected(struct nes_cm_event *event)
37436
37437 if (nesqp->destroyed)
37438 return;
37439- atomic_inc(&cm_connecteds);
37440+ atomic_inc_unchecked(&cm_connecteds);
37441 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
37442 " local port 0x%04X. jiffies = %lu.\n",
37443 nesqp->hwqp.qp_id,
37444@@ -3679,7 +3679,7 @@ static void cm_event_reset(struct nes_cm_event *event)
37445
37446 cm_id->add_ref(cm_id);
37447 ret = cm_id->event_handler(cm_id, &cm_event);
37448- atomic_inc(&cm_closes);
37449+ atomic_inc_unchecked(&cm_closes);
37450 cm_event.event = IW_CM_EVENT_CLOSE;
37451 cm_event.status = 0;
37452 cm_event.provider_data = cm_id->provider_data;
37453@@ -3715,7 +3715,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
37454 return;
37455 cm_id = cm_node->cm_id;
37456
37457- atomic_inc(&cm_connect_reqs);
37458+ atomic_inc_unchecked(&cm_connect_reqs);
37459 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
37460 cm_node, cm_id, jiffies);
37461
37462@@ -3755,7 +3755,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
37463 return;
37464 cm_id = cm_node->cm_id;
37465
37466- atomic_inc(&cm_connect_reqs);
37467+ atomic_inc_unchecked(&cm_connect_reqs);
37468 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
37469 cm_node, cm_id, jiffies);
37470
37471diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
37472index 4166452..fc952c3 100644
37473--- a/drivers/infiniband/hw/nes/nes_mgt.c
37474+++ b/drivers/infiniband/hw/nes/nes_mgt.c
37475@@ -40,8 +40,8 @@
37476 #include "nes.h"
37477 #include "nes_mgt.h"
37478
37479-atomic_t pau_qps_created;
37480-atomic_t pau_qps_destroyed;
37481+atomic_unchecked_t pau_qps_created;
37482+atomic_unchecked_t pau_qps_destroyed;
37483
37484 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
37485 {
37486@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
37487 {
37488 struct sk_buff *skb;
37489 unsigned long flags;
37490- atomic_inc(&pau_qps_destroyed);
37491+ atomic_inc_unchecked(&pau_qps_destroyed);
37492
37493 /* Free packets that have not yet been forwarded */
37494 /* Lock is acquired by skb_dequeue when removing the skb */
37495@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
37496 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
37497 skb_queue_head_init(&nesqp->pau_list);
37498 spin_lock_init(&nesqp->pau_lock);
37499- atomic_inc(&pau_qps_created);
37500+ atomic_inc_unchecked(&pau_qps_created);
37501 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
37502 }
37503
37504diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
37505index 85cf4d1..05d8e71 100644
37506--- a/drivers/infiniband/hw/nes/nes_nic.c
37507+++ b/drivers/infiniband/hw/nes/nes_nic.c
37508@@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
37509 target_stat_values[++index] = mh_detected;
37510 target_stat_values[++index] = mh_pauses_sent;
37511 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
37512- target_stat_values[++index] = atomic_read(&cm_connects);
37513- target_stat_values[++index] = atomic_read(&cm_accepts);
37514- target_stat_values[++index] = atomic_read(&cm_disconnects);
37515- target_stat_values[++index] = atomic_read(&cm_connecteds);
37516- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
37517- target_stat_values[++index] = atomic_read(&cm_rejects);
37518- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
37519- target_stat_values[++index] = atomic_read(&qps_created);
37520- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
37521- target_stat_values[++index] = atomic_read(&qps_destroyed);
37522- target_stat_values[++index] = atomic_read(&cm_closes);
37523+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
37524+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
37525+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
37526+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
37527+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
37528+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
37529+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
37530+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
37531+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
37532+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
37533+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
37534 target_stat_values[++index] = cm_packets_sent;
37535 target_stat_values[++index] = cm_packets_bounced;
37536 target_stat_values[++index] = cm_packets_created;
37537 target_stat_values[++index] = cm_packets_received;
37538 target_stat_values[++index] = cm_packets_dropped;
37539 target_stat_values[++index] = cm_packets_retrans;
37540- target_stat_values[++index] = atomic_read(&cm_listens_created);
37541- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
37542+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
37543+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
37544 target_stat_values[++index] = cm_backlog_drops;
37545- target_stat_values[++index] = atomic_read(&cm_loopbacks);
37546- target_stat_values[++index] = atomic_read(&cm_nodes_created);
37547- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
37548- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
37549- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
37550+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
37551+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
37552+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
37553+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
37554+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
37555 target_stat_values[++index] = nesadapter->free_4kpbl;
37556 target_stat_values[++index] = nesadapter->free_256pbl;
37557 target_stat_values[++index] = int_mod_timer_init;
37558 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
37559 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
37560 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
37561- target_stat_values[++index] = atomic_read(&pau_qps_created);
37562- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
37563+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
37564+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
37565 }
37566
37567 /**
37568diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
37569index 8f67fe2..8960859 100644
37570--- a/drivers/infiniband/hw/nes/nes_verbs.c
37571+++ b/drivers/infiniband/hw/nes/nes_verbs.c
37572@@ -46,9 +46,9 @@
37573
37574 #include <rdma/ib_umem.h>
37575
37576-atomic_t mod_qp_timouts;
37577-atomic_t qps_created;
37578-atomic_t sw_qps_destroyed;
37579+atomic_unchecked_t mod_qp_timouts;
37580+atomic_unchecked_t qps_created;
37581+atomic_unchecked_t sw_qps_destroyed;
37582
37583 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
37584
37585@@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
37586 if (init_attr->create_flags)
37587 return ERR_PTR(-EINVAL);
37588
37589- atomic_inc(&qps_created);
37590+ atomic_inc_unchecked(&qps_created);
37591 switch (init_attr->qp_type) {
37592 case IB_QPT_RC:
37593 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
37594@@ -1465,7 +1465,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
37595 struct iw_cm_event cm_event;
37596 int ret = 0;
37597
37598- atomic_inc(&sw_qps_destroyed);
37599+ atomic_inc_unchecked(&sw_qps_destroyed);
37600 nesqp->destroyed = 1;
37601
37602 /* Blow away the connection if it exists. */
37603diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
37604index 4d11575..3e890e5 100644
37605--- a/drivers/infiniband/hw/qib/qib.h
37606+++ b/drivers/infiniband/hw/qib/qib.h
37607@@ -51,6 +51,7 @@
37608 #include <linux/completion.h>
37609 #include <linux/kref.h>
37610 #include <linux/sched.h>
37611+#include <linux/slab.h>
37612
37613 #include "qib_common.h"
37614 #include "qib_verbs.h"
37615diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
37616index da739d9..da1c7f4 100644
37617--- a/drivers/input/gameport/gameport.c
37618+++ b/drivers/input/gameport/gameport.c
37619@@ -487,14 +487,14 @@ EXPORT_SYMBOL(gameport_set_phys);
37620 */
37621 static void gameport_init_port(struct gameport *gameport)
37622 {
37623- static atomic_t gameport_no = ATOMIC_INIT(0);
37624+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
37625
37626 __module_get(THIS_MODULE);
37627
37628 mutex_init(&gameport->drv_mutex);
37629 device_initialize(&gameport->dev);
37630 dev_set_name(&gameport->dev, "gameport%lu",
37631- (unsigned long)atomic_inc_return(&gameport_no) - 1);
37632+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
37633 gameport->dev.bus = &gameport_bus;
37634 gameport->dev.release = gameport_release_port;
37635 if (gameport->parent)
37636diff --git a/drivers/input/input.c b/drivers/input/input.c
37637index c044699..174d71a 100644
37638--- a/drivers/input/input.c
37639+++ b/drivers/input/input.c
37640@@ -2019,7 +2019,7 @@ static void devm_input_device_unregister(struct device *dev, void *res)
37641 */
37642 int input_register_device(struct input_dev *dev)
37643 {
37644- static atomic_t input_no = ATOMIC_INIT(0);
37645+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
37646 struct input_devres *devres = NULL;
37647 struct input_handler *handler;
37648 unsigned int packet_size;
37649@@ -2074,7 +2074,7 @@ int input_register_device(struct input_dev *dev)
37650 dev->setkeycode = input_default_setkeycode;
37651
37652 dev_set_name(&dev->dev, "input%ld",
37653- (unsigned long) atomic_inc_return(&input_no) - 1);
37654+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
37655
37656 error = device_add(&dev->dev);
37657 if (error)
37658diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
37659index 04c69af..5f92d00 100644
37660--- a/drivers/input/joystick/sidewinder.c
37661+++ b/drivers/input/joystick/sidewinder.c
37662@@ -30,6 +30,7 @@
37663 #include <linux/kernel.h>
37664 #include <linux/module.h>
37665 #include <linux/slab.h>
37666+#include <linux/sched.h>
37667 #include <linux/init.h>
37668 #include <linux/input.h>
37669 #include <linux/gameport.h>
37670diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
37671index d6cbfe9..6225402 100644
37672--- a/drivers/input/joystick/xpad.c
37673+++ b/drivers/input/joystick/xpad.c
37674@@ -735,7 +735,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
37675
37676 static int xpad_led_probe(struct usb_xpad *xpad)
37677 {
37678- static atomic_t led_seq = ATOMIC_INIT(0);
37679+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
37680 long led_no;
37681 struct xpad_led *led;
37682 struct led_classdev *led_cdev;
37683@@ -748,7 +748,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
37684 if (!led)
37685 return -ENOMEM;
37686
37687- led_no = (long)atomic_inc_return(&led_seq) - 1;
37688+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
37689
37690 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
37691 led->xpad = xpad;
37692diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
37693index 2f0b39d..7370f13 100644
37694--- a/drivers/input/mouse/psmouse.h
37695+++ b/drivers/input/mouse/psmouse.h
37696@@ -116,7 +116,7 @@ struct psmouse_attribute {
37697 ssize_t (*set)(struct psmouse *psmouse, void *data,
37698 const char *buf, size_t count);
37699 bool protect;
37700-};
37701+} __do_const;
37702 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
37703
37704 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
37705diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
37706index 4c842c3..590b0bf 100644
37707--- a/drivers/input/mousedev.c
37708+++ b/drivers/input/mousedev.c
37709@@ -738,7 +738,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
37710
37711 spin_unlock_irq(&client->packet_lock);
37712
37713- if (copy_to_user(buffer, data, count))
37714+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
37715 return -EFAULT;
37716
37717 return count;
37718diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
37719index 25fc597..558bf3b3 100644
37720--- a/drivers/input/serio/serio.c
37721+++ b/drivers/input/serio/serio.c
37722@@ -496,7 +496,7 @@ static void serio_release_port(struct device *dev)
37723 */
37724 static void serio_init_port(struct serio *serio)
37725 {
37726- static atomic_t serio_no = ATOMIC_INIT(0);
37727+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
37728
37729 __module_get(THIS_MODULE);
37730
37731@@ -507,7 +507,7 @@ static void serio_init_port(struct serio *serio)
37732 mutex_init(&serio->drv_mutex);
37733 device_initialize(&serio->dev);
37734 dev_set_name(&serio->dev, "serio%ld",
37735- (long)atomic_inc_return(&serio_no) - 1);
37736+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
37737 serio->dev.bus = &serio_bus;
37738 serio->dev.release = serio_release_port;
37739 serio->dev.groups = serio_device_attr_groups;
37740diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
37741index b972d43..8943713 100644
37742--- a/drivers/iommu/iommu.c
37743+++ b/drivers/iommu/iommu.c
37744@@ -554,7 +554,7 @@ static struct notifier_block iommu_bus_nb = {
37745 static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
37746 {
37747 bus_register_notifier(bus, &iommu_bus_nb);
37748- bus_for_each_dev(bus, NULL, ops, add_iommu_group);
37749+ bus_for_each_dev(bus, NULL, (void *)ops, add_iommu_group);
37750 }
37751
37752 /**
37753diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
37754index 7c11ff3..a2a0457 100644
37755--- a/drivers/iommu/irq_remapping.c
37756+++ b/drivers/iommu/irq_remapping.c
37757@@ -348,7 +348,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
37758 void panic_if_irq_remap(const char *msg)
37759 {
37760 if (irq_remapping_enabled)
37761- panic(msg);
37762+ panic("%s", msg);
37763 }
37764
37765 static void ir_ack_apic_edge(struct irq_data *data)
37766@@ -369,10 +369,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
37767
37768 void irq_remap_modify_chip_defaults(struct irq_chip *chip)
37769 {
37770- chip->irq_print_chip = ir_print_prefix;
37771- chip->irq_ack = ir_ack_apic_edge;
37772- chip->irq_eoi = ir_ack_apic_level;
37773- chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
37774+ pax_open_kernel();
37775+ *(void **)&chip->irq_print_chip = ir_print_prefix;
37776+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
37777+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
37778+ *(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
37779+ pax_close_kernel();
37780 }
37781
37782 bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
37783diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
37784index fc6aebf..762c5f5 100644
37785--- a/drivers/irqchip/irq-gic.c
37786+++ b/drivers/irqchip/irq-gic.c
37787@@ -83,7 +83,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
37788 * Supported arch specific GIC irq extension.
37789 * Default make them NULL.
37790 */
37791-struct irq_chip gic_arch_extn = {
37792+irq_chip_no_const gic_arch_extn = {
37793 .irq_eoi = NULL,
37794 .irq_mask = NULL,
37795 .irq_unmask = NULL,
37796@@ -332,7 +332,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
37797 chained_irq_exit(chip, desc);
37798 }
37799
37800-static struct irq_chip gic_chip = {
37801+static irq_chip_no_const gic_chip __read_only = {
37802 .name = "GIC",
37803 .irq_mask = gic_mask_irq,
37804 .irq_unmask = gic_unmask_irq,
37805diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
37806index 89562a8..218999b 100644
37807--- a/drivers/isdn/capi/capi.c
37808+++ b/drivers/isdn/capi/capi.c
37809@@ -81,8 +81,8 @@ struct capiminor {
37810
37811 struct capi20_appl *ap;
37812 u32 ncci;
37813- atomic_t datahandle;
37814- atomic_t msgid;
37815+ atomic_unchecked_t datahandle;
37816+ atomic_unchecked_t msgid;
37817
37818 struct tty_port port;
37819 int ttyinstop;
37820@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
37821 capimsg_setu16(s, 2, mp->ap->applid);
37822 capimsg_setu8 (s, 4, CAPI_DATA_B3);
37823 capimsg_setu8 (s, 5, CAPI_RESP);
37824- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
37825+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
37826 capimsg_setu32(s, 8, mp->ncci);
37827 capimsg_setu16(s, 12, datahandle);
37828 }
37829@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
37830 mp->outbytes -= len;
37831 spin_unlock_bh(&mp->outlock);
37832
37833- datahandle = atomic_inc_return(&mp->datahandle);
37834+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
37835 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
37836 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
37837 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
37838 capimsg_setu16(skb->data, 2, mp->ap->applid);
37839 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
37840 capimsg_setu8 (skb->data, 5, CAPI_REQ);
37841- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
37842+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
37843 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
37844 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
37845 capimsg_setu16(skb->data, 16, len); /* Data length */
37846diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
37847index 9b1b274..c123709 100644
37848--- a/drivers/isdn/capi/kcapi.c
37849+++ b/drivers/isdn/capi/kcapi.c
37850@@ -93,7 +93,7 @@ capi_ctr_put(struct capi_ctr *ctr)
37851
37852 static inline struct capi_ctr *get_capi_ctr_by_nr(u16 contr)
37853 {
37854- if (contr - 1 >= CAPI_MAXCONTR)
37855+ if (contr < 1 || contr - 1 >= CAPI_MAXCONTR)
37856 return NULL;
37857
37858 return capi_controller[contr - 1];
37859@@ -103,7 +103,7 @@ static inline struct capi20_appl *__get_capi_appl_by_nr(u16 applid)
37860 {
37861 lockdep_assert_held(&capi_controller_lock);
37862
37863- if (applid - 1 >= CAPI_MAXAPPL)
37864+ if (applid < 1 || applid - 1 >= CAPI_MAXAPPL)
37865 return NULL;
37866
37867 return capi_applications[applid - 1];
37868@@ -111,7 +111,7 @@ static inline struct capi20_appl *__get_capi_appl_by_nr(u16 applid)
37869
37870 static inline struct capi20_appl *get_capi_appl_by_nr(u16 applid)
37871 {
37872- if (applid - 1 >= CAPI_MAXAPPL)
37873+ if (applid < 1 || applid - 1 >= CAPI_MAXAPPL)
37874 return NULL;
37875
37876 return rcu_dereference(capi_applications[applid - 1]);
37877diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
37878index e2b5396..c5486dc 100644
37879--- a/drivers/isdn/gigaset/interface.c
37880+++ b/drivers/isdn/gigaset/interface.c
37881@@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
37882 }
37883 tty->driver_data = cs;
37884
37885- ++cs->port.count;
37886+ atomic_inc(&cs->port.count);
37887
37888- if (cs->port.count == 1) {
37889+ if (atomic_read(&cs->port.count) == 1) {
37890 tty_port_tty_set(&cs->port, tty);
37891 cs->port.low_latency = 1;
37892 }
37893@@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
37894
37895 if (!cs->connected)
37896 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
37897- else if (!cs->port.count)
37898+ else if (!atomic_read(&cs->port.count))
37899 dev_warn(cs->dev, "%s: device not opened\n", __func__);
37900- else if (!--cs->port.count)
37901+ else if (!atomic_dec_return(&cs->port.count))
37902 tty_port_tty_set(&cs->port, NULL);
37903
37904 mutex_unlock(&cs->mutex);
37905diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
37906index 821f7ac..28d4030 100644
37907--- a/drivers/isdn/hardware/avm/b1.c
37908+++ b/drivers/isdn/hardware/avm/b1.c
37909@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
37910 }
37911 if (left) {
37912 if (t4file->user) {
37913- if (copy_from_user(buf, dp, left))
37914+ if (left > sizeof buf || copy_from_user(buf, dp, left))
37915 return -EFAULT;
37916 } else {
37917 memcpy(buf, dp, left);
37918@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
37919 }
37920 if (left) {
37921 if (config->user) {
37922- if (copy_from_user(buf, dp, left))
37923+ if (left > sizeof buf || copy_from_user(buf, dp, left))
37924 return -EFAULT;
37925 } else {
37926 memcpy(buf, dp, left);
37927diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
37928index ebaebdf..acd4405 100644
37929--- a/drivers/isdn/i4l/isdn_tty.c
37930+++ b/drivers/isdn/i4l/isdn_tty.c
37931@@ -1511,9 +1511,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
37932
37933 #ifdef ISDN_DEBUG_MODEM_OPEN
37934 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
37935- port->count);
37936+ atomic_read(&port->count));
37937 #endif
37938- port->count++;
37939+ atomic_inc(&port->count);
37940 port->tty = tty;
37941 /*
37942 * Start up serial port
37943@@ -1557,7 +1557,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
37944 #endif
37945 return;
37946 }
37947- if ((tty->count == 1) && (port->count != 1)) {
37948+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
37949 /*
37950 * Uh, oh. tty->count is 1, which means that the tty
37951 * structure will be freed. Info->count should always
37952@@ -1566,15 +1566,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
37953 * serial port won't be shutdown.
37954 */
37955 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
37956- "info->count is %d\n", port->count);
37957- port->count = 1;
37958+ "info->count is %d\n", atomic_read(&port->count));
37959+ atomic_set(&port->count, 1);
37960 }
37961- if (--port->count < 0) {
37962+ if (atomic_dec_return(&port->count) < 0) {
37963 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
37964- info->line, port->count);
37965- port->count = 0;
37966+ info->line, atomic_read(&port->count));
37967+ atomic_set(&port->count, 0);
37968 }
37969- if (port->count) {
37970+ if (atomic_read(&port->count)) {
37971 #ifdef ISDN_DEBUG_MODEM_OPEN
37972 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
37973 #endif
37974@@ -1628,7 +1628,7 @@ isdn_tty_hangup(struct tty_struct *tty)
37975 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
37976 return;
37977 isdn_tty_shutdown(info);
37978- port->count = 0;
37979+ atomic_set(&port->count, 0);
37980 port->flags &= ~ASYNC_NORMAL_ACTIVE;
37981 port->tty = NULL;
37982 wake_up_interruptible(&port->open_wait);
37983@@ -1973,7 +1973,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
37984 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
37985 modem_info *info = &dev->mdm.info[i];
37986
37987- if (info->port.count == 0)
37988+ if (atomic_read(&info->port.count) == 0)
37989 continue;
37990 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
37991 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
37992diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
37993index e74df7c..03a03ba 100644
37994--- a/drivers/isdn/icn/icn.c
37995+++ b/drivers/isdn/icn/icn.c
37996@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
37997 if (count > len)
37998 count = len;
37999 if (user) {
38000- if (copy_from_user(msg, buf, count))
38001+ if (count > sizeof msg || copy_from_user(msg, buf, count))
38002 return -EFAULT;
38003 } else
38004 memcpy(msg, buf, count);
38005diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
38006index 6a8405d..0bd1c7e 100644
38007--- a/drivers/leds/leds-clevo-mail.c
38008+++ b/drivers/leds/leds-clevo-mail.c
38009@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
38010 * detected as working, but in reality it is not) as low as
38011 * possible.
38012 */
38013-static struct dmi_system_id __initdata clevo_mail_led_dmi_table[] = {
38014+static const struct dmi_system_id __initconst clevo_mail_led_dmi_table[] = {
38015 {
38016 .callback = clevo_mail_led_dmi_callback,
38017 .ident = "Clevo D410J",
38018diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
38019index 64e204e..c6bf189 100644
38020--- a/drivers/leds/leds-ss4200.c
38021+++ b/drivers/leds/leds-ss4200.c
38022@@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
38023 * detected as working, but in reality it is not) as low as
38024 * possible.
38025 */
38026-static struct dmi_system_id __initdata nas_led_whitelist[] = {
38027+static const struct dmi_system_id __initconst nas_led_whitelist[] = {
38028 {
38029 .callback = ss4200_led_dmi_callback,
38030 .ident = "Intel SS4200-E",
38031diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
38032index a5ebc00..982886f 100644
38033--- a/drivers/lguest/core.c
38034+++ b/drivers/lguest/core.c
38035@@ -92,9 +92,17 @@ static __init int map_switcher(void)
38036 * it's worked so far. The end address needs +1 because __get_vm_area
38037 * allocates an extra guard page, so we need space for that.
38038 */
38039+
38040+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
38041+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
38042+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
38043+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
38044+#else
38045 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
38046 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
38047 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
38048+#endif
38049+
38050 if (!switcher_vma) {
38051 err = -ENOMEM;
38052 printk("lguest: could not map switcher pages high\n");
38053@@ -119,7 +127,7 @@ static __init int map_switcher(void)
38054 * Now the Switcher is mapped at the right address, we can't fail!
38055 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
38056 */
38057- memcpy(switcher_vma->addr, start_switcher_text,
38058+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
38059 end_switcher_text - start_switcher_text);
38060
38061 printk(KERN_INFO "lguest: mapped switcher at %p\n",
38062diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
38063index 3b62be16..e33134a 100644
38064--- a/drivers/lguest/page_tables.c
38065+++ b/drivers/lguest/page_tables.c
38066@@ -532,7 +532,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
38067 /*:*/
38068
38069 #ifdef CONFIG_X86_PAE
38070-static void release_pmd(pmd_t *spmd)
38071+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
38072 {
38073 /* If the entry's not present, there's nothing to release. */
38074 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
38075diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
38076index 4af12e1..0e89afe 100644
38077--- a/drivers/lguest/x86/core.c
38078+++ b/drivers/lguest/x86/core.c
38079@@ -59,7 +59,7 @@ static struct {
38080 /* Offset from where switcher.S was compiled to where we've copied it */
38081 static unsigned long switcher_offset(void)
38082 {
38083- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
38084+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
38085 }
38086
38087 /* This cpu's struct lguest_pages. */
38088@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
38089 * These copies are pretty cheap, so we do them unconditionally: */
38090 /* Save the current Host top-level page directory.
38091 */
38092+
38093+#ifdef CONFIG_PAX_PER_CPU_PGD
38094+ pages->state.host_cr3 = read_cr3();
38095+#else
38096 pages->state.host_cr3 = __pa(current->mm->pgd);
38097+#endif
38098+
38099 /*
38100 * Set up the Guest's page tables to see this CPU's pages (and no
38101 * other CPU's pages).
38102@@ -476,7 +482,7 @@ void __init lguest_arch_host_init(void)
38103 * compiled-in switcher code and the high-mapped copy we just made.
38104 */
38105 for (i = 0; i < IDT_ENTRIES; i++)
38106- default_idt_entries[i] += switcher_offset();
38107+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
38108
38109 /*
38110 * Set up the Switcher's per-cpu areas.
38111@@ -559,7 +565,7 @@ void __init lguest_arch_host_init(void)
38112 * it will be undisturbed when we switch. To change %cs and jump we
38113 * need this structure to feed to Intel's "lcall" instruction.
38114 */
38115- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
38116+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
38117 lguest_entry.segment = LGUEST_CS;
38118
38119 /*
38120diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
38121index 40634b0..4f5855e 100644
38122--- a/drivers/lguest/x86/switcher_32.S
38123+++ b/drivers/lguest/x86/switcher_32.S
38124@@ -87,6 +87,7 @@
38125 #include <asm/page.h>
38126 #include <asm/segment.h>
38127 #include <asm/lguest.h>
38128+#include <asm/processor-flags.h>
38129
38130 // We mark the start of the code to copy
38131 // It's placed in .text tho it's never run here
38132@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
38133 // Changes type when we load it: damn Intel!
38134 // For after we switch over our page tables
38135 // That entry will be read-only: we'd crash.
38136+
38137+#ifdef CONFIG_PAX_KERNEXEC
38138+ mov %cr0, %edx
38139+ xor $X86_CR0_WP, %edx
38140+ mov %edx, %cr0
38141+#endif
38142+
38143 movl $(GDT_ENTRY_TSS*8), %edx
38144 ltr %dx
38145
38146@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
38147 // Let's clear it again for our return.
38148 // The GDT descriptor of the Host
38149 // Points to the table after two "size" bytes
38150- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
38151+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
38152 // Clear "used" from type field (byte 5, bit 2)
38153- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
38154+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
38155+
38156+#ifdef CONFIG_PAX_KERNEXEC
38157+ mov %cr0, %eax
38158+ xor $X86_CR0_WP, %eax
38159+ mov %eax, %cr0
38160+#endif
38161
38162 // Once our page table's switched, the Guest is live!
38163 // The Host fades as we run this final step.
38164@@ -295,13 +309,12 @@ deliver_to_host:
38165 // I consulted gcc, and it gave
38166 // These instructions, which I gladly credit:
38167 leal (%edx,%ebx,8), %eax
38168- movzwl (%eax),%edx
38169- movl 4(%eax), %eax
38170- xorw %ax, %ax
38171- orl %eax, %edx
38172+ movl 4(%eax), %edx
38173+ movw (%eax), %dx
38174 // Now the address of the handler's in %edx
38175 // We call it now: its "iret" drops us home.
38176- jmp *%edx
38177+ ljmp $__KERNEL_CS, $1f
38178+1: jmp *%edx
38179
38180 // Every interrupt can come to us here
38181 // But we must truly tell each apart.
38182diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
38183index 4fd9d6a..834fa03 100644
38184--- a/drivers/md/bitmap.c
38185+++ b/drivers/md/bitmap.c
38186@@ -1779,7 +1779,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
38187 chunk_kb ? "KB" : "B");
38188 if (bitmap->storage.file) {
38189 seq_printf(seq, ", file: ");
38190- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
38191+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
38192 }
38193
38194 seq_printf(seq, "\n");
38195diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
38196index aa04f02..2a1309e 100644
38197--- a/drivers/md/dm-ioctl.c
38198+++ b/drivers/md/dm-ioctl.c
38199@@ -1694,7 +1694,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
38200 cmd == DM_LIST_VERSIONS_CMD)
38201 return 0;
38202
38203- if ((cmd == DM_DEV_CREATE_CMD)) {
38204+ if (cmd == DM_DEV_CREATE_CMD) {
38205 if (!*param->name) {
38206 DMWARN("name not supplied when creating device");
38207 return -EINVAL;
38208diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
38209index d053098..05cc375 100644
38210--- a/drivers/md/dm-raid1.c
38211+++ b/drivers/md/dm-raid1.c
38212@@ -40,7 +40,7 @@ enum dm_raid1_error {
38213
38214 struct mirror {
38215 struct mirror_set *ms;
38216- atomic_t error_count;
38217+ atomic_unchecked_t error_count;
38218 unsigned long error_type;
38219 struct dm_dev *dev;
38220 sector_t offset;
38221@@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
38222 struct mirror *m;
38223
38224 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
38225- if (!atomic_read(&m->error_count))
38226+ if (!atomic_read_unchecked(&m->error_count))
38227 return m;
38228
38229 return NULL;
38230@@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
38231 * simple way to tell if a device has encountered
38232 * errors.
38233 */
38234- atomic_inc(&m->error_count);
38235+ atomic_inc_unchecked(&m->error_count);
38236
38237 if (test_and_set_bit(error_type, &m->error_type))
38238 return;
38239@@ -409,7 +409,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
38240 struct mirror *m = get_default_mirror(ms);
38241
38242 do {
38243- if (likely(!atomic_read(&m->error_count)))
38244+ if (likely(!atomic_read_unchecked(&m->error_count)))
38245 return m;
38246
38247 if (m-- == ms->mirror)
38248@@ -423,7 +423,7 @@ static int default_ok(struct mirror *m)
38249 {
38250 struct mirror *default_mirror = get_default_mirror(m->ms);
38251
38252- return !atomic_read(&default_mirror->error_count);
38253+ return !atomic_read_unchecked(&default_mirror->error_count);
38254 }
38255
38256 static int mirror_available(struct mirror_set *ms, struct bio *bio)
38257@@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
38258 */
38259 if (likely(region_in_sync(ms, region, 1)))
38260 m = choose_mirror(ms, bio->bi_sector);
38261- else if (m && atomic_read(&m->error_count))
38262+ else if (m && atomic_read_unchecked(&m->error_count))
38263 m = NULL;
38264
38265 if (likely(m))
38266@@ -927,7 +927,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
38267 }
38268
38269 ms->mirror[mirror].ms = ms;
38270- atomic_set(&(ms->mirror[mirror].error_count), 0);
38271+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
38272 ms->mirror[mirror].error_type = 0;
38273 ms->mirror[mirror].offset = offset;
38274
38275@@ -1340,7 +1340,7 @@ static void mirror_resume(struct dm_target *ti)
38276 */
38277 static char device_status_char(struct mirror *m)
38278 {
38279- if (!atomic_read(&(m->error_count)))
38280+ if (!atomic_read_unchecked(&(m->error_count)))
38281 return 'A';
38282
38283 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
38284diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
38285index 7b8b2b9..9c7d145 100644
38286--- a/drivers/md/dm-stripe.c
38287+++ b/drivers/md/dm-stripe.c
38288@@ -20,7 +20,7 @@ struct stripe {
38289 struct dm_dev *dev;
38290 sector_t physical_start;
38291
38292- atomic_t error_count;
38293+ atomic_unchecked_t error_count;
38294 };
38295
38296 struct stripe_c {
38297@@ -185,7 +185,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
38298 kfree(sc);
38299 return r;
38300 }
38301- atomic_set(&(sc->stripe[i].error_count), 0);
38302+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
38303 }
38304
38305 ti->private = sc;
38306@@ -326,7 +326,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
38307 DMEMIT("%d ", sc->stripes);
38308 for (i = 0; i < sc->stripes; i++) {
38309 DMEMIT("%s ", sc->stripe[i].dev->name);
38310- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
38311+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
38312 'D' : 'A';
38313 }
38314 buffer[i] = '\0';
38315@@ -371,8 +371,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
38316 */
38317 for (i = 0; i < sc->stripes; i++)
38318 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
38319- atomic_inc(&(sc->stripe[i].error_count));
38320- if (atomic_read(&(sc->stripe[i].error_count)) <
38321+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
38322+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
38323 DM_IO_ERROR_THRESHOLD)
38324 schedule_work(&sc->trigger_event);
38325 }
38326diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
38327index 1ff252a..ee384c1 100644
38328--- a/drivers/md/dm-table.c
38329+++ b/drivers/md/dm-table.c
38330@@ -389,7 +389,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
38331 if (!dev_size)
38332 return 0;
38333
38334- if ((start >= dev_size) || (start + len > dev_size)) {
38335+ if ((start >= dev_size) || (len > dev_size - start)) {
38336 DMWARN("%s: %s too small for target: "
38337 "start=%llu, len=%llu, dev_size=%llu",
38338 dm_device_name(ti->table->md), bdevname(bdev, b),
38339diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
38340index 00cee02..b89a29d 100644
38341--- a/drivers/md/dm-thin-metadata.c
38342+++ b/drivers/md/dm-thin-metadata.c
38343@@ -397,7 +397,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
38344 {
38345 pmd->info.tm = pmd->tm;
38346 pmd->info.levels = 2;
38347- pmd->info.value_type.context = pmd->data_sm;
38348+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
38349 pmd->info.value_type.size = sizeof(__le64);
38350 pmd->info.value_type.inc = data_block_inc;
38351 pmd->info.value_type.dec = data_block_dec;
38352@@ -416,7 +416,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
38353
38354 pmd->bl_info.tm = pmd->tm;
38355 pmd->bl_info.levels = 1;
38356- pmd->bl_info.value_type.context = pmd->data_sm;
38357+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
38358 pmd->bl_info.value_type.size = sizeof(__le64);
38359 pmd->bl_info.value_type.inc = data_block_inc;
38360 pmd->bl_info.value_type.dec = data_block_dec;
38361diff --git a/drivers/md/dm.c b/drivers/md/dm.c
38362index 9a0bdad..4df9543 100644
38363--- a/drivers/md/dm.c
38364+++ b/drivers/md/dm.c
38365@@ -169,9 +169,9 @@ struct mapped_device {
38366 /*
38367 * Event handling.
38368 */
38369- atomic_t event_nr;
38370+ atomic_unchecked_t event_nr;
38371 wait_queue_head_t eventq;
38372- atomic_t uevent_seq;
38373+ atomic_unchecked_t uevent_seq;
38374 struct list_head uevent_list;
38375 spinlock_t uevent_lock; /* Protect access to uevent_list */
38376
38377@@ -1879,8 +1879,8 @@ static struct mapped_device *alloc_dev(int minor)
38378 rwlock_init(&md->map_lock);
38379 atomic_set(&md->holders, 1);
38380 atomic_set(&md->open_count, 0);
38381- atomic_set(&md->event_nr, 0);
38382- atomic_set(&md->uevent_seq, 0);
38383+ atomic_set_unchecked(&md->event_nr, 0);
38384+ atomic_set_unchecked(&md->uevent_seq, 0);
38385 INIT_LIST_HEAD(&md->uevent_list);
38386 spin_lock_init(&md->uevent_lock);
38387
38388@@ -2028,7 +2028,7 @@ static void event_callback(void *context)
38389
38390 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
38391
38392- atomic_inc(&md->event_nr);
38393+ atomic_inc_unchecked(&md->event_nr);
38394 wake_up(&md->eventq);
38395 }
38396
38397@@ -2685,18 +2685,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
38398
38399 uint32_t dm_next_uevent_seq(struct mapped_device *md)
38400 {
38401- return atomic_add_return(1, &md->uevent_seq);
38402+ return atomic_add_return_unchecked(1, &md->uevent_seq);
38403 }
38404
38405 uint32_t dm_get_event_nr(struct mapped_device *md)
38406 {
38407- return atomic_read(&md->event_nr);
38408+ return atomic_read_unchecked(&md->event_nr);
38409 }
38410
38411 int dm_wait_event(struct mapped_device *md, int event_nr)
38412 {
38413 return wait_event_interruptible(md->eventq,
38414- (event_nr != atomic_read(&md->event_nr)));
38415+ (event_nr != atomic_read_unchecked(&md->event_nr)));
38416 }
38417
38418 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
38419diff --git a/drivers/md/md.c b/drivers/md/md.c
38420index a4a93b9..4747b63 100644
38421--- a/drivers/md/md.c
38422+++ b/drivers/md/md.c
38423@@ -240,10 +240,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
38424 * start build, activate spare
38425 */
38426 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
38427-static atomic_t md_event_count;
38428+static atomic_unchecked_t md_event_count;
38429 void md_new_event(struct mddev *mddev)
38430 {
38431- atomic_inc(&md_event_count);
38432+ atomic_inc_unchecked(&md_event_count);
38433 wake_up(&md_event_waiters);
38434 }
38435 EXPORT_SYMBOL_GPL(md_new_event);
38436@@ -253,7 +253,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
38437 */
38438 static void md_new_event_inintr(struct mddev *mddev)
38439 {
38440- atomic_inc(&md_event_count);
38441+ atomic_inc_unchecked(&md_event_count);
38442 wake_up(&md_event_waiters);
38443 }
38444
38445@@ -1507,7 +1507,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
38446 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
38447 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
38448 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
38449- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
38450+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
38451
38452 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
38453 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
38454@@ -1751,7 +1751,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
38455 else
38456 sb->resync_offset = cpu_to_le64(0);
38457
38458- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
38459+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
38460
38461 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
38462 sb->size = cpu_to_le64(mddev->dev_sectors);
38463@@ -2751,7 +2751,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
38464 static ssize_t
38465 errors_show(struct md_rdev *rdev, char *page)
38466 {
38467- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
38468+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
38469 }
38470
38471 static ssize_t
38472@@ -2760,7 +2760,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
38473 char *e;
38474 unsigned long n = simple_strtoul(buf, &e, 10);
38475 if (*buf && (*e == 0 || *e == '\n')) {
38476- atomic_set(&rdev->corrected_errors, n);
38477+ atomic_set_unchecked(&rdev->corrected_errors, n);
38478 return len;
38479 }
38480 return -EINVAL;
38481@@ -3210,8 +3210,8 @@ int md_rdev_init(struct md_rdev *rdev)
38482 rdev->sb_loaded = 0;
38483 rdev->bb_page = NULL;
38484 atomic_set(&rdev->nr_pending, 0);
38485- atomic_set(&rdev->read_errors, 0);
38486- atomic_set(&rdev->corrected_errors, 0);
38487+ atomic_set_unchecked(&rdev->read_errors, 0);
38488+ atomic_set_unchecked(&rdev->corrected_errors, 0);
38489
38490 INIT_LIST_HEAD(&rdev->same_set);
38491 init_waitqueue_head(&rdev->blocked_wait);
38492@@ -6994,7 +6994,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
38493
38494 spin_unlock(&pers_lock);
38495 seq_printf(seq, "\n");
38496- seq->poll_event = atomic_read(&md_event_count);
38497+ seq->poll_event = atomic_read_unchecked(&md_event_count);
38498 return 0;
38499 }
38500 if (v == (void*)2) {
38501@@ -7097,7 +7097,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
38502 return error;
38503
38504 seq = file->private_data;
38505- seq->poll_event = atomic_read(&md_event_count);
38506+ seq->poll_event = atomic_read_unchecked(&md_event_count);
38507 return error;
38508 }
38509
38510@@ -7111,7 +7111,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
38511 /* always allow read */
38512 mask = POLLIN | POLLRDNORM;
38513
38514- if (seq->poll_event != atomic_read(&md_event_count))
38515+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
38516 mask |= POLLERR | POLLPRI;
38517 return mask;
38518 }
38519@@ -7155,7 +7155,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
38520 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
38521 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
38522 (int)part_stat_read(&disk->part0, sectors[1]) -
38523- atomic_read(&disk->sync_io);
38524+ atomic_read_unchecked(&disk->sync_io);
38525 /* sync IO will cause sync_io to increase before the disk_stats
38526 * as sync_io is counted when a request starts, and
38527 * disk_stats is counted when it completes.
38528diff --git a/drivers/md/md.h b/drivers/md/md.h
38529index d90fb1a..4174a2b 100644
38530--- a/drivers/md/md.h
38531+++ b/drivers/md/md.h
38532@@ -94,13 +94,13 @@ struct md_rdev {
38533 * only maintained for arrays that
38534 * support hot removal
38535 */
38536- atomic_t read_errors; /* number of consecutive read errors that
38537+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
38538 * we have tried to ignore.
38539 */
38540 struct timespec last_read_error; /* monotonic time since our
38541 * last read error
38542 */
38543- atomic_t corrected_errors; /* number of corrected read errors,
38544+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
38545 * for reporting to userspace and storing
38546 * in superblock.
38547 */
38548@@ -434,7 +434,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
38549
38550 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
38551 {
38552- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
38553+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
38554 }
38555
38556 struct md_personality
38557diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
38558index 1cbfc6b..56e1dbb 100644
38559--- a/drivers/md/persistent-data/dm-space-map.h
38560+++ b/drivers/md/persistent-data/dm-space-map.h
38561@@ -60,6 +60,7 @@ struct dm_space_map {
38562 int (*root_size)(struct dm_space_map *sm, size_t *result);
38563 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
38564 };
38565+typedef struct dm_space_map __no_const dm_space_map_no_const;
38566
38567 /*----------------------------------------------------------------*/
38568
38569diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
38570index 7116798..c81390c 100644
38571--- a/drivers/md/raid1.c
38572+++ b/drivers/md/raid1.c
38573@@ -1836,7 +1836,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
38574 if (r1_sync_page_io(rdev, sect, s,
38575 bio->bi_io_vec[idx].bv_page,
38576 READ) != 0)
38577- atomic_add(s, &rdev->corrected_errors);
38578+ atomic_add_unchecked(s, &rdev->corrected_errors);
38579 }
38580 sectors -= s;
38581 sect += s;
38582@@ -2058,7 +2058,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
38583 test_bit(In_sync, &rdev->flags)) {
38584 if (r1_sync_page_io(rdev, sect, s,
38585 conf->tmppage, READ)) {
38586- atomic_add(s, &rdev->corrected_errors);
38587+ atomic_add_unchecked(s, &rdev->corrected_errors);
38588 printk(KERN_INFO
38589 "md/raid1:%s: read error corrected "
38590 "(%d sectors at %llu on %s)\n",
38591diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
38592index e4ea992..d234520 100644
38593--- a/drivers/md/raid10.c
38594+++ b/drivers/md/raid10.c
38595@@ -1942,7 +1942,7 @@ static void end_sync_read(struct bio *bio, int error)
38596 /* The write handler will notice the lack of
38597 * R10BIO_Uptodate and record any errors etc
38598 */
38599- atomic_add(r10_bio->sectors,
38600+ atomic_add_unchecked(r10_bio->sectors,
38601 &conf->mirrors[d].rdev->corrected_errors);
38602
38603 /* for reconstruct, we always reschedule after a read.
38604@@ -2291,7 +2291,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
38605 {
38606 struct timespec cur_time_mon;
38607 unsigned long hours_since_last;
38608- unsigned int read_errors = atomic_read(&rdev->read_errors);
38609+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
38610
38611 ktime_get_ts(&cur_time_mon);
38612
38613@@ -2313,9 +2313,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
38614 * overflowing the shift of read_errors by hours_since_last.
38615 */
38616 if (hours_since_last >= 8 * sizeof(read_errors))
38617- atomic_set(&rdev->read_errors, 0);
38618+ atomic_set_unchecked(&rdev->read_errors, 0);
38619 else
38620- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
38621+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
38622 }
38623
38624 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
38625@@ -2369,8 +2369,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
38626 return;
38627
38628 check_decay_read_errors(mddev, rdev);
38629- atomic_inc(&rdev->read_errors);
38630- if (atomic_read(&rdev->read_errors) > max_read_errors) {
38631+ atomic_inc_unchecked(&rdev->read_errors);
38632+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
38633 char b[BDEVNAME_SIZE];
38634 bdevname(rdev->bdev, b);
38635
38636@@ -2378,7 +2378,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
38637 "md/raid10:%s: %s: Raid device exceeded "
38638 "read_error threshold [cur %d:max %d]\n",
38639 mdname(mddev), b,
38640- atomic_read(&rdev->read_errors), max_read_errors);
38641+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
38642 printk(KERN_NOTICE
38643 "md/raid10:%s: %s: Failing raid device\n",
38644 mdname(mddev), b);
38645@@ -2533,7 +2533,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
38646 sect +
38647 choose_data_offset(r10_bio, rdev)),
38648 bdevname(rdev->bdev, b));
38649- atomic_add(s, &rdev->corrected_errors);
38650+ atomic_add_unchecked(s, &rdev->corrected_errors);
38651 }
38652
38653 rdev_dec_pending(rdev, mddev);
38654diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
38655index 251ab64..ed23a18 100644
38656--- a/drivers/md/raid5.c
38657+++ b/drivers/md/raid5.c
38658@@ -1763,21 +1763,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
38659 mdname(conf->mddev), STRIPE_SECTORS,
38660 (unsigned long long)s,
38661 bdevname(rdev->bdev, b));
38662- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
38663+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
38664 clear_bit(R5_ReadError, &sh->dev[i].flags);
38665 clear_bit(R5_ReWrite, &sh->dev[i].flags);
38666 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
38667 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
38668
38669- if (atomic_read(&rdev->read_errors))
38670- atomic_set(&rdev->read_errors, 0);
38671+ if (atomic_read_unchecked(&rdev->read_errors))
38672+ atomic_set_unchecked(&rdev->read_errors, 0);
38673 } else {
38674 const char *bdn = bdevname(rdev->bdev, b);
38675 int retry = 0;
38676 int set_bad = 0;
38677
38678 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
38679- atomic_inc(&rdev->read_errors);
38680+ atomic_inc_unchecked(&rdev->read_errors);
38681 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
38682 printk_ratelimited(
38683 KERN_WARNING
38684@@ -1805,7 +1805,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
38685 mdname(conf->mddev),
38686 (unsigned long long)s,
38687 bdn);
38688- } else if (atomic_read(&rdev->read_errors)
38689+ } else if (atomic_read_unchecked(&rdev->read_errors)
38690 > conf->max_nr_stripes)
38691 printk(KERN_WARNING
38692 "md/raid:%s: Too many read errors, failing device %s.\n",
38693diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
38694index 401ef64..836e563 100644
38695--- a/drivers/media/dvb-core/dvbdev.c
38696+++ b/drivers/media/dvb-core/dvbdev.c
38697@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
38698 const struct dvb_device *template, void *priv, int type)
38699 {
38700 struct dvb_device *dvbdev;
38701- struct file_operations *dvbdevfops;
38702+ file_operations_no_const *dvbdevfops;
38703 struct device *clsdev;
38704 int minor;
38705 int id;
38706diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
38707index 9b6c3bb..baeb5c7 100644
38708--- a/drivers/media/dvb-frontends/dib3000.h
38709+++ b/drivers/media/dvb-frontends/dib3000.h
38710@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
38711 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
38712 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
38713 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
38714-};
38715+} __no_const;
38716
38717 #if IS_ENABLED(CONFIG_DVB_DIB3000MB)
38718 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
38719diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
38720index bc78354..42c9459 100644
38721--- a/drivers/media/pci/cx88/cx88-video.c
38722+++ b/drivers/media/pci/cx88/cx88-video.c
38723@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
38724
38725 /* ------------------------------------------------------------------ */
38726
38727-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
38728-static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
38729-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
38730+static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
38731+static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
38732+static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
38733
38734 module_param_array(video_nr, int, NULL, 0444);
38735 module_param_array(vbi_nr, int, NULL, 0444);
38736diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
38737index 96c4a17..1305a79 100644
38738--- a/drivers/media/platform/omap/omap_vout.c
38739+++ b/drivers/media/platform/omap/omap_vout.c
38740@@ -63,7 +63,6 @@ enum omap_vout_channels {
38741 OMAP_VIDEO2,
38742 };
38743
38744-static struct videobuf_queue_ops video_vbq_ops;
38745 /* Variables configurable through module params*/
38746 static u32 video1_numbuffers = 3;
38747 static u32 video2_numbuffers = 3;
38748@@ -1012,6 +1011,12 @@ static int omap_vout_open(struct file *file)
38749 {
38750 struct videobuf_queue *q;
38751 struct omap_vout_device *vout = NULL;
38752+ static struct videobuf_queue_ops video_vbq_ops = {
38753+ .buf_setup = omap_vout_buffer_setup,
38754+ .buf_prepare = omap_vout_buffer_prepare,
38755+ .buf_release = omap_vout_buffer_release,
38756+ .buf_queue = omap_vout_buffer_queue,
38757+ };
38758
38759 vout = video_drvdata(file);
38760 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
38761@@ -1029,10 +1034,6 @@ static int omap_vout_open(struct file *file)
38762 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
38763
38764 q = &vout->vbq;
38765- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
38766- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
38767- video_vbq_ops.buf_release = omap_vout_buffer_release;
38768- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
38769 spin_lock_init(&vout->vbq_lock);
38770
38771 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
38772diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
38773index 04e6490..2df65bf 100644
38774--- a/drivers/media/platform/s5p-tv/mixer.h
38775+++ b/drivers/media/platform/s5p-tv/mixer.h
38776@@ -156,7 +156,7 @@ struct mxr_layer {
38777 /** layer index (unique identifier) */
38778 int idx;
38779 /** callbacks for layer methods */
38780- struct mxr_layer_ops ops;
38781+ struct mxr_layer_ops *ops;
38782 /** format array */
38783 const struct mxr_format **fmt_array;
38784 /** size of format array */
38785diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
38786index b93a21f..2535195 100644
38787--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
38788+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
38789@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
38790 {
38791 struct mxr_layer *layer;
38792 int ret;
38793- struct mxr_layer_ops ops = {
38794+ static struct mxr_layer_ops ops = {
38795 .release = mxr_graph_layer_release,
38796 .buffer_set = mxr_graph_buffer_set,
38797 .stream_set = mxr_graph_stream_set,
38798diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
38799index b713403..53cb5ad 100644
38800--- a/drivers/media/platform/s5p-tv/mixer_reg.c
38801+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
38802@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
38803 layer->update_buf = next;
38804 }
38805
38806- layer->ops.buffer_set(layer, layer->update_buf);
38807+ layer->ops->buffer_set(layer, layer->update_buf);
38808
38809 if (done && done != layer->shadow_buf)
38810 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
38811diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
38812index 82142a2..6de47e8 100644
38813--- a/drivers/media/platform/s5p-tv/mixer_video.c
38814+++ b/drivers/media/platform/s5p-tv/mixer_video.c
38815@@ -209,7 +209,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
38816 layer->geo.src.height = layer->geo.src.full_height;
38817
38818 mxr_geometry_dump(mdev, &layer->geo);
38819- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
38820+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
38821 mxr_geometry_dump(mdev, &layer->geo);
38822 }
38823
38824@@ -227,7 +227,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
38825 layer->geo.dst.full_width = mbus_fmt.width;
38826 layer->geo.dst.full_height = mbus_fmt.height;
38827 layer->geo.dst.field = mbus_fmt.field;
38828- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
38829+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
38830
38831 mxr_geometry_dump(mdev, &layer->geo);
38832 }
38833@@ -333,7 +333,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
38834 /* set source size to highest accepted value */
38835 geo->src.full_width = max(geo->dst.full_width, pix->width);
38836 geo->src.full_height = max(geo->dst.full_height, pix->height);
38837- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
38838+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
38839 mxr_geometry_dump(mdev, &layer->geo);
38840 /* set cropping to total visible screen */
38841 geo->src.width = pix->width;
38842@@ -341,12 +341,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
38843 geo->src.x_offset = 0;
38844 geo->src.y_offset = 0;
38845 /* assure consistency of geometry */
38846- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
38847+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
38848 mxr_geometry_dump(mdev, &layer->geo);
38849 /* set full size to lowest possible value */
38850 geo->src.full_width = 0;
38851 geo->src.full_height = 0;
38852- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
38853+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
38854 mxr_geometry_dump(mdev, &layer->geo);
38855
38856 /* returning results */
38857@@ -473,7 +473,7 @@ static int mxr_s_selection(struct file *file, void *fh,
38858 target->width = s->r.width;
38859 target->height = s->r.height;
38860
38861- layer->ops.fix_geometry(layer, stage, s->flags);
38862+ layer->ops->fix_geometry(layer, stage, s->flags);
38863
38864 /* retrieve update selection rectangle */
38865 res.left = target->x_offset;
38866@@ -938,13 +938,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
38867 mxr_output_get(mdev);
38868
38869 mxr_layer_update_output(layer);
38870- layer->ops.format_set(layer);
38871+ layer->ops->format_set(layer);
38872 /* enabling layer in hardware */
38873 spin_lock_irqsave(&layer->enq_slock, flags);
38874 layer->state = MXR_LAYER_STREAMING;
38875 spin_unlock_irqrestore(&layer->enq_slock, flags);
38876
38877- layer->ops.stream_set(layer, MXR_ENABLE);
38878+ layer->ops->stream_set(layer, MXR_ENABLE);
38879 mxr_streamer_get(mdev);
38880
38881 return 0;
38882@@ -1014,7 +1014,7 @@ static int stop_streaming(struct vb2_queue *vq)
38883 spin_unlock_irqrestore(&layer->enq_slock, flags);
38884
38885 /* disabling layer in hardware */
38886- layer->ops.stream_set(layer, MXR_DISABLE);
38887+ layer->ops->stream_set(layer, MXR_DISABLE);
38888 /* remove one streamer */
38889 mxr_streamer_put(mdev);
38890 /* allow changes in output configuration */
38891@@ -1053,8 +1053,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
38892
38893 void mxr_layer_release(struct mxr_layer *layer)
38894 {
38895- if (layer->ops.release)
38896- layer->ops.release(layer);
38897+ if (layer->ops->release)
38898+ layer->ops->release(layer);
38899 }
38900
38901 void mxr_base_layer_release(struct mxr_layer *layer)
38902@@ -1080,7 +1080,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
38903
38904 layer->mdev = mdev;
38905 layer->idx = idx;
38906- layer->ops = *ops;
38907+ layer->ops = ops;
38908
38909 spin_lock_init(&layer->enq_slock);
38910 INIT_LIST_HEAD(&layer->enq_list);
38911diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
38912index 3d13a63..da31bf1 100644
38913--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
38914+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
38915@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
38916 {
38917 struct mxr_layer *layer;
38918 int ret;
38919- struct mxr_layer_ops ops = {
38920+ static struct mxr_layer_ops ops = {
38921 .release = mxr_vp_layer_release,
38922 .buffer_set = mxr_vp_buffer_set,
38923 .stream_set = mxr_vp_stream_set,
38924diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
38925index 643d80a..56bb96b 100644
38926--- a/drivers/media/radio/radio-cadet.c
38927+++ b/drivers/media/radio/radio-cadet.c
38928@@ -302,6 +302,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
38929 unsigned char readbuf[RDS_BUFFER];
38930 int i = 0;
38931
38932+ if (count > RDS_BUFFER)
38933+ return -EFAULT;
38934 mutex_lock(&dev->lock);
38935 if (dev->rdsstat == 0)
38936 cadet_start_rds(dev);
38937@@ -317,7 +319,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
38938 while (i < count && dev->rdsin != dev->rdsout)
38939 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
38940
38941- if (i && copy_to_user(data, readbuf, i))
38942+ if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
38943 i = -EFAULT;
38944 unlock:
38945 mutex_unlock(&dev->lock);
38946diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
38947index 3940bb0..fb3952a 100644
38948--- a/drivers/media/usb/dvb-usb/cxusb.c
38949+++ b/drivers/media/usb/dvb-usb/cxusb.c
38950@@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
38951
38952 struct dib0700_adapter_state {
38953 int (*set_param_save) (struct dvb_frontend *);
38954-};
38955+} __no_const;
38956
38957 static int dib7070_set_param_override(struct dvb_frontend *fe)
38958 {
38959diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
38960index 9578a67..31aa652 100644
38961--- a/drivers/media/usb/dvb-usb/dw2102.c
38962+++ b/drivers/media/usb/dvb-usb/dw2102.c
38963@@ -115,7 +115,7 @@ struct su3000_state {
38964
38965 struct s6x0_state {
38966 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
38967-};
38968+} __no_const;
38969
38970 /* debug */
38971 static int dvb_usb_dw2102_debug;
38972diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
38973index 7157af3..139e91a 100644
38974--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
38975+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
38976@@ -326,7 +326,7 @@ struct v4l2_buffer32 {
38977 __u32 reserved;
38978 };
38979
38980-static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
38981+static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
38982 enum v4l2_memory memory)
38983 {
38984 void __user *up_pln;
38985@@ -355,7 +355,7 @@ static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
38986 return 0;
38987 }
38988
38989-static int put_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
38990+static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
38991 enum v4l2_memory memory)
38992 {
38993 if (copy_in_user(up32, up, 2 * sizeof(__u32)) ||
38994@@ -772,7 +772,7 @@ static int put_v4l2_subdev_edid32(struct v4l2_subdev_edid *kp, struct v4l2_subde
38995 put_user(kp->start_block, &up->start_block) ||
38996 put_user(kp->blocks, &up->blocks) ||
38997 put_user(tmp, &up->edid) ||
38998- copy_to_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
38999+ copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
39000 return -EFAULT;
39001 return 0;
39002 }
39003diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
39004index aa6e7c7..cb5de87 100644
39005--- a/drivers/media/v4l2-core/v4l2-ioctl.c
39006+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
39007@@ -236,7 +236,7 @@ static void v4l_print_format(const void *arg, bool write_only)
39008 const struct v4l2_vbi_format *vbi;
39009 const struct v4l2_sliced_vbi_format *sliced;
39010 const struct v4l2_window *win;
39011- const struct v4l2_clip *clip;
39012+ const struct v4l2_clip __user *pclip;
39013 unsigned i;
39014
39015 pr_cont("type=%s", prt_names(p->type, v4l2_type_names));
39016@@ -284,12 +284,16 @@ static void v4l_print_format(const void *arg, bool write_only)
39017 win->w.left, win->w.top,
39018 prt_names(win->field, v4l2_field_names),
39019 win->chromakey, win->bitmap, win->global_alpha);
39020- clip = win->clips;
39021+ pclip = win->clips;
39022 for (i = 0; i < win->clipcount; i++) {
39023+ struct v4l2_clip clip;
39024+
39025+ if (copy_from_user(&clip, pclip, sizeof clip))
39026+ break;
39027 printk(KERN_DEBUG "clip %u: wxh=%dx%d, x,y=%d,%d\n",
39028- i, clip->c.width, clip->c.height,
39029- clip->c.left, clip->c.top);
39030- clip = clip->next;
39031+ i, clip.c.width, clip.c.height,
39032+ clip.c.left, clip.c.top);
39033+ pclip = clip.next;
39034 }
39035 break;
39036 case V4L2_BUF_TYPE_VBI_CAPTURE:
39037@@ -1923,7 +1927,8 @@ struct v4l2_ioctl_info {
39038 struct file *file, void *fh, void *p);
39039 } u;
39040 void (*debug)(const void *arg, bool write_only);
39041-};
39042+} __do_const;
39043+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
39044
39045 /* This control needs a priority check */
39046 #define INFO_FL_PRIO (1 << 0)
39047@@ -2108,7 +2113,7 @@ static long __video_do_ioctl(struct file *file,
39048 struct video_device *vfd = video_devdata(file);
39049 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
39050 bool write_only = false;
39051- struct v4l2_ioctl_info default_info;
39052+ v4l2_ioctl_info_no_const default_info;
39053 const struct v4l2_ioctl_info *info;
39054 void *fh = file->private_data;
39055 struct v4l2_fh *vfh = NULL;
39056@@ -2193,7 +2198,7 @@ done:
39057 }
39058
39059 static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
39060- void * __user *user_ptr, void ***kernel_ptr)
39061+ void __user **user_ptr, void ***kernel_ptr)
39062 {
39063 int ret = 0;
39064
39065@@ -2209,7 +2214,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
39066 ret = -EINVAL;
39067 break;
39068 }
39069- *user_ptr = (void __user *)buf->m.planes;
39070+ *user_ptr = (void __force_user *)buf->m.planes;
39071 *kernel_ptr = (void *)&buf->m.planes;
39072 *array_size = sizeof(struct v4l2_plane) * buf->length;
39073 ret = 1;
39074@@ -2244,7 +2249,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
39075 ret = -EINVAL;
39076 break;
39077 }
39078- *user_ptr = (void __user *)ctrls->controls;
39079+ *user_ptr = (void __force_user *)ctrls->controls;
39080 *kernel_ptr = (void *)&ctrls->controls;
39081 *array_size = sizeof(struct v4l2_ext_control)
39082 * ctrls->count;
39083diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
39084index fb69baa..3aeea2e 100644
39085--- a/drivers/message/fusion/mptbase.c
39086+++ b/drivers/message/fusion/mptbase.c
39087@@ -6755,8 +6755,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
39088 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
39089 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
39090
39091+#ifdef CONFIG_GRKERNSEC_HIDESYM
39092+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
39093+#else
39094 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
39095 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
39096+#endif
39097+
39098 /*
39099 * Rounding UP to nearest 4-kB boundary here...
39100 */
39101@@ -6769,7 +6774,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
39102 ioc->facts.GlobalCredits);
39103
39104 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
39105+#ifdef CONFIG_GRKERNSEC_HIDESYM
39106+ NULL, NULL);
39107+#else
39108 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
39109+#endif
39110 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
39111 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
39112 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
39113diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
39114index fa43c39..daeb158 100644
39115--- a/drivers/message/fusion/mptsas.c
39116+++ b/drivers/message/fusion/mptsas.c
39117@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
39118 return 0;
39119 }
39120
39121+static inline void
39122+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
39123+{
39124+ if (phy_info->port_details) {
39125+ phy_info->port_details->rphy = rphy;
39126+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
39127+ ioc->name, rphy));
39128+ }
39129+
39130+ if (rphy) {
39131+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
39132+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
39133+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
39134+ ioc->name, rphy, rphy->dev.release));
39135+ }
39136+}
39137+
39138 /* no mutex */
39139 static void
39140 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
39141@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
39142 return NULL;
39143 }
39144
39145-static inline void
39146-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
39147-{
39148- if (phy_info->port_details) {
39149- phy_info->port_details->rphy = rphy;
39150- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
39151- ioc->name, rphy));
39152- }
39153-
39154- if (rphy) {
39155- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
39156- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
39157- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
39158- ioc->name, rphy, rphy->dev.release));
39159- }
39160-}
39161-
39162 static inline struct sas_port *
39163 mptsas_get_port(struct mptsas_phyinfo *phy_info)
39164 {
39165diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
39166index 164afa7..b6b2e74 100644
39167--- a/drivers/message/fusion/mptscsih.c
39168+++ b/drivers/message/fusion/mptscsih.c
39169@@ -1271,15 +1271,16 @@ mptscsih_info(struct Scsi_Host *SChost)
39170
39171 h = shost_priv(SChost);
39172
39173- if (h) {
39174- if (h->info_kbuf == NULL)
39175- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
39176- return h->info_kbuf;
39177- h->info_kbuf[0] = '\0';
39178+ if (!h)
39179+ return NULL;
39180
39181- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
39182- h->info_kbuf[size-1] = '\0';
39183- }
39184+ if (h->info_kbuf == NULL)
39185+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
39186+ return h->info_kbuf;
39187+ h->info_kbuf[0] = '\0';
39188+
39189+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
39190+ h->info_kbuf[size-1] = '\0';
39191
39192 return h->info_kbuf;
39193 }
39194diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
39195index 8001aa6..b137580 100644
39196--- a/drivers/message/i2o/i2o_proc.c
39197+++ b/drivers/message/i2o/i2o_proc.c
39198@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
39199 "Array Controller Device"
39200 };
39201
39202-static char *chtostr(char *tmp, u8 *chars, int n)
39203-{
39204- tmp[0] = 0;
39205- return strncat(tmp, (char *)chars, n);
39206-}
39207-
39208 static int i2o_report_query_status(struct seq_file *seq, int block_status,
39209 char *group)
39210 {
39211@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
39212 } *result;
39213
39214 i2o_exec_execute_ddm_table ddm_table;
39215- char tmp[28 + 1];
39216
39217 result = kmalloc(sizeof(*result), GFP_KERNEL);
39218 if (!result)
39219@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
39220
39221 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
39222 seq_printf(seq, "%-#8x", ddm_table.module_id);
39223- seq_printf(seq, "%-29s",
39224- chtostr(tmp, ddm_table.module_name_version, 28));
39225+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
39226 seq_printf(seq, "%9d ", ddm_table.data_size);
39227 seq_printf(seq, "%8d", ddm_table.code_size);
39228
39229@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
39230
39231 i2o_driver_result_table *result;
39232 i2o_driver_store_table *dst;
39233- char tmp[28 + 1];
39234
39235 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
39236 if (result == NULL)
39237@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
39238
39239 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
39240 seq_printf(seq, "%-#8x", dst->module_id);
39241- seq_printf(seq, "%-29s",
39242- chtostr(tmp, dst->module_name_version, 28));
39243- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
39244+ seq_printf(seq, "%-.28s", dst->module_name_version);
39245+ seq_printf(seq, "%-.8s", dst->date);
39246 seq_printf(seq, "%8d ", dst->module_size);
39247 seq_printf(seq, "%8d ", dst->mpb_size);
39248 seq_printf(seq, "0x%04x", dst->module_flags);
39249@@ -1250,7 +1240,6 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
39250 // == (allow) 512d bytes (max)
39251 static u16 *work16 = (u16 *) work32;
39252 int token;
39253- char tmp[16 + 1];
39254
39255 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
39256
39257@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
39258 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
39259 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
39260 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
39261- seq_printf(seq, "Vendor info : %s\n",
39262- chtostr(tmp, (u8 *) (work32 + 2), 16));
39263- seq_printf(seq, "Product info : %s\n",
39264- chtostr(tmp, (u8 *) (work32 + 6), 16));
39265- seq_printf(seq, "Description : %s\n",
39266- chtostr(tmp, (u8 *) (work32 + 10), 16));
39267- seq_printf(seq, "Product rev. : %s\n",
39268- chtostr(tmp, (u8 *) (work32 + 14), 8));
39269+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
39270+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
39271+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
39272+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
39273
39274 seq_printf(seq, "Serial number : ");
39275 print_serial_number(seq, (u8 *) (work32 + 16),
39276@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
39277 u8 pad[256]; // allow up to 256 byte (max) serial number
39278 } result;
39279
39280- char tmp[24 + 1];
39281-
39282 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
39283
39284 if (token < 0) {
39285@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
39286 }
39287
39288 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
39289- seq_printf(seq, "Module name : %s\n",
39290- chtostr(tmp, result.module_name, 24));
39291- seq_printf(seq, "Module revision : %s\n",
39292- chtostr(tmp, result.module_rev, 8));
39293+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
39294+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
39295
39296 seq_printf(seq, "Serial number : ");
39297 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
39298@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
39299 u8 instance_number[4];
39300 } result;
39301
39302- char tmp[64 + 1];
39303-
39304 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
39305
39306 if (token < 0) {
39307@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
39308 return 0;
39309 }
39310
39311- seq_printf(seq, "Device name : %s\n",
39312- chtostr(tmp, result.device_name, 64));
39313- seq_printf(seq, "Service name : %s\n",
39314- chtostr(tmp, result.service_name, 64));
39315- seq_printf(seq, "Physical name : %s\n",
39316- chtostr(tmp, result.physical_location, 64));
39317- seq_printf(seq, "Instance number : %s\n",
39318- chtostr(tmp, result.instance_number, 4));
39319+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
39320+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
39321+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
39322+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
39323
39324 return 0;
39325 }
39326diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
39327index a8c08f3..155fe3d 100644
39328--- a/drivers/message/i2o/iop.c
39329+++ b/drivers/message/i2o/iop.c
39330@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
39331
39332 spin_lock_irqsave(&c->context_list_lock, flags);
39333
39334- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
39335- atomic_inc(&c->context_list_counter);
39336+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
39337+ atomic_inc_unchecked(&c->context_list_counter);
39338
39339- entry->context = atomic_read(&c->context_list_counter);
39340+ entry->context = atomic_read_unchecked(&c->context_list_counter);
39341
39342 list_add(&entry->list, &c->context_list);
39343
39344@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
39345
39346 #if BITS_PER_LONG == 64
39347 spin_lock_init(&c->context_list_lock);
39348- atomic_set(&c->context_list_counter, 0);
39349+ atomic_set_unchecked(&c->context_list_counter, 0);
39350 INIT_LIST_HEAD(&c->context_list);
39351 #endif
39352
39353diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
39354index 45ece11..8efa218 100644
39355--- a/drivers/mfd/janz-cmodio.c
39356+++ b/drivers/mfd/janz-cmodio.c
39357@@ -13,6 +13,7 @@
39358
39359 #include <linux/kernel.h>
39360 #include <linux/module.h>
39361+#include <linux/slab.h>
39362 #include <linux/init.h>
39363 #include <linux/pci.h>
39364 #include <linux/interrupt.h>
39365diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
39366index a5f9888..1c0ed56 100644
39367--- a/drivers/mfd/twl4030-irq.c
39368+++ b/drivers/mfd/twl4030-irq.c
39369@@ -35,6 +35,7 @@
39370 #include <linux/of.h>
39371 #include <linux/irqdomain.h>
39372 #include <linux/i2c/twl.h>
39373+#include <asm/pgtable.h>
39374
39375 #include "twl-core.h"
39376
39377@@ -728,10 +729,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
39378 * Install an irq handler for each of the SIH modules;
39379 * clone dummy irq_chip since PIH can't *do* anything
39380 */
39381- twl4030_irq_chip = dummy_irq_chip;
39382- twl4030_irq_chip.name = "twl4030";
39383+ pax_open_kernel();
39384+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
39385+ *(const char **)&twl4030_irq_chip.name = "twl4030";
39386
39387- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
39388+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
39389+ pax_close_kernel();
39390
39391 for (i = irq_base; i < irq_end; i++) {
39392 irq_set_chip_and_handler(i, &twl4030_irq_chip,
39393diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
39394index 277a8db..0e0b754 100644
39395--- a/drivers/mfd/twl6030-irq.c
39396+++ b/drivers/mfd/twl6030-irq.c
39397@@ -387,10 +387,12 @@ int twl6030_init_irq(struct device *dev, int irq_num)
39398 * install an irq handler for each of the modules;
39399 * clone dummy irq_chip since PIH can't *do* anything
39400 */
39401- twl6030_irq_chip = dummy_irq_chip;
39402- twl6030_irq_chip.name = "twl6030";
39403- twl6030_irq_chip.irq_set_type = NULL;
39404- twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;
39405+ pax_open_kernel();
39406+ memcpy((void *)&twl6030_irq_chip, &dummy_irq_chip, sizeof twl6030_irq_chip);
39407+ *(const char **)&twl6030_irq_chip.name = "twl6030";
39408+ *(void **)&twl6030_irq_chip.irq_set_type = NULL;
39409+ *(void **)&twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;
39410+ pax_close_kernel();
39411
39412 for (i = irq_base; i < irq_end; i++) {
39413 irq_set_chip_and_handler(i, &twl6030_irq_chip,
39414diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
39415index f32550a..e3e52a2 100644
39416--- a/drivers/misc/c2port/core.c
39417+++ b/drivers/misc/c2port/core.c
39418@@ -920,7 +920,9 @@ struct c2port_device *c2port_device_register(char *name,
39419 mutex_init(&c2dev->mutex);
39420
39421 /* Create binary file */
39422- c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
39423+ pax_open_kernel();
39424+ *(size_t *)&c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
39425+ pax_close_kernel();
39426 ret = device_create_bin_file(c2dev->dev, &c2port_bin_attrs);
39427 if (unlikely(ret))
39428 goto error_device_create_bin_file;
39429diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
39430index 36f5d52..32311c3 100644
39431--- a/drivers/misc/kgdbts.c
39432+++ b/drivers/misc/kgdbts.c
39433@@ -834,7 +834,7 @@ static void run_plant_and_detach_test(int is_early)
39434 char before[BREAK_INSTR_SIZE];
39435 char after[BREAK_INSTR_SIZE];
39436
39437- probe_kernel_read(before, (char *)kgdbts_break_test,
39438+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
39439 BREAK_INSTR_SIZE);
39440 init_simple_test();
39441 ts.tst = plant_and_detach_test;
39442@@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early)
39443 /* Activate test with initial breakpoint */
39444 if (!is_early)
39445 kgdb_breakpoint();
39446- probe_kernel_read(after, (char *)kgdbts_break_test,
39447+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
39448 BREAK_INSTR_SIZE);
39449 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
39450 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
39451diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
39452index 4a87e5c..76bdf5c 100644
39453--- a/drivers/misc/lis3lv02d/lis3lv02d.c
39454+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
39455@@ -498,7 +498,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
39456 * the lid is closed. This leads to interrupts as soon as a little move
39457 * is done.
39458 */
39459- atomic_inc(&lis3->count);
39460+ atomic_inc_unchecked(&lis3->count);
39461
39462 wake_up_interruptible(&lis3->misc_wait);
39463 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
39464@@ -584,7 +584,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
39465 if (lis3->pm_dev)
39466 pm_runtime_get_sync(lis3->pm_dev);
39467
39468- atomic_set(&lis3->count, 0);
39469+ atomic_set_unchecked(&lis3->count, 0);
39470 return 0;
39471 }
39472
39473@@ -617,7 +617,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
39474 add_wait_queue(&lis3->misc_wait, &wait);
39475 while (true) {
39476 set_current_state(TASK_INTERRUPTIBLE);
39477- data = atomic_xchg(&lis3->count, 0);
39478+ data = atomic_xchg_unchecked(&lis3->count, 0);
39479 if (data)
39480 break;
39481
39482@@ -658,7 +658,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
39483 struct lis3lv02d, miscdev);
39484
39485 poll_wait(file, &lis3->misc_wait, wait);
39486- if (atomic_read(&lis3->count))
39487+ if (atomic_read_unchecked(&lis3->count))
39488 return POLLIN | POLLRDNORM;
39489 return 0;
39490 }
39491diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
39492index c439c82..1f20f57 100644
39493--- a/drivers/misc/lis3lv02d/lis3lv02d.h
39494+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
39495@@ -297,7 +297,7 @@ struct lis3lv02d {
39496 struct input_polled_dev *idev; /* input device */
39497 struct platform_device *pdev; /* platform device */
39498 struct regulator_bulk_data regulators[2];
39499- atomic_t count; /* interrupt count after last read */
39500+ atomic_unchecked_t count; /* interrupt count after last read */
39501 union axis_conversion ac; /* hw -> logical axis */
39502 int mapped_btns[3];
39503
39504diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
39505index 2f30bad..c4c13d0 100644
39506--- a/drivers/misc/sgi-gru/gruhandles.c
39507+++ b/drivers/misc/sgi-gru/gruhandles.c
39508@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
39509 unsigned long nsec;
39510
39511 nsec = CLKS2NSEC(clks);
39512- atomic_long_inc(&mcs_op_statistics[op].count);
39513- atomic_long_add(nsec, &mcs_op_statistics[op].total);
39514+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
39515+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
39516 if (mcs_op_statistics[op].max < nsec)
39517 mcs_op_statistics[op].max = nsec;
39518 }
39519diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
39520index 950dbe9..eeef0f8 100644
39521--- a/drivers/misc/sgi-gru/gruprocfs.c
39522+++ b/drivers/misc/sgi-gru/gruprocfs.c
39523@@ -32,9 +32,9 @@
39524
39525 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
39526
39527-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
39528+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
39529 {
39530- unsigned long val = atomic_long_read(v);
39531+ unsigned long val = atomic_long_read_unchecked(v);
39532
39533 seq_printf(s, "%16lu %s\n", val, id);
39534 }
39535@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
39536
39537 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
39538 for (op = 0; op < mcsop_last; op++) {
39539- count = atomic_long_read(&mcs_op_statistics[op].count);
39540- total = atomic_long_read(&mcs_op_statistics[op].total);
39541+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
39542+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
39543 max = mcs_op_statistics[op].max;
39544 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
39545 count ? total / count : 0, max);
39546diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
39547index 5c3ce24..4915ccb 100644
39548--- a/drivers/misc/sgi-gru/grutables.h
39549+++ b/drivers/misc/sgi-gru/grutables.h
39550@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
39551 * GRU statistics.
39552 */
39553 struct gru_stats_s {
39554- atomic_long_t vdata_alloc;
39555- atomic_long_t vdata_free;
39556- atomic_long_t gts_alloc;
39557- atomic_long_t gts_free;
39558- atomic_long_t gms_alloc;
39559- atomic_long_t gms_free;
39560- atomic_long_t gts_double_allocate;
39561- atomic_long_t assign_context;
39562- atomic_long_t assign_context_failed;
39563- atomic_long_t free_context;
39564- atomic_long_t load_user_context;
39565- atomic_long_t load_kernel_context;
39566- atomic_long_t lock_kernel_context;
39567- atomic_long_t unlock_kernel_context;
39568- atomic_long_t steal_user_context;
39569- atomic_long_t steal_kernel_context;
39570- atomic_long_t steal_context_failed;
39571- atomic_long_t nopfn;
39572- atomic_long_t asid_new;
39573- atomic_long_t asid_next;
39574- atomic_long_t asid_wrap;
39575- atomic_long_t asid_reuse;
39576- atomic_long_t intr;
39577- atomic_long_t intr_cbr;
39578- atomic_long_t intr_tfh;
39579- atomic_long_t intr_spurious;
39580- atomic_long_t intr_mm_lock_failed;
39581- atomic_long_t call_os;
39582- atomic_long_t call_os_wait_queue;
39583- atomic_long_t user_flush_tlb;
39584- atomic_long_t user_unload_context;
39585- atomic_long_t user_exception;
39586- atomic_long_t set_context_option;
39587- atomic_long_t check_context_retarget_intr;
39588- atomic_long_t check_context_unload;
39589- atomic_long_t tlb_dropin;
39590- atomic_long_t tlb_preload_page;
39591- atomic_long_t tlb_dropin_fail_no_asid;
39592- atomic_long_t tlb_dropin_fail_upm;
39593- atomic_long_t tlb_dropin_fail_invalid;
39594- atomic_long_t tlb_dropin_fail_range_active;
39595- atomic_long_t tlb_dropin_fail_idle;
39596- atomic_long_t tlb_dropin_fail_fmm;
39597- atomic_long_t tlb_dropin_fail_no_exception;
39598- atomic_long_t tfh_stale_on_fault;
39599- atomic_long_t mmu_invalidate_range;
39600- atomic_long_t mmu_invalidate_page;
39601- atomic_long_t flush_tlb;
39602- atomic_long_t flush_tlb_gru;
39603- atomic_long_t flush_tlb_gru_tgh;
39604- atomic_long_t flush_tlb_gru_zero_asid;
39605+ atomic_long_unchecked_t vdata_alloc;
39606+ atomic_long_unchecked_t vdata_free;
39607+ atomic_long_unchecked_t gts_alloc;
39608+ atomic_long_unchecked_t gts_free;
39609+ atomic_long_unchecked_t gms_alloc;
39610+ atomic_long_unchecked_t gms_free;
39611+ atomic_long_unchecked_t gts_double_allocate;
39612+ atomic_long_unchecked_t assign_context;
39613+ atomic_long_unchecked_t assign_context_failed;
39614+ atomic_long_unchecked_t free_context;
39615+ atomic_long_unchecked_t load_user_context;
39616+ atomic_long_unchecked_t load_kernel_context;
39617+ atomic_long_unchecked_t lock_kernel_context;
39618+ atomic_long_unchecked_t unlock_kernel_context;
39619+ atomic_long_unchecked_t steal_user_context;
39620+ atomic_long_unchecked_t steal_kernel_context;
39621+ atomic_long_unchecked_t steal_context_failed;
39622+ atomic_long_unchecked_t nopfn;
39623+ atomic_long_unchecked_t asid_new;
39624+ atomic_long_unchecked_t asid_next;
39625+ atomic_long_unchecked_t asid_wrap;
39626+ atomic_long_unchecked_t asid_reuse;
39627+ atomic_long_unchecked_t intr;
39628+ atomic_long_unchecked_t intr_cbr;
39629+ atomic_long_unchecked_t intr_tfh;
39630+ atomic_long_unchecked_t intr_spurious;
39631+ atomic_long_unchecked_t intr_mm_lock_failed;
39632+ atomic_long_unchecked_t call_os;
39633+ atomic_long_unchecked_t call_os_wait_queue;
39634+ atomic_long_unchecked_t user_flush_tlb;
39635+ atomic_long_unchecked_t user_unload_context;
39636+ atomic_long_unchecked_t user_exception;
39637+ atomic_long_unchecked_t set_context_option;
39638+ atomic_long_unchecked_t check_context_retarget_intr;
39639+ atomic_long_unchecked_t check_context_unload;
39640+ atomic_long_unchecked_t tlb_dropin;
39641+ atomic_long_unchecked_t tlb_preload_page;
39642+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
39643+ atomic_long_unchecked_t tlb_dropin_fail_upm;
39644+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
39645+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
39646+ atomic_long_unchecked_t tlb_dropin_fail_idle;
39647+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
39648+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
39649+ atomic_long_unchecked_t tfh_stale_on_fault;
39650+ atomic_long_unchecked_t mmu_invalidate_range;
39651+ atomic_long_unchecked_t mmu_invalidate_page;
39652+ atomic_long_unchecked_t flush_tlb;
39653+ atomic_long_unchecked_t flush_tlb_gru;
39654+ atomic_long_unchecked_t flush_tlb_gru_tgh;
39655+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
39656
39657- atomic_long_t copy_gpa;
39658- atomic_long_t read_gpa;
39659+ atomic_long_unchecked_t copy_gpa;
39660+ atomic_long_unchecked_t read_gpa;
39661
39662- atomic_long_t mesq_receive;
39663- atomic_long_t mesq_receive_none;
39664- atomic_long_t mesq_send;
39665- atomic_long_t mesq_send_failed;
39666- atomic_long_t mesq_noop;
39667- atomic_long_t mesq_send_unexpected_error;
39668- atomic_long_t mesq_send_lb_overflow;
39669- atomic_long_t mesq_send_qlimit_reached;
39670- atomic_long_t mesq_send_amo_nacked;
39671- atomic_long_t mesq_send_put_nacked;
39672- atomic_long_t mesq_page_overflow;
39673- atomic_long_t mesq_qf_locked;
39674- atomic_long_t mesq_qf_noop_not_full;
39675- atomic_long_t mesq_qf_switch_head_failed;
39676- atomic_long_t mesq_qf_unexpected_error;
39677- atomic_long_t mesq_noop_unexpected_error;
39678- atomic_long_t mesq_noop_lb_overflow;
39679- atomic_long_t mesq_noop_qlimit_reached;
39680- atomic_long_t mesq_noop_amo_nacked;
39681- atomic_long_t mesq_noop_put_nacked;
39682- atomic_long_t mesq_noop_page_overflow;
39683+ atomic_long_unchecked_t mesq_receive;
39684+ atomic_long_unchecked_t mesq_receive_none;
39685+ atomic_long_unchecked_t mesq_send;
39686+ atomic_long_unchecked_t mesq_send_failed;
39687+ atomic_long_unchecked_t mesq_noop;
39688+ atomic_long_unchecked_t mesq_send_unexpected_error;
39689+ atomic_long_unchecked_t mesq_send_lb_overflow;
39690+ atomic_long_unchecked_t mesq_send_qlimit_reached;
39691+ atomic_long_unchecked_t mesq_send_amo_nacked;
39692+ atomic_long_unchecked_t mesq_send_put_nacked;
39693+ atomic_long_unchecked_t mesq_page_overflow;
39694+ atomic_long_unchecked_t mesq_qf_locked;
39695+ atomic_long_unchecked_t mesq_qf_noop_not_full;
39696+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
39697+ atomic_long_unchecked_t mesq_qf_unexpected_error;
39698+ atomic_long_unchecked_t mesq_noop_unexpected_error;
39699+ atomic_long_unchecked_t mesq_noop_lb_overflow;
39700+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
39701+ atomic_long_unchecked_t mesq_noop_amo_nacked;
39702+ atomic_long_unchecked_t mesq_noop_put_nacked;
39703+ atomic_long_unchecked_t mesq_noop_page_overflow;
39704
39705 };
39706
39707@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
39708 tghop_invalidate, mcsop_last};
39709
39710 struct mcs_op_statistic {
39711- atomic_long_t count;
39712- atomic_long_t total;
39713+ atomic_long_unchecked_t count;
39714+ atomic_long_unchecked_t total;
39715 unsigned long max;
39716 };
39717
39718@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
39719
39720 #define STAT(id) do { \
39721 if (gru_options & OPT_STATS) \
39722- atomic_long_inc(&gru_stats.id); \
39723+ atomic_long_inc_unchecked(&gru_stats.id); \
39724 } while (0)
39725
39726 #ifdef CONFIG_SGI_GRU_DEBUG
39727diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
39728index c862cd4..0d176fe 100644
39729--- a/drivers/misc/sgi-xp/xp.h
39730+++ b/drivers/misc/sgi-xp/xp.h
39731@@ -288,7 +288,7 @@ struct xpc_interface {
39732 xpc_notify_func, void *);
39733 void (*received) (short, int, void *);
39734 enum xp_retval (*partid_to_nasids) (short, void *);
39735-};
39736+} __no_const;
39737
39738 extern struct xpc_interface xpc_interface;
39739
39740diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
39741index b94d5f7..7f494c5 100644
39742--- a/drivers/misc/sgi-xp/xpc.h
39743+++ b/drivers/misc/sgi-xp/xpc.h
39744@@ -835,6 +835,7 @@ struct xpc_arch_operations {
39745 void (*received_payload) (struct xpc_channel *, void *);
39746 void (*notify_senders_of_disconnect) (struct xpc_channel *);
39747 };
39748+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
39749
39750 /* struct xpc_partition act_state values (for XPC HB) */
39751
39752@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
39753 /* found in xpc_main.c */
39754 extern struct device *xpc_part;
39755 extern struct device *xpc_chan;
39756-extern struct xpc_arch_operations xpc_arch_ops;
39757+extern xpc_arch_operations_no_const xpc_arch_ops;
39758 extern int xpc_disengage_timelimit;
39759 extern int xpc_disengage_timedout;
39760 extern int xpc_activate_IRQ_rcvd;
39761diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
39762index d971817..33bdca5 100644
39763--- a/drivers/misc/sgi-xp/xpc_main.c
39764+++ b/drivers/misc/sgi-xp/xpc_main.c
39765@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
39766 .notifier_call = xpc_system_die,
39767 };
39768
39769-struct xpc_arch_operations xpc_arch_ops;
39770+xpc_arch_operations_no_const xpc_arch_ops;
39771
39772 /*
39773 * Timer function to enforce the timelimit on the partition disengage.
39774@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
39775
39776 if (((die_args->trapnr == X86_TRAP_MF) ||
39777 (die_args->trapnr == X86_TRAP_XF)) &&
39778- !user_mode_vm(die_args->regs))
39779+ !user_mode(die_args->regs))
39780 xpc_die_deactivate();
39781
39782 break;
39783diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
39784index 49f04bc..65660c2 100644
39785--- a/drivers/mmc/core/mmc_ops.c
39786+++ b/drivers/mmc/core/mmc_ops.c
39787@@ -247,7 +247,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
39788 void *data_buf;
39789 int is_on_stack;
39790
39791- is_on_stack = object_is_on_stack(buf);
39792+ is_on_stack = object_starts_on_stack(buf);
39793 if (is_on_stack) {
39794 /*
39795 * dma onto stack is unsafe/nonportable, but callers to this
39796diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
39797index 53b8fd9..615b462 100644
39798--- a/drivers/mmc/host/dw_mmc.h
39799+++ b/drivers/mmc/host/dw_mmc.h
39800@@ -205,5 +205,5 @@ struct dw_mci_drv_data {
39801 int (*parse_dt)(struct dw_mci *host);
39802 int (*setup_bus)(struct dw_mci *host,
39803 struct device_node *slot_np, u8 bus_width);
39804-};
39805+} __do_const;
39806 #endif /* _DW_MMC_H_ */
39807diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
39808index 7363efe..681558e 100644
39809--- a/drivers/mmc/host/sdhci-s3c.c
39810+++ b/drivers/mmc/host/sdhci-s3c.c
39811@@ -720,9 +720,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
39812 * we can use overriding functions instead of default.
39813 */
39814 if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
39815- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
39816- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
39817- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
39818+ pax_open_kernel();
39819+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
39820+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
39821+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
39822+ pax_close_kernel();
39823 }
39824
39825 /* It supports additional host capabilities if needed */
39826diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
39827index a4eb8b5..8c0628f 100644
39828--- a/drivers/mtd/devices/doc2000.c
39829+++ b/drivers/mtd/devices/doc2000.c
39830@@ -753,7 +753,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
39831
39832 /* The ECC will not be calculated correctly if less than 512 is written */
39833 /* DBB-
39834- if (len != 0x200 && eccbuf)
39835+ if (len != 0x200)
39836 printk(KERN_WARNING
39837 "ECC needs a full sector write (adr: %lx size %lx)\n",
39838 (long) to, (long) len);
39839diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
39840index 0c8bb6b..6f35deb 100644
39841--- a/drivers/mtd/nand/denali.c
39842+++ b/drivers/mtd/nand/denali.c
39843@@ -24,6 +24,7 @@
39844 #include <linux/slab.h>
39845 #include <linux/mtd/mtd.h>
39846 #include <linux/module.h>
39847+#include <linux/slab.h>
39848
39849 #include "denali.h"
39850
39851diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
39852index 51b9d6a..52af9a7 100644
39853--- a/drivers/mtd/nftlmount.c
39854+++ b/drivers/mtd/nftlmount.c
39855@@ -24,6 +24,7 @@
39856 #include <asm/errno.h>
39857 #include <linux/delay.h>
39858 #include <linux/slab.h>
39859+#include <linux/sched.h>
39860 #include <linux/mtd/mtd.h>
39861 #include <linux/mtd/nand.h>
39862 #include <linux/mtd/nftl.h>
39863diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
39864index 8dd6ba5..419cc1d 100644
39865--- a/drivers/mtd/sm_ftl.c
39866+++ b/drivers/mtd/sm_ftl.c
39867@@ -56,7 +56,7 @@ ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
39868 #define SM_CIS_VENDOR_OFFSET 0x59
39869 struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
39870 {
39871- struct attribute_group *attr_group;
39872+ attribute_group_no_const *attr_group;
39873 struct attribute **attributes;
39874 struct sm_sysfs_attribute *vendor_attribute;
39875
39876diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
39877index dbbea0e..3f4a0b1 100644
39878--- a/drivers/net/bonding/bond_main.c
39879+++ b/drivers/net/bonding/bond_main.c
39880@@ -4822,7 +4822,7 @@ static unsigned int bond_get_num_tx_queues(void)
39881 return tx_queues;
39882 }
39883
39884-static struct rtnl_link_ops bond_link_ops __read_mostly = {
39885+static struct rtnl_link_ops bond_link_ops = {
39886 .kind = "bond",
39887 .priv_size = sizeof(struct bonding),
39888 .setup = bond_setup,
39889@@ -4947,8 +4947,8 @@ static void __exit bonding_exit(void)
39890
39891 bond_destroy_debugfs();
39892
39893- rtnl_link_unregister(&bond_link_ops);
39894 unregister_pernet_subsys(&bond_net_ops);
39895+ rtnl_link_unregister(&bond_link_ops);
39896
39897 #ifdef CONFIG_NET_POLL_CONTROLLER
39898 /*
39899diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
39900index e1d2643..7f4133b 100644
39901--- a/drivers/net/ethernet/8390/ax88796.c
39902+++ b/drivers/net/ethernet/8390/ax88796.c
39903@@ -872,9 +872,11 @@ static int ax_probe(struct platform_device *pdev)
39904 if (ax->plat->reg_offsets)
39905 ei_local->reg_offset = ax->plat->reg_offsets;
39906 else {
39907+ resource_size_t _mem_size = mem_size;
39908+ do_div(_mem_size, 0x18);
39909 ei_local->reg_offset = ax->reg_offsets;
39910 for (ret = 0; ret < 0x18; ret++)
39911- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
39912+ ax->reg_offsets[ret] = _mem_size * ret;
39913 }
39914
39915 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
39916diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
39917index aee7671..3ca2651 100644
39918--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
39919+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
39920@@ -1093,7 +1093,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
39921 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
39922 {
39923 /* RX_MODE controlling object */
39924- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
39925+ bnx2x_init_rx_mode_obj(bp);
39926
39927 /* multicast configuration controlling object */
39928 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
39929diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
39930index 7306416..5fb7fb5 100644
39931--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
39932+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
39933@@ -2381,15 +2381,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
39934 return rc;
39935 }
39936
39937-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
39938- struct bnx2x_rx_mode_obj *o)
39939+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
39940 {
39941 if (CHIP_IS_E1x(bp)) {
39942- o->wait_comp = bnx2x_empty_rx_mode_wait;
39943- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
39944+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
39945+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
39946 } else {
39947- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
39948- o->config_rx_mode = bnx2x_set_rx_mode_e2;
39949+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
39950+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
39951 }
39952 }
39953
39954diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
39955index ff90760..08d8aed 100644
39956--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
39957+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
39958@@ -1306,8 +1306,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
39959
39960 /********************* RX MODE ****************/
39961
39962-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
39963- struct bnx2x_rx_mode_obj *o);
39964+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
39965
39966 /**
39967 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
39968diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
39969index 25309bf..fcfd54c 100644
39970--- a/drivers/net/ethernet/broadcom/tg3.h
39971+++ b/drivers/net/ethernet/broadcom/tg3.h
39972@@ -147,6 +147,7 @@
39973 #define CHIPREV_ID_5750_A0 0x4000
39974 #define CHIPREV_ID_5750_A1 0x4001
39975 #define CHIPREV_ID_5750_A3 0x4003
39976+#define CHIPREV_ID_5750_C1 0x4201
39977 #define CHIPREV_ID_5750_C2 0x4202
39978 #define CHIPREV_ID_5752_A0_HW 0x5000
39979 #define CHIPREV_ID_5752_A0 0x6000
39980diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
39981index 6e8bc9d..94d957d 100644
39982--- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
39983+++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
39984@@ -244,7 +244,7 @@ bnad_debugfs_lseek(struct file *file, loff_t offset, int orig)
39985 file->f_pos += offset;
39986 break;
39987 case 2:
39988- file->f_pos = debug->buffer_len - offset;
39989+ file->f_pos = debug->buffer_len + offset;
39990 break;
39991 default:
39992 return -EINVAL;
39993diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
39994index 8cffcdf..aadf043 100644
39995--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
39996+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
39997@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
39998 */
39999 struct l2t_skb_cb {
40000 arp_failure_handler_func arp_failure_handler;
40001-};
40002+} __no_const;
40003
40004 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
40005
40006diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
40007index 4c83003..2a2a5b9 100644
40008--- a/drivers/net/ethernet/dec/tulip/de4x5.c
40009+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
40010@@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
40011 for (i=0; i<ETH_ALEN; i++) {
40012 tmp.addr[i] = dev->dev_addr[i];
40013 }
40014- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
40015+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
40016 break;
40017
40018 case DE4X5_SET_HWADDR: /* Set the hardware address */
40019@@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
40020 spin_lock_irqsave(&lp->lock, flags);
40021 memcpy(&statbuf, &lp->pktStats, ioc->len);
40022 spin_unlock_irqrestore(&lp->lock, flags);
40023- if (copy_to_user(ioc->data, &statbuf, ioc->len))
40024+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
40025 return -EFAULT;
40026 break;
40027 }
40028diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
40029index 2886c9b..db71673 100644
40030--- a/drivers/net/ethernet/emulex/benet/be_main.c
40031+++ b/drivers/net/ethernet/emulex/benet/be_main.c
40032@@ -455,7 +455,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
40033
40034 if (wrapped)
40035 newacc += 65536;
40036- ACCESS_ONCE(*acc) = newacc;
40037+ ACCESS_ONCE_RW(*acc) = newacc;
40038 }
40039
40040 void be_parse_stats(struct be_adapter *adapter)
40041diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
40042index 7c361d1..57e3ff1 100644
40043--- a/drivers/net/ethernet/faraday/ftgmac100.c
40044+++ b/drivers/net/ethernet/faraday/ftgmac100.c
40045@@ -31,6 +31,8 @@
40046 #include <linux/netdevice.h>
40047 #include <linux/phy.h>
40048 #include <linux/platform_device.h>
40049+#include <linux/interrupt.h>
40050+#include <linux/irqreturn.h>
40051 #include <net/ip.h>
40052
40053 #include "ftgmac100.h"
40054diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
40055index b5ea8fb..bd25e9a 100644
40056--- a/drivers/net/ethernet/faraday/ftmac100.c
40057+++ b/drivers/net/ethernet/faraday/ftmac100.c
40058@@ -31,6 +31,8 @@
40059 #include <linux/module.h>
40060 #include <linux/netdevice.h>
40061 #include <linux/platform_device.h>
40062+#include <linux/interrupt.h>
40063+#include <linux/irqreturn.h>
40064
40065 #include "ftmac100.h"
40066
40067diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
40068index 331987d..3be1135 100644
40069--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
40070+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
40071@@ -776,7 +776,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
40072 }
40073
40074 /* update the base incval used to calculate frequency adjustment */
40075- ACCESS_ONCE(adapter->base_incval) = incval;
40076+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
40077 smp_mb();
40078
40079 /* need lock to prevent incorrect read while modifying cyclecounter */
40080diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
40081index fbe5363..266b4e3 100644
40082--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
40083+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
40084@@ -3461,7 +3461,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
40085 struct __vxge_hw_fifo *fifo;
40086 struct vxge_hw_fifo_config *config;
40087 u32 txdl_size, txdl_per_memblock;
40088- struct vxge_hw_mempool_cbs fifo_mp_callback;
40089+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
40090+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
40091+ };
40092+
40093 struct __vxge_hw_virtualpath *vpath;
40094
40095 if ((vp == NULL) || (attr == NULL)) {
40096@@ -3544,8 +3547,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
40097 goto exit;
40098 }
40099
40100- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
40101-
40102 fifo->mempool =
40103 __vxge_hw_mempool_create(vpath->hldev,
40104 fifo->config->memblock_size,
40105diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
40106index 5c033f2..7bbb0d8 100644
40107--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
40108+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
40109@@ -1894,7 +1894,9 @@ int qlcnic_83xx_config_default_opmode(struct qlcnic_adapter *adapter)
40110 op_mode = QLCRDX(ahw, QLC_83XX_DRV_OP_MODE);
40111
40112 if (op_mode == QLC_83XX_DEFAULT_OPMODE) {
40113- adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
40114+ pax_open_kernel();
40115+ *(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
40116+ pax_close_kernel();
40117 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
40118 } else {
40119 return -EIO;
40120diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
40121index b0c3de9..fc5857e 100644
40122--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
40123+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
40124@@ -200,15 +200,21 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
40125 if (priv_level == QLCNIC_NON_PRIV_FUNC) {
40126 ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
40127 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
40128- nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
40129+ pax_open_kernel();
40130+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
40131+ pax_close_kernel();
40132 } else if (priv_level == QLCNIC_PRIV_FUNC) {
40133 ahw->op_mode = QLCNIC_PRIV_FUNC;
40134 ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
40135- nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
40136+ pax_open_kernel();
40137+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
40138+ pax_close_kernel();
40139 } else if (priv_level == QLCNIC_MGMT_FUNC) {
40140 ahw->op_mode = QLCNIC_MGMT_FUNC;
40141 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
40142- nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
40143+ pax_open_kernel();
40144+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
40145+ pax_close_kernel();
40146 } else {
40147 return -EIO;
40148 }
40149diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
40150index 15ba8c4..3f56838 100644
40151--- a/drivers/net/ethernet/realtek/r8169.c
40152+++ b/drivers/net/ethernet/realtek/r8169.c
40153@@ -740,22 +740,22 @@ struct rtl8169_private {
40154 struct mdio_ops {
40155 void (*write)(struct rtl8169_private *, int, int);
40156 int (*read)(struct rtl8169_private *, int);
40157- } mdio_ops;
40158+ } __no_const mdio_ops;
40159
40160 struct pll_power_ops {
40161 void (*down)(struct rtl8169_private *);
40162 void (*up)(struct rtl8169_private *);
40163- } pll_power_ops;
40164+ } __no_const pll_power_ops;
40165
40166 struct jumbo_ops {
40167 void (*enable)(struct rtl8169_private *);
40168 void (*disable)(struct rtl8169_private *);
40169- } jumbo_ops;
40170+ } __no_const jumbo_ops;
40171
40172 struct csi_ops {
40173 void (*write)(struct rtl8169_private *, int, int);
40174 u32 (*read)(struct rtl8169_private *, int);
40175- } csi_ops;
40176+ } __no_const csi_ops;
40177
40178 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
40179 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
40180diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
40181index 3f93624..cf01144 100644
40182--- a/drivers/net/ethernet/sfc/ptp.c
40183+++ b/drivers/net/ethernet/sfc/ptp.c
40184@@ -553,7 +553,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
40185 (u32)((u64)ptp->start.dma_addr >> 32));
40186
40187 /* Clear flag that signals MC ready */
40188- ACCESS_ONCE(*start) = 0;
40189+ ACCESS_ONCE_RW(*start) = 0;
40190 efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
40191 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
40192
40193diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
40194index 50617c5..b13724c 100644
40195--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
40196+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
40197@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
40198
40199 writel(value, ioaddr + MMC_CNTRL);
40200
40201- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
40202- MMC_CNTRL, value);
40203+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
40204+// MMC_CNTRL, value);
40205 }
40206
40207 /* To mask all all interrupts.*/
40208diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
40209index e6fe0d8..2b7d752 100644
40210--- a/drivers/net/hyperv/hyperv_net.h
40211+++ b/drivers/net/hyperv/hyperv_net.h
40212@@ -101,7 +101,7 @@ struct rndis_device {
40213
40214 enum rndis_device_state state;
40215 bool link_state;
40216- atomic_t new_req_id;
40217+ atomic_unchecked_t new_req_id;
40218
40219 spinlock_t request_lock;
40220 struct list_head req_list;
40221diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
40222index 0775f0a..d4fb316 100644
40223--- a/drivers/net/hyperv/rndis_filter.c
40224+++ b/drivers/net/hyperv/rndis_filter.c
40225@@ -104,7 +104,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
40226 * template
40227 */
40228 set = &rndis_msg->msg.set_req;
40229- set->req_id = atomic_inc_return(&dev->new_req_id);
40230+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
40231
40232 /* Add to the request list */
40233 spin_lock_irqsave(&dev->request_lock, flags);
40234@@ -752,7 +752,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
40235
40236 /* Setup the rndis set */
40237 halt = &request->request_msg.msg.halt_req;
40238- halt->req_id = atomic_inc_return(&dev->new_req_id);
40239+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
40240
40241 /* Ignore return since this msg is optional. */
40242 rndis_filter_send_request(dev, request);
40243diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
40244index 8f1c256..a2991d1 100644
40245--- a/drivers/net/ieee802154/fakehard.c
40246+++ b/drivers/net/ieee802154/fakehard.c
40247@@ -385,7 +385,7 @@ static int ieee802154fake_probe(struct platform_device *pdev)
40248 phy->transmit_power = 0xbf;
40249
40250 dev->netdev_ops = &fake_ops;
40251- dev->ml_priv = &fake_mlme;
40252+ dev->ml_priv = (void *)&fake_mlme;
40253
40254 priv = netdev_priv(dev);
40255 priv->phy = phy;
40256diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
40257index 011062e..ada88e9 100644
40258--- a/drivers/net/macvlan.c
40259+++ b/drivers/net/macvlan.c
40260@@ -892,13 +892,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
40261 int macvlan_link_register(struct rtnl_link_ops *ops)
40262 {
40263 /* common fields */
40264- ops->priv_size = sizeof(struct macvlan_dev);
40265- ops->validate = macvlan_validate;
40266- ops->maxtype = IFLA_MACVLAN_MAX;
40267- ops->policy = macvlan_policy;
40268- ops->changelink = macvlan_changelink;
40269- ops->get_size = macvlan_get_size;
40270- ops->fill_info = macvlan_fill_info;
40271+ pax_open_kernel();
40272+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
40273+ *(void **)&ops->validate = macvlan_validate;
40274+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
40275+ *(const void **)&ops->policy = macvlan_policy;
40276+ *(void **)&ops->changelink = macvlan_changelink;
40277+ *(void **)&ops->get_size = macvlan_get_size;
40278+ *(void **)&ops->fill_info = macvlan_fill_info;
40279+ pax_close_kernel();
40280
40281 return rtnl_link_register(ops);
40282 };
40283@@ -954,7 +956,7 @@ static int macvlan_device_event(struct notifier_block *unused,
40284 return NOTIFY_DONE;
40285 }
40286
40287-static struct notifier_block macvlan_notifier_block __read_mostly = {
40288+static struct notifier_block macvlan_notifier_block = {
40289 .notifier_call = macvlan_device_event,
40290 };
40291
40292diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
40293index a449439..1e468fe 100644
40294--- a/drivers/net/macvtap.c
40295+++ b/drivers/net/macvtap.c
40296@@ -1090,7 +1090,7 @@ static int macvtap_device_event(struct notifier_block *unused,
40297 return NOTIFY_DONE;
40298 }
40299
40300-static struct notifier_block macvtap_notifier_block __read_mostly = {
40301+static struct notifier_block macvtap_notifier_block = {
40302 .notifier_call = macvtap_device_event,
40303 };
40304
40305diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
40306index daec9b0..6428fcb 100644
40307--- a/drivers/net/phy/mdio-bitbang.c
40308+++ b/drivers/net/phy/mdio-bitbang.c
40309@@ -234,6 +234,7 @@ void free_mdio_bitbang(struct mii_bus *bus)
40310 struct mdiobb_ctrl *ctrl = bus->priv;
40311
40312 module_put(ctrl->ops->owner);
40313+ mdiobus_unregister(bus);
40314 mdiobus_free(bus);
40315 }
40316 EXPORT_SYMBOL(free_mdio_bitbang);
40317diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
40318index 72ff14b..11d442d 100644
40319--- a/drivers/net/ppp/ppp_generic.c
40320+++ b/drivers/net/ppp/ppp_generic.c
40321@@ -999,7 +999,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
40322 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
40323 struct ppp_stats stats;
40324 struct ppp_comp_stats cstats;
40325- char *vers;
40326
40327 switch (cmd) {
40328 case SIOCGPPPSTATS:
40329@@ -1021,8 +1020,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
40330 break;
40331
40332 case SIOCGPPPVER:
40333- vers = PPP_VERSION;
40334- if (copy_to_user(addr, vers, strlen(vers) + 1))
40335+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
40336 break;
40337 err = 0;
40338 break;
40339diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
40340index 1252d9c..80e660b 100644
40341--- a/drivers/net/slip/slhc.c
40342+++ b/drivers/net/slip/slhc.c
40343@@ -488,7 +488,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
40344 register struct tcphdr *thp;
40345 register struct iphdr *ip;
40346 register struct cstate *cs;
40347- int len, hdrlen;
40348+ long len, hdrlen;
40349 unsigned char *cp = icp;
40350
40351 /* We've got a compressed packet; read the change byte */
40352diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
40353index bf34192..fba3500 100644
40354--- a/drivers/net/team/team.c
40355+++ b/drivers/net/team/team.c
40356@@ -2668,7 +2668,7 @@ static int team_device_event(struct notifier_block *unused,
40357 return NOTIFY_DONE;
40358 }
40359
40360-static struct notifier_block team_notifier_block __read_mostly = {
40361+static struct notifier_block team_notifier_block = {
40362 .notifier_call = team_device_event,
40363 };
40364
40365diff --git a/drivers/net/tun.c b/drivers/net/tun.c
40366index 755fa9e..631fdce 100644
40367--- a/drivers/net/tun.c
40368+++ b/drivers/net/tun.c
40369@@ -1841,7 +1841,7 @@ unlock:
40370 }
40371
40372 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
40373- unsigned long arg, int ifreq_len)
40374+ unsigned long arg, size_t ifreq_len)
40375 {
40376 struct tun_file *tfile = file->private_data;
40377 struct tun_struct *tun;
40378@@ -1853,6 +1853,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
40379 int vnet_hdr_sz;
40380 int ret;
40381
40382+ if (ifreq_len > sizeof ifr)
40383+ return -EFAULT;
40384+
40385 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
40386 if (copy_from_user(&ifr, argp, ifreq_len))
40387 return -EFAULT;
40388diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
40389index e2dd324..be92fcf 100644
40390--- a/drivers/net/usb/hso.c
40391+++ b/drivers/net/usb/hso.c
40392@@ -71,7 +71,7 @@
40393 #include <asm/byteorder.h>
40394 #include <linux/serial_core.h>
40395 #include <linux/serial.h>
40396-
40397+#include <asm/local.h>
40398
40399 #define MOD_AUTHOR "Option Wireless"
40400 #define MOD_DESCRIPTION "USB High Speed Option driver"
40401@@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
40402 struct urb *urb;
40403
40404 urb = serial->rx_urb[0];
40405- if (serial->port.count > 0) {
40406+ if (atomic_read(&serial->port.count) > 0) {
40407 count = put_rxbuf_data(urb, serial);
40408 if (count == -1)
40409 return;
40410@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
40411 DUMP1(urb->transfer_buffer, urb->actual_length);
40412
40413 /* Anyone listening? */
40414- if (serial->port.count == 0)
40415+ if (atomic_read(&serial->port.count) == 0)
40416 return;
40417
40418 if (status == 0) {
40419@@ -1298,8 +1298,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
40420 tty_port_tty_set(&serial->port, tty);
40421
40422 /* check for port already opened, if not set the termios */
40423- serial->port.count++;
40424- if (serial->port.count == 1) {
40425+ if (atomic_inc_return(&serial->port.count) == 1) {
40426 serial->rx_state = RX_IDLE;
40427 /* Force default termio settings */
40428 _hso_serial_set_termios(tty, NULL);
40429@@ -1311,7 +1310,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
40430 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
40431 if (result) {
40432 hso_stop_serial_device(serial->parent);
40433- serial->port.count--;
40434+ atomic_dec(&serial->port.count);
40435 kref_put(&serial->parent->ref, hso_serial_ref_free);
40436 }
40437 } else {
40438@@ -1348,10 +1347,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
40439
40440 /* reset the rts and dtr */
40441 /* do the actual close */
40442- serial->port.count--;
40443+ atomic_dec(&serial->port.count);
40444
40445- if (serial->port.count <= 0) {
40446- serial->port.count = 0;
40447+ if (atomic_read(&serial->port.count) <= 0) {
40448+ atomic_set(&serial->port.count, 0);
40449 tty_port_tty_set(&serial->port, NULL);
40450 if (!usb_gone)
40451 hso_stop_serial_device(serial->parent);
40452@@ -1427,7 +1426,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
40453
40454 /* the actual setup */
40455 spin_lock_irqsave(&serial->serial_lock, flags);
40456- if (serial->port.count)
40457+ if (atomic_read(&serial->port.count))
40458 _hso_serial_set_termios(tty, old);
40459 else
40460 tty->termios = *old;
40461@@ -1886,7 +1885,7 @@ static void intr_callback(struct urb *urb)
40462 D1("Pending read interrupt on port %d\n", i);
40463 spin_lock(&serial->serial_lock);
40464 if (serial->rx_state == RX_IDLE &&
40465- serial->port.count > 0) {
40466+ atomic_read(&serial->port.count) > 0) {
40467 /* Setup and send a ctrl req read on
40468 * port i */
40469 if (!serial->rx_urb_filled[0]) {
40470@@ -3066,7 +3065,7 @@ static int hso_resume(struct usb_interface *iface)
40471 /* Start all serial ports */
40472 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
40473 if (serial_table[i] && (serial_table[i]->interface == iface)) {
40474- if (dev2ser(serial_table[i])->port.count) {
40475+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
40476 result =
40477 hso_start_serial_device(serial_table[i], GFP_NOIO);
40478 hso_kick_transmit(dev2ser(serial_table[i]));
40479diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
40480index 7cee7a3..1eb9f3b 100644
40481--- a/drivers/net/vxlan.c
40482+++ b/drivers/net/vxlan.c
40483@@ -1443,7 +1443,7 @@ nla_put_failure:
40484 return -EMSGSIZE;
40485 }
40486
40487-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
40488+static struct rtnl_link_ops vxlan_link_ops = {
40489 .kind = "vxlan",
40490 .maxtype = IFLA_VXLAN_MAX,
40491 .policy = vxlan_policy,
40492diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
40493index 5ac5f7a..5f82012 100644
40494--- a/drivers/net/wireless/at76c50x-usb.c
40495+++ b/drivers/net/wireless/at76c50x-usb.c
40496@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
40497 }
40498
40499 /* Convert timeout from the DFU status to jiffies */
40500-static inline unsigned long at76_get_timeout(struct dfu_status *s)
40501+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
40502 {
40503 return msecs_to_jiffies((s->poll_timeout[2] << 16)
40504 | (s->poll_timeout[1] << 8)
40505diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
40506index 8d78253..bebbb68 100644
40507--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
40508+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
40509@@ -184,8 +184,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40510 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
40511 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
40512
40513- ACCESS_ONCE(ads->ds_link) = i->link;
40514- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
40515+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
40516+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
40517
40518 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
40519 ctl6 = SM(i->keytype, AR_EncrType);
40520@@ -199,26 +199,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40521
40522 if ((i->is_first || i->is_last) &&
40523 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
40524- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
40525+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
40526 | set11nTries(i->rates, 1)
40527 | set11nTries(i->rates, 2)
40528 | set11nTries(i->rates, 3)
40529 | (i->dur_update ? AR_DurUpdateEna : 0)
40530 | SM(0, AR_BurstDur);
40531
40532- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
40533+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
40534 | set11nRate(i->rates, 1)
40535 | set11nRate(i->rates, 2)
40536 | set11nRate(i->rates, 3);
40537 } else {
40538- ACCESS_ONCE(ads->ds_ctl2) = 0;
40539- ACCESS_ONCE(ads->ds_ctl3) = 0;
40540+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
40541+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
40542 }
40543
40544 if (!i->is_first) {
40545- ACCESS_ONCE(ads->ds_ctl0) = 0;
40546- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
40547- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
40548+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
40549+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
40550+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
40551 return;
40552 }
40553
40554@@ -243,7 +243,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40555 break;
40556 }
40557
40558- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
40559+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
40560 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
40561 | SM(i->txpower, AR_XmitPower)
40562 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
40563@@ -253,19 +253,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40564 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
40565 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
40566
40567- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
40568- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
40569+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
40570+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
40571
40572 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
40573 return;
40574
40575- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
40576+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
40577 | set11nPktDurRTSCTS(i->rates, 1);
40578
40579- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
40580+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
40581 | set11nPktDurRTSCTS(i->rates, 3);
40582
40583- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
40584+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
40585 | set11nRateFlags(i->rates, 1)
40586 | set11nRateFlags(i->rates, 2)
40587 | set11nRateFlags(i->rates, 3)
40588diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
40589index 301bf72..3f5654f 100644
40590--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
40591+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
40592@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40593 (i->qcu << AR_TxQcuNum_S) | desc_len;
40594
40595 checksum += val;
40596- ACCESS_ONCE(ads->info) = val;
40597+ ACCESS_ONCE_RW(ads->info) = val;
40598
40599 checksum += i->link;
40600- ACCESS_ONCE(ads->link) = i->link;
40601+ ACCESS_ONCE_RW(ads->link) = i->link;
40602
40603 checksum += i->buf_addr[0];
40604- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
40605+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
40606 checksum += i->buf_addr[1];
40607- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
40608+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
40609 checksum += i->buf_addr[2];
40610- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
40611+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
40612 checksum += i->buf_addr[3];
40613- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
40614+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
40615
40616 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
40617- ACCESS_ONCE(ads->ctl3) = val;
40618+ ACCESS_ONCE_RW(ads->ctl3) = val;
40619 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
40620- ACCESS_ONCE(ads->ctl5) = val;
40621+ ACCESS_ONCE_RW(ads->ctl5) = val;
40622 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
40623- ACCESS_ONCE(ads->ctl7) = val;
40624+ ACCESS_ONCE_RW(ads->ctl7) = val;
40625 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
40626- ACCESS_ONCE(ads->ctl9) = val;
40627+ ACCESS_ONCE_RW(ads->ctl9) = val;
40628
40629 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
40630- ACCESS_ONCE(ads->ctl10) = checksum;
40631+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
40632
40633 if (i->is_first || i->is_last) {
40634- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
40635+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
40636 | set11nTries(i->rates, 1)
40637 | set11nTries(i->rates, 2)
40638 | set11nTries(i->rates, 3)
40639 | (i->dur_update ? AR_DurUpdateEna : 0)
40640 | SM(0, AR_BurstDur);
40641
40642- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
40643+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
40644 | set11nRate(i->rates, 1)
40645 | set11nRate(i->rates, 2)
40646 | set11nRate(i->rates, 3);
40647 } else {
40648- ACCESS_ONCE(ads->ctl13) = 0;
40649- ACCESS_ONCE(ads->ctl14) = 0;
40650+ ACCESS_ONCE_RW(ads->ctl13) = 0;
40651+ ACCESS_ONCE_RW(ads->ctl14) = 0;
40652 }
40653
40654 ads->ctl20 = 0;
40655@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40656
40657 ctl17 = SM(i->keytype, AR_EncrType);
40658 if (!i->is_first) {
40659- ACCESS_ONCE(ads->ctl11) = 0;
40660- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
40661- ACCESS_ONCE(ads->ctl15) = 0;
40662- ACCESS_ONCE(ads->ctl16) = 0;
40663- ACCESS_ONCE(ads->ctl17) = ctl17;
40664- ACCESS_ONCE(ads->ctl18) = 0;
40665- ACCESS_ONCE(ads->ctl19) = 0;
40666+ ACCESS_ONCE_RW(ads->ctl11) = 0;
40667+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
40668+ ACCESS_ONCE_RW(ads->ctl15) = 0;
40669+ ACCESS_ONCE_RW(ads->ctl16) = 0;
40670+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
40671+ ACCESS_ONCE_RW(ads->ctl18) = 0;
40672+ ACCESS_ONCE_RW(ads->ctl19) = 0;
40673 return;
40674 }
40675
40676- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
40677+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
40678 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
40679 | SM(i->txpower, AR_XmitPower)
40680 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
40681@@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40682 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
40683 ctl12 |= SM(val, AR_PAPRDChainMask);
40684
40685- ACCESS_ONCE(ads->ctl12) = ctl12;
40686- ACCESS_ONCE(ads->ctl17) = ctl17;
40687+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
40688+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
40689
40690- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
40691+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
40692 | set11nPktDurRTSCTS(i->rates, 1);
40693
40694- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
40695+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
40696 | set11nPktDurRTSCTS(i->rates, 3);
40697
40698- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
40699+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
40700 | set11nRateFlags(i->rates, 1)
40701 | set11nRateFlags(i->rates, 2)
40702 | set11nRateFlags(i->rates, 3)
40703 | SM(i->rtscts_rate, AR_RTSCTSRate);
40704
40705- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
40706+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
40707 }
40708
40709 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
40710diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
40711index 784e81c..349e01e 100644
40712--- a/drivers/net/wireless/ath/ath9k/hw.h
40713+++ b/drivers/net/wireless/ath/ath9k/hw.h
40714@@ -653,7 +653,7 @@ struct ath_hw_private_ops {
40715
40716 /* ANI */
40717 void (*ani_cache_ini_regs)(struct ath_hw *ah);
40718-};
40719+} __no_const;
40720
40721 /**
40722 * struct ath_spec_scan - parameters for Atheros spectral scan
40723@@ -722,7 +722,7 @@ struct ath_hw_ops {
40724 struct ath_spec_scan *param);
40725 void (*spectral_scan_trigger)(struct ath_hw *ah);
40726 void (*spectral_scan_wait)(struct ath_hw *ah);
40727-};
40728+} __no_const;
40729
40730 struct ath_nf_limits {
40731 s16 max;
40732diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
40733index c353b5f..62aaca2 100644
40734--- a/drivers/net/wireless/iwlegacy/3945-mac.c
40735+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
40736@@ -3639,7 +3639,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
40737 */
40738 if (il3945_mod_params.disable_hw_scan) {
40739 D_INFO("Disabling hw_scan\n");
40740- il3945_mac_ops.hw_scan = NULL;
40741+ pax_open_kernel();
40742+ *(void **)&il3945_mac_ops.hw_scan = NULL;
40743+ pax_close_kernel();
40744 }
40745
40746 D_INFO("*** LOAD DRIVER ***\n");
40747diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
40748index 81d4071..f2071ea 100644
40749--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
40750+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
40751@@ -203,7 +203,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
40752 {
40753 struct iwl_priv *priv = file->private_data;
40754 char buf[64];
40755- int buf_size;
40756+ size_t buf_size;
40757 u32 offset, len;
40758
40759 memset(buf, 0, sizeof(buf));
40760@@ -473,7 +473,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
40761 struct iwl_priv *priv = file->private_data;
40762
40763 char buf[8];
40764- int buf_size;
40765+ size_t buf_size;
40766 u32 reset_flag;
40767
40768 memset(buf, 0, sizeof(buf));
40769@@ -554,7 +554,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
40770 {
40771 struct iwl_priv *priv = file->private_data;
40772 char buf[8];
40773- int buf_size;
40774+ size_t buf_size;
40775 int ht40;
40776
40777 memset(buf, 0, sizeof(buf));
40778@@ -606,7 +606,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
40779 {
40780 struct iwl_priv *priv = file->private_data;
40781 char buf[8];
40782- int buf_size;
40783+ size_t buf_size;
40784 int value;
40785
40786 memset(buf, 0, sizeof(buf));
40787@@ -1871,7 +1871,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
40788 {
40789 struct iwl_priv *priv = file->private_data;
40790 char buf[8];
40791- int buf_size;
40792+ size_t buf_size;
40793 int clear;
40794
40795 memset(buf, 0, sizeof(buf));
40796@@ -1916,7 +1916,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
40797 {
40798 struct iwl_priv *priv = file->private_data;
40799 char buf[8];
40800- int buf_size;
40801+ size_t buf_size;
40802 int trace;
40803
40804 memset(buf, 0, sizeof(buf));
40805@@ -1987,7 +1987,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
40806 {
40807 struct iwl_priv *priv = file->private_data;
40808 char buf[8];
40809- int buf_size;
40810+ size_t buf_size;
40811 int missed;
40812
40813 memset(buf, 0, sizeof(buf));
40814@@ -2028,7 +2028,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
40815
40816 struct iwl_priv *priv = file->private_data;
40817 char buf[8];
40818- int buf_size;
40819+ size_t buf_size;
40820 int plcp;
40821
40822 memset(buf, 0, sizeof(buf));
40823@@ -2088,7 +2088,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
40824
40825 struct iwl_priv *priv = file->private_data;
40826 char buf[8];
40827- int buf_size;
40828+ size_t buf_size;
40829 int flush;
40830
40831 memset(buf, 0, sizeof(buf));
40832@@ -2178,7 +2178,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
40833
40834 struct iwl_priv *priv = file->private_data;
40835 char buf[8];
40836- int buf_size;
40837+ size_t buf_size;
40838 int rts;
40839
40840 if (!priv->cfg->ht_params)
40841@@ -2220,7 +2220,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
40842 {
40843 struct iwl_priv *priv = file->private_data;
40844 char buf[8];
40845- int buf_size;
40846+ size_t buf_size;
40847
40848 memset(buf, 0, sizeof(buf));
40849 buf_size = min(count, sizeof(buf) - 1);
40850@@ -2256,7 +2256,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
40851 struct iwl_priv *priv = file->private_data;
40852 u32 event_log_flag;
40853 char buf[8];
40854- int buf_size;
40855+ size_t buf_size;
40856
40857 /* check that the interface is up */
40858 if (!iwl_is_ready(priv))
40859@@ -2310,7 +2310,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
40860 struct iwl_priv *priv = file->private_data;
40861 char buf[8];
40862 u32 calib_disabled;
40863- int buf_size;
40864+ size_t buf_size;
40865
40866 memset(buf, 0, sizeof(buf));
40867 buf_size = min(count, sizeof(buf) - 1);
40868diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
40869index 12c4f31..484d948 100644
40870--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
40871+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
40872@@ -1328,7 +1328,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
40873 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
40874
40875 char buf[8];
40876- int buf_size;
40877+ size_t buf_size;
40878 u32 reset_flag;
40879
40880 memset(buf, 0, sizeof(buf));
40881@@ -1349,7 +1349,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
40882 {
40883 struct iwl_trans *trans = file->private_data;
40884 char buf[8];
40885- int buf_size;
40886+ size_t buf_size;
40887 int csr;
40888
40889 memset(buf, 0, sizeof(buf));
40890diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
40891index 2b49f48..14fc244 100644
40892--- a/drivers/net/wireless/mac80211_hwsim.c
40893+++ b/drivers/net/wireless/mac80211_hwsim.c
40894@@ -2143,25 +2143,19 @@ static int __init init_mac80211_hwsim(void)
40895
40896 if (channels > 1) {
40897 hwsim_if_comb.num_different_channels = channels;
40898- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
40899- mac80211_hwsim_ops.cancel_hw_scan =
40900- mac80211_hwsim_cancel_hw_scan;
40901- mac80211_hwsim_ops.sw_scan_start = NULL;
40902- mac80211_hwsim_ops.sw_scan_complete = NULL;
40903- mac80211_hwsim_ops.remain_on_channel =
40904- mac80211_hwsim_roc;
40905- mac80211_hwsim_ops.cancel_remain_on_channel =
40906- mac80211_hwsim_croc;
40907- mac80211_hwsim_ops.add_chanctx =
40908- mac80211_hwsim_add_chanctx;
40909- mac80211_hwsim_ops.remove_chanctx =
40910- mac80211_hwsim_remove_chanctx;
40911- mac80211_hwsim_ops.change_chanctx =
40912- mac80211_hwsim_change_chanctx;
40913- mac80211_hwsim_ops.assign_vif_chanctx =
40914- mac80211_hwsim_assign_vif_chanctx;
40915- mac80211_hwsim_ops.unassign_vif_chanctx =
40916- mac80211_hwsim_unassign_vif_chanctx;
40917+ pax_open_kernel();
40918+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
40919+ *(void **)&mac80211_hwsim_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
40920+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
40921+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
40922+ *(void **)&mac80211_hwsim_ops.remain_on_channel = mac80211_hwsim_roc;
40923+ *(void **)&mac80211_hwsim_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
40924+ *(void **)&mac80211_hwsim_ops.add_chanctx = mac80211_hwsim_add_chanctx;
40925+ *(void **)&mac80211_hwsim_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
40926+ *(void **)&mac80211_hwsim_ops.change_chanctx = mac80211_hwsim_change_chanctx;
40927+ *(void **)&mac80211_hwsim_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
40928+ *(void **)&mac80211_hwsim_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
40929+ pax_close_kernel();
40930 }
40931
40932 spin_lock_init(&hwsim_radio_lock);
40933diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c
40934index 753b568..a5f9875 100644
40935--- a/drivers/net/wireless/mwifiex/debugfs.c
40936+++ b/drivers/net/wireless/mwifiex/debugfs.c
40937@@ -26,10 +26,17 @@
40938 static struct dentry *mwifiex_dfs_dir;
40939
40940 static char *bss_modes[] = {
40941- "Unknown",
40942- "Ad-hoc",
40943- "Managed",
40944- "Auto"
40945+ "UNSPECIFIED",
40946+ "ADHOC",
40947+ "STATION",
40948+ "AP",
40949+ "AP_VLAN",
40950+ "WDS",
40951+ "MONITOR",
40952+ "MESH_POINT",
40953+ "P2P_CLIENT",
40954+ "P2P_GO",
40955+ "P2P_DEVICE",
40956 };
40957
40958 /* size/addr for mwifiex_debug_info */
40959@@ -200,7 +207,12 @@ mwifiex_info_read(struct file *file, char __user *ubuf,
40960 p += sprintf(p, "driver_version = %s", fmt);
40961 p += sprintf(p, "\nverext = %s", priv->version_str);
40962 p += sprintf(p, "\ninterface_name=\"%s\"\n", netdev->name);
40963- p += sprintf(p, "bss_mode=\"%s\"\n", bss_modes[info.bss_mode]);
40964+
40965+ if (info.bss_mode >= ARRAY_SIZE(bss_modes))
40966+ p += sprintf(p, "bss_mode=\"%d\"\n", info.bss_mode);
40967+ else
40968+ p += sprintf(p, "bss_mode=\"%s\"\n", bss_modes[info.bss_mode]);
40969+
40970 p += sprintf(p, "media_state=\"%s\"\n",
40971 (!priv->media_connected ? "Disconnected" : "Connected"));
40972 p += sprintf(p, "mac_address=\"%pM\"\n", netdev->dev_addr);
40973diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
40974index 525fd75..6c9f791 100644
40975--- a/drivers/net/wireless/rndis_wlan.c
40976+++ b/drivers/net/wireless/rndis_wlan.c
40977@@ -1238,7 +1238,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
40978
40979 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
40980
40981- if (rts_threshold < 0 || rts_threshold > 2347)
40982+ if (rts_threshold > 2347)
40983 rts_threshold = 2347;
40984
40985 tmp = cpu_to_le32(rts_threshold);
40986diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
40987index 086abb4..8279c30 100644
40988--- a/drivers/net/wireless/rt2x00/rt2x00.h
40989+++ b/drivers/net/wireless/rt2x00/rt2x00.h
40990@@ -396,7 +396,7 @@ struct rt2x00_intf {
40991 * for hardware which doesn't support hardware
40992 * sequence counting.
40993 */
40994- atomic_t seqno;
40995+ atomic_unchecked_t seqno;
40996 };
40997
40998 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
40999diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
41000index 4d91795..62fccff 100644
41001--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
41002+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
41003@@ -251,9 +251,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
41004 * sequence counter given by mac80211.
41005 */
41006 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
41007- seqno = atomic_add_return(0x10, &intf->seqno);
41008+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
41009 else
41010- seqno = atomic_read(&intf->seqno);
41011+ seqno = atomic_read_unchecked(&intf->seqno);
41012
41013 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
41014 hdr->seq_ctrl |= cpu_to_le16(seqno);
41015diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
41016index e57ee48..541cf6c 100644
41017--- a/drivers/net/wireless/ti/wl1251/sdio.c
41018+++ b/drivers/net/wireless/ti/wl1251/sdio.c
41019@@ -269,13 +269,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
41020
41021 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
41022
41023- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
41024- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
41025+ pax_open_kernel();
41026+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
41027+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
41028+ pax_close_kernel();
41029
41030 wl1251_info("using dedicated interrupt line");
41031 } else {
41032- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
41033- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
41034+ pax_open_kernel();
41035+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
41036+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
41037+ pax_close_kernel();
41038
41039 wl1251_info("using SDIO interrupt");
41040 }
41041diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
41042index 09694e3..24ccec7 100644
41043--- a/drivers/net/wireless/ti/wl12xx/main.c
41044+++ b/drivers/net/wireless/ti/wl12xx/main.c
41045@@ -656,7 +656,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
41046 sizeof(wl->conf.mem));
41047
41048 /* read data preparation is only needed by wl127x */
41049- wl->ops->prepare_read = wl127x_prepare_read;
41050+ pax_open_kernel();
41051+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
41052+ pax_close_kernel();
41053
41054 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
41055 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
41056@@ -681,7 +683,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
41057 sizeof(wl->conf.mem));
41058
41059 /* read data preparation is only needed by wl127x */
41060- wl->ops->prepare_read = wl127x_prepare_read;
41061+ pax_open_kernel();
41062+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
41063+ pax_close_kernel();
41064
41065 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
41066 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
41067diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
41068index da3ef1b..4790b95 100644
41069--- a/drivers/net/wireless/ti/wl18xx/main.c
41070+++ b/drivers/net/wireless/ti/wl18xx/main.c
41071@@ -1664,8 +1664,10 @@ static int wl18xx_setup(struct wl1271 *wl)
41072 }
41073
41074 if (!checksum_param) {
41075- wl18xx_ops.set_rx_csum = NULL;
41076- wl18xx_ops.init_vif = NULL;
41077+ pax_open_kernel();
41078+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
41079+ *(void **)&wl18xx_ops.init_vif = NULL;
41080+ pax_close_kernel();
41081 }
41082
41083 /* Enable 11a Band only if we have 5G antennas */
41084diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
41085index 7ef0b4a..ff65c28 100644
41086--- a/drivers/net/wireless/zd1211rw/zd_usb.c
41087+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
41088@@ -386,7 +386,7 @@ static inline void handle_regs_int(struct urb *urb)
41089 {
41090 struct zd_usb *usb = urb->context;
41091 struct zd_usb_interrupt *intr = &usb->intr;
41092- int len;
41093+ unsigned int len;
41094 u16 int_num;
41095
41096 ZD_ASSERT(in_interrupt());
41097diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
41098index d93b2b6..ae50401 100644
41099--- a/drivers/oprofile/buffer_sync.c
41100+++ b/drivers/oprofile/buffer_sync.c
41101@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
41102 if (cookie == NO_COOKIE)
41103 offset = pc;
41104 if (cookie == INVALID_COOKIE) {
41105- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
41106+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
41107 offset = pc;
41108 }
41109 if (cookie != last_cookie) {
41110@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
41111 /* add userspace sample */
41112
41113 if (!mm) {
41114- atomic_inc(&oprofile_stats.sample_lost_no_mm);
41115+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
41116 return 0;
41117 }
41118
41119 cookie = lookup_dcookie(mm, s->eip, &offset);
41120
41121 if (cookie == INVALID_COOKIE) {
41122- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
41123+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
41124 return 0;
41125 }
41126
41127@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
41128 /* ignore backtraces if failed to add a sample */
41129 if (state == sb_bt_start) {
41130 state = sb_bt_ignore;
41131- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
41132+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
41133 }
41134 }
41135 release_mm(mm);
41136diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
41137index c0cc4e7..44d4e54 100644
41138--- a/drivers/oprofile/event_buffer.c
41139+++ b/drivers/oprofile/event_buffer.c
41140@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
41141 }
41142
41143 if (buffer_pos == buffer_size) {
41144- atomic_inc(&oprofile_stats.event_lost_overflow);
41145+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
41146 return;
41147 }
41148
41149diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
41150index ed2c3ec..deda85a 100644
41151--- a/drivers/oprofile/oprof.c
41152+++ b/drivers/oprofile/oprof.c
41153@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
41154 if (oprofile_ops.switch_events())
41155 return;
41156
41157- atomic_inc(&oprofile_stats.multiplex_counter);
41158+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
41159 start_switch_worker();
41160 }
41161
41162diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
41163index 84a208d..d61b0a1 100644
41164--- a/drivers/oprofile/oprofile_files.c
41165+++ b/drivers/oprofile/oprofile_files.c
41166@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
41167
41168 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
41169
41170-static ssize_t timeout_read(struct file *file, char __user *buf,
41171+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
41172 size_t count, loff_t *offset)
41173 {
41174 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
41175diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
41176index 917d28e..d62d981 100644
41177--- a/drivers/oprofile/oprofile_stats.c
41178+++ b/drivers/oprofile/oprofile_stats.c
41179@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
41180 cpu_buf->sample_invalid_eip = 0;
41181 }
41182
41183- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
41184- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
41185- atomic_set(&oprofile_stats.event_lost_overflow, 0);
41186- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
41187- atomic_set(&oprofile_stats.multiplex_counter, 0);
41188+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
41189+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
41190+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
41191+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
41192+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
41193 }
41194
41195
41196diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
41197index 38b6fc0..b5cbfce 100644
41198--- a/drivers/oprofile/oprofile_stats.h
41199+++ b/drivers/oprofile/oprofile_stats.h
41200@@ -13,11 +13,11 @@
41201 #include <linux/atomic.h>
41202
41203 struct oprofile_stat_struct {
41204- atomic_t sample_lost_no_mm;
41205- atomic_t sample_lost_no_mapping;
41206- atomic_t bt_lost_no_mapping;
41207- atomic_t event_lost_overflow;
41208- atomic_t multiplex_counter;
41209+ atomic_unchecked_t sample_lost_no_mm;
41210+ atomic_unchecked_t sample_lost_no_mapping;
41211+ atomic_unchecked_t bt_lost_no_mapping;
41212+ atomic_unchecked_t event_lost_overflow;
41213+ atomic_unchecked_t multiplex_counter;
41214 };
41215
41216 extern struct oprofile_stat_struct oprofile_stats;
41217diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
41218index 7c12d9c..558bf3bb 100644
41219--- a/drivers/oprofile/oprofilefs.c
41220+++ b/drivers/oprofile/oprofilefs.c
41221@@ -190,7 +190,7 @@ static const struct file_operations atomic_ro_fops = {
41222
41223
41224 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
41225- char const *name, atomic_t *val)
41226+ char const *name, atomic_unchecked_t *val)
41227 {
41228 return __oprofilefs_create_file(sb, root, name,
41229 &atomic_ro_fops, 0444, val);
41230diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
41231index 93404f7..4a313d8 100644
41232--- a/drivers/oprofile/timer_int.c
41233+++ b/drivers/oprofile/timer_int.c
41234@@ -93,7 +93,7 @@ static int __cpuinit oprofile_cpu_notify(struct notifier_block *self,
41235 return NOTIFY_OK;
41236 }
41237
41238-static struct notifier_block __refdata oprofile_cpu_notifier = {
41239+static struct notifier_block oprofile_cpu_notifier = {
41240 .notifier_call = oprofile_cpu_notify,
41241 };
41242
41243diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
41244index 3f56bc0..707d642 100644
41245--- a/drivers/parport/procfs.c
41246+++ b/drivers/parport/procfs.c
41247@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
41248
41249 *ppos += len;
41250
41251- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
41252+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
41253 }
41254
41255 #ifdef CONFIG_PARPORT_1284
41256@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
41257
41258 *ppos += len;
41259
41260- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
41261+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
41262 }
41263 #endif /* IEEE1284.3 support. */
41264
41265diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
41266index c35e8ad..fc33beb 100644
41267--- a/drivers/pci/hotplug/acpiphp_ibm.c
41268+++ b/drivers/pci/hotplug/acpiphp_ibm.c
41269@@ -464,7 +464,9 @@ static int __init ibm_acpiphp_init(void)
41270 goto init_cleanup;
41271 }
41272
41273- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
41274+ pax_open_kernel();
41275+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
41276+ pax_close_kernel();
41277 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
41278
41279 return retval;
41280diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
41281index a6a71c4..c91097b 100644
41282--- a/drivers/pci/hotplug/cpcihp_generic.c
41283+++ b/drivers/pci/hotplug/cpcihp_generic.c
41284@@ -73,7 +73,6 @@ static u16 port;
41285 static unsigned int enum_bit;
41286 static u8 enum_mask;
41287
41288-static struct cpci_hp_controller_ops generic_hpc_ops;
41289 static struct cpci_hp_controller generic_hpc;
41290
41291 static int __init validate_parameters(void)
41292@@ -139,6 +138,10 @@ static int query_enum(void)
41293 return ((value & enum_mask) == enum_mask);
41294 }
41295
41296+static struct cpci_hp_controller_ops generic_hpc_ops = {
41297+ .query_enum = query_enum,
41298+};
41299+
41300 static int __init cpcihp_generic_init(void)
41301 {
41302 int status;
41303@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
41304 pci_dev_put(dev);
41305
41306 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
41307- generic_hpc_ops.query_enum = query_enum;
41308 generic_hpc.ops = &generic_hpc_ops;
41309
41310 status = cpci_hp_register_controller(&generic_hpc);
41311diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
41312index 449b4bb..257e2e8 100644
41313--- a/drivers/pci/hotplug/cpcihp_zt5550.c
41314+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
41315@@ -59,7 +59,6 @@
41316 /* local variables */
41317 static bool debug;
41318 static bool poll;
41319-static struct cpci_hp_controller_ops zt5550_hpc_ops;
41320 static struct cpci_hp_controller zt5550_hpc;
41321
41322 /* Primary cPCI bus bridge device */
41323@@ -205,6 +204,10 @@ static int zt5550_hc_disable_irq(void)
41324 return 0;
41325 }
41326
41327+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
41328+ .query_enum = zt5550_hc_query_enum,
41329+};
41330+
41331 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
41332 {
41333 int status;
41334@@ -216,16 +219,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
41335 dbg("returned from zt5550_hc_config");
41336
41337 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
41338- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
41339 zt5550_hpc.ops = &zt5550_hpc_ops;
41340 if(!poll) {
41341 zt5550_hpc.irq = hc_dev->irq;
41342 zt5550_hpc.irq_flags = IRQF_SHARED;
41343 zt5550_hpc.dev_id = hc_dev;
41344
41345- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
41346- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
41347- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
41348+ pax_open_kernel();
41349+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
41350+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
41351+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
41352+ pax_open_kernel();
41353 } else {
41354 info("using ENUM# polling mode");
41355 }
41356diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
41357index 76ba8a1..20ca857 100644
41358--- a/drivers/pci/hotplug/cpqphp_nvram.c
41359+++ b/drivers/pci/hotplug/cpqphp_nvram.c
41360@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
41361
41362 void compaq_nvram_init (void __iomem *rom_start)
41363 {
41364+
41365+#ifndef CONFIG_PAX_KERNEXEC
41366 if (rom_start) {
41367 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
41368 }
41369+#endif
41370+
41371 dbg("int15 entry = %p\n", compaq_int15_entry_point);
41372
41373 /* initialize our int15 lock */
41374diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
41375index 202f4a9..8ee47d0 100644
41376--- a/drivers/pci/hotplug/pci_hotplug_core.c
41377+++ b/drivers/pci/hotplug/pci_hotplug_core.c
41378@@ -448,8 +448,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
41379 return -EINVAL;
41380 }
41381
41382- slot->ops->owner = owner;
41383- slot->ops->mod_name = mod_name;
41384+ pax_open_kernel();
41385+ *(struct module **)&slot->ops->owner = owner;
41386+ *(const char **)&slot->ops->mod_name = mod_name;
41387+ pax_close_kernel();
41388
41389 mutex_lock(&pci_hp_mutex);
41390 /*
41391diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
41392index 7d72c5e..edce02c 100644
41393--- a/drivers/pci/hotplug/pciehp_core.c
41394+++ b/drivers/pci/hotplug/pciehp_core.c
41395@@ -91,7 +91,7 @@ static int init_slot(struct controller *ctrl)
41396 struct slot *slot = ctrl->slot;
41397 struct hotplug_slot *hotplug = NULL;
41398 struct hotplug_slot_info *info = NULL;
41399- struct hotplug_slot_ops *ops = NULL;
41400+ hotplug_slot_ops_no_const *ops = NULL;
41401 char name[SLOT_NAME_SIZE];
41402 int retval = -ENOMEM;
41403
41404diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
41405index 9c6e9bb..2916736 100644
41406--- a/drivers/pci/pci-sysfs.c
41407+++ b/drivers/pci/pci-sysfs.c
41408@@ -1071,7 +1071,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
41409 {
41410 /* allocate attribute structure, piggyback attribute name */
41411 int name_len = write_combine ? 13 : 10;
41412- struct bin_attribute *res_attr;
41413+ bin_attribute_no_const *res_attr;
41414 int retval;
41415
41416 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
41417@@ -1256,7 +1256,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
41418 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
41419 {
41420 int retval;
41421- struct bin_attribute *attr;
41422+ bin_attribute_no_const *attr;
41423
41424 /* If the device has VPD, try to expose it in sysfs. */
41425 if (dev->vpd) {
41426@@ -1303,7 +1303,7 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
41427 {
41428 int retval;
41429 int rom_size = 0;
41430- struct bin_attribute *attr;
41431+ bin_attribute_no_const *attr;
41432
41433 if (!sysfs_initialized)
41434 return -EACCES;
41435diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
41436index 7346ee6..41520eb 100644
41437--- a/drivers/pci/pci.h
41438+++ b/drivers/pci/pci.h
41439@@ -93,7 +93,7 @@ struct pci_vpd_ops {
41440 struct pci_vpd {
41441 unsigned int len;
41442 const struct pci_vpd_ops *ops;
41443- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
41444+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
41445 };
41446
41447 extern int pci_vpd_pci22_init(struct pci_dev *dev);
41448diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
41449index d320df6..ca9a8f6 100644
41450--- a/drivers/pci/pcie/aspm.c
41451+++ b/drivers/pci/pcie/aspm.c
41452@@ -27,9 +27,9 @@
41453 #define MODULE_PARAM_PREFIX "pcie_aspm."
41454
41455 /* Note: those are not register definitions */
41456-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
41457-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
41458-#define ASPM_STATE_L1 (4) /* L1 state */
41459+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
41460+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
41461+#define ASPM_STATE_L1 (4U) /* L1 state */
41462 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
41463 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
41464
41465diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
41466index 563771f..4e3c368 100644
41467--- a/drivers/pci/probe.c
41468+++ b/drivers/pci/probe.c
41469@@ -173,7 +173,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
41470 struct pci_bus_region region;
41471 bool bar_too_big = false, bar_disabled = false;
41472
41473- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
41474+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
41475
41476 /* No printks while decoding is disabled! */
41477 if (!dev->mmio_always_on) {
41478diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
41479index 0b00947..64f7c0a 100644
41480--- a/drivers/pci/proc.c
41481+++ b/drivers/pci/proc.c
41482@@ -465,7 +465,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
41483 static int __init pci_proc_init(void)
41484 {
41485 struct pci_dev *dev = NULL;
41486+
41487+#ifdef CONFIG_GRKERNSEC_PROC_ADD
41488+#ifdef CONFIG_GRKERNSEC_PROC_USER
41489+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
41490+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
41491+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
41492+#endif
41493+#else
41494 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
41495+#endif
41496 proc_create("devices", 0, proc_bus_pci_dir,
41497 &proc_bus_pci_dev_operations);
41498 proc_initialized = 1;
41499diff --git a/drivers/platform/x86/chromeos_laptop.c b/drivers/platform/x86/chromeos_laptop.c
41500index 3e5b4497..dcdfb70 100644
41501--- a/drivers/platform/x86/chromeos_laptop.c
41502+++ b/drivers/platform/x86/chromeos_laptop.c
41503@@ -301,7 +301,7 @@ static int __init setup_tsl2563_als(const struct dmi_system_id *id)
41504 return 0;
41505 }
41506
41507-static struct dmi_system_id __initdata chromeos_laptop_dmi_table[] = {
41508+static struct dmi_system_id __initconst chromeos_laptop_dmi_table[] = {
41509 {
41510 .ident = "Samsung Series 5 550 - Touchpad",
41511 .matches = {
41512diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
41513index 6b22938..bc9700e 100644
41514--- a/drivers/platform/x86/msi-laptop.c
41515+++ b/drivers/platform/x86/msi-laptop.c
41516@@ -1000,12 +1000,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
41517
41518 if (!quirks->ec_read_only) {
41519 /* allow userland write sysfs file */
41520- dev_attr_bluetooth.store = store_bluetooth;
41521- dev_attr_wlan.store = store_wlan;
41522- dev_attr_threeg.store = store_threeg;
41523- dev_attr_bluetooth.attr.mode |= S_IWUSR;
41524- dev_attr_wlan.attr.mode |= S_IWUSR;
41525- dev_attr_threeg.attr.mode |= S_IWUSR;
41526+ pax_open_kernel();
41527+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
41528+ *(void **)&dev_attr_wlan.store = store_wlan;
41529+ *(void **)&dev_attr_threeg.store = store_threeg;
41530+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
41531+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
41532+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
41533+ pax_close_kernel();
41534 }
41535
41536 /* disable hardware control by fn key */
41537diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
41538index 14d4dce..b129917 100644
41539--- a/drivers/platform/x86/sony-laptop.c
41540+++ b/drivers/platform/x86/sony-laptop.c
41541@@ -2465,7 +2465,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
41542 }
41543
41544 /* High speed charging function */
41545-static struct device_attribute *hsc_handle;
41546+static device_attribute_no_const *hsc_handle;
41547
41548 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
41549 struct device_attribute *attr,
41550diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
41551index edec135..59a24a3 100644
41552--- a/drivers/platform/x86/thinkpad_acpi.c
41553+++ b/drivers/platform/x86/thinkpad_acpi.c
41554@@ -2093,7 +2093,7 @@ static int hotkey_mask_get(void)
41555 return 0;
41556 }
41557
41558-void static hotkey_mask_warn_incomplete_mask(void)
41559+static void hotkey_mask_warn_incomplete_mask(void)
41560 {
41561 /* log only what the user can fix... */
41562 const u32 wantedmask = hotkey_driver_mask &
41563@@ -2324,11 +2324,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
41564 }
41565 }
41566
41567-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
41568- struct tp_nvram_state *newn,
41569- const u32 event_mask)
41570-{
41571-
41572 #define TPACPI_COMPARE_KEY(__scancode, __member) \
41573 do { \
41574 if ((event_mask & (1 << __scancode)) && \
41575@@ -2342,36 +2337,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
41576 tpacpi_hotkey_send_key(__scancode); \
41577 } while (0)
41578
41579- void issue_volchange(const unsigned int oldvol,
41580- const unsigned int newvol)
41581- {
41582- unsigned int i = oldvol;
41583+static void issue_volchange(const unsigned int oldvol,
41584+ const unsigned int newvol,
41585+ const u32 event_mask)
41586+{
41587+ unsigned int i = oldvol;
41588
41589- while (i > newvol) {
41590- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
41591- i--;
41592- }
41593- while (i < newvol) {
41594- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
41595- i++;
41596- }
41597+ while (i > newvol) {
41598+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
41599+ i--;
41600 }
41601+ while (i < newvol) {
41602+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
41603+ i++;
41604+ }
41605+}
41606
41607- void issue_brightnesschange(const unsigned int oldbrt,
41608- const unsigned int newbrt)
41609- {
41610- unsigned int i = oldbrt;
41611+static void issue_brightnesschange(const unsigned int oldbrt,
41612+ const unsigned int newbrt,
41613+ const u32 event_mask)
41614+{
41615+ unsigned int i = oldbrt;
41616
41617- while (i > newbrt) {
41618- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
41619- i--;
41620- }
41621- while (i < newbrt) {
41622- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
41623- i++;
41624- }
41625+ while (i > newbrt) {
41626+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
41627+ i--;
41628+ }
41629+ while (i < newbrt) {
41630+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
41631+ i++;
41632 }
41633+}
41634
41635+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
41636+ struct tp_nvram_state *newn,
41637+ const u32 event_mask)
41638+{
41639 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
41640 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
41641 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
41642@@ -2405,7 +2406,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
41643 oldn->volume_level != newn->volume_level) {
41644 /* recently muted, or repeated mute keypress, or
41645 * multiple presses ending in mute */
41646- issue_volchange(oldn->volume_level, newn->volume_level);
41647+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
41648 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
41649 }
41650 } else {
41651@@ -2415,7 +2416,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
41652 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
41653 }
41654 if (oldn->volume_level != newn->volume_level) {
41655- issue_volchange(oldn->volume_level, newn->volume_level);
41656+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
41657 } else if (oldn->volume_toggle != newn->volume_toggle) {
41658 /* repeated vol up/down keypress at end of scale ? */
41659 if (newn->volume_level == 0)
41660@@ -2428,7 +2429,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
41661 /* handle brightness */
41662 if (oldn->brightness_level != newn->brightness_level) {
41663 issue_brightnesschange(oldn->brightness_level,
41664- newn->brightness_level);
41665+ newn->brightness_level,
41666+ event_mask);
41667 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
41668 /* repeated key presses that didn't change state */
41669 if (newn->brightness_level == 0)
41670@@ -2437,10 +2439,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
41671 && !tp_features.bright_unkfw)
41672 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
41673 }
41674+}
41675
41676 #undef TPACPI_COMPARE_KEY
41677 #undef TPACPI_MAY_SEND_KEY
41678-}
41679
41680 /*
41681 * Polling driver
41682diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
41683index 769d265..a3a05ca 100644
41684--- a/drivers/pnp/pnpbios/bioscalls.c
41685+++ b/drivers/pnp/pnpbios/bioscalls.c
41686@@ -58,7 +58,7 @@ do { \
41687 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
41688 } while(0)
41689
41690-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
41691+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
41692 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
41693
41694 /*
41695@@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
41696
41697 cpu = get_cpu();
41698 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
41699+
41700+ pax_open_kernel();
41701 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
41702+ pax_close_kernel();
41703
41704 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
41705 spin_lock_irqsave(&pnp_bios_lock, flags);
41706@@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
41707 :"memory");
41708 spin_unlock_irqrestore(&pnp_bios_lock, flags);
41709
41710+ pax_open_kernel();
41711 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
41712+ pax_close_kernel();
41713+
41714 put_cpu();
41715
41716 /* If we get here and this is set then the PnP BIOS faulted on us. */
41717@@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
41718 return status;
41719 }
41720
41721-void pnpbios_calls_init(union pnp_bios_install_struct *header)
41722+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
41723 {
41724 int i;
41725
41726@@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
41727 pnp_bios_callpoint.offset = header->fields.pm16offset;
41728 pnp_bios_callpoint.segment = PNP_CS16;
41729
41730+ pax_open_kernel();
41731+
41732 for_each_possible_cpu(i) {
41733 struct desc_struct *gdt = get_cpu_gdt_table(i);
41734 if (!gdt)
41735@@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
41736 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
41737 (unsigned long)__va(header->fields.pm16dseg));
41738 }
41739+
41740+ pax_close_kernel();
41741 }
41742diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
41743index 3e6db1c..1fbbdae 100644
41744--- a/drivers/pnp/resource.c
41745+++ b/drivers/pnp/resource.c
41746@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
41747 return 1;
41748
41749 /* check if the resource is valid */
41750- if (*irq < 0 || *irq > 15)
41751+ if (*irq > 15)
41752 return 0;
41753
41754 /* check if the resource is reserved */
41755@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
41756 return 1;
41757
41758 /* check if the resource is valid */
41759- if (*dma < 0 || *dma == 4 || *dma > 7)
41760+ if (*dma == 4 || *dma > 7)
41761 return 0;
41762
41763 /* check if the resource is reserved */
41764diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
41765index 7df7c5f..bd48c47 100644
41766--- a/drivers/power/pda_power.c
41767+++ b/drivers/power/pda_power.c
41768@@ -37,7 +37,11 @@ static int polling;
41769
41770 #ifdef CONFIG_USB_OTG_UTILS
41771 static struct usb_phy *transceiver;
41772-static struct notifier_block otg_nb;
41773+static int otg_handle_notification(struct notifier_block *nb,
41774+ unsigned long event, void *unused);
41775+static struct notifier_block otg_nb = {
41776+ .notifier_call = otg_handle_notification
41777+};
41778 #endif
41779
41780 static struct regulator *ac_draw;
41781@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
41782
41783 #ifdef CONFIG_USB_OTG_UTILS
41784 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
41785- otg_nb.notifier_call = otg_handle_notification;
41786 ret = usb_register_notifier(transceiver, &otg_nb);
41787 if (ret) {
41788 dev_err(dev, "failure to register otg notifier\n");
41789diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
41790index cc439fd..8fa30df 100644
41791--- a/drivers/power/power_supply.h
41792+++ b/drivers/power/power_supply.h
41793@@ -16,12 +16,12 @@ struct power_supply;
41794
41795 #ifdef CONFIG_SYSFS
41796
41797-extern void power_supply_init_attrs(struct device_type *dev_type);
41798+extern void power_supply_init_attrs(void);
41799 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
41800
41801 #else
41802
41803-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
41804+static inline void power_supply_init_attrs(void) {}
41805 #define power_supply_uevent NULL
41806
41807 #endif /* CONFIG_SYSFS */
41808diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
41809index 5deac43..608c5ff 100644
41810--- a/drivers/power/power_supply_core.c
41811+++ b/drivers/power/power_supply_core.c
41812@@ -24,7 +24,10 @@
41813 struct class *power_supply_class;
41814 EXPORT_SYMBOL_GPL(power_supply_class);
41815
41816-static struct device_type power_supply_dev_type;
41817+extern const struct attribute_group *power_supply_attr_groups[];
41818+static struct device_type power_supply_dev_type = {
41819+ .groups = power_supply_attr_groups,
41820+};
41821
41822 static int __power_supply_changed_work(struct device *dev, void *data)
41823 {
41824@@ -393,7 +396,7 @@ static int __init power_supply_class_init(void)
41825 return PTR_ERR(power_supply_class);
41826
41827 power_supply_class->dev_uevent = power_supply_uevent;
41828- power_supply_init_attrs(&power_supply_dev_type);
41829+ power_supply_init_attrs();
41830
41831 return 0;
41832 }
41833diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
41834index 29178f7..c65f324 100644
41835--- a/drivers/power/power_supply_sysfs.c
41836+++ b/drivers/power/power_supply_sysfs.c
41837@@ -230,17 +230,15 @@ static struct attribute_group power_supply_attr_group = {
41838 .is_visible = power_supply_attr_is_visible,
41839 };
41840
41841-static const struct attribute_group *power_supply_attr_groups[] = {
41842+const struct attribute_group *power_supply_attr_groups[] = {
41843 &power_supply_attr_group,
41844 NULL,
41845 };
41846
41847-void power_supply_init_attrs(struct device_type *dev_type)
41848+void power_supply_init_attrs(void)
41849 {
41850 int i;
41851
41852- dev_type->groups = power_supply_attr_groups;
41853-
41854 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
41855 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
41856 }
41857diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
41858index 4d7c635..9860196 100644
41859--- a/drivers/regulator/max8660.c
41860+++ b/drivers/regulator/max8660.c
41861@@ -333,8 +333,10 @@ static int max8660_probe(struct i2c_client *client,
41862 max8660->shadow_regs[MAX8660_OVER1] = 5;
41863 } else {
41864 /* Otherwise devices can be toggled via software */
41865- max8660_dcdc_ops.enable = max8660_dcdc_enable;
41866- max8660_dcdc_ops.disable = max8660_dcdc_disable;
41867+ pax_open_kernel();
41868+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
41869+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
41870+ pax_close_kernel();
41871 }
41872
41873 /*
41874diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
41875index 9a8ea91..c483dd9 100644
41876--- a/drivers/regulator/max8973-regulator.c
41877+++ b/drivers/regulator/max8973-regulator.c
41878@@ -401,9 +401,11 @@ static int max8973_probe(struct i2c_client *client,
41879 if (!pdata->enable_ext_control) {
41880 max->desc.enable_reg = MAX8973_VOUT;
41881 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
41882- max8973_dcdc_ops.enable = regulator_enable_regmap;
41883- max8973_dcdc_ops.disable = regulator_disable_regmap;
41884- max8973_dcdc_ops.is_enabled = regulator_is_enabled_regmap;
41885+ pax_open_kernel();
41886+ *(void **)&max8973_dcdc_ops.enable = regulator_enable_regmap;
41887+ *(void **)&max8973_dcdc_ops.disable = regulator_disable_regmap;
41888+ *(void **)&max8973_dcdc_ops.is_enabled = regulator_is_enabled_regmap;
41889+ pax_close_kernel();
41890 }
41891
41892 max->enable_external_control = pdata->enable_ext_control;
41893diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
41894index 9891aec..beb3083 100644
41895--- a/drivers/regulator/mc13892-regulator.c
41896+++ b/drivers/regulator/mc13892-regulator.c
41897@@ -583,10 +583,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
41898 }
41899 mc13xxx_unlock(mc13892);
41900
41901- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
41902+ pax_open_kernel();
41903+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
41904 = mc13892_vcam_set_mode;
41905- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
41906+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
41907 = mc13892_vcam_get_mode;
41908+ pax_close_kernel();
41909
41910 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
41911 ARRAY_SIZE(mc13892_regulators),
41912diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
41913index cc5bea9..689f7d9 100644
41914--- a/drivers/rtc/rtc-cmos.c
41915+++ b/drivers/rtc/rtc-cmos.c
41916@@ -724,7 +724,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
41917 hpet_rtc_timer_init();
41918
41919 /* export at least the first block of NVRAM */
41920- nvram.size = address_space - NVRAM_OFFSET;
41921+ pax_open_kernel();
41922+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
41923+ pax_close_kernel();
41924 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
41925 if (retval < 0) {
41926 dev_dbg(dev, "can't create nvram file? %d\n", retval);
41927diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
41928index d049393..bb20be0 100644
41929--- a/drivers/rtc/rtc-dev.c
41930+++ b/drivers/rtc/rtc-dev.c
41931@@ -16,6 +16,7 @@
41932 #include <linux/module.h>
41933 #include <linux/rtc.h>
41934 #include <linux/sched.h>
41935+#include <linux/grsecurity.h>
41936 #include "rtc-core.h"
41937
41938 static dev_t rtc_devt;
41939@@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file,
41940 if (copy_from_user(&tm, uarg, sizeof(tm)))
41941 return -EFAULT;
41942
41943+ gr_log_timechange();
41944+
41945 return rtc_set_time(rtc, &tm);
41946
41947 case RTC_PIE_ON:
41948diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
41949index 970a236..3613169 100644
41950--- a/drivers/rtc/rtc-ds1307.c
41951+++ b/drivers/rtc/rtc-ds1307.c
41952@@ -106,7 +106,7 @@ struct ds1307 {
41953 u8 offset; /* register's offset */
41954 u8 regs[11];
41955 u16 nvram_offset;
41956- struct bin_attribute *nvram;
41957+ bin_attribute_no_const *nvram;
41958 enum ds_type type;
41959 unsigned long flags;
41960 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
41961diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
41962index 130f29a..6179d03 100644
41963--- a/drivers/rtc/rtc-m48t59.c
41964+++ b/drivers/rtc/rtc-m48t59.c
41965@@ -482,7 +482,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
41966 goto out;
41967 }
41968
41969- m48t59_nvram_attr.size = pdata->offset;
41970+ pax_open_kernel();
41971+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
41972+ pax_close_kernel();
41973
41974 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
41975 if (ret) {
41976diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
41977index e693af6..2e525b6 100644
41978--- a/drivers/scsi/bfa/bfa_fcpim.h
41979+++ b/drivers/scsi/bfa/bfa_fcpim.h
41980@@ -36,7 +36,7 @@ struct bfa_iotag_s {
41981
41982 struct bfa_itn_s {
41983 bfa_isr_func_t isr;
41984-};
41985+} __no_const;
41986
41987 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
41988 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
41989diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
41990index 23a90e7..9cf04ee 100644
41991--- a/drivers/scsi/bfa/bfa_ioc.h
41992+++ b/drivers/scsi/bfa/bfa_ioc.h
41993@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
41994 bfa_ioc_disable_cbfn_t disable_cbfn;
41995 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
41996 bfa_ioc_reset_cbfn_t reset_cbfn;
41997-};
41998+} __no_const;
41999
42000 /*
42001 * IOC event notification mechanism.
42002@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
42003 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
42004 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
42005 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
42006-};
42007+} __no_const;
42008
42009 /*
42010 * Queue element to wait for room in request queue. FIFO order is
42011diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
42012index 439c012..b63d534 100644
42013--- a/drivers/scsi/bfa/bfad_debugfs.c
42014+++ b/drivers/scsi/bfa/bfad_debugfs.c
42015@@ -186,7 +186,7 @@ bfad_debugfs_lseek(struct file *file, loff_t offset, int orig)
42016 file->f_pos += offset;
42017 break;
42018 case 2:
42019- file->f_pos = debug->buffer_len - offset;
42020+ file->f_pos = debug->buffer_len + offset;
42021 break;
42022 default:
42023 return -EINVAL;
42024diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
42025index adc1f7f..85e1ffd 100644
42026--- a/drivers/scsi/fnic/fnic_debugfs.c
42027+++ b/drivers/scsi/fnic/fnic_debugfs.c
42028@@ -174,7 +174,7 @@ static loff_t fnic_trace_debugfs_lseek(struct file *file,
42029 pos = file->f_pos + offset;
42030 break;
42031 case 2:
42032- pos = fnic_dbg_prt->buffer_len - offset;
42033+ pos = fnic_dbg_prt->buffer_len + offset;
42034 }
42035 return (pos < 0 || pos > fnic_dbg_prt->buffer_len) ?
42036 -EINVAL : (file->f_pos = pos);
42037diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
42038index df0c3c7..b00e1d0 100644
42039--- a/drivers/scsi/hosts.c
42040+++ b/drivers/scsi/hosts.c
42041@@ -42,7 +42,7 @@
42042 #include "scsi_logging.h"
42043
42044
42045-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
42046+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
42047
42048
42049 static void scsi_host_cls_release(struct device *dev)
42050@@ -361,7 +361,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
42051 * subtract one because we increment first then return, but we need to
42052 * know what the next host number was before increment
42053 */
42054- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
42055+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
42056 shost->dma_channel = 0xff;
42057
42058 /* These three are default values which can be overridden */
42059diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
42060index 7f4f790..b75b92a 100644
42061--- a/drivers/scsi/hpsa.c
42062+++ b/drivers/scsi/hpsa.c
42063@@ -554,7 +554,7 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
42064 unsigned long flags;
42065
42066 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
42067- return h->access.command_completed(h, q);
42068+ return h->access->command_completed(h, q);
42069
42070 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
42071 a = rq->head[rq->current_entry];
42072@@ -3422,7 +3422,7 @@ static void start_io(struct ctlr_info *h)
42073 while (!list_empty(&h->reqQ)) {
42074 c = list_entry(h->reqQ.next, struct CommandList, list);
42075 /* can't do anything if fifo is full */
42076- if ((h->access.fifo_full(h))) {
42077+ if ((h->access->fifo_full(h))) {
42078 dev_warn(&h->pdev->dev, "fifo full\n");
42079 break;
42080 }
42081@@ -3444,7 +3444,7 @@ static void start_io(struct ctlr_info *h)
42082
42083 /* Tell the controller execute command */
42084 spin_unlock_irqrestore(&h->lock, flags);
42085- h->access.submit_command(h, c);
42086+ h->access->submit_command(h, c);
42087 spin_lock_irqsave(&h->lock, flags);
42088 }
42089 spin_unlock_irqrestore(&h->lock, flags);
42090@@ -3452,17 +3452,17 @@ static void start_io(struct ctlr_info *h)
42091
42092 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
42093 {
42094- return h->access.command_completed(h, q);
42095+ return h->access->command_completed(h, q);
42096 }
42097
42098 static inline bool interrupt_pending(struct ctlr_info *h)
42099 {
42100- return h->access.intr_pending(h);
42101+ return h->access->intr_pending(h);
42102 }
42103
42104 static inline long interrupt_not_for_us(struct ctlr_info *h)
42105 {
42106- return (h->access.intr_pending(h) == 0) ||
42107+ return (h->access->intr_pending(h) == 0) ||
42108 (h->interrupts_enabled == 0);
42109 }
42110
42111@@ -4364,7 +4364,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
42112 if (prod_index < 0)
42113 return -ENODEV;
42114 h->product_name = products[prod_index].product_name;
42115- h->access = *(products[prod_index].access);
42116+ h->access = products[prod_index].access;
42117
42118 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
42119 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
42120@@ -4646,7 +4646,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
42121
42122 assert_spin_locked(&lockup_detector_lock);
42123 remove_ctlr_from_lockup_detector_list(h);
42124- h->access.set_intr_mask(h, HPSA_INTR_OFF);
42125+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
42126 spin_lock_irqsave(&h->lock, flags);
42127 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
42128 spin_unlock_irqrestore(&h->lock, flags);
42129@@ -4823,7 +4823,7 @@ reinit_after_soft_reset:
42130 }
42131
42132 /* make sure the board interrupts are off */
42133- h->access.set_intr_mask(h, HPSA_INTR_OFF);
42134+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
42135
42136 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
42137 goto clean2;
42138@@ -4857,7 +4857,7 @@ reinit_after_soft_reset:
42139 * fake ones to scoop up any residual completions.
42140 */
42141 spin_lock_irqsave(&h->lock, flags);
42142- h->access.set_intr_mask(h, HPSA_INTR_OFF);
42143+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
42144 spin_unlock_irqrestore(&h->lock, flags);
42145 free_irqs(h);
42146 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
42147@@ -4876,9 +4876,9 @@ reinit_after_soft_reset:
42148 dev_info(&h->pdev->dev, "Board READY.\n");
42149 dev_info(&h->pdev->dev,
42150 "Waiting for stale completions to drain.\n");
42151- h->access.set_intr_mask(h, HPSA_INTR_ON);
42152+ h->access->set_intr_mask(h, HPSA_INTR_ON);
42153 msleep(10000);
42154- h->access.set_intr_mask(h, HPSA_INTR_OFF);
42155+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
42156
42157 rc = controller_reset_failed(h->cfgtable);
42158 if (rc)
42159@@ -4899,7 +4899,7 @@ reinit_after_soft_reset:
42160 }
42161
42162 /* Turn the interrupts on so we can service requests */
42163- h->access.set_intr_mask(h, HPSA_INTR_ON);
42164+ h->access->set_intr_mask(h, HPSA_INTR_ON);
42165
42166 hpsa_hba_inquiry(h);
42167 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
42168@@ -4954,7 +4954,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
42169 * To write all data in the battery backed cache to disks
42170 */
42171 hpsa_flush_cache(h);
42172- h->access.set_intr_mask(h, HPSA_INTR_OFF);
42173+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
42174 hpsa_free_irqs_and_disable_msix(h);
42175 }
42176
42177@@ -5122,7 +5122,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
42178 return;
42179 }
42180 /* Change the access methods to the performant access methods */
42181- h->access = SA5_performant_access;
42182+ h->access = &SA5_performant_access;
42183 h->transMethod = CFGTBL_Trans_Performant;
42184 }
42185
42186diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
42187index 9816479..c5d4e97 100644
42188--- a/drivers/scsi/hpsa.h
42189+++ b/drivers/scsi/hpsa.h
42190@@ -79,7 +79,7 @@ struct ctlr_info {
42191 unsigned int msix_vector;
42192 unsigned int msi_vector;
42193 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
42194- struct access_method access;
42195+ struct access_method *access;
42196
42197 /* queue and queue Info */
42198 struct list_head reqQ;
42199diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
42200index c772d8d..35c362c 100644
42201--- a/drivers/scsi/libfc/fc_exch.c
42202+++ b/drivers/scsi/libfc/fc_exch.c
42203@@ -100,12 +100,12 @@ struct fc_exch_mgr {
42204 u16 pool_max_index;
42205
42206 struct {
42207- atomic_t no_free_exch;
42208- atomic_t no_free_exch_xid;
42209- atomic_t xid_not_found;
42210- atomic_t xid_busy;
42211- atomic_t seq_not_found;
42212- atomic_t non_bls_resp;
42213+ atomic_unchecked_t no_free_exch;
42214+ atomic_unchecked_t no_free_exch_xid;
42215+ atomic_unchecked_t xid_not_found;
42216+ atomic_unchecked_t xid_busy;
42217+ atomic_unchecked_t seq_not_found;
42218+ atomic_unchecked_t non_bls_resp;
42219 } stats;
42220 };
42221
42222@@ -725,7 +725,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
42223 /* allocate memory for exchange */
42224 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
42225 if (!ep) {
42226- atomic_inc(&mp->stats.no_free_exch);
42227+ atomic_inc_unchecked(&mp->stats.no_free_exch);
42228 goto out;
42229 }
42230 memset(ep, 0, sizeof(*ep));
42231@@ -786,7 +786,7 @@ out:
42232 return ep;
42233 err:
42234 spin_unlock_bh(&pool->lock);
42235- atomic_inc(&mp->stats.no_free_exch_xid);
42236+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
42237 mempool_free(ep, mp->ep_pool);
42238 return NULL;
42239 }
42240@@ -929,7 +929,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
42241 xid = ntohs(fh->fh_ox_id); /* we originated exch */
42242 ep = fc_exch_find(mp, xid);
42243 if (!ep) {
42244- atomic_inc(&mp->stats.xid_not_found);
42245+ atomic_inc_unchecked(&mp->stats.xid_not_found);
42246 reject = FC_RJT_OX_ID;
42247 goto out;
42248 }
42249@@ -959,7 +959,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
42250 ep = fc_exch_find(mp, xid);
42251 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
42252 if (ep) {
42253- atomic_inc(&mp->stats.xid_busy);
42254+ atomic_inc_unchecked(&mp->stats.xid_busy);
42255 reject = FC_RJT_RX_ID;
42256 goto rel;
42257 }
42258@@ -970,7 +970,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
42259 }
42260 xid = ep->xid; /* get our XID */
42261 } else if (!ep) {
42262- atomic_inc(&mp->stats.xid_not_found);
42263+ atomic_inc_unchecked(&mp->stats.xid_not_found);
42264 reject = FC_RJT_RX_ID; /* XID not found */
42265 goto out;
42266 }
42267@@ -987,7 +987,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
42268 } else {
42269 sp = &ep->seq;
42270 if (sp->id != fh->fh_seq_id) {
42271- atomic_inc(&mp->stats.seq_not_found);
42272+ atomic_inc_unchecked(&mp->stats.seq_not_found);
42273 if (f_ctl & FC_FC_END_SEQ) {
42274 /*
42275 * Update sequence_id based on incoming last
42276@@ -1437,22 +1437,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
42277
42278 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
42279 if (!ep) {
42280- atomic_inc(&mp->stats.xid_not_found);
42281+ atomic_inc_unchecked(&mp->stats.xid_not_found);
42282 goto out;
42283 }
42284 if (ep->esb_stat & ESB_ST_COMPLETE) {
42285- atomic_inc(&mp->stats.xid_not_found);
42286+ atomic_inc_unchecked(&mp->stats.xid_not_found);
42287 goto rel;
42288 }
42289 if (ep->rxid == FC_XID_UNKNOWN)
42290 ep->rxid = ntohs(fh->fh_rx_id);
42291 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
42292- atomic_inc(&mp->stats.xid_not_found);
42293+ atomic_inc_unchecked(&mp->stats.xid_not_found);
42294 goto rel;
42295 }
42296 if (ep->did != ntoh24(fh->fh_s_id) &&
42297 ep->did != FC_FID_FLOGI) {
42298- atomic_inc(&mp->stats.xid_not_found);
42299+ atomic_inc_unchecked(&mp->stats.xid_not_found);
42300 goto rel;
42301 }
42302 sof = fr_sof(fp);
42303@@ -1461,7 +1461,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
42304 sp->ssb_stat |= SSB_ST_RESP;
42305 sp->id = fh->fh_seq_id;
42306 } else if (sp->id != fh->fh_seq_id) {
42307- atomic_inc(&mp->stats.seq_not_found);
42308+ atomic_inc_unchecked(&mp->stats.seq_not_found);
42309 goto rel;
42310 }
42311
42312@@ -1525,9 +1525,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
42313 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
42314
42315 if (!sp)
42316- atomic_inc(&mp->stats.xid_not_found);
42317+ atomic_inc_unchecked(&mp->stats.xid_not_found);
42318 else
42319- atomic_inc(&mp->stats.non_bls_resp);
42320+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
42321
42322 fc_frame_free(fp);
42323 }
42324@@ -2174,13 +2174,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
42325
42326 list_for_each_entry(ema, &lport->ema_list, ema_list) {
42327 mp = ema->mp;
42328- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
42329+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
42330 st->fc_no_free_exch_xid +=
42331- atomic_read(&mp->stats.no_free_exch_xid);
42332- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
42333- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
42334- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
42335- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
42336+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
42337+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
42338+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
42339+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
42340+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
42341 }
42342 }
42343 EXPORT_SYMBOL(fc_exch_update_stats);
42344diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
42345index bdb81cd..d3c7c2c 100644
42346--- a/drivers/scsi/libsas/sas_ata.c
42347+++ b/drivers/scsi/libsas/sas_ata.c
42348@@ -554,7 +554,7 @@ static struct ata_port_operations sas_sata_ops = {
42349 .postreset = ata_std_postreset,
42350 .error_handler = ata_std_error_handler,
42351 .post_internal_cmd = sas_ata_post_internal,
42352- .qc_defer = ata_std_qc_defer,
42353+ .qc_defer = ata_std_qc_defer,
42354 .qc_prep = ata_noop_qc_prep,
42355 .qc_issue = sas_ata_qc_issue,
42356 .qc_fill_rtf = sas_ata_qc_fill_rtf,
42357diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
42358index 7706c99..3b4fc0c 100644
42359--- a/drivers/scsi/lpfc/lpfc.h
42360+++ b/drivers/scsi/lpfc/lpfc.h
42361@@ -424,7 +424,7 @@ struct lpfc_vport {
42362 struct dentry *debug_nodelist;
42363 struct dentry *vport_debugfs_root;
42364 struct lpfc_debugfs_trc *disc_trc;
42365- atomic_t disc_trc_cnt;
42366+ atomic_unchecked_t disc_trc_cnt;
42367 #endif
42368 uint8_t stat_data_enabled;
42369 uint8_t stat_data_blocked;
42370@@ -853,8 +853,8 @@ struct lpfc_hba {
42371 struct timer_list fabric_block_timer;
42372 unsigned long bit_flags;
42373 #define FABRIC_COMANDS_BLOCKED 0
42374- atomic_t num_rsrc_err;
42375- atomic_t num_cmd_success;
42376+ atomic_unchecked_t num_rsrc_err;
42377+ atomic_unchecked_t num_cmd_success;
42378 unsigned long last_rsrc_error_time;
42379 unsigned long last_ramp_down_time;
42380 unsigned long last_ramp_up_time;
42381@@ -890,7 +890,7 @@ struct lpfc_hba {
42382
42383 struct dentry *debug_slow_ring_trc;
42384 struct lpfc_debugfs_trc *slow_ring_trc;
42385- atomic_t slow_ring_trc_cnt;
42386+ atomic_unchecked_t slow_ring_trc_cnt;
42387 /* iDiag debugfs sub-directory */
42388 struct dentry *idiag_root;
42389 struct dentry *idiag_pci_cfg;
42390diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
42391index f63f5ff..32549a4 100644
42392--- a/drivers/scsi/lpfc/lpfc_debugfs.c
42393+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
42394@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
42395
42396 #include <linux/debugfs.h>
42397
42398-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
42399+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
42400 static unsigned long lpfc_debugfs_start_time = 0L;
42401
42402 /* iDiag */
42403@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
42404 lpfc_debugfs_enable = 0;
42405
42406 len = 0;
42407- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
42408+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
42409 (lpfc_debugfs_max_disc_trc - 1);
42410 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
42411 dtp = vport->disc_trc + i;
42412@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
42413 lpfc_debugfs_enable = 0;
42414
42415 len = 0;
42416- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
42417+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
42418 (lpfc_debugfs_max_slow_ring_trc - 1);
42419 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
42420 dtp = phba->slow_ring_trc + i;
42421@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
42422 !vport || !vport->disc_trc)
42423 return;
42424
42425- index = atomic_inc_return(&vport->disc_trc_cnt) &
42426+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
42427 (lpfc_debugfs_max_disc_trc - 1);
42428 dtp = vport->disc_trc + index;
42429 dtp->fmt = fmt;
42430 dtp->data1 = data1;
42431 dtp->data2 = data2;
42432 dtp->data3 = data3;
42433- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
42434+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
42435 dtp->jif = jiffies;
42436 #endif
42437 return;
42438@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
42439 !phba || !phba->slow_ring_trc)
42440 return;
42441
42442- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
42443+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
42444 (lpfc_debugfs_max_slow_ring_trc - 1);
42445 dtp = phba->slow_ring_trc + index;
42446 dtp->fmt = fmt;
42447 dtp->data1 = data1;
42448 dtp->data2 = data2;
42449 dtp->data3 = data3;
42450- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
42451+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
42452 dtp->jif = jiffies;
42453 #endif
42454 return;
42455@@ -1178,7 +1178,7 @@ lpfc_debugfs_lseek(struct file *file, loff_t off, int whence)
42456 pos = file->f_pos + off;
42457 break;
42458 case 2:
42459- pos = debug->len - off;
42460+ pos = debug->len + off;
42461 }
42462 return (pos < 0 || pos > debug->len) ? -EINVAL : (file->f_pos = pos);
42463 }
42464@@ -4182,7 +4182,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
42465 "slow_ring buffer\n");
42466 goto debug_failed;
42467 }
42468- atomic_set(&phba->slow_ring_trc_cnt, 0);
42469+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
42470 memset(phba->slow_ring_trc, 0,
42471 (sizeof(struct lpfc_debugfs_trc) *
42472 lpfc_debugfs_max_slow_ring_trc));
42473@@ -4228,7 +4228,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
42474 "buffer\n");
42475 goto debug_failed;
42476 }
42477- atomic_set(&vport->disc_trc_cnt, 0);
42478+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
42479
42480 snprintf(name, sizeof(name), "discovery_trace");
42481 vport->debug_disc_trc =
42482diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
42483index 314b4f6..7005d10 100644
42484--- a/drivers/scsi/lpfc/lpfc_init.c
42485+++ b/drivers/scsi/lpfc/lpfc_init.c
42486@@ -10551,8 +10551,10 @@ lpfc_init(void)
42487 "misc_register returned with status %d", error);
42488
42489 if (lpfc_enable_npiv) {
42490- lpfc_transport_functions.vport_create = lpfc_vport_create;
42491- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
42492+ pax_open_kernel();
42493+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
42494+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
42495+ pax_close_kernel();
42496 }
42497 lpfc_transport_template =
42498 fc_attach_transport(&lpfc_transport_functions);
42499diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
42500index 98af07c..7625fb5 100644
42501--- a/drivers/scsi/lpfc/lpfc_scsi.c
42502+++ b/drivers/scsi/lpfc/lpfc_scsi.c
42503@@ -325,7 +325,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
42504 uint32_t evt_posted;
42505
42506 spin_lock_irqsave(&phba->hbalock, flags);
42507- atomic_inc(&phba->num_rsrc_err);
42508+ atomic_inc_unchecked(&phba->num_rsrc_err);
42509 phba->last_rsrc_error_time = jiffies;
42510
42511 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
42512@@ -366,7 +366,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
42513 unsigned long flags;
42514 struct lpfc_hba *phba = vport->phba;
42515 uint32_t evt_posted;
42516- atomic_inc(&phba->num_cmd_success);
42517+ atomic_inc_unchecked(&phba->num_cmd_success);
42518
42519 if (vport->cfg_lun_queue_depth <= queue_depth)
42520 return;
42521@@ -410,8 +410,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
42522 unsigned long num_rsrc_err, num_cmd_success;
42523 int i;
42524
42525- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
42526- num_cmd_success = atomic_read(&phba->num_cmd_success);
42527+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
42528+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
42529
42530 /*
42531 * The error and success command counters are global per
42532@@ -439,8 +439,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
42533 }
42534 }
42535 lpfc_destroy_vport_work_array(phba, vports);
42536- atomic_set(&phba->num_rsrc_err, 0);
42537- atomic_set(&phba->num_cmd_success, 0);
42538+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
42539+ atomic_set_unchecked(&phba->num_cmd_success, 0);
42540 }
42541
42542 /**
42543@@ -474,8 +474,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
42544 }
42545 }
42546 lpfc_destroy_vport_work_array(phba, vports);
42547- atomic_set(&phba->num_rsrc_err, 0);
42548- atomic_set(&phba->num_cmd_success, 0);
42549+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
42550+ atomic_set_unchecked(&phba->num_cmd_success, 0);
42551 }
42552
42553 /**
42554diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
42555index b46f5e9..c4c4ccb 100644
42556--- a/drivers/scsi/pmcraid.c
42557+++ b/drivers/scsi/pmcraid.c
42558@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
42559 res->scsi_dev = scsi_dev;
42560 scsi_dev->hostdata = res;
42561 res->change_detected = 0;
42562- atomic_set(&res->read_failures, 0);
42563- atomic_set(&res->write_failures, 0);
42564+ atomic_set_unchecked(&res->read_failures, 0);
42565+ atomic_set_unchecked(&res->write_failures, 0);
42566 rc = 0;
42567 }
42568 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
42569@@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
42570
42571 /* If this was a SCSI read/write command keep count of errors */
42572 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
42573- atomic_inc(&res->read_failures);
42574+ atomic_inc_unchecked(&res->read_failures);
42575 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
42576- atomic_inc(&res->write_failures);
42577+ atomic_inc_unchecked(&res->write_failures);
42578
42579 if (!RES_IS_GSCSI(res->cfg_entry) &&
42580 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
42581@@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
42582 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
42583 * hrrq_id assigned here in queuecommand
42584 */
42585- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
42586+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
42587 pinstance->num_hrrq;
42588 cmd->cmd_done = pmcraid_io_done;
42589
42590@@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
42591 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
42592 * hrrq_id assigned here in queuecommand
42593 */
42594- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
42595+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
42596 pinstance->num_hrrq;
42597
42598 if (request_size) {
42599@@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
42600
42601 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
42602 /* add resources only after host is added into system */
42603- if (!atomic_read(&pinstance->expose_resources))
42604+ if (!atomic_read_unchecked(&pinstance->expose_resources))
42605 return;
42606
42607 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
42608@@ -5324,8 +5324,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
42609 init_waitqueue_head(&pinstance->reset_wait_q);
42610
42611 atomic_set(&pinstance->outstanding_cmds, 0);
42612- atomic_set(&pinstance->last_message_id, 0);
42613- atomic_set(&pinstance->expose_resources, 0);
42614+ atomic_set_unchecked(&pinstance->last_message_id, 0);
42615+ atomic_set_unchecked(&pinstance->expose_resources, 0);
42616
42617 INIT_LIST_HEAD(&pinstance->free_res_q);
42618 INIT_LIST_HEAD(&pinstance->used_res_q);
42619@@ -6038,7 +6038,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
42620 /* Schedule worker thread to handle CCN and take care of adding and
42621 * removing devices to OS
42622 */
42623- atomic_set(&pinstance->expose_resources, 1);
42624+ atomic_set_unchecked(&pinstance->expose_resources, 1);
42625 schedule_work(&pinstance->worker_q);
42626 return rc;
42627
42628diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
42629index e1d150f..6c6df44 100644
42630--- a/drivers/scsi/pmcraid.h
42631+++ b/drivers/scsi/pmcraid.h
42632@@ -748,7 +748,7 @@ struct pmcraid_instance {
42633 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
42634
42635 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
42636- atomic_t last_message_id;
42637+ atomic_unchecked_t last_message_id;
42638
42639 /* configuration table */
42640 struct pmcraid_config_table *cfg_table;
42641@@ -777,7 +777,7 @@ struct pmcraid_instance {
42642 atomic_t outstanding_cmds;
42643
42644 /* should add/delete resources to mid-layer now ?*/
42645- atomic_t expose_resources;
42646+ atomic_unchecked_t expose_resources;
42647
42648
42649
42650@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
42651 struct pmcraid_config_table_entry_ext cfg_entry_ext;
42652 };
42653 struct scsi_device *scsi_dev; /* Link scsi_device structure */
42654- atomic_t read_failures; /* count of failed READ commands */
42655- atomic_t write_failures; /* count of failed WRITE commands */
42656+ atomic_unchecked_t read_failures; /* count of failed READ commands */
42657+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
42658
42659 /* To indicate add/delete/modify during CCN */
42660 u8 change_detected;
42661diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
42662index b3db9dc..c3b1756 100644
42663--- a/drivers/scsi/qla2xxx/qla_attr.c
42664+++ b/drivers/scsi/qla2xxx/qla_attr.c
42665@@ -1971,7 +1971,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
42666 return 0;
42667 }
42668
42669-struct fc_function_template qla2xxx_transport_functions = {
42670+fc_function_template_no_const qla2xxx_transport_functions = {
42671
42672 .show_host_node_name = 1,
42673 .show_host_port_name = 1,
42674@@ -2018,7 +2018,7 @@ struct fc_function_template qla2xxx_transport_functions = {
42675 .bsg_timeout = qla24xx_bsg_timeout,
42676 };
42677
42678-struct fc_function_template qla2xxx_transport_vport_functions = {
42679+fc_function_template_no_const qla2xxx_transport_vport_functions = {
42680
42681 .show_host_node_name = 1,
42682 .show_host_port_name = 1,
42683diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
42684index b310fa9..b9b3944 100644
42685--- a/drivers/scsi/qla2xxx/qla_gbl.h
42686+++ b/drivers/scsi/qla2xxx/qla_gbl.h
42687@@ -523,8 +523,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
42688 struct device_attribute;
42689 extern struct device_attribute *qla2x00_host_attrs[];
42690 struct fc_function_template;
42691-extern struct fc_function_template qla2xxx_transport_functions;
42692-extern struct fc_function_template qla2xxx_transport_vport_functions;
42693+extern fc_function_template_no_const qla2xxx_transport_functions;
42694+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
42695 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
42696 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
42697 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
42698diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
42699index 2c6dd3d..e5ecd82 100644
42700--- a/drivers/scsi/qla2xxx/qla_os.c
42701+++ b/drivers/scsi/qla2xxx/qla_os.c
42702@@ -1554,8 +1554,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
42703 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
42704 /* Ok, a 64bit DMA mask is applicable. */
42705 ha->flags.enable_64bit_addressing = 1;
42706- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
42707- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
42708+ pax_open_kernel();
42709+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
42710+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
42711+ pax_close_kernel();
42712 return;
42713 }
42714 }
42715diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
42716index 129f5dd..ade53e8 100644
42717--- a/drivers/scsi/qla4xxx/ql4_def.h
42718+++ b/drivers/scsi/qla4xxx/ql4_def.h
42719@@ -275,7 +275,7 @@ struct ddb_entry {
42720 * (4000 only) */
42721 atomic_t relogin_timer; /* Max Time to wait for
42722 * relogin to complete */
42723- atomic_t relogin_retry_count; /* Num of times relogin has been
42724+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
42725 * retried */
42726 uint32_t default_time2wait; /* Default Min time between
42727 * relogins (+aens) */
42728diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
42729index 6142729..b6a85c9 100644
42730--- a/drivers/scsi/qla4xxx/ql4_os.c
42731+++ b/drivers/scsi/qla4xxx/ql4_os.c
42732@@ -2622,12 +2622,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
42733 */
42734 if (!iscsi_is_session_online(cls_sess)) {
42735 /* Reset retry relogin timer */
42736- atomic_inc(&ddb_entry->relogin_retry_count);
42737+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
42738 DEBUG2(ql4_printk(KERN_INFO, ha,
42739 "%s: index[%d] relogin timed out-retrying"
42740 " relogin (%d), retry (%d)\n", __func__,
42741 ddb_entry->fw_ddb_index,
42742- atomic_read(&ddb_entry->relogin_retry_count),
42743+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
42744 ddb_entry->default_time2wait + 4));
42745 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
42746 atomic_set(&ddb_entry->retry_relogin_timer,
42747@@ -4742,7 +4742,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
42748
42749 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
42750 atomic_set(&ddb_entry->relogin_timer, 0);
42751- atomic_set(&ddb_entry->relogin_retry_count, 0);
42752+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
42753 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
42754 ddb_entry->default_relogin_timeout =
42755 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
42756diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
42757index 2c0d0ec..4e8681a 100644
42758--- a/drivers/scsi/scsi.c
42759+++ b/drivers/scsi/scsi.c
42760@@ -661,7 +661,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
42761 unsigned long timeout;
42762 int rtn = 0;
42763
42764- atomic_inc(&cmd->device->iorequest_cnt);
42765+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
42766
42767 /* check if the device is still usable */
42768 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
42769diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
42770index c31187d..0ead8c3 100644
42771--- a/drivers/scsi/scsi_lib.c
42772+++ b/drivers/scsi/scsi_lib.c
42773@@ -1459,7 +1459,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
42774 shost = sdev->host;
42775 scsi_init_cmd_errh(cmd);
42776 cmd->result = DID_NO_CONNECT << 16;
42777- atomic_inc(&cmd->device->iorequest_cnt);
42778+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
42779
42780 /*
42781 * SCSI request completion path will do scsi_device_unbusy(),
42782@@ -1485,9 +1485,9 @@ static void scsi_softirq_done(struct request *rq)
42783
42784 INIT_LIST_HEAD(&cmd->eh_entry);
42785
42786- atomic_inc(&cmd->device->iodone_cnt);
42787+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
42788 if (cmd->result)
42789- atomic_inc(&cmd->device->ioerr_cnt);
42790+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
42791
42792 disposition = scsi_decide_disposition(cmd);
42793 if (disposition != SUCCESS &&
42794diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
42795index 931a7d9..0c2a754 100644
42796--- a/drivers/scsi/scsi_sysfs.c
42797+++ b/drivers/scsi/scsi_sysfs.c
42798@@ -658,7 +658,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
42799 char *buf) \
42800 { \
42801 struct scsi_device *sdev = to_scsi_device(dev); \
42802- unsigned long long count = atomic_read(&sdev->field); \
42803+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
42804 return snprintf(buf, 20, "0x%llx\n", count); \
42805 } \
42806 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
42807diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
42808index 84a1fdf..693b0d6 100644
42809--- a/drivers/scsi/scsi_tgt_lib.c
42810+++ b/drivers/scsi/scsi_tgt_lib.c
42811@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
42812 int err;
42813
42814 dprintk("%lx %u\n", uaddr, len);
42815- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
42816+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
42817 if (err) {
42818 /*
42819 * TODO: need to fixup sg_tablesize, max_segment_size,
42820diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
42821index e894ca7..de9d7660 100644
42822--- a/drivers/scsi/scsi_transport_fc.c
42823+++ b/drivers/scsi/scsi_transport_fc.c
42824@@ -498,7 +498,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
42825 * Netlink Infrastructure
42826 */
42827
42828-static atomic_t fc_event_seq;
42829+static atomic_unchecked_t fc_event_seq;
42830
42831 /**
42832 * fc_get_event_number - Obtain the next sequential FC event number
42833@@ -511,7 +511,7 @@ static atomic_t fc_event_seq;
42834 u32
42835 fc_get_event_number(void)
42836 {
42837- return atomic_add_return(1, &fc_event_seq);
42838+ return atomic_add_return_unchecked(1, &fc_event_seq);
42839 }
42840 EXPORT_SYMBOL(fc_get_event_number);
42841
42842@@ -659,7 +659,7 @@ static __init int fc_transport_init(void)
42843 {
42844 int error;
42845
42846- atomic_set(&fc_event_seq, 0);
42847+ atomic_set_unchecked(&fc_event_seq, 0);
42848
42849 error = transport_class_register(&fc_host_class);
42850 if (error)
42851@@ -849,7 +849,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
42852 char *cp;
42853
42854 *val = simple_strtoul(buf, &cp, 0);
42855- if ((*cp && (*cp != '\n')) || (*val < 0))
42856+ if (*cp && (*cp != '\n'))
42857 return -EINVAL;
42858 /*
42859 * Check for overflow; dev_loss_tmo is u32
42860diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
42861index 0a74b97..fa8d648 100644
42862--- a/drivers/scsi/scsi_transport_iscsi.c
42863+++ b/drivers/scsi/scsi_transport_iscsi.c
42864@@ -79,7 +79,7 @@ struct iscsi_internal {
42865 struct transport_container session_cont;
42866 };
42867
42868-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
42869+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
42870 static struct workqueue_struct *iscsi_eh_timer_workq;
42871
42872 static DEFINE_IDA(iscsi_sess_ida);
42873@@ -1064,7 +1064,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
42874 int err;
42875
42876 ihost = shost->shost_data;
42877- session->sid = atomic_add_return(1, &iscsi_session_nr);
42878+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
42879
42880 if (target_id == ISCSI_MAX_TARGET) {
42881 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
42882@@ -2955,7 +2955,7 @@ static __init int iscsi_transport_init(void)
42883 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
42884 ISCSI_TRANSPORT_VERSION);
42885
42886- atomic_set(&iscsi_session_nr, 0);
42887+ atomic_set_unchecked(&iscsi_session_nr, 0);
42888
42889 err = class_register(&iscsi_transport_class);
42890 if (err)
42891diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
42892index f379c7f..e8fc69c 100644
42893--- a/drivers/scsi/scsi_transport_srp.c
42894+++ b/drivers/scsi/scsi_transport_srp.c
42895@@ -33,7 +33,7 @@
42896 #include "scsi_transport_srp_internal.h"
42897
42898 struct srp_host_attrs {
42899- atomic_t next_port_id;
42900+ atomic_unchecked_t next_port_id;
42901 };
42902 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
42903
42904@@ -61,7 +61,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
42905 struct Scsi_Host *shost = dev_to_shost(dev);
42906 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
42907
42908- atomic_set(&srp_host->next_port_id, 0);
42909+ atomic_set_unchecked(&srp_host->next_port_id, 0);
42910 return 0;
42911 }
42912
42913@@ -210,7 +210,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
42914 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
42915 rport->roles = ids->roles;
42916
42917- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
42918+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
42919 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
42920
42921 transport_setup_device(&rport->dev);
42922diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
42923index 82910cc..7c350ad 100644
42924--- a/drivers/scsi/sd.c
42925+++ b/drivers/scsi/sd.c
42926@@ -2929,7 +2929,7 @@ static int sd_probe(struct device *dev)
42927 sdkp->disk = gd;
42928 sdkp->index = index;
42929 atomic_set(&sdkp->openers, 0);
42930- atomic_set(&sdkp->device->ioerr_cnt, 0);
42931+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
42932
42933 if (!sdp->request_queue->rq_timeout) {
42934 if (sdp->type != TYPE_MOD)
42935diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
42936index 9f0c465..47194ee 100644
42937--- a/drivers/scsi/sg.c
42938+++ b/drivers/scsi/sg.c
42939@@ -1101,7 +1101,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
42940 sdp->disk->disk_name,
42941 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
42942 NULL,
42943- (char *)arg);
42944+ (char __user *)arg);
42945 case BLKTRACESTART:
42946 return blk_trace_startstop(sdp->device->request_queue, 1);
42947 case BLKTRACESTOP:
42948diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
42949index 004b10f..7c98d51 100644
42950--- a/drivers/spi/spi.c
42951+++ b/drivers/spi/spi.c
42952@@ -1620,7 +1620,7 @@ int spi_bus_unlock(struct spi_master *master)
42953 EXPORT_SYMBOL_GPL(spi_bus_unlock);
42954
42955 /* portable code must never pass more than 32 bytes */
42956-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
42957+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
42958
42959 static u8 *buf;
42960
42961diff --git a/drivers/staging/iio/iio_hwmon.c b/drivers/staging/iio/iio_hwmon.c
42962index 93af756..a4bc5bf 100644
42963--- a/drivers/staging/iio/iio_hwmon.c
42964+++ b/drivers/staging/iio/iio_hwmon.c
42965@@ -67,7 +67,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
42966 {
42967 struct device *dev = &pdev->dev;
42968 struct iio_hwmon_state *st;
42969- struct sensor_device_attribute *a;
42970+ sensor_device_attribute_no_const *a;
42971 int ret, i;
42972 int in_i = 1, temp_i = 1, curr_i = 1;
42973 enum iio_chan_type type;
42974diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
42975index 34afc16..ffe44dd 100644
42976--- a/drivers/staging/octeon/ethernet-rx.c
42977+++ b/drivers/staging/octeon/ethernet-rx.c
42978@@ -421,11 +421,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
42979 /* Increment RX stats for virtual ports */
42980 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
42981 #ifdef CONFIG_64BIT
42982- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
42983- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
42984+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
42985+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
42986 #else
42987- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
42988- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
42989+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
42990+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
42991 #endif
42992 }
42993 netif_receive_skb(skb);
42994@@ -437,9 +437,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
42995 dev->name);
42996 */
42997 #ifdef CONFIG_64BIT
42998- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
42999+ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
43000 #else
43001- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
43002+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
43003 #endif
43004 dev_kfree_skb_irq(skb);
43005 }
43006diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
43007index c3a90e7..023619a 100644
43008--- a/drivers/staging/octeon/ethernet.c
43009+++ b/drivers/staging/octeon/ethernet.c
43010@@ -252,11 +252,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
43011 * since the RX tasklet also increments it.
43012 */
43013 #ifdef CONFIG_64BIT
43014- atomic64_add(rx_status.dropped_packets,
43015- (atomic64_t *)&priv->stats.rx_dropped);
43016+ atomic64_add_unchecked(rx_status.dropped_packets,
43017+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
43018 #else
43019- atomic_add(rx_status.dropped_packets,
43020- (atomic_t *)&priv->stats.rx_dropped);
43021+ atomic_add_unchecked(rx_status.dropped_packets,
43022+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
43023 #endif
43024 }
43025
43026diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
43027index dc23395..cf7e9b1 100644
43028--- a/drivers/staging/rtl8712/rtl871x_io.h
43029+++ b/drivers/staging/rtl8712/rtl871x_io.h
43030@@ -108,7 +108,7 @@ struct _io_ops {
43031 u8 *pmem);
43032 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
43033 u8 *pmem);
43034-};
43035+} __no_const;
43036
43037 struct io_req {
43038 struct list_head list;
43039diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
43040index 1f5088b..0e59820 100644
43041--- a/drivers/staging/sbe-2t3e3/netdev.c
43042+++ b/drivers/staging/sbe-2t3e3/netdev.c
43043@@ -51,7 +51,7 @@ static int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
43044 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
43045
43046 if (rlen)
43047- if (copy_to_user(data, &resp, rlen))
43048+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
43049 return -EFAULT;
43050
43051 return 0;
43052diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
43053index 5dddc4d..34fcb2f 100644
43054--- a/drivers/staging/usbip/vhci.h
43055+++ b/drivers/staging/usbip/vhci.h
43056@@ -83,7 +83,7 @@ struct vhci_hcd {
43057 unsigned resuming:1;
43058 unsigned long re_timeout;
43059
43060- atomic_t seqnum;
43061+ atomic_unchecked_t seqnum;
43062
43063 /*
43064 * NOTE:
43065diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
43066index f1ca084..7b5c0c3 100644
43067--- a/drivers/staging/usbip/vhci_hcd.c
43068+++ b/drivers/staging/usbip/vhci_hcd.c
43069@@ -441,7 +441,7 @@ static void vhci_tx_urb(struct urb *urb)
43070
43071 spin_lock(&vdev->priv_lock);
43072
43073- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
43074+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
43075 if (priv->seqnum == 0xffff)
43076 dev_info(&urb->dev->dev, "seqnum max\n");
43077
43078@@ -687,7 +687,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
43079 return -ENOMEM;
43080 }
43081
43082- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
43083+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
43084 if (unlink->seqnum == 0xffff)
43085 pr_info("seqnum max\n");
43086
43087@@ -891,7 +891,7 @@ static int vhci_start(struct usb_hcd *hcd)
43088 vdev->rhport = rhport;
43089 }
43090
43091- atomic_set(&vhci->seqnum, 0);
43092+ atomic_set_unchecked(&vhci->seqnum, 0);
43093 spin_lock_init(&vhci->lock);
43094
43095 hcd->power_budget = 0; /* no limit */
43096diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
43097index faf8e60..c46f8ab 100644
43098--- a/drivers/staging/usbip/vhci_rx.c
43099+++ b/drivers/staging/usbip/vhci_rx.c
43100@@ -76,7 +76,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
43101 if (!urb) {
43102 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
43103 pr_info("max seqnum %d\n",
43104- atomic_read(&the_controller->seqnum));
43105+ atomic_read_unchecked(&the_controller->seqnum));
43106 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
43107 return;
43108 }
43109diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
43110index 5f13890..36a044b 100644
43111--- a/drivers/staging/vt6655/hostap.c
43112+++ b/drivers/staging/vt6655/hostap.c
43113@@ -73,14 +73,13 @@ static int msglevel =MSG_LEVEL_INFO;
43114 *
43115 */
43116
43117+static net_device_ops_no_const apdev_netdev_ops;
43118+
43119 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
43120 {
43121 PSDevice apdev_priv;
43122 struct net_device *dev = pDevice->dev;
43123 int ret;
43124- const struct net_device_ops apdev_netdev_ops = {
43125- .ndo_start_xmit = pDevice->tx_80211,
43126- };
43127
43128 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
43129
43130@@ -92,6 +91,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
43131 *apdev_priv = *pDevice;
43132 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
43133
43134+ /* only half broken now */
43135+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
43136 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
43137
43138 pDevice->apdev->type = ARPHRD_IEEE80211;
43139diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
43140index a94e66f..31984d0 100644
43141--- a/drivers/staging/vt6656/hostap.c
43142+++ b/drivers/staging/vt6656/hostap.c
43143@@ -60,14 +60,13 @@ static int msglevel =MSG_LEVEL_INFO;
43144 *
43145 */
43146
43147+static net_device_ops_no_const apdev_netdev_ops;
43148+
43149 static int hostap_enable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
43150 {
43151 struct vnt_private *apdev_priv;
43152 struct net_device *dev = pDevice->dev;
43153 int ret;
43154- const struct net_device_ops apdev_netdev_ops = {
43155- .ndo_start_xmit = pDevice->tx_80211,
43156- };
43157
43158 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
43159
43160@@ -79,6 +78,8 @@ static int hostap_enable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
43161 *apdev_priv = *pDevice;
43162 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
43163
43164+ /* only half broken now */
43165+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
43166 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
43167
43168 pDevice->apdev->type = ARPHRD_IEEE80211;
43169diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
43170index a2b7e03..9ff4bbd 100644
43171--- a/drivers/staging/zcache/tmem.c
43172+++ b/drivers/staging/zcache/tmem.c
43173@@ -50,7 +50,7 @@
43174 * A tmem host implementation must use this function to register callbacks
43175 * for memory allocation.
43176 */
43177-static struct tmem_hostops tmem_hostops;
43178+static tmem_hostops_no_const tmem_hostops;
43179
43180 static void tmem_objnode_tree_init(void);
43181
43182@@ -64,7 +64,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
43183 * A tmem host implementation must use this function to register
43184 * callbacks for a page-accessible memory (PAM) implementation.
43185 */
43186-static struct tmem_pamops tmem_pamops;
43187+static tmem_pamops_no_const tmem_pamops;
43188
43189 void tmem_register_pamops(struct tmem_pamops *m)
43190 {
43191diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
43192index adbe5a8..d387359 100644
43193--- a/drivers/staging/zcache/tmem.h
43194+++ b/drivers/staging/zcache/tmem.h
43195@@ -226,6 +226,7 @@ struct tmem_pamops {
43196 int (*replace_in_obj)(void *, struct tmem_obj *);
43197 #endif
43198 };
43199+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
43200 extern void tmem_register_pamops(struct tmem_pamops *m);
43201
43202 /* memory allocation methods provided by the host implementation */
43203@@ -235,6 +236,7 @@ struct tmem_hostops {
43204 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
43205 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
43206 };
43207+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
43208 extern void tmem_register_hostops(struct tmem_hostops *m);
43209
43210 /* core tmem accessor functions */
43211diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
43212index 2e4d655..fd72e68 100644
43213--- a/drivers/target/target_core_device.c
43214+++ b/drivers/target/target_core_device.c
43215@@ -1414,7 +1414,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
43216 spin_lock_init(&dev->se_port_lock);
43217 spin_lock_init(&dev->se_tmr_lock);
43218 spin_lock_init(&dev->qf_cmd_lock);
43219- atomic_set(&dev->dev_ordered_id, 0);
43220+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
43221 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
43222 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
43223 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
43224diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
43225index fc9a5a0..1d5975e 100644
43226--- a/drivers/target/target_core_transport.c
43227+++ b/drivers/target/target_core_transport.c
43228@@ -1081,7 +1081,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
43229 * Used to determine when ORDERED commands should go from
43230 * Dormant to Active status.
43231 */
43232- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
43233+ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
43234 smp_mb__after_atomic_inc();
43235 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
43236 cmd->se_ordered_id, cmd->sam_task_attr,
43237diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
43238index 345bd0e..61d5375 100644
43239--- a/drivers/tty/cyclades.c
43240+++ b/drivers/tty/cyclades.c
43241@@ -1576,10 +1576,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
43242 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
43243 info->port.count);
43244 #endif
43245- info->port.count++;
43246+ atomic_inc(&info->port.count);
43247 #ifdef CY_DEBUG_COUNT
43248 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
43249- current->pid, info->port.count);
43250+ current->pid, atomic_read(&info->port.count));
43251 #endif
43252
43253 /*
43254@@ -3978,7 +3978,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
43255 for (j = 0; j < cy_card[i].nports; j++) {
43256 info = &cy_card[i].ports[j];
43257
43258- if (info->port.count) {
43259+ if (atomic_read(&info->port.count)) {
43260 /* XXX is the ldisc num worth this? */
43261 struct tty_struct *tty;
43262 struct tty_ldisc *ld;
43263diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
43264index eb255e8..f637a57 100644
43265--- a/drivers/tty/hvc/hvc_console.c
43266+++ b/drivers/tty/hvc/hvc_console.c
43267@@ -338,7 +338,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
43268
43269 spin_lock_irqsave(&hp->port.lock, flags);
43270 /* Check and then increment for fast path open. */
43271- if (hp->port.count++ > 0) {
43272+ if (atomic_inc_return(&hp->port.count) > 1) {
43273 spin_unlock_irqrestore(&hp->port.lock, flags);
43274 hvc_kick();
43275 return 0;
43276@@ -388,7 +388,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
43277
43278 spin_lock_irqsave(&hp->port.lock, flags);
43279
43280- if (--hp->port.count == 0) {
43281+ if (atomic_dec_return(&hp->port.count) == 0) {
43282 spin_unlock_irqrestore(&hp->port.lock, flags);
43283 /* We are done with the tty pointer now. */
43284 tty_port_tty_set(&hp->port, NULL);
43285@@ -406,9 +406,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
43286 */
43287 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
43288 } else {
43289- if (hp->port.count < 0)
43290+ if (atomic_read(&hp->port.count) < 0)
43291 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
43292- hp->vtermno, hp->port.count);
43293+ hp->vtermno, atomic_read(&hp->port.count));
43294 spin_unlock_irqrestore(&hp->port.lock, flags);
43295 }
43296 }
43297@@ -438,12 +438,12 @@ static void hvc_hangup(struct tty_struct *tty)
43298 * open->hangup case this can be called after the final close so prevent
43299 * that from happening for now.
43300 */
43301- if (hp->port.count <= 0) {
43302+ if (atomic_read(&hp->port.count) <= 0) {
43303 spin_unlock_irqrestore(&hp->port.lock, flags);
43304 return;
43305 }
43306
43307- hp->port.count = 0;
43308+ atomic_set(&hp->port.count, 0);
43309 spin_unlock_irqrestore(&hp->port.lock, flags);
43310 tty_port_tty_set(&hp->port, NULL);
43311
43312@@ -491,7 +491,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
43313 return -EPIPE;
43314
43315 /* FIXME what's this (unprotected) check for? */
43316- if (hp->port.count <= 0)
43317+ if (atomic_read(&hp->port.count) <= 0)
43318 return -EIO;
43319
43320 spin_lock_irqsave(&hp->lock, flags);
43321diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
43322index 81e939e..95ead10 100644
43323--- a/drivers/tty/hvc/hvcs.c
43324+++ b/drivers/tty/hvc/hvcs.c
43325@@ -83,6 +83,7 @@
43326 #include <asm/hvcserver.h>
43327 #include <asm/uaccess.h>
43328 #include <asm/vio.h>
43329+#include <asm/local.h>
43330
43331 /*
43332 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
43333@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
43334
43335 spin_lock_irqsave(&hvcsd->lock, flags);
43336
43337- if (hvcsd->port.count > 0) {
43338+ if (atomic_read(&hvcsd->port.count) > 0) {
43339 spin_unlock_irqrestore(&hvcsd->lock, flags);
43340 printk(KERN_INFO "HVCS: vterm state unchanged. "
43341 "The hvcs device node is still in use.\n");
43342@@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
43343 }
43344 }
43345
43346- hvcsd->port.count = 0;
43347+ atomic_set(&hvcsd->port.count, 0);
43348 hvcsd->port.tty = tty;
43349 tty->driver_data = hvcsd;
43350
43351@@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
43352 unsigned long flags;
43353
43354 spin_lock_irqsave(&hvcsd->lock, flags);
43355- hvcsd->port.count++;
43356+ atomic_inc(&hvcsd->port.count);
43357 hvcsd->todo_mask |= HVCS_SCHED_READ;
43358 spin_unlock_irqrestore(&hvcsd->lock, flags);
43359
43360@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
43361 hvcsd = tty->driver_data;
43362
43363 spin_lock_irqsave(&hvcsd->lock, flags);
43364- if (--hvcsd->port.count == 0) {
43365+ if (atomic_dec_and_test(&hvcsd->port.count)) {
43366
43367 vio_disable_interrupts(hvcsd->vdev);
43368
43369@@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
43370
43371 free_irq(irq, hvcsd);
43372 return;
43373- } else if (hvcsd->port.count < 0) {
43374+ } else if (atomic_read(&hvcsd->port.count) < 0) {
43375 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
43376 " is missmanaged.\n",
43377- hvcsd->vdev->unit_address, hvcsd->port.count);
43378+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
43379 }
43380
43381 spin_unlock_irqrestore(&hvcsd->lock, flags);
43382@@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty)
43383
43384 spin_lock_irqsave(&hvcsd->lock, flags);
43385 /* Preserve this so that we know how many kref refs to put */
43386- temp_open_count = hvcsd->port.count;
43387+ temp_open_count = atomic_read(&hvcsd->port.count);
43388
43389 /*
43390 * Don't kref put inside the spinlock because the destruction
43391@@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty)
43392 tty->driver_data = NULL;
43393 hvcsd->port.tty = NULL;
43394
43395- hvcsd->port.count = 0;
43396+ atomic_set(&hvcsd->port.count, 0);
43397
43398 /* This will drop any buffered data on the floor which is OK in a hangup
43399 * scenario. */
43400@@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty,
43401 * the middle of a write operation? This is a crummy place to do this
43402 * but we want to keep it all in the spinlock.
43403 */
43404- if (hvcsd->port.count <= 0) {
43405+ if (atomic_read(&hvcsd->port.count) <= 0) {
43406 spin_unlock_irqrestore(&hvcsd->lock, flags);
43407 return -ENODEV;
43408 }
43409@@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty)
43410 {
43411 struct hvcs_struct *hvcsd = tty->driver_data;
43412
43413- if (!hvcsd || hvcsd->port.count <= 0)
43414+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
43415 return 0;
43416
43417 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
43418diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
43419index 8fd72ff..34a0bed 100644
43420--- a/drivers/tty/ipwireless/tty.c
43421+++ b/drivers/tty/ipwireless/tty.c
43422@@ -29,6 +29,7 @@
43423 #include <linux/tty_driver.h>
43424 #include <linux/tty_flip.h>
43425 #include <linux/uaccess.h>
43426+#include <asm/local.h>
43427
43428 #include "tty.h"
43429 #include "network.h"
43430@@ -99,10 +100,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
43431 mutex_unlock(&tty->ipw_tty_mutex);
43432 return -ENODEV;
43433 }
43434- if (tty->port.count == 0)
43435+ if (atomic_read(&tty->port.count) == 0)
43436 tty->tx_bytes_queued = 0;
43437
43438- tty->port.count++;
43439+ atomic_inc(&tty->port.count);
43440
43441 tty->port.tty = linux_tty;
43442 linux_tty->driver_data = tty;
43443@@ -118,9 +119,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
43444
43445 static void do_ipw_close(struct ipw_tty *tty)
43446 {
43447- tty->port.count--;
43448-
43449- if (tty->port.count == 0) {
43450+ if (atomic_dec_return(&tty->port.count) == 0) {
43451 struct tty_struct *linux_tty = tty->port.tty;
43452
43453 if (linux_tty != NULL) {
43454@@ -141,7 +140,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
43455 return;
43456
43457 mutex_lock(&tty->ipw_tty_mutex);
43458- if (tty->port.count == 0) {
43459+ if (atomic_read(&tty->port.count) == 0) {
43460 mutex_unlock(&tty->ipw_tty_mutex);
43461 return;
43462 }
43463@@ -164,7 +163,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
43464
43465 mutex_lock(&tty->ipw_tty_mutex);
43466
43467- if (!tty->port.count) {
43468+ if (!atomic_read(&tty->port.count)) {
43469 mutex_unlock(&tty->ipw_tty_mutex);
43470 return;
43471 }
43472@@ -206,7 +205,7 @@ static int ipw_write(struct tty_struct *linux_tty,
43473 return -ENODEV;
43474
43475 mutex_lock(&tty->ipw_tty_mutex);
43476- if (!tty->port.count) {
43477+ if (!atomic_read(&tty->port.count)) {
43478 mutex_unlock(&tty->ipw_tty_mutex);
43479 return -EINVAL;
43480 }
43481@@ -246,7 +245,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
43482 if (!tty)
43483 return -ENODEV;
43484
43485- if (!tty->port.count)
43486+ if (!atomic_read(&tty->port.count))
43487 return -EINVAL;
43488
43489 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
43490@@ -288,7 +287,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
43491 if (!tty)
43492 return 0;
43493
43494- if (!tty->port.count)
43495+ if (!atomic_read(&tty->port.count))
43496 return 0;
43497
43498 return tty->tx_bytes_queued;
43499@@ -369,7 +368,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
43500 if (!tty)
43501 return -ENODEV;
43502
43503- if (!tty->port.count)
43504+ if (!atomic_read(&tty->port.count))
43505 return -EINVAL;
43506
43507 return get_control_lines(tty);
43508@@ -385,7 +384,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
43509 if (!tty)
43510 return -ENODEV;
43511
43512- if (!tty->port.count)
43513+ if (!atomic_read(&tty->port.count))
43514 return -EINVAL;
43515
43516 return set_control_lines(tty, set, clear);
43517@@ -399,7 +398,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
43518 if (!tty)
43519 return -ENODEV;
43520
43521- if (!tty->port.count)
43522+ if (!atomic_read(&tty->port.count))
43523 return -EINVAL;
43524
43525 /* FIXME: Exactly how is the tty object locked here .. */
43526@@ -555,7 +554,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
43527 * are gone */
43528 mutex_lock(&ttyj->ipw_tty_mutex);
43529 }
43530- while (ttyj->port.count)
43531+ while (atomic_read(&ttyj->port.count))
43532 do_ipw_close(ttyj);
43533 ipwireless_disassociate_network_ttys(network,
43534 ttyj->channel_idx);
43535diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
43536index adeac25..787a0a1 100644
43537--- a/drivers/tty/moxa.c
43538+++ b/drivers/tty/moxa.c
43539@@ -1193,7 +1193,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
43540 }
43541
43542 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
43543- ch->port.count++;
43544+ atomic_inc(&ch->port.count);
43545 tty->driver_data = ch;
43546 tty_port_tty_set(&ch->port, tty);
43547 mutex_lock(&ch->port.mutex);
43548diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
43549index 4a43ef5d7..aa71f27 100644
43550--- a/drivers/tty/n_gsm.c
43551+++ b/drivers/tty/n_gsm.c
43552@@ -1636,7 +1636,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
43553 spin_lock_init(&dlci->lock);
43554 mutex_init(&dlci->mutex);
43555 dlci->fifo = &dlci->_fifo;
43556- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
43557+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
43558 kfree(dlci);
43559 return NULL;
43560 }
43561@@ -2936,7 +2936,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
43562 struct gsm_dlci *dlci = tty->driver_data;
43563 struct tty_port *port = &dlci->port;
43564
43565- port->count++;
43566+ atomic_inc(&port->count);
43567 dlci_get(dlci);
43568 dlci_get(dlci->gsm->dlci[0]);
43569 mux_get(dlci->gsm);
43570diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
43571index 1f8cba6..47b06c2 100644
43572--- a/drivers/tty/n_tty.c
43573+++ b/drivers/tty/n_tty.c
43574@@ -2205,6 +2205,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
43575 {
43576 *ops = tty_ldisc_N_TTY;
43577 ops->owner = NULL;
43578- ops->refcount = ops->flags = 0;
43579+ atomic_set(&ops->refcount, 0);
43580+ ops->flags = 0;
43581 }
43582 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
43583diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
43584index 125e0fd..8c50690 100644
43585--- a/drivers/tty/pty.c
43586+++ b/drivers/tty/pty.c
43587@@ -800,8 +800,10 @@ static void __init unix98_pty_init(void)
43588 panic("Couldn't register Unix98 pts driver");
43589
43590 /* Now create the /dev/ptmx special device */
43591+ pax_open_kernel();
43592 tty_default_fops(&ptmx_fops);
43593- ptmx_fops.open = ptmx_open;
43594+ *(void **)&ptmx_fops.open = ptmx_open;
43595+ pax_close_kernel();
43596
43597 cdev_init(&ptmx_cdev, &ptmx_fops);
43598 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
43599diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
43600index 1d27003..959f452 100644
43601--- a/drivers/tty/rocket.c
43602+++ b/drivers/tty/rocket.c
43603@@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
43604 tty->driver_data = info;
43605 tty_port_tty_set(port, tty);
43606
43607- if (port->count++ == 0) {
43608+ if (atomic_inc_return(&port->count) == 1) {
43609 atomic_inc(&rp_num_ports_open);
43610
43611 #ifdef ROCKET_DEBUG_OPEN
43612@@ -932,7 +932,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
43613 #endif
43614 }
43615 #ifdef ROCKET_DEBUG_OPEN
43616- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
43617+	printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
43618 #endif
43619
43620 /*
43621@@ -1527,7 +1527,7 @@ static void rp_hangup(struct tty_struct *tty)
43622 spin_unlock_irqrestore(&info->port.lock, flags);
43623 return;
43624 }
43625- if (info->port.count)
43626+ if (atomic_read(&info->port.count))
43627 atomic_dec(&rp_num_ports_open);
43628 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
43629 spin_unlock_irqrestore(&info->port.lock, flags);
43630diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
43631index 1002054..dd644a8 100644
43632--- a/drivers/tty/serial/kgdboc.c
43633+++ b/drivers/tty/serial/kgdboc.c
43634@@ -24,8 +24,9 @@
43635 #define MAX_CONFIG_LEN 40
43636
43637 static struct kgdb_io kgdboc_io_ops;
43638+static struct kgdb_io kgdboc_io_ops_console;
43639
43640-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
43641+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
43642 static int configured = -1;
43643
43644 static char config[MAX_CONFIG_LEN];
43645@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
43646 kgdboc_unregister_kbd();
43647 if (configured == 1)
43648 kgdb_unregister_io_module(&kgdboc_io_ops);
43649+ else if (configured == 2)
43650+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
43651 }
43652
43653 static int configure_kgdboc(void)
43654@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
43655 int err;
43656 char *cptr = config;
43657 struct console *cons;
43658+ int is_console = 0;
43659
43660 err = kgdboc_option_setup(config);
43661 if (err || !strlen(config) || isspace(config[0]))
43662 goto noconfig;
43663
43664 err = -ENODEV;
43665- kgdboc_io_ops.is_console = 0;
43666 kgdb_tty_driver = NULL;
43667
43668 kgdboc_use_kms = 0;
43669@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
43670 int idx;
43671 if (cons->device && cons->device(cons, &idx) == p &&
43672 idx == tty_line) {
43673- kgdboc_io_ops.is_console = 1;
43674+ is_console = 1;
43675 break;
43676 }
43677 cons = cons->next;
43678@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
43679 kgdb_tty_line = tty_line;
43680
43681 do_register:
43682- err = kgdb_register_io_module(&kgdboc_io_ops);
43683+ if (is_console) {
43684+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
43685+ configured = 2;
43686+ } else {
43687+ err = kgdb_register_io_module(&kgdboc_io_ops);
43688+ configured = 1;
43689+ }
43690 if (err)
43691 goto noconfig;
43692
43693@@ -205,8 +214,6 @@ do_register:
43694 if (err)
43695 goto nmi_con_failed;
43696
43697- configured = 1;
43698-
43699 return 0;
43700
43701 nmi_con_failed:
43702@@ -223,7 +230,7 @@ noconfig:
43703 static int __init init_kgdboc(void)
43704 {
43705 /* Already configured? */
43706- if (configured == 1)
43707+ if (configured >= 1)
43708 return 0;
43709
43710 return configure_kgdboc();
43711@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
43712 if (config[len - 1] == '\n')
43713 config[len - 1] = '\0';
43714
43715- if (configured == 1)
43716+ if (configured >= 1)
43717 cleanup_kgdboc();
43718
43719 /* Go and configure with the new params. */
43720@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
43721 .post_exception = kgdboc_post_exp_handler,
43722 };
43723
43724+static struct kgdb_io kgdboc_io_ops_console = {
43725+ .name = "kgdboc",
43726+ .read_char = kgdboc_get_char,
43727+ .write_char = kgdboc_put_char,
43728+ .pre_exception = kgdboc_pre_exp_handler,
43729+ .post_exception = kgdboc_post_exp_handler,
43730+ .is_console = 1
43731+};
43732+
43733 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
43734 /* This is only available if kgdboc is a built in for early debugging */
43735 static int __init kgdboc_early_init(char *opt)
43736diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
43737index 2769a38..f3dbe48 100644
43738--- a/drivers/tty/serial/samsung.c
43739+++ b/drivers/tty/serial/samsung.c
43740@@ -451,11 +451,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
43741 }
43742 }
43743
43744+static int s3c64xx_serial_startup(struct uart_port *port);
43745 static int s3c24xx_serial_startup(struct uart_port *port)
43746 {
43747 struct s3c24xx_uart_port *ourport = to_ourport(port);
43748 int ret;
43749
43750+ /* Startup sequence is different for s3c64xx and higher SoC's */
43751+ if (s3c24xx_serial_has_interrupt_mask(port))
43752+ return s3c64xx_serial_startup(port);
43753+
43754 dbg("s3c24xx_serial_startup: port=%p (%08lx,%p)\n",
43755 port->mapbase, port->membase);
43756
43757@@ -1120,10 +1125,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
43758 /* setup info for port */
43759 port->dev = &platdev->dev;
43760
43761- /* Startup sequence is different for s3c64xx and higher SoC's */
43762- if (s3c24xx_serial_has_interrupt_mask(port))
43763- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
43764-
43765 port->uartclk = 1;
43766
43767 if (cfg->uart_flags & UPF_CONS_FLOW) {
43768diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
43769index 8fbb6d2..822a9e6 100644
43770--- a/drivers/tty/serial/serial_core.c
43771+++ b/drivers/tty/serial/serial_core.c
43772@@ -1454,7 +1454,7 @@ static void uart_hangup(struct tty_struct *tty)
43773 uart_flush_buffer(tty);
43774 uart_shutdown(tty, state);
43775 spin_lock_irqsave(&port->lock, flags);
43776- port->count = 0;
43777+ atomic_set(&port->count, 0);
43778 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
43779 spin_unlock_irqrestore(&port->lock, flags);
43780 tty_port_tty_set(port, NULL);
43781@@ -1550,7 +1550,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
43782 goto end;
43783 }
43784
43785- port->count++;
43786+ atomic_inc(&port->count);
43787 if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
43788 retval = -ENXIO;
43789 goto err_dec_count;
43790@@ -1578,7 +1578,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
43791 /*
43792 * Make sure the device is in D0 state.
43793 */
43794- if (port->count == 1)
43795+ if (atomic_read(&port->count) == 1)
43796 uart_change_pm(state, UART_PM_STATE_ON);
43797
43798 /*
43799@@ -1596,7 +1596,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
43800 end:
43801 return retval;
43802 err_dec_count:
43803- port->count--;
43804+	atomic_dec(&port->count);
43805 mutex_unlock(&port->mutex);
43806 goto end;
43807 }
43808diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
43809index 8983276..72a4090 100644
43810--- a/drivers/tty/synclink.c
43811+++ b/drivers/tty/synclink.c
43812@@ -3093,7 +3093,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
43813
43814 if (debug_level >= DEBUG_LEVEL_INFO)
43815 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
43816- __FILE__,__LINE__, info->device_name, info->port.count);
43817+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
43818
43819 if (tty_port_close_start(&info->port, tty, filp) == 0)
43820 goto cleanup;
43821@@ -3111,7 +3111,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
43822 cleanup:
43823 if (debug_level >= DEBUG_LEVEL_INFO)
43824 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
43825- tty->driver->name, info->port.count);
43826+ tty->driver->name, atomic_read(&info->port.count));
43827
43828 } /* end of mgsl_close() */
43829
43830@@ -3210,8 +3210,8 @@ static void mgsl_hangup(struct tty_struct *tty)
43831
43832 mgsl_flush_buffer(tty);
43833 shutdown(info);
43834-
43835- info->port.count = 0;
43836+
43837+ atomic_set(&info->port.count, 0);
43838 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
43839 info->port.tty = NULL;
43840
43841@@ -3300,12 +3300,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
43842
43843 if (debug_level >= DEBUG_LEVEL_INFO)
43844 printk("%s(%d):block_til_ready before block on %s count=%d\n",
43845- __FILE__,__LINE__, tty->driver->name, port->count );
43846+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
43847
43848 spin_lock_irqsave(&info->irq_spinlock, flags);
43849 if (!tty_hung_up_p(filp)) {
43850 extra_count = true;
43851- port->count--;
43852+ atomic_dec(&port->count);
43853 }
43854 spin_unlock_irqrestore(&info->irq_spinlock, flags);
43855 port->blocked_open++;
43856@@ -3334,7 +3334,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
43857
43858 if (debug_level >= DEBUG_LEVEL_INFO)
43859 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
43860- __FILE__,__LINE__, tty->driver->name, port->count );
43861+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
43862
43863 tty_unlock(tty);
43864 schedule();
43865@@ -3346,12 +3346,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
43866
43867 /* FIXME: Racy on hangup during close wait */
43868 if (extra_count)
43869- port->count++;
43870+ atomic_inc(&port->count);
43871 port->blocked_open--;
43872
43873 if (debug_level >= DEBUG_LEVEL_INFO)
43874 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
43875- __FILE__,__LINE__, tty->driver->name, port->count );
43876+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
43877
43878 if (!retval)
43879 port->flags |= ASYNC_NORMAL_ACTIVE;
43880@@ -3403,7 +3403,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
43881
43882 if (debug_level >= DEBUG_LEVEL_INFO)
43883 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
43884- __FILE__,__LINE__,tty->driver->name, info->port.count);
43885+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
43886
43887 /* If port is closing, signal caller to try again */
43888 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
43889@@ -3422,10 +3422,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
43890 spin_unlock_irqrestore(&info->netlock, flags);
43891 goto cleanup;
43892 }
43893- info->port.count++;
43894+ atomic_inc(&info->port.count);
43895 spin_unlock_irqrestore(&info->netlock, flags);
43896
43897- if (info->port.count == 1) {
43898+ if (atomic_read(&info->port.count) == 1) {
43899 /* 1st open on this device, init hardware */
43900 retval = startup(info);
43901 if (retval < 0)
43902@@ -3449,8 +3449,8 @@ cleanup:
43903 if (retval) {
43904 if (tty->count == 1)
43905 info->port.tty = NULL; /* tty layer will release tty struct */
43906- if(info->port.count)
43907- info->port.count--;
43908+ if (atomic_read(&info->port.count))
43909+ atomic_dec(&info->port.count);
43910 }
43911
43912 return retval;
43913@@ -7668,7 +7668,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
43914 unsigned short new_crctype;
43915
43916 /* return error if TTY interface open */
43917- if (info->port.count)
43918+ if (atomic_read(&info->port.count))
43919 return -EBUSY;
43920
43921 switch (encoding)
43922@@ -7763,7 +7763,7 @@ static int hdlcdev_open(struct net_device *dev)
43923
43924 /* arbitrate between network and tty opens */
43925 spin_lock_irqsave(&info->netlock, flags);
43926- if (info->port.count != 0 || info->netcount != 0) {
43927+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
43928 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
43929 spin_unlock_irqrestore(&info->netlock, flags);
43930 return -EBUSY;
43931@@ -7849,7 +7849,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
43932 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
43933
43934 /* return error if TTY interface open */
43935- if (info->port.count)
43936+ if (atomic_read(&info->port.count))
43937 return -EBUSY;
43938
43939 if (cmd != SIOCWANDEV)
43940diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
43941index aa9eece..d8baaec 100644
43942--- a/drivers/tty/synclink_gt.c
43943+++ b/drivers/tty/synclink_gt.c
43944@@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp)
43945 tty->driver_data = info;
43946 info->port.tty = tty;
43947
43948- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
43949+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
43950
43951 /* If port is closing, signal caller to try again */
43952 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
43953@@ -691,10 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
43954 mutex_unlock(&info->port.mutex);
43955 goto cleanup;
43956 }
43957- info->port.count++;
43958+ atomic_inc(&info->port.count);
43959 spin_unlock_irqrestore(&info->netlock, flags);
43960
43961- if (info->port.count == 1) {
43962+ if (atomic_read(&info->port.count) == 1) {
43963 /* 1st open on this device, init hardware */
43964 retval = startup(info);
43965 if (retval < 0) {
43966@@ -715,8 +715,8 @@ cleanup:
43967 if (retval) {
43968 if (tty->count == 1)
43969 info->port.tty = NULL; /* tty layer will release tty struct */
43970- if(info->port.count)
43971- info->port.count--;
43972+ if(atomic_read(&info->port.count))
43973+ atomic_dec(&info->port.count);
43974 }
43975
43976 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
43977@@ -729,7 +729,7 @@ static void close(struct tty_struct *tty, struct file *filp)
43978
43979 if (sanity_check(info, tty->name, "close"))
43980 return;
43981- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
43982+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
43983
43984 if (tty_port_close_start(&info->port, tty, filp) == 0)
43985 goto cleanup;
43986@@ -746,7 +746,7 @@ static void close(struct tty_struct *tty, struct file *filp)
43987 tty_port_close_end(&info->port, tty);
43988 info->port.tty = NULL;
43989 cleanup:
43990- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
43991+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
43992 }
43993
43994 static void hangup(struct tty_struct *tty)
43995@@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tty)
43996 shutdown(info);
43997
43998 spin_lock_irqsave(&info->port.lock, flags);
43999- info->port.count = 0;
44000+ atomic_set(&info->port.count, 0);
44001 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
44002 info->port.tty = NULL;
44003 spin_unlock_irqrestore(&info->port.lock, flags);
44004@@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
44005 unsigned short new_crctype;
44006
44007 /* return error if TTY interface open */
44008- if (info->port.count)
44009+ if (atomic_read(&info->port.count))
44010 return -EBUSY;
44011
44012 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
44013@@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_device *dev)
44014
44015 /* arbitrate between network and tty opens */
44016 spin_lock_irqsave(&info->netlock, flags);
44017- if (info->port.count != 0 || info->netcount != 0) {
44018+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
44019 DBGINFO(("%s hdlc_open busy\n", dev->name));
44020 spin_unlock_irqrestore(&info->netlock, flags);
44021 return -EBUSY;
44022@@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
44023 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
44024
44025 /* return error if TTY interface open */
44026- if (info->port.count)
44027+ if (atomic_read(&info->port.count))
44028 return -EBUSY;
44029
44030 if (cmd != SIOCWANDEV)
44031@@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
44032 if (port == NULL)
44033 continue;
44034 spin_lock(&port->lock);
44035- if ((port->port.count || port->netcount) &&
44036+ if ((atomic_read(&port->port.count) || port->netcount) &&
44037 port->pending_bh && !port->bh_running &&
44038 !port->bh_requested) {
44039 DBGISR(("%s bh queued\n", port->device_name));
44040@@ -3302,7 +3302,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
44041 spin_lock_irqsave(&info->lock, flags);
44042 if (!tty_hung_up_p(filp)) {
44043 extra_count = true;
44044- port->count--;
44045+ atomic_dec(&port->count);
44046 }
44047 spin_unlock_irqrestore(&info->lock, flags);
44048 port->blocked_open++;
44049@@ -3339,7 +3339,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
44050 remove_wait_queue(&port->open_wait, &wait);
44051
44052 if (extra_count)
44053- port->count++;
44054+ atomic_inc(&port->count);
44055 port->blocked_open--;
44056
44057 if (!retval)
44058diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
44059index 6d5780c..aa4d8cd 100644
44060--- a/drivers/tty/synclinkmp.c
44061+++ b/drivers/tty/synclinkmp.c
44062@@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp)
44063
44064 if (debug_level >= DEBUG_LEVEL_INFO)
44065 printk("%s(%d):%s open(), old ref count = %d\n",
44066- __FILE__,__LINE__,tty->driver->name, info->port.count);
44067+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
44068
44069 /* If port is closing, signal caller to try again */
44070 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
44071@@ -769,10 +769,10 @@ static int open(struct tty_struct *tty, struct file *filp)
44072 spin_unlock_irqrestore(&info->netlock, flags);
44073 goto cleanup;
44074 }
44075- info->port.count++;
44076+ atomic_inc(&info->port.count);
44077 spin_unlock_irqrestore(&info->netlock, flags);
44078
44079- if (info->port.count == 1) {
44080+ if (atomic_read(&info->port.count) == 1) {
44081 /* 1st open on this device, init hardware */
44082 retval = startup(info);
44083 if (retval < 0)
44084@@ -796,8 +796,8 @@ cleanup:
44085 if (retval) {
44086 if (tty->count == 1)
44087 info->port.tty = NULL; /* tty layer will release tty struct */
44088- if(info->port.count)
44089- info->port.count--;
44090+ if(atomic_read(&info->port.count))
44091+ atomic_dec(&info->port.count);
44092 }
44093
44094 return retval;
44095@@ -815,7 +815,7 @@ static void close(struct tty_struct *tty, struct file *filp)
44096
44097 if (debug_level >= DEBUG_LEVEL_INFO)
44098 printk("%s(%d):%s close() entry, count=%d\n",
44099- __FILE__,__LINE__, info->device_name, info->port.count);
44100+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
44101
44102 if (tty_port_close_start(&info->port, tty, filp) == 0)
44103 goto cleanup;
44104@@ -834,7 +834,7 @@ static void close(struct tty_struct *tty, struct file *filp)
44105 cleanup:
44106 if (debug_level >= DEBUG_LEVEL_INFO)
44107 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
44108- tty->driver->name, info->port.count);
44109+ tty->driver->name, atomic_read(&info->port.count));
44110 }
44111
44112 /* Called by tty_hangup() when a hangup is signaled.
44113@@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tty)
44114 shutdown(info);
44115
44116 spin_lock_irqsave(&info->port.lock, flags);
44117- info->port.count = 0;
44118+ atomic_set(&info->port.count, 0);
44119 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
44120 info->port.tty = NULL;
44121 spin_unlock_irqrestore(&info->port.lock, flags);
44122@@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
44123 unsigned short new_crctype;
44124
44125 /* return error if TTY interface open */
44126- if (info->port.count)
44127+ if (atomic_read(&info->port.count))
44128 return -EBUSY;
44129
44130 switch (encoding)
44131@@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_device *dev)
44132
44133 /* arbitrate between network and tty opens */
44134 spin_lock_irqsave(&info->netlock, flags);
44135- if (info->port.count != 0 || info->netcount != 0) {
44136+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
44137 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
44138 spin_unlock_irqrestore(&info->netlock, flags);
44139 return -EBUSY;
44140@@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
44141 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
44142
44143 /* return error if TTY interface open */
44144- if (info->port.count)
44145+ if (atomic_read(&info->port.count))
44146 return -EBUSY;
44147
44148 if (cmd != SIOCWANDEV)
44149@@ -2620,7 +2620,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
44150 * do not request bottom half processing if the
44151 * device is not open in a normal mode.
44152 */
44153- if ( port && (port->port.count || port->netcount) &&
44154+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
44155 port->pending_bh && !port->bh_running &&
44156 !port->bh_requested ) {
44157 if ( debug_level >= DEBUG_LEVEL_ISR )
44158@@ -3318,12 +3318,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
44159
44160 if (debug_level >= DEBUG_LEVEL_INFO)
44161 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
44162- __FILE__,__LINE__, tty->driver->name, port->count );
44163+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
44164
44165 spin_lock_irqsave(&info->lock, flags);
44166 if (!tty_hung_up_p(filp)) {
44167 extra_count = true;
44168- port->count--;
44169+ atomic_dec(&port->count);
44170 }
44171 spin_unlock_irqrestore(&info->lock, flags);
44172 port->blocked_open++;
44173@@ -3352,7 +3352,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
44174
44175 if (debug_level >= DEBUG_LEVEL_INFO)
44176 printk("%s(%d):%s block_til_ready() count=%d\n",
44177- __FILE__,__LINE__, tty->driver->name, port->count );
44178+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
44179
44180 tty_unlock(tty);
44181 schedule();
44182@@ -3363,12 +3363,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
44183 remove_wait_queue(&port->open_wait, &wait);
44184
44185 if (extra_count)
44186- port->count++;
44187+ atomic_inc(&port->count);
44188 port->blocked_open--;
44189
44190 if (debug_level >= DEBUG_LEVEL_INFO)
44191 printk("%s(%d):%s block_til_ready() after, count=%d\n",
44192- __FILE__,__LINE__, tty->driver->name, port->count );
44193+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
44194
44195 if (!retval)
44196 port->flags |= ASYNC_NORMAL_ACTIVE;
44197diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
44198index 3687f0c..6b9b808 100644
44199--- a/drivers/tty/sysrq.c
44200+++ b/drivers/tty/sysrq.c
44201@@ -995,7 +995,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
44202 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
44203 size_t count, loff_t *ppos)
44204 {
44205- if (count) {
44206+ if (count && capable(CAP_SYS_ADMIN)) {
44207 char c;
44208
44209 if (get_user(c, buf))
44210diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
44211index a9cd0b9..47b9336 100644
44212--- a/drivers/tty/tty_io.c
44213+++ b/drivers/tty/tty_io.c
44214@@ -3398,7 +3398,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
44215
44216 void tty_default_fops(struct file_operations *fops)
44217 {
44218- *fops = tty_fops;
44219+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
44220 }
44221
44222 /*
44223diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
44224index d794087..e4f49e5 100644
44225--- a/drivers/tty/tty_ldisc.c
44226+++ b/drivers/tty/tty_ldisc.c
44227@@ -56,7 +56,7 @@ static void put_ldisc(struct tty_ldisc *ld)
44228 if (atomic_dec_and_test(&ld->users)) {
44229 struct tty_ldisc_ops *ldo = ld->ops;
44230
44231- ldo->refcount--;
44232+ atomic_dec(&ldo->refcount);
44233 module_put(ldo->owner);
44234 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
44235
44236@@ -93,7 +93,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
44237 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
44238 tty_ldiscs[disc] = new_ldisc;
44239 new_ldisc->num = disc;
44240- new_ldisc->refcount = 0;
44241+ atomic_set(&new_ldisc->refcount, 0);
44242 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
44243
44244 return ret;
44245@@ -121,7 +121,7 @@ int tty_unregister_ldisc(int disc)
44246 return -EINVAL;
44247
44248 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
44249- if (tty_ldiscs[disc]->refcount)
44250+ if (atomic_read(&tty_ldiscs[disc]->refcount))
44251 ret = -EBUSY;
44252 else
44253 tty_ldiscs[disc] = NULL;
44254@@ -142,7 +142,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
44255 if (ldops) {
44256 ret = ERR_PTR(-EAGAIN);
44257 if (try_module_get(ldops->owner)) {
44258- ldops->refcount++;
44259+ atomic_inc(&ldops->refcount);
44260 ret = ldops;
44261 }
44262 }
44263@@ -155,7 +155,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
44264 unsigned long flags;
44265
44266 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
44267- ldops->refcount--;
44268+ atomic_dec(&ldops->refcount);
44269 module_put(ldops->owner);
44270 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
44271 }
44272diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
44273index b7ff59d..7c6105e 100644
44274--- a/drivers/tty/tty_port.c
44275+++ b/drivers/tty/tty_port.c
44276@@ -218,7 +218,7 @@ void tty_port_hangup(struct tty_port *port)
44277 unsigned long flags;
44278
44279 spin_lock_irqsave(&port->lock, flags);
44280- port->count = 0;
44281+ atomic_set(&port->count, 0);
44282 port->flags &= ~ASYNC_NORMAL_ACTIVE;
44283 if (port->tty) {
44284 set_bit(TTY_IO_ERROR, &port->tty->flags);
44285@@ -344,7 +344,7 @@ int tty_port_block_til_ready(struct tty_port *port,
44286 /* The port lock protects the port counts */
44287 spin_lock_irqsave(&port->lock, flags);
44288 if (!tty_hung_up_p(filp))
44289- port->count--;
44290+ atomic_dec(&port->count);
44291 port->blocked_open++;
44292 spin_unlock_irqrestore(&port->lock, flags);
44293
44294@@ -386,7 +386,7 @@ int tty_port_block_til_ready(struct tty_port *port,
44295 we must not mess that up further */
44296 spin_lock_irqsave(&port->lock, flags);
44297 if (!tty_hung_up_p(filp))
44298- port->count++;
44299+ atomic_inc(&port->count);
44300 port->blocked_open--;
44301 if (retval == 0)
44302 port->flags |= ASYNC_NORMAL_ACTIVE;
44303@@ -406,19 +406,19 @@ int tty_port_close_start(struct tty_port *port,
44304 return 0;
44305 }
44306
44307- if (tty->count == 1 && port->count != 1) {
44308+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
44309 printk(KERN_WARNING
44310 "tty_port_close_start: tty->count = 1 port count = %d.\n",
44311- port->count);
44312- port->count = 1;
44313+ atomic_read(&port->count));
44314+ atomic_set(&port->count, 1);
44315 }
44316- if (--port->count < 0) {
44317+ if (atomic_dec_return(&port->count) < 0) {
44318 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
44319- port->count);
44320- port->count = 0;
44321+ atomic_read(&port->count));
44322+ atomic_set(&port->count, 0);
44323 }
44324
44325- if (port->count) {
44326+ if (atomic_read(&port->count)) {
44327 spin_unlock_irqrestore(&port->lock, flags);
44328 if (port->ops->drop)
44329 port->ops->drop(port);
44330@@ -516,7 +516,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
44331 {
44332 spin_lock_irq(&port->lock);
44333 if (!tty_hung_up_p(filp))
44334- ++port->count;
44335+ atomic_inc(&port->count);
44336 spin_unlock_irq(&port->lock);
44337 tty_port_tty_set(port, tty);
44338
44339diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
44340index a9af1b9a..1e08e7f 100644
44341--- a/drivers/tty/vt/keyboard.c
44342+++ b/drivers/tty/vt/keyboard.c
44343@@ -647,6 +647,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
44344 kbd->kbdmode == VC_OFF) &&
44345 value != KVAL(K_SAK))
44346 return; /* SAK is allowed even in raw mode */
44347+
44348+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
44349+ {
44350+ void *func = fn_handler[value];
44351+ if (func == fn_show_state || func == fn_show_ptregs ||
44352+ func == fn_show_mem)
44353+ return;
44354+ }
44355+#endif
44356+
44357 fn_handler[value](vc);
44358 }
44359
44360@@ -1795,9 +1805,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
44361 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
44362 return -EFAULT;
44363
44364- if (!capable(CAP_SYS_TTY_CONFIG))
44365- perm = 0;
44366-
44367 switch (cmd) {
44368 case KDGKBENT:
44369 /* Ensure another thread doesn't free it under us */
44370@@ -1812,6 +1819,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
44371 spin_unlock_irqrestore(&kbd_event_lock, flags);
44372 return put_user(val, &user_kbe->kb_value);
44373 case KDSKBENT:
44374+ if (!capable(CAP_SYS_TTY_CONFIG))
44375+ perm = 0;
44376+
44377 if (!perm)
44378 return -EPERM;
44379 if (!i && v == K_NOSUCHMAP) {
44380@@ -1902,9 +1912,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
44381 int i, j, k;
44382 int ret;
44383
44384- if (!capable(CAP_SYS_TTY_CONFIG))
44385- perm = 0;
44386-
44387 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
44388 if (!kbs) {
44389 ret = -ENOMEM;
44390@@ -1938,6 +1945,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
44391 kfree(kbs);
44392 return ((p && *p) ? -EOVERFLOW : 0);
44393 case KDSKBSENT:
44394+ if (!capable(CAP_SYS_TTY_CONFIG))
44395+ perm = 0;
44396+
44397 if (!perm) {
44398 ret = -EPERM;
44399 goto reterr;
44400diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
44401index c8b9262..7e824e6 100644
44402--- a/drivers/uio/uio.c
44403+++ b/drivers/uio/uio.c
44404@@ -25,6 +25,7 @@
44405 #include <linux/kobject.h>
44406 #include <linux/cdev.h>
44407 #include <linux/uio_driver.h>
44408+#include <asm/local.h>
44409
44410 #define UIO_MAX_DEVICES (1U << MINORBITS)
44411
44412@@ -32,10 +33,10 @@ struct uio_device {
44413 struct module *owner;
44414 struct device *dev;
44415 int minor;
44416- atomic_t event;
44417+ atomic_unchecked_t event;
44418 struct fasync_struct *async_queue;
44419 wait_queue_head_t wait;
44420- int vma_count;
44421+ local_t vma_count;
44422 struct uio_info *info;
44423 struct kobject *map_dir;
44424 struct kobject *portio_dir;
44425@@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
44426 struct device_attribute *attr, char *buf)
44427 {
44428 struct uio_device *idev = dev_get_drvdata(dev);
44429- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
44430+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
44431 }
44432
44433 static struct device_attribute uio_class_attributes[] = {
44434@@ -397,7 +398,7 @@ void uio_event_notify(struct uio_info *info)
44435 {
44436 struct uio_device *idev = info->uio_dev;
44437
44438- atomic_inc(&idev->event);
44439+ atomic_inc_unchecked(&idev->event);
44440 wake_up_interruptible(&idev->wait);
44441 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
44442 }
44443@@ -450,7 +451,7 @@ static int uio_open(struct inode *inode, struct file *filep)
44444 }
44445
44446 listener->dev = idev;
44447- listener->event_count = atomic_read(&idev->event);
44448+ listener->event_count = atomic_read_unchecked(&idev->event);
44449 filep->private_data = listener;
44450
44451 if (idev->info->open) {
44452@@ -501,7 +502,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
44453 return -EIO;
44454
44455 poll_wait(filep, &idev->wait, wait);
44456- if (listener->event_count != atomic_read(&idev->event))
44457+ if (listener->event_count != atomic_read_unchecked(&idev->event))
44458 return POLLIN | POLLRDNORM;
44459 return 0;
44460 }
44461@@ -526,7 +527,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
44462 do {
44463 set_current_state(TASK_INTERRUPTIBLE);
44464
44465- event_count = atomic_read(&idev->event);
44466+ event_count = atomic_read_unchecked(&idev->event);
44467 if (event_count != listener->event_count) {
44468 if (copy_to_user(buf, &event_count, count))
44469 retval = -EFAULT;
44470@@ -595,13 +596,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
44471 static void uio_vma_open(struct vm_area_struct *vma)
44472 {
44473 struct uio_device *idev = vma->vm_private_data;
44474- idev->vma_count++;
44475+ local_inc(&idev->vma_count);
44476 }
44477
44478 static void uio_vma_close(struct vm_area_struct *vma)
44479 {
44480 struct uio_device *idev = vma->vm_private_data;
44481- idev->vma_count--;
44482+ local_dec(&idev->vma_count);
44483 }
44484
44485 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
44486@@ -808,7 +809,7 @@ int __uio_register_device(struct module *owner,
44487 idev->owner = owner;
44488 idev->info = info;
44489 init_waitqueue_head(&idev->wait);
44490- atomic_set(&idev->event, 0);
44491+ atomic_set_unchecked(&idev->event, 0);
44492
44493 ret = uio_get_minor(idev);
44494 if (ret)
44495diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
44496index 8a7eb77..c00402f 100644
44497--- a/drivers/usb/atm/cxacru.c
44498+++ b/drivers/usb/atm/cxacru.c
44499@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
44500 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
44501 if (ret < 2)
44502 return -EINVAL;
44503- if (index < 0 || index > 0x7f)
44504+ if (index > 0x7f)
44505 return -EINVAL;
44506 pos += tmp;
44507
44508diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
44509index 35f10bf..6a38a0b 100644
44510--- a/drivers/usb/atm/usbatm.c
44511+++ b/drivers/usb/atm/usbatm.c
44512@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
44513 if (printk_ratelimit())
44514 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
44515 __func__, vpi, vci);
44516- atomic_inc(&vcc->stats->rx_err);
44517+ atomic_inc_unchecked(&vcc->stats->rx_err);
44518 return;
44519 }
44520
44521@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
44522 if (length > ATM_MAX_AAL5_PDU) {
44523 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
44524 __func__, length, vcc);
44525- atomic_inc(&vcc->stats->rx_err);
44526+ atomic_inc_unchecked(&vcc->stats->rx_err);
44527 goto out;
44528 }
44529
44530@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
44531 if (sarb->len < pdu_length) {
44532 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
44533 __func__, pdu_length, sarb->len, vcc);
44534- atomic_inc(&vcc->stats->rx_err);
44535+ atomic_inc_unchecked(&vcc->stats->rx_err);
44536 goto out;
44537 }
44538
44539 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
44540 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
44541 __func__, vcc);
44542- atomic_inc(&vcc->stats->rx_err);
44543+ atomic_inc_unchecked(&vcc->stats->rx_err);
44544 goto out;
44545 }
44546
44547@@ -389,7 +389,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
44548 if (printk_ratelimit())
44549 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
44550 __func__, length);
44551- atomic_inc(&vcc->stats->rx_drop);
44552+ atomic_inc_unchecked(&vcc->stats->rx_drop);
44553 goto out;
44554 }
44555
44556@@ -417,7 +417,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
44557
44558 vcc->push(vcc, skb);
44559
44560- atomic_inc(&vcc->stats->rx);
44561+ atomic_inc_unchecked(&vcc->stats->rx);
44562 out:
44563 skb_trim(sarb, 0);
44564 }
44565@@ -623,7 +623,7 @@ static void usbatm_tx_process(unsigned long data)
44566 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
44567
44568 usbatm_pop(vcc, skb);
44569- atomic_inc(&vcc->stats->tx);
44570+ atomic_inc_unchecked(&vcc->stats->tx);
44571
44572 skb = skb_dequeue(&instance->sndqueue);
44573 }
44574@@ -779,11 +779,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
44575 if (!left--)
44576 return sprintf(page,
44577 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
44578- atomic_read(&atm_dev->stats.aal5.tx),
44579- atomic_read(&atm_dev->stats.aal5.tx_err),
44580- atomic_read(&atm_dev->stats.aal5.rx),
44581- atomic_read(&atm_dev->stats.aal5.rx_err),
44582- atomic_read(&atm_dev->stats.aal5.rx_drop));
44583+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
44584+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
44585+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
44586+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
44587+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
44588
44589 if (!left--) {
44590 if (instance->disconnected)
44591diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
44592index 2a3bbdf..91d72cf 100644
44593--- a/drivers/usb/core/devices.c
44594+++ b/drivers/usb/core/devices.c
44595@@ -126,7 +126,7 @@ static const char format_endpt[] =
44596 * time it gets called.
44597 */
44598 static struct device_connect_event {
44599- atomic_t count;
44600+ atomic_unchecked_t count;
44601 wait_queue_head_t wait;
44602 } device_event = {
44603 .count = ATOMIC_INIT(1),
44604@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
44605
44606 void usbfs_conn_disc_event(void)
44607 {
44608- atomic_add(2, &device_event.count);
44609+ atomic_add_unchecked(2, &device_event.count);
44610 wake_up(&device_event.wait);
44611 }
44612
44613@@ -652,7 +652,7 @@ static unsigned int usb_device_poll(struct file *file,
44614
44615 poll_wait(file, &device_event.wait, wait);
44616
44617- event_count = atomic_read(&device_event.count);
44618+ event_count = atomic_read_unchecked(&device_event.count);
44619 if (file->f_version != event_count) {
44620 file->f_version = event_count;
44621 return POLLIN | POLLRDNORM;
44622diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
44623index f9ec44c..eb5779f 100644
44624--- a/drivers/usb/core/hcd.c
44625+++ b/drivers/usb/core/hcd.c
44626@@ -1526,7 +1526,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
44627 */
44628 usb_get_urb(urb);
44629 atomic_inc(&urb->use_count);
44630- atomic_inc(&urb->dev->urbnum);
44631+ atomic_inc_unchecked(&urb->dev->urbnum);
44632 usbmon_urb_submit(&hcd->self, urb);
44633
44634 /* NOTE requirements on root-hub callers (usbfs and the hub
44635@@ -1553,7 +1553,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
44636 urb->hcpriv = NULL;
44637 INIT_LIST_HEAD(&urb->urb_list);
44638 atomic_dec(&urb->use_count);
44639- atomic_dec(&urb->dev->urbnum);
44640+ atomic_dec_unchecked(&urb->dev->urbnum);
44641 if (atomic_read(&urb->reject))
44642 wake_up(&usb_kill_urb_queue);
44643 usb_put_urb(urb);
44644diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
44645index 444d30e..f15c850 100644
44646--- a/drivers/usb/core/message.c
44647+++ b/drivers/usb/core/message.c
44648@@ -129,7 +129,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
44649 * method can wait for it to complete. Since you don't have a handle on the
44650 * URB used, you can't cancel the request.
44651 */
44652-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
44653+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
44654 __u8 requesttype, __u16 value, __u16 index, void *data,
44655 __u16 size, int timeout)
44656 {
44657diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
44658index 3f81a3d..a3aa993 100644
44659--- a/drivers/usb/core/sysfs.c
44660+++ b/drivers/usb/core/sysfs.c
44661@@ -239,7 +239,7 @@ show_urbnum(struct device *dev, struct device_attribute *attr, char *buf)
44662 struct usb_device *udev;
44663
44664 udev = to_usb_device(dev);
44665- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
44666+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
44667 }
44668 static DEVICE_ATTR(urbnum, S_IRUGO, show_urbnum, NULL);
44669
44670diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
44671index f81b925..78d22ec 100644
44672--- a/drivers/usb/core/usb.c
44673+++ b/drivers/usb/core/usb.c
44674@@ -388,7 +388,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
44675 set_dev_node(&dev->dev, dev_to_node(bus->controller));
44676 dev->state = USB_STATE_ATTACHED;
44677 dev->lpm_disable_count = 1;
44678- atomic_set(&dev->urbnum, 0);
44679+ atomic_set_unchecked(&dev->urbnum, 0);
44680
44681 INIT_LIST_HEAD(&dev->ep0.urb_list);
44682 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
44683diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
44684index 5e29dde..eca992f 100644
44685--- a/drivers/usb/early/ehci-dbgp.c
44686+++ b/drivers/usb/early/ehci-dbgp.c
44687@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
44688
44689 #ifdef CONFIG_KGDB
44690 static struct kgdb_io kgdbdbgp_io_ops;
44691-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
44692+static struct kgdb_io kgdbdbgp_io_ops_console;
44693+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
44694 #else
44695 #define dbgp_kgdb_mode (0)
44696 #endif
44697@@ -1047,6 +1048,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
44698 .write_char = kgdbdbgp_write_char,
44699 };
44700
44701+static struct kgdb_io kgdbdbgp_io_ops_console = {
44702+ .name = "kgdbdbgp",
44703+ .read_char = kgdbdbgp_read_char,
44704+ .write_char = kgdbdbgp_write_char,
44705+ .is_console = 1
44706+};
44707+
44708 static int kgdbdbgp_wait_time;
44709
44710 static int __init kgdbdbgp_parse_config(char *str)
44711@@ -1062,8 +1070,10 @@ static int __init kgdbdbgp_parse_config(char *str)
44712 ptr++;
44713 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
44714 }
44715- kgdb_register_io_module(&kgdbdbgp_io_ops);
44716- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
44717+ if (early_dbgp_console.index != -1)
44718+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
44719+ else
44720+ kgdb_register_io_module(&kgdbdbgp_io_ops);
44721
44722 return 0;
44723 }
44724diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
44725index b369292..9f3ba40 100644
44726--- a/drivers/usb/gadget/u_serial.c
44727+++ b/drivers/usb/gadget/u_serial.c
44728@@ -733,9 +733,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
44729 spin_lock_irq(&port->port_lock);
44730
44731 /* already open? Great. */
44732- if (port->port.count) {
44733+ if (atomic_read(&port->port.count)) {
44734 status = 0;
44735- port->port.count++;
44736+ atomic_inc(&port->port.count);
44737
44738 /* currently opening/closing? wait ... */
44739 } else if (port->openclose) {
44740@@ -794,7 +794,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
44741 tty->driver_data = port;
44742 port->port.tty = tty;
44743
44744- port->port.count = 1;
44745+ atomic_set(&port->port.count, 1);
44746 port->openclose = false;
44747
44748 /* if connected, start the I/O stream */
44749@@ -836,11 +836,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
44750
44751 spin_lock_irq(&port->port_lock);
44752
44753- if (port->port.count != 1) {
44754- if (port->port.count == 0)
44755+ if (atomic_read(&port->port.count) != 1) {
44756+ if (atomic_read(&port->port.count) == 0)
44757 WARN_ON(1);
44758 else
44759- --port->port.count;
44760+ atomic_dec(&port->port.count);
44761 goto exit;
44762 }
44763
44764@@ -850,7 +850,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
44765 * and sleep if necessary
44766 */
44767 port->openclose = true;
44768- port->port.count = 0;
44769+ atomic_set(&port->port.count, 0);
44770
44771 gser = port->port_usb;
44772 if (gser && gser->disconnect)
44773@@ -1066,7 +1066,7 @@ static int gs_closed(struct gs_port *port)
44774 int cond;
44775
44776 spin_lock_irq(&port->port_lock);
44777- cond = (port->port.count == 0) && !port->openclose;
44778+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
44779 spin_unlock_irq(&port->port_lock);
44780 return cond;
44781 }
44782@@ -1209,7 +1209,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
44783 /* if it's already open, start I/O ... and notify the serial
44784 * protocol about open/close status (connect/disconnect).
44785 */
44786- if (port->port.count) {
44787+ if (atomic_read(&port->port.count)) {
44788 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
44789 gs_start_io(port);
44790 if (gser->connect)
44791@@ -1256,7 +1256,7 @@ void gserial_disconnect(struct gserial *gser)
44792
44793 port->port_usb = NULL;
44794 gser->ioport = NULL;
44795- if (port->port.count > 0 || port->openclose) {
44796+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
44797 wake_up_interruptible(&port->drain_wait);
44798 if (port->port.tty)
44799 tty_hangup(port->port.tty);
44800@@ -1272,7 +1272,7 @@ void gserial_disconnect(struct gserial *gser)
44801
44802 /* finally, free any unused/unusable I/O buffers */
44803 spin_lock_irqsave(&port->port_lock, flags);
44804- if (port->port.count == 0 && !port->openclose)
44805+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
44806 gs_buf_free(&port->port_write_buf);
44807 gs_free_requests(gser->out, &port->read_pool, NULL);
44808 gs_free_requests(gser->out, &port->read_queue, NULL);
44809diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
44810index 5f3bcd3..bfca43f 100644
44811--- a/drivers/usb/serial/console.c
44812+++ b/drivers/usb/serial/console.c
44813@@ -124,7 +124,7 @@ static int usb_console_setup(struct console *co, char *options)
44814
44815 info->port = port;
44816
44817- ++port->port.count;
44818+ atomic_inc(&port->port.count);
44819 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
44820 if (serial->type->set_termios) {
44821 /*
44822@@ -174,7 +174,7 @@ static int usb_console_setup(struct console *co, char *options)
44823 }
44824 /* Now that any required fake tty operations are completed restore
44825 * the tty port count */
44826- --port->port.count;
44827+ atomic_dec(&port->port.count);
44828 /* The console is special in terms of closing the device so
44829 * indicate this port is now acting as a system console. */
44830 port->port.console = 1;
44831@@ -187,7 +187,7 @@ static int usb_console_setup(struct console *co, char *options)
44832 free_tty:
44833 kfree(tty);
44834 reset_open_count:
44835- port->port.count = 0;
44836+ atomic_set(&port->port.count, 0);
44837 usb_autopm_put_interface(serial->interface);
44838 error_get_interface:
44839 usb_serial_put(serial);
44840diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
44841index 6c3586a..a94e621 100644
44842--- a/drivers/usb/storage/realtek_cr.c
44843+++ b/drivers/usb/storage/realtek_cr.c
44844@@ -429,7 +429,7 @@ static int rts51x_read_status(struct us_data *us,
44845
44846 buf = kmalloc(len, GFP_NOIO);
44847 if (buf == NULL)
44848- return USB_STOR_TRANSPORT_ERROR;
44849+ return -ENOMEM;
44850
44851 US_DEBUGP("%s, lun = %d\n", __func__, lun);
44852
44853diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
44854index 75f70f0..d467e1a 100644
44855--- a/drivers/usb/storage/usb.h
44856+++ b/drivers/usb/storage/usb.h
44857@@ -63,7 +63,7 @@ struct us_unusual_dev {
44858 __u8 useProtocol;
44859 __u8 useTransport;
44860 int (*initFunction)(struct us_data *);
44861-};
44862+} __do_const;
44863
44864
44865 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
44866diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
44867index d6bea3e..60b250e 100644
44868--- a/drivers/usb/wusbcore/wa-hc.h
44869+++ b/drivers/usb/wusbcore/wa-hc.h
44870@@ -192,7 +192,7 @@ struct wahc {
44871 struct list_head xfer_delayed_list;
44872 spinlock_t xfer_list_lock;
44873 struct work_struct xfer_work;
44874- atomic_t xfer_id_count;
44875+ atomic_unchecked_t xfer_id_count;
44876 };
44877
44878
44879@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
44880 INIT_LIST_HEAD(&wa->xfer_delayed_list);
44881 spin_lock_init(&wa->xfer_list_lock);
44882 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
44883- atomic_set(&wa->xfer_id_count, 1);
44884+ atomic_set_unchecked(&wa->xfer_id_count, 1);
44885 }
44886
44887 /**
44888diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
44889index 6ef94bc..1b41265 100644
44890--- a/drivers/usb/wusbcore/wa-xfer.c
44891+++ b/drivers/usb/wusbcore/wa-xfer.c
44892@@ -296,7 +296,7 @@ out:
44893 */
44894 static void wa_xfer_id_init(struct wa_xfer *xfer)
44895 {
44896- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
44897+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
44898 }
44899
44900 /*
44901diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
44902index 8c55011..eed4ae1a 100644
44903--- a/drivers/video/aty/aty128fb.c
44904+++ b/drivers/video/aty/aty128fb.c
44905@@ -149,7 +149,7 @@ enum {
44906 };
44907
44908 /* Must match above enum */
44909-static char * const r128_family[] = {
44910+static const char * const r128_family[] = {
44911 "AGP",
44912 "PCI",
44913 "PRO AGP",
44914diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
44915index 4f27fdc..d3537e6 100644
44916--- a/drivers/video/aty/atyfb_base.c
44917+++ b/drivers/video/aty/atyfb_base.c
44918@@ -1325,10 +1325,14 @@ static int atyfb_set_par(struct fb_info *info)
44919 par->accel_flags = var->accel_flags; /* hack */
44920
44921 if (var->accel_flags) {
44922- info->fbops->fb_sync = atyfb_sync;
44923+ pax_open_kernel();
44924+ *(void **)&info->fbops->fb_sync = atyfb_sync;
44925+ pax_close_kernel();
44926 info->flags &= ~FBINFO_HWACCEL_DISABLED;
44927 } else {
44928- info->fbops->fb_sync = NULL;
44929+ pax_open_kernel();
44930+ *(void **)&info->fbops->fb_sync = NULL;
44931+ pax_close_kernel();
44932 info->flags |= FBINFO_HWACCEL_DISABLED;
44933 }
44934
44935diff --git a/drivers/video/aty/mach64_cursor.c b/drivers/video/aty/mach64_cursor.c
44936index 95ec042..e6affdd 100644
44937--- a/drivers/video/aty/mach64_cursor.c
44938+++ b/drivers/video/aty/mach64_cursor.c
44939@@ -7,6 +7,7 @@
44940 #include <linux/string.h>
44941
44942 #include <asm/io.h>
44943+#include <asm/pgtable.h>
44944
44945 #ifdef __sparc__
44946 #include <asm/fbio.h>
44947@@ -208,7 +209,9 @@ int aty_init_cursor(struct fb_info *info)
44948 info->sprite.buf_align = 16; /* and 64 lines tall. */
44949 info->sprite.flags = FB_PIXMAP_IO;
44950
44951- info->fbops->fb_cursor = atyfb_cursor;
44952+ pax_open_kernel();
44953+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
44954+ pax_close_kernel();
44955
44956 return 0;
44957 }
44958diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
44959index 6c5ed6b..b727c88 100644
44960--- a/drivers/video/backlight/kb3886_bl.c
44961+++ b/drivers/video/backlight/kb3886_bl.c
44962@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
44963 static unsigned long kb3886bl_flags;
44964 #define KB3886BL_SUSPENDED 0x01
44965
44966-static struct dmi_system_id __initdata kb3886bl_device_table[] = {
44967+static const struct dmi_system_id __initconst kb3886bl_device_table[] = {
44968 {
44969 .ident = "Sahara Touch-iT",
44970 .matches = {
44971diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
44972index 900aa4e..6d49418 100644
44973--- a/drivers/video/fb_defio.c
44974+++ b/drivers/video/fb_defio.c
44975@@ -206,7 +206,9 @@ void fb_deferred_io_init(struct fb_info *info)
44976
44977 BUG_ON(!fbdefio);
44978 mutex_init(&fbdefio->lock);
44979- info->fbops->fb_mmap = fb_deferred_io_mmap;
44980+ pax_open_kernel();
44981+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
44982+ pax_close_kernel();
44983 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
44984 INIT_LIST_HEAD(&fbdefio->pagelist);
44985 if (fbdefio->delay == 0) /* set a default of 1 s */
44986@@ -237,7 +239,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
44987 page->mapping = NULL;
44988 }
44989
44990- info->fbops->fb_mmap = NULL;
44991+ *(void **)&info->fbops->fb_mmap = NULL;
44992 mutex_destroy(&fbdefio->lock);
44993 }
44994 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
44995diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
44996index 5c3960d..15cf8fc 100644
44997--- a/drivers/video/fbcmap.c
44998+++ b/drivers/video/fbcmap.c
44999@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
45000 rc = -ENODEV;
45001 goto out;
45002 }
45003- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
45004- !info->fbops->fb_setcmap)) {
45005+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
45006 rc = -EINVAL;
45007 goto out1;
45008 }
45009diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
45010index 86291dc..7cc5962 100644
45011--- a/drivers/video/fbmem.c
45012+++ b/drivers/video/fbmem.c
45013@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
45014 image->dx += image->width + 8;
45015 }
45016 } else if (rotate == FB_ROTATE_UD) {
45017- for (x = 0; x < num && image->dx >= 0; x++) {
45018+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
45019 info->fbops->fb_imageblit(info, image);
45020 image->dx -= image->width + 8;
45021 }
45022@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
45023 image->dy += image->height + 8;
45024 }
45025 } else if (rotate == FB_ROTATE_CCW) {
45026- for (x = 0; x < num && image->dy >= 0; x++) {
45027+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
45028 info->fbops->fb_imageblit(info, image);
45029 image->dy -= image->height + 8;
45030 }
45031@@ -1166,7 +1166,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
45032 return -EFAULT;
45033 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
45034 return -EINVAL;
45035- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
45036+ if (con2fb.framebuffer >= FB_MAX)
45037 return -EINVAL;
45038 if (!registered_fb[con2fb.framebuffer])
45039 request_module("fb%d", con2fb.framebuffer);
45040diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
45041index 7672d2e..b56437f 100644
45042--- a/drivers/video/i810/i810_accel.c
45043+++ b/drivers/video/i810/i810_accel.c
45044@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
45045 }
45046 }
45047 printk("ringbuffer lockup!!!\n");
45048+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
45049 i810_report_error(mmio);
45050 par->dev_flags |= LOCKUP;
45051 info->pixmap.scan_align = 1;
45052diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
45053index 3c14e43..eafa544 100644
45054--- a/drivers/video/logo/logo_linux_clut224.ppm
45055+++ b/drivers/video/logo/logo_linux_clut224.ppm
45056@@ -1,1604 +1,1123 @@
45057 P3
45058-# Standard 224-color Linux logo
45059 80 80
45060 255
45061- 0 0 0 0 0 0 0 0 0 0 0 0
45062- 0 0 0 0 0 0 0 0 0 0 0 0
45063- 0 0 0 0 0 0 0 0 0 0 0 0
45064- 0 0 0 0 0 0 0 0 0 0 0 0
45065- 0 0 0 0 0 0 0 0 0 0 0 0
45066- 0 0 0 0 0 0 0 0 0 0 0 0
45067- 0 0 0 0 0 0 0 0 0 0 0 0
45068- 0 0 0 0 0 0 0 0 0 0 0 0
45069- 0 0 0 0 0 0 0 0 0 0 0 0
45070- 6 6 6 6 6 6 10 10 10 10 10 10
45071- 10 10 10 6 6 6 6 6 6 6 6 6
45072- 0 0 0 0 0 0 0 0 0 0 0 0
45073- 0 0 0 0 0 0 0 0 0 0 0 0
45074- 0 0 0 0 0 0 0 0 0 0 0 0
45075- 0 0 0 0 0 0 0 0 0 0 0 0
45076- 0 0 0 0 0 0 0 0 0 0 0 0
45077- 0 0 0 0 0 0 0 0 0 0 0 0
45078- 0 0 0 0 0 0 0 0 0 0 0 0
45079- 0 0 0 0 0 0 0 0 0 0 0 0
45080- 0 0 0 0 0 0 0 0 0 0 0 0
45081- 0 0 0 0 0 0 0 0 0 0 0 0
45082- 0 0 0 0 0 0 0 0 0 0 0 0
45083- 0 0 0 0 0 0 0 0 0 0 0 0
45084- 0 0 0 0 0 0 0 0 0 0 0 0
45085- 0 0 0 0 0 0 0 0 0 0 0 0
45086- 0 0 0 0 0 0 0 0 0 0 0 0
45087- 0 0 0 0 0 0 0 0 0 0 0 0
45088- 0 0 0 0 0 0 0 0 0 0 0 0
45089- 0 0 0 6 6 6 10 10 10 14 14 14
45090- 22 22 22 26 26 26 30 30 30 34 34 34
45091- 30 30 30 30 30 30 26 26 26 18 18 18
45092- 14 14 14 10 10 10 6 6 6 0 0 0
45093- 0 0 0 0 0 0 0 0 0 0 0 0
45094- 0 0 0 0 0 0 0 0 0 0 0 0
45095- 0 0 0 0 0 0 0 0 0 0 0 0
45096- 0 0 0 0 0 0 0 0 0 0 0 0
45097- 0 0 0 0 0 0 0 0 0 0 0 0
45098- 0 0 0 0 0 0 0 0 0 0 0 0
45099- 0 0 0 0 0 0 0 0 0 0 0 0
45100- 0 0 0 0 0 0 0 0 0 0 0 0
45101- 0 0 0 0 0 0 0 0 0 0 0 0
45102- 0 0 0 0 0 1 0 0 1 0 0 0
45103- 0 0 0 0 0 0 0 0 0 0 0 0
45104- 0 0 0 0 0 0 0 0 0 0 0 0
45105- 0 0 0 0 0 0 0 0 0 0 0 0
45106- 0 0 0 0 0 0 0 0 0 0 0 0
45107- 0 0 0 0 0 0 0 0 0 0 0 0
45108- 0 0 0 0 0 0 0 0 0 0 0 0
45109- 6 6 6 14 14 14 26 26 26 42 42 42
45110- 54 54 54 66 66 66 78 78 78 78 78 78
45111- 78 78 78 74 74 74 66 66 66 54 54 54
45112- 42 42 42 26 26 26 18 18 18 10 10 10
45113- 6 6 6 0 0 0 0 0 0 0 0 0
45114- 0 0 0 0 0 0 0 0 0 0 0 0
45115- 0 0 0 0 0 0 0 0 0 0 0 0
45116- 0 0 0 0 0 0 0 0 0 0 0 0
45117- 0 0 0 0 0 0 0 0 0 0 0 0
45118- 0 0 0 0 0 0 0 0 0 0 0 0
45119- 0 0 0 0 0 0 0 0 0 0 0 0
45120- 0 0 0 0 0 0 0 0 0 0 0 0
45121- 0 0 0 0 0 0 0 0 0 0 0 0
45122- 0 0 1 0 0 0 0 0 0 0 0 0
45123- 0 0 0 0 0 0 0 0 0 0 0 0
45124- 0 0 0 0 0 0 0 0 0 0 0 0
45125- 0 0 0 0 0 0 0 0 0 0 0 0
45126- 0 0 0 0 0 0 0 0 0 0 0 0
45127- 0 0 0 0 0 0 0 0 0 0 0 0
45128- 0 0 0 0 0 0 0 0 0 10 10 10
45129- 22 22 22 42 42 42 66 66 66 86 86 86
45130- 66 66 66 38 38 38 38 38 38 22 22 22
45131- 26 26 26 34 34 34 54 54 54 66 66 66
45132- 86 86 86 70 70 70 46 46 46 26 26 26
45133- 14 14 14 6 6 6 0 0 0 0 0 0
45134- 0 0 0 0 0 0 0 0 0 0 0 0
45135- 0 0 0 0 0 0 0 0 0 0 0 0
45136- 0 0 0 0 0 0 0 0 0 0 0 0
45137- 0 0 0 0 0 0 0 0 0 0 0 0
45138- 0 0 0 0 0 0 0 0 0 0 0 0
45139- 0 0 0 0 0 0 0 0 0 0 0 0
45140- 0 0 0 0 0 0 0 0 0 0 0 0
45141- 0 0 0 0 0 0 0 0 0 0 0 0
45142- 0 0 1 0 0 1 0 0 1 0 0 0
45143- 0 0 0 0 0 0 0 0 0 0 0 0
45144- 0 0 0 0 0 0 0 0 0 0 0 0
45145- 0 0 0 0 0 0 0 0 0 0 0 0
45146- 0 0 0 0 0 0 0 0 0 0 0 0
45147- 0 0 0 0 0 0 0 0 0 0 0 0
45148- 0 0 0 0 0 0 10 10 10 26 26 26
45149- 50 50 50 82 82 82 58 58 58 6 6 6
45150- 2 2 6 2 2 6 2 2 6 2 2 6
45151- 2 2 6 2 2 6 2 2 6 2 2 6
45152- 6 6 6 54 54 54 86 86 86 66 66 66
45153- 38 38 38 18 18 18 6 6 6 0 0 0
45154- 0 0 0 0 0 0 0 0 0 0 0 0
45155- 0 0 0 0 0 0 0 0 0 0 0 0
45156- 0 0 0 0 0 0 0 0 0 0 0 0
45157- 0 0 0 0 0 0 0 0 0 0 0 0
45158- 0 0 0 0 0 0 0 0 0 0 0 0
45159- 0 0 0 0 0 0 0 0 0 0 0 0
45160- 0 0 0 0 0 0 0 0 0 0 0 0
45161- 0 0 0 0 0 0 0 0 0 0 0 0
45162- 0 0 0 0 0 0 0 0 0 0 0 0
45163- 0 0 0 0 0 0 0 0 0 0 0 0
45164- 0 0 0 0 0 0 0 0 0 0 0 0
45165- 0 0 0 0 0 0 0 0 0 0 0 0
45166- 0 0 0 0 0 0 0 0 0 0 0 0
45167- 0 0 0 0 0 0 0 0 0 0 0 0
45168- 0 0 0 6 6 6 22 22 22 50 50 50
45169- 78 78 78 34 34 34 2 2 6 2 2 6
45170- 2 2 6 2 2 6 2 2 6 2 2 6
45171- 2 2 6 2 2 6 2 2 6 2 2 6
45172- 2 2 6 2 2 6 6 6 6 70 70 70
45173- 78 78 78 46 46 46 22 22 22 6 6 6
45174- 0 0 0 0 0 0 0 0 0 0 0 0
45175- 0 0 0 0 0 0 0 0 0 0 0 0
45176- 0 0 0 0 0 0 0 0 0 0 0 0
45177- 0 0 0 0 0 0 0 0 0 0 0 0
45178- 0 0 0 0 0 0 0 0 0 0 0 0
45179- 0 0 0 0 0 0 0 0 0 0 0 0
45180- 0 0 0 0 0 0 0 0 0 0 0 0
45181- 0 0 0 0 0 0 0 0 0 0 0 0
45182- 0 0 1 0 0 1 0 0 1 0 0 0
45183- 0 0 0 0 0 0 0 0 0 0 0 0
45184- 0 0 0 0 0 0 0 0 0 0 0 0
45185- 0 0 0 0 0 0 0 0 0 0 0 0
45186- 0 0 0 0 0 0 0 0 0 0 0 0
45187- 0 0 0 0 0 0 0 0 0 0 0 0
45188- 6 6 6 18 18 18 42 42 42 82 82 82
45189- 26 26 26 2 2 6 2 2 6 2 2 6
45190- 2 2 6 2 2 6 2 2 6 2 2 6
45191- 2 2 6 2 2 6 2 2 6 14 14 14
45192- 46 46 46 34 34 34 6 6 6 2 2 6
45193- 42 42 42 78 78 78 42 42 42 18 18 18
45194- 6 6 6 0 0 0 0 0 0 0 0 0
45195- 0 0 0 0 0 0 0 0 0 0 0 0
45196- 0 0 0 0 0 0 0 0 0 0 0 0
45197- 0 0 0 0 0 0 0 0 0 0 0 0
45198- 0 0 0 0 0 0 0 0 0 0 0 0
45199- 0 0 0 0 0 0 0 0 0 0 0 0
45200- 0 0 0 0 0 0 0 0 0 0 0 0
45201- 0 0 0 0 0 0 0 0 0 0 0 0
45202- 0 0 1 0 0 0 0 0 1 0 0 0
45203- 0 0 0 0 0 0 0 0 0 0 0 0
45204- 0 0 0 0 0 0 0 0 0 0 0 0
45205- 0 0 0 0 0 0 0 0 0 0 0 0
45206- 0 0 0 0 0 0 0 0 0 0 0 0
45207- 0 0 0 0 0 0 0 0 0 0 0 0
45208- 10 10 10 30 30 30 66 66 66 58 58 58
45209- 2 2 6 2 2 6 2 2 6 2 2 6
45210- 2 2 6 2 2 6 2 2 6 2 2 6
45211- 2 2 6 2 2 6 2 2 6 26 26 26
45212- 86 86 86 101 101 101 46 46 46 10 10 10
45213- 2 2 6 58 58 58 70 70 70 34 34 34
45214- 10 10 10 0 0 0 0 0 0 0 0 0
45215- 0 0 0 0 0 0 0 0 0 0 0 0
45216- 0 0 0 0 0 0 0 0 0 0 0 0
45217- 0 0 0 0 0 0 0 0 0 0 0 0
45218- 0 0 0 0 0 0 0 0 0 0 0 0
45219- 0 0 0 0 0 0 0 0 0 0 0 0
45220- 0 0 0 0 0 0 0 0 0 0 0 0
45221- 0 0 0 0 0 0 0 0 0 0 0 0
45222- 0 0 1 0 0 1 0 0 1 0 0 0
45223- 0 0 0 0 0 0 0 0 0 0 0 0
45224- 0 0 0 0 0 0 0 0 0 0 0 0
45225- 0 0 0 0 0 0 0 0 0 0 0 0
45226- 0 0 0 0 0 0 0 0 0 0 0 0
45227- 0 0 0 0 0 0 0 0 0 0 0 0
45228- 14 14 14 42 42 42 86 86 86 10 10 10
45229- 2 2 6 2 2 6 2 2 6 2 2 6
45230- 2 2 6 2 2 6 2 2 6 2 2 6
45231- 2 2 6 2 2 6 2 2 6 30 30 30
45232- 94 94 94 94 94 94 58 58 58 26 26 26
45233- 2 2 6 6 6 6 78 78 78 54 54 54
45234- 22 22 22 6 6 6 0 0 0 0 0 0
45235- 0 0 0 0 0 0 0 0 0 0 0 0
45236- 0 0 0 0 0 0 0 0 0 0 0 0
45237- 0 0 0 0 0 0 0 0 0 0 0 0
45238- 0 0 0 0 0 0 0 0 0 0 0 0
45239- 0 0 0 0 0 0 0 0 0 0 0 0
45240- 0 0 0 0 0 0 0 0 0 0 0 0
45241- 0 0 0 0 0 0 0 0 0 0 0 0
45242- 0 0 0 0 0 0 0 0 0 0 0 0
45243- 0 0 0 0 0 0 0 0 0 0 0 0
45244- 0 0 0 0 0 0 0 0 0 0 0 0
45245- 0 0 0 0 0 0 0 0 0 0 0 0
45246- 0 0 0 0 0 0 0 0 0 0 0 0
45247- 0 0 0 0 0 0 0 0 0 6 6 6
45248- 22 22 22 62 62 62 62 62 62 2 2 6
45249- 2 2 6 2 2 6 2 2 6 2 2 6
45250- 2 2 6 2 2 6 2 2 6 2 2 6
45251- 2 2 6 2 2 6 2 2 6 26 26 26
45252- 54 54 54 38 38 38 18 18 18 10 10 10
45253- 2 2 6 2 2 6 34 34 34 82 82 82
45254- 38 38 38 14 14 14 0 0 0 0 0 0
45255- 0 0 0 0 0 0 0 0 0 0 0 0
45256- 0 0 0 0 0 0 0 0 0 0 0 0
45257- 0 0 0 0 0 0 0 0 0 0 0 0
45258- 0 0 0 0 0 0 0 0 0 0 0 0
45259- 0 0 0 0 0 0 0 0 0 0 0 0
45260- 0 0 0 0 0 0 0 0 0 0 0 0
45261- 0 0 0 0 0 0 0 0 0 0 0 0
45262- 0 0 0 0 0 1 0 0 1 0 0 0
45263- 0 0 0 0 0 0 0 0 0 0 0 0
45264- 0 0 0 0 0 0 0 0 0 0 0 0
45265- 0 0 0 0 0 0 0 0 0 0 0 0
45266- 0 0 0 0 0 0 0 0 0 0 0 0
45267- 0 0 0 0 0 0 0 0 0 6 6 6
45268- 30 30 30 78 78 78 30 30 30 2 2 6
45269- 2 2 6 2 2 6 2 2 6 2 2 6
45270- 2 2 6 2 2 6 2 2 6 2 2 6
45271- 2 2 6 2 2 6 2 2 6 10 10 10
45272- 10 10 10 2 2 6 2 2 6 2 2 6
45273- 2 2 6 2 2 6 2 2 6 78 78 78
45274- 50 50 50 18 18 18 6 6 6 0 0 0
45275- 0 0 0 0 0 0 0 0 0 0 0 0
45276- 0 0 0 0 0 0 0 0 0 0 0 0
45277- 0 0 0 0 0 0 0 0 0 0 0 0
45278- 0 0 0 0 0 0 0 0 0 0 0 0
45279- 0 0 0 0 0 0 0 0 0 0 0 0
45280- 0 0 0 0 0 0 0 0 0 0 0 0
45281- 0 0 0 0 0 0 0 0 0 0 0 0
45282- 0 0 1 0 0 0 0 0 0 0 0 0
45283- 0 0 0 0 0 0 0 0 0 0 0 0
45284- 0 0 0 0 0 0 0 0 0 0 0 0
45285- 0 0 0 0 0 0 0 0 0 0 0 0
45286- 0 0 0 0 0 0 0 0 0 0 0 0
45287- 0 0 0 0 0 0 0 0 0 10 10 10
45288- 38 38 38 86 86 86 14 14 14 2 2 6
45289- 2 2 6 2 2 6 2 2 6 2 2 6
45290- 2 2 6 2 2 6 2 2 6 2 2 6
45291- 2 2 6 2 2 6 2 2 6 2 2 6
45292- 2 2 6 2 2 6 2 2 6 2 2 6
45293- 2 2 6 2 2 6 2 2 6 54 54 54
45294- 66 66 66 26 26 26 6 6 6 0 0 0
45295- 0 0 0 0 0 0 0 0 0 0 0 0
45296- 0 0 0 0 0 0 0 0 0 0 0 0
45297- 0 0 0 0 0 0 0 0 0 0 0 0
45298- 0 0 0 0 0 0 0 0 0 0 0 0
45299- 0 0 0 0 0 0 0 0 0 0 0 0
45300- 0 0 0 0 0 0 0 0 0 0 0 0
45301- 0 0 0 0 0 0 0 0 0 0 0 0
45302- 0 0 0 0 0 1 0 0 1 0 0 0
45303- 0 0 0 0 0 0 0 0 0 0 0 0
45304- 0 0 0 0 0 0 0 0 0 0 0 0
45305- 0 0 0 0 0 0 0 0 0 0 0 0
45306- 0 0 0 0 0 0 0 0 0 0 0 0
45307- 0 0 0 0 0 0 0 0 0 14 14 14
45308- 42 42 42 82 82 82 2 2 6 2 2 6
45309- 2 2 6 6 6 6 10 10 10 2 2 6
45310- 2 2 6 2 2 6 2 2 6 2 2 6
45311- 2 2 6 2 2 6 2 2 6 6 6 6
45312- 14 14 14 10 10 10 2 2 6 2 2 6
45313- 2 2 6 2 2 6 2 2 6 18 18 18
45314- 82 82 82 34 34 34 10 10 10 0 0 0
45315- 0 0 0 0 0 0 0 0 0 0 0 0
45316- 0 0 0 0 0 0 0 0 0 0 0 0
45317- 0 0 0 0 0 0 0 0 0 0 0 0
45318- 0 0 0 0 0 0 0 0 0 0 0 0
45319- 0 0 0 0 0 0 0 0 0 0 0 0
45320- 0 0 0 0 0 0 0 0 0 0 0 0
45321- 0 0 0 0 0 0 0 0 0 0 0 0
45322- 0 0 1 0 0 0 0 0 0 0 0 0
45323- 0 0 0 0 0 0 0 0 0 0 0 0
45324- 0 0 0 0 0 0 0 0 0 0 0 0
45325- 0 0 0 0 0 0 0 0 0 0 0 0
45326- 0 0 0 0 0 0 0 0 0 0 0 0
45327- 0 0 0 0 0 0 0 0 0 14 14 14
45328- 46 46 46 86 86 86 2 2 6 2 2 6
45329- 6 6 6 6 6 6 22 22 22 34 34 34
45330- 6 6 6 2 2 6 2 2 6 2 2 6
45331- 2 2 6 2 2 6 18 18 18 34 34 34
45332- 10 10 10 50 50 50 22 22 22 2 2 6
45333- 2 2 6 2 2 6 2 2 6 10 10 10
45334- 86 86 86 42 42 42 14 14 14 0 0 0
45335- 0 0 0 0 0 0 0 0 0 0 0 0
45336- 0 0 0 0 0 0 0 0 0 0 0 0
45337- 0 0 0 0 0 0 0 0 0 0 0 0
45338- 0 0 0 0 0 0 0 0 0 0 0 0
45339- 0 0 0 0 0 0 0 0 0 0 0 0
45340- 0 0 0 0 0 0 0 0 0 0 0 0
45341- 0 0 0 0 0 0 0 0 0 0 0 0
45342- 0 0 1 0 0 1 0 0 1 0 0 0
45343- 0 0 0 0 0 0 0 0 0 0 0 0
45344- 0 0 0 0 0 0 0 0 0 0 0 0
45345- 0 0 0 0 0 0 0 0 0 0 0 0
45346- 0 0 0 0 0 0 0 0 0 0 0 0
45347- 0 0 0 0 0 0 0 0 0 14 14 14
45348- 46 46 46 86 86 86 2 2 6 2 2 6
45349- 38 38 38 116 116 116 94 94 94 22 22 22
45350- 22 22 22 2 2 6 2 2 6 2 2 6
45351- 14 14 14 86 86 86 138 138 138 162 162 162
45352-154 154 154 38 38 38 26 26 26 6 6 6
45353- 2 2 6 2 2 6 2 2 6 2 2 6
45354- 86 86 86 46 46 46 14 14 14 0 0 0
45355- 0 0 0 0 0 0 0 0 0 0 0 0
45356- 0 0 0 0 0 0 0 0 0 0 0 0
45357- 0 0 0 0 0 0 0 0 0 0 0 0
45358- 0 0 0 0 0 0 0 0 0 0 0 0
45359- 0 0 0 0 0 0 0 0 0 0 0 0
45360- 0 0 0 0 0 0 0 0 0 0 0 0
45361- 0 0 0 0 0 0 0 0 0 0 0 0
45362- 0 0 0 0 0 0 0 0 0 0 0 0
45363- 0 0 0 0 0 0 0 0 0 0 0 0
45364- 0 0 0 0 0 0 0 0 0 0 0 0
45365- 0 0 0 0 0 0 0 0 0 0 0 0
45366- 0 0 0 0 0 0 0 0 0 0 0 0
45367- 0 0 0 0 0 0 0 0 0 14 14 14
45368- 46 46 46 86 86 86 2 2 6 14 14 14
45369-134 134 134 198 198 198 195 195 195 116 116 116
45370- 10 10 10 2 2 6 2 2 6 6 6 6
45371-101 98 89 187 187 187 210 210 210 218 218 218
45372-214 214 214 134 134 134 14 14 14 6 6 6
45373- 2 2 6 2 2 6 2 2 6 2 2 6
45374- 86 86 86 50 50 50 18 18 18 6 6 6
45375- 0 0 0 0 0 0 0 0 0 0 0 0
45376- 0 0 0 0 0 0 0 0 0 0 0 0
45377- 0 0 0 0 0 0 0 0 0 0 0 0
45378- 0 0 0 0 0 0 0 0 0 0 0 0
45379- 0 0 0 0 0 0 0 0 0 0 0 0
45380- 0 0 0 0 0 0 0 0 0 0 0 0
45381- 0 0 0 0 0 0 0 0 1 0 0 0
45382- 0 0 1 0 0 1 0 0 1 0 0 0
45383- 0 0 0 0 0 0 0 0 0 0 0 0
45384- 0 0 0 0 0 0 0 0 0 0 0 0
45385- 0 0 0 0 0 0 0 0 0 0 0 0
45386- 0 0 0 0 0 0 0 0 0 0 0 0
45387- 0 0 0 0 0 0 0 0 0 14 14 14
45388- 46 46 46 86 86 86 2 2 6 54 54 54
45389-218 218 218 195 195 195 226 226 226 246 246 246
45390- 58 58 58 2 2 6 2 2 6 30 30 30
45391-210 210 210 253 253 253 174 174 174 123 123 123
45392-221 221 221 234 234 234 74 74 74 2 2 6
45393- 2 2 6 2 2 6 2 2 6 2 2 6
45394- 70 70 70 58 58 58 22 22 22 6 6 6
45395- 0 0 0 0 0 0 0 0 0 0 0 0
45396- 0 0 0 0 0 0 0 0 0 0 0 0
45397- 0 0 0 0 0 0 0 0 0 0 0 0
45398- 0 0 0 0 0 0 0 0 0 0 0 0
45399- 0 0 0 0 0 0 0 0 0 0 0 0
45400- 0 0 0 0 0 0 0 0 0 0 0 0
45401- 0 0 0 0 0 0 0 0 0 0 0 0
45402- 0 0 0 0 0 0 0 0 0 0 0 0
45403- 0 0 0 0 0 0 0 0 0 0 0 0
45404- 0 0 0 0 0 0 0 0 0 0 0 0
45405- 0 0 0 0 0 0 0 0 0 0 0 0
45406- 0 0 0 0 0 0 0 0 0 0 0 0
45407- 0 0 0 0 0 0 0 0 0 14 14 14
45408- 46 46 46 82 82 82 2 2 6 106 106 106
45409-170 170 170 26 26 26 86 86 86 226 226 226
45410-123 123 123 10 10 10 14 14 14 46 46 46
45411-231 231 231 190 190 190 6 6 6 70 70 70
45412- 90 90 90 238 238 238 158 158 158 2 2 6
45413- 2 2 6 2 2 6 2 2 6 2 2 6
45414- 70 70 70 58 58 58 22 22 22 6 6 6
45415- 0 0 0 0 0 0 0 0 0 0 0 0
45416- 0 0 0 0 0 0 0 0 0 0 0 0
45417- 0 0 0 0 0 0 0 0 0 0 0 0
45418- 0 0 0 0 0 0 0 0 0 0 0 0
45419- 0 0 0 0 0 0 0 0 0 0 0 0
45420- 0 0 0 0 0 0 0 0 0 0 0 0
45421- 0 0 0 0 0 0 0 0 1 0 0 0
45422- 0 0 1 0 0 1 0 0 1 0 0 0
45423- 0 0 0 0 0 0 0 0 0 0 0 0
45424- 0 0 0 0 0 0 0 0 0 0 0 0
45425- 0 0 0 0 0 0 0 0 0 0 0 0
45426- 0 0 0 0 0 0 0 0 0 0 0 0
45427- 0 0 0 0 0 0 0 0 0 14 14 14
45428- 42 42 42 86 86 86 6 6 6 116 116 116
45429-106 106 106 6 6 6 70 70 70 149 149 149
45430-128 128 128 18 18 18 38 38 38 54 54 54
45431-221 221 221 106 106 106 2 2 6 14 14 14
45432- 46 46 46 190 190 190 198 198 198 2 2 6
45433- 2 2 6 2 2 6 2 2 6 2 2 6
45434- 74 74 74 62 62 62 22 22 22 6 6 6
45435- 0 0 0 0 0 0 0 0 0 0 0 0
45436- 0 0 0 0 0 0 0 0 0 0 0 0
45437- 0 0 0 0 0 0 0 0 0 0 0 0
45438- 0 0 0 0 0 0 0 0 0 0 0 0
45439- 0 0 0 0 0 0 0 0 0 0 0 0
45440- 0 0 0 0 0 0 0 0 0 0 0 0
45441- 0 0 0 0 0 0 0 0 1 0 0 0
45442- 0 0 1 0 0 0 0 0 1 0 0 0
45443- 0 0 0 0 0 0 0 0 0 0 0 0
45444- 0 0 0 0 0 0 0 0 0 0 0 0
45445- 0 0 0 0 0 0 0 0 0 0 0 0
45446- 0 0 0 0 0 0 0 0 0 0 0 0
45447- 0 0 0 0 0 0 0 0 0 14 14 14
45448- 42 42 42 94 94 94 14 14 14 101 101 101
45449-128 128 128 2 2 6 18 18 18 116 116 116
45450-118 98 46 121 92 8 121 92 8 98 78 10
45451-162 162 162 106 106 106 2 2 6 2 2 6
45452- 2 2 6 195 195 195 195 195 195 6 6 6
45453- 2 2 6 2 2 6 2 2 6 2 2 6
45454- 74 74 74 62 62 62 22 22 22 6 6 6
45455- 0 0 0 0 0 0 0 0 0 0 0 0
45456- 0 0 0 0 0 0 0 0 0 0 0 0
45457- 0 0 0 0 0 0 0 0 0 0 0 0
45458- 0 0 0 0 0 0 0 0 0 0 0 0
45459- 0 0 0 0 0 0 0 0 0 0 0 0
45460- 0 0 0 0 0 0 0 0 0 0 0 0
45461- 0 0 0 0 0 0 0 0 1 0 0 1
45462- 0 0 1 0 0 0 0 0 1 0 0 0
45463- 0 0 0 0 0 0 0 0 0 0 0 0
45464- 0 0 0 0 0 0 0 0 0 0 0 0
45465- 0 0 0 0 0 0 0 0 0 0 0 0
45466- 0 0 0 0 0 0 0 0 0 0 0 0
45467- 0 0 0 0 0 0 0 0 0 10 10 10
45468- 38 38 38 90 90 90 14 14 14 58 58 58
45469-210 210 210 26 26 26 54 38 6 154 114 10
45470-226 170 11 236 186 11 225 175 15 184 144 12
45471-215 174 15 175 146 61 37 26 9 2 2 6
45472- 70 70 70 246 246 246 138 138 138 2 2 6
45473- 2 2 6 2 2 6 2 2 6 2 2 6
45474- 70 70 70 66 66 66 26 26 26 6 6 6
45475- 0 0 0 0 0 0 0 0 0 0 0 0
45476- 0 0 0 0 0 0 0 0 0 0 0 0
45477- 0 0 0 0 0 0 0 0 0 0 0 0
45478- 0 0 0 0 0 0 0 0 0 0 0 0
45479- 0 0 0 0 0 0 0 0 0 0 0 0
45480- 0 0 0 0 0 0 0 0 0 0 0 0
45481- 0 0 0 0 0 0 0 0 0 0 0 0
45482- 0 0 0 0 0 0 0 0 0 0 0 0
45483- 0 0 0 0 0 0 0 0 0 0 0 0
45484- 0 0 0 0 0 0 0 0 0 0 0 0
45485- 0 0 0 0 0 0 0 0 0 0 0 0
45486- 0 0 0 0 0 0 0 0 0 0 0 0
45487- 0 0 0 0 0 0 0 0 0 10 10 10
45488- 38 38 38 86 86 86 14 14 14 10 10 10
45489-195 195 195 188 164 115 192 133 9 225 175 15
45490-239 182 13 234 190 10 232 195 16 232 200 30
45491-245 207 45 241 208 19 232 195 16 184 144 12
45492-218 194 134 211 206 186 42 42 42 2 2 6
45493- 2 2 6 2 2 6 2 2 6 2 2 6
45494- 50 50 50 74 74 74 30 30 30 6 6 6
45495- 0 0 0 0 0 0 0 0 0 0 0 0
45496- 0 0 0 0 0 0 0 0 0 0 0 0
45497- 0 0 0 0 0 0 0 0 0 0 0 0
45498- 0 0 0 0 0 0 0 0 0 0 0 0
45499- 0 0 0 0 0 0 0 0 0 0 0 0
45500- 0 0 0 0 0 0 0 0 0 0 0 0
45501- 0 0 0 0 0 0 0 0 0 0 0 0
45502- 0 0 0 0 0 0 0 0 0 0 0 0
45503- 0 0 0 0 0 0 0 0 0 0 0 0
45504- 0 0 0 0 0 0 0 0 0 0 0 0
45505- 0 0 0 0 0 0 0 0 0 0 0 0
45506- 0 0 0 0 0 0 0 0 0 0 0 0
45507- 0 0 0 0 0 0 0 0 0 10 10 10
45508- 34 34 34 86 86 86 14 14 14 2 2 6
45509-121 87 25 192 133 9 219 162 10 239 182 13
45510-236 186 11 232 195 16 241 208 19 244 214 54
45511-246 218 60 246 218 38 246 215 20 241 208 19
45512-241 208 19 226 184 13 121 87 25 2 2 6
45513- 2 2 6 2 2 6 2 2 6 2 2 6
45514- 50 50 50 82 82 82 34 34 34 10 10 10
45515- 0 0 0 0 0 0 0 0 0 0 0 0
45516- 0 0 0 0 0 0 0 0 0 0 0 0
45517- 0 0 0 0 0 0 0 0 0 0 0 0
45518- 0 0 0 0 0 0 0 0 0 0 0 0
45519- 0 0 0 0 0 0 0 0 0 0 0 0
45520- 0 0 0 0 0 0 0 0 0 0 0 0
45521- 0 0 0 0 0 0 0 0 0 0 0 0
45522- 0 0 0 0 0 0 0 0 0 0 0 0
45523- 0 0 0 0 0 0 0 0 0 0 0 0
45524- 0 0 0 0 0 0 0 0 0 0 0 0
45525- 0 0 0 0 0 0 0 0 0 0 0 0
45526- 0 0 0 0 0 0 0 0 0 0 0 0
45527- 0 0 0 0 0 0 0 0 0 10 10 10
45528- 34 34 34 82 82 82 30 30 30 61 42 6
45529-180 123 7 206 145 10 230 174 11 239 182 13
45530-234 190 10 238 202 15 241 208 19 246 218 74
45531-246 218 38 246 215 20 246 215 20 246 215 20
45532-226 184 13 215 174 15 184 144 12 6 6 6
45533- 2 2 6 2 2 6 2 2 6 2 2 6
45534- 26 26 26 94 94 94 42 42 42 14 14 14
45535- 0 0 0 0 0 0 0 0 0 0 0 0
45536- 0 0 0 0 0 0 0 0 0 0 0 0
45537- 0 0 0 0 0 0 0 0 0 0 0 0
45538- 0 0 0 0 0 0 0 0 0 0 0 0
45539- 0 0 0 0 0 0 0 0 0 0 0 0
45540- 0 0 0 0 0 0 0 0 0 0 0 0
45541- 0 0 0 0 0 0 0 0 0 0 0 0
45542- 0 0 0 0 0 0 0 0 0 0 0 0
45543- 0 0 0 0 0 0 0 0 0 0 0 0
45544- 0 0 0 0 0 0 0 0 0 0 0 0
45545- 0 0 0 0 0 0 0 0 0 0 0 0
45546- 0 0 0 0 0 0 0 0 0 0 0 0
45547- 0 0 0 0 0 0 0 0 0 10 10 10
45548- 30 30 30 78 78 78 50 50 50 104 69 6
45549-192 133 9 216 158 10 236 178 12 236 186 11
45550-232 195 16 241 208 19 244 214 54 245 215 43
45551-246 215 20 246 215 20 241 208 19 198 155 10
45552-200 144 11 216 158 10 156 118 10 2 2 6
45553- 2 2 6 2 2 6 2 2 6 2 2 6
45554- 6 6 6 90 90 90 54 54 54 18 18 18
45555- 6 6 6 0 0 0 0 0 0 0 0 0
45556- 0 0 0 0 0 0 0 0 0 0 0 0
45557- 0 0 0 0 0 0 0 0 0 0 0 0
45558- 0 0 0 0 0 0 0 0 0 0 0 0
45559- 0 0 0 0 0 0 0 0 0 0 0 0
45560- 0 0 0 0 0 0 0 0 0 0 0 0
45561- 0 0 0 0 0 0 0 0 0 0 0 0
45562- 0 0 0 0 0 0 0 0 0 0 0 0
45563- 0 0 0 0 0 0 0 0 0 0 0 0
45564- 0 0 0 0 0 0 0 0 0 0 0 0
45565- 0 0 0 0 0 0 0 0 0 0 0 0
45566- 0 0 0 0 0 0 0 0 0 0 0 0
45567- 0 0 0 0 0 0 0 0 0 10 10 10
45568- 30 30 30 78 78 78 46 46 46 22 22 22
45569-137 92 6 210 162 10 239 182 13 238 190 10
45570-238 202 15 241 208 19 246 215 20 246 215 20
45571-241 208 19 203 166 17 185 133 11 210 150 10
45572-216 158 10 210 150 10 102 78 10 2 2 6
45573- 6 6 6 54 54 54 14 14 14 2 2 6
45574- 2 2 6 62 62 62 74 74 74 30 30 30
45575- 10 10 10 0 0 0 0 0 0 0 0 0
45576- 0 0 0 0 0 0 0 0 0 0 0 0
45577- 0 0 0 0 0 0 0 0 0 0 0 0
45578- 0 0 0 0 0 0 0 0 0 0 0 0
45579- 0 0 0 0 0 0 0 0 0 0 0 0
45580- 0 0 0 0 0 0 0 0 0 0 0 0
45581- 0 0 0 0 0 0 0 0 0 0 0 0
45582- 0 0 0 0 0 0 0 0 0 0 0 0
45583- 0 0 0 0 0 0 0 0 0 0 0 0
45584- 0 0 0 0 0 0 0 0 0 0 0 0
45585- 0 0 0 0 0 0 0 0 0 0 0 0
45586- 0 0 0 0 0 0 0 0 0 0 0 0
45587- 0 0 0 0 0 0 0 0 0 10 10 10
45588- 34 34 34 78 78 78 50 50 50 6 6 6
45589- 94 70 30 139 102 15 190 146 13 226 184 13
45590-232 200 30 232 195 16 215 174 15 190 146 13
45591-168 122 10 192 133 9 210 150 10 213 154 11
45592-202 150 34 182 157 106 101 98 89 2 2 6
45593- 2 2 6 78 78 78 116 116 116 58 58 58
45594- 2 2 6 22 22 22 90 90 90 46 46 46
45595- 18 18 18 6 6 6 0 0 0 0 0 0
45596- 0 0 0 0 0 0 0 0 0 0 0 0
45597- 0 0 0 0 0 0 0 0 0 0 0 0
45598- 0 0 0 0 0 0 0 0 0 0 0 0
45599- 0 0 0 0 0 0 0 0 0 0 0 0
45600- 0 0 0 0 0 0 0 0 0 0 0 0
45601- 0 0 0 0 0 0 0 0 0 0 0 0
45602- 0 0 0 0 0 0 0 0 0 0 0 0
45603- 0 0 0 0 0 0 0 0 0 0 0 0
45604- 0 0 0 0 0 0 0 0 0 0 0 0
45605- 0 0 0 0 0 0 0 0 0 0 0 0
45606- 0 0 0 0 0 0 0 0 0 0 0 0
45607- 0 0 0 0 0 0 0 0 0 10 10 10
45608- 38 38 38 86 86 86 50 50 50 6 6 6
45609-128 128 128 174 154 114 156 107 11 168 122 10
45610-198 155 10 184 144 12 197 138 11 200 144 11
45611-206 145 10 206 145 10 197 138 11 188 164 115
45612-195 195 195 198 198 198 174 174 174 14 14 14
45613- 2 2 6 22 22 22 116 116 116 116 116 116
45614- 22 22 22 2 2 6 74 74 74 70 70 70
45615- 30 30 30 10 10 10 0 0 0 0 0 0
45616- 0 0 0 0 0 0 0 0 0 0 0 0
45617- 0 0 0 0 0 0 0 0 0 0 0 0
45618- 0 0 0 0 0 0 0 0 0 0 0 0
45619- 0 0 0 0 0 0 0 0 0 0 0 0
45620- 0 0 0 0 0 0 0 0 0 0 0 0
45621- 0 0 0 0 0 0 0 0 0 0 0 0
45622- 0 0 0 0 0 0 0 0 0 0 0 0
45623- 0 0 0 0 0 0 0 0 0 0 0 0
45624- 0 0 0 0 0 0 0 0 0 0 0 0
45625- 0 0 0 0 0 0 0 0 0 0 0 0
45626- 0 0 0 0 0 0 0 0 0 0 0 0
45627- 0 0 0 0 0 0 6 6 6 18 18 18
45628- 50 50 50 101 101 101 26 26 26 10 10 10
45629-138 138 138 190 190 190 174 154 114 156 107 11
45630-197 138 11 200 144 11 197 138 11 192 133 9
45631-180 123 7 190 142 34 190 178 144 187 187 187
45632-202 202 202 221 221 221 214 214 214 66 66 66
45633- 2 2 6 2 2 6 50 50 50 62 62 62
45634- 6 6 6 2 2 6 10 10 10 90 90 90
45635- 50 50 50 18 18 18 6 6 6 0 0 0
45636- 0 0 0 0 0 0 0 0 0 0 0 0
45637- 0 0 0 0 0 0 0 0 0 0 0 0
45638- 0 0 0 0 0 0 0 0 0 0 0 0
45639- 0 0 0 0 0 0 0 0 0 0 0 0
45640- 0 0 0 0 0 0 0 0 0 0 0 0
45641- 0 0 0 0 0 0 0 0 0 0 0 0
45642- 0 0 0 0 0 0 0 0 0 0 0 0
45643- 0 0 0 0 0 0 0 0 0 0 0 0
45644- 0 0 0 0 0 0 0 0 0 0 0 0
45645- 0 0 0 0 0 0 0 0 0 0 0 0
45646- 0 0 0 0 0 0 0 0 0 0 0 0
45647- 0 0 0 0 0 0 10 10 10 34 34 34
45648- 74 74 74 74 74 74 2 2 6 6 6 6
45649-144 144 144 198 198 198 190 190 190 178 166 146
45650-154 121 60 156 107 11 156 107 11 168 124 44
45651-174 154 114 187 187 187 190 190 190 210 210 210
45652-246 246 246 253 253 253 253 253 253 182 182 182
45653- 6 6 6 2 2 6 2 2 6 2 2 6
45654- 2 2 6 2 2 6 2 2 6 62 62 62
45655- 74 74 74 34 34 34 14 14 14 0 0 0
45656- 0 0 0 0 0 0 0 0 0 0 0 0
45657- 0 0 0 0 0 0 0 0 0 0 0 0
45658- 0 0 0 0 0 0 0 0 0 0 0 0
45659- 0 0 0 0 0 0 0 0 0 0 0 0
45660- 0 0 0 0 0 0 0 0 0 0 0 0
45661- 0 0 0 0 0 0 0 0 0 0 0 0
45662- 0 0 0 0 0 0 0 0 0 0 0 0
45663- 0 0 0 0 0 0 0 0 0 0 0 0
45664- 0 0 0 0 0 0 0 0 0 0 0 0
45665- 0 0 0 0 0 0 0 0 0 0 0 0
45666- 0 0 0 0 0 0 0 0 0 0 0 0
45667- 0 0 0 10 10 10 22 22 22 54 54 54
45668- 94 94 94 18 18 18 2 2 6 46 46 46
45669-234 234 234 221 221 221 190 190 190 190 190 190
45670-190 190 190 187 187 187 187 187 187 190 190 190
45671-190 190 190 195 195 195 214 214 214 242 242 242
45672-253 253 253 253 253 253 253 253 253 253 253 253
45673- 82 82 82 2 2 6 2 2 6 2 2 6
45674- 2 2 6 2 2 6 2 2 6 14 14 14
45675- 86 86 86 54 54 54 22 22 22 6 6 6
45676- 0 0 0 0 0 0 0 0 0 0 0 0
45677- 0 0 0 0 0 0 0 0 0 0 0 0
45678- 0 0 0 0 0 0 0 0 0 0 0 0
45679- 0 0 0 0 0 0 0 0 0 0 0 0
45680- 0 0 0 0 0 0 0 0 0 0 0 0
45681- 0 0 0 0 0 0 0 0 0 0 0 0
45682- 0 0 0 0 0 0 0 0 0 0 0 0
45683- 0 0 0 0 0 0 0 0 0 0 0 0
45684- 0 0 0 0 0 0 0 0 0 0 0 0
45685- 0 0 0 0 0 0 0 0 0 0 0 0
45686- 0 0 0 0 0 0 0 0 0 0 0 0
45687- 6 6 6 18 18 18 46 46 46 90 90 90
45688- 46 46 46 18 18 18 6 6 6 182 182 182
45689-253 253 253 246 246 246 206 206 206 190 190 190
45690-190 190 190 190 190 190 190 190 190 190 190 190
45691-206 206 206 231 231 231 250 250 250 253 253 253
45692-253 253 253 253 253 253 253 253 253 253 253 253
45693-202 202 202 14 14 14 2 2 6 2 2 6
45694- 2 2 6 2 2 6 2 2 6 2 2 6
45695- 42 42 42 86 86 86 42 42 42 18 18 18
45696- 6 6 6 0 0 0 0 0 0 0 0 0
45697- 0 0 0 0 0 0 0 0 0 0 0 0
45698- 0 0 0 0 0 0 0 0 0 0 0 0
45699- 0 0 0 0 0 0 0 0 0 0 0 0
45700- 0 0 0 0 0 0 0 0 0 0 0 0
45701- 0 0 0 0 0 0 0 0 0 0 0 0
45702- 0 0 0 0 0 0 0 0 0 0 0 0
45703- 0 0 0 0 0 0 0 0 0 0 0 0
45704- 0 0 0 0 0 0 0 0 0 0 0 0
45705- 0 0 0 0 0 0 0 0 0 0 0 0
45706- 0 0 0 0 0 0 0 0 0 6 6 6
45707- 14 14 14 38 38 38 74 74 74 66 66 66
45708- 2 2 6 6 6 6 90 90 90 250 250 250
45709-253 253 253 253 253 253 238 238 238 198 198 198
45710-190 190 190 190 190 190 195 195 195 221 221 221
45711-246 246 246 253 253 253 253 253 253 253 253 253
45712-253 253 253 253 253 253 253 253 253 253 253 253
45713-253 253 253 82 82 82 2 2 6 2 2 6
45714- 2 2 6 2 2 6 2 2 6 2 2 6
45715- 2 2 6 78 78 78 70 70 70 34 34 34
45716- 14 14 14 6 6 6 0 0 0 0 0 0
45717- 0 0 0 0 0 0 0 0 0 0 0 0
45718- 0 0 0 0 0 0 0 0 0 0 0 0
45719- 0 0 0 0 0 0 0 0 0 0 0 0
45720- 0 0 0 0 0 0 0 0 0 0 0 0
45721- 0 0 0 0 0 0 0 0 0 0 0 0
45722- 0 0 0 0 0 0 0 0 0 0 0 0
45723- 0 0 0 0 0 0 0 0 0 0 0 0
45724- 0 0 0 0 0 0 0 0 0 0 0 0
45725- 0 0 0 0 0 0 0 0 0 0 0 0
45726- 0 0 0 0 0 0 0 0 0 14 14 14
45727- 34 34 34 66 66 66 78 78 78 6 6 6
45728- 2 2 6 18 18 18 218 218 218 253 253 253
45729-253 253 253 253 253 253 253 253 253 246 246 246
45730-226 226 226 231 231 231 246 246 246 253 253 253
45731-253 253 253 253 253 253 253 253 253 253 253 253
45732-253 253 253 253 253 253 253 253 253 253 253 253
45733-253 253 253 178 178 178 2 2 6 2 2 6
45734- 2 2 6 2 2 6 2 2 6 2 2 6
45735- 2 2 6 18 18 18 90 90 90 62 62 62
45736- 30 30 30 10 10 10 0 0 0 0 0 0
45737- 0 0 0 0 0 0 0 0 0 0 0 0
45738- 0 0 0 0 0 0 0 0 0 0 0 0
45739- 0 0 0 0 0 0 0 0 0 0 0 0
45740- 0 0 0 0 0 0 0 0 0 0 0 0
45741- 0 0 0 0 0 0 0 0 0 0 0 0
45742- 0 0 0 0 0 0 0 0 0 0 0 0
45743- 0 0 0 0 0 0 0 0 0 0 0 0
45744- 0 0 0 0 0 0 0 0 0 0 0 0
45745- 0 0 0 0 0 0 0 0 0 0 0 0
45746- 0 0 0 0 0 0 10 10 10 26 26 26
45747- 58 58 58 90 90 90 18 18 18 2 2 6
45748- 2 2 6 110 110 110 253 253 253 253 253 253
45749-253 253 253 253 253 253 253 253 253 253 253 253
45750-250 250 250 253 253 253 253 253 253 253 253 253
45751-253 253 253 253 253 253 253 253 253 253 253 253
45752-253 253 253 253 253 253 253 253 253 253 253 253
45753-253 253 253 231 231 231 18 18 18 2 2 6
45754- 2 2 6 2 2 6 2 2 6 2 2 6
45755- 2 2 6 2 2 6 18 18 18 94 94 94
45756- 54 54 54 26 26 26 10 10 10 0 0 0
45757- 0 0 0 0 0 0 0 0 0 0 0 0
45758- 0 0 0 0 0 0 0 0 0 0 0 0
45759- 0 0 0 0 0 0 0 0 0 0 0 0
45760- 0 0 0 0 0 0 0 0 0 0 0 0
45761- 0 0 0 0 0 0 0 0 0 0 0 0
45762- 0 0 0 0 0 0 0 0 0 0 0 0
45763- 0 0 0 0 0 0 0 0 0 0 0 0
45764- 0 0 0 0 0 0 0 0 0 0 0 0
45765- 0 0 0 0 0 0 0 0 0 0 0 0
45766- 0 0 0 6 6 6 22 22 22 50 50 50
45767- 90 90 90 26 26 26 2 2 6 2 2 6
45768- 14 14 14 195 195 195 250 250 250 253 253 253
45769-253 253 253 253 253 253 253 253 253 253 253 253
45770-253 253 253 253 253 253 253 253 253 253 253 253
45771-253 253 253 253 253 253 253 253 253 253 253 253
45772-253 253 253 253 253 253 253 253 253 253 253 253
45773-250 250 250 242 242 242 54 54 54 2 2 6
45774- 2 2 6 2 2 6 2 2 6 2 2 6
45775- 2 2 6 2 2 6 2 2 6 38 38 38
45776- 86 86 86 50 50 50 22 22 22 6 6 6
45777- 0 0 0 0 0 0 0 0 0 0 0 0
45778- 0 0 0 0 0 0 0 0 0 0 0 0
45779- 0 0 0 0 0 0 0 0 0 0 0 0
45780- 0 0 0 0 0 0 0 0 0 0 0 0
45781- 0 0 0 0 0 0 0 0 0 0 0 0
45782- 0 0 0 0 0 0 0 0 0 0 0 0
45783- 0 0 0 0 0 0 0 0 0 0 0 0
45784- 0 0 0 0 0 0 0 0 0 0 0 0
45785- 0 0 0 0 0 0 0 0 0 0 0 0
45786- 6 6 6 14 14 14 38 38 38 82 82 82
45787- 34 34 34 2 2 6 2 2 6 2 2 6
45788- 42 42 42 195 195 195 246 246 246 253 253 253
45789-253 253 253 253 253 253 253 253 253 250 250 250
45790-242 242 242 242 242 242 250 250 250 253 253 253
45791-253 253 253 253 253 253 253 253 253 253 253 253
45792-253 253 253 250 250 250 246 246 246 238 238 238
45793-226 226 226 231 231 231 101 101 101 6 6 6
45794- 2 2 6 2 2 6 2 2 6 2 2 6
45795- 2 2 6 2 2 6 2 2 6 2 2 6
45796- 38 38 38 82 82 82 42 42 42 14 14 14
45797- 6 6 6 0 0 0 0 0 0 0 0 0
45798- 0 0 0 0 0 0 0 0 0 0 0 0
45799- 0 0 0 0 0 0 0 0 0 0 0 0
45800- 0 0 0 0 0 0 0 0 0 0 0 0
45801- 0 0 0 0 0 0 0 0 0 0 0 0
45802- 0 0 0 0 0 0 0 0 0 0 0 0
45803- 0 0 0 0 0 0 0 0 0 0 0 0
45804- 0 0 0 0 0 0 0 0 0 0 0 0
45805- 0 0 0 0 0 0 0 0 0 0 0 0
45806- 10 10 10 26 26 26 62 62 62 66 66 66
45807- 2 2 6 2 2 6 2 2 6 6 6 6
45808- 70 70 70 170 170 170 206 206 206 234 234 234
45809-246 246 246 250 250 250 250 250 250 238 238 238
45810-226 226 226 231 231 231 238 238 238 250 250 250
45811-250 250 250 250 250 250 246 246 246 231 231 231
45812-214 214 214 206 206 206 202 202 202 202 202 202
45813-198 198 198 202 202 202 182 182 182 18 18 18
45814- 2 2 6 2 2 6 2 2 6 2 2 6
45815- 2 2 6 2 2 6 2 2 6 2 2 6
45816- 2 2 6 62 62 62 66 66 66 30 30 30
45817- 10 10 10 0 0 0 0 0 0 0 0 0
45818- 0 0 0 0 0 0 0 0 0 0 0 0
45819- 0 0 0 0 0 0 0 0 0 0 0 0
45820- 0 0 0 0 0 0 0 0 0 0 0 0
45821- 0 0 0 0 0 0 0 0 0 0 0 0
45822- 0 0 0 0 0 0 0 0 0 0 0 0
45823- 0 0 0 0 0 0 0 0 0 0 0 0
45824- 0 0 0 0 0 0 0 0 0 0 0 0
45825- 0 0 0 0 0 0 0 0 0 0 0 0
45826- 14 14 14 42 42 42 82 82 82 18 18 18
45827- 2 2 6 2 2 6 2 2 6 10 10 10
45828- 94 94 94 182 182 182 218 218 218 242 242 242
45829-250 250 250 253 253 253 253 253 253 250 250 250
45830-234 234 234 253 253 253 253 253 253 253 253 253
45831-253 253 253 253 253 253 253 253 253 246 246 246
45832-238 238 238 226 226 226 210 210 210 202 202 202
45833-195 195 195 195 195 195 210 210 210 158 158 158
45834- 6 6 6 14 14 14 50 50 50 14 14 14
45835- 2 2 6 2 2 6 2 2 6 2 2 6
45836- 2 2 6 6 6 6 86 86 86 46 46 46
45837- 18 18 18 6 6 6 0 0 0 0 0 0
45838- 0 0 0 0 0 0 0 0 0 0 0 0
45839- 0 0 0 0 0 0 0 0 0 0 0 0
45840- 0 0 0 0 0 0 0 0 0 0 0 0
45841- 0 0 0 0 0 0 0 0 0 0 0 0
45842- 0 0 0 0 0 0 0 0 0 0 0 0
45843- 0 0 0 0 0 0 0 0 0 0 0 0
45844- 0 0 0 0 0 0 0 0 0 0 0 0
45845- 0 0 0 0 0 0 0 0 0 6 6 6
45846- 22 22 22 54 54 54 70 70 70 2 2 6
45847- 2 2 6 10 10 10 2 2 6 22 22 22
45848-166 166 166 231 231 231 250 250 250 253 253 253
45849-253 253 253 253 253 253 253 253 253 250 250 250
45850-242 242 242 253 253 253 253 253 253 253 253 253
45851-253 253 253 253 253 253 253 253 253 253 253 253
45852-253 253 253 253 253 253 253 253 253 246 246 246
45853-231 231 231 206 206 206 198 198 198 226 226 226
45854- 94 94 94 2 2 6 6 6 6 38 38 38
45855- 30 30 30 2 2 6 2 2 6 2 2 6
45856- 2 2 6 2 2 6 62 62 62 66 66 66
45857- 26 26 26 10 10 10 0 0 0 0 0 0
45858- 0 0 0 0 0 0 0 0 0 0 0 0
45859- 0 0 0 0 0 0 0 0 0 0 0 0
45860- 0 0 0 0 0 0 0 0 0 0 0 0
45861- 0 0 0 0 0 0 0 0 0 0 0 0
45862- 0 0 0 0 0 0 0 0 0 0 0 0
45863- 0 0 0 0 0 0 0 0 0 0 0 0
45864- 0 0 0 0 0 0 0 0 0 0 0 0
45865- 0 0 0 0 0 0 0 0 0 10 10 10
45866- 30 30 30 74 74 74 50 50 50 2 2 6
45867- 26 26 26 26 26 26 2 2 6 106 106 106
45868-238 238 238 253 253 253 253 253 253 253 253 253
45869-253 253 253 253 253 253 253 253 253 253 253 253
45870-253 253 253 253 253 253 253 253 253 253 253 253
45871-253 253 253 253 253 253 253 253 253 253 253 253
45872-253 253 253 253 253 253 253 253 253 253 253 253
45873-253 253 253 246 246 246 218 218 218 202 202 202
45874-210 210 210 14 14 14 2 2 6 2 2 6
45875- 30 30 30 22 22 22 2 2 6 2 2 6
45876- 2 2 6 2 2 6 18 18 18 86 86 86
45877- 42 42 42 14 14 14 0 0 0 0 0 0
45878- 0 0 0 0 0 0 0 0 0 0 0 0
45879- 0 0 0 0 0 0 0 0 0 0 0 0
45880- 0 0 0 0 0 0 0 0 0 0 0 0
45881- 0 0 0 0 0 0 0 0 0 0 0 0
45882- 0 0 0 0 0 0 0 0 0 0 0 0
45883- 0 0 0 0 0 0 0 0 0 0 0 0
45884- 0 0 0 0 0 0 0 0 0 0 0 0
45885- 0 0 0 0 0 0 0 0 0 14 14 14
45886- 42 42 42 90 90 90 22 22 22 2 2 6
45887- 42 42 42 2 2 6 18 18 18 218 218 218
45888-253 253 253 253 253 253 253 253 253 253 253 253
45889-253 253 253 253 253 253 253 253 253 253 253 253
45890-253 253 253 253 253 253 253 253 253 253 253 253
45891-253 253 253 253 253 253 253 253 253 253 253 253
45892-253 253 253 253 253 253 253 253 253 253 253 253
45893-253 253 253 253 253 253 250 250 250 221 221 221
45894-218 218 218 101 101 101 2 2 6 14 14 14
45895- 18 18 18 38 38 38 10 10 10 2 2 6
45896- 2 2 6 2 2 6 2 2 6 78 78 78
45897- 58 58 58 22 22 22 6 6 6 0 0 0
45898- 0 0 0 0 0 0 0 0 0 0 0 0
45899- 0 0 0 0 0 0 0 0 0 0 0 0
45900- 0 0 0 0 0 0 0 0 0 0 0 0
45901- 0 0 0 0 0 0 0 0 0 0 0 0
45902- 0 0 0 0 0 0 0 0 0 0 0 0
45903- 0 0 0 0 0 0 0 0 0 0 0 0
45904- 0 0 0 0 0 0 0 0 0 0 0 0
45905- 0 0 0 0 0 0 6 6 6 18 18 18
45906- 54 54 54 82 82 82 2 2 6 26 26 26
45907- 22 22 22 2 2 6 123 123 123 253 253 253
45908-253 253 253 253 253 253 253 253 253 253 253 253
45909-253 253 253 253 253 253 253 253 253 253 253 253
45910-253 253 253 253 253 253 253 253 253 253 253 253
45911-253 253 253 253 253 253 253 253 253 253 253 253
45912-253 253 253 253 253 253 253 253 253 253 253 253
45913-253 253 253 253 253 253 253 253 253 250 250 250
45914-238 238 238 198 198 198 6 6 6 38 38 38
45915- 58 58 58 26 26 26 38 38 38 2 2 6
45916- 2 2 6 2 2 6 2 2 6 46 46 46
45917- 78 78 78 30 30 30 10 10 10 0 0 0
45918- 0 0 0 0 0 0 0 0 0 0 0 0
45919- 0 0 0 0 0 0 0 0 0 0 0 0
45920- 0 0 0 0 0 0 0 0 0 0 0 0
45921- 0 0 0 0 0 0 0 0 0 0 0 0
45922- 0 0 0 0 0 0 0 0 0 0 0 0
45923- 0 0 0 0 0 0 0 0 0 0 0 0
45924- 0 0 0 0 0 0 0 0 0 0 0 0
45925- 0 0 0 0 0 0 10 10 10 30 30 30
45926- 74 74 74 58 58 58 2 2 6 42 42 42
45927- 2 2 6 22 22 22 231 231 231 253 253 253
45928-253 253 253 253 253 253 253 253 253 253 253 253
45929-253 253 253 253 253 253 253 253 253 250 250 250
45930-253 253 253 253 253 253 253 253 253 253 253 253
45931-253 253 253 253 253 253 253 253 253 253 253 253
45932-253 253 253 253 253 253 253 253 253 253 253 253
45933-253 253 253 253 253 253 253 253 253 253 253 253
45934-253 253 253 246 246 246 46 46 46 38 38 38
45935- 42 42 42 14 14 14 38 38 38 14 14 14
45936- 2 2 6 2 2 6 2 2 6 6 6 6
45937- 86 86 86 46 46 46 14 14 14 0 0 0
45938- 0 0 0 0 0 0 0 0 0 0 0 0
45939- 0 0 0 0 0 0 0 0 0 0 0 0
45940- 0 0 0 0 0 0 0 0 0 0 0 0
45941- 0 0 0 0 0 0 0 0 0 0 0 0
45942- 0 0 0 0 0 0 0 0 0 0 0 0
45943- 0 0 0 0 0 0 0 0 0 0 0 0
45944- 0 0 0 0 0 0 0 0 0 0 0 0
45945- 0 0 0 6 6 6 14 14 14 42 42 42
45946- 90 90 90 18 18 18 18 18 18 26 26 26
45947- 2 2 6 116 116 116 253 253 253 253 253 253
45948-253 253 253 253 253 253 253 253 253 253 253 253
45949-253 253 253 253 253 253 250 250 250 238 238 238
45950-253 253 253 253 253 253 253 253 253 253 253 253
45951-253 253 253 253 253 253 253 253 253 253 253 253
45952-253 253 253 253 253 253 253 253 253 253 253 253
45953-253 253 253 253 253 253 253 253 253 253 253 253
45954-253 253 253 253 253 253 94 94 94 6 6 6
45955- 2 2 6 2 2 6 10 10 10 34 34 34
45956- 2 2 6 2 2 6 2 2 6 2 2 6
45957- 74 74 74 58 58 58 22 22 22 6 6 6
45958- 0 0 0 0 0 0 0 0 0 0 0 0
45959- 0 0 0 0 0 0 0 0 0 0 0 0
45960- 0 0 0 0 0 0 0 0 0 0 0 0
45961- 0 0 0 0 0 0 0 0 0 0 0 0
45962- 0 0 0 0 0 0 0 0 0 0 0 0
45963- 0 0 0 0 0 0 0 0 0 0 0 0
45964- 0 0 0 0 0 0 0 0 0 0 0 0
45965- 0 0 0 10 10 10 26 26 26 66 66 66
45966- 82 82 82 2 2 6 38 38 38 6 6 6
45967- 14 14 14 210 210 210 253 253 253 253 253 253
45968-253 253 253 253 253 253 253 253 253 253 253 253
45969-253 253 253 253 253 253 246 246 246 242 242 242
45970-253 253 253 253 253 253 253 253 253 253 253 253
45971-253 253 253 253 253 253 253 253 253 253 253 253
45972-253 253 253 253 253 253 253 253 253 253 253 253
45973-253 253 253 253 253 253 253 253 253 253 253 253
45974-253 253 253 253 253 253 144 144 144 2 2 6
45975- 2 2 6 2 2 6 2 2 6 46 46 46
45976- 2 2 6 2 2 6 2 2 6 2 2 6
45977- 42 42 42 74 74 74 30 30 30 10 10 10
45978- 0 0 0 0 0 0 0 0 0 0 0 0
45979- 0 0 0 0 0 0 0 0 0 0 0 0
45980- 0 0 0 0 0 0 0 0 0 0 0 0
45981- 0 0 0 0 0 0 0 0 0 0 0 0
45982- 0 0 0 0 0 0 0 0 0 0 0 0
45983- 0 0 0 0 0 0 0 0 0 0 0 0
45984- 0 0 0 0 0 0 0 0 0 0 0 0
45985- 6 6 6 14 14 14 42 42 42 90 90 90
45986- 26 26 26 6 6 6 42 42 42 2 2 6
45987- 74 74 74 250 250 250 253 253 253 253 253 253
45988-253 253 253 253 253 253 253 253 253 253 253 253
45989-253 253 253 253 253 253 242 242 242 242 242 242
45990-253 253 253 253 253 253 253 253 253 253 253 253
45991-253 253 253 253 253 253 253 253 253 253 253 253
45992-253 253 253 253 253 253 253 253 253 253 253 253
45993-253 253 253 253 253 253 253 253 253 253 253 253
45994-253 253 253 253 253 253 182 182 182 2 2 6
45995- 2 2 6 2 2 6 2 2 6 46 46 46
45996- 2 2 6 2 2 6 2 2 6 2 2 6
45997- 10 10 10 86 86 86 38 38 38 10 10 10
45998- 0 0 0 0 0 0 0 0 0 0 0 0
45999- 0 0 0 0 0 0 0 0 0 0 0 0
46000- 0 0 0 0 0 0 0 0 0 0 0 0
46001- 0 0 0 0 0 0 0 0 0 0 0 0
46002- 0 0 0 0 0 0 0 0 0 0 0 0
46003- 0 0 0 0 0 0 0 0 0 0 0 0
46004- 0 0 0 0 0 0 0 0 0 0 0 0
46005- 10 10 10 26 26 26 66 66 66 82 82 82
46006- 2 2 6 22 22 22 18 18 18 2 2 6
46007-149 149 149 253 253 253 253 253 253 253 253 253
46008-253 253 253 253 253 253 253 253 253 253 253 253
46009-253 253 253 253 253 253 234 234 234 242 242 242
46010-253 253 253 253 253 253 253 253 253 253 253 253
46011-253 253 253 253 253 253 253 253 253 253 253 253
46012-253 253 253 253 253 253 253 253 253 253 253 253
46013-253 253 253 253 253 253 253 253 253 253 253 253
46014-253 253 253 253 253 253 206 206 206 2 2 6
46015- 2 2 6 2 2 6 2 2 6 38 38 38
46016- 2 2 6 2 2 6 2 2 6 2 2 6
46017- 6 6 6 86 86 86 46 46 46 14 14 14
46018- 0 0 0 0 0 0 0 0 0 0 0 0
46019- 0 0 0 0 0 0 0 0 0 0 0 0
46020- 0 0 0 0 0 0 0 0 0 0 0 0
46021- 0 0 0 0 0 0 0 0 0 0 0 0
46022- 0 0 0 0 0 0 0 0 0 0 0 0
46023- 0 0 0 0 0 0 0 0 0 0 0 0
46024- 0 0 0 0 0 0 0 0 0 6 6 6
46025- 18 18 18 46 46 46 86 86 86 18 18 18
46026- 2 2 6 34 34 34 10 10 10 6 6 6
46027-210 210 210 253 253 253 253 253 253 253 253 253
46028-253 253 253 253 253 253 253 253 253 253 253 253
46029-253 253 253 253 253 253 234 234 234 242 242 242
46030-253 253 253 253 253 253 253 253 253 253 253 253
46031-253 253 253 253 253 253 253 253 253 253 253 253
46032-253 253 253 253 253 253 253 253 253 253 253 253
46033-253 253 253 253 253 253 253 253 253 253 253 253
46034-253 253 253 253 253 253 221 221 221 6 6 6
46035- 2 2 6 2 2 6 6 6 6 30 30 30
46036- 2 2 6 2 2 6 2 2 6 2 2 6
46037- 2 2 6 82 82 82 54 54 54 18 18 18
46038- 6 6 6 0 0 0 0 0 0 0 0 0
46039- 0 0 0 0 0 0 0 0 0 0 0 0
46040- 0 0 0 0 0 0 0 0 0 0 0 0
46041- 0 0 0 0 0 0 0 0 0 0 0 0
46042- 0 0 0 0 0 0 0 0 0 0 0 0
46043- 0 0 0 0 0 0 0 0 0 0 0 0
46044- 0 0 0 0 0 0 0 0 0 10 10 10
46045- 26 26 26 66 66 66 62 62 62 2 2 6
46046- 2 2 6 38 38 38 10 10 10 26 26 26
46047-238 238 238 253 253 253 253 253 253 253 253 253
46048-253 253 253 253 253 253 253 253 253 253 253 253
46049-253 253 253 253 253 253 231 231 231 238 238 238
46050-253 253 253 253 253 253 253 253 253 253 253 253
46051-253 253 253 253 253 253 253 253 253 253 253 253
46052-253 253 253 253 253 253 253 253 253 253 253 253
46053-253 253 253 253 253 253 253 253 253 253 253 253
46054-253 253 253 253 253 253 231 231 231 6 6 6
46055- 2 2 6 2 2 6 10 10 10 30 30 30
46056- 2 2 6 2 2 6 2 2 6 2 2 6
46057- 2 2 6 66 66 66 58 58 58 22 22 22
46058- 6 6 6 0 0 0 0 0 0 0 0 0
46059- 0 0 0 0 0 0 0 0 0 0 0 0
46060- 0 0 0 0 0 0 0 0 0 0 0 0
46061- 0 0 0 0 0 0 0 0 0 0 0 0
46062- 0 0 0 0 0 0 0 0 0 0 0 0
46063- 0 0 0 0 0 0 0 0 0 0 0 0
46064- 0 0 0 0 0 0 0 0 0 10 10 10
46065- 38 38 38 78 78 78 6 6 6 2 2 6
46066- 2 2 6 46 46 46 14 14 14 42 42 42
46067-246 246 246 253 253 253 253 253 253 253 253 253
46068-253 253 253 253 253 253 253 253 253 253 253 253
46069-253 253 253 253 253 253 231 231 231 242 242 242
46070-253 253 253 253 253 253 253 253 253 253 253 253
46071-253 253 253 253 253 253 253 253 253 253 253 253
46072-253 253 253 253 253 253 253 253 253 253 253 253
46073-253 253 253 253 253 253 253 253 253 253 253 253
46074-253 253 253 253 253 253 234 234 234 10 10 10
46075- 2 2 6 2 2 6 22 22 22 14 14 14
46076- 2 2 6 2 2 6 2 2 6 2 2 6
46077- 2 2 6 66 66 66 62 62 62 22 22 22
46078- 6 6 6 0 0 0 0 0 0 0 0 0
46079- 0 0 0 0 0 0 0 0 0 0 0 0
46080- 0 0 0 0 0 0 0 0 0 0 0 0
46081- 0 0 0 0 0 0 0 0 0 0 0 0
46082- 0 0 0 0 0 0 0 0 0 0 0 0
46083- 0 0 0 0 0 0 0 0 0 0 0 0
46084- 0 0 0 0 0 0 6 6 6 18 18 18
46085- 50 50 50 74 74 74 2 2 6 2 2 6
46086- 14 14 14 70 70 70 34 34 34 62 62 62
46087-250 250 250 253 253 253 253 253 253 253 253 253
46088-253 253 253 253 253 253 253 253 253 253 253 253
46089-253 253 253 253 253 253 231 231 231 246 246 246
46090-253 253 253 253 253 253 253 253 253 253 253 253
46091-253 253 253 253 253 253 253 253 253 253 253 253
46092-253 253 253 253 253 253 253 253 253 253 253 253
46093-253 253 253 253 253 253 253 253 253 253 253 253
46094-253 253 253 253 253 253 234 234 234 14 14 14
46095- 2 2 6 2 2 6 30 30 30 2 2 6
46096- 2 2 6 2 2 6 2 2 6 2 2 6
46097- 2 2 6 66 66 66 62 62 62 22 22 22
46098- 6 6 6 0 0 0 0 0 0 0 0 0
46099- 0 0 0 0 0 0 0 0 0 0 0 0
46100- 0 0 0 0 0 0 0 0 0 0 0 0
46101- 0 0 0 0 0 0 0 0 0 0 0 0
46102- 0 0 0 0 0 0 0 0 0 0 0 0
46103- 0 0 0 0 0 0 0 0 0 0 0 0
46104- 0 0 0 0 0 0 6 6 6 18 18 18
46105- 54 54 54 62 62 62 2 2 6 2 2 6
46106- 2 2 6 30 30 30 46 46 46 70 70 70
46107-250 250 250 253 253 253 253 253 253 253 253 253
46108-253 253 253 253 253 253 253 253 253 253 253 253
46109-253 253 253 253 253 253 231 231 231 246 246 246
46110-253 253 253 253 253 253 253 253 253 253 253 253
46111-253 253 253 253 253 253 253 253 253 253 253 253
46112-253 253 253 253 253 253 253 253 253 253 253 253
46113-253 253 253 253 253 253 253 253 253 253 253 253
46114-253 253 253 253 253 253 226 226 226 10 10 10
46115- 2 2 6 6 6 6 30 30 30 2 2 6
46116- 2 2 6 2 2 6 2 2 6 2 2 6
46117- 2 2 6 66 66 66 58 58 58 22 22 22
46118- 6 6 6 0 0 0 0 0 0 0 0 0
46119- 0 0 0 0 0 0 0 0 0 0 0 0
46120- 0 0 0 0 0 0 0 0 0 0 0 0
46121- 0 0 0 0 0 0 0 0 0 0 0 0
46122- 0 0 0 0 0 0 0 0 0 0 0 0
46123- 0 0 0 0 0 0 0 0 0 0 0 0
46124- 0 0 0 0 0 0 6 6 6 22 22 22
46125- 58 58 58 62 62 62 2 2 6 2 2 6
46126- 2 2 6 2 2 6 30 30 30 78 78 78
46127-250 250 250 253 253 253 253 253 253 253 253 253
46128-253 253 253 253 253 253 253 253 253 253 253 253
46129-253 253 253 253 253 253 231 231 231 246 246 246
46130-253 253 253 253 253 253 253 253 253 253 253 253
46131-253 253 253 253 253 253 253 253 253 253 253 253
46132-253 253 253 253 253 253 253 253 253 253 253 253
46133-253 253 253 253 253 253 253 253 253 253 253 253
46134-253 253 253 253 253 253 206 206 206 2 2 6
46135- 22 22 22 34 34 34 18 14 6 22 22 22
46136- 26 26 26 18 18 18 6 6 6 2 2 6
46137- 2 2 6 82 82 82 54 54 54 18 18 18
46138- 6 6 6 0 0 0 0 0 0 0 0 0
46139- 0 0 0 0 0 0 0 0 0 0 0 0
46140- 0 0 0 0 0 0 0 0 0 0 0 0
46141- 0 0 0 0 0 0 0 0 0 0 0 0
46142- 0 0 0 0 0 0 0 0 0 0 0 0
46143- 0 0 0 0 0 0 0 0 0 0 0 0
46144- 0 0 0 0 0 0 6 6 6 26 26 26
46145- 62 62 62 106 106 106 74 54 14 185 133 11
46146-210 162 10 121 92 8 6 6 6 62 62 62
46147-238 238 238 253 253 253 253 253 253 253 253 253
46148-253 253 253 253 253 253 253 253 253 253 253 253
46149-253 253 253 253 253 253 231 231 231 246 246 246
46150-253 253 253 253 253 253 253 253 253 253 253 253
46151-253 253 253 253 253 253 253 253 253 253 253 253
46152-253 253 253 253 253 253 253 253 253 253 253 253
46153-253 253 253 253 253 253 253 253 253 253 253 253
46154-253 253 253 253 253 253 158 158 158 18 18 18
46155- 14 14 14 2 2 6 2 2 6 2 2 6
46156- 6 6 6 18 18 18 66 66 66 38 38 38
46157- 6 6 6 94 94 94 50 50 50 18 18 18
46158- 6 6 6 0 0 0 0 0 0 0 0 0
46159- 0 0 0 0 0 0 0 0 0 0 0 0
46160- 0 0 0 0 0 0 0 0 0 0 0 0
46161- 0 0 0 0 0 0 0 0 0 0 0 0
46162- 0 0 0 0 0 0 0 0 0 0 0 0
46163- 0 0 0 0 0 0 0 0 0 6 6 6
46164- 10 10 10 10 10 10 18 18 18 38 38 38
46165- 78 78 78 142 134 106 216 158 10 242 186 14
46166-246 190 14 246 190 14 156 118 10 10 10 10
46167- 90 90 90 238 238 238 253 253 253 253 253 253
46168-253 253 253 253 253 253 253 253 253 253 253 253
46169-253 253 253 253 253 253 231 231 231 250 250 250
46170-253 253 253 253 253 253 253 253 253 253 253 253
46171-253 253 253 253 253 253 253 253 253 253 253 253
46172-253 253 253 253 253 253 253 253 253 253 253 253
46173-253 253 253 253 253 253 253 253 253 246 230 190
46174-238 204 91 238 204 91 181 142 44 37 26 9
46175- 2 2 6 2 2 6 2 2 6 2 2 6
46176- 2 2 6 2 2 6 38 38 38 46 46 46
46177- 26 26 26 106 106 106 54 54 54 18 18 18
46178- 6 6 6 0 0 0 0 0 0 0 0 0
46179- 0 0 0 0 0 0 0 0 0 0 0 0
46180- 0 0 0 0 0 0 0 0 0 0 0 0
46181- 0 0 0 0 0 0 0 0 0 0 0 0
46182- 0 0 0 0 0 0 0 0 0 0 0 0
46183- 0 0 0 6 6 6 14 14 14 22 22 22
46184- 30 30 30 38 38 38 50 50 50 70 70 70
46185-106 106 106 190 142 34 226 170 11 242 186 14
46186-246 190 14 246 190 14 246 190 14 154 114 10
46187- 6 6 6 74 74 74 226 226 226 253 253 253
46188-253 253 253 253 253 253 253 253 253 253 253 253
46189-253 253 253 253 253 253 231 231 231 250 250 250
46190-253 253 253 253 253 253 253 253 253 253 253 253
46191-253 253 253 253 253 253 253 253 253 253 253 253
46192-253 253 253 253 253 253 253 253 253 253 253 253
46193-253 253 253 253 253 253 253 253 253 228 184 62
46194-241 196 14 241 208 19 232 195 16 38 30 10
46195- 2 2 6 2 2 6 2 2 6 2 2 6
46196- 2 2 6 6 6 6 30 30 30 26 26 26
46197-203 166 17 154 142 90 66 66 66 26 26 26
46198- 6 6 6 0 0 0 0 0 0 0 0 0
46199- 0 0 0 0 0 0 0 0 0 0 0 0
46200- 0 0 0 0 0 0 0 0 0 0 0 0
46201- 0 0 0 0 0 0 0 0 0 0 0 0
46202- 0 0 0 0 0 0 0 0 0 0 0 0
46203- 6 6 6 18 18 18 38 38 38 58 58 58
46204- 78 78 78 86 86 86 101 101 101 123 123 123
46205-175 146 61 210 150 10 234 174 13 246 186 14
46206-246 190 14 246 190 14 246 190 14 238 190 10
46207-102 78 10 2 2 6 46 46 46 198 198 198
46208-253 253 253 253 253 253 253 253 253 253 253 253
46209-253 253 253 253 253 253 234 234 234 242 242 242
46210-253 253 253 253 253 253 253 253 253 253 253 253
46211-253 253 253 253 253 253 253 253 253 253 253 253
46212-253 253 253 253 253 253 253 253 253 253 253 253
46213-253 253 253 253 253 253 253 253 253 224 178 62
46214-242 186 14 241 196 14 210 166 10 22 18 6
46215- 2 2 6 2 2 6 2 2 6 2 2 6
46216- 2 2 6 2 2 6 6 6 6 121 92 8
46217-238 202 15 232 195 16 82 82 82 34 34 34
46218- 10 10 10 0 0 0 0 0 0 0 0 0
46219- 0 0 0 0 0 0 0 0 0 0 0 0
46220- 0 0 0 0 0 0 0 0 0 0 0 0
46221- 0 0 0 0 0 0 0 0 0 0 0 0
46222- 0 0 0 0 0 0 0 0 0 0 0 0
46223- 14 14 14 38 38 38 70 70 70 154 122 46
46224-190 142 34 200 144 11 197 138 11 197 138 11
46225-213 154 11 226 170 11 242 186 14 246 190 14
46226-246 190 14 246 190 14 246 190 14 246 190 14
46227-225 175 15 46 32 6 2 2 6 22 22 22
46228-158 158 158 250 250 250 253 253 253 253 253 253
46229-253 253 253 253 253 253 253 253 253 253 253 253
46230-253 253 253 253 253 253 253 253 253 253 253 253
46231-253 253 253 253 253 253 253 253 253 253 253 253
46232-253 253 253 253 253 253 253 253 253 253 253 253
46233-253 253 253 250 250 250 242 242 242 224 178 62
46234-239 182 13 236 186 11 213 154 11 46 32 6
46235- 2 2 6 2 2 6 2 2 6 2 2 6
46236- 2 2 6 2 2 6 61 42 6 225 175 15
46237-238 190 10 236 186 11 112 100 78 42 42 42
46238- 14 14 14 0 0 0 0 0 0 0 0 0
46239- 0 0 0 0 0 0 0 0 0 0 0 0
46240- 0 0 0 0 0 0 0 0 0 0 0 0
46241- 0 0 0 0 0 0 0 0 0 0 0 0
46242- 0 0 0 0 0 0 0 0 0 6 6 6
46243- 22 22 22 54 54 54 154 122 46 213 154 11
46244-226 170 11 230 174 11 226 170 11 226 170 11
46245-236 178 12 242 186 14 246 190 14 246 190 14
46246-246 190 14 246 190 14 246 190 14 246 190 14
46247-241 196 14 184 144 12 10 10 10 2 2 6
46248- 6 6 6 116 116 116 242 242 242 253 253 253
46249-253 253 253 253 253 253 253 253 253 253 253 253
46250-253 253 253 253 253 253 253 253 253 253 253 253
46251-253 253 253 253 253 253 253 253 253 253 253 253
46252-253 253 253 253 253 253 253 253 253 253 253 253
46253-253 253 253 231 231 231 198 198 198 214 170 54
46254-236 178 12 236 178 12 210 150 10 137 92 6
46255- 18 14 6 2 2 6 2 2 6 2 2 6
46256- 6 6 6 70 47 6 200 144 11 236 178 12
46257-239 182 13 239 182 13 124 112 88 58 58 58
46258- 22 22 22 6 6 6 0 0 0 0 0 0
46259- 0 0 0 0 0 0 0 0 0 0 0 0
46260- 0 0 0 0 0 0 0 0 0 0 0 0
46261- 0 0 0 0 0 0 0 0 0 0 0 0
46262- 0 0 0 0 0 0 0 0 0 10 10 10
46263- 30 30 30 70 70 70 180 133 36 226 170 11
46264-239 182 13 242 186 14 242 186 14 246 186 14
46265-246 190 14 246 190 14 246 190 14 246 190 14
46266-246 190 14 246 190 14 246 190 14 246 190 14
46267-246 190 14 232 195 16 98 70 6 2 2 6
46268- 2 2 6 2 2 6 66 66 66 221 221 221
46269-253 253 253 253 253 253 253 253 253 253 253 253
46270-253 253 253 253 253 253 253 253 253 253 253 253
46271-253 253 253 253 253 253 253 253 253 253 253 253
46272-253 253 253 253 253 253 253 253 253 253 253 253
46273-253 253 253 206 206 206 198 198 198 214 166 58
46274-230 174 11 230 174 11 216 158 10 192 133 9
46275-163 110 8 116 81 8 102 78 10 116 81 8
46276-167 114 7 197 138 11 226 170 11 239 182 13
46277-242 186 14 242 186 14 162 146 94 78 78 78
46278- 34 34 34 14 14 14 6 6 6 0 0 0
46279- 0 0 0 0 0 0 0 0 0 0 0 0
46280- 0 0 0 0 0 0 0 0 0 0 0 0
46281- 0 0 0 0 0 0 0 0 0 0 0 0
46282- 0 0 0 0 0 0 0 0 0 6 6 6
46283- 30 30 30 78 78 78 190 142 34 226 170 11
46284-239 182 13 246 190 14 246 190 14 246 190 14
46285-246 190 14 246 190 14 246 190 14 246 190 14
46286-246 190 14 246 190 14 246 190 14 246 190 14
46287-246 190 14 241 196 14 203 166 17 22 18 6
46288- 2 2 6 2 2 6 2 2 6 38 38 38
46289-218 218 218 253 253 253 253 253 253 253 253 253
46290-253 253 253 253 253 253 253 253 253 253 253 253
46291-253 253 253 253 253 253 253 253 253 253 253 253
46292-253 253 253 253 253 253 253 253 253 253 253 253
46293-250 250 250 206 206 206 198 198 198 202 162 69
46294-226 170 11 236 178 12 224 166 10 210 150 10
46295-200 144 11 197 138 11 192 133 9 197 138 11
46296-210 150 10 226 170 11 242 186 14 246 190 14
46297-246 190 14 246 186 14 225 175 15 124 112 88
46298- 62 62 62 30 30 30 14 14 14 6 6 6
46299- 0 0 0 0 0 0 0 0 0 0 0 0
46300- 0 0 0 0 0 0 0 0 0 0 0 0
46301- 0 0 0 0 0 0 0 0 0 0 0 0
46302- 0 0 0 0 0 0 0 0 0 10 10 10
46303- 30 30 30 78 78 78 174 135 50 224 166 10
46304-239 182 13 246 190 14 246 190 14 246 190 14
46305-246 190 14 246 190 14 246 190 14 246 190 14
46306-246 190 14 246 190 14 246 190 14 246 190 14
46307-246 190 14 246 190 14 241 196 14 139 102 15
46308- 2 2 6 2 2 6 2 2 6 2 2 6
46309- 78 78 78 250 250 250 253 253 253 253 253 253
46310-253 253 253 253 253 253 253 253 253 253 253 253
46311-253 253 253 253 253 253 253 253 253 253 253 253
46312-253 253 253 253 253 253 253 253 253 253 253 253
46313-250 250 250 214 214 214 198 198 198 190 150 46
46314-219 162 10 236 178 12 234 174 13 224 166 10
46315-216 158 10 213 154 11 213 154 11 216 158 10
46316-226 170 11 239 182 13 246 190 14 246 190 14
46317-246 190 14 246 190 14 242 186 14 206 162 42
46318-101 101 101 58 58 58 30 30 30 14 14 14
46319- 6 6 6 0 0 0 0 0 0 0 0 0
46320- 0 0 0 0 0 0 0 0 0 0 0 0
46321- 0 0 0 0 0 0 0 0 0 0 0 0
46322- 0 0 0 0 0 0 0 0 0 10 10 10
46323- 30 30 30 74 74 74 174 135 50 216 158 10
46324-236 178 12 246 190 14 246 190 14 246 190 14
46325-246 190 14 246 190 14 246 190 14 246 190 14
46326-246 190 14 246 190 14 246 190 14 246 190 14
46327-246 190 14 246 190 14 241 196 14 226 184 13
46328- 61 42 6 2 2 6 2 2 6 2 2 6
46329- 22 22 22 238 238 238 253 253 253 253 253 253
46330-253 253 253 253 253 253 253 253 253 253 253 253
46331-253 253 253 253 253 253 253 253 253 253 253 253
46332-253 253 253 253 253 253 253 253 253 253 253 253
46333-253 253 253 226 226 226 187 187 187 180 133 36
46334-216 158 10 236 178 12 239 182 13 236 178 12
46335-230 174 11 226 170 11 226 170 11 230 174 11
46336-236 178 12 242 186 14 246 190 14 246 190 14
46337-246 190 14 246 190 14 246 186 14 239 182 13
46338-206 162 42 106 106 106 66 66 66 34 34 34
46339- 14 14 14 6 6 6 0 0 0 0 0 0
46340- 0 0 0 0 0 0 0 0 0 0 0 0
46341- 0 0 0 0 0 0 0 0 0 0 0 0
46342- 0 0 0 0 0 0 0 0 0 6 6 6
46343- 26 26 26 70 70 70 163 133 67 213 154 11
46344-236 178 12 246 190 14 246 190 14 246 190 14
46345-246 190 14 246 190 14 246 190 14 246 190 14
46346-246 190 14 246 190 14 246 190 14 246 190 14
46347-246 190 14 246 190 14 246 190 14 241 196 14
46348-190 146 13 18 14 6 2 2 6 2 2 6
46349- 46 46 46 246 246 246 253 253 253 253 253 253
46350-253 253 253 253 253 253 253 253 253 253 253 253
46351-253 253 253 253 253 253 253 253 253 253 253 253
46352-253 253 253 253 253 253 253 253 253 253 253 253
46353-253 253 253 221 221 221 86 86 86 156 107 11
46354-216 158 10 236 178 12 242 186 14 246 186 14
46355-242 186 14 239 182 13 239 182 13 242 186 14
46356-242 186 14 246 186 14 246 190 14 246 190 14
46357-246 190 14 246 190 14 246 190 14 246 190 14
46358-242 186 14 225 175 15 142 122 72 66 66 66
46359- 30 30 30 10 10 10 0 0 0 0 0 0
46360- 0 0 0 0 0 0 0 0 0 0 0 0
46361- 0 0 0 0 0 0 0 0 0 0 0 0
46362- 0 0 0 0 0 0 0 0 0 6 6 6
46363- 26 26 26 70 70 70 163 133 67 210 150 10
46364-236 178 12 246 190 14 246 190 14 246 190 14
46365-246 190 14 246 190 14 246 190 14 246 190 14
46366-246 190 14 246 190 14 246 190 14 246 190 14
46367-246 190 14 246 190 14 246 190 14 246 190 14
46368-232 195 16 121 92 8 34 34 34 106 106 106
46369-221 221 221 253 253 253 253 253 253 253 253 253
46370-253 253 253 253 253 253 253 253 253 253 253 253
46371-253 253 253 253 253 253 253 253 253 253 253 253
46372-253 253 253 253 253 253 253 253 253 253 253 253
46373-242 242 242 82 82 82 18 14 6 163 110 8
46374-216 158 10 236 178 12 242 186 14 246 190 14
46375-246 190 14 246 190 14 246 190 14 246 190 14
46376-246 190 14 246 190 14 246 190 14 246 190 14
46377-246 190 14 246 190 14 246 190 14 246 190 14
46378-246 190 14 246 190 14 242 186 14 163 133 67
46379- 46 46 46 18 18 18 6 6 6 0 0 0
46380- 0 0 0 0 0 0 0 0 0 0 0 0
46381- 0 0 0 0 0 0 0 0 0 0 0 0
46382- 0 0 0 0 0 0 0 0 0 10 10 10
46383- 30 30 30 78 78 78 163 133 67 210 150 10
46384-236 178 12 246 186 14 246 190 14 246 190 14
46385-246 190 14 246 190 14 246 190 14 246 190 14
46386-246 190 14 246 190 14 246 190 14 246 190 14
46387-246 190 14 246 190 14 246 190 14 246 190 14
46388-241 196 14 215 174 15 190 178 144 253 253 253
46389-253 253 253 253 253 253 253 253 253 253 253 253
46390-253 253 253 253 253 253 253 253 253 253 253 253
46391-253 253 253 253 253 253 253 253 253 253 253 253
46392-253 253 253 253 253 253 253 253 253 218 218 218
46393- 58 58 58 2 2 6 22 18 6 167 114 7
46394-216 158 10 236 178 12 246 186 14 246 190 14
46395-246 190 14 246 190 14 246 190 14 246 190 14
46396-246 190 14 246 190 14 246 190 14 246 190 14
46397-246 190 14 246 190 14 246 190 14 246 190 14
46398-246 190 14 246 186 14 242 186 14 190 150 46
46399- 54 54 54 22 22 22 6 6 6 0 0 0
46400- 0 0 0 0 0 0 0 0 0 0 0 0
46401- 0 0 0 0 0 0 0 0 0 0 0 0
46402- 0 0 0 0 0 0 0 0 0 14 14 14
46403- 38 38 38 86 86 86 180 133 36 213 154 11
46404-236 178 12 246 186 14 246 190 14 246 190 14
46405-246 190 14 246 190 14 246 190 14 246 190 14
46406-246 190 14 246 190 14 246 190 14 246 190 14
46407-246 190 14 246 190 14 246 190 14 246 190 14
46408-246 190 14 232 195 16 190 146 13 214 214 214
46409-253 253 253 253 253 253 253 253 253 253 253 253
46410-253 253 253 253 253 253 253 253 253 253 253 253
46411-253 253 253 253 253 253 253 253 253 253 253 253
46412-253 253 253 250 250 250 170 170 170 26 26 26
46413- 2 2 6 2 2 6 37 26 9 163 110 8
46414-219 162 10 239 182 13 246 186 14 246 190 14
46415-246 190 14 246 190 14 246 190 14 246 190 14
46416-246 190 14 246 190 14 246 190 14 246 190 14
46417-246 190 14 246 190 14 246 190 14 246 190 14
46418-246 186 14 236 178 12 224 166 10 142 122 72
46419- 46 46 46 18 18 18 6 6 6 0 0 0
46420- 0 0 0 0 0 0 0 0 0 0 0 0
46421- 0 0 0 0 0 0 0 0 0 0 0 0
46422- 0 0 0 0 0 0 6 6 6 18 18 18
46423- 50 50 50 109 106 95 192 133 9 224 166 10
46424-242 186 14 246 190 14 246 190 14 246 190 14
46425-246 190 14 246 190 14 246 190 14 246 190 14
46426-246 190 14 246 190 14 246 190 14 246 190 14
46427-246 190 14 246 190 14 246 190 14 246 190 14
46428-242 186 14 226 184 13 210 162 10 142 110 46
46429-226 226 226 253 253 253 253 253 253 253 253 253
46430-253 253 253 253 253 253 253 253 253 253 253 253
46431-253 253 253 253 253 253 253 253 253 253 253 253
46432-198 198 198 66 66 66 2 2 6 2 2 6
46433- 2 2 6 2 2 6 50 34 6 156 107 11
46434-219 162 10 239 182 13 246 186 14 246 190 14
46435-246 190 14 246 190 14 246 190 14 246 190 14
46436-246 190 14 246 190 14 246 190 14 246 190 14
46437-246 190 14 246 190 14 246 190 14 242 186 14
46438-234 174 13 213 154 11 154 122 46 66 66 66
46439- 30 30 30 10 10 10 0 0 0 0 0 0
46440- 0 0 0 0 0 0 0 0 0 0 0 0
46441- 0 0 0 0 0 0 0 0 0 0 0 0
46442- 0 0 0 0 0 0 6 6 6 22 22 22
46443- 58 58 58 154 121 60 206 145 10 234 174 13
46444-242 186 14 246 186 14 246 190 14 246 190 14
46445-246 190 14 246 190 14 246 190 14 246 190 14
46446-246 190 14 246 190 14 246 190 14 246 190 14
46447-246 190 14 246 190 14 246 190 14 246 190 14
46448-246 186 14 236 178 12 210 162 10 163 110 8
46449- 61 42 6 138 138 138 218 218 218 250 250 250
46450-253 253 253 253 253 253 253 253 253 250 250 250
46451-242 242 242 210 210 210 144 144 144 66 66 66
46452- 6 6 6 2 2 6 2 2 6 2 2 6
46453- 2 2 6 2 2 6 61 42 6 163 110 8
46454-216 158 10 236 178 12 246 190 14 246 190 14
46455-246 190 14 246 190 14 246 190 14 246 190 14
46456-246 190 14 246 190 14 246 190 14 246 190 14
46457-246 190 14 239 182 13 230 174 11 216 158 10
46458-190 142 34 124 112 88 70 70 70 38 38 38
46459- 18 18 18 6 6 6 0 0 0 0 0 0
46460- 0 0 0 0 0 0 0 0 0 0 0 0
46461- 0 0 0 0 0 0 0 0 0 0 0 0
46462- 0 0 0 0 0 0 6 6 6 22 22 22
46463- 62 62 62 168 124 44 206 145 10 224 166 10
46464-236 178 12 239 182 13 242 186 14 242 186 14
46465-246 186 14 246 190 14 246 190 14 246 190 14
46466-246 190 14 246 190 14 246 190 14 246 190 14
46467-246 190 14 246 190 14 246 190 14 246 190 14
46468-246 190 14 236 178 12 216 158 10 175 118 6
46469- 80 54 7 2 2 6 6 6 6 30 30 30
46470- 54 54 54 62 62 62 50 50 50 38 38 38
46471- 14 14 14 2 2 6 2 2 6 2 2 6
46472- 2 2 6 2 2 6 2 2 6 2 2 6
46473- 2 2 6 6 6 6 80 54 7 167 114 7
46474-213 154 11 236 178 12 246 190 14 246 190 14
46475-246 190 14 246 190 14 246 190 14 246 190 14
46476-246 190 14 242 186 14 239 182 13 239 182 13
46477-230 174 11 210 150 10 174 135 50 124 112 88
46478- 82 82 82 54 54 54 34 34 34 18 18 18
46479- 6 6 6 0 0 0 0 0 0 0 0 0
46480- 0 0 0 0 0 0 0 0 0 0 0 0
46481- 0 0 0 0 0 0 0 0 0 0 0 0
46482- 0 0 0 0 0 0 6 6 6 18 18 18
46483- 50 50 50 158 118 36 192 133 9 200 144 11
46484-216 158 10 219 162 10 224 166 10 226 170 11
46485-230 174 11 236 178 12 239 182 13 239 182 13
46486-242 186 14 246 186 14 246 190 14 246 190 14
46487-246 190 14 246 190 14 246 190 14 246 190 14
46488-246 186 14 230 174 11 210 150 10 163 110 8
46489-104 69 6 10 10 10 2 2 6 2 2 6
46490- 2 2 6 2 2 6 2 2 6 2 2 6
46491- 2 2 6 2 2 6 2 2 6 2 2 6
46492- 2 2 6 2 2 6 2 2 6 2 2 6
46493- 2 2 6 6 6 6 91 60 6 167 114 7
46494-206 145 10 230 174 11 242 186 14 246 190 14
46495-246 190 14 246 190 14 246 186 14 242 186 14
46496-239 182 13 230 174 11 224 166 10 213 154 11
46497-180 133 36 124 112 88 86 86 86 58 58 58
46498- 38 38 38 22 22 22 10 10 10 6 6 6
46499- 0 0 0 0 0 0 0 0 0 0 0 0
46500- 0 0 0 0 0 0 0 0 0 0 0 0
46501- 0 0 0 0 0 0 0 0 0 0 0 0
46502- 0 0 0 0 0 0 0 0 0 14 14 14
46503- 34 34 34 70 70 70 138 110 50 158 118 36
46504-167 114 7 180 123 7 192 133 9 197 138 11
46505-200 144 11 206 145 10 213 154 11 219 162 10
46506-224 166 10 230 174 11 239 182 13 242 186 14
46507-246 186 14 246 186 14 246 186 14 246 186 14
46508-239 182 13 216 158 10 185 133 11 152 99 6
46509-104 69 6 18 14 6 2 2 6 2 2 6
46510- 2 2 6 2 2 6 2 2 6 2 2 6
46511- 2 2 6 2 2 6 2 2 6 2 2 6
46512- 2 2 6 2 2 6 2 2 6 2 2 6
46513- 2 2 6 6 6 6 80 54 7 152 99 6
46514-192 133 9 219 162 10 236 178 12 239 182 13
46515-246 186 14 242 186 14 239 182 13 236 178 12
46516-224 166 10 206 145 10 192 133 9 154 121 60
46517- 94 94 94 62 62 62 42 42 42 22 22 22
46518- 14 14 14 6 6 6 0 0 0 0 0 0
46519- 0 0 0 0 0 0 0 0 0 0 0 0
46520- 0 0 0 0 0 0 0 0 0 0 0 0
46521- 0 0 0 0 0 0 0 0 0 0 0 0
46522- 0 0 0 0 0 0 0 0 0 6 6 6
46523- 18 18 18 34 34 34 58 58 58 78 78 78
46524-101 98 89 124 112 88 142 110 46 156 107 11
46525-163 110 8 167 114 7 175 118 6 180 123 7
46526-185 133 11 197 138 11 210 150 10 219 162 10
46527-226 170 11 236 178 12 236 178 12 234 174 13
46528-219 162 10 197 138 11 163 110 8 130 83 6
46529- 91 60 6 10 10 10 2 2 6 2 2 6
46530- 18 18 18 38 38 38 38 38 38 38 38 38
46531- 38 38 38 38 38 38 38 38 38 38 38 38
46532- 38 38 38 38 38 38 26 26 26 2 2 6
46533- 2 2 6 6 6 6 70 47 6 137 92 6
46534-175 118 6 200 144 11 219 162 10 230 174 11
46535-234 174 13 230 174 11 219 162 10 210 150 10
46536-192 133 9 163 110 8 124 112 88 82 82 82
46537- 50 50 50 30 30 30 14 14 14 6 6 6
46538- 0 0 0 0 0 0 0 0 0 0 0 0
46539- 0 0 0 0 0 0 0 0 0 0 0 0
46540- 0 0 0 0 0 0 0 0 0 0 0 0
46541- 0 0 0 0 0 0 0 0 0 0 0 0
46542- 0 0 0 0 0 0 0 0 0 0 0 0
46543- 6 6 6 14 14 14 22 22 22 34 34 34
46544- 42 42 42 58 58 58 74 74 74 86 86 86
46545-101 98 89 122 102 70 130 98 46 121 87 25
46546-137 92 6 152 99 6 163 110 8 180 123 7
46547-185 133 11 197 138 11 206 145 10 200 144 11
46548-180 123 7 156 107 11 130 83 6 104 69 6
46549- 50 34 6 54 54 54 110 110 110 101 98 89
46550- 86 86 86 82 82 82 78 78 78 78 78 78
46551- 78 78 78 78 78 78 78 78 78 78 78 78
46552- 78 78 78 82 82 82 86 86 86 94 94 94
46553-106 106 106 101 101 101 86 66 34 124 80 6
46554-156 107 11 180 123 7 192 133 9 200 144 11
46555-206 145 10 200 144 11 192 133 9 175 118 6
46556-139 102 15 109 106 95 70 70 70 42 42 42
46557- 22 22 22 10 10 10 0 0 0 0 0 0
46558- 0 0 0 0 0 0 0 0 0 0 0 0
46559- 0 0 0 0 0 0 0 0 0 0 0 0
46560- 0 0 0 0 0 0 0 0 0 0 0 0
46561- 0 0 0 0 0 0 0 0 0 0 0 0
46562- 0 0 0 0 0 0 0 0 0 0 0 0
46563- 0 0 0 0 0 0 6 6 6 10 10 10
46564- 14 14 14 22 22 22 30 30 30 38 38 38
46565- 50 50 50 62 62 62 74 74 74 90 90 90
46566-101 98 89 112 100 78 121 87 25 124 80 6
46567-137 92 6 152 99 6 152 99 6 152 99 6
46568-138 86 6 124 80 6 98 70 6 86 66 30
46569-101 98 89 82 82 82 58 58 58 46 46 46
46570- 38 38 38 34 34 34 34 34 34 34 34 34
46571- 34 34 34 34 34 34 34 34 34 34 34 34
46572- 34 34 34 34 34 34 38 38 38 42 42 42
46573- 54 54 54 82 82 82 94 86 76 91 60 6
46574-134 86 6 156 107 11 167 114 7 175 118 6
46575-175 118 6 167 114 7 152 99 6 121 87 25
46576-101 98 89 62 62 62 34 34 34 18 18 18
46577- 6 6 6 0 0 0 0 0 0 0 0 0
46578- 0 0 0 0 0 0 0 0 0 0 0 0
46579- 0 0 0 0 0 0 0 0 0 0 0 0
46580- 0 0 0 0 0 0 0 0 0 0 0 0
46581- 0 0 0 0 0 0 0 0 0 0 0 0
46582- 0 0 0 0 0 0 0 0 0 0 0 0
46583- 0 0 0 0 0 0 0 0 0 0 0 0
46584- 0 0 0 6 6 6 6 6 6 10 10 10
46585- 18 18 18 22 22 22 30 30 30 42 42 42
46586- 50 50 50 66 66 66 86 86 86 101 98 89
46587-106 86 58 98 70 6 104 69 6 104 69 6
46588-104 69 6 91 60 6 82 62 34 90 90 90
46589- 62 62 62 38 38 38 22 22 22 14 14 14
46590- 10 10 10 10 10 10 10 10 10 10 10 10
46591- 10 10 10 10 10 10 6 6 6 10 10 10
46592- 10 10 10 10 10 10 10 10 10 14 14 14
46593- 22 22 22 42 42 42 70 70 70 89 81 66
46594- 80 54 7 104 69 6 124 80 6 137 92 6
46595-134 86 6 116 81 8 100 82 52 86 86 86
46596- 58 58 58 30 30 30 14 14 14 6 6 6
46597- 0 0 0 0 0 0 0 0 0 0 0 0
46598- 0 0 0 0 0 0 0 0 0 0 0 0
46599- 0 0 0 0 0 0 0 0 0 0 0 0
46600- 0 0 0 0 0 0 0 0 0 0 0 0
46601- 0 0 0 0 0 0 0 0 0 0 0 0
46602- 0 0 0 0 0 0 0 0 0 0 0 0
46603- 0 0 0 0 0 0 0 0 0 0 0 0
46604- 0 0 0 0 0 0 0 0 0 0 0 0
46605- 0 0 0 6 6 6 10 10 10 14 14 14
46606- 18 18 18 26 26 26 38 38 38 54 54 54
46607- 70 70 70 86 86 86 94 86 76 89 81 66
46608- 89 81 66 86 86 86 74 74 74 50 50 50
46609- 30 30 30 14 14 14 6 6 6 0 0 0
46610- 0 0 0 0 0 0 0 0 0 0 0 0
46611- 0 0 0 0 0 0 0 0 0 0 0 0
46612- 0 0 0 0 0 0 0 0 0 0 0 0
46613- 6 6 6 18 18 18 34 34 34 58 58 58
46614- 82 82 82 89 81 66 89 81 66 89 81 66
46615- 94 86 66 94 86 76 74 74 74 50 50 50
46616- 26 26 26 14 14 14 6 6 6 0 0 0
46617- 0 0 0 0 0 0 0 0 0 0 0 0
46618- 0 0 0 0 0 0 0 0 0 0 0 0
46619- 0 0 0 0 0 0 0 0 0 0 0 0
46620- 0 0 0 0 0 0 0 0 0 0 0 0
46621- 0 0 0 0 0 0 0 0 0 0 0 0
46622- 0 0 0 0 0 0 0 0 0 0 0 0
46623- 0 0 0 0 0 0 0 0 0 0 0 0
46624- 0 0 0 0 0 0 0 0 0 0 0 0
46625- 0 0 0 0 0 0 0 0 0 0 0 0
46626- 6 6 6 6 6 6 14 14 14 18 18 18
46627- 30 30 30 38 38 38 46 46 46 54 54 54
46628- 50 50 50 42 42 42 30 30 30 18 18 18
46629- 10 10 10 0 0 0 0 0 0 0 0 0
46630- 0 0 0 0 0 0 0 0 0 0 0 0
46631- 0 0 0 0 0 0 0 0 0 0 0 0
46632- 0 0 0 0 0 0 0 0 0 0 0 0
46633- 0 0 0 6 6 6 14 14 14 26 26 26
46634- 38 38 38 50 50 50 58 58 58 58 58 58
46635- 54 54 54 42 42 42 30 30 30 18 18 18
46636- 10 10 10 0 0 0 0 0 0 0 0 0
46637- 0 0 0 0 0 0 0 0 0 0 0 0
46638- 0 0 0 0 0 0 0 0 0 0 0 0
46639- 0 0 0 0 0 0 0 0 0 0 0 0
46640- 0 0 0 0 0 0 0 0 0 0 0 0
46641- 0 0 0 0 0 0 0 0 0 0 0 0
46642- 0 0 0 0 0 0 0 0 0 0 0 0
46643- 0 0 0 0 0 0 0 0 0 0 0 0
46644- 0 0 0 0 0 0 0 0 0 0 0 0
46645- 0 0 0 0 0 0 0 0 0 0 0 0
46646- 0 0 0 0 0 0 0 0 0 6 6 6
46647- 6 6 6 10 10 10 14 14 14 18 18 18
46648- 18 18 18 14 14 14 10 10 10 6 6 6
46649- 0 0 0 0 0 0 0 0 0 0 0 0
46650- 0 0 0 0 0 0 0 0 0 0 0 0
46651- 0 0 0 0 0 0 0 0 0 0 0 0
46652- 0 0 0 0 0 0 0 0 0 0 0 0
46653- 0 0 0 0 0 0 0 0 0 6 6 6
46654- 14 14 14 18 18 18 22 22 22 22 22 22
46655- 18 18 18 14 14 14 10 10 10 6 6 6
46656- 0 0 0 0 0 0 0 0 0 0 0 0
46657- 0 0 0 0 0 0 0 0 0 0 0 0
46658- 0 0 0 0 0 0 0 0 0 0 0 0
46659- 0 0 0 0 0 0 0 0 0 0 0 0
46660- 0 0 0 0 0 0 0 0 0 0 0 0
46661+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46662+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46663+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46664+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46665+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46666+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46667+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46668+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46669+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46670+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46671+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46672+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46673+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46674+4 4 4 4 4 4
46675+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46676+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46677+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46678+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46679+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46680+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46681+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46682+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46683+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46684+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46685+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46686+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46687+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46688+4 4 4 4 4 4
46689+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46690+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46691+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46692+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46693+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46694+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46695+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46696+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46697+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46698+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46699+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46700+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46701+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46702+4 4 4 4 4 4
46703+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46704+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46705+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46706+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46707+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46708+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46709+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46710+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46711+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46712+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46713+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46714+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46715+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46716+4 4 4 4 4 4
46717+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46718+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46719+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46720+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46721+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46722+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46723+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46724+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46725+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46726+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46727+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46728+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46729+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46730+4 4 4 4 4 4
46731+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46732+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46733+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46734+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46735+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46736+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46737+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46738+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46739+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46740+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46741+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46742+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46743+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46744+4 4 4 4 4 4
46745+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46746+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46747+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46748+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46749+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
46750+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
46751+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46752+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46753+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46754+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
46755+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
46756+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
46757+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46758+4 4 4 4 4 4
46759+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46760+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46761+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46762+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46763+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
46764+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
46765+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46766+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46767+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46768+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
46769+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
46770+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
46771+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46772+4 4 4 4 4 4
46773+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46774+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46775+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46776+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46777+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
46778+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
46779+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
46780+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46781+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46782+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
46783+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
46784+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
46785+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
46786+4 4 4 4 4 4
46787+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46788+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46789+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46790+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
46791+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
46792+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
46793+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
46794+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46795+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
46796+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
46797+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
46798+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
46799+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
46800+4 4 4 4 4 4
46801+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46802+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46803+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46804+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
46805+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
46806+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
46807+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
46808+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
46809+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
46810+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
46811+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
46812+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
46813+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
46814+4 4 4 4 4 4
46815+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46816+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46817+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
46818+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
46819+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
46820+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
46821+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
46822+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
46823+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
46824+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
46825+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
46826+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
46827+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
46828+4 4 4 4 4 4
46829+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46830+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46831+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
46832+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
46833+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
46834+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
46835+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
46836+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
46837+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
46838+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
46839+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
46840+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
46841+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
46842+4 4 4 4 4 4
46843+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46844+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46845+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
46846+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
46847+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
46848+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
46849+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
46850+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
46851+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
46852+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
46853+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
46854+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
46855+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
46856+4 4 4 4 4 4
46857+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46858+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46859+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
46860+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
46861+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
46862+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
46863+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
46864+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
46865+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
46866+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
46867+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
46868+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
46869+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
46870+4 4 4 4 4 4
46871+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46872+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46873+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
46874+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
46875+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
46876+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
46877+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
46878+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
46879+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
46880+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
46881+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
46882+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
46883+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
46884+4 4 4 4 4 4
46885+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46886+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
46887+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
46888+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
46889+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
46890+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
46891+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
46892+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
46893+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
46894+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
46895+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
46896+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
46897+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
46898+4 4 4 4 4 4
46899+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46900+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
46901+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
46902+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
46903+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
46904+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
46905+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
46906+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
46907+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
46908+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
46909+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
46910+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
46911+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
46912+0 0 0 4 4 4
46913+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
46914+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
46915+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
46916+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
46917+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
46918+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
46919+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
46920+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
46921+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
46922+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
46923+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
46924+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
46925+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
46926+2 0 0 0 0 0
46927+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
46928+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
46929+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
46930+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
46931+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
46932+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
46933+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
46934+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
46935+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
46936+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
46937+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
46938+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
46939+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
46940+37 38 37 0 0 0
46941+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
46942+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
46943+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
46944+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
46945+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
46946+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
46947+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
46948+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
46949+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
46950+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
46951+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
46952+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
46953+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
46954+85 115 134 4 0 0
46955+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
46956+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
46957+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
46958+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
46959+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
46960+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
46961+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
46962+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
46963+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
46964+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
46965+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
46966+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
46967+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
46968+60 73 81 4 0 0
46969+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
46970+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
46971+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
46972+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
46973+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
46974+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
46975+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
46976+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
46977+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
46978+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
46979+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
46980+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
46981+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
46982+16 19 21 4 0 0
46983+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
46984+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
46985+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
46986+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
46987+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
46988+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
46989+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
46990+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
46991+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
46992+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
46993+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
46994+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
46995+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
46996+4 0 0 4 3 3
46997+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
46998+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
46999+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
47000+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
47001+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
47002+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
47003+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
47004+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
47005+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
47006+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
47007+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
47008+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
47009+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
47010+3 2 2 4 4 4
47011+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
47012+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
47013+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
47014+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
47015+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
47016+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
47017+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
47018+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
47019+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
47020+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
47021+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
47022+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
47023+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
47024+4 4 4 4 4 4
47025+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
47026+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
47027+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
47028+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
47029+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
47030+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
47031+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
47032+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
47033+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
47034+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
47035+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
47036+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
47037+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
47038+4 4 4 4 4 4
47039+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
47040+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
47041+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
47042+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
47043+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
47044+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
47045+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
47046+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
47047+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
47048+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
47049+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
47050+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
47051+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
47052+5 5 5 5 5 5
47053+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
47054+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
47055+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
47056+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
47057+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
47058+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
47059+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
47060+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
47061+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
47062+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
47063+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
47064+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
47065+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
47066+5 5 5 4 4 4
47067+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
47068+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
47069+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
47070+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
47071+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
47072+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
47073+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
47074+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
47075+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
47076+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
47077+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
47078+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
47079+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47080+4 4 4 4 4 4
47081+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
47082+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
47083+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
47084+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
47085+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
47086+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
47087+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
47088+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
47089+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
47090+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
47091+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
47092+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
47093+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47094+4 4 4 4 4 4
47095+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
47096+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
47097+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
47098+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
47099+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
47100+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
47101+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
47102+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
47103+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
47104+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
47105+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
47106+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47107+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47108+4 4 4 4 4 4
47109+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
47110+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
47111+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
47112+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
47113+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
47114+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
47115+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
47116+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
47117+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
47118+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
47119+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
47120+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47121+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47122+4 4 4 4 4 4
47123+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
47124+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
47125+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
47126+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
47127+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
47128+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
47129+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
47130+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
47131+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
47132+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
47133+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47134+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47135+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47136+4 4 4 4 4 4
47137+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
47138+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
47139+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
47140+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
47141+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
47142+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
47143+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
47144+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
47145+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
47146+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
47147+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
47148+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47149+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47150+4 4 4 4 4 4
47151+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
47152+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
47153+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
47154+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
47155+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
47156+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
47157+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
47158+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
47159+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
47160+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
47161+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
47162+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47163+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47164+4 4 4 4 4 4
47165+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
47166+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
47167+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
47168+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
47169+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
47170+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
47171+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
47172+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
47173+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
47174+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
47175+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47176+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47177+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47178+4 4 4 4 4 4
47179+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
47180+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
47181+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
47182+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
47183+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
47184+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
47185+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
47186+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
47187+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
47188+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
47189+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47190+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47191+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47192+4 4 4 4 4 4
47193+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
47194+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
47195+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
47196+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
47197+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
47198+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
47199+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
47200+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
47201+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
47202+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
47203+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47204+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47205+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47206+4 4 4 4 4 4
47207+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
47208+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
47209+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
47210+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
47211+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
47212+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
47213+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
47214+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
47215+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
47216+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47217+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47218+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47219+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47220+4 4 4 4 4 4
47221+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
47222+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
47223+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
47224+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
47225+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
47226+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
47227+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
47228+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
47229+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
47230+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47231+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47232+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47233+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47234+4 4 4 4 4 4
47235+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
47236+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
47237+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
47238+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
47239+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
47240+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
47241+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
47242+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
47243+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
47244+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47245+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47246+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47247+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47248+4 4 4 4 4 4
47249+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
47250+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
47251+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
47252+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
47253+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
47254+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
47255+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
47256+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
47257+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
47258+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47259+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47260+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47261+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47262+4 4 4 4 4 4
47263+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
47264+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
47265+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
47266+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
47267+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
47268+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
47269+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
47270+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
47271+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
47272+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47273+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47274+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47275+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47276+4 4 4 4 4 4
47277+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
47278+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
47279+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
47280+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
47281+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
47282+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
47283+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
47284+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
47285+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
47286+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47287+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47288+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47289+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47290+4 4 4 4 4 4
47291+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
47292+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
47293+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
47294+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
47295+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
47296+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
47297+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
47298+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
47299+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
47300+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47301+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47302+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47303+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47304+4 4 4 4 4 4
47305+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
47306+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
47307+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
47308+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
47309+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
47310+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
47311+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
47312+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
47313+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
47314+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47315+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47316+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47317+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47318+4 4 4 4 4 4
47319+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
47320+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
47321+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
47322+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
47323+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
47324+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
47325+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
47326+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
47327+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
47328+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47329+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47330+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47331+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47332+4 4 4 4 4 4
47333+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
47334+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
47335+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
47336+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
47337+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
47338+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
47339+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
47340+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
47341+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
47342+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47343+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47344+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47345+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47346+4 4 4 4 4 4
47347+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
47348+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
47349+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
47350+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
47351+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
47352+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
47353+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
47354+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
47355+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
47356+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47357+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47358+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47359+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47360+4 4 4 4 4 4
47361+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
47362+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
47363+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
47364+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
47365+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
47366+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
47367+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
47368+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
47369+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
47370+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47371+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47372+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47373+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47374+4 4 4 4 4 4
47375+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
47376+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
47377+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
47378+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
47379+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
47380+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
47381+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
47382+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
47383+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
47384+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47385+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47386+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47387+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47388+4 4 4 4 4 4
47389+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
47390+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
47391+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
47392+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
47393+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
47394+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
47395+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
47396+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
47397+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
47398+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47399+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47400+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47401+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47402+4 4 4 4 4 4
47403+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
47404+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
47405+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
47406+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
47407+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
47408+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
47409+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
47410+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
47411+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
47412+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47413+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47414+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47415+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47416+4 4 4 4 4 4
47417+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
47418+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
47419+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
47420+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
47421+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
47422+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
47423+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
47424+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
47425+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
47426+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
47427+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47428+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47429+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47430+4 4 4 4 4 4
47431+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
47432+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
47433+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
47434+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
47435+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
47436+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
47437+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
47438+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
47439+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
47440+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
47441+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47442+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47443+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47444+4 4 4 4 4 4
47445+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
47446+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
47447+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
47448+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
47449+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
47450+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
47451+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47452+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
47453+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
47454+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
47455+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
47456+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47457+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47458+4 4 4 4 4 4
47459+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
47460+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
47461+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
47462+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
47463+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
47464+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
47465+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
47466+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
47467+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
47468+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
47469+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47470+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47471+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47472+4 4 4 4 4 4
47473+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
47474+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
47475+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
47476+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
47477+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
47478+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
47479+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
47480+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
47481+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
47482+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
47483+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47484+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47485+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47486+4 4 4 4 4 4
47487+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
47488+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
47489+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
47490+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
47491+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
47492+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
47493+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
47494+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
47495+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
47496+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
47497+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47498+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47499+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47500+4 4 4 4 4 4
47501+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
47502+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
47503+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
47504+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
47505+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
47506+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
47507+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
47508+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
47509+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
47510+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
47511+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47512+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47513+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47514+4 4 4 4 4 4
47515+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
47516+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
47517+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
47518+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
47519+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
47520+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
47521+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
47522+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
47523+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
47524+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
47525+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47526+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47527+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47528+4 4 4 4 4 4
47529+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
47530+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
47531+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
47532+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
47533+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
47534+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
47535+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
47536+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
47537+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
47538+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47539+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47540+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47541+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47542+4 4 4 4 4 4
47543+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
47544+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
47545+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
47546+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
47547+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
47548+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
47549+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
47550+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
47551+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
47552+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47553+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47554+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47555+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47556+4 4 4 4 4 4
47557+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
47558+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
47559+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
47560+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
47561+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
47562+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
47563+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
47564+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
47565+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47566+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47567+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47568+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47569+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47570+4 4 4 4 4 4
47571+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
47572+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
47573+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
47574+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
47575+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
47576+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
47577+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
47578+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
47579+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47580+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47581+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47582+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47583+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47584+4 4 4 4 4 4
47585+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
47586+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
47587+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
47588+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
47589+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
47590+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
47591+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
47592+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
47593+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47594+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47595+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47596+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47597+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47598+4 4 4 4 4 4
47599+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
47600+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
47601+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
47602+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
47603+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
47604+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
47605+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
47606+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
47607+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47608+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47609+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47610+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47611+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47612+4 4 4 4 4 4
47613+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47614+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
47615+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
47616+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
47617+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
47618+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
47619+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
47620+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
47621+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47622+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47623+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47624+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47625+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47626+4 4 4 4 4 4
47627+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47628+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
47629+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
47630+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
47631+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
47632+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
47633+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
47634+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
47635+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47636+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47637+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47638+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47639+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47640+4 4 4 4 4 4
47641+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47642+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47643+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
47644+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
47645+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
47646+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
47647+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
47648+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
47649+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47650+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47651+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47652+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47653+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47654+4 4 4 4 4 4
47655+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47656+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47657+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
47658+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
47659+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
47660+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
47661+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
47662+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47663+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47664+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47665+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47666+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47667+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47668+4 4 4 4 4 4
47669+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47670+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47671+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47672+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
47673+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
47674+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
47675+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
47676+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47677+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47678+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47679+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47680+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47681+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47682+4 4 4 4 4 4
47683+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47684+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47685+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47686+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
47687+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
47688+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
47689+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
47690+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47691+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47692+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47693+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47694+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47695+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47696+4 4 4 4 4 4
47697+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47698+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47699+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47700+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
47701+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
47702+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
47703+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
47704+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47705+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47706+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47707+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47708+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47709+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47710+4 4 4 4 4 4
47711+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47712+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47713+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47714+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
47715+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
47716+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
47717+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47718+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47719+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47720+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47721+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47722+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47723+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47724+4 4 4 4 4 4
47725+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47726+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47727+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47728+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47729+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
47730+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
47731+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
47732+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47733+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47734+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47735+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47736+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47737+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47738+4 4 4 4 4 4
47739+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47740+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47741+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47742+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47743+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
47744+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
47745+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47746+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47747+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47748+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47749+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47750+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47751+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47752+4 4 4 4 4 4
47753+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47754+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47755+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47756+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47757+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
47758+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
47759+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47760+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47761+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47762+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47763+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47764+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47765+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47766+4 4 4 4 4 4
47767+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47768+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47769+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47770+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47771+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
47772+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
47773+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47774+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47775+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47776+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47777+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47778+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47779+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47780+4 4 4 4 4 4
47781diff --git a/drivers/video/mb862xx/mb862xxfb_accel.c b/drivers/video/mb862xx/mb862xxfb_accel.c
47782index fe92eed..106e085 100644
47783--- a/drivers/video/mb862xx/mb862xxfb_accel.c
47784+++ b/drivers/video/mb862xx/mb862xxfb_accel.c
47785@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
47786 struct mb862xxfb_par *par = info->par;
47787
47788 if (info->var.bits_per_pixel == 32) {
47789- info->fbops->fb_fillrect = cfb_fillrect;
47790- info->fbops->fb_copyarea = cfb_copyarea;
47791- info->fbops->fb_imageblit = cfb_imageblit;
47792+ pax_open_kernel();
47793+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
47794+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
47795+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
47796+ pax_close_kernel();
47797 } else {
47798 outreg(disp, GC_L0EM, 3);
47799- info->fbops->fb_fillrect = mb86290fb_fillrect;
47800- info->fbops->fb_copyarea = mb86290fb_copyarea;
47801- info->fbops->fb_imageblit = mb86290fb_imageblit;
47802+ pax_open_kernel();
47803+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
47804+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
47805+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
47806+ pax_close_kernel();
47807 }
47808 outreg(draw, GDC_REG_DRAW_BASE, 0);
47809 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
47810diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
47811index ff22871..b129bed 100644
47812--- a/drivers/video/nvidia/nvidia.c
47813+++ b/drivers/video/nvidia/nvidia.c
47814@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
47815 info->fix.line_length = (info->var.xres_virtual *
47816 info->var.bits_per_pixel) >> 3;
47817 if (info->var.accel_flags) {
47818- info->fbops->fb_imageblit = nvidiafb_imageblit;
47819- info->fbops->fb_fillrect = nvidiafb_fillrect;
47820- info->fbops->fb_copyarea = nvidiafb_copyarea;
47821- info->fbops->fb_sync = nvidiafb_sync;
47822+ pax_open_kernel();
47823+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
47824+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
47825+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
47826+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
47827+ pax_close_kernel();
47828 info->pixmap.scan_align = 4;
47829 info->flags &= ~FBINFO_HWACCEL_DISABLED;
47830 info->flags |= FBINFO_READS_FAST;
47831 NVResetGraphics(info);
47832 } else {
47833- info->fbops->fb_imageblit = cfb_imageblit;
47834- info->fbops->fb_fillrect = cfb_fillrect;
47835- info->fbops->fb_copyarea = cfb_copyarea;
47836- info->fbops->fb_sync = NULL;
47837+ pax_open_kernel();
47838+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
47839+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
47840+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
47841+ *(void **)&info->fbops->fb_sync = NULL;
47842+ pax_close_kernel();
47843 info->pixmap.scan_align = 1;
47844 info->flags |= FBINFO_HWACCEL_DISABLED;
47845 info->flags &= ~FBINFO_READS_FAST;
47846@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
47847 info->pixmap.size = 8 * 1024;
47848 info->pixmap.flags = FB_PIXMAP_SYSTEM;
47849
47850- if (!hwcur)
47851- info->fbops->fb_cursor = NULL;
47852+ if (!hwcur) {
47853+ pax_open_kernel();
47854+ *(void **)&info->fbops->fb_cursor = NULL;
47855+ pax_close_kernel();
47856+ }
47857
47858 info->var.accel_flags = (!noaccel);
47859
47860diff --git a/drivers/video/output.c b/drivers/video/output.c
47861index 0d6f2cd..6285b97 100644
47862--- a/drivers/video/output.c
47863+++ b/drivers/video/output.c
47864@@ -97,7 +97,7 @@ struct output_device *video_output_register(const char *name,
47865 new_dev->props = op;
47866 new_dev->dev.class = &video_output_class;
47867 new_dev->dev.parent = dev;
47868- dev_set_name(&new_dev->dev, name);
47869+ dev_set_name(&new_dev->dev, "%s", name);
47870 dev_set_drvdata(&new_dev->dev, devdata);
47871 ret_code = device_register(&new_dev->dev);
47872 if (ret_code) {
47873diff --git a/drivers/video/s1d13xxxfb.c b/drivers/video/s1d13xxxfb.c
47874index 76d9053..dec2bfd 100644
47875--- a/drivers/video/s1d13xxxfb.c
47876+++ b/drivers/video/s1d13xxxfb.c
47877@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
47878
47879 switch(prod_id) {
47880 case S1D13506_PROD_ID: /* activate acceleration */
47881- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
47882- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
47883+ pax_open_kernel();
47884+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
47885+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
47886+ pax_close_kernel();
47887 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
47888 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
47889 break;
47890diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
47891index 97bd662..39fab85 100644
47892--- a/drivers/video/smscufx.c
47893+++ b/drivers/video/smscufx.c
47894@@ -1171,7 +1171,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
47895 fb_deferred_io_cleanup(info);
47896 kfree(info->fbdefio);
47897 info->fbdefio = NULL;
47898- info->fbops->fb_mmap = ufx_ops_mmap;
47899+ pax_open_kernel();
47900+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
47901+ pax_close_kernel();
47902 }
47903
47904 pr_debug("released /dev/fb%d user=%d count=%d",
47905diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
47906index 86d449e..8e04dc5 100644
47907--- a/drivers/video/udlfb.c
47908+++ b/drivers/video/udlfb.c
47909@@ -619,11 +619,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
47910 dlfb_urb_completion(urb);
47911
47912 error:
47913- atomic_add(bytes_sent, &dev->bytes_sent);
47914- atomic_add(bytes_identical, &dev->bytes_identical);
47915- atomic_add(width*height*2, &dev->bytes_rendered);
47916+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
47917+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
47918+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
47919 end_cycles = get_cycles();
47920- atomic_add(((unsigned int) ((end_cycles - start_cycles)
47921+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
47922 >> 10)), /* Kcycles */
47923 &dev->cpu_kcycles_used);
47924
47925@@ -744,11 +744,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
47926 dlfb_urb_completion(urb);
47927
47928 error:
47929- atomic_add(bytes_sent, &dev->bytes_sent);
47930- atomic_add(bytes_identical, &dev->bytes_identical);
47931- atomic_add(bytes_rendered, &dev->bytes_rendered);
47932+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
47933+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
47934+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
47935 end_cycles = get_cycles();
47936- atomic_add(((unsigned int) ((end_cycles - start_cycles)
47937+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
47938 >> 10)), /* Kcycles */
47939 &dev->cpu_kcycles_used);
47940 }
47941@@ -989,7 +989,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
47942 fb_deferred_io_cleanup(info);
47943 kfree(info->fbdefio);
47944 info->fbdefio = NULL;
47945- info->fbops->fb_mmap = dlfb_ops_mmap;
47946+ pax_open_kernel();
47947+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
47948+ pax_close_kernel();
47949 }
47950
47951 pr_warn("released /dev/fb%d user=%d count=%d\n",
47952@@ -1372,7 +1374,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
47953 struct fb_info *fb_info = dev_get_drvdata(fbdev);
47954 struct dlfb_data *dev = fb_info->par;
47955 return snprintf(buf, PAGE_SIZE, "%u\n",
47956- atomic_read(&dev->bytes_rendered));
47957+ atomic_read_unchecked(&dev->bytes_rendered));
47958 }
47959
47960 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
47961@@ -1380,7 +1382,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
47962 struct fb_info *fb_info = dev_get_drvdata(fbdev);
47963 struct dlfb_data *dev = fb_info->par;
47964 return snprintf(buf, PAGE_SIZE, "%u\n",
47965- atomic_read(&dev->bytes_identical));
47966+ atomic_read_unchecked(&dev->bytes_identical));
47967 }
47968
47969 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
47970@@ -1388,7 +1390,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
47971 struct fb_info *fb_info = dev_get_drvdata(fbdev);
47972 struct dlfb_data *dev = fb_info->par;
47973 return snprintf(buf, PAGE_SIZE, "%u\n",
47974- atomic_read(&dev->bytes_sent));
47975+ atomic_read_unchecked(&dev->bytes_sent));
47976 }
47977
47978 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
47979@@ -1396,7 +1398,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
47980 struct fb_info *fb_info = dev_get_drvdata(fbdev);
47981 struct dlfb_data *dev = fb_info->par;
47982 return snprintf(buf, PAGE_SIZE, "%u\n",
47983- atomic_read(&dev->cpu_kcycles_used));
47984+ atomic_read_unchecked(&dev->cpu_kcycles_used));
47985 }
47986
47987 static ssize_t edid_show(
47988@@ -1456,10 +1458,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
47989 struct fb_info *fb_info = dev_get_drvdata(fbdev);
47990 struct dlfb_data *dev = fb_info->par;
47991
47992- atomic_set(&dev->bytes_rendered, 0);
47993- atomic_set(&dev->bytes_identical, 0);
47994- atomic_set(&dev->bytes_sent, 0);
47995- atomic_set(&dev->cpu_kcycles_used, 0);
47996+ atomic_set_unchecked(&dev->bytes_rendered, 0);
47997+ atomic_set_unchecked(&dev->bytes_identical, 0);
47998+ atomic_set_unchecked(&dev->bytes_sent, 0);
47999+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
48000
48001 return count;
48002 }
48003diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
48004index d428445..79a78df 100644
48005--- a/drivers/video/uvesafb.c
48006+++ b/drivers/video/uvesafb.c
48007@@ -19,6 +19,7 @@
48008 #include <linux/io.h>
48009 #include <linux/mutex.h>
48010 #include <linux/slab.h>
48011+#include <linux/moduleloader.h>
48012 #include <video/edid.h>
48013 #include <video/uvesafb.h>
48014 #ifdef CONFIG_X86
48015@@ -569,10 +570,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
48016 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
48017 par->pmi_setpal = par->ypan = 0;
48018 } else {
48019+
48020+#ifdef CONFIG_PAX_KERNEXEC
48021+#ifdef CONFIG_MODULES
48022+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
48023+#endif
48024+ if (!par->pmi_code) {
48025+ par->pmi_setpal = par->ypan = 0;
48026+ return 0;
48027+ }
48028+#endif
48029+
48030 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
48031 + task->t.regs.edi);
48032+
48033+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
48034+ pax_open_kernel();
48035+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
48036+ pax_close_kernel();
48037+
48038+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
48039+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
48040+#else
48041 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
48042 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
48043+#endif
48044+
48045 printk(KERN_INFO "uvesafb: protected mode interface info at "
48046 "%04x:%04x\n",
48047 (u16)task->t.regs.es, (u16)task->t.regs.edi);
48048@@ -817,13 +840,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
48049 par->ypan = ypan;
48050
48051 if (par->pmi_setpal || par->ypan) {
48052+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
48053 if (__supported_pte_mask & _PAGE_NX) {
48054 par->pmi_setpal = par->ypan = 0;
48055 printk(KERN_WARNING "uvesafb: NX protection is actively."
48056 "We have better not to use the PMI.\n");
48057- } else {
48058+ } else
48059+#endif
48060 uvesafb_vbe_getpmi(task, par);
48061- }
48062 }
48063 #else
48064 /* The protected mode interface is not available on non-x86. */
48065@@ -1457,8 +1481,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
48066 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
48067
48068 /* Disable blanking if the user requested so. */
48069- if (!blank)
48070- info->fbops->fb_blank = NULL;
48071+ if (!blank) {
48072+ pax_open_kernel();
48073+ *(void **)&info->fbops->fb_blank = NULL;
48074+ pax_close_kernel();
48075+ }
48076
48077 /*
48078 * Find out how much IO memory is required for the mode with
48079@@ -1534,8 +1561,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
48080 info->flags = FBINFO_FLAG_DEFAULT |
48081 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
48082
48083- if (!par->ypan)
48084- info->fbops->fb_pan_display = NULL;
48085+ if (!par->ypan) {
48086+ pax_open_kernel();
48087+ *(void **)&info->fbops->fb_pan_display = NULL;
48088+ pax_close_kernel();
48089+ }
48090 }
48091
48092 static void uvesafb_init_mtrr(struct fb_info *info)
48093@@ -1836,6 +1866,11 @@ out:
48094 if (par->vbe_modes)
48095 kfree(par->vbe_modes);
48096
48097+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
48098+ if (par->pmi_code)
48099+ module_free_exec(NULL, par->pmi_code);
48100+#endif
48101+
48102 framebuffer_release(info);
48103 return err;
48104 }
48105@@ -1862,6 +1897,12 @@ static int uvesafb_remove(struct platform_device *dev)
48106 kfree(par->vbe_state_orig);
48107 if (par->vbe_state_saved)
48108 kfree(par->vbe_state_saved);
48109+
48110+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
48111+ if (par->pmi_code)
48112+ module_free_exec(NULL, par->pmi_code);
48113+#endif
48114+
48115 }
48116
48117 framebuffer_release(info);
48118diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
48119index 501b340..d80aa17 100644
48120--- a/drivers/video/vesafb.c
48121+++ b/drivers/video/vesafb.c
48122@@ -9,6 +9,7 @@
48123 */
48124
48125 #include <linux/module.h>
48126+#include <linux/moduleloader.h>
48127 #include <linux/kernel.h>
48128 #include <linux/errno.h>
48129 #include <linux/string.h>
48130@@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
48131 static int vram_total __initdata; /* Set total amount of memory */
48132 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
48133 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
48134-static void (*pmi_start)(void) __read_mostly;
48135-static void (*pmi_pal) (void) __read_mostly;
48136+static void (*pmi_start)(void) __read_only;
48137+static void (*pmi_pal) (void) __read_only;
48138 static int depth __read_mostly;
48139 static int vga_compat __read_mostly;
48140 /* --------------------------------------------------------------------- */
48141@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
48142 unsigned int size_vmode;
48143 unsigned int size_remap;
48144 unsigned int size_total;
48145+ void *pmi_code = NULL;
48146
48147 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
48148 return -ENODEV;
48149@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
48150 size_remap = size_total;
48151 vesafb_fix.smem_len = size_remap;
48152
48153-#ifndef __i386__
48154- screen_info.vesapm_seg = 0;
48155-#endif
48156-
48157 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
48158 printk(KERN_WARNING
48159 "vesafb: cannot reserve video memory at 0x%lx\n",
48160@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
48161 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
48162 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
48163
48164+#ifdef __i386__
48165+
48166+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
48167+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
48168+ if (!pmi_code)
48169+#elif !defined(CONFIG_PAX_KERNEXEC)
48170+ if (0)
48171+#endif
48172+
48173+#endif
48174+ screen_info.vesapm_seg = 0;
48175+
48176 if (screen_info.vesapm_seg) {
48177- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
48178- screen_info.vesapm_seg,screen_info.vesapm_off);
48179+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
48180+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
48181 }
48182
48183 if (screen_info.vesapm_seg < 0xc000)
48184@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
48185
48186 if (ypan || pmi_setpal) {
48187 unsigned short *pmi_base;
48188+
48189 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
48190- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
48191- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
48192+
48193+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
48194+ pax_open_kernel();
48195+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
48196+#else
48197+ pmi_code = pmi_base;
48198+#endif
48199+
48200+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
48201+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
48202+
48203+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
48204+ pmi_start = ktva_ktla(pmi_start);
48205+ pmi_pal = ktva_ktla(pmi_pal);
48206+ pax_close_kernel();
48207+#endif
48208+
48209 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
48210 if (pmi_base[3]) {
48211 printk(KERN_INFO "vesafb: pmi: ports = ");
48212@@ -472,8 +498,11 @@ static int __init vesafb_probe(struct platform_device *dev)
48213 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
48214 (ypan ? FBINFO_HWACCEL_YPAN : 0);
48215
48216- if (!ypan)
48217- info->fbops->fb_pan_display = NULL;
48218+ if (!ypan) {
48219+ pax_open_kernel();
48220+ *(void **)&info->fbops->fb_pan_display = NULL;
48221+ pax_close_kernel();
48222+ }
48223
48224 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
48225 err = -ENOMEM;
48226@@ -488,6 +517,11 @@ static int __init vesafb_probe(struct platform_device *dev)
48227 info->node, info->fix.id);
48228 return 0;
48229 err:
48230+
48231+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
48232+ module_free_exec(NULL, pmi_code);
48233+#endif
48234+
48235 if (info->screen_base)
48236 iounmap(info->screen_base);
48237 framebuffer_release(info);
48238diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
48239index 88714ae..16c2e11 100644
48240--- a/drivers/video/via/via_clock.h
48241+++ b/drivers/video/via/via_clock.h
48242@@ -56,7 +56,7 @@ struct via_clock {
48243
48244 void (*set_engine_pll_state)(u8 state);
48245 void (*set_engine_pll)(struct via_pll_config config);
48246-};
48247+} __no_const;
48248
48249
48250 static inline u32 get_pll_internal_frequency(u32 ref_freq,
48251diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
48252index fef20db..d28b1ab 100644
48253--- a/drivers/xen/xenfs/xenstored.c
48254+++ b/drivers/xen/xenfs/xenstored.c
48255@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
48256 static int xsd_kva_open(struct inode *inode, struct file *file)
48257 {
48258 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
48259+#ifdef CONFIG_GRKERNSEC_HIDESYM
48260+ NULL);
48261+#else
48262 xen_store_interface);
48263+#endif
48264+
48265 if (!file->private_data)
48266 return -ENOMEM;
48267 return 0;
48268diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
48269index 0ad61c6..f198bd7 100644
48270--- a/fs/9p/vfs_addr.c
48271+++ b/fs/9p/vfs_addr.c
48272@@ -185,7 +185,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)
48273
48274 retval = v9fs_file_write_internal(inode,
48275 v9inode->writeback_fid,
48276- (__force const char __user *)buffer,
48277+ (const char __force_user *)buffer,
48278 len, &offset, 0);
48279 if (retval > 0)
48280 retval = 0;
48281diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
48282index d86edc8..40ff2fb 100644
48283--- a/fs/9p/vfs_inode.c
48284+++ b/fs/9p/vfs_inode.c
48285@@ -1314,7 +1314,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
48286 void
48287 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
48288 {
48289- char *s = nd_get_link(nd);
48290+ const char *s = nd_get_link(nd);
48291
48292 p9_debug(P9_DEBUG_VFS, " %s %s\n",
48293 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
48294diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
48295index 0efd152..b5802ad 100644
48296--- a/fs/Kconfig.binfmt
48297+++ b/fs/Kconfig.binfmt
48298@@ -89,7 +89,7 @@ config HAVE_AOUT
48299
48300 config BINFMT_AOUT
48301 tristate "Kernel support for a.out and ECOFF binaries"
48302- depends on HAVE_AOUT
48303+ depends on HAVE_AOUT && BROKEN
48304 ---help---
48305 A.out (Assembler.OUTput) is a set of formats for libraries and
48306 executables used in the earliest versions of UNIX. Linux used
48307diff --git a/fs/aio.c b/fs/aio.c
48308index 1dc8786..d3b29e8 100644
48309--- a/fs/aio.c
48310+++ b/fs/aio.c
48311@@ -111,7 +111,7 @@ static int aio_setup_ring(struct kioctx *ctx)
48312 size += sizeof(struct io_event) * nr_events;
48313 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
48314
48315- if (nr_pages < 0)
48316+ if (nr_pages <= 0)
48317 return -EINVAL;
48318
48319 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
48320@@ -1375,18 +1375,19 @@ static ssize_t aio_fsync(struct kiocb *iocb)
48321 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
48322 {
48323 ssize_t ret;
48324+ struct iovec iovstack;
48325
48326 #ifdef CONFIG_COMPAT
48327 if (compat)
48328 ret = compat_rw_copy_check_uvector(type,
48329 (struct compat_iovec __user *)kiocb->ki_buf,
48330- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
48331+ kiocb->ki_nbytes, 1, &iovstack,
48332 &kiocb->ki_iovec);
48333 else
48334 #endif
48335 ret = rw_copy_check_uvector(type,
48336 (struct iovec __user *)kiocb->ki_buf,
48337- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
48338+ kiocb->ki_nbytes, 1, &iovstack,
48339 &kiocb->ki_iovec);
48340 if (ret < 0)
48341 goto out;
48342@@ -1395,6 +1396,10 @@ static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
48343 if (ret < 0)
48344 goto out;
48345
48346+ if (kiocb->ki_iovec == &iovstack) {
48347+ kiocb->ki_inline_vec = iovstack;
48348+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
48349+ }
48350 kiocb->ki_nr_segs = kiocb->ki_nbytes;
48351 kiocb->ki_cur_seg = 0;
48352 /* ki_nbytes/left now reflect bytes instead of segs */
48353diff --git a/fs/attr.c b/fs/attr.c
48354index 1449adb..a2038c2 100644
48355--- a/fs/attr.c
48356+++ b/fs/attr.c
48357@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
48358 unsigned long limit;
48359
48360 limit = rlimit(RLIMIT_FSIZE);
48361+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
48362 if (limit != RLIM_INFINITY && offset > limit)
48363 goto out_sig;
48364 if (offset > inode->i_sb->s_maxbytes)
48365diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
48366index 3db70da..7aeec5b 100644
48367--- a/fs/autofs4/waitq.c
48368+++ b/fs/autofs4/waitq.c
48369@@ -59,7 +59,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
48370 {
48371 unsigned long sigpipe, flags;
48372 mm_segment_t fs;
48373- const char *data = (const char *)addr;
48374+ const char __user *data = (const char __force_user *)addr;
48375 ssize_t wr = 0;
48376
48377 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
48378@@ -346,6 +346,10 @@ static int validate_request(struct autofs_wait_queue **wait,
48379 return 1;
48380 }
48381
48382+#ifdef CONFIG_GRKERNSEC_HIDESYM
48383+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
48384+#endif
48385+
48386 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
48387 enum autofs_notify notify)
48388 {
48389@@ -379,7 +383,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
48390
48391 /* If this is a direct mount request create a dummy name */
48392 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
48393+#ifdef CONFIG_GRKERNSEC_HIDESYM
48394+ /* this name does get written to userland via autofs4_write() */
48395+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
48396+#else
48397 qstr.len = sprintf(name, "%p", dentry);
48398+#endif
48399 else {
48400 qstr.len = autofs4_getpath(sbi, dentry, &name);
48401 if (!qstr.len) {
48402diff --git a/fs/befs/endian.h b/fs/befs/endian.h
48403index 2722387..c8dd2a7 100644
48404--- a/fs/befs/endian.h
48405+++ b/fs/befs/endian.h
48406@@ -11,7 +11,7 @@
48407
48408 #include <asm/byteorder.h>
48409
48410-static inline u64
48411+static inline u64 __intentional_overflow(-1)
48412 fs64_to_cpu(const struct super_block *sb, fs64 n)
48413 {
48414 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
48415@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
48416 return (__force fs64)cpu_to_be64(n);
48417 }
48418
48419-static inline u32
48420+static inline u32 __intentional_overflow(-1)
48421 fs32_to_cpu(const struct super_block *sb, fs32 n)
48422 {
48423 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
48424diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
48425index 8615ee8..388ed68 100644
48426--- a/fs/befs/linuxvfs.c
48427+++ b/fs/befs/linuxvfs.c
48428@@ -510,7 +510,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
48429 {
48430 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
48431 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
48432- char *link = nd_get_link(nd);
48433+ const char *link = nd_get_link(nd);
48434 if (!IS_ERR(link))
48435 kfree(link);
48436 }
48437diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
48438index bbc8f88..7c7ac97 100644
48439--- a/fs/binfmt_aout.c
48440+++ b/fs/binfmt_aout.c
48441@@ -16,6 +16,7 @@
48442 #include <linux/string.h>
48443 #include <linux/fs.h>
48444 #include <linux/file.h>
48445+#include <linux/security.h>
48446 #include <linux/stat.h>
48447 #include <linux/fcntl.h>
48448 #include <linux/ptrace.h>
48449@@ -59,6 +60,8 @@ static int aout_core_dump(struct coredump_params *cprm)
48450 #endif
48451 # define START_STACK(u) ((void __user *)u.start_stack)
48452
48453+ memset(&dump, 0, sizeof(dump));
48454+
48455 fs = get_fs();
48456 set_fs(KERNEL_DS);
48457 has_dumped = 1;
48458@@ -70,10 +73,12 @@ static int aout_core_dump(struct coredump_params *cprm)
48459
48460 /* If the size of the dump file exceeds the rlimit, then see what would happen
48461 if we wrote the stack, but not the data area. */
48462+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
48463 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
48464 dump.u_dsize = 0;
48465
48466 /* Make sure we have enough room to write the stack and data areas. */
48467+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
48468 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
48469 dump.u_ssize = 0;
48470
48471@@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
48472 rlim = rlimit(RLIMIT_DATA);
48473 if (rlim >= RLIM_INFINITY)
48474 rlim = ~0;
48475+
48476+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
48477 if (ex.a_data + ex.a_bss > rlim)
48478 return -ENOMEM;
48479
48480@@ -268,6 +275,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
48481
48482 install_exec_creds(bprm);
48483
48484+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
48485+ current->mm->pax_flags = 0UL;
48486+#endif
48487+
48488+#ifdef CONFIG_PAX_PAGEEXEC
48489+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
48490+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
48491+
48492+#ifdef CONFIG_PAX_EMUTRAMP
48493+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
48494+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
48495+#endif
48496+
48497+#ifdef CONFIG_PAX_MPROTECT
48498+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
48499+ current->mm->pax_flags |= MF_PAX_MPROTECT;
48500+#endif
48501+
48502+ }
48503+#endif
48504+
48505 if (N_MAGIC(ex) == OMAGIC) {
48506 unsigned long text_addr, map_size;
48507 loff_t pos;
48508@@ -333,7 +361,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
48509 }
48510
48511 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
48512- PROT_READ | PROT_WRITE | PROT_EXEC,
48513+ PROT_READ | PROT_WRITE,
48514 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
48515 fd_offset + ex.a_text);
48516 if (error != N_DATADDR(ex)) {
48517diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
48518index 86af964..5d53bf6 100644
48519--- a/fs/binfmt_elf.c
48520+++ b/fs/binfmt_elf.c
48521@@ -34,6 +34,7 @@
48522 #include <linux/utsname.h>
48523 #include <linux/coredump.h>
48524 #include <linux/sched.h>
48525+#include <linux/xattr.h>
48526 #include <asm/uaccess.h>
48527 #include <asm/param.h>
48528 #include <asm/page.h>
48529@@ -60,6 +61,10 @@ static int elf_core_dump(struct coredump_params *cprm);
48530 #define elf_core_dump NULL
48531 #endif
48532
48533+#ifdef CONFIG_PAX_MPROTECT
48534+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
48535+#endif
48536+
48537 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
48538 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
48539 #else
48540@@ -79,6 +84,11 @@ static struct linux_binfmt elf_format = {
48541 .load_binary = load_elf_binary,
48542 .load_shlib = load_elf_library,
48543 .core_dump = elf_core_dump,
48544+
48545+#ifdef CONFIG_PAX_MPROTECT
48546+ .handle_mprotect= elf_handle_mprotect,
48547+#endif
48548+
48549 .min_coredump = ELF_EXEC_PAGESIZE,
48550 };
48551
48552@@ -86,6 +96,8 @@ static struct linux_binfmt elf_format = {
48553
48554 static int set_brk(unsigned long start, unsigned long end)
48555 {
48556+ unsigned long e = end;
48557+
48558 start = ELF_PAGEALIGN(start);
48559 end = ELF_PAGEALIGN(end);
48560 if (end > start) {
48561@@ -94,7 +106,7 @@ static int set_brk(unsigned long start, unsigned long end)
48562 if (BAD_ADDR(addr))
48563 return addr;
48564 }
48565- current->mm->start_brk = current->mm->brk = end;
48566+ current->mm->start_brk = current->mm->brk = e;
48567 return 0;
48568 }
48569
48570@@ -155,12 +167,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
48571 elf_addr_t __user *u_rand_bytes;
48572 const char *k_platform = ELF_PLATFORM;
48573 const char *k_base_platform = ELF_BASE_PLATFORM;
48574- unsigned char k_rand_bytes[16];
48575+ u32 k_rand_bytes[4];
48576 int items;
48577 elf_addr_t *elf_info;
48578 int ei_index = 0;
48579 const struct cred *cred = current_cred();
48580 struct vm_area_struct *vma;
48581+ unsigned long saved_auxv[AT_VECTOR_SIZE];
48582
48583 /*
48584 * In some cases (e.g. Hyper-Threading), we want to avoid L1
48585@@ -202,8 +215,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
48586 * Generate 16 random bytes for userspace PRNG seeding.
48587 */
48588 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
48589- u_rand_bytes = (elf_addr_t __user *)
48590- STACK_ALLOC(p, sizeof(k_rand_bytes));
48591+ srandom32(k_rand_bytes[0] ^ random32());
48592+ srandom32(k_rand_bytes[1] ^ random32());
48593+ srandom32(k_rand_bytes[2] ^ random32());
48594+ srandom32(k_rand_bytes[3] ^ random32());
48595+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
48596+ u_rand_bytes = (elf_addr_t __user *) p;
48597 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
48598 return -EFAULT;
48599
48600@@ -315,9 +332,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
48601 return -EFAULT;
48602 current->mm->env_end = p;
48603
48604+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
48605+
48606 /* Put the elf_info on the stack in the right place. */
48607 sp = (elf_addr_t __user *)envp + 1;
48608- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
48609+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
48610 return -EFAULT;
48611 return 0;
48612 }
48613@@ -385,15 +404,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
48614 an ELF header */
48615
48616 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
48617- struct file *interpreter, unsigned long *interp_map_addr,
48618- unsigned long no_base)
48619+ struct file *interpreter, unsigned long no_base)
48620 {
48621 struct elf_phdr *elf_phdata;
48622 struct elf_phdr *eppnt;
48623- unsigned long load_addr = 0;
48624+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
48625 int load_addr_set = 0;
48626 unsigned long last_bss = 0, elf_bss = 0;
48627- unsigned long error = ~0UL;
48628+ unsigned long error = -EINVAL;
48629 unsigned long total_size;
48630 int retval, i, size;
48631
48632@@ -439,6 +457,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
48633 goto out_close;
48634 }
48635
48636+#ifdef CONFIG_PAX_SEGMEXEC
48637+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
48638+ pax_task_size = SEGMEXEC_TASK_SIZE;
48639+#endif
48640+
48641 eppnt = elf_phdata;
48642 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
48643 if (eppnt->p_type == PT_LOAD) {
48644@@ -462,8 +485,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
48645 map_addr = elf_map(interpreter, load_addr + vaddr,
48646 eppnt, elf_prot, elf_type, total_size);
48647 total_size = 0;
48648- if (!*interp_map_addr)
48649- *interp_map_addr = map_addr;
48650 error = map_addr;
48651 if (BAD_ADDR(map_addr))
48652 goto out_close;
48653@@ -482,8 +503,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
48654 k = load_addr + eppnt->p_vaddr;
48655 if (BAD_ADDR(k) ||
48656 eppnt->p_filesz > eppnt->p_memsz ||
48657- eppnt->p_memsz > TASK_SIZE ||
48658- TASK_SIZE - eppnt->p_memsz < k) {
48659+ eppnt->p_memsz > pax_task_size ||
48660+ pax_task_size - eppnt->p_memsz < k) {
48661 error = -ENOMEM;
48662 goto out_close;
48663 }
48664@@ -535,6 +556,315 @@ out:
48665 return error;
48666 }
48667
48668+#ifdef CONFIG_PAX_PT_PAX_FLAGS
48669+#ifdef CONFIG_PAX_SOFTMODE
48670+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
48671+{
48672+ unsigned long pax_flags = 0UL;
48673+
48674+#ifdef CONFIG_PAX_PAGEEXEC
48675+ if (elf_phdata->p_flags & PF_PAGEEXEC)
48676+ pax_flags |= MF_PAX_PAGEEXEC;
48677+#endif
48678+
48679+#ifdef CONFIG_PAX_SEGMEXEC
48680+ if (elf_phdata->p_flags & PF_SEGMEXEC)
48681+ pax_flags |= MF_PAX_SEGMEXEC;
48682+#endif
48683+
48684+#ifdef CONFIG_PAX_EMUTRAMP
48685+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
48686+ pax_flags |= MF_PAX_EMUTRAMP;
48687+#endif
48688+
48689+#ifdef CONFIG_PAX_MPROTECT
48690+ if (elf_phdata->p_flags & PF_MPROTECT)
48691+ pax_flags |= MF_PAX_MPROTECT;
48692+#endif
48693+
48694+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
48695+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
48696+ pax_flags |= MF_PAX_RANDMMAP;
48697+#endif
48698+
48699+ return pax_flags;
48700+}
48701+#endif
48702+
48703+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
48704+{
48705+ unsigned long pax_flags = 0UL;
48706+
48707+#ifdef CONFIG_PAX_PAGEEXEC
48708+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
48709+ pax_flags |= MF_PAX_PAGEEXEC;
48710+#endif
48711+
48712+#ifdef CONFIG_PAX_SEGMEXEC
48713+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
48714+ pax_flags |= MF_PAX_SEGMEXEC;
48715+#endif
48716+
48717+#ifdef CONFIG_PAX_EMUTRAMP
48718+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
48719+ pax_flags |= MF_PAX_EMUTRAMP;
48720+#endif
48721+
48722+#ifdef CONFIG_PAX_MPROTECT
48723+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
48724+ pax_flags |= MF_PAX_MPROTECT;
48725+#endif
48726+
48727+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
48728+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
48729+ pax_flags |= MF_PAX_RANDMMAP;
48730+#endif
48731+
48732+ return pax_flags;
48733+}
48734+#endif
48735+
48736+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
48737+#ifdef CONFIG_PAX_SOFTMODE
48738+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
48739+{
48740+ unsigned long pax_flags = 0UL;
48741+
48742+#ifdef CONFIG_PAX_PAGEEXEC
48743+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
48744+ pax_flags |= MF_PAX_PAGEEXEC;
48745+#endif
48746+
48747+#ifdef CONFIG_PAX_SEGMEXEC
48748+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
48749+ pax_flags |= MF_PAX_SEGMEXEC;
48750+#endif
48751+
48752+#ifdef CONFIG_PAX_EMUTRAMP
48753+ if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
48754+ pax_flags |= MF_PAX_EMUTRAMP;
48755+#endif
48756+
48757+#ifdef CONFIG_PAX_MPROTECT
48758+ if (pax_flags_softmode & MF_PAX_MPROTECT)
48759+ pax_flags |= MF_PAX_MPROTECT;
48760+#endif
48761+
48762+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
48763+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
48764+ pax_flags |= MF_PAX_RANDMMAP;
48765+#endif
48766+
48767+ return pax_flags;
48768+}
48769+#endif
48770+
48771+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
48772+{
48773+ unsigned long pax_flags = 0UL;
48774+
48775+#ifdef CONFIG_PAX_PAGEEXEC
48776+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
48777+ pax_flags |= MF_PAX_PAGEEXEC;
48778+#endif
48779+
48780+#ifdef CONFIG_PAX_SEGMEXEC
48781+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
48782+ pax_flags |= MF_PAX_SEGMEXEC;
48783+#endif
48784+
48785+#ifdef CONFIG_PAX_EMUTRAMP
48786+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
48787+ pax_flags |= MF_PAX_EMUTRAMP;
48788+#endif
48789+
48790+#ifdef CONFIG_PAX_MPROTECT
48791+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
48792+ pax_flags |= MF_PAX_MPROTECT;
48793+#endif
48794+
48795+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
48796+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
48797+ pax_flags |= MF_PAX_RANDMMAP;
48798+#endif
48799+
48800+ return pax_flags;
48801+}
48802+#endif
48803+
48804+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
48805+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
48806+{
48807+ unsigned long pax_flags = 0UL;
48808+
48809+#ifdef CONFIG_PAX_EI_PAX
48810+
48811+#ifdef CONFIG_PAX_PAGEEXEC
48812+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
48813+ pax_flags |= MF_PAX_PAGEEXEC;
48814+#endif
48815+
48816+#ifdef CONFIG_PAX_SEGMEXEC
48817+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
48818+ pax_flags |= MF_PAX_SEGMEXEC;
48819+#endif
48820+
48821+#ifdef CONFIG_PAX_EMUTRAMP
48822+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
48823+ pax_flags |= MF_PAX_EMUTRAMP;
48824+#endif
48825+
48826+#ifdef CONFIG_PAX_MPROTECT
48827+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
48828+ pax_flags |= MF_PAX_MPROTECT;
48829+#endif
48830+
48831+#ifdef CONFIG_PAX_ASLR
48832+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
48833+ pax_flags |= MF_PAX_RANDMMAP;
48834+#endif
48835+
48836+#else
48837+
48838+#ifdef CONFIG_PAX_PAGEEXEC
48839+ pax_flags |= MF_PAX_PAGEEXEC;
48840+#endif
48841+
48842+#ifdef CONFIG_PAX_SEGMEXEC
48843+ pax_flags |= MF_PAX_SEGMEXEC;
48844+#endif
48845+
48846+#ifdef CONFIG_PAX_MPROTECT
48847+ pax_flags |= MF_PAX_MPROTECT;
48848+#endif
48849+
48850+#ifdef CONFIG_PAX_RANDMMAP
48851+ if (randomize_va_space)
48852+ pax_flags |= MF_PAX_RANDMMAP;
48853+#endif
48854+
48855+#endif
48856+
48857+ return pax_flags;
48858+}
48859+
48860+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
48861+{
48862+
48863+#ifdef CONFIG_PAX_PT_PAX_FLAGS
48864+ unsigned long i;
48865+
48866+ for (i = 0UL; i < elf_ex->e_phnum; i++)
48867+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
48868+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
48869+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
48870+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
48871+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
48872+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
48873+ return ~0UL;
48874+
48875+#ifdef CONFIG_PAX_SOFTMODE
48876+ if (pax_softmode)
48877+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
48878+ else
48879+#endif
48880+
48881+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
48882+ break;
48883+ }
48884+#endif
48885+
48886+ return ~0UL;
48887+}
48888+
48889+static unsigned long pax_parse_xattr_pax(struct file * const file)
48890+{
48891+
48892+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
48893+ ssize_t xattr_size, i;
48894+ unsigned char xattr_value[sizeof("pemrs") - 1];
48895+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
48896+
48897+ xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
48898+ if (xattr_size <= 0 || xattr_size > sizeof xattr_value)
48899+ return ~0UL;
48900+
48901+ for (i = 0; i < xattr_size; i++)
48902+ switch (xattr_value[i]) {
48903+ default:
48904+ return ~0UL;
48905+
48906+#define parse_flag(option1, option2, flag) \
48907+ case option1: \
48908+ if (pax_flags_hardmode & MF_PAX_##flag) \
48909+ return ~0UL; \
48910+ pax_flags_hardmode |= MF_PAX_##flag; \
48911+ break; \
48912+ case option2: \
48913+ if (pax_flags_softmode & MF_PAX_##flag) \
48914+ return ~0UL; \
48915+ pax_flags_softmode |= MF_PAX_##flag; \
48916+ break;
48917+
48918+ parse_flag('p', 'P', PAGEEXEC);
48919+ parse_flag('e', 'E', EMUTRAMP);
48920+ parse_flag('m', 'M', MPROTECT);
48921+ parse_flag('r', 'R', RANDMMAP);
48922+ parse_flag('s', 'S', SEGMEXEC);
48923+
48924+#undef parse_flag
48925+ }
48926+
48927+ if (pax_flags_hardmode & pax_flags_softmode)
48928+ return ~0UL;
48929+
48930+#ifdef CONFIG_PAX_SOFTMODE
48931+ if (pax_softmode)
48932+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
48933+ else
48934+#endif
48935+
48936+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
48937+#else
48938+ return ~0UL;
48939+#endif
48940+
48941+}
48942+
48943+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
48944+{
48945+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
48946+
48947+ pax_flags = pax_parse_ei_pax(elf_ex);
48948+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
48949+ xattr_pax_flags = pax_parse_xattr_pax(file);
48950+
48951+ if (pt_pax_flags == ~0UL)
48952+ pt_pax_flags = xattr_pax_flags;
48953+ else if (xattr_pax_flags == ~0UL)
48954+ xattr_pax_flags = pt_pax_flags;
48955+ if (pt_pax_flags != xattr_pax_flags)
48956+ return -EINVAL;
48957+ if (pt_pax_flags != ~0UL)
48958+ pax_flags = pt_pax_flags;
48959+
48960+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
48961+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
48962+ if ((__supported_pte_mask & _PAGE_NX))
48963+ pax_flags &= ~MF_PAX_SEGMEXEC;
48964+ else
48965+ pax_flags &= ~MF_PAX_PAGEEXEC;
48966+ }
48967+#endif
48968+
48969+ if (0 > pax_check_flags(&pax_flags))
48970+ return -EINVAL;
48971+
48972+ current->mm->pax_flags = pax_flags;
48973+ return 0;
48974+}
48975+#endif
48976+
48977 /*
48978 * These are the functions used to load ELF style executables and shared
48979 * libraries. There is no binary dependent code anywhere else.
48980@@ -551,6 +881,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
48981 {
48982 unsigned int random_variable = 0;
48983
48984+#ifdef CONFIG_PAX_RANDUSTACK
48985+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
48986+ return stack_top - current->mm->delta_stack;
48987+#endif
48988+
48989 if ((current->flags & PF_RANDOMIZE) &&
48990 !(current->personality & ADDR_NO_RANDOMIZE)) {
48991 random_variable = get_random_int() & STACK_RND_MASK;
48992@@ -569,7 +904,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
48993 unsigned long load_addr = 0, load_bias = 0;
48994 int load_addr_set = 0;
48995 char * elf_interpreter = NULL;
48996- unsigned long error;
48997+ unsigned long error = 0;
48998 struct elf_phdr *elf_ppnt, *elf_phdata;
48999 unsigned long elf_bss, elf_brk;
49000 int retval, i;
49001@@ -579,12 +914,12 @@ static int load_elf_binary(struct linux_binprm *bprm)
49002 unsigned long start_code, end_code, start_data, end_data;
49003 unsigned long reloc_func_desc __maybe_unused = 0;
49004 int executable_stack = EXSTACK_DEFAULT;
49005- unsigned long def_flags = 0;
49006 struct pt_regs *regs = current_pt_regs();
49007 struct {
49008 struct elfhdr elf_ex;
49009 struct elfhdr interp_elf_ex;
49010 } *loc;
49011+ unsigned long pax_task_size = TASK_SIZE;
49012
49013 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
49014 if (!loc) {
49015@@ -720,11 +1055,81 @@ static int load_elf_binary(struct linux_binprm *bprm)
49016 goto out_free_dentry;
49017
49018 /* OK, This is the point of no return */
49019- current->mm->def_flags = def_flags;
49020+
49021+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
49022+ current->mm->pax_flags = 0UL;
49023+#endif
49024+
49025+#ifdef CONFIG_PAX_DLRESOLVE
49026+ current->mm->call_dl_resolve = 0UL;
49027+#endif
49028+
49029+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
49030+ current->mm->call_syscall = 0UL;
49031+#endif
49032+
49033+#ifdef CONFIG_PAX_ASLR
49034+ current->mm->delta_mmap = 0UL;
49035+ current->mm->delta_stack = 0UL;
49036+#endif
49037+
49038+ current->mm->def_flags = 0;
49039+
49040+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
49041+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
49042+ send_sig(SIGKILL, current, 0);
49043+ goto out_free_dentry;
49044+ }
49045+#endif
49046+
49047+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
49048+ pax_set_initial_flags(bprm);
49049+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
49050+ if (pax_set_initial_flags_func)
49051+ (pax_set_initial_flags_func)(bprm);
49052+#endif
49053+
49054+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
49055+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
49056+ current->mm->context.user_cs_limit = PAGE_SIZE;
49057+ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
49058+ }
49059+#endif
49060+
49061+#ifdef CONFIG_PAX_SEGMEXEC
49062+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
49063+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
49064+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
49065+ pax_task_size = SEGMEXEC_TASK_SIZE;
49066+ current->mm->def_flags |= VM_NOHUGEPAGE;
49067+ }
49068+#endif
49069+
49070+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
49071+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
49072+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
49073+ put_cpu();
49074+ }
49075+#endif
49076
49077 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
49078 may depend on the personality. */
49079 SET_PERSONALITY(loc->elf_ex);
49080+
49081+#ifdef CONFIG_PAX_ASLR
49082+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
49083+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
49084+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
49085+ }
49086+#endif
49087+
49088+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
49089+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
49090+ executable_stack = EXSTACK_DISABLE_X;
49091+ current->personality &= ~READ_IMPLIES_EXEC;
49092+ } else
49093+#endif
49094+
49095 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
49096 current->personality |= READ_IMPLIES_EXEC;
49097
49098@@ -815,6 +1220,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
49099 #else
49100 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
49101 #endif
49102+
49103+#ifdef CONFIG_PAX_RANDMMAP
49104+ /* PaX: randomize base address at the default exe base if requested */
49105+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
49106+#ifdef CONFIG_SPARC64
49107+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
49108+#else
49109+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
49110+#endif
49111+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
49112+ elf_flags |= MAP_FIXED;
49113+ }
49114+#endif
49115+
49116 }
49117
49118 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
49119@@ -847,9 +1266,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
49120 * allowed task size. Note that p_filesz must always be
49121 * <= p_memsz so it is only necessary to check p_memsz.
49122 */
49123- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
49124- elf_ppnt->p_memsz > TASK_SIZE ||
49125- TASK_SIZE - elf_ppnt->p_memsz < k) {
49126+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
49127+ elf_ppnt->p_memsz > pax_task_size ||
49128+ pax_task_size - elf_ppnt->p_memsz < k) {
49129 /* set_brk can never work. Avoid overflows. */
49130 send_sig(SIGKILL, current, 0);
49131 retval = -EINVAL;
49132@@ -888,17 +1307,45 @@ static int load_elf_binary(struct linux_binprm *bprm)
49133 goto out_free_dentry;
49134 }
49135 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
49136- send_sig(SIGSEGV, current, 0);
49137- retval = -EFAULT; /* Nobody gets to see this, but.. */
49138- goto out_free_dentry;
49139+ /*
49140+ * This bss-zeroing can fail if the ELF
49141+ * file specifies odd protections. So
49142+ * we don't check the return value
49143+ */
49144 }
49145
49146+#ifdef CONFIG_PAX_RANDMMAP
49147+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
49148+ unsigned long start, size, flags;
49149+ vm_flags_t vm_flags;
49150+
49151+ start = ELF_PAGEALIGN(elf_brk);
49152+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
49153+ flags = MAP_FIXED | MAP_PRIVATE;
49154+ vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
49155+
49156+ down_write(&current->mm->mmap_sem);
49157+ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
49158+ retval = -ENOMEM;
49159+ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
49160+// if (current->personality & ADDR_NO_RANDOMIZE)
49161+// vm_flags |= VM_READ | VM_MAYREAD;
49162+ start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0);
49163+ retval = IS_ERR_VALUE(start) ? start : 0;
49164+ }
49165+ up_write(&current->mm->mmap_sem);
49166+ if (retval == 0)
49167+ retval = set_brk(start + size, start + size + PAGE_SIZE);
49168+ if (retval < 0) {
49169+ send_sig(SIGKILL, current, 0);
49170+ goto out_free_dentry;
49171+ }
49172+ }
49173+#endif
49174+
49175 if (elf_interpreter) {
49176- unsigned long interp_map_addr = 0;
49177-
49178 elf_entry = load_elf_interp(&loc->interp_elf_ex,
49179 interpreter,
49180- &interp_map_addr,
49181 load_bias);
49182 if (!IS_ERR((void *)elf_entry)) {
49183 /*
49184@@ -1120,7 +1567,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
49185 * Decide what to dump of a segment, part, all or none.
49186 */
49187 static unsigned long vma_dump_size(struct vm_area_struct *vma,
49188- unsigned long mm_flags)
49189+ unsigned long mm_flags, long signr)
49190 {
49191 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
49192
49193@@ -1158,7 +1605,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
49194 if (vma->vm_file == NULL)
49195 return 0;
49196
49197- if (FILTER(MAPPED_PRIVATE))
49198+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
49199 goto whole;
49200
49201 /*
49202@@ -1383,9 +1830,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
49203 {
49204 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
49205 int i = 0;
49206- do
49207+ do {
49208 i += 2;
49209- while (auxv[i - 2] != AT_NULL);
49210+ } while (auxv[i - 2] != AT_NULL);
49211 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
49212 }
49213
49214@@ -1394,7 +1841,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
49215 {
49216 mm_segment_t old_fs = get_fs();
49217 set_fs(KERNEL_DS);
49218- copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
49219+ copy_siginfo_to_user((user_siginfo_t __force_user *) csigdata, siginfo);
49220 set_fs(old_fs);
49221 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
49222 }
49223@@ -2015,14 +2462,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
49224 }
49225
49226 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
49227- unsigned long mm_flags)
49228+ struct coredump_params *cprm)
49229 {
49230 struct vm_area_struct *vma;
49231 size_t size = 0;
49232
49233 for (vma = first_vma(current, gate_vma); vma != NULL;
49234 vma = next_vma(vma, gate_vma))
49235- size += vma_dump_size(vma, mm_flags);
49236+ size += vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
49237 return size;
49238 }
49239
49240@@ -2116,7 +2563,7 @@ static int elf_core_dump(struct coredump_params *cprm)
49241
49242 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
49243
49244- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
49245+ offset += elf_core_vma_data_size(gate_vma, cprm);
49246 offset += elf_core_extra_data_size();
49247 e_shoff = offset;
49248
49249@@ -2130,10 +2577,12 @@ static int elf_core_dump(struct coredump_params *cprm)
49250 offset = dataoff;
49251
49252 size += sizeof(*elf);
49253+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
49254 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
49255 goto end_coredump;
49256
49257 size += sizeof(*phdr4note);
49258+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
49259 if (size > cprm->limit
49260 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
49261 goto end_coredump;
49262@@ -2147,7 +2596,7 @@ static int elf_core_dump(struct coredump_params *cprm)
49263 phdr.p_offset = offset;
49264 phdr.p_vaddr = vma->vm_start;
49265 phdr.p_paddr = 0;
49266- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
49267+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
49268 phdr.p_memsz = vma->vm_end - vma->vm_start;
49269 offset += phdr.p_filesz;
49270 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
49271@@ -2158,6 +2607,7 @@ static int elf_core_dump(struct coredump_params *cprm)
49272 phdr.p_align = ELF_EXEC_PAGESIZE;
49273
49274 size += sizeof(phdr);
49275+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
49276 if (size > cprm->limit
49277 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
49278 goto end_coredump;
49279@@ -2182,7 +2632,7 @@ static int elf_core_dump(struct coredump_params *cprm)
49280 unsigned long addr;
49281 unsigned long end;
49282
49283- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
49284+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
49285
49286 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
49287 struct page *page;
49288@@ -2191,6 +2641,7 @@ static int elf_core_dump(struct coredump_params *cprm)
49289 page = get_dump_page(addr);
49290 if (page) {
49291 void *kaddr = kmap(page);
49292+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
49293 stop = ((size += PAGE_SIZE) > cprm->limit) ||
49294 !dump_write(cprm->file, kaddr,
49295 PAGE_SIZE);
49296@@ -2208,6 +2659,7 @@ static int elf_core_dump(struct coredump_params *cprm)
49297
49298 if (e_phnum == PN_XNUM) {
49299 size += sizeof(*shdr4extnum);
49300+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
49301 if (size > cprm->limit
49302 || !dump_write(cprm->file, shdr4extnum,
49303 sizeof(*shdr4extnum)))
49304@@ -2228,6 +2680,97 @@ out:
49305
49306 #endif /* CONFIG_ELF_CORE */
49307
49308+#ifdef CONFIG_PAX_MPROTECT
49309+/* PaX: non-PIC ELF libraries need relocations on their executable segments
49310+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
49311+ * we'll remove VM_MAYWRITE for good on RELRO segments.
49312+ *
49313+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
49314+ * basis because we want to allow the common case and not the special ones.
49315+ */
49316+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
49317+{
49318+ struct elfhdr elf_h;
49319+ struct elf_phdr elf_p;
49320+ unsigned long i;
49321+ unsigned long oldflags;
49322+ bool is_textrel_rw, is_textrel_rx, is_relro;
49323+
49324+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
49325+ return;
49326+
49327+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
49328+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
49329+
49330+#ifdef CONFIG_PAX_ELFRELOCS
49331+ /* possible TEXTREL */
49332+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
49333+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
49334+#else
49335+ is_textrel_rw = false;
49336+ is_textrel_rx = false;
49337+#endif
49338+
49339+ /* possible RELRO */
49340+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
49341+
49342+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
49343+ return;
49344+
49345+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
49346+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
49347+
49348+#ifdef CONFIG_PAX_ETEXECRELOCS
49349+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
49350+#else
49351+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
49352+#endif
49353+
49354+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
49355+ !elf_check_arch(&elf_h) ||
49356+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
49357+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
49358+ return;
49359+
49360+ for (i = 0UL; i < elf_h.e_phnum; i++) {
49361+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
49362+ return;
49363+ switch (elf_p.p_type) {
49364+ case PT_DYNAMIC:
49365+ if (!is_textrel_rw && !is_textrel_rx)
49366+ continue;
49367+ i = 0UL;
49368+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
49369+ elf_dyn dyn;
49370+
49371+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
49372+ return;
49373+ if (dyn.d_tag == DT_NULL)
49374+ return;
49375+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
49376+ gr_log_textrel(vma);
49377+ if (is_textrel_rw)
49378+ vma->vm_flags |= VM_MAYWRITE;
49379+ else
49380+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
49381+ vma->vm_flags &= ~VM_MAYWRITE;
49382+ return;
49383+ }
49384+ i++;
49385+ }
49386+ return;
49387+
49388+ case PT_GNU_RELRO:
49389+ if (!is_relro)
49390+ continue;
49391+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
49392+ vma->vm_flags &= ~VM_MAYWRITE;
49393+ return;
49394+ }
49395+ }
49396+}
49397+#endif
49398+
49399 static int __init init_elf_binfmt(void)
49400 {
49401 register_binfmt(&elf_format);
49402diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
49403index 2036d21..b0430d0 100644
49404--- a/fs/binfmt_flat.c
49405+++ b/fs/binfmt_flat.c
49406@@ -562,7 +562,9 @@ static int load_flat_file(struct linux_binprm * bprm,
49407 realdatastart = (unsigned long) -ENOMEM;
49408 printk("Unable to allocate RAM for process data, errno %d\n",
49409 (int)-realdatastart);
49410+ down_write(&current->mm->mmap_sem);
49411 vm_munmap(textpos, text_len);
49412+ up_write(&current->mm->mmap_sem);
49413 ret = realdatastart;
49414 goto err;
49415 }
49416@@ -586,8 +588,10 @@ static int load_flat_file(struct linux_binprm * bprm,
49417 }
49418 if (IS_ERR_VALUE(result)) {
49419 printk("Unable to read data+bss, errno %d\n", (int)-result);
49420+ down_write(&current->mm->mmap_sem);
49421 vm_munmap(textpos, text_len);
49422 vm_munmap(realdatastart, len);
49423+ up_write(&current->mm->mmap_sem);
49424 ret = result;
49425 goto err;
49426 }
49427@@ -654,8 +658,10 @@ static int load_flat_file(struct linux_binprm * bprm,
49428 }
49429 if (IS_ERR_VALUE(result)) {
49430 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
49431+ down_write(&current->mm->mmap_sem);
49432 vm_munmap(textpos, text_len + data_len + extra +
49433 MAX_SHARED_LIBS * sizeof(unsigned long));
49434+ up_write(&current->mm->mmap_sem);
49435 ret = result;
49436 goto err;
49437 }
49438diff --git a/fs/bio.c b/fs/bio.c
49439index b96fc6c..431d628 100644
49440--- a/fs/bio.c
49441+++ b/fs/bio.c
49442@@ -818,7 +818,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
49443 /*
49444 * Overflow, abort
49445 */
49446- if (end < start)
49447+ if (end < start || end - start > INT_MAX - nr_pages)
49448 return ERR_PTR(-EINVAL);
49449
49450 nr_pages += end - start;
49451@@ -952,7 +952,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
49452 /*
49453 * Overflow, abort
49454 */
49455- if (end < start)
49456+ if (end < start || end - start > INT_MAX - nr_pages)
49457 return ERR_PTR(-EINVAL);
49458
49459 nr_pages += end - start;
49460@@ -1214,7 +1214,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
49461 const int read = bio_data_dir(bio) == READ;
49462 struct bio_map_data *bmd = bio->bi_private;
49463 int i;
49464- char *p = bmd->sgvecs[0].iov_base;
49465+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
49466
49467 __bio_for_each_segment(bvec, bio, i, 0) {
49468 char *addr = page_address(bvec->bv_page);
49469diff --git a/fs/block_dev.c b/fs/block_dev.c
49470index aae187a..fd790ba 100644
49471--- a/fs/block_dev.c
49472+++ b/fs/block_dev.c
49473@@ -652,7 +652,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
49474 else if (bdev->bd_contains == bdev)
49475 return true; /* is a whole device which isn't held */
49476
49477- else if (whole->bd_holder == bd_may_claim)
49478+ else if (whole->bd_holder == (void *)bd_may_claim)
49479 return true; /* is a partition of a device that is being partitioned */
49480 else if (whole->bd_holder != NULL)
49481 return false; /* is a partition of a held device */
49482diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
49483index ca9d8f1..8c0142d 100644
49484--- a/fs/btrfs/ctree.c
49485+++ b/fs/btrfs/ctree.c
49486@@ -1036,9 +1036,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
49487 free_extent_buffer(buf);
49488 add_root_to_dirty_list(root);
49489 } else {
49490- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
49491- parent_start = parent->start;
49492- else
49493+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
49494+ if (parent)
49495+ parent_start = parent->start;
49496+ else
49497+ parent_start = 0;
49498+ } else
49499 parent_start = 0;
49500
49501 WARN_ON(trans->transid != btrfs_header_generation(parent));
49502diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
49503index f49b62f..07834ab 100644
49504--- a/fs/btrfs/ioctl.c
49505+++ b/fs/btrfs/ioctl.c
49506@@ -3077,9 +3077,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
49507 for (i = 0; i < num_types; i++) {
49508 struct btrfs_space_info *tmp;
49509
49510+ /* Don't copy in more than we allocated */
49511 if (!slot_count)
49512 break;
49513
49514+ slot_count--;
49515+
49516 info = NULL;
49517 rcu_read_lock();
49518 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
49519@@ -3101,10 +3104,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
49520 memcpy(dest, &space, sizeof(space));
49521 dest++;
49522 space_args.total_spaces++;
49523- slot_count--;
49524 }
49525- if (!slot_count)
49526- break;
49527 }
49528 up_read(&info->groups_sem);
49529 }
49530diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
49531index f6b8859..54fe8c5 100644
49532--- a/fs/btrfs/super.c
49533+++ b/fs/btrfs/super.c
49534@@ -266,7 +266,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
49535 function, line, errstr);
49536 return;
49537 }
49538- ACCESS_ONCE(trans->transaction->aborted) = errno;
49539+ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
49540 __btrfs_std_error(root->fs_info, function, line, errno, NULL);
49541 }
49542 /*
49543diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
49544index 622f469..e8d2d55 100644
49545--- a/fs/cachefiles/bind.c
49546+++ b/fs/cachefiles/bind.c
49547@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
49548 args);
49549
49550 /* start by checking things over */
49551- ASSERT(cache->fstop_percent >= 0 &&
49552- cache->fstop_percent < cache->fcull_percent &&
49553+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
49554 cache->fcull_percent < cache->frun_percent &&
49555 cache->frun_percent < 100);
49556
49557- ASSERT(cache->bstop_percent >= 0 &&
49558- cache->bstop_percent < cache->bcull_percent &&
49559+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
49560 cache->bcull_percent < cache->brun_percent &&
49561 cache->brun_percent < 100);
49562
49563diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
49564index 0a1467b..6a53245 100644
49565--- a/fs/cachefiles/daemon.c
49566+++ b/fs/cachefiles/daemon.c
49567@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
49568 if (n > buflen)
49569 return -EMSGSIZE;
49570
49571- if (copy_to_user(_buffer, buffer, n) != 0)
49572+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
49573 return -EFAULT;
49574
49575 return n;
49576@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
49577 if (test_bit(CACHEFILES_DEAD, &cache->flags))
49578 return -EIO;
49579
49580- if (datalen < 0 || datalen > PAGE_SIZE - 1)
49581+ if (datalen > PAGE_SIZE - 1)
49582 return -EOPNOTSUPP;
49583
49584 /* drag the command string into the kernel so we can parse it */
49585@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
49586 if (args[0] != '%' || args[1] != '\0')
49587 return -EINVAL;
49588
49589- if (fstop < 0 || fstop >= cache->fcull_percent)
49590+ if (fstop >= cache->fcull_percent)
49591 return cachefiles_daemon_range_error(cache, args);
49592
49593 cache->fstop_percent = fstop;
49594@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
49595 if (args[0] != '%' || args[1] != '\0')
49596 return -EINVAL;
49597
49598- if (bstop < 0 || bstop >= cache->bcull_percent)
49599+ if (bstop >= cache->bcull_percent)
49600 return cachefiles_daemon_range_error(cache, args);
49601
49602 cache->bstop_percent = bstop;
49603diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
49604index 4938251..7e01445 100644
49605--- a/fs/cachefiles/internal.h
49606+++ b/fs/cachefiles/internal.h
49607@@ -59,7 +59,7 @@ struct cachefiles_cache {
49608 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
49609 struct rb_root active_nodes; /* active nodes (can't be culled) */
49610 rwlock_t active_lock; /* lock for active_nodes */
49611- atomic_t gravecounter; /* graveyard uniquifier */
49612+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
49613 unsigned frun_percent; /* when to stop culling (% files) */
49614 unsigned fcull_percent; /* when to start culling (% files) */
49615 unsigned fstop_percent; /* when to stop allocating (% files) */
49616@@ -171,19 +171,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
49617 * proc.c
49618 */
49619 #ifdef CONFIG_CACHEFILES_HISTOGRAM
49620-extern atomic_t cachefiles_lookup_histogram[HZ];
49621-extern atomic_t cachefiles_mkdir_histogram[HZ];
49622-extern atomic_t cachefiles_create_histogram[HZ];
49623+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
49624+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
49625+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
49626
49627 extern int __init cachefiles_proc_init(void);
49628 extern void cachefiles_proc_cleanup(void);
49629 static inline
49630-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
49631+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
49632 {
49633 unsigned long jif = jiffies - start_jif;
49634 if (jif >= HZ)
49635 jif = HZ - 1;
49636- atomic_inc(&histogram[jif]);
49637+ atomic_inc_unchecked(&histogram[jif]);
49638 }
49639
49640 #else
49641diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
49642index 8c01c5fc..15f982e 100644
49643--- a/fs/cachefiles/namei.c
49644+++ b/fs/cachefiles/namei.c
49645@@ -317,7 +317,7 @@ try_again:
49646 /* first step is to make up a grave dentry in the graveyard */
49647 sprintf(nbuffer, "%08x%08x",
49648 (uint32_t) get_seconds(),
49649- (uint32_t) atomic_inc_return(&cache->gravecounter));
49650+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
49651
49652 /* do the multiway lock magic */
49653 trap = lock_rename(cache->graveyard, dir);
49654diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
49655index eccd339..4c1d995 100644
49656--- a/fs/cachefiles/proc.c
49657+++ b/fs/cachefiles/proc.c
49658@@ -14,9 +14,9 @@
49659 #include <linux/seq_file.h>
49660 #include "internal.h"
49661
49662-atomic_t cachefiles_lookup_histogram[HZ];
49663-atomic_t cachefiles_mkdir_histogram[HZ];
49664-atomic_t cachefiles_create_histogram[HZ];
49665+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
49666+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
49667+atomic_unchecked_t cachefiles_create_histogram[HZ];
49668
49669 /*
49670 * display the latency histogram
49671@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
49672 return 0;
49673 default:
49674 index = (unsigned long) v - 3;
49675- x = atomic_read(&cachefiles_lookup_histogram[index]);
49676- y = atomic_read(&cachefiles_mkdir_histogram[index]);
49677- z = atomic_read(&cachefiles_create_histogram[index]);
49678+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
49679+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
49680+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
49681 if (x == 0 && y == 0 && z == 0)
49682 return 0;
49683
49684diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
49685index 4809922..aab2c39 100644
49686--- a/fs/cachefiles/rdwr.c
49687+++ b/fs/cachefiles/rdwr.c
49688@@ -965,7 +965,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
49689 old_fs = get_fs();
49690 set_fs(KERNEL_DS);
49691 ret = file->f_op->write(
49692- file, (const void __user *) data, len, &pos);
49693+ file, (const void __force_user *) data, len, &pos);
49694 set_fs(old_fs);
49695 kunmap(page);
49696 if (ret != len)
49697diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
49698index 6d797f4..0ace2e5 100644
49699--- a/fs/ceph/dir.c
49700+++ b/fs/ceph/dir.c
49701@@ -243,7 +243,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
49702 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
49703 struct ceph_mds_client *mdsc = fsc->mdsc;
49704 unsigned frag = fpos_frag(filp->f_pos);
49705- int off = fpos_off(filp->f_pos);
49706+ unsigned int off = fpos_off(filp->f_pos);
49707 int err;
49708 u32 ftype;
49709 struct ceph_mds_reply_info_parsed *rinfo;
49710diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
49711index d9ea6ed..1e6c8ac 100644
49712--- a/fs/cifs/cifs_debug.c
49713+++ b/fs/cifs/cifs_debug.c
49714@@ -267,8 +267,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
49715
49716 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
49717 #ifdef CONFIG_CIFS_STATS2
49718- atomic_set(&totBufAllocCount, 0);
49719- atomic_set(&totSmBufAllocCount, 0);
49720+ atomic_set_unchecked(&totBufAllocCount, 0);
49721+ atomic_set_unchecked(&totSmBufAllocCount, 0);
49722 #endif /* CONFIG_CIFS_STATS2 */
49723 spin_lock(&cifs_tcp_ses_lock);
49724 list_for_each(tmp1, &cifs_tcp_ses_list) {
49725@@ -281,7 +281,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
49726 tcon = list_entry(tmp3,
49727 struct cifs_tcon,
49728 tcon_list);
49729- atomic_set(&tcon->num_smbs_sent, 0);
49730+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
49731 if (server->ops->clear_stats)
49732 server->ops->clear_stats(tcon);
49733 }
49734@@ -313,8 +313,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
49735 smBufAllocCount.counter, cifs_min_small);
49736 #ifdef CONFIG_CIFS_STATS2
49737 seq_printf(m, "Total Large %d Small %d Allocations\n",
49738- atomic_read(&totBufAllocCount),
49739- atomic_read(&totSmBufAllocCount));
49740+ atomic_read_unchecked(&totBufAllocCount),
49741+ atomic_read_unchecked(&totSmBufAllocCount));
49742 #endif /* CONFIG_CIFS_STATS2 */
49743
49744 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
49745@@ -343,7 +343,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
49746 if (tcon->need_reconnect)
49747 seq_puts(m, "\tDISCONNECTED ");
49748 seq_printf(m, "\nSMBs: %d",
49749- atomic_read(&tcon->num_smbs_sent));
49750+ atomic_read_unchecked(&tcon->num_smbs_sent));
49751 if (server->ops->print_stats)
49752 server->ops->print_stats(m, tcon);
49753 }
49754diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
49755index 345fc89..b2acae5 100644
49756--- a/fs/cifs/cifsfs.c
49757+++ b/fs/cifs/cifsfs.c
49758@@ -1033,7 +1033,7 @@ cifs_init_request_bufs(void)
49759 /* cERROR(1, "CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize); */
49760 cifs_req_cachep = kmem_cache_create("cifs_request",
49761 CIFSMaxBufSize + max_hdr_size, 0,
49762- SLAB_HWCACHE_ALIGN, NULL);
49763+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
49764 if (cifs_req_cachep == NULL)
49765 return -ENOMEM;
49766
49767@@ -1060,7 +1060,7 @@ cifs_init_request_bufs(void)
49768 efficient to alloc 1 per page off the slab compared to 17K (5page)
49769 alloc of large cifs buffers even when page debugging is on */
49770 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
49771- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
49772+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
49773 NULL);
49774 if (cifs_sm_req_cachep == NULL) {
49775 mempool_destroy(cifs_req_poolp);
49776@@ -1145,8 +1145,8 @@ init_cifs(void)
49777 atomic_set(&bufAllocCount, 0);
49778 atomic_set(&smBufAllocCount, 0);
49779 #ifdef CONFIG_CIFS_STATS2
49780- atomic_set(&totBufAllocCount, 0);
49781- atomic_set(&totSmBufAllocCount, 0);
49782+ atomic_set_unchecked(&totBufAllocCount, 0);
49783+ atomic_set_unchecked(&totSmBufAllocCount, 0);
49784 #endif /* CONFIG_CIFS_STATS2 */
49785
49786 atomic_set(&midCount, 0);
49787diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
49788index 4f07f6f..55de8ce 100644
49789--- a/fs/cifs/cifsglob.h
49790+++ b/fs/cifs/cifsglob.h
49791@@ -751,35 +751,35 @@ struct cifs_tcon {
49792 __u16 Flags; /* optional support bits */
49793 enum statusEnum tidStatus;
49794 #ifdef CONFIG_CIFS_STATS
49795- atomic_t num_smbs_sent;
49796+ atomic_unchecked_t num_smbs_sent;
49797 union {
49798 struct {
49799- atomic_t num_writes;
49800- atomic_t num_reads;
49801- atomic_t num_flushes;
49802- atomic_t num_oplock_brks;
49803- atomic_t num_opens;
49804- atomic_t num_closes;
49805- atomic_t num_deletes;
49806- atomic_t num_mkdirs;
49807- atomic_t num_posixopens;
49808- atomic_t num_posixmkdirs;
49809- atomic_t num_rmdirs;
49810- atomic_t num_renames;
49811- atomic_t num_t2renames;
49812- atomic_t num_ffirst;
49813- atomic_t num_fnext;
49814- atomic_t num_fclose;
49815- atomic_t num_hardlinks;
49816- atomic_t num_symlinks;
49817- atomic_t num_locks;
49818- atomic_t num_acl_get;
49819- atomic_t num_acl_set;
49820+ atomic_unchecked_t num_writes;
49821+ atomic_unchecked_t num_reads;
49822+ atomic_unchecked_t num_flushes;
49823+ atomic_unchecked_t num_oplock_brks;
49824+ atomic_unchecked_t num_opens;
49825+ atomic_unchecked_t num_closes;
49826+ atomic_unchecked_t num_deletes;
49827+ atomic_unchecked_t num_mkdirs;
49828+ atomic_unchecked_t num_posixopens;
49829+ atomic_unchecked_t num_posixmkdirs;
49830+ atomic_unchecked_t num_rmdirs;
49831+ atomic_unchecked_t num_renames;
49832+ atomic_unchecked_t num_t2renames;
49833+ atomic_unchecked_t num_ffirst;
49834+ atomic_unchecked_t num_fnext;
49835+ atomic_unchecked_t num_fclose;
49836+ atomic_unchecked_t num_hardlinks;
49837+ atomic_unchecked_t num_symlinks;
49838+ atomic_unchecked_t num_locks;
49839+ atomic_unchecked_t num_acl_get;
49840+ atomic_unchecked_t num_acl_set;
49841 } cifs_stats;
49842 #ifdef CONFIG_CIFS_SMB2
49843 struct {
49844- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
49845- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
49846+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
49847+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
49848 } smb2_stats;
49849 #endif /* CONFIG_CIFS_SMB2 */
49850 } stats;
49851@@ -1080,7 +1080,7 @@ convert_delimiter(char *path, char delim)
49852 }
49853
49854 #ifdef CONFIG_CIFS_STATS
49855-#define cifs_stats_inc atomic_inc
49856+#define cifs_stats_inc atomic_inc_unchecked
49857
49858 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
49859 unsigned int bytes)
49860@@ -1445,8 +1445,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
49861 /* Various Debug counters */
49862 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
49863 #ifdef CONFIG_CIFS_STATS2
49864-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
49865-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
49866+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
49867+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
49868 #endif
49869 GLOBAL_EXTERN atomic_t smBufAllocCount;
49870 GLOBAL_EXTERN atomic_t midCount;
49871diff --git a/fs/cifs/link.c b/fs/cifs/link.c
49872index 9f6c4c4..8de307a 100644
49873--- a/fs/cifs/link.c
49874+++ b/fs/cifs/link.c
49875@@ -616,7 +616,7 @@ symlink_exit:
49876
49877 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
49878 {
49879- char *p = nd_get_link(nd);
49880+ const char *p = nd_get_link(nd);
49881 if (!IS_ERR(p))
49882 kfree(p);
49883 }
49884diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
49885index 1b15bf8..1ce489e 100644
49886--- a/fs/cifs/misc.c
49887+++ b/fs/cifs/misc.c
49888@@ -169,7 +169,7 @@ cifs_buf_get(void)
49889 memset(ret_buf, 0, buf_size + 3);
49890 atomic_inc(&bufAllocCount);
49891 #ifdef CONFIG_CIFS_STATS2
49892- atomic_inc(&totBufAllocCount);
49893+ atomic_inc_unchecked(&totBufAllocCount);
49894 #endif /* CONFIG_CIFS_STATS2 */
49895 }
49896
49897@@ -204,7 +204,7 @@ cifs_small_buf_get(void)
49898 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
49899 atomic_inc(&smBufAllocCount);
49900 #ifdef CONFIG_CIFS_STATS2
49901- atomic_inc(&totSmBufAllocCount);
49902+ atomic_inc_unchecked(&totSmBufAllocCount);
49903 #endif /* CONFIG_CIFS_STATS2 */
49904
49905 }
49906diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
49907index 47bc5a8..10decbe 100644
49908--- a/fs/cifs/smb1ops.c
49909+++ b/fs/cifs/smb1ops.c
49910@@ -586,27 +586,27 @@ static void
49911 cifs_clear_stats(struct cifs_tcon *tcon)
49912 {
49913 #ifdef CONFIG_CIFS_STATS
49914- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
49915- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
49916- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
49917- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
49918- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
49919- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
49920- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
49921- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
49922- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
49923- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
49924- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
49925- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
49926- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
49927- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
49928- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
49929- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
49930- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
49931- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
49932- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
49933- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
49934- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
49935+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
49936+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
49937+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
49938+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
49939+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
49940+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
49941+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
49942+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
49943+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
49944+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
49945+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
49946+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
49947+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
49948+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
49949+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
49950+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
49951+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
49952+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
49953+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
49954+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
49955+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
49956 #endif
49957 }
49958
49959@@ -615,36 +615,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
49960 {
49961 #ifdef CONFIG_CIFS_STATS
49962 seq_printf(m, " Oplocks breaks: %d",
49963- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
49964+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
49965 seq_printf(m, "\nReads: %d Bytes: %llu",
49966- atomic_read(&tcon->stats.cifs_stats.num_reads),
49967+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
49968 (long long)(tcon->bytes_read));
49969 seq_printf(m, "\nWrites: %d Bytes: %llu",
49970- atomic_read(&tcon->stats.cifs_stats.num_writes),
49971+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
49972 (long long)(tcon->bytes_written));
49973 seq_printf(m, "\nFlushes: %d",
49974- atomic_read(&tcon->stats.cifs_stats.num_flushes));
49975+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
49976 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
49977- atomic_read(&tcon->stats.cifs_stats.num_locks),
49978- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
49979- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
49980+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
49981+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
49982+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
49983 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
49984- atomic_read(&tcon->stats.cifs_stats.num_opens),
49985- atomic_read(&tcon->stats.cifs_stats.num_closes),
49986- atomic_read(&tcon->stats.cifs_stats.num_deletes));
49987+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
49988+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
49989+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
49990 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
49991- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
49992- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
49993+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
49994+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
49995 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
49996- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
49997- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
49998+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
49999+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
50000 seq_printf(m, "\nRenames: %d T2 Renames %d",
50001- atomic_read(&tcon->stats.cifs_stats.num_renames),
50002- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
50003+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
50004+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
50005 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
50006- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
50007- atomic_read(&tcon->stats.cifs_stats.num_fnext),
50008- atomic_read(&tcon->stats.cifs_stats.num_fclose));
50009+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
50010+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
50011+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
50012 #endif
50013 }
50014
50015diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
50016index bceffe7..cd1ae59 100644
50017--- a/fs/cifs/smb2ops.c
50018+++ b/fs/cifs/smb2ops.c
50019@@ -274,8 +274,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
50020 #ifdef CONFIG_CIFS_STATS
50021 int i;
50022 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
50023- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
50024- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
50025+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
50026+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
50027 }
50028 #endif
50029 }
50030@@ -284,66 +284,66 @@ static void
50031 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
50032 {
50033 #ifdef CONFIG_CIFS_STATS
50034- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
50035- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
50036+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
50037+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
50038 seq_printf(m, "\nNegotiates: %d sent %d failed",
50039- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
50040- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
50041+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
50042+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
50043 seq_printf(m, "\nSessionSetups: %d sent %d failed",
50044- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
50045- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
50046+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
50047+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
50048 #define SMB2LOGOFF 0x0002 /* trivial request/resp */
50049 seq_printf(m, "\nLogoffs: %d sent %d failed",
50050- atomic_read(&sent[SMB2_LOGOFF_HE]),
50051- atomic_read(&failed[SMB2_LOGOFF_HE]));
50052+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
50053+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
50054 seq_printf(m, "\nTreeConnects: %d sent %d failed",
50055- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
50056- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
50057+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
50058+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
50059 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
50060- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
50061- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
50062+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
50063+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
50064 seq_printf(m, "\nCreates: %d sent %d failed",
50065- atomic_read(&sent[SMB2_CREATE_HE]),
50066- atomic_read(&failed[SMB2_CREATE_HE]));
50067+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
50068+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
50069 seq_printf(m, "\nCloses: %d sent %d failed",
50070- atomic_read(&sent[SMB2_CLOSE_HE]),
50071- atomic_read(&failed[SMB2_CLOSE_HE]));
50072+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
50073+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
50074 seq_printf(m, "\nFlushes: %d sent %d failed",
50075- atomic_read(&sent[SMB2_FLUSH_HE]),
50076- atomic_read(&failed[SMB2_FLUSH_HE]));
50077+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
50078+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
50079 seq_printf(m, "\nReads: %d sent %d failed",
50080- atomic_read(&sent[SMB2_READ_HE]),
50081- atomic_read(&failed[SMB2_READ_HE]));
50082+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
50083+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
50084 seq_printf(m, "\nWrites: %d sent %d failed",
50085- atomic_read(&sent[SMB2_WRITE_HE]),
50086- atomic_read(&failed[SMB2_WRITE_HE]));
50087+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
50088+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
50089 seq_printf(m, "\nLocks: %d sent %d failed",
50090- atomic_read(&sent[SMB2_LOCK_HE]),
50091- atomic_read(&failed[SMB2_LOCK_HE]));
50092+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
50093+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
50094 seq_printf(m, "\nIOCTLs: %d sent %d failed",
50095- atomic_read(&sent[SMB2_IOCTL_HE]),
50096- atomic_read(&failed[SMB2_IOCTL_HE]));
50097+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
50098+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
50099 seq_printf(m, "\nCancels: %d sent %d failed",
50100- atomic_read(&sent[SMB2_CANCEL_HE]),
50101- atomic_read(&failed[SMB2_CANCEL_HE]));
50102+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
50103+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
50104 seq_printf(m, "\nEchos: %d sent %d failed",
50105- atomic_read(&sent[SMB2_ECHO_HE]),
50106- atomic_read(&failed[SMB2_ECHO_HE]));
50107+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
50108+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
50109 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
50110- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
50111- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
50112+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
50113+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
50114 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
50115- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
50116- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
50117+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
50118+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
50119 seq_printf(m, "\nQueryInfos: %d sent %d failed",
50120- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
50121- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
50122+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
50123+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
50124 seq_printf(m, "\nSetInfos: %d sent %d failed",
50125- atomic_read(&sent[SMB2_SET_INFO_HE]),
50126- atomic_read(&failed[SMB2_SET_INFO_HE]));
50127+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
50128+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
50129 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
50130- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
50131- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
50132+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
50133+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
50134 #endif
50135 }
50136
50137diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
50138index 41d9d07..dbb4772 100644
50139--- a/fs/cifs/smb2pdu.c
50140+++ b/fs/cifs/smb2pdu.c
50141@@ -1761,8 +1761,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
50142 default:
50143 cERROR(1, "info level %u isn't supported",
50144 srch_inf->info_level);
50145- rc = -EINVAL;
50146- goto qdir_exit;
50147+ return -EINVAL;
50148 }
50149
50150 req->FileIndex = cpu_to_le32(index);
50151diff --git a/fs/coda/cache.c b/fs/coda/cache.c
50152index 1da168c..8bc7ff6 100644
50153--- a/fs/coda/cache.c
50154+++ b/fs/coda/cache.c
50155@@ -24,7 +24,7 @@
50156 #include "coda_linux.h"
50157 #include "coda_cache.h"
50158
50159-static atomic_t permission_epoch = ATOMIC_INIT(0);
50160+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
50161
50162 /* replace or extend an acl cache hit */
50163 void coda_cache_enter(struct inode *inode, int mask)
50164@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
50165 struct coda_inode_info *cii = ITOC(inode);
50166
50167 spin_lock(&cii->c_lock);
50168- cii->c_cached_epoch = atomic_read(&permission_epoch);
50169+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
50170 if (!uid_eq(cii->c_uid, current_fsuid())) {
50171 cii->c_uid = current_fsuid();
50172 cii->c_cached_perm = mask;
50173@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
50174 {
50175 struct coda_inode_info *cii = ITOC(inode);
50176 spin_lock(&cii->c_lock);
50177- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
50178+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
50179 spin_unlock(&cii->c_lock);
50180 }
50181
50182 /* remove all acl caches */
50183 void coda_cache_clear_all(struct super_block *sb)
50184 {
50185- atomic_inc(&permission_epoch);
50186+ atomic_inc_unchecked(&permission_epoch);
50187 }
50188
50189
50190@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
50191 spin_lock(&cii->c_lock);
50192 hit = (mask & cii->c_cached_perm) == mask &&
50193 uid_eq(cii->c_uid, current_fsuid()) &&
50194- cii->c_cached_epoch == atomic_read(&permission_epoch);
50195+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
50196 spin_unlock(&cii->c_lock);
50197
50198 return hit;
50199diff --git a/fs/compat.c b/fs/compat.c
50200index d487985..c9e04b1 100644
50201--- a/fs/compat.c
50202+++ b/fs/compat.c
50203@@ -54,7 +54,7 @@
50204 #include <asm/ioctls.h>
50205 #include "internal.h"
50206
50207-int compat_log = 1;
50208+int compat_log = 0;
50209
50210 int compat_printk(const char *fmt, ...)
50211 {
50212@@ -490,7 +490,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
50213
50214 set_fs(KERNEL_DS);
50215 /* The __user pointer cast is valid because of the set_fs() */
50216- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
50217+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
50218 set_fs(oldfs);
50219 /* truncating is ok because it's a user address */
50220 if (!ret)
50221@@ -548,7 +548,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
50222 goto out;
50223
50224 ret = -EINVAL;
50225- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
50226+ if (nr_segs > UIO_MAXIOV)
50227 goto out;
50228 if (nr_segs > fast_segs) {
50229 ret = -ENOMEM;
50230@@ -835,6 +835,7 @@ struct compat_old_linux_dirent {
50231
50232 struct compat_readdir_callback {
50233 struct compat_old_linux_dirent __user *dirent;
50234+ struct file * file;
50235 int result;
50236 };
50237
50238@@ -852,6 +853,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
50239 buf->result = -EOVERFLOW;
50240 return -EOVERFLOW;
50241 }
50242+
50243+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
50244+ return 0;
50245+
50246 buf->result++;
50247 dirent = buf->dirent;
50248 if (!access_ok(VERIFY_WRITE, dirent,
50249@@ -882,6 +887,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
50250
50251 buf.result = 0;
50252 buf.dirent = dirent;
50253+ buf.file = f.file;
50254
50255 error = vfs_readdir(f.file, compat_fillonedir, &buf);
50256 if (buf.result)
50257@@ -901,6 +907,7 @@ struct compat_linux_dirent {
50258 struct compat_getdents_callback {
50259 struct compat_linux_dirent __user *current_dir;
50260 struct compat_linux_dirent __user *previous;
50261+ struct file * file;
50262 int count;
50263 int error;
50264 };
50265@@ -922,6 +929,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
50266 buf->error = -EOVERFLOW;
50267 return -EOVERFLOW;
50268 }
50269+
50270+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
50271+ return 0;
50272+
50273 dirent = buf->previous;
50274 if (dirent) {
50275 if (__put_user(offset, &dirent->d_off))
50276@@ -967,6 +978,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
50277 buf.previous = NULL;
50278 buf.count = count;
50279 buf.error = 0;
50280+ buf.file = f.file;
50281
50282 error = vfs_readdir(f.file, compat_filldir, &buf);
50283 if (error >= 0)
50284@@ -987,6 +999,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
50285 struct compat_getdents_callback64 {
50286 struct linux_dirent64 __user *current_dir;
50287 struct linux_dirent64 __user *previous;
50288+ struct file * file;
50289 int count;
50290 int error;
50291 };
50292@@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
50293 buf->error = -EINVAL; /* only used if we fail.. */
50294 if (reclen > buf->count)
50295 return -EINVAL;
50296+
50297+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
50298+ return 0;
50299+
50300 dirent = buf->previous;
50301
50302 if (dirent) {
50303@@ -1052,13 +1069,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
50304 buf.previous = NULL;
50305 buf.count = count;
50306 buf.error = 0;
50307+ buf.file = f.file;
50308
50309 error = vfs_readdir(f.file, compat_filldir64, &buf);
50310 if (error >= 0)
50311 error = buf.error;
50312 lastdirent = buf.previous;
50313 if (lastdirent) {
50314- typeof(lastdirent->d_off) d_off = f.file->f_pos;
50315+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
50316 if (__put_user_unaligned(d_off, &lastdirent->d_off))
50317 error = -EFAULT;
50318 else
50319diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
50320index a81147e..20bf2b5 100644
50321--- a/fs/compat_binfmt_elf.c
50322+++ b/fs/compat_binfmt_elf.c
50323@@ -30,11 +30,13 @@
50324 #undef elf_phdr
50325 #undef elf_shdr
50326 #undef elf_note
50327+#undef elf_dyn
50328 #undef elf_addr_t
50329 #define elfhdr elf32_hdr
50330 #define elf_phdr elf32_phdr
50331 #define elf_shdr elf32_shdr
50332 #define elf_note elf32_note
50333+#define elf_dyn Elf32_Dyn
50334 #define elf_addr_t Elf32_Addr
50335
50336 /*
50337diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
50338index 3ced75f..b28d192 100644
50339--- a/fs/compat_ioctl.c
50340+++ b/fs/compat_ioctl.c
50341@@ -623,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
50342 return -EFAULT;
50343 if (__get_user(udata, &ss32->iomem_base))
50344 return -EFAULT;
50345- ss.iomem_base = compat_ptr(udata);
50346+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
50347 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
50348 __get_user(ss.port_high, &ss32->port_high))
50349 return -EFAULT;
50350@@ -704,8 +704,8 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
50351 for (i = 0; i < nmsgs; i++) {
50352 if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
50353 return -EFAULT;
50354- if (get_user(datap, &umsgs[i].buf) ||
50355- put_user(compat_ptr(datap), &tmsgs[i].buf))
50356+ if (get_user(datap, (u8 __user * __user *)&umsgs[i].buf) ||
50357+ put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
50358 return -EFAULT;
50359 }
50360 return sys_ioctl(fd, cmd, (unsigned long)tdata);
50361@@ -798,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
50362 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
50363 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
50364 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
50365- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
50366+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
50367 return -EFAULT;
50368
50369 return ioctl_preallocate(file, p);
50370@@ -1620,8 +1620,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
50371 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
50372 {
50373 unsigned int a, b;
50374- a = *(unsigned int *)p;
50375- b = *(unsigned int *)q;
50376+ a = *(const unsigned int *)p;
50377+ b = *(const unsigned int *)q;
50378 if (a > b)
50379 return 1;
50380 if (a < b)
50381diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
50382index 7aabc6a..34c1197 100644
50383--- a/fs/configfs/dir.c
50384+++ b/fs/configfs/dir.c
50385@@ -1565,7 +1565,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
50386 }
50387 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
50388 struct configfs_dirent *next;
50389- const char * name;
50390+ const unsigned char * name;
50391+ char d_name[sizeof(next->s_dentry->d_iname)];
50392 int len;
50393 struct inode *inode = NULL;
50394
50395@@ -1575,7 +1576,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
50396 continue;
50397
50398 name = configfs_get_name(next);
50399- len = strlen(name);
50400+ if (next->s_dentry && name == next->s_dentry->d_iname) {
50401+ len = next->s_dentry->d_name.len;
50402+ memcpy(d_name, name, len);
50403+ name = d_name;
50404+ } else
50405+ len = strlen(name);
50406
50407 /*
50408 * We'll have a dentry and an inode for
50409diff --git a/fs/coredump.c b/fs/coredump.c
50410index c647965..a77bff3 100644
50411--- a/fs/coredump.c
50412+++ b/fs/coredump.c
50413@@ -52,7 +52,7 @@ struct core_name {
50414 char *corename;
50415 int used, size;
50416 };
50417-static atomic_t call_count = ATOMIC_INIT(1);
50418+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
50419
50420 /* The maximal length of core_pattern is also specified in sysctl.c */
50421
50422@@ -60,7 +60,7 @@ static int expand_corename(struct core_name *cn)
50423 {
50424 char *old_corename = cn->corename;
50425
50426- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
50427+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
50428 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
50429
50430 if (!cn->corename) {
50431@@ -157,7 +157,7 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm)
50432 int pid_in_pattern = 0;
50433 int err = 0;
50434
50435- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
50436+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
50437 cn->corename = kmalloc(cn->size, GFP_KERNEL);
50438 cn->used = 0;
50439
50440@@ -414,17 +414,17 @@ static void wait_for_dump_helpers(struct file *file)
50441 pipe = file_inode(file)->i_pipe;
50442
50443 pipe_lock(pipe);
50444- pipe->readers++;
50445- pipe->writers--;
50446+ atomic_inc(&pipe->readers);
50447+ atomic_dec(&pipe->writers);
50448
50449- while ((pipe->readers > 1) && (!signal_pending(current))) {
50450+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
50451 wake_up_interruptible_sync(&pipe->wait);
50452 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
50453 pipe_wait(pipe);
50454 }
50455
50456- pipe->readers--;
50457- pipe->writers++;
50458+ atomic_dec(&pipe->readers);
50459+ atomic_inc(&pipe->writers);
50460 pipe_unlock(pipe);
50461
50462 }
50463@@ -471,7 +471,8 @@ void do_coredump(siginfo_t *siginfo)
50464 int ispipe;
50465 struct files_struct *displaced;
50466 bool need_nonrelative = false;
50467- static atomic_t core_dump_count = ATOMIC_INIT(0);
50468+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
50469+ long signr = siginfo->si_signo;
50470 struct coredump_params cprm = {
50471 .siginfo = siginfo,
50472 .regs = signal_pt_regs(),
50473@@ -484,7 +485,10 @@ void do_coredump(siginfo_t *siginfo)
50474 .mm_flags = mm->flags,
50475 };
50476
50477- audit_core_dumps(siginfo->si_signo);
50478+ audit_core_dumps(signr);
50479+
50480+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
50481+ gr_handle_brute_attach(cprm.mm_flags);
50482
50483 binfmt = mm->binfmt;
50484 if (!binfmt || !binfmt->core_dump)
50485@@ -508,7 +512,7 @@ void do_coredump(siginfo_t *siginfo)
50486 need_nonrelative = true;
50487 }
50488
50489- retval = coredump_wait(siginfo->si_signo, &core_state);
50490+ retval = coredump_wait(signr, &core_state);
50491 if (retval < 0)
50492 goto fail_creds;
50493
50494@@ -556,7 +560,7 @@ void do_coredump(siginfo_t *siginfo)
50495 }
50496 cprm.limit = RLIM_INFINITY;
50497
50498- dump_count = atomic_inc_return(&core_dump_count);
50499+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
50500 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
50501 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
50502 task_tgid_vnr(current), current->comm);
50503@@ -583,6 +587,8 @@ void do_coredump(siginfo_t *siginfo)
50504 } else {
50505 struct inode *inode;
50506
50507+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
50508+
50509 if (cprm.limit < binfmt->min_coredump)
50510 goto fail_unlock;
50511
50512@@ -640,7 +646,7 @@ close_fail:
50513 filp_close(cprm.file, NULL);
50514 fail_dropcount:
50515 if (ispipe)
50516- atomic_dec(&core_dump_count);
50517+ atomic_dec_unchecked(&core_dump_count);
50518 fail_unlock:
50519 kfree(cn.corename);
50520 fail_corename:
50521@@ -659,7 +665,7 @@ fail:
50522 */
50523 int dump_write(struct file *file, const void *addr, int nr)
50524 {
50525- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
50526+ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
50527 }
50528 EXPORT_SYMBOL(dump_write);
50529
50530diff --git a/fs/dcache.c b/fs/dcache.c
50531index e689268..f36956e 100644
50532--- a/fs/dcache.c
50533+++ b/fs/dcache.c
50534@@ -3100,7 +3100,7 @@ void __init vfs_caches_init(unsigned long mempages)
50535 mempages -= reserve;
50536
50537 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
50538- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
50539+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
50540
50541 dcache_init();
50542 inode_init();
50543diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
50544index 4888cb3..e0f7cf8 100644
50545--- a/fs/debugfs/inode.c
50546+++ b/fs/debugfs/inode.c
50547@@ -415,7 +415,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
50548 */
50549 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
50550 {
50551+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
50552+ return __create_file(name, S_IFDIR | S_IRWXU,
50553+#else
50554 return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
50555+#endif
50556 parent, NULL, NULL);
50557 }
50558 EXPORT_SYMBOL_GPL(debugfs_create_dir);
50559diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
50560index 5eab400..810a3f5 100644
50561--- a/fs/ecryptfs/inode.c
50562+++ b/fs/ecryptfs/inode.c
50563@@ -674,7 +674,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
50564 old_fs = get_fs();
50565 set_fs(get_ds());
50566 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
50567- (char __user *)lower_buf,
50568+ (char __force_user *)lower_buf,
50569 PATH_MAX);
50570 set_fs(old_fs);
50571 if (rc < 0)
50572@@ -706,7 +706,7 @@ out:
50573 static void
50574 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
50575 {
50576- char *buf = nd_get_link(nd);
50577+ const char *buf = nd_get_link(nd);
50578 if (!IS_ERR(buf)) {
50579 /* Free the char* */
50580 kfree(buf);
50581diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
50582index e4141f2..d8263e8 100644
50583--- a/fs/ecryptfs/miscdev.c
50584+++ b/fs/ecryptfs/miscdev.c
50585@@ -304,7 +304,7 @@ check_list:
50586 goto out_unlock_msg_ctx;
50587 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
50588 if (msg_ctx->msg) {
50589- if (copy_to_user(&buf[i], packet_length, packet_length_size))
50590+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
50591 goto out_unlock_msg_ctx;
50592 i += packet_length_size;
50593 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
50594diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
50595index 6a16053..2155147 100644
50596--- a/fs/ecryptfs/read_write.c
50597+++ b/fs/ecryptfs/read_write.c
50598@@ -240,7 +240,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
50599 return -EIO;
50600 fs_save = get_fs();
50601 set_fs(get_ds());
50602- rc = vfs_read(lower_file, data, size, &offset);
50603+ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
50604 set_fs(fs_save);
50605 return rc;
50606 }
50607diff --git a/fs/exec.c b/fs/exec.c
50608index 6d56ff2..3bc6638 100644
50609--- a/fs/exec.c
50610+++ b/fs/exec.c
50611@@ -55,8 +55,20 @@
50612 #include <linux/pipe_fs_i.h>
50613 #include <linux/oom.h>
50614 #include <linux/compat.h>
50615+#include <linux/random.h>
50616+#include <linux/seq_file.h>
50617+#include <linux/coredump.h>
50618+#include <linux/mman.h>
50619+
50620+#ifdef CONFIG_PAX_REFCOUNT
50621+#include <linux/kallsyms.h>
50622+#include <linux/kdebug.h>
50623+#endif
50624+
50625+#include <trace/events/fs.h>
50626
50627 #include <asm/uaccess.h>
50628+#include <asm/sections.h>
50629 #include <asm/mmu_context.h>
50630 #include <asm/tlb.h>
50631
50632@@ -66,6 +78,18 @@
50633
50634 #include <trace/events/sched.h>
50635
50636+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
50637+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
50638+{
50639+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
50640+}
50641+#endif
50642+
50643+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
50644+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
50645+EXPORT_SYMBOL(pax_set_initial_flags_func);
50646+#endif
50647+
50648 int suid_dumpable = 0;
50649
50650 static LIST_HEAD(formats);
50651@@ -75,8 +99,8 @@ void __register_binfmt(struct linux_binfmt * fmt, int insert)
50652 {
50653 BUG_ON(!fmt);
50654 write_lock(&binfmt_lock);
50655- insert ? list_add(&fmt->lh, &formats) :
50656- list_add_tail(&fmt->lh, &formats);
50657+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
50658+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
50659 write_unlock(&binfmt_lock);
50660 }
50661
50662@@ -85,7 +109,7 @@ EXPORT_SYMBOL(__register_binfmt);
50663 void unregister_binfmt(struct linux_binfmt * fmt)
50664 {
50665 write_lock(&binfmt_lock);
50666- list_del(&fmt->lh);
50667+ pax_list_del((struct list_head *)&fmt->lh);
50668 write_unlock(&binfmt_lock);
50669 }
50670
50671@@ -180,18 +204,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
50672 int write)
50673 {
50674 struct page *page;
50675- int ret;
50676
50677-#ifdef CONFIG_STACK_GROWSUP
50678- if (write) {
50679- ret = expand_downwards(bprm->vma, pos);
50680- if (ret < 0)
50681- return NULL;
50682- }
50683-#endif
50684- ret = get_user_pages(current, bprm->mm, pos,
50685- 1, write, 1, &page, NULL);
50686- if (ret <= 0)
50687+ if (0 > expand_downwards(bprm->vma, pos))
50688+ return NULL;
50689+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
50690 return NULL;
50691
50692 if (write) {
50693@@ -207,6 +223,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
50694 if (size <= ARG_MAX)
50695 return page;
50696
50697+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50698+ // only allow 512KB for argv+env on suid/sgid binaries
50699+ // to prevent easy ASLR exhaustion
50700+ if (((!uid_eq(bprm->cred->euid, current_euid())) ||
50701+ (!gid_eq(bprm->cred->egid, current_egid()))) &&
50702+ (size > (512 * 1024))) {
50703+ put_page(page);
50704+ return NULL;
50705+ }
50706+#endif
50707+
50708 /*
50709 * Limit to 1/4-th the stack size for the argv+env strings.
50710 * This ensures that:
50711@@ -266,6 +293,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
50712 vma->vm_end = STACK_TOP_MAX;
50713 vma->vm_start = vma->vm_end - PAGE_SIZE;
50714 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
50715+
50716+#ifdef CONFIG_PAX_SEGMEXEC
50717+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
50718+#endif
50719+
50720 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
50721 INIT_LIST_HEAD(&vma->anon_vma_chain);
50722
50723@@ -276,6 +308,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
50724 mm->stack_vm = mm->total_vm = 1;
50725 up_write(&mm->mmap_sem);
50726 bprm->p = vma->vm_end - sizeof(void *);
50727+
50728+#ifdef CONFIG_PAX_RANDUSTACK
50729+ if (randomize_va_space)
50730+ bprm->p ^= random32() & ~PAGE_MASK;
50731+#endif
50732+
50733 return 0;
50734 err:
50735 up_write(&mm->mmap_sem);
50736@@ -396,7 +434,7 @@ struct user_arg_ptr {
50737 } ptr;
50738 };
50739
50740-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
50741+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
50742 {
50743 const char __user *native;
50744
50745@@ -405,14 +443,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
50746 compat_uptr_t compat;
50747
50748 if (get_user(compat, argv.ptr.compat + nr))
50749- return ERR_PTR(-EFAULT);
50750+ return (const char __force_user *)ERR_PTR(-EFAULT);
50751
50752 return compat_ptr(compat);
50753 }
50754 #endif
50755
50756 if (get_user(native, argv.ptr.native + nr))
50757- return ERR_PTR(-EFAULT);
50758+ return (const char __force_user *)ERR_PTR(-EFAULT);
50759
50760 return native;
50761 }
50762@@ -431,7 +469,7 @@ static int count(struct user_arg_ptr argv, int max)
50763 if (!p)
50764 break;
50765
50766- if (IS_ERR(p))
50767+ if (IS_ERR((const char __force_kernel *)p))
50768 return -EFAULT;
50769
50770 if (i >= max)
50771@@ -466,7 +504,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
50772
50773 ret = -EFAULT;
50774 str = get_user_arg_ptr(argv, argc);
50775- if (IS_ERR(str))
50776+ if (IS_ERR((const char __force_kernel *)str))
50777 goto out;
50778
50779 len = strnlen_user(str, MAX_ARG_STRLEN);
50780@@ -548,7 +586,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
50781 int r;
50782 mm_segment_t oldfs = get_fs();
50783 struct user_arg_ptr argv = {
50784- .ptr.native = (const char __user *const __user *)__argv,
50785+ .ptr.native = (const char __force_user * const __force_user *)__argv,
50786 };
50787
50788 set_fs(KERNEL_DS);
50789@@ -583,7 +621,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
50790 unsigned long new_end = old_end - shift;
50791 struct mmu_gather tlb;
50792
50793- BUG_ON(new_start > new_end);
50794+ if (new_start >= new_end || new_start < mmap_min_addr)
50795+ return -ENOMEM;
50796
50797 /*
50798 * ensure there are no vmas between where we want to go
50799@@ -592,6 +631,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
50800 if (vma != find_vma(mm, new_start))
50801 return -EFAULT;
50802
50803+#ifdef CONFIG_PAX_SEGMEXEC
50804+ BUG_ON(pax_find_mirror_vma(vma));
50805+#endif
50806+
50807 /*
50808 * cover the whole range: [new_start, old_end)
50809 */
50810@@ -672,10 +715,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
50811 stack_top = arch_align_stack(stack_top);
50812 stack_top = PAGE_ALIGN(stack_top);
50813
50814- if (unlikely(stack_top < mmap_min_addr) ||
50815- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
50816- return -ENOMEM;
50817-
50818 stack_shift = vma->vm_end - stack_top;
50819
50820 bprm->p -= stack_shift;
50821@@ -687,8 +726,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
50822 bprm->exec -= stack_shift;
50823
50824 down_write(&mm->mmap_sem);
50825+
50826+ /* Move stack pages down in memory. */
50827+ if (stack_shift) {
50828+ ret = shift_arg_pages(vma, stack_shift);
50829+ if (ret)
50830+ goto out_unlock;
50831+ }
50832+
50833 vm_flags = VM_STACK_FLAGS;
50834
50835+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
50836+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
50837+ vm_flags &= ~VM_EXEC;
50838+
50839+#ifdef CONFIG_PAX_MPROTECT
50840+ if (mm->pax_flags & MF_PAX_MPROTECT)
50841+ vm_flags &= ~VM_MAYEXEC;
50842+#endif
50843+
50844+ }
50845+#endif
50846+
50847 /*
50848 * Adjust stack execute permissions; explicitly enable for
50849 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
50850@@ -707,13 +766,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
50851 goto out_unlock;
50852 BUG_ON(prev != vma);
50853
50854- /* Move stack pages down in memory. */
50855- if (stack_shift) {
50856- ret = shift_arg_pages(vma, stack_shift);
50857- if (ret)
50858- goto out_unlock;
50859- }
50860-
50861 /* mprotect_fixup is overkill to remove the temporary stack flags */
50862 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
50863
50864@@ -737,6 +789,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
50865 #endif
50866 current->mm->start_stack = bprm->p;
50867 ret = expand_stack(vma, stack_base);
50868+
50869+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
50870+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
50871+ unsigned long size;
50872+ vm_flags_t vm_flags;
50873+
50874+ size = STACK_TOP - vma->vm_end;
50875+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
50876+
50877+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0);
50878+
50879+#ifdef CONFIG_X86
50880+ if (!ret) {
50881+ size = mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
50882+ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0);
50883+ }
50884+#endif
50885+
50886+ }
50887+#endif
50888+
50889 if (ret)
50890 ret = -EFAULT;
50891
50892@@ -772,6 +845,8 @@ struct file *open_exec(const char *name)
50893
50894 fsnotify_open(file);
50895
50896+ trace_open_exec(name);
50897+
50898 err = deny_write_access(file);
50899 if (err)
50900 goto exit;
50901@@ -795,7 +870,7 @@ int kernel_read(struct file *file, loff_t offset,
50902 old_fs = get_fs();
50903 set_fs(get_ds());
50904 /* The cast to a user pointer is valid due to the set_fs() */
50905- result = vfs_read(file, (void __user *)addr, count, &pos);
50906+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
50907 set_fs(old_fs);
50908 return result;
50909 }
50910@@ -1250,7 +1325,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
50911 }
50912 rcu_read_unlock();
50913
50914- if (p->fs->users > n_fs) {
50915+ if (atomic_read(&p->fs->users) > n_fs) {
50916 bprm->unsafe |= LSM_UNSAFE_SHARE;
50917 } else {
50918 res = -EAGAIN;
50919@@ -1450,6 +1525,31 @@ int search_binary_handler(struct linux_binprm *bprm)
50920
50921 EXPORT_SYMBOL(search_binary_handler);
50922
50923+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50924+static DEFINE_PER_CPU(u64, exec_counter);
50925+static int __init init_exec_counters(void)
50926+{
50927+ unsigned int cpu;
50928+
50929+ for_each_possible_cpu(cpu) {
50930+ per_cpu(exec_counter, cpu) = (u64)cpu;
50931+ }
50932+
50933+ return 0;
50934+}
50935+early_initcall(init_exec_counters);
50936+static inline void increment_exec_counter(void)
50937+{
50938+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
50939+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
50940+}
50941+#else
50942+static inline void increment_exec_counter(void) {}
50943+#endif
50944+
50945+extern void gr_handle_exec_args(struct linux_binprm *bprm,
50946+ struct user_arg_ptr argv);
50947+
50948 /*
50949 * sys_execve() executes a new program.
50950 */
50951@@ -1457,6 +1557,11 @@ static int do_execve_common(const char *filename,
50952 struct user_arg_ptr argv,
50953 struct user_arg_ptr envp)
50954 {
50955+#ifdef CONFIG_GRKERNSEC
50956+ struct file *old_exec_file;
50957+ struct acl_subject_label *old_acl;
50958+ struct rlimit old_rlim[RLIM_NLIMITS];
50959+#endif
50960 struct linux_binprm *bprm;
50961 struct file *file;
50962 struct files_struct *displaced;
50963@@ -1464,6 +1569,8 @@ static int do_execve_common(const char *filename,
50964 int retval;
50965 const struct cred *cred = current_cred();
50966
50967+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&cred->user->processes), 1);
50968+
50969 /*
50970 * We move the actual failure in case of RLIMIT_NPROC excess from
50971 * set*uid() to execve() because too many poorly written programs
50972@@ -1504,12 +1611,27 @@ static int do_execve_common(const char *filename,
50973 if (IS_ERR(file))
50974 goto out_unmark;
50975
50976+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
50977+ retval = -EPERM;
50978+ goto out_file;
50979+ }
50980+
50981 sched_exec();
50982
50983 bprm->file = file;
50984 bprm->filename = filename;
50985 bprm->interp = filename;
50986
50987+ if (gr_process_user_ban()) {
50988+ retval = -EPERM;
50989+ goto out_file;
50990+ }
50991+
50992+ if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) {
50993+ retval = -EACCES;
50994+ goto out_file;
50995+ }
50996+
50997 retval = bprm_mm_init(bprm);
50998 if (retval)
50999 goto out_file;
51000@@ -1526,24 +1648,65 @@ static int do_execve_common(const char *filename,
51001 if (retval < 0)
51002 goto out;
51003
51004+#ifdef CONFIG_GRKERNSEC
51005+ old_acl = current->acl;
51006+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
51007+ old_exec_file = current->exec_file;
51008+ get_file(file);
51009+ current->exec_file = file;
51010+#endif
51011+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51012+ /* limit suid stack to 8MB
51013+ * we saved the old limits above and will restore them if this exec fails
51014+ */
51015+ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
51016+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
51017+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
51018+#endif
51019+
51020+ if (!gr_tpe_allow(file)) {
51021+ retval = -EACCES;
51022+ goto out_fail;
51023+ }
51024+
51025+ if (gr_check_crash_exec(file)) {
51026+ retval = -EACCES;
51027+ goto out_fail;
51028+ }
51029+
51030+ retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt,
51031+ bprm->unsafe);
51032+ if (retval < 0)
51033+ goto out_fail;
51034+
51035 retval = copy_strings_kernel(1, &bprm->filename, bprm);
51036 if (retval < 0)
51037- goto out;
51038+ goto out_fail;
51039
51040 bprm->exec = bprm->p;
51041 retval = copy_strings(bprm->envc, envp, bprm);
51042 if (retval < 0)
51043- goto out;
51044+ goto out_fail;
51045
51046 retval = copy_strings(bprm->argc, argv, bprm);
51047 if (retval < 0)
51048- goto out;
51049+ goto out_fail;
51050+
51051+ gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt);
51052+
51053+ gr_handle_exec_args(bprm, argv);
51054
51055 retval = search_binary_handler(bprm);
51056 if (retval < 0)
51057- goto out;
51058+ goto out_fail;
51059+#ifdef CONFIG_GRKERNSEC
51060+ if (old_exec_file)
51061+ fput(old_exec_file);
51062+#endif
51063
51064 /* execve succeeded */
51065+
51066+ increment_exec_counter();
51067 current->fs->in_exec = 0;
51068 current->in_execve = 0;
51069 acct_update_integrals(current);
51070@@ -1552,6 +1715,14 @@ static int do_execve_common(const char *filename,
51071 put_files_struct(displaced);
51072 return retval;
51073
51074+out_fail:
51075+#ifdef CONFIG_GRKERNSEC
51076+ current->acl = old_acl;
51077+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
51078+ fput(current->exec_file);
51079+ current->exec_file = old_exec_file;
51080+#endif
51081+
51082 out:
51083 if (bprm->mm) {
51084 acct_arg_size(bprm, 0);
51085@@ -1700,3 +1871,283 @@ asmlinkage long compat_sys_execve(const char __user * filename,
51086 return error;
51087 }
51088 #endif
51089+
51090+int pax_check_flags(unsigned long *flags)
51091+{
51092+ int retval = 0;
51093+
51094+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
51095+ if (*flags & MF_PAX_SEGMEXEC)
51096+ {
51097+ *flags &= ~MF_PAX_SEGMEXEC;
51098+ retval = -EINVAL;
51099+ }
51100+#endif
51101+
51102+ if ((*flags & MF_PAX_PAGEEXEC)
51103+
51104+#ifdef CONFIG_PAX_PAGEEXEC
51105+ && (*flags & MF_PAX_SEGMEXEC)
51106+#endif
51107+
51108+ )
51109+ {
51110+ *flags &= ~MF_PAX_PAGEEXEC;
51111+ retval = -EINVAL;
51112+ }
51113+
51114+ if ((*flags & MF_PAX_MPROTECT)
51115+
51116+#ifdef CONFIG_PAX_MPROTECT
51117+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
51118+#endif
51119+
51120+ )
51121+ {
51122+ *flags &= ~MF_PAX_MPROTECT;
51123+ retval = -EINVAL;
51124+ }
51125+
51126+ if ((*flags & MF_PAX_EMUTRAMP)
51127+
51128+#ifdef CONFIG_PAX_EMUTRAMP
51129+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
51130+#endif
51131+
51132+ )
51133+ {
51134+ *flags &= ~MF_PAX_EMUTRAMP;
51135+ retval = -EINVAL;
51136+ }
51137+
51138+ return retval;
51139+}
51140+
51141+EXPORT_SYMBOL(pax_check_flags);
51142+
51143+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
51144+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
51145+{
51146+ struct task_struct *tsk = current;
51147+ struct mm_struct *mm = current->mm;
51148+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
51149+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
51150+ char *path_exec = NULL;
51151+ char *path_fault = NULL;
51152+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
51153+ siginfo_t info = { };
51154+
51155+ if (buffer_exec && buffer_fault) {
51156+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
51157+
51158+ down_read(&mm->mmap_sem);
51159+ vma = mm->mmap;
51160+ while (vma && (!vma_exec || !vma_fault)) {
51161+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
51162+ vma_exec = vma;
51163+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
51164+ vma_fault = vma;
51165+ vma = vma->vm_next;
51166+ }
51167+ if (vma_exec) {
51168+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
51169+ if (IS_ERR(path_exec))
51170+ path_exec = "<path too long>";
51171+ else {
51172+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
51173+ if (path_exec) {
51174+ *path_exec = 0;
51175+ path_exec = buffer_exec;
51176+ } else
51177+ path_exec = "<path too long>";
51178+ }
51179+ }
51180+ if (vma_fault) {
51181+ start = vma_fault->vm_start;
51182+ end = vma_fault->vm_end;
51183+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
51184+ if (vma_fault->vm_file) {
51185+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
51186+ if (IS_ERR(path_fault))
51187+ path_fault = "<path too long>";
51188+ else {
51189+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
51190+ if (path_fault) {
51191+ *path_fault = 0;
51192+ path_fault = buffer_fault;
51193+ } else
51194+ path_fault = "<path too long>";
51195+ }
51196+ } else
51197+ path_fault = "<anonymous mapping>";
51198+ }
51199+ up_read(&mm->mmap_sem);
51200+ }
51201+ if (tsk->signal->curr_ip)
51202+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
51203+ else
51204+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
51205+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
51206+ from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
51207+ free_page((unsigned long)buffer_exec);
51208+ free_page((unsigned long)buffer_fault);
51209+ pax_report_insns(regs, pc, sp);
51210+ info.si_signo = SIGKILL;
51211+ info.si_errno = 0;
51212+ info.si_code = SI_KERNEL;
51213+ info.si_pid = 0;
51214+ info.si_uid = 0;
51215+ do_coredump(&info);
51216+}
51217+#endif
51218+
51219+#ifdef CONFIG_PAX_REFCOUNT
51220+void pax_report_refcount_overflow(struct pt_regs *regs)
51221+{
51222+ if (current->signal->curr_ip)
51223+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
51224+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
51225+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
51226+ else
51227+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
51228+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
51229+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
51230+ show_regs(regs);
51231+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
51232+}
51233+#endif
51234+
51235+#ifdef CONFIG_PAX_USERCOPY
51236+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
51237+static noinline int check_stack_object(const void *obj, unsigned long len)
51238+{
51239+ const void * const stack = task_stack_page(current);
51240+ const void * const stackend = stack + THREAD_SIZE;
51241+
51242+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
51243+ const void *frame = NULL;
51244+ const void *oldframe;
51245+#endif
51246+
51247+ if (obj + len < obj)
51248+ return -1;
51249+
51250+ if (obj + len <= stack || stackend <= obj)
51251+ return 0;
51252+
51253+ if (obj < stack || stackend < obj + len)
51254+ return -1;
51255+
51256+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
51257+ oldframe = __builtin_frame_address(1);
51258+ if (oldframe)
51259+ frame = __builtin_frame_address(2);
51260+ /*
51261+ low ----------------------------------------------> high
51262+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
51263+ ^----------------^
51264+ allow copies only within here
51265+ */
51266+ while (stack <= frame && frame < stackend) {
51267+ /* if obj + len extends past the last frame, this
51268+ check won't pass and the next frame will be 0,
51269+ causing us to bail out and correctly report
51270+ the copy as invalid
51271+ */
51272+ if (obj + len <= frame)
51273+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
51274+ oldframe = frame;
51275+ frame = *(const void * const *)frame;
51276+ }
51277+ return -1;
51278+#else
51279+ return 1;
51280+#endif
51281+}
51282+
51283+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
51284+{
51285+ if (current->signal->curr_ip)
51286+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
51287+ &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
51288+ else
51289+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
51290+ to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
51291+ dump_stack();
51292+ gr_handle_kernel_exploit();
51293+ do_group_exit(SIGKILL);
51294+}
51295+#endif
51296+
51297+#ifdef CONFIG_PAX_USERCOPY
51298+static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
51299+{
51300+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
51301+ unsigned long textlow = ktla_ktva((unsigned long)_stext);
51302+#ifdef CONFIG_MODULES
51303+ unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
51304+#else
51305+ unsigned long texthigh = ktla_ktva((unsigned long)_etext);
51306+#endif
51307+
51308+#else
51309+ unsigned long textlow = (unsigned long)_stext;
51310+ unsigned long texthigh = (unsigned long)_etext;
51311+#endif
51312+
51313+ if (high <= textlow || low > texthigh)
51314+ return false;
51315+ else
51316+ return true;
51317+}
51318+#endif
51319+
51320+void __check_object_size(const void *ptr, unsigned long n, bool to_user)
51321+{
51322+
51323+#ifdef CONFIG_PAX_USERCOPY
51324+ const char *type;
51325+
51326+ if (!n)
51327+ return;
51328+
51329+ type = check_heap_object(ptr, n);
51330+ if (!type) {
51331+ int ret = check_stack_object(ptr, n);
51332+ if (ret == 1 || ret == 2)
51333+ return;
51334+ if (ret == 0) {
51335+ if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
51336+ type = "<kernel text>";
51337+ else
51338+ return;
51339+ } else
51340+ type = "<process stack>";
51341+ }
51342+
51343+ pax_report_usercopy(ptr, n, to_user, type);
51344+#endif
51345+
51346+}
51347+EXPORT_SYMBOL(__check_object_size);
51348+
51349+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
51350+void pax_track_stack(void)
51351+{
51352+ unsigned long sp = (unsigned long)&sp;
51353+ if (sp < current_thread_info()->lowest_stack &&
51354+ sp > (unsigned long)task_stack_page(current))
51355+ current_thread_info()->lowest_stack = sp;
51356+}
51357+EXPORT_SYMBOL(pax_track_stack);
51358+#endif
51359+
51360+#ifdef CONFIG_PAX_SIZE_OVERFLOW
51361+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
51362+{
51363+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
51364+ dump_stack();
51365+ do_group_exit(SIGKILL);
51366+}
51367+EXPORT_SYMBOL(report_size_overflow);
51368+#endif
51369diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
51370index 9f9992b..8b59411 100644
51371--- a/fs/ext2/balloc.c
51372+++ b/fs/ext2/balloc.c
51373@@ -1184,10 +1184,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
51374
51375 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
51376 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
51377- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
51378+ if (free_blocks < root_blocks + 1 &&
51379 !uid_eq(sbi->s_resuid, current_fsuid()) &&
51380 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
51381- !in_group_p (sbi->s_resgid))) {
51382+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
51383 return 0;
51384 }
51385 return 1;
51386diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
51387index 22548f5..41521d8 100644
51388--- a/fs/ext3/balloc.c
51389+++ b/fs/ext3/balloc.c
51390@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
51391
51392 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
51393 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
51394- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
51395+ if (free_blocks < root_blocks + 1 &&
51396 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
51397 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
51398- !in_group_p (sbi->s_resgid))) {
51399+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
51400 return 0;
51401 }
51402 return 1;
51403diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
51404index 92e68b3..115d987 100644
51405--- a/fs/ext4/balloc.c
51406+++ b/fs/ext4/balloc.c
51407@@ -505,8 +505,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
51408 /* Hm, nope. Are (enough) root reserved clusters available? */
51409 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
51410 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
51411- capable(CAP_SYS_RESOURCE) ||
51412- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
51413+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
51414+ capable_nolog(CAP_SYS_RESOURCE)) {
51415
51416 if (free_clusters >= (nclusters + dirty_clusters))
51417 return 1;
51418diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
51419index 3b83cd6..0f34dcd 100644
51420--- a/fs/ext4/ext4.h
51421+++ b/fs/ext4/ext4.h
51422@@ -1254,19 +1254,19 @@ struct ext4_sb_info {
51423 unsigned long s_mb_last_start;
51424
51425 /* stats for buddy allocator */
51426- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
51427- atomic_t s_bal_success; /* we found long enough chunks */
51428- atomic_t s_bal_allocated; /* in blocks */
51429- atomic_t s_bal_ex_scanned; /* total extents scanned */
51430- atomic_t s_bal_goals; /* goal hits */
51431- atomic_t s_bal_breaks; /* too long searches */
51432- atomic_t s_bal_2orders; /* 2^order hits */
51433+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
51434+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
51435+ atomic_unchecked_t s_bal_allocated; /* in blocks */
51436+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
51437+ atomic_unchecked_t s_bal_goals; /* goal hits */
51438+ atomic_unchecked_t s_bal_breaks; /* too long searches */
51439+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
51440 spinlock_t s_bal_lock;
51441 unsigned long s_mb_buddies_generated;
51442 unsigned long long s_mb_generation_time;
51443- atomic_t s_mb_lost_chunks;
51444- atomic_t s_mb_preallocated;
51445- atomic_t s_mb_discarded;
51446+ atomic_unchecked_t s_mb_lost_chunks;
51447+ atomic_unchecked_t s_mb_preallocated;
51448+ atomic_unchecked_t s_mb_discarded;
51449 atomic_t s_lock_busy;
51450
51451 /* locality groups */
51452diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
51453index f3190ab..84ffb21 100644
51454--- a/fs/ext4/mballoc.c
51455+++ b/fs/ext4/mballoc.c
51456@@ -1754,7 +1754,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
51457 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
51458
51459 if (EXT4_SB(sb)->s_mb_stats)
51460- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
51461+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
51462
51463 break;
51464 }
51465@@ -2059,7 +2059,7 @@ repeat:
51466 ac->ac_status = AC_STATUS_CONTINUE;
51467 ac->ac_flags |= EXT4_MB_HINT_FIRST;
51468 cr = 3;
51469- atomic_inc(&sbi->s_mb_lost_chunks);
51470+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
51471 goto repeat;
51472 }
51473 }
51474@@ -2567,25 +2567,25 @@ int ext4_mb_release(struct super_block *sb)
51475 if (sbi->s_mb_stats) {
51476 ext4_msg(sb, KERN_INFO,
51477 "mballoc: %u blocks %u reqs (%u success)",
51478- atomic_read(&sbi->s_bal_allocated),
51479- atomic_read(&sbi->s_bal_reqs),
51480- atomic_read(&sbi->s_bal_success));
51481+ atomic_read_unchecked(&sbi->s_bal_allocated),
51482+ atomic_read_unchecked(&sbi->s_bal_reqs),
51483+ atomic_read_unchecked(&sbi->s_bal_success));
51484 ext4_msg(sb, KERN_INFO,
51485 "mballoc: %u extents scanned, %u goal hits, "
51486 "%u 2^N hits, %u breaks, %u lost",
51487- atomic_read(&sbi->s_bal_ex_scanned),
51488- atomic_read(&sbi->s_bal_goals),
51489- atomic_read(&sbi->s_bal_2orders),
51490- atomic_read(&sbi->s_bal_breaks),
51491- atomic_read(&sbi->s_mb_lost_chunks));
51492+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
51493+ atomic_read_unchecked(&sbi->s_bal_goals),
51494+ atomic_read_unchecked(&sbi->s_bal_2orders),
51495+ atomic_read_unchecked(&sbi->s_bal_breaks),
51496+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
51497 ext4_msg(sb, KERN_INFO,
51498 "mballoc: %lu generated and it took %Lu",
51499 sbi->s_mb_buddies_generated,
51500 sbi->s_mb_generation_time);
51501 ext4_msg(sb, KERN_INFO,
51502 "mballoc: %u preallocated, %u discarded",
51503- atomic_read(&sbi->s_mb_preallocated),
51504- atomic_read(&sbi->s_mb_discarded));
51505+ atomic_read_unchecked(&sbi->s_mb_preallocated),
51506+ atomic_read_unchecked(&sbi->s_mb_discarded));
51507 }
51508
51509 free_percpu(sbi->s_locality_groups);
51510@@ -3039,16 +3039,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
51511 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
51512
51513 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
51514- atomic_inc(&sbi->s_bal_reqs);
51515- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
51516+ atomic_inc_unchecked(&sbi->s_bal_reqs);
51517+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
51518 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
51519- atomic_inc(&sbi->s_bal_success);
51520- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
51521+ atomic_inc_unchecked(&sbi->s_bal_success);
51522+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
51523 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
51524 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
51525- atomic_inc(&sbi->s_bal_goals);
51526+ atomic_inc_unchecked(&sbi->s_bal_goals);
51527 if (ac->ac_found > sbi->s_mb_max_to_scan)
51528- atomic_inc(&sbi->s_bal_breaks);
51529+ atomic_inc_unchecked(&sbi->s_bal_breaks);
51530 }
51531
51532 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
51533@@ -3448,7 +3448,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
51534 trace_ext4_mb_new_inode_pa(ac, pa);
51535
51536 ext4_mb_use_inode_pa(ac, pa);
51537- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
51538+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
51539
51540 ei = EXT4_I(ac->ac_inode);
51541 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
51542@@ -3508,7 +3508,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
51543 trace_ext4_mb_new_group_pa(ac, pa);
51544
51545 ext4_mb_use_group_pa(ac, pa);
51546- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
51547+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
51548
51549 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
51550 lg = ac->ac_lg;
51551@@ -3597,7 +3597,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
51552 * from the bitmap and continue.
51553 */
51554 }
51555- atomic_add(free, &sbi->s_mb_discarded);
51556+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
51557
51558 return err;
51559 }
51560@@ -3615,7 +3615,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
51561 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
51562 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
51563 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
51564- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
51565+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
51566 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
51567
51568 return 0;
51569diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
51570index b3b1f7d..cff51d5 100644
51571--- a/fs/ext4/mmp.c
51572+++ b/fs/ext4/mmp.c
51573@@ -113,7 +113,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
51574 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
51575 const char *function, unsigned int line, const char *msg)
51576 {
51577- __ext4_warning(sb, function, line, msg);
51578+ __ext4_warning(sb, function, line, "%s", msg);
51579 __ext4_warning(sb, function, line,
51580 "MMP failure info: last update time: %llu, last update "
51581 "node: %s, last update device: %s\n",
51582diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
51583index 3beae6a..8cc5637 100644
51584--- a/fs/ext4/resize.c
51585+++ b/fs/ext4/resize.c
51586@@ -79,12 +79,20 @@ static int verify_group_input(struct super_block *sb,
51587 ext4_fsblk_t end = start + input->blocks_count;
51588 ext4_group_t group = input->group;
51589 ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group;
51590- unsigned overhead = ext4_group_overhead_blocks(sb, group);
51591- ext4_fsblk_t metaend = start + overhead;
51592+ unsigned overhead;
51593+ ext4_fsblk_t metaend;
51594 struct buffer_head *bh = NULL;
51595 ext4_grpblk_t free_blocks_count, offset;
51596 int err = -EINVAL;
51597
51598+ if (group != sbi->s_groups_count) {
51599+ ext4_warning(sb, "Cannot add at group %u (only %u groups)",
51600+ input->group, sbi->s_groups_count);
51601+ return -EINVAL;
51602+ }
51603+
51604+ overhead = ext4_group_overhead_blocks(sb, group);
51605+ metaend = start + overhead;
51606 input->free_blocks_count = free_blocks_count =
51607 input->blocks_count - 2 - overhead - sbi->s_itb_per_group;
51608
51609@@ -96,10 +104,7 @@ static int verify_group_input(struct super_block *sb,
51610 free_blocks_count, input->reserved_blocks);
51611
51612 ext4_get_group_no_and_offset(sb, start, NULL, &offset);
51613- if (group != sbi->s_groups_count)
51614- ext4_warning(sb, "Cannot add at group %u (only %u groups)",
51615- input->group, sbi->s_groups_count);
51616- else if (offset != 0)
51617+ if (offset != 0)
51618 ext4_warning(sb, "Last group not full");
51619 else if (input->reserved_blocks > input->blocks_count / 5)
51620 ext4_warning(sb, "Reserved blocks too high (%u)",
51621diff --git a/fs/ext4/super.c b/fs/ext4/super.c
51622index febbe0e..d0cdc02 100644
51623--- a/fs/ext4/super.c
51624+++ b/fs/ext4/super.c
51625@@ -1238,7 +1238,7 @@ static ext4_fsblk_t get_sb_block(void **data)
51626 }
51627
51628 #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
51629-static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
51630+static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
51631 "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
51632
51633 #ifdef CONFIG_QUOTA
51634@@ -2380,7 +2380,7 @@ struct ext4_attr {
51635 ssize_t (*store)(struct ext4_attr *, struct ext4_sb_info *,
51636 const char *, size_t);
51637 int offset;
51638-};
51639+} __do_const;
51640
51641 static int parse_strtoul(const char *buf,
51642 unsigned long max, unsigned long *value)
51643diff --git a/fs/fcntl.c b/fs/fcntl.c
51644index 6599222..e7bf0de 100644
51645--- a/fs/fcntl.c
51646+++ b/fs/fcntl.c
51647@@ -107,6 +107,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
51648 if (err)
51649 return err;
51650
51651+ if (gr_handle_chroot_fowner(pid, type))
51652+ return -ENOENT;
51653+ if (gr_check_protected_task_fowner(pid, type))
51654+ return -EACCES;
51655+
51656 f_modown(filp, pid, type, force);
51657 return 0;
51658 }
51659diff --git a/fs/fhandle.c b/fs/fhandle.c
51660index 999ff5c..41f4109 100644
51661--- a/fs/fhandle.c
51662+++ b/fs/fhandle.c
51663@@ -67,8 +67,7 @@ static long do_sys_name_to_handle(struct path *path,
51664 } else
51665 retval = 0;
51666 /* copy the mount id */
51667- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
51668- sizeof(*mnt_id)) ||
51669+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
51670 copy_to_user(ufh, handle,
51671 sizeof(struct file_handle) + handle_bytes))
51672 retval = -EFAULT;
51673diff --git a/fs/fifo.c b/fs/fifo.c
51674index cf6f434..3d7942c 100644
51675--- a/fs/fifo.c
51676+++ b/fs/fifo.c
51677@@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
51678 */
51679 filp->f_op = &read_pipefifo_fops;
51680 pipe->r_counter++;
51681- if (pipe->readers++ == 0)
51682+ if (atomic_inc_return(&pipe->readers) == 1)
51683 wake_up_partner(inode);
51684
51685- if (!pipe->writers) {
51686+ if (!atomic_read(&pipe->writers)) {
51687 if ((filp->f_flags & O_NONBLOCK)) {
51688 /* suppress POLLHUP until we have
51689 * seen a writer */
51690@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
51691 * errno=ENXIO when there is no process reading the FIFO.
51692 */
51693 ret = -ENXIO;
51694- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
51695+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
51696 goto err;
51697
51698 filp->f_op = &write_pipefifo_fops;
51699 pipe->w_counter++;
51700- if (!pipe->writers++)
51701+ if (atomic_inc_return(&pipe->writers) == 1)
51702 wake_up_partner(inode);
51703
51704- if (!pipe->readers) {
51705+ if (!atomic_read(&pipe->readers)) {
51706 if (wait_for_partner(inode, &pipe->r_counter))
51707 goto err_wr;
51708 }
51709@@ -104,11 +104,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
51710 */
51711 filp->f_op = &rdwr_pipefifo_fops;
51712
51713- pipe->readers++;
51714- pipe->writers++;
51715+ atomic_inc(&pipe->readers);
51716+ atomic_inc(&pipe->writers);
51717 pipe->r_counter++;
51718 pipe->w_counter++;
51719- if (pipe->readers == 1 || pipe->writers == 1)
51720+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
51721 wake_up_partner(inode);
51722 break;
51723
51724@@ -122,19 +122,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
51725 return 0;
51726
51727 err_rd:
51728- if (!--pipe->readers)
51729+ if (atomic_dec_and_test(&pipe->readers))
51730 wake_up_interruptible(&pipe->wait);
51731 ret = -ERESTARTSYS;
51732 goto err;
51733
51734 err_wr:
51735- if (!--pipe->writers)
51736+ if (atomic_dec_and_test(&pipe->writers))
51737 wake_up_interruptible(&pipe->wait);
51738 ret = -ERESTARTSYS;
51739 goto err;
51740
51741 err:
51742- if (!pipe->readers && !pipe->writers)
51743+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
51744 free_pipe_info(inode);
51745
51746 err_nocleanup:
51747diff --git a/fs/file.c b/fs/file.c
51748index 3906d95..5fe379b 100644
51749--- a/fs/file.c
51750+++ b/fs/file.c
51751@@ -16,6 +16,7 @@
51752 #include <linux/slab.h>
51753 #include <linux/vmalloc.h>
51754 #include <linux/file.h>
51755+#include <linux/security.h>
51756 #include <linux/fdtable.h>
51757 #include <linux/bitops.h>
51758 #include <linux/interrupt.h>
51759@@ -892,6 +893,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
51760 if (!file)
51761 return __close_fd(files, fd);
51762
51763+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
51764 if (fd >= rlimit(RLIMIT_NOFILE))
51765 return -EBADF;
51766
51767@@ -918,6 +920,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
51768 if (unlikely(oldfd == newfd))
51769 return -EINVAL;
51770
51771+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
51772 if (newfd >= rlimit(RLIMIT_NOFILE))
51773 return -EBADF;
51774
51775@@ -973,6 +976,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
51776 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
51777 {
51778 int err;
51779+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
51780 if (from >= rlimit(RLIMIT_NOFILE))
51781 return -EINVAL;
51782 err = alloc_fd(from, flags);
51783diff --git a/fs/filesystems.c b/fs/filesystems.c
51784index 92567d9..fcd8cbf 100644
51785--- a/fs/filesystems.c
51786+++ b/fs/filesystems.c
51787@@ -273,7 +273,11 @@ struct file_system_type *get_fs_type(const char *name)
51788 int len = dot ? dot - name : strlen(name);
51789
51790 fs = __get_fs_type(name, len);
51791+#ifdef CONFIG_GRKERNSEC_MODHARDEN
51792+ if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
51793+#else
51794 if (!fs && (request_module("fs-%.*s", len, name) == 0))
51795+#endif
51796 fs = __get_fs_type(name, len);
51797
51798 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
51799diff --git a/fs/fs_struct.c b/fs/fs_struct.c
51800index d8ac61d..79a36f0 100644
51801--- a/fs/fs_struct.c
51802+++ b/fs/fs_struct.c
51803@@ -4,6 +4,7 @@
51804 #include <linux/path.h>
51805 #include <linux/slab.h>
51806 #include <linux/fs_struct.h>
51807+#include <linux/grsecurity.h>
51808 #include "internal.h"
51809
51810 /*
51811@@ -19,6 +20,7 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
51812 write_seqcount_begin(&fs->seq);
51813 old_root = fs->root;
51814 fs->root = *path;
51815+ gr_set_chroot_entries(current, path);
51816 write_seqcount_end(&fs->seq);
51817 spin_unlock(&fs->lock);
51818 if (old_root.dentry)
51819@@ -67,6 +69,10 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
51820 int hits = 0;
51821 spin_lock(&fs->lock);
51822 write_seqcount_begin(&fs->seq);
51823+ /* this root replacement is only done by pivot_root,
51824+ leave grsec's chroot tagging alone for this task
51825+ so that a pivoted root isn't treated as a chroot
51826+ */
51827 hits += replace_path(&fs->root, old_root, new_root);
51828 hits += replace_path(&fs->pwd, old_root, new_root);
51829 write_seqcount_end(&fs->seq);
51830@@ -99,7 +105,8 @@ void exit_fs(struct task_struct *tsk)
51831 task_lock(tsk);
51832 spin_lock(&fs->lock);
51833 tsk->fs = NULL;
51834- kill = !--fs->users;
51835+ gr_clear_chroot_entries(tsk);
51836+ kill = !atomic_dec_return(&fs->users);
51837 spin_unlock(&fs->lock);
51838 task_unlock(tsk);
51839 if (kill)
51840@@ -112,7 +119,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
51841 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
51842 /* We don't need to lock fs - think why ;-) */
51843 if (fs) {
51844- fs->users = 1;
51845+ atomic_set(&fs->users, 1);
51846 fs->in_exec = 0;
51847 spin_lock_init(&fs->lock);
51848 seqcount_init(&fs->seq);
51849@@ -121,6 +128,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
51850 spin_lock(&old->lock);
51851 fs->root = old->root;
51852 path_get(&fs->root);
51853+ /* instead of calling gr_set_chroot_entries here,
51854+ we call it from every caller of this function
51855+ */
51856 fs->pwd = old->pwd;
51857 path_get(&fs->pwd);
51858 spin_unlock(&old->lock);
51859@@ -139,8 +149,9 @@ int unshare_fs_struct(void)
51860
51861 task_lock(current);
51862 spin_lock(&fs->lock);
51863- kill = !--fs->users;
51864+ kill = !atomic_dec_return(&fs->users);
51865 current->fs = new_fs;
51866+ gr_set_chroot_entries(current, &new_fs->root);
51867 spin_unlock(&fs->lock);
51868 task_unlock(current);
51869
51870@@ -153,13 +164,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
51871
51872 int current_umask(void)
51873 {
51874- return current->fs->umask;
51875+ return current->fs->umask | gr_acl_umask();
51876 }
51877 EXPORT_SYMBOL(current_umask);
51878
51879 /* to be mentioned only in INIT_TASK */
51880 struct fs_struct init_fs = {
51881- .users = 1,
51882+ .users = ATOMIC_INIT(1),
51883 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
51884 .seq = SEQCNT_ZERO,
51885 .umask = 0022,
51886diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
51887index e2cba1f..17a25bb 100644
51888--- a/fs/fscache/cookie.c
51889+++ b/fs/fscache/cookie.c
51890@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
51891 parent ? (char *) parent->def->name : "<no-parent>",
51892 def->name, netfs_data);
51893
51894- fscache_stat(&fscache_n_acquires);
51895+ fscache_stat_unchecked(&fscache_n_acquires);
51896
51897 /* if there's no parent cookie, then we don't create one here either */
51898 if (!parent) {
51899- fscache_stat(&fscache_n_acquires_null);
51900+ fscache_stat_unchecked(&fscache_n_acquires_null);
51901 _leave(" [no parent]");
51902 return NULL;
51903 }
51904@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
51905 /* allocate and initialise a cookie */
51906 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
51907 if (!cookie) {
51908- fscache_stat(&fscache_n_acquires_oom);
51909+ fscache_stat_unchecked(&fscache_n_acquires_oom);
51910 _leave(" [ENOMEM]");
51911 return NULL;
51912 }
51913@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
51914
51915 switch (cookie->def->type) {
51916 case FSCACHE_COOKIE_TYPE_INDEX:
51917- fscache_stat(&fscache_n_cookie_index);
51918+ fscache_stat_unchecked(&fscache_n_cookie_index);
51919 break;
51920 case FSCACHE_COOKIE_TYPE_DATAFILE:
51921- fscache_stat(&fscache_n_cookie_data);
51922+ fscache_stat_unchecked(&fscache_n_cookie_data);
51923 break;
51924 default:
51925- fscache_stat(&fscache_n_cookie_special);
51926+ fscache_stat_unchecked(&fscache_n_cookie_special);
51927 break;
51928 }
51929
51930@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
51931 if (fscache_acquire_non_index_cookie(cookie) < 0) {
51932 atomic_dec(&parent->n_children);
51933 __fscache_cookie_put(cookie);
51934- fscache_stat(&fscache_n_acquires_nobufs);
51935+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
51936 _leave(" = NULL");
51937 return NULL;
51938 }
51939 }
51940
51941- fscache_stat(&fscache_n_acquires_ok);
51942+ fscache_stat_unchecked(&fscache_n_acquires_ok);
51943 _leave(" = %p", cookie);
51944 return cookie;
51945 }
51946@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
51947 cache = fscache_select_cache_for_object(cookie->parent);
51948 if (!cache) {
51949 up_read(&fscache_addremove_sem);
51950- fscache_stat(&fscache_n_acquires_no_cache);
51951+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
51952 _leave(" = -ENOMEDIUM [no cache]");
51953 return -ENOMEDIUM;
51954 }
51955@@ -255,12 +255,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
51956 object = cache->ops->alloc_object(cache, cookie);
51957 fscache_stat_d(&fscache_n_cop_alloc_object);
51958 if (IS_ERR(object)) {
51959- fscache_stat(&fscache_n_object_no_alloc);
51960+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
51961 ret = PTR_ERR(object);
51962 goto error;
51963 }
51964
51965- fscache_stat(&fscache_n_object_alloc);
51966+ fscache_stat_unchecked(&fscache_n_object_alloc);
51967
51968 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
51969
51970@@ -376,7 +376,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
51971
51972 _enter("{%s}", cookie->def->name);
51973
51974- fscache_stat(&fscache_n_invalidates);
51975+ fscache_stat_unchecked(&fscache_n_invalidates);
51976
51977 /* Only permit invalidation of data files. Invalidating an index will
51978 * require the caller to release all its attachments to the tree rooted
51979@@ -434,10 +434,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
51980 {
51981 struct fscache_object *object;
51982
51983- fscache_stat(&fscache_n_updates);
51984+ fscache_stat_unchecked(&fscache_n_updates);
51985
51986 if (!cookie) {
51987- fscache_stat(&fscache_n_updates_null);
51988+ fscache_stat_unchecked(&fscache_n_updates_null);
51989 _leave(" [no cookie]");
51990 return;
51991 }
51992@@ -471,12 +471,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
51993 struct fscache_object *object;
51994 unsigned long event;
51995
51996- fscache_stat(&fscache_n_relinquishes);
51997+ fscache_stat_unchecked(&fscache_n_relinquishes);
51998 if (retire)
51999- fscache_stat(&fscache_n_relinquishes_retire);
52000+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
52001
52002 if (!cookie) {
52003- fscache_stat(&fscache_n_relinquishes_null);
52004+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
52005 _leave(" [no cookie]");
52006 return;
52007 }
52008@@ -492,7 +492,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
52009
52010 /* wait for the cookie to finish being instantiated (or to fail) */
52011 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
52012- fscache_stat(&fscache_n_relinquishes_waitcrt);
52013+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
52014 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
52015 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
52016 }
52017diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
52018index ee38fef..0a326d4 100644
52019--- a/fs/fscache/internal.h
52020+++ b/fs/fscache/internal.h
52021@@ -148,101 +148,101 @@ extern void fscache_proc_cleanup(void);
52022 * stats.c
52023 */
52024 #ifdef CONFIG_FSCACHE_STATS
52025-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
52026-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
52027+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
52028+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
52029
52030-extern atomic_t fscache_n_op_pend;
52031-extern atomic_t fscache_n_op_run;
52032-extern atomic_t fscache_n_op_enqueue;
52033-extern atomic_t fscache_n_op_deferred_release;
52034-extern atomic_t fscache_n_op_release;
52035-extern atomic_t fscache_n_op_gc;
52036-extern atomic_t fscache_n_op_cancelled;
52037-extern atomic_t fscache_n_op_rejected;
52038+extern atomic_unchecked_t fscache_n_op_pend;
52039+extern atomic_unchecked_t fscache_n_op_run;
52040+extern atomic_unchecked_t fscache_n_op_enqueue;
52041+extern atomic_unchecked_t fscache_n_op_deferred_release;
52042+extern atomic_unchecked_t fscache_n_op_release;
52043+extern atomic_unchecked_t fscache_n_op_gc;
52044+extern atomic_unchecked_t fscache_n_op_cancelled;
52045+extern atomic_unchecked_t fscache_n_op_rejected;
52046
52047-extern atomic_t fscache_n_attr_changed;
52048-extern atomic_t fscache_n_attr_changed_ok;
52049-extern atomic_t fscache_n_attr_changed_nobufs;
52050-extern atomic_t fscache_n_attr_changed_nomem;
52051-extern atomic_t fscache_n_attr_changed_calls;
52052+extern atomic_unchecked_t fscache_n_attr_changed;
52053+extern atomic_unchecked_t fscache_n_attr_changed_ok;
52054+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
52055+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
52056+extern atomic_unchecked_t fscache_n_attr_changed_calls;
52057
52058-extern atomic_t fscache_n_allocs;
52059-extern atomic_t fscache_n_allocs_ok;
52060-extern atomic_t fscache_n_allocs_wait;
52061-extern atomic_t fscache_n_allocs_nobufs;
52062-extern atomic_t fscache_n_allocs_intr;
52063-extern atomic_t fscache_n_allocs_object_dead;
52064-extern atomic_t fscache_n_alloc_ops;
52065-extern atomic_t fscache_n_alloc_op_waits;
52066+extern atomic_unchecked_t fscache_n_allocs;
52067+extern atomic_unchecked_t fscache_n_allocs_ok;
52068+extern atomic_unchecked_t fscache_n_allocs_wait;
52069+extern atomic_unchecked_t fscache_n_allocs_nobufs;
52070+extern atomic_unchecked_t fscache_n_allocs_intr;
52071+extern atomic_unchecked_t fscache_n_allocs_object_dead;
52072+extern atomic_unchecked_t fscache_n_alloc_ops;
52073+extern atomic_unchecked_t fscache_n_alloc_op_waits;
52074
52075-extern atomic_t fscache_n_retrievals;
52076-extern atomic_t fscache_n_retrievals_ok;
52077-extern atomic_t fscache_n_retrievals_wait;
52078-extern atomic_t fscache_n_retrievals_nodata;
52079-extern atomic_t fscache_n_retrievals_nobufs;
52080-extern atomic_t fscache_n_retrievals_intr;
52081-extern atomic_t fscache_n_retrievals_nomem;
52082-extern atomic_t fscache_n_retrievals_object_dead;
52083-extern atomic_t fscache_n_retrieval_ops;
52084-extern atomic_t fscache_n_retrieval_op_waits;
52085+extern atomic_unchecked_t fscache_n_retrievals;
52086+extern atomic_unchecked_t fscache_n_retrievals_ok;
52087+extern atomic_unchecked_t fscache_n_retrievals_wait;
52088+extern atomic_unchecked_t fscache_n_retrievals_nodata;
52089+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
52090+extern atomic_unchecked_t fscache_n_retrievals_intr;
52091+extern atomic_unchecked_t fscache_n_retrievals_nomem;
52092+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
52093+extern atomic_unchecked_t fscache_n_retrieval_ops;
52094+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
52095
52096-extern atomic_t fscache_n_stores;
52097-extern atomic_t fscache_n_stores_ok;
52098-extern atomic_t fscache_n_stores_again;
52099-extern atomic_t fscache_n_stores_nobufs;
52100-extern atomic_t fscache_n_stores_oom;
52101-extern atomic_t fscache_n_store_ops;
52102-extern atomic_t fscache_n_store_calls;
52103-extern atomic_t fscache_n_store_pages;
52104-extern atomic_t fscache_n_store_radix_deletes;
52105-extern atomic_t fscache_n_store_pages_over_limit;
52106+extern atomic_unchecked_t fscache_n_stores;
52107+extern atomic_unchecked_t fscache_n_stores_ok;
52108+extern atomic_unchecked_t fscache_n_stores_again;
52109+extern atomic_unchecked_t fscache_n_stores_nobufs;
52110+extern atomic_unchecked_t fscache_n_stores_oom;
52111+extern atomic_unchecked_t fscache_n_store_ops;
52112+extern atomic_unchecked_t fscache_n_store_calls;
52113+extern atomic_unchecked_t fscache_n_store_pages;
52114+extern atomic_unchecked_t fscache_n_store_radix_deletes;
52115+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
52116
52117-extern atomic_t fscache_n_store_vmscan_not_storing;
52118-extern atomic_t fscache_n_store_vmscan_gone;
52119-extern atomic_t fscache_n_store_vmscan_busy;
52120-extern atomic_t fscache_n_store_vmscan_cancelled;
52121-extern atomic_t fscache_n_store_vmscan_wait;
52122+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
52123+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
52124+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
52125+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
52126+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
52127
52128-extern atomic_t fscache_n_marks;
52129-extern atomic_t fscache_n_uncaches;
52130+extern atomic_unchecked_t fscache_n_marks;
52131+extern atomic_unchecked_t fscache_n_uncaches;
52132
52133-extern atomic_t fscache_n_acquires;
52134-extern atomic_t fscache_n_acquires_null;
52135-extern atomic_t fscache_n_acquires_no_cache;
52136-extern atomic_t fscache_n_acquires_ok;
52137-extern atomic_t fscache_n_acquires_nobufs;
52138-extern atomic_t fscache_n_acquires_oom;
52139+extern atomic_unchecked_t fscache_n_acquires;
52140+extern atomic_unchecked_t fscache_n_acquires_null;
52141+extern atomic_unchecked_t fscache_n_acquires_no_cache;
52142+extern atomic_unchecked_t fscache_n_acquires_ok;
52143+extern atomic_unchecked_t fscache_n_acquires_nobufs;
52144+extern atomic_unchecked_t fscache_n_acquires_oom;
52145
52146-extern atomic_t fscache_n_invalidates;
52147-extern atomic_t fscache_n_invalidates_run;
52148+extern atomic_unchecked_t fscache_n_invalidates;
52149+extern atomic_unchecked_t fscache_n_invalidates_run;
52150
52151-extern atomic_t fscache_n_updates;
52152-extern atomic_t fscache_n_updates_null;
52153-extern atomic_t fscache_n_updates_run;
52154+extern atomic_unchecked_t fscache_n_updates;
52155+extern atomic_unchecked_t fscache_n_updates_null;
52156+extern atomic_unchecked_t fscache_n_updates_run;
52157
52158-extern atomic_t fscache_n_relinquishes;
52159-extern atomic_t fscache_n_relinquishes_null;
52160-extern atomic_t fscache_n_relinquishes_waitcrt;
52161-extern atomic_t fscache_n_relinquishes_retire;
52162+extern atomic_unchecked_t fscache_n_relinquishes;
52163+extern atomic_unchecked_t fscache_n_relinquishes_null;
52164+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
52165+extern atomic_unchecked_t fscache_n_relinquishes_retire;
52166
52167-extern atomic_t fscache_n_cookie_index;
52168-extern atomic_t fscache_n_cookie_data;
52169-extern atomic_t fscache_n_cookie_special;
52170+extern atomic_unchecked_t fscache_n_cookie_index;
52171+extern atomic_unchecked_t fscache_n_cookie_data;
52172+extern atomic_unchecked_t fscache_n_cookie_special;
52173
52174-extern atomic_t fscache_n_object_alloc;
52175-extern atomic_t fscache_n_object_no_alloc;
52176-extern atomic_t fscache_n_object_lookups;
52177-extern atomic_t fscache_n_object_lookups_negative;
52178-extern atomic_t fscache_n_object_lookups_positive;
52179-extern atomic_t fscache_n_object_lookups_timed_out;
52180-extern atomic_t fscache_n_object_created;
52181-extern atomic_t fscache_n_object_avail;
52182-extern atomic_t fscache_n_object_dead;
52183+extern atomic_unchecked_t fscache_n_object_alloc;
52184+extern atomic_unchecked_t fscache_n_object_no_alloc;
52185+extern atomic_unchecked_t fscache_n_object_lookups;
52186+extern atomic_unchecked_t fscache_n_object_lookups_negative;
52187+extern atomic_unchecked_t fscache_n_object_lookups_positive;
52188+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
52189+extern atomic_unchecked_t fscache_n_object_created;
52190+extern atomic_unchecked_t fscache_n_object_avail;
52191+extern atomic_unchecked_t fscache_n_object_dead;
52192
52193-extern atomic_t fscache_n_checkaux_none;
52194-extern atomic_t fscache_n_checkaux_okay;
52195-extern atomic_t fscache_n_checkaux_update;
52196-extern atomic_t fscache_n_checkaux_obsolete;
52197+extern atomic_unchecked_t fscache_n_checkaux_none;
52198+extern atomic_unchecked_t fscache_n_checkaux_okay;
52199+extern atomic_unchecked_t fscache_n_checkaux_update;
52200+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
52201
52202 extern atomic_t fscache_n_cop_alloc_object;
52203 extern atomic_t fscache_n_cop_lookup_object;
52204@@ -267,6 +267,11 @@ static inline void fscache_stat(atomic_t *stat)
52205 atomic_inc(stat);
52206 }
52207
52208+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
52209+{
52210+ atomic_inc_unchecked(stat);
52211+}
52212+
52213 static inline void fscache_stat_d(atomic_t *stat)
52214 {
52215 atomic_dec(stat);
52216@@ -279,6 +284,7 @@ extern const struct file_operations fscache_stats_fops;
52217
52218 #define __fscache_stat(stat) (NULL)
52219 #define fscache_stat(stat) do {} while (0)
52220+#define fscache_stat_unchecked(stat) do {} while (0)
52221 #define fscache_stat_d(stat) do {} while (0)
52222 #endif
52223
52224diff --git a/fs/fscache/object.c b/fs/fscache/object.c
52225index 50d41c1..10ee117 100644
52226--- a/fs/fscache/object.c
52227+++ b/fs/fscache/object.c
52228@@ -143,7 +143,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
52229 /* Invalidate an object on disk */
52230 case FSCACHE_OBJECT_INVALIDATING:
52231 clear_bit(FSCACHE_OBJECT_EV_INVALIDATE, &object->events);
52232- fscache_stat(&fscache_n_invalidates_run);
52233+ fscache_stat_unchecked(&fscache_n_invalidates_run);
52234 fscache_stat(&fscache_n_cop_invalidate_object);
52235 fscache_invalidate_object(object);
52236 fscache_stat_d(&fscache_n_cop_invalidate_object);
52237@@ -153,7 +153,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
52238 /* update the object metadata on disk */
52239 case FSCACHE_OBJECT_UPDATING:
52240 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
52241- fscache_stat(&fscache_n_updates_run);
52242+ fscache_stat_unchecked(&fscache_n_updates_run);
52243 fscache_stat(&fscache_n_cop_update_object);
52244 object->cache->ops->update_object(object);
52245 fscache_stat_d(&fscache_n_cop_update_object);
52246@@ -242,7 +242,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
52247 spin_lock(&object->lock);
52248 object->state = FSCACHE_OBJECT_DEAD;
52249 spin_unlock(&object->lock);
52250- fscache_stat(&fscache_n_object_dead);
52251+ fscache_stat_unchecked(&fscache_n_object_dead);
52252 goto terminal_transit;
52253
52254 /* handle the parent cache of this object being withdrawn from
52255@@ -257,7 +257,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
52256 spin_lock(&object->lock);
52257 object->state = FSCACHE_OBJECT_DEAD;
52258 spin_unlock(&object->lock);
52259- fscache_stat(&fscache_n_object_dead);
52260+ fscache_stat_unchecked(&fscache_n_object_dead);
52261 goto terminal_transit;
52262
52263 /* complain about the object being woken up once it is
52264@@ -495,7 +495,7 @@ static void fscache_lookup_object(struct fscache_object *object)
52265 parent->cookie->def->name, cookie->def->name,
52266 object->cache->tag->name);
52267
52268- fscache_stat(&fscache_n_object_lookups);
52269+ fscache_stat_unchecked(&fscache_n_object_lookups);
52270 fscache_stat(&fscache_n_cop_lookup_object);
52271 ret = object->cache->ops->lookup_object(object);
52272 fscache_stat_d(&fscache_n_cop_lookup_object);
52273@@ -506,7 +506,7 @@ static void fscache_lookup_object(struct fscache_object *object)
52274 if (ret == -ETIMEDOUT) {
52275 /* probably stuck behind another object, so move this one to
52276 * the back of the queue */
52277- fscache_stat(&fscache_n_object_lookups_timed_out);
52278+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
52279 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
52280 }
52281
52282@@ -529,7 +529,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
52283
52284 spin_lock(&object->lock);
52285 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
52286- fscache_stat(&fscache_n_object_lookups_negative);
52287+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
52288
52289 /* transit here to allow write requests to begin stacking up
52290 * and read requests to begin returning ENODATA */
52291@@ -575,7 +575,7 @@ void fscache_obtained_object(struct fscache_object *object)
52292 * result, in which case there may be data available */
52293 spin_lock(&object->lock);
52294 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
52295- fscache_stat(&fscache_n_object_lookups_positive);
52296+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
52297
52298 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
52299
52300@@ -589,7 +589,7 @@ void fscache_obtained_object(struct fscache_object *object)
52301 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
52302 } else {
52303 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
52304- fscache_stat(&fscache_n_object_created);
52305+ fscache_stat_unchecked(&fscache_n_object_created);
52306
52307 object->state = FSCACHE_OBJECT_AVAILABLE;
52308 spin_unlock(&object->lock);
52309@@ -634,7 +634,7 @@ static void fscache_object_available(struct fscache_object *object)
52310 fscache_enqueue_dependents(object);
52311
52312 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
52313- fscache_stat(&fscache_n_object_avail);
52314+ fscache_stat_unchecked(&fscache_n_object_avail);
52315
52316 _leave("");
52317 }
52318@@ -894,7 +894,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
52319 enum fscache_checkaux result;
52320
52321 if (!object->cookie->def->check_aux) {
52322- fscache_stat(&fscache_n_checkaux_none);
52323+ fscache_stat_unchecked(&fscache_n_checkaux_none);
52324 return FSCACHE_CHECKAUX_OKAY;
52325 }
52326
52327@@ -903,17 +903,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
52328 switch (result) {
52329 /* entry okay as is */
52330 case FSCACHE_CHECKAUX_OKAY:
52331- fscache_stat(&fscache_n_checkaux_okay);
52332+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
52333 break;
52334
52335 /* entry requires update */
52336 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
52337- fscache_stat(&fscache_n_checkaux_update);
52338+ fscache_stat_unchecked(&fscache_n_checkaux_update);
52339 break;
52340
52341 /* entry requires deletion */
52342 case FSCACHE_CHECKAUX_OBSOLETE:
52343- fscache_stat(&fscache_n_checkaux_obsolete);
52344+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
52345 break;
52346
52347 default:
52348diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
52349index 762a9ec..2023284 100644
52350--- a/fs/fscache/operation.c
52351+++ b/fs/fscache/operation.c
52352@@ -17,7 +17,7 @@
52353 #include <linux/slab.h>
52354 #include "internal.h"
52355
52356-atomic_t fscache_op_debug_id;
52357+atomic_unchecked_t fscache_op_debug_id;
52358 EXPORT_SYMBOL(fscache_op_debug_id);
52359
52360 /**
52361@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
52362 ASSERTCMP(atomic_read(&op->usage), >, 0);
52363 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
52364
52365- fscache_stat(&fscache_n_op_enqueue);
52366+ fscache_stat_unchecked(&fscache_n_op_enqueue);
52367 switch (op->flags & FSCACHE_OP_TYPE) {
52368 case FSCACHE_OP_ASYNC:
52369 _debug("queue async");
52370@@ -73,7 +73,7 @@ static void fscache_run_op(struct fscache_object *object,
52371 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
52372 if (op->processor)
52373 fscache_enqueue_operation(op);
52374- fscache_stat(&fscache_n_op_run);
52375+ fscache_stat_unchecked(&fscache_n_op_run);
52376 }
52377
52378 /*
52379@@ -105,11 +105,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
52380 if (object->n_in_progress > 0) {
52381 atomic_inc(&op->usage);
52382 list_add_tail(&op->pend_link, &object->pending_ops);
52383- fscache_stat(&fscache_n_op_pend);
52384+ fscache_stat_unchecked(&fscache_n_op_pend);
52385 } else if (!list_empty(&object->pending_ops)) {
52386 atomic_inc(&op->usage);
52387 list_add_tail(&op->pend_link, &object->pending_ops);
52388- fscache_stat(&fscache_n_op_pend);
52389+ fscache_stat_unchecked(&fscache_n_op_pend);
52390 fscache_start_operations(object);
52391 } else {
52392 ASSERTCMP(object->n_in_progress, ==, 0);
52393@@ -125,7 +125,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
52394 object->n_exclusive++; /* reads and writes must wait */
52395 atomic_inc(&op->usage);
52396 list_add_tail(&op->pend_link, &object->pending_ops);
52397- fscache_stat(&fscache_n_op_pend);
52398+ fscache_stat_unchecked(&fscache_n_op_pend);
52399 ret = 0;
52400 } else {
52401 /* If we're in any other state, there must have been an I/O
52402@@ -215,11 +215,11 @@ int fscache_submit_op(struct fscache_object *object,
52403 if (object->n_exclusive > 0) {
52404 atomic_inc(&op->usage);
52405 list_add_tail(&op->pend_link, &object->pending_ops);
52406- fscache_stat(&fscache_n_op_pend);
52407+ fscache_stat_unchecked(&fscache_n_op_pend);
52408 } else if (!list_empty(&object->pending_ops)) {
52409 atomic_inc(&op->usage);
52410 list_add_tail(&op->pend_link, &object->pending_ops);
52411- fscache_stat(&fscache_n_op_pend);
52412+ fscache_stat_unchecked(&fscache_n_op_pend);
52413 fscache_start_operations(object);
52414 } else {
52415 ASSERTCMP(object->n_exclusive, ==, 0);
52416@@ -231,12 +231,12 @@ int fscache_submit_op(struct fscache_object *object,
52417 object->n_ops++;
52418 atomic_inc(&op->usage);
52419 list_add_tail(&op->pend_link, &object->pending_ops);
52420- fscache_stat(&fscache_n_op_pend);
52421+ fscache_stat_unchecked(&fscache_n_op_pend);
52422 ret = 0;
52423 } else if (object->state == FSCACHE_OBJECT_DYING ||
52424 object->state == FSCACHE_OBJECT_LC_DYING ||
52425 object->state == FSCACHE_OBJECT_WITHDRAWING) {
52426- fscache_stat(&fscache_n_op_rejected);
52427+ fscache_stat_unchecked(&fscache_n_op_rejected);
52428 op->state = FSCACHE_OP_ST_CANCELLED;
52429 ret = -ENOBUFS;
52430 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
52431@@ -315,7 +315,7 @@ int fscache_cancel_op(struct fscache_operation *op,
52432 ret = -EBUSY;
52433 if (op->state == FSCACHE_OP_ST_PENDING) {
52434 ASSERT(!list_empty(&op->pend_link));
52435- fscache_stat(&fscache_n_op_cancelled);
52436+ fscache_stat_unchecked(&fscache_n_op_cancelled);
52437 list_del_init(&op->pend_link);
52438 if (do_cancel)
52439 do_cancel(op);
52440@@ -347,7 +347,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
52441 while (!list_empty(&object->pending_ops)) {
52442 op = list_entry(object->pending_ops.next,
52443 struct fscache_operation, pend_link);
52444- fscache_stat(&fscache_n_op_cancelled);
52445+ fscache_stat_unchecked(&fscache_n_op_cancelled);
52446 list_del_init(&op->pend_link);
52447
52448 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
52449@@ -419,7 +419,7 @@ void fscache_put_operation(struct fscache_operation *op)
52450 op->state, ==, FSCACHE_OP_ST_CANCELLED);
52451 op->state = FSCACHE_OP_ST_DEAD;
52452
52453- fscache_stat(&fscache_n_op_release);
52454+ fscache_stat_unchecked(&fscache_n_op_release);
52455
52456 if (op->release) {
52457 op->release(op);
52458@@ -442,7 +442,7 @@ void fscache_put_operation(struct fscache_operation *op)
52459 * lock, and defer it otherwise */
52460 if (!spin_trylock(&object->lock)) {
52461 _debug("defer put");
52462- fscache_stat(&fscache_n_op_deferred_release);
52463+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
52464
52465 cache = object->cache;
52466 spin_lock(&cache->op_gc_list_lock);
52467@@ -495,7 +495,7 @@ void fscache_operation_gc(struct work_struct *work)
52468
52469 _debug("GC DEFERRED REL OBJ%x OP%x",
52470 object->debug_id, op->debug_id);
52471- fscache_stat(&fscache_n_op_gc);
52472+ fscache_stat_unchecked(&fscache_n_op_gc);
52473
52474 ASSERTCMP(atomic_read(&op->usage), ==, 0);
52475 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
52476diff --git a/fs/fscache/page.c b/fs/fscache/page.c
52477index ff000e5..c44ec6d 100644
52478--- a/fs/fscache/page.c
52479+++ b/fs/fscache/page.c
52480@@ -61,7 +61,7 @@ try_again:
52481 val = radix_tree_lookup(&cookie->stores, page->index);
52482 if (!val) {
52483 rcu_read_unlock();
52484- fscache_stat(&fscache_n_store_vmscan_not_storing);
52485+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
52486 __fscache_uncache_page(cookie, page);
52487 return true;
52488 }
52489@@ -91,11 +91,11 @@ try_again:
52490 spin_unlock(&cookie->stores_lock);
52491
52492 if (xpage) {
52493- fscache_stat(&fscache_n_store_vmscan_cancelled);
52494- fscache_stat(&fscache_n_store_radix_deletes);
52495+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
52496+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
52497 ASSERTCMP(xpage, ==, page);
52498 } else {
52499- fscache_stat(&fscache_n_store_vmscan_gone);
52500+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
52501 }
52502
52503 wake_up_bit(&cookie->flags, 0);
52504@@ -110,11 +110,11 @@ page_busy:
52505 * sleeping on memory allocation, so we may need to impose a timeout
52506 * too. */
52507 if (!(gfp & __GFP_WAIT)) {
52508- fscache_stat(&fscache_n_store_vmscan_busy);
52509+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
52510 return false;
52511 }
52512
52513- fscache_stat(&fscache_n_store_vmscan_wait);
52514+ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
52515 __fscache_wait_on_page_write(cookie, page);
52516 gfp &= ~__GFP_WAIT;
52517 goto try_again;
52518@@ -140,7 +140,7 @@ static void fscache_end_page_write(struct fscache_object *object,
52519 FSCACHE_COOKIE_STORING_TAG);
52520 if (!radix_tree_tag_get(&cookie->stores, page->index,
52521 FSCACHE_COOKIE_PENDING_TAG)) {
52522- fscache_stat(&fscache_n_store_radix_deletes);
52523+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
52524 xpage = radix_tree_delete(&cookie->stores, page->index);
52525 }
52526 spin_unlock(&cookie->stores_lock);
52527@@ -161,7 +161,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
52528
52529 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
52530
52531- fscache_stat(&fscache_n_attr_changed_calls);
52532+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
52533
52534 if (fscache_object_is_active(object)) {
52535 fscache_stat(&fscache_n_cop_attr_changed);
52536@@ -187,11 +187,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
52537
52538 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
52539
52540- fscache_stat(&fscache_n_attr_changed);
52541+ fscache_stat_unchecked(&fscache_n_attr_changed);
52542
52543 op = kzalloc(sizeof(*op), GFP_KERNEL);
52544 if (!op) {
52545- fscache_stat(&fscache_n_attr_changed_nomem);
52546+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
52547 _leave(" = -ENOMEM");
52548 return -ENOMEM;
52549 }
52550@@ -209,7 +209,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
52551 if (fscache_submit_exclusive_op(object, op) < 0)
52552 goto nobufs;
52553 spin_unlock(&cookie->lock);
52554- fscache_stat(&fscache_n_attr_changed_ok);
52555+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
52556 fscache_put_operation(op);
52557 _leave(" = 0");
52558 return 0;
52559@@ -217,7 +217,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
52560 nobufs:
52561 spin_unlock(&cookie->lock);
52562 kfree(op);
52563- fscache_stat(&fscache_n_attr_changed_nobufs);
52564+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
52565 _leave(" = %d", -ENOBUFS);
52566 return -ENOBUFS;
52567 }
52568@@ -255,7 +255,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
52569 /* allocate a retrieval operation and attempt to submit it */
52570 op = kzalloc(sizeof(*op), GFP_NOIO);
52571 if (!op) {
52572- fscache_stat(&fscache_n_retrievals_nomem);
52573+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
52574 return NULL;
52575 }
52576
52577@@ -283,13 +283,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
52578 return 0;
52579 }
52580
52581- fscache_stat(&fscache_n_retrievals_wait);
52582+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
52583
52584 jif = jiffies;
52585 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
52586 fscache_wait_bit_interruptible,
52587 TASK_INTERRUPTIBLE) != 0) {
52588- fscache_stat(&fscache_n_retrievals_intr);
52589+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
52590 _leave(" = -ERESTARTSYS");
52591 return -ERESTARTSYS;
52592 }
52593@@ -318,8 +318,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
52594 */
52595 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
52596 struct fscache_retrieval *op,
52597- atomic_t *stat_op_waits,
52598- atomic_t *stat_object_dead)
52599+ atomic_unchecked_t *stat_op_waits,
52600+ atomic_unchecked_t *stat_object_dead)
52601 {
52602 int ret;
52603
52604@@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
52605 goto check_if_dead;
52606
52607 _debug(">>> WT");
52608- fscache_stat(stat_op_waits);
52609+ fscache_stat_unchecked(stat_op_waits);
52610 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
52611 fscache_wait_bit_interruptible,
52612 TASK_INTERRUPTIBLE) != 0) {
52613@@ -344,14 +344,14 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
52614
52615 check_if_dead:
52616 if (op->op.state == FSCACHE_OP_ST_CANCELLED) {
52617- fscache_stat(stat_object_dead);
52618+ fscache_stat_unchecked(stat_object_dead);
52619 _leave(" = -ENOBUFS [cancelled]");
52620 return -ENOBUFS;
52621 }
52622 if (unlikely(fscache_object_is_dead(object))) {
52623 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->op.state);
52624 fscache_cancel_op(&op->op, fscache_do_cancel_retrieval);
52625- fscache_stat(stat_object_dead);
52626+ fscache_stat_unchecked(stat_object_dead);
52627 return -ENOBUFS;
52628 }
52629 return 0;
52630@@ -378,7 +378,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
52631
52632 _enter("%p,%p,,,", cookie, page);
52633
52634- fscache_stat(&fscache_n_retrievals);
52635+ fscache_stat_unchecked(&fscache_n_retrievals);
52636
52637 if (hlist_empty(&cookie->backing_objects))
52638 goto nobufs;
52639@@ -417,7 +417,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
52640 goto nobufs_unlock_dec;
52641 spin_unlock(&cookie->lock);
52642
52643- fscache_stat(&fscache_n_retrieval_ops);
52644+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
52645
52646 /* pin the netfs read context in case we need to do the actual netfs
52647 * read because we've encountered a cache read failure */
52648@@ -447,15 +447,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
52649
52650 error:
52651 if (ret == -ENOMEM)
52652- fscache_stat(&fscache_n_retrievals_nomem);
52653+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
52654 else if (ret == -ERESTARTSYS)
52655- fscache_stat(&fscache_n_retrievals_intr);
52656+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
52657 else if (ret == -ENODATA)
52658- fscache_stat(&fscache_n_retrievals_nodata);
52659+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
52660 else if (ret < 0)
52661- fscache_stat(&fscache_n_retrievals_nobufs);
52662+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
52663 else
52664- fscache_stat(&fscache_n_retrievals_ok);
52665+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
52666
52667 fscache_put_retrieval(op);
52668 _leave(" = %d", ret);
52669@@ -467,7 +467,7 @@ nobufs_unlock:
52670 spin_unlock(&cookie->lock);
52671 kfree(op);
52672 nobufs:
52673- fscache_stat(&fscache_n_retrievals_nobufs);
52674+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
52675 _leave(" = -ENOBUFS");
52676 return -ENOBUFS;
52677 }
52678@@ -505,7 +505,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
52679
52680 _enter("%p,,%d,,,", cookie, *nr_pages);
52681
52682- fscache_stat(&fscache_n_retrievals);
52683+ fscache_stat_unchecked(&fscache_n_retrievals);
52684
52685 if (hlist_empty(&cookie->backing_objects))
52686 goto nobufs;
52687@@ -541,7 +541,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
52688 goto nobufs_unlock_dec;
52689 spin_unlock(&cookie->lock);
52690
52691- fscache_stat(&fscache_n_retrieval_ops);
52692+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
52693
52694 /* pin the netfs read context in case we need to do the actual netfs
52695 * read because we've encountered a cache read failure */
52696@@ -571,15 +571,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
52697
52698 error:
52699 if (ret == -ENOMEM)
52700- fscache_stat(&fscache_n_retrievals_nomem);
52701+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
52702 else if (ret == -ERESTARTSYS)
52703- fscache_stat(&fscache_n_retrievals_intr);
52704+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
52705 else if (ret == -ENODATA)
52706- fscache_stat(&fscache_n_retrievals_nodata);
52707+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
52708 else if (ret < 0)
52709- fscache_stat(&fscache_n_retrievals_nobufs);
52710+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
52711 else
52712- fscache_stat(&fscache_n_retrievals_ok);
52713+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
52714
52715 fscache_put_retrieval(op);
52716 _leave(" = %d", ret);
52717@@ -591,7 +591,7 @@ nobufs_unlock:
52718 spin_unlock(&cookie->lock);
52719 kfree(op);
52720 nobufs:
52721- fscache_stat(&fscache_n_retrievals_nobufs);
52722+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
52723 _leave(" = -ENOBUFS");
52724 return -ENOBUFS;
52725 }
52726@@ -615,7 +615,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
52727
52728 _enter("%p,%p,,,", cookie, page);
52729
52730- fscache_stat(&fscache_n_allocs);
52731+ fscache_stat_unchecked(&fscache_n_allocs);
52732
52733 if (hlist_empty(&cookie->backing_objects))
52734 goto nobufs;
52735@@ -647,7 +647,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
52736 goto nobufs_unlock;
52737 spin_unlock(&cookie->lock);
52738
52739- fscache_stat(&fscache_n_alloc_ops);
52740+ fscache_stat_unchecked(&fscache_n_alloc_ops);
52741
52742 ret = fscache_wait_for_retrieval_activation(
52743 object, op,
52744@@ -663,11 +663,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
52745
52746 error:
52747 if (ret == -ERESTARTSYS)
52748- fscache_stat(&fscache_n_allocs_intr);
52749+ fscache_stat_unchecked(&fscache_n_allocs_intr);
52750 else if (ret < 0)
52751- fscache_stat(&fscache_n_allocs_nobufs);
52752+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
52753 else
52754- fscache_stat(&fscache_n_allocs_ok);
52755+ fscache_stat_unchecked(&fscache_n_allocs_ok);
52756
52757 fscache_put_retrieval(op);
52758 _leave(" = %d", ret);
52759@@ -677,7 +677,7 @@ nobufs_unlock:
52760 spin_unlock(&cookie->lock);
52761 kfree(op);
52762 nobufs:
52763- fscache_stat(&fscache_n_allocs_nobufs);
52764+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
52765 _leave(" = -ENOBUFS");
52766 return -ENOBUFS;
52767 }
52768@@ -736,7 +736,7 @@ static void fscache_write_op(struct fscache_operation *_op)
52769
52770 spin_lock(&cookie->stores_lock);
52771
52772- fscache_stat(&fscache_n_store_calls);
52773+ fscache_stat_unchecked(&fscache_n_store_calls);
52774
52775 /* find a page to store */
52776 page = NULL;
52777@@ -747,7 +747,7 @@ static void fscache_write_op(struct fscache_operation *_op)
52778 page = results[0];
52779 _debug("gang %d [%lx]", n, page->index);
52780 if (page->index > op->store_limit) {
52781- fscache_stat(&fscache_n_store_pages_over_limit);
52782+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
52783 goto superseded;
52784 }
52785
52786@@ -759,7 +759,7 @@ static void fscache_write_op(struct fscache_operation *_op)
52787 spin_unlock(&cookie->stores_lock);
52788 spin_unlock(&object->lock);
52789
52790- fscache_stat(&fscache_n_store_pages);
52791+ fscache_stat_unchecked(&fscache_n_store_pages);
52792 fscache_stat(&fscache_n_cop_write_page);
52793 ret = object->cache->ops->write_page(op, page);
52794 fscache_stat_d(&fscache_n_cop_write_page);
52795@@ -860,7 +860,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
52796 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
52797 ASSERT(PageFsCache(page));
52798
52799- fscache_stat(&fscache_n_stores);
52800+ fscache_stat_unchecked(&fscache_n_stores);
52801
52802 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
52803 _leave(" = -ENOBUFS [invalidating]");
52804@@ -916,7 +916,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
52805 spin_unlock(&cookie->stores_lock);
52806 spin_unlock(&object->lock);
52807
52808- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
52809+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
52810 op->store_limit = object->store_limit;
52811
52812 if (fscache_submit_op(object, &op->op) < 0)
52813@@ -924,8 +924,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
52814
52815 spin_unlock(&cookie->lock);
52816 radix_tree_preload_end();
52817- fscache_stat(&fscache_n_store_ops);
52818- fscache_stat(&fscache_n_stores_ok);
52819+ fscache_stat_unchecked(&fscache_n_store_ops);
52820+ fscache_stat_unchecked(&fscache_n_stores_ok);
52821
52822 /* the work queue now carries its own ref on the object */
52823 fscache_put_operation(&op->op);
52824@@ -933,14 +933,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
52825 return 0;
52826
52827 already_queued:
52828- fscache_stat(&fscache_n_stores_again);
52829+ fscache_stat_unchecked(&fscache_n_stores_again);
52830 already_pending:
52831 spin_unlock(&cookie->stores_lock);
52832 spin_unlock(&object->lock);
52833 spin_unlock(&cookie->lock);
52834 radix_tree_preload_end();
52835 kfree(op);
52836- fscache_stat(&fscache_n_stores_ok);
52837+ fscache_stat_unchecked(&fscache_n_stores_ok);
52838 _leave(" = 0");
52839 return 0;
52840
52841@@ -959,14 +959,14 @@ nobufs:
52842 spin_unlock(&cookie->lock);
52843 radix_tree_preload_end();
52844 kfree(op);
52845- fscache_stat(&fscache_n_stores_nobufs);
52846+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
52847 _leave(" = -ENOBUFS");
52848 return -ENOBUFS;
52849
52850 nomem_free:
52851 kfree(op);
52852 nomem:
52853- fscache_stat(&fscache_n_stores_oom);
52854+ fscache_stat_unchecked(&fscache_n_stores_oom);
52855 _leave(" = -ENOMEM");
52856 return -ENOMEM;
52857 }
52858@@ -984,7 +984,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
52859 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
52860 ASSERTCMP(page, !=, NULL);
52861
52862- fscache_stat(&fscache_n_uncaches);
52863+ fscache_stat_unchecked(&fscache_n_uncaches);
52864
52865 /* cache withdrawal may beat us to it */
52866 if (!PageFsCache(page))
52867@@ -1035,7 +1035,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
52868 struct fscache_cookie *cookie = op->op.object->cookie;
52869
52870 #ifdef CONFIG_FSCACHE_STATS
52871- atomic_inc(&fscache_n_marks);
52872+ atomic_inc_unchecked(&fscache_n_marks);
52873 #endif
52874
52875 _debug("- mark %p{%lx}", page, page->index);
52876diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
52877index 40d13c7..ddf52b9 100644
52878--- a/fs/fscache/stats.c
52879+++ b/fs/fscache/stats.c
52880@@ -18,99 +18,99 @@
52881 /*
52882 * operation counters
52883 */
52884-atomic_t fscache_n_op_pend;
52885-atomic_t fscache_n_op_run;
52886-atomic_t fscache_n_op_enqueue;
52887-atomic_t fscache_n_op_requeue;
52888-atomic_t fscache_n_op_deferred_release;
52889-atomic_t fscache_n_op_release;
52890-atomic_t fscache_n_op_gc;
52891-atomic_t fscache_n_op_cancelled;
52892-atomic_t fscache_n_op_rejected;
52893+atomic_unchecked_t fscache_n_op_pend;
52894+atomic_unchecked_t fscache_n_op_run;
52895+atomic_unchecked_t fscache_n_op_enqueue;
52896+atomic_unchecked_t fscache_n_op_requeue;
52897+atomic_unchecked_t fscache_n_op_deferred_release;
52898+atomic_unchecked_t fscache_n_op_release;
52899+atomic_unchecked_t fscache_n_op_gc;
52900+atomic_unchecked_t fscache_n_op_cancelled;
52901+atomic_unchecked_t fscache_n_op_rejected;
52902
52903-atomic_t fscache_n_attr_changed;
52904-atomic_t fscache_n_attr_changed_ok;
52905-atomic_t fscache_n_attr_changed_nobufs;
52906-atomic_t fscache_n_attr_changed_nomem;
52907-atomic_t fscache_n_attr_changed_calls;
52908+atomic_unchecked_t fscache_n_attr_changed;
52909+atomic_unchecked_t fscache_n_attr_changed_ok;
52910+atomic_unchecked_t fscache_n_attr_changed_nobufs;
52911+atomic_unchecked_t fscache_n_attr_changed_nomem;
52912+atomic_unchecked_t fscache_n_attr_changed_calls;
52913
52914-atomic_t fscache_n_allocs;
52915-atomic_t fscache_n_allocs_ok;
52916-atomic_t fscache_n_allocs_wait;
52917-atomic_t fscache_n_allocs_nobufs;
52918-atomic_t fscache_n_allocs_intr;
52919-atomic_t fscache_n_allocs_object_dead;
52920-atomic_t fscache_n_alloc_ops;
52921-atomic_t fscache_n_alloc_op_waits;
52922+atomic_unchecked_t fscache_n_allocs;
52923+atomic_unchecked_t fscache_n_allocs_ok;
52924+atomic_unchecked_t fscache_n_allocs_wait;
52925+atomic_unchecked_t fscache_n_allocs_nobufs;
52926+atomic_unchecked_t fscache_n_allocs_intr;
52927+atomic_unchecked_t fscache_n_allocs_object_dead;
52928+atomic_unchecked_t fscache_n_alloc_ops;
52929+atomic_unchecked_t fscache_n_alloc_op_waits;
52930
52931-atomic_t fscache_n_retrievals;
52932-atomic_t fscache_n_retrievals_ok;
52933-atomic_t fscache_n_retrievals_wait;
52934-atomic_t fscache_n_retrievals_nodata;
52935-atomic_t fscache_n_retrievals_nobufs;
52936-atomic_t fscache_n_retrievals_intr;
52937-atomic_t fscache_n_retrievals_nomem;
52938-atomic_t fscache_n_retrievals_object_dead;
52939-atomic_t fscache_n_retrieval_ops;
52940-atomic_t fscache_n_retrieval_op_waits;
52941+atomic_unchecked_t fscache_n_retrievals;
52942+atomic_unchecked_t fscache_n_retrievals_ok;
52943+atomic_unchecked_t fscache_n_retrievals_wait;
52944+atomic_unchecked_t fscache_n_retrievals_nodata;
52945+atomic_unchecked_t fscache_n_retrievals_nobufs;
52946+atomic_unchecked_t fscache_n_retrievals_intr;
52947+atomic_unchecked_t fscache_n_retrievals_nomem;
52948+atomic_unchecked_t fscache_n_retrievals_object_dead;
52949+atomic_unchecked_t fscache_n_retrieval_ops;
52950+atomic_unchecked_t fscache_n_retrieval_op_waits;
52951
52952-atomic_t fscache_n_stores;
52953-atomic_t fscache_n_stores_ok;
52954-atomic_t fscache_n_stores_again;
52955-atomic_t fscache_n_stores_nobufs;
52956-atomic_t fscache_n_stores_oom;
52957-atomic_t fscache_n_store_ops;
52958-atomic_t fscache_n_store_calls;
52959-atomic_t fscache_n_store_pages;
52960-atomic_t fscache_n_store_radix_deletes;
52961-atomic_t fscache_n_store_pages_over_limit;
52962+atomic_unchecked_t fscache_n_stores;
52963+atomic_unchecked_t fscache_n_stores_ok;
52964+atomic_unchecked_t fscache_n_stores_again;
52965+atomic_unchecked_t fscache_n_stores_nobufs;
52966+atomic_unchecked_t fscache_n_stores_oom;
52967+atomic_unchecked_t fscache_n_store_ops;
52968+atomic_unchecked_t fscache_n_store_calls;
52969+atomic_unchecked_t fscache_n_store_pages;
52970+atomic_unchecked_t fscache_n_store_radix_deletes;
52971+atomic_unchecked_t fscache_n_store_pages_over_limit;
52972
52973-atomic_t fscache_n_store_vmscan_not_storing;
52974-atomic_t fscache_n_store_vmscan_gone;
52975-atomic_t fscache_n_store_vmscan_busy;
52976-atomic_t fscache_n_store_vmscan_cancelled;
52977-atomic_t fscache_n_store_vmscan_wait;
52978+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
52979+atomic_unchecked_t fscache_n_store_vmscan_gone;
52980+atomic_unchecked_t fscache_n_store_vmscan_busy;
52981+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
52982+atomic_unchecked_t fscache_n_store_vmscan_wait;
52983
52984-atomic_t fscache_n_marks;
52985-atomic_t fscache_n_uncaches;
52986+atomic_unchecked_t fscache_n_marks;
52987+atomic_unchecked_t fscache_n_uncaches;
52988
52989-atomic_t fscache_n_acquires;
52990-atomic_t fscache_n_acquires_null;
52991-atomic_t fscache_n_acquires_no_cache;
52992-atomic_t fscache_n_acquires_ok;
52993-atomic_t fscache_n_acquires_nobufs;
52994-atomic_t fscache_n_acquires_oom;
52995+atomic_unchecked_t fscache_n_acquires;
52996+atomic_unchecked_t fscache_n_acquires_null;
52997+atomic_unchecked_t fscache_n_acquires_no_cache;
52998+atomic_unchecked_t fscache_n_acquires_ok;
52999+atomic_unchecked_t fscache_n_acquires_nobufs;
53000+atomic_unchecked_t fscache_n_acquires_oom;
53001
53002-atomic_t fscache_n_invalidates;
53003-atomic_t fscache_n_invalidates_run;
53004+atomic_unchecked_t fscache_n_invalidates;
53005+atomic_unchecked_t fscache_n_invalidates_run;
53006
53007-atomic_t fscache_n_updates;
53008-atomic_t fscache_n_updates_null;
53009-atomic_t fscache_n_updates_run;
53010+atomic_unchecked_t fscache_n_updates;
53011+atomic_unchecked_t fscache_n_updates_null;
53012+atomic_unchecked_t fscache_n_updates_run;
53013
53014-atomic_t fscache_n_relinquishes;
53015-atomic_t fscache_n_relinquishes_null;
53016-atomic_t fscache_n_relinquishes_waitcrt;
53017-atomic_t fscache_n_relinquishes_retire;
53018+atomic_unchecked_t fscache_n_relinquishes;
53019+atomic_unchecked_t fscache_n_relinquishes_null;
53020+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
53021+atomic_unchecked_t fscache_n_relinquishes_retire;
53022
53023-atomic_t fscache_n_cookie_index;
53024-atomic_t fscache_n_cookie_data;
53025-atomic_t fscache_n_cookie_special;
53026+atomic_unchecked_t fscache_n_cookie_index;
53027+atomic_unchecked_t fscache_n_cookie_data;
53028+atomic_unchecked_t fscache_n_cookie_special;
53029
53030-atomic_t fscache_n_object_alloc;
53031-atomic_t fscache_n_object_no_alloc;
53032-atomic_t fscache_n_object_lookups;
53033-atomic_t fscache_n_object_lookups_negative;
53034-atomic_t fscache_n_object_lookups_positive;
53035-atomic_t fscache_n_object_lookups_timed_out;
53036-atomic_t fscache_n_object_created;
53037-atomic_t fscache_n_object_avail;
53038-atomic_t fscache_n_object_dead;
53039+atomic_unchecked_t fscache_n_object_alloc;
53040+atomic_unchecked_t fscache_n_object_no_alloc;
53041+atomic_unchecked_t fscache_n_object_lookups;
53042+atomic_unchecked_t fscache_n_object_lookups_negative;
53043+atomic_unchecked_t fscache_n_object_lookups_positive;
53044+atomic_unchecked_t fscache_n_object_lookups_timed_out;
53045+atomic_unchecked_t fscache_n_object_created;
53046+atomic_unchecked_t fscache_n_object_avail;
53047+atomic_unchecked_t fscache_n_object_dead;
53048
53049-atomic_t fscache_n_checkaux_none;
53050-atomic_t fscache_n_checkaux_okay;
53051-atomic_t fscache_n_checkaux_update;
53052-atomic_t fscache_n_checkaux_obsolete;
53053+atomic_unchecked_t fscache_n_checkaux_none;
53054+atomic_unchecked_t fscache_n_checkaux_okay;
53055+atomic_unchecked_t fscache_n_checkaux_update;
53056+atomic_unchecked_t fscache_n_checkaux_obsolete;
53057
53058 atomic_t fscache_n_cop_alloc_object;
53059 atomic_t fscache_n_cop_lookup_object;
53060@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
53061 seq_puts(m, "FS-Cache statistics\n");
53062
53063 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
53064- atomic_read(&fscache_n_cookie_index),
53065- atomic_read(&fscache_n_cookie_data),
53066- atomic_read(&fscache_n_cookie_special));
53067+ atomic_read_unchecked(&fscache_n_cookie_index),
53068+ atomic_read_unchecked(&fscache_n_cookie_data),
53069+ atomic_read_unchecked(&fscache_n_cookie_special));
53070
53071 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
53072- atomic_read(&fscache_n_object_alloc),
53073- atomic_read(&fscache_n_object_no_alloc),
53074- atomic_read(&fscache_n_object_avail),
53075- atomic_read(&fscache_n_object_dead));
53076+ atomic_read_unchecked(&fscache_n_object_alloc),
53077+ atomic_read_unchecked(&fscache_n_object_no_alloc),
53078+ atomic_read_unchecked(&fscache_n_object_avail),
53079+ atomic_read_unchecked(&fscache_n_object_dead));
53080 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
53081- atomic_read(&fscache_n_checkaux_none),
53082- atomic_read(&fscache_n_checkaux_okay),
53083- atomic_read(&fscache_n_checkaux_update),
53084- atomic_read(&fscache_n_checkaux_obsolete));
53085+ atomic_read_unchecked(&fscache_n_checkaux_none),
53086+ atomic_read_unchecked(&fscache_n_checkaux_okay),
53087+ atomic_read_unchecked(&fscache_n_checkaux_update),
53088+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
53089
53090 seq_printf(m, "Pages : mrk=%u unc=%u\n",
53091- atomic_read(&fscache_n_marks),
53092- atomic_read(&fscache_n_uncaches));
53093+ atomic_read_unchecked(&fscache_n_marks),
53094+ atomic_read_unchecked(&fscache_n_uncaches));
53095
53096 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
53097 " oom=%u\n",
53098- atomic_read(&fscache_n_acquires),
53099- atomic_read(&fscache_n_acquires_null),
53100- atomic_read(&fscache_n_acquires_no_cache),
53101- atomic_read(&fscache_n_acquires_ok),
53102- atomic_read(&fscache_n_acquires_nobufs),
53103- atomic_read(&fscache_n_acquires_oom));
53104+ atomic_read_unchecked(&fscache_n_acquires),
53105+ atomic_read_unchecked(&fscache_n_acquires_null),
53106+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
53107+ atomic_read_unchecked(&fscache_n_acquires_ok),
53108+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
53109+ atomic_read_unchecked(&fscache_n_acquires_oom));
53110
53111 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
53112- atomic_read(&fscache_n_object_lookups),
53113- atomic_read(&fscache_n_object_lookups_negative),
53114- atomic_read(&fscache_n_object_lookups_positive),
53115- atomic_read(&fscache_n_object_created),
53116- atomic_read(&fscache_n_object_lookups_timed_out));
53117+ atomic_read_unchecked(&fscache_n_object_lookups),
53118+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
53119+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
53120+ atomic_read_unchecked(&fscache_n_object_created),
53121+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
53122
53123 seq_printf(m, "Invals : n=%u run=%u\n",
53124- atomic_read(&fscache_n_invalidates),
53125- atomic_read(&fscache_n_invalidates_run));
53126+ atomic_read_unchecked(&fscache_n_invalidates),
53127+ atomic_read_unchecked(&fscache_n_invalidates_run));
53128
53129 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
53130- atomic_read(&fscache_n_updates),
53131- atomic_read(&fscache_n_updates_null),
53132- atomic_read(&fscache_n_updates_run));
53133+ atomic_read_unchecked(&fscache_n_updates),
53134+ atomic_read_unchecked(&fscache_n_updates_null),
53135+ atomic_read_unchecked(&fscache_n_updates_run));
53136
53137 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
53138- atomic_read(&fscache_n_relinquishes),
53139- atomic_read(&fscache_n_relinquishes_null),
53140- atomic_read(&fscache_n_relinquishes_waitcrt),
53141- atomic_read(&fscache_n_relinquishes_retire));
53142+ atomic_read_unchecked(&fscache_n_relinquishes),
53143+ atomic_read_unchecked(&fscache_n_relinquishes_null),
53144+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
53145+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
53146
53147 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
53148- atomic_read(&fscache_n_attr_changed),
53149- atomic_read(&fscache_n_attr_changed_ok),
53150- atomic_read(&fscache_n_attr_changed_nobufs),
53151- atomic_read(&fscache_n_attr_changed_nomem),
53152- atomic_read(&fscache_n_attr_changed_calls));
53153+ atomic_read_unchecked(&fscache_n_attr_changed),
53154+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
53155+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
53156+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
53157+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
53158
53159 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
53160- atomic_read(&fscache_n_allocs),
53161- atomic_read(&fscache_n_allocs_ok),
53162- atomic_read(&fscache_n_allocs_wait),
53163- atomic_read(&fscache_n_allocs_nobufs),
53164- atomic_read(&fscache_n_allocs_intr));
53165+ atomic_read_unchecked(&fscache_n_allocs),
53166+ atomic_read_unchecked(&fscache_n_allocs_ok),
53167+ atomic_read_unchecked(&fscache_n_allocs_wait),
53168+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
53169+ atomic_read_unchecked(&fscache_n_allocs_intr));
53170 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
53171- atomic_read(&fscache_n_alloc_ops),
53172- atomic_read(&fscache_n_alloc_op_waits),
53173- atomic_read(&fscache_n_allocs_object_dead));
53174+ atomic_read_unchecked(&fscache_n_alloc_ops),
53175+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
53176+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
53177
53178 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
53179 " int=%u oom=%u\n",
53180- atomic_read(&fscache_n_retrievals),
53181- atomic_read(&fscache_n_retrievals_ok),
53182- atomic_read(&fscache_n_retrievals_wait),
53183- atomic_read(&fscache_n_retrievals_nodata),
53184- atomic_read(&fscache_n_retrievals_nobufs),
53185- atomic_read(&fscache_n_retrievals_intr),
53186- atomic_read(&fscache_n_retrievals_nomem));
53187+ atomic_read_unchecked(&fscache_n_retrievals),
53188+ atomic_read_unchecked(&fscache_n_retrievals_ok),
53189+ atomic_read_unchecked(&fscache_n_retrievals_wait),
53190+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
53191+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
53192+ atomic_read_unchecked(&fscache_n_retrievals_intr),
53193+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
53194 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
53195- atomic_read(&fscache_n_retrieval_ops),
53196- atomic_read(&fscache_n_retrieval_op_waits),
53197- atomic_read(&fscache_n_retrievals_object_dead));
53198+ atomic_read_unchecked(&fscache_n_retrieval_ops),
53199+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
53200+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
53201
53202 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
53203- atomic_read(&fscache_n_stores),
53204- atomic_read(&fscache_n_stores_ok),
53205- atomic_read(&fscache_n_stores_again),
53206- atomic_read(&fscache_n_stores_nobufs),
53207- atomic_read(&fscache_n_stores_oom));
53208+ atomic_read_unchecked(&fscache_n_stores),
53209+ atomic_read_unchecked(&fscache_n_stores_ok),
53210+ atomic_read_unchecked(&fscache_n_stores_again),
53211+ atomic_read_unchecked(&fscache_n_stores_nobufs),
53212+ atomic_read_unchecked(&fscache_n_stores_oom));
53213 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
53214- atomic_read(&fscache_n_store_ops),
53215- atomic_read(&fscache_n_store_calls),
53216- atomic_read(&fscache_n_store_pages),
53217- atomic_read(&fscache_n_store_radix_deletes),
53218- atomic_read(&fscache_n_store_pages_over_limit));
53219+ atomic_read_unchecked(&fscache_n_store_ops),
53220+ atomic_read_unchecked(&fscache_n_store_calls),
53221+ atomic_read_unchecked(&fscache_n_store_pages),
53222+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
53223+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
53224
53225 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
53226- atomic_read(&fscache_n_store_vmscan_not_storing),
53227- atomic_read(&fscache_n_store_vmscan_gone),
53228- atomic_read(&fscache_n_store_vmscan_busy),
53229- atomic_read(&fscache_n_store_vmscan_cancelled),
53230- atomic_read(&fscache_n_store_vmscan_wait));
53231+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
53232+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
53233+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
53234+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
53235+ atomic_read_unchecked(&fscache_n_store_vmscan_wait));
53236
53237 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
53238- atomic_read(&fscache_n_op_pend),
53239- atomic_read(&fscache_n_op_run),
53240- atomic_read(&fscache_n_op_enqueue),
53241- atomic_read(&fscache_n_op_cancelled),
53242- atomic_read(&fscache_n_op_rejected));
53243+ atomic_read_unchecked(&fscache_n_op_pend),
53244+ atomic_read_unchecked(&fscache_n_op_run),
53245+ atomic_read_unchecked(&fscache_n_op_enqueue),
53246+ atomic_read_unchecked(&fscache_n_op_cancelled),
53247+ atomic_read_unchecked(&fscache_n_op_rejected));
53248 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
53249- atomic_read(&fscache_n_op_deferred_release),
53250- atomic_read(&fscache_n_op_release),
53251- atomic_read(&fscache_n_op_gc));
53252+ atomic_read_unchecked(&fscache_n_op_deferred_release),
53253+ atomic_read_unchecked(&fscache_n_op_release),
53254+ atomic_read_unchecked(&fscache_n_op_gc));
53255
53256 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
53257 atomic_read(&fscache_n_cop_alloc_object),
53258diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
53259index 6f96a8d..6019bb9 100644
53260--- a/fs/fuse/cuse.c
53261+++ b/fs/fuse/cuse.c
53262@@ -597,10 +597,12 @@ static int __init cuse_init(void)
53263 INIT_LIST_HEAD(&cuse_conntbl[i]);
53264
53265 /* inherit and extend fuse_dev_operations */
53266- cuse_channel_fops = fuse_dev_operations;
53267- cuse_channel_fops.owner = THIS_MODULE;
53268- cuse_channel_fops.open = cuse_channel_open;
53269- cuse_channel_fops.release = cuse_channel_release;
53270+ pax_open_kernel();
53271+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
53272+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
53273+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
53274+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
53275+ pax_close_kernel();
53276
53277 cuse_class = class_create(THIS_MODULE, "cuse");
53278 if (IS_ERR(cuse_class))
53279diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
53280index 11dfa0c..6f64416 100644
53281--- a/fs/fuse/dev.c
53282+++ b/fs/fuse/dev.c
53283@@ -1294,7 +1294,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
53284 ret = 0;
53285 pipe_lock(pipe);
53286
53287- if (!pipe->readers) {
53288+ if (!atomic_read(&pipe->readers)) {
53289 send_sig(SIGPIPE, current, 0);
53290 if (!ret)
53291 ret = -EPIPE;
53292diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
53293index 185c479..51b9986 100644
53294--- a/fs/fuse/dir.c
53295+++ b/fs/fuse/dir.c
53296@@ -1415,7 +1415,7 @@ static char *read_link(struct dentry *dentry)
53297 return link;
53298 }
53299
53300-static void free_link(char *link)
53301+static void free_link(const char *link)
53302 {
53303 if (!IS_ERR(link))
53304 free_page((unsigned long) link);
53305diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
53306index cc00bd1..3edb692 100644
53307--- a/fs/gfs2/inode.c
53308+++ b/fs/gfs2/inode.c
53309@@ -1500,7 +1500,7 @@ out:
53310
53311 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
53312 {
53313- char *s = nd_get_link(nd);
53314+ const char *s = nd_get_link(nd);
53315 if (!IS_ERR(s))
53316 kfree(s);
53317 }
53318diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
53319index a3f868a..bb308ae 100644
53320--- a/fs/hugetlbfs/inode.c
53321+++ b/fs/hugetlbfs/inode.c
53322@@ -152,6 +152,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
53323 struct mm_struct *mm = current->mm;
53324 struct vm_area_struct *vma;
53325 struct hstate *h = hstate_file(file);
53326+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
53327 struct vm_unmapped_area_info info;
53328
53329 if (len & ~huge_page_mask(h))
53330@@ -165,17 +166,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
53331 return addr;
53332 }
53333
53334+#ifdef CONFIG_PAX_RANDMMAP
53335+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
53336+#endif
53337+
53338 if (addr) {
53339 addr = ALIGN(addr, huge_page_size(h));
53340 vma = find_vma(mm, addr);
53341- if (TASK_SIZE - len >= addr &&
53342- (!vma || addr + len <= vma->vm_start))
53343+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
53344 return addr;
53345 }
53346
53347 info.flags = 0;
53348 info.length = len;
53349 info.low_limit = TASK_UNMAPPED_BASE;
53350+
53351+#ifdef CONFIG_PAX_RANDMMAP
53352+ if (mm->pax_flags & MF_PAX_RANDMMAP)
53353+ info.low_limit += mm->delta_mmap;
53354+#endif
53355+
53356 info.high_limit = TASK_SIZE;
53357 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
53358 info.align_offset = 0;
53359@@ -898,7 +908,7 @@ static struct file_system_type hugetlbfs_fs_type = {
53360 };
53361 MODULE_ALIAS_FS("hugetlbfs");
53362
53363-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
53364+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
53365
53366 static int can_do_hugetlb_shm(void)
53367 {
53368diff --git a/fs/inode.c b/fs/inode.c
53369index a898b3d..9b5a214 100644
53370--- a/fs/inode.c
53371+++ b/fs/inode.c
53372@@ -878,8 +878,8 @@ unsigned int get_next_ino(void)
53373
53374 #ifdef CONFIG_SMP
53375 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
53376- static atomic_t shared_last_ino;
53377- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
53378+ static atomic_unchecked_t shared_last_ino;
53379+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
53380
53381 res = next - LAST_INO_BATCH;
53382 }
53383diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
53384index 4a6cf28..d3a29d3 100644
53385--- a/fs/jffs2/erase.c
53386+++ b/fs/jffs2/erase.c
53387@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
53388 struct jffs2_unknown_node marker = {
53389 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
53390 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
53391- .totlen = cpu_to_je32(c->cleanmarker_size)
53392+ .totlen = cpu_to_je32(c->cleanmarker_size),
53393+ .hdr_crc = cpu_to_je32(0)
53394 };
53395
53396 jffs2_prealloc_raw_node_refs(c, jeb, 1);
53397diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
53398index a6597d6..41b30ec 100644
53399--- a/fs/jffs2/wbuf.c
53400+++ b/fs/jffs2/wbuf.c
53401@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
53402 {
53403 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
53404 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
53405- .totlen = constant_cpu_to_je32(8)
53406+ .totlen = constant_cpu_to_je32(8),
53407+ .hdr_crc = constant_cpu_to_je32(0)
53408 };
53409
53410 /*
53411diff --git a/fs/jfs/super.c b/fs/jfs/super.c
53412index 2003e83..40db287 100644
53413--- a/fs/jfs/super.c
53414+++ b/fs/jfs/super.c
53415@@ -856,7 +856,7 @@ static int __init init_jfs_fs(void)
53416
53417 jfs_inode_cachep =
53418 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
53419- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
53420+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
53421 init_once);
53422 if (jfs_inode_cachep == NULL)
53423 return -ENOMEM;
53424diff --git a/fs/libfs.c b/fs/libfs.c
53425index 916da8c..1588998 100644
53426--- a/fs/libfs.c
53427+++ b/fs/libfs.c
53428@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
53429
53430 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
53431 struct dentry *next;
53432+ char d_name[sizeof(next->d_iname)];
53433+ const unsigned char *name;
53434+
53435 next = list_entry(p, struct dentry, d_u.d_child);
53436 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
53437 if (!simple_positive(next)) {
53438@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
53439
53440 spin_unlock(&next->d_lock);
53441 spin_unlock(&dentry->d_lock);
53442- if (filldir(dirent, next->d_name.name,
53443+ name = next->d_name.name;
53444+ if (name == next->d_iname) {
53445+ memcpy(d_name, name, next->d_name.len);
53446+ name = d_name;
53447+ }
53448+ if (filldir(dirent, name,
53449 next->d_name.len, filp->f_pos,
53450 next->d_inode->i_ino,
53451 dt_type(next->d_inode)) < 0)
53452diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
53453index 9760ecb..9b838ef 100644
53454--- a/fs/lockd/clntproc.c
53455+++ b/fs/lockd/clntproc.c
53456@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
53457 /*
53458 * Cookie counter for NLM requests
53459 */
53460-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
53461+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
53462
53463 void nlmclnt_next_cookie(struct nlm_cookie *c)
53464 {
53465- u32 cookie = atomic_inc_return(&nlm_cookie);
53466+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
53467
53468 memcpy(c->data, &cookie, 4);
53469 c->len=4;
53470diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
53471index a2aa97d..10d6c41 100644
53472--- a/fs/lockd/svc.c
53473+++ b/fs/lockd/svc.c
53474@@ -305,7 +305,7 @@ static int lockd_start_svc(struct svc_serv *serv)
53475 svc_sock_update_bufs(serv);
53476 serv->sv_maxconn = nlm_max_connections;
53477
53478- nlmsvc_task = kthread_run(lockd, nlmsvc_rqst, serv->sv_name);
53479+ nlmsvc_task = kthread_run(lockd, nlmsvc_rqst, "%s", serv->sv_name);
53480 if (IS_ERR(nlmsvc_task)) {
53481 error = PTR_ERR(nlmsvc_task);
53482 printk(KERN_WARNING
53483diff --git a/fs/locks.c b/fs/locks.c
53484index cb424a4..850e4dd 100644
53485--- a/fs/locks.c
53486+++ b/fs/locks.c
53487@@ -2064,16 +2064,16 @@ void locks_remove_flock(struct file *filp)
53488 return;
53489
53490 if (filp->f_op && filp->f_op->flock) {
53491- struct file_lock fl = {
53492+ struct file_lock flock = {
53493 .fl_pid = current->tgid,
53494 .fl_file = filp,
53495 .fl_flags = FL_FLOCK,
53496 .fl_type = F_UNLCK,
53497 .fl_end = OFFSET_MAX,
53498 };
53499- filp->f_op->flock(filp, F_SETLKW, &fl);
53500- if (fl.fl_ops && fl.fl_ops->fl_release_private)
53501- fl.fl_ops->fl_release_private(&fl);
53502+ filp->f_op->flock(filp, F_SETLKW, &flock);
53503+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
53504+ flock.fl_ops->fl_release_private(&flock);
53505 }
53506
53507 lock_flocks();
53508diff --git a/fs/namei.c b/fs/namei.c
53509index 85e40d1..b66744e 100644
53510--- a/fs/namei.c
53511+++ b/fs/namei.c
53512@@ -319,16 +319,32 @@ int generic_permission(struct inode *inode, int mask)
53513 if (ret != -EACCES)
53514 return ret;
53515
53516+#ifdef CONFIG_GRKERNSEC
53517+ /* we'll block if we have to log due to a denied capability use */
53518+ if (mask & MAY_NOT_BLOCK)
53519+ return -ECHILD;
53520+#endif
53521+
53522 if (S_ISDIR(inode->i_mode)) {
53523 /* DACs are overridable for directories */
53524- if (inode_capable(inode, CAP_DAC_OVERRIDE))
53525- return 0;
53526 if (!(mask & MAY_WRITE))
53527- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
53528+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
53529+ inode_capable(inode, CAP_DAC_READ_SEARCH))
53530 return 0;
53531+ if (inode_capable(inode, CAP_DAC_OVERRIDE))
53532+ return 0;
53533 return -EACCES;
53534 }
53535 /*
53536+ * Searching includes executable on directories, else just read.
53537+ */
53538+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
53539+ if (mask == MAY_READ)
53540+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
53541+ inode_capable(inode, CAP_DAC_READ_SEARCH))
53542+ return 0;
53543+
53544+ /*
53545 * Read/write DACs are always overridable.
53546 * Executable DACs are overridable when there is
53547 * at least one exec bit set.
53548@@ -337,14 +353,6 @@ int generic_permission(struct inode *inode, int mask)
53549 if (inode_capable(inode, CAP_DAC_OVERRIDE))
53550 return 0;
53551
53552- /*
53553- * Searching includes executable on directories, else just read.
53554- */
53555- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
53556- if (mask == MAY_READ)
53557- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
53558- return 0;
53559-
53560 return -EACCES;
53561 }
53562
53563@@ -820,7 +828,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
53564 {
53565 struct dentry *dentry = link->dentry;
53566 int error;
53567- char *s;
53568+ const char *s;
53569
53570 BUG_ON(nd->flags & LOOKUP_RCU);
53571
53572@@ -841,6 +849,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
53573 if (error)
53574 goto out_put_nd_path;
53575
53576+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
53577+ dentry->d_inode, dentry, nd->path.mnt)) {
53578+ error = -EACCES;
53579+ goto out_put_nd_path;
53580+ }
53581+
53582 nd->last_type = LAST_BIND;
53583 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
53584 error = PTR_ERR(*p);
53585@@ -1588,6 +1602,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
53586 if (res)
53587 break;
53588 res = walk_component(nd, path, LOOKUP_FOLLOW);
53589+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
53590+ res = -EACCES;
53591 put_link(nd, &link, cookie);
53592 } while (res > 0);
53593
53594@@ -1686,7 +1702,7 @@ EXPORT_SYMBOL(full_name_hash);
53595 static inline unsigned long hash_name(const char *name, unsigned int *hashp)
53596 {
53597 unsigned long a, b, adata, bdata, mask, hash, len;
53598- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
53599+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
53600
53601 hash = a = 0;
53602 len = -sizeof(unsigned long);
53603@@ -1968,6 +1984,8 @@ static int path_lookupat(int dfd, const char *name,
53604 if (err)
53605 break;
53606 err = lookup_last(nd, &path);
53607+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
53608+ err = -EACCES;
53609 put_link(nd, &link, cookie);
53610 }
53611 }
53612@@ -1975,6 +1993,13 @@ static int path_lookupat(int dfd, const char *name,
53613 if (!err)
53614 err = complete_walk(nd);
53615
53616+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
53617+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
53618+ path_put(&nd->path);
53619+ err = -ENOENT;
53620+ }
53621+ }
53622+
53623 if (!err && nd->flags & LOOKUP_DIRECTORY) {
53624 if (!nd->inode->i_op->lookup) {
53625 path_put(&nd->path);
53626@@ -2002,8 +2027,15 @@ static int filename_lookup(int dfd, struct filename *name,
53627 retval = path_lookupat(dfd, name->name,
53628 flags | LOOKUP_REVAL, nd);
53629
53630- if (likely(!retval))
53631+ if (likely(!retval)) {
53632 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
53633+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
53634+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
53635+ path_put(&nd->path);
53636+ return -ENOENT;
53637+ }
53638+ }
53639+ }
53640 return retval;
53641 }
53642
53643@@ -2381,6 +2413,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
53644 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
53645 return -EPERM;
53646
53647+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
53648+ return -EPERM;
53649+ if (gr_handle_rawio(inode))
53650+ return -EPERM;
53651+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
53652+ return -EACCES;
53653+
53654 return 0;
53655 }
53656
53657@@ -2602,7 +2641,7 @@ looked_up:
53658 * cleared otherwise prior to returning.
53659 */
53660 static int lookup_open(struct nameidata *nd, struct path *path,
53661- struct file *file,
53662+ struct path *link, struct file *file,
53663 const struct open_flags *op,
53664 bool got_write, int *opened)
53665 {
53666@@ -2637,6 +2676,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
53667 /* Negative dentry, just create the file */
53668 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
53669 umode_t mode = op->mode;
53670+
53671+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
53672+ error = -EACCES;
53673+ goto out_dput;
53674+ }
53675+
53676+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
53677+ error = -EACCES;
53678+ goto out_dput;
53679+ }
53680+
53681 if (!IS_POSIXACL(dir->d_inode))
53682 mode &= ~current_umask();
53683 /*
53684@@ -2658,6 +2708,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
53685 nd->flags & LOOKUP_EXCL);
53686 if (error)
53687 goto out_dput;
53688+ else
53689+ gr_handle_create(dentry, nd->path.mnt);
53690 }
53691 out_no_open:
53692 path->dentry = dentry;
53693@@ -2672,7 +2724,7 @@ out_dput:
53694 /*
53695 * Handle the last step of open()
53696 */
53697-static int do_last(struct nameidata *nd, struct path *path,
53698+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
53699 struct file *file, const struct open_flags *op,
53700 int *opened, struct filename *name)
53701 {
53702@@ -2701,16 +2753,32 @@ static int do_last(struct nameidata *nd, struct path *path,
53703 error = complete_walk(nd);
53704 if (error)
53705 return error;
53706+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
53707+ error = -ENOENT;
53708+ goto out;
53709+ }
53710 audit_inode(name, nd->path.dentry, 0);
53711 if (open_flag & O_CREAT) {
53712 error = -EISDIR;
53713 goto out;
53714 }
53715+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
53716+ error = -EACCES;
53717+ goto out;
53718+ }
53719 goto finish_open;
53720 case LAST_BIND:
53721 error = complete_walk(nd);
53722 if (error)
53723 return error;
53724+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
53725+ error = -ENOENT;
53726+ goto out;
53727+ }
53728+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
53729+ error = -EACCES;
53730+ goto out;
53731+ }
53732 audit_inode(name, dir, 0);
53733 goto finish_open;
53734 }
53735@@ -2759,7 +2827,7 @@ retry_lookup:
53736 */
53737 }
53738 mutex_lock(&dir->d_inode->i_mutex);
53739- error = lookup_open(nd, path, file, op, got_write, opened);
53740+ error = lookup_open(nd, path, link, file, op, got_write, opened);
53741 mutex_unlock(&dir->d_inode->i_mutex);
53742
53743 if (error <= 0) {
53744@@ -2783,11 +2851,28 @@ retry_lookup:
53745 goto finish_open_created;
53746 }
53747
53748+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
53749+ error = -ENOENT;
53750+ goto exit_dput;
53751+ }
53752+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
53753+ error = -EACCES;
53754+ goto exit_dput;
53755+ }
53756+
53757 /*
53758 * create/update audit record if it already exists.
53759 */
53760- if (path->dentry->d_inode)
53761+ if (path->dentry->d_inode) {
53762+ /* only check if O_CREAT is specified, all other checks need to go
53763+ into may_open */
53764+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
53765+ error = -EACCES;
53766+ goto exit_dput;
53767+ }
53768+
53769 audit_inode(name, path->dentry, 0);
53770+ }
53771
53772 /*
53773 * If atomic_open() acquired write access it is dropped now due to
53774@@ -2828,6 +2913,11 @@ finish_lookup:
53775 }
53776 }
53777 BUG_ON(inode != path->dentry->d_inode);
53778+ /* if we're resolving a symlink to another symlink */
53779+ if (link && gr_handle_symlink_owner(link, inode)) {
53780+ error = -EACCES;
53781+ goto out;
53782+ }
53783 return 1;
53784 }
53785
53786@@ -2837,7 +2927,6 @@ finish_lookup:
53787 save_parent.dentry = nd->path.dentry;
53788 save_parent.mnt = mntget(path->mnt);
53789 nd->path.dentry = path->dentry;
53790-
53791 }
53792 nd->inode = inode;
53793 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
53794@@ -2846,6 +2935,16 @@ finish_lookup:
53795 path_put(&save_parent);
53796 return error;
53797 }
53798+
53799+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
53800+ error = -ENOENT;
53801+ goto out;
53802+ }
53803+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
53804+ error = -EACCES;
53805+ goto out;
53806+ }
53807+
53808 error = -EISDIR;
53809 if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode))
53810 goto out;
53811@@ -2944,7 +3043,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
53812 if (unlikely(error))
53813 goto out;
53814
53815- error = do_last(nd, &path, file, op, &opened, pathname);
53816+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
53817 while (unlikely(error > 0)) { /* trailing symlink */
53818 struct path link = path;
53819 void *cookie;
53820@@ -2962,7 +3061,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
53821 error = follow_link(&link, nd, &cookie);
53822 if (unlikely(error))
53823 break;
53824- error = do_last(nd, &path, file, op, &opened, pathname);
53825+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
53826 put_link(nd, &link, cookie);
53827 }
53828 out:
53829@@ -3062,8 +3161,12 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
53830 goto unlock;
53831
53832 error = -EEXIST;
53833- if (dentry->d_inode)
53834+ if (dentry->d_inode) {
53835+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
53836+ error = -ENOENT;
53837+ }
53838 goto fail;
53839+ }
53840 /*
53841 * Special case - lookup gave negative, but... we had foo/bar/
53842 * From the vfs_mknod() POV we just have a negative dentry -
53843@@ -3115,6 +3218,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
53844 }
53845 EXPORT_SYMBOL(user_path_create);
53846
53847+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
53848+{
53849+ struct filename *tmp = getname(pathname);
53850+ struct dentry *res;
53851+ if (IS_ERR(tmp))
53852+ return ERR_CAST(tmp);
53853+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
53854+ if (IS_ERR(res))
53855+ putname(tmp);
53856+ else
53857+ *to = tmp;
53858+ return res;
53859+}
53860+
53861 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
53862 {
53863 int error = may_create(dir, dentry);
53864@@ -3177,6 +3294,17 @@ retry:
53865
53866 if (!IS_POSIXACL(path.dentry->d_inode))
53867 mode &= ~current_umask();
53868+
53869+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
53870+ error = -EPERM;
53871+ goto out;
53872+ }
53873+
53874+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
53875+ error = -EACCES;
53876+ goto out;
53877+ }
53878+
53879 error = security_path_mknod(&path, dentry, mode, dev);
53880 if (error)
53881 goto out;
53882@@ -3193,6 +3321,8 @@ retry:
53883 break;
53884 }
53885 out:
53886+ if (!error)
53887+ gr_handle_create(dentry, path.mnt);
53888 done_path_create(&path, dentry);
53889 if (retry_estale(error, lookup_flags)) {
53890 lookup_flags |= LOOKUP_REVAL;
53891@@ -3245,9 +3375,16 @@ retry:
53892
53893 if (!IS_POSIXACL(path.dentry->d_inode))
53894 mode &= ~current_umask();
53895+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
53896+ error = -EACCES;
53897+ goto out;
53898+ }
53899 error = security_path_mkdir(&path, dentry, mode);
53900 if (!error)
53901 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
53902+ if (!error)
53903+ gr_handle_create(dentry, path.mnt);
53904+out:
53905 done_path_create(&path, dentry);
53906 if (retry_estale(error, lookup_flags)) {
53907 lookup_flags |= LOOKUP_REVAL;
53908@@ -3328,6 +3465,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
53909 struct filename *name;
53910 struct dentry *dentry;
53911 struct nameidata nd;
53912+ ino_t saved_ino = 0;
53913+ dev_t saved_dev = 0;
53914 unsigned int lookup_flags = 0;
53915 retry:
53916 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
53917@@ -3360,10 +3499,21 @@ retry:
53918 error = -ENOENT;
53919 goto exit3;
53920 }
53921+
53922+ saved_ino = dentry->d_inode->i_ino;
53923+ saved_dev = gr_get_dev_from_dentry(dentry);
53924+
53925+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
53926+ error = -EACCES;
53927+ goto exit3;
53928+ }
53929+
53930 error = security_path_rmdir(&nd.path, dentry);
53931 if (error)
53932 goto exit3;
53933 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
53934+ if (!error && (saved_dev || saved_ino))
53935+ gr_handle_delete(saved_ino, saved_dev);
53936 exit3:
53937 dput(dentry);
53938 exit2:
53939@@ -3429,6 +3579,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
53940 struct dentry *dentry;
53941 struct nameidata nd;
53942 struct inode *inode = NULL;
53943+ ino_t saved_ino = 0;
53944+ dev_t saved_dev = 0;
53945 unsigned int lookup_flags = 0;
53946 retry:
53947 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
53948@@ -3455,10 +3607,22 @@ retry:
53949 if (!inode)
53950 goto slashes;
53951 ihold(inode);
53952+
53953+ if (inode->i_nlink <= 1) {
53954+ saved_ino = inode->i_ino;
53955+ saved_dev = gr_get_dev_from_dentry(dentry);
53956+ }
53957+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
53958+ error = -EACCES;
53959+ goto exit2;
53960+ }
53961+
53962 error = security_path_unlink(&nd.path, dentry);
53963 if (error)
53964 goto exit2;
53965 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
53966+ if (!error && (saved_ino || saved_dev))
53967+ gr_handle_delete(saved_ino, saved_dev);
53968 exit2:
53969 dput(dentry);
53970 }
53971@@ -3536,9 +3700,17 @@ retry:
53972 if (IS_ERR(dentry))
53973 goto out_putname;
53974
53975+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
53976+ error = -EACCES;
53977+ goto out;
53978+ }
53979+
53980 error = security_path_symlink(&path, dentry, from->name);
53981 if (!error)
53982 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
53983+ if (!error)
53984+ gr_handle_create(dentry, path.mnt);
53985+out:
53986 done_path_create(&path, dentry);
53987 if (retry_estale(error, lookup_flags)) {
53988 lookup_flags |= LOOKUP_REVAL;
53989@@ -3612,6 +3784,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
53990 {
53991 struct dentry *new_dentry;
53992 struct path old_path, new_path;
53993+ struct filename *to = NULL;
53994 int how = 0;
53995 int error;
53996
53997@@ -3635,7 +3808,7 @@ retry:
53998 if (error)
53999 return error;
54000
54001- new_dentry = user_path_create(newdfd, newname, &new_path,
54002+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
54003 (how & LOOKUP_REVAL));
54004 error = PTR_ERR(new_dentry);
54005 if (IS_ERR(new_dentry))
54006@@ -3647,11 +3820,28 @@ retry:
54007 error = may_linkat(&old_path);
54008 if (unlikely(error))
54009 goto out_dput;
54010+
54011+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
54012+ old_path.dentry->d_inode,
54013+ old_path.dentry->d_inode->i_mode, to)) {
54014+ error = -EACCES;
54015+ goto out_dput;
54016+ }
54017+
54018+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
54019+ old_path.dentry, old_path.mnt, to)) {
54020+ error = -EACCES;
54021+ goto out_dput;
54022+ }
54023+
54024 error = security_path_link(old_path.dentry, &new_path, new_dentry);
54025 if (error)
54026 goto out_dput;
54027 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
54028+ if (!error)
54029+ gr_handle_create(new_dentry, new_path.mnt);
54030 out_dput:
54031+ putname(to);
54032 done_path_create(&new_path, new_dentry);
54033 if (retry_estale(error, how)) {
54034 how |= LOOKUP_REVAL;
54035@@ -3897,12 +4087,21 @@ retry:
54036 if (new_dentry == trap)
54037 goto exit5;
54038
54039+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
54040+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
54041+ to);
54042+ if (error)
54043+ goto exit5;
54044+
54045 error = security_path_rename(&oldnd.path, old_dentry,
54046 &newnd.path, new_dentry);
54047 if (error)
54048 goto exit5;
54049 error = vfs_rename(old_dir->d_inode, old_dentry,
54050 new_dir->d_inode, new_dentry);
54051+ if (!error)
54052+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
54053+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
54054 exit5:
54055 dput(new_dentry);
54056 exit4:
54057@@ -3934,6 +4133,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
54058
54059 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
54060 {
54061+ char tmpbuf[64];
54062+ const char *newlink;
54063 int len;
54064
54065 len = PTR_ERR(link);
54066@@ -3943,7 +4144,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
54067 len = strlen(link);
54068 if (len > (unsigned) buflen)
54069 len = buflen;
54070- if (copy_to_user(buffer, link, len))
54071+
54072+ if (len < sizeof(tmpbuf)) {
54073+ memcpy(tmpbuf, link, len);
54074+ newlink = tmpbuf;
54075+ } else
54076+ newlink = link;
54077+
54078+ if (copy_to_user(buffer, newlink, len))
54079 len = -EFAULT;
54080 out:
54081 return len;
54082diff --git a/fs/namespace.c b/fs/namespace.c
54083index e945b81..fc018e2 100644
54084--- a/fs/namespace.c
54085+++ b/fs/namespace.c
54086@@ -1219,6 +1219,9 @@ static int do_umount(struct mount *mnt, int flags)
54087 if (!(sb->s_flags & MS_RDONLY))
54088 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
54089 up_write(&sb->s_umount);
54090+
54091+ gr_log_remount(mnt->mnt_devname, retval);
54092+
54093 return retval;
54094 }
54095
54096@@ -1238,6 +1241,9 @@ static int do_umount(struct mount *mnt, int flags)
54097 br_write_unlock(&vfsmount_lock);
54098 up_write(&namespace_sem);
54099 release_mounts(&umount_list);
54100+
54101+ gr_log_unmount(mnt->mnt_devname, retval);
54102+
54103 return retval;
54104 }
54105
54106@@ -1257,7 +1263,7 @@ static inline bool may_mount(void)
54107 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
54108 */
54109
54110-SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
54111+SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
54112 {
54113 struct path path;
54114 struct mount *mnt;
54115@@ -1297,7 +1303,7 @@ out:
54116 /*
54117 * The 2.0 compatible umount. No flags.
54118 */
54119-SYSCALL_DEFINE1(oldumount, char __user *, name)
54120+SYSCALL_DEFINE1(oldumount, const char __user *, name)
54121 {
54122 return sys_umount(name, 0);
54123 }
54124@@ -2267,6 +2273,16 @@ long do_mount(const char *dev_name, const char *dir_name,
54125 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
54126 MS_STRICTATIME);
54127
54128+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
54129+ retval = -EPERM;
54130+ goto dput_out;
54131+ }
54132+
54133+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
54134+ retval = -EPERM;
54135+ goto dput_out;
54136+ }
54137+
54138 if (flags & MS_REMOUNT)
54139 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
54140 data_page);
54141@@ -2281,6 +2297,9 @@ long do_mount(const char *dev_name, const char *dir_name,
54142 dev_name, data_page);
54143 dput_out:
54144 path_put(&path);
54145+
54146+ gr_log_mount(dev_name, dir_name, retval);
54147+
54148 return retval;
54149 }
54150
54151@@ -2454,8 +2473,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
54152 }
54153 EXPORT_SYMBOL(mount_subtree);
54154
54155-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
54156- char __user *, type, unsigned long, flags, void __user *, data)
54157+SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
54158+ const char __user *, type, unsigned long, flags, void __user *, data)
54159 {
54160 int ret;
54161 char *kernel_type;
54162@@ -2567,6 +2586,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
54163 if (error)
54164 goto out2;
54165
54166+ if (gr_handle_chroot_pivot()) {
54167+ error = -EPERM;
54168+ goto out2;
54169+ }
54170+
54171 get_fs_root(current->fs, &root);
54172 error = lock_mount(&old);
54173 if (error)
54174@@ -2815,7 +2839,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
54175 !nsown_capable(CAP_SYS_ADMIN))
54176 return -EPERM;
54177
54178- if (fs->users != 1)
54179+ if (atomic_read(&fs->users) != 1)
54180 return -EINVAL;
54181
54182 get_mnt_ns(mnt_ns);
54183diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
54184index 5088b57..eabd719 100644
54185--- a/fs/nfs/callback.c
54186+++ b/fs/nfs/callback.c
54187@@ -208,7 +208,6 @@ static int nfs_callback_start_svc(int minorversion, struct rpc_xprt *xprt,
54188 struct svc_rqst *rqstp;
54189 int (*callback_svc)(void *vrqstp);
54190 struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion];
54191- char svc_name[12];
54192 int ret;
54193
54194 nfs_callback_bc_serv(minorversion, xprt, serv);
54195@@ -232,10 +231,9 @@ static int nfs_callback_start_svc(int minorversion, struct rpc_xprt *xprt,
54196
54197 svc_sock_update_bufs(serv);
54198
54199- sprintf(svc_name, "nfsv4.%u-svc", minorversion);
54200 cb_info->serv = serv;
54201 cb_info->rqst = rqstp;
54202- cb_info->task = kthread_run(callback_svc, cb_info->rqst, svc_name);
54203+ cb_info->task = kthread_run(callback_svc, cb_info->rqst, "nfsv4.%u-svc", minorversion);
54204 if (IS_ERR(cb_info->task)) {
54205 ret = PTR_ERR(cb_info->task);
54206 svc_exit_thread(cb_info->rqst);
54207diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
54208index 59461c9..b17c57e 100644
54209--- a/fs/nfs/callback_xdr.c
54210+++ b/fs/nfs/callback_xdr.c
54211@@ -51,7 +51,7 @@ struct callback_op {
54212 callback_decode_arg_t decode_args;
54213 callback_encode_res_t encode_res;
54214 long res_maxsize;
54215-};
54216+} __do_const;
54217
54218 static struct callback_op callback_ops[];
54219
54220diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
54221index 1f94167..79c4ce4 100644
54222--- a/fs/nfs/inode.c
54223+++ b/fs/nfs/inode.c
54224@@ -1041,16 +1041,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
54225 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
54226 }
54227
54228-static atomic_long_t nfs_attr_generation_counter;
54229+static atomic_long_unchecked_t nfs_attr_generation_counter;
54230
54231 static unsigned long nfs_read_attr_generation_counter(void)
54232 {
54233- return atomic_long_read(&nfs_attr_generation_counter);
54234+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
54235 }
54236
54237 unsigned long nfs_inc_attr_generation_counter(void)
54238 {
54239- return atomic_long_inc_return(&nfs_attr_generation_counter);
54240+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
54241 }
54242
54243 void nfs_fattr_init(struct nfs_fattr *fattr)
54244diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
54245index d41a351..7899577 100644
54246--- a/fs/nfs/nfs4state.c
54247+++ b/fs/nfs/nfs4state.c
54248@@ -1182,7 +1182,7 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
54249 snprintf(buf, sizeof(buf), "%s-manager",
54250 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR));
54251 rcu_read_unlock();
54252- task = kthread_run(nfs4_run_state_manager, clp, buf);
54253+ task = kthread_run(nfs4_run_state_manager, clp, "%s", buf);
54254 if (IS_ERR(task)) {
54255 printk(KERN_ERR "%s: kthread_run: %ld\n",
54256 __func__, PTR_ERR(task));
54257diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
54258index d401d01..10b3e62 100644
54259--- a/fs/nfsd/nfs4proc.c
54260+++ b/fs/nfsd/nfs4proc.c
54261@@ -1109,7 +1109,7 @@ struct nfsd4_operation {
54262 nfsd4op_rsize op_rsize_bop;
54263 stateid_getter op_get_currentstateid;
54264 stateid_setter op_set_currentstateid;
54265-};
54266+} __do_const;
54267
54268 static struct nfsd4_operation nfsd4_ops[];
54269
54270diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
54271index 6eb0dc5..29067a9 100644
54272--- a/fs/nfsd/nfs4xdr.c
54273+++ b/fs/nfsd/nfs4xdr.c
54274@@ -1457,7 +1457,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
54275
54276 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
54277
54278-static nfsd4_dec nfsd4_dec_ops[] = {
54279+static const nfsd4_dec nfsd4_dec_ops[] = {
54280 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
54281 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
54282 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
54283@@ -1497,7 +1497,7 @@ static nfsd4_dec nfsd4_dec_ops[] = {
54284 [OP_RELEASE_LOCKOWNER] = (nfsd4_dec)nfsd4_decode_release_lockowner,
54285 };
54286
54287-static nfsd4_dec nfsd41_dec_ops[] = {
54288+static const nfsd4_dec nfsd41_dec_ops[] = {
54289 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
54290 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
54291 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
54292@@ -1559,7 +1559,7 @@ static nfsd4_dec nfsd41_dec_ops[] = {
54293 };
54294
54295 struct nfsd4_minorversion_ops {
54296- nfsd4_dec *decoders;
54297+ const nfsd4_dec *decoders;
54298 int nops;
54299 };
54300
54301diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
54302index ca05f6d..b88c3a7 100644
54303--- a/fs/nfsd/nfscache.c
54304+++ b/fs/nfsd/nfscache.c
54305@@ -461,13 +461,16 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
54306 {
54307 struct svc_cacherep *rp = rqstp->rq_cacherep;
54308 struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
54309- int len;
54310+ long len;
54311
54312 if (!rp)
54313 return;
54314
54315- len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
54316- len >>= 2;
54317+ if (statp) {
54318+ len = (char*)statp - (char*)resv->iov_base;
54319+ len = resv->iov_len - len;
54320+ len >>= 2;
54321+ }
54322
54323 /* Don't cache excessive amounts of data and XDR failures */
54324 if (!statp || len > (256 >> 2)) {
54325diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
54326index 2b2e239..c915b48 100644
54327--- a/fs/nfsd/vfs.c
54328+++ b/fs/nfsd/vfs.c
54329@@ -939,7 +939,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
54330 } else {
54331 oldfs = get_fs();
54332 set_fs(KERNEL_DS);
54333- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
54334+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
54335 set_fs(oldfs);
54336 }
54337
54338@@ -1026,7 +1026,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
54339
54340 /* Write the data. */
54341 oldfs = get_fs(); set_fs(KERNEL_DS);
54342- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
54343+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
54344 set_fs(oldfs);
54345 if (host_err < 0)
54346 goto out_nfserr;
54347@@ -1572,7 +1572,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
54348 */
54349
54350 oldfs = get_fs(); set_fs(KERNEL_DS);
54351- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
54352+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
54353 set_fs(oldfs);
54354
54355 if (host_err < 0)
54356diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
54357index fea6bd5..8ee9d81 100644
54358--- a/fs/nls/nls_base.c
54359+++ b/fs/nls/nls_base.c
54360@@ -234,20 +234,22 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
54361
54362 int register_nls(struct nls_table * nls)
54363 {
54364- struct nls_table ** tmp = &tables;
54365+ struct nls_table *tmp = tables;
54366
54367 if (nls->next)
54368 return -EBUSY;
54369
54370 spin_lock(&nls_lock);
54371- while (*tmp) {
54372- if (nls == *tmp) {
54373+ while (tmp) {
54374+ if (nls == tmp) {
54375 spin_unlock(&nls_lock);
54376 return -EBUSY;
54377 }
54378- tmp = &(*tmp)->next;
54379+ tmp = tmp->next;
54380 }
54381- nls->next = tables;
54382+ pax_open_kernel();
54383+ *(struct nls_table **)&nls->next = tables;
54384+ pax_close_kernel();
54385 tables = nls;
54386 spin_unlock(&nls_lock);
54387 return 0;
54388@@ -255,12 +257,14 @@ int register_nls(struct nls_table * nls)
54389
54390 int unregister_nls(struct nls_table * nls)
54391 {
54392- struct nls_table ** tmp = &tables;
54393+ struct nls_table * const * tmp = &tables;
54394
54395 spin_lock(&nls_lock);
54396 while (*tmp) {
54397 if (nls == *tmp) {
54398- *tmp = nls->next;
54399+ pax_open_kernel();
54400+ *(struct nls_table **)tmp = nls->next;
54401+ pax_close_kernel();
54402 spin_unlock(&nls_lock);
54403 return 0;
54404 }
54405diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
54406index 7424929..35f6be5 100644
54407--- a/fs/nls/nls_euc-jp.c
54408+++ b/fs/nls/nls_euc-jp.c
54409@@ -561,8 +561,10 @@ static int __init init_nls_euc_jp(void)
54410 p_nls = load_nls("cp932");
54411
54412 if (p_nls) {
54413- table.charset2upper = p_nls->charset2upper;
54414- table.charset2lower = p_nls->charset2lower;
54415+ pax_open_kernel();
54416+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
54417+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
54418+ pax_close_kernel();
54419 return register_nls(&table);
54420 }
54421
54422diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
54423index e7bc1d7..06bd4bb 100644
54424--- a/fs/nls/nls_koi8-ru.c
54425+++ b/fs/nls/nls_koi8-ru.c
54426@@ -63,8 +63,10 @@ static int __init init_nls_koi8_ru(void)
54427 p_nls = load_nls("koi8-u");
54428
54429 if (p_nls) {
54430- table.charset2upper = p_nls->charset2upper;
54431- table.charset2lower = p_nls->charset2lower;
54432+ pax_open_kernel();
54433+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
54434+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
54435+ pax_close_kernel();
54436 return register_nls(&table);
54437 }
54438
54439diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
54440index 5d84442..2c034ba 100644
54441--- a/fs/notify/fanotify/fanotify_user.c
54442+++ b/fs/notify/fanotify/fanotify_user.c
54443@@ -121,6 +121,7 @@ static int fill_event_metadata(struct fsnotify_group *group,
54444 metadata->event_len = FAN_EVENT_METADATA_LEN;
54445 metadata->metadata_len = FAN_EVENT_METADATA_LEN;
54446 metadata->vers = FANOTIFY_METADATA_VERSION;
54447+ metadata->reserved = 0;
54448 metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS;
54449 metadata->pid = pid_vnr(event->tgid);
54450 if (unlikely(event->mask & FAN_Q_OVERFLOW))
54451@@ -251,8 +252,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
54452
54453 fd = fanotify_event_metadata.fd;
54454 ret = -EFAULT;
54455- if (copy_to_user(buf, &fanotify_event_metadata,
54456- fanotify_event_metadata.event_len))
54457+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
54458+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
54459 goto out_close_fd;
54460
54461 ret = prepare_for_access_response(group, event, fd);
54462diff --git a/fs/notify/notification.c b/fs/notify/notification.c
54463index 7b51b05..5ea5ef6 100644
54464--- a/fs/notify/notification.c
54465+++ b/fs/notify/notification.c
54466@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
54467 * get set to 0 so it will never get 'freed'
54468 */
54469 static struct fsnotify_event *q_overflow_event;
54470-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
54471+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
54472
54473 /**
54474 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
54475@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
54476 */
54477 u32 fsnotify_get_cookie(void)
54478 {
54479- return atomic_inc_return(&fsnotify_sync_cookie);
54480+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
54481 }
54482 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
54483
54484diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
54485index aa411c3..c260a84 100644
54486--- a/fs/ntfs/dir.c
54487+++ b/fs/ntfs/dir.c
54488@@ -1329,7 +1329,7 @@ find_next_index_buffer:
54489 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
54490 ~(s64)(ndir->itype.index.block_size - 1)));
54491 /* Bounds checks. */
54492- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
54493+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
54494 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
54495 "inode 0x%lx or driver bug.", vdir->i_ino);
54496 goto err_out;
54497diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
54498index 5b2d4f0..c6de396 100644
54499--- a/fs/ntfs/file.c
54500+++ b/fs/ntfs/file.c
54501@@ -2242,6 +2242,6 @@ const struct inode_operations ntfs_file_inode_ops = {
54502 #endif /* NTFS_RW */
54503 };
54504
54505-const struct file_operations ntfs_empty_file_ops = {};
54506+const struct file_operations ntfs_empty_file_ops __read_only;
54507
54508-const struct inode_operations ntfs_empty_inode_ops = {};
54509+const struct inode_operations ntfs_empty_inode_ops __read_only;
54510diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
54511index aebeacd..0dcdd26 100644
54512--- a/fs/ocfs2/localalloc.c
54513+++ b/fs/ocfs2/localalloc.c
54514@@ -1278,7 +1278,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
54515 goto bail;
54516 }
54517
54518- atomic_inc(&osb->alloc_stats.moves);
54519+ atomic_inc_unchecked(&osb->alloc_stats.moves);
54520
54521 bail:
54522 if (handle)
54523diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
54524index d355e6e..578d905 100644
54525--- a/fs/ocfs2/ocfs2.h
54526+++ b/fs/ocfs2/ocfs2.h
54527@@ -235,11 +235,11 @@ enum ocfs2_vol_state
54528
54529 struct ocfs2_alloc_stats
54530 {
54531- atomic_t moves;
54532- atomic_t local_data;
54533- atomic_t bitmap_data;
54534- atomic_t bg_allocs;
54535- atomic_t bg_extends;
54536+ atomic_unchecked_t moves;
54537+ atomic_unchecked_t local_data;
54538+ atomic_unchecked_t bitmap_data;
54539+ atomic_unchecked_t bg_allocs;
54540+ atomic_unchecked_t bg_extends;
54541 };
54542
54543 enum ocfs2_local_alloc_state
54544diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
54545index b7e74b5..19c6536 100644
54546--- a/fs/ocfs2/suballoc.c
54547+++ b/fs/ocfs2/suballoc.c
54548@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
54549 mlog_errno(status);
54550 goto bail;
54551 }
54552- atomic_inc(&osb->alloc_stats.bg_extends);
54553+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
54554
54555 /* You should never ask for this much metadata */
54556 BUG_ON(bits_wanted >
54557@@ -2007,7 +2007,7 @@ int ocfs2_claim_metadata(handle_t *handle,
54558 mlog_errno(status);
54559 goto bail;
54560 }
54561- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
54562+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
54563
54564 *suballoc_loc = res.sr_bg_blkno;
54565 *suballoc_bit_start = res.sr_bit_offset;
54566@@ -2171,7 +2171,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
54567 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
54568 res->sr_bits);
54569
54570- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
54571+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
54572
54573 BUG_ON(res->sr_bits != 1);
54574
54575@@ -2213,7 +2213,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
54576 mlog_errno(status);
54577 goto bail;
54578 }
54579- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
54580+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
54581
54582 BUG_ON(res.sr_bits != 1);
54583
54584@@ -2317,7 +2317,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
54585 cluster_start,
54586 num_clusters);
54587 if (!status)
54588- atomic_inc(&osb->alloc_stats.local_data);
54589+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
54590 } else {
54591 if (min_clusters > (osb->bitmap_cpg - 1)) {
54592 /* The only paths asking for contiguousness
54593@@ -2343,7 +2343,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
54594 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
54595 res.sr_bg_blkno,
54596 res.sr_bit_offset);
54597- atomic_inc(&osb->alloc_stats.bitmap_data);
54598+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
54599 *num_clusters = res.sr_bits;
54600 }
54601 }
54602diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
54603index 01b8516..579c4df 100644
54604--- a/fs/ocfs2/super.c
54605+++ b/fs/ocfs2/super.c
54606@@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
54607 "%10s => GlobalAllocs: %d LocalAllocs: %d "
54608 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
54609 "Stats",
54610- atomic_read(&osb->alloc_stats.bitmap_data),
54611- atomic_read(&osb->alloc_stats.local_data),
54612- atomic_read(&osb->alloc_stats.bg_allocs),
54613- atomic_read(&osb->alloc_stats.moves),
54614- atomic_read(&osb->alloc_stats.bg_extends));
54615+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
54616+ atomic_read_unchecked(&osb->alloc_stats.local_data),
54617+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
54618+ atomic_read_unchecked(&osb->alloc_stats.moves),
54619+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
54620
54621 out += snprintf(buf + out, len - out,
54622 "%10s => State: %u Descriptor: %llu Size: %u bits "
54623@@ -2122,11 +2122,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
54624 spin_lock_init(&osb->osb_xattr_lock);
54625 ocfs2_init_steal_slots(osb);
54626
54627- atomic_set(&osb->alloc_stats.moves, 0);
54628- atomic_set(&osb->alloc_stats.local_data, 0);
54629- atomic_set(&osb->alloc_stats.bitmap_data, 0);
54630- atomic_set(&osb->alloc_stats.bg_allocs, 0);
54631- atomic_set(&osb->alloc_stats.bg_extends, 0);
54632+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
54633+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
54634+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
54635+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
54636+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
54637
54638 /* Copy the blockcheck stats from the superblock probe */
54639 osb->osb_ecc_stats = *stats;
54640diff --git a/fs/open.c b/fs/open.c
54641index 6835446..eadf09f 100644
54642--- a/fs/open.c
54643+++ b/fs/open.c
54644@@ -32,6 +32,8 @@
54645 #include <linux/dnotify.h>
54646 #include <linux/compat.h>
54647
54648+#define CREATE_TRACE_POINTS
54649+#include <trace/events/fs.h>
54650 #include "internal.h"
54651
54652 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
54653@@ -102,6 +104,8 @@ long vfs_truncate(struct path *path, loff_t length)
54654 error = locks_verify_truncate(inode, NULL, length);
54655 if (!error)
54656 error = security_path_truncate(path);
54657+ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
54658+ error = -EACCES;
54659 if (!error)
54660 error = do_truncate(path->dentry, length, 0, NULL);
54661
54662@@ -186,6 +190,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
54663 error = locks_verify_truncate(inode, f.file, length);
54664 if (!error)
54665 error = security_path_truncate(&f.file->f_path);
54666+ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
54667+ error = -EACCES;
54668 if (!error)
54669 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
54670 sb_end_write(inode->i_sb);
54671@@ -388,6 +394,9 @@ retry:
54672 if (__mnt_is_readonly(path.mnt))
54673 res = -EROFS;
54674
54675+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
54676+ res = -EACCES;
54677+
54678 out_path_release:
54679 path_put(&path);
54680 if (retry_estale(res, lookup_flags)) {
54681@@ -419,6 +428,8 @@ retry:
54682 if (error)
54683 goto dput_and_out;
54684
54685+ gr_log_chdir(path.dentry, path.mnt);
54686+
54687 set_fs_pwd(current->fs, &path);
54688
54689 dput_and_out:
54690@@ -448,6 +459,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
54691 goto out_putf;
54692
54693 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
54694+
54695+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
54696+ error = -EPERM;
54697+
54698+ if (!error)
54699+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
54700+
54701 if (!error)
54702 set_fs_pwd(current->fs, &f.file->f_path);
54703 out_putf:
54704@@ -477,7 +495,13 @@ retry:
54705 if (error)
54706 goto dput_and_out;
54707
54708+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
54709+ goto dput_and_out;
54710+
54711 set_fs_root(current->fs, &path);
54712+
54713+ gr_handle_chroot_chdir(&path);
54714+
54715 error = 0;
54716 dput_and_out:
54717 path_put(&path);
54718@@ -499,6 +523,16 @@ static int chmod_common(struct path *path, umode_t mode)
54719 if (error)
54720 return error;
54721 mutex_lock(&inode->i_mutex);
54722+
54723+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
54724+ error = -EACCES;
54725+ goto out_unlock;
54726+ }
54727+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
54728+ error = -EACCES;
54729+ goto out_unlock;
54730+ }
54731+
54732 error = security_path_chmod(path, mode);
54733 if (error)
54734 goto out_unlock;
54735@@ -559,6 +593,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
54736 uid = make_kuid(current_user_ns(), user);
54737 gid = make_kgid(current_user_ns(), group);
54738
54739+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
54740+ return -EACCES;
54741+
54742 newattrs.ia_valid = ATTR_CTIME;
54743 if (user != (uid_t) -1) {
54744 if (!uid_valid(uid))
54745@@ -974,6 +1011,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
54746 } else {
54747 fsnotify_open(f);
54748 fd_install(fd, f);
54749+ trace_do_sys_open(tmp->name, flags, mode);
54750 }
54751 }
54752 putname(tmp);
54753diff --git a/fs/pipe.c b/fs/pipe.c
54754index 2234f3f..f9083a1 100644
54755--- a/fs/pipe.c
54756+++ b/fs/pipe.c
54757@@ -438,9 +438,9 @@ redo:
54758 }
54759 if (bufs) /* More to do? */
54760 continue;
54761- if (!pipe->writers)
54762+ if (!atomic_read(&pipe->writers))
54763 break;
54764- if (!pipe->waiting_writers) {
54765+ if (!atomic_read(&pipe->waiting_writers)) {
54766 /* syscall merging: Usually we must not sleep
54767 * if O_NONBLOCK is set, or if we got some data.
54768 * But if a writer sleeps in kernel space, then
54769@@ -504,7 +504,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
54770 mutex_lock(&inode->i_mutex);
54771 pipe = inode->i_pipe;
54772
54773- if (!pipe->readers) {
54774+ if (!atomic_read(&pipe->readers)) {
54775 send_sig(SIGPIPE, current, 0);
54776 ret = -EPIPE;
54777 goto out;
54778@@ -553,7 +553,7 @@ redo1:
54779 for (;;) {
54780 int bufs;
54781
54782- if (!pipe->readers) {
54783+ if (!atomic_read(&pipe->readers)) {
54784 send_sig(SIGPIPE, current, 0);
54785 if (!ret)
54786 ret = -EPIPE;
54787@@ -644,9 +644,9 @@ redo2:
54788 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
54789 do_wakeup = 0;
54790 }
54791- pipe->waiting_writers++;
54792+ atomic_inc(&pipe->waiting_writers);
54793 pipe_wait(pipe);
54794- pipe->waiting_writers--;
54795+ atomic_dec(&pipe->waiting_writers);
54796 }
54797 out:
54798 mutex_unlock(&inode->i_mutex);
54799@@ -716,7 +716,7 @@ pipe_poll(struct file *filp, poll_table *wait)
54800 mask = 0;
54801 if (filp->f_mode & FMODE_READ) {
54802 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
54803- if (!pipe->writers && filp->f_version != pipe->w_counter)
54804+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
54805 mask |= POLLHUP;
54806 }
54807
54808@@ -726,7 +726,7 @@ pipe_poll(struct file *filp, poll_table *wait)
54809 * Most Unices do not set POLLERR for FIFOs but on Linux they
54810 * behave exactly like pipes for poll().
54811 */
54812- if (!pipe->readers)
54813+ if (!atomic_read(&pipe->readers))
54814 mask |= POLLERR;
54815 }
54816
54817@@ -740,10 +740,10 @@ pipe_release(struct inode *inode, int decr, int decw)
54818
54819 mutex_lock(&inode->i_mutex);
54820 pipe = inode->i_pipe;
54821- pipe->readers -= decr;
54822- pipe->writers -= decw;
54823+ atomic_sub(decr, &pipe->readers);
54824+ atomic_sub(decw, &pipe->writers);
54825
54826- if (!pipe->readers && !pipe->writers) {
54827+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
54828 free_pipe_info(inode);
54829 } else {
54830 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
54831@@ -833,7 +833,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
54832
54833 if (inode->i_pipe) {
54834 ret = 0;
54835- inode->i_pipe->readers++;
54836+ atomic_inc(&inode->i_pipe->readers);
54837 }
54838
54839 mutex_unlock(&inode->i_mutex);
54840@@ -850,7 +850,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
54841
54842 if (inode->i_pipe) {
54843 ret = 0;
54844- inode->i_pipe->writers++;
54845+ atomic_inc(&inode->i_pipe->writers);
54846 }
54847
54848 mutex_unlock(&inode->i_mutex);
54849@@ -871,9 +871,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
54850 if (inode->i_pipe) {
54851 ret = 0;
54852 if (filp->f_mode & FMODE_READ)
54853- inode->i_pipe->readers++;
54854+ atomic_inc(&inode->i_pipe->readers);
54855 if (filp->f_mode & FMODE_WRITE)
54856- inode->i_pipe->writers++;
54857+ atomic_inc(&inode->i_pipe->writers);
54858 }
54859
54860 mutex_unlock(&inode->i_mutex);
54861@@ -965,7 +965,7 @@ void free_pipe_info(struct inode *inode)
54862 inode->i_pipe = NULL;
54863 }
54864
54865-static struct vfsmount *pipe_mnt __read_mostly;
54866+struct vfsmount *pipe_mnt __read_mostly;
54867
54868 /*
54869 * pipefs_dname() is called from d_path().
54870@@ -995,7 +995,8 @@ static struct inode * get_pipe_inode(void)
54871 goto fail_iput;
54872 inode->i_pipe = pipe;
54873
54874- pipe->readers = pipe->writers = 1;
54875+ atomic_set(&pipe->readers, 1);
54876+ atomic_set(&pipe->writers, 1);
54877 inode->i_fop = &rdwr_pipefifo_fops;
54878
54879 /*
54880diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
54881index 15af622..0e9f4467 100644
54882--- a/fs/proc/Kconfig
54883+++ b/fs/proc/Kconfig
54884@@ -30,12 +30,12 @@ config PROC_FS
54885
54886 config PROC_KCORE
54887 bool "/proc/kcore support" if !ARM
54888- depends on PROC_FS && MMU
54889+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
54890
54891 config PROC_VMCORE
54892 bool "/proc/vmcore support"
54893- depends on PROC_FS && CRASH_DUMP
54894- default y
54895+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
54896+ default n
54897 help
54898 Exports the dump image of crashed kernel in ELF format.
54899
54900@@ -59,8 +59,8 @@ config PROC_SYSCTL
54901 limited in memory.
54902
54903 config PROC_PAGE_MONITOR
54904- default y
54905- depends on PROC_FS && MMU
54906+ default n
54907+ depends on PROC_FS && MMU && !GRKERNSEC
54908 bool "Enable /proc page monitoring" if EXPERT
54909 help
54910 Various /proc files exist to monitor process memory utilization:
54911diff --git a/fs/proc/array.c b/fs/proc/array.c
54912index cbd0f1b..adec3f0 100644
54913--- a/fs/proc/array.c
54914+++ b/fs/proc/array.c
54915@@ -60,6 +60,7 @@
54916 #include <linux/tty.h>
54917 #include <linux/string.h>
54918 #include <linux/mman.h>
54919+#include <linux/grsecurity.h>
54920 #include <linux/proc_fs.h>
54921 #include <linux/ioport.h>
54922 #include <linux/uaccess.h>
54923@@ -363,6 +364,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
54924 seq_putc(m, '\n');
54925 }
54926
54927+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
54928+static inline void task_pax(struct seq_file *m, struct task_struct *p)
54929+{
54930+ if (p->mm)
54931+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
54932+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
54933+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
54934+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
54935+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
54936+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
54937+ else
54938+ seq_printf(m, "PaX:\t-----\n");
54939+}
54940+#endif
54941+
54942 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
54943 struct pid *pid, struct task_struct *task)
54944 {
54945@@ -381,9 +397,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
54946 task_cpus_allowed(m, task);
54947 cpuset_task_status_allowed(m, task);
54948 task_context_switch_counts(m, task);
54949+
54950+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
54951+ task_pax(m, task);
54952+#endif
54953+
54954+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
54955+ task_grsec_rbac(m, task);
54956+#endif
54957+
54958 return 0;
54959 }
54960
54961+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54962+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
54963+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
54964+ _mm->pax_flags & MF_PAX_SEGMEXEC))
54965+#endif
54966+
54967 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
54968 struct pid *pid, struct task_struct *task, int whole)
54969 {
54970@@ -405,6 +436,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
54971 char tcomm[sizeof(task->comm)];
54972 unsigned long flags;
54973
54974+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54975+ if (current->exec_id != m->exec_id) {
54976+ gr_log_badprocpid("stat");
54977+ return 0;
54978+ }
54979+#endif
54980+
54981 state = *get_task_state(task);
54982 vsize = eip = esp = 0;
54983 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
54984@@ -476,6 +514,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
54985 gtime = task_gtime(task);
54986 }
54987
54988+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54989+ if (PAX_RAND_FLAGS(mm)) {
54990+ eip = 0;
54991+ esp = 0;
54992+ wchan = 0;
54993+ }
54994+#endif
54995+#ifdef CONFIG_GRKERNSEC_HIDESYM
54996+ wchan = 0;
54997+ eip =0;
54998+ esp =0;
54999+#endif
55000+
55001 /* scale priority and nice values from timeslices to -20..20 */
55002 /* to make it look like a "normal" Unix priority/nice value */
55003 priority = task_prio(task);
55004@@ -512,9 +563,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
55005 seq_put_decimal_ull(m, ' ', vsize);
55006 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
55007 seq_put_decimal_ull(m, ' ', rsslim);
55008+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55009+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
55010+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
55011+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
55012+#else
55013 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
55014 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
55015 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
55016+#endif
55017 seq_put_decimal_ull(m, ' ', esp);
55018 seq_put_decimal_ull(m, ' ', eip);
55019 /* The signal information here is obsolete.
55020@@ -536,7 +593,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
55021 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
55022 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
55023
55024- if (mm && permitted) {
55025+ if (mm && permitted
55026+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55027+ && !PAX_RAND_FLAGS(mm)
55028+#endif
55029+ ) {
55030 seq_put_decimal_ull(m, ' ', mm->start_data);
55031 seq_put_decimal_ull(m, ' ', mm->end_data);
55032 seq_put_decimal_ull(m, ' ', mm->start_brk);
55033@@ -574,8 +635,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
55034 struct pid *pid, struct task_struct *task)
55035 {
55036 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
55037- struct mm_struct *mm = get_task_mm(task);
55038+ struct mm_struct *mm;
55039
55040+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55041+ if (current->exec_id != m->exec_id) {
55042+ gr_log_badprocpid("statm");
55043+ return 0;
55044+ }
55045+#endif
55046+ mm = get_task_mm(task);
55047 if (mm) {
55048 size = task_statm(mm, &shared, &text, &data, &resident);
55049 mmput(mm);
55050@@ -598,6 +666,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
55051 return 0;
55052 }
55053
55054+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
55055+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
55056+{
55057+ return sprintf(buffer, "%pI4\n", &task->signal->curr_ip);
55058+}
55059+#endif
55060+
55061 #ifdef CONFIG_CHECKPOINT_RESTORE
55062 static struct pid *
55063 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
55064diff --git a/fs/proc/base.c b/fs/proc/base.c
55065index 69078c7..3e12a75 100644
55066--- a/fs/proc/base.c
55067+++ b/fs/proc/base.c
55068@@ -112,6 +112,14 @@ struct pid_entry {
55069 union proc_op op;
55070 };
55071
55072+struct getdents_callback {
55073+ struct linux_dirent __user * current_dir;
55074+ struct linux_dirent __user * previous;
55075+ struct file * file;
55076+ int count;
55077+ int error;
55078+};
55079+
55080 #define NOD(NAME, MODE, IOP, FOP, OP) { \
55081 .name = (NAME), \
55082 .len = sizeof(NAME) - 1, \
55083@@ -209,6 +217,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
55084 if (!mm->arg_end)
55085 goto out_mm; /* Shh! No looking before we're done */
55086
55087+ if (gr_acl_handle_procpidmem(task))
55088+ goto out_mm;
55089+
55090 len = mm->arg_end - mm->arg_start;
55091
55092 if (len > PAGE_SIZE)
55093@@ -236,12 +247,28 @@ out:
55094 return res;
55095 }
55096
55097+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55098+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
55099+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
55100+ _mm->pax_flags & MF_PAX_SEGMEXEC))
55101+#endif
55102+
55103 static int proc_pid_auxv(struct task_struct *task, char *buffer)
55104 {
55105 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
55106 int res = PTR_ERR(mm);
55107 if (mm && !IS_ERR(mm)) {
55108 unsigned int nwords = 0;
55109+
55110+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55111+ /* allow if we're currently ptracing this task */
55112+ if (PAX_RAND_FLAGS(mm) &&
55113+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
55114+ mmput(mm);
55115+ return 0;
55116+ }
55117+#endif
55118+
55119 do {
55120 nwords += 2;
55121 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
55122@@ -255,7 +282,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
55123 }
55124
55125
55126-#ifdef CONFIG_KALLSYMS
55127+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
55128 /*
55129 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
55130 * Returns the resolved symbol. If that fails, simply return the address.
55131@@ -294,7 +321,7 @@ static void unlock_trace(struct task_struct *task)
55132 mutex_unlock(&task->signal->cred_guard_mutex);
55133 }
55134
55135-#ifdef CONFIG_STACKTRACE
55136+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
55137
55138 #define MAX_STACK_TRACE_DEPTH 64
55139
55140@@ -486,7 +513,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
55141 return count;
55142 }
55143
55144-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
55145+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
55146 static int proc_pid_syscall(struct task_struct *task, char *buffer)
55147 {
55148 long nr;
55149@@ -515,7 +542,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
55150 /************************************************************************/
55151
55152 /* permission checks */
55153-static int proc_fd_access_allowed(struct inode *inode)
55154+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
55155 {
55156 struct task_struct *task;
55157 int allowed = 0;
55158@@ -525,7 +552,10 @@ static int proc_fd_access_allowed(struct inode *inode)
55159 */
55160 task = get_proc_task(inode);
55161 if (task) {
55162- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
55163+ if (log)
55164+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
55165+ else
55166+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
55167 put_task_struct(task);
55168 }
55169 return allowed;
55170@@ -556,10 +586,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
55171 struct task_struct *task,
55172 int hide_pid_min)
55173 {
55174+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
55175+ return false;
55176+
55177+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55178+ rcu_read_lock();
55179+ {
55180+ const struct cred *tmpcred = current_cred();
55181+ const struct cred *cred = __task_cred(task);
55182+
55183+ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
55184+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
55185+ || in_group_p(grsec_proc_gid)
55186+#endif
55187+ ) {
55188+ rcu_read_unlock();
55189+ return true;
55190+ }
55191+ }
55192+ rcu_read_unlock();
55193+
55194+ if (!pid->hide_pid)
55195+ return false;
55196+#endif
55197+
55198 if (pid->hide_pid < hide_pid_min)
55199 return true;
55200 if (in_group_p(pid->pid_gid))
55201 return true;
55202+
55203 return ptrace_may_access(task, PTRACE_MODE_READ);
55204 }
55205
55206@@ -577,7 +632,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
55207 put_task_struct(task);
55208
55209 if (!has_perms) {
55210+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55211+ {
55212+#else
55213 if (pid->hide_pid == 2) {
55214+#endif
55215 /*
55216 * Let's make getdents(), stat(), and open()
55217 * consistent with each other. If a process
55218@@ -675,6 +734,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
55219 if (!task)
55220 return -ESRCH;
55221
55222+ if (gr_acl_handle_procpidmem(task)) {
55223+ put_task_struct(task);
55224+ return -EPERM;
55225+ }
55226+
55227 mm = mm_access(task, mode);
55228 put_task_struct(task);
55229
55230@@ -690,6 +754,10 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
55231
55232 file->private_data = mm;
55233
55234+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55235+ file->f_version = current->exec_id;
55236+#endif
55237+
55238 return 0;
55239 }
55240
55241@@ -711,6 +779,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
55242 ssize_t copied;
55243 char *page;
55244
55245+#ifdef CONFIG_GRKERNSEC
55246+ if (write)
55247+ return -EPERM;
55248+#endif
55249+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55250+ if (file->f_version != current->exec_id) {
55251+ gr_log_badprocpid("mem");
55252+ return 0;
55253+ }
55254+#endif
55255+
55256 if (!mm)
55257 return 0;
55258
55259@@ -723,7 +802,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
55260 goto free;
55261
55262 while (count > 0) {
55263- int this_len = min_t(int, count, PAGE_SIZE);
55264+ ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
55265
55266 if (write && copy_from_user(page, buf, this_len)) {
55267 copied = -EFAULT;
55268@@ -815,6 +894,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
55269 if (!mm)
55270 return 0;
55271
55272+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55273+ if (file->f_version != current->exec_id) {
55274+ gr_log_badprocpid("environ");
55275+ return 0;
55276+ }
55277+#endif
55278+
55279 page = (char *)__get_free_page(GFP_TEMPORARY);
55280 if (!page)
55281 return -ENOMEM;
55282@@ -824,7 +910,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
55283 goto free;
55284 while (count > 0) {
55285 size_t this_len, max_len;
55286- int retval;
55287+ ssize_t retval;
55288
55289 if (src >= (mm->env_end - mm->env_start))
55290 break;
55291@@ -1430,7 +1516,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
55292 int error = -EACCES;
55293
55294 /* Are we allowed to snoop on the tasks file descriptors? */
55295- if (!proc_fd_access_allowed(inode))
55296+ if (!proc_fd_access_allowed(inode, 0))
55297 goto out;
55298
55299 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
55300@@ -1474,8 +1560,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
55301 struct path path;
55302
55303 /* Are we allowed to snoop on the tasks file descriptors? */
55304- if (!proc_fd_access_allowed(inode))
55305- goto out;
55306+ /* logging this is needed for learning on chromium to work properly,
55307+ but we don't want to flood the logs from 'ps' which does a readlink
55308+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
55309+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
55310+ */
55311+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
55312+ if (!proc_fd_access_allowed(inode,0))
55313+ goto out;
55314+ } else {
55315+ if (!proc_fd_access_allowed(inode,1))
55316+ goto out;
55317+ }
55318
55319 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
55320 if (error)
55321@@ -1525,7 +1621,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
55322 rcu_read_lock();
55323 cred = __task_cred(task);
55324 inode->i_uid = cred->euid;
55325+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
55326+ inode->i_gid = grsec_proc_gid;
55327+#else
55328 inode->i_gid = cred->egid;
55329+#endif
55330 rcu_read_unlock();
55331 }
55332 security_task_to_inode(task, inode);
55333@@ -1561,10 +1661,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
55334 return -ENOENT;
55335 }
55336 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
55337+#ifdef CONFIG_GRKERNSEC_PROC_USER
55338+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
55339+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55340+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
55341+#endif
55342 task_dumpable(task)) {
55343 cred = __task_cred(task);
55344 stat->uid = cred->euid;
55345+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
55346+ stat->gid = grsec_proc_gid;
55347+#else
55348 stat->gid = cred->egid;
55349+#endif
55350 }
55351 }
55352 rcu_read_unlock();
55353@@ -1602,11 +1711,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
55354
55355 if (task) {
55356 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
55357+#ifdef CONFIG_GRKERNSEC_PROC_USER
55358+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
55359+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55360+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
55361+#endif
55362 task_dumpable(task)) {
55363 rcu_read_lock();
55364 cred = __task_cred(task);
55365 inode->i_uid = cred->euid;
55366+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
55367+ inode->i_gid = grsec_proc_gid;
55368+#else
55369 inode->i_gid = cred->egid;
55370+#endif
55371 rcu_read_unlock();
55372 } else {
55373 inode->i_uid = GLOBAL_ROOT_UID;
55374@@ -2059,6 +2177,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
55375 if (!task)
55376 goto out_no_task;
55377
55378+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
55379+ goto out;
55380+
55381 /*
55382 * Yes, it does not scale. And it should not. Don't add
55383 * new entries into /proc/<tgid>/ without very good reasons.
55384@@ -2103,6 +2224,9 @@ static int proc_pident_readdir(struct file *filp,
55385 if (!task)
55386 goto out_no_task;
55387
55388+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
55389+ goto out;
55390+
55391 ret = 0;
55392 i = filp->f_pos;
55393 switch (i) {
55394@@ -2516,7 +2640,7 @@ static const struct pid_entry tgid_base_stuff[] = {
55395 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
55396 #endif
55397 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
55398-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
55399+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
55400 INF("syscall", S_IRUGO, proc_pid_syscall),
55401 #endif
55402 INF("cmdline", S_IRUGO, proc_pid_cmdline),
55403@@ -2541,10 +2665,10 @@ static const struct pid_entry tgid_base_stuff[] = {
55404 #ifdef CONFIG_SECURITY
55405 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
55406 #endif
55407-#ifdef CONFIG_KALLSYMS
55408+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
55409 INF("wchan", S_IRUGO, proc_pid_wchan),
55410 #endif
55411-#ifdef CONFIG_STACKTRACE
55412+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
55413 ONE("stack", S_IRUGO, proc_pid_stack),
55414 #endif
55415 #ifdef CONFIG_SCHEDSTATS
55416@@ -2578,6 +2702,9 @@ static const struct pid_entry tgid_base_stuff[] = {
55417 #ifdef CONFIG_HARDWALL
55418 INF("hardwall", S_IRUGO, proc_pid_hardwall),
55419 #endif
55420+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
55421+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
55422+#endif
55423 #ifdef CONFIG_USER_NS
55424 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
55425 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
55426@@ -2707,7 +2834,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
55427 if (!inode)
55428 goto out;
55429
55430+#ifdef CONFIG_GRKERNSEC_PROC_USER
55431+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
55432+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55433+ inode->i_gid = grsec_proc_gid;
55434+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
55435+#else
55436 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
55437+#endif
55438 inode->i_op = &proc_tgid_base_inode_operations;
55439 inode->i_fop = &proc_tgid_base_operations;
55440 inode->i_flags|=S_IMMUTABLE;
55441@@ -2745,7 +2879,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
55442 if (!task)
55443 goto out;
55444
55445+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
55446+ goto out_put_task;
55447+
55448 result = proc_pid_instantiate(dir, dentry, task, NULL);
55449+out_put_task:
55450 put_task_struct(task);
55451 out:
55452 return result;
55453@@ -2808,6 +2946,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
55454 static int fake_filldir(void *buf, const char *name, int namelen,
55455 loff_t offset, u64 ino, unsigned d_type)
55456 {
55457+ struct getdents_callback * __buf = (struct getdents_callback *) buf;
55458+ __buf->error = -EINVAL;
55459 return 0;
55460 }
55461
55462@@ -2859,7 +2999,7 @@ static const struct pid_entry tid_base_stuff[] = {
55463 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
55464 #endif
55465 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
55466-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
55467+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
55468 INF("syscall", S_IRUGO, proc_pid_syscall),
55469 #endif
55470 INF("cmdline", S_IRUGO, proc_pid_cmdline),
55471@@ -2886,10 +3026,10 @@ static const struct pid_entry tid_base_stuff[] = {
55472 #ifdef CONFIG_SECURITY
55473 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
55474 #endif
55475-#ifdef CONFIG_KALLSYMS
55476+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
55477 INF("wchan", S_IRUGO, proc_pid_wchan),
55478 #endif
55479-#ifdef CONFIG_STACKTRACE
55480+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
55481 ONE("stack", S_IRUGO, proc_pid_stack),
55482 #endif
55483 #ifdef CONFIG_SCHEDSTATS
55484diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
55485index 82676e3..5f8518a 100644
55486--- a/fs/proc/cmdline.c
55487+++ b/fs/proc/cmdline.c
55488@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
55489
55490 static int __init proc_cmdline_init(void)
55491 {
55492+#ifdef CONFIG_GRKERNSEC_PROC_ADD
55493+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
55494+#else
55495 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
55496+#endif
55497 return 0;
55498 }
55499 module_init(proc_cmdline_init);
55500diff --git a/fs/proc/devices.c b/fs/proc/devices.c
55501index b143471..bb105e5 100644
55502--- a/fs/proc/devices.c
55503+++ b/fs/proc/devices.c
55504@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
55505
55506 static int __init proc_devices_init(void)
55507 {
55508+#ifdef CONFIG_GRKERNSEC_PROC_ADD
55509+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
55510+#else
55511 proc_create("devices", 0, NULL, &proc_devinfo_operations);
55512+#endif
55513 return 0;
55514 }
55515 module_init(proc_devices_init);
55516diff --git a/fs/proc/fd.c b/fs/proc/fd.c
55517index d7a4a28..0201742 100644
55518--- a/fs/proc/fd.c
55519+++ b/fs/proc/fd.c
55520@@ -25,7 +25,8 @@ static int seq_show(struct seq_file *m, void *v)
55521 if (!task)
55522 return -ENOENT;
55523
55524- files = get_files_struct(task);
55525+ if (!gr_acl_handle_procpidmem(task))
55526+ files = get_files_struct(task);
55527 put_task_struct(task);
55528
55529 if (files) {
55530@@ -302,11 +303,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
55531 */
55532 int proc_fd_permission(struct inode *inode, int mask)
55533 {
55534+ struct task_struct *task;
55535 int rv = generic_permission(inode, mask);
55536- if (rv == 0)
55537- return 0;
55538+
55539 if (task_pid(current) == proc_pid(inode))
55540 rv = 0;
55541+
55542+ task = get_proc_task(inode);
55543+ if (task == NULL)
55544+ return rv;
55545+
55546+ if (gr_acl_handle_procpidmem(task))
55547+ rv = -EACCES;
55548+
55549+ put_task_struct(task);
55550+
55551 return rv;
55552 }
55553
55554diff --git a/fs/proc/inode.c b/fs/proc/inode.c
55555index 869116c..820cb27 100644
55556--- a/fs/proc/inode.c
55557+++ b/fs/proc/inode.c
55558@@ -22,11 +22,17 @@
55559 #include <linux/seq_file.h>
55560 #include <linux/slab.h>
55561 #include <linux/mount.h>
55562+#include <linux/grsecurity.h>
55563
55564 #include <asm/uaccess.h>
55565
55566 #include "internal.h"
55567
55568+#ifdef CONFIG_PROC_SYSCTL
55569+extern const struct inode_operations proc_sys_inode_operations;
55570+extern const struct inode_operations proc_sys_dir_operations;
55571+#endif
55572+
55573 static void proc_evict_inode(struct inode *inode)
55574 {
55575 struct proc_dir_entry *de;
55576@@ -54,6 +60,13 @@ static void proc_evict_inode(struct inode *inode)
55577 ns = PROC_I(inode)->ns;
55578 if (ns_ops && ns)
55579 ns_ops->put(ns);
55580+
55581+#ifdef CONFIG_PROC_SYSCTL
55582+ if (inode->i_op == &proc_sys_inode_operations ||
55583+ inode->i_op == &proc_sys_dir_operations)
55584+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
55585+#endif
55586+
55587 }
55588
55589 static struct kmem_cache * proc_inode_cachep;
55590@@ -456,7 +469,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
55591 if (de->mode) {
55592 inode->i_mode = de->mode;
55593 inode->i_uid = de->uid;
55594+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
55595+ inode->i_gid = grsec_proc_gid;
55596+#else
55597 inode->i_gid = de->gid;
55598+#endif
55599 }
55600 if (de->size)
55601 inode->i_size = de->size;
55602diff --git a/fs/proc/internal.h b/fs/proc/internal.h
55603index 85ff3a4..a512bd8 100644
55604--- a/fs/proc/internal.h
55605+++ b/fs/proc/internal.h
55606@@ -56,6 +56,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
55607 struct pid *pid, struct task_struct *task);
55608 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
55609 struct pid *pid, struct task_struct *task);
55610+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
55611+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
55612+#endif
55613 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
55614
55615 extern const struct file_operations proc_tid_children_operations;
55616diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
55617index eda6f01..006ae24 100644
55618--- a/fs/proc/kcore.c
55619+++ b/fs/proc/kcore.c
55620@@ -481,9 +481,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
55621 * the addresses in the elf_phdr on our list.
55622 */
55623 start = kc_offset_to_vaddr(*fpos - elf_buflen);
55624- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
55625+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
55626+ if (tsz > buflen)
55627 tsz = buflen;
55628-
55629+
55630 while (buflen) {
55631 struct kcore_list *m;
55632
55633@@ -512,20 +513,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
55634 kfree(elf_buf);
55635 } else {
55636 if (kern_addr_valid(start)) {
55637- unsigned long n;
55638+ char *elf_buf;
55639+ mm_segment_t oldfs;
55640
55641- n = copy_to_user(buffer, (char *)start, tsz);
55642- /*
55643- * We cannot distinguish between fault on source
55644- * and fault on destination. When this happens
55645- * we clear too and hope it will trigger the
55646- * EFAULT again.
55647- */
55648- if (n) {
55649- if (clear_user(buffer + tsz - n,
55650- n))
55651+ elf_buf = kmalloc(tsz, GFP_KERNEL);
55652+ if (!elf_buf)
55653+ return -ENOMEM;
55654+ oldfs = get_fs();
55655+ set_fs(KERNEL_DS);
55656+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
55657+ set_fs(oldfs);
55658+ if (copy_to_user(buffer, elf_buf, tsz)) {
55659+ kfree(elf_buf);
55660 return -EFAULT;
55661+ }
55662 }
55663+ set_fs(oldfs);
55664+ kfree(elf_buf);
55665 } else {
55666 if (clear_user(buffer, tsz))
55667 return -EFAULT;
55668@@ -545,6 +549,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
55669
55670 static int open_kcore(struct inode *inode, struct file *filp)
55671 {
55672+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
55673+ return -EPERM;
55674+#endif
55675 if (!capable(CAP_SYS_RAWIO))
55676 return -EPERM;
55677 if (kcore_need_update)
55678diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
55679index 1efaaa1..834e49a 100644
55680--- a/fs/proc/meminfo.c
55681+++ b/fs/proc/meminfo.c
55682@@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
55683 vmi.used >> 10,
55684 vmi.largest_chunk >> 10
55685 #ifdef CONFIG_MEMORY_FAILURE
55686- ,atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
55687+ ,atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10)
55688 #endif
55689 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
55690 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
55691diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
55692index ccfd99b..1b7e255 100644
55693--- a/fs/proc/nommu.c
55694+++ b/fs/proc/nommu.c
55695@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
55696 if (len < 1)
55697 len = 1;
55698 seq_printf(m, "%*c", len, ' ');
55699- seq_path(m, &file->f_path, "");
55700+ seq_path(m, &file->f_path, "\n\\");
55701 }
55702
55703 seq_putc(m, '\n');
55704diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
55705index b4ac657..0842bd2 100644
55706--- a/fs/proc/proc_net.c
55707+++ b/fs/proc/proc_net.c
55708@@ -23,6 +23,7 @@
55709 #include <linux/nsproxy.h>
55710 #include <net/net_namespace.h>
55711 #include <linux/seq_file.h>
55712+#include <linux/grsecurity.h>
55713
55714 #include "internal.h"
55715
55716@@ -105,6 +106,17 @@ static struct net *get_proc_task_net(struct inode *dir)
55717 struct task_struct *task;
55718 struct nsproxy *ns;
55719 struct net *net = NULL;
55720+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55721+ const struct cred *cred = current_cred();
55722+#endif
55723+
55724+#ifdef CONFIG_GRKERNSEC_PROC_USER
55725+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
55726+ return net;
55727+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55728+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
55729+ return net;
55730+#endif
55731
55732 rcu_read_lock();
55733 task = pid_task(proc_pid(dir), PIDTYPE_PID);
55734diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
55735index ac05f33..1e6dc7e 100644
55736--- a/fs/proc/proc_sysctl.c
55737+++ b/fs/proc/proc_sysctl.c
55738@@ -13,11 +13,15 @@
55739 #include <linux/module.h>
55740 #include "internal.h"
55741
55742+extern int gr_handle_chroot_sysctl(const int op);
55743+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
55744+ const int op);
55745+
55746 static const struct dentry_operations proc_sys_dentry_operations;
55747 static const struct file_operations proc_sys_file_operations;
55748-static const struct inode_operations proc_sys_inode_operations;
55749+const struct inode_operations proc_sys_inode_operations;
55750 static const struct file_operations proc_sys_dir_file_operations;
55751-static const struct inode_operations proc_sys_dir_operations;
55752+const struct inode_operations proc_sys_dir_operations;
55753
55754 void proc_sys_poll_notify(struct ctl_table_poll *poll)
55755 {
55756@@ -467,6 +471,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
55757
55758 err = NULL;
55759 d_set_d_op(dentry, &proc_sys_dentry_operations);
55760+
55761+ gr_handle_proc_create(dentry, inode);
55762+
55763 d_add(dentry, inode);
55764
55765 out:
55766@@ -482,6 +489,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
55767 struct inode *inode = file_inode(filp);
55768 struct ctl_table_header *head = grab_header(inode);
55769 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
55770+ int op = write ? MAY_WRITE : MAY_READ;
55771 ssize_t error;
55772 size_t res;
55773
55774@@ -493,7 +501,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
55775 * and won't be until we finish.
55776 */
55777 error = -EPERM;
55778- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
55779+ if (sysctl_perm(head, table, op))
55780 goto out;
55781
55782 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
55783@@ -501,6 +509,22 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
55784 if (!table->proc_handler)
55785 goto out;
55786
55787+#ifdef CONFIG_GRKERNSEC
55788+ error = -EPERM;
55789+ if (gr_handle_chroot_sysctl(op))
55790+ goto out;
55791+ dget(filp->f_path.dentry);
55792+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
55793+ dput(filp->f_path.dentry);
55794+ goto out;
55795+ }
55796+ dput(filp->f_path.dentry);
55797+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
55798+ goto out;
55799+ if (write && !capable(CAP_SYS_ADMIN))
55800+ goto out;
55801+#endif
55802+
55803 /* careful: calling conventions are nasty here */
55804 res = count;
55805 error = table->proc_handler(table, write, buf, &res, ppos);
55806@@ -598,6 +622,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
55807 return -ENOMEM;
55808 } else {
55809 d_set_d_op(child, &proc_sys_dentry_operations);
55810+
55811+ gr_handle_proc_create(child, inode);
55812+
55813 d_add(child, inode);
55814 }
55815 } else {
55816@@ -641,6 +668,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
55817 if ((*pos)++ < file->f_pos)
55818 return 0;
55819
55820+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
55821+ return 0;
55822+
55823 if (unlikely(S_ISLNK(table->mode)))
55824 res = proc_sys_link_fill_cache(file, dirent, filldir, head, table);
55825 else
55826@@ -751,6 +781,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
55827 if (IS_ERR(head))
55828 return PTR_ERR(head);
55829
55830+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
55831+ return -ENOENT;
55832+
55833 generic_fillattr(inode, stat);
55834 if (table)
55835 stat->mode = (stat->mode & S_IFMT) | table->mode;
55836@@ -773,13 +806,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
55837 .llseek = generic_file_llseek,
55838 };
55839
55840-static const struct inode_operations proc_sys_inode_operations = {
55841+const struct inode_operations proc_sys_inode_operations = {
55842 .permission = proc_sys_permission,
55843 .setattr = proc_sys_setattr,
55844 .getattr = proc_sys_getattr,
55845 };
55846
55847-static const struct inode_operations proc_sys_dir_operations = {
55848+const struct inode_operations proc_sys_dir_operations = {
55849 .lookup = proc_sys_lookup,
55850 .permission = proc_sys_permission,
55851 .setattr = proc_sys_setattr,
55852@@ -855,7 +888,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
55853 static struct ctl_dir *new_dir(struct ctl_table_set *set,
55854 const char *name, int namelen)
55855 {
55856- struct ctl_table *table;
55857+ ctl_table_no_const *table;
55858 struct ctl_dir *new;
55859 struct ctl_node *node;
55860 char *new_name;
55861@@ -867,7 +900,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
55862 return NULL;
55863
55864 node = (struct ctl_node *)(new + 1);
55865- table = (struct ctl_table *)(node + 1);
55866+ table = (ctl_table_no_const *)(node + 1);
55867 new_name = (char *)(table + 2);
55868 memcpy(new_name, name, namelen);
55869 new_name[namelen] = '\0';
55870@@ -1036,7 +1069,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
55871 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
55872 struct ctl_table_root *link_root)
55873 {
55874- struct ctl_table *link_table, *entry, *link;
55875+ ctl_table_no_const *link_table, *link;
55876+ struct ctl_table *entry;
55877 struct ctl_table_header *links;
55878 struct ctl_node *node;
55879 char *link_name;
55880@@ -1059,7 +1093,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
55881 return NULL;
55882
55883 node = (struct ctl_node *)(links + 1);
55884- link_table = (struct ctl_table *)(node + nr_entries);
55885+ link_table = (ctl_table_no_const *)(node + nr_entries);
55886 link_name = (char *)&link_table[nr_entries + 1];
55887
55888 for (link = link_table, entry = table; entry->procname; link++, entry++) {
55889@@ -1307,8 +1341,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
55890 struct ctl_table_header ***subheader, struct ctl_table_set *set,
55891 struct ctl_table *table)
55892 {
55893- struct ctl_table *ctl_table_arg = NULL;
55894- struct ctl_table *entry, *files;
55895+ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
55896+ struct ctl_table *entry;
55897 int nr_files = 0;
55898 int nr_dirs = 0;
55899 int err = -ENOMEM;
55900@@ -1320,10 +1354,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
55901 nr_files++;
55902 }
55903
55904- files = table;
55905 /* If there are mixed files and directories we need a new table */
55906 if (nr_dirs && nr_files) {
55907- struct ctl_table *new;
55908+ ctl_table_no_const *new;
55909 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
55910 GFP_KERNEL);
55911 if (!files)
55912@@ -1341,7 +1374,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
55913 /* Register everything except a directory full of subdirectories */
55914 if (nr_files || !nr_dirs) {
55915 struct ctl_table_header *header;
55916- header = __register_sysctl_table(set, path, files);
55917+ header = __register_sysctl_table(set, path, files ? files : table);
55918 if (!header) {
55919 kfree(ctl_table_arg);
55920 goto out;
55921diff --git a/fs/proc/root.c b/fs/proc/root.c
55922index 9c7fab1..ed1c8e0 100644
55923--- a/fs/proc/root.c
55924+++ b/fs/proc/root.c
55925@@ -180,7 +180,15 @@ void __init proc_root_init(void)
55926 #ifdef CONFIG_PROC_DEVICETREE
55927 proc_device_tree_init();
55928 #endif
55929+#ifdef CONFIG_GRKERNSEC_PROC_ADD
55930+#ifdef CONFIG_GRKERNSEC_PROC_USER
55931+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
55932+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55933+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
55934+#endif
55935+#else
55936 proc_mkdir("bus", NULL);
55937+#endif
55938 proc_sys_init();
55939 }
55940
55941diff --git a/fs/proc/self.c b/fs/proc/self.c
55942index aa5cc3b..c91a5d0 100644
55943--- a/fs/proc/self.c
55944+++ b/fs/proc/self.c
55945@@ -37,7 +37,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
55946 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
55947 void *cookie)
55948 {
55949- char *s = nd_get_link(nd);
55950+ const char *s = nd_get_link(nd);
55951 if (!IS_ERR(s))
55952 kfree(s);
55953 }
55954diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
55955index 3e636d8..83e3b71 100644
55956--- a/fs/proc/task_mmu.c
55957+++ b/fs/proc/task_mmu.c
55958@@ -11,12 +11,19 @@
55959 #include <linux/rmap.h>
55960 #include <linux/swap.h>
55961 #include <linux/swapops.h>
55962+#include <linux/grsecurity.h>
55963
55964 #include <asm/elf.h>
55965 #include <asm/uaccess.h>
55966 #include <asm/tlbflush.h>
55967 #include "internal.h"
55968
55969+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55970+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
55971+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
55972+ _mm->pax_flags & MF_PAX_SEGMEXEC))
55973+#endif
55974+
55975 void task_mem(struct seq_file *m, struct mm_struct *mm)
55976 {
55977 unsigned long data, text, lib, swap;
55978@@ -52,8 +59,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
55979 "VmExe:\t%8lu kB\n"
55980 "VmLib:\t%8lu kB\n"
55981 "VmPTE:\t%8lu kB\n"
55982- "VmSwap:\t%8lu kB\n",
55983- hiwater_vm << (PAGE_SHIFT-10),
55984+ "VmSwap:\t%8lu kB\n"
55985+
55986+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
55987+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
55988+#endif
55989+
55990+ ,hiwater_vm << (PAGE_SHIFT-10),
55991 total_vm << (PAGE_SHIFT-10),
55992 mm->locked_vm << (PAGE_SHIFT-10),
55993 mm->pinned_vm << (PAGE_SHIFT-10),
55994@@ -62,7 +74,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
55995 data << (PAGE_SHIFT-10),
55996 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
55997 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
55998- swap << (PAGE_SHIFT-10));
55999+ swap << (PAGE_SHIFT-10)
56000+
56001+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
56002+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56003+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
56004+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
56005+#else
56006+ , mm->context.user_cs_base
56007+ , mm->context.user_cs_limit
56008+#endif
56009+#endif
56010+
56011+ );
56012 }
56013
56014 unsigned long task_vsize(struct mm_struct *mm)
56015@@ -277,13 +301,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
56016 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
56017 }
56018
56019- /* We don't show the stack guard page in /proc/maps */
56020+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56021+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
56022+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
56023+#else
56024 start = vma->vm_start;
56025- if (stack_guard_page_start(vma, start))
56026- start += PAGE_SIZE;
56027 end = vma->vm_end;
56028- if (stack_guard_page_end(vma, end))
56029- end -= PAGE_SIZE;
56030+#endif
56031
56032 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
56033 start,
56034@@ -292,7 +316,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
56035 flags & VM_WRITE ? 'w' : '-',
56036 flags & VM_EXEC ? 'x' : '-',
56037 flags & VM_MAYSHARE ? 's' : 'p',
56038+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56039+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
56040+#else
56041 pgoff,
56042+#endif
56043 MAJOR(dev), MINOR(dev), ino, &len);
56044
56045 /*
56046@@ -301,7 +329,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
56047 */
56048 if (file) {
56049 pad_len_spaces(m, len);
56050- seq_path(m, &file->f_path, "\n");
56051+ seq_path(m, &file->f_path, "\n\\");
56052 goto done;
56053 }
56054
56055@@ -327,8 +355,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
56056 * Thread stack in /proc/PID/task/TID/maps or
56057 * the main process stack.
56058 */
56059- if (!is_pid || (vma->vm_start <= mm->start_stack &&
56060- vma->vm_end >= mm->start_stack)) {
56061+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
56062+ (vma->vm_start <= mm->start_stack &&
56063+ vma->vm_end >= mm->start_stack)) {
56064 name = "[stack]";
56065 } else {
56066 /* Thread stack in /proc/PID/maps */
56067@@ -352,6 +381,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
56068 struct proc_maps_private *priv = m->private;
56069 struct task_struct *task = priv->task;
56070
56071+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56072+ if (current->exec_id != m->exec_id) {
56073+ gr_log_badprocpid("maps");
56074+ return 0;
56075+ }
56076+#endif
56077+
56078 show_map_vma(m, vma, is_pid);
56079
56080 if (m->count < m->size) /* vma is copied successfully */
56081@@ -589,12 +625,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
56082 .private = &mss,
56083 };
56084
56085+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56086+ if (current->exec_id != m->exec_id) {
56087+ gr_log_badprocpid("smaps");
56088+ return 0;
56089+ }
56090+#endif
56091 memset(&mss, 0, sizeof mss);
56092- mss.vma = vma;
56093- /* mmap_sem is held in m_start */
56094- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
56095- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
56096-
56097+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56098+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
56099+#endif
56100+ mss.vma = vma;
56101+ /* mmap_sem is held in m_start */
56102+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
56103+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
56104+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56105+ }
56106+#endif
56107 show_map_vma(m, vma, is_pid);
56108
56109 seq_printf(m,
56110@@ -612,7 +659,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
56111 "KernelPageSize: %8lu kB\n"
56112 "MMUPageSize: %8lu kB\n"
56113 "Locked: %8lu kB\n",
56114+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56115+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
56116+#else
56117 (vma->vm_end - vma->vm_start) >> 10,
56118+#endif
56119 mss.resident >> 10,
56120 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
56121 mss.shared_clean >> 10,
56122@@ -1264,6 +1315,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
56123 int n;
56124 char buffer[50];
56125
56126+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56127+ if (current->exec_id != m->exec_id) {
56128+ gr_log_badprocpid("numa_maps");
56129+ return 0;
56130+ }
56131+#endif
56132+
56133 if (!mm)
56134 return 0;
56135
56136@@ -1281,11 +1339,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
56137 mpol_to_str(buffer, sizeof(buffer), pol);
56138 mpol_cond_put(pol);
56139
56140+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56141+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
56142+#else
56143 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
56144+#endif
56145
56146 if (file) {
56147 seq_printf(m, " file=");
56148- seq_path(m, &file->f_path, "\n\t= ");
56149+ seq_path(m, &file->f_path, "\n\t\\= ");
56150 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
56151 seq_printf(m, " heap");
56152 } else {
56153diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
56154index 56123a6..5a2f6ec 100644
56155--- a/fs/proc/task_nommu.c
56156+++ b/fs/proc/task_nommu.c
56157@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
56158 else
56159 bytes += kobjsize(mm);
56160
56161- if (current->fs && current->fs->users > 1)
56162+ if (current->fs && atomic_read(&current->fs->users) > 1)
56163 sbytes += kobjsize(current->fs);
56164 else
56165 bytes += kobjsize(current->fs);
56166@@ -168,7 +168,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
56167
56168 if (file) {
56169 pad_len_spaces(m, len);
56170- seq_path(m, &file->f_path, "");
56171+ seq_path(m, &file->f_path, "\n\\");
56172 } else if (mm) {
56173 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
56174
56175diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
56176index b870f74..e9048df 100644
56177--- a/fs/proc/vmcore.c
56178+++ b/fs/proc/vmcore.c
56179@@ -98,9 +98,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
56180 nr_bytes = count;
56181
56182 /* If pfn is not ram, return zeros for sparse dump files */
56183- if (pfn_is_ram(pfn) == 0)
56184- memset(buf, 0, nr_bytes);
56185- else {
56186+ if (pfn_is_ram(pfn) == 0) {
56187+ if (userbuf) {
56188+ if (clear_user((char __force_user *)buf, nr_bytes))
56189+ return -EFAULT;
56190+ } else
56191+ memset(buf, 0, nr_bytes);
56192+ } else {
56193 tmp = copy_oldmem_page(pfn, buf, nr_bytes,
56194 offset, userbuf);
56195 if (tmp < 0)
56196@@ -185,7 +189,7 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer,
56197 if (tsz > nr_bytes)
56198 tsz = nr_bytes;
56199
56200- tmp = read_from_oldmem(buffer, tsz, &start, 1);
56201+ tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, 1);
56202 if (tmp < 0)
56203 return tmp;
56204 buflen -= tsz;
56205diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
56206index b00fcc9..e0c6381 100644
56207--- a/fs/qnx6/qnx6.h
56208+++ b/fs/qnx6/qnx6.h
56209@@ -74,7 +74,7 @@ enum {
56210 BYTESEX_BE,
56211 };
56212
56213-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
56214+static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
56215 {
56216 if (sbi->s_bytesex == BYTESEX_LE)
56217 return le64_to_cpu((__force __le64)n);
56218@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
56219 return (__force __fs64)cpu_to_be64(n);
56220 }
56221
56222-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
56223+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
56224 {
56225 if (sbi->s_bytesex == BYTESEX_LE)
56226 return le32_to_cpu((__force __le32)n);
56227diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
56228index 16e8abb..2dcf914 100644
56229--- a/fs/quota/netlink.c
56230+++ b/fs/quota/netlink.c
56231@@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
56232 void quota_send_warning(struct kqid qid, dev_t dev,
56233 const char warntype)
56234 {
56235- static atomic_t seq;
56236+ static atomic_unchecked_t seq;
56237 struct sk_buff *skb;
56238 void *msg_head;
56239 int ret;
56240@@ -49,7 +49,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
56241 "VFS: Not enough memory to send quota warning.\n");
56242 return;
56243 }
56244- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
56245+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
56246 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
56247 if (!msg_head) {
56248 printk(KERN_ERR
56249diff --git a/fs/read_write.c b/fs/read_write.c
56250index e6ddc8d..9155227 100644
56251--- a/fs/read_write.c
56252+++ b/fs/read_write.c
56253@@ -429,7 +429,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
56254
56255 old_fs = get_fs();
56256 set_fs(get_ds());
56257- p = (__force const char __user *)buf;
56258+ p = (const char __force_user *)buf;
56259 if (count > MAX_RW_COUNT)
56260 count = MAX_RW_COUNT;
56261 if (file->f_op->write)
56262diff --git a/fs/readdir.c b/fs/readdir.c
56263index fee38e0..12fdf47 100644
56264--- a/fs/readdir.c
56265+++ b/fs/readdir.c
56266@@ -17,6 +17,7 @@
56267 #include <linux/security.h>
56268 #include <linux/syscalls.h>
56269 #include <linux/unistd.h>
56270+#include <linux/namei.h>
56271
56272 #include <asm/uaccess.h>
56273
56274@@ -67,6 +68,7 @@ struct old_linux_dirent {
56275
56276 struct readdir_callback {
56277 struct old_linux_dirent __user * dirent;
56278+ struct file * file;
56279 int result;
56280 };
56281
56282@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
56283 buf->result = -EOVERFLOW;
56284 return -EOVERFLOW;
56285 }
56286+
56287+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
56288+ return 0;
56289+
56290 buf->result++;
56291 dirent = buf->dirent;
56292 if (!access_ok(VERIFY_WRITE, dirent,
56293@@ -114,6 +120,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
56294
56295 buf.result = 0;
56296 buf.dirent = dirent;
56297+ buf.file = f.file;
56298
56299 error = vfs_readdir(f.file, fillonedir, &buf);
56300 if (buf.result)
56301@@ -139,6 +146,7 @@ struct linux_dirent {
56302 struct getdents_callback {
56303 struct linux_dirent __user * current_dir;
56304 struct linux_dirent __user * previous;
56305+ struct file * file;
56306 int count;
56307 int error;
56308 };
56309@@ -160,6 +168,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
56310 buf->error = -EOVERFLOW;
56311 return -EOVERFLOW;
56312 }
56313+
56314+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
56315+ return 0;
56316+
56317 dirent = buf->previous;
56318 if (dirent) {
56319 if (__put_user(offset, &dirent->d_off))
56320@@ -205,6 +217,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
56321 buf.previous = NULL;
56322 buf.count = count;
56323 buf.error = 0;
56324+ buf.file = f.file;
56325
56326 error = vfs_readdir(f.file, filldir, &buf);
56327 if (error >= 0)
56328@@ -223,6 +236,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
56329 struct getdents_callback64 {
56330 struct linux_dirent64 __user * current_dir;
56331 struct linux_dirent64 __user * previous;
56332+ struct file *file;
56333 int count;
56334 int error;
56335 };
56336@@ -238,6 +252,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
56337 buf->error = -EINVAL; /* only used if we fail.. */
56338 if (reclen > buf->count)
56339 return -EINVAL;
56340+
56341+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
56342+ return 0;
56343+
56344 dirent = buf->previous;
56345 if (dirent) {
56346 if (__put_user(offset, &dirent->d_off))
56347@@ -283,6 +301,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
56348
56349 buf.current_dir = dirent;
56350 buf.previous = NULL;
56351+ buf.file = f.file;
56352 buf.count = count;
56353 buf.error = 0;
56354
56355@@ -291,7 +310,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
56356 error = buf.error;
56357 lastdirent = buf.previous;
56358 if (lastdirent) {
56359- typeof(lastdirent->d_off) d_off = f.file->f_pos;
56360+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
56361 if (__put_user(d_off, &lastdirent->d_off))
56362 error = -EFAULT;
56363 else
56364diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
56365index 2b7882b..1c5ef48 100644
56366--- a/fs/reiserfs/do_balan.c
56367+++ b/fs/reiserfs/do_balan.c
56368@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
56369 return;
56370 }
56371
56372- atomic_inc(&(fs_generation(tb->tb_sb)));
56373+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
56374 do_balance_starts(tb);
56375
56376 /* balance leaf returns 0 except if combining L R and S into
56377diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
56378index 9cc0740a..46bf953 100644
56379--- a/fs/reiserfs/procfs.c
56380+++ b/fs/reiserfs/procfs.c
56381@@ -112,7 +112,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
56382 "SMALL_TAILS " : "NO_TAILS ",
56383 replay_only(sb) ? "REPLAY_ONLY " : "",
56384 convert_reiserfs(sb) ? "CONV " : "",
56385- atomic_read(&r->s_generation_counter),
56386+ atomic_read_unchecked(&r->s_generation_counter),
56387 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
56388 SF(s_do_balance), SF(s_unneeded_left_neighbor),
56389 SF(s_good_search_by_key_reada), SF(s_bmaps),
56390diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
56391index 157e474..65a6114 100644
56392--- a/fs/reiserfs/reiserfs.h
56393+++ b/fs/reiserfs/reiserfs.h
56394@@ -453,7 +453,7 @@ struct reiserfs_sb_info {
56395 /* Comment? -Hans */
56396 wait_queue_head_t s_wait;
56397 /* To be obsoleted soon by per buffer seals.. -Hans */
56398- atomic_t s_generation_counter; // increased by one every time the
56399+ atomic_unchecked_t s_generation_counter; // increased by one every time the
56400 // tree gets re-balanced
56401 unsigned long s_properties; /* File system properties. Currently holds
56402 on-disk FS format */
56403@@ -1978,7 +1978,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
56404 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
56405
56406 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
56407-#define get_generation(s) atomic_read (&fs_generation(s))
56408+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
56409 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
56410 #define __fs_changed(gen,s) (gen != get_generation (s))
56411 #define fs_changed(gen,s) \
56412diff --git a/fs/select.c b/fs/select.c
56413index 8c1c96c..a0f9b6d 100644
56414--- a/fs/select.c
56415+++ b/fs/select.c
56416@@ -20,6 +20,7 @@
56417 #include <linux/export.h>
56418 #include <linux/slab.h>
56419 #include <linux/poll.h>
56420+#include <linux/security.h>
56421 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
56422 #include <linux/file.h>
56423 #include <linux/fdtable.h>
56424@@ -827,6 +828,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
56425 struct poll_list *walk = head;
56426 unsigned long todo = nfds;
56427
56428+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
56429 if (nfds > rlimit(RLIMIT_NOFILE))
56430 return -EINVAL;
56431
56432diff --git a/fs/seq_file.c b/fs/seq_file.c
56433index 38bb59f..a304f9d 100644
56434--- a/fs/seq_file.c
56435+++ b/fs/seq_file.c
56436@@ -10,6 +10,7 @@
56437 #include <linux/seq_file.h>
56438 #include <linux/slab.h>
56439 #include <linux/cred.h>
56440+#include <linux/sched.h>
56441
56442 #include <asm/uaccess.h>
56443 #include <asm/page.h>
56444@@ -60,6 +61,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
56445 #ifdef CONFIG_USER_NS
56446 p->user_ns = file->f_cred->user_ns;
56447 #endif
56448+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56449+ p->exec_id = current->exec_id;
56450+#endif
56451
56452 /*
56453 * Wrappers around seq_open(e.g. swaps_open) need to be
56454@@ -96,7 +100,7 @@ static int traverse(struct seq_file *m, loff_t offset)
56455 return 0;
56456 }
56457 if (!m->buf) {
56458- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
56459+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
56460 if (!m->buf)
56461 return -ENOMEM;
56462 }
56463@@ -136,7 +140,7 @@ static int traverse(struct seq_file *m, loff_t offset)
56464 Eoverflow:
56465 m->op->stop(m, p);
56466 kfree(m->buf);
56467- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
56468+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
56469 return !m->buf ? -ENOMEM : -EAGAIN;
56470 }
56471
56472@@ -191,7 +195,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
56473
56474 /* grab buffer if we didn't have one */
56475 if (!m->buf) {
56476- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
56477+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
56478 if (!m->buf)
56479 goto Enomem;
56480 }
56481@@ -232,7 +236,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
56482 goto Fill;
56483 m->op->stop(m, p);
56484 kfree(m->buf);
56485- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
56486+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
56487 if (!m->buf)
56488 goto Enomem;
56489 m->count = 0;
56490@@ -581,7 +585,7 @@ static void single_stop(struct seq_file *p, void *v)
56491 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
56492 void *data)
56493 {
56494- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
56495+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
56496 int res = -ENOMEM;
56497
56498 if (op) {
56499diff --git a/fs/splice.c b/fs/splice.c
56500index 29e394e..b13c247 100644
56501--- a/fs/splice.c
56502+++ b/fs/splice.c
56503@@ -195,7 +195,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
56504 pipe_lock(pipe);
56505
56506 for (;;) {
56507- if (!pipe->readers) {
56508+ if (!atomic_read(&pipe->readers)) {
56509 send_sig(SIGPIPE, current, 0);
56510 if (!ret)
56511 ret = -EPIPE;
56512@@ -249,9 +249,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
56513 do_wakeup = 0;
56514 }
56515
56516- pipe->waiting_writers++;
56517+ atomic_inc(&pipe->waiting_writers);
56518 pipe_wait(pipe);
56519- pipe->waiting_writers--;
56520+ atomic_dec(&pipe->waiting_writers);
56521 }
56522
56523 pipe_unlock(pipe);
56524@@ -564,7 +564,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
56525 old_fs = get_fs();
56526 set_fs(get_ds());
56527 /* The cast to a user pointer is valid due to the set_fs() */
56528- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
56529+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
56530 set_fs(old_fs);
56531
56532 return res;
56533@@ -579,7 +579,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
56534 old_fs = get_fs();
56535 set_fs(get_ds());
56536 /* The cast to a user pointer is valid due to the set_fs() */
56537- res = vfs_write(file, (__force const char __user *)buf, count, &pos);
56538+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
56539 set_fs(old_fs);
56540
56541 return res;
56542@@ -632,7 +632,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
56543 goto err;
56544
56545 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
56546- vec[i].iov_base = (void __user *) page_address(page);
56547+ vec[i].iov_base = (void __force_user *) page_address(page);
56548 vec[i].iov_len = this_len;
56549 spd.pages[i] = page;
56550 spd.nr_pages++;
56551@@ -853,10 +853,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
56552 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
56553 {
56554 while (!pipe->nrbufs) {
56555- if (!pipe->writers)
56556+ if (!atomic_read(&pipe->writers))
56557 return 0;
56558
56559- if (!pipe->waiting_writers && sd->num_spliced)
56560+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
56561 return 0;
56562
56563 if (sd->flags & SPLICE_F_NONBLOCK)
56564@@ -1192,7 +1192,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
56565 * out of the pipe right after the splice_to_pipe(). So set
56566 * PIPE_READERS appropriately.
56567 */
56568- pipe->readers = 1;
56569+ atomic_set(&pipe->readers, 1);
56570
56571 current->splice_pipe = pipe;
56572 }
56573@@ -1741,9 +1741,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
56574 ret = -ERESTARTSYS;
56575 break;
56576 }
56577- if (!pipe->writers)
56578+ if (!atomic_read(&pipe->writers))
56579 break;
56580- if (!pipe->waiting_writers) {
56581+ if (!atomic_read(&pipe->waiting_writers)) {
56582 if (flags & SPLICE_F_NONBLOCK) {
56583 ret = -EAGAIN;
56584 break;
56585@@ -1775,7 +1775,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
56586 pipe_lock(pipe);
56587
56588 while (pipe->nrbufs >= pipe->buffers) {
56589- if (!pipe->readers) {
56590+ if (!atomic_read(&pipe->readers)) {
56591 send_sig(SIGPIPE, current, 0);
56592 ret = -EPIPE;
56593 break;
56594@@ -1788,9 +1788,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
56595 ret = -ERESTARTSYS;
56596 break;
56597 }
56598- pipe->waiting_writers++;
56599+ atomic_inc(&pipe->waiting_writers);
56600 pipe_wait(pipe);
56601- pipe->waiting_writers--;
56602+ atomic_dec(&pipe->waiting_writers);
56603 }
56604
56605 pipe_unlock(pipe);
56606@@ -1826,14 +1826,14 @@ retry:
56607 pipe_double_lock(ipipe, opipe);
56608
56609 do {
56610- if (!opipe->readers) {
56611+ if (!atomic_read(&opipe->readers)) {
56612 send_sig(SIGPIPE, current, 0);
56613 if (!ret)
56614 ret = -EPIPE;
56615 break;
56616 }
56617
56618- if (!ipipe->nrbufs && !ipipe->writers)
56619+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
56620 break;
56621
56622 /*
56623@@ -1930,7 +1930,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
56624 pipe_double_lock(ipipe, opipe);
56625
56626 do {
56627- if (!opipe->readers) {
56628+ if (!atomic_read(&opipe->readers)) {
56629 send_sig(SIGPIPE, current, 0);
56630 if (!ret)
56631 ret = -EPIPE;
56632@@ -1975,7 +1975,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
56633 * return EAGAIN if we have the potential of some data in the
56634 * future, otherwise just return 0
56635 */
56636- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
56637+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
56638 ret = -EAGAIN;
56639
56640 pipe_unlock(ipipe);
56641diff --git a/fs/stat.c b/fs/stat.c
56642index 04ce1ac..a13dd1e 100644
56643--- a/fs/stat.c
56644+++ b/fs/stat.c
56645@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
56646 stat->gid = inode->i_gid;
56647 stat->rdev = inode->i_rdev;
56648 stat->size = i_size_read(inode);
56649- stat->atime = inode->i_atime;
56650- stat->mtime = inode->i_mtime;
56651+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
56652+ stat->atime = inode->i_ctime;
56653+ stat->mtime = inode->i_ctime;
56654+ } else {
56655+ stat->atime = inode->i_atime;
56656+ stat->mtime = inode->i_mtime;
56657+ }
56658 stat->ctime = inode->i_ctime;
56659 stat->blksize = (1 << inode->i_blkbits);
56660 stat->blocks = inode->i_blocks;
56661@@ -46,8 +51,14 @@ int vfs_getattr(struct path *path, struct kstat *stat)
56662 if (retval)
56663 return retval;
56664
56665- if (inode->i_op->getattr)
56666- return inode->i_op->getattr(path->mnt, path->dentry, stat);
56667+ if (inode->i_op->getattr) {
56668+ retval = inode->i_op->getattr(path->mnt, path->dentry, stat);
56669+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
56670+ stat->atime = stat->ctime;
56671+ stat->mtime = stat->ctime;
56672+ }
56673+ return retval;
56674+ }
56675
56676 generic_fillattr(inode, stat);
56677 return 0;
56678diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
56679index 15c68f9..36a8b3e 100644
56680--- a/fs/sysfs/bin.c
56681+++ b/fs/sysfs/bin.c
56682@@ -235,13 +235,13 @@ static int bin_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
56683 return ret;
56684 }
56685
56686-static int bin_access(struct vm_area_struct *vma, unsigned long addr,
56687- void *buf, int len, int write)
56688+static ssize_t bin_access(struct vm_area_struct *vma, unsigned long addr,
56689+ void *buf, size_t len, int write)
56690 {
56691 struct file *file = vma->vm_file;
56692 struct bin_buffer *bb = file->private_data;
56693 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
56694- int ret;
56695+ ssize_t ret;
56696
56697 if (!bb->vm_ops)
56698 return -EINVAL;
56699diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
56700index 6f31590..3c87c8a 100644
56701--- a/fs/sysfs/dir.c
56702+++ b/fs/sysfs/dir.c
56703@@ -40,7 +40,7 @@ static DEFINE_IDA(sysfs_ino_ida);
56704 *
56705 * Returns 31 bit hash of ns + name (so it fits in an off_t )
56706 */
56707-static unsigned int sysfs_name_hash(const void *ns, const char *name)
56708+static unsigned int sysfs_name_hash(const void *ns, const unsigned char *name)
56709 {
56710 unsigned long hash = init_name_hash();
56711 unsigned int len = strlen(name);
56712@@ -685,6 +685,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
56713 struct sysfs_dirent *sd;
56714 int rc;
56715
56716+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
56717+ const char *parent_name = parent_sd->s_name;
56718+
56719+ mode = S_IFDIR | S_IRWXU;
56720+
56721+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
56722+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
56723+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
56724+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
56725+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
56726+#endif
56727+
56728 /* allocate */
56729 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
56730 if (!sd)
56731diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
56732index 602f56d..6853db8 100644
56733--- a/fs/sysfs/file.c
56734+++ b/fs/sysfs/file.c
56735@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
56736
56737 struct sysfs_open_dirent {
56738 atomic_t refcnt;
56739- atomic_t event;
56740+ atomic_unchecked_t event;
56741 wait_queue_head_t poll;
56742 struct list_head buffers; /* goes through sysfs_buffer.list */
56743 };
56744@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
56745 if (!sysfs_get_active(attr_sd))
56746 return -ENODEV;
56747
56748- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
56749+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
56750 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
56751
56752 sysfs_put_active(attr_sd);
56753@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
56754 return -ENOMEM;
56755
56756 atomic_set(&new_od->refcnt, 0);
56757- atomic_set(&new_od->event, 1);
56758+ atomic_set_unchecked(&new_od->event, 1);
56759 init_waitqueue_head(&new_od->poll);
56760 INIT_LIST_HEAD(&new_od->buffers);
56761 goto retry;
56762@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
56763
56764 sysfs_put_active(attr_sd);
56765
56766- if (buffer->event != atomic_read(&od->event))
56767+ if (buffer->event != atomic_read_unchecked(&od->event))
56768 goto trigger;
56769
56770 return DEFAULT_POLLMASK;
56771@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
56772
56773 od = sd->s_attr.open;
56774 if (od) {
56775- atomic_inc(&od->event);
56776+ atomic_inc_unchecked(&od->event);
56777 wake_up_interruptible(&od->poll);
56778 }
56779
56780diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
56781index 8c940df..25b733e 100644
56782--- a/fs/sysfs/symlink.c
56783+++ b/fs/sysfs/symlink.c
56784@@ -305,7 +305,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
56785
56786 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
56787 {
56788- char *page = nd_get_link(nd);
56789+ const char *page = nd_get_link(nd);
56790 if (!IS_ERR(page))
56791 free_page((unsigned long)page);
56792 }
56793diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
56794index 69d4889..a810bd4 100644
56795--- a/fs/sysv/sysv.h
56796+++ b/fs/sysv/sysv.h
56797@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
56798 #endif
56799 }
56800
56801-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
56802+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
56803 {
56804 if (sbi->s_bytesex == BYTESEX_PDP)
56805 return PDP_swab((__force __u32)n);
56806diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
56807index e18b988..f1d4ad0f 100644
56808--- a/fs/ubifs/io.c
56809+++ b/fs/ubifs/io.c
56810@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
56811 return err;
56812 }
56813
56814-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
56815+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
56816 {
56817 int err;
56818
56819diff --git a/fs/udf/misc.c b/fs/udf/misc.c
56820index c175b4d..8f36a16 100644
56821--- a/fs/udf/misc.c
56822+++ b/fs/udf/misc.c
56823@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
56824
56825 u8 udf_tag_checksum(const struct tag *t)
56826 {
56827- u8 *data = (u8 *)t;
56828+ const u8 *data = (const u8 *)t;
56829 u8 checksum = 0;
56830 int i;
56831 for (i = 0; i < sizeof(struct tag); ++i)
56832diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
56833index 8d974c4..b82f6ec 100644
56834--- a/fs/ufs/swab.h
56835+++ b/fs/ufs/swab.h
56836@@ -22,7 +22,7 @@ enum {
56837 BYTESEX_BE
56838 };
56839
56840-static inline u64
56841+static inline u64 __intentional_overflow(-1)
56842 fs64_to_cpu(struct super_block *sbp, __fs64 n)
56843 {
56844 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
56845@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
56846 return (__force __fs64)cpu_to_be64(n);
56847 }
56848
56849-static inline u32
56850+static inline u32 __intentional_overflow(-1)
56851 fs32_to_cpu(struct super_block *sbp, __fs32 n)
56852 {
56853 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
56854diff --git a/fs/utimes.c b/fs/utimes.c
56855index f4fb7ec..3fe03c0 100644
56856--- a/fs/utimes.c
56857+++ b/fs/utimes.c
56858@@ -1,6 +1,7 @@
56859 #include <linux/compiler.h>
56860 #include <linux/file.h>
56861 #include <linux/fs.h>
56862+#include <linux/security.h>
56863 #include <linux/linkage.h>
56864 #include <linux/mount.h>
56865 #include <linux/namei.h>
56866@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
56867 goto mnt_drop_write_and_out;
56868 }
56869 }
56870+
56871+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
56872+ error = -EACCES;
56873+ goto mnt_drop_write_and_out;
56874+ }
56875+
56876 mutex_lock(&inode->i_mutex);
56877 error = notify_change(path->dentry, &newattrs);
56878 mutex_unlock(&inode->i_mutex);
56879diff --git a/fs/xattr.c b/fs/xattr.c
56880index 3377dff..4d074d9 100644
56881--- a/fs/xattr.c
56882+++ b/fs/xattr.c
56883@@ -227,6 +227,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
56884 return rc;
56885 }
56886
56887+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
56888+ssize_t
56889+pax_getxattr(struct dentry *dentry, void *value, size_t size)
56890+{
56891+ struct inode *inode = dentry->d_inode;
56892+ ssize_t error;
56893+
56894+ error = inode_permission(inode, MAY_EXEC);
56895+ if (error)
56896+ return error;
56897+
56898+ if (inode->i_op->getxattr)
56899+ error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
56900+ else
56901+ error = -EOPNOTSUPP;
56902+
56903+ return error;
56904+}
56905+EXPORT_SYMBOL(pax_getxattr);
56906+#endif
56907+
56908 ssize_t
56909 vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
56910 {
56911@@ -319,7 +340,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
56912 * Extended attribute SET operations
56913 */
56914 static long
56915-setxattr(struct dentry *d, const char __user *name, const void __user *value,
56916+setxattr(struct path *path, const char __user *name, const void __user *value,
56917 size_t size, int flags)
56918 {
56919 int error;
56920@@ -355,7 +376,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
56921 posix_acl_fix_xattr_from_user(kvalue, size);
56922 }
56923
56924- error = vfs_setxattr(d, kname, kvalue, size, flags);
56925+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
56926+ error = -EACCES;
56927+ goto out;
56928+ }
56929+
56930+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
56931 out:
56932 if (vvalue)
56933 vfree(vvalue);
56934@@ -377,7 +403,7 @@ retry:
56935 return error;
56936 error = mnt_want_write(path.mnt);
56937 if (!error) {
56938- error = setxattr(path.dentry, name, value, size, flags);
56939+ error = setxattr(&path, name, value, size, flags);
56940 mnt_drop_write(path.mnt);
56941 }
56942 path_put(&path);
56943@@ -401,7 +427,7 @@ retry:
56944 return error;
56945 error = mnt_want_write(path.mnt);
56946 if (!error) {
56947- error = setxattr(path.dentry, name, value, size, flags);
56948+ error = setxattr(&path, name, value, size, flags);
56949 mnt_drop_write(path.mnt);
56950 }
56951 path_put(&path);
56952@@ -416,16 +442,14 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
56953 const void __user *,value, size_t, size, int, flags)
56954 {
56955 struct fd f = fdget(fd);
56956- struct dentry *dentry;
56957 int error = -EBADF;
56958
56959 if (!f.file)
56960 return error;
56961- dentry = f.file->f_path.dentry;
56962- audit_inode(NULL, dentry, 0);
56963+ audit_inode(NULL, f.file->f_path.dentry, 0);
56964 error = mnt_want_write_file(f.file);
56965 if (!error) {
56966- error = setxattr(dentry, name, value, size, flags);
56967+ error = setxattr(&f.file->f_path, name, value, size, flags);
56968 mnt_drop_write_file(f.file);
56969 }
56970 fdput(f);
56971diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
56972index 9fbea87..6b19972 100644
56973--- a/fs/xattr_acl.c
56974+++ b/fs/xattr_acl.c
56975@@ -76,8 +76,8 @@ struct posix_acl *
56976 posix_acl_from_xattr(struct user_namespace *user_ns,
56977 const void *value, size_t size)
56978 {
56979- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
56980- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
56981+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
56982+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
56983 int count;
56984 struct posix_acl *acl;
56985 struct posix_acl_entry *acl_e;
56986diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
56987index b44af92..06073da 100644
56988--- a/fs/xfs/xfs_bmap.c
56989+++ b/fs/xfs/xfs_bmap.c
56990@@ -192,7 +192,7 @@ xfs_bmap_validate_ret(
56991 int nmap,
56992 int ret_nmap);
56993 #else
56994-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
56995+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
56996 #endif /* DEBUG */
56997
56998 STATIC int
56999diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
57000index 1b9fc3e..e1bdde0 100644
57001--- a/fs/xfs/xfs_dir2_sf.c
57002+++ b/fs/xfs/xfs_dir2_sf.c
57003@@ -851,7 +851,15 @@ xfs_dir2_sf_getdents(
57004 }
57005
57006 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
57007- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
57008+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
57009+ char name[sfep->namelen];
57010+ memcpy(name, sfep->name, sfep->namelen);
57011+ if (filldir(dirent, name, sfep->namelen,
57012+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
57013+ *offset = off & 0x7fffffff;
57014+ return 0;
57015+ }
57016+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
57017 off & 0x7fffffff, ino, DT_UNKNOWN)) {
57018 *offset = off & 0x7fffffff;
57019 return 0;
57020diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
57021index d681e34..2a3f5ab 100644
57022--- a/fs/xfs/xfs_ioctl.c
57023+++ b/fs/xfs/xfs_ioctl.c
57024@@ -127,7 +127,7 @@ xfs_find_handle(
57025 }
57026
57027 error = -EFAULT;
57028- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
57029+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
57030 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
57031 goto out_put;
57032
57033diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
57034index ca9ecaa..60100c7 100644
57035--- a/fs/xfs/xfs_iops.c
57036+++ b/fs/xfs/xfs_iops.c
57037@@ -395,7 +395,7 @@ xfs_vn_put_link(
57038 struct nameidata *nd,
57039 void *p)
57040 {
57041- char *s = nd_get_link(nd);
57042+ const char *s = nd_get_link(nd);
57043
57044 if (!IS_ERR(s))
57045 kfree(s);
57046diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
57047new file mode 100644
57048index 0000000..ba9c5e3
57049--- /dev/null
57050+++ b/grsecurity/Kconfig
57051@@ -0,0 +1,1053 @@
57052+#
57053+# grsecurity configuration
57054+#
57055+menu "Memory Protections"
57056+depends on GRKERNSEC
57057+
57058+config GRKERNSEC_KMEM
57059+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
57060+ default y if GRKERNSEC_CONFIG_AUTO
57061+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
57062+ help
57063+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
57064+ be written to or read from to modify or leak the contents of the running
57065+ kernel. /dev/port will also not be allowed to be opened and support
57066+ for /dev/cpu/*/msr will be removed. If you have module
57067+ support disabled, enabling this will close up five ways that are
57068+ currently used to insert malicious code into the running kernel.
57069+
57070+ Even with all these features enabled, we still highly recommend that
57071+ you use the RBAC system, as it is still possible for an attacker to
57072+ modify the running kernel through privileged I/O granted by ioperm/iopl.
57073+
57074+ If you are not using XFree86, you may be able to stop this additional
57075+ case by enabling the 'Disable privileged I/O' option. Though nothing
57076+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
57077+ but only to video memory, which is the only writing we allow in this
57078+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
57079+ not be allowed to mprotect it with PROT_WRITE later.
57080+ Enabling this feature will prevent the "cpupower" and "powertop" tools
57081+ from working.
57082+
57083+ It is highly recommended that you say Y here if you meet all the
57084+ conditions above.
57085+
57086+config GRKERNSEC_VM86
57087+ bool "Restrict VM86 mode"
57088+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
57089+ depends on X86_32
57090+
57091+ help
57092+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
57093+ make use of a special execution mode on 32bit x86 processors called
57094+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
57095+ video cards and will still work with this option enabled. The purpose
57096+ of the option is to prevent exploitation of emulation errors in
57097+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
57098+ Nearly all users should be able to enable this option.
57099+
57100+config GRKERNSEC_IO
57101+ bool "Disable privileged I/O"
57102+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
57103+ depends on X86
57104+ select RTC_CLASS
57105+ select RTC_INTF_DEV
57106+ select RTC_DRV_CMOS
57107+
57108+ help
57109+ If you say Y here, all ioperm and iopl calls will return an error.
57110+ Ioperm and iopl can be used to modify the running kernel.
57111+ Unfortunately, some programs need this access to operate properly,
57112+ the most notable of which are XFree86 and hwclock. hwclock can be
57113+ remedied by having RTC support in the kernel, so real-time
57114+ clock support is enabled if this option is enabled, to ensure
57115+ that hwclock operates correctly. XFree86 still will not
57116+ operate correctly with this option enabled, so DO NOT CHOOSE Y
57117+ IF YOU USE XFree86. If you use XFree86 and you still want to
57118+ protect your kernel against modification, use the RBAC system.
57119+
57120+config GRKERNSEC_JIT_HARDEN
57121+ bool "Harden BPF JIT against spray attacks"
57122+ default y if GRKERNSEC_CONFIG_AUTO
57123+ depends on BPF_JIT
57124+ help
57125+ If you say Y here, the native code generated by the kernel's Berkeley
57126+ Packet Filter (BPF) JIT engine will be hardened against JIT-spraying
57127+ attacks that attempt to fit attacker-beneficial instructions in
57128+ 32bit immediate fields of JIT-generated native instructions. The
57129+ attacker will generally aim to cause an unintended instruction sequence
57130+ of JIT-generated native code to execute by jumping into the middle of
57131+ a generated instruction. This feature effectively randomizes the 32bit
57132+ immediate constants present in the generated code to thwart such attacks.
57133+
57134+ If you're using KERNEXEC, it's recommended that you enable this option
57135+ to supplement the hardening of the kernel.
57136+
57137+config GRKERNSEC_PERF_HARDEN
57138+ bool "Disable unprivileged PERF_EVENTS usage by default"
57139+ default y if GRKERNSEC_CONFIG_AUTO
57140+ depends on PERF_EVENTS
57141+ help
57142+ If you say Y here, the range of acceptable values for the
57143+ /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow and
57144+ default to a new value: 3. When the sysctl is set to this value, no
57145+ unprivileged use of the PERF_EVENTS syscall interface will be permitted.
57146+
57147+ Though PERF_EVENTS can be used legitimately for performance monitoring
57148+ and low-level application profiling, it is forced on regardless of
57149+ configuration, has been at fault for several vulnerabilities, and
57150+ creates new opportunities for side channels and other information leaks.
57151+
57152+ This feature puts PERF_EVENTS into a secure default state and permits
57153+ the administrator to change out of it temporarily if unprivileged
57154+ application profiling is needed.
57155+
57156+config GRKERNSEC_RAND_THREADSTACK
57157+ bool "Insert random gaps between thread stacks"
57158+ default y if GRKERNSEC_CONFIG_AUTO
57159+ depends on PAX_RANDMMAP && !PPC && BROKEN
57160+ help
57161+ If you say Y here, a random-sized gap will be enforced between allocated
57162+ thread stacks. Glibc's NPTL and other threading libraries that
57163+ pass MAP_STACK to the kernel for thread stack allocation are supported.
57164+ The implementation currently provides 8 bits of entropy for the gap.
57165+
57166+ Many distributions do not compile threaded remote services with the
57167+ -fstack-check argument to GCC, causing the variable-sized stack-based
57168+ allocator, alloca(), to not probe the stack on allocation. This
57169+ permits an unbounded alloca() to skip over any guard page and potentially
57170+ modify another thread's stack reliably. An enforced random gap
57171+ reduces the reliability of such an attack and increases the chance
57172+ that such a read/write to another thread's stack instead lands in
57173+ an unmapped area, causing a crash and triggering grsecurity's
57174+ anti-bruteforcing logic.
57175+
57176+config GRKERNSEC_PROC_MEMMAP
57177+ bool "Harden ASLR against information leaks and entropy reduction"
57178+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
57179+ depends on PAX_NOEXEC || PAX_ASLR
57180+ help
57181+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
57182+ give no information about the addresses of its mappings if
57183+ PaX features that rely on random addresses are enabled on the task.
57184+ In addition to sanitizing this information and disabling other
57185+ dangerous sources of information, this option causes reads of sensitive
57186+ /proc/<pid> entries where the file descriptor was opened in a different
57187+ task than the one performing the read. Such attempts are logged.
57188+ This option also limits argv/env strings for suid/sgid binaries
57189+ to 512KB to prevent a complete exhaustion of the stack entropy provided
57190+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
57191+ binaries to prevent alternative mmap layouts from being abused.
57192+
57193+ If you use PaX it is essential that you say Y here as it closes up
57194+ several holes that make full ASLR useless locally.
57195+
57196+config GRKERNSEC_BRUTE
57197+ bool "Deter exploit bruteforcing"
57198+ default y if GRKERNSEC_CONFIG_AUTO
57199+ help
57200+ If you say Y here, attempts to bruteforce exploits against forking
57201+ daemons such as apache or sshd, as well as against suid/sgid binaries
57202+ will be deterred. When a child of a forking daemon is killed by PaX
57203+ or crashes due to an illegal instruction or other suspicious signal,
57204+ the parent process will be delayed 30 seconds upon every subsequent
57205+ fork until the administrator is able to assess the situation and
57206+ restart the daemon.
57207+ In the suid/sgid case, the attempt is logged, the user has all their
57208+ processes terminated, and they are prevented from executing any further
57209+ processes for 15 minutes.
57210+ It is recommended that you also enable signal logging in the auditing
57211+ section so that logs are generated when a process triggers a suspicious
57212+ signal.
57213+ If the sysctl option is enabled, a sysctl option with name
57214+ "deter_bruteforce" is created.
57215+
57216+
57217+config GRKERNSEC_MODHARDEN
57218+ bool "Harden module auto-loading"
57219+ default y if GRKERNSEC_CONFIG_AUTO
57220+ depends on MODULES
57221+ help
57222+ If you say Y here, module auto-loading in response to use of some
57223+ feature implemented by an unloaded module will be restricted to
57224+ root users. Enabling this option helps defend against attacks
57225+ by unprivileged users who abuse the auto-loading behavior to
57226+ cause a vulnerable module to load that is then exploited.
57227+
57228+ If this option prevents a legitimate use of auto-loading for a
57229+ non-root user, the administrator can execute modprobe manually
57230+ with the exact name of the module mentioned in the alert log.
57231+ Alternatively, the administrator can add the module to the list
57232+ of modules loaded at boot by modifying init scripts.
57233+
57234+ Modification of init scripts will most likely be needed on
57235+ Ubuntu servers with encrypted home directory support enabled,
57236+ as the first non-root user logging in will cause the ecb(aes),
57237+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
57238+
57239+config GRKERNSEC_HIDESYM
57240+ bool "Hide kernel symbols"
57241+ default y if GRKERNSEC_CONFIG_AUTO
57242+ select PAX_USERCOPY_SLABS
57243+ help
57244+ If you say Y here, getting information on loaded modules, and
57245+ displaying all kernel symbols through a syscall will be restricted
57246+ to users with CAP_SYS_MODULE. For software compatibility reasons,
57247+ /proc/kallsyms will be restricted to the root user. The RBAC
57248+ system can hide that entry even from root.
57249+
57250+ This option also prevents leaking of kernel addresses through
57251+ several /proc entries.
57252+
57253+ Note that this option is only effective provided the following
57254+ conditions are met:
57255+ 1) The kernel using grsecurity is not precompiled by some distribution
57256+ 2) You have also enabled GRKERNSEC_DMESG
57257+ 3) You are using the RBAC system and hiding other files such as your
57258+ kernel image and System.map. Alternatively, enabling this option
57259+ causes the permissions on /boot, /lib/modules, and the kernel
57260+ source directory to change at compile time to prevent
57261+ reading by non-root users.
57262+ If the above conditions are met, this option will aid in providing a
57263+ useful protection against local kernel exploitation of overflows
57264+ and arbitrary read/write vulnerabilities.
57265+
57266+ It is highly recommended that you enable GRKERNSEC_PERF_HARDEN
57267+ in addition to this feature.
57268+
57269+config GRKERNSEC_KERN_LOCKOUT
57270+ bool "Active kernel exploit response"
57271+ default y if GRKERNSEC_CONFIG_AUTO
57272+ depends on X86 || ARM || PPC || SPARC
57273+ help
57274+ If you say Y here, when a PaX alert is triggered due to suspicious
57275+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
57276+ or an OOPS occurs due to bad memory accesses, instead of just
57277+ terminating the offending process (and potentially allowing
57278+ a subsequent exploit from the same user), we will take one of two
57279+ actions:
57280+ If the user was root, we will panic the system
57281+ If the user was non-root, we will log the attempt, terminate
57282+ all processes owned by the user, then prevent them from creating
57283+ any new processes until the system is restarted
57284+ This deters repeated kernel exploitation/bruteforcing attempts
57285+ and is useful for later forensics.
57286+
57287+endmenu
57288+menu "Role Based Access Control Options"
57289+depends on GRKERNSEC
57290+
57291+config GRKERNSEC_RBAC_DEBUG
57292+ bool
57293+
57294+config GRKERNSEC_NO_RBAC
57295+ bool "Disable RBAC system"
57296+ help
57297+ If you say Y here, the /dev/grsec device will be removed from the kernel,
57298+ preventing the RBAC system from being enabled. You should only say Y
57299+ here if you have no intention of using the RBAC system, so as to prevent
57300+ an attacker with root access from misusing the RBAC system to hide files
57301+ and processes when loadable module support and /dev/[k]mem have been
57302+ locked down.
57303+
57304+config GRKERNSEC_ACL_HIDEKERN
57305+ bool "Hide kernel processes"
57306+ help
57307+ If you say Y here, all kernel threads will be hidden to all
57308+ processes but those whose subject has the "view hidden processes"
57309+ flag.
57310+
57311+config GRKERNSEC_ACL_MAXTRIES
57312+ int "Maximum tries before password lockout"
57313+ default 3
57314+ help
57315+ This option enforces the maximum number of times a user can attempt
57316+ to authorize themselves with the grsecurity RBAC system before being
57317+ denied the ability to attempt authorization again for a specified time.
57318+ The lower the number, the harder it will be to brute-force a password.
57319+
57320+config GRKERNSEC_ACL_TIMEOUT
57321+ int "Time to wait after max password tries, in seconds"
57322+ default 30
57323+ help
57324+ This option specifies the time the user must wait after attempting to
57325+ authorize to the RBAC system with the maximum number of invalid
57326+ passwords. The higher the number, the harder it will be to brute-force
57327+ a password.
57328+
57329+endmenu
57330+menu "Filesystem Protections"
57331+depends on GRKERNSEC
57332+
57333+config GRKERNSEC_PROC
57334+ bool "Proc restrictions"
57335+ default y if GRKERNSEC_CONFIG_AUTO
57336+ help
57337+ If you say Y here, the permissions of the /proc filesystem
57338+ will be altered to enhance system security and privacy. You MUST
57339+ choose either a user only restriction or a user and group restriction.
57340+ Depending upon the option you choose, you can either restrict users to
57341+ see only the processes they themselves run, or choose a group that can
57342+ view all processes and files normally restricted to root if you choose
57343+ the "restrict to user only" option. NOTE: If you're running identd or
57344+ ntpd as a non-root user, you will have to run it as the group you
57345+ specify here.
57346+
57347+config GRKERNSEC_PROC_USER
57348+ bool "Restrict /proc to user only"
57349+ depends on GRKERNSEC_PROC
57350+ help
57351+ If you say Y here, non-root users will only be able to view their own
57352+ processes, and restricts them from viewing network-related information,
57353+ and viewing kernel symbol and module information.
57354+
57355+config GRKERNSEC_PROC_USERGROUP
57356+ bool "Allow special group"
57357+ default y if GRKERNSEC_CONFIG_AUTO
57358+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
57359+ help
57360+ If you say Y here, you will be able to select a group that will be
57361+ able to view all processes and network-related information. If you've
57362+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
57363+ remain hidden. This option is useful if you want to run identd as
57364+ a non-root user. The group you select may also be chosen at boot time
57365+ via "grsec_proc_gid=" on the kernel commandline.
57366+
57367+config GRKERNSEC_PROC_GID
57368+ int "GID for special group"
57369+ depends on GRKERNSEC_PROC_USERGROUP
57370+ default 1001
57371+
57372+config GRKERNSEC_PROC_ADD
57373+ bool "Additional restrictions"
57374+ default y if GRKERNSEC_CONFIG_AUTO
57375+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
57376+ help
57377+ If you say Y here, additional restrictions will be placed on
57378+ /proc that keep normal users from viewing device information and
57379+ slabinfo information that could be useful for exploits.
57380+
57381+config GRKERNSEC_LINK
57382+ bool "Linking restrictions"
57383+ default y if GRKERNSEC_CONFIG_AUTO
57384+ help
57385+ If you say Y here, /tmp race exploits will be prevented, since users
57386+ will no longer be able to follow symlinks owned by other users in
57387+ world-writable +t directories (e.g. /tmp), unless the owner of the
57388+	  symlink is the owner of the directory. Users will also not be
57389+ able to hardlink to files they do not own. If the sysctl option is
57390+ enabled, a sysctl option with name "linking_restrictions" is created.
57391+
57392+config GRKERNSEC_SYMLINKOWN
57393+ bool "Kernel-enforced SymlinksIfOwnerMatch"
57394+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
57395+ help
57396+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
57397+ that prevents it from being used as a security feature. As Apache
57398+ verifies the symlink by performing a stat() against the target of
57399+ the symlink before it is followed, an attacker can setup a symlink
57400+ to point to a same-owned file, then replace the symlink with one
57401+ that targets another user's file just after Apache "validates" the
57402+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
57403+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
57404+ will be in place for the group you specify. If the sysctl option
57405+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
57406+ created.
57407+
57408+config GRKERNSEC_SYMLINKOWN_GID
57409+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
57410+ depends on GRKERNSEC_SYMLINKOWN
57411+ default 1006
57412+ help
57413+ Setting this GID determines what group kernel-enforced
57414+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
57415+ is enabled, a sysctl option with name "symlinkown_gid" is created.
57416+
57417+config GRKERNSEC_FIFO
57418+ bool "FIFO restrictions"
57419+ default y if GRKERNSEC_CONFIG_AUTO
57420+ help
57421+ If you say Y here, users will not be able to write to FIFOs they don't
57422+ own in world-writable +t directories (e.g. /tmp), unless the owner of
57423+ the FIFO is the same owner of the directory it's held in. If the sysctl
57424+ option is enabled, a sysctl option with name "fifo_restrictions" is
57425+ created.
57426+
57427+config GRKERNSEC_SYSFS_RESTRICT
57428+ bool "Sysfs/debugfs restriction"
57429+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
57430+ depends on SYSFS
57431+ help
57432+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
57433+ any filesystem normally mounted under it (e.g. debugfs) will be
57434+ mostly accessible only by root. These filesystems generally provide access
57435+ to hardware and debug information that isn't appropriate for unprivileged
57436+ users of the system. Sysfs and debugfs have also become a large source
57437+ of new vulnerabilities, ranging from infoleaks to local compromise.
57438+ There has been very little oversight with an eye toward security involved
57439+ in adding new exporters of information to these filesystems, so their
57440+ use is discouraged.
57441+ For reasons of compatibility, a few directories have been whitelisted
57442+ for access by non-root users:
57443+ /sys/fs/selinux
57444+ /sys/fs/fuse
57445+ /sys/devices/system/cpu
57446+
57447+config GRKERNSEC_ROFS
57448+ bool "Runtime read-only mount protection"
57449+ help
57450+ If you say Y here, a sysctl option with name "romount_protect" will
57451+ be created. By setting this option to 1 at runtime, filesystems
57452+ will be protected in the following ways:
57453+ * No new writable mounts will be allowed
57454+ * Existing read-only mounts won't be able to be remounted read/write
57455+ * Write operations will be denied on all block devices
57456+ This option acts independently of grsec_lock: once it is set to 1,
57457+ it cannot be turned off. Therefore, please be mindful of the resulting
57458+ behavior if this option is enabled in an init script on a read-only
57459+ filesystem. This feature is mainly intended for secure embedded systems.
57460+
57461+config GRKERNSEC_DEVICE_SIDECHANNEL
57462+ bool "Eliminate stat/notify-based device sidechannels"
57463+ default y if GRKERNSEC_CONFIG_AUTO
57464+ help
57465+ If you say Y here, timing analyses on block or character
57466+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
57467+ will be thwarted for unprivileged users. If a process without
57468+ CAP_MKNOD stats such a device, the last access and last modify times
57469+ will match the device's create time. No access or modify events
57470+ will be triggered through inotify/dnotify/fanotify for such devices.
57471+ This feature will prevent attacks that may at a minimum
57472+ allow an attacker to determine the administrator's password length.
57473+
57474+config GRKERNSEC_CHROOT
57475+ bool "Chroot jail restrictions"
57476+ default y if GRKERNSEC_CONFIG_AUTO
57477+ help
57478+ If you say Y here, you will be able to choose several options that will
57479+ make breaking out of a chrooted jail much more difficult. If you
57480+ encounter no software incompatibilities with the following options, it
57481+ is recommended that you enable each one.
57482+
57483+config GRKERNSEC_CHROOT_MOUNT
57484+ bool "Deny mounts"
57485+ default y if GRKERNSEC_CONFIG_AUTO
57486+ depends on GRKERNSEC_CHROOT
57487+ help
57488+ If you say Y here, processes inside a chroot will not be able to
57489+ mount or remount filesystems. If the sysctl option is enabled, a
57490+ sysctl option with name "chroot_deny_mount" is created.
57491+
57492+config GRKERNSEC_CHROOT_DOUBLE
57493+ bool "Deny double-chroots"
57494+ default y if GRKERNSEC_CONFIG_AUTO
57495+ depends on GRKERNSEC_CHROOT
57496+ help
57497+ If you say Y here, processes inside a chroot will not be able to chroot
57498+ again outside the chroot. This is a widely used method of breaking
57499+ out of a chroot jail and should not be allowed. If the sysctl
57500+ option is enabled, a sysctl option with name
57501+ "chroot_deny_chroot" is created.
57502+
57503+config GRKERNSEC_CHROOT_PIVOT
57504+ bool "Deny pivot_root in chroot"
57505+ default y if GRKERNSEC_CONFIG_AUTO
57506+ depends on GRKERNSEC_CHROOT
57507+ help
57508+ If you say Y here, processes inside a chroot will not be able to use
57509+ a function called pivot_root() that was introduced in Linux 2.3.41. It
57510+ works similar to chroot in that it changes the root filesystem. This
57511+ function could be misused in a chrooted process to attempt to break out
57512+ of the chroot, and therefore should not be allowed. If the sysctl
57513+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
57514+ created.
57515+
57516+config GRKERNSEC_CHROOT_CHDIR
57517+ bool "Enforce chdir(\"/\") on all chroots"
57518+ default y if GRKERNSEC_CONFIG_AUTO
57519+ depends on GRKERNSEC_CHROOT
57520+ help
57521+ If you say Y here, the current working directory of all newly-chrooted
57522+	  applications will be set to the root directory of the chroot.
57523+ The man page on chroot(2) states:
57524+ Note that this call does not change the current working
57525+ directory, so that `.' can be outside the tree rooted at
57526+ `/'. In particular, the super-user can escape from a
57527+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
57528+
57529+ It is recommended that you say Y here, since it's not known to break
57530+ any software. If the sysctl option is enabled, a sysctl option with
57531+ name "chroot_enforce_chdir" is created.
57532+
57533+config GRKERNSEC_CHROOT_CHMOD
57534+ bool "Deny (f)chmod +s"
57535+ default y if GRKERNSEC_CONFIG_AUTO
57536+ depends on GRKERNSEC_CHROOT
57537+ help
57538+ If you say Y here, processes inside a chroot will not be able to chmod
57539+ or fchmod files to make them have suid or sgid bits. This protects
57540+ against another published method of breaking a chroot. If the sysctl
57541+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
57542+ created.
57543+
57544+config GRKERNSEC_CHROOT_FCHDIR
57545+ bool "Deny fchdir out of chroot"
57546+ default y if GRKERNSEC_CONFIG_AUTO
57547+ depends on GRKERNSEC_CHROOT
57548+ help
57549+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
57550+ to a file descriptor of the chrooting process that points to a directory
57551+ outside the filesystem will be stopped. If the sysctl option
57552+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
57553+
57554+config GRKERNSEC_CHROOT_MKNOD
57555+ bool "Deny mknod"
57556+ default y if GRKERNSEC_CONFIG_AUTO
57557+ depends on GRKERNSEC_CHROOT
57558+ help
57559+ If you say Y here, processes inside a chroot will not be allowed to
57560+ mknod. The problem with using mknod inside a chroot is that it
57561+ would allow an attacker to create a device entry that is the same
57562+ as one on the physical root of your system, which could range from
57563+ anything from the console device to a device for your harddrive (which
57564+ they could then use to wipe the drive or steal data). It is recommended
57565+ that you say Y here, unless you run into software incompatibilities.
57566+ If the sysctl option is enabled, a sysctl option with name
57567+ "chroot_deny_mknod" is created.
57568+
57569+config GRKERNSEC_CHROOT_SHMAT
57570+ bool "Deny shmat() out of chroot"
57571+ default y if GRKERNSEC_CONFIG_AUTO
57572+ depends on GRKERNSEC_CHROOT
57573+ help
57574+ If you say Y here, processes inside a chroot will not be able to attach
57575+ to shared memory segments that were created outside of the chroot jail.
57576+ It is recommended that you say Y here. If the sysctl option is enabled,
57577+ a sysctl option with name "chroot_deny_shmat" is created.
57578+
57579+config GRKERNSEC_CHROOT_UNIX
57580+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
57581+ default y if GRKERNSEC_CONFIG_AUTO
57582+ depends on GRKERNSEC_CHROOT
57583+ help
57584+ If you say Y here, processes inside a chroot will not be able to
57585+ connect to abstract (meaning not belonging to a filesystem) Unix
57586+ domain sockets that were bound outside of a chroot. It is recommended
57587+ that you say Y here. If the sysctl option is enabled, a sysctl option
57588+ with name "chroot_deny_unix" is created.
57589+
57590+config GRKERNSEC_CHROOT_FINDTASK
57591+ bool "Protect outside processes"
57592+ default y if GRKERNSEC_CONFIG_AUTO
57593+ depends on GRKERNSEC_CHROOT
57594+ help
57595+ If you say Y here, processes inside a chroot will not be able to
57596+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
57597+ getsid, or view any process outside of the chroot. If the sysctl
57598+ option is enabled, a sysctl option with name "chroot_findtask" is
57599+ created.
57600+
57601+config GRKERNSEC_CHROOT_NICE
57602+ bool "Restrict priority changes"
57603+ default y if GRKERNSEC_CONFIG_AUTO
57604+ depends on GRKERNSEC_CHROOT
57605+ help
57606+ If you say Y here, processes inside a chroot will not be able to raise
57607+ the priority of processes in the chroot, or alter the priority of
57608+ processes outside the chroot. This provides more security than simply
57609+ removing CAP_SYS_NICE from the process' capability set. If the
57610+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
57611+ is created.
57612+
57613+config GRKERNSEC_CHROOT_SYSCTL
57614+ bool "Deny sysctl writes"
57615+ default y if GRKERNSEC_CONFIG_AUTO
57616+ depends on GRKERNSEC_CHROOT
57617+ help
57618+ If you say Y here, an attacker in a chroot will not be able to
57619+ write to sysctl entries, either by sysctl(2) or through a /proc
57620+ interface. It is strongly recommended that you say Y here. If the
57621+ sysctl option is enabled, a sysctl option with name
57622+ "chroot_deny_sysctl" is created.
57623+
57624+config GRKERNSEC_CHROOT_CAPS
57625+ bool "Capability restrictions"
57626+ default y if GRKERNSEC_CONFIG_AUTO
57627+ depends on GRKERNSEC_CHROOT
57628+ help
57629+ If you say Y here, the capabilities on all processes within a
57630+ chroot jail will be lowered to stop module insertion, raw i/o,
57631+ system and net admin tasks, rebooting the system, modifying immutable
57632+ files, modifying IPC owned by another, and changing the system time.
57633+ This is left an option because it can break some apps. Disable this
57634+ if your chrooted apps are having problems performing those kinds of
57635+ tasks. If the sysctl option is enabled, a sysctl option with
57636+ name "chroot_caps" is created.
57637+
57638+config GRKERNSEC_CHROOT_INITRD
57639+ bool "Exempt initrd tasks from restrictions"
57640+ default y if GRKERNSEC_CONFIG_AUTO
57641+ depends on GRKERNSEC_CHROOT && BLK_DEV_RAM
57642+ help
57643+ If you say Y here, tasks started prior to init will be exempted from
57644+ grsecurity's chroot restrictions. This option is mainly meant to
57645+ resolve Plymouth's performing privileged operations unnecessarily
57646+ in a chroot.
57647+
57648+endmenu
57649+menu "Kernel Auditing"
57650+depends on GRKERNSEC
57651+
57652+config GRKERNSEC_AUDIT_GROUP
57653+ bool "Single group for auditing"
57654+ help
57655+ If you say Y here, the exec and chdir logging features will only operate
57656+ on a group you specify. This option is recommended if you only want to
57657+ watch certain users instead of having a large amount of logs from the
57658+ entire system. If the sysctl option is enabled, a sysctl option with
57659+ name "audit_group" is created.
57660+
57661+config GRKERNSEC_AUDIT_GID
57662+ int "GID for auditing"
57663+ depends on GRKERNSEC_AUDIT_GROUP
57664+ default 1007
57665+
57666+config GRKERNSEC_EXECLOG
57667+ bool "Exec logging"
57668+ help
57669+ If you say Y here, all execve() calls will be logged (since the
57670+ other exec*() calls are frontends to execve(), all execution
57671+ will be logged). Useful for shell-servers that like to keep track
57672+ of their users. If the sysctl option is enabled, a sysctl option with
57673+ name "exec_logging" is created.
57674+ WARNING: This option when enabled will produce a LOT of logs, especially
57675+ on an active system.
57676+
57677+config GRKERNSEC_RESLOG
57678+ bool "Resource logging"
57679+ default y if GRKERNSEC_CONFIG_AUTO
57680+ help
57681+ If you say Y here, all attempts to overstep resource limits will
57682+ be logged with the resource name, the requested size, and the current
57683+ limit. It is highly recommended that you say Y here. If the sysctl
57684+ option is enabled, a sysctl option with name "resource_logging" is
57685+ created. If the RBAC system is enabled, the sysctl value is ignored.
57686+
57687+config GRKERNSEC_CHROOT_EXECLOG
57688+ bool "Log execs within chroot"
57689+ help
57690+ If you say Y here, all executions inside a chroot jail will be logged
57691+ to syslog. This can cause a large amount of logs if certain
57692+ applications (eg. djb's daemontools) are installed on the system, and
57693+ is therefore left as an option. If the sysctl option is enabled, a
57694+ sysctl option with name "chroot_execlog" is created.
57695+
57696+config GRKERNSEC_AUDIT_PTRACE
57697+ bool "Ptrace logging"
57698+ help
57699+ If you say Y here, all attempts to attach to a process via ptrace
57700+ will be logged. If the sysctl option is enabled, a sysctl option
57701+ with name "audit_ptrace" is created.
57702+
57703+config GRKERNSEC_AUDIT_CHDIR
57704+ bool "Chdir logging"
57705+ help
57706+ If you say Y here, all chdir() calls will be logged. If the sysctl
57707+ option is enabled, a sysctl option with name "audit_chdir" is created.
57708+
57709+config GRKERNSEC_AUDIT_MOUNT
57710+ bool "(Un)Mount logging"
57711+ help
57712+ If you say Y here, all mounts and unmounts will be logged. If the
57713+ sysctl option is enabled, a sysctl option with name "audit_mount" is
57714+ created.
57715+
57716+config GRKERNSEC_SIGNAL
57717+ bool "Signal logging"
57718+ default y if GRKERNSEC_CONFIG_AUTO
57719+ help
57720+ If you say Y here, certain important signals will be logged, such as
57721+ SIGSEGV, which will as a result inform you of when a error in a program
57722+ occurred, which in some cases could mean a possible exploit attempt.
57723+ If the sysctl option is enabled, a sysctl option with name
57724+ "signal_logging" is created.
57725+
57726+config GRKERNSEC_FORKFAIL
57727+ bool "Fork failure logging"
57728+ help
57729+ If you say Y here, all failed fork() attempts will be logged.
57730+ This could suggest a fork bomb, or someone attempting to overstep
57731+ their process limit. If the sysctl option is enabled, a sysctl option
57732+ with name "forkfail_logging" is created.
57733+
57734+config GRKERNSEC_TIME
57735+ bool "Time change logging"
57736+ default y if GRKERNSEC_CONFIG_AUTO
57737+ help
57738+ If you say Y here, any changes of the system clock will be logged.
57739+ If the sysctl option is enabled, a sysctl option with name
57740+ "timechange_logging" is created.
57741+
57742+config GRKERNSEC_PROC_IPADDR
57743+ bool "/proc/<pid>/ipaddr support"
57744+ default y if GRKERNSEC_CONFIG_AUTO
57745+ help
57746+ If you say Y here, a new entry will be added to each /proc/<pid>
57747+ directory that contains the IP address of the person using the task.
57748+ The IP is carried across local TCP and AF_UNIX stream sockets.
57749+ This information can be useful for IDS/IPSes to perform remote response
57750+ to a local attack. The entry is readable by only the owner of the
57751+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
57752+ the RBAC system), and thus does not create privacy concerns.
57753+
57754+config GRKERNSEC_RWXMAP_LOG
57755+ bool 'Denied RWX mmap/mprotect logging'
57756+ default y if GRKERNSEC_CONFIG_AUTO
57757+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
57758+ help
57759+ If you say Y here, calls to mmap() and mprotect() with explicit
57760+ usage of PROT_WRITE and PROT_EXEC together will be logged when
57761+ denied by the PAX_MPROTECT feature. If the sysctl option is
57762+ enabled, a sysctl option with name "rwxmap_logging" is created.
57763+
57764+config GRKERNSEC_AUDIT_TEXTREL
57765+ bool 'ELF text relocations logging (READ HELP)'
57766+ depends on PAX_MPROTECT
57767+ help
57768+ If you say Y here, text relocations will be logged with the filename
57769+ of the offending library or binary. The purpose of the feature is
57770+ to help Linux distribution developers get rid of libraries and
57771+ binaries that need text relocations which hinder the future progress
57772+ of PaX. Only Linux distribution developers should say Y here, and
57773+ never on a production machine, as this option creates an information
57774+ leak that could aid an attacker in defeating the randomization of
57775+ a single memory region. If the sysctl option is enabled, a sysctl
57776+ option with name "audit_textrel" is created.
57777+
57778+endmenu
57779+
57780+menu "Executable Protections"
57781+depends on GRKERNSEC
57782+
57783+config GRKERNSEC_DMESG
57784+ bool "Dmesg(8) restriction"
57785+ default y if GRKERNSEC_CONFIG_AUTO
57786+ help
57787+ If you say Y here, non-root users will not be able to use dmesg(8)
57788+ to view the contents of the kernel's circular log buffer.
57789+ The kernel's log buffer often contains kernel addresses and other
57790+ identifying information useful to an attacker in fingerprinting a
57791+ system for a targeted exploit.
57792+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
57793+ created.
57794+
57795+config GRKERNSEC_HARDEN_PTRACE
57796+ bool "Deter ptrace-based process snooping"
57797+ default y if GRKERNSEC_CONFIG_AUTO
57798+ help
57799+ If you say Y here, TTY sniffers and other malicious monitoring
57800+ programs implemented through ptrace will be defeated. If you
57801+ have been using the RBAC system, this option has already been
57802+ enabled for several years for all users, with the ability to make
57803+ fine-grained exceptions.
57804+
57805+ This option only affects the ability of non-root users to ptrace
57806+ processes that are not a descendent of the ptracing process.
57807+ This means that strace ./binary and gdb ./binary will still work,
57808+ but attaching to arbitrary processes will not. If the sysctl
57809+ option is enabled, a sysctl option with name "harden_ptrace" is
57810+ created.
57811+
57812+config GRKERNSEC_PTRACE_READEXEC
57813+ bool "Require read access to ptrace sensitive binaries"
57814+ default y if GRKERNSEC_CONFIG_AUTO
57815+ help
57816+ If you say Y here, unprivileged users will not be able to ptrace unreadable
57817+ binaries. This option is useful in environments that
57818+ remove the read bits (e.g. file mode 4711) from suid binaries to
57819+ prevent infoleaking of their contents. This option adds
57820+ consistency to the use of that file mode, as the binary could normally
57821+ be read out when run without privileges while ptracing.
57822+
57823+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
57824+ is created.
57825+
57826+config GRKERNSEC_SETXID
57827+ bool "Enforce consistent multithreaded privileges"
57828+ default y if GRKERNSEC_CONFIG_AUTO
57829+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
57830+ help
57831+ If you say Y here, a change from a root uid to a non-root uid
57832+ in a multithreaded application will cause the resulting uids,
57833+ gids, supplementary groups, and capabilities in that thread
57834+ to be propagated to the other threads of the process. In most
57835+ cases this is unnecessary, as glibc will emulate this behavior
57836+ on behalf of the application. Other libcs do not act in the
57837+ same way, allowing the other threads of the process to continue
57838+ running with root privileges. If the sysctl option is enabled,
57839+ a sysctl option with name "consistent_setxid" is created.
57840+
57841+config GRKERNSEC_TPE
57842+ bool "Trusted Path Execution (TPE)"
57843+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
57844+ help
57845+ If you say Y here, you will be able to choose a gid to add to the
57846+ supplementary groups of users you want to mark as "untrusted."
57847+ These users will not be able to execute any files that are not in
57848+ root-owned directories writable only by root. If the sysctl option
57849+ is enabled, a sysctl option with name "tpe" is created.
57850+
57851+config GRKERNSEC_TPE_ALL
57852+ bool "Partially restrict all non-root users"
57853+ depends on GRKERNSEC_TPE
57854+ help
57855+ If you say Y here, all non-root users will be covered under
57856+ a weaker TPE restriction. This is separate from, and in addition to,
57857+ the main TPE options that you have selected elsewhere. Thus, if a
57858+ "trusted" GID is chosen, this restriction applies to even that GID.
57859+ Under this restriction, all non-root users will only be allowed to
57860+ execute files in directories they own that are not group or
57861+ world-writable, or in directories owned by root and writable only by
57862+ root. If the sysctl option is enabled, a sysctl option with name
57863+ "tpe_restrict_all" is created.
57864+
57865+config GRKERNSEC_TPE_INVERT
57866+ bool "Invert GID option"
57867+ depends on GRKERNSEC_TPE
57868+ help
57869+ If you say Y here, the group you specify in the TPE configuration will
57870+ decide what group TPE restrictions will be *disabled* for. This
57871+ option is useful if you want TPE restrictions to be applied to most
57872+ users on the system. If the sysctl option is enabled, a sysctl option
57873+ with name "tpe_invert" is created. Unlike other sysctl options, this
57874+ entry will default to on for backward-compatibility.
57875+
57876+config GRKERNSEC_TPE_GID
57877+ int
57878+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
57879+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
57880+
57881+config GRKERNSEC_TPE_UNTRUSTED_GID
57882+ int "GID for TPE-untrusted users"
57883+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
57884+ default 1005
57885+ help
57886+ Setting this GID determines what group TPE restrictions will be
57887+ *enabled* for. If the sysctl option is enabled, a sysctl option
57888+ with name "tpe_gid" is created.
57889+
57890+config GRKERNSEC_TPE_TRUSTED_GID
57891+ int "GID for TPE-trusted users"
57892+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
57893+ default 1005
57894+ help
57895+ Setting this GID determines what group TPE restrictions will be
57896+ *disabled* for. If the sysctl option is enabled, a sysctl option
57897+ with name "tpe_gid" is created.
57898+
57899+endmenu
57900+menu "Network Protections"
57901+depends on GRKERNSEC
57902+
57903+config GRKERNSEC_RANDNET
57904+ bool "Larger entropy pools"
57905+ default y if GRKERNSEC_CONFIG_AUTO
57906+ help
57907+ If you say Y here, the entropy pools used for many features of Linux
57908+ and grsecurity will be doubled in size. Since several grsecurity
57909+ features use additional randomness, it is recommended that you say Y
57910+ here. Saying Y here has a similar effect as modifying
57911+ /proc/sys/kernel/random/poolsize.
57912+
57913+config GRKERNSEC_BLACKHOLE
57914+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
57915+ default y if GRKERNSEC_CONFIG_AUTO
57916+ depends on NET
57917+ help
57918+ If you say Y here, neither TCP resets nor ICMP
57919+ destination-unreachable packets will be sent in response to packets
57920+ sent to ports for which no associated listening process exists.
57921+ This feature supports both IPV4 and IPV6 and exempts the
57922+ loopback interface from blackholing. Enabling this feature
57923+ makes a host more resilient to DoS attacks and reduces network
57924+ visibility against scanners.
57925+
57926+ The blackhole feature as-implemented is equivalent to the FreeBSD
57927+ blackhole feature, as it prevents RST responses to all packets, not
57928+ just SYNs. Under most application behavior this causes no
57929+ problems, but applications (like haproxy) may not close certain
57930+ connections in a way that cleanly terminates them on the remote
57931+ end, leaving the remote host in LAST_ACK state. Because of this
57932+ side-effect and to prevent intentional LAST_ACK DoSes, this
57933+ feature also adds automatic mitigation against such attacks.
57934+ The mitigation drastically reduces the amount of time a socket
57935+ can spend in LAST_ACK state. If you're using haproxy and not
57936+ all servers it connects to have this option enabled, consider
57937+ disabling this feature on the haproxy host.
57938+
57939+ If the sysctl option is enabled, two sysctl options with names
57940+ "ip_blackhole" and "lastack_retries" will be created.
57941+ While "ip_blackhole" takes the standard zero/non-zero on/off
57942+ toggle, "lastack_retries" uses the same kinds of values as
57943+ "tcp_retries1" and "tcp_retries2". The default value of 4
57944+ prevents a socket from lasting more than 45 seconds in LAST_ACK
57945+ state.
57946+
57947+config GRKERNSEC_NO_SIMULT_CONNECT
57948+ bool "Disable TCP Simultaneous Connect"
57949+ default y if GRKERNSEC_CONFIG_AUTO
57950+ depends on NET
57951+ help
57952+ If you say Y here, a feature by Willy Tarreau will be enabled that
57953+ removes a weakness in Linux's strict implementation of TCP that
57954+ allows two clients to connect to each other without either entering
57955+ a listening state. The weakness allows an attacker to easily prevent
57956+ a client from connecting to a known server provided the source port
57957+ for the connection is guessed correctly.
57958+
57959+ As the weakness could be used to prevent an antivirus or IPS from
57960+ fetching updates, or prevent an SSL gateway from fetching a CRL,
57961+ it should be eliminated by enabling this option. Though Linux is
57962+ one of few operating systems supporting simultaneous connect, it
57963+ has no legitimate use in practice and is rarely supported by firewalls.
57964+
57965+config GRKERNSEC_SOCKET
57966+ bool "Socket restrictions"
57967+ depends on NET
57968+ help
57969+ If you say Y here, you will be able to choose from several options.
57970+ If you assign a GID on your system and add it to the supplementary
57971+ groups of users you want to restrict socket access to, this patch
57972+ will perform up to three things, based on the option(s) you choose.
57973+
57974+config GRKERNSEC_SOCKET_ALL
57975+ bool "Deny any sockets to group"
57976+ depends on GRKERNSEC_SOCKET
57977+ help
57978+ If you say Y here, you will be able to choose a GID of whose users will
57979+ be unable to connect to other hosts from your machine or run server
57980+ applications from your machine. If the sysctl option is enabled, a
57981+ sysctl option with name "socket_all" is created.
57982+
57983+config GRKERNSEC_SOCKET_ALL_GID
57984+ int "GID to deny all sockets for"
57985+ depends on GRKERNSEC_SOCKET_ALL
57986+ default 1004
57987+ help
57988+ Here you can choose the GID to disable socket access for. Remember to
57989+ add the users you want socket access disabled for to the GID
57990+ specified here. If the sysctl option is enabled, a sysctl option
57991+ with name "socket_all_gid" is created.
57992+
57993+config GRKERNSEC_SOCKET_CLIENT
57994+ bool "Deny client sockets to group"
57995+ depends on GRKERNSEC_SOCKET
57996+ help
57997+ If you say Y here, you will be able to choose a GID of whose users will
57998+ be unable to connect to other hosts from your machine, but will be
57999+ able to run servers. If this option is enabled, all users in the group
58000+ you specify will have to use passive mode when initiating ftp transfers
58001+ from the shell on your machine. If the sysctl option is enabled, a
58002+ sysctl option with name "socket_client" is created.
58003+
58004+config GRKERNSEC_SOCKET_CLIENT_GID
58005+ int "GID to deny client sockets for"
58006+ depends on GRKERNSEC_SOCKET_CLIENT
58007+ default 1003
58008+ help
58009+ Here you can choose the GID to disable client socket access for.
58010+ Remember to add the users you want client socket access disabled for to
58011+ the GID specified here. If the sysctl option is enabled, a sysctl
58012+ option with name "socket_client_gid" is created.
58013+
58014+config GRKERNSEC_SOCKET_SERVER
58015+ bool "Deny server sockets to group"
58016+ depends on GRKERNSEC_SOCKET
58017+ help
58018+ If you say Y here, you will be able to choose a GID of whose users will
58019+ be unable to run server applications from your machine. If the sysctl
58020+ option is enabled, a sysctl option with name "socket_server" is created.
58021+
58022+config GRKERNSEC_SOCKET_SERVER_GID
58023+ int "GID to deny server sockets for"
58024+ depends on GRKERNSEC_SOCKET_SERVER
58025+ default 1002
58026+ help
58027+ Here you can choose the GID to disable server socket access for.
58028+ Remember to add the users you want server socket access disabled for to
58029+ the GID specified here. If the sysctl option is enabled, a sysctl
58030+ option with name "socket_server_gid" is created.
58031+
58032+endmenu
58033+menu "Sysctl Support"
58034+depends on GRKERNSEC && SYSCTL
58035+
58036+config GRKERNSEC_SYSCTL
58037+ bool "Sysctl support"
58038+ default y if GRKERNSEC_CONFIG_AUTO
58039+ help
58040+ If you say Y here, you will be able to change the options that
58041+ grsecurity runs with at bootup, without having to recompile your
58042+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
58043+ to enable (1) or disable (0) various features. All the sysctl entries
58044+ are mutable until the "grsec_lock" entry is set to a non-zero value.
58045+ All features enabled in the kernel configuration are disabled at boot
58046+ if you do not say Y to the "Turn on features by default" option.
58047+ All options should be set at startup, and the grsec_lock entry should
58048+ be set to a non-zero value after all the options are set.
58049+ *THIS IS EXTREMELY IMPORTANT*
58050+
58051+config GRKERNSEC_SYSCTL_DISTRO
58052+ bool "Extra sysctl support for distro makers (READ HELP)"
58053+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
58054+ help
58055+ If you say Y here, additional sysctl options will be created
58056+ for features that affect processes running as root. Therefore,
58057+ it is critical when using this option that the grsec_lock entry be
58058+ enabled after boot. Only distros with prebuilt kernel packages
58059+ with this option enabled that can ensure grsec_lock is enabled
58060+ after boot should use this option.
58061+ *Failure to set grsec_lock after boot makes all grsec features
58062+ this option covers useless*
58063+
58064+ Currently this option creates the following sysctl entries:
58065+ "Disable Privileged I/O": "disable_priv_io"
58066+
58067+config GRKERNSEC_SYSCTL_ON
58068+ bool "Turn on features by default"
58069+ default y if GRKERNSEC_CONFIG_AUTO
58070+ depends on GRKERNSEC_SYSCTL
58071+ help
58072+ If you say Y here, instead of having all features enabled in the
58073+ kernel configuration disabled at boot time, the features will be
58074+ enabled at boot time. It is recommended you say Y here unless
58075+ there is some reason you would want all sysctl-tunable features to
58076+ be disabled by default. As mentioned elsewhere, it is important
58077+ to enable the grsec_lock entry once you have finished modifying
58078+ the sysctl entries.
58079+
58080+endmenu
58081+menu "Logging Options"
58082+depends on GRKERNSEC
58083+
58084+config GRKERNSEC_FLOODTIME
58085+ int "Seconds in between log messages (minimum)"
58086+ default 10
58087+ help
58088+ This option allows you to enforce the number of seconds between
58089+ grsecurity log messages. The default should be suitable for most
58090+ people, however, if you choose to change it, choose a value small enough
58091+ to allow informative logs to be produced, but large enough to
58092+ prevent flooding.
58093+
58094+config GRKERNSEC_FLOODBURST
58095+ int "Number of messages in a burst (maximum)"
58096+ default 6
58097+ help
58098+ This option allows you to choose the maximum number of messages allowed
58099+ within the flood time interval you chose in a separate option. The
58100+ default should be suitable for most people, however if you find that
58101+ many of your logs are being interpreted as flooding, you may want to
58102+ raise this value.
58103+
58104+endmenu
58105diff --git a/grsecurity/Makefile b/grsecurity/Makefile
58106new file mode 100644
58107index 0000000..1b9afa9
58108--- /dev/null
58109+++ b/grsecurity/Makefile
58110@@ -0,0 +1,38 @@
58111+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
58112+# during 2001-2009 it has been completely redesigned by Brad Spengler
58113+# into an RBAC system
58114+#
58115+# All code in this directory and various hooks inserted throughout the kernel
58116+# are copyright Brad Spengler - Open Source Security, Inc., and released
58117+# under the GPL v2 or higher
58118+
58119+KBUILD_CFLAGS += -Werror
58120+
58121+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
58122+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
58123+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
58124+
58125+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
58126+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
58127+ gracl_learn.o grsec_log.o
58128+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
58129+
58130+ifdef CONFIG_NET
58131+obj-y += grsec_sock.o
58132+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
58133+endif
58134+
58135+ifndef CONFIG_GRKERNSEC
58136+obj-y += grsec_disabled.o
58137+endif
58138+
58139+ifdef CONFIG_GRKERNSEC_HIDESYM
58140+extra-y := grsec_hidesym.o
58141+$(obj)/grsec_hidesym.o:
58142+ @-chmod -f 500 /boot
58143+ @-chmod -f 500 /lib/modules
58144+ @-chmod -f 500 /lib64/modules
58145+ @-chmod -f 500 /lib32/modules
58146+ @-chmod -f 700 .
58147+ @echo ' grsec: protected kernel image paths'
58148+endif
58149diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
58150new file mode 100644
58151index 0000000..1248ee0
58152--- /dev/null
58153+++ b/grsecurity/gracl.c
58154@@ -0,0 +1,4073 @@
58155+#include <linux/kernel.h>
58156+#include <linux/module.h>
58157+#include <linux/sched.h>
58158+#include <linux/mm.h>
58159+#include <linux/file.h>
58160+#include <linux/fs.h>
58161+#include <linux/namei.h>
58162+#include <linux/mount.h>
58163+#include <linux/tty.h>
58164+#include <linux/proc_fs.h>
58165+#include <linux/lglock.h>
58166+#include <linux/slab.h>
58167+#include <linux/vmalloc.h>
58168+#include <linux/types.h>
58169+#include <linux/sysctl.h>
58170+#include <linux/netdevice.h>
58171+#include <linux/ptrace.h>
58172+#include <linux/gracl.h>
58173+#include <linux/gralloc.h>
58174+#include <linux/security.h>
58175+#include <linux/grinternal.h>
58176+#include <linux/pid_namespace.h>
58177+#include <linux/stop_machine.h>
58178+#include <linux/fdtable.h>
58179+#include <linux/percpu.h>
58180+#include <linux/lglock.h>
58181+#include <linux/hugetlb.h>
58182+#include <linux/posix-timers.h>
58183+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
58184+#include <linux/magic.h>
58185+#include <linux/pagemap.h>
58186+#include "../fs/btrfs/async-thread.h"
58187+#include "../fs/btrfs/ctree.h"
58188+#include "../fs/btrfs/btrfs_inode.h"
58189+#endif
58190+#include "../fs/mount.h"
58191+
58192+#include <asm/uaccess.h>
58193+#include <asm/errno.h>
58194+#include <asm/mman.h>
58195+
58196+extern struct lglock vfsmount_lock;
58197+
58198+static struct acl_role_db acl_role_set;
58199+static struct name_db name_set;
58200+static struct inodev_db inodev_set;
58201+
58202+/* for keeping track of userspace pointers used for subjects, so we
58203+ can share references in the kernel as well
58204+*/
58205+
58206+static struct path real_root;
58207+
58208+static struct acl_subj_map_db subj_map_set;
58209+
58210+static struct acl_role_label *default_role;
58211+
58212+static struct acl_role_label *role_list;
58213+
58214+static u16 acl_sp_role_value;
58215+
58216+extern char *gr_shared_page[4];
58217+static DEFINE_MUTEX(gr_dev_mutex);
58218+DEFINE_RWLOCK(gr_inode_lock);
58219+
58220+struct gr_arg *gr_usermode;
58221+
58222+static unsigned int gr_status __read_only = GR_STATUS_INIT;
58223+
58224+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
58225+extern void gr_clear_learn_entries(void);
58226+
58227+unsigned char *gr_system_salt;
58228+unsigned char *gr_system_sum;
58229+
58230+static struct sprole_pw **acl_special_roles = NULL;
58231+static __u16 num_sprole_pws = 0;
58232+
58233+static struct acl_role_label *kernel_role = NULL;
58234+
58235+static unsigned int gr_auth_attempts = 0;
58236+static unsigned long gr_auth_expires = 0UL;
58237+
58238+#ifdef CONFIG_NET
58239+extern struct vfsmount *sock_mnt;
58240+#endif
58241+
58242+extern struct vfsmount *pipe_mnt;
58243+extern struct vfsmount *shm_mnt;
58244+
58245+#ifdef CONFIG_HUGETLBFS
58246+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
58247+#endif
58248+
58249+static struct acl_object_label *fakefs_obj_rw;
58250+static struct acl_object_label *fakefs_obj_rwx;
58251+
58252+extern int gr_init_uidset(void);
58253+extern void gr_free_uidset(void);
58254+extern void gr_remove_uid(uid_t uid);
58255+extern int gr_find_uid(uid_t uid);
58256+
58257+__inline__ int
58258+gr_acl_is_enabled(void)
58259+{
58260+ return (gr_status & GR_READY);
58261+}
58262+
58263+static inline dev_t __get_dev(const struct dentry *dentry)
58264+{
58265+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
58266+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
58267+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
58268+ else
58269+#endif
58270+ return dentry->d_sb->s_dev;
58271+}
58272+
58273+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
58274+{
58275+ return __get_dev(dentry);
58276+}
58277+
58278+static char gr_task_roletype_to_char(struct task_struct *task)
58279+{
58280+ switch (task->role->roletype &
58281+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
58282+ GR_ROLE_SPECIAL)) {
58283+ case GR_ROLE_DEFAULT:
58284+ return 'D';
58285+ case GR_ROLE_USER:
58286+ return 'U';
58287+ case GR_ROLE_GROUP:
58288+ return 'G';
58289+ case GR_ROLE_SPECIAL:
58290+ return 'S';
58291+ }
58292+
58293+ return 'X';
58294+}
58295+
58296+char gr_roletype_to_char(void)
58297+{
58298+ return gr_task_roletype_to_char(current);
58299+}
58300+
58301+__inline__ int
58302+gr_acl_tpe_check(void)
58303+{
58304+ if (unlikely(!(gr_status & GR_READY)))
58305+ return 0;
58306+ if (current->role->roletype & GR_ROLE_TPE)
58307+ return 1;
58308+ else
58309+ return 0;
58310+}
58311+
58312+int
58313+gr_handle_rawio(const struct inode *inode)
58314+{
58315+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
58316+ if (inode && S_ISBLK(inode->i_mode) &&
58317+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
58318+ !capable(CAP_SYS_RAWIO))
58319+ return 1;
58320+#endif
58321+ return 0;
58322+}
58323+
58324+static int
58325+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
58326+{
58327+ if (likely(lena != lenb))
58328+ return 0;
58329+
58330+ return !memcmp(a, b, lena);
58331+}
58332+
58333+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
58334+{
58335+ *buflen -= namelen;
58336+ if (*buflen < 0)
58337+ return -ENAMETOOLONG;
58338+ *buffer -= namelen;
58339+ memcpy(*buffer, str, namelen);
58340+ return 0;
58341+}
58342+
58343+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
58344+{
58345+ return prepend(buffer, buflen, name->name, name->len);
58346+}
58347+
58348+static int prepend_path(const struct path *path, struct path *root,
58349+ char **buffer, int *buflen)
58350+{
58351+ struct dentry *dentry = path->dentry;
58352+ struct vfsmount *vfsmnt = path->mnt;
58353+ struct mount *mnt = real_mount(vfsmnt);
58354+ bool slash = false;
58355+ int error = 0;
58356+
58357+ while (dentry != root->dentry || vfsmnt != root->mnt) {
58358+ struct dentry * parent;
58359+
58360+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
58361+ /* Global root? */
58362+ if (!mnt_has_parent(mnt)) {
58363+ goto out;
58364+ }
58365+ dentry = mnt->mnt_mountpoint;
58366+ mnt = mnt->mnt_parent;
58367+ vfsmnt = &mnt->mnt;
58368+ continue;
58369+ }
58370+ parent = dentry->d_parent;
58371+ prefetch(parent);
58372+ spin_lock(&dentry->d_lock);
58373+ error = prepend_name(buffer, buflen, &dentry->d_name);
58374+ spin_unlock(&dentry->d_lock);
58375+ if (!error)
58376+ error = prepend(buffer, buflen, "/", 1);
58377+ if (error)
58378+ break;
58379+
58380+ slash = true;
58381+ dentry = parent;
58382+ }
58383+
58384+out:
58385+ if (!error && !slash)
58386+ error = prepend(buffer, buflen, "/", 1);
58387+
58388+ return error;
58389+}
58390+
58391+/* this must be called with vfsmount_lock and rename_lock held */
58392+
58393+static char *__our_d_path(const struct path *path, struct path *root,
58394+ char *buf, int buflen)
58395+{
58396+ char *res = buf + buflen;
58397+ int error;
58398+
58399+ prepend(&res, &buflen, "\0", 1);
58400+ error = prepend_path(path, root, &res, &buflen);
58401+ if (error)
58402+ return ERR_PTR(error);
58403+
58404+ return res;
58405+}
58406+
58407+static char *
58408+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
58409+{
58410+ char *retval;
58411+
58412+ retval = __our_d_path(path, root, buf, buflen);
58413+ if (unlikely(IS_ERR(retval)))
58414+ retval = strcpy(buf, "<path too long>");
58415+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
58416+ retval[1] = '\0';
58417+
58418+ return retval;
58419+}
58420+
58421+static char *
58422+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
58423+ char *buf, int buflen)
58424+{
58425+ struct path path;
58426+ char *res;
58427+
58428+ path.dentry = (struct dentry *)dentry;
58429+ path.mnt = (struct vfsmount *)vfsmnt;
58430+
58431+ /* we can use real_root.dentry, real_root.mnt, because this is only called
58432+ by the RBAC system */
58433+ res = gen_full_path(&path, &real_root, buf, buflen);
58434+
58435+ return res;
58436+}
58437+
58438+static char *
58439+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
58440+ char *buf, int buflen)
58441+{
58442+ char *res;
58443+ struct path path;
58444+ struct path root;
58445+ struct task_struct *reaper = init_pid_ns.child_reaper;
58446+
58447+ path.dentry = (struct dentry *)dentry;
58448+ path.mnt = (struct vfsmount *)vfsmnt;
58449+
58450+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
58451+ get_fs_root(reaper->fs, &root);
58452+
58453+ br_read_lock(&vfsmount_lock);
58454+ write_seqlock(&rename_lock);
58455+ res = gen_full_path(&path, &root, buf, buflen);
58456+ write_sequnlock(&rename_lock);
58457+ br_read_unlock(&vfsmount_lock);
58458+
58459+ path_put(&root);
58460+ return res;
58461+}
58462+
58463+static char *
58464+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
58465+{
58466+ char *ret;
58467+ br_read_lock(&vfsmount_lock);
58468+ write_seqlock(&rename_lock);
58469+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
58470+ PAGE_SIZE);
58471+ write_sequnlock(&rename_lock);
58472+ br_read_unlock(&vfsmount_lock);
58473+ return ret;
58474+}
58475+
58476+static char *
58477+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
58478+{
58479+ char *ret;
58480+ char *buf;
58481+ int buflen;
58482+
58483+ br_read_lock(&vfsmount_lock);
58484+ write_seqlock(&rename_lock);
58485+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
58486+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
58487+ buflen = (int)(ret - buf);
58488+ if (buflen >= 5)
58489+ prepend(&ret, &buflen, "/proc", 5);
58490+ else
58491+ ret = strcpy(buf, "<path too long>");
58492+ write_sequnlock(&rename_lock);
58493+ br_read_unlock(&vfsmount_lock);
58494+ return ret;
58495+}
58496+
58497+char *
58498+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
58499+{
58500+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
58501+ PAGE_SIZE);
58502+}
58503+
58504+char *
58505+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
58506+{
58507+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
58508+ PAGE_SIZE);
58509+}
58510+
58511+char *
58512+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
58513+{
58514+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
58515+ PAGE_SIZE);
58516+}
58517+
58518+char *
58519+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
58520+{
58521+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
58522+ PAGE_SIZE);
58523+}
58524+
58525+char *
58526+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
58527+{
58528+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
58529+ PAGE_SIZE);
58530+}
58531+
58532+__inline__ __u32
58533+to_gr_audit(const __u32 reqmode)
58534+{
58535+ /* masks off auditable permission flags, then shifts them to create
58536+ auditing flags, and adds the special case of append auditing if
58537+ we're requesting write */
58538+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
58539+}
58540+
58541+struct acl_subject_label *
58542+lookup_subject_map(const struct acl_subject_label *userp)
58543+{
58544+ unsigned int index = gr_shash(userp, subj_map_set.s_size);
58545+ struct subject_map *match;
58546+
58547+ match = subj_map_set.s_hash[index];
58548+
58549+ while (match && match->user != userp)
58550+ match = match->next;
58551+
58552+ if (match != NULL)
58553+ return match->kernel;
58554+ else
58555+ return NULL;
58556+}
58557+
58558+static void
58559+insert_subj_map_entry(struct subject_map *subjmap)
58560+{
58561+ unsigned int index = gr_shash(subjmap->user, subj_map_set.s_size);
58562+ struct subject_map **curr;
58563+
58564+ subjmap->prev = NULL;
58565+
58566+ curr = &subj_map_set.s_hash[index];
58567+ if (*curr != NULL)
58568+ (*curr)->prev = subjmap;
58569+
58570+ subjmap->next = *curr;
58571+ *curr = subjmap;
58572+
58573+ return;
58574+}
58575+
58576+static struct acl_role_label *
58577+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
58578+ const gid_t gid)
58579+{
58580+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
58581+ struct acl_role_label *match;
58582+ struct role_allowed_ip *ipp;
58583+ unsigned int x;
58584+ u32 curr_ip = task->signal->curr_ip;
58585+
58586+ task->signal->saved_ip = curr_ip;
58587+
58588+ match = acl_role_set.r_hash[index];
58589+
58590+ while (match) {
58591+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
58592+ for (x = 0; x < match->domain_child_num; x++) {
58593+ if (match->domain_children[x] == uid)
58594+ goto found;
58595+ }
58596+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
58597+ break;
58598+ match = match->next;
58599+ }
58600+found:
58601+ if (match == NULL) {
58602+ try_group:
58603+ index = gr_rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
58604+ match = acl_role_set.r_hash[index];
58605+
58606+ while (match) {
58607+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
58608+ for (x = 0; x < match->domain_child_num; x++) {
58609+ if (match->domain_children[x] == gid)
58610+ goto found2;
58611+ }
58612+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
58613+ break;
58614+ match = match->next;
58615+ }
58616+found2:
58617+ if (match == NULL)
58618+ match = default_role;
58619+ if (match->allowed_ips == NULL)
58620+ return match;
58621+ else {
58622+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
58623+ if (likely
58624+ ((ntohl(curr_ip) & ipp->netmask) ==
58625+ (ntohl(ipp->addr) & ipp->netmask)))
58626+ return match;
58627+ }
58628+ match = default_role;
58629+ }
58630+ } else if (match->allowed_ips == NULL) {
58631+ return match;
58632+ } else {
58633+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
58634+ if (likely
58635+ ((ntohl(curr_ip) & ipp->netmask) ==
58636+ (ntohl(ipp->addr) & ipp->netmask)))
58637+ return match;
58638+ }
58639+ goto try_group;
58640+ }
58641+
58642+ return match;
58643+}
58644+
58645+struct acl_subject_label *
58646+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
58647+ const struct acl_role_label *role)
58648+{
58649+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
58650+ struct acl_subject_label *match;
58651+
58652+ match = role->subj_hash[index];
58653+
58654+ while (match && (match->inode != ino || match->device != dev ||
58655+ (match->mode & GR_DELETED))) {
58656+ match = match->next;
58657+ }
58658+
58659+ if (match && !(match->mode & GR_DELETED))
58660+ return match;
58661+ else
58662+ return NULL;
58663+}
58664+
58665+struct acl_subject_label *
58666+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
58667+ const struct acl_role_label *role)
58668+{
58669+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
58670+ struct acl_subject_label *match;
58671+
58672+ match = role->subj_hash[index];
58673+
58674+ while (match && (match->inode != ino || match->device != dev ||
58675+ !(match->mode & GR_DELETED))) {
58676+ match = match->next;
58677+ }
58678+
58679+ if (match && (match->mode & GR_DELETED))
58680+ return match;
58681+ else
58682+ return NULL;
58683+}
58684+
58685+static struct acl_object_label *
58686+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
58687+ const struct acl_subject_label *subj)
58688+{
58689+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
58690+ struct acl_object_label *match;
58691+
58692+ match = subj->obj_hash[index];
58693+
58694+ while (match && (match->inode != ino || match->device != dev ||
58695+ (match->mode & GR_DELETED))) {
58696+ match = match->next;
58697+ }
58698+
58699+ if (match && !(match->mode & GR_DELETED))
58700+ return match;
58701+ else
58702+ return NULL;
58703+}
58704+
58705+static struct acl_object_label *
58706+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
58707+ const struct acl_subject_label *subj)
58708+{
58709+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
58710+ struct acl_object_label *match;
58711+
58712+ match = subj->obj_hash[index];
58713+
58714+ while (match && (match->inode != ino || match->device != dev ||
58715+ !(match->mode & GR_DELETED))) {
58716+ match = match->next;
58717+ }
58718+
58719+ if (match && (match->mode & GR_DELETED))
58720+ return match;
58721+
58722+ match = subj->obj_hash[index];
58723+
58724+ while (match && (match->inode != ino || match->device != dev ||
58725+ (match->mode & GR_DELETED))) {
58726+ match = match->next;
58727+ }
58728+
58729+ if (match && !(match->mode & GR_DELETED))
58730+ return match;
58731+ else
58732+ return NULL;
58733+}
58734+
58735+static struct name_entry *
58736+lookup_name_entry(const char *name)
58737+{
58738+ unsigned int len = strlen(name);
58739+ unsigned int key = full_name_hash(name, len);
58740+ unsigned int index = key % name_set.n_size;
58741+ struct name_entry *match;
58742+
58743+ match = name_set.n_hash[index];
58744+
58745+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
58746+ match = match->next;
58747+
58748+ return match;
58749+}
58750+
58751+static struct name_entry *
58752+lookup_name_entry_create(const char *name)
58753+{
58754+ unsigned int len = strlen(name);
58755+ unsigned int key = full_name_hash(name, len);
58756+ unsigned int index = key % name_set.n_size;
58757+ struct name_entry *match;
58758+
58759+ match = name_set.n_hash[index];
58760+
58761+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
58762+ !match->deleted))
58763+ match = match->next;
58764+
58765+ if (match && match->deleted)
58766+ return match;
58767+
58768+ match = name_set.n_hash[index];
58769+
58770+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
58771+ match->deleted))
58772+ match = match->next;
58773+
58774+ if (match && !match->deleted)
58775+ return match;
58776+ else
58777+ return NULL;
58778+}
58779+
58780+static struct inodev_entry *
58781+lookup_inodev_entry(const ino_t ino, const dev_t dev)
58782+{
58783+ unsigned int index = gr_fhash(ino, dev, inodev_set.i_size);
58784+ struct inodev_entry *match;
58785+
58786+ match = inodev_set.i_hash[index];
58787+
58788+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
58789+ match = match->next;
58790+
58791+ return match;
58792+}
58793+
58794+static void
58795+insert_inodev_entry(struct inodev_entry *entry)
58796+{
58797+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
58798+ inodev_set.i_size);
58799+ struct inodev_entry **curr;
58800+
58801+ entry->prev = NULL;
58802+
58803+ curr = &inodev_set.i_hash[index];
58804+ if (*curr != NULL)
58805+ (*curr)->prev = entry;
58806+
58807+ entry->next = *curr;
58808+ *curr = entry;
58809+
58810+ return;
58811+}
58812+
58813+static void
58814+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
58815+{
58816+ unsigned int index =
58817+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
58818+ struct acl_role_label **curr;
58819+ struct acl_role_label *tmp, *tmp2;
58820+
58821+ curr = &acl_role_set.r_hash[index];
58822+
58823+ /* simple case, slot is empty, just set it to our role */
58824+ if (*curr == NULL) {
58825+ *curr = role;
58826+ } else {
58827+ /* example:
58828+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
58829+ 2 -> 3
58830+ */
58831+ /* first check to see if we can already be reached via this slot */
58832+ tmp = *curr;
58833+ while (tmp && tmp != role)
58834+ tmp = tmp->next;
58835+ if (tmp == role) {
58836+ /* we don't need to add ourselves to this slot's chain */
58837+ return;
58838+ }
58839+ /* we need to add ourselves to this chain, two cases */
58840+ if (role->next == NULL) {
58841+ /* simple case, append the current chain to our role */
58842+ role->next = *curr;
58843+ *curr = role;
58844+ } else {
58845+ /* 1 -> 2 -> 3 -> 4
58846+ 2 -> 3 -> 4
58847+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
58848+ */
58849+ /* trickier case: walk our role's chain until we find
58850+ the role for the start of the current slot's chain */
58851+ tmp = role;
58852+ tmp2 = *curr;
58853+ while (tmp->next && tmp->next != tmp2)
58854+ tmp = tmp->next;
58855+ if (tmp->next == tmp2) {
58856+ /* from example above, we found 3, so just
58857+ replace this slot's chain with ours */
58858+ *curr = role;
58859+ } else {
58860+ /* we didn't find a subset of our role's chain
58861+ in the current slot's chain, so append their
58862+ chain to ours, and set us as the first role in
58863+ the slot's chain
58864+
58865+ we could fold this case with the case above,
58866+ but making it explicit for clarity
58867+ */
58868+ tmp->next = tmp2;
58869+ *curr = role;
58870+ }
58871+ }
58872+ }
58873+
58874+ return;
58875+}
58876+
58877+static void
58878+insert_acl_role_label(struct acl_role_label *role)
58879+{
58880+ int i;
58881+
58882+ if (role_list == NULL) {
58883+ role_list = role;
58884+ role->prev = NULL;
58885+ } else {
58886+ role->prev = role_list;
58887+ role_list = role;
58888+ }
58889+
58890+ /* used for hash chains */
58891+ role->next = NULL;
58892+
58893+ if (role->roletype & GR_ROLE_DOMAIN) {
58894+ for (i = 0; i < role->domain_child_num; i++)
58895+ __insert_acl_role_label(role, role->domain_children[i]);
58896+ } else
58897+ __insert_acl_role_label(role, role->uidgid);
58898+}
58899+
58900+static int
58901+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
58902+{
58903+ struct name_entry **curr, *nentry;
58904+ struct inodev_entry *ientry;
58905+ unsigned int len = strlen(name);
58906+ unsigned int key = full_name_hash(name, len);
58907+ unsigned int index = key % name_set.n_size;
58908+
58909+ curr = &name_set.n_hash[index];
58910+
58911+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
58912+ curr = &((*curr)->next);
58913+
58914+ if (*curr != NULL)
58915+ return 1;
58916+
58917+ nentry = acl_alloc(sizeof (struct name_entry));
58918+ if (nentry == NULL)
58919+ return 0;
58920+ ientry = acl_alloc(sizeof (struct inodev_entry));
58921+ if (ientry == NULL)
58922+ return 0;
58923+ ientry->nentry = nentry;
58924+
58925+ nentry->key = key;
58926+ nentry->name = name;
58927+ nentry->inode = inode;
58928+ nentry->device = device;
58929+ nentry->len = len;
58930+ nentry->deleted = deleted;
58931+
58932+ nentry->prev = NULL;
58933+ curr = &name_set.n_hash[index];
58934+ if (*curr != NULL)
58935+ (*curr)->prev = nentry;
58936+ nentry->next = *curr;
58937+ *curr = nentry;
58938+
58939+ /* insert us into the table searchable by inode/dev */
58940+ insert_inodev_entry(ientry);
58941+
58942+ return 1;
58943+}
58944+
58945+static void
58946+insert_acl_obj_label(struct acl_object_label *obj,
58947+ struct acl_subject_label *subj)
58948+{
58949+ unsigned int index =
58950+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
58951+ struct acl_object_label **curr;
58952+
58953+
58954+ obj->prev = NULL;
58955+
58956+ curr = &subj->obj_hash[index];
58957+ if (*curr != NULL)
58958+ (*curr)->prev = obj;
58959+
58960+ obj->next = *curr;
58961+ *curr = obj;
58962+
58963+ return;
58964+}
58965+
58966+static void
58967+insert_acl_subj_label(struct acl_subject_label *obj,
58968+ struct acl_role_label *role)
58969+{
58970+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
58971+ struct acl_subject_label **curr;
58972+
58973+ obj->prev = NULL;
58974+
58975+ curr = &role->subj_hash[index];
58976+ if (*curr != NULL)
58977+ (*curr)->prev = obj;
58978+
58979+ obj->next = *curr;
58980+ *curr = obj;
58981+
58982+ return;
58983+}
58984+
58985+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
58986+
58987+static void *
58988+create_table(__u32 * len, int elementsize)
58989+{
58990+ unsigned int table_sizes[] = {
58991+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
58992+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
58993+ 4194301, 8388593, 16777213, 33554393, 67108859
58994+ };
58995+ void *newtable = NULL;
58996+ unsigned int pwr = 0;
58997+
58998+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
58999+ table_sizes[pwr] <= *len)
59000+ pwr++;
59001+
59002+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
59003+ return newtable;
59004+
59005+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
59006+ newtable =
59007+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
59008+ else
59009+ newtable = vmalloc(table_sizes[pwr] * elementsize);
59010+
59011+ *len = table_sizes[pwr];
59012+
59013+ return newtable;
59014+}
59015+
59016+static int
59017+init_variables(const struct gr_arg *arg)
59018+{
59019+ struct task_struct *reaper = init_pid_ns.child_reaper;
59020+ unsigned int stacksize;
59021+
59022+ subj_map_set.s_size = arg->role_db.num_subjects;
59023+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
59024+ name_set.n_size = arg->role_db.num_objects;
59025+ inodev_set.i_size = arg->role_db.num_objects;
59026+
59027+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
59028+ !name_set.n_size || !inodev_set.i_size)
59029+ return 1;
59030+
59031+ if (!gr_init_uidset())
59032+ return 1;
59033+
59034+ /* set up the stack that holds allocation info */
59035+
59036+ stacksize = arg->role_db.num_pointers + 5;
59037+
59038+ if (!acl_alloc_stack_init(stacksize))
59039+ return 1;
59040+
59041+ /* grab reference for the real root dentry and vfsmount */
59042+ get_fs_root(reaper->fs, &real_root);
59043+
59044+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59045+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
59046+#endif
59047+
59048+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
59049+ if (fakefs_obj_rw == NULL)
59050+ return 1;
59051+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
59052+
59053+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
59054+ if (fakefs_obj_rwx == NULL)
59055+ return 1;
59056+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
59057+
59058+ subj_map_set.s_hash =
59059+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
59060+ acl_role_set.r_hash =
59061+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
59062+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
59063+ inodev_set.i_hash =
59064+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
59065+
59066+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
59067+ !name_set.n_hash || !inodev_set.i_hash)
59068+ return 1;
59069+
59070+ memset(subj_map_set.s_hash, 0,
59071+ sizeof(struct subject_map *) * subj_map_set.s_size);
59072+ memset(acl_role_set.r_hash, 0,
59073+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
59074+ memset(name_set.n_hash, 0,
59075+ sizeof (struct name_entry *) * name_set.n_size);
59076+ memset(inodev_set.i_hash, 0,
59077+ sizeof (struct inodev_entry *) * inodev_set.i_size);
59078+
59079+ return 0;
59080+}
59081+
59082+/* free information not needed after startup
59083+ currently contains user->kernel pointer mappings for subjects
59084+*/
59085+
59086+static void
59087+free_init_variables(void)
59088+{
59089+ __u32 i;
59090+
59091+ if (subj_map_set.s_hash) {
59092+ for (i = 0; i < subj_map_set.s_size; i++) {
59093+ if (subj_map_set.s_hash[i]) {
59094+ kfree(subj_map_set.s_hash[i]);
59095+ subj_map_set.s_hash[i] = NULL;
59096+ }
59097+ }
59098+
59099+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
59100+ PAGE_SIZE)
59101+ kfree(subj_map_set.s_hash);
59102+ else
59103+ vfree(subj_map_set.s_hash);
59104+ }
59105+
59106+ return;
59107+}
59108+
59109+static void
59110+free_variables(void)
59111+{
59112+ struct acl_subject_label *s;
59113+ struct acl_role_label *r;
59114+ struct task_struct *task, *task2;
59115+ unsigned int x;
59116+
59117+ gr_clear_learn_entries();
59118+
59119+ read_lock(&tasklist_lock);
59120+ do_each_thread(task2, task) {
59121+ task->acl_sp_role = 0;
59122+ task->acl_role_id = 0;
59123+ task->acl = NULL;
59124+ task->role = NULL;
59125+ } while_each_thread(task2, task);
59126+ read_unlock(&tasklist_lock);
59127+
59128+ /* release the reference to the real root dentry and vfsmount */
59129+ path_put(&real_root);
59130+ memset(&real_root, 0, sizeof(real_root));
59131+
59132+ /* free all object hash tables */
59133+
59134+ FOR_EACH_ROLE_START(r)
59135+ if (r->subj_hash == NULL)
59136+ goto next_role;
59137+ FOR_EACH_SUBJECT_START(r, s, x)
59138+ if (s->obj_hash == NULL)
59139+ break;
59140+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
59141+ kfree(s->obj_hash);
59142+ else
59143+ vfree(s->obj_hash);
59144+ FOR_EACH_SUBJECT_END(s, x)
59145+ FOR_EACH_NESTED_SUBJECT_START(r, s)
59146+ if (s->obj_hash == NULL)
59147+ break;
59148+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
59149+ kfree(s->obj_hash);
59150+ else
59151+ vfree(s->obj_hash);
59152+ FOR_EACH_NESTED_SUBJECT_END(s)
59153+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
59154+ kfree(r->subj_hash);
59155+ else
59156+ vfree(r->subj_hash);
59157+ r->subj_hash = NULL;
59158+next_role:
59159+ FOR_EACH_ROLE_END(r)
59160+
59161+ acl_free_all();
59162+
59163+ if (acl_role_set.r_hash) {
59164+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
59165+ PAGE_SIZE)
59166+ kfree(acl_role_set.r_hash);
59167+ else
59168+ vfree(acl_role_set.r_hash);
59169+ }
59170+ if (name_set.n_hash) {
59171+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
59172+ PAGE_SIZE)
59173+ kfree(name_set.n_hash);
59174+ else
59175+ vfree(name_set.n_hash);
59176+ }
59177+
59178+ if (inodev_set.i_hash) {
59179+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
59180+ PAGE_SIZE)
59181+ kfree(inodev_set.i_hash);
59182+ else
59183+ vfree(inodev_set.i_hash);
59184+ }
59185+
59186+ gr_free_uidset();
59187+
59188+ memset(&name_set, 0, sizeof (struct name_db));
59189+ memset(&inodev_set, 0, sizeof (struct inodev_db));
59190+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
59191+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
59192+
59193+ default_role = NULL;
59194+ kernel_role = NULL;
59195+ role_list = NULL;
59196+
59197+ return;
59198+}
59199+
59200+static __u32
59201+count_user_objs(struct acl_object_label *userp)
59202+{
59203+ struct acl_object_label o_tmp;
59204+ __u32 num = 0;
59205+
59206+ while (userp) {
59207+ if (copy_from_user(&o_tmp, userp,
59208+ sizeof (struct acl_object_label)))
59209+ break;
59210+
59211+ userp = o_tmp.prev;
59212+ num++;
59213+ }
59214+
59215+ return num;
59216+}
59217+
59218+static struct acl_subject_label *
59219+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
59220+
59221+static int
59222+copy_user_glob(struct acl_object_label *obj)
59223+{
59224+ struct acl_object_label *g_tmp, **guser;
59225+ unsigned int len;
59226+ char *tmp;
59227+
59228+ if (obj->globbed == NULL)
59229+ return 0;
59230+
59231+ guser = &obj->globbed;
59232+ while (*guser) {
59233+ g_tmp = (struct acl_object_label *)
59234+ acl_alloc(sizeof (struct acl_object_label));
59235+ if (g_tmp == NULL)
59236+ return -ENOMEM;
59237+
59238+ if (copy_from_user(g_tmp, *guser,
59239+ sizeof (struct acl_object_label)))
59240+ return -EFAULT;
59241+
59242+ len = strnlen_user(g_tmp->filename, PATH_MAX);
59243+
59244+ if (!len || len >= PATH_MAX)
59245+ return -EINVAL;
59246+
59247+ if ((tmp = (char *) acl_alloc(len)) == NULL)
59248+ return -ENOMEM;
59249+
59250+ if (copy_from_user(tmp, g_tmp->filename, len))
59251+ return -EFAULT;
59252+ tmp[len-1] = '\0';
59253+ g_tmp->filename = tmp;
59254+
59255+ *guser = g_tmp;
59256+ guser = &(g_tmp->next);
59257+ }
59258+
59259+ return 0;
59260+}
59261+
59262+static int
59263+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
59264+ struct acl_role_label *role)
59265+{
59266+ struct acl_object_label *o_tmp;
59267+ unsigned int len;
59268+ int ret;
59269+ char *tmp;
59270+
59271+ while (userp) {
59272+ if ((o_tmp = (struct acl_object_label *)
59273+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
59274+ return -ENOMEM;
59275+
59276+ if (copy_from_user(o_tmp, userp,
59277+ sizeof (struct acl_object_label)))
59278+ return -EFAULT;
59279+
59280+ userp = o_tmp->prev;
59281+
59282+ len = strnlen_user(o_tmp->filename, PATH_MAX);
59283+
59284+ if (!len || len >= PATH_MAX)
59285+ return -EINVAL;
59286+
59287+ if ((tmp = (char *) acl_alloc(len)) == NULL)
59288+ return -ENOMEM;
59289+
59290+ if (copy_from_user(tmp, o_tmp->filename, len))
59291+ return -EFAULT;
59292+ tmp[len-1] = '\0';
59293+ o_tmp->filename = tmp;
59294+
59295+ insert_acl_obj_label(o_tmp, subj);
59296+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
59297+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
59298+ return -ENOMEM;
59299+
59300+ ret = copy_user_glob(o_tmp);
59301+ if (ret)
59302+ return ret;
59303+
59304+ if (o_tmp->nested) {
59305+ int already_copied;
59306+
59307+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
59308+ if (IS_ERR(o_tmp->nested))
59309+ return PTR_ERR(o_tmp->nested);
59310+
59311+ /* insert into nested subject list if we haven't copied this one yet
59312+ to prevent duplicate entries */
59313+ if (!already_copied) {
59314+ o_tmp->nested->next = role->hash->first;
59315+ role->hash->first = o_tmp->nested;
59316+ }
59317+ }
59318+ }
59319+
59320+ return 0;
59321+}
59322+
59323+static __u32
59324+count_user_subjs(struct acl_subject_label *userp)
59325+{
59326+ struct acl_subject_label s_tmp;
59327+ __u32 num = 0;
59328+
59329+ while (userp) {
59330+ if (copy_from_user(&s_tmp, userp,
59331+ sizeof (struct acl_subject_label)))
59332+ break;
59333+
59334+ userp = s_tmp.prev;
59335+ }
59336+
59337+ return num;
59338+}
59339+
59340+static int
59341+copy_user_allowedips(struct acl_role_label *rolep)
59342+{
59343+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
59344+
59345+ ruserip = rolep->allowed_ips;
59346+
59347+ while (ruserip) {
59348+ rlast = rtmp;
59349+
59350+ if ((rtmp = (struct role_allowed_ip *)
59351+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
59352+ return -ENOMEM;
59353+
59354+ if (copy_from_user(rtmp, ruserip,
59355+ sizeof (struct role_allowed_ip)))
59356+ return -EFAULT;
59357+
59358+ ruserip = rtmp->prev;
59359+
59360+ if (!rlast) {
59361+ rtmp->prev = NULL;
59362+ rolep->allowed_ips = rtmp;
59363+ } else {
59364+ rlast->next = rtmp;
59365+ rtmp->prev = rlast;
59366+ }
59367+
59368+ if (!ruserip)
59369+ rtmp->next = NULL;
59370+ }
59371+
59372+ return 0;
59373+}
59374+
59375+static int
59376+copy_user_transitions(struct acl_role_label *rolep)
59377+{
59378+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
59379+
59380+ unsigned int len;
59381+ char *tmp;
59382+
59383+ rusertp = rolep->transitions;
59384+
59385+ while (rusertp) {
59386+ rlast = rtmp;
59387+
59388+ if ((rtmp = (struct role_transition *)
59389+ acl_alloc(sizeof (struct role_transition))) == NULL)
59390+ return -ENOMEM;
59391+
59392+ if (copy_from_user(rtmp, rusertp,
59393+ sizeof (struct role_transition)))
59394+ return -EFAULT;
59395+
59396+ rusertp = rtmp->prev;
59397+
59398+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
59399+
59400+ if (!len || len >= GR_SPROLE_LEN)
59401+ return -EINVAL;
59402+
59403+ if ((tmp = (char *) acl_alloc(len)) == NULL)
59404+ return -ENOMEM;
59405+
59406+ if (copy_from_user(tmp, rtmp->rolename, len))
59407+ return -EFAULT;
59408+ tmp[len-1] = '\0';
59409+ rtmp->rolename = tmp;
59410+
59411+ if (!rlast) {
59412+ rtmp->prev = NULL;
59413+ rolep->transitions = rtmp;
59414+ } else {
59415+ rlast->next = rtmp;
59416+ rtmp->prev = rlast;
59417+ }
59418+
59419+ if (!rusertp)
59420+ rtmp->next = NULL;
59421+ }
59422+
59423+ return 0;
59424+}
59425+
59426+static struct acl_subject_label *
59427+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
59428+{
59429+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
59430+ unsigned int len;
59431+ char *tmp;
59432+ __u32 num_objs;
59433+ struct acl_ip_label **i_tmp, *i_utmp2;
59434+ struct gr_hash_struct ghash;
59435+ struct subject_map *subjmap;
59436+ unsigned int i_num;
59437+ int err;
59438+
59439+ if (already_copied != NULL)
59440+ *already_copied = 0;
59441+
59442+ s_tmp = lookup_subject_map(userp);
59443+
59444+ /* we've already copied this subject into the kernel, just return
59445+ the reference to it, and don't copy it over again
59446+ */
59447+ if (s_tmp) {
59448+ if (already_copied != NULL)
59449+ *already_copied = 1;
59450+ return(s_tmp);
59451+ }
59452+
59453+ if ((s_tmp = (struct acl_subject_label *)
59454+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
59455+ return ERR_PTR(-ENOMEM);
59456+
59457+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
59458+ if (subjmap == NULL)
59459+ return ERR_PTR(-ENOMEM);
59460+
59461+ subjmap->user = userp;
59462+ subjmap->kernel = s_tmp;
59463+ insert_subj_map_entry(subjmap);
59464+
59465+ if (copy_from_user(s_tmp, userp,
59466+ sizeof (struct acl_subject_label)))
59467+ return ERR_PTR(-EFAULT);
59468+
59469+ len = strnlen_user(s_tmp->filename, PATH_MAX);
59470+
59471+ if (!len || len >= PATH_MAX)
59472+ return ERR_PTR(-EINVAL);
59473+
59474+ if ((tmp = (char *) acl_alloc(len)) == NULL)
59475+ return ERR_PTR(-ENOMEM);
59476+
59477+ if (copy_from_user(tmp, s_tmp->filename, len))
59478+ return ERR_PTR(-EFAULT);
59479+ tmp[len-1] = '\0';
59480+ s_tmp->filename = tmp;
59481+
59482+ if (!strcmp(s_tmp->filename, "/"))
59483+ role->root_label = s_tmp;
59484+
59485+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
59486+ return ERR_PTR(-EFAULT);
59487+
59488+ /* copy user and group transition tables */
59489+
59490+ if (s_tmp->user_trans_num) {
59491+ uid_t *uidlist;
59492+
59493+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
59494+ if (uidlist == NULL)
59495+ return ERR_PTR(-ENOMEM);
59496+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
59497+ return ERR_PTR(-EFAULT);
59498+
59499+ s_tmp->user_transitions = uidlist;
59500+ }
59501+
59502+ if (s_tmp->group_trans_num) {
59503+ gid_t *gidlist;
59504+
59505+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
59506+ if (gidlist == NULL)
59507+ return ERR_PTR(-ENOMEM);
59508+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
59509+ return ERR_PTR(-EFAULT);
59510+
59511+ s_tmp->group_transitions = gidlist;
59512+ }
59513+
59514+ /* set up object hash table */
59515+ num_objs = count_user_objs(ghash.first);
59516+
59517+ s_tmp->obj_hash_size = num_objs;
59518+ s_tmp->obj_hash =
59519+ (struct acl_object_label **)
59520+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
59521+
59522+ if (!s_tmp->obj_hash)
59523+ return ERR_PTR(-ENOMEM);
59524+
59525+ memset(s_tmp->obj_hash, 0,
59526+ s_tmp->obj_hash_size *
59527+ sizeof (struct acl_object_label *));
59528+
59529+ /* add in objects */
59530+ err = copy_user_objs(ghash.first, s_tmp, role);
59531+
59532+ if (err)
59533+ return ERR_PTR(err);
59534+
59535+ /* set pointer for parent subject */
59536+ if (s_tmp->parent_subject) {
59537+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
59538+
59539+ if (IS_ERR(s_tmp2))
59540+ return s_tmp2;
59541+
59542+ s_tmp->parent_subject = s_tmp2;
59543+ }
59544+
59545+ /* add in ip acls */
59546+
59547+ if (!s_tmp->ip_num) {
59548+ s_tmp->ips = NULL;
59549+ goto insert;
59550+ }
59551+
59552+ i_tmp =
59553+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
59554+ sizeof (struct acl_ip_label *));
59555+
59556+ if (!i_tmp)
59557+ return ERR_PTR(-ENOMEM);
59558+
59559+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
59560+ *(i_tmp + i_num) =
59561+ (struct acl_ip_label *)
59562+ acl_alloc(sizeof (struct acl_ip_label));
59563+ if (!*(i_tmp + i_num))
59564+ return ERR_PTR(-ENOMEM);
59565+
59566+ if (copy_from_user
59567+ (&i_utmp2, s_tmp->ips + i_num,
59568+ sizeof (struct acl_ip_label *)))
59569+ return ERR_PTR(-EFAULT);
59570+
59571+ if (copy_from_user
59572+ (*(i_tmp + i_num), i_utmp2,
59573+ sizeof (struct acl_ip_label)))
59574+ return ERR_PTR(-EFAULT);
59575+
59576+ if ((*(i_tmp + i_num))->iface == NULL)
59577+ continue;
59578+
59579+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
59580+ if (!len || len >= IFNAMSIZ)
59581+ return ERR_PTR(-EINVAL);
59582+ tmp = acl_alloc(len);
59583+ if (tmp == NULL)
59584+ return ERR_PTR(-ENOMEM);
59585+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
59586+ return ERR_PTR(-EFAULT);
59587+ (*(i_tmp + i_num))->iface = tmp;
59588+ }
59589+
59590+ s_tmp->ips = i_tmp;
59591+
59592+insert:
59593+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
59594+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
59595+ return ERR_PTR(-ENOMEM);
59596+
59597+ return s_tmp;
59598+}
59599+
59600+static int
59601+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
59602+{
59603+ struct acl_subject_label s_pre;
59604+ struct acl_subject_label * ret;
59605+ int err;
59606+
59607+ while (userp) {
59608+ if (copy_from_user(&s_pre, userp,
59609+ sizeof (struct acl_subject_label)))
59610+ return -EFAULT;
59611+
59612+ ret = do_copy_user_subj(userp, role, NULL);
59613+
59614+ err = PTR_ERR(ret);
59615+ if (IS_ERR(ret))
59616+ return err;
59617+
59618+ insert_acl_subj_label(ret, role);
59619+
59620+ userp = s_pre.prev;
59621+ }
59622+
59623+ return 0;
59624+}
59625+
59626+static int
59627+copy_user_acl(struct gr_arg *arg)
59628+{
59629+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
59630+ struct acl_subject_label *subj_list;
59631+ struct sprole_pw *sptmp;
59632+ struct gr_hash_struct *ghash;
59633+ uid_t *domainlist;
59634+ unsigned int r_num;
59635+ unsigned int len;
59636+ char *tmp;
59637+ int err = 0;
59638+ __u16 i;
59639+ __u32 num_subjs;
59640+
59641+ /* we need a default and kernel role */
59642+ if (arg->role_db.num_roles < 2)
59643+ return -EINVAL;
59644+
59645+ /* copy special role authentication info from userspace */
59646+
59647+ num_sprole_pws = arg->num_sprole_pws;
59648+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
59649+
59650+ if (!acl_special_roles && num_sprole_pws)
59651+ return -ENOMEM;
59652+
59653+ for (i = 0; i < num_sprole_pws; i++) {
59654+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
59655+ if (!sptmp)
59656+ return -ENOMEM;
59657+ if (copy_from_user(sptmp, arg->sprole_pws + i,
59658+ sizeof (struct sprole_pw)))
59659+ return -EFAULT;
59660+
59661+ len = strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
59662+
59663+ if (!len || len >= GR_SPROLE_LEN)
59664+ return -EINVAL;
59665+
59666+ if ((tmp = (char *) acl_alloc(len)) == NULL)
59667+ return -ENOMEM;
59668+
59669+ if (copy_from_user(tmp, sptmp->rolename, len))
59670+ return -EFAULT;
59671+
59672+ tmp[len-1] = '\0';
59673+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59674+ printk(KERN_ALERT "Copying special role %s\n", tmp);
59675+#endif
59676+ sptmp->rolename = tmp;
59677+ acl_special_roles[i] = sptmp;
59678+ }
59679+
59680+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
59681+
59682+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
59683+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
59684+
59685+ if (!r_tmp)
59686+ return -ENOMEM;
59687+
59688+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
59689+ sizeof (struct acl_role_label *)))
59690+ return -EFAULT;
59691+
59692+ if (copy_from_user(r_tmp, r_utmp2,
59693+ sizeof (struct acl_role_label)))
59694+ return -EFAULT;
59695+
59696+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
59697+
59698+ if (!len || len >= PATH_MAX)
59699+ return -EINVAL;
59700+
59701+ if ((tmp = (char *) acl_alloc(len)) == NULL)
59702+ return -ENOMEM;
59703+
59704+ if (copy_from_user(tmp, r_tmp->rolename, len))
59705+ return -EFAULT;
59706+
59707+ tmp[len-1] = '\0';
59708+ r_tmp->rolename = tmp;
59709+
59710+ if (!strcmp(r_tmp->rolename, "default")
59711+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
59712+ default_role = r_tmp;
59713+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
59714+ kernel_role = r_tmp;
59715+ }
59716+
59717+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
59718+ return -ENOMEM;
59719+
59720+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct)))
59721+ return -EFAULT;
59722+
59723+ r_tmp->hash = ghash;
59724+
59725+ num_subjs = count_user_subjs(r_tmp->hash->first);
59726+
59727+ r_tmp->subj_hash_size = num_subjs;
59728+ r_tmp->subj_hash =
59729+ (struct acl_subject_label **)
59730+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
59731+
59732+ if (!r_tmp->subj_hash)
59733+ return -ENOMEM;
59734+
59735+ err = copy_user_allowedips(r_tmp);
59736+ if (err)
59737+ return err;
59738+
59739+ /* copy domain info */
59740+ if (r_tmp->domain_children != NULL) {
59741+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
59742+ if (domainlist == NULL)
59743+ return -ENOMEM;
59744+
59745+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
59746+ return -EFAULT;
59747+
59748+ r_tmp->domain_children = domainlist;
59749+ }
59750+
59751+ err = copy_user_transitions(r_tmp);
59752+ if (err)
59753+ return err;
59754+
59755+ memset(r_tmp->subj_hash, 0,
59756+ r_tmp->subj_hash_size *
59757+ sizeof (struct acl_subject_label *));
59758+
59759+ /* acquire the list of subjects, then NULL out
59760+ the list prior to parsing the subjects for this role,
59761+ as during this parsing the list is replaced with a list
59762+ of *nested* subjects for the role
59763+ */
59764+ subj_list = r_tmp->hash->first;
59765+
59766+ /* set nested subject list to null */
59767+ r_tmp->hash->first = NULL;
59768+
59769+ err = copy_user_subjs(subj_list, r_tmp);
59770+
59771+ if (err)
59772+ return err;
59773+
59774+ insert_acl_role_label(r_tmp);
59775+ }
59776+
59777+ if (default_role == NULL || kernel_role == NULL)
59778+ return -EINVAL;
59779+
59780+ return err;
59781+}
59782+
59783+static int
59784+gracl_init(struct gr_arg *args)
59785+{
59786+ int error = 0;
59787+
59788+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
59789+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
59790+
59791+ if (init_variables(args)) {
59792+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
59793+ error = -ENOMEM;
59794+ free_variables();
59795+ goto out;
59796+ }
59797+
59798+ error = copy_user_acl(args);
59799+ free_init_variables();
59800+ if (error) {
59801+ free_variables();
59802+ goto out;
59803+ }
59804+
59805+ if ((error = gr_set_acls(0))) {
59806+ free_variables();
59807+ goto out;
59808+ }
59809+
59810+ pax_open_kernel();
59811+ gr_status |= GR_READY;
59812+ pax_close_kernel();
59813+
59814+ out:
59815+ return error;
59816+}
59817+
59818+/* derived from glibc fnmatch() 0: match, 1: no match*/
59819+
59820+static int
59821+glob_match(const char *p, const char *n)
59822+{
59823+ char c;
59824+
59825+ while ((c = *p++) != '\0') {
59826+ switch (c) {
59827+ case '?':
59828+ if (*n == '\0')
59829+ return 1;
59830+ else if (*n == '/')
59831+ return 1;
59832+ break;
59833+ case '\\':
59834+ if (*n != c)
59835+ return 1;
59836+ break;
59837+ case '*':
59838+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
59839+ if (*n == '/')
59840+ return 1;
59841+ else if (c == '?') {
59842+ if (*n == '\0')
59843+ return 1;
59844+ else
59845+ ++n;
59846+ }
59847+ }
59848+ if (c == '\0') {
59849+ return 0;
59850+ } else {
59851+ const char *endp;
59852+
59853+ if ((endp = strchr(n, '/')) == NULL)
59854+ endp = n + strlen(n);
59855+
59856+ if (c == '[') {
59857+ for (--p; n < endp; ++n)
59858+ if (!glob_match(p, n))
59859+ return 0;
59860+ } else if (c == '/') {
59861+ while (*n != '\0' && *n != '/')
59862+ ++n;
59863+ if (*n == '/' && !glob_match(p, n + 1))
59864+ return 0;
59865+ } else {
59866+ for (--p; n < endp; ++n)
59867+ if (*n == c && !glob_match(p, n))
59868+ return 0;
59869+ }
59870+
59871+ return 1;
59872+ }
59873+ case '[':
59874+ {
59875+ int not;
59876+ char cold;
59877+
59878+ if (*n == '\0' || *n == '/')
59879+ return 1;
59880+
59881+ not = (*p == '!' || *p == '^');
59882+ if (not)
59883+ ++p;
59884+
59885+ c = *p++;
59886+ for (;;) {
59887+ unsigned char fn = (unsigned char)*n;
59888+
59889+ if (c == '\0')
59890+ return 1;
59891+ else {
59892+ if (c == fn)
59893+ goto matched;
59894+ cold = c;
59895+ c = *p++;
59896+
59897+ if (c == '-' && *p != ']') {
59898+ unsigned char cend = *p++;
59899+
59900+ if (cend == '\0')
59901+ return 1;
59902+
59903+ if (cold <= fn && fn <= cend)
59904+ goto matched;
59905+
59906+ c = *p++;
59907+ }
59908+ }
59909+
59910+ if (c == ']')
59911+ break;
59912+ }
59913+ if (!not)
59914+ return 1;
59915+ break;
59916+ matched:
59917+ while (c != ']') {
59918+ if (c == '\0')
59919+ return 1;
59920+
59921+ c = *p++;
59922+ }
59923+ if (not)
59924+ return 1;
59925+ }
59926+ break;
59927+ default:
59928+ if (c != *n)
59929+ return 1;
59930+ }
59931+
59932+ ++n;
59933+ }
59934+
59935+ if (*n == '\0')
59936+ return 0;
59937+
59938+ if (*n == '/')
59939+ return 0;
59940+
59941+ return 1;
59942+}
59943+
59944+static struct acl_object_label *
59945+chk_glob_label(struct acl_object_label *globbed,
59946+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
59947+{
59948+ struct acl_object_label *tmp;
59949+
59950+ if (*path == NULL)
59951+ *path = gr_to_filename_nolock(dentry, mnt);
59952+
59953+ tmp = globbed;
59954+
59955+ while (tmp) {
59956+ if (!glob_match(tmp->filename, *path))
59957+ return tmp;
59958+ tmp = tmp->next;
59959+ }
59960+
59961+ return NULL;
59962+}
59963+
59964+static struct acl_object_label *
59965+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
59966+ const ino_t curr_ino, const dev_t curr_dev,
59967+ const struct acl_subject_label *subj, char **path, const int checkglob)
59968+{
59969+ struct acl_subject_label *tmpsubj;
59970+ struct acl_object_label *retval;
59971+ struct acl_object_label *retval2;
59972+
59973+ tmpsubj = (struct acl_subject_label *) subj;
59974+ read_lock(&gr_inode_lock);
59975+ do {
59976+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
59977+ if (retval) {
59978+ if (checkglob && retval->globbed) {
59979+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
59980+ if (retval2)
59981+ retval = retval2;
59982+ }
59983+ break;
59984+ }
59985+ } while ((tmpsubj = tmpsubj->parent_subject));
59986+ read_unlock(&gr_inode_lock);
59987+
59988+ return retval;
59989+}
59990+
59991+static __inline__ struct acl_object_label *
59992+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
59993+ struct dentry *curr_dentry,
59994+ const struct acl_subject_label *subj, char **path, const int checkglob)
59995+{
59996+ int newglob = checkglob;
59997+ ino_t inode;
59998+ dev_t device;
59999+
60000+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
60001+ as we don't want a / * rule to match instead of the / object
60002+ don't do this for create lookups that call this function though, since they're looking up
60003+ on the parent and thus need globbing checks on all paths
60004+ */
60005+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
60006+ newglob = GR_NO_GLOB;
60007+
60008+ spin_lock(&curr_dentry->d_lock);
60009+ inode = curr_dentry->d_inode->i_ino;
60010+ device = __get_dev(curr_dentry);
60011+ spin_unlock(&curr_dentry->d_lock);
60012+
60013+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
60014+}
60015+
60016+#ifdef CONFIG_HUGETLBFS
60017+static inline bool
60018+is_hugetlbfs_mnt(const struct vfsmount *mnt)
60019+{
60020+ int i;
60021+ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
60022+ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
60023+ return true;
60024+ }
60025+
60026+ return false;
60027+}
60028+#endif
60029+
60030+static struct acl_object_label *
60031+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
60032+ const struct acl_subject_label *subj, char *path, const int checkglob)
60033+{
60034+ struct dentry *dentry = (struct dentry *) l_dentry;
60035+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
60036+ struct mount *real_mnt = real_mount(mnt);
60037+ struct acl_object_label *retval;
60038+ struct dentry *parent;
60039+
60040+ br_read_lock(&vfsmount_lock);
60041+ write_seqlock(&rename_lock);
60042+
60043+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
60044+#ifdef CONFIG_NET
60045+ mnt == sock_mnt ||
60046+#endif
60047+#ifdef CONFIG_HUGETLBFS
60048+ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
60049+#endif
60050+ /* ignore Eric Biederman */
60051+ IS_PRIVATE(l_dentry->d_inode))) {
60052+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
60053+ goto out;
60054+ }
60055+
60056+ for (;;) {
60057+ if (dentry == real_root.dentry && mnt == real_root.mnt)
60058+ break;
60059+
60060+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
60061+ if (!mnt_has_parent(real_mnt))
60062+ break;
60063+
60064+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
60065+ if (retval != NULL)
60066+ goto out;
60067+
60068+ dentry = real_mnt->mnt_mountpoint;
60069+ real_mnt = real_mnt->mnt_parent;
60070+ mnt = &real_mnt->mnt;
60071+ continue;
60072+ }
60073+
60074+ parent = dentry->d_parent;
60075+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
60076+ if (retval != NULL)
60077+ goto out;
60078+
60079+ dentry = parent;
60080+ }
60081+
60082+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
60083+
60084+ /* real_root is pinned so we don't have to hold a reference */
60085+ if (retval == NULL)
60086+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
60087+out:
60088+ write_sequnlock(&rename_lock);
60089+ br_read_unlock(&vfsmount_lock);
60090+
60091+ BUG_ON(retval == NULL);
60092+
60093+ return retval;
60094+}
60095+
60096+static __inline__ struct acl_object_label *
60097+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
60098+ const struct acl_subject_label *subj)
60099+{
60100+ char *path = NULL;
60101+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
60102+}
60103+
60104+static __inline__ struct acl_object_label *
60105+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
60106+ const struct acl_subject_label *subj)
60107+{
60108+ char *path = NULL;
60109+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
60110+}
60111+
60112+static __inline__ struct acl_object_label *
60113+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
60114+ const struct acl_subject_label *subj, char *path)
60115+{
60116+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
60117+}
60118+
60119+static struct acl_subject_label *
60120+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
60121+ const struct acl_role_label *role)
60122+{
60123+ struct dentry *dentry = (struct dentry *) l_dentry;
60124+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
60125+ struct mount *real_mnt = real_mount(mnt);
60126+ struct acl_subject_label *retval;
60127+ struct dentry *parent;
60128+
60129+ br_read_lock(&vfsmount_lock);
60130+ write_seqlock(&rename_lock);
60131+
60132+ for (;;) {
60133+ if (dentry == real_root.dentry && mnt == real_root.mnt)
60134+ break;
60135+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
60136+ if (!mnt_has_parent(real_mnt))
60137+ break;
60138+
60139+ spin_lock(&dentry->d_lock);
60140+ read_lock(&gr_inode_lock);
60141+ retval =
60142+ lookup_acl_subj_label(dentry->d_inode->i_ino,
60143+ __get_dev(dentry), role);
60144+ read_unlock(&gr_inode_lock);
60145+ spin_unlock(&dentry->d_lock);
60146+ if (retval != NULL)
60147+ goto out;
60148+
60149+ dentry = real_mnt->mnt_mountpoint;
60150+ real_mnt = real_mnt->mnt_parent;
60151+ mnt = &real_mnt->mnt;
60152+ continue;
60153+ }
60154+
60155+ spin_lock(&dentry->d_lock);
60156+ read_lock(&gr_inode_lock);
60157+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
60158+ __get_dev(dentry), role);
60159+ read_unlock(&gr_inode_lock);
60160+ parent = dentry->d_parent;
60161+ spin_unlock(&dentry->d_lock);
60162+
60163+ if (retval != NULL)
60164+ goto out;
60165+
60166+ dentry = parent;
60167+ }
60168+
60169+ spin_lock(&dentry->d_lock);
60170+ read_lock(&gr_inode_lock);
60171+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
60172+ __get_dev(dentry), role);
60173+ read_unlock(&gr_inode_lock);
60174+ spin_unlock(&dentry->d_lock);
60175+
60176+ if (unlikely(retval == NULL)) {
60177+ /* real_root is pinned, we don't need to hold a reference */
60178+ read_lock(&gr_inode_lock);
60179+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
60180+ __get_dev(real_root.dentry), role);
60181+ read_unlock(&gr_inode_lock);
60182+ }
60183+out:
60184+ write_sequnlock(&rename_lock);
60185+ br_read_unlock(&vfsmount_lock);
60186+
60187+ BUG_ON(retval == NULL);
60188+
60189+ return retval;
60190+}
60191+
60192+static void
60193+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
60194+{
60195+ struct task_struct *task = current;
60196+ const struct cred *cred = current_cred();
60197+
60198+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
60199+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
60200+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
60201+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
60202+
60203+ return;
60204+}
60205+
60206+static void
60207+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
60208+{
60209+ struct task_struct *task = current;
60210+ const struct cred *cred = current_cred();
60211+
60212+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
60213+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
60214+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
60215+ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
60216+
60217+ return;
60218+}
60219+
60220+static void
60221+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
60222+{
60223+ struct task_struct *task = current;
60224+ const struct cred *cred = current_cred();
60225+
60226+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
60227+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
60228+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
60229+ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
60230+
60231+ return;
60232+}
60233+
60234+__u32
60235+gr_search_file(const struct dentry * dentry, const __u32 mode,
60236+ const struct vfsmount * mnt)
60237+{
60238+ __u32 retval = mode;
60239+ struct acl_subject_label *curracl;
60240+ struct acl_object_label *currobj;
60241+
60242+ if (unlikely(!(gr_status & GR_READY)))
60243+ return (mode & ~GR_AUDITS);
60244+
60245+ curracl = current->acl;
60246+
60247+ currobj = chk_obj_label(dentry, mnt, curracl);
60248+ retval = currobj->mode & mode;
60249+
60250+ /* if we're opening a specified transfer file for writing
60251+ (e.g. /dev/initctl), then transfer our role to init
60252+ */
60253+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
60254+ current->role->roletype & GR_ROLE_PERSIST)) {
60255+ struct task_struct *task = init_pid_ns.child_reaper;
60256+
60257+ if (task->role != current->role) {
60258+ task->acl_sp_role = 0;
60259+ task->acl_role_id = current->acl_role_id;
60260+ task->role = current->role;
60261+ rcu_read_lock();
60262+ read_lock(&grsec_exec_file_lock);
60263+ gr_apply_subject_to_task(task);
60264+ read_unlock(&grsec_exec_file_lock);
60265+ rcu_read_unlock();
60266+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
60267+ }
60268+ }
60269+
60270+ if (unlikely
60271+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
60272+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
60273+ __u32 new_mode = mode;
60274+
60275+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
60276+
60277+ retval = new_mode;
60278+
60279+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
60280+ new_mode |= GR_INHERIT;
60281+
60282+ if (!(mode & GR_NOLEARN))
60283+ gr_log_learn(dentry, mnt, new_mode);
60284+ }
60285+
60286+ return retval;
60287+}
60288+
60289+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
60290+ const struct dentry *parent,
60291+ const struct vfsmount *mnt)
60292+{
60293+ struct name_entry *match;
60294+ struct acl_object_label *matchpo;
60295+ struct acl_subject_label *curracl;
60296+ char *path;
60297+
60298+ if (unlikely(!(gr_status & GR_READY)))
60299+ return NULL;
60300+
60301+ preempt_disable();
60302+ path = gr_to_filename_rbac(new_dentry, mnt);
60303+ match = lookup_name_entry_create(path);
60304+
60305+ curracl = current->acl;
60306+
60307+ if (match) {
60308+ read_lock(&gr_inode_lock);
60309+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
60310+ read_unlock(&gr_inode_lock);
60311+
60312+ if (matchpo) {
60313+ preempt_enable();
60314+ return matchpo;
60315+ }
60316+ }
60317+
60318+ // lookup parent
60319+
60320+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
60321+
60322+ preempt_enable();
60323+ return matchpo;
60324+}
60325+
60326+__u32
60327+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
60328+ const struct vfsmount * mnt, const __u32 mode)
60329+{
60330+ struct acl_object_label *matchpo;
60331+ __u32 retval;
60332+
60333+ if (unlikely(!(gr_status & GR_READY)))
60334+ return (mode & ~GR_AUDITS);
60335+
60336+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
60337+
60338+ retval = matchpo->mode & mode;
60339+
60340+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
60341+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
60342+ __u32 new_mode = mode;
60343+
60344+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
60345+
60346+ gr_log_learn(new_dentry, mnt, new_mode);
60347+ return new_mode;
60348+ }
60349+
60350+ return retval;
60351+}
60352+
60353+__u32
60354+gr_check_link(const struct dentry * new_dentry,
60355+ const struct dentry * parent_dentry,
60356+ const struct vfsmount * parent_mnt,
60357+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
60358+{
60359+ struct acl_object_label *obj;
60360+ __u32 oldmode, newmode;
60361+ __u32 needmode;
60362+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
60363+ GR_DELETE | GR_INHERIT;
60364+
60365+ if (unlikely(!(gr_status & GR_READY)))
60366+ return (GR_CREATE | GR_LINK);
60367+
60368+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
60369+ oldmode = obj->mode;
60370+
60371+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
60372+ newmode = obj->mode;
60373+
60374+ needmode = newmode & checkmodes;
60375+
60376+ // old name for hardlink must have at least the permissions of the new name
60377+ if ((oldmode & needmode) != needmode)
60378+ goto bad;
60379+
60380+ // if old name had restrictions/auditing, make sure the new name does as well
60381+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
60382+
60383+ // don't allow hardlinking of suid/sgid/fcapped files without permission
60384+ if (is_privileged_binary(old_dentry))
60385+ needmode |= GR_SETID;
60386+
60387+ if ((newmode & needmode) != needmode)
60388+ goto bad;
60389+
60390+ // enforce minimum permissions
60391+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
60392+ return newmode;
60393+bad:
60394+ needmode = oldmode;
60395+ if (is_privileged_binary(old_dentry))
60396+ needmode |= GR_SETID;
60397+
60398+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
60399+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
60400+ return (GR_CREATE | GR_LINK);
60401+ } else if (newmode & GR_SUPPRESS)
60402+ return GR_SUPPRESS;
60403+ else
60404+ return 0;
60405+}
60406+
60407+int
60408+gr_check_hidden_task(const struct task_struct *task)
60409+{
60410+ if (unlikely(!(gr_status & GR_READY)))
60411+ return 0;
60412+
60413+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
60414+ return 1;
60415+
60416+ return 0;
60417+}
60418+
60419+int
60420+gr_check_protected_task(const struct task_struct *task)
60421+{
60422+ if (unlikely(!(gr_status & GR_READY) || !task))
60423+ return 0;
60424+
60425+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
60426+ task->acl != current->acl)
60427+ return 1;
60428+
60429+ return 0;
60430+}
60431+
60432+int
60433+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
60434+{
60435+ struct task_struct *p;
60436+ int ret = 0;
60437+
60438+ if (unlikely(!(gr_status & GR_READY) || !pid))
60439+ return ret;
60440+
60441+ read_lock(&tasklist_lock);
60442+ do_each_pid_task(pid, type, p) {
60443+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
60444+ p->acl != current->acl) {
60445+ ret = 1;
60446+ goto out;
60447+ }
60448+ } while_each_pid_task(pid, type, p);
60449+out:
60450+ read_unlock(&tasklist_lock);
60451+
60452+ return ret;
60453+}
60454+
60455+void
60456+gr_copy_label(struct task_struct *tsk)
60457+{
60458+ tsk->signal->used_accept = 0;
60459+ tsk->acl_sp_role = 0;
60460+ tsk->acl_role_id = current->acl_role_id;
60461+ tsk->acl = current->acl;
60462+ tsk->role = current->role;
60463+ tsk->signal->curr_ip = current->signal->curr_ip;
60464+ tsk->signal->saved_ip = current->signal->saved_ip;
60465+ if (current->exec_file)
60466+ get_file(current->exec_file);
60467+ tsk->exec_file = current->exec_file;
60468+ tsk->is_writable = current->is_writable;
60469+ if (unlikely(current->signal->used_accept)) {
60470+ current->signal->curr_ip = 0;
60471+ current->signal->saved_ip = 0;
60472+ }
60473+
60474+ return;
60475+}
60476+
60477+static void
60478+gr_set_proc_res(struct task_struct *task)
60479+{
60480+ struct acl_subject_label *proc;
60481+ unsigned short i;
60482+
60483+ proc = task->acl;
60484+
60485+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
60486+ return;
60487+
60488+ for (i = 0; i < RLIM_NLIMITS; i++) {
60489+ if (!(proc->resmask & (1U << i)))
60490+ continue;
60491+
60492+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
60493+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
60494+
60495+ if (i == RLIMIT_CPU)
60496+ update_rlimit_cpu(task, proc->res[i].rlim_cur);
60497+ }
60498+
60499+ return;
60500+}
60501+
60502+extern int __gr_process_user_ban(struct user_struct *user);
60503+
60504+int
60505+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
60506+{
60507+ unsigned int i;
60508+ __u16 num;
60509+ uid_t *uidlist;
60510+ uid_t curuid;
60511+ int realok = 0;
60512+ int effectiveok = 0;
60513+ int fsok = 0;
60514+ uid_t globalreal, globaleffective, globalfs;
60515+
60516+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
60517+ struct user_struct *user;
60518+
60519+ if (!uid_valid(real))
60520+ goto skipit;
60521+
60522+ /* find user based on global namespace */
60523+
60524+ globalreal = GR_GLOBAL_UID(real);
60525+
60526+ user = find_user(make_kuid(&init_user_ns, globalreal));
60527+ if (user == NULL)
60528+ goto skipit;
60529+
60530+ if (__gr_process_user_ban(user)) {
60531+ /* for find_user */
60532+ free_uid(user);
60533+ return 1;
60534+ }
60535+
60536+ /* for find_user */
60537+ free_uid(user);
60538+
60539+skipit:
60540+#endif
60541+
60542+ if (unlikely(!(gr_status & GR_READY)))
60543+ return 0;
60544+
60545+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
60546+ gr_log_learn_uid_change(real, effective, fs);
60547+
60548+ num = current->acl->user_trans_num;
60549+ uidlist = current->acl->user_transitions;
60550+
60551+ if (uidlist == NULL)
60552+ return 0;
60553+
60554+ if (!uid_valid(real)) {
60555+ realok = 1;
60556+ globalreal = (uid_t)-1;
60557+ } else {
60558+ globalreal = GR_GLOBAL_UID(real);
60559+ }
60560+ if (!uid_valid(effective)) {
60561+ effectiveok = 1;
60562+ globaleffective = (uid_t)-1;
60563+ } else {
60564+ globaleffective = GR_GLOBAL_UID(effective);
60565+ }
60566+ if (!uid_valid(fs)) {
60567+ fsok = 1;
60568+ globalfs = (uid_t)-1;
60569+ } else {
60570+ globalfs = GR_GLOBAL_UID(fs);
60571+ }
60572+
60573+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
60574+ for (i = 0; i < num; i++) {
60575+ curuid = uidlist[i];
60576+ if (globalreal == curuid)
60577+ realok = 1;
60578+ if (globaleffective == curuid)
60579+ effectiveok = 1;
60580+ if (globalfs == curuid)
60581+ fsok = 1;
60582+ }
60583+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
60584+ for (i = 0; i < num; i++) {
60585+ curuid = uidlist[i];
60586+ if (globalreal == curuid)
60587+ break;
60588+ if (globaleffective == curuid)
60589+ break;
60590+ if (globalfs == curuid)
60591+ break;
60592+ }
60593+ /* not in deny list */
60594+ if (i == num) {
60595+ realok = 1;
60596+ effectiveok = 1;
60597+ fsok = 1;
60598+ }
60599+ }
60600+
60601+ if (realok && effectiveok && fsok)
60602+ return 0;
60603+ else {
60604+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
60605+ return 1;
60606+ }
60607+}
60608+
60609+int
60610+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
60611+{
60612+ unsigned int i;
60613+ __u16 num;
60614+ gid_t *gidlist;
60615+ gid_t curgid;
60616+ int realok = 0;
60617+ int effectiveok = 0;
60618+ int fsok = 0;
60619+ gid_t globalreal, globaleffective, globalfs;
60620+
60621+ if (unlikely(!(gr_status & GR_READY)))
60622+ return 0;
60623+
60624+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
60625+ gr_log_learn_gid_change(real, effective, fs);
60626+
60627+ num = current->acl->group_trans_num;
60628+ gidlist = current->acl->group_transitions;
60629+
60630+ if (gidlist == NULL)
60631+ return 0;
60632+
60633+ if (!gid_valid(real)) {
60634+ realok = 1;
60635+ globalreal = (gid_t)-1;
60636+ } else {
60637+ globalreal = GR_GLOBAL_GID(real);
60638+ }
60639+ if (!gid_valid(effective)) {
60640+ effectiveok = 1;
60641+ globaleffective = (gid_t)-1;
60642+ } else {
60643+ globaleffective = GR_GLOBAL_GID(effective);
60644+ }
60645+ if (!gid_valid(fs)) {
60646+ fsok = 1;
60647+ globalfs = (gid_t)-1;
60648+ } else {
60649+ globalfs = GR_GLOBAL_GID(fs);
60650+ }
60651+
60652+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
60653+ for (i = 0; i < num; i++) {
60654+ curgid = gidlist[i];
60655+ if (globalreal == curgid)
60656+ realok = 1;
60657+ if (globaleffective == curgid)
60658+ effectiveok = 1;
60659+ if (globalfs == curgid)
60660+ fsok = 1;
60661+ }
60662+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
60663+ for (i = 0; i < num; i++) {
60664+ curgid = gidlist[i];
60665+ if (globalreal == curgid)
60666+ break;
60667+ if (globaleffective == curgid)
60668+ break;
60669+ if (globalfs == curgid)
60670+ break;
60671+ }
60672+ /* not in deny list */
60673+ if (i == num) {
60674+ realok = 1;
60675+ effectiveok = 1;
60676+ fsok = 1;
60677+ }
60678+ }
60679+
60680+ if (realok && effectiveok && fsok)
60681+ return 0;
60682+ else {
60683+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
60684+ return 1;
60685+ }
60686+}
60687+
60688+extern int gr_acl_is_capable(const int cap);
60689+
60690+void
60691+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
60692+{
60693+ struct acl_role_label *role = task->role;
60694+ struct acl_subject_label *subj = NULL;
60695+ struct acl_object_label *obj;
60696+ struct file *filp;
60697+ uid_t uid;
60698+ gid_t gid;
60699+
60700+ if (unlikely(!(gr_status & GR_READY)))
60701+ return;
60702+
60703+ uid = GR_GLOBAL_UID(kuid);
60704+ gid = GR_GLOBAL_GID(kgid);
60705+
60706+ filp = task->exec_file;
60707+
60708+ /* kernel process, we'll give them the kernel role */
60709+ if (unlikely(!filp)) {
60710+ task->role = kernel_role;
60711+ task->acl = kernel_role->root_label;
60712+ return;
60713+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
60714+ role = lookup_acl_role_label(task, uid, gid);
60715+
60716+ /* don't change the role if we're not a privileged process */
60717+ if (role && task->role != role &&
60718+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
60719+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
60720+ return;
60721+
60722+ /* perform subject lookup in possibly new role
60723+ we can use this result below in the case where role == task->role
60724+ */
60725+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
60726+
60727+ /* if we changed uid/gid, but result in the same role
60728+ and are using inheritance, don't lose the inherited subject
60729+ if current subject is other than what normal lookup
60730+ would result in, we arrived via inheritance, don't
60731+ lose subject
60732+ */
60733+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
60734+ (subj == task->acl)))
60735+ task->acl = subj;
60736+
60737+ task->role = role;
60738+
60739+ task->is_writable = 0;
60740+
60741+ /* ignore additional mmap checks for processes that are writable
60742+ by the default ACL */
60743+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
60744+ if (unlikely(obj->mode & GR_WRITE))
60745+ task->is_writable = 1;
60746+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
60747+ if (unlikely(obj->mode & GR_WRITE))
60748+ task->is_writable = 1;
60749+
60750+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
60751+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
60752+#endif
60753+
60754+ gr_set_proc_res(task);
60755+
60756+ return;
60757+}
60758+
60759+int
60760+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
60761+ const int unsafe_flags)
60762+{
60763+ struct task_struct *task = current;
60764+ struct acl_subject_label *newacl;
60765+ struct acl_object_label *obj;
60766+ __u32 retmode;
60767+
60768+ if (unlikely(!(gr_status & GR_READY)))
60769+ return 0;
60770+
60771+ newacl = chk_subj_label(dentry, mnt, task->role);
60772+
60773+ /* special handling for if we did an strace -f -p <pid> from an admin role, where pid then
60774+ did an exec
60775+ */
60776+ rcu_read_lock();
60777+ read_lock(&tasklist_lock);
60778+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
60779+ (task->parent->acl->mode & GR_POVERRIDE))) {
60780+ read_unlock(&tasklist_lock);
60781+ rcu_read_unlock();
60782+ goto skip_check;
60783+ }
60784+ read_unlock(&tasklist_lock);
60785+ rcu_read_unlock();
60786+
60787+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
60788+ !(task->role->roletype & GR_ROLE_GOD) &&
60789+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
60790+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
60791+ if (unsafe_flags & LSM_UNSAFE_SHARE)
60792+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
60793+ else
60794+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
60795+ return -EACCES;
60796+ }
60797+
60798+skip_check:
60799+
60800+ obj = chk_obj_label(dentry, mnt, task->acl);
60801+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
60802+
60803+ if (!(task->acl->mode & GR_INHERITLEARN) &&
60804+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
60805+ if (obj->nested)
60806+ task->acl = obj->nested;
60807+ else
60808+ task->acl = newacl;
60809+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
60810+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
60811+
60812+ task->is_writable = 0;
60813+
60814+ /* ignore additional mmap checks for processes that are writable
60815+ by the default ACL */
60816+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
60817+ if (unlikely(obj->mode & GR_WRITE))
60818+ task->is_writable = 1;
60819+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
60820+ if (unlikely(obj->mode & GR_WRITE))
60821+ task->is_writable = 1;
60822+
60823+ gr_set_proc_res(task);
60824+
60825+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
60826+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
60827+#endif
60828+ return 0;
60829+}
60830+
60831+/* always called with valid inodev ptr */
60832+static void
60833+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
60834+{
60835+ struct acl_object_label *matchpo;
60836+ struct acl_subject_label *matchps;
60837+ struct acl_subject_label *subj;
60838+ struct acl_role_label *role;
60839+ unsigned int x;
60840+
60841+ FOR_EACH_ROLE_START(role)
60842+ FOR_EACH_SUBJECT_START(role, subj, x)
60843+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
60844+ matchpo->mode |= GR_DELETED;
60845+ FOR_EACH_SUBJECT_END(subj,x)
60846+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
60847+ /* nested subjects aren't in the role's subj_hash table */
60848+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
60849+ matchpo->mode |= GR_DELETED;
60850+ FOR_EACH_NESTED_SUBJECT_END(subj)
60851+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
60852+ matchps->mode |= GR_DELETED;
60853+ FOR_EACH_ROLE_END(role)
60854+
60855+ inodev->nentry->deleted = 1;
60856+
60857+ return;
60858+}
60859+
60860+void
60861+gr_handle_delete(const ino_t ino, const dev_t dev)
60862+{
60863+ struct inodev_entry *inodev;
60864+
60865+ if (unlikely(!(gr_status & GR_READY)))
60866+ return;
60867+
60868+ write_lock(&gr_inode_lock);
60869+ inodev = lookup_inodev_entry(ino, dev);
60870+ if (inodev != NULL)
60871+ do_handle_delete(inodev, ino, dev);
60872+ write_unlock(&gr_inode_lock);
60873+
60874+ return;
60875+}
60876+
60877+static void
60878+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
60879+ const ino_t newinode, const dev_t newdevice,
60880+ struct acl_subject_label *subj)
60881+{
60882+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
60883+ struct acl_object_label *match;
60884+
60885+ match = subj->obj_hash[index];
60886+
60887+ while (match && (match->inode != oldinode ||
60888+ match->device != olddevice ||
60889+ !(match->mode & GR_DELETED)))
60890+ match = match->next;
60891+
60892+ if (match && (match->inode == oldinode)
60893+ && (match->device == olddevice)
60894+ && (match->mode & GR_DELETED)) {
60895+ if (match->prev == NULL) {
60896+ subj->obj_hash[index] = match->next;
60897+ if (match->next != NULL)
60898+ match->next->prev = NULL;
60899+ } else {
60900+ match->prev->next = match->next;
60901+ if (match->next != NULL)
60902+ match->next->prev = match->prev;
60903+ }
60904+ match->prev = NULL;
60905+ match->next = NULL;
60906+ match->inode = newinode;
60907+ match->device = newdevice;
60908+ match->mode &= ~GR_DELETED;
60909+
60910+ insert_acl_obj_label(match, subj);
60911+ }
60912+
60913+ return;
60914+}
60915+
60916+static void
60917+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
60918+ const ino_t newinode, const dev_t newdevice,
60919+ struct acl_role_label *role)
60920+{
60921+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
60922+ struct acl_subject_label *match;
60923+
60924+ match = role->subj_hash[index];
60925+
60926+ while (match && (match->inode != oldinode ||
60927+ match->device != olddevice ||
60928+ !(match->mode & GR_DELETED)))
60929+ match = match->next;
60930+
60931+ if (match && (match->inode == oldinode)
60932+ && (match->device == olddevice)
60933+ && (match->mode & GR_DELETED)) {
60934+ if (match->prev == NULL) {
60935+ role->subj_hash[index] = match->next;
60936+ if (match->next != NULL)
60937+ match->next->prev = NULL;
60938+ } else {
60939+ match->prev->next = match->next;
60940+ if (match->next != NULL)
60941+ match->next->prev = match->prev;
60942+ }
60943+ match->prev = NULL;
60944+ match->next = NULL;
60945+ match->inode = newinode;
60946+ match->device = newdevice;
60947+ match->mode &= ~GR_DELETED;
60948+
60949+ insert_acl_subj_label(match, role);
60950+ }
60951+
60952+ return;
60953+}
60954+
60955+static void
60956+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
60957+ const ino_t newinode, const dev_t newdevice)
60958+{
60959+ unsigned int index = gr_fhash(oldinode, olddevice, inodev_set.i_size);
60960+ struct inodev_entry *match;
60961+
60962+ match = inodev_set.i_hash[index];
60963+
60964+ while (match && (match->nentry->inode != oldinode ||
60965+ match->nentry->device != olddevice || !match->nentry->deleted))
60966+ match = match->next;
60967+
60968+ if (match && (match->nentry->inode == oldinode)
60969+ && (match->nentry->device == olddevice) &&
60970+ match->nentry->deleted) {
60971+ if (match->prev == NULL) {
60972+ inodev_set.i_hash[index] = match->next;
60973+ if (match->next != NULL)
60974+ match->next->prev = NULL;
60975+ } else {
60976+ match->prev->next = match->next;
60977+ if (match->next != NULL)
60978+ match->next->prev = match->prev;
60979+ }
60980+ match->prev = NULL;
60981+ match->next = NULL;
60982+ match->nentry->inode = newinode;
60983+ match->nentry->device = newdevice;
60984+ match->nentry->deleted = 0;
60985+
60986+ insert_inodev_entry(match);
60987+ }
60988+
60989+ return;
60990+}
60991+
60992+static void
60993+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
60994+{
60995+ struct acl_subject_label *subj;
60996+ struct acl_role_label *role;
60997+ unsigned int x;
60998+
60999+ FOR_EACH_ROLE_START(role)
61000+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
61001+
61002+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
61003+ if ((subj->inode == ino) && (subj->device == dev)) {
61004+ subj->inode = ino;
61005+ subj->device = dev;
61006+ }
61007+ /* nested subjects aren't in the role's subj_hash table */
61008+ update_acl_obj_label(matchn->inode, matchn->device,
61009+ ino, dev, subj);
61010+ FOR_EACH_NESTED_SUBJECT_END(subj)
61011+ FOR_EACH_SUBJECT_START(role, subj, x)
61012+ update_acl_obj_label(matchn->inode, matchn->device,
61013+ ino, dev, subj);
61014+ FOR_EACH_SUBJECT_END(subj,x)
61015+ FOR_EACH_ROLE_END(role)
61016+
61017+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
61018+
61019+ return;
61020+}
61021+
61022+static void
61023+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
61024+ const struct vfsmount *mnt)
61025+{
61026+ ino_t ino = dentry->d_inode->i_ino;
61027+ dev_t dev = __get_dev(dentry);
61028+
61029+ __do_handle_create(matchn, ino, dev);
61030+
61031+ return;
61032+}
61033+
61034+void
61035+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
61036+{
61037+ struct name_entry *matchn;
61038+
61039+ if (unlikely(!(gr_status & GR_READY)))
61040+ return;
61041+
61042+ preempt_disable();
61043+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
61044+
61045+ if (unlikely((unsigned long)matchn)) {
61046+ write_lock(&gr_inode_lock);
61047+ do_handle_create(matchn, dentry, mnt);
61048+ write_unlock(&gr_inode_lock);
61049+ }
61050+ preempt_enable();
61051+
61052+ return;
61053+}
61054+
61055+void
61056+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
61057+{
61058+ struct name_entry *matchn;
61059+
61060+ if (unlikely(!(gr_status & GR_READY)))
61061+ return;
61062+
61063+ preempt_disable();
61064+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
61065+
61066+ if (unlikely((unsigned long)matchn)) {
61067+ write_lock(&gr_inode_lock);
61068+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
61069+ write_unlock(&gr_inode_lock);
61070+ }
61071+ preempt_enable();
61072+
61073+ return;
61074+}
61075+
61076+void
61077+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
61078+ struct dentry *old_dentry,
61079+ struct dentry *new_dentry,
61080+ struct vfsmount *mnt, const __u8 replace)
61081+{
61082+ struct name_entry *matchn;
61083+ struct inodev_entry *inodev;
61084+ struct inode *inode = new_dentry->d_inode;
61085+ ino_t old_ino = old_dentry->d_inode->i_ino;
61086+ dev_t old_dev = __get_dev(old_dentry);
61087+
61088+ /* vfs_rename swaps the name and parent link for old_dentry and
61089+ new_dentry
61090+ at this point, old_dentry has the new name, parent link, and inode
61091+ for the renamed file
61092+ if a file is being replaced by a rename, new_dentry has the inode
61093+ and name for the replaced file
61094+ */
61095+
61096+ if (unlikely(!(gr_status & GR_READY)))
61097+ return;
61098+
61099+ preempt_disable();
61100+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
61101+
61102+ /* we wouldn't have to check d_inode if it weren't for
61103+ NFS silly-renaming
61104+ */
61105+
61106+ write_lock(&gr_inode_lock);
61107+ if (unlikely(replace && inode)) {
61108+ ino_t new_ino = inode->i_ino;
61109+ dev_t new_dev = __get_dev(new_dentry);
61110+
61111+ inodev = lookup_inodev_entry(new_ino, new_dev);
61112+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
61113+ do_handle_delete(inodev, new_ino, new_dev);
61114+ }
61115+
61116+ inodev = lookup_inodev_entry(old_ino, old_dev);
61117+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
61118+ do_handle_delete(inodev, old_ino, old_dev);
61119+
61120+ if (unlikely((unsigned long)matchn))
61121+ do_handle_create(matchn, old_dentry, mnt);
61122+
61123+ write_unlock(&gr_inode_lock);
61124+ preempt_enable();
61125+
61126+ return;
61127+}
61128+
61129+static int
61130+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
61131+ unsigned char **sum)
61132+{
61133+ struct acl_role_label *r;
61134+ struct role_allowed_ip *ipp;
61135+ struct role_transition *trans;
61136+ unsigned int i;
61137+ int found = 0;
61138+ u32 curr_ip = current->signal->curr_ip;
61139+
61140+ current->signal->saved_ip = curr_ip;
61141+
61142+ /* check transition table */
61143+
61144+ for (trans = current->role->transitions; trans; trans = trans->next) {
61145+ if (!strcmp(rolename, trans->rolename)) {
61146+ found = 1;
61147+ break;
61148+ }
61149+ }
61150+
61151+ if (!found)
61152+ return 0;
61153+
61154+ /* handle special roles that do not require authentication
61155+ and check ip */
61156+
61157+ FOR_EACH_ROLE_START(r)
61158+ if (!strcmp(rolename, r->rolename) &&
61159+ (r->roletype & GR_ROLE_SPECIAL)) {
61160+ found = 0;
61161+ if (r->allowed_ips != NULL) {
61162+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
61163+ if ((ntohl(curr_ip) & ipp->netmask) ==
61164+ (ntohl(ipp->addr) & ipp->netmask))
61165+ found = 1;
61166+ }
61167+ } else
61168+ found = 2;
61169+ if (!found)
61170+ return 0;
61171+
61172+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
61173+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
61174+ *salt = NULL;
61175+ *sum = NULL;
61176+ return 1;
61177+ }
61178+ }
61179+ FOR_EACH_ROLE_END(r)
61180+
61181+ for (i = 0; i < num_sprole_pws; i++) {
61182+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
61183+ *salt = acl_special_roles[i]->salt;
61184+ *sum = acl_special_roles[i]->sum;
61185+ return 1;
61186+ }
61187+ }
61188+
61189+ return 0;
61190+}
61191+
61192+static void
61193+assign_special_role(char *rolename)
61194+{
61195+ struct acl_object_label *obj;
61196+ struct acl_role_label *r;
61197+ struct acl_role_label *assigned = NULL;
61198+ struct task_struct *tsk;
61199+ struct file *filp;
61200+
61201+ FOR_EACH_ROLE_START(r)
61202+ if (!strcmp(rolename, r->rolename) &&
61203+ (r->roletype & GR_ROLE_SPECIAL)) {
61204+ assigned = r;
61205+ break;
61206+ }
61207+ FOR_EACH_ROLE_END(r)
61208+
61209+ if (!assigned)
61210+ return;
61211+
61212+ read_lock(&tasklist_lock);
61213+ read_lock(&grsec_exec_file_lock);
61214+
61215+ tsk = current->real_parent;
61216+ if (tsk == NULL)
61217+ goto out_unlock;
61218+
61219+ filp = tsk->exec_file;
61220+ if (filp == NULL)
61221+ goto out_unlock;
61222+
61223+ tsk->is_writable = 0;
61224+
61225+ tsk->acl_sp_role = 1;
61226+ tsk->acl_role_id = ++acl_sp_role_value;
61227+ tsk->role = assigned;
61228+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
61229+
61230+ /* ignore additional mmap checks for processes that are writable
61231+ by the default ACL */
61232+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
61233+ if (unlikely(obj->mode & GR_WRITE))
61234+ tsk->is_writable = 1;
61235+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
61236+ if (unlikely(obj->mode & GR_WRITE))
61237+ tsk->is_writable = 1;
61238+
61239+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
61240+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
61241+#endif
61242+
61243+out_unlock:
61244+ read_unlock(&grsec_exec_file_lock);
61245+ read_unlock(&tasklist_lock);
61246+ return;
61247+}
61248+
61249+int gr_check_secure_terminal(struct task_struct *task)
61250+{
61251+ struct task_struct *p, *p2, *p3;
61252+ struct files_struct *files;
61253+ struct fdtable *fdt;
61254+ struct file *our_file = NULL, *file;
61255+ int i;
61256+
61257+ if (task->signal->tty == NULL)
61258+ return 1;
61259+
61260+ files = get_files_struct(task);
61261+ if (files != NULL) {
61262+ rcu_read_lock();
61263+ fdt = files_fdtable(files);
61264+ for (i=0; i < fdt->max_fds; i++) {
61265+ file = fcheck_files(files, i);
61266+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
61267+ get_file(file);
61268+ our_file = file;
61269+ }
61270+ }
61271+ rcu_read_unlock();
61272+ put_files_struct(files);
61273+ }
61274+
61275+ if (our_file == NULL)
61276+ return 1;
61277+
61278+ read_lock(&tasklist_lock);
61279+ do_each_thread(p2, p) {
61280+ files = get_files_struct(p);
61281+ if (files == NULL ||
61282+ (p->signal && p->signal->tty == task->signal->tty)) {
61283+ if (files != NULL)
61284+ put_files_struct(files);
61285+ continue;
61286+ }
61287+ rcu_read_lock();
61288+ fdt = files_fdtable(files);
61289+ for (i=0; i < fdt->max_fds; i++) {
61290+ file = fcheck_files(files, i);
61291+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
61292+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
61293+ p3 = task;
61294+ while (task_pid_nr(p3) > 0) {
61295+ if (p3 == p)
61296+ break;
61297+ p3 = p3->real_parent;
61298+ }
61299+ if (p3 == p)
61300+ break;
61301+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
61302+ gr_handle_alertkill(p);
61303+ rcu_read_unlock();
61304+ put_files_struct(files);
61305+ read_unlock(&tasklist_lock);
61306+ fput(our_file);
61307+ return 0;
61308+ }
61309+ }
61310+ rcu_read_unlock();
61311+ put_files_struct(files);
61312+ } while_each_thread(p2, p);
61313+ read_unlock(&tasklist_lock);
61314+
61315+ fput(our_file);
61316+ return 1;
61317+}
61318+
61319+static int gr_rbac_disable(void *unused)
61320+{
61321+ pax_open_kernel();
61322+ gr_status &= ~GR_READY;
61323+ pax_close_kernel();
61324+
61325+ return 0;
61326+}
61327+
61328+ssize_t
61329+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
61330+{
61331+ struct gr_arg_wrapper uwrap;
61332+ unsigned char *sprole_salt = NULL;
61333+ unsigned char *sprole_sum = NULL;
61334+ int error = sizeof (struct gr_arg_wrapper);
61335+ int error2 = 0;
61336+
61337+ mutex_lock(&gr_dev_mutex);
61338+
61339+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
61340+ error = -EPERM;
61341+ goto out;
61342+ }
61343+
61344+ if (count != sizeof (struct gr_arg_wrapper)) {
61345+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
61346+ error = -EINVAL;
61347+ goto out;
61348+ }
61349+
61350+
61351+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
61352+ gr_auth_expires = 0;
61353+ gr_auth_attempts = 0;
61354+ }
61355+
61356+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
61357+ error = -EFAULT;
61358+ goto out;
61359+ }
61360+
61361+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
61362+ error = -EINVAL;
61363+ goto out;
61364+ }
61365+
61366+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
61367+ error = -EFAULT;
61368+ goto out;
61369+ }
61370+
61371+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
61372+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
61373+ time_after(gr_auth_expires, get_seconds())) {
61374+ error = -EBUSY;
61375+ goto out;
61376+ }
61377+
61378+ /* if non-root trying to do anything other than use a special role,
61379+ do not attempt authentication, do not count towards authentication
61380+ locking
61381+ */
61382+
61383+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
61384+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
61385+ gr_is_global_nonroot(current_uid())) {
61386+ error = -EPERM;
61387+ goto out;
61388+ }
61389+
61390+ /* ensure pw and special role name are null terminated */
61391+
61392+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
61393+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
61394+
61395+ /* Okay.
61396+ * We have our enough of the argument structure..(we have yet
61397+ * to copy_from_user the tables themselves) . Copy the tables
61398+ * only if we need them, i.e. for loading operations. */
61399+
61400+ switch (gr_usermode->mode) {
61401+ case GR_STATUS:
61402+ if (gr_status & GR_READY) {
61403+ error = 1;
61404+ if (!gr_check_secure_terminal(current))
61405+ error = 3;
61406+ } else
61407+ error = 2;
61408+ goto out;
61409+ case GR_SHUTDOWN:
61410+ if ((gr_status & GR_READY)
61411+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
61412+ stop_machine(gr_rbac_disable, NULL, NULL);
61413+ free_variables();
61414+ memset(gr_usermode, 0, sizeof (struct gr_arg));
61415+ memset(gr_system_salt, 0, GR_SALT_LEN);
61416+ memset(gr_system_sum, 0, GR_SHA_LEN);
61417+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
61418+ } else if (gr_status & GR_READY) {
61419+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
61420+ error = -EPERM;
61421+ } else {
61422+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
61423+ error = -EAGAIN;
61424+ }
61425+ break;
61426+ case GR_ENABLE:
61427+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
61428+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
61429+ else {
61430+ if (gr_status & GR_READY)
61431+ error = -EAGAIN;
61432+ else
61433+ error = error2;
61434+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
61435+ }
61436+ break;
61437+ case GR_RELOAD:
61438+ if (!(gr_status & GR_READY)) {
61439+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
61440+ error = -EAGAIN;
61441+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
61442+ stop_machine(gr_rbac_disable, NULL, NULL);
61443+ free_variables();
61444+ error2 = gracl_init(gr_usermode);
61445+ if (!error2)
61446+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
61447+ else {
61448+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
61449+ error = error2;
61450+ }
61451+ } else {
61452+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
61453+ error = -EPERM;
61454+ }
61455+ break;
61456+ case GR_SEGVMOD:
61457+ if (unlikely(!(gr_status & GR_READY))) {
61458+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
61459+ error = -EAGAIN;
61460+ break;
61461+ }
61462+
61463+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
61464+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
61465+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
61466+ struct acl_subject_label *segvacl;
61467+ segvacl =
61468+ lookup_acl_subj_label(gr_usermode->segv_inode,
61469+ gr_usermode->segv_device,
61470+ current->role);
61471+ if (segvacl) {
61472+ segvacl->crashes = 0;
61473+ segvacl->expires = 0;
61474+ }
61475+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
61476+ gr_remove_uid(gr_usermode->segv_uid);
61477+ }
61478+ } else {
61479+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
61480+ error = -EPERM;
61481+ }
61482+ break;
61483+ case GR_SPROLE:
61484+ case GR_SPROLEPAM:
61485+ if (unlikely(!(gr_status & GR_READY))) {
61486+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
61487+ error = -EAGAIN;
61488+ break;
61489+ }
61490+
61491+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
61492+ current->role->expires = 0;
61493+ current->role->auth_attempts = 0;
61494+ }
61495+
61496+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
61497+ time_after(current->role->expires, get_seconds())) {
61498+ error = -EBUSY;
61499+ goto out;
61500+ }
61501+
61502+ if (lookup_special_role_auth
61503+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
61504+ && ((!sprole_salt && !sprole_sum)
61505+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
61506+ char *p = "";
61507+ assign_special_role(gr_usermode->sp_role);
61508+ read_lock(&tasklist_lock);
61509+ if (current->real_parent)
61510+ p = current->real_parent->role->rolename;
61511+ read_unlock(&tasklist_lock);
61512+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
61513+ p, acl_sp_role_value);
61514+ } else {
61515+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
61516+ error = -EPERM;
61517+ if(!(current->role->auth_attempts++))
61518+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
61519+
61520+ goto out;
61521+ }
61522+ break;
61523+ case GR_UNSPROLE:
61524+ if (unlikely(!(gr_status & GR_READY))) {
61525+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
61526+ error = -EAGAIN;
61527+ break;
61528+ }
61529+
61530+ if (current->role->roletype & GR_ROLE_SPECIAL) {
61531+ char *p = "";
61532+ int i = 0;
61533+
61534+ read_lock(&tasklist_lock);
61535+ if (current->real_parent) {
61536+ p = current->real_parent->role->rolename;
61537+ i = current->real_parent->acl_role_id;
61538+ }
61539+ read_unlock(&tasklist_lock);
61540+
61541+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
61542+ gr_set_acls(1);
61543+ } else {
61544+ error = -EPERM;
61545+ goto out;
61546+ }
61547+ break;
61548+ default:
61549+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
61550+ error = -EINVAL;
61551+ break;
61552+ }
61553+
61554+ if (error != -EPERM)
61555+ goto out;
61556+
61557+ if(!(gr_auth_attempts++))
61558+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
61559+
61560+ out:
61561+ mutex_unlock(&gr_dev_mutex);
61562+ return error;
61563+}
61564+
61565+/* must be called with
61566+ rcu_read_lock();
61567+ read_lock(&tasklist_lock);
61568+ read_lock(&grsec_exec_file_lock);
61569+*/
61570+int gr_apply_subject_to_task(struct task_struct *task)
61571+{
61572+ struct acl_object_label *obj;
61573+ char *tmpname;
61574+ struct acl_subject_label *tmpsubj;
61575+ struct file *filp;
61576+ struct name_entry *nmatch;
61577+
61578+ filp = task->exec_file;
61579+ if (filp == NULL)
61580+ return 0;
61581+
61582+ /* the following is to apply the correct subject
61583+ on binaries running when the RBAC system
61584+ is enabled, when the binaries have been
61585+ replaced or deleted since their execution
61586+ -----
61587+ when the RBAC system starts, the inode/dev
61588+ from exec_file will be one the RBAC system
61589+ is unaware of. It only knows the inode/dev
61590+ of the present file on disk, or the absence
61591+ of it.
61592+ */
61593+ preempt_disable();
61594+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
61595+
61596+ nmatch = lookup_name_entry(tmpname);
61597+ preempt_enable();
61598+ tmpsubj = NULL;
61599+ if (nmatch) {
61600+ if (nmatch->deleted)
61601+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
61602+ else
61603+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
61604+ if (tmpsubj != NULL)
61605+ task->acl = tmpsubj;
61606+ }
61607+ if (tmpsubj == NULL)
61608+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
61609+ task->role);
61610+ if (task->acl) {
61611+ task->is_writable = 0;
61612+ /* ignore additional mmap checks for processes that are writable
61613+ by the default ACL */
61614+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
61615+ if (unlikely(obj->mode & GR_WRITE))
61616+ task->is_writable = 1;
61617+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
61618+ if (unlikely(obj->mode & GR_WRITE))
61619+ task->is_writable = 1;
61620+
61621+ gr_set_proc_res(task);
61622+
61623+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
61624+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
61625+#endif
61626+ } else {
61627+ return 1;
61628+ }
61629+
61630+ return 0;
61631+}
61632+
61633+int
61634+gr_set_acls(const int type)
61635+{
61636+ struct task_struct *task, *task2;
61637+ struct acl_role_label *role = current->role;
61638+ __u16 acl_role_id = current->acl_role_id;
61639+ const struct cred *cred;
61640+ int ret;
61641+
61642+ rcu_read_lock();
61643+ read_lock(&tasklist_lock);
61644+ read_lock(&grsec_exec_file_lock);
61645+ do_each_thread(task2, task) {
61646+ /* check to see if we're called from the exit handler,
61647+ if so, only replace ACLs that have inherited the admin
61648+ ACL */
61649+
61650+ if (type && (task->role != role ||
61651+ task->acl_role_id != acl_role_id))
61652+ continue;
61653+
61654+ task->acl_role_id = 0;
61655+ task->acl_sp_role = 0;
61656+
61657+ if (task->exec_file) {
61658+ cred = __task_cred(task);
61659+ task->role = lookup_acl_role_label(task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
61660+ ret = gr_apply_subject_to_task(task);
61661+ if (ret) {
61662+ read_unlock(&grsec_exec_file_lock);
61663+ read_unlock(&tasklist_lock);
61664+ rcu_read_unlock();
61665+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
61666+ return ret;
61667+ }
61668+ } else {
61669+ // it's a kernel process
61670+ task->role = kernel_role;
61671+ task->acl = kernel_role->root_label;
61672+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
61673+ task->acl->mode &= ~GR_PROCFIND;
61674+#endif
61675+ }
61676+ } while_each_thread(task2, task);
61677+ read_unlock(&grsec_exec_file_lock);
61678+ read_unlock(&tasklist_lock);
61679+ rcu_read_unlock();
61680+
61681+ return 0;
61682+}
61683+
61684+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
61685+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
61686+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
61687+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
61688+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
61689+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
61690+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
61691+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
61692+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
61693+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
61694+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
61695+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
61696+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
61697+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
61698+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
61699+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
61700+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
61701+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
61702+};
61703+
61704+void
61705+gr_learn_resource(const struct task_struct *task,
61706+ const int res, const unsigned long wanted, const int gt)
61707+{
61708+ struct acl_subject_label *acl;
61709+ const struct cred *cred;
61710+
61711+ if (unlikely((gr_status & GR_READY) &&
61712+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
61713+ goto skip_reslog;
61714+
61715+ gr_log_resource(task, res, wanted, gt);
61716+skip_reslog:
61717+
61718+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
61719+ return;
61720+
61721+ acl = task->acl;
61722+
61723+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
61724+ !(acl->resmask & (1U << (unsigned short) res))))
61725+ return;
61726+
61727+ if (wanted >= acl->res[res].rlim_cur) {
61728+ unsigned long res_add;
61729+
61730+ res_add = wanted + res_learn_bumps[res];
61731+
61732+ acl->res[res].rlim_cur = res_add;
61733+
61734+ if (wanted > acl->res[res].rlim_max)
61735+ acl->res[res].rlim_max = res_add;
61736+
61737+ /* only log the subject filename, since resource logging is supported for
61738+ single-subject learning only */
61739+ rcu_read_lock();
61740+ cred = __task_cred(task);
61741+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
61742+ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
61743+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
61744+ "", (unsigned long) res, &task->signal->saved_ip);
61745+ rcu_read_unlock();
61746+ }
61747+
61748+ return;
61749+}
61750+EXPORT_SYMBOL(gr_learn_resource);
61751+#endif
61752+
61753+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
61754+void
61755+pax_set_initial_flags(struct linux_binprm *bprm)
61756+{
61757+ struct task_struct *task = current;
61758+ struct acl_subject_label *proc;
61759+ unsigned long flags;
61760+
61761+ if (unlikely(!(gr_status & GR_READY)))
61762+ return;
61763+
61764+ flags = pax_get_flags(task);
61765+
61766+ proc = task->acl;
61767+
61768+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
61769+ flags &= ~MF_PAX_PAGEEXEC;
61770+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
61771+ flags &= ~MF_PAX_SEGMEXEC;
61772+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
61773+ flags &= ~MF_PAX_RANDMMAP;
61774+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
61775+ flags &= ~MF_PAX_EMUTRAMP;
61776+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
61777+ flags &= ~MF_PAX_MPROTECT;
61778+
61779+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
61780+ flags |= MF_PAX_PAGEEXEC;
61781+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
61782+ flags |= MF_PAX_SEGMEXEC;
61783+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
61784+ flags |= MF_PAX_RANDMMAP;
61785+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
61786+ flags |= MF_PAX_EMUTRAMP;
61787+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
61788+ flags |= MF_PAX_MPROTECT;
61789+
61790+ pax_set_flags(task, flags);
61791+
61792+ return;
61793+}
61794+#endif
61795+
61796+int
61797+gr_handle_proc_ptrace(struct task_struct *task)
61798+{
61799+ struct file *filp;
61800+ struct task_struct *tmp = task;
61801+ struct task_struct *curtemp = current;
61802+ __u32 retmode;
61803+
61804+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
61805+ if (unlikely(!(gr_status & GR_READY)))
61806+ return 0;
61807+#endif
61808+
61809+ read_lock(&tasklist_lock);
61810+ read_lock(&grsec_exec_file_lock);
61811+ filp = task->exec_file;
61812+
61813+ while (task_pid_nr(tmp) > 0) {
61814+ if (tmp == curtemp)
61815+ break;
61816+ tmp = tmp->real_parent;
61817+ }
61818+
61819+ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
61820+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
61821+ read_unlock(&grsec_exec_file_lock);
61822+ read_unlock(&tasklist_lock);
61823+ return 1;
61824+ }
61825+
61826+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
61827+ if (!(gr_status & GR_READY)) {
61828+ read_unlock(&grsec_exec_file_lock);
61829+ read_unlock(&tasklist_lock);
61830+ return 0;
61831+ }
61832+#endif
61833+
61834+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
61835+ read_unlock(&grsec_exec_file_lock);
61836+ read_unlock(&tasklist_lock);
61837+
61838+ if (retmode & GR_NOPTRACE)
61839+ return 1;
61840+
61841+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
61842+ && (current->acl != task->acl || (current->acl != current->role->root_label
61843+ && task_pid_nr(current) != task_pid_nr(task))))
61844+ return 1;
61845+
61846+ return 0;
61847+}
61848+
61849+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
61850+{
61851+ if (unlikely(!(gr_status & GR_READY)))
61852+ return;
61853+
61854+ if (!(current->role->roletype & GR_ROLE_GOD))
61855+ return;
61856+
61857+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
61858+ p->role->rolename, gr_task_roletype_to_char(p),
61859+ p->acl->filename);
61860+}
61861+
61862+int
61863+gr_handle_ptrace(struct task_struct *task, const long request)
61864+{
61865+ struct task_struct *tmp = task;
61866+ struct task_struct *curtemp = current;
61867+ __u32 retmode;
61868+
61869+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
61870+ if (unlikely(!(gr_status & GR_READY)))
61871+ return 0;
61872+#endif
61873+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
61874+ read_lock(&tasklist_lock);
61875+ while (task_pid_nr(tmp) > 0) {
61876+ if (tmp == curtemp)
61877+ break;
61878+ tmp = tmp->real_parent;
61879+ }
61880+
61881+ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
61882+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
61883+ read_unlock(&tasklist_lock);
61884+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
61885+ return 1;
61886+ }
61887+ read_unlock(&tasklist_lock);
61888+ }
61889+
61890+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
61891+ if (!(gr_status & GR_READY))
61892+ return 0;
61893+#endif
61894+
61895+ read_lock(&grsec_exec_file_lock);
61896+ if (unlikely(!task->exec_file)) {
61897+ read_unlock(&grsec_exec_file_lock);
61898+ return 0;
61899+ }
61900+
61901+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
61902+ read_unlock(&grsec_exec_file_lock);
61903+
61904+ if (retmode & GR_NOPTRACE) {
61905+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
61906+ return 1;
61907+ }
61908+
61909+ if (retmode & GR_PTRACERD) {
61910+ switch (request) {
61911+ case PTRACE_SEIZE:
61912+ case PTRACE_POKETEXT:
61913+ case PTRACE_POKEDATA:
61914+ case PTRACE_POKEUSR:
61915+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
61916+ case PTRACE_SETREGS:
61917+ case PTRACE_SETFPREGS:
61918+#endif
61919+#ifdef CONFIG_X86
61920+ case PTRACE_SETFPXREGS:
61921+#endif
61922+#ifdef CONFIG_ALTIVEC
61923+ case PTRACE_SETVRREGS:
61924+#endif
61925+ return 1;
61926+ default:
61927+ return 0;
61928+ }
61929+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
61930+ !(current->role->roletype & GR_ROLE_GOD) &&
61931+ (current->acl != task->acl)) {
61932+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
61933+ return 1;
61934+ }
61935+
61936+ return 0;
61937+}
61938+
61939+static int is_writable_mmap(const struct file *filp)
61940+{
61941+ struct task_struct *task = current;
61942+ struct acl_object_label *obj, *obj2;
61943+
61944+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
61945+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
61946+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
61947+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
61948+ task->role->root_label);
61949+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
61950+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
61951+ return 1;
61952+ }
61953+ }
61954+ return 0;
61955+}
61956+
61957+int
61958+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
61959+{
61960+ __u32 mode;
61961+
61962+ if (unlikely(!file || !(prot & PROT_EXEC)))
61963+ return 1;
61964+
61965+ if (is_writable_mmap(file))
61966+ return 0;
61967+
61968+ mode =
61969+ gr_search_file(file->f_path.dentry,
61970+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
61971+ file->f_path.mnt);
61972+
61973+ if (!gr_tpe_allow(file))
61974+ return 0;
61975+
61976+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
61977+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
61978+ return 0;
61979+ } else if (unlikely(!(mode & GR_EXEC))) {
61980+ return 0;
61981+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
61982+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
61983+ return 1;
61984+ }
61985+
61986+ return 1;
61987+}
61988+
61989+int
61990+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
61991+{
61992+ __u32 mode;
61993+
61994+ if (unlikely(!file || !(prot & PROT_EXEC)))
61995+ return 1;
61996+
61997+ if (is_writable_mmap(file))
61998+ return 0;
61999+
62000+ mode =
62001+ gr_search_file(file->f_path.dentry,
62002+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
62003+ file->f_path.mnt);
62004+
62005+ if (!gr_tpe_allow(file))
62006+ return 0;
62007+
62008+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
62009+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
62010+ return 0;
62011+ } else if (unlikely(!(mode & GR_EXEC))) {
62012+ return 0;
62013+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
62014+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
62015+ return 1;
62016+ }
62017+
62018+ return 1;
62019+}
62020+
62021+void
62022+gr_acl_handle_psacct(struct task_struct *task, const long code)
62023+{
62024+ unsigned long runtime;
62025+ unsigned long cputime;
62026+ unsigned int wday, cday;
62027+ __u8 whr, chr;
62028+ __u8 wmin, cmin;
62029+ __u8 wsec, csec;
62030+ struct timespec timeval;
62031+
62032+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
62033+ !(task->acl->mode & GR_PROCACCT)))
62034+ return;
62035+
62036+ do_posix_clock_monotonic_gettime(&timeval);
62037+ runtime = timeval.tv_sec - task->start_time.tv_sec;
62038+ wday = runtime / (3600 * 24);
62039+ runtime -= wday * (3600 * 24);
62040+ whr = runtime / 3600;
62041+ runtime -= whr * 3600;
62042+ wmin = runtime / 60;
62043+ runtime -= wmin * 60;
62044+ wsec = runtime;
62045+
62046+ cputime = (task->utime + task->stime) / HZ;
62047+ cday = cputime / (3600 * 24);
62048+ cputime -= cday * (3600 * 24);
62049+ chr = cputime / 3600;
62050+ cputime -= chr * 3600;
62051+ cmin = cputime / 60;
62052+ cputime -= cmin * 60;
62053+ csec = cputime;
62054+
62055+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
62056+
62057+ return;
62058+}
62059+
62060+void gr_set_kernel_label(struct task_struct *task)
62061+{
62062+ if (gr_status & GR_READY) {
62063+ task->role = kernel_role;
62064+ task->acl = kernel_role->root_label;
62065+ }
62066+ return;
62067+}
62068+
62069+#ifdef CONFIG_TASKSTATS
62070+int gr_is_taskstats_denied(int pid)
62071+{
62072+ struct task_struct *task;
62073+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62074+ const struct cred *cred;
62075+#endif
62076+ int ret = 0;
62077+
62078+ /* restrict taskstats viewing to un-chrooted root users
62079+ who have the 'view' subject flag if the RBAC system is enabled
62080+ */
62081+
62082+ rcu_read_lock();
62083+ read_lock(&tasklist_lock);
62084+ task = find_task_by_vpid(pid);
62085+ if (task) {
62086+#ifdef CONFIG_GRKERNSEC_CHROOT
62087+ if (proc_is_chrooted(task))
62088+ ret = -EACCES;
62089+#endif
62090+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62091+ cred = __task_cred(task);
62092+#ifdef CONFIG_GRKERNSEC_PROC_USER
62093+ if (gr_is_global_nonroot(cred->uid))
62094+ ret = -EACCES;
62095+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62096+ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
62097+ ret = -EACCES;
62098+#endif
62099+#endif
62100+ if (gr_status & GR_READY) {
62101+ if (!(task->acl->mode & GR_VIEW))
62102+ ret = -EACCES;
62103+ }
62104+ } else
62105+ ret = -ENOENT;
62106+
62107+ read_unlock(&tasklist_lock);
62108+ rcu_read_unlock();
62109+
62110+ return ret;
62111+}
62112+#endif
62113+
62114+/* AUXV entries are filled via a descendant of search_binary_handler
62115+ after we've already applied the subject for the target
62116+*/
62117+int gr_acl_enable_at_secure(void)
62118+{
62119+ if (unlikely(!(gr_status & GR_READY)))
62120+ return 0;
62121+
62122+ if (current->acl->mode & GR_ATSECURE)
62123+ return 1;
62124+
62125+ return 0;
62126+}
62127+
62128+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
62129+{
62130+ struct task_struct *task = current;
62131+ struct dentry *dentry = file->f_path.dentry;
62132+ struct vfsmount *mnt = file->f_path.mnt;
62133+ struct acl_object_label *obj, *tmp;
62134+ struct acl_subject_label *subj;
62135+ unsigned int bufsize;
62136+ int is_not_root;
62137+ char *path;
62138+ dev_t dev = __get_dev(dentry);
62139+
62140+ if (unlikely(!(gr_status & GR_READY)))
62141+ return 1;
62142+
62143+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
62144+ return 1;
62145+
62146+ /* ignore Eric Biederman */
62147+ if (IS_PRIVATE(dentry->d_inode))
62148+ return 1;
62149+
62150+ subj = task->acl;
62151+ read_lock(&gr_inode_lock);
62152+ do {
62153+ obj = lookup_acl_obj_label(ino, dev, subj);
62154+ if (obj != NULL) {
62155+ read_unlock(&gr_inode_lock);
62156+ return (obj->mode & GR_FIND) ? 1 : 0;
62157+ }
62158+ } while ((subj = subj->parent_subject));
62159+ read_unlock(&gr_inode_lock);
62160+
62161+ /* this is purely an optimization since we're looking for an object
62162+ for the directory we're doing a readdir on
62163+ if it's possible for any globbed object to match the entry we're
62164+ filling into the directory, then the object we find here will be
62165+ an anchor point with attached globbed objects
62166+ */
62167+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
62168+ if (obj->globbed == NULL)
62169+ return (obj->mode & GR_FIND) ? 1 : 0;
62170+
62171+ is_not_root = ((obj->filename[0] == '/') &&
62172+ (obj->filename[1] == '\0')) ? 0 : 1;
62173+ bufsize = PAGE_SIZE - namelen - is_not_root;
62174+
62175+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
62176+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
62177+ return 1;
62178+
62179+ preempt_disable();
62180+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
62181+ bufsize);
62182+
62183+ bufsize = strlen(path);
62184+
62185+ /* if base is "/", don't append an additional slash */
62186+ if (is_not_root)
62187+ *(path + bufsize) = '/';
62188+ memcpy(path + bufsize + is_not_root, name, namelen);
62189+ *(path + bufsize + namelen + is_not_root) = '\0';
62190+
62191+ tmp = obj->globbed;
62192+ while (tmp) {
62193+ if (!glob_match(tmp->filename, path)) {
62194+ preempt_enable();
62195+ return (tmp->mode & GR_FIND) ? 1 : 0;
62196+ }
62197+ tmp = tmp->next;
62198+ }
62199+ preempt_enable();
62200+ return (obj->mode & GR_FIND) ? 1 : 0;
62201+}
62202+
62203+void gr_put_exec_file(struct task_struct *task)
62204+{
62205+ struct file *filp;
62206+
62207+ write_lock(&grsec_exec_file_lock);
62208+ filp = task->exec_file;
62209+ task->exec_file = NULL;
62210+ write_unlock(&grsec_exec_file_lock);
62211+
62212+ if (filp)
62213+ fput(filp);
62214+
62215+ return;
62216+}
62217+
62218+
62219+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
62220+EXPORT_SYMBOL(gr_acl_is_enabled);
62221+#endif
62222+EXPORT_SYMBOL(gr_set_kernel_label);
62223+#ifdef CONFIG_SECURITY
62224+EXPORT_SYMBOL(gr_check_user_change);
62225+EXPORT_SYMBOL(gr_check_group_change);
62226+#endif
62227+
62228diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
62229new file mode 100644
62230index 0000000..34fefda
62231--- /dev/null
62232+++ b/grsecurity/gracl_alloc.c
62233@@ -0,0 +1,105 @@
62234+#include <linux/kernel.h>
62235+#include <linux/mm.h>
62236+#include <linux/slab.h>
62237+#include <linux/vmalloc.h>
62238+#include <linux/gracl.h>
62239+#include <linux/grsecurity.h>
62240+
62241+static unsigned long alloc_stack_next = 1;
62242+static unsigned long alloc_stack_size = 1;
62243+static void **alloc_stack;
62244+
62245+static __inline__ int
62246+alloc_pop(void)
62247+{
62248+ if (alloc_stack_next == 1)
62249+ return 0;
62250+
62251+ kfree(alloc_stack[alloc_stack_next - 2]);
62252+
62253+ alloc_stack_next--;
62254+
62255+ return 1;
62256+}
62257+
62258+static __inline__ int
62259+alloc_push(void *buf)
62260+{
62261+ if (alloc_stack_next >= alloc_stack_size)
62262+ return 1;
62263+
62264+ alloc_stack[alloc_stack_next - 1] = buf;
62265+
62266+ alloc_stack_next++;
62267+
62268+ return 0;
62269+}
62270+
62271+void *
62272+acl_alloc(unsigned long len)
62273+{
62274+ void *ret = NULL;
62275+
62276+ if (!len || len > PAGE_SIZE)
62277+ goto out;
62278+
62279+ ret = kmalloc(len, GFP_KERNEL);
62280+
62281+ if (ret) {
62282+ if (alloc_push(ret)) {
62283+ kfree(ret);
62284+ ret = NULL;
62285+ }
62286+ }
62287+
62288+out:
62289+ return ret;
62290+}
62291+
62292+void *
62293+acl_alloc_num(unsigned long num, unsigned long len)
62294+{
62295+ if (!len || (num > (PAGE_SIZE / len)))
62296+ return NULL;
62297+
62298+ return acl_alloc(num * len);
62299+}
62300+
62301+void
62302+acl_free_all(void)
62303+{
62304+ if (gr_acl_is_enabled() || !alloc_stack)
62305+ return;
62306+
62307+ while (alloc_pop()) ;
62308+
62309+ if (alloc_stack) {
62310+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
62311+ kfree(alloc_stack);
62312+ else
62313+ vfree(alloc_stack);
62314+ }
62315+
62316+ alloc_stack = NULL;
62317+ alloc_stack_size = 1;
62318+ alloc_stack_next = 1;
62319+
62320+ return;
62321+}
62322+
62323+int
62324+acl_alloc_stack_init(unsigned long size)
62325+{
62326+ if ((size * sizeof (void *)) <= PAGE_SIZE)
62327+ alloc_stack =
62328+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
62329+ else
62330+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
62331+
62332+ alloc_stack_size = size;
62333+
62334+ if (!alloc_stack)
62335+ return 0;
62336+ else
62337+ return 1;
62338+}
62339diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
62340new file mode 100644
62341index 0000000..bdd51ea
62342--- /dev/null
62343+++ b/grsecurity/gracl_cap.c
62344@@ -0,0 +1,110 @@
62345+#include <linux/kernel.h>
62346+#include <linux/module.h>
62347+#include <linux/sched.h>
62348+#include <linux/gracl.h>
62349+#include <linux/grsecurity.h>
62350+#include <linux/grinternal.h>
62351+
62352+extern const char *captab_log[];
62353+extern int captab_log_entries;
62354+
62355+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
62356+{
62357+ struct acl_subject_label *curracl;
62358+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
62359+ kernel_cap_t cap_audit = __cap_empty_set;
62360+
62361+ if (!gr_acl_is_enabled())
62362+ return 1;
62363+
62364+ curracl = task->acl;
62365+
62366+ cap_drop = curracl->cap_lower;
62367+ cap_mask = curracl->cap_mask;
62368+ cap_audit = curracl->cap_invert_audit;
62369+
62370+ while ((curracl = curracl->parent_subject)) {
62371+ /* if the cap isn't specified in the current computed mask but is specified in the
62372+ current level subject, and is lowered in the current level subject, then add
62373+ it to the set of dropped capabilities
62374+ otherwise, add the current level subject's mask to the current computed mask
62375+ */
62376+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
62377+ cap_raise(cap_mask, cap);
62378+ if (cap_raised(curracl->cap_lower, cap))
62379+ cap_raise(cap_drop, cap);
62380+ if (cap_raised(curracl->cap_invert_audit, cap))
62381+ cap_raise(cap_audit, cap);
62382+ }
62383+ }
62384+
62385+ if (!cap_raised(cap_drop, cap)) {
62386+ if (cap_raised(cap_audit, cap))
62387+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
62388+ return 1;
62389+ }
62390+
62391+ curracl = task->acl;
62392+
62393+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
62394+ && cap_raised(cred->cap_effective, cap)) {
62395+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
62396+ task->role->roletype, GR_GLOBAL_UID(cred->uid),
62397+ GR_GLOBAL_GID(cred->gid), task->exec_file ?
62398+ gr_to_filename(task->exec_file->f_path.dentry,
62399+ task->exec_file->f_path.mnt) : curracl->filename,
62400+ curracl->filename, 0UL,
62401+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
62402+ return 1;
62403+ }
62404+
62405+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
62406+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
62407+
62408+ return 0;
62409+}
62410+
62411+int
62412+gr_acl_is_capable(const int cap)
62413+{
62414+ return gr_task_acl_is_capable(current, current_cred(), cap);
62415+}
62416+
62417+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
62418+{
62419+ struct acl_subject_label *curracl;
62420+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
62421+
62422+ if (!gr_acl_is_enabled())
62423+ return 1;
62424+
62425+ curracl = task->acl;
62426+
62427+ cap_drop = curracl->cap_lower;
62428+ cap_mask = curracl->cap_mask;
62429+
62430+ while ((curracl = curracl->parent_subject)) {
62431+ /* if the cap isn't specified in the current computed mask but is specified in the
62432+ current level subject, and is lowered in the current level subject, then add
62433+ it to the set of dropped capabilities
62434+ otherwise, add the current level subject's mask to the current computed mask
62435+ */
62436+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
62437+ cap_raise(cap_mask, cap);
62438+ if (cap_raised(curracl->cap_lower, cap))
62439+ cap_raise(cap_drop, cap);
62440+ }
62441+ }
62442+
62443+ if (!cap_raised(cap_drop, cap))
62444+ return 1;
62445+
62446+ return 0;
62447+}
62448+
62449+int
62450+gr_acl_is_capable_nolog(const int cap)
62451+{
62452+ return gr_task_acl_is_capable_nolog(current, cap);
62453+}
62454+
62455diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
62456new file mode 100644
62457index 0000000..a340c17
62458--- /dev/null
62459+++ b/grsecurity/gracl_fs.c
62460@@ -0,0 +1,431 @@
62461+#include <linux/kernel.h>
62462+#include <linux/sched.h>
62463+#include <linux/types.h>
62464+#include <linux/fs.h>
62465+#include <linux/file.h>
62466+#include <linux/stat.h>
62467+#include <linux/grsecurity.h>
62468+#include <linux/grinternal.h>
62469+#include <linux/gracl.h>
62470+
62471+umode_t
62472+gr_acl_umask(void)
62473+{
62474+ if (unlikely(!gr_acl_is_enabled()))
62475+ return 0;
62476+
62477+ return current->role->umask;
62478+}
62479+
62480+__u32
62481+gr_acl_handle_hidden_file(const struct dentry * dentry,
62482+ const struct vfsmount * mnt)
62483+{
62484+ __u32 mode;
62485+
62486+ if (unlikely(!dentry->d_inode))
62487+ return GR_FIND;
62488+
62489+ mode =
62490+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
62491+
62492+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
62493+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
62494+ return mode;
62495+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
62496+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
62497+ return 0;
62498+ } else if (unlikely(!(mode & GR_FIND)))
62499+ return 0;
62500+
62501+ return GR_FIND;
62502+}
62503+
62504+__u32
62505+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
62506+ int acc_mode)
62507+{
62508+ __u32 reqmode = GR_FIND;
62509+ __u32 mode;
62510+
62511+ if (unlikely(!dentry->d_inode))
62512+ return reqmode;
62513+
62514+ if (acc_mode & MAY_APPEND)
62515+ reqmode |= GR_APPEND;
62516+ else if (acc_mode & MAY_WRITE)
62517+ reqmode |= GR_WRITE;
62518+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
62519+ reqmode |= GR_READ;
62520+
62521+ mode =
62522+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
62523+ mnt);
62524+
62525+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
62526+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
62527+ reqmode & GR_READ ? " reading" : "",
62528+ reqmode & GR_WRITE ? " writing" : reqmode &
62529+ GR_APPEND ? " appending" : "");
62530+ return reqmode;
62531+ } else
62532+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
62533+ {
62534+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
62535+ reqmode & GR_READ ? " reading" : "",
62536+ reqmode & GR_WRITE ? " writing" : reqmode &
62537+ GR_APPEND ? " appending" : "");
62538+ return 0;
62539+ } else if (unlikely((mode & reqmode) != reqmode))
62540+ return 0;
62541+
62542+ return reqmode;
62543+}
62544+
62545+__u32
62546+gr_acl_handle_creat(const struct dentry * dentry,
62547+ const struct dentry * p_dentry,
62548+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
62549+ const int imode)
62550+{
62551+ __u32 reqmode = GR_WRITE | GR_CREATE;
62552+ __u32 mode;
62553+
62554+ if (acc_mode & MAY_APPEND)
62555+ reqmode |= GR_APPEND;
62556+ // if a directory was required or the directory already exists, then
62557+ // don't count this open as a read
62558+ if ((acc_mode & MAY_READ) &&
62559+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
62560+ reqmode |= GR_READ;
62561+ if ((open_flags & O_CREAT) &&
62562+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
62563+ reqmode |= GR_SETID;
62564+
62565+ mode =
62566+ gr_check_create(dentry, p_dentry, p_mnt,
62567+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
62568+
62569+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
62570+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
62571+ reqmode & GR_READ ? " reading" : "",
62572+ reqmode & GR_WRITE ? " writing" : reqmode &
62573+ GR_APPEND ? " appending" : "");
62574+ return reqmode;
62575+ } else
62576+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
62577+ {
62578+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
62579+ reqmode & GR_READ ? " reading" : "",
62580+ reqmode & GR_WRITE ? " writing" : reqmode &
62581+ GR_APPEND ? " appending" : "");
62582+ return 0;
62583+ } else if (unlikely((mode & reqmode) != reqmode))
62584+ return 0;
62585+
62586+ return reqmode;
62587+}
62588+
62589+__u32
62590+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
62591+ const int fmode)
62592+{
62593+ __u32 mode, reqmode = GR_FIND;
62594+
62595+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
62596+ reqmode |= GR_EXEC;
62597+ if (fmode & S_IWOTH)
62598+ reqmode |= GR_WRITE;
62599+ if (fmode & S_IROTH)
62600+ reqmode |= GR_READ;
62601+
62602+ mode =
62603+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
62604+ mnt);
62605+
62606+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
62607+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
62608+ reqmode & GR_READ ? " reading" : "",
62609+ reqmode & GR_WRITE ? " writing" : "",
62610+ reqmode & GR_EXEC ? " executing" : "");
62611+ return reqmode;
62612+ } else
62613+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
62614+ {
62615+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
62616+ reqmode & GR_READ ? " reading" : "",
62617+ reqmode & GR_WRITE ? " writing" : "",
62618+ reqmode & GR_EXEC ? " executing" : "");
62619+ return 0;
62620+ } else if (unlikely((mode & reqmode) != reqmode))
62621+ return 0;
62622+
62623+ return reqmode;
62624+}
62625+
62626+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
62627+{
62628+ __u32 mode;
62629+
62630+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
62631+
62632+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
62633+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
62634+ return mode;
62635+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
62636+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
62637+ return 0;
62638+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
62639+ return 0;
62640+
62641+ return (reqmode);
62642+}
62643+
62644+__u32
62645+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
62646+{
62647+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
62648+}
62649+
62650+__u32
62651+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
62652+{
62653+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
62654+}
62655+
62656+__u32
62657+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
62658+{
62659+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
62660+}
62661+
62662+__u32
62663+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
62664+{
62665+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
62666+}
62667+
62668+__u32
62669+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
62670+ umode_t *modeptr)
62671+{
62672+ umode_t mode;
62673+
62674+ *modeptr &= ~gr_acl_umask();
62675+ mode = *modeptr;
62676+
62677+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
62678+ return 1;
62679+
62680+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
62681+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
62682+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
62683+ GR_CHMOD_ACL_MSG);
62684+ } else {
62685+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
62686+ }
62687+}
62688+
62689+__u32
62690+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
62691+{
62692+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
62693+}
62694+
62695+__u32
62696+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
62697+{
62698+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
62699+}
62700+
62701+__u32
62702+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
62703+{
62704+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
62705+}
62706+
62707+__u32
62708+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
62709+{
62710+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
62711+ GR_UNIXCONNECT_ACL_MSG);
62712+}
62713+
62714+/* hardlinks require at minimum create and link permission,
62715+ any additional privilege required is based on the
62716+ privilege of the file being linked to
62717+*/
62718+__u32
62719+gr_acl_handle_link(const struct dentry * new_dentry,
62720+ const struct dentry * parent_dentry,
62721+ const struct vfsmount * parent_mnt,
62722+ const struct dentry * old_dentry,
62723+ const struct vfsmount * old_mnt, const struct filename *to)
62724+{
62725+ __u32 mode;
62726+ __u32 needmode = GR_CREATE | GR_LINK;
62727+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
62728+
62729+ mode =
62730+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
62731+ old_mnt);
62732+
62733+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
62734+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
62735+ return mode;
62736+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
62737+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
62738+ return 0;
62739+ } else if (unlikely((mode & needmode) != needmode))
62740+ return 0;
62741+
62742+ return 1;
62743+}
62744+
62745+__u32
62746+gr_acl_handle_symlink(const struct dentry * new_dentry,
62747+ const struct dentry * parent_dentry,
62748+ const struct vfsmount * parent_mnt, const struct filename *from)
62749+{
62750+ __u32 needmode = GR_WRITE | GR_CREATE;
62751+ __u32 mode;
62752+
62753+ mode =
62754+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
62755+ GR_CREATE | GR_AUDIT_CREATE |
62756+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
62757+
62758+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
62759+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
62760+ return mode;
62761+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
62762+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
62763+ return 0;
62764+ } else if (unlikely((mode & needmode) != needmode))
62765+ return 0;
62766+
62767+ return (GR_WRITE | GR_CREATE);
62768+}
62769+
62770+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
62771+{
62772+ __u32 mode;
62773+
62774+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
62775+
62776+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
62777+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
62778+ return mode;
62779+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
62780+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
62781+ return 0;
62782+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
62783+ return 0;
62784+
62785+ return (reqmode);
62786+}
62787+
62788+__u32
62789+gr_acl_handle_mknod(const struct dentry * new_dentry,
62790+ const struct dentry * parent_dentry,
62791+ const struct vfsmount * parent_mnt,
62792+ const int mode)
62793+{
62794+ __u32 reqmode = GR_WRITE | GR_CREATE;
62795+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
62796+ reqmode |= GR_SETID;
62797+
62798+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
62799+ reqmode, GR_MKNOD_ACL_MSG);
62800+}
62801+
62802+__u32
62803+gr_acl_handle_mkdir(const struct dentry *new_dentry,
62804+ const struct dentry *parent_dentry,
62805+ const struct vfsmount *parent_mnt)
62806+{
62807+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
62808+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
62809+}
62810+
62811+#define RENAME_CHECK_SUCCESS(old, new) \
62812+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
62813+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
62814+
62815+int
62816+gr_acl_handle_rename(struct dentry *new_dentry,
62817+ struct dentry *parent_dentry,
62818+ const struct vfsmount *parent_mnt,
62819+ struct dentry *old_dentry,
62820+ struct inode *old_parent_inode,
62821+ struct vfsmount *old_mnt, const struct filename *newname)
62822+{
62823+ __u32 comp1, comp2;
62824+ int error = 0;
62825+
62826+ if (unlikely(!gr_acl_is_enabled()))
62827+ return 0;
62828+
62829+ if (!new_dentry->d_inode) {
62830+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
62831+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
62832+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
62833+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
62834+ GR_DELETE | GR_AUDIT_DELETE |
62835+ GR_AUDIT_READ | GR_AUDIT_WRITE |
62836+ GR_SUPPRESS, old_mnt);
62837+ } else {
62838+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
62839+ GR_CREATE | GR_DELETE |
62840+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
62841+ GR_AUDIT_READ | GR_AUDIT_WRITE |
62842+ GR_SUPPRESS, parent_mnt);
62843+ comp2 =
62844+ gr_search_file(old_dentry,
62845+ GR_READ | GR_WRITE | GR_AUDIT_READ |
62846+ GR_DELETE | GR_AUDIT_DELETE |
62847+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
62848+ }
62849+
62850+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
62851+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
62852+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
62853+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
62854+ && !(comp2 & GR_SUPPRESS)) {
62855+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
62856+ error = -EACCES;
62857+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
62858+ error = -EACCES;
62859+
62860+ return error;
62861+}
62862+
62863+void
62864+gr_acl_handle_exit(void)
62865+{
62866+ u16 id;
62867+ char *rolename;
62868+
62869+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
62870+ !(current->role->roletype & GR_ROLE_PERSIST))) {
62871+ id = current->acl_role_id;
62872+ rolename = current->role->rolename;
62873+ gr_set_acls(1);
62874+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
62875+ }
62876+
62877+ gr_put_exec_file(current);
62878+ return;
62879+}
62880+
62881+int
62882+gr_acl_handle_procpidmem(const struct task_struct *task)
62883+{
62884+ if (unlikely(!gr_acl_is_enabled()))
62885+ return 0;
62886+
62887+ if (task != current && task->acl->mode & GR_PROTPROCFD)
62888+ return -EACCES;
62889+
62890+ return 0;
62891+}
62892diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
62893new file mode 100644
62894index 0000000..8132048
62895--- /dev/null
62896+++ b/grsecurity/gracl_ip.c
62897@@ -0,0 +1,387 @@
62898+#include <linux/kernel.h>
62899+#include <asm/uaccess.h>
62900+#include <asm/errno.h>
62901+#include <net/sock.h>
62902+#include <linux/file.h>
62903+#include <linux/fs.h>
62904+#include <linux/net.h>
62905+#include <linux/in.h>
62906+#include <linux/skbuff.h>
62907+#include <linux/ip.h>
62908+#include <linux/udp.h>
62909+#include <linux/types.h>
62910+#include <linux/sched.h>
62911+#include <linux/netdevice.h>
62912+#include <linux/inetdevice.h>
62913+#include <linux/gracl.h>
62914+#include <linux/grsecurity.h>
62915+#include <linux/grinternal.h>
62916+
62917+#define GR_BIND 0x01
62918+#define GR_CONNECT 0x02
62919+#define GR_INVERT 0x04
62920+#define GR_BINDOVERRIDE 0x08
62921+#define GR_CONNECTOVERRIDE 0x10
62922+#define GR_SOCK_FAMILY 0x20
62923+
62924+static const char * gr_protocols[IPPROTO_MAX] = {
62925+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
62926+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
62927+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
62928+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
62929+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
62930+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
62931+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
62932+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
62933+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
62934+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
62935+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
62936+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
62937+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
62938+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
62939+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
62940+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
62941+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
62942+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
62943+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
62944+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
62945+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
62946+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
62947+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
62948+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
62949+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
62950+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
62951+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
62952+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
62953+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
62954+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
62955+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
62956+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
62957+ };
62958+
62959+static const char * gr_socktypes[SOCK_MAX] = {
62960+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
62961+ "unknown:7", "unknown:8", "unknown:9", "packet"
62962+ };
62963+
62964+static const char * gr_sockfamilies[AF_MAX+1] = {
62965+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
62966+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
62967+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
62968+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
62969+ };
62970+
62971+const char *
62972+gr_proto_to_name(unsigned char proto)
62973+{
62974+ return gr_protocols[proto];
62975+}
62976+
62977+const char *
62978+gr_socktype_to_name(unsigned char type)
62979+{
62980+ return gr_socktypes[type];
62981+}
62982+
62983+const char *
62984+gr_sockfamily_to_name(unsigned char family)
62985+{
62986+ return gr_sockfamilies[family];
62987+}
62988+
62989+int
62990+gr_search_socket(const int domain, const int type, const int protocol)
62991+{
62992+ struct acl_subject_label *curr;
62993+ const struct cred *cred = current_cred();
62994+
62995+ if (unlikely(!gr_acl_is_enabled()))
62996+ goto exit;
62997+
62998+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
62999+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
63000+ goto exit; // let the kernel handle it
63001+
63002+ curr = current->acl;
63003+
63004+ if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
63005+ /* the family is allowed, if this is PF_INET allow it only if
63006+ the extra sock type/protocol checks pass */
63007+ if (domain == PF_INET)
63008+ goto inet_check;
63009+ goto exit;
63010+ } else {
63011+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
63012+ __u32 fakeip = 0;
63013+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
63014+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
63015+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
63016+ gr_to_filename(current->exec_file->f_path.dentry,
63017+ current->exec_file->f_path.mnt) :
63018+ curr->filename, curr->filename,
63019+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
63020+ &current->signal->saved_ip);
63021+ goto exit;
63022+ }
63023+ goto exit_fail;
63024+ }
63025+
63026+inet_check:
63027+ /* the rest of this checking is for IPv4 only */
63028+ if (!curr->ips)
63029+ goto exit;
63030+
63031+ if ((curr->ip_type & (1U << type)) &&
63032+ (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
63033+ goto exit;
63034+
63035+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
63036+ /* we don't place acls on raw sockets , and sometimes
63037+ dgram/ip sockets are opened for ioctl and not
63038+ bind/connect, so we'll fake a bind learn log */
63039+ if (type == SOCK_RAW || type == SOCK_PACKET) {
63040+ __u32 fakeip = 0;
63041+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
63042+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
63043+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
63044+ gr_to_filename(current->exec_file->f_path.dentry,
63045+ current->exec_file->f_path.mnt) :
63046+ curr->filename, curr->filename,
63047+ &fakeip, 0, type,
63048+ protocol, GR_CONNECT, &current->signal->saved_ip);
63049+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
63050+ __u32 fakeip = 0;
63051+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
63052+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
63053+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
63054+ gr_to_filename(current->exec_file->f_path.dentry,
63055+ current->exec_file->f_path.mnt) :
63056+ curr->filename, curr->filename,
63057+ &fakeip, 0, type,
63058+ protocol, GR_BIND, &current->signal->saved_ip);
63059+ }
63060+ /* we'll log when they use connect or bind */
63061+ goto exit;
63062+ }
63063+
63064+exit_fail:
63065+ if (domain == PF_INET)
63066+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
63067+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
63068+ else
63069+#ifndef CONFIG_IPV6
63070+ if (domain != PF_INET6)
63071+#endif
63072+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
63073+ gr_socktype_to_name(type), protocol);
63074+
63075+ return 0;
63076+exit:
63077+ return 1;
63078+}
63079+
63080+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
63081+{
63082+ if ((ip->mode & mode) &&
63083+ (ip_port >= ip->low) &&
63084+ (ip_port <= ip->high) &&
63085+ ((ntohl(ip_addr) & our_netmask) ==
63086+ (ntohl(our_addr) & our_netmask))
63087+ && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
63088+ && (ip->type & (1U << type))) {
63089+ if (ip->mode & GR_INVERT)
63090+ return 2; // specifically denied
63091+ else
63092+ return 1; // allowed
63093+ }
63094+
63095+ return 0; // not specifically allowed, may continue parsing
63096+}
63097+
63098+static int
63099+gr_search_connectbind(const int full_mode, struct sock *sk,
63100+ struct sockaddr_in *addr, const int type)
63101+{
63102+ char iface[IFNAMSIZ] = {0};
63103+ struct acl_subject_label *curr;
63104+ struct acl_ip_label *ip;
63105+ struct inet_sock *isk;
63106+ struct net_device *dev;
63107+ struct in_device *idev;
63108+ unsigned long i;
63109+ int ret;
63110+ int mode = full_mode & (GR_BIND | GR_CONNECT);
63111+ __u32 ip_addr = 0;
63112+ __u32 our_addr;
63113+ __u32 our_netmask;
63114+ char *p;
63115+ __u16 ip_port = 0;
63116+ const struct cred *cred = current_cred();
63117+
63118+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
63119+ return 0;
63120+
63121+ curr = current->acl;
63122+ isk = inet_sk(sk);
63123+
63124+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
63125+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
63126+ addr->sin_addr.s_addr = curr->inaddr_any_override;
63127+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
63128+ struct sockaddr_in saddr;
63129+ int err;
63130+
63131+ saddr.sin_family = AF_INET;
63132+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
63133+ saddr.sin_port = isk->inet_sport;
63134+
63135+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
63136+ if (err)
63137+ return err;
63138+
63139+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
63140+ if (err)
63141+ return err;
63142+ }
63143+
63144+ if (!curr->ips)
63145+ return 0;
63146+
63147+ ip_addr = addr->sin_addr.s_addr;
63148+ ip_port = ntohs(addr->sin_port);
63149+
63150+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
63151+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
63152+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
63153+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
63154+ gr_to_filename(current->exec_file->f_path.dentry,
63155+ current->exec_file->f_path.mnt) :
63156+ curr->filename, curr->filename,
63157+ &ip_addr, ip_port, type,
63158+ sk->sk_protocol, mode, &current->signal->saved_ip);
63159+ return 0;
63160+ }
63161+
63162+ for (i = 0; i < curr->ip_num; i++) {
63163+ ip = *(curr->ips + i);
63164+ if (ip->iface != NULL) {
63165+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
63166+ p = strchr(iface, ':');
63167+ if (p != NULL)
63168+ *p = '\0';
63169+ dev = dev_get_by_name(sock_net(sk), iface);
63170+ if (dev == NULL)
63171+ continue;
63172+ idev = in_dev_get(dev);
63173+ if (idev == NULL) {
63174+ dev_put(dev);
63175+ continue;
63176+ }
63177+ rcu_read_lock();
63178+ for_ifa(idev) {
63179+ if (!strcmp(ip->iface, ifa->ifa_label)) {
63180+ our_addr = ifa->ifa_address;
63181+ our_netmask = 0xffffffff;
63182+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
63183+ if (ret == 1) {
63184+ rcu_read_unlock();
63185+ in_dev_put(idev);
63186+ dev_put(dev);
63187+ return 0;
63188+ } else if (ret == 2) {
63189+ rcu_read_unlock();
63190+ in_dev_put(idev);
63191+ dev_put(dev);
63192+ goto denied;
63193+ }
63194+ }
63195+ } endfor_ifa(idev);
63196+ rcu_read_unlock();
63197+ in_dev_put(idev);
63198+ dev_put(dev);
63199+ } else {
63200+ our_addr = ip->addr;
63201+ our_netmask = ip->netmask;
63202+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
63203+ if (ret == 1)
63204+ return 0;
63205+ else if (ret == 2)
63206+ goto denied;
63207+ }
63208+ }
63209+
63210+denied:
63211+ if (mode == GR_BIND)
63212+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
63213+ else if (mode == GR_CONNECT)
63214+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
63215+
63216+ return -EACCES;
63217+}
63218+
63219+int
63220+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
63221+{
63222+ /* always allow disconnection of dgram sockets with connect */
63223+ if (addr->sin_family == AF_UNSPEC)
63224+ return 0;
63225+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
63226+}
63227+
63228+int
63229+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
63230+{
63231+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
63232+}
63233+
63234+int gr_search_listen(struct socket *sock)
63235+{
63236+ struct sock *sk = sock->sk;
63237+ struct sockaddr_in addr;
63238+
63239+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
63240+ addr.sin_port = inet_sk(sk)->inet_sport;
63241+
63242+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
63243+}
63244+
63245+int gr_search_accept(struct socket *sock)
63246+{
63247+ struct sock *sk = sock->sk;
63248+ struct sockaddr_in addr;
63249+
63250+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
63251+ addr.sin_port = inet_sk(sk)->inet_sport;
63252+
63253+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
63254+}
63255+
63256+int
63257+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
63258+{
63259+ if (addr)
63260+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
63261+ else {
63262+ struct sockaddr_in sin;
63263+ const struct inet_sock *inet = inet_sk(sk);
63264+
63265+ sin.sin_addr.s_addr = inet->inet_daddr;
63266+ sin.sin_port = inet->inet_dport;
63267+
63268+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
63269+ }
63270+}
63271+
63272+int
63273+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
63274+{
63275+ struct sockaddr_in sin;
63276+
63277+ if (unlikely(skb->len < sizeof (struct udphdr)))
63278+ return 0; // skip this packet
63279+
63280+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
63281+ sin.sin_port = udp_hdr(skb)->source;
63282+
63283+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
63284+}
63285diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
63286new file mode 100644
63287index 0000000..25f54ef
63288--- /dev/null
63289+++ b/grsecurity/gracl_learn.c
63290@@ -0,0 +1,207 @@
63291+#include <linux/kernel.h>
63292+#include <linux/mm.h>
63293+#include <linux/sched.h>
63294+#include <linux/poll.h>
63295+#include <linux/string.h>
63296+#include <linux/file.h>
63297+#include <linux/types.h>
63298+#include <linux/vmalloc.h>
63299+#include <linux/grinternal.h>
63300+
63301+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
63302+ size_t count, loff_t *ppos);
63303+extern int gr_acl_is_enabled(void);
63304+
63305+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
63306+static int gr_learn_attached;
63307+
63308+/* use a 512k buffer */
63309+#define LEARN_BUFFER_SIZE (512 * 1024)
63310+
63311+static DEFINE_SPINLOCK(gr_learn_lock);
63312+static DEFINE_MUTEX(gr_learn_user_mutex);
63313+
63314+/* we need to maintain two buffers, so that the kernel context of grlearn
63315+ uses a semaphore around the userspace copying, and the other kernel contexts
63316+ use a spinlock when copying into the buffer, since they cannot sleep
63317+*/
63318+static char *learn_buffer;
63319+static char *learn_buffer_user;
63320+static int learn_buffer_len;
63321+static int learn_buffer_user_len;
63322+
63323+static ssize_t
63324+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
63325+{
63326+ DECLARE_WAITQUEUE(wait, current);
63327+ ssize_t retval = 0;
63328+
63329+ add_wait_queue(&learn_wait, &wait);
63330+ set_current_state(TASK_INTERRUPTIBLE);
63331+ do {
63332+ mutex_lock(&gr_learn_user_mutex);
63333+ spin_lock(&gr_learn_lock);
63334+ if (learn_buffer_len)
63335+ break;
63336+ spin_unlock(&gr_learn_lock);
63337+ mutex_unlock(&gr_learn_user_mutex);
63338+ if (file->f_flags & O_NONBLOCK) {
63339+ retval = -EAGAIN;
63340+ goto out;
63341+ }
63342+ if (signal_pending(current)) {
63343+ retval = -ERESTARTSYS;
63344+ goto out;
63345+ }
63346+
63347+ schedule();
63348+ } while (1);
63349+
63350+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
63351+ learn_buffer_user_len = learn_buffer_len;
63352+ retval = learn_buffer_len;
63353+ learn_buffer_len = 0;
63354+
63355+ spin_unlock(&gr_learn_lock);
63356+
63357+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
63358+ retval = -EFAULT;
63359+
63360+ mutex_unlock(&gr_learn_user_mutex);
63361+out:
63362+ set_current_state(TASK_RUNNING);
63363+ remove_wait_queue(&learn_wait, &wait);
63364+ return retval;
63365+}
63366+
63367+static unsigned int
63368+poll_learn(struct file * file, poll_table * wait)
63369+{
63370+ poll_wait(file, &learn_wait, wait);
63371+
63372+ if (learn_buffer_len)
63373+ return (POLLIN | POLLRDNORM);
63374+
63375+ return 0;
63376+}
63377+
63378+void
63379+gr_clear_learn_entries(void)
63380+{
63381+ char *tmp;
63382+
63383+ mutex_lock(&gr_learn_user_mutex);
63384+ spin_lock(&gr_learn_lock);
63385+ tmp = learn_buffer;
63386+ learn_buffer = NULL;
63387+ spin_unlock(&gr_learn_lock);
63388+ if (tmp)
63389+ vfree(tmp);
63390+ if (learn_buffer_user != NULL) {
63391+ vfree(learn_buffer_user);
63392+ learn_buffer_user = NULL;
63393+ }
63394+ learn_buffer_len = 0;
63395+ mutex_unlock(&gr_learn_user_mutex);
63396+
63397+ return;
63398+}
63399+
63400+void
63401+gr_add_learn_entry(const char *fmt, ...)
63402+{
63403+ va_list args;
63404+ unsigned int len;
63405+
63406+ if (!gr_learn_attached)
63407+ return;
63408+
63409+ spin_lock(&gr_learn_lock);
63410+
63411+ /* leave a gap at the end so we know when it's "full" but don't have to
63412+ compute the exact length of the string we're trying to append
63413+ */
63414+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
63415+ spin_unlock(&gr_learn_lock);
63416+ wake_up_interruptible(&learn_wait);
63417+ return;
63418+ }
63419+ if (learn_buffer == NULL) {
63420+ spin_unlock(&gr_learn_lock);
63421+ return;
63422+ }
63423+
63424+ va_start(args, fmt);
63425+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
63426+ va_end(args);
63427+
63428+ learn_buffer_len += len + 1;
63429+
63430+ spin_unlock(&gr_learn_lock);
63431+ wake_up_interruptible(&learn_wait);
63432+
63433+ return;
63434+}
63435+
63436+static int
63437+open_learn(struct inode *inode, struct file *file)
63438+{
63439+ if (file->f_mode & FMODE_READ && gr_learn_attached)
63440+ return -EBUSY;
63441+ if (file->f_mode & FMODE_READ) {
63442+ int retval = 0;
63443+ mutex_lock(&gr_learn_user_mutex);
63444+ if (learn_buffer == NULL)
63445+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
63446+ if (learn_buffer_user == NULL)
63447+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
63448+ if (learn_buffer == NULL) {
63449+ retval = -ENOMEM;
63450+ goto out_error;
63451+ }
63452+ if (learn_buffer_user == NULL) {
63453+ retval = -ENOMEM;
63454+ goto out_error;
63455+ }
63456+ learn_buffer_len = 0;
63457+ learn_buffer_user_len = 0;
63458+ gr_learn_attached = 1;
63459+out_error:
63460+ mutex_unlock(&gr_learn_user_mutex);
63461+ return retval;
63462+ }
63463+ return 0;
63464+}
63465+
63466+static int
63467+close_learn(struct inode *inode, struct file *file)
63468+{
63469+ if (file->f_mode & FMODE_READ) {
63470+ char *tmp = NULL;
63471+ mutex_lock(&gr_learn_user_mutex);
63472+ spin_lock(&gr_learn_lock);
63473+ tmp = learn_buffer;
63474+ learn_buffer = NULL;
63475+ spin_unlock(&gr_learn_lock);
63476+ if (tmp)
63477+ vfree(tmp);
63478+ if (learn_buffer_user != NULL) {
63479+ vfree(learn_buffer_user);
63480+ learn_buffer_user = NULL;
63481+ }
63482+ learn_buffer_len = 0;
63483+ learn_buffer_user_len = 0;
63484+ gr_learn_attached = 0;
63485+ mutex_unlock(&gr_learn_user_mutex);
63486+ }
63487+
63488+ return 0;
63489+}
63490+
63491+const struct file_operations grsec_fops = {
63492+ .read = read_learn,
63493+ .write = write_grsec_handler,
63494+ .open = open_learn,
63495+ .release = close_learn,
63496+ .poll = poll_learn,
63497+};
63498diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
63499new file mode 100644
63500index 0000000..39645c9
63501--- /dev/null
63502+++ b/grsecurity/gracl_res.c
63503@@ -0,0 +1,68 @@
63504+#include <linux/kernel.h>
63505+#include <linux/sched.h>
63506+#include <linux/gracl.h>
63507+#include <linux/grinternal.h>
63508+
63509+static const char *restab_log[] = {
63510+ [RLIMIT_CPU] = "RLIMIT_CPU",
63511+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
63512+ [RLIMIT_DATA] = "RLIMIT_DATA",
63513+ [RLIMIT_STACK] = "RLIMIT_STACK",
63514+ [RLIMIT_CORE] = "RLIMIT_CORE",
63515+ [RLIMIT_RSS] = "RLIMIT_RSS",
63516+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
63517+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
63518+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
63519+ [RLIMIT_AS] = "RLIMIT_AS",
63520+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
63521+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
63522+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
63523+ [RLIMIT_NICE] = "RLIMIT_NICE",
63524+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
63525+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
63526+ [GR_CRASH_RES] = "RLIMIT_CRASH"
63527+};
63528+
63529+void
63530+gr_log_resource(const struct task_struct *task,
63531+ const int res, const unsigned long wanted, const int gt)
63532+{
63533+ const struct cred *cred;
63534+ unsigned long rlim;
63535+
63536+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
63537+ return;
63538+
63539+ // not yet supported resource
63540+ if (unlikely(!restab_log[res]))
63541+ return;
63542+
63543+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
63544+ rlim = task_rlimit_max(task, res);
63545+ else
63546+ rlim = task_rlimit(task, res);
63547+
63548+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
63549+ return;
63550+
63551+ rcu_read_lock();
63552+ cred = __task_cred(task);
63553+
63554+ if (res == RLIMIT_NPROC &&
63555+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
63556+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
63557+ goto out_rcu_unlock;
63558+ else if (res == RLIMIT_MEMLOCK &&
63559+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
63560+ goto out_rcu_unlock;
63561+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
63562+ goto out_rcu_unlock;
63563+ rcu_read_unlock();
63564+
63565+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
63566+
63567+ return;
63568+out_rcu_unlock:
63569+ rcu_read_unlock();
63570+ return;
63571+}
63572diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
63573new file mode 100644
63574index 0000000..4dcc92a
63575--- /dev/null
63576+++ b/grsecurity/gracl_segv.c
63577@@ -0,0 +1,305 @@
63578+#include <linux/kernel.h>
63579+#include <linux/mm.h>
63580+#include <asm/uaccess.h>
63581+#include <asm/errno.h>
63582+#include <asm/mman.h>
63583+#include <net/sock.h>
63584+#include <linux/file.h>
63585+#include <linux/fs.h>
63586+#include <linux/net.h>
63587+#include <linux/in.h>
63588+#include <linux/slab.h>
63589+#include <linux/types.h>
63590+#include <linux/sched.h>
63591+#include <linux/timer.h>
63592+#include <linux/gracl.h>
63593+#include <linux/grsecurity.h>
63594+#include <linux/grinternal.h>
63595+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
63596+#include <linux/magic.h>
63597+#include <linux/pagemap.h>
63598+#include "../fs/btrfs/async-thread.h"
63599+#include "../fs/btrfs/ctree.h"
63600+#include "../fs/btrfs/btrfs_inode.h"
63601+#endif
63602+
63603+static struct crash_uid *uid_set;
63604+static unsigned short uid_used;
63605+static DEFINE_SPINLOCK(gr_uid_lock);
63606+extern rwlock_t gr_inode_lock;
63607+extern struct acl_subject_label *
63608+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
63609+ struct acl_role_label *role);
63610+
63611+static inline dev_t __get_dev(const struct dentry *dentry)
63612+{
63613+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
63614+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
63615+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
63616+ else
63617+#endif
63618+ return dentry->d_sb->s_dev;
63619+}
63620+
63621+int
63622+gr_init_uidset(void)
63623+{
63624+ uid_set =
63625+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
63626+ uid_used = 0;
63627+
63628+ return uid_set ? 1 : 0;
63629+}
63630+
63631+void
63632+gr_free_uidset(void)
63633+{
63634+ if (uid_set)
63635+ kfree(uid_set);
63636+
63637+ return;
63638+}
63639+
63640+int
63641+gr_find_uid(const uid_t uid)
63642+{
63643+ struct crash_uid *tmp = uid_set;
63644+ uid_t buid;
63645+ int low = 0, high = uid_used - 1, mid;
63646+
63647+ while (high >= low) {
63648+ mid = (low + high) >> 1;
63649+ buid = tmp[mid].uid;
63650+ if (buid == uid)
63651+ return mid;
63652+ if (buid > uid)
63653+ high = mid - 1;
63654+ if (buid < uid)
63655+ low = mid + 1;
63656+ }
63657+
63658+ return -1;
63659+}
63660+
63661+static __inline__ void
63662+gr_insertsort(void)
63663+{
63664+ unsigned short i, j;
63665+ struct crash_uid index;
63666+
63667+ for (i = 1; i < uid_used; i++) {
63668+ index = uid_set[i];
63669+ j = i;
63670+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
63671+ uid_set[j] = uid_set[j - 1];
63672+ j--;
63673+ }
63674+ uid_set[j] = index;
63675+ }
63676+
63677+ return;
63678+}
63679+
63680+static __inline__ void
63681+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
63682+{
63683+ int loc;
63684+ uid_t uid = GR_GLOBAL_UID(kuid);
63685+
63686+ if (uid_used == GR_UIDTABLE_MAX)
63687+ return;
63688+
63689+ loc = gr_find_uid(uid);
63690+
63691+ if (loc >= 0) {
63692+ uid_set[loc].expires = expires;
63693+ return;
63694+ }
63695+
63696+ uid_set[uid_used].uid = uid;
63697+ uid_set[uid_used].expires = expires;
63698+ uid_used++;
63699+
63700+ gr_insertsort();
63701+
63702+ return;
63703+}
63704+
63705+void
63706+gr_remove_uid(const unsigned short loc)
63707+{
63708+ unsigned short i;
63709+
63710+ for (i = loc + 1; i < uid_used; i++)
63711+ uid_set[i - 1] = uid_set[i];
63712+
63713+ uid_used--;
63714+
63715+ return;
63716+}
63717+
63718+int
63719+gr_check_crash_uid(const kuid_t kuid)
63720+{
63721+ int loc;
63722+ int ret = 0;
63723+ uid_t uid;
63724+
63725+ if (unlikely(!gr_acl_is_enabled()))
63726+ return 0;
63727+
63728+ uid = GR_GLOBAL_UID(kuid);
63729+
63730+ spin_lock(&gr_uid_lock);
63731+ loc = gr_find_uid(uid);
63732+
63733+ if (loc < 0)
63734+ goto out_unlock;
63735+
63736+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
63737+ gr_remove_uid(loc);
63738+ else
63739+ ret = 1;
63740+
63741+out_unlock:
63742+ spin_unlock(&gr_uid_lock);
63743+ return ret;
63744+}
63745+
63746+static __inline__ int
63747+proc_is_setxid(const struct cred *cred)
63748+{
63749+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
63750+ !uid_eq(cred->uid, cred->fsuid))
63751+ return 1;
63752+ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
63753+ !gid_eq(cred->gid, cred->fsgid))
63754+ return 1;
63755+
63756+ return 0;
63757+}
63758+
63759+extern int gr_fake_force_sig(int sig, struct task_struct *t);
63760+
63761+void
63762+gr_handle_crash(struct task_struct *task, const int sig)
63763+{
63764+ struct acl_subject_label *curr;
63765+ struct task_struct *tsk, *tsk2;
63766+ const struct cred *cred;
63767+ const struct cred *cred2;
63768+
63769+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
63770+ return;
63771+
63772+ if (unlikely(!gr_acl_is_enabled()))
63773+ return;
63774+
63775+ curr = task->acl;
63776+
63777+ if (!(curr->resmask & (1U << GR_CRASH_RES)))
63778+ return;
63779+
63780+ if (time_before_eq(curr->expires, get_seconds())) {
63781+ curr->expires = 0;
63782+ curr->crashes = 0;
63783+ }
63784+
63785+ curr->crashes++;
63786+
63787+ if (!curr->expires)
63788+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
63789+
63790+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
63791+ time_after(curr->expires, get_seconds())) {
63792+ rcu_read_lock();
63793+ cred = __task_cred(task);
63794+ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
63795+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
63796+ spin_lock(&gr_uid_lock);
63797+ gr_insert_uid(cred->uid, curr->expires);
63798+ spin_unlock(&gr_uid_lock);
63799+ curr->expires = 0;
63800+ curr->crashes = 0;
63801+ read_lock(&tasklist_lock);
63802+ do_each_thread(tsk2, tsk) {
63803+ cred2 = __task_cred(tsk);
63804+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
63805+ gr_fake_force_sig(SIGKILL, tsk);
63806+ } while_each_thread(tsk2, tsk);
63807+ read_unlock(&tasklist_lock);
63808+ } else {
63809+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
63810+ read_lock(&tasklist_lock);
63811+ read_lock(&grsec_exec_file_lock);
63812+ do_each_thread(tsk2, tsk) {
63813+ if (likely(tsk != task)) {
63814+ // if this thread has the same subject as the one that triggered
63815+ // RES_CRASH and it's the same binary, kill it
63816+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
63817+ gr_fake_force_sig(SIGKILL, tsk);
63818+ }
63819+ } while_each_thread(tsk2, tsk);
63820+ read_unlock(&grsec_exec_file_lock);
63821+ read_unlock(&tasklist_lock);
63822+ }
63823+ rcu_read_unlock();
63824+ }
63825+
63826+ return;
63827+}
63828+
63829+int
63830+gr_check_crash_exec(const struct file *filp)
63831+{
63832+ struct acl_subject_label *curr;
63833+
63834+ if (unlikely(!gr_acl_is_enabled()))
63835+ return 0;
63836+
63837+ read_lock(&gr_inode_lock);
63838+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
63839+ __get_dev(filp->f_path.dentry),
63840+ current->role);
63841+ read_unlock(&gr_inode_lock);
63842+
63843+ if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
63844+ (!curr->crashes && !curr->expires))
63845+ return 0;
63846+
63847+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
63848+ time_after(curr->expires, get_seconds()))
63849+ return 1;
63850+ else if (time_before_eq(curr->expires, get_seconds())) {
63851+ curr->crashes = 0;
63852+ curr->expires = 0;
63853+ }
63854+
63855+ return 0;
63856+}
63857+
63858+void
63859+gr_handle_alertkill(struct task_struct *task)
63860+{
63861+ struct acl_subject_label *curracl;
63862+ __u32 curr_ip;
63863+ struct task_struct *p, *p2;
63864+
63865+ if (unlikely(!gr_acl_is_enabled()))
63866+ return;
63867+
63868+ curracl = task->acl;
63869+ curr_ip = task->signal->curr_ip;
63870+
63871+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
63872+ read_lock(&tasklist_lock);
63873+ do_each_thread(p2, p) {
63874+ if (p->signal->curr_ip == curr_ip)
63875+ gr_fake_force_sig(SIGKILL, p);
63876+ } while_each_thread(p2, p);
63877+ read_unlock(&tasklist_lock);
63878+ } else if (curracl->mode & GR_KILLPROC)
63879+ gr_fake_force_sig(SIGKILL, task);
63880+
63881+ return;
63882+}
63883diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
63884new file mode 100644
63885index 0000000..98011b0
63886--- /dev/null
63887+++ b/grsecurity/gracl_shm.c
63888@@ -0,0 +1,40 @@
63889+#include <linux/kernel.h>
63890+#include <linux/mm.h>
63891+#include <linux/sched.h>
63892+#include <linux/file.h>
63893+#include <linux/ipc.h>
63894+#include <linux/gracl.h>
63895+#include <linux/grsecurity.h>
63896+#include <linux/grinternal.h>
63897+
63898+int
63899+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
63900+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
63901+{
63902+ struct task_struct *task;
63903+
63904+ if (!gr_acl_is_enabled())
63905+ return 1;
63906+
63907+ rcu_read_lock();
63908+ read_lock(&tasklist_lock);
63909+
63910+ task = find_task_by_vpid(shm_cprid);
63911+
63912+ if (unlikely(!task))
63913+ task = find_task_by_vpid(shm_lapid);
63914+
63915+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
63916+ (task_pid_nr(task) == shm_lapid)) &&
63917+ (task->acl->mode & GR_PROTSHM) &&
63918+ (task->acl != current->acl))) {
63919+ read_unlock(&tasklist_lock);
63920+ rcu_read_unlock();
63921+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
63922+ return 0;
63923+ }
63924+ read_unlock(&tasklist_lock);
63925+ rcu_read_unlock();
63926+
63927+ return 1;
63928+}
63929diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
63930new file mode 100644
63931index 0000000..bc0be01
63932--- /dev/null
63933+++ b/grsecurity/grsec_chdir.c
63934@@ -0,0 +1,19 @@
63935+#include <linux/kernel.h>
63936+#include <linux/sched.h>
63937+#include <linux/fs.h>
63938+#include <linux/file.h>
63939+#include <linux/grsecurity.h>
63940+#include <linux/grinternal.h>
63941+
63942+void
63943+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
63944+{
63945+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
63946+ if ((grsec_enable_chdir && grsec_enable_group &&
63947+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
63948+ !grsec_enable_group)) {
63949+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
63950+ }
63951+#endif
63952+ return;
63953+}
63954diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
63955new file mode 100644
63956index 0000000..bd6e105
63957--- /dev/null
63958+++ b/grsecurity/grsec_chroot.c
63959@@ -0,0 +1,370 @@
63960+#include <linux/kernel.h>
63961+#include <linux/module.h>
63962+#include <linux/sched.h>
63963+#include <linux/file.h>
63964+#include <linux/fs.h>
63965+#include <linux/mount.h>
63966+#include <linux/types.h>
63967+#include "../fs/mount.h"
63968+#include <linux/grsecurity.h>
63969+#include <linux/grinternal.h>
63970+
63971+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
63972+static int gr_init_ran;
63973+#endif
63974+
63975+void gr_set_chroot_entries(struct task_struct *task, const struct path *path)
63976+{
63977+#ifdef CONFIG_GRKERNSEC
63978+ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
63979+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root
63980+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
63981+ && gr_init_ran
63982+#endif
63983+ )
63984+ task->gr_is_chrooted = 1;
63985+ else {
63986+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
63987+ if (task_pid_nr(task) == 1 && !gr_init_ran)
63988+ gr_init_ran = 1;
63989+#endif
63990+ task->gr_is_chrooted = 0;
63991+ }
63992+
63993+ task->gr_chroot_dentry = path->dentry;
63994+#endif
63995+ return;
63996+}
63997+
63998+void gr_clear_chroot_entries(struct task_struct *task)
63999+{
64000+#ifdef CONFIG_GRKERNSEC
64001+ task->gr_is_chrooted = 0;
64002+ task->gr_chroot_dentry = NULL;
64003+#endif
64004+ return;
64005+}
64006+
64007+int
64008+gr_handle_chroot_unix(const pid_t pid)
64009+{
64010+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
64011+ struct task_struct *p;
64012+
64013+ if (unlikely(!grsec_enable_chroot_unix))
64014+ return 1;
64015+
64016+ if (likely(!proc_is_chrooted(current)))
64017+ return 1;
64018+
64019+ rcu_read_lock();
64020+ read_lock(&tasklist_lock);
64021+ p = find_task_by_vpid_unrestricted(pid);
64022+ if (unlikely(p && !have_same_root(current, p))) {
64023+ read_unlock(&tasklist_lock);
64024+ rcu_read_unlock();
64025+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
64026+ return 0;
64027+ }
64028+ read_unlock(&tasklist_lock);
64029+ rcu_read_unlock();
64030+#endif
64031+ return 1;
64032+}
64033+
64034+int
64035+gr_handle_chroot_nice(void)
64036+{
64037+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
64038+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
64039+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
64040+ return -EPERM;
64041+ }
64042+#endif
64043+ return 0;
64044+}
64045+
64046+int
64047+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
64048+{
64049+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
64050+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
64051+ && proc_is_chrooted(current)) {
64052+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
64053+ return -EACCES;
64054+ }
64055+#endif
64056+ return 0;
64057+}
64058+
64059+int
64060+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
64061+{
64062+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
64063+ struct task_struct *p;
64064+ int ret = 0;
64065+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
64066+ return ret;
64067+
64068+ read_lock(&tasklist_lock);
64069+ do_each_pid_task(pid, type, p) {
64070+ if (!have_same_root(current, p)) {
64071+ ret = 1;
64072+ goto out;
64073+ }
64074+ } while_each_pid_task(pid, type, p);
64075+out:
64076+ read_unlock(&tasklist_lock);
64077+ return ret;
64078+#endif
64079+ return 0;
64080+}
64081+
64082+int
64083+gr_pid_is_chrooted(struct task_struct *p)
64084+{
64085+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
64086+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
64087+ return 0;
64088+
64089+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
64090+ !have_same_root(current, p)) {
64091+ return 1;
64092+ }
64093+#endif
64094+ return 0;
64095+}
64096+
64097+EXPORT_SYMBOL(gr_pid_is_chrooted);
64098+
64099+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
64100+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
64101+{
64102+ struct path path, currentroot;
64103+ int ret = 0;
64104+
64105+ path.dentry = (struct dentry *)u_dentry;
64106+ path.mnt = (struct vfsmount *)u_mnt;
64107+ get_fs_root(current->fs, &currentroot);
64108+ if (path_is_under(&path, &currentroot))
64109+ ret = 1;
64110+ path_put(&currentroot);
64111+
64112+ return ret;
64113+}
64114+#endif
64115+
64116+int
64117+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
64118+{
64119+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
64120+ if (!grsec_enable_chroot_fchdir)
64121+ return 1;
64122+
64123+ if (!proc_is_chrooted(current))
64124+ return 1;
64125+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
64126+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
64127+ return 0;
64128+ }
64129+#endif
64130+ return 1;
64131+}
64132+
64133+int
64134+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
64135+ const time_t shm_createtime)
64136+{
64137+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
64138+ struct task_struct *p;
64139+ time_t starttime;
64140+
64141+ if (unlikely(!grsec_enable_chroot_shmat))
64142+ return 1;
64143+
64144+ if (likely(!proc_is_chrooted(current)))
64145+ return 1;
64146+
64147+ rcu_read_lock();
64148+ read_lock(&tasklist_lock);
64149+
64150+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
64151+ starttime = p->start_time.tv_sec;
64152+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
64153+ if (have_same_root(current, p)) {
64154+ goto allow;
64155+ } else {
64156+ read_unlock(&tasklist_lock);
64157+ rcu_read_unlock();
64158+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
64159+ return 0;
64160+ }
64161+ }
64162+ /* creator exited, pid reuse, fall through to next check */
64163+ }
64164+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
64165+ if (unlikely(!have_same_root(current, p))) {
64166+ read_unlock(&tasklist_lock);
64167+ rcu_read_unlock();
64168+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
64169+ return 0;
64170+ }
64171+ }
64172+
64173+allow:
64174+ read_unlock(&tasklist_lock);
64175+ rcu_read_unlock();
64176+#endif
64177+ return 1;
64178+}
64179+
64180+void
64181+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
64182+{
64183+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
64184+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
64185+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
64186+#endif
64187+ return;
64188+}
64189+
64190+int
64191+gr_handle_chroot_mknod(const struct dentry *dentry,
64192+ const struct vfsmount *mnt, const int mode)
64193+{
64194+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
64195+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
64196+ proc_is_chrooted(current)) {
64197+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
64198+ return -EPERM;
64199+ }
64200+#endif
64201+ return 0;
64202+}
64203+
64204+int
64205+gr_handle_chroot_mount(const struct dentry *dentry,
64206+ const struct vfsmount *mnt, const char *dev_name)
64207+{
64208+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
64209+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
64210+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
64211+ return -EPERM;
64212+ }
64213+#endif
64214+ return 0;
64215+}
64216+
64217+int
64218+gr_handle_chroot_pivot(void)
64219+{
64220+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
64221+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
64222+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
64223+ return -EPERM;
64224+ }
64225+#endif
64226+ return 0;
64227+}
64228+
64229+int
64230+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
64231+{
64232+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
64233+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
64234+ !gr_is_outside_chroot(dentry, mnt)) {
64235+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
64236+ return -EPERM;
64237+ }
64238+#endif
64239+ return 0;
64240+}
64241+
64242+extern const char *captab_log[];
64243+extern int captab_log_entries;
64244+
64245+int
64246+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
64247+{
64248+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
64249+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
64250+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
64251+ if (cap_raised(chroot_caps, cap)) {
64252+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
64253+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
64254+ }
64255+ return 0;
64256+ }
64257+ }
64258+#endif
64259+ return 1;
64260+}
64261+
64262+int
64263+gr_chroot_is_capable(const int cap)
64264+{
64265+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
64266+ return gr_task_chroot_is_capable(current, current_cred(), cap);
64267+#endif
64268+ return 1;
64269+}
64270+
64271+int
64272+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
64273+{
64274+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
64275+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
64276+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
64277+ if (cap_raised(chroot_caps, cap)) {
64278+ return 0;
64279+ }
64280+ }
64281+#endif
64282+ return 1;
64283+}
64284+
64285+int
64286+gr_chroot_is_capable_nolog(const int cap)
64287+{
64288+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
64289+ return gr_task_chroot_is_capable_nolog(current, cap);
64290+#endif
64291+ return 1;
64292+}
64293+
64294+int
64295+gr_handle_chroot_sysctl(const int op)
64296+{
64297+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
64298+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
64299+ proc_is_chrooted(current))
64300+ return -EACCES;
64301+#endif
64302+ return 0;
64303+}
64304+
64305+void
64306+gr_handle_chroot_chdir(const struct path *path)
64307+{
64308+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
64309+ if (grsec_enable_chroot_chdir)
64310+ set_fs_pwd(current->fs, path);
64311+#endif
64312+ return;
64313+}
64314+
64315+int
64316+gr_handle_chroot_chmod(const struct dentry *dentry,
64317+ const struct vfsmount *mnt, const int mode)
64318+{
64319+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
64320+ /* allow chmod +s on directories, but not files */
64321+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
64322+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
64323+ proc_is_chrooted(current)) {
64324+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
64325+ return -EPERM;
64326+ }
64327+#endif
64328+ return 0;
64329+}
64330diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
64331new file mode 100644
64332index 0000000..ce65ceb
64333--- /dev/null
64334+++ b/grsecurity/grsec_disabled.c
64335@@ -0,0 +1,434 @@
64336+#include <linux/kernel.h>
64337+#include <linux/module.h>
64338+#include <linux/sched.h>
64339+#include <linux/file.h>
64340+#include <linux/fs.h>
64341+#include <linux/kdev_t.h>
64342+#include <linux/net.h>
64343+#include <linux/in.h>
64344+#include <linux/ip.h>
64345+#include <linux/skbuff.h>
64346+#include <linux/sysctl.h>
64347+
64348+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
64349+void
64350+pax_set_initial_flags(struct linux_binprm *bprm)
64351+{
64352+ return;
64353+}
64354+#endif
64355+
64356+#ifdef CONFIG_SYSCTL
64357+__u32
64358+gr_handle_sysctl(const struct ctl_table * table, const int op)
64359+{
64360+ return 0;
64361+}
64362+#endif
64363+
64364+#ifdef CONFIG_TASKSTATS
64365+int gr_is_taskstats_denied(int pid)
64366+{
64367+ return 0;
64368+}
64369+#endif
64370+
64371+int
64372+gr_acl_is_enabled(void)
64373+{
64374+ return 0;
64375+}
64376+
64377+void
64378+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
64379+{
64380+ return;
64381+}
64382+
64383+int
64384+gr_handle_rawio(const struct inode *inode)
64385+{
64386+ return 0;
64387+}
64388+
64389+void
64390+gr_acl_handle_psacct(struct task_struct *task, const long code)
64391+{
64392+ return;
64393+}
64394+
64395+int
64396+gr_handle_ptrace(struct task_struct *task, const long request)
64397+{
64398+ return 0;
64399+}
64400+
64401+int
64402+gr_handle_proc_ptrace(struct task_struct *task)
64403+{
64404+ return 0;
64405+}
64406+
64407+int
64408+gr_set_acls(const int type)
64409+{
64410+ return 0;
64411+}
64412+
64413+int
64414+gr_check_hidden_task(const struct task_struct *tsk)
64415+{
64416+ return 0;
64417+}
64418+
64419+int
64420+gr_check_protected_task(const struct task_struct *task)
64421+{
64422+ return 0;
64423+}
64424+
64425+int
64426+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
64427+{
64428+ return 0;
64429+}
64430+
64431+void
64432+gr_copy_label(struct task_struct *tsk)
64433+{
64434+ return;
64435+}
64436+
64437+void
64438+gr_set_pax_flags(struct task_struct *task)
64439+{
64440+ return;
64441+}
64442+
64443+int
64444+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
64445+ const int unsafe_share)
64446+{
64447+ return 0;
64448+}
64449+
64450+void
64451+gr_handle_delete(const ino_t ino, const dev_t dev)
64452+{
64453+ return;
64454+}
64455+
64456+void
64457+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
64458+{
64459+ return;
64460+}
64461+
64462+void
64463+gr_handle_crash(struct task_struct *task, const int sig)
64464+{
64465+ return;
64466+}
64467+
64468+int
64469+gr_check_crash_exec(const struct file *filp)
64470+{
64471+ return 0;
64472+}
64473+
64474+int
64475+gr_check_crash_uid(const kuid_t uid)
64476+{
64477+ return 0;
64478+}
64479+
64480+void
64481+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
64482+ struct dentry *old_dentry,
64483+ struct dentry *new_dentry,
64484+ struct vfsmount *mnt, const __u8 replace)
64485+{
64486+ return;
64487+}
64488+
64489+int
64490+gr_search_socket(const int family, const int type, const int protocol)
64491+{
64492+ return 1;
64493+}
64494+
64495+int
64496+gr_search_connectbind(const int mode, const struct socket *sock,
64497+ const struct sockaddr_in *addr)
64498+{
64499+ return 0;
64500+}
64501+
64502+void
64503+gr_handle_alertkill(struct task_struct *task)
64504+{
64505+ return;
64506+}
64507+
64508+__u32
64509+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
64510+{
64511+ return 1;
64512+}
64513+
64514+__u32
64515+gr_acl_handle_hidden_file(const struct dentry * dentry,
64516+ const struct vfsmount * mnt)
64517+{
64518+ return 1;
64519+}
64520+
64521+__u32
64522+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
64523+ int acc_mode)
64524+{
64525+ return 1;
64526+}
64527+
64528+__u32
64529+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
64530+{
64531+ return 1;
64532+}
64533+
64534+__u32
64535+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
64536+{
64537+ return 1;
64538+}
64539+
64540+int
64541+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
64542+ unsigned int *vm_flags)
64543+{
64544+ return 1;
64545+}
64546+
64547+__u32
64548+gr_acl_handle_truncate(const struct dentry * dentry,
64549+ const struct vfsmount * mnt)
64550+{
64551+ return 1;
64552+}
64553+
64554+__u32
64555+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
64556+{
64557+ return 1;
64558+}
64559+
64560+__u32
64561+gr_acl_handle_access(const struct dentry * dentry,
64562+ const struct vfsmount * mnt, const int fmode)
64563+{
64564+ return 1;
64565+}
64566+
64567+__u32
64568+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
64569+ umode_t *mode)
64570+{
64571+ return 1;
64572+}
64573+
64574+__u32
64575+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
64576+{
64577+ return 1;
64578+}
64579+
64580+__u32
64581+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
64582+{
64583+ return 1;
64584+}
64585+
64586+void
64587+grsecurity_init(void)
64588+{
64589+ return;
64590+}
64591+
64592+umode_t gr_acl_umask(void)
64593+{
64594+ return 0;
64595+}
64596+
64597+__u32
64598+gr_acl_handle_mknod(const struct dentry * new_dentry,
64599+ const struct dentry * parent_dentry,
64600+ const struct vfsmount * parent_mnt,
64601+ const int mode)
64602+{
64603+ return 1;
64604+}
64605+
64606+__u32
64607+gr_acl_handle_mkdir(const struct dentry * new_dentry,
64608+ const struct dentry * parent_dentry,
64609+ const struct vfsmount * parent_mnt)
64610+{
64611+ return 1;
64612+}
64613+
64614+__u32
64615+gr_acl_handle_symlink(const struct dentry * new_dentry,
64616+ const struct dentry * parent_dentry,
64617+ const struct vfsmount * parent_mnt, const struct filename *from)
64618+{
64619+ return 1;
64620+}
64621+
64622+__u32
64623+gr_acl_handle_link(const struct dentry * new_dentry,
64624+ const struct dentry * parent_dentry,
64625+ const struct vfsmount * parent_mnt,
64626+ const struct dentry * old_dentry,
64627+ const struct vfsmount * old_mnt, const struct filename *to)
64628+{
64629+ return 1;
64630+}
64631+
64632+int
64633+gr_acl_handle_rename(const struct dentry *new_dentry,
64634+ const struct dentry *parent_dentry,
64635+ const struct vfsmount *parent_mnt,
64636+ const struct dentry *old_dentry,
64637+ const struct inode *old_parent_inode,
64638+ const struct vfsmount *old_mnt, const struct filename *newname)
64639+{
64640+ return 0;
64641+}
64642+
64643+int
64644+gr_acl_handle_filldir(const struct file *file, const char *name,
64645+ const int namelen, const ino_t ino)
64646+{
64647+ return 1;
64648+}
64649+
64650+int
64651+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
64652+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
64653+{
64654+ return 1;
64655+}
64656+
64657+int
64658+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
64659+{
64660+ return 0;
64661+}
64662+
64663+int
64664+gr_search_accept(const struct socket *sock)
64665+{
64666+ return 0;
64667+}
64668+
64669+int
64670+gr_search_listen(const struct socket *sock)
64671+{
64672+ return 0;
64673+}
64674+
64675+int
64676+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
64677+{
64678+ return 0;
64679+}
64680+
64681+__u32
64682+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
64683+{
64684+ return 1;
64685+}
64686+
64687+__u32
64688+gr_acl_handle_creat(const struct dentry * dentry,
64689+ const struct dentry * p_dentry,
64690+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
64691+ const int imode)
64692+{
64693+ return 1;
64694+}
64695+
64696+void
64697+gr_acl_handle_exit(void)
64698+{
64699+ return;
64700+}
64701+
64702+int
64703+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
64704+{
64705+ return 1;
64706+}
64707+
64708+void
64709+gr_set_role_label(const kuid_t uid, const kgid_t gid)
64710+{
64711+ return;
64712+}
64713+
64714+int
64715+gr_acl_handle_procpidmem(const struct task_struct *task)
64716+{
64717+ return 0;
64718+}
64719+
64720+int
64721+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
64722+{
64723+ return 0;
64724+}
64725+
64726+int
64727+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
64728+{
64729+ return 0;
64730+}
64731+
64732+void
64733+gr_set_kernel_label(struct task_struct *task)
64734+{
64735+ return;
64736+}
64737+
64738+int
64739+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
64740+{
64741+ return 0;
64742+}
64743+
64744+int
64745+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
64746+{
64747+ return 0;
64748+}
64749+
64750+int gr_acl_enable_at_secure(void)
64751+{
64752+ return 0;
64753+}
64754+
64755+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
64756+{
64757+ return dentry->d_sb->s_dev;
64758+}
64759+
64760+void gr_put_exec_file(struct task_struct *task)
64761+{
64762+ return;
64763+}
64764+
64765+EXPORT_SYMBOL(gr_set_kernel_label);
64766+#ifdef CONFIG_SECURITY
64767+EXPORT_SYMBOL(gr_check_user_change);
64768+EXPORT_SYMBOL(gr_check_group_change);
64769+#endif
64770diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
64771new file mode 100644
64772index 0000000..387032b
64773--- /dev/null
64774+++ b/grsecurity/grsec_exec.c
64775@@ -0,0 +1,187 @@
64776+#include <linux/kernel.h>
64777+#include <linux/sched.h>
64778+#include <linux/file.h>
64779+#include <linux/binfmts.h>
64780+#include <linux/fs.h>
64781+#include <linux/types.h>
64782+#include <linux/grdefs.h>
64783+#include <linux/grsecurity.h>
64784+#include <linux/grinternal.h>
64785+#include <linux/capability.h>
64786+#include <linux/module.h>
64787+#include <linux/compat.h>
64788+
64789+#include <asm/uaccess.h>
64790+
64791+#ifdef CONFIG_GRKERNSEC_EXECLOG
64792+static char gr_exec_arg_buf[132];
64793+static DEFINE_MUTEX(gr_exec_arg_mutex);
64794+#endif
64795+
64796+struct user_arg_ptr {
64797+#ifdef CONFIG_COMPAT
64798+ bool is_compat;
64799+#endif
64800+ union {
64801+ const char __user *const __user *native;
64802+#ifdef CONFIG_COMPAT
64803+ const compat_uptr_t __user *compat;
64804+#endif
64805+ } ptr;
64806+};
64807+
64808+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
64809+
64810+void
64811+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
64812+{
64813+#ifdef CONFIG_GRKERNSEC_EXECLOG
64814+ char *grarg = gr_exec_arg_buf;
64815+ unsigned int i, x, execlen = 0;
64816+ char c;
64817+
64818+ if (!((grsec_enable_execlog && grsec_enable_group &&
64819+ in_group_p(grsec_audit_gid))
64820+ || (grsec_enable_execlog && !grsec_enable_group)))
64821+ return;
64822+
64823+ mutex_lock(&gr_exec_arg_mutex);
64824+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
64825+
64826+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
64827+ const char __user *p;
64828+ unsigned int len;
64829+
64830+ p = get_user_arg_ptr(argv, i);
64831+ if (IS_ERR(p))
64832+ goto log;
64833+
64834+ len = strnlen_user(p, 128 - execlen);
64835+ if (len > 128 - execlen)
64836+ len = 128 - execlen;
64837+ else if (len > 0)
64838+ len--;
64839+ if (copy_from_user(grarg + execlen, p, len))
64840+ goto log;
64841+
64842+ /* rewrite unprintable characters */
64843+ for (x = 0; x < len; x++) {
64844+ c = *(grarg + execlen + x);
64845+ if (c < 32 || c > 126)
64846+ *(grarg + execlen + x) = ' ';
64847+ }
64848+
64849+ execlen += len;
64850+ *(grarg + execlen) = ' ';
64851+ *(grarg + execlen + 1) = '\0';
64852+ execlen++;
64853+ }
64854+
64855+ log:
64856+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
64857+ bprm->file->f_path.mnt, grarg);
64858+ mutex_unlock(&gr_exec_arg_mutex);
64859+#endif
64860+ return;
64861+}
64862+
64863+#ifdef CONFIG_GRKERNSEC
64864+extern int gr_acl_is_capable(const int cap);
64865+extern int gr_acl_is_capable_nolog(const int cap);
64866+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
64867+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
64868+extern int gr_chroot_is_capable(const int cap);
64869+extern int gr_chroot_is_capable_nolog(const int cap);
64870+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
64871+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
64872+#endif
64873+
64874+const char *captab_log[] = {
64875+ "CAP_CHOWN",
64876+ "CAP_DAC_OVERRIDE",
64877+ "CAP_DAC_READ_SEARCH",
64878+ "CAP_FOWNER",
64879+ "CAP_FSETID",
64880+ "CAP_KILL",
64881+ "CAP_SETGID",
64882+ "CAP_SETUID",
64883+ "CAP_SETPCAP",
64884+ "CAP_LINUX_IMMUTABLE",
64885+ "CAP_NET_BIND_SERVICE",
64886+ "CAP_NET_BROADCAST",
64887+ "CAP_NET_ADMIN",
64888+ "CAP_NET_RAW",
64889+ "CAP_IPC_LOCK",
64890+ "CAP_IPC_OWNER",
64891+ "CAP_SYS_MODULE",
64892+ "CAP_SYS_RAWIO",
64893+ "CAP_SYS_CHROOT",
64894+ "CAP_SYS_PTRACE",
64895+ "CAP_SYS_PACCT",
64896+ "CAP_SYS_ADMIN",
64897+ "CAP_SYS_BOOT",
64898+ "CAP_SYS_NICE",
64899+ "CAP_SYS_RESOURCE",
64900+ "CAP_SYS_TIME",
64901+ "CAP_SYS_TTY_CONFIG",
64902+ "CAP_MKNOD",
64903+ "CAP_LEASE",
64904+ "CAP_AUDIT_WRITE",
64905+ "CAP_AUDIT_CONTROL",
64906+ "CAP_SETFCAP",
64907+ "CAP_MAC_OVERRIDE",
64908+ "CAP_MAC_ADMIN",
64909+ "CAP_SYSLOG",
64910+ "CAP_WAKE_ALARM"
64911+};
64912+
64913+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
64914+
64915+int gr_is_capable(const int cap)
64916+{
64917+#ifdef CONFIG_GRKERNSEC
64918+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
64919+ return 1;
64920+ return 0;
64921+#else
64922+ return 1;
64923+#endif
64924+}
64925+
64926+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
64927+{
64928+#ifdef CONFIG_GRKERNSEC
64929+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
64930+ return 1;
64931+ return 0;
64932+#else
64933+ return 1;
64934+#endif
64935+}
64936+
64937+int gr_is_capable_nolog(const int cap)
64938+{
64939+#ifdef CONFIG_GRKERNSEC
64940+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
64941+ return 1;
64942+ return 0;
64943+#else
64944+ return 1;
64945+#endif
64946+}
64947+
64948+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
64949+{
64950+#ifdef CONFIG_GRKERNSEC
64951+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
64952+ return 1;
64953+ return 0;
64954+#else
64955+ return 1;
64956+#endif
64957+}
64958+
64959+EXPORT_SYMBOL(gr_is_capable);
64960+EXPORT_SYMBOL(gr_is_capable_nolog);
64961+EXPORT_SYMBOL(gr_task_is_capable);
64962+EXPORT_SYMBOL(gr_task_is_capable_nolog);
64963diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
64964new file mode 100644
64965index 0000000..06cc6ea
64966--- /dev/null
64967+++ b/grsecurity/grsec_fifo.c
64968@@ -0,0 +1,24 @@
64969+#include <linux/kernel.h>
64970+#include <linux/sched.h>
64971+#include <linux/fs.h>
64972+#include <linux/file.h>
64973+#include <linux/grinternal.h>
64974+
64975+int
64976+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
64977+ const struct dentry *dir, const int flag, const int acc_mode)
64978+{
64979+#ifdef CONFIG_GRKERNSEC_FIFO
64980+ const struct cred *cred = current_cred();
64981+
64982+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
64983+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
64984+ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
64985+ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
64986+ if (!inode_permission(dentry->d_inode, acc_mode))
64987+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
64988+ return -EACCES;
64989+ }
64990+#endif
64991+ return 0;
64992+}
64993diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
64994new file mode 100644
64995index 0000000..8ca18bf
64996--- /dev/null
64997+++ b/grsecurity/grsec_fork.c
64998@@ -0,0 +1,23 @@
64999+#include <linux/kernel.h>
65000+#include <linux/sched.h>
65001+#include <linux/grsecurity.h>
65002+#include <linux/grinternal.h>
65003+#include <linux/errno.h>
65004+
65005+void
65006+gr_log_forkfail(const int retval)
65007+{
65008+#ifdef CONFIG_GRKERNSEC_FORKFAIL
65009+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
65010+ switch (retval) {
65011+ case -EAGAIN:
65012+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
65013+ break;
65014+ case -ENOMEM:
65015+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
65016+ break;
65017+ }
65018+ }
65019+#endif
65020+ return;
65021+}
65022diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
65023new file mode 100644
65024index 0000000..a862e9f
65025--- /dev/null
65026+++ b/grsecurity/grsec_init.c
65027@@ -0,0 +1,283 @@
65028+#include <linux/kernel.h>
65029+#include <linux/sched.h>
65030+#include <linux/mm.h>
65031+#include <linux/gracl.h>
65032+#include <linux/slab.h>
65033+#include <linux/vmalloc.h>
65034+#include <linux/percpu.h>
65035+#include <linux/module.h>
65036+
65037+int grsec_enable_ptrace_readexec;
65038+int grsec_enable_setxid;
65039+int grsec_enable_symlinkown;
65040+kgid_t grsec_symlinkown_gid;
65041+int grsec_enable_brute;
65042+int grsec_enable_link;
65043+int grsec_enable_dmesg;
65044+int grsec_enable_harden_ptrace;
65045+int grsec_enable_fifo;
65046+int grsec_enable_execlog;
65047+int grsec_enable_signal;
65048+int grsec_enable_forkfail;
65049+int grsec_enable_audit_ptrace;
65050+int grsec_enable_time;
65051+int grsec_enable_audit_textrel;
65052+int grsec_enable_group;
65053+kgid_t grsec_audit_gid;
65054+int grsec_enable_chdir;
65055+int grsec_enable_mount;
65056+int grsec_enable_rofs;
65057+int grsec_enable_chroot_findtask;
65058+int grsec_enable_chroot_mount;
65059+int grsec_enable_chroot_shmat;
65060+int grsec_enable_chroot_fchdir;
65061+int grsec_enable_chroot_double;
65062+int grsec_enable_chroot_pivot;
65063+int grsec_enable_chroot_chdir;
65064+int grsec_enable_chroot_chmod;
65065+int grsec_enable_chroot_mknod;
65066+int grsec_enable_chroot_nice;
65067+int grsec_enable_chroot_execlog;
65068+int grsec_enable_chroot_caps;
65069+int grsec_enable_chroot_sysctl;
65070+int grsec_enable_chroot_unix;
65071+int grsec_enable_tpe;
65072+kgid_t grsec_tpe_gid;
65073+int grsec_enable_blackhole;
65074+#ifdef CONFIG_IPV6_MODULE
65075+EXPORT_SYMBOL(grsec_enable_blackhole);
65076+#endif
65077+int grsec_lastack_retries;
65078+int grsec_enable_tpe_all;
65079+int grsec_enable_tpe_invert;
65080+int grsec_enable_socket_all;
65081+kgid_t grsec_socket_all_gid;
65082+int grsec_enable_socket_client;
65083+kgid_t grsec_socket_client_gid;
65084+int grsec_enable_socket_server;
65085+kgid_t grsec_socket_server_gid;
65086+int grsec_resource_logging;
65087+int grsec_disable_privio;
65088+int grsec_enable_log_rwxmaps;
65089+int grsec_lock;
65090+
65091+DEFINE_SPINLOCK(grsec_alert_lock);
65092+unsigned long grsec_alert_wtime = 0;
65093+unsigned long grsec_alert_fyet = 0;
65094+
65095+DEFINE_SPINLOCK(grsec_audit_lock);
65096+
65097+DEFINE_RWLOCK(grsec_exec_file_lock);
65098+
65099+char *gr_shared_page[4];
65100+
65101+char *gr_alert_log_fmt;
65102+char *gr_audit_log_fmt;
65103+char *gr_alert_log_buf;
65104+char *gr_audit_log_buf;
65105+
65106+extern struct gr_arg *gr_usermode;
65107+extern unsigned char *gr_system_salt;
65108+extern unsigned char *gr_system_sum;
65109+
65110+void __init
65111+grsecurity_init(void)
65112+{
65113+ int j;
65114+ /* create the per-cpu shared pages */
65115+
65116+#ifdef CONFIG_X86
65117+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
65118+#endif
65119+
65120+ for (j = 0; j < 4; j++) {
65121+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
65122+ if (gr_shared_page[j] == NULL) {
65123+ panic("Unable to allocate grsecurity shared page");
65124+ return;
65125+ }
65126+ }
65127+
65128+ /* allocate log buffers */
65129+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
65130+ if (!gr_alert_log_fmt) {
65131+ panic("Unable to allocate grsecurity alert log format buffer");
65132+ return;
65133+ }
65134+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
65135+ if (!gr_audit_log_fmt) {
65136+ panic("Unable to allocate grsecurity audit log format buffer");
65137+ return;
65138+ }
65139+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
65140+ if (!gr_alert_log_buf) {
65141+ panic("Unable to allocate grsecurity alert log buffer");
65142+ return;
65143+ }
65144+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
65145+ if (!gr_audit_log_buf) {
65146+ panic("Unable to allocate grsecurity audit log buffer");
65147+ return;
65148+ }
65149+
65150+ /* allocate memory for authentication structure */
65151+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
65152+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
65153+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
65154+
65155+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
65156+ panic("Unable to allocate grsecurity authentication structure");
65157+ return;
65158+ }
65159+
65160+
65161+#ifdef CONFIG_GRKERNSEC_IO
65162+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
65163+ grsec_disable_privio = 1;
65164+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
65165+ grsec_disable_privio = 1;
65166+#else
65167+ grsec_disable_privio = 0;
65168+#endif
65169+#endif
65170+
65171+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
65172+ /* for backward compatibility, tpe_invert always defaults to on if
65173+ enabled in the kernel
65174+ */
65175+ grsec_enable_tpe_invert = 1;
65176+#endif
65177+
65178+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
65179+#ifndef CONFIG_GRKERNSEC_SYSCTL
65180+ grsec_lock = 1;
65181+#endif
65182+
65183+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
65184+ grsec_enable_audit_textrel = 1;
65185+#endif
65186+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
65187+ grsec_enable_log_rwxmaps = 1;
65188+#endif
65189+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
65190+ grsec_enable_group = 1;
65191+ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
65192+#endif
65193+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
65194+ grsec_enable_ptrace_readexec = 1;
65195+#endif
65196+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
65197+ grsec_enable_chdir = 1;
65198+#endif
65199+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
65200+ grsec_enable_harden_ptrace = 1;
65201+#endif
65202+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
65203+ grsec_enable_mount = 1;
65204+#endif
65205+#ifdef CONFIG_GRKERNSEC_LINK
65206+ grsec_enable_link = 1;
65207+#endif
65208+#ifdef CONFIG_GRKERNSEC_BRUTE
65209+ grsec_enable_brute = 1;
65210+#endif
65211+#ifdef CONFIG_GRKERNSEC_DMESG
65212+ grsec_enable_dmesg = 1;
65213+#endif
65214+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65215+ grsec_enable_blackhole = 1;
65216+ grsec_lastack_retries = 4;
65217+#endif
65218+#ifdef CONFIG_GRKERNSEC_FIFO
65219+ grsec_enable_fifo = 1;
65220+#endif
65221+#ifdef CONFIG_GRKERNSEC_EXECLOG
65222+ grsec_enable_execlog = 1;
65223+#endif
65224+#ifdef CONFIG_GRKERNSEC_SETXID
65225+ grsec_enable_setxid = 1;
65226+#endif
65227+#ifdef CONFIG_GRKERNSEC_SIGNAL
65228+ grsec_enable_signal = 1;
65229+#endif
65230+#ifdef CONFIG_GRKERNSEC_FORKFAIL
65231+ grsec_enable_forkfail = 1;
65232+#endif
65233+#ifdef CONFIG_GRKERNSEC_TIME
65234+ grsec_enable_time = 1;
65235+#endif
65236+#ifdef CONFIG_GRKERNSEC_RESLOG
65237+ grsec_resource_logging = 1;
65238+#endif
65239+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
65240+ grsec_enable_chroot_findtask = 1;
65241+#endif
65242+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
65243+ grsec_enable_chroot_unix = 1;
65244+#endif
65245+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
65246+ grsec_enable_chroot_mount = 1;
65247+#endif
65248+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
65249+ grsec_enable_chroot_fchdir = 1;
65250+#endif
65251+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
65252+ grsec_enable_chroot_shmat = 1;
65253+#endif
65254+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
65255+ grsec_enable_audit_ptrace = 1;
65256+#endif
65257+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
65258+ grsec_enable_chroot_double = 1;
65259+#endif
65260+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
65261+ grsec_enable_chroot_pivot = 1;
65262+#endif
65263+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
65264+ grsec_enable_chroot_chdir = 1;
65265+#endif
65266+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
65267+ grsec_enable_chroot_chmod = 1;
65268+#endif
65269+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
65270+ grsec_enable_chroot_mknod = 1;
65271+#endif
65272+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
65273+ grsec_enable_chroot_nice = 1;
65274+#endif
65275+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
65276+ grsec_enable_chroot_execlog = 1;
65277+#endif
65278+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
65279+ grsec_enable_chroot_caps = 1;
65280+#endif
65281+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
65282+ grsec_enable_chroot_sysctl = 1;
65283+#endif
65284+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
65285+ grsec_enable_symlinkown = 1;
65286+ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
65287+#endif
65288+#ifdef CONFIG_GRKERNSEC_TPE
65289+ grsec_enable_tpe = 1;
65290+ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
65291+#ifdef CONFIG_GRKERNSEC_TPE_ALL
65292+ grsec_enable_tpe_all = 1;
65293+#endif
65294+#endif
65295+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
65296+ grsec_enable_socket_all = 1;
65297+ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
65298+#endif
65299+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
65300+ grsec_enable_socket_client = 1;
65301+ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
65302+#endif
65303+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
65304+ grsec_enable_socket_server = 1;
65305+ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
65306+#endif
65307+#endif
65308+
65309+ return;
65310+}
65311diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
65312new file mode 100644
65313index 0000000..5e05e20
65314--- /dev/null
65315+++ b/grsecurity/grsec_link.c
65316@@ -0,0 +1,58 @@
65317+#include <linux/kernel.h>
65318+#include <linux/sched.h>
65319+#include <linux/fs.h>
65320+#include <linux/file.h>
65321+#include <linux/grinternal.h>
65322+
65323+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
65324+{
65325+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
65326+ const struct inode *link_inode = link->dentry->d_inode;
65327+
65328+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
65329+ /* ignore root-owned links, e.g. /proc/self */
65330+ gr_is_global_nonroot(link_inode->i_uid) && target &&
65331+ !uid_eq(link_inode->i_uid, target->i_uid)) {
65332+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
65333+ return 1;
65334+ }
65335+#endif
65336+ return 0;
65337+}
65338+
65339+int
65340+gr_handle_follow_link(const struct inode *parent,
65341+ const struct inode *inode,
65342+ const struct dentry *dentry, const struct vfsmount *mnt)
65343+{
65344+#ifdef CONFIG_GRKERNSEC_LINK
65345+ const struct cred *cred = current_cred();
65346+
65347+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
65348+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
65349+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
65350+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
65351+ return -EACCES;
65352+ }
65353+#endif
65354+ return 0;
65355+}
65356+
65357+int
65358+gr_handle_hardlink(const struct dentry *dentry,
65359+ const struct vfsmount *mnt,
65360+ struct inode *inode, const int mode, const struct filename *to)
65361+{
65362+#ifdef CONFIG_GRKERNSEC_LINK
65363+ const struct cred *cred = current_cred();
65364+
65365+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
65366+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
65367+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
65368+ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
65369+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
65370+ return -EPERM;
65371+ }
65372+#endif
65373+ return 0;
65374+}
65375diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
65376new file mode 100644
65377index 0000000..7c06085
65378--- /dev/null
65379+++ b/grsecurity/grsec_log.c
65380@@ -0,0 +1,326 @@
65381+#include <linux/kernel.h>
65382+#include <linux/sched.h>
65383+#include <linux/file.h>
65384+#include <linux/tty.h>
65385+#include <linux/fs.h>
65386+#include <linux/grinternal.h>
65387+
65388+#ifdef CONFIG_TREE_PREEMPT_RCU
65389+#define DISABLE_PREEMPT() preempt_disable()
65390+#define ENABLE_PREEMPT() preempt_enable()
65391+#else
65392+#define DISABLE_PREEMPT()
65393+#define ENABLE_PREEMPT()
65394+#endif
65395+
65396+#define BEGIN_LOCKS(x) \
65397+ DISABLE_PREEMPT(); \
65398+ rcu_read_lock(); \
65399+ read_lock(&tasklist_lock); \
65400+ read_lock(&grsec_exec_file_lock); \
65401+ if (x != GR_DO_AUDIT) \
65402+ spin_lock(&grsec_alert_lock); \
65403+ else \
65404+ spin_lock(&grsec_audit_lock)
65405+
65406+#define END_LOCKS(x) \
65407+ if (x != GR_DO_AUDIT) \
65408+ spin_unlock(&grsec_alert_lock); \
65409+ else \
65410+ spin_unlock(&grsec_audit_lock); \
65411+ read_unlock(&grsec_exec_file_lock); \
65412+ read_unlock(&tasklist_lock); \
65413+ rcu_read_unlock(); \
65414+ ENABLE_PREEMPT(); \
65415+ if (x == GR_DONT_AUDIT) \
65416+ gr_handle_alertkill(current)
65417+
65418+enum {
65419+ FLOODING,
65420+ NO_FLOODING
65421+};
65422+
65423+extern char *gr_alert_log_fmt;
65424+extern char *gr_audit_log_fmt;
65425+extern char *gr_alert_log_buf;
65426+extern char *gr_audit_log_buf;
65427+
65428+static int gr_log_start(int audit)
65429+{
65430+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
65431+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
65432+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
65433+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
65434+ unsigned long curr_secs = get_seconds();
65435+
65436+ if (audit == GR_DO_AUDIT)
65437+ goto set_fmt;
65438+
65439+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
65440+ grsec_alert_wtime = curr_secs;
65441+ grsec_alert_fyet = 0;
65442+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
65443+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
65444+ grsec_alert_fyet++;
65445+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
65446+ grsec_alert_wtime = curr_secs;
65447+ grsec_alert_fyet++;
65448+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
65449+ return FLOODING;
65450+ }
65451+ else return FLOODING;
65452+
65453+set_fmt:
65454+#endif
65455+ memset(buf, 0, PAGE_SIZE);
65456+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
65457+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
65458+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
65459+ } else if (current->signal->curr_ip) {
65460+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
65461+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
65462+ } else if (gr_acl_is_enabled()) {
65463+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
65464+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
65465+ } else {
65466+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
65467+ strcpy(buf, fmt);
65468+ }
65469+
65470+ return NO_FLOODING;
65471+}
65472+
65473+static void gr_log_middle(int audit, const char *msg, va_list ap)
65474+ __attribute__ ((format (printf, 2, 0)));
65475+
65476+static void gr_log_middle(int audit, const char *msg, va_list ap)
65477+{
65478+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
65479+ unsigned int len = strlen(buf);
65480+
65481+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
65482+
65483+ return;
65484+}
65485+
65486+static void gr_log_middle_varargs(int audit, const char *msg, ...)
65487+ __attribute__ ((format (printf, 2, 3)));
65488+
65489+static void gr_log_middle_varargs(int audit, const char *msg, ...)
65490+{
65491+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
65492+ unsigned int len = strlen(buf);
65493+ va_list ap;
65494+
65495+ va_start(ap, msg);
65496+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
65497+ va_end(ap);
65498+
65499+ return;
65500+}
65501+
65502+static void gr_log_end(int audit, int append_default)
65503+{
65504+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
65505+ if (append_default) {
65506+ struct task_struct *task = current;
65507+ struct task_struct *parent = task->real_parent;
65508+ const struct cred *cred = __task_cred(task);
65509+ const struct cred *pcred = __task_cred(parent);
65510+ unsigned int len = strlen(buf);
65511+
65512+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
65513+ }
65514+
65515+ printk("%s\n", buf);
65516+
65517+ return;
65518+}
65519+
65520+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
65521+{
65522+ int logtype;
65523+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
65524+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
65525+ void *voidptr = NULL;
65526+ int num1 = 0, num2 = 0;
65527+ unsigned long ulong1 = 0, ulong2 = 0;
65528+ struct dentry *dentry = NULL;
65529+ struct vfsmount *mnt = NULL;
65530+ struct file *file = NULL;
65531+ struct task_struct *task = NULL;
65532+ const struct cred *cred, *pcred;
65533+ va_list ap;
65534+
65535+ BEGIN_LOCKS(audit);
65536+ logtype = gr_log_start(audit);
65537+ if (logtype == FLOODING) {
65538+ END_LOCKS(audit);
65539+ return;
65540+ }
65541+ va_start(ap, argtypes);
65542+ switch (argtypes) {
65543+ case GR_TTYSNIFF:
65544+ task = va_arg(ap, struct task_struct *);
65545+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
65546+ break;
65547+ case GR_SYSCTL_HIDDEN:
65548+ str1 = va_arg(ap, char *);
65549+ gr_log_middle_varargs(audit, msg, result, str1);
65550+ break;
65551+ case GR_RBAC:
65552+ dentry = va_arg(ap, struct dentry *);
65553+ mnt = va_arg(ap, struct vfsmount *);
65554+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
65555+ break;
65556+ case GR_RBAC_STR:
65557+ dentry = va_arg(ap, struct dentry *);
65558+ mnt = va_arg(ap, struct vfsmount *);
65559+ str1 = va_arg(ap, char *);
65560+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
65561+ break;
65562+ case GR_STR_RBAC:
65563+ str1 = va_arg(ap, char *);
65564+ dentry = va_arg(ap, struct dentry *);
65565+ mnt = va_arg(ap, struct vfsmount *);
65566+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
65567+ break;
65568+ case GR_RBAC_MODE2:
65569+ dentry = va_arg(ap, struct dentry *);
65570+ mnt = va_arg(ap, struct vfsmount *);
65571+ str1 = va_arg(ap, char *);
65572+ str2 = va_arg(ap, char *);
65573+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
65574+ break;
65575+ case GR_RBAC_MODE3:
65576+ dentry = va_arg(ap, struct dentry *);
65577+ mnt = va_arg(ap, struct vfsmount *);
65578+ str1 = va_arg(ap, char *);
65579+ str2 = va_arg(ap, char *);
65580+ str3 = va_arg(ap, char *);
65581+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
65582+ break;
65583+ case GR_FILENAME:
65584+ dentry = va_arg(ap, struct dentry *);
65585+ mnt = va_arg(ap, struct vfsmount *);
65586+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
65587+ break;
65588+ case GR_STR_FILENAME:
65589+ str1 = va_arg(ap, char *);
65590+ dentry = va_arg(ap, struct dentry *);
65591+ mnt = va_arg(ap, struct vfsmount *);
65592+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
65593+ break;
65594+ case GR_FILENAME_STR:
65595+ dentry = va_arg(ap, struct dentry *);
65596+ mnt = va_arg(ap, struct vfsmount *);
65597+ str1 = va_arg(ap, char *);
65598+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
65599+ break;
65600+ case GR_FILENAME_TWO_INT:
65601+ dentry = va_arg(ap, struct dentry *);
65602+ mnt = va_arg(ap, struct vfsmount *);
65603+ num1 = va_arg(ap, int);
65604+ num2 = va_arg(ap, int);
65605+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
65606+ break;
65607+ case GR_FILENAME_TWO_INT_STR:
65608+ dentry = va_arg(ap, struct dentry *);
65609+ mnt = va_arg(ap, struct vfsmount *);
65610+ num1 = va_arg(ap, int);
65611+ num2 = va_arg(ap, int);
65612+ str1 = va_arg(ap, char *);
65613+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
65614+ break;
65615+ case GR_TEXTREL:
65616+ file = va_arg(ap, struct file *);
65617+ ulong1 = va_arg(ap, unsigned long);
65618+ ulong2 = va_arg(ap, unsigned long);
65619+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
65620+ break;
65621+ case GR_PTRACE:
65622+ task = va_arg(ap, struct task_struct *);
65623+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
65624+ break;
65625+ case GR_RESOURCE:
65626+ task = va_arg(ap, struct task_struct *);
65627+ cred = __task_cred(task);
65628+ pcred = __task_cred(task->real_parent);
65629+ ulong1 = va_arg(ap, unsigned long);
65630+ str1 = va_arg(ap, char *);
65631+ ulong2 = va_arg(ap, unsigned long);
65632+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
65633+ break;
65634+ case GR_CAP:
65635+ task = va_arg(ap, struct task_struct *);
65636+ cred = __task_cred(task);
65637+ pcred = __task_cred(task->real_parent);
65638+ str1 = va_arg(ap, char *);
65639+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
65640+ break;
65641+ case GR_SIG:
65642+ str1 = va_arg(ap, char *);
65643+ voidptr = va_arg(ap, void *);
65644+ gr_log_middle_varargs(audit, msg, str1, voidptr);
65645+ break;
65646+ case GR_SIG2:
65647+ task = va_arg(ap, struct task_struct *);
65648+ cred = __task_cred(task);
65649+ pcred = __task_cred(task->real_parent);
65650+ num1 = va_arg(ap, int);
65651+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
65652+ break;
65653+ case GR_CRASH1:
65654+ task = va_arg(ap, struct task_struct *);
65655+ cred = __task_cred(task);
65656+ pcred = __task_cred(task->real_parent);
65657+ ulong1 = va_arg(ap, unsigned long);
65658+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
65659+ break;
65660+ case GR_CRASH2:
65661+ task = va_arg(ap, struct task_struct *);
65662+ cred = __task_cred(task);
65663+ pcred = __task_cred(task->real_parent);
65664+ ulong1 = va_arg(ap, unsigned long);
65665+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
65666+ break;
65667+ case GR_RWXMAP:
65668+ file = va_arg(ap, struct file *);
65669+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
65670+ break;
65671+ case GR_PSACCT:
65672+ {
65673+ unsigned int wday, cday;
65674+ __u8 whr, chr;
65675+ __u8 wmin, cmin;
65676+ __u8 wsec, csec;
65677+ char cur_tty[64] = { 0 };
65678+ char parent_tty[64] = { 0 };
65679+
65680+ task = va_arg(ap, struct task_struct *);
65681+ wday = va_arg(ap, unsigned int);
65682+ cday = va_arg(ap, unsigned int);
65683+ whr = va_arg(ap, int);
65684+ chr = va_arg(ap, int);
65685+ wmin = va_arg(ap, int);
65686+ cmin = va_arg(ap, int);
65687+ wsec = va_arg(ap, int);
65688+ csec = va_arg(ap, int);
65689+ ulong1 = va_arg(ap, unsigned long);
65690+ cred = __task_cred(task);
65691+ pcred = __task_cred(task->real_parent);
65692+
65693+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
65694+ }
65695+ break;
65696+ default:
65697+ gr_log_middle(audit, msg, ap);
65698+ }
65699+ va_end(ap);
65700+ // these don't need DEFAULTSECARGS printed on the end
65701+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
65702+ gr_log_end(audit, 0);
65703+ else
65704+ gr_log_end(audit, 1);
65705+ END_LOCKS(audit);
65706+}
65707diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
65708new file mode 100644
65709index 0000000..f536303
65710--- /dev/null
65711+++ b/grsecurity/grsec_mem.c
65712@@ -0,0 +1,40 @@
65713+#include <linux/kernel.h>
65714+#include <linux/sched.h>
65715+#include <linux/mm.h>
65716+#include <linux/mman.h>
65717+#include <linux/grinternal.h>
65718+
65719+void
65720+gr_handle_ioperm(void)
65721+{
65722+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
65723+ return;
65724+}
65725+
65726+void
65727+gr_handle_iopl(void)
65728+{
65729+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
65730+ return;
65731+}
65732+
65733+void
65734+gr_handle_mem_readwrite(u64 from, u64 to)
65735+{
65736+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
65737+ return;
65738+}
65739+
65740+void
65741+gr_handle_vm86(void)
65742+{
65743+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
65744+ return;
65745+}
65746+
65747+void
65748+gr_log_badprocpid(const char *entry)
65749+{
65750+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
65751+ return;
65752+}
65753diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
65754new file mode 100644
65755index 0000000..2131422
65756--- /dev/null
65757+++ b/grsecurity/grsec_mount.c
65758@@ -0,0 +1,62 @@
65759+#include <linux/kernel.h>
65760+#include <linux/sched.h>
65761+#include <linux/mount.h>
65762+#include <linux/grsecurity.h>
65763+#include <linux/grinternal.h>
65764+
65765+void
65766+gr_log_remount(const char *devname, const int retval)
65767+{
65768+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
65769+ if (grsec_enable_mount && (retval >= 0))
65770+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
65771+#endif
65772+ return;
65773+}
65774+
65775+void
65776+gr_log_unmount(const char *devname, const int retval)
65777+{
65778+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
65779+ if (grsec_enable_mount && (retval >= 0))
65780+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
65781+#endif
65782+ return;
65783+}
65784+
65785+void
65786+gr_log_mount(const char *from, const char *to, const int retval)
65787+{
65788+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
65789+ if (grsec_enable_mount && (retval >= 0))
65790+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
65791+#endif
65792+ return;
65793+}
65794+
65795+int
65796+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
65797+{
65798+#ifdef CONFIG_GRKERNSEC_ROFS
65799+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
65800+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
65801+ return -EPERM;
65802+ } else
65803+ return 0;
65804+#endif
65805+ return 0;
65806+}
65807+
65808+int
65809+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
65810+{
65811+#ifdef CONFIG_GRKERNSEC_ROFS
65812+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
65813+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
65814+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
65815+ return -EPERM;
65816+ } else
65817+ return 0;
65818+#endif
65819+ return 0;
65820+}
65821diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
65822new file mode 100644
65823index 0000000..a3b12a0
65824--- /dev/null
65825+++ b/grsecurity/grsec_pax.c
65826@@ -0,0 +1,36 @@
65827+#include <linux/kernel.h>
65828+#include <linux/sched.h>
65829+#include <linux/mm.h>
65830+#include <linux/file.h>
65831+#include <linux/grinternal.h>
65832+#include <linux/grsecurity.h>
65833+
65834+void
65835+gr_log_textrel(struct vm_area_struct * vma)
65836+{
65837+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
65838+ if (grsec_enable_audit_textrel)
65839+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
65840+#endif
65841+ return;
65842+}
65843+
65844+void
65845+gr_log_rwxmmap(struct file *file)
65846+{
65847+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
65848+ if (grsec_enable_log_rwxmaps)
65849+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
65850+#endif
65851+ return;
65852+}
65853+
65854+void
65855+gr_log_rwxmprotect(struct file *file)
65856+{
65857+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
65858+ if (grsec_enable_log_rwxmaps)
65859+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
65860+#endif
65861+ return;
65862+}
65863diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
65864new file mode 100644
65865index 0000000..f7f29aa
65866--- /dev/null
65867+++ b/grsecurity/grsec_ptrace.c
65868@@ -0,0 +1,30 @@
65869+#include <linux/kernel.h>
65870+#include <linux/sched.h>
65871+#include <linux/grinternal.h>
65872+#include <linux/security.h>
65873+
65874+void
65875+gr_audit_ptrace(struct task_struct *task)
65876+{
65877+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
65878+ if (grsec_enable_audit_ptrace)
65879+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
65880+#endif
65881+ return;
65882+}
65883+
65884+int
65885+gr_ptrace_readexec(struct file *file, int unsafe_flags)
65886+{
65887+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
65888+ const struct dentry *dentry = file->f_path.dentry;
65889+ const struct vfsmount *mnt = file->f_path.mnt;
65890+
65891+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
65892+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
65893+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
65894+ return -EACCES;
65895+ }
65896+#endif
65897+ return 0;
65898+}
65899diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
65900new file mode 100644
65901index 0000000..e09715a
65902--- /dev/null
65903+++ b/grsecurity/grsec_sig.c
65904@@ -0,0 +1,222 @@
65905+#include <linux/kernel.h>
65906+#include <linux/sched.h>
65907+#include <linux/delay.h>
65908+#include <linux/grsecurity.h>
65909+#include <linux/grinternal.h>
65910+#include <linux/hardirq.h>
65911+
65912+char *signames[] = {
65913+ [SIGSEGV] = "Segmentation fault",
65914+ [SIGILL] = "Illegal instruction",
65915+ [SIGABRT] = "Abort",
65916+ [SIGBUS] = "Invalid alignment/Bus error"
65917+};
65918+
65919+void
65920+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
65921+{
65922+#ifdef CONFIG_GRKERNSEC_SIGNAL
65923+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
65924+ (sig == SIGABRT) || (sig == SIGBUS))) {
65925+ if (task_pid_nr(t) == task_pid_nr(current)) {
65926+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
65927+ } else {
65928+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
65929+ }
65930+ }
65931+#endif
65932+ return;
65933+}
65934+
65935+int
65936+gr_handle_signal(const struct task_struct *p, const int sig)
65937+{
65938+#ifdef CONFIG_GRKERNSEC
65939+ /* ignore the 0 signal for protected task checks */
65940+ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
65941+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
65942+ return -EPERM;
65943+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
65944+ return -EPERM;
65945+ }
65946+#endif
65947+ return 0;
65948+}
65949+
65950+#ifdef CONFIG_GRKERNSEC
65951+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
65952+
65953+int gr_fake_force_sig(int sig, struct task_struct *t)
65954+{
65955+ unsigned long int flags;
65956+ int ret, blocked, ignored;
65957+ struct k_sigaction *action;
65958+
65959+ spin_lock_irqsave(&t->sighand->siglock, flags);
65960+ action = &t->sighand->action[sig-1];
65961+ ignored = action->sa.sa_handler == SIG_IGN;
65962+ blocked = sigismember(&t->blocked, sig);
65963+ if (blocked || ignored) {
65964+ action->sa.sa_handler = SIG_DFL;
65965+ if (blocked) {
65966+ sigdelset(&t->blocked, sig);
65967+ recalc_sigpending_and_wake(t);
65968+ }
65969+ }
65970+ if (action->sa.sa_handler == SIG_DFL)
65971+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
65972+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
65973+
65974+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
65975+
65976+ return ret;
65977+}
65978+#endif
65979+
65980+#ifdef CONFIG_GRKERNSEC_BRUTE
65981+#define GR_USER_BAN_TIME (15 * 60)
65982+#define GR_DAEMON_BRUTE_TIME (30 * 60)
65983+
65984+static int __get_dumpable(unsigned long mm_flags)
65985+{
65986+ int ret;
65987+
65988+ ret = mm_flags & MMF_DUMPABLE_MASK;
65989+ return (ret >= 2) ? 2 : ret;
65990+}
65991+#endif
65992+
65993+void gr_handle_brute_attach(unsigned long mm_flags)
65994+{
65995+#ifdef CONFIG_GRKERNSEC_BRUTE
65996+ struct task_struct *p = current;
65997+ kuid_t uid = GLOBAL_ROOT_UID;
65998+ int daemon = 0;
65999+
66000+ if (!grsec_enable_brute)
66001+ return;
66002+
66003+ rcu_read_lock();
66004+ read_lock(&tasklist_lock);
66005+ read_lock(&grsec_exec_file_lock);
66006+ if (p->real_parent && p->real_parent->exec_file == p->exec_file) {
66007+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
66008+ p->real_parent->brute = 1;
66009+ daemon = 1;
66010+ } else {
66011+ const struct cred *cred = __task_cred(p), *cred2;
66012+ struct task_struct *tsk, *tsk2;
66013+
66014+ if (!__get_dumpable(mm_flags) && gr_is_global_nonroot(cred->uid)) {
66015+ struct user_struct *user;
66016+
66017+ uid = cred->uid;
66018+
66019+ /* this is put upon execution past expiration */
66020+ user = find_user(uid);
66021+ if (user == NULL)
66022+ goto unlock;
66023+ user->banned = 1;
66024+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
66025+ if (user->ban_expires == ~0UL)
66026+ user->ban_expires--;
66027+
66028+ do_each_thread(tsk2, tsk) {
66029+ cred2 = __task_cred(tsk);
66030+ if (tsk != p && uid_eq(cred2->uid, uid))
66031+ gr_fake_force_sig(SIGKILL, tsk);
66032+ } while_each_thread(tsk2, tsk);
66033+ }
66034+ }
66035+unlock:
66036+ read_unlock(&grsec_exec_file_lock);
66037+ read_unlock(&tasklist_lock);
66038+ rcu_read_unlock();
66039+
66040+ if (gr_is_global_nonroot(uid))
66041+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n",
66042+ GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
66043+ else if (daemon)
66044+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
66045+
66046+#endif
66047+ return;
66048+}
66049+
66050+void gr_handle_brute_check(void)
66051+{
66052+#ifdef CONFIG_GRKERNSEC_BRUTE
66053+ struct task_struct *p = current;
66054+
66055+ if (unlikely(p->brute)) {
66056+ if (!grsec_enable_brute)
66057+ p->brute = 0;
66058+ else if (time_before(get_seconds(), p->brute_expires))
66059+ msleep(30 * 1000);
66060+ }
66061+#endif
66062+ return;
66063+}
66064+
66065+void gr_handle_kernel_exploit(void)
66066+{
66067+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
66068+ const struct cred *cred;
66069+ struct task_struct *tsk, *tsk2;
66070+ struct user_struct *user;
66071+ kuid_t uid;
66072+
66073+ if (in_irq() || in_serving_softirq() || in_nmi())
66074+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
66075+
66076+ uid = current_uid();
66077+
66078+ if (gr_is_global_root(uid))
66079+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
66080+ else {
66081+ /* kill all the processes of this user, hold a reference
66082+ to their creds struct, and prevent them from creating
66083+ another process until system reset
66084+ */
66085+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
66086+ GR_GLOBAL_UID(uid));
66087+ /* we intentionally leak this ref */
66088+ user = get_uid(current->cred->user);
66089+ if (user) {
66090+ user->banned = 1;
66091+ user->ban_expires = ~0UL;
66092+ }
66093+
66094+ read_lock(&tasklist_lock);
66095+ do_each_thread(tsk2, tsk) {
66096+ cred = __task_cred(tsk);
66097+ if (uid_eq(cred->uid, uid))
66098+ gr_fake_force_sig(SIGKILL, tsk);
66099+ } while_each_thread(tsk2, tsk);
66100+ read_unlock(&tasklist_lock);
66101+ }
66102+#endif
66103+}
66104+
66105+int __gr_process_user_ban(struct user_struct *user)
66106+{
66107+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
66108+ if (unlikely(user->banned)) {
66109+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
66110+ user->banned = 0;
66111+ user->ban_expires = 0;
66112+ free_uid(user);
66113+ } else
66114+ return -EPERM;
66115+ }
66116+#endif
66117+ return 0;
66118+}
66119+
66120+int gr_process_user_ban(void)
66121+{
66122+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
66123+ return __gr_process_user_ban(current->cred->user);
66124+#endif
66125+ return 0;
66126+}
66127diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
66128new file mode 100644
66129index 0000000..4030d57
66130--- /dev/null
66131+++ b/grsecurity/grsec_sock.c
66132@@ -0,0 +1,244 @@
66133+#include <linux/kernel.h>
66134+#include <linux/module.h>
66135+#include <linux/sched.h>
66136+#include <linux/file.h>
66137+#include <linux/net.h>
66138+#include <linux/in.h>
66139+#include <linux/ip.h>
66140+#include <net/sock.h>
66141+#include <net/inet_sock.h>
66142+#include <linux/grsecurity.h>
66143+#include <linux/grinternal.h>
66144+#include <linux/gracl.h>
66145+
66146+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
66147+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
66148+
66149+EXPORT_SYMBOL(gr_search_udp_recvmsg);
66150+EXPORT_SYMBOL(gr_search_udp_sendmsg);
66151+
66152+#ifdef CONFIG_UNIX_MODULE
66153+EXPORT_SYMBOL(gr_acl_handle_unix);
66154+EXPORT_SYMBOL(gr_acl_handle_mknod);
66155+EXPORT_SYMBOL(gr_handle_chroot_unix);
66156+EXPORT_SYMBOL(gr_handle_create);
66157+#endif
66158+
66159+#ifdef CONFIG_GRKERNSEC
66160+#define gr_conn_table_size 32749
66161+struct conn_table_entry {
66162+ struct conn_table_entry *next;
66163+ struct signal_struct *sig;
66164+};
66165+
66166+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
66167+DEFINE_SPINLOCK(gr_conn_table_lock);
66168+
66169+extern const char * gr_socktype_to_name(unsigned char type);
66170+extern const char * gr_proto_to_name(unsigned char proto);
66171+extern const char * gr_sockfamily_to_name(unsigned char family);
66172+
66173+static __inline__ int
66174+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
66175+{
66176+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
66177+}
66178+
66179+static __inline__ int
66180+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
66181+ __u16 sport, __u16 dport)
66182+{
66183+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
66184+ sig->gr_sport == sport && sig->gr_dport == dport))
66185+ return 1;
66186+ else
66187+ return 0;
66188+}
66189+
66190+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
66191+{
66192+ struct conn_table_entry **match;
66193+ unsigned int index;
66194+
66195+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
66196+ sig->gr_sport, sig->gr_dport,
66197+ gr_conn_table_size);
66198+
66199+ newent->sig = sig;
66200+
66201+ match = &gr_conn_table[index];
66202+ newent->next = *match;
66203+ *match = newent;
66204+
66205+ return;
66206+}
66207+
66208+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
66209+{
66210+ struct conn_table_entry *match, *last = NULL;
66211+ unsigned int index;
66212+
66213+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
66214+ sig->gr_sport, sig->gr_dport,
66215+ gr_conn_table_size);
66216+
66217+ match = gr_conn_table[index];
66218+ while (match && !conn_match(match->sig,
66219+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
66220+ sig->gr_dport)) {
66221+ last = match;
66222+ match = match->next;
66223+ }
66224+
66225+ if (match) {
66226+ if (last)
66227+ last->next = match->next;
66228+ else
66229+ gr_conn_table[index] = NULL;
66230+ kfree(match);
66231+ }
66232+
66233+ return;
66234+}
66235+
66236+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
66237+ __u16 sport, __u16 dport)
66238+{
66239+ struct conn_table_entry *match;
66240+ unsigned int index;
66241+
66242+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
66243+
66244+ match = gr_conn_table[index];
66245+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
66246+ match = match->next;
66247+
66248+ if (match)
66249+ return match->sig;
66250+ else
66251+ return NULL;
66252+}
66253+
66254+#endif
66255+
66256+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
66257+{
66258+#ifdef CONFIG_GRKERNSEC
66259+ struct signal_struct *sig = task->signal;
66260+ struct conn_table_entry *newent;
66261+
66262+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
66263+ if (newent == NULL)
66264+ return;
66265+ /* no bh lock needed since we are called with bh disabled */
66266+ spin_lock(&gr_conn_table_lock);
66267+ gr_del_task_from_ip_table_nolock(sig);
66268+ sig->gr_saddr = inet->inet_rcv_saddr;
66269+ sig->gr_daddr = inet->inet_daddr;
66270+ sig->gr_sport = inet->inet_sport;
66271+ sig->gr_dport = inet->inet_dport;
66272+ gr_add_to_task_ip_table_nolock(sig, newent);
66273+ spin_unlock(&gr_conn_table_lock);
66274+#endif
66275+ return;
66276+}
66277+
66278+void gr_del_task_from_ip_table(struct task_struct *task)
66279+{
66280+#ifdef CONFIG_GRKERNSEC
66281+ spin_lock_bh(&gr_conn_table_lock);
66282+ gr_del_task_from_ip_table_nolock(task->signal);
66283+ spin_unlock_bh(&gr_conn_table_lock);
66284+#endif
66285+ return;
66286+}
66287+
66288+void
66289+gr_attach_curr_ip(const struct sock *sk)
66290+{
66291+#ifdef CONFIG_GRKERNSEC
66292+ struct signal_struct *p, *set;
66293+ const struct inet_sock *inet = inet_sk(sk);
66294+
66295+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
66296+ return;
66297+
66298+ set = current->signal;
66299+
66300+ spin_lock_bh(&gr_conn_table_lock);
66301+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
66302+ inet->inet_dport, inet->inet_sport);
66303+ if (unlikely(p != NULL)) {
66304+ set->curr_ip = p->curr_ip;
66305+ set->used_accept = 1;
66306+ gr_del_task_from_ip_table_nolock(p);
66307+ spin_unlock_bh(&gr_conn_table_lock);
66308+ return;
66309+ }
66310+ spin_unlock_bh(&gr_conn_table_lock);
66311+
66312+ set->curr_ip = inet->inet_daddr;
66313+ set->used_accept = 1;
66314+#endif
66315+ return;
66316+}
66317+
66318+int
66319+gr_handle_sock_all(const int family, const int type, const int protocol)
66320+{
66321+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
66322+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
66323+ (family != AF_UNIX)) {
66324+ if (family == AF_INET)
66325+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
66326+ else
66327+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
66328+ return -EACCES;
66329+ }
66330+#endif
66331+ return 0;
66332+}
66333+
66334+int
66335+gr_handle_sock_server(const struct sockaddr *sck)
66336+{
66337+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
66338+ if (grsec_enable_socket_server &&
66339+ in_group_p(grsec_socket_server_gid) &&
66340+ sck && (sck->sa_family != AF_UNIX) &&
66341+ (sck->sa_family != AF_LOCAL)) {
66342+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
66343+ return -EACCES;
66344+ }
66345+#endif
66346+ return 0;
66347+}
66348+
66349+int
66350+gr_handle_sock_server_other(const struct sock *sck)
66351+{
66352+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
66353+ if (grsec_enable_socket_server &&
66354+ in_group_p(grsec_socket_server_gid) &&
66355+ sck && (sck->sk_family != AF_UNIX) &&
66356+ (sck->sk_family != AF_LOCAL)) {
66357+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
66358+ return -EACCES;
66359+ }
66360+#endif
66361+ return 0;
66362+}
66363+
66364+int
66365+gr_handle_sock_client(const struct sockaddr *sck)
66366+{
66367+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
66368+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
66369+ sck && (sck->sa_family != AF_UNIX) &&
66370+ (sck->sa_family != AF_LOCAL)) {
66371+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
66372+ return -EACCES;
66373+ }
66374+#endif
66375+ return 0;
66376+}
66377diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
66378new file mode 100644
66379index 0000000..f55ef0f
66380--- /dev/null
66381+++ b/grsecurity/grsec_sysctl.c
66382@@ -0,0 +1,469 @@
66383+#include <linux/kernel.h>
66384+#include <linux/sched.h>
66385+#include <linux/sysctl.h>
66386+#include <linux/grsecurity.h>
66387+#include <linux/grinternal.h>
66388+
66389+int
66390+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
66391+{
66392+#ifdef CONFIG_GRKERNSEC_SYSCTL
66393+ if (dirname == NULL || name == NULL)
66394+ return 0;
66395+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
66396+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
66397+ return -EACCES;
66398+ }
66399+#endif
66400+ return 0;
66401+}
66402+
66403+#ifdef CONFIG_GRKERNSEC_ROFS
66404+static int __maybe_unused one = 1;
66405+#endif
66406+
66407+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
66408+struct ctl_table grsecurity_table[] = {
66409+#ifdef CONFIG_GRKERNSEC_SYSCTL
66410+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
66411+#ifdef CONFIG_GRKERNSEC_IO
66412+ {
66413+ .procname = "disable_priv_io",
66414+ .data = &grsec_disable_privio,
66415+ .maxlen = sizeof(int),
66416+ .mode = 0600,
66417+ .proc_handler = &proc_dointvec,
66418+ },
66419+#endif
66420+#endif
66421+#ifdef CONFIG_GRKERNSEC_LINK
66422+ {
66423+ .procname = "linking_restrictions",
66424+ .data = &grsec_enable_link,
66425+ .maxlen = sizeof(int),
66426+ .mode = 0600,
66427+ .proc_handler = &proc_dointvec,
66428+ },
66429+#endif
66430+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
66431+ {
66432+ .procname = "enforce_symlinksifowner",
66433+ .data = &grsec_enable_symlinkown,
66434+ .maxlen = sizeof(int),
66435+ .mode = 0600,
66436+ .proc_handler = &proc_dointvec,
66437+ },
66438+ {
66439+ .procname = "symlinkown_gid",
66440+ .data = &grsec_symlinkown_gid,
66441+ .maxlen = sizeof(int),
66442+ .mode = 0600,
66443+ .proc_handler = &proc_dointvec,
66444+ },
66445+#endif
66446+#ifdef CONFIG_GRKERNSEC_BRUTE
66447+ {
66448+ .procname = "deter_bruteforce",
66449+ .data = &grsec_enable_brute,
66450+ .maxlen = sizeof(int),
66451+ .mode = 0600,
66452+ .proc_handler = &proc_dointvec,
66453+ },
66454+#endif
66455+#ifdef CONFIG_GRKERNSEC_FIFO
66456+ {
66457+ .procname = "fifo_restrictions",
66458+ .data = &grsec_enable_fifo,
66459+ .maxlen = sizeof(int),
66460+ .mode = 0600,
66461+ .proc_handler = &proc_dointvec,
66462+ },
66463+#endif
66464+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
66465+ {
66466+ .procname = "ptrace_readexec",
66467+ .data = &grsec_enable_ptrace_readexec,
66468+ .maxlen = sizeof(int),
66469+ .mode = 0600,
66470+ .proc_handler = &proc_dointvec,
66471+ },
66472+#endif
66473+#ifdef CONFIG_GRKERNSEC_SETXID
66474+ {
66475+ .procname = "consistent_setxid",
66476+ .data = &grsec_enable_setxid,
66477+ .maxlen = sizeof(int),
66478+ .mode = 0600,
66479+ .proc_handler = &proc_dointvec,
66480+ },
66481+#endif
66482+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66483+ {
66484+ .procname = "ip_blackhole",
66485+ .data = &grsec_enable_blackhole,
66486+ .maxlen = sizeof(int),
66487+ .mode = 0600,
66488+ .proc_handler = &proc_dointvec,
66489+ },
66490+ {
66491+ .procname = "lastack_retries",
66492+ .data = &grsec_lastack_retries,
66493+ .maxlen = sizeof(int),
66494+ .mode = 0600,
66495+ .proc_handler = &proc_dointvec,
66496+ },
66497+#endif
66498+#ifdef CONFIG_GRKERNSEC_EXECLOG
66499+ {
66500+ .procname = "exec_logging",
66501+ .data = &grsec_enable_execlog,
66502+ .maxlen = sizeof(int),
66503+ .mode = 0600,
66504+ .proc_handler = &proc_dointvec,
66505+ },
66506+#endif
66507+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
66508+ {
66509+ .procname = "rwxmap_logging",
66510+ .data = &grsec_enable_log_rwxmaps,
66511+ .maxlen = sizeof(int),
66512+ .mode = 0600,
66513+ .proc_handler = &proc_dointvec,
66514+ },
66515+#endif
66516+#ifdef CONFIG_GRKERNSEC_SIGNAL
66517+ {
66518+ .procname = "signal_logging",
66519+ .data = &grsec_enable_signal,
66520+ .maxlen = sizeof(int),
66521+ .mode = 0600,
66522+ .proc_handler = &proc_dointvec,
66523+ },
66524+#endif
66525+#ifdef CONFIG_GRKERNSEC_FORKFAIL
66526+ {
66527+ .procname = "forkfail_logging",
66528+ .data = &grsec_enable_forkfail,
66529+ .maxlen = sizeof(int),
66530+ .mode = 0600,
66531+ .proc_handler = &proc_dointvec,
66532+ },
66533+#endif
66534+#ifdef CONFIG_GRKERNSEC_TIME
66535+ {
66536+ .procname = "timechange_logging",
66537+ .data = &grsec_enable_time,
66538+ .maxlen = sizeof(int),
66539+ .mode = 0600,
66540+ .proc_handler = &proc_dointvec,
66541+ },
66542+#endif
66543+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
66544+ {
66545+ .procname = "chroot_deny_shmat",
66546+ .data = &grsec_enable_chroot_shmat,
66547+ .maxlen = sizeof(int),
66548+ .mode = 0600,
66549+ .proc_handler = &proc_dointvec,
66550+ },
66551+#endif
66552+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
66553+ {
66554+ .procname = "chroot_deny_unix",
66555+ .data = &grsec_enable_chroot_unix,
66556+ .maxlen = sizeof(int),
66557+ .mode = 0600,
66558+ .proc_handler = &proc_dointvec,
66559+ },
66560+#endif
66561+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
66562+ {
66563+ .procname = "chroot_deny_mount",
66564+ .data = &grsec_enable_chroot_mount,
66565+ .maxlen = sizeof(int),
66566+ .mode = 0600,
66567+ .proc_handler = &proc_dointvec,
66568+ },
66569+#endif
66570+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
66571+ {
66572+ .procname = "chroot_deny_fchdir",
66573+ .data = &grsec_enable_chroot_fchdir,
66574+ .maxlen = sizeof(int),
66575+ .mode = 0600,
66576+ .proc_handler = &proc_dointvec,
66577+ },
66578+#endif
66579+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
66580+ {
66581+ .procname = "chroot_deny_chroot",
66582+ .data = &grsec_enable_chroot_double,
66583+ .maxlen = sizeof(int),
66584+ .mode = 0600,
66585+ .proc_handler = &proc_dointvec,
66586+ },
66587+#endif
66588+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
66589+ {
66590+ .procname = "chroot_deny_pivot",
66591+ .data = &grsec_enable_chroot_pivot,
66592+ .maxlen = sizeof(int),
66593+ .mode = 0600,
66594+ .proc_handler = &proc_dointvec,
66595+ },
66596+#endif
66597+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
66598+ {
66599+ .procname = "chroot_enforce_chdir",
66600+ .data = &grsec_enable_chroot_chdir,
66601+ .maxlen = sizeof(int),
66602+ .mode = 0600,
66603+ .proc_handler = &proc_dointvec,
66604+ },
66605+#endif
66606+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
66607+ {
66608+ .procname = "chroot_deny_chmod",
66609+ .data = &grsec_enable_chroot_chmod,
66610+ .maxlen = sizeof(int),
66611+ .mode = 0600,
66612+ .proc_handler = &proc_dointvec,
66613+ },
66614+#endif
66615+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
66616+ {
66617+ .procname = "chroot_deny_mknod",
66618+ .data = &grsec_enable_chroot_mknod,
66619+ .maxlen = sizeof(int),
66620+ .mode = 0600,
66621+ .proc_handler = &proc_dointvec,
66622+ },
66623+#endif
66624+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
66625+ {
66626+ .procname = "chroot_restrict_nice",
66627+ .data = &grsec_enable_chroot_nice,
66628+ .maxlen = sizeof(int),
66629+ .mode = 0600,
66630+ .proc_handler = &proc_dointvec,
66631+ },
66632+#endif
66633+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
66634+ {
66635+ .procname = "chroot_execlog",
66636+ .data = &grsec_enable_chroot_execlog,
66637+ .maxlen = sizeof(int),
66638+ .mode = 0600,
66639+ .proc_handler = &proc_dointvec,
66640+ },
66641+#endif
66642+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
66643+ {
66644+ .procname = "chroot_caps",
66645+ .data = &grsec_enable_chroot_caps,
66646+ .maxlen = sizeof(int),
66647+ .mode = 0600,
66648+ .proc_handler = &proc_dointvec,
66649+ },
66650+#endif
66651+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
66652+ {
66653+ .procname = "chroot_deny_sysctl",
66654+ .data = &grsec_enable_chroot_sysctl,
66655+ .maxlen = sizeof(int),
66656+ .mode = 0600,
66657+ .proc_handler = &proc_dointvec,
66658+ },
66659+#endif
66660+#ifdef CONFIG_GRKERNSEC_TPE
66661+ {
66662+ .procname = "tpe",
66663+ .data = &grsec_enable_tpe,
66664+ .maxlen = sizeof(int),
66665+ .mode = 0600,
66666+ .proc_handler = &proc_dointvec,
66667+ },
66668+ {
66669+ .procname = "tpe_gid",
66670+ .data = &grsec_tpe_gid,
66671+ .maxlen = sizeof(int),
66672+ .mode = 0600,
66673+ .proc_handler = &proc_dointvec,
66674+ },
66675+#endif
66676+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
66677+ {
66678+ .procname = "tpe_invert",
66679+ .data = &grsec_enable_tpe_invert,
66680+ .maxlen = sizeof(int),
66681+ .mode = 0600,
66682+ .proc_handler = &proc_dointvec,
66683+ },
66684+#endif
66685+#ifdef CONFIG_GRKERNSEC_TPE_ALL
66686+ {
66687+ .procname = "tpe_restrict_all",
66688+ .data = &grsec_enable_tpe_all,
66689+ .maxlen = sizeof(int),
66690+ .mode = 0600,
66691+ .proc_handler = &proc_dointvec,
66692+ },
66693+#endif
66694+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
66695+ {
66696+ .procname = "socket_all",
66697+ .data = &grsec_enable_socket_all,
66698+ .maxlen = sizeof(int),
66699+ .mode = 0600,
66700+ .proc_handler = &proc_dointvec,
66701+ },
66702+ {
66703+ .procname = "socket_all_gid",
66704+ .data = &grsec_socket_all_gid,
66705+ .maxlen = sizeof(int),
66706+ .mode = 0600,
66707+ .proc_handler = &proc_dointvec,
66708+ },
66709+#endif
66710+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
66711+ {
66712+ .procname = "socket_client",
66713+ .data = &grsec_enable_socket_client,
66714+ .maxlen = sizeof(int),
66715+ .mode = 0600,
66716+ .proc_handler = &proc_dointvec,
66717+ },
66718+ {
66719+ .procname = "socket_client_gid",
66720+ .data = &grsec_socket_client_gid,
66721+ .maxlen = sizeof(int),
66722+ .mode = 0600,
66723+ .proc_handler = &proc_dointvec,
66724+ },
66725+#endif
66726+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
66727+ {
66728+ .procname = "socket_server",
66729+ .data = &grsec_enable_socket_server,
66730+ .maxlen = sizeof(int),
66731+ .mode = 0600,
66732+ .proc_handler = &proc_dointvec,
66733+ },
66734+ {
66735+ .procname = "socket_server_gid",
66736+ .data = &grsec_socket_server_gid,
66737+ .maxlen = sizeof(int),
66738+ .mode = 0600,
66739+ .proc_handler = &proc_dointvec,
66740+ },
66741+#endif
66742+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
66743+ {
66744+ .procname = "audit_group",
66745+ .data = &grsec_enable_group,
66746+ .maxlen = sizeof(int),
66747+ .mode = 0600,
66748+ .proc_handler = &proc_dointvec,
66749+ },
66750+ {
66751+ .procname = "audit_gid",
66752+ .data = &grsec_audit_gid,
66753+ .maxlen = sizeof(int),
66754+ .mode = 0600,
66755+ .proc_handler = &proc_dointvec,
66756+ },
66757+#endif
66758+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
66759+ {
66760+ .procname = "audit_chdir",
66761+ .data = &grsec_enable_chdir,
66762+ .maxlen = sizeof(int),
66763+ .mode = 0600,
66764+ .proc_handler = &proc_dointvec,
66765+ },
66766+#endif
66767+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
66768+ {
66769+ .procname = "audit_mount",
66770+ .data = &grsec_enable_mount,
66771+ .maxlen = sizeof(int),
66772+ .mode = 0600,
66773+ .proc_handler = &proc_dointvec,
66774+ },
66775+#endif
66776+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
66777+ {
66778+ .procname = "audit_textrel",
66779+ .data = &grsec_enable_audit_textrel,
66780+ .maxlen = sizeof(int),
66781+ .mode = 0600,
66782+ .proc_handler = &proc_dointvec,
66783+ },
66784+#endif
66785+#ifdef CONFIG_GRKERNSEC_DMESG
66786+ {
66787+ .procname = "dmesg",
66788+ .data = &grsec_enable_dmesg,
66789+ .maxlen = sizeof(int),
66790+ .mode = 0600,
66791+ .proc_handler = &proc_dointvec,
66792+ },
66793+#endif
66794+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
66795+ {
66796+ .procname = "chroot_findtask",
66797+ .data = &grsec_enable_chroot_findtask,
66798+ .maxlen = sizeof(int),
66799+ .mode = 0600,
66800+ .proc_handler = &proc_dointvec,
66801+ },
66802+#endif
66803+#ifdef CONFIG_GRKERNSEC_RESLOG
66804+ {
66805+ .procname = "resource_logging",
66806+ .data = &grsec_resource_logging,
66807+ .maxlen = sizeof(int),
66808+ .mode = 0600,
66809+ .proc_handler = &proc_dointvec,
66810+ },
66811+#endif
66812+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
66813+ {
66814+ .procname = "audit_ptrace",
66815+ .data = &grsec_enable_audit_ptrace,
66816+ .maxlen = sizeof(int),
66817+ .mode = 0600,
66818+ .proc_handler = &proc_dointvec,
66819+ },
66820+#endif
66821+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
66822+ {
66823+ .procname = "harden_ptrace",
66824+ .data = &grsec_enable_harden_ptrace,
66825+ .maxlen = sizeof(int),
66826+ .mode = 0600,
66827+ .proc_handler = &proc_dointvec,
66828+ },
66829+#endif
66830+ {
66831+ .procname = "grsec_lock",
66832+ .data = &grsec_lock,
66833+ .maxlen = sizeof(int),
66834+ .mode = 0600,
66835+ .proc_handler = &proc_dointvec,
66836+ },
66837+#endif
66838+#ifdef CONFIG_GRKERNSEC_ROFS
66839+ {
66840+ .procname = "romount_protect",
66841+ .data = &grsec_enable_rofs,
66842+ .maxlen = sizeof(int),
66843+ .mode = 0600,
66844+ .proc_handler = &proc_dointvec_minmax,
66845+ .extra1 = &one,
66846+ .extra2 = &one,
66847+ },
66848+#endif
66849+ { }
66850+};
66851+#endif
66852diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
66853new file mode 100644
66854index 0000000..0dc13c3
66855--- /dev/null
66856+++ b/grsecurity/grsec_time.c
66857@@ -0,0 +1,16 @@
66858+#include <linux/kernel.h>
66859+#include <linux/sched.h>
66860+#include <linux/grinternal.h>
66861+#include <linux/module.h>
66862+
66863+void
66864+gr_log_timechange(void)
66865+{
66866+#ifdef CONFIG_GRKERNSEC_TIME
66867+ if (grsec_enable_time)
66868+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
66869+#endif
66870+ return;
66871+}
66872+
66873+EXPORT_SYMBOL(gr_log_timechange);
66874diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
66875new file mode 100644
66876index 0000000..ee57dcf
66877--- /dev/null
66878+++ b/grsecurity/grsec_tpe.c
66879@@ -0,0 +1,73 @@
66880+#include <linux/kernel.h>
66881+#include <linux/sched.h>
66882+#include <linux/file.h>
66883+#include <linux/fs.h>
66884+#include <linux/grinternal.h>
66885+
66886+extern int gr_acl_tpe_check(void);
66887+
66888+int
66889+gr_tpe_allow(const struct file *file)
66890+{
66891+#ifdef CONFIG_GRKERNSEC
66892+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
66893+ const struct cred *cred = current_cred();
66894+ char *msg = NULL;
66895+ char *msg2 = NULL;
66896+
66897+ // never restrict root
66898+ if (gr_is_global_root(cred->uid))
66899+ return 1;
66900+
66901+ if (grsec_enable_tpe) {
66902+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
66903+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
66904+ msg = "not being in trusted group";
66905+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
66906+ msg = "being in untrusted group";
66907+#else
66908+ if (in_group_p(grsec_tpe_gid))
66909+ msg = "being in untrusted group";
66910+#endif
66911+ }
66912+ if (!msg && gr_acl_tpe_check())
66913+ msg = "being in untrusted role";
66914+
66915+ // not in any affected group/role
66916+ if (!msg)
66917+ goto next_check;
66918+
66919+ if (gr_is_global_nonroot(inode->i_uid))
66920+ msg2 = "file in non-root-owned directory";
66921+ else if (inode->i_mode & S_IWOTH)
66922+ msg2 = "file in world-writable directory";
66923+ else if (inode->i_mode & S_IWGRP)
66924+ msg2 = "file in group-writable directory";
66925+
66926+ if (msg && msg2) {
66927+ char fullmsg[70] = {0};
66928+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
66929+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
66930+ return 0;
66931+ }
66932+ msg = NULL;
66933+next_check:
66934+#ifdef CONFIG_GRKERNSEC_TPE_ALL
66935+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
66936+ return 1;
66937+
66938+ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
66939+ msg = "directory not owned by user";
66940+ else if (inode->i_mode & S_IWOTH)
66941+ msg = "file in world-writable directory";
66942+ else if (inode->i_mode & S_IWGRP)
66943+ msg = "file in group-writable directory";
66944+
66945+ if (msg) {
66946+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
66947+ return 0;
66948+ }
66949+#endif
66950+#endif
66951+ return 1;
66952+}
66953diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
66954new file mode 100644
66955index 0000000..9f7b1ac
66956--- /dev/null
66957+++ b/grsecurity/grsum.c
66958@@ -0,0 +1,61 @@
66959+#include <linux/err.h>
66960+#include <linux/kernel.h>
66961+#include <linux/sched.h>
66962+#include <linux/mm.h>
66963+#include <linux/scatterlist.h>
66964+#include <linux/crypto.h>
66965+#include <linux/gracl.h>
66966+
66967+
66968+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
66969+#error "crypto and sha256 must be built into the kernel"
66970+#endif
66971+
66972+int
66973+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
66974+{
66975+ char *p;
66976+ struct crypto_hash *tfm;
66977+ struct hash_desc desc;
66978+ struct scatterlist sg;
66979+ unsigned char temp_sum[GR_SHA_LEN];
66980+ volatile int retval = 0;
66981+ volatile int dummy = 0;
66982+ unsigned int i;
66983+
66984+ sg_init_table(&sg, 1);
66985+
66986+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
66987+ if (IS_ERR(tfm)) {
66988+ /* should never happen, since sha256 should be built in */
66989+ return 1;
66990+ }
66991+
66992+ desc.tfm = tfm;
66993+ desc.flags = 0;
66994+
66995+ crypto_hash_init(&desc);
66996+
66997+ p = salt;
66998+ sg_set_buf(&sg, p, GR_SALT_LEN);
66999+ crypto_hash_update(&desc, &sg, sg.length);
67000+
67001+ p = entry->pw;
67002+ sg_set_buf(&sg, p, strlen(p));
67003+
67004+ crypto_hash_update(&desc, &sg, sg.length);
67005+
67006+ crypto_hash_final(&desc, temp_sum);
67007+
67008+ memset(entry->pw, 0, GR_PW_LEN);
67009+
67010+ for (i = 0; i < GR_SHA_LEN; i++)
67011+ if (sum[i] != temp_sum[i])
67012+ retval = 1;
67013+ else
67014+ dummy = 1; // waste a cycle
67015+
67016+ crypto_free_hash(tfm);
67017+
67018+ return retval;
67019+}
67020diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
67021index 77ff547..181834f 100644
67022--- a/include/asm-generic/4level-fixup.h
67023+++ b/include/asm-generic/4level-fixup.h
67024@@ -13,8 +13,10 @@
67025 #define pmd_alloc(mm, pud, address) \
67026 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
67027 NULL: pmd_offset(pud, address))
67028+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
67029
67030 #define pud_alloc(mm, pgd, address) (pgd)
67031+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
67032 #define pud_offset(pgd, start) (pgd)
67033 #define pud_none(pud) 0
67034 #define pud_bad(pud) 0
67035diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
67036index b7babf0..04ad282 100644
67037--- a/include/asm-generic/atomic-long.h
67038+++ b/include/asm-generic/atomic-long.h
67039@@ -22,6 +22,12 @@
67040
67041 typedef atomic64_t atomic_long_t;
67042
67043+#ifdef CONFIG_PAX_REFCOUNT
67044+typedef atomic64_unchecked_t atomic_long_unchecked_t;
67045+#else
67046+typedef atomic64_t atomic_long_unchecked_t;
67047+#endif
67048+
67049 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
67050
67051 static inline long atomic_long_read(atomic_long_t *l)
67052@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
67053 return (long)atomic64_read(v);
67054 }
67055
67056+#ifdef CONFIG_PAX_REFCOUNT
67057+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
67058+{
67059+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
67060+
67061+ return (long)atomic64_read_unchecked(v);
67062+}
67063+#endif
67064+
67065 static inline void atomic_long_set(atomic_long_t *l, long i)
67066 {
67067 atomic64_t *v = (atomic64_t *)l;
67068@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
67069 atomic64_set(v, i);
67070 }
67071
67072+#ifdef CONFIG_PAX_REFCOUNT
67073+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
67074+{
67075+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
67076+
67077+ atomic64_set_unchecked(v, i);
67078+}
67079+#endif
67080+
67081 static inline void atomic_long_inc(atomic_long_t *l)
67082 {
67083 atomic64_t *v = (atomic64_t *)l;
67084@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
67085 atomic64_inc(v);
67086 }
67087
67088+#ifdef CONFIG_PAX_REFCOUNT
67089+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
67090+{
67091+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
67092+
67093+ atomic64_inc_unchecked(v);
67094+}
67095+#endif
67096+
67097 static inline void atomic_long_dec(atomic_long_t *l)
67098 {
67099 atomic64_t *v = (atomic64_t *)l;
67100@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
67101 atomic64_dec(v);
67102 }
67103
67104+#ifdef CONFIG_PAX_REFCOUNT
67105+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
67106+{
67107+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
67108+
67109+ atomic64_dec_unchecked(v);
67110+}
67111+#endif
67112+
67113 static inline void atomic_long_add(long i, atomic_long_t *l)
67114 {
67115 atomic64_t *v = (atomic64_t *)l;
67116@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
67117 atomic64_add(i, v);
67118 }
67119
67120+#ifdef CONFIG_PAX_REFCOUNT
67121+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
67122+{
67123+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
67124+
67125+ atomic64_add_unchecked(i, v);
67126+}
67127+#endif
67128+
67129 static inline void atomic_long_sub(long i, atomic_long_t *l)
67130 {
67131 atomic64_t *v = (atomic64_t *)l;
67132@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
67133 atomic64_sub(i, v);
67134 }
67135
67136+#ifdef CONFIG_PAX_REFCOUNT
67137+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
67138+{
67139+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
67140+
67141+ atomic64_sub_unchecked(i, v);
67142+}
67143+#endif
67144+
67145 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
67146 {
67147 atomic64_t *v = (atomic64_t *)l;
67148@@ -101,6 +161,15 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
67149 return (long)atomic64_add_return(i, v);
67150 }
67151
67152+#ifdef CONFIG_PAX_REFCOUNT
67153+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
67154+{
67155+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
67156+
67157+ return (long)atomic64_add_return_unchecked(i, v);
67158+}
67159+#endif
67160+
67161 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
67162 {
67163 atomic64_t *v = (atomic64_t *)l;
67164@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
67165 return (long)atomic64_inc_return(v);
67166 }
67167
67168+#ifdef CONFIG_PAX_REFCOUNT
67169+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
67170+{
67171+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
67172+
67173+ return (long)atomic64_inc_return_unchecked(v);
67174+}
67175+#endif
67176+
67177 static inline long atomic_long_dec_return(atomic_long_t *l)
67178 {
67179 atomic64_t *v = (atomic64_t *)l;
67180@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
67181
67182 typedef atomic_t atomic_long_t;
67183
67184+#ifdef CONFIG_PAX_REFCOUNT
67185+typedef atomic_unchecked_t atomic_long_unchecked_t;
67186+#else
67187+typedef atomic_t atomic_long_unchecked_t;
67188+#endif
67189+
67190 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
67191 static inline long atomic_long_read(atomic_long_t *l)
67192 {
67193@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
67194 return (long)atomic_read(v);
67195 }
67196
67197+#ifdef CONFIG_PAX_REFCOUNT
67198+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
67199+{
67200+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
67201+
67202+ return (long)atomic_read_unchecked(v);
67203+}
67204+#endif
67205+
67206 static inline void atomic_long_set(atomic_long_t *l, long i)
67207 {
67208 atomic_t *v = (atomic_t *)l;
67209@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
67210 atomic_set(v, i);
67211 }
67212
67213+#ifdef CONFIG_PAX_REFCOUNT
67214+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
67215+{
67216+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
67217+
67218+ atomic_set_unchecked(v, i);
67219+}
67220+#endif
67221+
67222 static inline void atomic_long_inc(atomic_long_t *l)
67223 {
67224 atomic_t *v = (atomic_t *)l;
67225@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
67226 atomic_inc(v);
67227 }
67228
67229+#ifdef CONFIG_PAX_REFCOUNT
67230+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
67231+{
67232+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
67233+
67234+ atomic_inc_unchecked(v);
67235+}
67236+#endif
67237+
67238 static inline void atomic_long_dec(atomic_long_t *l)
67239 {
67240 atomic_t *v = (atomic_t *)l;
67241@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
67242 atomic_dec(v);
67243 }
67244
67245+#ifdef CONFIG_PAX_REFCOUNT
67246+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
67247+{
67248+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
67249+
67250+ atomic_dec_unchecked(v);
67251+}
67252+#endif
67253+
67254 static inline void atomic_long_add(long i, atomic_long_t *l)
67255 {
67256 atomic_t *v = (atomic_t *)l;
67257@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
67258 atomic_add(i, v);
67259 }
67260
67261+#ifdef CONFIG_PAX_REFCOUNT
67262+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
67263+{
67264+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
67265+
67266+ atomic_add_unchecked(i, v);
67267+}
67268+#endif
67269+
67270 static inline void atomic_long_sub(long i, atomic_long_t *l)
67271 {
67272 atomic_t *v = (atomic_t *)l;
67273@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
67274 atomic_sub(i, v);
67275 }
67276
67277+#ifdef CONFIG_PAX_REFCOUNT
67278+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
67279+{
67280+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
67281+
67282+ atomic_sub_unchecked(i, v);
67283+}
67284+#endif
67285+
67286 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
67287 {
67288 atomic_t *v = (atomic_t *)l;
67289@@ -218,6 +356,16 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
67290 return (long)atomic_add_return(i, v);
67291 }
67292
67293+#ifdef CONFIG_PAX_REFCOUNT
67294+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
67295+{
67296+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
67297+
67298+ return (long)atomic_add_return_unchecked(i, v);
67299+}
67300+
67301+#endif
67302+
67303 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
67304 {
67305 atomic_t *v = (atomic_t *)l;
67306@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
67307 return (long)atomic_inc_return(v);
67308 }
67309
67310+#ifdef CONFIG_PAX_REFCOUNT
67311+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
67312+{
67313+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
67314+
67315+ return (long)atomic_inc_return_unchecked(v);
67316+}
67317+#endif
67318+
67319 static inline long atomic_long_dec_return(atomic_long_t *l)
67320 {
67321 atomic_t *v = (atomic_t *)l;
67322@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
67323
67324 #endif /* BITS_PER_LONG == 64 */
67325
67326+#ifdef CONFIG_PAX_REFCOUNT
67327+static inline void pax_refcount_needs_these_functions(void)
67328+{
67329+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
67330+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
67331+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
67332+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
67333+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
67334+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
67335+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
67336+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
67337+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
67338+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
67339+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
67340+#ifdef CONFIG_X86
67341+ atomic_clear_mask_unchecked(0, NULL);
67342+ atomic_set_mask_unchecked(0, NULL);
67343+#endif
67344+
67345+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
67346+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
67347+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
67348+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
67349+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
67350+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
67351+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
67352+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
67353+}
67354+#else
67355+#define atomic_read_unchecked(v) atomic_read(v)
67356+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
67357+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
67358+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
67359+#define atomic_inc_unchecked(v) atomic_inc(v)
67360+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
67361+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
67362+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
67363+#define atomic_dec_unchecked(v) atomic_dec(v)
67364+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
67365+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
67366+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
67367+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
67368+
67369+#define atomic_long_read_unchecked(v) atomic_long_read(v)
67370+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
67371+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
67372+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
67373+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
67374+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
67375+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
67376+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
67377+#endif
67378+
67379 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
67380diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
67381index 33bd2de..f31bff97 100644
67382--- a/include/asm-generic/atomic.h
67383+++ b/include/asm-generic/atomic.h
67384@@ -153,7 +153,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
67385 * Atomically clears the bits set in @mask from @v
67386 */
67387 #ifndef atomic_clear_mask
67388-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
67389+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
67390 {
67391 unsigned long flags;
67392
67393diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
67394index b18ce4f..2ee2843 100644
67395--- a/include/asm-generic/atomic64.h
67396+++ b/include/asm-generic/atomic64.h
67397@@ -16,6 +16,8 @@ typedef struct {
67398 long long counter;
67399 } atomic64_t;
67400
67401+typedef atomic64_t atomic64_unchecked_t;
67402+
67403 #define ATOMIC64_INIT(i) { (i) }
67404
67405 extern long long atomic64_read(const atomic64_t *v);
67406@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
67407 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
67408 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
67409
67410+#define atomic64_read_unchecked(v) atomic64_read(v)
67411+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
67412+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
67413+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
67414+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
67415+#define atomic64_inc_unchecked(v) atomic64_inc(v)
67416+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
67417+#define atomic64_dec_unchecked(v) atomic64_dec(v)
67418+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
67419+
67420 #endif /* _ASM_GENERIC_ATOMIC64_H */
67421diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
67422index 1bfcfe5..e04c5c9 100644
67423--- a/include/asm-generic/cache.h
67424+++ b/include/asm-generic/cache.h
67425@@ -6,7 +6,7 @@
67426 * cache lines need to provide their own cache.h.
67427 */
67428
67429-#define L1_CACHE_SHIFT 5
67430-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
67431+#define L1_CACHE_SHIFT 5UL
67432+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
67433
67434 #endif /* __ASM_GENERIC_CACHE_H */
67435diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
67436index 0d68a1e..b74a761 100644
67437--- a/include/asm-generic/emergency-restart.h
67438+++ b/include/asm-generic/emergency-restart.h
67439@@ -1,7 +1,7 @@
67440 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
67441 #define _ASM_GENERIC_EMERGENCY_RESTART_H
67442
67443-static inline void machine_emergency_restart(void)
67444+static inline __noreturn void machine_emergency_restart(void)
67445 {
67446 machine_restart(NULL);
67447 }
67448diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
67449index 90f99c7..00ce236 100644
67450--- a/include/asm-generic/kmap_types.h
67451+++ b/include/asm-generic/kmap_types.h
67452@@ -2,9 +2,9 @@
67453 #define _ASM_GENERIC_KMAP_TYPES_H
67454
67455 #ifdef __WITH_KM_FENCE
67456-# define KM_TYPE_NR 41
67457+# define KM_TYPE_NR 42
67458 #else
67459-# define KM_TYPE_NR 20
67460+# define KM_TYPE_NR 21
67461 #endif
67462
67463 #endif
67464diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
67465index 9ceb03b..62b0b8f 100644
67466--- a/include/asm-generic/local.h
67467+++ b/include/asm-generic/local.h
67468@@ -23,24 +23,37 @@ typedef struct
67469 atomic_long_t a;
67470 } local_t;
67471
67472+typedef struct {
67473+ atomic_long_unchecked_t a;
67474+} local_unchecked_t;
67475+
67476 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
67477
67478 #define local_read(l) atomic_long_read(&(l)->a)
67479+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
67480 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
67481+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
67482 #define local_inc(l) atomic_long_inc(&(l)->a)
67483+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
67484 #define local_dec(l) atomic_long_dec(&(l)->a)
67485+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
67486 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
67487+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
67488 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
67489+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
67490
67491 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
67492 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
67493 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
67494 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
67495 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
67496+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
67497 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
67498 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
67499+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
67500
67501 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
67502+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
67503 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
67504 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
67505 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
67506diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
67507index 725612b..9cc513a 100644
67508--- a/include/asm-generic/pgtable-nopmd.h
67509+++ b/include/asm-generic/pgtable-nopmd.h
67510@@ -1,14 +1,19 @@
67511 #ifndef _PGTABLE_NOPMD_H
67512 #define _PGTABLE_NOPMD_H
67513
67514-#ifndef __ASSEMBLY__
67515-
67516 #include <asm-generic/pgtable-nopud.h>
67517
67518-struct mm_struct;
67519-
67520 #define __PAGETABLE_PMD_FOLDED
67521
67522+#define PMD_SHIFT PUD_SHIFT
67523+#define PTRS_PER_PMD 1
67524+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
67525+#define PMD_MASK (~(PMD_SIZE-1))
67526+
67527+#ifndef __ASSEMBLY__
67528+
67529+struct mm_struct;
67530+
67531 /*
67532 * Having the pmd type consist of a pud gets the size right, and allows
67533 * us to conceptually access the pud entry that this pmd is folded into
67534@@ -16,11 +21,6 @@ struct mm_struct;
67535 */
67536 typedef struct { pud_t pud; } pmd_t;
67537
67538-#define PMD_SHIFT PUD_SHIFT
67539-#define PTRS_PER_PMD 1
67540-#define PMD_SIZE (1UL << PMD_SHIFT)
67541-#define PMD_MASK (~(PMD_SIZE-1))
67542-
67543 /*
67544 * The "pud_xxx()" functions here are trivial for a folded two-level
67545 * setup: the pmd is never bad, and a pmd always exists (as it's folded
67546diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
67547index 810431d..0ec4804f 100644
67548--- a/include/asm-generic/pgtable-nopud.h
67549+++ b/include/asm-generic/pgtable-nopud.h
67550@@ -1,10 +1,15 @@
67551 #ifndef _PGTABLE_NOPUD_H
67552 #define _PGTABLE_NOPUD_H
67553
67554-#ifndef __ASSEMBLY__
67555-
67556 #define __PAGETABLE_PUD_FOLDED
67557
67558+#define PUD_SHIFT PGDIR_SHIFT
67559+#define PTRS_PER_PUD 1
67560+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
67561+#define PUD_MASK (~(PUD_SIZE-1))
67562+
67563+#ifndef __ASSEMBLY__
67564+
67565 /*
67566 * Having the pud type consist of a pgd gets the size right, and allows
67567 * us to conceptually access the pgd entry that this pud is folded into
67568@@ -12,11 +17,6 @@
67569 */
67570 typedef struct { pgd_t pgd; } pud_t;
67571
67572-#define PUD_SHIFT PGDIR_SHIFT
67573-#define PTRS_PER_PUD 1
67574-#define PUD_SIZE (1UL << PUD_SHIFT)
67575-#define PUD_MASK (~(PUD_SIZE-1))
67576-
67577 /*
67578 * The "pgd_xxx()" functions here are trivial for a folded two-level
67579 * setup: the pud is never bad, and a pud always exists (as it's folded
67580@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
67581 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
67582
67583 #define pgd_populate(mm, pgd, pud) do { } while (0)
67584+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
67585 /*
67586 * (puds are folded into pgds so this doesn't get actually called,
67587 * but the define is needed for a generic inline function.)
67588diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
67589index a59ff51..2594a70 100644
67590--- a/include/asm-generic/pgtable.h
67591+++ b/include/asm-generic/pgtable.h
67592@@ -688,6 +688,14 @@ static inline pmd_t pmd_mknuma(pmd_t pmd)
67593 }
67594 #endif /* CONFIG_NUMA_BALANCING */
67595
67596+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
67597+static inline unsigned long pax_open_kernel(void) { return 0; }
67598+#endif
67599+
67600+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
67601+static inline unsigned long pax_close_kernel(void) { return 0; }
67602+#endif
67603+
67604 #endif /* CONFIG_MMU */
67605
67606 #endif /* !__ASSEMBLY__ */
67607diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
67608index afa12c7..99d4da0 100644
67609--- a/include/asm-generic/vmlinux.lds.h
67610+++ b/include/asm-generic/vmlinux.lds.h
67611@@ -245,6 +245,7 @@
67612 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
67613 VMLINUX_SYMBOL(__start_rodata) = .; \
67614 *(.rodata) *(.rodata.*) \
67615+ *(.data..read_only) \
67616 *(__vermagic) /* Kernel version magic */ \
67617 . = ALIGN(8); \
67618 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
67619@@ -755,17 +756,18 @@
67620 * section in the linker script will go there too. @phdr should have
67621 * a leading colon.
67622 *
67623- * Note that this macros defines __per_cpu_load as an absolute symbol.
67624+ * Note that this macros defines per_cpu_load as an absolute symbol.
67625 * If there is no need to put the percpu section at a predetermined
67626 * address, use PERCPU_SECTION.
67627 */
67628 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
67629- VMLINUX_SYMBOL(__per_cpu_load) = .; \
67630- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
67631+ per_cpu_load = .; \
67632+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
67633 - LOAD_OFFSET) { \
67634+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
67635 PERCPU_INPUT(cacheline) \
67636 } phdr \
67637- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
67638+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
67639
67640 /**
67641 * PERCPU_SECTION - define output section for percpu area, simple version
67642diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
67643index 418d270..bfd2794 100644
67644--- a/include/crypto/algapi.h
67645+++ b/include/crypto/algapi.h
67646@@ -34,7 +34,7 @@ struct crypto_type {
67647 unsigned int maskclear;
67648 unsigned int maskset;
67649 unsigned int tfmsize;
67650-};
67651+} __do_const;
67652
67653 struct crypto_instance {
67654 struct crypto_alg alg;
67655diff --git a/include/drm/drmP.h b/include/drm/drmP.h
67656index f1ce786..086a7a5 100644
67657--- a/include/drm/drmP.h
67658+++ b/include/drm/drmP.h
67659@@ -72,6 +72,7 @@
67660 #include <linux/workqueue.h>
67661 #include <linux/poll.h>
67662 #include <asm/pgalloc.h>
67663+#include <asm/local.h>
67664 #include <drm/drm.h>
67665 #include <drm/drm_sarea.h>
67666
67667@@ -296,10 +297,12 @@ do { \
67668 * \param cmd command.
67669 * \param arg argument.
67670 */
67671-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
67672+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
67673+ struct drm_file *file_priv);
67674+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
67675 struct drm_file *file_priv);
67676
67677-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
67678+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
67679 unsigned long arg);
67680
67681 #define DRM_IOCTL_NR(n) _IOC_NR(n)
67682@@ -314,9 +317,9 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
67683 struct drm_ioctl_desc {
67684 unsigned int cmd;
67685 int flags;
67686- drm_ioctl_t *func;
67687+ drm_ioctl_t func;
67688 unsigned int cmd_drv;
67689-};
67690+} __do_const;
67691
67692 /**
67693 * Creates a driver or general drm_ioctl_desc array entry for the given
67694@@ -1014,7 +1017,7 @@ struct drm_info_list {
67695 int (*show)(struct seq_file*, void*); /** show callback */
67696 u32 driver_features; /**< Required driver features for this entry */
67697 void *data;
67698-};
67699+} __do_const;
67700
67701 /**
67702 * debugfs node structure. This structure represents a debugfs file.
67703@@ -1087,7 +1090,7 @@ struct drm_device {
67704
67705 /** \name Usage Counters */
67706 /*@{ */
67707- int open_count; /**< Outstanding files open */
67708+ local_t open_count; /**< Outstanding files open */
67709 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
67710 atomic_t vma_count; /**< Outstanding vma areas open */
67711 int buf_use; /**< Buffers in use -- cannot alloc */
67712@@ -1098,7 +1101,7 @@ struct drm_device {
67713 /*@{ */
67714 unsigned long counters;
67715 enum drm_stat_type types[15];
67716- atomic_t counts[15];
67717+ atomic_unchecked_t counts[15];
67718 /*@} */
67719
67720 struct list_head filelist;
67721diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
67722index f43d556..94d9343 100644
67723--- a/include/drm/drm_crtc_helper.h
67724+++ b/include/drm/drm_crtc_helper.h
67725@@ -109,7 +109,7 @@ struct drm_encoder_helper_funcs {
67726 struct drm_connector *connector);
67727 /* disable encoder when not in use - more explicit than dpms off */
67728 void (*disable)(struct drm_encoder *encoder);
67729-};
67730+} __no_const;
67731
67732 /**
67733 * drm_connector_helper_funcs - helper operations for connectors
67734diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
67735index 72dcbe8..8db58d7 100644
67736--- a/include/drm/ttm/ttm_memory.h
67737+++ b/include/drm/ttm/ttm_memory.h
67738@@ -48,7 +48,7 @@
67739
67740 struct ttm_mem_shrink {
67741 int (*do_shrink) (struct ttm_mem_shrink *);
67742-};
67743+} __no_const;
67744
67745 /**
67746 * struct ttm_mem_global - Global memory accounting structure.
67747diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
67748index 4b840e8..155d235 100644
67749--- a/include/keys/asymmetric-subtype.h
67750+++ b/include/keys/asymmetric-subtype.h
67751@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
67752 /* Verify the signature on a key of this subtype (optional) */
67753 int (*verify_signature)(const struct key *key,
67754 const struct public_key_signature *sig);
67755-};
67756+} __do_const;
67757
67758 /**
67759 * asymmetric_key_subtype - Get the subtype from an asymmetric key
67760diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
67761index c1da539..1dcec55 100644
67762--- a/include/linux/atmdev.h
67763+++ b/include/linux/atmdev.h
67764@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
67765 #endif
67766
67767 struct k_atm_aal_stats {
67768-#define __HANDLE_ITEM(i) atomic_t i
67769+#define __HANDLE_ITEM(i) atomic_unchecked_t i
67770 __AAL_STAT_ITEMS
67771 #undef __HANDLE_ITEM
67772 };
67773@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
67774 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
67775 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
67776 struct module *owner;
67777-};
67778+} __do_const ;
67779
67780 struct atmphy_ops {
67781 int (*start)(struct atm_dev *dev);
67782diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
67783index c3a0914..ec5d48a 100644
67784--- a/include/linux/binfmts.h
67785+++ b/include/linux/binfmts.h
67786@@ -73,8 +73,9 @@ struct linux_binfmt {
67787 int (*load_binary)(struct linux_binprm *);
67788 int (*load_shlib)(struct file *);
67789 int (*core_dump)(struct coredump_params *cprm);
67790+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
67791 unsigned long min_coredump; /* minimal dump size */
67792-};
67793+} __do_const;
67794
67795 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
67796
67797diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
67798index 33f358f..7f2c27f 100644
67799--- a/include/linux/blkdev.h
67800+++ b/include/linux/blkdev.h
67801@@ -1499,7 +1499,7 @@ struct block_device_operations {
67802 /* this callback is with swap_lock and sometimes page table lock held */
67803 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
67804 struct module *owner;
67805-};
67806+} __do_const;
67807
67808 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
67809 unsigned long);
67810diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
67811index 7c2e030..b72475d 100644
67812--- a/include/linux/blktrace_api.h
67813+++ b/include/linux/blktrace_api.h
67814@@ -23,7 +23,7 @@ struct blk_trace {
67815 struct dentry *dir;
67816 struct dentry *dropped_file;
67817 struct dentry *msg_file;
67818- atomic_t dropped;
67819+ atomic_unchecked_t dropped;
67820 };
67821
67822 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
67823diff --git a/include/linux/cache.h b/include/linux/cache.h
67824index 4c57065..4307975 100644
67825--- a/include/linux/cache.h
67826+++ b/include/linux/cache.h
67827@@ -16,6 +16,10 @@
67828 #define __read_mostly
67829 #endif
67830
67831+#ifndef __read_only
67832+#define __read_only __read_mostly
67833+#endif
67834+
67835 #ifndef ____cacheline_aligned
67836 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
67837 #endif
67838diff --git a/include/linux/capability.h b/include/linux/capability.h
67839index d9a4f7f4..19f77d6 100644
67840--- a/include/linux/capability.h
67841+++ b/include/linux/capability.h
67842@@ -213,8 +213,13 @@ extern bool ns_capable(struct user_namespace *ns, int cap);
67843 extern bool nsown_capable(int cap);
67844 extern bool inode_capable(const struct inode *inode, int cap);
67845 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
67846+extern bool capable_nolog(int cap);
67847+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
67848+extern bool inode_capable_nolog(const struct inode *inode, int cap);
67849
67850 /* audit system wants to get cap info from files as well */
67851 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
67852
67853+extern int is_privileged_binary(const struct dentry *dentry);
67854+
67855 #endif /* !_LINUX_CAPABILITY_H */
67856diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
67857index 8609d57..86e4d79 100644
67858--- a/include/linux/cdrom.h
67859+++ b/include/linux/cdrom.h
67860@@ -87,7 +87,6 @@ struct cdrom_device_ops {
67861
67862 /* driver specifications */
67863 const int capability; /* capability flags */
67864- int n_minors; /* number of active minor devices */
67865 /* handle uniform packets for scsi type devices (scsi,atapi) */
67866 int (*generic_packet) (struct cdrom_device_info *,
67867 struct packet_command *);
67868diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
67869index 42e55de..1cd0e66 100644
67870--- a/include/linux/cleancache.h
67871+++ b/include/linux/cleancache.h
67872@@ -31,7 +31,7 @@ struct cleancache_ops {
67873 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
67874 void (*invalidate_inode)(int, struct cleancache_filekey);
67875 void (*invalidate_fs)(int);
67876-};
67877+} __no_const;
67878
67879 extern struct cleancache_ops
67880 cleancache_register_ops(struct cleancache_ops *ops);
67881diff --git a/include/linux/compat.h b/include/linux/compat.h
67882index 377cd8c..2479845 100644
67883--- a/include/linux/compat.h
67884+++ b/include/linux/compat.h
67885@@ -332,14 +332,14 @@ long compat_sys_msgsnd(int first, int second, int third, void __user *uptr);
67886 long compat_sys_msgrcv(int first, int second, int msgtyp, int third,
67887 int version, void __user *uptr);
67888 long compat_sys_shmat(int first, int second, compat_uptr_t third, int version,
67889- void __user *uptr);
67890+ void __user *uptr) __intentional_overflow(0);
67891 #else
67892 long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
67893 long compat_sys_msgsnd(int msqid, struct compat_msgbuf __user *msgp,
67894 compat_ssize_t msgsz, int msgflg);
67895 long compat_sys_msgrcv(int msqid, struct compat_msgbuf __user *msgp,
67896 compat_ssize_t msgsz, long msgtyp, int msgflg);
67897-long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
67898+long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
67899 #endif
67900 long compat_sys_msgctl(int first, int second, void __user *uptr);
67901 long compat_sys_shmctl(int first, int second, void __user *uptr);
67902@@ -442,7 +442,7 @@ extern int compat_ptrace_request(struct task_struct *child,
67903 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
67904 compat_ulong_t addr, compat_ulong_t data);
67905 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
67906- compat_long_t addr, compat_long_t data);
67907+ compat_ulong_t addr, compat_ulong_t data);
67908
67909 /*
67910 * epoll (fs/eventpoll.c) compat bits follow ...
67911diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
67912index 68b162d..660f5f0 100644
67913--- a/include/linux/compiler-gcc4.h
67914+++ b/include/linux/compiler-gcc4.h
67915@@ -39,9 +39,29 @@
67916 # define __compiletime_warning(message) __attribute__((warning(message)))
67917 # define __compiletime_error(message) __attribute__((error(message)))
67918 #endif /* __CHECKER__ */
67919+
67920+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
67921+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
67922+#define __bos0(ptr) __bos((ptr), 0)
67923+#define __bos1(ptr) __bos((ptr), 1)
67924 #endif /* GCC_VERSION >= 40300 */
67925
67926 #if GCC_VERSION >= 40500
67927+
67928+#ifdef CONSTIFY_PLUGIN
67929+#define __no_const __attribute__((no_const))
67930+#define __do_const __attribute__((do_const))
67931+#endif
67932+
67933+#ifdef SIZE_OVERFLOW_PLUGIN
67934+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
67935+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
67936+#endif
67937+
67938+#ifdef LATENT_ENTROPY_PLUGIN
67939+#define __latent_entropy __attribute__((latent_entropy))
67940+#endif
67941+
67942 /*
67943 * Mark a position in code as unreachable. This can be used to
67944 * suppress control flow warnings after asm blocks that transfer
67945diff --git a/include/linux/compiler.h b/include/linux/compiler.h
67946index 10b8f23..5e0b083 100644
67947--- a/include/linux/compiler.h
67948+++ b/include/linux/compiler.h
67949@@ -5,11 +5,14 @@
67950
67951 #ifdef __CHECKER__
67952 # define __user __attribute__((noderef, address_space(1)))
67953+# define __force_user __force __user
67954 # define __kernel __attribute__((address_space(0)))
67955+# define __force_kernel __force __kernel
67956 # define __safe __attribute__((safe))
67957 # define __force __attribute__((force))
67958 # define __nocast __attribute__((nocast))
67959 # define __iomem __attribute__((noderef, address_space(2)))
67960+# define __force_iomem __force __iomem
67961 # define __must_hold(x) __attribute__((context(x,1,1)))
67962 # define __acquires(x) __attribute__((context(x,0,1)))
67963 # define __releases(x) __attribute__((context(x,1,0)))
67964@@ -17,20 +20,37 @@
67965 # define __release(x) __context__(x,-1)
67966 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
67967 # define __percpu __attribute__((noderef, address_space(3)))
67968+# define __force_percpu __force __percpu
67969 #ifdef CONFIG_SPARSE_RCU_POINTER
67970 # define __rcu __attribute__((noderef, address_space(4)))
67971+# define __force_rcu __force __rcu
67972 #else
67973 # define __rcu
67974+# define __force_rcu
67975 #endif
67976 extern void __chk_user_ptr(const volatile void __user *);
67977 extern void __chk_io_ptr(const volatile void __iomem *);
67978 #else
67979-# define __user
67980-# define __kernel
67981+# ifdef CHECKER_PLUGIN
67982+//# define __user
67983+//# define __force_user
67984+//# define __kernel
67985+//# define __force_kernel
67986+# else
67987+# ifdef STRUCTLEAK_PLUGIN
67988+# define __user __attribute__((user))
67989+# else
67990+# define __user
67991+# endif
67992+# define __force_user
67993+# define __kernel
67994+# define __force_kernel
67995+# endif
67996 # define __safe
67997 # define __force
67998 # define __nocast
67999 # define __iomem
68000+# define __force_iomem
68001 # define __chk_user_ptr(x) (void)0
68002 # define __chk_io_ptr(x) (void)0
68003 # define __builtin_warning(x, y...) (1)
68004@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
68005 # define __release(x) (void)0
68006 # define __cond_lock(x,c) (c)
68007 # define __percpu
68008+# define __force_percpu
68009 # define __rcu
68010+# define __force_rcu
68011 #endif
68012
68013 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
68014@@ -275,6 +297,26 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
68015 # define __attribute_const__ /* unimplemented */
68016 #endif
68017
68018+#ifndef __no_const
68019+# define __no_const
68020+#endif
68021+
68022+#ifndef __do_const
68023+# define __do_const
68024+#endif
68025+
68026+#ifndef __size_overflow
68027+# define __size_overflow(...)
68028+#endif
68029+
68030+#ifndef __intentional_overflow
68031+# define __intentional_overflow(...)
68032+#endif
68033+
68034+#ifndef __latent_entropy
68035+# define __latent_entropy
68036+#endif
68037+
68038 /*
68039 * Tell gcc if a function is cold. The compiler will assume any path
68040 * directly leading to the call is unlikely.
68041@@ -284,6 +326,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
68042 #define __cold
68043 #endif
68044
68045+#ifndef __alloc_size
68046+#define __alloc_size(...)
68047+#endif
68048+
68049+#ifndef __bos
68050+#define __bos(ptr, arg)
68051+#endif
68052+
68053+#ifndef __bos0
68054+#define __bos0(ptr)
68055+#endif
68056+
68057+#ifndef __bos1
68058+#define __bos1(ptr)
68059+#endif
68060+
68061 /* Simple shorthand for a section definition */
68062 #ifndef __section
68063 # define __section(S) __attribute__ ((__section__(#S)))
68064@@ -349,6 +407,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
68065 * use is to mediate communication between process-level code and irq/NMI
68066 * handlers, all running on the same CPU.
68067 */
68068-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
68069+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
68070+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
68071
68072 #endif /* __LINUX_COMPILER_H */
68073diff --git a/include/linux/completion.h b/include/linux/completion.h
68074index 33f0280..35c6568 100644
68075--- a/include/linux/completion.h
68076+++ b/include/linux/completion.h
68077@@ -79,15 +79,15 @@ static inline void init_completion(struct completion *x)
68078 extern void wait_for_completion(struct completion *);
68079 extern void wait_for_completion_io(struct completion *);
68080 extern int wait_for_completion_interruptible(struct completion *x);
68081-extern int wait_for_completion_killable(struct completion *x);
68082+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
68083 extern unsigned long wait_for_completion_timeout(struct completion *x,
68084 unsigned long timeout);
68085 extern unsigned long wait_for_completion_io_timeout(struct completion *x,
68086 unsigned long timeout);
68087 extern long wait_for_completion_interruptible_timeout(
68088- struct completion *x, unsigned long timeout);
68089+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
68090 extern long wait_for_completion_killable_timeout(
68091- struct completion *x, unsigned long timeout);
68092+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
68093 extern bool try_wait_for_completion(struct completion *x);
68094 extern bool completion_done(struct completion *x);
68095
68096diff --git a/include/linux/configfs.h b/include/linux/configfs.h
68097index 34025df..d94bbbc 100644
68098--- a/include/linux/configfs.h
68099+++ b/include/linux/configfs.h
68100@@ -125,7 +125,7 @@ struct configfs_attribute {
68101 const char *ca_name;
68102 struct module *ca_owner;
68103 umode_t ca_mode;
68104-};
68105+} __do_const;
68106
68107 /*
68108 * Users often need to create attribute structures for their configurable
68109diff --git a/include/linux/cpu.h b/include/linux/cpu.h
68110index 714e792..e6130d9 100644
68111--- a/include/linux/cpu.h
68112+++ b/include/linux/cpu.h
68113@@ -115,7 +115,7 @@ enum {
68114 /* Need to know about CPUs going up/down? */
68115 #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
68116 #define cpu_notifier(fn, pri) { \
68117- static struct notifier_block fn##_nb __cpuinitdata = \
68118+ static struct notifier_block fn##_nb = \
68119 { .notifier_call = fn, .priority = pri }; \
68120 register_cpu_notifier(&fn##_nb); \
68121 }
68122diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
68123index a22944c..4e695fe 100644
68124--- a/include/linux/cpufreq.h
68125+++ b/include/linux/cpufreq.h
68126@@ -252,7 +252,7 @@ struct cpufreq_driver {
68127 int (*suspend) (struct cpufreq_policy *policy);
68128 int (*resume) (struct cpufreq_policy *policy);
68129 struct freq_attr **attr;
68130-};
68131+} __do_const;
68132
68133 /* flags */
68134
68135@@ -311,6 +311,7 @@ struct global_attr {
68136 ssize_t (*store)(struct kobject *a, struct attribute *b,
68137 const char *c, size_t count);
68138 };
68139+typedef struct global_attr __no_const global_attr_no_const;
68140
68141 #define define_one_global_ro(_name) \
68142 static struct global_attr _name = \
68143diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
68144index 480c14d..552896f 100644
68145--- a/include/linux/cpuidle.h
68146+++ b/include/linux/cpuidle.h
68147@@ -52,7 +52,8 @@ struct cpuidle_state {
68148 int index);
68149
68150 int (*enter_dead) (struct cpuidle_device *dev, int index);
68151-};
68152+} __do_const;
68153+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
68154
68155 /* Idle State Flags */
68156 #define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
68157@@ -194,7 +195,7 @@ struct cpuidle_governor {
68158 void (*reflect) (struct cpuidle_device *dev, int index);
68159
68160 struct module *owner;
68161-};
68162+} __do_const;
68163
68164 #ifdef CONFIG_CPU_IDLE
68165
68166diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
68167index 0325602..5e9feff 100644
68168--- a/include/linux/cpumask.h
68169+++ b/include/linux/cpumask.h
68170@@ -118,17 +118,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
68171 }
68172
68173 /* Valid inputs for n are -1 and 0. */
68174-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
68175+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
68176 {
68177 return n+1;
68178 }
68179
68180-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
68181+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
68182 {
68183 return n+1;
68184 }
68185
68186-static inline unsigned int cpumask_next_and(int n,
68187+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
68188 const struct cpumask *srcp,
68189 const struct cpumask *andp)
68190 {
68191@@ -167,7 +167,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
68192 *
68193 * Returns >= nr_cpu_ids if no further cpus set.
68194 */
68195-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
68196+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
68197 {
68198 /* -1 is a legal arg here. */
68199 if (n != -1)
68200@@ -182,7 +182,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
68201 *
68202 * Returns >= nr_cpu_ids if no further cpus unset.
68203 */
68204-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
68205+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
68206 {
68207 /* -1 is a legal arg here. */
68208 if (n != -1)
68209@@ -190,7 +190,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
68210 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
68211 }
68212
68213-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
68214+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
68215 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
68216
68217 /**
68218diff --git a/include/linux/cred.h b/include/linux/cred.h
68219index 04421e8..6bce4ef 100644
68220--- a/include/linux/cred.h
68221+++ b/include/linux/cred.h
68222@@ -194,6 +194,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
68223 static inline void validate_process_creds(void)
68224 {
68225 }
68226+static inline void validate_task_creds(struct task_struct *task)
68227+{
68228+}
68229 #endif
68230
68231 /**
68232diff --git a/include/linux/crypto.h b/include/linux/crypto.h
68233index b92eadf..b4ecdc1 100644
68234--- a/include/linux/crypto.h
68235+++ b/include/linux/crypto.h
68236@@ -373,7 +373,7 @@ struct cipher_tfm {
68237 const u8 *key, unsigned int keylen);
68238 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
68239 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
68240-};
68241+} __no_const;
68242
68243 struct hash_tfm {
68244 int (*init)(struct hash_desc *desc);
68245@@ -394,13 +394,13 @@ struct compress_tfm {
68246 int (*cot_decompress)(struct crypto_tfm *tfm,
68247 const u8 *src, unsigned int slen,
68248 u8 *dst, unsigned int *dlen);
68249-};
68250+} __no_const;
68251
68252 struct rng_tfm {
68253 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
68254 unsigned int dlen);
68255 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
68256-};
68257+} __no_const;
68258
68259 #define crt_ablkcipher crt_u.ablkcipher
68260 #define crt_aead crt_u.aead
68261diff --git a/include/linux/ctype.h b/include/linux/ctype.h
68262index 8acfe31..6ffccd63 100644
68263--- a/include/linux/ctype.h
68264+++ b/include/linux/ctype.h
68265@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
68266 * Fast implementation of tolower() for internal usage. Do not use in your
68267 * code.
68268 */
68269-static inline char _tolower(const char c)
68270+static inline unsigned char _tolower(const unsigned char c)
68271 {
68272 return c | 0x20;
68273 }
68274diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
68275index 7925bf0..d5143d2 100644
68276--- a/include/linux/decompress/mm.h
68277+++ b/include/linux/decompress/mm.h
68278@@ -77,7 +77,7 @@ static void free(void *where)
68279 * warnings when not needed (indeed large_malloc / large_free are not
68280 * needed by inflate */
68281
68282-#define malloc(a) kmalloc(a, GFP_KERNEL)
68283+#define malloc(a) kmalloc((a), GFP_KERNEL)
68284 #define free(a) kfree(a)
68285
68286 #define large_malloc(a) vmalloc(a)
68287diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
68288index fe8c447..bdc1f33 100644
68289--- a/include/linux/devfreq.h
68290+++ b/include/linux/devfreq.h
68291@@ -114,7 +114,7 @@ struct devfreq_governor {
68292 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
68293 int (*event_handler)(struct devfreq *devfreq,
68294 unsigned int event, void *data);
68295-};
68296+} __do_const;
68297
68298 /**
68299 * struct devfreq - Device devfreq structure
68300diff --git a/include/linux/device.h b/include/linux/device.h
68301index 9d6464e..8a5cc92 100644
68302--- a/include/linux/device.h
68303+++ b/include/linux/device.h
68304@@ -295,7 +295,7 @@ struct subsys_interface {
68305 struct list_head node;
68306 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
68307 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
68308-};
68309+} __do_const;
68310
68311 int subsys_interface_register(struct subsys_interface *sif);
68312 void subsys_interface_unregister(struct subsys_interface *sif);
68313@@ -475,7 +475,7 @@ struct device_type {
68314 void (*release)(struct device *dev);
68315
68316 const struct dev_pm_ops *pm;
68317-};
68318+} __do_const;
68319
68320 /* interface for exporting device attributes */
68321 struct device_attribute {
68322@@ -485,11 +485,12 @@ struct device_attribute {
68323 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
68324 const char *buf, size_t count);
68325 };
68326+typedef struct device_attribute __no_const device_attribute_no_const;
68327
68328 struct dev_ext_attribute {
68329 struct device_attribute attr;
68330 void *var;
68331-};
68332+} __do_const;
68333
68334 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
68335 char *buf);
68336diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
68337index 94af418..b1ca7a2 100644
68338--- a/include/linux/dma-mapping.h
68339+++ b/include/linux/dma-mapping.h
68340@@ -54,7 +54,7 @@ struct dma_map_ops {
68341 u64 (*get_required_mask)(struct device *dev);
68342 #endif
68343 int is_phys;
68344-};
68345+} __do_const;
68346
68347 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
68348
68349diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
68350index 91ac8da..a841318 100644
68351--- a/include/linux/dmaengine.h
68352+++ b/include/linux/dmaengine.h
68353@@ -1034,9 +1034,9 @@ struct dma_pinned_list {
68354 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
68355 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
68356
68357-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
68358+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
68359 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
68360-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
68361+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
68362 struct dma_pinned_list *pinned_list, struct page *page,
68363 unsigned int offset, size_t len);
68364
68365diff --git a/include/linux/efi.h b/include/linux/efi.h
68366index 3d7df3d..301f024 100644
68367--- a/include/linux/efi.h
68368+++ b/include/linux/efi.h
68369@@ -740,6 +740,7 @@ struct efivar_operations {
68370 efi_set_variable_t *set_variable;
68371 efi_query_variable_store_t *query_variable_store;
68372 };
68373+typedef struct efivar_operations __no_const efivar_operations_no_const;
68374
68375 struct efivars {
68376 /*
68377diff --git a/include/linux/elf.h b/include/linux/elf.h
68378index 40a3c0e..4c45a38 100644
68379--- a/include/linux/elf.h
68380+++ b/include/linux/elf.h
68381@@ -24,6 +24,7 @@ extern Elf32_Dyn _DYNAMIC [];
68382 #define elf_note elf32_note
68383 #define elf_addr_t Elf32_Off
68384 #define Elf_Half Elf32_Half
68385+#define elf_dyn Elf32_Dyn
68386
68387 #else
68388
68389@@ -34,6 +35,7 @@ extern Elf64_Dyn _DYNAMIC [];
68390 #define elf_note elf64_note
68391 #define elf_addr_t Elf64_Off
68392 #define Elf_Half Elf64_Half
68393+#define elf_dyn Elf64_Dyn
68394
68395 #endif
68396
68397diff --git a/include/linux/err.h b/include/linux/err.h
68398index f2edce2..cc2082c 100644
68399--- a/include/linux/err.h
68400+++ b/include/linux/err.h
68401@@ -19,12 +19,12 @@
68402
68403 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
68404
68405-static inline void * __must_check ERR_PTR(long error)
68406+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
68407 {
68408 return (void *) error;
68409 }
68410
68411-static inline long __must_check PTR_ERR(const void *ptr)
68412+static inline long __must_check __intentional_overflow(-1) PTR_ERR(const void *ptr)
68413 {
68414 return (long) ptr;
68415 }
68416diff --git a/include/linux/extcon.h b/include/linux/extcon.h
68417index fcb51c8..bdafcf6 100644
68418--- a/include/linux/extcon.h
68419+++ b/include/linux/extcon.h
68420@@ -134,7 +134,7 @@ struct extcon_dev {
68421 /* /sys/class/extcon/.../mutually_exclusive/... */
68422 struct attribute_group attr_g_muex;
68423 struct attribute **attrs_muex;
68424- struct device_attribute *d_attrs_muex;
68425+ device_attribute_no_const *d_attrs_muex;
68426 };
68427
68428 /**
68429diff --git a/include/linux/fb.h b/include/linux/fb.h
68430index 58b9860..58e5516 100644
68431--- a/include/linux/fb.h
68432+++ b/include/linux/fb.h
68433@@ -304,7 +304,7 @@ struct fb_ops {
68434 /* called at KDB enter and leave time to prepare the console */
68435 int (*fb_debug_enter)(struct fb_info *info);
68436 int (*fb_debug_leave)(struct fb_info *info);
68437-};
68438+} __do_const;
68439
68440 #ifdef CONFIG_FB_TILEBLITTING
68441 #define FB_TILE_CURSOR_NONE 0
68442diff --git a/include/linux/filter.h b/include/linux/filter.h
68443index c45eabc..baa0be5 100644
68444--- a/include/linux/filter.h
68445+++ b/include/linux/filter.h
68446@@ -20,6 +20,7 @@ struct compat_sock_fprog {
68447
68448 struct sk_buff;
68449 struct sock;
68450+struct bpf_jit_work;
68451
68452 struct sk_filter
68453 {
68454@@ -27,6 +28,9 @@ struct sk_filter
68455 unsigned int len; /* Number of filter blocks */
68456 unsigned int (*bpf_func)(const struct sk_buff *skb,
68457 const struct sock_filter *filter);
68458+#ifdef CONFIG_BPF_JIT
68459+ struct bpf_jit_work *work;
68460+#endif
68461 struct rcu_head rcu;
68462 struct sock_filter insns[0];
68463 };
68464diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
68465index 3044254..9767f41 100644
68466--- a/include/linux/frontswap.h
68467+++ b/include/linux/frontswap.h
68468@@ -11,7 +11,7 @@ struct frontswap_ops {
68469 int (*load)(unsigned, pgoff_t, struct page *);
68470 void (*invalidate_page)(unsigned, pgoff_t);
68471 void (*invalidate_area)(unsigned);
68472-};
68473+} __no_const;
68474
68475 extern bool frontswap_enabled;
68476 extern struct frontswap_ops
68477diff --git a/include/linux/fs.h b/include/linux/fs.h
68478index 2c28271..8d3d74c 100644
68479--- a/include/linux/fs.h
68480+++ b/include/linux/fs.h
68481@@ -1541,7 +1541,8 @@ struct file_operations {
68482 long (*fallocate)(struct file *file, int mode, loff_t offset,
68483 loff_t len);
68484 int (*show_fdinfo)(struct seq_file *m, struct file *f);
68485-};
68486+} __do_const;
68487+typedef struct file_operations __no_const file_operations_no_const;
68488
68489 struct inode_operations {
68490 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
68491@@ -2672,4 +2673,14 @@ static inline void inode_has_no_xattr(struct inode *inode)
68492 inode->i_flags |= S_NOSEC;
68493 }
68494
68495+static inline bool is_sidechannel_device(const struct inode *inode)
68496+{
68497+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
68498+ umode_t mode = inode->i_mode;
68499+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
68500+#else
68501+ return false;
68502+#endif
68503+}
68504+
68505 #endif /* _LINUX_FS_H */
68506diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
68507index 2b93a9a..855d94a 100644
68508--- a/include/linux/fs_struct.h
68509+++ b/include/linux/fs_struct.h
68510@@ -6,7 +6,7 @@
68511 #include <linux/seqlock.h>
68512
68513 struct fs_struct {
68514- int users;
68515+ atomic_t users;
68516 spinlock_t lock;
68517 seqcount_t seq;
68518 int umask;
68519diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
68520index 5dfa0aa..6acf322 100644
68521--- a/include/linux/fscache-cache.h
68522+++ b/include/linux/fscache-cache.h
68523@@ -112,7 +112,7 @@ struct fscache_operation {
68524 fscache_operation_release_t release;
68525 };
68526
68527-extern atomic_t fscache_op_debug_id;
68528+extern atomic_unchecked_t fscache_op_debug_id;
68529 extern void fscache_op_work_func(struct work_struct *work);
68530
68531 extern void fscache_enqueue_operation(struct fscache_operation *);
68532@@ -134,7 +134,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
68533 INIT_WORK(&op->work, fscache_op_work_func);
68534 atomic_set(&op->usage, 1);
68535 op->state = FSCACHE_OP_ST_INITIALISED;
68536- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
68537+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
68538 op->processor = processor;
68539 op->release = release;
68540 INIT_LIST_HEAD(&op->pend_link);
68541diff --git a/include/linux/fscache.h b/include/linux/fscache.h
68542index 7a08623..4c07b0f 100644
68543--- a/include/linux/fscache.h
68544+++ b/include/linux/fscache.h
68545@@ -152,7 +152,7 @@ struct fscache_cookie_def {
68546 * - this is mandatory for any object that may have data
68547 */
68548 void (*now_uncached)(void *cookie_netfs_data);
68549-};
68550+} __do_const;
68551
68552 /*
68553 * fscache cached network filesystem type
68554diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
68555index a78680a..87bd73e 100644
68556--- a/include/linux/fsnotify.h
68557+++ b/include/linux/fsnotify.h
68558@@ -195,6 +195,9 @@ static inline void fsnotify_access(struct file *file)
68559 struct inode *inode = path->dentry->d_inode;
68560 __u32 mask = FS_ACCESS;
68561
68562+ if (is_sidechannel_device(inode))
68563+ return;
68564+
68565 if (S_ISDIR(inode->i_mode))
68566 mask |= FS_ISDIR;
68567
68568@@ -213,6 +216,9 @@ static inline void fsnotify_modify(struct file *file)
68569 struct inode *inode = path->dentry->d_inode;
68570 __u32 mask = FS_MODIFY;
68571
68572+ if (is_sidechannel_device(inode))
68573+ return;
68574+
68575 if (S_ISDIR(inode->i_mode))
68576 mask |= FS_ISDIR;
68577
68578@@ -315,7 +321,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
68579 */
68580 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
68581 {
68582- return kstrdup(name, GFP_KERNEL);
68583+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
68584 }
68585
68586 /*
68587diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
68588index 13a54d0..c6ce2a7 100644
68589--- a/include/linux/ftrace_event.h
68590+++ b/include/linux/ftrace_event.h
68591@@ -274,7 +274,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
68592 extern int trace_add_event_call(struct ftrace_event_call *call);
68593 extern void trace_remove_event_call(struct ftrace_event_call *call);
68594
68595-#define is_signed_type(type) (((type)(-1)) < (type)0)
68596+#define is_signed_type(type) (((type)(-1)) < (type)1)
68597
68598 int trace_set_clr_event(const char *system, const char *event, int set);
68599
68600diff --git a/include/linux/genhd.h b/include/linux/genhd.h
68601index 9f3c275..911b591 100644
68602--- a/include/linux/genhd.h
68603+++ b/include/linux/genhd.h
68604@@ -194,7 +194,7 @@ struct gendisk {
68605 struct kobject *slave_dir;
68606
68607 struct timer_rand_state *random;
68608- atomic_t sync_io; /* RAID */
68609+ atomic_unchecked_t sync_io; /* RAID */
68610 struct disk_events *ev;
68611 #ifdef CONFIG_BLK_DEV_INTEGRITY
68612 struct blk_integrity *integrity;
68613diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
68614index 023bc34..b02b46a 100644
68615--- a/include/linux/genl_magic_func.h
68616+++ b/include/linux/genl_magic_func.h
68617@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
68618 },
68619
68620 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
68621-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
68622+static struct genl_ops ZZZ_genl_ops[] = {
68623 #include GENL_MAGIC_INCLUDE_FILE
68624 };
68625
68626diff --git a/include/linux/gfp.h b/include/linux/gfp.h
68627index 0f615eb..5c3832f 100644
68628--- a/include/linux/gfp.h
68629+++ b/include/linux/gfp.h
68630@@ -35,6 +35,13 @@ struct vm_area_struct;
68631 #define ___GFP_NO_KSWAPD 0x400000u
68632 #define ___GFP_OTHER_NODE 0x800000u
68633 #define ___GFP_WRITE 0x1000000u
68634+
68635+#ifdef CONFIG_PAX_USERCOPY_SLABS
68636+#define ___GFP_USERCOPY 0x2000000u
68637+#else
68638+#define ___GFP_USERCOPY 0
68639+#endif
68640+
68641 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
68642
68643 /*
68644@@ -92,6 +99,7 @@ struct vm_area_struct;
68645 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
68646 #define __GFP_KMEMCG ((__force gfp_t)___GFP_KMEMCG) /* Allocation comes from a memcg-accounted resource */
68647 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
68648+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
68649
68650 /*
68651 * This may seem redundant, but it's a way of annotating false positives vs.
68652@@ -99,7 +107,7 @@ struct vm_area_struct;
68653 */
68654 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
68655
68656-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
68657+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
68658 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
68659
68660 /* This equals 0, but use constants in case they ever change */
68661@@ -153,6 +161,8 @@ struct vm_area_struct;
68662 /* 4GB DMA on some platforms */
68663 #define GFP_DMA32 __GFP_DMA32
68664
68665+#define GFP_USERCOPY __GFP_USERCOPY
68666+
68667 /* Convert GFP flags to their corresponding migrate type */
68668 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
68669 {
68670diff --git a/include/linux/gracl.h b/include/linux/gracl.h
68671new file mode 100644
68672index 0000000..ebe6d72
68673--- /dev/null
68674+++ b/include/linux/gracl.h
68675@@ -0,0 +1,319 @@
68676+#ifndef GR_ACL_H
68677+#define GR_ACL_H
68678+
68679+#include <linux/grdefs.h>
68680+#include <linux/resource.h>
68681+#include <linux/capability.h>
68682+#include <linux/dcache.h>
68683+#include <asm/resource.h>
68684+
68685+/* Major status information */
68686+
68687+#define GR_VERSION "grsecurity 2.9.1"
68688+#define GRSECURITY_VERSION 0x2901
68689+
68690+enum {
68691+ GR_SHUTDOWN = 0,
68692+ GR_ENABLE = 1,
68693+ GR_SPROLE = 2,
68694+ GR_RELOAD = 3,
68695+ GR_SEGVMOD = 4,
68696+ GR_STATUS = 5,
68697+ GR_UNSPROLE = 6,
68698+ GR_PASSSET = 7,
68699+ GR_SPROLEPAM = 8,
68700+};
68701+
68702+/* Password setup definitions
68703+ * kernel/grhash.c */
68704+enum {
68705+ GR_PW_LEN = 128,
68706+ GR_SALT_LEN = 16,
68707+ GR_SHA_LEN = 32,
68708+};
68709+
68710+enum {
68711+ GR_SPROLE_LEN = 64,
68712+};
68713+
68714+enum {
68715+ GR_NO_GLOB = 0,
68716+ GR_REG_GLOB,
68717+ GR_CREATE_GLOB
68718+};
68719+
68720+#define GR_NLIMITS 32
68721+
68722+/* Begin Data Structures */
68723+
68724+struct sprole_pw {
68725+ unsigned char *rolename;
68726+ unsigned char salt[GR_SALT_LEN];
68727+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
68728+};
68729+
68730+struct name_entry {
68731+ __u32 key;
68732+ ino_t inode;
68733+ dev_t device;
68734+ char *name;
68735+ __u16 len;
68736+ __u8 deleted;
68737+ struct name_entry *prev;
68738+ struct name_entry *next;
68739+};
68740+
68741+struct inodev_entry {
68742+ struct name_entry *nentry;
68743+ struct inodev_entry *prev;
68744+ struct inodev_entry *next;
68745+};
68746+
68747+struct acl_role_db {
68748+ struct acl_role_label **r_hash;
68749+ __u32 r_size;
68750+};
68751+
68752+struct inodev_db {
68753+ struct inodev_entry **i_hash;
68754+ __u32 i_size;
68755+};
68756+
68757+struct name_db {
68758+ struct name_entry **n_hash;
68759+ __u32 n_size;
68760+};
68761+
68762+struct crash_uid {
68763+ uid_t uid;
68764+ unsigned long expires;
68765+};
68766+
68767+struct gr_hash_struct {
68768+ void **table;
68769+ void **nametable;
68770+ void *first;
68771+ __u32 table_size;
68772+ __u32 used_size;
68773+ int type;
68774+};
68775+
68776+/* Userspace Grsecurity ACL data structures */
68777+
68778+struct acl_subject_label {
68779+ char *filename;
68780+ ino_t inode;
68781+ dev_t device;
68782+ __u32 mode;
68783+ kernel_cap_t cap_mask;
68784+ kernel_cap_t cap_lower;
68785+ kernel_cap_t cap_invert_audit;
68786+
68787+ struct rlimit res[GR_NLIMITS];
68788+ __u32 resmask;
68789+
68790+ __u8 user_trans_type;
68791+ __u8 group_trans_type;
68792+ uid_t *user_transitions;
68793+ gid_t *group_transitions;
68794+ __u16 user_trans_num;
68795+ __u16 group_trans_num;
68796+
68797+ __u32 sock_families[2];
68798+ __u32 ip_proto[8];
68799+ __u32 ip_type;
68800+ struct acl_ip_label **ips;
68801+ __u32 ip_num;
68802+ __u32 inaddr_any_override;
68803+
68804+ __u32 crashes;
68805+ unsigned long expires;
68806+
68807+ struct acl_subject_label *parent_subject;
68808+ struct gr_hash_struct *hash;
68809+ struct acl_subject_label *prev;
68810+ struct acl_subject_label *next;
68811+
68812+ struct acl_object_label **obj_hash;
68813+ __u32 obj_hash_size;
68814+ __u16 pax_flags;
68815+};
68816+
68817+struct role_allowed_ip {
68818+ __u32 addr;
68819+ __u32 netmask;
68820+
68821+ struct role_allowed_ip *prev;
68822+ struct role_allowed_ip *next;
68823+};
68824+
68825+struct role_transition {
68826+ char *rolename;
68827+
68828+ struct role_transition *prev;
68829+ struct role_transition *next;
68830+};
68831+
68832+struct acl_role_label {
68833+ char *rolename;
68834+ uid_t uidgid;
68835+ __u16 roletype;
68836+
68837+ __u16 auth_attempts;
68838+ unsigned long expires;
68839+
68840+ struct acl_subject_label *root_label;
68841+ struct gr_hash_struct *hash;
68842+
68843+ struct acl_role_label *prev;
68844+ struct acl_role_label *next;
68845+
68846+ struct role_transition *transitions;
68847+ struct role_allowed_ip *allowed_ips;
68848+ uid_t *domain_children;
68849+ __u16 domain_child_num;
68850+
68851+ umode_t umask;
68852+
68853+ struct acl_subject_label **subj_hash;
68854+ __u32 subj_hash_size;
68855+};
68856+
68857+struct user_acl_role_db {
68858+ struct acl_role_label **r_table;
68859+ __u32 num_pointers; /* Number of allocations to track */
68860+ __u32 num_roles; /* Number of roles */
68861+ __u32 num_domain_children; /* Number of domain children */
68862+ __u32 num_subjects; /* Number of subjects */
68863+ __u32 num_objects; /* Number of objects */
68864+};
68865+
68866+struct acl_object_label {
68867+ char *filename;
68868+ ino_t inode;
68869+ dev_t device;
68870+ __u32 mode;
68871+
68872+ struct acl_subject_label *nested;
68873+ struct acl_object_label *globbed;
68874+
68875+ /* next two structures not used */
68876+
68877+ struct acl_object_label *prev;
68878+ struct acl_object_label *next;
68879+};
68880+
68881+struct acl_ip_label {
68882+ char *iface;
68883+ __u32 addr;
68884+ __u32 netmask;
68885+ __u16 low, high;
68886+ __u8 mode;
68887+ __u32 type;
68888+ __u32 proto[8];
68889+
68890+ /* next two structures not used */
68891+
68892+ struct acl_ip_label *prev;
68893+ struct acl_ip_label *next;
68894+};
68895+
68896+struct gr_arg {
68897+ struct user_acl_role_db role_db;
68898+ unsigned char pw[GR_PW_LEN];
68899+ unsigned char salt[GR_SALT_LEN];
68900+ unsigned char sum[GR_SHA_LEN];
68901+ unsigned char sp_role[GR_SPROLE_LEN];
68902+ struct sprole_pw *sprole_pws;
68903+ dev_t segv_device;
68904+ ino_t segv_inode;
68905+ uid_t segv_uid;
68906+ __u16 num_sprole_pws;
68907+ __u16 mode;
68908+};
68909+
68910+struct gr_arg_wrapper {
68911+ struct gr_arg *arg;
68912+ __u32 version;
68913+ __u32 size;
68914+};
68915+
68916+struct subject_map {
68917+ struct acl_subject_label *user;
68918+ struct acl_subject_label *kernel;
68919+ struct subject_map *prev;
68920+ struct subject_map *next;
68921+};
68922+
68923+struct acl_subj_map_db {
68924+ struct subject_map **s_hash;
68925+ __u32 s_size;
68926+};
68927+
68928+/* End Data Structures Section */
68929+
68930+/* Hash functions generated by empirical testing by Brad Spengler
68931+ Makes good use of the low bits of the inode. Generally 0-1 times
68932+ in loop for successful match. 0-3 for unsuccessful match.
68933+ Shift/add algorithm with modulus of table size and an XOR*/
68934+
68935+static __inline__ unsigned int
68936+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
68937+{
68938+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
68939+}
68940+
68941+ static __inline__ unsigned int
68942+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
68943+{
68944+ return ((const unsigned long)userp % sz);
68945+}
68946+
68947+static __inline__ unsigned int
68948+gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
68949+{
68950+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
68951+}
68952+
68953+static __inline__ unsigned int
68954+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
68955+{
68956+ return full_name_hash((const unsigned char *)name, len) % sz;
68957+}
68958+
68959+#define FOR_EACH_ROLE_START(role) \
68960+ role = role_list; \
68961+ while (role) {
68962+
68963+#define FOR_EACH_ROLE_END(role) \
68964+ role = role->prev; \
68965+ }
68966+
68967+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
68968+ subj = NULL; \
68969+ iter = 0; \
68970+ while (iter < role->subj_hash_size) { \
68971+ if (subj == NULL) \
68972+ subj = role->subj_hash[iter]; \
68973+ if (subj == NULL) { \
68974+ iter++; \
68975+ continue; \
68976+ }
68977+
68978+#define FOR_EACH_SUBJECT_END(subj,iter) \
68979+ subj = subj->next; \
68980+ if (subj == NULL) \
68981+ iter++; \
68982+ }
68983+
68984+
68985+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
68986+ subj = role->hash->first; \
68987+ while (subj != NULL) {
68988+
68989+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
68990+ subj = subj->next; \
68991+ }
68992+
68993+#endif
68994+
68995diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
68996new file mode 100644
68997index 0000000..323ecf2
68998--- /dev/null
68999+++ b/include/linux/gralloc.h
69000@@ -0,0 +1,9 @@
69001+#ifndef __GRALLOC_H
69002+#define __GRALLOC_H
69003+
69004+void acl_free_all(void);
69005+int acl_alloc_stack_init(unsigned long size);
69006+void *acl_alloc(unsigned long len);
69007+void *acl_alloc_num(unsigned long num, unsigned long len);
69008+
69009+#endif
69010diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
69011new file mode 100644
69012index 0000000..be66033
69013--- /dev/null
69014+++ b/include/linux/grdefs.h
69015@@ -0,0 +1,140 @@
69016+#ifndef GRDEFS_H
69017+#define GRDEFS_H
69018+
69019+/* Begin grsecurity status declarations */
69020+
69021+enum {
69022+ GR_READY = 0x01,
69023+ GR_STATUS_INIT = 0x00 // disabled state
69024+};
69025+
69026+/* Begin ACL declarations */
69027+
69028+/* Role flags */
69029+
69030+enum {
69031+ GR_ROLE_USER = 0x0001,
69032+ GR_ROLE_GROUP = 0x0002,
69033+ GR_ROLE_DEFAULT = 0x0004,
69034+ GR_ROLE_SPECIAL = 0x0008,
69035+ GR_ROLE_AUTH = 0x0010,
69036+ GR_ROLE_NOPW = 0x0020,
69037+ GR_ROLE_GOD = 0x0040,
69038+ GR_ROLE_LEARN = 0x0080,
69039+ GR_ROLE_TPE = 0x0100,
69040+ GR_ROLE_DOMAIN = 0x0200,
69041+ GR_ROLE_PAM = 0x0400,
69042+ GR_ROLE_PERSIST = 0x0800
69043+};
69044+
69045+/* ACL Subject and Object mode flags */
69046+enum {
69047+ GR_DELETED = 0x80000000
69048+};
69049+
69050+/* ACL Object-only mode flags */
69051+enum {
69052+ GR_READ = 0x00000001,
69053+ GR_APPEND = 0x00000002,
69054+ GR_WRITE = 0x00000004,
69055+ GR_EXEC = 0x00000008,
69056+ GR_FIND = 0x00000010,
69057+ GR_INHERIT = 0x00000020,
69058+ GR_SETID = 0x00000040,
69059+ GR_CREATE = 0x00000080,
69060+ GR_DELETE = 0x00000100,
69061+ GR_LINK = 0x00000200,
69062+ GR_AUDIT_READ = 0x00000400,
69063+ GR_AUDIT_APPEND = 0x00000800,
69064+ GR_AUDIT_WRITE = 0x00001000,
69065+ GR_AUDIT_EXEC = 0x00002000,
69066+ GR_AUDIT_FIND = 0x00004000,
69067+ GR_AUDIT_INHERIT= 0x00008000,
69068+ GR_AUDIT_SETID = 0x00010000,
69069+ GR_AUDIT_CREATE = 0x00020000,
69070+ GR_AUDIT_DELETE = 0x00040000,
69071+ GR_AUDIT_LINK = 0x00080000,
69072+ GR_PTRACERD = 0x00100000,
69073+ GR_NOPTRACE = 0x00200000,
69074+ GR_SUPPRESS = 0x00400000,
69075+ GR_NOLEARN = 0x00800000,
69076+ GR_INIT_TRANSFER= 0x01000000
69077+};
69078+
69079+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
69080+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
69081+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
69082+
69083+/* ACL subject-only mode flags */
69084+enum {
69085+ GR_KILL = 0x00000001,
69086+ GR_VIEW = 0x00000002,
69087+ GR_PROTECTED = 0x00000004,
69088+ GR_LEARN = 0x00000008,
69089+ GR_OVERRIDE = 0x00000010,
69090+ /* just a placeholder, this mode is only used in userspace */
69091+ GR_DUMMY = 0x00000020,
69092+ GR_PROTSHM = 0x00000040,
69093+ GR_KILLPROC = 0x00000080,
69094+ GR_KILLIPPROC = 0x00000100,
69095+ /* just a placeholder, this mode is only used in userspace */
69096+ GR_NOTROJAN = 0x00000200,
69097+ GR_PROTPROCFD = 0x00000400,
69098+ GR_PROCACCT = 0x00000800,
69099+ GR_RELAXPTRACE = 0x00001000,
69100+ //GR_NESTED = 0x00002000,
69101+ GR_INHERITLEARN = 0x00004000,
69102+ GR_PROCFIND = 0x00008000,
69103+ GR_POVERRIDE = 0x00010000,
69104+ GR_KERNELAUTH = 0x00020000,
69105+ GR_ATSECURE = 0x00040000,
69106+ GR_SHMEXEC = 0x00080000
69107+};
69108+
69109+enum {
69110+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
69111+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
69112+ GR_PAX_ENABLE_MPROTECT = 0x0004,
69113+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
69114+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
69115+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
69116+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
69117+ GR_PAX_DISABLE_MPROTECT = 0x0400,
69118+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
69119+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
69120+};
69121+
69122+enum {
69123+ GR_ID_USER = 0x01,
69124+ GR_ID_GROUP = 0x02,
69125+};
69126+
69127+enum {
69128+ GR_ID_ALLOW = 0x01,
69129+ GR_ID_DENY = 0x02,
69130+};
69131+
69132+#define GR_CRASH_RES 31
69133+#define GR_UIDTABLE_MAX 500
69134+
69135+/* begin resource learning section */
69136+enum {
69137+ GR_RLIM_CPU_BUMP = 60,
69138+ GR_RLIM_FSIZE_BUMP = 50000,
69139+ GR_RLIM_DATA_BUMP = 10000,
69140+ GR_RLIM_STACK_BUMP = 1000,
69141+ GR_RLIM_CORE_BUMP = 10000,
69142+ GR_RLIM_RSS_BUMP = 500000,
69143+ GR_RLIM_NPROC_BUMP = 1,
69144+ GR_RLIM_NOFILE_BUMP = 5,
69145+ GR_RLIM_MEMLOCK_BUMP = 50000,
69146+ GR_RLIM_AS_BUMP = 500000,
69147+ GR_RLIM_LOCKS_BUMP = 2,
69148+ GR_RLIM_SIGPENDING_BUMP = 5,
69149+ GR_RLIM_MSGQUEUE_BUMP = 10000,
69150+ GR_RLIM_NICE_BUMP = 1,
69151+ GR_RLIM_RTPRIO_BUMP = 1,
69152+ GR_RLIM_RTTIME_BUMP = 1000000
69153+};
69154+
69155+#endif
69156diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
69157new file mode 100644
69158index 0000000..5402bce
69159--- /dev/null
69160+++ b/include/linux/grinternal.h
69161@@ -0,0 +1,215 @@
69162+#ifndef __GRINTERNAL_H
69163+#define __GRINTERNAL_H
69164+
69165+#ifdef CONFIG_GRKERNSEC
69166+
69167+#include <linux/fs.h>
69168+#include <linux/mnt_namespace.h>
69169+#include <linux/nsproxy.h>
69170+#include <linux/gracl.h>
69171+#include <linux/grdefs.h>
69172+#include <linux/grmsg.h>
69173+
69174+void gr_add_learn_entry(const char *fmt, ...)
69175+ __attribute__ ((format (printf, 1, 2)));
69176+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
69177+ const struct vfsmount *mnt);
69178+__u32 gr_check_create(const struct dentry *new_dentry,
69179+ const struct dentry *parent,
69180+ const struct vfsmount *mnt, const __u32 mode);
69181+int gr_check_protected_task(const struct task_struct *task);
69182+__u32 to_gr_audit(const __u32 reqmode);
69183+int gr_set_acls(const int type);
69184+int gr_apply_subject_to_task(struct task_struct *task);
69185+int gr_acl_is_enabled(void);
69186+char gr_roletype_to_char(void);
69187+
69188+void gr_handle_alertkill(struct task_struct *task);
69189+char *gr_to_filename(const struct dentry *dentry,
69190+ const struct vfsmount *mnt);
69191+char *gr_to_filename1(const struct dentry *dentry,
69192+ const struct vfsmount *mnt);
69193+char *gr_to_filename2(const struct dentry *dentry,
69194+ const struct vfsmount *mnt);
69195+char *gr_to_filename3(const struct dentry *dentry,
69196+ const struct vfsmount *mnt);
69197+
69198+extern int grsec_enable_ptrace_readexec;
69199+extern int grsec_enable_harden_ptrace;
69200+extern int grsec_enable_link;
69201+extern int grsec_enable_fifo;
69202+extern int grsec_enable_execve;
69203+extern int grsec_enable_shm;
69204+extern int grsec_enable_execlog;
69205+extern int grsec_enable_signal;
69206+extern int grsec_enable_audit_ptrace;
69207+extern int grsec_enable_forkfail;
69208+extern int grsec_enable_time;
69209+extern int grsec_enable_rofs;
69210+extern int grsec_enable_chroot_shmat;
69211+extern int grsec_enable_chroot_mount;
69212+extern int grsec_enable_chroot_double;
69213+extern int grsec_enable_chroot_pivot;
69214+extern int grsec_enable_chroot_chdir;
69215+extern int grsec_enable_chroot_chmod;
69216+extern int grsec_enable_chroot_mknod;
69217+extern int grsec_enable_chroot_fchdir;
69218+extern int grsec_enable_chroot_nice;
69219+extern int grsec_enable_chroot_execlog;
69220+extern int grsec_enable_chroot_caps;
69221+extern int grsec_enable_chroot_sysctl;
69222+extern int grsec_enable_chroot_unix;
69223+extern int grsec_enable_symlinkown;
69224+extern kgid_t grsec_symlinkown_gid;
69225+extern int grsec_enable_tpe;
69226+extern kgid_t grsec_tpe_gid;
69227+extern int grsec_enable_tpe_all;
69228+extern int grsec_enable_tpe_invert;
69229+extern int grsec_enable_socket_all;
69230+extern kgid_t grsec_socket_all_gid;
69231+extern int grsec_enable_socket_client;
69232+extern kgid_t grsec_socket_client_gid;
69233+extern int grsec_enable_socket_server;
69234+extern kgid_t grsec_socket_server_gid;
69235+extern kgid_t grsec_audit_gid;
69236+extern int grsec_enable_group;
69237+extern int grsec_enable_audit_textrel;
69238+extern int grsec_enable_log_rwxmaps;
69239+extern int grsec_enable_mount;
69240+extern int grsec_enable_chdir;
69241+extern int grsec_resource_logging;
69242+extern int grsec_enable_blackhole;
69243+extern int grsec_lastack_retries;
69244+extern int grsec_enable_brute;
69245+extern int grsec_lock;
69246+
69247+extern spinlock_t grsec_alert_lock;
69248+extern unsigned long grsec_alert_wtime;
69249+extern unsigned long grsec_alert_fyet;
69250+
69251+extern spinlock_t grsec_audit_lock;
69252+
69253+extern rwlock_t grsec_exec_file_lock;
69254+
69255+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
69256+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
69257+ (tsk)->exec_file->f_path.mnt) : "/")
69258+
69259+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
69260+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
69261+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
69262+
69263+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
69264+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
69265+ (tsk)->exec_file->f_path.mnt) : "/")
69266+
69267+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
69268+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
69269+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
69270+
69271+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
69272+
69273+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
69274+
69275+#define GR_CHROOT_CAPS {{ \
69276+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
69277+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
69278+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
69279+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
69280+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
69281+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
69282+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
69283+
69284+#define security_learn(normal_msg,args...) \
69285+({ \
69286+ read_lock(&grsec_exec_file_lock); \
69287+ gr_add_learn_entry(normal_msg "\n", ## args); \
69288+ read_unlock(&grsec_exec_file_lock); \
69289+})
69290+
69291+enum {
69292+ GR_DO_AUDIT,
69293+ GR_DONT_AUDIT,
69294+ /* used for non-audit messages that we shouldn't kill the task on */
69295+ GR_DONT_AUDIT_GOOD
69296+};
69297+
69298+enum {
69299+ GR_TTYSNIFF,
69300+ GR_RBAC,
69301+ GR_RBAC_STR,
69302+ GR_STR_RBAC,
69303+ GR_RBAC_MODE2,
69304+ GR_RBAC_MODE3,
69305+ GR_FILENAME,
69306+ GR_SYSCTL_HIDDEN,
69307+ GR_NOARGS,
69308+ GR_ONE_INT,
69309+ GR_ONE_INT_TWO_STR,
69310+ GR_ONE_STR,
69311+ GR_STR_INT,
69312+ GR_TWO_STR_INT,
69313+ GR_TWO_INT,
69314+ GR_TWO_U64,
69315+ GR_THREE_INT,
69316+ GR_FIVE_INT_TWO_STR,
69317+ GR_TWO_STR,
69318+ GR_THREE_STR,
69319+ GR_FOUR_STR,
69320+ GR_STR_FILENAME,
69321+ GR_FILENAME_STR,
69322+ GR_FILENAME_TWO_INT,
69323+ GR_FILENAME_TWO_INT_STR,
69324+ GR_TEXTREL,
69325+ GR_PTRACE,
69326+ GR_RESOURCE,
69327+ GR_CAP,
69328+ GR_SIG,
69329+ GR_SIG2,
69330+ GR_CRASH1,
69331+ GR_CRASH2,
69332+ GR_PSACCT,
69333+ GR_RWXMAP
69334+};
69335+
69336+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
69337+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
69338+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
69339+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
69340+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
69341+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
69342+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
69343+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
69344+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
69345+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
69346+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
69347+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
69348+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
69349+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
69350+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
69351+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
69352+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
69353+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
69354+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
69355+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
69356+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
69357+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
69358+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
69359+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
69360+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
69361+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
69362+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
69363+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
69364+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
69365+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
69366+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
69367+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
69368+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
69369+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
69370+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
69371+
69372+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
69373+
69374+#endif
69375+
69376+#endif
69377diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
69378new file mode 100644
69379index 0000000..2bd4c8d
69380--- /dev/null
69381+++ b/include/linux/grmsg.h
69382@@ -0,0 +1,111 @@
69383+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
69384+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
69385+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
69386+#define GR_STOPMOD_MSG "denied modification of module state by "
69387+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
69388+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
69389+#define GR_IOPERM_MSG "denied use of ioperm() by "
69390+#define GR_IOPL_MSG "denied use of iopl() by "
69391+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
69392+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
69393+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
69394+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
69395+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
69396+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
69397+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
69398+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
69399+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
69400+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
69401+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
69402+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
69403+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
69404+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
69405+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
69406+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
69407+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
69408+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
69409+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
69410+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
69411+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
69412+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
69413+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
69414+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
69415+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
69416+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
69417+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
69418+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
69419+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
69420+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
69421+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
69422+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
69423+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
69424+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
69425+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
69426+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
69427+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
69428+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
69429+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
69430+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
69431+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
69432+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
69433+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
69434+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
69435+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
69436+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
69437+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
69438+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
69439+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
69440+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
69441+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
69442+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
69443+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
69444+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
69445+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
69446+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
69447+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
69448+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
69449+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
69450+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
69451+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
69452+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
69453+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
69454+#define GR_FAILFORK_MSG "failed fork with errno %s by "
69455+#define GR_NICE_CHROOT_MSG "denied priority change by "
69456+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
69457+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
69458+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
69459+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
69460+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
69461+#define GR_TIME_MSG "time set by "
69462+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
69463+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
69464+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
69465+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
69466+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
69467+#define GR_BIND_MSG "denied bind() by "
69468+#define GR_CONNECT_MSG "denied connect() by "
69469+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
69470+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
69471+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
69472+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
69473+#define GR_CAP_ACL_MSG "use of %s denied for "
69474+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
69475+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
69476+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
69477+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
69478+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
69479+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
69480+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
69481+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
69482+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
69483+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
69484+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
69485+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
69486+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
69487+#define GR_VM86_MSG "denied use of vm86 by "
69488+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
69489+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
69490+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
69491+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
69492+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
69493+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
69494diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
69495new file mode 100644
69496index 0000000..d7ef0ac
69497--- /dev/null
69498+++ b/include/linux/grsecurity.h
69499@@ -0,0 +1,242 @@
69500+#ifndef GR_SECURITY_H
69501+#define GR_SECURITY_H
69502+#include <linux/fs.h>
69503+#include <linux/fs_struct.h>
69504+#include <linux/binfmts.h>
69505+#include <linux/gracl.h>
69506+
69507+/* notify of brain-dead configs */
69508+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
69509+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
69510+#endif
69511+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
69512+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
69513+#endif
69514+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
69515+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
69516+#endif
69517+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
69518+#error "CONFIG_PAX enabled, but no PaX options are enabled."
69519+#endif
69520+
69521+void gr_handle_brute_attach(unsigned long mm_flags);
69522+void gr_handle_brute_check(void);
69523+void gr_handle_kernel_exploit(void);
69524+int gr_process_user_ban(void);
69525+
69526+char gr_roletype_to_char(void);
69527+
69528+int gr_acl_enable_at_secure(void);
69529+
69530+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
69531+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
69532+
69533+void gr_del_task_from_ip_table(struct task_struct *p);
69534+
69535+int gr_pid_is_chrooted(struct task_struct *p);
69536+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
69537+int gr_handle_chroot_nice(void);
69538+int gr_handle_chroot_sysctl(const int op);
69539+int gr_handle_chroot_setpriority(struct task_struct *p,
69540+ const int niceval);
69541+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
69542+int gr_handle_chroot_chroot(const struct dentry *dentry,
69543+ const struct vfsmount *mnt);
69544+void gr_handle_chroot_chdir(const struct path *path);
69545+int gr_handle_chroot_chmod(const struct dentry *dentry,
69546+ const struct vfsmount *mnt, const int mode);
69547+int gr_handle_chroot_mknod(const struct dentry *dentry,
69548+ const struct vfsmount *mnt, const int mode);
69549+int gr_handle_chroot_mount(const struct dentry *dentry,
69550+ const struct vfsmount *mnt,
69551+ const char *dev_name);
69552+int gr_handle_chroot_pivot(void);
69553+int gr_handle_chroot_unix(const pid_t pid);
69554+
69555+int gr_handle_rawio(const struct inode *inode);
69556+
69557+void gr_handle_ioperm(void);
69558+void gr_handle_iopl(void);
69559+
69560+umode_t gr_acl_umask(void);
69561+
69562+int gr_tpe_allow(const struct file *file);
69563+
69564+void gr_set_chroot_entries(struct task_struct *task, const struct path *path);
69565+void gr_clear_chroot_entries(struct task_struct *task);
69566+
69567+void gr_log_forkfail(const int retval);
69568+void gr_log_timechange(void);
69569+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
69570+void gr_log_chdir(const struct dentry *dentry,
69571+ const struct vfsmount *mnt);
69572+void gr_log_chroot_exec(const struct dentry *dentry,
69573+ const struct vfsmount *mnt);
69574+void gr_log_remount(const char *devname, const int retval);
69575+void gr_log_unmount(const char *devname, const int retval);
69576+void gr_log_mount(const char *from, const char *to, const int retval);
69577+void gr_log_textrel(struct vm_area_struct *vma);
69578+void gr_log_rwxmmap(struct file *file);
69579+void gr_log_rwxmprotect(struct file *file);
69580+
69581+int gr_handle_follow_link(const struct inode *parent,
69582+ const struct inode *inode,
69583+ const struct dentry *dentry,
69584+ const struct vfsmount *mnt);
69585+int gr_handle_fifo(const struct dentry *dentry,
69586+ const struct vfsmount *mnt,
69587+ const struct dentry *dir, const int flag,
69588+ const int acc_mode);
69589+int gr_handle_hardlink(const struct dentry *dentry,
69590+ const struct vfsmount *mnt,
69591+ struct inode *inode,
69592+ const int mode, const struct filename *to);
69593+
69594+int gr_is_capable(const int cap);
69595+int gr_is_capable_nolog(const int cap);
69596+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
69597+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
69598+
69599+void gr_copy_label(struct task_struct *tsk);
69600+void gr_handle_crash(struct task_struct *task, const int sig);
69601+int gr_handle_signal(const struct task_struct *p, const int sig);
69602+int gr_check_crash_uid(const kuid_t uid);
69603+int gr_check_protected_task(const struct task_struct *task);
69604+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
69605+int gr_acl_handle_mmap(const struct file *file,
69606+ const unsigned long prot);
69607+int gr_acl_handle_mprotect(const struct file *file,
69608+ const unsigned long prot);
69609+int gr_check_hidden_task(const struct task_struct *tsk);
69610+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
69611+ const struct vfsmount *mnt);
69612+__u32 gr_acl_handle_utime(const struct dentry *dentry,
69613+ const struct vfsmount *mnt);
69614+__u32 gr_acl_handle_access(const struct dentry *dentry,
69615+ const struct vfsmount *mnt, const int fmode);
69616+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
69617+ const struct vfsmount *mnt, umode_t *mode);
69618+__u32 gr_acl_handle_chown(const struct dentry *dentry,
69619+ const struct vfsmount *mnt);
69620+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
69621+ const struct vfsmount *mnt);
69622+int gr_handle_ptrace(struct task_struct *task, const long request);
69623+int gr_handle_proc_ptrace(struct task_struct *task);
69624+__u32 gr_acl_handle_execve(const struct dentry *dentry,
69625+ const struct vfsmount *mnt);
69626+int gr_check_crash_exec(const struct file *filp);
69627+int gr_acl_is_enabled(void);
69628+void gr_set_kernel_label(struct task_struct *task);
69629+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
69630+ const kgid_t gid);
69631+int gr_set_proc_label(const struct dentry *dentry,
69632+ const struct vfsmount *mnt,
69633+ const int unsafe_flags);
69634+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
69635+ const struct vfsmount *mnt);
69636+__u32 gr_acl_handle_open(const struct dentry *dentry,
69637+ const struct vfsmount *mnt, int acc_mode);
69638+__u32 gr_acl_handle_creat(const struct dentry *dentry,
69639+ const struct dentry *p_dentry,
69640+ const struct vfsmount *p_mnt,
69641+ int open_flags, int acc_mode, const int imode);
69642+void gr_handle_create(const struct dentry *dentry,
69643+ const struct vfsmount *mnt);
69644+void gr_handle_proc_create(const struct dentry *dentry,
69645+ const struct inode *inode);
69646+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
69647+ const struct dentry *parent_dentry,
69648+ const struct vfsmount *parent_mnt,
69649+ const int mode);
69650+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
69651+ const struct dentry *parent_dentry,
69652+ const struct vfsmount *parent_mnt);
69653+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
69654+ const struct vfsmount *mnt);
69655+void gr_handle_delete(const ino_t ino, const dev_t dev);
69656+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
69657+ const struct vfsmount *mnt);
69658+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
69659+ const struct dentry *parent_dentry,
69660+ const struct vfsmount *parent_mnt,
69661+ const struct filename *from);
69662+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
69663+ const struct dentry *parent_dentry,
69664+ const struct vfsmount *parent_mnt,
69665+ const struct dentry *old_dentry,
69666+ const struct vfsmount *old_mnt, const struct filename *to);
69667+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
69668+int gr_acl_handle_rename(struct dentry *new_dentry,
69669+ struct dentry *parent_dentry,
69670+ const struct vfsmount *parent_mnt,
69671+ struct dentry *old_dentry,
69672+ struct inode *old_parent_inode,
69673+ struct vfsmount *old_mnt, const struct filename *newname);
69674+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
69675+ struct dentry *old_dentry,
69676+ struct dentry *new_dentry,
69677+ struct vfsmount *mnt, const __u8 replace);
69678+__u32 gr_check_link(const struct dentry *new_dentry,
69679+ const struct dentry *parent_dentry,
69680+ const struct vfsmount *parent_mnt,
69681+ const struct dentry *old_dentry,
69682+ const struct vfsmount *old_mnt);
69683+int gr_acl_handle_filldir(const struct file *file, const char *name,
69684+ const unsigned int namelen, const ino_t ino);
69685+
69686+__u32 gr_acl_handle_unix(const struct dentry *dentry,
69687+ const struct vfsmount *mnt);
69688+void gr_acl_handle_exit(void);
69689+void gr_acl_handle_psacct(struct task_struct *task, const long code);
69690+int gr_acl_handle_procpidmem(const struct task_struct *task);
69691+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
69692+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
69693+void gr_audit_ptrace(struct task_struct *task);
69694+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
69695+void gr_put_exec_file(struct task_struct *task);
69696+
69697+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
69698+
69699+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
69700+extern void gr_learn_resource(const struct task_struct *task, const int res,
69701+ const unsigned long wanted, const int gt);
69702+#else
69703+static inline void gr_learn_resource(const struct task_struct *task, const int res,
69704+ const unsigned long wanted, const int gt)
69705+{
69706+}
69707+#endif
69708+
69709+#ifdef CONFIG_GRKERNSEC_RESLOG
69710+extern void gr_log_resource(const struct task_struct *task, const int res,
69711+ const unsigned long wanted, const int gt);
69712+#else
69713+static inline void gr_log_resource(const struct task_struct *task, const int res,
69714+ const unsigned long wanted, const int gt)
69715+{
69716+}
69717+#endif
69718+
69719+#ifdef CONFIG_GRKERNSEC
69720+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
69721+void gr_handle_vm86(void);
69722+void gr_handle_mem_readwrite(u64 from, u64 to);
69723+
69724+void gr_log_badprocpid(const char *entry);
69725+
69726+extern int grsec_enable_dmesg;
69727+extern int grsec_disable_privio;
69728+
69729+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
69730+extern kgid_t grsec_proc_gid;
69731+#endif
69732+
69733+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
69734+extern int grsec_enable_chroot_findtask;
69735+#endif
69736+#ifdef CONFIG_GRKERNSEC_SETXID
69737+extern int grsec_enable_setxid;
69738+#endif
69739+#endif
69740+
69741+#endif
69742diff --git a/include/linux/grsock.h b/include/linux/grsock.h
69743new file mode 100644
69744index 0000000..e7ffaaf
69745--- /dev/null
69746+++ b/include/linux/grsock.h
69747@@ -0,0 +1,19 @@
69748+#ifndef __GRSOCK_H
69749+#define __GRSOCK_H
69750+
69751+extern void gr_attach_curr_ip(const struct sock *sk);
69752+extern int gr_handle_sock_all(const int family, const int type,
69753+ const int protocol);
69754+extern int gr_handle_sock_server(const struct sockaddr *sck);
69755+extern int gr_handle_sock_server_other(const struct sock *sck);
69756+extern int gr_handle_sock_client(const struct sockaddr *sck);
69757+extern int gr_search_connect(struct socket * sock,
69758+ struct sockaddr_in * addr);
69759+extern int gr_search_bind(struct socket * sock,
69760+ struct sockaddr_in * addr);
69761+extern int gr_search_listen(struct socket * sock);
69762+extern int gr_search_accept(struct socket * sock);
69763+extern int gr_search_socket(const int domain, const int type,
69764+ const int protocol);
69765+
69766+#endif
69767diff --git a/include/linux/highmem.h b/include/linux/highmem.h
69768index 7fb31da..08b5114 100644
69769--- a/include/linux/highmem.h
69770+++ b/include/linux/highmem.h
69771@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
69772 kunmap_atomic(kaddr);
69773 }
69774
69775+static inline void sanitize_highpage(struct page *page)
69776+{
69777+ void *kaddr;
69778+ unsigned long flags;
69779+
69780+ local_irq_save(flags);
69781+ kaddr = kmap_atomic(page);
69782+ clear_page(kaddr);
69783+ kunmap_atomic(kaddr);
69784+ local_irq_restore(flags);
69785+}
69786+
69787 static inline void zero_user_segments(struct page *page,
69788 unsigned start1, unsigned end1,
69789 unsigned start2, unsigned end2)
69790diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
69791index 1c7b89a..7f52502 100644
69792--- a/include/linux/hwmon-sysfs.h
69793+++ b/include/linux/hwmon-sysfs.h
69794@@ -25,7 +25,8 @@
69795 struct sensor_device_attribute{
69796 struct device_attribute dev_attr;
69797 int index;
69798-};
69799+} __do_const;
69800+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
69801 #define to_sensor_dev_attr(_dev_attr) \
69802 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
69803
69804@@ -41,7 +42,7 @@ struct sensor_device_attribute_2 {
69805 struct device_attribute dev_attr;
69806 u8 index;
69807 u8 nr;
69808-};
69809+} __do_const;
69810 #define to_sensor_dev_attr_2(_dev_attr) \
69811 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
69812
69813diff --git a/include/linux/i2c.h b/include/linux/i2c.h
69814index d0c4db7..61b3577 100644
69815--- a/include/linux/i2c.h
69816+++ b/include/linux/i2c.h
69817@@ -369,6 +369,7 @@ struct i2c_algorithm {
69818 /* To determine what the adapter supports */
69819 u32 (*functionality) (struct i2c_adapter *);
69820 };
69821+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
69822
69823 /*
69824 * i2c_adapter is the structure used to identify a physical i2c bus along
69825diff --git a/include/linux/i2o.h b/include/linux/i2o.h
69826index d23c3c2..eb63c81 100644
69827--- a/include/linux/i2o.h
69828+++ b/include/linux/i2o.h
69829@@ -565,7 +565,7 @@ struct i2o_controller {
69830 struct i2o_device *exec; /* Executive */
69831 #if BITS_PER_LONG == 64
69832 spinlock_t context_list_lock; /* lock for context_list */
69833- atomic_t context_list_counter; /* needed for unique contexts */
69834+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
69835 struct list_head context_list; /* list of context id's
69836 and pointers */
69837 #endif
69838diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
69839index aff7ad8..3942bbd 100644
69840--- a/include/linux/if_pppox.h
69841+++ b/include/linux/if_pppox.h
69842@@ -76,7 +76,7 @@ struct pppox_proto {
69843 int (*ioctl)(struct socket *sock, unsigned int cmd,
69844 unsigned long arg);
69845 struct module *owner;
69846-};
69847+} __do_const;
69848
69849 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
69850 extern void unregister_pppox_proto(int proto_num);
69851diff --git a/include/linux/init.h b/include/linux/init.h
69852index 8618147..0821126 100644
69853--- a/include/linux/init.h
69854+++ b/include/linux/init.h
69855@@ -39,9 +39,36 @@
69856 * Also note, that this data cannot be "const".
69857 */
69858
69859+#ifdef MODULE
69860+#define add_init_latent_entropy
69861+#define add_devinit_latent_entropy
69862+#define add_cpuinit_latent_entropy
69863+#define add_meminit_latent_entropy
69864+#else
69865+#define add_init_latent_entropy __latent_entropy
69866+
69867+#ifdef CONFIG_HOTPLUG
69868+#define add_devinit_latent_entropy
69869+#else
69870+#define add_devinit_latent_entropy __latent_entropy
69871+#endif
69872+
69873+#ifdef CONFIG_HOTPLUG_CPU
69874+#define add_cpuinit_latent_entropy
69875+#else
69876+#define add_cpuinit_latent_entropy __latent_entropy
69877+#endif
69878+
69879+#ifdef CONFIG_MEMORY_HOTPLUG
69880+#define add_meminit_latent_entropy
69881+#else
69882+#define add_meminit_latent_entropy __latent_entropy
69883+#endif
69884+#endif
69885+
69886 /* These are for everybody (although not all archs will actually
69887 discard it in modules) */
69888-#define __init __section(.init.text) __cold notrace
69889+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
69890 #define __initdata __section(.init.data)
69891 #define __initconst __constsection(.init.rodata)
69892 #define __exitdata __section(.exit.data)
69893@@ -94,7 +121,7 @@
69894 #define __exit __section(.exit.text) __exitused __cold notrace
69895
69896 /* Used for HOTPLUG_CPU */
69897-#define __cpuinit __section(.cpuinit.text) __cold notrace
69898+#define __cpuinit __section(.cpuinit.text) __cold notrace add_cpuinit_latent_entropy
69899 #define __cpuinitdata __section(.cpuinit.data)
69900 #define __cpuinitconst __constsection(.cpuinit.rodata)
69901 #define __cpuexit __section(.cpuexit.text) __exitused __cold notrace
69902@@ -102,7 +129,7 @@
69903 #define __cpuexitconst __constsection(.cpuexit.rodata)
69904
69905 /* Used for MEMORY_HOTPLUG */
69906-#define __meminit __section(.meminit.text) __cold notrace
69907+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
69908 #define __meminitdata __section(.meminit.data)
69909 #define __meminitconst __constsection(.meminit.rodata)
69910 #define __memexit __section(.memexit.text) __exitused __cold notrace
69911diff --git a/include/linux/init_task.h b/include/linux/init_task.h
69912index 5cd0f09..c9f67cc 100644
69913--- a/include/linux/init_task.h
69914+++ b/include/linux/init_task.h
69915@@ -154,6 +154,12 @@ extern struct task_group root_task_group;
69916
69917 #define INIT_TASK_COMM "swapper"
69918
69919+#ifdef CONFIG_X86
69920+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
69921+#else
69922+#define INIT_TASK_THREAD_INFO
69923+#endif
69924+
69925 /*
69926 * INIT_TASK is used to set up the first task table, touch at
69927 * your own risk!. Base=0, limit=0x1fffff (=2MB)
69928@@ -193,6 +199,7 @@ extern struct task_group root_task_group;
69929 RCU_POINTER_INITIALIZER(cred, &init_cred), \
69930 .comm = INIT_TASK_COMM, \
69931 .thread = INIT_THREAD, \
69932+ INIT_TASK_THREAD_INFO \
69933 .fs = &init_fs, \
69934 .files = &init_files, \
69935 .signal = &init_signals, \
69936diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
69937index 5fa5afe..ac55b25 100644
69938--- a/include/linux/interrupt.h
69939+++ b/include/linux/interrupt.h
69940@@ -430,7 +430,7 @@ enum
69941 /* map softirq index to softirq name. update 'softirq_to_name' in
69942 * kernel/softirq.c when adding a new softirq.
69943 */
69944-extern char *softirq_to_name[NR_SOFTIRQS];
69945+extern const char * const softirq_to_name[NR_SOFTIRQS];
69946
69947 /* softirq mask and active fields moved to irq_cpustat_t in
69948 * asm/hardirq.h to get better cache usage. KAO
69949@@ -438,12 +438,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
69950
69951 struct softirq_action
69952 {
69953- void (*action)(struct softirq_action *);
69954-};
69955+ void (*action)(void);
69956+} __no_const;
69957
69958 asmlinkage void do_softirq(void);
69959 asmlinkage void __do_softirq(void);
69960-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
69961+extern void open_softirq(int nr, void (*action)(void));
69962 extern void softirq_init(void);
69963 extern void __raise_softirq_irqoff(unsigned int nr);
69964
69965diff --git a/include/linux/iommu.h b/include/linux/iommu.h
69966index ba3b8a9..7e14ed8 100644
69967--- a/include/linux/iommu.h
69968+++ b/include/linux/iommu.h
69969@@ -113,7 +113,7 @@ struct iommu_ops {
69970 u32 (*domain_get_windows)(struct iommu_domain *domain);
69971
69972 unsigned long pgsize_bitmap;
69973-};
69974+} __do_const;
69975
69976 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
69977 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
69978diff --git a/include/linux/ioport.h b/include/linux/ioport.h
69979index 85ac9b9b..e5759ab 100644
69980--- a/include/linux/ioport.h
69981+++ b/include/linux/ioport.h
69982@@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
69983 int adjust_resource(struct resource *res, resource_size_t start,
69984 resource_size_t size);
69985 resource_size_t resource_alignment(struct resource *res);
69986-static inline resource_size_t resource_size(const struct resource *res)
69987+static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
69988 {
69989 return res->end - res->start + 1;
69990 }
69991diff --git a/include/linux/irq.h b/include/linux/irq.h
69992index bc4e066..50468a9 100644
69993--- a/include/linux/irq.h
69994+++ b/include/linux/irq.h
69995@@ -328,7 +328,8 @@ struct irq_chip {
69996 void (*irq_print_chip)(struct irq_data *data, struct seq_file *p);
69997
69998 unsigned long flags;
69999-};
70000+} __do_const;
70001+typedef struct irq_chip __no_const irq_chip_no_const;
70002
70003 /*
70004 * irq_chip specific flags
70005diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
70006index 3fd8e42..a73e966 100644
70007--- a/include/linux/irqchip/arm-gic.h
70008+++ b/include/linux/irqchip/arm-gic.h
70009@@ -59,9 +59,11 @@
70010
70011 #ifndef __ASSEMBLY__
70012
70013+#include <linux/irq.h>
70014+
70015 struct device_node;
70016
70017-extern struct irq_chip gic_arch_extn;
70018+extern irq_chip_no_const gic_arch_extn;
70019
70020 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
70021 u32 offset, struct device_node *);
70022diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
70023index 6883e19..06992b1 100644
70024--- a/include/linux/kallsyms.h
70025+++ b/include/linux/kallsyms.h
70026@@ -15,7 +15,8 @@
70027
70028 struct module;
70029
70030-#ifdef CONFIG_KALLSYMS
70031+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
70032+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
70033 /* Lookup the address for a symbol. Returns 0 if not found. */
70034 unsigned long kallsyms_lookup_name(const char *name);
70035
70036@@ -106,6 +107,17 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
70037 /* Stupid that this does nothing, but I didn't create this mess. */
70038 #define __print_symbol(fmt, addr)
70039 #endif /*CONFIG_KALLSYMS*/
70040+#else /* when included by kallsyms.c, vsnprintf.c, or
70041+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
70042+extern void __print_symbol(const char *fmt, unsigned long address);
70043+extern int sprint_backtrace(char *buffer, unsigned long address);
70044+extern int sprint_symbol(char *buffer, unsigned long address);
70045+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
70046+const char *kallsyms_lookup(unsigned long addr,
70047+ unsigned long *symbolsize,
70048+ unsigned long *offset,
70049+ char **modname, char *namebuf);
70050+#endif
70051
70052 /* This macro allows us to keep printk typechecking */
70053 static __printf(1, 2)
70054diff --git a/include/linux/key-type.h b/include/linux/key-type.h
70055index 518a53a..5e28358 100644
70056--- a/include/linux/key-type.h
70057+++ b/include/linux/key-type.h
70058@@ -125,7 +125,7 @@ struct key_type {
70059 /* internal fields */
70060 struct list_head link; /* link in types list */
70061 struct lock_class_key lock_class; /* key->sem lock class */
70062-};
70063+} __do_const;
70064
70065 extern struct key_type key_type_keyring;
70066
70067diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
70068index c6e091b..a940adf 100644
70069--- a/include/linux/kgdb.h
70070+++ b/include/linux/kgdb.h
70071@@ -52,7 +52,7 @@ extern int kgdb_connected;
70072 extern int kgdb_io_module_registered;
70073
70074 extern atomic_t kgdb_setting_breakpoint;
70075-extern atomic_t kgdb_cpu_doing_single_step;
70076+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
70077
70078 extern struct task_struct *kgdb_usethread;
70079 extern struct task_struct *kgdb_contthread;
70080@@ -254,7 +254,7 @@ struct kgdb_arch {
70081 void (*correct_hw_break)(void);
70082
70083 void (*enable_nmi)(bool on);
70084-};
70085+} __do_const;
70086
70087 /**
70088 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
70089@@ -279,7 +279,7 @@ struct kgdb_io {
70090 void (*pre_exception) (void);
70091 void (*post_exception) (void);
70092 int is_console;
70093-};
70094+} __do_const;
70095
70096 extern struct kgdb_arch arch_kgdb_ops;
70097
70098diff --git a/include/linux/kmod.h b/include/linux/kmod.h
70099index 5398d58..5883a34 100644
70100--- a/include/linux/kmod.h
70101+++ b/include/linux/kmod.h
70102@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
70103 * usually useless though. */
70104 extern __printf(2, 3)
70105 int __request_module(bool wait, const char *name, ...);
70106+extern __printf(3, 4)
70107+int ___request_module(bool wait, char *param_name, const char *name, ...);
70108 #define request_module(mod...) __request_module(true, mod)
70109 #define request_module_nowait(mod...) __request_module(false, mod)
70110 #define try_then_request_module(x, mod...) \
70111diff --git a/include/linux/kobject.h b/include/linux/kobject.h
70112index 939b112..ed6ed51 100644
70113--- a/include/linux/kobject.h
70114+++ b/include/linux/kobject.h
70115@@ -111,7 +111,7 @@ struct kobj_type {
70116 struct attribute **default_attrs;
70117 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
70118 const void *(*namespace)(struct kobject *kobj);
70119-};
70120+} __do_const;
70121
70122 struct kobj_uevent_env {
70123 char *envp[UEVENT_NUM_ENVP];
70124@@ -134,6 +134,7 @@ struct kobj_attribute {
70125 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
70126 const char *buf, size_t count);
70127 };
70128+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
70129
70130 extern const struct sysfs_ops kobj_sysfs_ops;
70131
70132diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
70133index f66b065..c2c29b4 100644
70134--- a/include/linux/kobject_ns.h
70135+++ b/include/linux/kobject_ns.h
70136@@ -43,7 +43,7 @@ struct kobj_ns_type_operations {
70137 const void *(*netlink_ns)(struct sock *sk);
70138 const void *(*initial_ns)(void);
70139 void (*drop_ns)(void *);
70140-};
70141+} __do_const;
70142
70143 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
70144 int kobj_ns_type_registered(enum kobj_ns_type type);
70145diff --git a/include/linux/kref.h b/include/linux/kref.h
70146index 7419c02..aa2f02d 100644
70147--- a/include/linux/kref.h
70148+++ b/include/linux/kref.h
70149@@ -65,7 +65,7 @@ static inline void kref_get(struct kref *kref)
70150 static inline int kref_sub(struct kref *kref, unsigned int count,
70151 void (*release)(struct kref *kref))
70152 {
70153- WARN_ON(release == NULL);
70154+ BUG_ON(release == NULL);
70155
70156 if (atomic_sub_and_test((int) count, &kref->refcount)) {
70157 release(kref);
70158diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
70159index c139582..0b5b102 100644
70160--- a/include/linux/kvm_host.h
70161+++ b/include/linux/kvm_host.h
70162@@ -424,7 +424,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
70163 int __must_check vcpu_load(struct kvm_vcpu *vcpu);
70164 void vcpu_put(struct kvm_vcpu *vcpu);
70165
70166-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
70167+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
70168 struct module *module);
70169 void kvm_exit(void);
70170
70171@@ -582,7 +582,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
70172 struct kvm_guest_debug *dbg);
70173 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
70174
70175-int kvm_arch_init(void *opaque);
70176+int kvm_arch_init(const void *opaque);
70177 void kvm_arch_exit(void);
70178
70179 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
70180diff --git a/include/linux/libata.h b/include/linux/libata.h
70181index eae7a05..2cdd875 100644
70182--- a/include/linux/libata.h
70183+++ b/include/linux/libata.h
70184@@ -919,7 +919,7 @@ struct ata_port_operations {
70185 * fields must be pointers.
70186 */
70187 const struct ata_port_operations *inherits;
70188-};
70189+} __do_const;
70190
70191 struct ata_port_info {
70192 unsigned long flags;
70193diff --git a/include/linux/list.h b/include/linux/list.h
70194index 6a1f8df..eaec1ff 100644
70195--- a/include/linux/list.h
70196+++ b/include/linux/list.h
70197@@ -112,6 +112,19 @@ extern void __list_del_entry(struct list_head *entry);
70198 extern void list_del(struct list_head *entry);
70199 #endif
70200
70201+extern void __pax_list_add(struct list_head *new,
70202+ struct list_head *prev,
70203+ struct list_head *next);
70204+static inline void pax_list_add(struct list_head *new, struct list_head *head)
70205+{
70206+ __pax_list_add(new, head, head->next);
70207+}
70208+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
70209+{
70210+ __pax_list_add(new, head->prev, head);
70211+}
70212+extern void pax_list_del(struct list_head *entry);
70213+
70214 /**
70215 * list_replace - replace old entry by new one
70216 * @old : the element to be replaced
70217@@ -145,6 +158,8 @@ static inline void list_del_init(struct list_head *entry)
70218 INIT_LIST_HEAD(entry);
70219 }
70220
70221+extern void pax_list_del_init(struct list_head *entry);
70222+
70223 /**
70224 * list_move - delete from one list and add as another's head
70225 * @list: the entry to move
70226diff --git a/include/linux/math64.h b/include/linux/math64.h
70227index b8ba855..0148090 100644
70228--- a/include/linux/math64.h
70229+++ b/include/linux/math64.h
70230@@ -14,7 +14,7 @@
70231 * This is commonly provided by 32bit archs to provide an optimized 64bit
70232 * divide.
70233 */
70234-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
70235+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
70236 {
70237 *remainder = dividend % divisor;
70238 return dividend / divisor;
70239@@ -50,7 +50,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
70240 #define div64_long(x,y) div_s64((x),(y))
70241
70242 #ifndef div_u64_rem
70243-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
70244+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
70245 {
70246 *remainder = do_div(dividend, divisor);
70247 return dividend;
70248@@ -79,7 +79,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
70249 * divide.
70250 */
70251 #ifndef div_u64
70252-static inline u64 div_u64(u64 dividend, u32 divisor)
70253+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
70254 {
70255 u32 remainder;
70256 return div_u64_rem(dividend, divisor, &remainder);
70257diff --git a/include/linux/mm.h b/include/linux/mm.h
70258index e2091b8..821db54 100644
70259--- a/include/linux/mm.h
70260+++ b/include/linux/mm.h
70261@@ -101,6 +101,11 @@ extern unsigned int kobjsize(const void *objp);
70262 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
70263 #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
70264 #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
70265+
70266+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
70267+#define VM_PAGEEXEC 0x02000000 /* vma->vm_page_prot needs special handling */
70268+#endif
70269+
70270 #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
70271
70272 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
70273@@ -202,8 +207,8 @@ struct vm_operations_struct {
70274 /* called by access_process_vm when get_user_pages() fails, typically
70275 * for use by special VMAs that can switch between memory and hardware
70276 */
70277- int (*access)(struct vm_area_struct *vma, unsigned long addr,
70278- void *buf, int len, int write);
70279+ ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
70280+ void *buf, size_t len, int write);
70281 #ifdef CONFIG_NUMA
70282 /*
70283 * set_policy() op must add a reference to any non-NULL @new mempolicy
70284@@ -233,6 +238,7 @@ struct vm_operations_struct {
70285 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
70286 unsigned long size, pgoff_t pgoff);
70287 };
70288+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
70289
70290 struct mmu_gather;
70291 struct inode;
70292@@ -970,8 +976,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
70293 unsigned long *pfn);
70294 int follow_phys(struct vm_area_struct *vma, unsigned long address,
70295 unsigned int flags, unsigned long *prot, resource_size_t *phys);
70296-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
70297- void *buf, int len, int write);
70298+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
70299+ void *buf, size_t len, int write);
70300
70301 static inline void unmap_shared_mapping_range(struct address_space *mapping,
70302 loff_t const holebegin, loff_t const holelen)
70303@@ -1010,9 +1016,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
70304 }
70305 #endif
70306
70307-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
70308-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
70309- void *buf, int len, int write);
70310+extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
70311+extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
70312+ void *buf, size_t len, int write);
70313
70314 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
70315 unsigned long start, unsigned long nr_pages,
70316@@ -1043,34 +1049,6 @@ int set_page_dirty(struct page *page);
70317 int set_page_dirty_lock(struct page *page);
70318 int clear_page_dirty_for_io(struct page *page);
70319
70320-/* Is the vma a continuation of the stack vma above it? */
70321-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
70322-{
70323- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
70324-}
70325-
70326-static inline int stack_guard_page_start(struct vm_area_struct *vma,
70327- unsigned long addr)
70328-{
70329- return (vma->vm_flags & VM_GROWSDOWN) &&
70330- (vma->vm_start == addr) &&
70331- !vma_growsdown(vma->vm_prev, addr);
70332-}
70333-
70334-/* Is the vma a continuation of the stack vma below it? */
70335-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
70336-{
70337- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
70338-}
70339-
70340-static inline int stack_guard_page_end(struct vm_area_struct *vma,
70341- unsigned long addr)
70342-{
70343- return (vma->vm_flags & VM_GROWSUP) &&
70344- (vma->vm_end == addr) &&
70345- !vma_growsup(vma->vm_next, addr);
70346-}
70347-
70348 extern pid_t
70349 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
70350
70351@@ -1173,6 +1151,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
70352 }
70353 #endif
70354
70355+#ifdef CONFIG_MMU
70356+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
70357+#else
70358+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
70359+{
70360+ return __pgprot(0);
70361+}
70362+#endif
70363+
70364 int vma_wants_writenotify(struct vm_area_struct *vma);
70365
70366 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
70367@@ -1191,8 +1178,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
70368 {
70369 return 0;
70370 }
70371+
70372+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
70373+ unsigned long address)
70374+{
70375+ return 0;
70376+}
70377 #else
70378 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
70379+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
70380 #endif
70381
70382 #ifdef __PAGETABLE_PMD_FOLDED
70383@@ -1201,8 +1195,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
70384 {
70385 return 0;
70386 }
70387+
70388+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
70389+ unsigned long address)
70390+{
70391+ return 0;
70392+}
70393 #else
70394 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
70395+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
70396 #endif
70397
70398 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
70399@@ -1220,11 +1221,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
70400 NULL: pud_offset(pgd, address);
70401 }
70402
70403+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
70404+{
70405+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
70406+ NULL: pud_offset(pgd, address);
70407+}
70408+
70409 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
70410 {
70411 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
70412 NULL: pmd_offset(pud, address);
70413 }
70414+
70415+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
70416+{
70417+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
70418+ NULL: pmd_offset(pud, address);
70419+}
70420 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
70421
70422 #if USE_SPLIT_PTLOCKS
70423@@ -1455,6 +1468,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70424 unsigned long len, unsigned long prot, unsigned long flags,
70425 unsigned long pgoff, unsigned long *populate);
70426 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
70427+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
70428
70429 #ifdef CONFIG_MMU
70430 extern int __mm_populate(unsigned long addr, unsigned long len,
70431@@ -1483,6 +1497,7 @@ struct vm_unmapped_area_info {
70432 unsigned long high_limit;
70433 unsigned long align_mask;
70434 unsigned long align_offset;
70435+ unsigned long threadstack_offset;
70436 };
70437
70438 extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
70439@@ -1561,6 +1576,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
70440 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
70441 struct vm_area_struct **pprev);
70442
70443+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
70444+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
70445+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
70446+
70447 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
70448 NULL if none. Assume start_addr < end_addr. */
70449 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
70450@@ -1589,15 +1608,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
70451 return vma;
70452 }
70453
70454-#ifdef CONFIG_MMU
70455-pgprot_t vm_get_page_prot(unsigned long vm_flags);
70456-#else
70457-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
70458-{
70459- return __pgprot(0);
70460-}
70461-#endif
70462-
70463 #ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
70464 unsigned long change_prot_numa(struct vm_area_struct *vma,
70465 unsigned long start, unsigned long end);
70466@@ -1649,6 +1659,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
70467 static inline void vm_stat_account(struct mm_struct *mm,
70468 unsigned long flags, struct file *file, long pages)
70469 {
70470+
70471+#ifdef CONFIG_PAX_RANDMMAP
70472+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
70473+#endif
70474+
70475 mm->total_vm += pages;
70476 }
70477 #endif /* CONFIG_PROC_FS */
70478@@ -1725,7 +1740,7 @@ extern int unpoison_memory(unsigned long pfn);
70479 extern int sysctl_memory_failure_early_kill;
70480 extern int sysctl_memory_failure_recovery;
70481 extern void shake_page(struct page *p, int access);
70482-extern atomic_long_t num_poisoned_pages;
70483+extern atomic_long_unchecked_t num_poisoned_pages;
70484 extern int soft_offline_page(struct page *page, int flags);
70485
70486 extern void dump_page(struct page *page);
70487@@ -1756,5 +1771,11 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
70488 static inline bool page_is_guard(struct page *page) { return false; }
70489 #endif /* CONFIG_DEBUG_PAGEALLOC */
70490
70491+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
70492+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
70493+#else
70494+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
70495+#endif
70496+
70497 #endif /* __KERNEL__ */
70498 #endif /* _LINUX_MM_H */
70499diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
70500index ace9a5f..81bdb59 100644
70501--- a/include/linux/mm_types.h
70502+++ b/include/linux/mm_types.h
70503@@ -289,6 +289,8 @@ struct vm_area_struct {
70504 #ifdef CONFIG_NUMA
70505 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
70506 #endif
70507+
70508+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
70509 };
70510
70511 struct core_thread {
70512@@ -437,6 +439,24 @@ struct mm_struct {
70513 int first_nid;
70514 #endif
70515 struct uprobes_state uprobes_state;
70516+
70517+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
70518+ unsigned long pax_flags;
70519+#endif
70520+
70521+#ifdef CONFIG_PAX_DLRESOLVE
70522+ unsigned long call_dl_resolve;
70523+#endif
70524+
70525+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
70526+ unsigned long call_syscall;
70527+#endif
70528+
70529+#ifdef CONFIG_PAX_ASLR
70530+ unsigned long delta_mmap; /* randomized offset */
70531+ unsigned long delta_stack; /* randomized offset */
70532+#endif
70533+
70534 };
70535
70536 /* first nid will either be a valid NID or one of these values */
70537diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
70538index c5d5278..f0b68c8 100644
70539--- a/include/linux/mmiotrace.h
70540+++ b/include/linux/mmiotrace.h
70541@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
70542 /* Called from ioremap.c */
70543 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
70544 void __iomem *addr);
70545-extern void mmiotrace_iounmap(volatile void __iomem *addr);
70546+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
70547
70548 /* For anyone to insert markers. Remember trailing newline. */
70549 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
70550@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
70551 {
70552 }
70553
70554-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
70555+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
70556 {
70557 }
70558
70559diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
70560index c74092e..b663967 100644
70561--- a/include/linux/mmzone.h
70562+++ b/include/linux/mmzone.h
70563@@ -396,7 +396,7 @@ struct zone {
70564 unsigned long flags; /* zone flags, see below */
70565
70566 /* Zone statistics */
70567- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70568+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70569
70570 /*
70571 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
70572diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
70573index 779cf7c..e6768240 100644
70574--- a/include/linux/mod_devicetable.h
70575+++ b/include/linux/mod_devicetable.h
70576@@ -12,7 +12,7 @@
70577 typedef unsigned long kernel_ulong_t;
70578 #endif
70579
70580-#define PCI_ANY_ID (~0)
70581+#define PCI_ANY_ID ((__u16)~0)
70582
70583 struct pci_device_id {
70584 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
70585@@ -138,7 +138,7 @@ struct usb_device_id {
70586 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
70587 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
70588
70589-#define HID_ANY_ID (~0)
70590+#define HID_ANY_ID (~0U)
70591 #define HID_BUS_ANY 0xffff
70592 #define HID_GROUP_ANY 0x0000
70593
70594@@ -464,7 +464,7 @@ struct dmi_system_id {
70595 const char *ident;
70596 struct dmi_strmatch matches[4];
70597 void *driver_data;
70598-};
70599+} __do_const;
70600 /*
70601 * struct dmi_device_id appears during expansion of
70602 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
70603diff --git a/include/linux/module.h b/include/linux/module.h
70604index ead1b57..81a3b6c 100644
70605--- a/include/linux/module.h
70606+++ b/include/linux/module.h
70607@@ -17,9 +17,11 @@
70608 #include <linux/moduleparam.h>
70609 #include <linux/tracepoint.h>
70610 #include <linux/export.h>
70611+#include <linux/fs.h>
70612
70613 #include <linux/percpu.h>
70614 #include <asm/module.h>
70615+#include <asm/pgtable.h>
70616
70617 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
70618 #define MODULE_SIG_STRING "~Module signature appended~\n"
70619@@ -54,12 +56,13 @@ struct module_attribute {
70620 int (*test)(struct module *);
70621 void (*free)(struct module *);
70622 };
70623+typedef struct module_attribute __no_const module_attribute_no_const;
70624
70625 struct module_version_attribute {
70626 struct module_attribute mattr;
70627 const char *module_name;
70628 const char *version;
70629-} __attribute__ ((__aligned__(sizeof(void *))));
70630+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
70631
70632 extern ssize_t __modver_version_show(struct module_attribute *,
70633 struct module_kobject *, char *);
70634@@ -232,7 +235,7 @@ struct module
70635
70636 /* Sysfs stuff. */
70637 struct module_kobject mkobj;
70638- struct module_attribute *modinfo_attrs;
70639+ module_attribute_no_const *modinfo_attrs;
70640 const char *version;
70641 const char *srcversion;
70642 struct kobject *holders_dir;
70643@@ -281,19 +284,16 @@ struct module
70644 int (*init)(void);
70645
70646 /* If this is non-NULL, vfree after init() returns */
70647- void *module_init;
70648+ void *module_init_rx, *module_init_rw;
70649
70650 /* Here is the actual code + data, vfree'd on unload. */
70651- void *module_core;
70652+ void *module_core_rx, *module_core_rw;
70653
70654 /* Here are the sizes of the init and core sections */
70655- unsigned int init_size, core_size;
70656+ unsigned int init_size_rw, core_size_rw;
70657
70658 /* The size of the executable code in each section. */
70659- unsigned int init_text_size, core_text_size;
70660-
70661- /* Size of RO sections of the module (text+rodata) */
70662- unsigned int init_ro_size, core_ro_size;
70663+ unsigned int init_size_rx, core_size_rx;
70664
70665 /* Arch-specific module values */
70666 struct mod_arch_specific arch;
70667@@ -349,6 +349,10 @@ struct module
70668 #ifdef CONFIG_EVENT_TRACING
70669 struct ftrace_event_call **trace_events;
70670 unsigned int num_trace_events;
70671+ struct file_operations trace_id;
70672+ struct file_operations trace_enable;
70673+ struct file_operations trace_format;
70674+ struct file_operations trace_filter;
70675 #endif
70676 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
70677 unsigned int num_ftrace_callsites;
70678@@ -396,16 +400,46 @@ bool is_module_address(unsigned long addr);
70679 bool is_module_percpu_address(unsigned long addr);
70680 bool is_module_text_address(unsigned long addr);
70681
70682+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
70683+{
70684+
70685+#ifdef CONFIG_PAX_KERNEXEC
70686+ if (ktla_ktva(addr) >= (unsigned long)start &&
70687+ ktla_ktva(addr) < (unsigned long)start + size)
70688+ return 1;
70689+#endif
70690+
70691+ return ((void *)addr >= start && (void *)addr < start + size);
70692+}
70693+
70694+static inline int within_module_core_rx(unsigned long addr, const struct module *mod)
70695+{
70696+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
70697+}
70698+
70699+static inline int within_module_core_rw(unsigned long addr, const struct module *mod)
70700+{
70701+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
70702+}
70703+
70704+static inline int within_module_init_rx(unsigned long addr, const struct module *mod)
70705+{
70706+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
70707+}
70708+
70709+static inline int within_module_init_rw(unsigned long addr, const struct module *mod)
70710+{
70711+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
70712+}
70713+
70714 static inline int within_module_core(unsigned long addr, const struct module *mod)
70715 {
70716- return (unsigned long)mod->module_core <= addr &&
70717- addr < (unsigned long)mod->module_core + mod->core_size;
70718+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
70719 }
70720
70721 static inline int within_module_init(unsigned long addr, const struct module *mod)
70722 {
70723- return (unsigned long)mod->module_init <= addr &&
70724- addr < (unsigned long)mod->module_init + mod->init_size;
70725+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
70726 }
70727
70728 /* Search for module by name: must hold module_mutex. */
70729diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
70730index 560ca53..ef621ef 100644
70731--- a/include/linux/moduleloader.h
70732+++ b/include/linux/moduleloader.h
70733@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
70734 sections. Returns NULL on failure. */
70735 void *module_alloc(unsigned long size);
70736
70737+#ifdef CONFIG_PAX_KERNEXEC
70738+void *module_alloc_exec(unsigned long size);
70739+#else
70740+#define module_alloc_exec(x) module_alloc(x)
70741+#endif
70742+
70743 /* Free memory returned from module_alloc. */
70744 void module_free(struct module *mod, void *module_region);
70745
70746+#ifdef CONFIG_PAX_KERNEXEC
70747+void module_free_exec(struct module *mod, void *module_region);
70748+#else
70749+#define module_free_exec(x, y) module_free((x), (y))
70750+#endif
70751+
70752 /*
70753 * Apply the given relocation to the (simplified) ELF. Return -error
70754 * or 0.
70755@@ -45,7 +57,9 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
70756 unsigned int relsec,
70757 struct module *me)
70758 {
70759+#ifdef CONFIG_MODULES
70760 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
70761+#endif
70762 return -ENOEXEC;
70763 }
70764 #endif
70765@@ -67,7 +81,9 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
70766 unsigned int relsec,
70767 struct module *me)
70768 {
70769+#ifdef CONFIG_MODULES
70770 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
70771+#endif
70772 return -ENOEXEC;
70773 }
70774 #endif
70775diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
70776index 137b419..fe663ec 100644
70777--- a/include/linux/moduleparam.h
70778+++ b/include/linux/moduleparam.h
70779@@ -284,7 +284,7 @@ static inline void __kernel_param_unlock(void)
70780 * @len is usually just sizeof(string).
70781 */
70782 #define module_param_string(name, string, len, perm) \
70783- static const struct kparam_string __param_string_##name \
70784+ static const struct kparam_string __param_string_##name __used \
70785 = { len, string }; \
70786 __module_param_call(MODULE_PARAM_PREFIX, name, \
70787 &param_ops_string, \
70788@@ -423,7 +423,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
70789 */
70790 #define module_param_array_named(name, array, type, nump, perm) \
70791 param_check_##type(name, &(array)[0]); \
70792- static const struct kparam_array __param_arr_##name \
70793+ static const struct kparam_array __param_arr_##name __used \
70794 = { .max = ARRAY_SIZE(array), .num = nump, \
70795 .ops = &param_ops_##type, \
70796 .elemsize = sizeof(array[0]), .elem = array }; \
70797diff --git a/include/linux/namei.h b/include/linux/namei.h
70798index 5a5ff57..5ae5070 100644
70799--- a/include/linux/namei.h
70800+++ b/include/linux/namei.h
70801@@ -19,7 +19,7 @@ struct nameidata {
70802 unsigned seq;
70803 int last_type;
70804 unsigned depth;
70805- char *saved_names[MAX_NESTED_LINKS + 1];
70806+ const char *saved_names[MAX_NESTED_LINKS + 1];
70807 };
70808
70809 /*
70810@@ -84,12 +84,12 @@ extern void unlock_rename(struct dentry *, struct dentry *);
70811
70812 extern void nd_jump_link(struct nameidata *nd, struct path *path);
70813
70814-static inline void nd_set_link(struct nameidata *nd, char *path)
70815+static inline void nd_set_link(struct nameidata *nd, const char *path)
70816 {
70817 nd->saved_names[nd->depth] = path;
70818 }
70819
70820-static inline char *nd_get_link(struct nameidata *nd)
70821+static inline const char *nd_get_link(const struct nameidata *nd)
70822 {
70823 return nd->saved_names[nd->depth];
70824 }
70825diff --git a/include/linux/net.h b/include/linux/net.h
70826index aa16731..514b875 100644
70827--- a/include/linux/net.h
70828+++ b/include/linux/net.h
70829@@ -183,7 +183,7 @@ struct net_proto_family {
70830 int (*create)(struct net *net, struct socket *sock,
70831 int protocol, int kern);
70832 struct module *owner;
70833-};
70834+} __do_const;
70835
70836 struct iovec;
70837 struct kvec;
70838diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
70839index 6151e90..2e0afb0 100644
70840--- a/include/linux/netdevice.h
70841+++ b/include/linux/netdevice.h
70842@@ -1028,6 +1028,7 @@ struct net_device_ops {
70843 int (*ndo_change_carrier)(struct net_device *dev,
70844 bool new_carrier);
70845 };
70846+typedef struct net_device_ops __no_const net_device_ops_no_const;
70847
70848 /*
70849 * The DEVICE structure.
70850@@ -1094,7 +1095,7 @@ struct net_device {
70851 int iflink;
70852
70853 struct net_device_stats stats;
70854- atomic_long_t rx_dropped; /* dropped packets by core network
70855+ atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
70856 * Do not use this in drivers.
70857 */
70858
70859diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
70860index ee14284..bc65d63 100644
70861--- a/include/linux/netfilter.h
70862+++ b/include/linux/netfilter.h
70863@@ -82,7 +82,7 @@ struct nf_sockopt_ops {
70864 #endif
70865 /* Use the module struct to lock set/get code in place */
70866 struct module *owner;
70867-};
70868+} __do_const;
70869
70870 /* Function to register/unregister hook points. */
70871 int nf_register_hook(struct nf_hook_ops *reg);
70872diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
70873index 7958e84..ed74d7a 100644
70874--- a/include/linux/netfilter/ipset/ip_set.h
70875+++ b/include/linux/netfilter/ipset/ip_set.h
70876@@ -98,7 +98,7 @@ struct ip_set_type_variant {
70877 /* Return true if "b" set is the same as "a"
70878 * according to the create set parameters */
70879 bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
70880-};
70881+} __do_const;
70882
70883 /* The core set type structure */
70884 struct ip_set_type {
70885diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
70886index ecbb8e4..8a1c4e1 100644
70887--- a/include/linux/netfilter/nfnetlink.h
70888+++ b/include/linux/netfilter/nfnetlink.h
70889@@ -16,7 +16,7 @@ struct nfnl_callback {
70890 const struct nlattr * const cda[]);
70891 const struct nla_policy *policy; /* netlink attribute policy */
70892 const u_int16_t attr_count; /* number of nlattr's */
70893-};
70894+} __do_const;
70895
70896 struct nfnetlink_subsystem {
70897 const char *name;
70898diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
70899new file mode 100644
70900index 0000000..33f4af8
70901--- /dev/null
70902+++ b/include/linux/netfilter/xt_gradm.h
70903@@ -0,0 +1,9 @@
70904+#ifndef _LINUX_NETFILTER_XT_GRADM_H
70905+#define _LINUX_NETFILTER_XT_GRADM_H 1
70906+
70907+struct xt_gradm_mtinfo {
70908+ __u16 flags;
70909+ __u16 invflags;
70910+};
70911+
70912+#endif
70913diff --git a/include/linux/nls.h b/include/linux/nls.h
70914index 5dc635f..35f5e11 100644
70915--- a/include/linux/nls.h
70916+++ b/include/linux/nls.h
70917@@ -31,7 +31,7 @@ struct nls_table {
70918 const unsigned char *charset2upper;
70919 struct module *owner;
70920 struct nls_table *next;
70921-};
70922+} __do_const;
70923
70924 /* this value hold the maximum octet of charset */
70925 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
70926diff --git a/include/linux/notifier.h b/include/linux/notifier.h
70927index d65746e..62e72c2 100644
70928--- a/include/linux/notifier.h
70929+++ b/include/linux/notifier.h
70930@@ -51,7 +51,8 @@ struct notifier_block {
70931 int (*notifier_call)(struct notifier_block *, unsigned long, void *);
70932 struct notifier_block __rcu *next;
70933 int priority;
70934-};
70935+} __do_const;
70936+typedef struct notifier_block __no_const notifier_block_no_const;
70937
70938 struct atomic_notifier_head {
70939 spinlock_t lock;
70940diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
70941index a4c5624..79d6d88 100644
70942--- a/include/linux/oprofile.h
70943+++ b/include/linux/oprofile.h
70944@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
70945 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
70946 char const * name, ulong * val);
70947
70948-/** Create a file for read-only access to an atomic_t. */
70949+/** Create a file for read-only access to an atomic_unchecked_t. */
70950 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
70951- char const * name, atomic_t * val);
70952+ char const * name, atomic_unchecked_t * val);
70953
70954 /** create a directory */
70955 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
70956diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
70957index 45fc162..01a4068 100644
70958--- a/include/linux/pci_hotplug.h
70959+++ b/include/linux/pci_hotplug.h
70960@@ -80,7 +80,8 @@ struct hotplug_slot_ops {
70961 int (*get_attention_status) (struct hotplug_slot *slot, u8 *value);
70962 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
70963 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
70964-};
70965+} __do_const;
70966+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
70967
70968 /**
70969 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
70970diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
70971index 1d795df..b0a6449 100644
70972--- a/include/linux/perf_event.h
70973+++ b/include/linux/perf_event.h
70974@@ -333,8 +333,8 @@ struct perf_event {
70975
70976 enum perf_event_active_state state;
70977 unsigned int attach_state;
70978- local64_t count;
70979- atomic64_t child_count;
70980+ local64_t count; /* PaX: fix it one day */
70981+ atomic64_unchecked_t child_count;
70982
70983 /*
70984 * These are the total time in nanoseconds that the event
70985@@ -385,8 +385,8 @@ struct perf_event {
70986 * These accumulate total time (in nanoseconds) that children
70987 * events have been enabled and running, respectively.
70988 */
70989- atomic64_t child_total_time_enabled;
70990- atomic64_t child_total_time_running;
70991+ atomic64_unchecked_t child_total_time_enabled;
70992+ atomic64_unchecked_t child_total_time_running;
70993
70994 /*
70995 * Protect attach/detach and child_list:
70996@@ -704,7 +704,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
70997 entry->ip[entry->nr++] = ip;
70998 }
70999
71000-extern int sysctl_perf_event_paranoid;
71001+extern int sysctl_perf_event_legitimately_concerned;
71002 extern int sysctl_perf_event_mlock;
71003 extern int sysctl_perf_event_sample_rate;
71004
71005@@ -712,19 +712,24 @@ extern int perf_proc_update_handler(struct ctl_table *table, int write,
71006 void __user *buffer, size_t *lenp,
71007 loff_t *ppos);
71008
71009+static inline bool perf_paranoid_any(void)
71010+{
71011+ return sysctl_perf_event_legitimately_concerned > 2;
71012+}
71013+
71014 static inline bool perf_paranoid_tracepoint_raw(void)
71015 {
71016- return sysctl_perf_event_paranoid > -1;
71017+ return sysctl_perf_event_legitimately_concerned > -1;
71018 }
71019
71020 static inline bool perf_paranoid_cpu(void)
71021 {
71022- return sysctl_perf_event_paranoid > 0;
71023+ return sysctl_perf_event_legitimately_concerned > 0;
71024 }
71025
71026 static inline bool perf_paranoid_kernel(void)
71027 {
71028- return sysctl_perf_event_paranoid > 1;
71029+ return sysctl_perf_event_legitimately_concerned > 1;
71030 }
71031
71032 extern void perf_event_init(void);
71033@@ -812,7 +817,7 @@ static inline void perf_restore_debug_store(void) { }
71034 */
71035 #define perf_cpu_notifier(fn) \
71036 do { \
71037- static struct notifier_block fn##_nb __cpuinitdata = \
71038+ static struct notifier_block fn##_nb = \
71039 { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
71040 unsigned long cpu = smp_processor_id(); \
71041 unsigned long flags; \
71042@@ -831,7 +836,7 @@ do { \
71043 struct perf_pmu_events_attr {
71044 struct device_attribute attr;
71045 u64 id;
71046-};
71047+} __do_const;
71048
71049 #define PMU_EVENT_ATTR(_name, _var, _id, _show) \
71050 static struct perf_pmu_events_attr _var = { \
71051diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
71052index ad1a427..6419649 100644
71053--- a/include/linux/pipe_fs_i.h
71054+++ b/include/linux/pipe_fs_i.h
71055@@ -45,9 +45,9 @@ struct pipe_buffer {
71056 struct pipe_inode_info {
71057 wait_queue_head_t wait;
71058 unsigned int nrbufs, curbuf, buffers;
71059- unsigned int readers;
71060- unsigned int writers;
71061- unsigned int waiting_writers;
71062+ atomic_t readers;
71063+ atomic_t writers;
71064+ atomic_t waiting_writers;
71065 unsigned int r_counter;
71066 unsigned int w_counter;
71067 struct page *tmp_page;
71068diff --git a/include/linux/platform_data/usb-ehci-s5p.h b/include/linux/platform_data/usb-ehci-s5p.h
71069index 5f28cae..3d23723 100644
71070--- a/include/linux/platform_data/usb-ehci-s5p.h
71071+++ b/include/linux/platform_data/usb-ehci-s5p.h
71072@@ -14,7 +14,7 @@
71073 struct s5p_ehci_platdata {
71074 int (*phy_init)(struct platform_device *pdev, int type);
71075 int (*phy_exit)(struct platform_device *pdev, int type);
71076-};
71077+} __no_const;
71078
71079 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
71080
71081diff --git a/include/linux/platform_data/usb-exynos.h b/include/linux/platform_data/usb-exynos.h
71082index c256c59..8ea94c7 100644
71083--- a/include/linux/platform_data/usb-exynos.h
71084+++ b/include/linux/platform_data/usb-exynos.h
71085@@ -14,7 +14,7 @@
71086 struct exynos4_ohci_platdata {
71087 int (*phy_init)(struct platform_device *pdev, int type);
71088 int (*phy_exit)(struct platform_device *pdev, int type);
71089-};
71090+} __no_const;
71091
71092 extern void exynos4_ohci_set_platdata(struct exynos4_ohci_platdata *pd);
71093
71094diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
71095index 7c1d252..c5c773e 100644
71096--- a/include/linux/pm_domain.h
71097+++ b/include/linux/pm_domain.h
71098@@ -48,7 +48,7 @@ struct gpd_dev_ops {
71099
71100 struct gpd_cpu_data {
71101 unsigned int saved_exit_latency;
71102- struct cpuidle_state *idle_state;
71103+ cpuidle_state_no_const *idle_state;
71104 };
71105
71106 struct generic_pm_domain {
71107diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
71108index 7d7e09e..8671ef8 100644
71109--- a/include/linux/pm_runtime.h
71110+++ b/include/linux/pm_runtime.h
71111@@ -104,7 +104,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
71112
71113 static inline void pm_runtime_mark_last_busy(struct device *dev)
71114 {
71115- ACCESS_ONCE(dev->power.last_busy) = jiffies;
71116+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
71117 }
71118
71119 #else /* !CONFIG_PM_RUNTIME */
71120diff --git a/include/linux/pnp.h b/include/linux/pnp.h
71121index 195aafc..49a7bc2 100644
71122--- a/include/linux/pnp.h
71123+++ b/include/linux/pnp.h
71124@@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
71125 struct pnp_fixup {
71126 char id[7];
71127 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
71128-};
71129+} __do_const;
71130
71131 /* config parameters */
71132 #define PNP_CONFIG_NORMAL 0x0001
71133diff --git a/include/linux/poison.h b/include/linux/poison.h
71134index 2110a81..13a11bb 100644
71135--- a/include/linux/poison.h
71136+++ b/include/linux/poison.h
71137@@ -19,8 +19,8 @@
71138 * under normal circumstances, used to verify that nobody uses
71139 * non-initialized list entries.
71140 */
71141-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
71142-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
71143+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
71144+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
71145
71146 /********** include/linux/timer.h **********/
71147 /*
71148diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
71149index c0f44c2..1572583 100644
71150--- a/include/linux/power/smartreflex.h
71151+++ b/include/linux/power/smartreflex.h
71152@@ -238,7 +238,7 @@ struct omap_sr_class_data {
71153 int (*notify)(struct omap_sr *sr, u32 status);
71154 u8 notify_flags;
71155 u8 class_type;
71156-};
71157+} __do_const;
71158
71159 /**
71160 * struct omap_sr_nvalue_table - Smartreflex n-target value info
71161diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
71162index 4ea1d37..80f4b33 100644
71163--- a/include/linux/ppp-comp.h
71164+++ b/include/linux/ppp-comp.h
71165@@ -84,7 +84,7 @@ struct compressor {
71166 struct module *owner;
71167 /* Extra skb space needed by the compressor algorithm */
71168 unsigned int comp_extra;
71169-};
71170+} __do_const;
71171
71172 /*
71173 * The return value from decompress routine is the length of the
71174diff --git a/include/linux/printk.h b/include/linux/printk.h
71175index 822171f..12b30e8 100644
71176--- a/include/linux/printk.h
71177+++ b/include/linux/printk.h
71178@@ -98,6 +98,8 @@ int no_printk(const char *fmt, ...)
71179 extern asmlinkage __printf(1, 2)
71180 void early_printk(const char *fmt, ...);
71181
71182+extern int kptr_restrict;
71183+
71184 #ifdef CONFIG_PRINTK
71185 asmlinkage __printf(5, 0)
71186 int vprintk_emit(int facility, int level,
71187@@ -132,7 +134,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
71188
71189 extern int printk_delay_msec;
71190 extern int dmesg_restrict;
71191-extern int kptr_restrict;
71192
71193 extern void wake_up_klogd(void);
71194
71195diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
71196index 94dfb2a..88b9d3b 100644
71197--- a/include/linux/proc_fs.h
71198+++ b/include/linux/proc_fs.h
71199@@ -165,6 +165,18 @@ static inline struct proc_dir_entry *proc_create(const char *name, umode_t mode,
71200 return proc_create_data(name, mode, parent, proc_fops, NULL);
71201 }
71202
71203+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
71204+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
71205+{
71206+#ifdef CONFIG_GRKERNSEC_PROC_USER
71207+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
71208+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71209+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
71210+#else
71211+ return proc_create_data(name, mode, parent, proc_fops, NULL);
71212+#endif
71213+}
71214+
71215 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
71216 umode_t mode, struct proc_dir_entry *base,
71217 read_proc_t *read_proc, void * data)
71218@@ -266,7 +278,7 @@ struct proc_ns_operations {
71219 void (*put)(void *ns);
71220 int (*install)(struct nsproxy *nsproxy, void *ns);
71221 unsigned int (*inum)(void *ns);
71222-};
71223+} __do_const;
71224 extern const struct proc_ns_operations netns_operations;
71225 extern const struct proc_ns_operations utsns_operations;
71226 extern const struct proc_ns_operations ipcns_operations;
71227diff --git a/include/linux/random.h b/include/linux/random.h
71228index 347ce55..880f97c 100644
71229--- a/include/linux/random.h
71230+++ b/include/linux/random.h
71231@@ -39,6 +39,11 @@ void prandom_seed(u32 seed);
71232 u32 prandom_u32_state(struct rnd_state *);
71233 void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
71234
71235+static inline unsigned long pax_get_random_long(void)
71236+{
71237+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
71238+}
71239+
71240 /*
71241 * Handle minimum values for seeds
71242 */
71243diff --git a/include/linux/rculist.h b/include/linux/rculist.h
71244index 8089e35..3a0d59a 100644
71245--- a/include/linux/rculist.h
71246+++ b/include/linux/rculist.h
71247@@ -44,6 +44,9 @@ extern void __list_add_rcu(struct list_head *new,
71248 struct list_head *prev, struct list_head *next);
71249 #endif
71250
71251+extern void __pax_list_add_rcu(struct list_head *new,
71252+ struct list_head *prev, struct list_head *next);
71253+
71254 /**
71255 * list_add_rcu - add a new entry to rcu-protected list
71256 * @new: new entry to be added
71257@@ -65,6 +68,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
71258 __list_add_rcu(new, head, head->next);
71259 }
71260
71261+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
71262+{
71263+ __pax_list_add_rcu(new, head, head->next);
71264+}
71265+
71266 /**
71267 * list_add_tail_rcu - add a new entry to rcu-protected list
71268 * @new: new entry to be added
71269@@ -87,6 +95,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
71270 __list_add_rcu(new, head->prev, head);
71271 }
71272
71273+static inline void pax_list_add_tail_rcu(struct list_head *new,
71274+ struct list_head *head)
71275+{
71276+ __pax_list_add_rcu(new, head->prev, head);
71277+}
71278+
71279 /**
71280 * list_del_rcu - deletes entry from list without re-initialization
71281 * @entry: the element to delete from the list.
71282@@ -117,6 +131,8 @@ static inline void list_del_rcu(struct list_head *entry)
71283 entry->prev = LIST_POISON2;
71284 }
71285
71286+extern void pax_list_del_rcu(struct list_head *entry);
71287+
71288 /**
71289 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
71290 * @n: the element to delete from the hash list.
71291diff --git a/include/linux/reboot.h b/include/linux/reboot.h
71292index 23b3630..e1bc12b 100644
71293--- a/include/linux/reboot.h
71294+++ b/include/linux/reboot.h
71295@@ -18,9 +18,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
71296 * Architecture-specific implementations of sys_reboot commands.
71297 */
71298
71299-extern void machine_restart(char *cmd);
71300-extern void machine_halt(void);
71301-extern void machine_power_off(void);
71302+extern void machine_restart(char *cmd) __noreturn;
71303+extern void machine_halt(void) __noreturn;
71304+extern void machine_power_off(void) __noreturn;
71305
71306 extern void machine_shutdown(void);
71307 struct pt_regs;
71308@@ -31,9 +31,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
71309 */
71310
71311 extern void kernel_restart_prepare(char *cmd);
71312-extern void kernel_restart(char *cmd);
71313-extern void kernel_halt(void);
71314-extern void kernel_power_off(void);
71315+extern void kernel_restart(char *cmd) __noreturn;
71316+extern void kernel_halt(void) __noreturn;
71317+extern void kernel_power_off(void) __noreturn;
71318
71319 extern int C_A_D; /* for sysctl */
71320 void ctrl_alt_del(void);
71321@@ -47,7 +47,7 @@ extern int orderly_poweroff(bool force);
71322 * Emergency restart, callable from an interrupt handler.
71323 */
71324
71325-extern void emergency_restart(void);
71326+extern void emergency_restart(void) __noreturn;
71327 #include <asm/emergency-restart.h>
71328
71329 #endif /* _LINUX_REBOOT_H */
71330diff --git a/include/linux/regset.h b/include/linux/regset.h
71331index 8e0c9fe..ac4d221 100644
71332--- a/include/linux/regset.h
71333+++ b/include/linux/regset.h
71334@@ -161,7 +161,8 @@ struct user_regset {
71335 unsigned int align;
71336 unsigned int bias;
71337 unsigned int core_note_type;
71338-};
71339+} __do_const;
71340+typedef struct user_regset __no_const user_regset_no_const;
71341
71342 /**
71343 * struct user_regset_view - available regsets
71344diff --git a/include/linux/relay.h b/include/linux/relay.h
71345index 91cacc3..b55ff74 100644
71346--- a/include/linux/relay.h
71347+++ b/include/linux/relay.h
71348@@ -160,7 +160,7 @@ struct rchan_callbacks
71349 * The callback should return 0 if successful, negative if not.
71350 */
71351 int (*remove_buf_file)(struct dentry *dentry);
71352-};
71353+} __no_const;
71354
71355 /*
71356 * CONFIG_RELAY kernel API, kernel/relay.c
71357diff --git a/include/linux/rio.h b/include/linux/rio.h
71358index a3e7842..d973ca6 100644
71359--- a/include/linux/rio.h
71360+++ b/include/linux/rio.h
71361@@ -339,7 +339,7 @@ struct rio_ops {
71362 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
71363 u64 rstart, u32 size, u32 flags);
71364 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
71365-};
71366+} __no_const;
71367
71368 #define RIO_RESOURCE_MEM 0x00000100
71369 #define RIO_RESOURCE_DOORBELL 0x00000200
71370diff --git a/include/linux/rmap.h b/include/linux/rmap.h
71371index 6dacb93..6174423 100644
71372--- a/include/linux/rmap.h
71373+++ b/include/linux/rmap.h
71374@@ -145,8 +145,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
71375 void anon_vma_init(void); /* create anon_vma_cachep */
71376 int anon_vma_prepare(struct vm_area_struct *);
71377 void unlink_anon_vmas(struct vm_area_struct *);
71378-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
71379-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
71380+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
71381+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
71382
71383 static inline void anon_vma_merge(struct vm_area_struct *vma,
71384 struct vm_area_struct *next)
71385diff --git a/include/linux/sched.h b/include/linux/sched.h
71386index be4e742..7f9d593 100644
71387--- a/include/linux/sched.h
71388+++ b/include/linux/sched.h
71389@@ -62,6 +62,7 @@ struct bio_list;
71390 struct fs_struct;
71391 struct perf_event_context;
71392 struct blk_plug;
71393+struct linux_binprm;
71394
71395 /*
71396 * List of flags we want to share for kernel threads,
71397@@ -315,7 +316,7 @@ extern char __sched_text_start[], __sched_text_end[];
71398 extern int in_sched_functions(unsigned long addr);
71399
71400 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
71401-extern signed long schedule_timeout(signed long timeout);
71402+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
71403 extern signed long schedule_timeout_interruptible(signed long timeout);
71404 extern signed long schedule_timeout_killable(signed long timeout);
71405 extern signed long schedule_timeout_uninterruptible(signed long timeout);
71406@@ -329,6 +330,18 @@ struct user_namespace;
71407 #include <linux/aio.h>
71408
71409 #ifdef CONFIG_MMU
71410+
71411+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
71412+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
71413+#else
71414+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
71415+{
71416+ return 0;
71417+}
71418+#endif
71419+
71420+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
71421+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
71422 extern void arch_pick_mmap_layout(struct mm_struct *mm);
71423 extern unsigned long
71424 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
71425@@ -605,6 +618,17 @@ struct signal_struct {
71426 #ifdef CONFIG_TASKSTATS
71427 struct taskstats *stats;
71428 #endif
71429+
71430+#ifdef CONFIG_GRKERNSEC
71431+ u32 curr_ip;
71432+ u32 saved_ip;
71433+ u32 gr_saddr;
71434+ u32 gr_daddr;
71435+ u16 gr_sport;
71436+ u16 gr_dport;
71437+ u8 used_accept:1;
71438+#endif
71439+
71440 #ifdef CONFIG_AUDIT
71441 unsigned audit_tty;
71442 struct tty_audit_buf *tty_audit_buf;
71443@@ -683,6 +707,11 @@ struct user_struct {
71444 struct key *session_keyring; /* UID's default session keyring */
71445 #endif
71446
71447+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
71448+ unsigned int banned;
71449+ unsigned long ban_expires;
71450+#endif
71451+
71452 /* Hash table maintenance information */
71453 struct hlist_node uidhash_node;
71454 kuid_t uid;
71455@@ -1082,7 +1111,7 @@ struct sched_class {
71456 #ifdef CONFIG_FAIR_GROUP_SCHED
71457 void (*task_move_group) (struct task_struct *p, int on_rq);
71458 #endif
71459-};
71460+} __do_const;
71461
71462 struct load_weight {
71463 unsigned long weight, inv_weight;
71464@@ -1323,8 +1352,8 @@ struct task_struct {
71465 struct list_head thread_group;
71466
71467 struct completion *vfork_done; /* for vfork() */
71468- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
71469- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
71470+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
71471+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
71472
71473 cputime_t utime, stime, utimescaled, stimescaled;
71474 cputime_t gtime;
71475@@ -1349,11 +1378,6 @@ struct task_struct {
71476 struct task_cputime cputime_expires;
71477 struct list_head cpu_timers[3];
71478
71479-/* process credentials */
71480- const struct cred __rcu *real_cred; /* objective and real subjective task
71481- * credentials (COW) */
71482- const struct cred __rcu *cred; /* effective (overridable) subjective task
71483- * credentials (COW) */
71484 char comm[TASK_COMM_LEN]; /* executable name excluding path
71485 - access with [gs]et_task_comm (which lock
71486 it with task_lock())
71487@@ -1370,6 +1394,10 @@ struct task_struct {
71488 #endif
71489 /* CPU-specific state of this task */
71490 struct thread_struct thread;
71491+/* thread_info moved to task_struct */
71492+#ifdef CONFIG_X86
71493+ struct thread_info tinfo;
71494+#endif
71495 /* filesystem information */
71496 struct fs_struct *fs;
71497 /* open file information */
71498@@ -1443,6 +1471,10 @@ struct task_struct {
71499 gfp_t lockdep_reclaim_gfp;
71500 #endif
71501
71502+/* process credentials */
71503+ const struct cred __rcu *real_cred; /* objective and real subjective task
71504+ * credentials (COW) */
71505+
71506 /* journalling filesystem info */
71507 void *journal_info;
71508
71509@@ -1481,6 +1513,10 @@ struct task_struct {
71510 /* cg_list protected by css_set_lock and tsk->alloc_lock */
71511 struct list_head cg_list;
71512 #endif
71513+
71514+ const struct cred __rcu *cred; /* effective (overridable) subjective task
71515+ * credentials (COW) */
71516+
71517 #ifdef CONFIG_FUTEX
71518 struct robust_list_head __user *robust_list;
71519 #ifdef CONFIG_COMPAT
71520@@ -1577,8 +1613,74 @@ struct task_struct {
71521 #ifdef CONFIG_UPROBES
71522 struct uprobe_task *utask;
71523 #endif
71524+
71525+#ifdef CONFIG_GRKERNSEC
71526+ /* grsecurity */
71527+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71528+ u64 exec_id;
71529+#endif
71530+#ifdef CONFIG_GRKERNSEC_SETXID
71531+ const struct cred *delayed_cred;
71532+#endif
71533+ struct dentry *gr_chroot_dentry;
71534+ struct acl_subject_label *acl;
71535+ struct acl_role_label *role;
71536+ struct file *exec_file;
71537+ unsigned long brute_expires;
71538+ u16 acl_role_id;
71539+ /* is this the task that authenticated to the special role */
71540+ u8 acl_sp_role;
71541+ u8 is_writable;
71542+ u8 brute;
71543+ u8 gr_is_chrooted;
71544+#endif
71545+
71546 };
71547
71548+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
71549+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
71550+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
71551+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
71552+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
71553+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
71554+
71555+#ifdef CONFIG_PAX_SOFTMODE
71556+extern int pax_softmode;
71557+#endif
71558+
71559+extern int pax_check_flags(unsigned long *);
71560+
71561+/* if tsk != current then task_lock must be held on it */
71562+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
71563+static inline unsigned long pax_get_flags(struct task_struct *tsk)
71564+{
71565+ if (likely(tsk->mm))
71566+ return tsk->mm->pax_flags;
71567+ else
71568+ return 0UL;
71569+}
71570+
71571+/* if tsk != current then task_lock must be held on it */
71572+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
71573+{
71574+ if (likely(tsk->mm)) {
71575+ tsk->mm->pax_flags = flags;
71576+ return 0;
71577+ }
71578+ return -EINVAL;
71579+}
71580+#endif
71581+
71582+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
71583+extern void pax_set_initial_flags(struct linux_binprm *bprm);
71584+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
71585+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
71586+#endif
71587+
71588+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
71589+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
71590+extern void pax_report_refcount_overflow(struct pt_regs *regs);
71591+
71592 /* Future-safe accessor for struct task_struct's cpus_allowed. */
71593 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
71594
71595@@ -1637,7 +1739,7 @@ struct pid_namespace;
71596 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
71597 struct pid_namespace *ns);
71598
71599-static inline pid_t task_pid_nr(struct task_struct *tsk)
71600+static inline pid_t task_pid_nr(const struct task_struct *tsk)
71601 {
71602 return tsk->pid;
71603 }
71604@@ -2073,7 +2175,9 @@ void yield(void);
71605 extern struct exec_domain default_exec_domain;
71606
71607 union thread_union {
71608+#ifndef CONFIG_X86
71609 struct thread_info thread_info;
71610+#endif
71611 unsigned long stack[THREAD_SIZE/sizeof(long)];
71612 };
71613
71614@@ -2106,6 +2210,7 @@ extern struct pid_namespace init_pid_ns;
71615 */
71616
71617 extern struct task_struct *find_task_by_vpid(pid_t nr);
71618+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
71619 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
71620 struct pid_namespace *ns);
71621
71622@@ -2272,7 +2377,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
71623 extern void exit_itimers(struct signal_struct *);
71624 extern void flush_itimer_signals(void);
71625
71626-extern void do_group_exit(int);
71627+extern __noreturn void do_group_exit(int);
71628
71629 extern int allow_signal(int);
71630 extern int disallow_signal(int);
71631@@ -2463,9 +2568,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
71632
71633 #endif
71634
71635-static inline int object_is_on_stack(void *obj)
71636+static inline int object_starts_on_stack(void *obj)
71637 {
71638- void *stack = task_stack_page(current);
71639+ const void *stack = task_stack_page(current);
71640
71641 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
71642 }
71643diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
71644index bf8086b..962b035 100644
71645--- a/include/linux/sched/sysctl.h
71646+++ b/include/linux/sched/sysctl.h
71647@@ -30,6 +30,7 @@ enum { sysctl_hung_task_timeout_secs = 0 };
71648 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
71649
71650 extern int sysctl_max_map_count;
71651+extern unsigned long sysctl_heap_stack_gap;
71652
71653 extern unsigned int sysctl_sched_latency;
71654 extern unsigned int sysctl_sched_min_granularity;
71655diff --git a/include/linux/security.h b/include/linux/security.h
71656index 032c366..2c1c2dc2 100644
71657--- a/include/linux/security.h
71658+++ b/include/linux/security.h
71659@@ -26,6 +26,7 @@
71660 #include <linux/capability.h>
71661 #include <linux/slab.h>
71662 #include <linux/err.h>
71663+#include <linux/grsecurity.h>
71664
71665 struct linux_binprm;
71666 struct cred;
71667diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
71668index 68a04a3..866e6a1 100644
71669--- a/include/linux/seq_file.h
71670+++ b/include/linux/seq_file.h
71671@@ -26,6 +26,9 @@ struct seq_file {
71672 struct mutex lock;
71673 const struct seq_operations *op;
71674 int poll_event;
71675+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71676+ u64 exec_id;
71677+#endif
71678 #ifdef CONFIG_USER_NS
71679 struct user_namespace *user_ns;
71680 #endif
71681@@ -38,6 +41,7 @@ struct seq_operations {
71682 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
71683 int (*show) (struct seq_file *m, void *v);
71684 };
71685+typedef struct seq_operations __no_const seq_operations_no_const;
71686
71687 #define SEQ_SKIP 1
71688
71689diff --git a/include/linux/shm.h b/include/linux/shm.h
71690index 429c199..4d42e38 100644
71691--- a/include/linux/shm.h
71692+++ b/include/linux/shm.h
71693@@ -21,6 +21,10 @@ struct shmid_kernel /* private to the kernel */
71694
71695 /* The task created the shm object. NULL if the task is dead. */
71696 struct task_struct *shm_creator;
71697+#ifdef CONFIG_GRKERNSEC
71698+ time_t shm_createtime;
71699+ pid_t shm_lapid;
71700+#endif
71701 };
71702
71703 /* shm_mode upper byte flags */
71704diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
71705index b8292d8..96db310 100644
71706--- a/include/linux/skbuff.h
71707+++ b/include/linux/skbuff.h
71708@@ -599,7 +599,7 @@ extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
71709 extern struct sk_buff *__alloc_skb(unsigned int size,
71710 gfp_t priority, int flags, int node);
71711 extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
71712-static inline struct sk_buff *alloc_skb(unsigned int size,
71713+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
71714 gfp_t priority)
71715 {
71716 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
71717@@ -709,7 +709,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
71718 */
71719 static inline int skb_queue_empty(const struct sk_buff_head *list)
71720 {
71721- return list->next == (struct sk_buff *)list;
71722+ return list->next == (const struct sk_buff *)list;
71723 }
71724
71725 /**
71726@@ -722,7 +722,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
71727 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
71728 const struct sk_buff *skb)
71729 {
71730- return skb->next == (struct sk_buff *)list;
71731+ return skb->next == (const struct sk_buff *)list;
71732 }
71733
71734 /**
71735@@ -735,7 +735,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
71736 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
71737 const struct sk_buff *skb)
71738 {
71739- return skb->prev == (struct sk_buff *)list;
71740+ return skb->prev == (const struct sk_buff *)list;
71741 }
71742
71743 /**
71744@@ -1756,7 +1756,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
71745 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
71746 */
71747 #ifndef NET_SKB_PAD
71748-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
71749+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
71750 #endif
71751
71752 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
71753@@ -2351,7 +2351,7 @@ extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
71754 int noblock, int *err);
71755 extern unsigned int datagram_poll(struct file *file, struct socket *sock,
71756 struct poll_table_struct *wait);
71757-extern int skb_copy_datagram_iovec(const struct sk_buff *from,
71758+extern int __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from,
71759 int offset, struct iovec *to,
71760 int size);
71761 extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
71762@@ -2641,6 +2641,9 @@ static inline void nf_reset(struct sk_buff *skb)
71763 nf_bridge_put(skb->nf_bridge);
71764 skb->nf_bridge = NULL;
71765 #endif
71766+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
71767+ skb->nf_trace = 0;
71768+#endif
71769 }
71770
71771 static inline void nf_reset_trace(struct sk_buff *skb)
71772diff --git a/include/linux/slab.h b/include/linux/slab.h
71773index 5d168d7..720bff3 100644
71774--- a/include/linux/slab.h
71775+++ b/include/linux/slab.h
71776@@ -12,13 +12,20 @@
71777 #include <linux/gfp.h>
71778 #include <linux/types.h>
71779 #include <linux/workqueue.h>
71780-
71781+#include <linux/err.h>
71782
71783 /*
71784 * Flags to pass to kmem_cache_create().
71785 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
71786 */
71787 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
71788+
71789+#ifdef CONFIG_PAX_USERCOPY_SLABS
71790+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
71791+#else
71792+#define SLAB_USERCOPY 0x00000000UL
71793+#endif
71794+
71795 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
71796 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
71797 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
71798@@ -89,10 +96,13 @@
71799 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
71800 * Both make kfree a no-op.
71801 */
71802-#define ZERO_SIZE_PTR ((void *)16)
71803+#define ZERO_SIZE_PTR \
71804+({ \
71805+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
71806+ (void *)(-MAX_ERRNO-1L); \
71807+})
71808
71809-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
71810- (unsigned long)ZERO_SIZE_PTR)
71811+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
71812
71813 /*
71814 * Common fields provided in kmem_cache by all slab allocators
71815@@ -112,7 +122,7 @@ struct kmem_cache {
71816 unsigned int align; /* Alignment as calculated */
71817 unsigned long flags; /* Active flags on the slab */
71818 const char *name; /* Slab name for sysfs */
71819- int refcount; /* Use counter */
71820+ atomic_t refcount; /* Use counter */
71821 void (*ctor)(void *); /* Called on object slot creation */
71822 struct list_head list; /* List of all slab caches on the system */
71823 };
71824@@ -232,6 +242,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
71825 void kfree(const void *);
71826 void kzfree(const void *);
71827 size_t ksize(const void *);
71828+const char *check_heap_object(const void *ptr, unsigned long n);
71829+bool is_usercopy_object(const void *ptr);
71830
71831 /*
71832 * Allocator specific definitions. These are mainly used to establish optimized
71833@@ -311,6 +323,7 @@ size_t ksize(const void *);
71834 * for general use, and so are not documented here. For a full list of
71835 * potential flags, always refer to linux/gfp.h.
71836 */
71837+
71838 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
71839 {
71840 if (size != 0 && n > SIZE_MAX / size)
71841@@ -370,7 +383,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
71842 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
71843 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
71844 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
71845-extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
71846+extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
71847 #define kmalloc_track_caller(size, flags) \
71848 __kmalloc_track_caller(size, flags, _RET_IP_)
71849 #else
71850@@ -390,7 +403,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
71851 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
71852 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
71853 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
71854-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
71855+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
71856 #define kmalloc_node_track_caller(size, flags, node) \
71857 __kmalloc_node_track_caller(size, flags, node, \
71858 _RET_IP_)
71859diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
71860index 8bb6e0e..8eb0dbe 100644
71861--- a/include/linux/slab_def.h
71862+++ b/include/linux/slab_def.h
71863@@ -52,7 +52,7 @@ struct kmem_cache {
71864 /* 4) cache creation/removal */
71865 const char *name;
71866 struct list_head list;
71867- int refcount;
71868+ atomic_t refcount;
71869 int object_size;
71870 int align;
71871
71872@@ -68,10 +68,10 @@ struct kmem_cache {
71873 unsigned long node_allocs;
71874 unsigned long node_frees;
71875 unsigned long node_overflow;
71876- atomic_t allochit;
71877- atomic_t allocmiss;
71878- atomic_t freehit;
71879- atomic_t freemiss;
71880+ atomic_unchecked_t allochit;
71881+ atomic_unchecked_t allocmiss;
71882+ atomic_unchecked_t freehit;
71883+ atomic_unchecked_t freemiss;
71884
71885 /*
71886 * If debugging is enabled, then the allocator can add additional
71887@@ -111,11 +111,16 @@ struct cache_sizes {
71888 #ifdef CONFIG_ZONE_DMA
71889 struct kmem_cache *cs_dmacachep;
71890 #endif
71891+
71892+#ifdef CONFIG_PAX_USERCOPY_SLABS
71893+ struct kmem_cache *cs_usercopycachep;
71894+#endif
71895+
71896 };
71897 extern struct cache_sizes malloc_sizes[];
71898
71899 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
71900-void *__kmalloc(size_t size, gfp_t flags);
71901+void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
71902
71903 #ifdef CONFIG_TRACING
71904 extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
71905@@ -152,6 +157,13 @@ found:
71906 cachep = malloc_sizes[i].cs_dmacachep;
71907 else
71908 #endif
71909+
71910+#ifdef CONFIG_PAX_USERCOPY_SLABS
71911+ if (flags & GFP_USERCOPY)
71912+ cachep = malloc_sizes[i].cs_usercopycachep;
71913+ else
71914+#endif
71915+
71916 cachep = malloc_sizes[i].cs_cachep;
71917
71918 ret = kmem_cache_alloc_trace(cachep, flags, size);
71919@@ -162,7 +174,7 @@ found:
71920 }
71921
71922 #ifdef CONFIG_NUMA
71923-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
71924+extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
71925 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
71926
71927 #ifdef CONFIG_TRACING
71928@@ -205,6 +217,13 @@ found:
71929 cachep = malloc_sizes[i].cs_dmacachep;
71930 else
71931 #endif
71932+
71933+#ifdef CONFIG_PAX_USERCOPY_SLABS
71934+ if (flags & GFP_USERCOPY)
71935+ cachep = malloc_sizes[i].cs_usercopycachep;
71936+ else
71937+#endif
71938+
71939 cachep = malloc_sizes[i].cs_cachep;
71940
71941 return kmem_cache_alloc_node_trace(cachep, flags, node, size);
71942diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
71943index f28e14a..7831211 100644
71944--- a/include/linux/slob_def.h
71945+++ b/include/linux/slob_def.h
71946@@ -11,7 +11,7 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
71947 return kmem_cache_alloc_node(cachep, flags, NUMA_NO_NODE);
71948 }
71949
71950-void *__kmalloc_node(size_t size, gfp_t flags, int node);
71951+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
71952
71953 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
71954 {
71955@@ -31,7 +31,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
71956 return __kmalloc_node(size, flags, NUMA_NO_NODE);
71957 }
71958
71959-static __always_inline void *__kmalloc(size_t size, gfp_t flags)
71960+static __always_inline __size_overflow(1) void *__kmalloc(size_t size, gfp_t flags)
71961 {
71962 return kmalloc(size, flags);
71963 }
71964diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
71965index 9db4825..ed42fb5 100644
71966--- a/include/linux/slub_def.h
71967+++ b/include/linux/slub_def.h
71968@@ -91,7 +91,7 @@ struct kmem_cache {
71969 struct kmem_cache_order_objects max;
71970 struct kmem_cache_order_objects min;
71971 gfp_t allocflags; /* gfp flags to use on each alloc */
71972- int refcount; /* Refcount for slab cache destroy */
71973+ atomic_t refcount; /* Refcount for slab cache destroy */
71974 void (*ctor)(void *);
71975 int inuse; /* Offset to metadata */
71976 int align; /* Alignment */
71977@@ -156,7 +156,7 @@ extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
71978 * Sorry that the following has to be that ugly but some versions of GCC
71979 * have trouble with constant propagation and loops.
71980 */
71981-static __always_inline int kmalloc_index(size_t size)
71982+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
71983 {
71984 if (!size)
71985 return 0;
71986@@ -221,7 +221,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
71987 }
71988
71989 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
71990-void *__kmalloc(size_t size, gfp_t flags);
71991+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
71992
71993 static __always_inline void *
71994 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
71995@@ -265,7 +265,7 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
71996 }
71997 #endif
71998
71999-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
72000+static __always_inline __size_overflow(1) void *kmalloc_large(size_t size, gfp_t flags)
72001 {
72002 unsigned int order = get_order(size);
72003 return kmalloc_order_trace(size, flags, order);
72004@@ -290,7 +290,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
72005 }
72006
72007 #ifdef CONFIG_NUMA
72008-void *__kmalloc_node(size_t size, gfp_t flags, int node);
72009+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
72010 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
72011
72012 #ifdef CONFIG_TRACING
72013diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
72014index e8d702e..0a56eb4 100644
72015--- a/include/linux/sock_diag.h
72016+++ b/include/linux/sock_diag.h
72017@@ -10,7 +10,7 @@ struct sock;
72018 struct sock_diag_handler {
72019 __u8 family;
72020 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
72021-};
72022+} __do_const;
72023
72024 int sock_diag_register(const struct sock_diag_handler *h);
72025 void sock_diag_unregister(const struct sock_diag_handler *h);
72026diff --git a/include/linux/socket.h b/include/linux/socket.h
72027index 2b9f74b..e897bdc 100644
72028--- a/include/linux/socket.h
72029+++ b/include/linux/socket.h
72030@@ -321,6 +321,9 @@ extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
72031
72032 struct timespec;
72033
72034+/* The __sys_...msg variants allow MSG_CMSG_COMPAT */
72035+extern long __sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags);
72036+extern long __sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
72037 extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
72038 unsigned int flags, struct timespec *timeout);
72039 extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg,
72040diff --git a/include/linux/sonet.h b/include/linux/sonet.h
72041index 680f9a3..f13aeb0 100644
72042--- a/include/linux/sonet.h
72043+++ b/include/linux/sonet.h
72044@@ -7,7 +7,7 @@
72045 #include <uapi/linux/sonet.h>
72046
72047 struct k_sonet_stats {
72048-#define __HANDLE_ITEM(i) atomic_t i
72049+#define __HANDLE_ITEM(i) atomic_unchecked_t i
72050 __SONET_ITEMS
72051 #undef __HANDLE_ITEM
72052 };
72053diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
72054index 07d8e53..dc934c9 100644
72055--- a/include/linux/sunrpc/addr.h
72056+++ b/include/linux/sunrpc/addr.h
72057@@ -23,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
72058 {
72059 switch (sap->sa_family) {
72060 case AF_INET:
72061- return ntohs(((struct sockaddr_in *)sap)->sin_port);
72062+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
72063 case AF_INET6:
72064- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
72065+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
72066 }
72067 return 0;
72068 }
72069@@ -58,7 +58,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
72070 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
72071 const struct sockaddr *src)
72072 {
72073- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
72074+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
72075 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
72076
72077 dsin->sin_family = ssin->sin_family;
72078@@ -164,7 +164,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
72079 if (sa->sa_family != AF_INET6)
72080 return 0;
72081
72082- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
72083+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
72084 }
72085
72086 #endif /* _LINUX_SUNRPC_ADDR_H */
72087diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
72088index 2cf4ffa..470d140 100644
72089--- a/include/linux/sunrpc/clnt.h
72090+++ b/include/linux/sunrpc/clnt.h
72091@@ -96,7 +96,7 @@ struct rpc_procinfo {
72092 unsigned int p_timer; /* Which RTT timer to use */
72093 u32 p_statidx; /* Which procedure to account */
72094 const char * p_name; /* name of procedure */
72095-};
72096+} __do_const;
72097
72098 #ifdef __KERNEL__
72099
72100diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
72101index 1f0216b..6a4fa50 100644
72102--- a/include/linux/sunrpc/svc.h
72103+++ b/include/linux/sunrpc/svc.h
72104@@ -411,7 +411,7 @@ struct svc_procedure {
72105 unsigned int pc_count; /* call count */
72106 unsigned int pc_cachetype; /* cache info (NFS) */
72107 unsigned int pc_xdrressize; /* maximum size of XDR reply */
72108-};
72109+} __do_const;
72110
72111 /*
72112 * Function prototypes.
72113diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
72114index 0b8e3e6..33e0a01 100644
72115--- a/include/linux/sunrpc/svc_rdma.h
72116+++ b/include/linux/sunrpc/svc_rdma.h
72117@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
72118 extern unsigned int svcrdma_max_requests;
72119 extern unsigned int svcrdma_max_req_size;
72120
72121-extern atomic_t rdma_stat_recv;
72122-extern atomic_t rdma_stat_read;
72123-extern atomic_t rdma_stat_write;
72124-extern atomic_t rdma_stat_sq_starve;
72125-extern atomic_t rdma_stat_rq_starve;
72126-extern atomic_t rdma_stat_rq_poll;
72127-extern atomic_t rdma_stat_rq_prod;
72128-extern atomic_t rdma_stat_sq_poll;
72129-extern atomic_t rdma_stat_sq_prod;
72130+extern atomic_unchecked_t rdma_stat_recv;
72131+extern atomic_unchecked_t rdma_stat_read;
72132+extern atomic_unchecked_t rdma_stat_write;
72133+extern atomic_unchecked_t rdma_stat_sq_starve;
72134+extern atomic_unchecked_t rdma_stat_rq_starve;
72135+extern atomic_unchecked_t rdma_stat_rq_poll;
72136+extern atomic_unchecked_t rdma_stat_rq_prod;
72137+extern atomic_unchecked_t rdma_stat_sq_poll;
72138+extern atomic_unchecked_t rdma_stat_sq_prod;
72139
72140 #define RPCRDMA_VERSION 1
72141
72142diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
72143index ff374ab..7fd2ecb 100644
72144--- a/include/linux/sunrpc/svcauth.h
72145+++ b/include/linux/sunrpc/svcauth.h
72146@@ -109,7 +109,7 @@ struct auth_ops {
72147 int (*release)(struct svc_rqst *rq);
72148 void (*domain_release)(struct auth_domain *);
72149 int (*set_client)(struct svc_rqst *rq);
72150-};
72151+} __do_const;
72152
72153 #define SVC_GARBAGE 1
72154 #define SVC_SYSERR 2
72155diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
72156index a5ffd32..0935dea 100644
72157--- a/include/linux/swiotlb.h
72158+++ b/include/linux/swiotlb.h
72159@@ -60,7 +60,8 @@ extern void
72160
72161 extern void
72162 swiotlb_free_coherent(struct device *hwdev, size_t size,
72163- void *vaddr, dma_addr_t dma_handle);
72164+ void *vaddr, dma_addr_t dma_handle,
72165+ struct dma_attrs *attrs);
72166
72167 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
72168 unsigned long offset, size_t size,
72169diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
72170index 313a8e0..6b273a9 100644
72171--- a/include/linux/syscalls.h
72172+++ b/include/linux/syscalls.h
72173@@ -418,11 +418,11 @@ asmlinkage long sys_sync(void);
72174 asmlinkage long sys_fsync(unsigned int fd);
72175 asmlinkage long sys_fdatasync(unsigned int fd);
72176 asmlinkage long sys_bdflush(int func, long data);
72177-asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
72178- char __user *type, unsigned long flags,
72179+asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
72180+ const char __user *type, unsigned long flags,
72181 void __user *data);
72182-asmlinkage long sys_umount(char __user *name, int flags);
72183-asmlinkage long sys_oldumount(char __user *name);
72184+asmlinkage long sys_umount(const char __user *name, int flags);
72185+asmlinkage long sys_oldumount(const char __user *name);
72186 asmlinkage long sys_truncate(const char __user *path, long length);
72187 asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
72188 asmlinkage long sys_stat(const char __user *filename,
72189@@ -634,7 +634,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
72190 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
72191 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
72192 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
72193- struct sockaddr __user *, int);
72194+ struct sockaddr __user *, int) __intentional_overflow(0);
72195 asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
72196 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
72197 unsigned int vlen, unsigned flags);
72198diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
72199index 27b3b0b..e093dd9 100644
72200--- a/include/linux/syscore_ops.h
72201+++ b/include/linux/syscore_ops.h
72202@@ -16,7 +16,7 @@ struct syscore_ops {
72203 int (*suspend)(void);
72204 void (*resume)(void);
72205 void (*shutdown)(void);
72206-};
72207+} __do_const;
72208
72209 extern void register_syscore_ops(struct syscore_ops *ops);
72210 extern void unregister_syscore_ops(struct syscore_ops *ops);
72211diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
72212index 14a8ff2..af52bad 100644
72213--- a/include/linux/sysctl.h
72214+++ b/include/linux/sysctl.h
72215@@ -34,13 +34,13 @@ struct ctl_table_root;
72216 struct ctl_table_header;
72217 struct ctl_dir;
72218
72219-typedef struct ctl_table ctl_table;
72220-
72221 typedef int proc_handler (struct ctl_table *ctl, int write,
72222 void __user *buffer, size_t *lenp, loff_t *ppos);
72223
72224 extern int proc_dostring(struct ctl_table *, int,
72225 void __user *, size_t *, loff_t *);
72226+extern int proc_dostring_modpriv(struct ctl_table *, int,
72227+ void __user *, size_t *, loff_t *);
72228 extern int proc_dointvec(struct ctl_table *, int,
72229 void __user *, size_t *, loff_t *);
72230 extern int proc_dointvec_minmax(struct ctl_table *, int,
72231@@ -115,7 +115,9 @@ struct ctl_table
72232 struct ctl_table_poll *poll;
72233 void *extra1;
72234 void *extra2;
72235-};
72236+} __do_const;
72237+typedef struct ctl_table __no_const ctl_table_no_const;
72238+typedef struct ctl_table ctl_table;
72239
72240 struct ctl_node {
72241 struct rb_node node;
72242diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
72243index e2cee22..3ddb921 100644
72244--- a/include/linux/sysfs.h
72245+++ b/include/linux/sysfs.h
72246@@ -31,7 +31,8 @@ struct attribute {
72247 struct lock_class_key *key;
72248 struct lock_class_key skey;
72249 #endif
72250-};
72251+} __do_const;
72252+typedef struct attribute __no_const attribute_no_const;
72253
72254 /**
72255 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
72256@@ -59,8 +60,8 @@ struct attribute_group {
72257 umode_t (*is_visible)(struct kobject *,
72258 struct attribute *, int);
72259 struct attribute **attrs;
72260-};
72261-
72262+} __do_const;
72263+typedef struct attribute_group __no_const attribute_group_no_const;
72264
72265
72266 /**
72267@@ -107,7 +108,8 @@ struct bin_attribute {
72268 char *, loff_t, size_t);
72269 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
72270 struct vm_area_struct *vma);
72271-};
72272+} __do_const;
72273+typedef struct bin_attribute __no_const bin_attribute_no_const;
72274
72275 /**
72276 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
72277diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
72278index 7faf933..9b85a0c 100644
72279--- a/include/linux/sysrq.h
72280+++ b/include/linux/sysrq.h
72281@@ -16,6 +16,7 @@
72282
72283 #include <linux/errno.h>
72284 #include <linux/types.h>
72285+#include <linux/compiler.h>
72286
72287 /* Enable/disable SYSRQ support by default (0==no, 1==yes). */
72288 #define SYSRQ_DEFAULT_ENABLE 1
72289@@ -36,7 +37,7 @@ struct sysrq_key_op {
72290 char *help_msg;
72291 char *action_msg;
72292 int enable_mask;
72293-};
72294+} __do_const;
72295
72296 #ifdef CONFIG_MAGIC_SYSRQ
72297
72298diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
72299index e7e0473..7989295 100644
72300--- a/include/linux/thread_info.h
72301+++ b/include/linux/thread_info.h
72302@@ -148,6 +148,15 @@ static inline bool test_and_clear_restore_sigmask(void)
72303 #error "no set_restore_sigmask() provided and default one won't work"
72304 #endif
72305
72306+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user);
72307+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
72308+{
72309+#ifndef CONFIG_PAX_USERCOPY_DEBUG
72310+ if (!__builtin_constant_p(n))
72311+#endif
72312+ __check_object_size(ptr, n, to_user);
72313+}
72314+
72315 #endif /* __KERNEL__ */
72316
72317 #endif /* _LINUX_THREAD_INFO_H */
72318diff --git a/include/linux/tty.h b/include/linux/tty.h
72319index c75d886..04cb148 100644
72320--- a/include/linux/tty.h
72321+++ b/include/linux/tty.h
72322@@ -194,7 +194,7 @@ struct tty_port {
72323 const struct tty_port_operations *ops; /* Port operations */
72324 spinlock_t lock; /* Lock protecting tty field */
72325 int blocked_open; /* Waiting to open */
72326- int count; /* Usage count */
72327+ atomic_t count; /* Usage count */
72328 wait_queue_head_t open_wait; /* Open waiters */
72329 wait_queue_head_t close_wait; /* Close waiters */
72330 wait_queue_head_t delta_msr_wait; /* Modem status change */
72331@@ -515,7 +515,7 @@ extern int tty_port_open(struct tty_port *port,
72332 struct tty_struct *tty, struct file *filp);
72333 static inline int tty_port_users(struct tty_port *port)
72334 {
72335- return port->count + port->blocked_open;
72336+ return atomic_read(&port->count) + port->blocked_open;
72337 }
72338
72339 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
72340diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
72341index 756a609..b302dd6 100644
72342--- a/include/linux/tty_driver.h
72343+++ b/include/linux/tty_driver.h
72344@@ -285,7 +285,7 @@ struct tty_operations {
72345 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
72346 #endif
72347 const struct file_operations *proc_fops;
72348-};
72349+} __do_const;
72350
72351 struct tty_driver {
72352 int magic; /* magic number for this structure */
72353diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
72354index 455a0d7..bf97ff5 100644
72355--- a/include/linux/tty_ldisc.h
72356+++ b/include/linux/tty_ldisc.h
72357@@ -146,7 +146,7 @@ struct tty_ldisc_ops {
72358
72359 struct module *owner;
72360
72361- int refcount;
72362+ atomic_t refcount;
72363 };
72364
72365 struct tty_ldisc {
72366diff --git a/include/linux/types.h b/include/linux/types.h
72367index 4d118ba..c3ee9bf 100644
72368--- a/include/linux/types.h
72369+++ b/include/linux/types.h
72370@@ -176,10 +176,26 @@ typedef struct {
72371 int counter;
72372 } atomic_t;
72373
72374+#ifdef CONFIG_PAX_REFCOUNT
72375+typedef struct {
72376+ int counter;
72377+} atomic_unchecked_t;
72378+#else
72379+typedef atomic_t atomic_unchecked_t;
72380+#endif
72381+
72382 #ifdef CONFIG_64BIT
72383 typedef struct {
72384 long counter;
72385 } atomic64_t;
72386+
72387+#ifdef CONFIG_PAX_REFCOUNT
72388+typedef struct {
72389+ long counter;
72390+} atomic64_unchecked_t;
72391+#else
72392+typedef atomic64_t atomic64_unchecked_t;
72393+#endif
72394 #endif
72395
72396 struct list_head {
72397diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
72398index 5ca0951..ab496a5 100644
72399--- a/include/linux/uaccess.h
72400+++ b/include/linux/uaccess.h
72401@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
72402 long ret; \
72403 mm_segment_t old_fs = get_fs(); \
72404 \
72405- set_fs(KERNEL_DS); \
72406 pagefault_disable(); \
72407- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
72408- pagefault_enable(); \
72409+ set_fs(KERNEL_DS); \
72410+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
72411 set_fs(old_fs); \
72412+ pagefault_enable(); \
72413 ret; \
72414 })
72415
72416diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
72417index 8e522cbc..aa8572d 100644
72418--- a/include/linux/uidgid.h
72419+++ b/include/linux/uidgid.h
72420@@ -197,4 +197,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
72421
72422 #endif /* CONFIG_USER_NS */
72423
72424+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
72425+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
72426+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
72427+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
72428+
72429 #endif /* _LINUX_UIDGID_H */
72430diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
72431index 99c1b4d..562e6f3 100644
72432--- a/include/linux/unaligned/access_ok.h
72433+++ b/include/linux/unaligned/access_ok.h
72434@@ -4,34 +4,34 @@
72435 #include <linux/kernel.h>
72436 #include <asm/byteorder.h>
72437
72438-static inline u16 get_unaligned_le16(const void *p)
72439+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
72440 {
72441- return le16_to_cpup((__le16 *)p);
72442+ return le16_to_cpup((const __le16 *)p);
72443 }
72444
72445-static inline u32 get_unaligned_le32(const void *p)
72446+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
72447 {
72448- return le32_to_cpup((__le32 *)p);
72449+ return le32_to_cpup((const __le32 *)p);
72450 }
72451
72452-static inline u64 get_unaligned_le64(const void *p)
72453+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
72454 {
72455- return le64_to_cpup((__le64 *)p);
72456+ return le64_to_cpup((const __le64 *)p);
72457 }
72458
72459-static inline u16 get_unaligned_be16(const void *p)
72460+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
72461 {
72462- return be16_to_cpup((__be16 *)p);
72463+ return be16_to_cpup((const __be16 *)p);
72464 }
72465
72466-static inline u32 get_unaligned_be32(const void *p)
72467+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
72468 {
72469- return be32_to_cpup((__be32 *)p);
72470+ return be32_to_cpup((const __be32 *)p);
72471 }
72472
72473-static inline u64 get_unaligned_be64(const void *p)
72474+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
72475 {
72476- return be64_to_cpup((__be64 *)p);
72477+ return be64_to_cpup((const __be64 *)p);
72478 }
72479
72480 static inline void put_unaligned_le16(u16 val, void *p)
72481diff --git a/include/linux/usb.h b/include/linux/usb.h
72482index 4d22d0f..8d0e8f8 100644
72483--- a/include/linux/usb.h
72484+++ b/include/linux/usb.h
72485@@ -554,7 +554,7 @@ struct usb_device {
72486 int maxchild;
72487
72488 u32 quirks;
72489- atomic_t urbnum;
72490+ atomic_unchecked_t urbnum;
72491
72492 unsigned long active_duration;
72493
72494@@ -1604,7 +1604,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
72495
72496 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
72497 __u8 request, __u8 requesttype, __u16 value, __u16 index,
72498- void *data, __u16 size, int timeout);
72499+ void *data, __u16 size, int timeout) __intentional_overflow(-1);
72500 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
72501 void *data, int len, int *actual_length, int timeout);
72502 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
72503diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
72504index c5d36c6..108f4f9 100644
72505--- a/include/linux/usb/renesas_usbhs.h
72506+++ b/include/linux/usb/renesas_usbhs.h
72507@@ -39,7 +39,7 @@ enum {
72508 */
72509 struct renesas_usbhs_driver_callback {
72510 int (*notify_hotplug)(struct platform_device *pdev);
72511-};
72512+} __no_const;
72513
72514 /*
72515 * callback functions for platform
72516diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
72517index 6f8fbcf..8259001 100644
72518--- a/include/linux/vermagic.h
72519+++ b/include/linux/vermagic.h
72520@@ -25,9 +25,35 @@
72521 #define MODULE_ARCH_VERMAGIC ""
72522 #endif
72523
72524+#ifdef CONFIG_PAX_REFCOUNT
72525+#define MODULE_PAX_REFCOUNT "REFCOUNT "
72526+#else
72527+#define MODULE_PAX_REFCOUNT ""
72528+#endif
72529+
72530+#ifdef CONSTIFY_PLUGIN
72531+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
72532+#else
72533+#define MODULE_CONSTIFY_PLUGIN ""
72534+#endif
72535+
72536+#ifdef STACKLEAK_PLUGIN
72537+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
72538+#else
72539+#define MODULE_STACKLEAK_PLUGIN ""
72540+#endif
72541+
72542+#ifdef CONFIG_GRKERNSEC
72543+#define MODULE_GRSEC "GRSEC "
72544+#else
72545+#define MODULE_GRSEC ""
72546+#endif
72547+
72548 #define VERMAGIC_STRING \
72549 UTS_RELEASE " " \
72550 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
72551 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
72552- MODULE_ARCH_VERMAGIC
72553+ MODULE_ARCH_VERMAGIC \
72554+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
72555+ MODULE_GRSEC
72556
72557diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
72558index 6071e91..ca6a489 100644
72559--- a/include/linux/vmalloc.h
72560+++ b/include/linux/vmalloc.h
72561@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
72562 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
72563 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
72564 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
72565+
72566+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
72567+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
72568+#endif
72569+
72570 /* bits [20..32] reserved for arch specific ioremap internals */
72571
72572 /*
72573@@ -62,7 +67,7 @@ extern void *vmalloc_32_user(unsigned long size);
72574 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
72575 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
72576 unsigned long start, unsigned long end, gfp_t gfp_mask,
72577- pgprot_t prot, int node, const void *caller);
72578+ pgprot_t prot, int node, const void *caller) __size_overflow(1);
72579 extern void vfree(const void *addr);
72580
72581 extern void *vmap(struct page **pages, unsigned int count,
72582@@ -124,8 +129,8 @@ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
72583 extern void free_vm_area(struct vm_struct *area);
72584
72585 /* for /dev/kmem */
72586-extern long vread(char *buf, char *addr, unsigned long count);
72587-extern long vwrite(char *buf, char *addr, unsigned long count);
72588+extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
72589+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
72590
72591 /*
72592 * Internals. Dont't use..
72593diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
72594index 5fd71a7..e5ef9a9 100644
72595--- a/include/linux/vmstat.h
72596+++ b/include/linux/vmstat.h
72597@@ -95,18 +95,18 @@ static inline void vm_events_fold_cpu(int cpu)
72598 /*
72599 * Zone based page accounting with per cpu differentials.
72600 */
72601-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
72602+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
72603
72604 static inline void zone_page_state_add(long x, struct zone *zone,
72605 enum zone_stat_item item)
72606 {
72607- atomic_long_add(x, &zone->vm_stat[item]);
72608- atomic_long_add(x, &vm_stat[item]);
72609+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
72610+ atomic_long_add_unchecked(x, &vm_stat[item]);
72611 }
72612
72613 static inline unsigned long global_page_state(enum zone_stat_item item)
72614 {
72615- long x = atomic_long_read(&vm_stat[item]);
72616+ long x = atomic_long_read_unchecked(&vm_stat[item]);
72617 #ifdef CONFIG_SMP
72618 if (x < 0)
72619 x = 0;
72620@@ -117,7 +117,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
72621 static inline unsigned long zone_page_state(struct zone *zone,
72622 enum zone_stat_item item)
72623 {
72624- long x = atomic_long_read(&zone->vm_stat[item]);
72625+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
72626 #ifdef CONFIG_SMP
72627 if (x < 0)
72628 x = 0;
72629@@ -134,7 +134,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
72630 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
72631 enum zone_stat_item item)
72632 {
72633- long x = atomic_long_read(&zone->vm_stat[item]);
72634+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
72635
72636 #ifdef CONFIG_SMP
72637 int cpu;
72638@@ -226,8 +226,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
72639
72640 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
72641 {
72642- atomic_long_inc(&zone->vm_stat[item]);
72643- atomic_long_inc(&vm_stat[item]);
72644+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
72645+ atomic_long_inc_unchecked(&vm_stat[item]);
72646 }
72647
72648 static inline void __inc_zone_page_state(struct page *page,
72649@@ -238,8 +238,8 @@ static inline void __inc_zone_page_state(struct page *page,
72650
72651 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
72652 {
72653- atomic_long_dec(&zone->vm_stat[item]);
72654- atomic_long_dec(&vm_stat[item]);
72655+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
72656+ atomic_long_dec_unchecked(&vm_stat[item]);
72657 }
72658
72659 static inline void __dec_zone_page_state(struct page *page,
72660diff --git a/include/linux/xattr.h b/include/linux/xattr.h
72661index fdbafc6..49dfe4f 100644
72662--- a/include/linux/xattr.h
72663+++ b/include/linux/xattr.h
72664@@ -28,7 +28,7 @@ struct xattr_handler {
72665 size_t size, int handler_flags);
72666 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
72667 size_t size, int flags, int handler_flags);
72668-};
72669+} __do_const;
72670
72671 struct xattr {
72672 char *name;
72673@@ -37,6 +37,9 @@ struct xattr {
72674 };
72675
72676 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
72677+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
72678+ssize_t pax_getxattr(struct dentry *, void *, size_t);
72679+#endif
72680 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
72681 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
72682 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
72683diff --git a/include/linux/zlib.h b/include/linux/zlib.h
72684index 9c5a6b4..09c9438 100644
72685--- a/include/linux/zlib.h
72686+++ b/include/linux/zlib.h
72687@@ -31,6 +31,7 @@
72688 #define _ZLIB_H
72689
72690 #include <linux/zconf.h>
72691+#include <linux/compiler.h>
72692
72693 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
72694 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
72695@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
72696
72697 /* basic functions */
72698
72699-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
72700+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
72701 /*
72702 Returns the number of bytes that needs to be allocated for a per-
72703 stream workspace with the specified parameters. A pointer to this
72704diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
72705index 95d1c91..6798cca 100644
72706--- a/include/media/v4l2-dev.h
72707+++ b/include/media/v4l2-dev.h
72708@@ -76,7 +76,7 @@ struct v4l2_file_operations {
72709 int (*mmap) (struct file *, struct vm_area_struct *);
72710 int (*open) (struct file *);
72711 int (*release) (struct file *);
72712-};
72713+} __do_const;
72714
72715 /*
72716 * Newer version of video_device, handled by videodev2.c
72717diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
72718index 4118ad1..cb7e25f 100644
72719--- a/include/media/v4l2-ioctl.h
72720+++ b/include/media/v4l2-ioctl.h
72721@@ -284,7 +284,6 @@ struct v4l2_ioctl_ops {
72722 bool valid_prio, int cmd, void *arg);
72723 };
72724
72725-
72726 /* v4l debugging and diagnostics */
72727
72728 /* Debug bitmask flags to be used on V4L2 */
72729diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
72730index adcbb20..62c2559 100644
72731--- a/include/net/9p/transport.h
72732+++ b/include/net/9p/transport.h
72733@@ -57,7 +57,7 @@ struct p9_trans_module {
72734 int (*cancel) (struct p9_client *, struct p9_req_t *req);
72735 int (*zc_request)(struct p9_client *, struct p9_req_t *,
72736 char *, char *, int , int, int, int);
72737-};
72738+} __do_const;
72739
72740 void v9fs_register_trans(struct p9_trans_module *m);
72741 void v9fs_unregister_trans(struct p9_trans_module *m);
72742diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
72743index cdd3302..76f8ede 100644
72744--- a/include/net/bluetooth/l2cap.h
72745+++ b/include/net/bluetooth/l2cap.h
72746@@ -551,7 +551,7 @@ struct l2cap_ops {
72747 void (*defer) (struct l2cap_chan *chan);
72748 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
72749 unsigned long len, int nb);
72750-};
72751+} __do_const;
72752
72753 struct l2cap_conn {
72754 struct hci_conn *hcon;
72755diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
72756index 9e5425b..8136ffc 100644
72757--- a/include/net/caif/cfctrl.h
72758+++ b/include/net/caif/cfctrl.h
72759@@ -52,7 +52,7 @@ struct cfctrl_rsp {
72760 void (*radioset_rsp)(void);
72761 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
72762 struct cflayer *client_layer);
72763-};
72764+} __no_const;
72765
72766 /* Link Setup Parameters for CAIF-Links. */
72767 struct cfctrl_link_param {
72768@@ -101,8 +101,8 @@ struct cfctrl_request_info {
72769 struct cfctrl {
72770 struct cfsrvl serv;
72771 struct cfctrl_rsp res;
72772- atomic_t req_seq_no;
72773- atomic_t rsp_seq_no;
72774+ atomic_unchecked_t req_seq_no;
72775+ atomic_unchecked_t rsp_seq_no;
72776 struct list_head list;
72777 /* Protects from simultaneous access to first_req list */
72778 spinlock_t info_list_lock;
72779diff --git a/include/net/flow.h b/include/net/flow.h
72780index 628e11b..4c475df 100644
72781--- a/include/net/flow.h
72782+++ b/include/net/flow.h
72783@@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
72784
72785 extern void flow_cache_flush(void);
72786 extern void flow_cache_flush_deferred(void);
72787-extern atomic_t flow_cache_genid;
72788+extern atomic_unchecked_t flow_cache_genid;
72789
72790 #endif
72791diff --git a/include/net/genetlink.h b/include/net/genetlink.h
72792index bdfbe68..4402ebe 100644
72793--- a/include/net/genetlink.h
72794+++ b/include/net/genetlink.h
72795@@ -118,7 +118,7 @@ struct genl_ops {
72796 struct netlink_callback *cb);
72797 int (*done)(struct netlink_callback *cb);
72798 struct list_head ops_list;
72799-};
72800+} __do_const;
72801
72802 extern int genl_register_family(struct genl_family *family);
72803 extern int genl_register_family_with_ops(struct genl_family *family,
72804diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
72805index 734d9b5..48a9a4b 100644
72806--- a/include/net/gro_cells.h
72807+++ b/include/net/gro_cells.h
72808@@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
72809 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
72810
72811 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
72812- atomic_long_inc(&dev->rx_dropped);
72813+ atomic_long_inc_unchecked(&dev->rx_dropped);
72814 kfree_skb(skb);
72815 return;
72816 }
72817diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
72818index 1832927..ce39aea 100644
72819--- a/include/net/inet_connection_sock.h
72820+++ b/include/net/inet_connection_sock.h
72821@@ -62,7 +62,7 @@ struct inet_connection_sock_af_ops {
72822 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
72823 int (*bind_conflict)(const struct sock *sk,
72824 const struct inet_bind_bucket *tb, bool relax);
72825-};
72826+} __do_const;
72827
72828 /** inet_connection_sock - INET connection oriented sock
72829 *
72830diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
72831index 53f464d..ba76aaa 100644
72832--- a/include/net/inetpeer.h
72833+++ b/include/net/inetpeer.h
72834@@ -47,8 +47,8 @@ struct inet_peer {
72835 */
72836 union {
72837 struct {
72838- atomic_t rid; /* Frag reception counter */
72839- atomic_t ip_id_count; /* IP ID for the next packet */
72840+ atomic_unchecked_t rid; /* Frag reception counter */
72841+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
72842 };
72843 struct rcu_head rcu;
72844 struct inet_peer *gc_next;
72845@@ -182,11 +182,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
72846 more++;
72847 inet_peer_refcheck(p);
72848 do {
72849- old = atomic_read(&p->ip_id_count);
72850+ old = atomic_read_unchecked(&p->ip_id_count);
72851 new = old + more;
72852 if (!new)
72853 new = 1;
72854- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
72855+ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
72856 return new;
72857 }
72858
72859diff --git a/include/net/ip.h b/include/net/ip.h
72860index a68f838..74518ab 100644
72861--- a/include/net/ip.h
72862+++ b/include/net/ip.h
72863@@ -202,7 +202,7 @@ extern struct local_ports {
72864 } sysctl_local_ports;
72865 extern void inet_get_local_port_range(int *low, int *high);
72866
72867-extern unsigned long *sysctl_local_reserved_ports;
72868+extern unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
72869 static inline int inet_is_reserved_local_port(int port)
72870 {
72871 return test_bit(port, sysctl_local_reserved_ports);
72872diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
72873index e49db91..76a81de 100644
72874--- a/include/net/ip_fib.h
72875+++ b/include/net/ip_fib.h
72876@@ -167,7 +167,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
72877
72878 #define FIB_RES_SADDR(net, res) \
72879 ((FIB_RES_NH(res).nh_saddr_genid == \
72880- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
72881+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
72882 FIB_RES_NH(res).nh_saddr : \
72883 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
72884 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
72885diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
72886index fce8e6b..3ca4916 100644
72887--- a/include/net/ip_vs.h
72888+++ b/include/net/ip_vs.h
72889@@ -599,7 +599,7 @@ struct ip_vs_conn {
72890 struct ip_vs_conn *control; /* Master control connection */
72891 atomic_t n_control; /* Number of controlled ones */
72892 struct ip_vs_dest *dest; /* real server */
72893- atomic_t in_pkts; /* incoming packet counter */
72894+ atomic_unchecked_t in_pkts; /* incoming packet counter */
72895
72896 /* packet transmitter for different forwarding methods. If it
72897 mangles the packet, it must return NF_DROP or better NF_STOLEN,
72898@@ -737,7 +737,7 @@ struct ip_vs_dest {
72899 __be16 port; /* port number of the server */
72900 union nf_inet_addr addr; /* IP address of the server */
72901 volatile unsigned int flags; /* dest status flags */
72902- atomic_t conn_flags; /* flags to copy to conn */
72903+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
72904 atomic_t weight; /* server weight */
72905
72906 atomic_t refcnt; /* reference counter */
72907@@ -981,11 +981,11 @@ struct netns_ipvs {
72908 /* ip_vs_lblc */
72909 int sysctl_lblc_expiration;
72910 struct ctl_table_header *lblc_ctl_header;
72911- struct ctl_table *lblc_ctl_table;
72912+ ctl_table_no_const *lblc_ctl_table;
72913 /* ip_vs_lblcr */
72914 int sysctl_lblcr_expiration;
72915 struct ctl_table_header *lblcr_ctl_header;
72916- struct ctl_table *lblcr_ctl_table;
72917+ ctl_table_no_const *lblcr_ctl_table;
72918 /* ip_vs_est */
72919 struct list_head est_list; /* estimator list */
72920 spinlock_t est_lock;
72921diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
72922index 80ffde3..968b0f4 100644
72923--- a/include/net/irda/ircomm_tty.h
72924+++ b/include/net/irda/ircomm_tty.h
72925@@ -35,6 +35,7 @@
72926 #include <linux/termios.h>
72927 #include <linux/timer.h>
72928 #include <linux/tty.h> /* struct tty_struct */
72929+#include <asm/local.h>
72930
72931 #include <net/irda/irias_object.h>
72932 #include <net/irda/ircomm_core.h>
72933diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
72934index 714cc9a..ea05f3e 100644
72935--- a/include/net/iucv/af_iucv.h
72936+++ b/include/net/iucv/af_iucv.h
72937@@ -149,7 +149,7 @@ struct iucv_skb_cb {
72938 struct iucv_sock_list {
72939 struct hlist_head head;
72940 rwlock_t lock;
72941- atomic_t autobind_name;
72942+ atomic_unchecked_t autobind_name;
72943 };
72944
72945 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
72946diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
72947index df83f69..9b640b8 100644
72948--- a/include/net/llc_c_ac.h
72949+++ b/include/net/llc_c_ac.h
72950@@ -87,7 +87,7 @@
72951 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
72952 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
72953
72954-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
72955+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
72956
72957 extern int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
72958 extern int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
72959diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
72960index 6ca3113..f8026dd 100644
72961--- a/include/net/llc_c_ev.h
72962+++ b/include/net/llc_c_ev.h
72963@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
72964 return (struct llc_conn_state_ev *)skb->cb;
72965 }
72966
72967-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
72968-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
72969+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
72970+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
72971
72972 extern int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
72973 extern int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
72974diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
72975index 0e79cfb..f46db31 100644
72976--- a/include/net/llc_c_st.h
72977+++ b/include/net/llc_c_st.h
72978@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
72979 u8 next_state;
72980 llc_conn_ev_qfyr_t *ev_qualifiers;
72981 llc_conn_action_t *ev_actions;
72982-};
72983+} __do_const;
72984
72985 struct llc_conn_state {
72986 u8 current_state;
72987diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
72988index 37a3bbd..55a4241 100644
72989--- a/include/net/llc_s_ac.h
72990+++ b/include/net/llc_s_ac.h
72991@@ -23,7 +23,7 @@
72992 #define SAP_ACT_TEST_IND 9
72993
72994 /* All action functions must look like this */
72995-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
72996+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
72997
72998 extern int llc_sap_action_unitdata_ind(struct llc_sap *sap,
72999 struct sk_buff *skb);
73000diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
73001index 567c681..cd73ac0 100644
73002--- a/include/net/llc_s_st.h
73003+++ b/include/net/llc_s_st.h
73004@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
73005 llc_sap_ev_t ev;
73006 u8 next_state;
73007 llc_sap_action_t *ev_actions;
73008-};
73009+} __do_const;
73010
73011 struct llc_sap_state {
73012 u8 curr_state;
73013diff --git a/include/net/mac80211.h b/include/net/mac80211.h
73014index f7eba13..91ed983 100644
73015--- a/include/net/mac80211.h
73016+++ b/include/net/mac80211.h
73017@@ -4119,7 +4119,7 @@ struct rate_control_ops {
73018 void (*add_sta_debugfs)(void *priv, void *priv_sta,
73019 struct dentry *dir);
73020 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
73021-};
73022+} __do_const;
73023
73024 static inline int rate_supported(struct ieee80211_sta *sta,
73025 enum ieee80211_band band,
73026diff --git a/include/net/neighbour.h b/include/net/neighbour.h
73027index 7e748ad..5c6229b 100644
73028--- a/include/net/neighbour.h
73029+++ b/include/net/neighbour.h
73030@@ -123,7 +123,7 @@ struct neigh_ops {
73031 void (*error_report)(struct neighbour *, struct sk_buff *);
73032 int (*output)(struct neighbour *, struct sk_buff *);
73033 int (*connected_output)(struct neighbour *, struct sk_buff *);
73034-};
73035+} __do_const;
73036
73037 struct pneigh_entry {
73038 struct pneigh_entry *next;
73039diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
73040index de644bc..dfbcc4c 100644
73041--- a/include/net/net_namespace.h
73042+++ b/include/net/net_namespace.h
73043@@ -115,7 +115,7 @@ struct net {
73044 #endif
73045 struct netns_ipvs *ipvs;
73046 struct sock *diag_nlsk;
73047- atomic_t rt_genid;
73048+ atomic_unchecked_t rt_genid;
73049 };
73050
73051 /*
73052@@ -272,7 +272,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
73053 #define __net_init __init
73054 #define __net_exit __exit_refok
73055 #define __net_initdata __initdata
73056+#ifdef CONSTIFY_PLUGIN
73057 #define __net_initconst __initconst
73058+#else
73059+#define __net_initconst __initdata
73060+#endif
73061 #endif
73062
73063 struct pernet_operations {
73064@@ -282,7 +286,7 @@ struct pernet_operations {
73065 void (*exit_batch)(struct list_head *net_exit_list);
73066 int *id;
73067 size_t size;
73068-};
73069+} __do_const;
73070
73071 /*
73072 * Use these carefully. If you implement a network device and it
73073@@ -330,12 +334,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
73074
73075 static inline int rt_genid(struct net *net)
73076 {
73077- return atomic_read(&net->rt_genid);
73078+ return atomic_read_unchecked(&net->rt_genid);
73079 }
73080
73081 static inline void rt_genid_bump(struct net *net)
73082 {
73083- atomic_inc(&net->rt_genid);
73084+ atomic_inc_unchecked(&net->rt_genid);
73085 }
73086
73087 #endif /* __NET_NET_NAMESPACE_H */
73088diff --git a/include/net/netdma.h b/include/net/netdma.h
73089index 8ba8ce2..99b7fff 100644
73090--- a/include/net/netdma.h
73091+++ b/include/net/netdma.h
73092@@ -24,7 +24,7 @@
73093 #include <linux/dmaengine.h>
73094 #include <linux/skbuff.h>
73095
73096-int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
73097+int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan,
73098 struct sk_buff *skb, int offset, struct iovec *to,
73099 size_t len, struct dma_pinned_list *pinned_list);
73100
73101diff --git a/include/net/netlink.h b/include/net/netlink.h
73102index 9690b0f..87aded7 100644
73103--- a/include/net/netlink.h
73104+++ b/include/net/netlink.h
73105@@ -534,7 +534,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
73106 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
73107 {
73108 if (mark)
73109- skb_trim(skb, (unsigned char *) mark - skb->data);
73110+ skb_trim(skb, (const unsigned char *) mark - skb->data);
73111 }
73112
73113 /**
73114diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
73115index c9c0c53..53f24c3 100644
73116--- a/include/net/netns/conntrack.h
73117+++ b/include/net/netns/conntrack.h
73118@@ -12,10 +12,10 @@ struct nf_conntrack_ecache;
73119 struct nf_proto_net {
73120 #ifdef CONFIG_SYSCTL
73121 struct ctl_table_header *ctl_table_header;
73122- struct ctl_table *ctl_table;
73123+ ctl_table_no_const *ctl_table;
73124 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
73125 struct ctl_table_header *ctl_compat_header;
73126- struct ctl_table *ctl_compat_table;
73127+ ctl_table_no_const *ctl_compat_table;
73128 #endif
73129 #endif
73130 unsigned int users;
73131@@ -58,7 +58,7 @@ struct nf_ip_net {
73132 struct nf_icmp_net icmpv6;
73133 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
73134 struct ctl_table_header *ctl_table_header;
73135- struct ctl_table *ctl_table;
73136+ ctl_table_no_const *ctl_table;
73137 #endif
73138 };
73139
73140diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
73141index 2ba9de8..47bd6c7 100644
73142--- a/include/net/netns/ipv4.h
73143+++ b/include/net/netns/ipv4.h
73144@@ -67,7 +67,7 @@ struct netns_ipv4 {
73145 kgid_t sysctl_ping_group_range[2];
73146 long sysctl_tcp_mem[3];
73147
73148- atomic_t dev_addr_genid;
73149+ atomic_unchecked_t dev_addr_genid;
73150
73151 #ifdef CONFIG_IP_MROUTE
73152 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
73153diff --git a/include/net/protocol.h b/include/net/protocol.h
73154index 047c047..b9dad15 100644
73155--- a/include/net/protocol.h
73156+++ b/include/net/protocol.h
73157@@ -44,7 +44,7 @@ struct net_protocol {
73158 void (*err_handler)(struct sk_buff *skb, u32 info);
73159 unsigned int no_policy:1,
73160 netns_ok:1;
73161-};
73162+} __do_const;
73163
73164 #if IS_ENABLED(CONFIG_IPV6)
73165 struct inet6_protocol {
73166@@ -57,7 +57,7 @@ struct inet6_protocol {
73167 u8 type, u8 code, int offset,
73168 __be32 info);
73169 unsigned int flags; /* INET6_PROTO_xxx */
73170-};
73171+} __do_const;
73172
73173 #define INET6_PROTO_NOPOLICY 0x1
73174 #define INET6_PROTO_FINAL 0x2
73175diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
73176index 5a15fab..d799ea7 100644
73177--- a/include/net/rtnetlink.h
73178+++ b/include/net/rtnetlink.h
73179@@ -81,7 +81,7 @@ struct rtnl_link_ops {
73180 const struct net_device *dev);
73181 unsigned int (*get_num_tx_queues)(void);
73182 unsigned int (*get_num_rx_queues)(void);
73183-};
73184+} __do_const;
73185
73186 extern int __rtnl_link_register(struct rtnl_link_ops *ops);
73187 extern void __rtnl_link_unregister(struct rtnl_link_ops *ops);
73188diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
73189index df85a0c..19ac300 100644
73190--- a/include/net/sctp/sctp.h
73191+++ b/include/net/sctp/sctp.h
73192@@ -330,9 +330,9 @@ do { \
73193
73194 #else /* SCTP_DEBUG */
73195
73196-#define SCTP_DEBUG_PRINTK(whatever...)
73197-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
73198-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
73199+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
73200+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
73201+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
73202 #define SCTP_ENABLE_DEBUG
73203 #define SCTP_DISABLE_DEBUG
73204 #define SCTP_ASSERT(expr, str, func)
73205diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
73206index 2a82d13..62a31c2 100644
73207--- a/include/net/sctp/sm.h
73208+++ b/include/net/sctp/sm.h
73209@@ -87,7 +87,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
73210 typedef struct {
73211 sctp_state_fn_t *fn;
73212 const char *name;
73213-} sctp_sm_table_entry_t;
73214+} __do_const sctp_sm_table_entry_t;
73215
73216 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
73217 * currently in use.
73218@@ -299,7 +299,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
73219 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
73220
73221 /* Extern declarations for major data structures. */
73222-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
73223+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
73224
73225
73226 /* Get the size of a DATA chunk payload. */
73227diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
73228index 0e0f9d2..cd05ebb 100644
73229--- a/include/net/sctp/structs.h
73230+++ b/include/net/sctp/structs.h
73231@@ -517,7 +517,7 @@ struct sctp_pf {
73232 struct sctp_association *asoc);
73233 void (*addr_v4map) (struct sctp_sock *, union sctp_addr *);
73234 struct sctp_af *af;
73235-};
73236+} __do_const;
73237
73238
73239 /* Structure to track chunk fragments that have been acked, but peer
73240diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
73241index c2e542b..6ca975b 100644
73242--- a/include/net/secure_seq.h
73243+++ b/include/net/secure_seq.h
73244@@ -3,6 +3,7 @@
73245
73246 #include <linux/types.h>
73247
73248+extern void net_secret_init(void);
73249 extern __u32 secure_ip_id(__be32 daddr);
73250 extern __u32 secure_ipv6_id(const __be32 daddr[4]);
73251 extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
73252diff --git a/include/net/sock.h b/include/net/sock.h
73253index 0be480a..586232f 100644
73254--- a/include/net/sock.h
73255+++ b/include/net/sock.h
73256@@ -325,7 +325,7 @@ struct sock {
73257 #ifdef CONFIG_RPS
73258 __u32 sk_rxhash;
73259 #endif
73260- atomic_t sk_drops;
73261+ atomic_unchecked_t sk_drops;
73262 int sk_rcvbuf;
73263
73264 struct sk_filter __rcu *sk_filter;
73265@@ -1796,7 +1796,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
73266 }
73267
73268 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
73269- char __user *from, char *to,
73270+ char __user *from, unsigned char *to,
73271 int copy, int offset)
73272 {
73273 if (skb->ip_summed == CHECKSUM_NONE) {
73274@@ -2055,7 +2055,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
73275 }
73276 }
73277
73278-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
73279+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
73280
73281 /**
73282 * sk_page_frag - return an appropriate page_frag
73283diff --git a/include/net/tcp.h b/include/net/tcp.h
73284index a345480..3c65cf4 100644
73285--- a/include/net/tcp.h
73286+++ b/include/net/tcp.h
73287@@ -529,7 +529,7 @@ extern void tcp_retransmit_timer(struct sock *sk);
73288 extern void tcp_xmit_retransmit_queue(struct sock *);
73289 extern void tcp_simple_retransmit(struct sock *);
73290 extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
73291-extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
73292+extern int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
73293
73294 extern void tcp_send_probe0(struct sock *);
73295 extern void tcp_send_partial(struct sock *);
73296@@ -700,8 +700,8 @@ struct tcp_skb_cb {
73297 struct inet6_skb_parm h6;
73298 #endif
73299 } header; /* For incoming frames */
73300- __u32 seq; /* Starting sequence number */
73301- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
73302+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
73303+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
73304 __u32 when; /* used to compute rtt's */
73305 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
73306
73307@@ -715,7 +715,7 @@ struct tcp_skb_cb {
73308
73309 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
73310 /* 1 byte hole */
73311- __u32 ack_seq; /* Sequence number ACK'd */
73312+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
73313 };
73314
73315 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
73316diff --git a/include/net/xfrm.h b/include/net/xfrm.h
73317index 24c8886..e6fb816 100644
73318--- a/include/net/xfrm.h
73319+++ b/include/net/xfrm.h
73320@@ -304,7 +304,7 @@ struct xfrm_policy_afinfo {
73321 struct net_device *dev,
73322 const struct flowi *fl);
73323 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
73324-};
73325+} __do_const;
73326
73327 extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
73328 extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
73329@@ -340,7 +340,7 @@ struct xfrm_state_afinfo {
73330 struct sk_buff *skb);
73331 int (*transport_finish)(struct sk_buff *skb,
73332 int async);
73333-};
73334+} __do_const;
73335
73336 extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
73337 extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
73338@@ -423,7 +423,7 @@ struct xfrm_mode {
73339 struct module *owner;
73340 unsigned int encap;
73341 int flags;
73342-};
73343+} __do_const;
73344
73345 /* Flags for xfrm_mode. */
73346 enum {
73347@@ -520,7 +520,7 @@ struct xfrm_policy {
73348 struct timer_list timer;
73349
73350 struct flow_cache_object flo;
73351- atomic_t genid;
73352+ atomic_unchecked_t genid;
73353 u32 priority;
73354 u32 index;
73355 struct xfrm_mark mark;
73356diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
73357index 1a046b1..ee0bef0 100644
73358--- a/include/rdma/iw_cm.h
73359+++ b/include/rdma/iw_cm.h
73360@@ -122,7 +122,7 @@ struct iw_cm_verbs {
73361 int backlog);
73362
73363 int (*destroy_listen)(struct iw_cm_id *cm_id);
73364-};
73365+} __no_const;
73366
73367 /**
73368 * iw_create_cm_id - Create an IW CM identifier.
73369diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
73370index e1379b4..67eafbe 100644
73371--- a/include/scsi/libfc.h
73372+++ b/include/scsi/libfc.h
73373@@ -762,6 +762,7 @@ struct libfc_function_template {
73374 */
73375 void (*disc_stop_final) (struct fc_lport *);
73376 };
73377+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
73378
73379 /**
73380 * struct fc_disc - Discovery context
73381@@ -866,7 +867,7 @@ struct fc_lport {
73382 struct fc_vport *vport;
73383
73384 /* Operational Information */
73385- struct libfc_function_template tt;
73386+ libfc_function_template_no_const tt;
73387 u8 link_up;
73388 u8 qfull;
73389 enum fc_lport_state state;
73390diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
73391index a7f9cba..b1f44d0 100644
73392--- a/include/scsi/scsi_device.h
73393+++ b/include/scsi/scsi_device.h
73394@@ -171,9 +171,9 @@ struct scsi_device {
73395 unsigned int max_device_blocked; /* what device_blocked counts down from */
73396 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
73397
73398- atomic_t iorequest_cnt;
73399- atomic_t iodone_cnt;
73400- atomic_t ioerr_cnt;
73401+ atomic_unchecked_t iorequest_cnt;
73402+ atomic_unchecked_t iodone_cnt;
73403+ atomic_unchecked_t ioerr_cnt;
73404
73405 struct device sdev_gendev,
73406 sdev_dev;
73407diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
73408index b797e8f..8e2c3aa 100644
73409--- a/include/scsi/scsi_transport_fc.h
73410+++ b/include/scsi/scsi_transport_fc.h
73411@@ -751,7 +751,8 @@ struct fc_function_template {
73412 unsigned long show_host_system_hostname:1;
73413
73414 unsigned long disable_target_scan:1;
73415-};
73416+} __do_const;
73417+typedef struct fc_function_template __no_const fc_function_template_no_const;
73418
73419
73420 /**
73421diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
73422index ff6c741..393815f 100644
73423--- a/include/sound/compress_driver.h
73424+++ b/include/sound/compress_driver.h
73425@@ -130,7 +130,7 @@ struct snd_compr_ops {
73426 struct snd_compr_caps *caps);
73427 int (*get_codec_caps) (struct snd_compr_stream *stream,
73428 struct snd_compr_codec_caps *codec);
73429-};
73430+} __no_const;
73431
73432 /**
73433 * struct snd_compr: Compressed device
73434diff --git a/include/sound/soc.h b/include/sound/soc.h
73435index a6a059c..2243336 100644
73436--- a/include/sound/soc.h
73437+++ b/include/sound/soc.h
73438@@ -771,7 +771,7 @@ struct snd_soc_codec_driver {
73439 /* probe ordering - for components with runtime dependencies */
73440 int probe_order;
73441 int remove_order;
73442-};
73443+} __do_const;
73444
73445 /* SoC platform interface */
73446 struct snd_soc_platform_driver {
73447@@ -817,7 +817,7 @@ struct snd_soc_platform_driver {
73448 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
73449 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
73450 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
73451-};
73452+} __do_const;
73453
73454 struct snd_soc_platform {
73455 const char *name;
73456diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
73457index f8640f3..b72d113 100644
73458--- a/include/target/target_core_base.h
73459+++ b/include/target/target_core_base.h
73460@@ -658,7 +658,7 @@ struct se_device {
73461 spinlock_t stats_lock;
73462 /* Active commands on this virtual SE device */
73463 atomic_t simple_cmds;
73464- atomic_t dev_ordered_id;
73465+ atomic_unchecked_t dev_ordered_id;
73466 atomic_t dev_ordered_sync;
73467 atomic_t dev_qf_count;
73468 int export_count;
73469diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
73470new file mode 100644
73471index 0000000..fb634b7
73472--- /dev/null
73473+++ b/include/trace/events/fs.h
73474@@ -0,0 +1,53 @@
73475+#undef TRACE_SYSTEM
73476+#define TRACE_SYSTEM fs
73477+
73478+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
73479+#define _TRACE_FS_H
73480+
73481+#include <linux/fs.h>
73482+#include <linux/tracepoint.h>
73483+
73484+TRACE_EVENT(do_sys_open,
73485+
73486+ TP_PROTO(const char *filename, int flags, int mode),
73487+
73488+ TP_ARGS(filename, flags, mode),
73489+
73490+ TP_STRUCT__entry(
73491+ __string( filename, filename )
73492+ __field( int, flags )
73493+ __field( int, mode )
73494+ ),
73495+
73496+ TP_fast_assign(
73497+ __assign_str(filename, filename);
73498+ __entry->flags = flags;
73499+ __entry->mode = mode;
73500+ ),
73501+
73502+ TP_printk("\"%s\" %x %o",
73503+ __get_str(filename), __entry->flags, __entry->mode)
73504+);
73505+
73506+TRACE_EVENT(open_exec,
73507+
73508+ TP_PROTO(const char *filename),
73509+
73510+ TP_ARGS(filename),
73511+
73512+ TP_STRUCT__entry(
73513+ __string( filename, filename )
73514+ ),
73515+
73516+ TP_fast_assign(
73517+ __assign_str(filename, filename);
73518+ ),
73519+
73520+ TP_printk("\"%s\"",
73521+ __get_str(filename))
73522+);
73523+
73524+#endif /* _TRACE_FS_H */
73525+
73526+/* This part must be outside protection */
73527+#include <trace/define_trace.h>
73528diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
73529index 1c09820..7f5ec79 100644
73530--- a/include/trace/events/irq.h
73531+++ b/include/trace/events/irq.h
73532@@ -36,7 +36,7 @@ struct softirq_action;
73533 */
73534 TRACE_EVENT(irq_handler_entry,
73535
73536- TP_PROTO(int irq, struct irqaction *action),
73537+ TP_PROTO(int irq, const struct irqaction *action),
73538
73539 TP_ARGS(irq, action),
73540
73541@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
73542 */
73543 TRACE_EVENT(irq_handler_exit,
73544
73545- TP_PROTO(int irq, struct irqaction *action, int ret),
73546+ TP_PROTO(int irq, const struct irqaction *action, int ret),
73547
73548 TP_ARGS(irq, action, ret),
73549
73550diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
73551index 7caf44c..23c6f27 100644
73552--- a/include/uapi/linux/a.out.h
73553+++ b/include/uapi/linux/a.out.h
73554@@ -39,6 +39,14 @@ enum machine_type {
73555 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
73556 };
73557
73558+/* Constants for the N_FLAGS field */
73559+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
73560+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
73561+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
73562+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
73563+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
73564+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
73565+
73566 #if !defined (N_MAGIC)
73567 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
73568 #endif
73569diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
73570index d876736..ccce5c0 100644
73571--- a/include/uapi/linux/byteorder/little_endian.h
73572+++ b/include/uapi/linux/byteorder/little_endian.h
73573@@ -42,51 +42,51 @@
73574
73575 static inline __le64 __cpu_to_le64p(const __u64 *p)
73576 {
73577- return (__force __le64)*p;
73578+ return (__force const __le64)*p;
73579 }
73580-static inline __u64 __le64_to_cpup(const __le64 *p)
73581+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
73582 {
73583- return (__force __u64)*p;
73584+ return (__force const __u64)*p;
73585 }
73586 static inline __le32 __cpu_to_le32p(const __u32 *p)
73587 {
73588- return (__force __le32)*p;
73589+ return (__force const __le32)*p;
73590 }
73591 static inline __u32 __le32_to_cpup(const __le32 *p)
73592 {
73593- return (__force __u32)*p;
73594+ return (__force const __u32)*p;
73595 }
73596 static inline __le16 __cpu_to_le16p(const __u16 *p)
73597 {
73598- return (__force __le16)*p;
73599+ return (__force const __le16)*p;
73600 }
73601 static inline __u16 __le16_to_cpup(const __le16 *p)
73602 {
73603- return (__force __u16)*p;
73604+ return (__force const __u16)*p;
73605 }
73606 static inline __be64 __cpu_to_be64p(const __u64 *p)
73607 {
73608- return (__force __be64)__swab64p(p);
73609+ return (__force const __be64)__swab64p(p);
73610 }
73611 static inline __u64 __be64_to_cpup(const __be64 *p)
73612 {
73613- return __swab64p((__u64 *)p);
73614+ return __swab64p((const __u64 *)p);
73615 }
73616 static inline __be32 __cpu_to_be32p(const __u32 *p)
73617 {
73618- return (__force __be32)__swab32p(p);
73619+ return (__force const __be32)__swab32p(p);
73620 }
73621-static inline __u32 __be32_to_cpup(const __be32 *p)
73622+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
73623 {
73624- return __swab32p((__u32 *)p);
73625+ return __swab32p((const __u32 *)p);
73626 }
73627 static inline __be16 __cpu_to_be16p(const __u16 *p)
73628 {
73629- return (__force __be16)__swab16p(p);
73630+ return (__force const __be16)__swab16p(p);
73631 }
73632 static inline __u16 __be16_to_cpup(const __be16 *p)
73633 {
73634- return __swab16p((__u16 *)p);
73635+ return __swab16p((const __u16 *)p);
73636 }
73637 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
73638 #define __le64_to_cpus(x) do { (void)(x); } while (0)
73639diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
73640index 8072d35..e77aeb8 100644
73641--- a/include/uapi/linux/elf.h
73642+++ b/include/uapi/linux/elf.h
73643@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
73644 #define PT_GNU_EH_FRAME 0x6474e550
73645
73646 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
73647+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
73648+
73649+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
73650+
73651+/* Constants for the e_flags field */
73652+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
73653+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
73654+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
73655+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
73656+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
73657+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
73658
73659 /*
73660 * Extended Numbering
73661@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
73662 #define DT_DEBUG 21
73663 #define DT_TEXTREL 22
73664 #define DT_JMPREL 23
73665+#define DT_FLAGS 30
73666+ #define DF_TEXTREL 0x00000004
73667 #define DT_ENCODING 32
73668 #define OLD_DT_LOOS 0x60000000
73669 #define DT_LOOS 0x6000000d
73670@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
73671 #define PF_W 0x2
73672 #define PF_X 0x1
73673
73674+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
73675+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
73676+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
73677+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
73678+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
73679+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
73680+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
73681+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
73682+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
73683+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
73684+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
73685+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
73686+
73687 typedef struct elf32_phdr{
73688 Elf32_Word p_type;
73689 Elf32_Off p_offset;
73690@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
73691 #define EI_OSABI 7
73692 #define EI_PAD 8
73693
73694+#define EI_PAX 14
73695+
73696 #define ELFMAG0 0x7f /* EI_MAG */
73697 #define ELFMAG1 'E'
73698 #define ELFMAG2 'L'
73699diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
73700index aa169c4..6a2771d 100644
73701--- a/include/uapi/linux/personality.h
73702+++ b/include/uapi/linux/personality.h
73703@@ -30,6 +30,7 @@ enum {
73704 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
73705 ADDR_NO_RANDOMIZE | \
73706 ADDR_COMPAT_LAYOUT | \
73707+ ADDR_LIMIT_3GB | \
73708 MMAP_PAGE_ZERO)
73709
73710 /*
73711diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
73712index 7530e74..e714828 100644
73713--- a/include/uapi/linux/screen_info.h
73714+++ b/include/uapi/linux/screen_info.h
73715@@ -43,7 +43,8 @@ struct screen_info {
73716 __u16 pages; /* 0x32 */
73717 __u16 vesa_attributes; /* 0x34 */
73718 __u32 capabilities; /* 0x36 */
73719- __u8 _reserved[6]; /* 0x3a */
73720+ __u16 vesapm_size; /* 0x3a */
73721+ __u8 _reserved[4]; /* 0x3c */
73722 } __attribute__((packed));
73723
73724 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
73725diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
73726index 0e011eb..82681b1 100644
73727--- a/include/uapi/linux/swab.h
73728+++ b/include/uapi/linux/swab.h
73729@@ -43,7 +43,7 @@
73730 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
73731 */
73732
73733-static inline __attribute_const__ __u16 __fswab16(__u16 val)
73734+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
73735 {
73736 #ifdef __HAVE_BUILTIN_BSWAP16__
73737 return __builtin_bswap16(val);
73738@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
73739 #endif
73740 }
73741
73742-static inline __attribute_const__ __u32 __fswab32(__u32 val)
73743+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
73744 {
73745 #ifdef __HAVE_BUILTIN_BSWAP32__
73746 return __builtin_bswap32(val);
73747@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
73748 #endif
73749 }
73750
73751-static inline __attribute_const__ __u64 __fswab64(__u64 val)
73752+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
73753 {
73754 #ifdef __HAVE_BUILTIN_BSWAP64__
73755 return __builtin_bswap64(val);
73756diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
73757index 6d67213..8dab561 100644
73758--- a/include/uapi/linux/sysctl.h
73759+++ b/include/uapi/linux/sysctl.h
73760@@ -155,7 +155,11 @@ enum
73761 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
73762 };
73763
73764-
73765+#ifdef CONFIG_PAX_SOFTMODE
73766+enum {
73767+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
73768+};
73769+#endif
73770
73771 /* CTL_VM names: */
73772 enum
73773diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
73774index e4629b9..6958086 100644
73775--- a/include/uapi/linux/xattr.h
73776+++ b/include/uapi/linux/xattr.h
73777@@ -63,5 +63,9 @@
73778 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
73779 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
73780
73781+/* User namespace */
73782+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
73783+#define XATTR_PAX_FLAGS_SUFFIX "flags"
73784+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
73785
73786 #endif /* _UAPI_LINUX_XATTR_H */
73787diff --git a/include/video/udlfb.h b/include/video/udlfb.h
73788index f9466fa..f4e2b81 100644
73789--- a/include/video/udlfb.h
73790+++ b/include/video/udlfb.h
73791@@ -53,10 +53,10 @@ struct dlfb_data {
73792 u32 pseudo_palette[256];
73793 int blank_mode; /*one of FB_BLANK_ */
73794 /* blit-only rendering path metrics, exposed through sysfs */
73795- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
73796- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
73797- atomic_t bytes_sent; /* to usb, after compression including overhead */
73798- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
73799+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
73800+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
73801+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
73802+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
73803 };
73804
73805 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
73806diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
73807index 1a91850..28573f8 100644
73808--- a/include/video/uvesafb.h
73809+++ b/include/video/uvesafb.h
73810@@ -122,6 +122,7 @@ struct uvesafb_par {
73811 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
73812 u8 pmi_setpal; /* PMI for palette changes */
73813 u16 *pmi_base; /* protected mode interface location */
73814+ u8 *pmi_code; /* protected mode code location */
73815 void *pmi_start;
73816 void *pmi_pal;
73817 u8 *vbe_state_orig; /*
73818diff --git a/init/Kconfig b/init/Kconfig
73819index 5341d72..153f24f 100644
73820--- a/init/Kconfig
73821+++ b/init/Kconfig
73822@@ -984,6 +984,7 @@ endif # CGROUPS
73823
73824 config CHECKPOINT_RESTORE
73825 bool "Checkpoint/restore support" if EXPERT
73826+ depends on !GRKERNSEC
73827 default n
73828 help
73829 Enables additional kernel features in a sake of checkpoint/restore.
73830@@ -1471,7 +1472,7 @@ config SLUB_DEBUG
73831
73832 config COMPAT_BRK
73833 bool "Disable heap randomization"
73834- default y
73835+ default n
73836 help
73837 Randomizing heap placement makes heap exploits harder, but it
73838 also breaks ancient binaries (including anything libc5 based).
73839@@ -1734,7 +1735,7 @@ config INIT_ALL_POSSIBLE
73840 config STOP_MACHINE
73841 bool
73842 default y
73843- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
73844+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
73845 help
73846 Need stop_machine() primitive.
73847
73848diff --git a/init/Makefile b/init/Makefile
73849index 7bc47ee..6da2dc7 100644
73850--- a/init/Makefile
73851+++ b/init/Makefile
73852@@ -2,6 +2,9 @@
73853 # Makefile for the linux kernel.
73854 #
73855
73856+ccflags-y := $(GCC_PLUGINS_CFLAGS)
73857+asflags-y := $(GCC_PLUGINS_AFLAGS)
73858+
73859 obj-y := main.o version.o mounts.o
73860 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
73861 obj-y += noinitramfs.o
73862diff --git a/init/do_mounts.c b/init/do_mounts.c
73863index a2b49f2..03a0e17c 100644
73864--- a/init/do_mounts.c
73865+++ b/init/do_mounts.c
73866@@ -355,11 +355,11 @@ static void __init get_fs_names(char *page)
73867 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
73868 {
73869 struct super_block *s;
73870- int err = sys_mount(name, "/root", fs, flags, data);
73871+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
73872 if (err)
73873 return err;
73874
73875- sys_chdir("/root");
73876+ sys_chdir((const char __force_user *)"/root");
73877 s = current->fs->pwd.dentry->d_sb;
73878 ROOT_DEV = s->s_dev;
73879 printk(KERN_INFO
73880@@ -480,18 +480,18 @@ void __init change_floppy(char *fmt, ...)
73881 va_start(args, fmt);
73882 vsprintf(buf, fmt, args);
73883 va_end(args);
73884- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
73885+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
73886 if (fd >= 0) {
73887 sys_ioctl(fd, FDEJECT, 0);
73888 sys_close(fd);
73889 }
73890 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
73891- fd = sys_open("/dev/console", O_RDWR, 0);
73892+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
73893 if (fd >= 0) {
73894 sys_ioctl(fd, TCGETS, (long)&termios);
73895 termios.c_lflag &= ~ICANON;
73896 sys_ioctl(fd, TCSETSF, (long)&termios);
73897- sys_read(fd, &c, 1);
73898+ sys_read(fd, (char __user *)&c, 1);
73899 termios.c_lflag |= ICANON;
73900 sys_ioctl(fd, TCSETSF, (long)&termios);
73901 sys_close(fd);
73902@@ -585,6 +585,6 @@ void __init prepare_namespace(void)
73903 mount_root();
73904 out:
73905 devtmpfs_mount("dev");
73906- sys_mount(".", "/", NULL, MS_MOVE, NULL);
73907- sys_chroot(".");
73908+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
73909+ sys_chroot((const char __force_user *)".");
73910 }
73911diff --git a/init/do_mounts.h b/init/do_mounts.h
73912index f5b978a..69dbfe8 100644
73913--- a/init/do_mounts.h
73914+++ b/init/do_mounts.h
73915@@ -15,15 +15,15 @@ extern int root_mountflags;
73916
73917 static inline int create_dev(char *name, dev_t dev)
73918 {
73919- sys_unlink(name);
73920- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
73921+ sys_unlink((char __force_user *)name);
73922+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
73923 }
73924
73925 #if BITS_PER_LONG == 32
73926 static inline u32 bstat(char *name)
73927 {
73928 struct stat64 stat;
73929- if (sys_stat64(name, &stat) != 0)
73930+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
73931 return 0;
73932 if (!S_ISBLK(stat.st_mode))
73933 return 0;
73934@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
73935 static inline u32 bstat(char *name)
73936 {
73937 struct stat stat;
73938- if (sys_newstat(name, &stat) != 0)
73939+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
73940 return 0;
73941 if (!S_ISBLK(stat.st_mode))
73942 return 0;
73943diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
73944index a32ec1c..60a6659 100644
73945--- a/init/do_mounts_initrd.c
73946+++ b/init/do_mounts_initrd.c
73947@@ -37,13 +37,13 @@ static int init_linuxrc(struct subprocess_info *info, struct cred *new)
73948 {
73949 sys_unshare(CLONE_FS | CLONE_FILES);
73950 /* stdin/stdout/stderr for /linuxrc */
73951- sys_open("/dev/console", O_RDWR, 0);
73952+ sys_open((const char __force_user *)"/dev/console", O_RDWR, 0);
73953 sys_dup(0);
73954 sys_dup(0);
73955 /* move initrd over / and chdir/chroot in initrd root */
73956- sys_chdir("/root");
73957- sys_mount(".", "/", NULL, MS_MOVE, NULL);
73958- sys_chroot(".");
73959+ sys_chdir((const char __force_user *)"/root");
73960+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
73961+ sys_chroot((const char __force_user *)".");
73962 sys_setsid();
73963 return 0;
73964 }
73965@@ -58,8 +58,8 @@ static void __init handle_initrd(void)
73966 create_dev("/dev/root.old", Root_RAM0);
73967 /* mount initrd on rootfs' /root */
73968 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
73969- sys_mkdir("/old", 0700);
73970- sys_chdir("/old");
73971+ sys_mkdir((const char __force_user *)"/old", 0700);
73972+ sys_chdir((const char __force_user *)"/old");
73973
73974 /* try loading default modules from initrd */
73975 load_default_modules();
73976@@ -76,31 +76,31 @@ static void __init handle_initrd(void)
73977 current->flags &= ~PF_FREEZER_SKIP;
73978
73979 /* move initrd to rootfs' /old */
73980- sys_mount("..", ".", NULL, MS_MOVE, NULL);
73981+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
73982 /* switch root and cwd back to / of rootfs */
73983- sys_chroot("..");
73984+ sys_chroot((const char __force_user *)"..");
73985
73986 if (new_decode_dev(real_root_dev) == Root_RAM0) {
73987- sys_chdir("/old");
73988+ sys_chdir((const char __force_user *)"/old");
73989 return;
73990 }
73991
73992- sys_chdir("/");
73993+ sys_chdir((const char __force_user *)"/");
73994 ROOT_DEV = new_decode_dev(real_root_dev);
73995 mount_root();
73996
73997 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
73998- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
73999+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
74000 if (!error)
74001 printk("okay\n");
74002 else {
74003- int fd = sys_open("/dev/root.old", O_RDWR, 0);
74004+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
74005 if (error == -ENOENT)
74006 printk("/initrd does not exist. Ignored.\n");
74007 else
74008 printk("failed\n");
74009 printk(KERN_NOTICE "Unmounting old root\n");
74010- sys_umount("/old", MNT_DETACH);
74011+ sys_umount((char __force_user *)"/old", MNT_DETACH);
74012 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
74013 if (fd < 0) {
74014 error = fd;
74015@@ -123,11 +123,11 @@ int __init initrd_load(void)
74016 * mounted in the normal path.
74017 */
74018 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
74019- sys_unlink("/initrd.image");
74020+ sys_unlink((const char __force_user *)"/initrd.image");
74021 handle_initrd();
74022 return 1;
74023 }
74024 }
74025- sys_unlink("/initrd.image");
74026+ sys_unlink((const char __force_user *)"/initrd.image");
74027 return 0;
74028 }
74029diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
74030index 8cb6db5..d729f50 100644
74031--- a/init/do_mounts_md.c
74032+++ b/init/do_mounts_md.c
74033@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
74034 partitioned ? "_d" : "", minor,
74035 md_setup_args[ent].device_names);
74036
74037- fd = sys_open(name, 0, 0);
74038+ fd = sys_open((char __force_user *)name, 0, 0);
74039 if (fd < 0) {
74040 printk(KERN_ERR "md: open failed - cannot start "
74041 "array %s\n", name);
74042@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
74043 * array without it
74044 */
74045 sys_close(fd);
74046- fd = sys_open(name, 0, 0);
74047+ fd = sys_open((char __force_user *)name, 0, 0);
74048 sys_ioctl(fd, BLKRRPART, 0);
74049 }
74050 sys_close(fd);
74051@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
74052
74053 wait_for_device_probe();
74054
74055- fd = sys_open("/dev/md0", 0, 0);
74056+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
74057 if (fd >= 0) {
74058 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
74059 sys_close(fd);
74060diff --git a/init/init_task.c b/init/init_task.c
74061index ba0a7f36..2bcf1d5 100644
74062--- a/init/init_task.c
74063+++ b/init/init_task.c
74064@@ -22,5 +22,9 @@ EXPORT_SYMBOL(init_task);
74065 * Initial thread structure. Alignment of this is handled by a special
74066 * linker map entry.
74067 */
74068+#ifdef CONFIG_X86
74069+union thread_union init_thread_union __init_task_data;
74070+#else
74071 union thread_union init_thread_union __init_task_data =
74072 { INIT_THREAD_INFO(init_task) };
74073+#endif
74074diff --git a/init/initramfs.c b/init/initramfs.c
74075index a67ef9d..2d17ed9 100644
74076--- a/init/initramfs.c
74077+++ b/init/initramfs.c
74078@@ -84,7 +84,7 @@ static void __init free_hash(void)
74079 }
74080 }
74081
74082-static long __init do_utime(char *filename, time_t mtime)
74083+static long __init do_utime(char __force_user *filename, time_t mtime)
74084 {
74085 struct timespec t[2];
74086
74087@@ -119,7 +119,7 @@ static void __init dir_utime(void)
74088 struct dir_entry *de, *tmp;
74089 list_for_each_entry_safe(de, tmp, &dir_list, list) {
74090 list_del(&de->list);
74091- do_utime(de->name, de->mtime);
74092+ do_utime((char __force_user *)de->name, de->mtime);
74093 kfree(de->name);
74094 kfree(de);
74095 }
74096@@ -281,7 +281,7 @@ static int __init maybe_link(void)
74097 if (nlink >= 2) {
74098 char *old = find_link(major, minor, ino, mode, collected);
74099 if (old)
74100- return (sys_link(old, collected) < 0) ? -1 : 1;
74101+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
74102 }
74103 return 0;
74104 }
74105@@ -290,11 +290,11 @@ static void __init clean_path(char *path, umode_t mode)
74106 {
74107 struct stat st;
74108
74109- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
74110+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
74111 if (S_ISDIR(st.st_mode))
74112- sys_rmdir(path);
74113+ sys_rmdir((char __force_user *)path);
74114 else
74115- sys_unlink(path);
74116+ sys_unlink((char __force_user *)path);
74117 }
74118 }
74119
74120@@ -315,7 +315,7 @@ static int __init do_name(void)
74121 int openflags = O_WRONLY|O_CREAT;
74122 if (ml != 1)
74123 openflags |= O_TRUNC;
74124- wfd = sys_open(collected, openflags, mode);
74125+ wfd = sys_open((char __force_user *)collected, openflags, mode);
74126
74127 if (wfd >= 0) {
74128 sys_fchown(wfd, uid, gid);
74129@@ -327,17 +327,17 @@ static int __init do_name(void)
74130 }
74131 }
74132 } else if (S_ISDIR(mode)) {
74133- sys_mkdir(collected, mode);
74134- sys_chown(collected, uid, gid);
74135- sys_chmod(collected, mode);
74136+ sys_mkdir((char __force_user *)collected, mode);
74137+ sys_chown((char __force_user *)collected, uid, gid);
74138+ sys_chmod((char __force_user *)collected, mode);
74139 dir_add(collected, mtime);
74140 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
74141 S_ISFIFO(mode) || S_ISSOCK(mode)) {
74142 if (maybe_link() == 0) {
74143- sys_mknod(collected, mode, rdev);
74144- sys_chown(collected, uid, gid);
74145- sys_chmod(collected, mode);
74146- do_utime(collected, mtime);
74147+ sys_mknod((char __force_user *)collected, mode, rdev);
74148+ sys_chown((char __force_user *)collected, uid, gid);
74149+ sys_chmod((char __force_user *)collected, mode);
74150+ do_utime((char __force_user *)collected, mtime);
74151 }
74152 }
74153 return 0;
74154@@ -346,15 +346,15 @@ static int __init do_name(void)
74155 static int __init do_copy(void)
74156 {
74157 if (count >= body_len) {
74158- sys_write(wfd, victim, body_len);
74159+ sys_write(wfd, (char __force_user *)victim, body_len);
74160 sys_close(wfd);
74161- do_utime(vcollected, mtime);
74162+ do_utime((char __force_user *)vcollected, mtime);
74163 kfree(vcollected);
74164 eat(body_len);
74165 state = SkipIt;
74166 return 0;
74167 } else {
74168- sys_write(wfd, victim, count);
74169+ sys_write(wfd, (char __force_user *)victim, count);
74170 body_len -= count;
74171 eat(count);
74172 return 1;
74173@@ -365,9 +365,9 @@ static int __init do_symlink(void)
74174 {
74175 collected[N_ALIGN(name_len) + body_len] = '\0';
74176 clean_path(collected, 0);
74177- sys_symlink(collected + N_ALIGN(name_len), collected);
74178- sys_lchown(collected, uid, gid);
74179- do_utime(collected, mtime);
74180+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
74181+ sys_lchown((char __force_user *)collected, uid, gid);
74182+ do_utime((char __force_user *)collected, mtime);
74183 state = SkipIt;
74184 next_state = Reset;
74185 return 0;
74186@@ -583,7 +583,7 @@ static int __init populate_rootfs(void)
74187 {
74188 char *err = unpack_to_rootfs(__initramfs_start, __initramfs_size);
74189 if (err)
74190- panic(err); /* Failed to decompress INTERNAL initramfs */
74191+ panic("%s", err); /* Failed to decompress INTERNAL initramfs */
74192 if (initrd_start) {
74193 #ifdef CONFIG_BLK_DEV_RAM
74194 int fd;
74195diff --git a/init/main.c b/init/main.c
74196index 63534a1..85feae2 100644
74197--- a/init/main.c
74198+++ b/init/main.c
74199@@ -98,6 +98,8 @@ static inline void mark_rodata_ro(void) { }
74200 extern void tc_init(void);
74201 #endif
74202
74203+extern void grsecurity_init(void);
74204+
74205 /*
74206 * Debug helper: via this flag we know that we are in 'early bootup code'
74207 * where only the boot processor is running with IRQ disabled. This means
74208@@ -151,6 +153,64 @@ static int __init set_reset_devices(char *str)
74209
74210 __setup("reset_devices", set_reset_devices);
74211
74212+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
74213+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
74214+static int __init setup_grsec_proc_gid(char *str)
74215+{
74216+ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
74217+ return 1;
74218+}
74219+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
74220+#endif
74221+
74222+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
74223+unsigned long pax_user_shadow_base __read_only = 1UL << TASK_SIZE_MAX_SHIFT;
74224+EXPORT_SYMBOL(pax_user_shadow_base);
74225+extern char pax_enter_kernel_user[];
74226+extern char pax_exit_kernel_user[];
74227+extern pgdval_t clone_pgd_mask;
74228+#endif
74229+
74230+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
74231+static int __init setup_pax_nouderef(char *str)
74232+{
74233+#ifdef CONFIG_X86_32
74234+ unsigned int cpu;
74235+ struct desc_struct *gdt;
74236+
74237+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
74238+ gdt = get_cpu_gdt_table(cpu);
74239+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
74240+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
74241+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
74242+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
74243+ }
74244+ loadsegment(ds, __KERNEL_DS);
74245+ loadsegment(es, __KERNEL_DS);
74246+ loadsegment(ss, __KERNEL_DS);
74247+#else
74248+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
74249+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
74250+ clone_pgd_mask = ~(pgdval_t)0UL;
74251+ pax_user_shadow_base = 0UL;
74252+#endif
74253+
74254+ return 0;
74255+}
74256+early_param("pax_nouderef", setup_pax_nouderef);
74257+#endif
74258+
74259+#ifdef CONFIG_PAX_SOFTMODE
74260+int pax_softmode;
74261+
74262+static int __init setup_pax_softmode(char *str)
74263+{
74264+ get_option(&str, &pax_softmode);
74265+ return 1;
74266+}
74267+__setup("pax_softmode=", setup_pax_softmode);
74268+#endif
74269+
74270 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
74271 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
74272 static const char *panic_later, *panic_param;
74273@@ -683,6 +743,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
74274 {
74275 int count = preempt_count();
74276 int ret;
74277+ const char *msg1 = "", *msg2 = "";
74278
74279 if (initcall_debug)
74280 ret = do_one_initcall_debug(fn);
74281@@ -695,15 +756,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
74282 sprintf(msgbuf, "error code %d ", ret);
74283
74284 if (preempt_count() != count) {
74285- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
74286+ msg1 = " preemption imbalance";
74287 preempt_count() = count;
74288 }
74289 if (irqs_disabled()) {
74290- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
74291+ msg2 = " disabled interrupts";
74292 local_irq_enable();
74293 }
74294- if (msgbuf[0]) {
74295- printk("initcall %pF returned with %s\n", fn, msgbuf);
74296+ if (msgbuf[0] || *msg1 || *msg2) {
74297+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
74298 }
74299
74300 return ret;
74301@@ -757,8 +818,14 @@ static void __init do_initcall_level(int level)
74302 level, level,
74303 &repair_env_string);
74304
74305- for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++)
74306+ for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++) {
74307 do_one_initcall(*fn);
74308+
74309+#ifdef LATENT_ENTROPY_PLUGIN
74310+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
74311+#endif
74312+
74313+ }
74314 }
74315
74316 static void __init do_initcalls(void)
74317@@ -792,8 +859,14 @@ static void __init do_pre_smp_initcalls(void)
74318 {
74319 initcall_t *fn;
74320
74321- for (fn = __initcall_start; fn < __initcall0_start; fn++)
74322+ for (fn = __initcall_start; fn < __initcall0_start; fn++) {
74323 do_one_initcall(*fn);
74324+
74325+#ifdef LATENT_ENTROPY_PLUGIN
74326+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
74327+#endif
74328+
74329+ }
74330 }
74331
74332 /*
74333@@ -811,8 +884,8 @@ static int run_init_process(const char *init_filename)
74334 {
74335 argv_init[0] = init_filename;
74336 return do_execve(init_filename,
74337- (const char __user *const __user *)argv_init,
74338- (const char __user *const __user *)envp_init);
74339+ (const char __user *const __force_user *)argv_init,
74340+ (const char __user *const __force_user *)envp_init);
74341 }
74342
74343 static noinline void __init kernel_init_freeable(void);
74344@@ -890,7 +963,7 @@ static noinline void __init kernel_init_freeable(void)
74345 do_basic_setup();
74346
74347 /* Open the /dev/console on the rootfs, this should never fail */
74348- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
74349+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
74350 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
74351
74352 (void) sys_dup(0);
74353@@ -903,11 +976,13 @@ static noinline void __init kernel_init_freeable(void)
74354 if (!ramdisk_execute_command)
74355 ramdisk_execute_command = "/init";
74356
74357- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
74358+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
74359 ramdisk_execute_command = NULL;
74360 prepare_namespace();
74361 }
74362
74363+ grsecurity_init();
74364+
74365 /*
74366 * Ok, we have completed the initial bootup, and
74367 * we're essentially up and running. Get rid of the
74368diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
74369index 130dfec..cc88451 100644
74370--- a/ipc/ipc_sysctl.c
74371+++ b/ipc/ipc_sysctl.c
74372@@ -30,7 +30,7 @@ static void *get_ipc(ctl_table *table)
74373 static int proc_ipc_dointvec(ctl_table *table, int write,
74374 void __user *buffer, size_t *lenp, loff_t *ppos)
74375 {
74376- struct ctl_table ipc_table;
74377+ ctl_table_no_const ipc_table;
74378
74379 memcpy(&ipc_table, table, sizeof(ipc_table));
74380 ipc_table.data = get_ipc(table);
74381@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(ctl_table *table, int write,
74382 static int proc_ipc_dointvec_minmax(ctl_table *table, int write,
74383 void __user *buffer, size_t *lenp, loff_t *ppos)
74384 {
74385- struct ctl_table ipc_table;
74386+ ctl_table_no_const ipc_table;
74387
74388 memcpy(&ipc_table, table, sizeof(ipc_table));
74389 ipc_table.data = get_ipc(table);
74390@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(ctl_table *table, int write,
74391 static int proc_ipc_callback_dointvec(ctl_table *table, int write,
74392 void __user *buffer, size_t *lenp, loff_t *ppos)
74393 {
74394- struct ctl_table ipc_table;
74395+ ctl_table_no_const ipc_table;
74396 size_t lenp_bef = *lenp;
74397 int rc;
74398
74399@@ -88,7 +88,7 @@ static int proc_ipc_callback_dointvec(ctl_table *table, int write,
74400 static int proc_ipc_doulongvec_minmax(ctl_table *table, int write,
74401 void __user *buffer, size_t *lenp, loff_t *ppos)
74402 {
74403- struct ctl_table ipc_table;
74404+ ctl_table_no_const ipc_table;
74405 memcpy(&ipc_table, table, sizeof(ipc_table));
74406 ipc_table.data = get_ipc(table);
74407
74408@@ -122,7 +122,7 @@ static void ipc_auto_callback(int val)
74409 static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write,
74410 void __user *buffer, size_t *lenp, loff_t *ppos)
74411 {
74412- struct ctl_table ipc_table;
74413+ ctl_table_no_const ipc_table;
74414 size_t lenp_bef = *lenp;
74415 int oldval;
74416 int rc;
74417diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
74418index 383d638..943fdbb 100644
74419--- a/ipc/mq_sysctl.c
74420+++ b/ipc/mq_sysctl.c
74421@@ -25,7 +25,7 @@ static void *get_mq(ctl_table *table)
74422 static int proc_mq_dointvec_minmax(ctl_table *table, int write,
74423 void __user *buffer, size_t *lenp, loff_t *ppos)
74424 {
74425- struct ctl_table mq_table;
74426+ ctl_table_no_const mq_table;
74427 memcpy(&mq_table, table, sizeof(mq_table));
74428 mq_table.data = get_mq(table);
74429
74430diff --git a/ipc/mqueue.c b/ipc/mqueue.c
74431index e4e47f6..a85e0ad 100644
74432--- a/ipc/mqueue.c
74433+++ b/ipc/mqueue.c
74434@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
74435 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
74436 info->attr.mq_msgsize);
74437
74438+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
74439 spin_lock(&mq_lock);
74440 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
74441 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
74442diff --git a/ipc/msg.c b/ipc/msg.c
74443index fede1d0..9778e0f8 100644
74444--- a/ipc/msg.c
74445+++ b/ipc/msg.c
74446@@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
74447 return security_msg_queue_associate(msq, msgflg);
74448 }
74449
74450+static struct ipc_ops msg_ops = {
74451+ .getnew = newque,
74452+ .associate = msg_security,
74453+ .more_checks = NULL
74454+};
74455+
74456 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
74457 {
74458 struct ipc_namespace *ns;
74459- struct ipc_ops msg_ops;
74460 struct ipc_params msg_params;
74461
74462 ns = current->nsproxy->ipc_ns;
74463
74464- msg_ops.getnew = newque;
74465- msg_ops.associate = msg_security;
74466- msg_ops.more_checks = NULL;
74467-
74468 msg_params.key = key;
74469 msg_params.flg = msgflg;
74470
74471diff --git a/ipc/sem.c b/ipc/sem.c
74472index 58d31f1..cce7a55 100644
74473--- a/ipc/sem.c
74474+++ b/ipc/sem.c
74475@@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
74476 return 0;
74477 }
74478
74479+static struct ipc_ops sem_ops = {
74480+ .getnew = newary,
74481+ .associate = sem_security,
74482+ .more_checks = sem_more_checks
74483+};
74484+
74485 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
74486 {
74487 struct ipc_namespace *ns;
74488- struct ipc_ops sem_ops;
74489 struct ipc_params sem_params;
74490
74491 ns = current->nsproxy->ipc_ns;
74492@@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
74493 if (nsems < 0 || nsems > ns->sc_semmsl)
74494 return -EINVAL;
74495
74496- sem_ops.getnew = newary;
74497- sem_ops.associate = sem_security;
74498- sem_ops.more_checks = sem_more_checks;
74499-
74500 sem_params.key = key;
74501 sem_params.flg = semflg;
74502 sem_params.u.nsems = nsems;
74503diff --git a/ipc/shm.c b/ipc/shm.c
74504index 7e199fa..180a1ca 100644
74505--- a/ipc/shm.c
74506+++ b/ipc/shm.c
74507@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
74508 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
74509 #endif
74510
74511+#ifdef CONFIG_GRKERNSEC
74512+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
74513+ const time_t shm_createtime, const kuid_t cuid,
74514+ const int shmid);
74515+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
74516+ const time_t shm_createtime);
74517+#endif
74518+
74519 void shm_init_ns(struct ipc_namespace *ns)
74520 {
74521 ns->shm_ctlmax = SHMMAX;
74522@@ -531,6 +539,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
74523 shp->shm_lprid = 0;
74524 shp->shm_atim = shp->shm_dtim = 0;
74525 shp->shm_ctim = get_seconds();
74526+#ifdef CONFIG_GRKERNSEC
74527+ {
74528+ struct timespec timeval;
74529+ do_posix_clock_monotonic_gettime(&timeval);
74530+
74531+ shp->shm_createtime = timeval.tv_sec;
74532+ }
74533+#endif
74534 shp->shm_segsz = size;
74535 shp->shm_nattch = 0;
74536 shp->shm_file = file;
74537@@ -582,18 +598,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
74538 return 0;
74539 }
74540
74541+static struct ipc_ops shm_ops = {
74542+ .getnew = newseg,
74543+ .associate = shm_security,
74544+ .more_checks = shm_more_checks
74545+};
74546+
74547 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
74548 {
74549 struct ipc_namespace *ns;
74550- struct ipc_ops shm_ops;
74551 struct ipc_params shm_params;
74552
74553 ns = current->nsproxy->ipc_ns;
74554
74555- shm_ops.getnew = newseg;
74556- shm_ops.associate = shm_security;
74557- shm_ops.more_checks = shm_more_checks;
74558-
74559 shm_params.key = key;
74560 shm_params.flg = shmflg;
74561 shm_params.u.size = size;
74562@@ -1014,6 +1031,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
74563 f_mode = FMODE_READ | FMODE_WRITE;
74564 }
74565 if (shmflg & SHM_EXEC) {
74566+
74567+#ifdef CONFIG_PAX_MPROTECT
74568+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
74569+ goto out;
74570+#endif
74571+
74572 prot |= PROT_EXEC;
74573 acc_mode |= S_IXUGO;
74574 }
74575@@ -1037,9 +1060,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
74576 if (err)
74577 goto out_unlock;
74578
74579+#ifdef CONFIG_GRKERNSEC
74580+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
74581+ shp->shm_perm.cuid, shmid) ||
74582+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
74583+ err = -EACCES;
74584+ goto out_unlock;
74585+ }
74586+#endif
74587+
74588 path = shp->shm_file->f_path;
74589 path_get(&path);
74590 shp->shm_nattch++;
74591+#ifdef CONFIG_GRKERNSEC
74592+ shp->shm_lapid = current->pid;
74593+#endif
74594 size = i_size_read(path.dentry->d_inode);
74595 shm_unlock(shp);
74596
74597diff --git a/kernel/acct.c b/kernel/acct.c
74598index b9bd7f0..1762b4a 100644
74599--- a/kernel/acct.c
74600+++ b/kernel/acct.c
74601@@ -550,7 +550,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
74602 */
74603 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
74604 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
74605- file->f_op->write(file, (char *)&ac,
74606+ file->f_op->write(file, (char __force_user *)&ac,
74607 sizeof(acct_t), &file->f_pos);
74608 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
74609 set_fs(fs);
74610diff --git a/kernel/audit.c b/kernel/audit.c
74611index 8a667f10..7375e3f 100644
74612--- a/kernel/audit.c
74613+++ b/kernel/audit.c
74614@@ -116,7 +116,7 @@ u32 audit_sig_sid = 0;
74615 3) suppressed due to audit_rate_limit
74616 4) suppressed due to audit_backlog_limit
74617 */
74618-static atomic_t audit_lost = ATOMIC_INIT(0);
74619+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
74620
74621 /* The netlink socket. */
74622 static struct sock *audit_sock;
74623@@ -238,7 +238,7 @@ void audit_log_lost(const char *message)
74624 unsigned long now;
74625 int print;
74626
74627- atomic_inc(&audit_lost);
74628+ atomic_inc_unchecked(&audit_lost);
74629
74630 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
74631
74632@@ -257,7 +257,7 @@ void audit_log_lost(const char *message)
74633 printk(KERN_WARNING
74634 "audit: audit_lost=%d audit_rate_limit=%d "
74635 "audit_backlog_limit=%d\n",
74636- atomic_read(&audit_lost),
74637+ atomic_read_unchecked(&audit_lost),
74638 audit_rate_limit,
74639 audit_backlog_limit);
74640 audit_panic(message);
74641@@ -681,7 +681,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
74642 status_set.pid = audit_pid;
74643 status_set.rate_limit = audit_rate_limit;
74644 status_set.backlog_limit = audit_backlog_limit;
74645- status_set.lost = atomic_read(&audit_lost);
74646+ status_set.lost = atomic_read_unchecked(&audit_lost);
74647 status_set.backlog = skb_queue_len(&audit_skb_queue);
74648 audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
74649 &status_set, sizeof(status_set));
74650diff --git a/kernel/auditsc.c b/kernel/auditsc.c
74651index c4b72b0..8654c4e 100644
74652--- a/kernel/auditsc.c
74653+++ b/kernel/auditsc.c
74654@@ -2295,7 +2295,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
74655 }
74656
74657 /* global counter which is incremented every time something logs in */
74658-static atomic_t session_id = ATOMIC_INIT(0);
74659+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
74660
74661 /**
74662 * audit_set_loginuid - set current task's audit_context loginuid
74663@@ -2319,7 +2319,7 @@ int audit_set_loginuid(kuid_t loginuid)
74664 return -EPERM;
74665 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
74666
74667- sessionid = atomic_inc_return(&session_id);
74668+ sessionid = atomic_inc_return_unchecked(&session_id);
74669 if (context && context->in_syscall) {
74670 struct audit_buffer *ab;
74671
74672diff --git a/kernel/capability.c b/kernel/capability.c
74673index f6c2ce5..982c0f9 100644
74674--- a/kernel/capability.c
74675+++ b/kernel/capability.c
74676@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
74677 * before modification is attempted and the application
74678 * fails.
74679 */
74680+ if (tocopy > ARRAY_SIZE(kdata))
74681+ return -EFAULT;
74682+
74683 if (copy_to_user(dataptr, kdata, tocopy
74684 * sizeof(struct __user_cap_data_struct))) {
74685 return -EFAULT;
74686@@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
74687 int ret;
74688
74689 rcu_read_lock();
74690- ret = security_capable(__task_cred(t), ns, cap);
74691+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
74692+ gr_task_is_capable(t, __task_cred(t), cap);
74693 rcu_read_unlock();
74694
74695- return (ret == 0);
74696+ return ret;
74697 }
74698
74699 /**
74700@@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
74701 int ret;
74702
74703 rcu_read_lock();
74704- ret = security_capable_noaudit(__task_cred(t), ns, cap);
74705+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
74706 rcu_read_unlock();
74707
74708- return (ret == 0);
74709+ return ret;
74710 }
74711
74712 /**
74713@@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
74714 BUG();
74715 }
74716
74717- if (security_capable(current_cred(), ns, cap) == 0) {
74718+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
74719 current->flags |= PF_SUPERPRIV;
74720 return true;
74721 }
74722@@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
74723 }
74724 EXPORT_SYMBOL(ns_capable);
74725
74726+bool ns_capable_nolog(struct user_namespace *ns, int cap)
74727+{
74728+ if (unlikely(!cap_valid(cap))) {
74729+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
74730+ BUG();
74731+ }
74732+
74733+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
74734+ current->flags |= PF_SUPERPRIV;
74735+ return true;
74736+ }
74737+ return false;
74738+}
74739+EXPORT_SYMBOL(ns_capable_nolog);
74740+
74741 /**
74742 * file_ns_capable - Determine if the file's opener had a capability in effect
74743 * @file: The file we want to check
74744@@ -432,6 +451,12 @@ bool capable(int cap)
74745 }
74746 EXPORT_SYMBOL(capable);
74747
74748+bool capable_nolog(int cap)
74749+{
74750+ return ns_capable_nolog(&init_user_ns, cap);
74751+}
74752+EXPORT_SYMBOL(capable_nolog);
74753+
74754 /**
74755 * nsown_capable - Check superior capability to one's own user_ns
74756 * @cap: The capability in question
74757@@ -464,3 +489,10 @@ bool inode_capable(const struct inode *inode, int cap)
74758
74759 return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
74760 }
74761+
74762+bool inode_capable_nolog(const struct inode *inode, int cap)
74763+{
74764+ struct user_namespace *ns = current_user_ns();
74765+
74766+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
74767+}
74768diff --git a/kernel/cgroup.c b/kernel/cgroup.c
74769index a48de6a..df24bfe 100644
74770--- a/kernel/cgroup.c
74771+++ b/kernel/cgroup.c
74772@@ -5567,7 +5567,7 @@ static int cgroup_css_links_read(struct cgroup *cont,
74773 struct css_set *cg = link->cg;
74774 struct task_struct *task;
74775 int count = 0;
74776- seq_printf(seq, "css_set %p\n", cg);
74777+ seq_printf(seq, "css_set %pK\n", cg);
74778 list_for_each_entry(task, &cg->tasks, cg_list) {
74779 if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
74780 seq_puts(seq, " ...\n");
74781diff --git a/kernel/compat.c b/kernel/compat.c
74782index 19971d8..02fe2df 100644
74783--- a/kernel/compat.c
74784+++ b/kernel/compat.c
74785@@ -13,6 +13,7 @@
74786
74787 #include <linux/linkage.h>
74788 #include <linux/compat.h>
74789+#include <linux/module.h>
74790 #include <linux/errno.h>
74791 #include <linux/time.h>
74792 #include <linux/signal.h>
74793@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
74794 mm_segment_t oldfs;
74795 long ret;
74796
74797- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
74798+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
74799 oldfs = get_fs();
74800 set_fs(KERNEL_DS);
74801 ret = hrtimer_nanosleep_restart(restart);
74802@@ -252,7 +253,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
74803 oldfs = get_fs();
74804 set_fs(KERNEL_DS);
74805 ret = hrtimer_nanosleep(&tu,
74806- rmtp ? (struct timespec __user *)&rmt : NULL,
74807+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
74808 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
74809 set_fs(oldfs);
74810
74811@@ -361,7 +362,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
74812 mm_segment_t old_fs = get_fs();
74813
74814 set_fs(KERNEL_DS);
74815- ret = sys_sigpending((old_sigset_t __user *) &s);
74816+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
74817 set_fs(old_fs);
74818 if (ret == 0)
74819 ret = put_user(s, set);
74820@@ -451,7 +452,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
74821 mm_segment_t old_fs = get_fs();
74822
74823 set_fs(KERNEL_DS);
74824- ret = sys_old_getrlimit(resource, &r);
74825+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
74826 set_fs(old_fs);
74827
74828 if (!ret) {
74829@@ -523,7 +524,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
74830 mm_segment_t old_fs = get_fs();
74831
74832 set_fs(KERNEL_DS);
74833- ret = sys_getrusage(who, (struct rusage __user *) &r);
74834+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
74835 set_fs(old_fs);
74836
74837 if (ret)
74838@@ -552,8 +553,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
74839 set_fs (KERNEL_DS);
74840 ret = sys_wait4(pid,
74841 (stat_addr ?
74842- (unsigned int __user *) &status : NULL),
74843- options, (struct rusage __user *) &r);
74844+ (unsigned int __force_user *) &status : NULL),
74845+ options, (struct rusage __force_user *) &r);
74846 set_fs (old_fs);
74847
74848 if (ret > 0) {
74849@@ -579,8 +580,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
74850 memset(&info, 0, sizeof(info));
74851
74852 set_fs(KERNEL_DS);
74853- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
74854- uru ? (struct rusage __user *)&ru : NULL);
74855+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
74856+ uru ? (struct rusage __force_user *)&ru : NULL);
74857 set_fs(old_fs);
74858
74859 if ((ret < 0) || (info.si_signo == 0))
74860@@ -714,8 +715,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
74861 oldfs = get_fs();
74862 set_fs(KERNEL_DS);
74863 err = sys_timer_settime(timer_id, flags,
74864- (struct itimerspec __user *) &newts,
74865- (struct itimerspec __user *) &oldts);
74866+ (struct itimerspec __force_user *) &newts,
74867+ (struct itimerspec __force_user *) &oldts);
74868 set_fs(oldfs);
74869 if (!err && old && put_compat_itimerspec(old, &oldts))
74870 return -EFAULT;
74871@@ -732,7 +733,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
74872 oldfs = get_fs();
74873 set_fs(KERNEL_DS);
74874 err = sys_timer_gettime(timer_id,
74875- (struct itimerspec __user *) &ts);
74876+ (struct itimerspec __force_user *) &ts);
74877 set_fs(oldfs);
74878 if (!err && put_compat_itimerspec(setting, &ts))
74879 return -EFAULT;
74880@@ -751,7 +752,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
74881 oldfs = get_fs();
74882 set_fs(KERNEL_DS);
74883 err = sys_clock_settime(which_clock,
74884- (struct timespec __user *) &ts);
74885+ (struct timespec __force_user *) &ts);
74886 set_fs(oldfs);
74887 return err;
74888 }
74889@@ -766,7 +767,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
74890 oldfs = get_fs();
74891 set_fs(KERNEL_DS);
74892 err = sys_clock_gettime(which_clock,
74893- (struct timespec __user *) &ts);
74894+ (struct timespec __force_user *) &ts);
74895 set_fs(oldfs);
74896 if (!err && put_compat_timespec(&ts, tp))
74897 return -EFAULT;
74898@@ -786,7 +787,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
74899
74900 oldfs = get_fs();
74901 set_fs(KERNEL_DS);
74902- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
74903+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
74904 set_fs(oldfs);
74905
74906 err = compat_put_timex(utp, &txc);
74907@@ -806,7 +807,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
74908 oldfs = get_fs();
74909 set_fs(KERNEL_DS);
74910 err = sys_clock_getres(which_clock,
74911- (struct timespec __user *) &ts);
74912+ (struct timespec __force_user *) &ts);
74913 set_fs(oldfs);
74914 if (!err && tp && put_compat_timespec(&ts, tp))
74915 return -EFAULT;
74916@@ -818,9 +819,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
74917 long err;
74918 mm_segment_t oldfs;
74919 struct timespec tu;
74920- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
74921+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
74922
74923- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
74924+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
74925 oldfs = get_fs();
74926 set_fs(KERNEL_DS);
74927 err = clock_nanosleep_restart(restart);
74928@@ -852,8 +853,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
74929 oldfs = get_fs();
74930 set_fs(KERNEL_DS);
74931 err = sys_clock_nanosleep(which_clock, flags,
74932- (struct timespec __user *) &in,
74933- (struct timespec __user *) &out);
74934+ (struct timespec __force_user *) &in,
74935+ (struct timespec __force_user *) &out);
74936 set_fs(oldfs);
74937
74938 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
74939diff --git a/kernel/configs.c b/kernel/configs.c
74940index 42e8fa0..9e7406b 100644
74941--- a/kernel/configs.c
74942+++ b/kernel/configs.c
74943@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
74944 struct proc_dir_entry *entry;
74945
74946 /* create the current config file */
74947+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
74948+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
74949+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
74950+ &ikconfig_file_ops);
74951+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
74952+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
74953+ &ikconfig_file_ops);
74954+#endif
74955+#else
74956 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
74957 &ikconfig_file_ops);
74958+#endif
74959+
74960 if (!entry)
74961 return -ENOMEM;
74962
74963diff --git a/kernel/cred.c b/kernel/cred.c
74964index e0573a4..3874e41 100644
74965--- a/kernel/cred.c
74966+++ b/kernel/cred.c
74967@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
74968 validate_creds(cred);
74969 alter_cred_subscribers(cred, -1);
74970 put_cred(cred);
74971+
74972+#ifdef CONFIG_GRKERNSEC_SETXID
74973+ cred = (struct cred *) tsk->delayed_cred;
74974+ if (cred != NULL) {
74975+ tsk->delayed_cred = NULL;
74976+ validate_creds(cred);
74977+ alter_cred_subscribers(cred, -1);
74978+ put_cred(cred);
74979+ }
74980+#endif
74981 }
74982
74983 /**
74984@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
74985 * Always returns 0 thus allowing this function to be tail-called at the end
74986 * of, say, sys_setgid().
74987 */
74988-int commit_creds(struct cred *new)
74989+static int __commit_creds(struct cred *new)
74990 {
74991 struct task_struct *task = current;
74992 const struct cred *old = task->real_cred;
74993@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
74994
74995 get_cred(new); /* we will require a ref for the subj creds too */
74996
74997+ gr_set_role_label(task, new->uid, new->gid);
74998+
74999 /* dumpability changes */
75000 if (!uid_eq(old->euid, new->euid) ||
75001 !gid_eq(old->egid, new->egid) ||
75002@@ -479,6 +491,102 @@ int commit_creds(struct cred *new)
75003 put_cred(old);
75004 return 0;
75005 }
75006+#ifdef CONFIG_GRKERNSEC_SETXID
75007+extern int set_user(struct cred *new);
75008+
75009+void gr_delayed_cred_worker(void)
75010+{
75011+ const struct cred *new = current->delayed_cred;
75012+ struct cred *ncred;
75013+
75014+ current->delayed_cred = NULL;
75015+
75016+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
75017+ // from doing get_cred on it when queueing this
75018+ put_cred(new);
75019+ return;
75020+ } else if (new == NULL)
75021+ return;
75022+
75023+ ncred = prepare_creds();
75024+ if (!ncred)
75025+ goto die;
75026+ // uids
75027+ ncred->uid = new->uid;
75028+ ncred->euid = new->euid;
75029+ ncred->suid = new->suid;
75030+ ncred->fsuid = new->fsuid;
75031+ // gids
75032+ ncred->gid = new->gid;
75033+ ncred->egid = new->egid;
75034+ ncred->sgid = new->sgid;
75035+ ncred->fsgid = new->fsgid;
75036+ // groups
75037+ if (set_groups(ncred, new->group_info) < 0) {
75038+ abort_creds(ncred);
75039+ goto die;
75040+ }
75041+ // caps
75042+ ncred->securebits = new->securebits;
75043+ ncred->cap_inheritable = new->cap_inheritable;
75044+ ncred->cap_permitted = new->cap_permitted;
75045+ ncred->cap_effective = new->cap_effective;
75046+ ncred->cap_bset = new->cap_bset;
75047+
75048+ if (set_user(ncred)) {
75049+ abort_creds(ncred);
75050+ goto die;
75051+ }
75052+
75053+ // from doing get_cred on it when queueing this
75054+ put_cred(new);
75055+
75056+ __commit_creds(ncred);
75057+ return;
75058+die:
75059+ // from doing get_cred on it when queueing this
75060+ put_cred(new);
75061+ do_group_exit(SIGKILL);
75062+}
75063+#endif
75064+
75065+int commit_creds(struct cred *new)
75066+{
75067+#ifdef CONFIG_GRKERNSEC_SETXID
75068+ int ret;
75069+ int schedule_it = 0;
75070+ struct task_struct *t;
75071+
75072+ /* we won't get called with tasklist_lock held for writing
75073+ and interrupts disabled as the cred struct in that case is
75074+ init_cred
75075+ */
75076+ if (grsec_enable_setxid && !current_is_single_threaded() &&
75077+ uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
75078+ !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
75079+ schedule_it = 1;
75080+ }
75081+ ret = __commit_creds(new);
75082+ if (schedule_it) {
75083+ rcu_read_lock();
75084+ read_lock(&tasklist_lock);
75085+ for (t = next_thread(current); t != current;
75086+ t = next_thread(t)) {
75087+ if (t->delayed_cred == NULL) {
75088+ t->delayed_cred = get_cred(new);
75089+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
75090+ set_tsk_need_resched(t);
75091+ }
75092+ }
75093+ read_unlock(&tasklist_lock);
75094+ rcu_read_unlock();
75095+ }
75096+ return ret;
75097+#else
75098+ return __commit_creds(new);
75099+#endif
75100+}
75101+
75102 EXPORT_SYMBOL(commit_creds);
75103
75104 /**
75105diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
75106index c26278f..e323fb8 100644
75107--- a/kernel/debug/debug_core.c
75108+++ b/kernel/debug/debug_core.c
75109@@ -123,7 +123,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
75110 */
75111 static atomic_t masters_in_kgdb;
75112 static atomic_t slaves_in_kgdb;
75113-static atomic_t kgdb_break_tasklet_var;
75114+static atomic_unchecked_t kgdb_break_tasklet_var;
75115 atomic_t kgdb_setting_breakpoint;
75116
75117 struct task_struct *kgdb_usethread;
75118@@ -133,7 +133,7 @@ int kgdb_single_step;
75119 static pid_t kgdb_sstep_pid;
75120
75121 /* to keep track of the CPU which is doing the single stepping*/
75122-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
75123+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
75124
75125 /*
75126 * If you are debugging a problem where roundup (the collection of
75127@@ -541,7 +541,7 @@ return_normal:
75128 * kernel will only try for the value of sstep_tries before
75129 * giving up and continuing on.
75130 */
75131- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
75132+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
75133 (kgdb_info[cpu].task &&
75134 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
75135 atomic_set(&kgdb_active, -1);
75136@@ -635,8 +635,8 @@ cpu_master_loop:
75137 }
75138
75139 kgdb_restore:
75140- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
75141- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
75142+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
75143+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
75144 if (kgdb_info[sstep_cpu].task)
75145 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
75146 else
75147@@ -888,18 +888,18 @@ static void kgdb_unregister_callbacks(void)
75148 static void kgdb_tasklet_bpt(unsigned long ing)
75149 {
75150 kgdb_breakpoint();
75151- atomic_set(&kgdb_break_tasklet_var, 0);
75152+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
75153 }
75154
75155 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
75156
75157 void kgdb_schedule_breakpoint(void)
75158 {
75159- if (atomic_read(&kgdb_break_tasklet_var) ||
75160+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
75161 atomic_read(&kgdb_active) != -1 ||
75162 atomic_read(&kgdb_setting_breakpoint))
75163 return;
75164- atomic_inc(&kgdb_break_tasklet_var);
75165+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
75166 tasklet_schedule(&kgdb_tasklet_breakpoint);
75167 }
75168 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
75169diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
75170index 00eb8f7..d7e3244 100644
75171--- a/kernel/debug/kdb/kdb_main.c
75172+++ b/kernel/debug/kdb/kdb_main.c
75173@@ -1974,7 +1974,7 @@ static int kdb_lsmod(int argc, const char **argv)
75174 continue;
75175
75176 kdb_printf("%-20s%8u 0x%p ", mod->name,
75177- mod->core_size, (void *)mod);
75178+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
75179 #ifdef CONFIG_MODULE_UNLOAD
75180 kdb_printf("%4ld ", module_refcount(mod));
75181 #endif
75182@@ -1984,7 +1984,7 @@ static int kdb_lsmod(int argc, const char **argv)
75183 kdb_printf(" (Loading)");
75184 else
75185 kdb_printf(" (Live)");
75186- kdb_printf(" 0x%p", mod->module_core);
75187+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
75188
75189 #ifdef CONFIG_MODULE_UNLOAD
75190 {
75191diff --git a/kernel/events/core.c b/kernel/events/core.c
75192index 9fcb094..353baaaf 100644
75193--- a/kernel/events/core.c
75194+++ b/kernel/events/core.c
75195@@ -154,8 +154,15 @@ static struct srcu_struct pmus_srcu;
75196 * 0 - disallow raw tracepoint access for unpriv
75197 * 1 - disallow cpu events for unpriv
75198 * 2 - disallow kernel profiling for unpriv
75199+ * 3 - disallow all unpriv perf event use
75200 */
75201-int sysctl_perf_event_paranoid __read_mostly = 1;
75202+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
75203+int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
75204+#elif defined(CONFIG_GRKERNSEC_HIDESYM)
75205+int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
75206+#else
75207+int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
75208+#endif
75209
75210 /* Minimum for 512 kiB + 1 user control page */
75211 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
75212@@ -182,7 +189,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
75213 return 0;
75214 }
75215
75216-static atomic64_t perf_event_id;
75217+static atomic64_unchecked_t perf_event_id;
75218
75219 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
75220 enum event_type_t event_type);
75221@@ -2677,7 +2684,7 @@ static void __perf_event_read(void *info)
75222
75223 static inline u64 perf_event_count(struct perf_event *event)
75224 {
75225- return local64_read(&event->count) + atomic64_read(&event->child_count);
75226+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
75227 }
75228
75229 static u64 perf_event_read(struct perf_event *event)
75230@@ -3007,9 +3014,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
75231 mutex_lock(&event->child_mutex);
75232 total += perf_event_read(event);
75233 *enabled += event->total_time_enabled +
75234- atomic64_read(&event->child_total_time_enabled);
75235+ atomic64_read_unchecked(&event->child_total_time_enabled);
75236 *running += event->total_time_running +
75237- atomic64_read(&event->child_total_time_running);
75238+ atomic64_read_unchecked(&event->child_total_time_running);
75239
75240 list_for_each_entry(child, &event->child_list, child_list) {
75241 total += perf_event_read(child);
75242@@ -3412,10 +3419,10 @@ void perf_event_update_userpage(struct perf_event *event)
75243 userpg->offset -= local64_read(&event->hw.prev_count);
75244
75245 userpg->time_enabled = enabled +
75246- atomic64_read(&event->child_total_time_enabled);
75247+ atomic64_read_unchecked(&event->child_total_time_enabled);
75248
75249 userpg->time_running = running +
75250- atomic64_read(&event->child_total_time_running);
75251+ atomic64_read_unchecked(&event->child_total_time_running);
75252
75253 arch_perf_update_userpage(userpg, now);
75254
75255@@ -3886,7 +3893,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
75256
75257 /* Data. */
75258 sp = perf_user_stack_pointer(regs);
75259- rem = __output_copy_user(handle, (void *) sp, dump_size);
75260+ rem = __output_copy_user(handle, (void __user *) sp, dump_size);
75261 dyn_size = dump_size - rem;
75262
75263 perf_output_skip(handle, rem);
75264@@ -3974,11 +3981,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
75265 values[n++] = perf_event_count(event);
75266 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
75267 values[n++] = enabled +
75268- atomic64_read(&event->child_total_time_enabled);
75269+ atomic64_read_unchecked(&event->child_total_time_enabled);
75270 }
75271 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
75272 values[n++] = running +
75273- atomic64_read(&event->child_total_time_running);
75274+ atomic64_read_unchecked(&event->child_total_time_running);
75275 }
75276 if (read_format & PERF_FORMAT_ID)
75277 values[n++] = primary_event_id(event);
75278@@ -4726,12 +4733,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
75279 * need to add enough zero bytes after the string to handle
75280 * the 64bit alignment we do later.
75281 */
75282- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
75283+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
75284 if (!buf) {
75285 name = strncpy(tmp, "//enomem", sizeof(tmp));
75286 goto got_name;
75287 }
75288- name = d_path(&file->f_path, buf, PATH_MAX);
75289+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
75290 if (IS_ERR(name)) {
75291 name = strncpy(tmp, "//toolong", sizeof(tmp));
75292 goto got_name;
75293@@ -6167,7 +6174,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
75294 event->parent = parent_event;
75295
75296 event->ns = get_pid_ns(task_active_pid_ns(current));
75297- event->id = atomic64_inc_return(&perf_event_id);
75298+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
75299
75300 event->state = PERF_EVENT_STATE_INACTIVE;
75301
75302@@ -6463,6 +6470,11 @@ SYSCALL_DEFINE5(perf_event_open,
75303 if (flags & ~PERF_FLAG_ALL)
75304 return -EINVAL;
75305
75306+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
75307+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
75308+ return -EACCES;
75309+#endif
75310+
75311 err = perf_copy_attr(attr_uptr, &attr);
75312 if (err)
75313 return err;
75314@@ -6795,10 +6807,10 @@ static void sync_child_event(struct perf_event *child_event,
75315 /*
75316 * Add back the child's count to the parent's count:
75317 */
75318- atomic64_add(child_val, &parent_event->child_count);
75319- atomic64_add(child_event->total_time_enabled,
75320+ atomic64_add_unchecked(child_val, &parent_event->child_count);
75321+ atomic64_add_unchecked(child_event->total_time_enabled,
75322 &parent_event->child_total_time_enabled);
75323- atomic64_add(child_event->total_time_running,
75324+ atomic64_add_unchecked(child_event->total_time_running,
75325 &parent_event->child_total_time_running);
75326
75327 /*
75328diff --git a/kernel/events/internal.h b/kernel/events/internal.h
75329index eb675c4..54912ff 100644
75330--- a/kernel/events/internal.h
75331+++ b/kernel/events/internal.h
75332@@ -77,10 +77,10 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
75333 return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
75334 }
75335
75336-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
75337+#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user) \
75338 static inline unsigned int \
75339 func_name(struct perf_output_handle *handle, \
75340- const void *buf, unsigned int len) \
75341+ const void user *buf, unsigned int len) \
75342 { \
75343 unsigned long size, written; \
75344 \
75345@@ -112,17 +112,17 @@ static inline int memcpy_common(void *dst, const void *src, size_t n)
75346 return n;
75347 }
75348
75349-DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
75350+DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )
75351
75352 #define MEMCPY_SKIP(dst, src, n) (n)
75353
75354-DEFINE_OUTPUT_COPY(__output_skip, MEMCPY_SKIP)
75355+DEFINE_OUTPUT_COPY(__output_skip, MEMCPY_SKIP, )
75356
75357 #ifndef arch_perf_out_copy_user
75358 #define arch_perf_out_copy_user __copy_from_user_inatomic
75359 #endif
75360
75361-DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
75362+DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user)
75363
75364 /* Callchain handling */
75365 extern struct perf_callchain_entry *
75366diff --git a/kernel/exit.c b/kernel/exit.c
75367index 60bc027..ca6d727 100644
75368--- a/kernel/exit.c
75369+++ b/kernel/exit.c
75370@@ -172,6 +172,10 @@ void release_task(struct task_struct * p)
75371 struct task_struct *leader;
75372 int zap_leader;
75373 repeat:
75374+#ifdef CONFIG_NET
75375+ gr_del_task_from_ip_table(p);
75376+#endif
75377+
75378 /* don't need to get the RCU readlock here - the process is dead and
75379 * can't be modifying its own credentials. But shut RCU-lockdep up */
75380 rcu_read_lock();
75381@@ -340,7 +344,7 @@ int allow_signal(int sig)
75382 * know it'll be handled, so that they don't get converted to
75383 * SIGKILL or just silently dropped.
75384 */
75385- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
75386+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
75387 recalc_sigpending();
75388 spin_unlock_irq(&current->sighand->siglock);
75389 return 0;
75390@@ -710,6 +714,8 @@ void do_exit(long code)
75391 struct task_struct *tsk = current;
75392 int group_dead;
75393
75394+ set_fs(USER_DS);
75395+
75396 profile_task_exit(tsk);
75397
75398 WARN_ON(blk_needs_flush_plug(tsk));
75399@@ -726,7 +732,6 @@ void do_exit(long code)
75400 * mm_release()->clear_child_tid() from writing to a user-controlled
75401 * kernel address.
75402 */
75403- set_fs(USER_DS);
75404
75405 ptrace_event(PTRACE_EVENT_EXIT, code);
75406
75407@@ -785,6 +790,9 @@ void do_exit(long code)
75408 tsk->exit_code = code;
75409 taskstats_exit(tsk, group_dead);
75410
75411+ gr_acl_handle_psacct(tsk, code);
75412+ gr_acl_handle_exit();
75413+
75414 exit_mm(tsk);
75415
75416 if (group_dead)
75417@@ -905,7 +913,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
75418 * Take down every thread in the group. This is called by fatal signals
75419 * as well as by sys_exit_group (below).
75420 */
75421-void
75422+__noreturn void
75423 do_group_exit(int exit_code)
75424 {
75425 struct signal_struct *sig = current->signal;
75426diff --git a/kernel/fork.c b/kernel/fork.c
75427index 1766d32..c0e44e2 100644
75428--- a/kernel/fork.c
75429+++ b/kernel/fork.c
75430@@ -318,7 +318,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
75431 *stackend = STACK_END_MAGIC; /* for overflow detection */
75432
75433 #ifdef CONFIG_CC_STACKPROTECTOR
75434- tsk->stack_canary = get_random_int();
75435+ tsk->stack_canary = pax_get_random_long();
75436 #endif
75437
75438 /*
75439@@ -344,13 +344,81 @@ free_tsk:
75440 }
75441
75442 #ifdef CONFIG_MMU
75443+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
75444+{
75445+ struct vm_area_struct *tmp;
75446+ unsigned long charge;
75447+ struct mempolicy *pol;
75448+ struct file *file;
75449+
75450+ charge = 0;
75451+ if (mpnt->vm_flags & VM_ACCOUNT) {
75452+ unsigned long len = vma_pages(mpnt);
75453+
75454+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
75455+ goto fail_nomem;
75456+ charge = len;
75457+ }
75458+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
75459+ if (!tmp)
75460+ goto fail_nomem;
75461+ *tmp = *mpnt;
75462+ tmp->vm_mm = mm;
75463+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
75464+ pol = mpol_dup(vma_policy(mpnt));
75465+ if (IS_ERR(pol))
75466+ goto fail_nomem_policy;
75467+ vma_set_policy(tmp, pol);
75468+ if (anon_vma_fork(tmp, mpnt))
75469+ goto fail_nomem_anon_vma_fork;
75470+ tmp->vm_flags &= ~VM_LOCKED;
75471+ tmp->vm_next = tmp->vm_prev = NULL;
75472+ tmp->vm_mirror = NULL;
75473+ file = tmp->vm_file;
75474+ if (file) {
75475+ struct inode *inode = file_inode(file);
75476+ struct address_space *mapping = file->f_mapping;
75477+
75478+ get_file(file);
75479+ if (tmp->vm_flags & VM_DENYWRITE)
75480+ atomic_dec(&inode->i_writecount);
75481+ mutex_lock(&mapping->i_mmap_mutex);
75482+ if (tmp->vm_flags & VM_SHARED)
75483+ mapping->i_mmap_writable++;
75484+ flush_dcache_mmap_lock(mapping);
75485+ /* insert tmp into the share list, just after mpnt */
75486+ if (unlikely(tmp->vm_flags & VM_NONLINEAR))
75487+ vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
75488+ else
75489+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
75490+ flush_dcache_mmap_unlock(mapping);
75491+ mutex_unlock(&mapping->i_mmap_mutex);
75492+ }
75493+
75494+ /*
75495+ * Clear hugetlb-related page reserves for children. This only
75496+ * affects MAP_PRIVATE mappings. Faults generated by the child
75497+ * are not guaranteed to succeed, even if read-only
75498+ */
75499+ if (is_vm_hugetlb_page(tmp))
75500+ reset_vma_resv_huge_pages(tmp);
75501+
75502+ return tmp;
75503+
75504+fail_nomem_anon_vma_fork:
75505+ mpol_put(pol);
75506+fail_nomem_policy:
75507+ kmem_cache_free(vm_area_cachep, tmp);
75508+fail_nomem:
75509+ vm_unacct_memory(charge);
75510+ return NULL;
75511+}
75512+
75513 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
75514 {
75515 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
75516 struct rb_node **rb_link, *rb_parent;
75517 int retval;
75518- unsigned long charge;
75519- struct mempolicy *pol;
75520
75521 uprobe_start_dup_mmap();
75522 down_write(&oldmm->mmap_sem);
75523@@ -364,8 +432,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
75524 mm->locked_vm = 0;
75525 mm->mmap = NULL;
75526 mm->mmap_cache = NULL;
75527- mm->free_area_cache = oldmm->mmap_base;
75528- mm->cached_hole_size = ~0UL;
75529+ mm->free_area_cache = oldmm->free_area_cache;
75530+ mm->cached_hole_size = oldmm->cached_hole_size;
75531 mm->map_count = 0;
75532 cpumask_clear(mm_cpumask(mm));
75533 mm->mm_rb = RB_ROOT;
75534@@ -381,57 +449,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
75535
75536 prev = NULL;
75537 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
75538- struct file *file;
75539-
75540 if (mpnt->vm_flags & VM_DONTCOPY) {
75541 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
75542 -vma_pages(mpnt));
75543 continue;
75544 }
75545- charge = 0;
75546- if (mpnt->vm_flags & VM_ACCOUNT) {
75547- unsigned long len = vma_pages(mpnt);
75548-
75549- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
75550- goto fail_nomem;
75551- charge = len;
75552- }
75553- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
75554- if (!tmp)
75555- goto fail_nomem;
75556- *tmp = *mpnt;
75557- INIT_LIST_HEAD(&tmp->anon_vma_chain);
75558- pol = mpol_dup(vma_policy(mpnt));
75559- retval = PTR_ERR(pol);
75560- if (IS_ERR(pol))
75561- goto fail_nomem_policy;
75562- vma_set_policy(tmp, pol);
75563- tmp->vm_mm = mm;
75564- if (anon_vma_fork(tmp, mpnt))
75565- goto fail_nomem_anon_vma_fork;
75566- tmp->vm_flags &= ~VM_LOCKED;
75567- tmp->vm_next = tmp->vm_prev = NULL;
75568- file = tmp->vm_file;
75569- if (file) {
75570- struct inode *inode = file_inode(file);
75571- struct address_space *mapping = file->f_mapping;
75572-
75573- get_file(file);
75574- if (tmp->vm_flags & VM_DENYWRITE)
75575- atomic_dec(&inode->i_writecount);
75576- mutex_lock(&mapping->i_mmap_mutex);
75577- if (tmp->vm_flags & VM_SHARED)
75578- mapping->i_mmap_writable++;
75579- flush_dcache_mmap_lock(mapping);
75580- /* insert tmp into the share list, just after mpnt */
75581- if (unlikely(tmp->vm_flags & VM_NONLINEAR))
75582- vma_nonlinear_insert(tmp,
75583- &mapping->i_mmap_nonlinear);
75584- else
75585- vma_interval_tree_insert_after(tmp, mpnt,
75586- &mapping->i_mmap);
75587- flush_dcache_mmap_unlock(mapping);
75588- mutex_unlock(&mapping->i_mmap_mutex);
75589+ tmp = dup_vma(mm, oldmm, mpnt);
75590+ if (!tmp) {
75591+ retval = -ENOMEM;
75592+ goto out;
75593 }
75594
75595 /*
75596@@ -463,6 +489,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
75597 if (retval)
75598 goto out;
75599 }
75600+
75601+#ifdef CONFIG_PAX_SEGMEXEC
75602+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
75603+ struct vm_area_struct *mpnt_m;
75604+
75605+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
75606+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
75607+
75608+ if (!mpnt->vm_mirror)
75609+ continue;
75610+
75611+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
75612+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
75613+ mpnt->vm_mirror = mpnt_m;
75614+ } else {
75615+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
75616+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
75617+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
75618+ mpnt->vm_mirror->vm_mirror = mpnt;
75619+ }
75620+ }
75621+ BUG_ON(mpnt_m);
75622+ }
75623+#endif
75624+
75625 /* a new mm has just been created */
75626 arch_dup_mmap(oldmm, mm);
75627 retval = 0;
75628@@ -472,14 +523,6 @@ out:
75629 up_write(&oldmm->mmap_sem);
75630 uprobe_end_dup_mmap();
75631 return retval;
75632-fail_nomem_anon_vma_fork:
75633- mpol_put(pol);
75634-fail_nomem_policy:
75635- kmem_cache_free(vm_area_cachep, tmp);
75636-fail_nomem:
75637- retval = -ENOMEM;
75638- vm_unacct_memory(charge);
75639- goto out;
75640 }
75641
75642 static inline int mm_alloc_pgd(struct mm_struct *mm)
75643@@ -694,8 +737,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
75644 return ERR_PTR(err);
75645
75646 mm = get_task_mm(task);
75647- if (mm && mm != current->mm &&
75648- !ptrace_may_access(task, mode)) {
75649+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
75650+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
75651 mmput(mm);
75652 mm = ERR_PTR(-EACCES);
75653 }
75654@@ -917,13 +960,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
75655 spin_unlock(&fs->lock);
75656 return -EAGAIN;
75657 }
75658- fs->users++;
75659+ atomic_inc(&fs->users);
75660 spin_unlock(&fs->lock);
75661 return 0;
75662 }
75663 tsk->fs = copy_fs_struct(fs);
75664 if (!tsk->fs)
75665 return -ENOMEM;
75666+ /* Carry through gr_chroot_dentry and is_chrooted instead
75667+ of recomputing it here. Already copied when the task struct
75668+ is duplicated. This allows pivot_root to not be treated as
75669+ a chroot
75670+ */
75671+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
75672+
75673 return 0;
75674 }
75675
75676@@ -1196,6 +1246,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
75677 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
75678 #endif
75679 retval = -EAGAIN;
75680+
75681+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
75682+
75683 if (atomic_read(&p->real_cred->user->processes) >=
75684 task_rlimit(p, RLIMIT_NPROC)) {
75685 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
75686@@ -1441,6 +1494,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
75687 goto bad_fork_free_pid;
75688 }
75689
75690+ /* synchronizes with gr_set_acls()
75691+ we need to call this past the point of no return for fork()
75692+ */
75693+ gr_copy_label(p);
75694+
75695 if (clone_flags & CLONE_THREAD) {
75696 current->signal->nr_threads++;
75697 atomic_inc(&current->signal->live);
75698@@ -1524,6 +1582,8 @@ bad_fork_cleanup_count:
75699 bad_fork_free:
75700 free_task(p);
75701 fork_out:
75702+ gr_log_forkfail(retval);
75703+
75704 return ERR_PTR(retval);
75705 }
75706
75707@@ -1574,6 +1634,23 @@ long do_fork(unsigned long clone_flags,
75708 return -EINVAL;
75709 }
75710
75711+#ifdef CONFIG_GRKERNSEC
75712+ if (clone_flags & CLONE_NEWUSER) {
75713+ /*
75714+ * This doesn't really inspire confidence:
75715+ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
75716+ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
75717+ * Increases kernel attack surface in areas developers
75718+ * previously cared little about ("low importance due
75719+ * to requiring "root" capability")
75720+ * To be removed when this code receives *proper* review
75721+ */
75722+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
75723+ !capable(CAP_SETGID))
75724+ return -EPERM;
75725+ }
75726+#endif
75727+
75728 /*
75729 * Determine whether and which event to report to ptracer. When
75730 * called from kernel_thread or CLONE_UNTRACED is explicitly
75731@@ -1608,6 +1685,8 @@ long do_fork(unsigned long clone_flags,
75732 if (clone_flags & CLONE_PARENT_SETTID)
75733 put_user(nr, parent_tidptr);
75734
75735+ gr_handle_brute_check();
75736+
75737 if (clone_flags & CLONE_VFORK) {
75738 p->vfork_done = &vfork;
75739 init_completion(&vfork);
75740@@ -1761,7 +1840,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
75741 return 0;
75742
75743 /* don't need lock here; in the worst case we'll do useless copy */
75744- if (fs->users == 1)
75745+ if (atomic_read(&fs->users) == 1)
75746 return 0;
75747
75748 *new_fsp = copy_fs_struct(fs);
75749@@ -1873,7 +1952,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
75750 fs = current->fs;
75751 spin_lock(&fs->lock);
75752 current->fs = new_fs;
75753- if (--fs->users)
75754+ gr_set_chroot_entries(current, &current->fs->root);
75755+ if (atomic_dec_return(&fs->users))
75756 new_fs = NULL;
75757 else
75758 new_fs = fs;
75759diff --git a/kernel/futex.c b/kernel/futex.c
75760index b26dcfc..39e266a 100644
75761--- a/kernel/futex.c
75762+++ b/kernel/futex.c
75763@@ -54,6 +54,7 @@
75764 #include <linux/mount.h>
75765 #include <linux/pagemap.h>
75766 #include <linux/syscalls.h>
75767+#include <linux/ptrace.h>
75768 #include <linux/signal.h>
75769 #include <linux/export.h>
75770 #include <linux/magic.h>
75771@@ -241,6 +242,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
75772 struct page *page, *page_head;
75773 int err, ro = 0;
75774
75775+#ifdef CONFIG_PAX_SEGMEXEC
75776+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
75777+ return -EFAULT;
75778+#endif
75779+
75780 /*
75781 * The futex address must be "naturally" aligned.
75782 */
75783@@ -2732,6 +2738,7 @@ static int __init futex_init(void)
75784 {
75785 u32 curval;
75786 int i;
75787+ mm_segment_t oldfs;
75788
75789 /*
75790 * This will fail and we want it. Some arch implementations do
75791@@ -2743,8 +2750,11 @@ static int __init futex_init(void)
75792 * implementation, the non-functional ones will return
75793 * -ENOSYS.
75794 */
75795+ oldfs = get_fs();
75796+ set_fs(USER_DS);
75797 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
75798 futex_cmpxchg_enabled = 1;
75799+ set_fs(oldfs);
75800
75801 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
75802 plist_head_init(&futex_queues[i].chain);
75803diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
75804index f9f44fd..29885e4 100644
75805--- a/kernel/futex_compat.c
75806+++ b/kernel/futex_compat.c
75807@@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
75808 return 0;
75809 }
75810
75811-static void __user *futex_uaddr(struct robust_list __user *entry,
75812+static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
75813 compat_long_t futex_offset)
75814 {
75815 compat_uptr_t base = ptr_to_compat(entry);
75816diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
75817index 9b22d03..6295b62 100644
75818--- a/kernel/gcov/base.c
75819+++ b/kernel/gcov/base.c
75820@@ -102,11 +102,6 @@ void gcov_enable_events(void)
75821 }
75822
75823 #ifdef CONFIG_MODULES
75824-static inline int within(void *addr, void *start, unsigned long size)
75825-{
75826- return ((addr >= start) && (addr < start + size));
75827-}
75828-
75829 /* Update list and generate events when modules are unloaded. */
75830 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
75831 void *data)
75832@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
75833 prev = NULL;
75834 /* Remove entries located in module from linked list. */
75835 for (info = gcov_info_head; info; info = info->next) {
75836- if (within(info, mod->module_core, mod->core_size)) {
75837+ if (within_module_core_rw((unsigned long)info, mod)) {
75838 if (prev)
75839 prev->next = info->next;
75840 else
75841diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
75842index 7ef5556..8247f11 100644
75843--- a/kernel/hrtimer.c
75844+++ b/kernel/hrtimer.c
75845@@ -1416,7 +1416,7 @@ void hrtimer_peek_ahead_timers(void)
75846 local_irq_restore(flags);
75847 }
75848
75849-static void run_hrtimer_softirq(struct softirq_action *h)
75850+static void run_hrtimer_softirq(void)
75851 {
75852 struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
75853
75854@@ -1758,7 +1758,7 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
75855 return NOTIFY_OK;
75856 }
75857
75858-static struct notifier_block __cpuinitdata hrtimers_nb = {
75859+static struct notifier_block hrtimers_nb = {
75860 .notifier_call = hrtimer_cpu_notify,
75861 };
75862
75863diff --git a/kernel/irq_work.c b/kernel/irq_work.c
75864index 55fcce6..0e4cf34 100644
75865--- a/kernel/irq_work.c
75866+++ b/kernel/irq_work.c
75867@@ -189,12 +189,13 @@ static int irq_work_cpu_notify(struct notifier_block *self,
75868 return NOTIFY_OK;
75869 }
75870
75871-static struct notifier_block cpu_notify;
75872+static struct notifier_block cpu_notify = {
75873+ .notifier_call = irq_work_cpu_notify,
75874+ .priority = 0,
75875+};
75876
75877 static __init int irq_work_init_cpu_notifier(void)
75878 {
75879- cpu_notify.notifier_call = irq_work_cpu_notify;
75880- cpu_notify.priority = 0;
75881 register_cpu_notifier(&cpu_notify);
75882 return 0;
75883 }
75884diff --git a/kernel/jump_label.c b/kernel/jump_label.c
75885index 60f48fa..7f3a770 100644
75886--- a/kernel/jump_label.c
75887+++ b/kernel/jump_label.c
75888@@ -13,6 +13,7 @@
75889 #include <linux/sort.h>
75890 #include <linux/err.h>
75891 #include <linux/static_key.h>
75892+#include <linux/mm.h>
75893
75894 #ifdef HAVE_JUMP_LABEL
75895
75896@@ -50,7 +51,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
75897
75898 size = (((unsigned long)stop - (unsigned long)start)
75899 / sizeof(struct jump_entry));
75900+ pax_open_kernel();
75901 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
75902+ pax_close_kernel();
75903 }
75904
75905 static void jump_label_update(struct static_key *key, int enable);
75906@@ -357,10 +360,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
75907 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
75908 struct jump_entry *iter;
75909
75910+ pax_open_kernel();
75911 for (iter = iter_start; iter < iter_stop; iter++) {
75912 if (within_module_init(iter->code, mod))
75913 iter->code = 0;
75914 }
75915+ pax_close_kernel();
75916 }
75917
75918 static int
75919diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
75920index 2169fee..706ccca 100644
75921--- a/kernel/kallsyms.c
75922+++ b/kernel/kallsyms.c
75923@@ -11,6 +11,9 @@
75924 * Changed the compression method from stem compression to "table lookup"
75925 * compression (see scripts/kallsyms.c for a more complete description)
75926 */
75927+#ifdef CONFIG_GRKERNSEC_HIDESYM
75928+#define __INCLUDED_BY_HIDESYM 1
75929+#endif
75930 #include <linux/kallsyms.h>
75931 #include <linux/module.h>
75932 #include <linux/init.h>
75933@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
75934
75935 static inline int is_kernel_inittext(unsigned long addr)
75936 {
75937+ if (system_state != SYSTEM_BOOTING)
75938+ return 0;
75939+
75940 if (addr >= (unsigned long)_sinittext
75941 && addr <= (unsigned long)_einittext)
75942 return 1;
75943 return 0;
75944 }
75945
75946+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
75947+#ifdef CONFIG_MODULES
75948+static inline int is_module_text(unsigned long addr)
75949+{
75950+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
75951+ return 1;
75952+
75953+ addr = ktla_ktva(addr);
75954+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
75955+}
75956+#else
75957+static inline int is_module_text(unsigned long addr)
75958+{
75959+ return 0;
75960+}
75961+#endif
75962+#endif
75963+
75964 static inline int is_kernel_text(unsigned long addr)
75965 {
75966 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
75967@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
75968
75969 static inline int is_kernel(unsigned long addr)
75970 {
75971+
75972+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
75973+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
75974+ return 1;
75975+
75976+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
75977+#else
75978 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
75979+#endif
75980+
75981 return 1;
75982 return in_gate_area_no_mm(addr);
75983 }
75984
75985 static int is_ksym_addr(unsigned long addr)
75986 {
75987+
75988+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
75989+ if (is_module_text(addr))
75990+ return 0;
75991+#endif
75992+
75993 if (all_var)
75994 return is_kernel(addr);
75995
75996@@ -470,7 +509,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
75997
75998 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
75999 {
76000- iter->name[0] = '\0';
76001 iter->nameoff = get_symbol_offset(new_pos);
76002 iter->pos = new_pos;
76003 }
76004@@ -518,6 +556,11 @@ static int s_show(struct seq_file *m, void *p)
76005 {
76006 struct kallsym_iter *iter = m->private;
76007
76008+#ifdef CONFIG_GRKERNSEC_HIDESYM
76009+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
76010+ return 0;
76011+#endif
76012+
76013 /* Some debugging symbols have no name. Ignore them. */
76014 if (!iter->name[0])
76015 return 0;
76016@@ -531,6 +574,7 @@ static int s_show(struct seq_file *m, void *p)
76017 */
76018 type = iter->exported ? toupper(iter->type) :
76019 tolower(iter->type);
76020+
76021 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
76022 type, iter->name, iter->module_name);
76023 } else
76024@@ -556,7 +600,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
76025 struct kallsym_iter *iter;
76026 int ret;
76027
76028- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
76029+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
76030 if (!iter)
76031 return -ENOMEM;
76032 reset_iter(iter, 0);
76033diff --git a/kernel/kcmp.c b/kernel/kcmp.c
76034index e30ac0f..3528cac 100644
76035--- a/kernel/kcmp.c
76036+++ b/kernel/kcmp.c
76037@@ -99,6 +99,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
76038 struct task_struct *task1, *task2;
76039 int ret;
76040
76041+#ifdef CONFIG_GRKERNSEC
76042+ return -ENOSYS;
76043+#endif
76044+
76045 rcu_read_lock();
76046
76047 /*
76048diff --git a/kernel/kexec.c b/kernel/kexec.c
76049index ffd4e11..c3ff6bf 100644
76050--- a/kernel/kexec.c
76051+++ b/kernel/kexec.c
76052@@ -1048,7 +1048,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
76053 unsigned long flags)
76054 {
76055 struct compat_kexec_segment in;
76056- struct kexec_segment out, __user *ksegments;
76057+ struct kexec_segment out;
76058+ struct kexec_segment __user *ksegments;
76059 unsigned long i, result;
76060
76061 /* Don't allow clients that don't understand the native
76062diff --git a/kernel/kmod.c b/kernel/kmod.c
76063index 8985c87..f539dbe 100644
76064--- a/kernel/kmod.c
76065+++ b/kernel/kmod.c
76066@@ -75,7 +75,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
76067 kfree(info->argv);
76068 }
76069
76070-static int call_modprobe(char *module_name, int wait)
76071+static int call_modprobe(char *module_name, char *module_param, int wait)
76072 {
76073 static char *envp[] = {
76074 "HOME=/",
76075@@ -84,7 +84,7 @@ static int call_modprobe(char *module_name, int wait)
76076 NULL
76077 };
76078
76079- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
76080+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
76081 if (!argv)
76082 goto out;
76083
76084@@ -96,7 +96,8 @@ static int call_modprobe(char *module_name, int wait)
76085 argv[1] = "-q";
76086 argv[2] = "--";
76087 argv[3] = module_name; /* check free_modprobe_argv() */
76088- argv[4] = NULL;
76089+ argv[4] = module_param;
76090+ argv[5] = NULL;
76091
76092 return call_usermodehelper_fns(modprobe_path, argv, envp,
76093 wait | UMH_KILLABLE, NULL, free_modprobe_argv, NULL);
76094@@ -121,9 +122,8 @@ out:
76095 * If module auto-loading support is disabled then this function
76096 * becomes a no-operation.
76097 */
76098-int __request_module(bool wait, const char *fmt, ...)
76099+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
76100 {
76101- va_list args;
76102 char module_name[MODULE_NAME_LEN];
76103 unsigned int max_modprobes;
76104 int ret;
76105@@ -139,9 +139,7 @@ int __request_module(bool wait, const char *fmt, ...)
76106 */
76107 WARN_ON_ONCE(wait && current_is_async());
76108
76109- va_start(args, fmt);
76110- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
76111- va_end(args);
76112+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
76113 if (ret >= MODULE_NAME_LEN)
76114 return -ENAMETOOLONG;
76115
76116@@ -149,6 +147,20 @@ int __request_module(bool wait, const char *fmt, ...)
76117 if (ret)
76118 return ret;
76119
76120+#ifdef CONFIG_GRKERNSEC_MODHARDEN
76121+ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
76122+ /* hack to workaround consolekit/udisks stupidity */
76123+ read_lock(&tasklist_lock);
76124+ if (!strcmp(current->comm, "mount") &&
76125+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
76126+ read_unlock(&tasklist_lock);
76127+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
76128+ return -EPERM;
76129+ }
76130+ read_unlock(&tasklist_lock);
76131+ }
76132+#endif
76133+
76134 /* If modprobe needs a service that is in a module, we get a recursive
76135 * loop. Limit the number of running kmod threads to max_threads/2 or
76136 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
76137@@ -177,11 +189,52 @@ int __request_module(bool wait, const char *fmt, ...)
76138
76139 trace_module_request(module_name, wait, _RET_IP_);
76140
76141- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
76142+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
76143
76144 atomic_dec(&kmod_concurrent);
76145 return ret;
76146 }
76147+
76148+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
76149+{
76150+ va_list args;
76151+ int ret;
76152+
76153+ va_start(args, fmt);
76154+ ret = ____request_module(wait, module_param, fmt, args);
76155+ va_end(args);
76156+
76157+ return ret;
76158+}
76159+
76160+int __request_module(bool wait, const char *fmt, ...)
76161+{
76162+ va_list args;
76163+ int ret;
76164+
76165+#ifdef CONFIG_GRKERNSEC_MODHARDEN
76166+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
76167+ char module_param[MODULE_NAME_LEN];
76168+
76169+ memset(module_param, 0, sizeof(module_param));
76170+
76171+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
76172+
76173+ va_start(args, fmt);
76174+ ret = ____request_module(wait, module_param, fmt, args);
76175+ va_end(args);
76176+
76177+ return ret;
76178+ }
76179+#endif
76180+
76181+ va_start(args, fmt);
76182+ ret = ____request_module(wait, NULL, fmt, args);
76183+ va_end(args);
76184+
76185+ return ret;
76186+}
76187+
76188 EXPORT_SYMBOL(__request_module);
76189 #endif /* CONFIG_MODULES */
76190
76191@@ -292,7 +345,7 @@ static int wait_for_helper(void *data)
76192 *
76193 * Thus the __user pointer cast is valid here.
76194 */
76195- sys_wait4(pid, (int __user *)&ret, 0, NULL);
76196+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
76197
76198 /*
76199 * If ret is 0, either ____call_usermodehelper failed and the
76200@@ -649,7 +702,7 @@ EXPORT_SYMBOL(call_usermodehelper_fns);
76201 static int proc_cap_handler(struct ctl_table *table, int write,
76202 void __user *buffer, size_t *lenp, loff_t *ppos)
76203 {
76204- struct ctl_table t;
76205+ ctl_table_no_const t;
76206 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
76207 kernel_cap_t new_cap;
76208 int err, i;
76209diff --git a/kernel/kprobes.c b/kernel/kprobes.c
76210index 3fed7f0..a3f95ed 100644
76211--- a/kernel/kprobes.c
76212+++ b/kernel/kprobes.c
76213@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
76214 * kernel image and loaded module images reside. This is required
76215 * so x86_64 can correctly handle the %rip-relative fixups.
76216 */
76217- kip->insns = module_alloc(PAGE_SIZE);
76218+ kip->insns = module_alloc_exec(PAGE_SIZE);
76219 if (!kip->insns) {
76220 kfree(kip);
76221 return NULL;
76222@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
76223 */
76224 if (!list_is_singular(&kip->list)) {
76225 list_del(&kip->list);
76226- module_free(NULL, kip->insns);
76227+ module_free_exec(NULL, kip->insns);
76228 kfree(kip);
76229 }
76230 return 1;
76231@@ -2073,7 +2073,7 @@ static int __init init_kprobes(void)
76232 {
76233 int i, err = 0;
76234 unsigned long offset = 0, size = 0;
76235- char *modname, namebuf[128];
76236+ char *modname, namebuf[KSYM_NAME_LEN];
76237 const char *symbol_name;
76238 void *addr;
76239 struct kprobe_blackpoint *kb;
76240@@ -2158,11 +2158,11 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
76241 kprobe_type = "k";
76242
76243 if (sym)
76244- seq_printf(pi, "%p %s %s+0x%x %s ",
76245+ seq_printf(pi, "%pK %s %s+0x%x %s ",
76246 p->addr, kprobe_type, sym, offset,
76247 (modname ? modname : " "));
76248 else
76249- seq_printf(pi, "%p %s %p ",
76250+ seq_printf(pi, "%pK %s %pK ",
76251 p->addr, kprobe_type, p->addr);
76252
76253 if (!pp)
76254@@ -2199,7 +2199,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
76255 const char *sym = NULL;
76256 unsigned int i = *(loff_t *) v;
76257 unsigned long offset = 0;
76258- char *modname, namebuf[128];
76259+ char *modname, namebuf[KSYM_NAME_LEN];
76260
76261 head = &kprobe_table[i];
76262 preempt_disable();
76263diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
76264index 6ada93c..dce7d5d 100644
76265--- a/kernel/ksysfs.c
76266+++ b/kernel/ksysfs.c
76267@@ -46,6 +46,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
76268 {
76269 if (count+1 > UEVENT_HELPER_PATH_LEN)
76270 return -ENOENT;
76271+ if (!capable(CAP_SYS_ADMIN))
76272+ return -EPERM;
76273 memcpy(uevent_helper, buf, count);
76274 uevent_helper[count] = '\0';
76275 if (count && uevent_helper[count-1] == '\n')
76276@@ -172,7 +174,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
76277 return count;
76278 }
76279
76280-static struct bin_attribute notes_attr = {
76281+static bin_attribute_no_const notes_attr __read_only = {
76282 .attr = {
76283 .name = "notes",
76284 .mode = S_IRUGO,
76285diff --git a/kernel/lockdep.c b/kernel/lockdep.c
76286index 8a0efac..56f1e2d 100644
76287--- a/kernel/lockdep.c
76288+++ b/kernel/lockdep.c
76289@@ -590,6 +590,10 @@ static int static_obj(void *obj)
76290 end = (unsigned long) &_end,
76291 addr = (unsigned long) obj;
76292
76293+#ifdef CONFIG_PAX_KERNEXEC
76294+ start = ktla_ktva(start);
76295+#endif
76296+
76297 /*
76298 * static variable?
76299 */
76300@@ -730,6 +734,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
76301 if (!static_obj(lock->key)) {
76302 debug_locks_off();
76303 printk("INFO: trying to register non-static key.\n");
76304+ printk("lock:%pS key:%pS.\n", lock, lock->key);
76305 printk("the code is fine but needs lockdep annotation.\n");
76306 printk("turning off the locking correctness validator.\n");
76307 dump_stack();
76308@@ -3078,7 +3083,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
76309 if (!class)
76310 return 0;
76311 }
76312- atomic_inc((atomic_t *)&class->ops);
76313+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
76314 if (very_verbose(class)) {
76315 printk("\nacquire class [%p] %s", class->key, class->name);
76316 if (class->name_version > 1)
76317diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
76318index b2c71c5..7b88d63 100644
76319--- a/kernel/lockdep_proc.c
76320+++ b/kernel/lockdep_proc.c
76321@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
76322 return 0;
76323 }
76324
76325- seq_printf(m, "%p", class->key);
76326+ seq_printf(m, "%pK", class->key);
76327 #ifdef CONFIG_DEBUG_LOCKDEP
76328 seq_printf(m, " OPS:%8ld", class->ops);
76329 #endif
76330@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
76331
76332 list_for_each_entry(entry, &class->locks_after, entry) {
76333 if (entry->distance == 1) {
76334- seq_printf(m, " -> [%p] ", entry->class->key);
76335+ seq_printf(m, " -> [%pK] ", entry->class->key);
76336 print_name(m, entry->class);
76337 seq_puts(m, "\n");
76338 }
76339@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
76340 if (!class->key)
76341 continue;
76342
76343- seq_printf(m, "[%p] ", class->key);
76344+ seq_printf(m, "[%pK] ", class->key);
76345 print_name(m, class);
76346 seq_puts(m, "\n");
76347 }
76348@@ -495,7 +495,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
76349 if (!i)
76350 seq_line(m, '-', 40-namelen, namelen);
76351
76352- snprintf(ip, sizeof(ip), "[<%p>]",
76353+ snprintf(ip, sizeof(ip), "[<%pK>]",
76354 (void *)class->contention_point[i]);
76355 seq_printf(m, "%40s %14lu %29s %pS\n",
76356 name, stats->contention_point[i],
76357@@ -510,7 +510,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
76358 if (!i)
76359 seq_line(m, '-', 40-namelen, namelen);
76360
76361- snprintf(ip, sizeof(ip), "[<%p>]",
76362+ snprintf(ip, sizeof(ip), "[<%pK>]",
76363 (void *)class->contending_point[i]);
76364 seq_printf(m, "%40s %14lu %29s %pS\n",
76365 name, stats->contending_point[i],
76366diff --git a/kernel/module.c b/kernel/module.c
76367index 97f202c..109575f 100644
76368--- a/kernel/module.c
76369+++ b/kernel/module.c
76370@@ -61,6 +61,7 @@
76371 #include <linux/pfn.h>
76372 #include <linux/bsearch.h>
76373 #include <linux/fips.h>
76374+#include <linux/grsecurity.h>
76375 #include <uapi/linux/module.h>
76376 #include "module-internal.h"
76377
76378@@ -156,7 +157,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
76379
76380 /* Bounds of module allocation, for speeding __module_address.
76381 * Protected by module_mutex. */
76382-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
76383+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
76384+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
76385
76386 int register_module_notifier(struct notifier_block * nb)
76387 {
76388@@ -323,7 +325,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
76389 return true;
76390
76391 list_for_each_entry_rcu(mod, &modules, list) {
76392- struct symsearch arr[] = {
76393+ struct symsearch modarr[] = {
76394 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
76395 NOT_GPL_ONLY, false },
76396 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
76397@@ -348,7 +350,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
76398 if (mod->state == MODULE_STATE_UNFORMED)
76399 continue;
76400
76401- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
76402+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
76403 return true;
76404 }
76405 return false;
76406@@ -485,7 +487,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
76407 static int percpu_modalloc(struct module *mod,
76408 unsigned long size, unsigned long align)
76409 {
76410- if (align > PAGE_SIZE) {
76411+ if (align-1 >= PAGE_SIZE) {
76412 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
76413 mod->name, align, PAGE_SIZE);
76414 align = PAGE_SIZE;
76415@@ -1089,7 +1091,7 @@ struct module_attribute module_uevent =
76416 static ssize_t show_coresize(struct module_attribute *mattr,
76417 struct module_kobject *mk, char *buffer)
76418 {
76419- return sprintf(buffer, "%u\n", mk->mod->core_size);
76420+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
76421 }
76422
76423 static struct module_attribute modinfo_coresize =
76424@@ -1098,7 +1100,7 @@ static struct module_attribute modinfo_coresize =
76425 static ssize_t show_initsize(struct module_attribute *mattr,
76426 struct module_kobject *mk, char *buffer)
76427 {
76428- return sprintf(buffer, "%u\n", mk->mod->init_size);
76429+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
76430 }
76431
76432 static struct module_attribute modinfo_initsize =
76433@@ -1312,7 +1314,7 @@ resolve_symbol_wait(struct module *mod,
76434 */
76435 #ifdef CONFIG_SYSFS
76436
76437-#ifdef CONFIG_KALLSYMS
76438+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
76439 static inline bool sect_empty(const Elf_Shdr *sect)
76440 {
76441 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
76442@@ -1452,7 +1454,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
76443 {
76444 unsigned int notes, loaded, i;
76445 struct module_notes_attrs *notes_attrs;
76446- struct bin_attribute *nattr;
76447+ bin_attribute_no_const *nattr;
76448
76449 /* failed to create section attributes, so can't create notes */
76450 if (!mod->sect_attrs)
76451@@ -1564,7 +1566,7 @@ static void del_usage_links(struct module *mod)
76452 static int module_add_modinfo_attrs(struct module *mod)
76453 {
76454 struct module_attribute *attr;
76455- struct module_attribute *temp_attr;
76456+ module_attribute_no_const *temp_attr;
76457 int error = 0;
76458 int i;
76459
76460@@ -1778,21 +1780,21 @@ static void set_section_ro_nx(void *base,
76461
76462 static void unset_module_core_ro_nx(struct module *mod)
76463 {
76464- set_page_attributes(mod->module_core + mod->core_text_size,
76465- mod->module_core + mod->core_size,
76466+ set_page_attributes(mod->module_core_rw,
76467+ mod->module_core_rw + mod->core_size_rw,
76468 set_memory_x);
76469- set_page_attributes(mod->module_core,
76470- mod->module_core + mod->core_ro_size,
76471+ set_page_attributes(mod->module_core_rx,
76472+ mod->module_core_rx + mod->core_size_rx,
76473 set_memory_rw);
76474 }
76475
76476 static void unset_module_init_ro_nx(struct module *mod)
76477 {
76478- set_page_attributes(mod->module_init + mod->init_text_size,
76479- mod->module_init + mod->init_size,
76480+ set_page_attributes(mod->module_init_rw,
76481+ mod->module_init_rw + mod->init_size_rw,
76482 set_memory_x);
76483- set_page_attributes(mod->module_init,
76484- mod->module_init + mod->init_ro_size,
76485+ set_page_attributes(mod->module_init_rx,
76486+ mod->module_init_rx + mod->init_size_rx,
76487 set_memory_rw);
76488 }
76489
76490@@ -1805,14 +1807,14 @@ void set_all_modules_text_rw(void)
76491 list_for_each_entry_rcu(mod, &modules, list) {
76492 if (mod->state == MODULE_STATE_UNFORMED)
76493 continue;
76494- if ((mod->module_core) && (mod->core_text_size)) {
76495- set_page_attributes(mod->module_core,
76496- mod->module_core + mod->core_text_size,
76497+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
76498+ set_page_attributes(mod->module_core_rx,
76499+ mod->module_core_rx + mod->core_size_rx,
76500 set_memory_rw);
76501 }
76502- if ((mod->module_init) && (mod->init_text_size)) {
76503- set_page_attributes(mod->module_init,
76504- mod->module_init + mod->init_text_size,
76505+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
76506+ set_page_attributes(mod->module_init_rx,
76507+ mod->module_init_rx + mod->init_size_rx,
76508 set_memory_rw);
76509 }
76510 }
76511@@ -1828,14 +1830,14 @@ void set_all_modules_text_ro(void)
76512 list_for_each_entry_rcu(mod, &modules, list) {
76513 if (mod->state == MODULE_STATE_UNFORMED)
76514 continue;
76515- if ((mod->module_core) && (mod->core_text_size)) {
76516- set_page_attributes(mod->module_core,
76517- mod->module_core + mod->core_text_size,
76518+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
76519+ set_page_attributes(mod->module_core_rx,
76520+ mod->module_core_rx + mod->core_size_rx,
76521 set_memory_ro);
76522 }
76523- if ((mod->module_init) && (mod->init_text_size)) {
76524- set_page_attributes(mod->module_init,
76525- mod->module_init + mod->init_text_size,
76526+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
76527+ set_page_attributes(mod->module_init_rx,
76528+ mod->module_init_rx + mod->init_size_rx,
76529 set_memory_ro);
76530 }
76531 }
76532@@ -1886,16 +1888,19 @@ static void free_module(struct module *mod)
76533
76534 /* This may be NULL, but that's OK */
76535 unset_module_init_ro_nx(mod);
76536- module_free(mod, mod->module_init);
76537+ module_free(mod, mod->module_init_rw);
76538+ module_free_exec(mod, mod->module_init_rx);
76539 kfree(mod->args);
76540 percpu_modfree(mod);
76541
76542 /* Free lock-classes: */
76543- lockdep_free_key_range(mod->module_core, mod->core_size);
76544+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
76545+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
76546
76547 /* Finally, free the core (containing the module structure) */
76548 unset_module_core_ro_nx(mod);
76549- module_free(mod, mod->module_core);
76550+ module_free_exec(mod, mod->module_core_rx);
76551+ module_free(mod, mod->module_core_rw);
76552
76553 #ifdef CONFIG_MPU
76554 update_protections(current->mm);
76555@@ -1965,9 +1970,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
76556 int ret = 0;
76557 const struct kernel_symbol *ksym;
76558
76559+#ifdef CONFIG_GRKERNSEC_MODHARDEN
76560+ int is_fs_load = 0;
76561+ int register_filesystem_found = 0;
76562+ char *p;
76563+
76564+ p = strstr(mod->args, "grsec_modharden_fs");
76565+ if (p) {
76566+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
76567+ /* copy \0 as well */
76568+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
76569+ is_fs_load = 1;
76570+ }
76571+#endif
76572+
76573 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
76574 const char *name = info->strtab + sym[i].st_name;
76575
76576+#ifdef CONFIG_GRKERNSEC_MODHARDEN
76577+ /* it's a real shame this will never get ripped and copied
76578+ upstream! ;(
76579+ */
76580+ if (is_fs_load && !strcmp(name, "register_filesystem"))
76581+ register_filesystem_found = 1;
76582+#endif
76583+
76584 switch (sym[i].st_shndx) {
76585 case SHN_COMMON:
76586 /* We compiled with -fno-common. These are not
76587@@ -1988,7 +2015,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
76588 ksym = resolve_symbol_wait(mod, info, name);
76589 /* Ok if resolved. */
76590 if (ksym && !IS_ERR(ksym)) {
76591+ pax_open_kernel();
76592 sym[i].st_value = ksym->value;
76593+ pax_close_kernel();
76594 break;
76595 }
76596
76597@@ -2007,11 +2036,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
76598 secbase = (unsigned long)mod_percpu(mod);
76599 else
76600 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
76601+ pax_open_kernel();
76602 sym[i].st_value += secbase;
76603+ pax_close_kernel();
76604 break;
76605 }
76606 }
76607
76608+#ifdef CONFIG_GRKERNSEC_MODHARDEN
76609+ if (is_fs_load && !register_filesystem_found) {
76610+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
76611+ ret = -EPERM;
76612+ }
76613+#endif
76614+
76615 return ret;
76616 }
76617
76618@@ -2095,22 +2133,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
76619 || s->sh_entsize != ~0UL
76620 || strstarts(sname, ".init"))
76621 continue;
76622- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
76623+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
76624+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
76625+ else
76626+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
76627 pr_debug("\t%s\n", sname);
76628 }
76629- switch (m) {
76630- case 0: /* executable */
76631- mod->core_size = debug_align(mod->core_size);
76632- mod->core_text_size = mod->core_size;
76633- break;
76634- case 1: /* RO: text and ro-data */
76635- mod->core_size = debug_align(mod->core_size);
76636- mod->core_ro_size = mod->core_size;
76637- break;
76638- case 3: /* whole core */
76639- mod->core_size = debug_align(mod->core_size);
76640- break;
76641- }
76642 }
76643
76644 pr_debug("Init section allocation order:\n");
76645@@ -2124,23 +2152,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
76646 || s->sh_entsize != ~0UL
76647 || !strstarts(sname, ".init"))
76648 continue;
76649- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
76650- | INIT_OFFSET_MASK);
76651+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
76652+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
76653+ else
76654+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
76655+ s->sh_entsize |= INIT_OFFSET_MASK;
76656 pr_debug("\t%s\n", sname);
76657 }
76658- switch (m) {
76659- case 0: /* executable */
76660- mod->init_size = debug_align(mod->init_size);
76661- mod->init_text_size = mod->init_size;
76662- break;
76663- case 1: /* RO: text and ro-data */
76664- mod->init_size = debug_align(mod->init_size);
76665- mod->init_ro_size = mod->init_size;
76666- break;
76667- case 3: /* whole init */
76668- mod->init_size = debug_align(mod->init_size);
76669- break;
76670- }
76671 }
76672 }
76673
76674@@ -2313,7 +2331,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
76675
76676 /* Put symbol section at end of init part of module. */
76677 symsect->sh_flags |= SHF_ALLOC;
76678- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
76679+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
76680 info->index.sym) | INIT_OFFSET_MASK;
76681 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
76682
76683@@ -2330,13 +2348,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
76684 }
76685
76686 /* Append room for core symbols at end of core part. */
76687- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
76688- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
76689- mod->core_size += strtab_size;
76690+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
76691+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
76692+ mod->core_size_rx += strtab_size;
76693
76694 /* Put string table section at end of init part of module. */
76695 strsect->sh_flags |= SHF_ALLOC;
76696- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
76697+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
76698 info->index.str) | INIT_OFFSET_MASK;
76699 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
76700 }
76701@@ -2354,12 +2372,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
76702 /* Make sure we get permanent strtab: don't use info->strtab. */
76703 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
76704
76705+ pax_open_kernel();
76706+
76707 /* Set types up while we still have access to sections. */
76708 for (i = 0; i < mod->num_symtab; i++)
76709 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
76710
76711- mod->core_symtab = dst = mod->module_core + info->symoffs;
76712- mod->core_strtab = s = mod->module_core + info->stroffs;
76713+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
76714+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
76715 src = mod->symtab;
76716 for (ndst = i = 0; i < mod->num_symtab; i++) {
76717 if (i == 0 ||
76718@@ -2371,6 +2391,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
76719 }
76720 }
76721 mod->core_num_syms = ndst;
76722+
76723+ pax_close_kernel();
76724 }
76725 #else
76726 static inline void layout_symtab(struct module *mod, struct load_info *info)
76727@@ -2404,17 +2426,33 @@ void * __weak module_alloc(unsigned long size)
76728 return vmalloc_exec(size);
76729 }
76730
76731-static void *module_alloc_update_bounds(unsigned long size)
76732+static void *module_alloc_update_bounds_rw(unsigned long size)
76733 {
76734 void *ret = module_alloc(size);
76735
76736 if (ret) {
76737 mutex_lock(&module_mutex);
76738 /* Update module bounds. */
76739- if ((unsigned long)ret < module_addr_min)
76740- module_addr_min = (unsigned long)ret;
76741- if ((unsigned long)ret + size > module_addr_max)
76742- module_addr_max = (unsigned long)ret + size;
76743+ if ((unsigned long)ret < module_addr_min_rw)
76744+ module_addr_min_rw = (unsigned long)ret;
76745+ if ((unsigned long)ret + size > module_addr_max_rw)
76746+ module_addr_max_rw = (unsigned long)ret + size;
76747+ mutex_unlock(&module_mutex);
76748+ }
76749+ return ret;
76750+}
76751+
76752+static void *module_alloc_update_bounds_rx(unsigned long size)
76753+{
76754+ void *ret = module_alloc_exec(size);
76755+
76756+ if (ret) {
76757+ mutex_lock(&module_mutex);
76758+ /* Update module bounds. */
76759+ if ((unsigned long)ret < module_addr_min_rx)
76760+ module_addr_min_rx = (unsigned long)ret;
76761+ if ((unsigned long)ret + size > module_addr_max_rx)
76762+ module_addr_max_rx = (unsigned long)ret + size;
76763 mutex_unlock(&module_mutex);
76764 }
76765 return ret;
76766@@ -2690,8 +2728,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
76767 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
76768 {
76769 const char *modmagic = get_modinfo(info, "vermagic");
76770+ const char *license = get_modinfo(info, "license");
76771 int err;
76772
76773+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
76774+ if (!license || !license_is_gpl_compatible(license))
76775+ return -ENOEXEC;
76776+#endif
76777+
76778 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
76779 modmagic = NULL;
76780
76781@@ -2717,7 +2761,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
76782 }
76783
76784 /* Set up license info based on the info section */
76785- set_license(mod, get_modinfo(info, "license"));
76786+ set_license(mod, license);
76787
76788 return 0;
76789 }
76790@@ -2811,7 +2855,7 @@ static int move_module(struct module *mod, struct load_info *info)
76791 void *ptr;
76792
76793 /* Do the allocs. */
76794- ptr = module_alloc_update_bounds(mod->core_size);
76795+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
76796 /*
76797 * The pointer to this block is stored in the module structure
76798 * which is inside the block. Just mark it as not being a
76799@@ -2821,11 +2865,11 @@ static int move_module(struct module *mod, struct load_info *info)
76800 if (!ptr)
76801 return -ENOMEM;
76802
76803- memset(ptr, 0, mod->core_size);
76804- mod->module_core = ptr;
76805+ memset(ptr, 0, mod->core_size_rw);
76806+ mod->module_core_rw = ptr;
76807
76808- if (mod->init_size) {
76809- ptr = module_alloc_update_bounds(mod->init_size);
76810+ if (mod->init_size_rw) {
76811+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
76812 /*
76813 * The pointer to this block is stored in the module structure
76814 * which is inside the block. This block doesn't need to be
76815@@ -2834,13 +2878,45 @@ static int move_module(struct module *mod, struct load_info *info)
76816 */
76817 kmemleak_ignore(ptr);
76818 if (!ptr) {
76819- module_free(mod, mod->module_core);
76820+ module_free(mod, mod->module_core_rw);
76821 return -ENOMEM;
76822 }
76823- memset(ptr, 0, mod->init_size);
76824- mod->module_init = ptr;
76825+ memset(ptr, 0, mod->init_size_rw);
76826+ mod->module_init_rw = ptr;
76827 } else
76828- mod->module_init = NULL;
76829+ mod->module_init_rw = NULL;
76830+
76831+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
76832+ kmemleak_not_leak(ptr);
76833+ if (!ptr) {
76834+ if (mod->module_init_rw)
76835+ module_free(mod, mod->module_init_rw);
76836+ module_free(mod, mod->module_core_rw);
76837+ return -ENOMEM;
76838+ }
76839+
76840+ pax_open_kernel();
76841+ memset(ptr, 0, mod->core_size_rx);
76842+ pax_close_kernel();
76843+ mod->module_core_rx = ptr;
76844+
76845+ if (mod->init_size_rx) {
76846+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
76847+ kmemleak_ignore(ptr);
76848+ if (!ptr && mod->init_size_rx) {
76849+ module_free_exec(mod, mod->module_core_rx);
76850+ if (mod->module_init_rw)
76851+ module_free(mod, mod->module_init_rw);
76852+ module_free(mod, mod->module_core_rw);
76853+ return -ENOMEM;
76854+ }
76855+
76856+ pax_open_kernel();
76857+ memset(ptr, 0, mod->init_size_rx);
76858+ pax_close_kernel();
76859+ mod->module_init_rx = ptr;
76860+ } else
76861+ mod->module_init_rx = NULL;
76862
76863 /* Transfer each section which specifies SHF_ALLOC */
76864 pr_debug("final section addresses:\n");
76865@@ -2851,16 +2927,45 @@ static int move_module(struct module *mod, struct load_info *info)
76866 if (!(shdr->sh_flags & SHF_ALLOC))
76867 continue;
76868
76869- if (shdr->sh_entsize & INIT_OFFSET_MASK)
76870- dest = mod->module_init
76871- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
76872- else
76873- dest = mod->module_core + shdr->sh_entsize;
76874+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
76875+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
76876+ dest = mod->module_init_rw
76877+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
76878+ else
76879+ dest = mod->module_init_rx
76880+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
76881+ } else {
76882+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
76883+ dest = mod->module_core_rw + shdr->sh_entsize;
76884+ else
76885+ dest = mod->module_core_rx + shdr->sh_entsize;
76886+ }
76887+
76888+ if (shdr->sh_type != SHT_NOBITS) {
76889+
76890+#ifdef CONFIG_PAX_KERNEXEC
76891+#ifdef CONFIG_X86_64
76892+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
76893+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
76894+#endif
76895+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
76896+ pax_open_kernel();
76897+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
76898+ pax_close_kernel();
76899+ } else
76900+#endif
76901
76902- if (shdr->sh_type != SHT_NOBITS)
76903 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
76904+ }
76905 /* Update sh_addr to point to copy in image. */
76906- shdr->sh_addr = (unsigned long)dest;
76907+
76908+#ifdef CONFIG_PAX_KERNEXEC
76909+ if (shdr->sh_flags & SHF_EXECINSTR)
76910+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
76911+ else
76912+#endif
76913+
76914+ shdr->sh_addr = (unsigned long)dest;
76915 pr_debug("\t0x%lx %s\n",
76916 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
76917 }
76918@@ -2917,12 +3022,12 @@ static void flush_module_icache(const struct module *mod)
76919 * Do it before processing of module parameters, so the module
76920 * can provide parameter accessor functions of its own.
76921 */
76922- if (mod->module_init)
76923- flush_icache_range((unsigned long)mod->module_init,
76924- (unsigned long)mod->module_init
76925- + mod->init_size);
76926- flush_icache_range((unsigned long)mod->module_core,
76927- (unsigned long)mod->module_core + mod->core_size);
76928+ if (mod->module_init_rx)
76929+ flush_icache_range((unsigned long)mod->module_init_rx,
76930+ (unsigned long)mod->module_init_rx
76931+ + mod->init_size_rx);
76932+ flush_icache_range((unsigned long)mod->module_core_rx,
76933+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
76934
76935 set_fs(old_fs);
76936 }
76937@@ -2992,8 +3097,10 @@ out:
76938 static void module_deallocate(struct module *mod, struct load_info *info)
76939 {
76940 percpu_modfree(mod);
76941- module_free(mod, mod->module_init);
76942- module_free(mod, mod->module_core);
76943+ module_free_exec(mod, mod->module_init_rx);
76944+ module_free_exec(mod, mod->module_core_rx);
76945+ module_free(mod, mod->module_init_rw);
76946+ module_free(mod, mod->module_core_rw);
76947 }
76948
76949 int __weak module_finalize(const Elf_Ehdr *hdr,
76950@@ -3006,7 +3113,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
76951 static int post_relocation(struct module *mod, const struct load_info *info)
76952 {
76953 /* Sort exception table now relocations are done. */
76954+ pax_open_kernel();
76955 sort_extable(mod->extable, mod->extable + mod->num_exentries);
76956+ pax_close_kernel();
76957
76958 /* Copy relocated percpu area over. */
76959 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
76960@@ -3060,16 +3169,16 @@ static int do_init_module(struct module *mod)
76961 MODULE_STATE_COMING, mod);
76962
76963 /* Set RO and NX regions for core */
76964- set_section_ro_nx(mod->module_core,
76965- mod->core_text_size,
76966- mod->core_ro_size,
76967- mod->core_size);
76968+ set_section_ro_nx(mod->module_core_rx,
76969+ mod->core_size_rx,
76970+ mod->core_size_rx,
76971+ mod->core_size_rx);
76972
76973 /* Set RO and NX regions for init */
76974- set_section_ro_nx(mod->module_init,
76975- mod->init_text_size,
76976- mod->init_ro_size,
76977- mod->init_size);
76978+ set_section_ro_nx(mod->module_init_rx,
76979+ mod->init_size_rx,
76980+ mod->init_size_rx,
76981+ mod->init_size_rx);
76982
76983 do_mod_ctors(mod);
76984 /* Start the module */
76985@@ -3131,11 +3240,12 @@ static int do_init_module(struct module *mod)
76986 mod->strtab = mod->core_strtab;
76987 #endif
76988 unset_module_init_ro_nx(mod);
76989- module_free(mod, mod->module_init);
76990- mod->module_init = NULL;
76991- mod->init_size = 0;
76992- mod->init_ro_size = 0;
76993- mod->init_text_size = 0;
76994+ module_free(mod, mod->module_init_rw);
76995+ module_free_exec(mod, mod->module_init_rx);
76996+ mod->module_init_rw = NULL;
76997+ mod->module_init_rx = NULL;
76998+ mod->init_size_rw = 0;
76999+ mod->init_size_rx = 0;
77000 mutex_unlock(&module_mutex);
77001 wake_up_all(&module_wq);
77002
77003@@ -3262,9 +3372,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
77004 if (err)
77005 goto free_unload;
77006
77007+ /* Now copy in args */
77008+ mod->args = strndup_user(uargs, ~0UL >> 1);
77009+ if (IS_ERR(mod->args)) {
77010+ err = PTR_ERR(mod->args);
77011+ goto free_unload;
77012+ }
77013+
77014 /* Set up MODINFO_ATTR fields */
77015 setup_modinfo(mod, info);
77016
77017+#ifdef CONFIG_GRKERNSEC_MODHARDEN
77018+ {
77019+ char *p, *p2;
77020+
77021+ if (strstr(mod->args, "grsec_modharden_netdev")) {
77022+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
77023+ err = -EPERM;
77024+ goto free_modinfo;
77025+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
77026+ p += sizeof("grsec_modharden_normal") - 1;
77027+ p2 = strstr(p, "_");
77028+ if (p2) {
77029+ *p2 = '\0';
77030+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
77031+ *p2 = '_';
77032+ }
77033+ err = -EPERM;
77034+ goto free_modinfo;
77035+ }
77036+ }
77037+#endif
77038+
77039 /* Fix up syms, so that st_value is a pointer to location. */
77040 err = simplify_symbols(mod, info);
77041 if (err < 0)
77042@@ -3280,13 +3419,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
77043
77044 flush_module_icache(mod);
77045
77046- /* Now copy in args */
77047- mod->args = strndup_user(uargs, ~0UL >> 1);
77048- if (IS_ERR(mod->args)) {
77049- err = PTR_ERR(mod->args);
77050- goto free_arch_cleanup;
77051- }
77052-
77053 dynamic_debug_setup(info->debug, info->num_debug);
77054
77055 /* Finally it's fully formed, ready to start executing. */
77056@@ -3321,11 +3453,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
77057 ddebug_cleanup:
77058 dynamic_debug_remove(info->debug);
77059 synchronize_sched();
77060- kfree(mod->args);
77061- free_arch_cleanup:
77062 module_arch_cleanup(mod);
77063 free_modinfo:
77064 free_modinfo(mod);
77065+ kfree(mod->args);
77066 free_unload:
77067 module_unload_free(mod);
77068 unlink_mod:
77069@@ -3408,10 +3539,16 @@ static const char *get_ksymbol(struct module *mod,
77070 unsigned long nextval;
77071
77072 /* At worse, next value is at end of module */
77073- if (within_module_init(addr, mod))
77074- nextval = (unsigned long)mod->module_init+mod->init_text_size;
77075+ if (within_module_init_rx(addr, mod))
77076+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
77077+ else if (within_module_init_rw(addr, mod))
77078+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
77079+ else if (within_module_core_rx(addr, mod))
77080+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
77081+ else if (within_module_core_rw(addr, mod))
77082+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
77083 else
77084- nextval = (unsigned long)mod->module_core+mod->core_text_size;
77085+ return NULL;
77086
77087 /* Scan for closest preceding symbol, and next symbol. (ELF
77088 starts real symbols at 1). */
77089@@ -3664,7 +3801,7 @@ static int m_show(struct seq_file *m, void *p)
77090 return 0;
77091
77092 seq_printf(m, "%s %u",
77093- mod->name, mod->init_size + mod->core_size);
77094+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
77095 print_unload_info(m, mod);
77096
77097 /* Informative for users. */
77098@@ -3673,7 +3810,7 @@ static int m_show(struct seq_file *m, void *p)
77099 mod->state == MODULE_STATE_COMING ? "Loading":
77100 "Live");
77101 /* Used by oprofile and other similar tools. */
77102- seq_printf(m, " 0x%pK", mod->module_core);
77103+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
77104
77105 /* Taints info */
77106 if (mod->taints)
77107@@ -3709,7 +3846,17 @@ static const struct file_operations proc_modules_operations = {
77108
77109 static int __init proc_modules_init(void)
77110 {
77111+#ifndef CONFIG_GRKERNSEC_HIDESYM
77112+#ifdef CONFIG_GRKERNSEC_PROC_USER
77113+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
77114+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
77115+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
77116+#else
77117 proc_create("modules", 0, NULL, &proc_modules_operations);
77118+#endif
77119+#else
77120+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
77121+#endif
77122 return 0;
77123 }
77124 module_init(proc_modules_init);
77125@@ -3770,14 +3917,14 @@ struct module *__module_address(unsigned long addr)
77126 {
77127 struct module *mod;
77128
77129- if (addr < module_addr_min || addr > module_addr_max)
77130+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
77131+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
77132 return NULL;
77133
77134 list_for_each_entry_rcu(mod, &modules, list) {
77135 if (mod->state == MODULE_STATE_UNFORMED)
77136 continue;
77137- if (within_module_core(addr, mod)
77138- || within_module_init(addr, mod))
77139+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
77140 return mod;
77141 }
77142 return NULL;
77143@@ -3812,11 +3959,20 @@ bool is_module_text_address(unsigned long addr)
77144 */
77145 struct module *__module_text_address(unsigned long addr)
77146 {
77147- struct module *mod = __module_address(addr);
77148+ struct module *mod;
77149+
77150+#ifdef CONFIG_X86_32
77151+ addr = ktla_ktva(addr);
77152+#endif
77153+
77154+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
77155+ return NULL;
77156+
77157+ mod = __module_address(addr);
77158+
77159 if (mod) {
77160 /* Make sure it's within the text section. */
77161- if (!within(addr, mod->module_init, mod->init_text_size)
77162- && !within(addr, mod->module_core, mod->core_text_size))
77163+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
77164 mod = NULL;
77165 }
77166 return mod;
77167diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
77168index 7e3443f..b2a1e6b 100644
77169--- a/kernel/mutex-debug.c
77170+++ b/kernel/mutex-debug.c
77171@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
77172 }
77173
77174 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
77175- struct thread_info *ti)
77176+ struct task_struct *task)
77177 {
77178 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
77179
77180 /* Mark the current thread as blocked on the lock: */
77181- ti->task->blocked_on = waiter;
77182+ task->blocked_on = waiter;
77183 }
77184
77185 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
77186- struct thread_info *ti)
77187+ struct task_struct *task)
77188 {
77189 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
77190- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
77191- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
77192- ti->task->blocked_on = NULL;
77193+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
77194+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
77195+ task->blocked_on = NULL;
77196
77197 list_del_init(&waiter->list);
77198 waiter->task = NULL;
77199diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
77200index 0799fd3..d06ae3b 100644
77201--- a/kernel/mutex-debug.h
77202+++ b/kernel/mutex-debug.h
77203@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
77204 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
77205 extern void debug_mutex_add_waiter(struct mutex *lock,
77206 struct mutex_waiter *waiter,
77207- struct thread_info *ti);
77208+ struct task_struct *task);
77209 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
77210- struct thread_info *ti);
77211+ struct task_struct *task);
77212 extern void debug_mutex_unlock(struct mutex *lock);
77213 extern void debug_mutex_init(struct mutex *lock, const char *name,
77214 struct lock_class_key *key);
77215diff --git a/kernel/mutex.c b/kernel/mutex.c
77216index 52f2301..73f7528 100644
77217--- a/kernel/mutex.c
77218+++ b/kernel/mutex.c
77219@@ -199,7 +199,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
77220 spin_lock_mutex(&lock->wait_lock, flags);
77221
77222 debug_mutex_lock_common(lock, &waiter);
77223- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
77224+ debug_mutex_add_waiter(lock, &waiter, task);
77225
77226 /* add waiting tasks to the end of the waitqueue (FIFO): */
77227 list_add_tail(&waiter.list, &lock->wait_list);
77228@@ -228,8 +228,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
77229 * TASK_UNINTERRUPTIBLE case.)
77230 */
77231 if (unlikely(signal_pending_state(state, task))) {
77232- mutex_remove_waiter(lock, &waiter,
77233- task_thread_info(task));
77234+ mutex_remove_waiter(lock, &waiter, task);
77235 mutex_release(&lock->dep_map, 1, ip);
77236 spin_unlock_mutex(&lock->wait_lock, flags);
77237
77238@@ -248,7 +247,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
77239 done:
77240 lock_acquired(&lock->dep_map, ip);
77241 /* got the lock - rejoice! */
77242- mutex_remove_waiter(lock, &waiter, current_thread_info());
77243+ mutex_remove_waiter(lock, &waiter, task);
77244 mutex_set_owner(lock);
77245
77246 /* set it to 0 if there are no waiters left: */
77247diff --git a/kernel/notifier.c b/kernel/notifier.c
77248index 2d5cc4c..d9ea600 100644
77249--- a/kernel/notifier.c
77250+++ b/kernel/notifier.c
77251@@ -5,6 +5,7 @@
77252 #include <linux/rcupdate.h>
77253 #include <linux/vmalloc.h>
77254 #include <linux/reboot.h>
77255+#include <linux/mm.h>
77256
77257 /*
77258 * Notifier list for kernel code which wants to be called
77259@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
77260 while ((*nl) != NULL) {
77261 if (n->priority > (*nl)->priority)
77262 break;
77263- nl = &((*nl)->next);
77264+ nl = (struct notifier_block **)&((*nl)->next);
77265 }
77266- n->next = *nl;
77267+ pax_open_kernel();
77268+ *(const void **)&n->next = *nl;
77269 rcu_assign_pointer(*nl, n);
77270+ pax_close_kernel();
77271 return 0;
77272 }
77273
77274@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
77275 return 0;
77276 if (n->priority > (*nl)->priority)
77277 break;
77278- nl = &((*nl)->next);
77279+ nl = (struct notifier_block **)&((*nl)->next);
77280 }
77281- n->next = *nl;
77282+ pax_open_kernel();
77283+ *(const void **)&n->next = *nl;
77284 rcu_assign_pointer(*nl, n);
77285+ pax_close_kernel();
77286 return 0;
77287 }
77288
77289@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
77290 {
77291 while ((*nl) != NULL) {
77292 if ((*nl) == n) {
77293+ pax_open_kernel();
77294 rcu_assign_pointer(*nl, n->next);
77295+ pax_close_kernel();
77296 return 0;
77297 }
77298- nl = &((*nl)->next);
77299+ nl = (struct notifier_block **)&((*nl)->next);
77300 }
77301 return -ENOENT;
77302 }
77303diff --git a/kernel/panic.c b/kernel/panic.c
77304index 7c57cc9..28f1b3f 100644
77305--- a/kernel/panic.c
77306+++ b/kernel/panic.c
77307@@ -403,7 +403,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
77308 const char *board;
77309
77310 printk(KERN_WARNING "------------[ cut here ]------------\n");
77311- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
77312+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
77313 board = dmi_get_system_info(DMI_PRODUCT_NAME);
77314 if (board)
77315 printk(KERN_WARNING "Hardware name: %s\n", board);
77316@@ -459,7 +459,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
77317 */
77318 void __stack_chk_fail(void)
77319 {
77320- panic("stack-protector: Kernel stack is corrupted in: %p\n",
77321+ dump_stack();
77322+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
77323 __builtin_return_address(0));
77324 }
77325 EXPORT_SYMBOL(__stack_chk_fail);
77326diff --git a/kernel/pid.c b/kernel/pid.c
77327index 047dc62..418d74b 100644
77328--- a/kernel/pid.c
77329+++ b/kernel/pid.c
77330@@ -33,6 +33,7 @@
77331 #include <linux/rculist.h>
77332 #include <linux/bootmem.h>
77333 #include <linux/hash.h>
77334+#include <linux/security.h>
77335 #include <linux/pid_namespace.h>
77336 #include <linux/init_task.h>
77337 #include <linux/syscalls.h>
77338@@ -46,7 +47,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
77339
77340 int pid_max = PID_MAX_DEFAULT;
77341
77342-#define RESERVED_PIDS 300
77343+#define RESERVED_PIDS 500
77344
77345 int pid_max_min = RESERVED_PIDS + 1;
77346 int pid_max_max = PID_MAX_LIMIT;
77347@@ -440,10 +441,18 @@ EXPORT_SYMBOL(pid_task);
77348 */
77349 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
77350 {
77351+ struct task_struct *task;
77352+
77353 rcu_lockdep_assert(rcu_read_lock_held(),
77354 "find_task_by_pid_ns() needs rcu_read_lock()"
77355 " protection");
77356- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
77357+
77358+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
77359+
77360+ if (gr_pid_is_chrooted(task))
77361+ return NULL;
77362+
77363+ return task;
77364 }
77365
77366 struct task_struct *find_task_by_vpid(pid_t vnr)
77367@@ -451,6 +460,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
77368 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
77369 }
77370
77371+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
77372+{
77373+ rcu_lockdep_assert(rcu_read_lock_held(),
77374+ "find_task_by_pid_ns() needs rcu_read_lock()"
77375+ " protection");
77376+ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
77377+}
77378+
77379 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
77380 {
77381 struct pid *pid;
77382diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
77383index bea15bd..789f3d0 100644
77384--- a/kernel/pid_namespace.c
77385+++ b/kernel/pid_namespace.c
77386@@ -249,7 +249,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
77387 void __user *buffer, size_t *lenp, loff_t *ppos)
77388 {
77389 struct pid_namespace *pid_ns = task_active_pid_ns(current);
77390- struct ctl_table tmp = *table;
77391+ ctl_table_no_const tmp = *table;
77392
77393 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
77394 return -EPERM;
77395diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
77396index 8fd709c..542bf4b 100644
77397--- a/kernel/posix-cpu-timers.c
77398+++ b/kernel/posix-cpu-timers.c
77399@@ -1592,14 +1592,14 @@ struct k_clock clock_posix_cpu = {
77400
77401 static __init int init_posix_cpu_timers(void)
77402 {
77403- struct k_clock process = {
77404+ static struct k_clock process = {
77405 .clock_getres = process_cpu_clock_getres,
77406 .clock_get = process_cpu_clock_get,
77407 .timer_create = process_cpu_timer_create,
77408 .nsleep = process_cpu_nsleep,
77409 .nsleep_restart = process_cpu_nsleep_restart,
77410 };
77411- struct k_clock thread = {
77412+ static struct k_clock thread = {
77413 .clock_getres = thread_cpu_clock_getres,
77414 .clock_get = thread_cpu_clock_get,
77415 .timer_create = thread_cpu_timer_create,
77416diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
77417index 6edbb2c..334f085 100644
77418--- a/kernel/posix-timers.c
77419+++ b/kernel/posix-timers.c
77420@@ -43,6 +43,7 @@
77421 #include <linux/idr.h>
77422 #include <linux/posix-clock.h>
77423 #include <linux/posix-timers.h>
77424+#include <linux/grsecurity.h>
77425 #include <linux/syscalls.h>
77426 #include <linux/wait.h>
77427 #include <linux/workqueue.h>
77428@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
77429 * which we beg off on and pass to do_sys_settimeofday().
77430 */
77431
77432-static struct k_clock posix_clocks[MAX_CLOCKS];
77433+static struct k_clock *posix_clocks[MAX_CLOCKS];
77434
77435 /*
77436 * These ones are defined below.
77437@@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
77438 */
77439 static __init int init_posix_timers(void)
77440 {
77441- struct k_clock clock_realtime = {
77442+ static struct k_clock clock_realtime = {
77443 .clock_getres = hrtimer_get_res,
77444 .clock_get = posix_clock_realtime_get,
77445 .clock_set = posix_clock_realtime_set,
77446@@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
77447 .timer_get = common_timer_get,
77448 .timer_del = common_timer_del,
77449 };
77450- struct k_clock clock_monotonic = {
77451+ static struct k_clock clock_monotonic = {
77452 .clock_getres = hrtimer_get_res,
77453 .clock_get = posix_ktime_get_ts,
77454 .nsleep = common_nsleep,
77455@@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
77456 .timer_get = common_timer_get,
77457 .timer_del = common_timer_del,
77458 };
77459- struct k_clock clock_monotonic_raw = {
77460+ static struct k_clock clock_monotonic_raw = {
77461 .clock_getres = hrtimer_get_res,
77462 .clock_get = posix_get_monotonic_raw,
77463 };
77464- struct k_clock clock_realtime_coarse = {
77465+ static struct k_clock clock_realtime_coarse = {
77466 .clock_getres = posix_get_coarse_res,
77467 .clock_get = posix_get_realtime_coarse,
77468 };
77469- struct k_clock clock_monotonic_coarse = {
77470+ static struct k_clock clock_monotonic_coarse = {
77471 .clock_getres = posix_get_coarse_res,
77472 .clock_get = posix_get_monotonic_coarse,
77473 };
77474- struct k_clock clock_boottime = {
77475+ static struct k_clock clock_boottime = {
77476 .clock_getres = hrtimer_get_res,
77477 .clock_get = posix_get_boottime,
77478 .nsleep = common_nsleep,
77479@@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
77480 return;
77481 }
77482
77483- posix_clocks[clock_id] = *new_clock;
77484+ posix_clocks[clock_id] = new_clock;
77485 }
77486 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
77487
77488@@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
77489 return (id & CLOCKFD_MASK) == CLOCKFD ?
77490 &clock_posix_dynamic : &clock_posix_cpu;
77491
77492- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
77493+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
77494 return NULL;
77495- return &posix_clocks[id];
77496+ return posix_clocks[id];
77497 }
77498
77499 static int common_timer_create(struct k_itimer *new_timer)
77500@@ -964,6 +965,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
77501 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
77502 return -EFAULT;
77503
77504+ /* only the CLOCK_REALTIME clock can be set, all other clocks
77505+ have their clock_set fptr set to a nosettime dummy function
77506+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
77507+ call common_clock_set, which calls do_sys_settimeofday, which
77508+ we hook
77509+ */
77510+
77511 return kc->clock_set(which_clock, &new_tp);
77512 }
77513
77514diff --git a/kernel/power/process.c b/kernel/power/process.c
77515index 98088e0..aaf95c0 100644
77516--- a/kernel/power/process.c
77517+++ b/kernel/power/process.c
77518@@ -33,6 +33,7 @@ static int try_to_freeze_tasks(bool user_only)
77519 u64 elapsed_csecs64;
77520 unsigned int elapsed_csecs;
77521 bool wakeup = false;
77522+ bool timedout = false;
77523
77524 do_gettimeofday(&start);
77525
77526@@ -43,13 +44,20 @@ static int try_to_freeze_tasks(bool user_only)
77527
77528 while (true) {
77529 todo = 0;
77530+ if (time_after(jiffies, end_time))
77531+ timedout = true;
77532 read_lock(&tasklist_lock);
77533 do_each_thread(g, p) {
77534 if (p == current || !freeze_task(p))
77535 continue;
77536
77537- if (!freezer_should_skip(p))
77538+ if (!freezer_should_skip(p)) {
77539 todo++;
77540+ if (timedout) {
77541+ printk(KERN_ERR "Task refusing to freeze:\n");
77542+ sched_show_task(p);
77543+ }
77544+ }
77545 } while_each_thread(g, p);
77546 read_unlock(&tasklist_lock);
77547
77548@@ -58,7 +66,7 @@ static int try_to_freeze_tasks(bool user_only)
77549 todo += wq_busy;
77550 }
77551
77552- if (!todo || time_after(jiffies, end_time))
77553+ if (!todo || timedout)
77554 break;
77555
77556 if (pm_wakeup_pending()) {
77557diff --git a/kernel/printk.c b/kernel/printk.c
77558index 0e4eba6a..80abe9c 100644
77559--- a/kernel/printk.c
77560+++ b/kernel/printk.c
77561@@ -378,6 +378,7 @@ static int syslog_action_restricted(int type)
77562 {
77563 if (dmesg_restrict)
77564 return 1;
77565+
77566 /*
77567 * Unless restricted, we allow "read all" and "get buffer size"
77568 * for everybody.
77569@@ -395,6 +396,11 @@ static int check_syslog_permissions(int type, bool from_file)
77570 if (from_file && type != SYSLOG_ACTION_OPEN)
77571 return 0;
77572
77573+#ifdef CONFIG_GRKERNSEC_DMESG
77574+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
77575+ return -EPERM;
77576+#endif
77577+
77578 if (syslog_action_restricted(type)) {
77579 if (capable(CAP_SYSLOG))
77580 return 0;
77581@@ -662,11 +668,17 @@ static unsigned int devkmsg_poll(struct file *file, poll_table *wait)
77582 return ret;
77583 }
77584
77585+static int check_syslog_permissions(int type, bool from_file);
77586+
77587 static int devkmsg_open(struct inode *inode, struct file *file)
77588 {
77589 struct devkmsg_user *user;
77590 int err;
77591
77592+ err = check_syslog_permissions(SYSLOG_ACTION_OPEN, SYSLOG_FROM_FILE);
77593+ if (err)
77594+ return err;
77595+
77596 /* write-only does not need any file context */
77597 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
77598 return 0;
77599diff --git a/kernel/profile.c b/kernel/profile.c
77600index dc3384e..0de5b49 100644
77601--- a/kernel/profile.c
77602+++ b/kernel/profile.c
77603@@ -37,7 +37,7 @@ struct profile_hit {
77604 #define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
77605 #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
77606
77607-static atomic_t *prof_buffer;
77608+static atomic_unchecked_t *prof_buffer;
77609 static unsigned long prof_len, prof_shift;
77610
77611 int prof_on __read_mostly;
77612@@ -260,7 +260,7 @@ static void profile_flip_buffers(void)
77613 hits[i].pc = 0;
77614 continue;
77615 }
77616- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
77617+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
77618 hits[i].hits = hits[i].pc = 0;
77619 }
77620 }
77621@@ -321,9 +321,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
77622 * Add the current hit(s) and flush the write-queue out
77623 * to the global buffer:
77624 */
77625- atomic_add(nr_hits, &prof_buffer[pc]);
77626+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
77627 for (i = 0; i < NR_PROFILE_HIT; ++i) {
77628- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
77629+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
77630 hits[i].pc = hits[i].hits = 0;
77631 }
77632 out:
77633@@ -398,7 +398,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
77634 {
77635 unsigned long pc;
77636 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
77637- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
77638+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
77639 }
77640 #endif /* !CONFIG_SMP */
77641
77642@@ -494,7 +494,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
77643 return -EFAULT;
77644 buf++; p++; count--; read++;
77645 }
77646- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
77647+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
77648 if (copy_to_user(buf, (void *)pnt, count))
77649 return -EFAULT;
77650 read += count;
77651@@ -525,7 +525,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
77652 }
77653 #endif
77654 profile_discard_flip_buffers();
77655- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
77656+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
77657 return count;
77658 }
77659
77660diff --git a/kernel/ptrace.c b/kernel/ptrace.c
77661index acbd284..00bb0c9 100644
77662--- a/kernel/ptrace.c
77663+++ b/kernel/ptrace.c
77664@@ -324,7 +324,7 @@ static int ptrace_attach(struct task_struct *task, long request,
77665 if (seize)
77666 flags |= PT_SEIZED;
77667 rcu_read_lock();
77668- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
77669+ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
77670 flags |= PT_PTRACE_CAP;
77671 rcu_read_unlock();
77672 task->ptrace = flags;
77673@@ -535,7 +535,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
77674 break;
77675 return -EIO;
77676 }
77677- if (copy_to_user(dst, buf, retval))
77678+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
77679 return -EFAULT;
77680 copied += retval;
77681 src += retval;
77682@@ -726,7 +726,7 @@ int ptrace_request(struct task_struct *child, long request,
77683 bool seized = child->ptrace & PT_SEIZED;
77684 int ret = -EIO;
77685 siginfo_t siginfo, *si;
77686- void __user *datavp = (void __user *) data;
77687+ void __user *datavp = (__force void __user *) data;
77688 unsigned long __user *datalp = datavp;
77689 unsigned long flags;
77690
77691@@ -928,14 +928,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
77692 goto out;
77693 }
77694
77695+ if (gr_handle_ptrace(child, request)) {
77696+ ret = -EPERM;
77697+ goto out_put_task_struct;
77698+ }
77699+
77700 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
77701 ret = ptrace_attach(child, request, addr, data);
77702 /*
77703 * Some architectures need to do book-keeping after
77704 * a ptrace attach.
77705 */
77706- if (!ret)
77707+ if (!ret) {
77708 arch_ptrace_attach(child);
77709+ gr_audit_ptrace(child);
77710+ }
77711 goto out_put_task_struct;
77712 }
77713
77714@@ -963,7 +970,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
77715 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
77716 if (copied != sizeof(tmp))
77717 return -EIO;
77718- return put_user(tmp, (unsigned long __user *)data);
77719+ return put_user(tmp, (__force unsigned long __user *)data);
77720 }
77721
77722 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
77723@@ -1057,7 +1064,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
77724 }
77725
77726 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
77727- compat_long_t addr, compat_long_t data)
77728+ compat_ulong_t addr, compat_ulong_t data)
77729 {
77730 struct task_struct *child;
77731 long ret;
77732@@ -1073,14 +1080,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
77733 goto out;
77734 }
77735
77736+ if (gr_handle_ptrace(child, request)) {
77737+ ret = -EPERM;
77738+ goto out_put_task_struct;
77739+ }
77740+
77741 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
77742 ret = ptrace_attach(child, request, addr, data);
77743 /*
77744 * Some architectures need to do book-keeping after
77745 * a ptrace attach.
77746 */
77747- if (!ret)
77748+ if (!ret) {
77749 arch_ptrace_attach(child);
77750+ gr_audit_ptrace(child);
77751+ }
77752 goto out_put_task_struct;
77753 }
77754
77755diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
77756index 48ab703..07561d4 100644
77757--- a/kernel/rcupdate.c
77758+++ b/kernel/rcupdate.c
77759@@ -439,10 +439,10 @@ int rcu_jiffies_till_stall_check(void)
77760 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
77761 */
77762 if (till_stall_check < 3) {
77763- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
77764+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
77765 till_stall_check = 3;
77766 } else if (till_stall_check > 300) {
77767- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
77768+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
77769 till_stall_check = 300;
77770 }
77771 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
77772diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
77773index a0714a5..2ab5e34 100644
77774--- a/kernel/rcutiny.c
77775+++ b/kernel/rcutiny.c
77776@@ -46,7 +46,7 @@
77777 struct rcu_ctrlblk;
77778 static void invoke_rcu_callbacks(void);
77779 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
77780-static void rcu_process_callbacks(struct softirq_action *unused);
77781+static void rcu_process_callbacks(void);
77782 static void __call_rcu(struct rcu_head *head,
77783 void (*func)(struct rcu_head *rcu),
77784 struct rcu_ctrlblk *rcp);
77785@@ -312,7 +312,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
77786 rcu_is_callbacks_kthread()));
77787 }
77788
77789-static void rcu_process_callbacks(struct softirq_action *unused)
77790+static void rcu_process_callbacks(void)
77791 {
77792 __rcu_process_callbacks(&rcu_sched_ctrlblk);
77793 __rcu_process_callbacks(&rcu_bh_ctrlblk);
77794diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
77795index 8a23300..4255818 100644
77796--- a/kernel/rcutiny_plugin.h
77797+++ b/kernel/rcutiny_plugin.h
77798@@ -945,7 +945,7 @@ static int rcu_kthread(void *arg)
77799 have_rcu_kthread_work = morework;
77800 local_irq_restore(flags);
77801 if (work)
77802- rcu_process_callbacks(NULL);
77803+ rcu_process_callbacks();
77804 schedule_timeout_interruptible(1); /* Leave CPU for others. */
77805 }
77806
77807diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
77808index e1f3a8c..42c94a2 100644
77809--- a/kernel/rcutorture.c
77810+++ b/kernel/rcutorture.c
77811@@ -164,12 +164,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
77812 { 0 };
77813 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
77814 { 0 };
77815-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
77816-static atomic_t n_rcu_torture_alloc;
77817-static atomic_t n_rcu_torture_alloc_fail;
77818-static atomic_t n_rcu_torture_free;
77819-static atomic_t n_rcu_torture_mberror;
77820-static atomic_t n_rcu_torture_error;
77821+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
77822+static atomic_unchecked_t n_rcu_torture_alloc;
77823+static atomic_unchecked_t n_rcu_torture_alloc_fail;
77824+static atomic_unchecked_t n_rcu_torture_free;
77825+static atomic_unchecked_t n_rcu_torture_mberror;
77826+static atomic_unchecked_t n_rcu_torture_error;
77827 static long n_rcu_torture_barrier_error;
77828 static long n_rcu_torture_boost_ktrerror;
77829 static long n_rcu_torture_boost_rterror;
77830@@ -287,11 +287,11 @@ rcu_torture_alloc(void)
77831
77832 spin_lock_bh(&rcu_torture_lock);
77833 if (list_empty(&rcu_torture_freelist)) {
77834- atomic_inc(&n_rcu_torture_alloc_fail);
77835+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
77836 spin_unlock_bh(&rcu_torture_lock);
77837 return NULL;
77838 }
77839- atomic_inc(&n_rcu_torture_alloc);
77840+ atomic_inc_unchecked(&n_rcu_torture_alloc);
77841 p = rcu_torture_freelist.next;
77842 list_del_init(p);
77843 spin_unlock_bh(&rcu_torture_lock);
77844@@ -304,7 +304,7 @@ rcu_torture_alloc(void)
77845 static void
77846 rcu_torture_free(struct rcu_torture *p)
77847 {
77848- atomic_inc(&n_rcu_torture_free);
77849+ atomic_inc_unchecked(&n_rcu_torture_free);
77850 spin_lock_bh(&rcu_torture_lock);
77851 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
77852 spin_unlock_bh(&rcu_torture_lock);
77853@@ -424,7 +424,7 @@ rcu_torture_cb(struct rcu_head *p)
77854 i = rp->rtort_pipe_count;
77855 if (i > RCU_TORTURE_PIPE_LEN)
77856 i = RCU_TORTURE_PIPE_LEN;
77857- atomic_inc(&rcu_torture_wcount[i]);
77858+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
77859 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
77860 rp->rtort_mbtest = 0;
77861 rcu_torture_free(rp);
77862@@ -472,7 +472,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
77863 i = rp->rtort_pipe_count;
77864 if (i > RCU_TORTURE_PIPE_LEN)
77865 i = RCU_TORTURE_PIPE_LEN;
77866- atomic_inc(&rcu_torture_wcount[i]);
77867+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
77868 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
77869 rp->rtort_mbtest = 0;
77870 list_del(&rp->rtort_free);
77871@@ -990,7 +990,7 @@ rcu_torture_writer(void *arg)
77872 i = old_rp->rtort_pipe_count;
77873 if (i > RCU_TORTURE_PIPE_LEN)
77874 i = RCU_TORTURE_PIPE_LEN;
77875- atomic_inc(&rcu_torture_wcount[i]);
77876+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
77877 old_rp->rtort_pipe_count++;
77878 cur_ops->deferred_free(old_rp);
77879 }
77880@@ -1076,7 +1076,7 @@ static void rcu_torture_timer(unsigned long unused)
77881 return;
77882 }
77883 if (p->rtort_mbtest == 0)
77884- atomic_inc(&n_rcu_torture_mberror);
77885+ atomic_inc_unchecked(&n_rcu_torture_mberror);
77886 spin_lock(&rand_lock);
77887 cur_ops->read_delay(&rand);
77888 n_rcu_torture_timers++;
77889@@ -1146,7 +1146,7 @@ rcu_torture_reader(void *arg)
77890 continue;
77891 }
77892 if (p->rtort_mbtest == 0)
77893- atomic_inc(&n_rcu_torture_mberror);
77894+ atomic_inc_unchecked(&n_rcu_torture_mberror);
77895 cur_ops->read_delay(&rand);
77896 preempt_disable();
77897 pipe_count = p->rtort_pipe_count;
77898@@ -1209,11 +1209,11 @@ rcu_torture_printk(char *page)
77899 rcu_torture_current,
77900 rcu_torture_current_version,
77901 list_empty(&rcu_torture_freelist),
77902- atomic_read(&n_rcu_torture_alloc),
77903- atomic_read(&n_rcu_torture_alloc_fail),
77904- atomic_read(&n_rcu_torture_free));
77905+ atomic_read_unchecked(&n_rcu_torture_alloc),
77906+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
77907+ atomic_read_unchecked(&n_rcu_torture_free));
77908 cnt += sprintf(&page[cnt], "rtmbe: %d rtbke: %ld rtbre: %ld ",
77909- atomic_read(&n_rcu_torture_mberror),
77910+ atomic_read_unchecked(&n_rcu_torture_mberror),
77911 n_rcu_torture_boost_ktrerror,
77912 n_rcu_torture_boost_rterror);
77913 cnt += sprintf(&page[cnt], "rtbf: %ld rtb: %ld nt: %ld ",
77914@@ -1232,14 +1232,14 @@ rcu_torture_printk(char *page)
77915 n_barrier_attempts,
77916 n_rcu_torture_barrier_error);
77917 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
77918- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
77919+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
77920 n_rcu_torture_barrier_error != 0 ||
77921 n_rcu_torture_boost_ktrerror != 0 ||
77922 n_rcu_torture_boost_rterror != 0 ||
77923 n_rcu_torture_boost_failure != 0 ||
77924 i > 1) {
77925 cnt += sprintf(&page[cnt], "!!! ");
77926- atomic_inc(&n_rcu_torture_error);
77927+ atomic_inc_unchecked(&n_rcu_torture_error);
77928 WARN_ON_ONCE(1);
77929 }
77930 cnt += sprintf(&page[cnt], "Reader Pipe: ");
77931@@ -1253,7 +1253,7 @@ rcu_torture_printk(char *page)
77932 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
77933 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
77934 cnt += sprintf(&page[cnt], " %d",
77935- atomic_read(&rcu_torture_wcount[i]));
77936+ atomic_read_unchecked(&rcu_torture_wcount[i]));
77937 }
77938 cnt += sprintf(&page[cnt], "\n");
77939 if (cur_ops->stats)
77940@@ -1962,7 +1962,7 @@ rcu_torture_cleanup(void)
77941
77942 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
77943
77944- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
77945+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
77946 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
77947 else if (n_online_successes != n_online_attempts ||
77948 n_offline_successes != n_offline_attempts)
77949@@ -2031,18 +2031,18 @@ rcu_torture_init(void)
77950
77951 rcu_torture_current = NULL;
77952 rcu_torture_current_version = 0;
77953- atomic_set(&n_rcu_torture_alloc, 0);
77954- atomic_set(&n_rcu_torture_alloc_fail, 0);
77955- atomic_set(&n_rcu_torture_free, 0);
77956- atomic_set(&n_rcu_torture_mberror, 0);
77957- atomic_set(&n_rcu_torture_error, 0);
77958+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
77959+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
77960+ atomic_set_unchecked(&n_rcu_torture_free, 0);
77961+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
77962+ atomic_set_unchecked(&n_rcu_torture_error, 0);
77963 n_rcu_torture_barrier_error = 0;
77964 n_rcu_torture_boost_ktrerror = 0;
77965 n_rcu_torture_boost_rterror = 0;
77966 n_rcu_torture_boost_failure = 0;
77967 n_rcu_torture_boosts = 0;
77968 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
77969- atomic_set(&rcu_torture_wcount[i], 0);
77970+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
77971 for_each_possible_cpu(cpu) {
77972 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
77973 per_cpu(rcu_torture_count, cpu)[i] = 0;
77974diff --git a/kernel/rcutree.c b/kernel/rcutree.c
77975index 5b8ad82..59e1f64 100644
77976--- a/kernel/rcutree.c
77977+++ b/kernel/rcutree.c
77978@@ -353,9 +353,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
77979 rcu_prepare_for_idle(smp_processor_id());
77980 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
77981 smp_mb__before_atomic_inc(); /* See above. */
77982- atomic_inc(&rdtp->dynticks);
77983+ atomic_inc_unchecked(&rdtp->dynticks);
77984 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
77985- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
77986+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
77987
77988 /*
77989 * It is illegal to enter an extended quiescent state while
77990@@ -491,10 +491,10 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
77991 int user)
77992 {
77993 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
77994- atomic_inc(&rdtp->dynticks);
77995+ atomic_inc_unchecked(&rdtp->dynticks);
77996 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
77997 smp_mb__after_atomic_inc(); /* See above. */
77998- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
77999+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
78000 rcu_cleanup_after_idle(smp_processor_id());
78001 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
78002 if (!user && !is_idle_task(current)) {
78003@@ -633,14 +633,14 @@ void rcu_nmi_enter(void)
78004 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
78005
78006 if (rdtp->dynticks_nmi_nesting == 0 &&
78007- (atomic_read(&rdtp->dynticks) & 0x1))
78008+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
78009 return;
78010 rdtp->dynticks_nmi_nesting++;
78011 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
78012- atomic_inc(&rdtp->dynticks);
78013+ atomic_inc_unchecked(&rdtp->dynticks);
78014 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
78015 smp_mb__after_atomic_inc(); /* See above. */
78016- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
78017+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
78018 }
78019
78020 /**
78021@@ -659,9 +659,9 @@ void rcu_nmi_exit(void)
78022 return;
78023 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
78024 smp_mb__before_atomic_inc(); /* See above. */
78025- atomic_inc(&rdtp->dynticks);
78026+ atomic_inc_unchecked(&rdtp->dynticks);
78027 smp_mb__after_atomic_inc(); /* Force delay to next write. */
78028- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
78029+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
78030 }
78031
78032 /**
78033@@ -675,7 +675,7 @@ int rcu_is_cpu_idle(void)
78034 int ret;
78035
78036 preempt_disable();
78037- ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
78038+ ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
78039 preempt_enable();
78040 return ret;
78041 }
78042@@ -743,7 +743,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
78043 */
78044 static int dyntick_save_progress_counter(struct rcu_data *rdp)
78045 {
78046- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
78047+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
78048 return (rdp->dynticks_snap & 0x1) == 0;
78049 }
78050
78051@@ -758,7 +758,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
78052 unsigned int curr;
78053 unsigned int snap;
78054
78055- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
78056+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
78057 snap = (unsigned int)rdp->dynticks_snap;
78058
78059 /*
78060@@ -1698,7 +1698,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
78061 rsp->qlen += rdp->qlen;
78062 rdp->n_cbs_orphaned += rdp->qlen;
78063 rdp->qlen_lazy = 0;
78064- ACCESS_ONCE(rdp->qlen) = 0;
78065+ ACCESS_ONCE_RW(rdp->qlen) = 0;
78066 }
78067
78068 /*
78069@@ -1944,7 +1944,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
78070 }
78071 smp_mb(); /* List handling before counting for rcu_barrier(). */
78072 rdp->qlen_lazy -= count_lazy;
78073- ACCESS_ONCE(rdp->qlen) -= count;
78074+ ACCESS_ONCE_RW(rdp->qlen) -= count;
78075 rdp->n_cbs_invoked += count;
78076
78077 /* Reinstate batch limit if we have worked down the excess. */
78078@@ -2137,7 +2137,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
78079 /*
78080 * Do RCU core processing for the current CPU.
78081 */
78082-static void rcu_process_callbacks(struct softirq_action *unused)
78083+static void rcu_process_callbacks(void)
78084 {
78085 struct rcu_state *rsp;
78086
78087@@ -2260,7 +2260,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
78088 local_irq_restore(flags);
78089 return;
78090 }
78091- ACCESS_ONCE(rdp->qlen)++;
78092+ ACCESS_ONCE_RW(rdp->qlen)++;
78093 if (lazy)
78094 rdp->qlen_lazy++;
78095 else
78096@@ -2469,11 +2469,11 @@ void synchronize_sched_expedited(void)
78097 * counter wrap on a 32-bit system. Quite a few more CPUs would of
78098 * course be required on a 64-bit system.
78099 */
78100- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
78101+ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
78102 (ulong)atomic_long_read(&rsp->expedited_done) +
78103 ULONG_MAX / 8)) {
78104 synchronize_sched();
78105- atomic_long_inc(&rsp->expedited_wrap);
78106+ atomic_long_inc_unchecked(&rsp->expedited_wrap);
78107 return;
78108 }
78109
78110@@ -2481,7 +2481,7 @@ void synchronize_sched_expedited(void)
78111 * Take a ticket. Note that atomic_inc_return() implies a
78112 * full memory barrier.
78113 */
78114- snap = atomic_long_inc_return(&rsp->expedited_start);
78115+ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
78116 firstsnap = snap;
78117 get_online_cpus();
78118 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
78119@@ -2494,14 +2494,14 @@ void synchronize_sched_expedited(void)
78120 synchronize_sched_expedited_cpu_stop,
78121 NULL) == -EAGAIN) {
78122 put_online_cpus();
78123- atomic_long_inc(&rsp->expedited_tryfail);
78124+ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
78125
78126 /* Check to see if someone else did our work for us. */
78127 s = atomic_long_read(&rsp->expedited_done);
78128 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
78129 /* ensure test happens before caller kfree */
78130 smp_mb__before_atomic_inc(); /* ^^^ */
78131- atomic_long_inc(&rsp->expedited_workdone1);
78132+ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
78133 return;
78134 }
78135
78136@@ -2510,7 +2510,7 @@ void synchronize_sched_expedited(void)
78137 udelay(trycount * num_online_cpus());
78138 } else {
78139 wait_rcu_gp(call_rcu_sched);
78140- atomic_long_inc(&rsp->expedited_normal);
78141+ atomic_long_inc_unchecked(&rsp->expedited_normal);
78142 return;
78143 }
78144
78145@@ -2519,7 +2519,7 @@ void synchronize_sched_expedited(void)
78146 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
78147 /* ensure test happens before caller kfree */
78148 smp_mb__before_atomic_inc(); /* ^^^ */
78149- atomic_long_inc(&rsp->expedited_workdone2);
78150+ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
78151 return;
78152 }
78153
78154@@ -2531,10 +2531,10 @@ void synchronize_sched_expedited(void)
78155 * period works for us.
78156 */
78157 get_online_cpus();
78158- snap = atomic_long_read(&rsp->expedited_start);
78159+ snap = atomic_long_read_unchecked(&rsp->expedited_start);
78160 smp_mb(); /* ensure read is before try_stop_cpus(). */
78161 }
78162- atomic_long_inc(&rsp->expedited_stoppedcpus);
78163+ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
78164
78165 /*
78166 * Everyone up to our most recent fetch is covered by our grace
78167@@ -2543,16 +2543,16 @@ void synchronize_sched_expedited(void)
78168 * than we did already did their update.
78169 */
78170 do {
78171- atomic_long_inc(&rsp->expedited_done_tries);
78172+ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
78173 s = atomic_long_read(&rsp->expedited_done);
78174 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
78175 /* ensure test happens before caller kfree */
78176 smp_mb__before_atomic_inc(); /* ^^^ */
78177- atomic_long_inc(&rsp->expedited_done_lost);
78178+ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
78179 break;
78180 }
78181 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
78182- atomic_long_inc(&rsp->expedited_done_exit);
78183+ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
78184
78185 put_online_cpus();
78186 }
78187@@ -2726,7 +2726,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
78188 * ACCESS_ONCE() to prevent the compiler from speculating
78189 * the increment to precede the early-exit check.
78190 */
78191- ACCESS_ONCE(rsp->n_barrier_done)++;
78192+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
78193 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
78194 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
78195 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
78196@@ -2776,7 +2776,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
78197
78198 /* Increment ->n_barrier_done to prevent duplicate work. */
78199 smp_mb(); /* Keep increment after above mechanism. */
78200- ACCESS_ONCE(rsp->n_barrier_done)++;
78201+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
78202 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
78203 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
78204 smp_mb(); /* Keep increment before caller's subsequent code. */
78205@@ -2821,10 +2821,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
78206 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
78207 init_callback_list(rdp);
78208 rdp->qlen_lazy = 0;
78209- ACCESS_ONCE(rdp->qlen) = 0;
78210+ ACCESS_ONCE_RW(rdp->qlen) = 0;
78211 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
78212 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
78213- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
78214+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
78215 rdp->cpu = cpu;
78216 rdp->rsp = rsp;
78217 rcu_boot_init_nocb_percpu_data(rdp);
78218@@ -2857,8 +2857,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
78219 rdp->blimit = blimit;
78220 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
78221 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
78222- atomic_set(&rdp->dynticks->dynticks,
78223- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
78224+ atomic_set_unchecked(&rdp->dynticks->dynticks,
78225+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
78226 rcu_prepare_for_idle_init(cpu);
78227 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
78228
78229@@ -2964,7 +2964,7 @@ static int __init rcu_spawn_gp_kthread(void)
78230 struct task_struct *t;
78231
78232 for_each_rcu_flavor(rsp) {
78233- t = kthread_run(rcu_gp_kthread, rsp, rsp->name);
78234+ t = kthread_run(rcu_gp_kthread, rsp, "%s", rsp->name);
78235 BUG_ON(IS_ERR(t));
78236 rnp = rcu_get_root(rsp);
78237 raw_spin_lock_irqsave(&rnp->lock, flags);
78238diff --git a/kernel/rcutree.h b/kernel/rcutree.h
78239index c896b50..c357252 100644
78240--- a/kernel/rcutree.h
78241+++ b/kernel/rcutree.h
78242@@ -86,7 +86,7 @@ struct rcu_dynticks {
78243 long long dynticks_nesting; /* Track irq/process nesting level. */
78244 /* Process level is worth LLONG_MAX/2. */
78245 int dynticks_nmi_nesting; /* Track NMI nesting level. */
78246- atomic_t dynticks; /* Even value for idle, else odd. */
78247+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
78248 #ifdef CONFIG_RCU_FAST_NO_HZ
78249 int dyntick_drain; /* Prepare-for-idle state variable. */
78250 unsigned long dyntick_holdoff;
78251@@ -416,17 +416,17 @@ struct rcu_state {
78252 /* _rcu_barrier(). */
78253 /* End of fields guarded by barrier_mutex. */
78254
78255- atomic_long_t expedited_start; /* Starting ticket. */
78256- atomic_long_t expedited_done; /* Done ticket. */
78257- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
78258- atomic_long_t expedited_tryfail; /* # acquisition failures. */
78259- atomic_long_t expedited_workdone1; /* # done by others #1. */
78260- atomic_long_t expedited_workdone2; /* # done by others #2. */
78261- atomic_long_t expedited_normal; /* # fallbacks to normal. */
78262- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
78263- atomic_long_t expedited_done_tries; /* # tries to update _done. */
78264- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
78265- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
78266+ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
78267+ atomic_long_t expedited_done; /* Done ticket. */
78268+ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
78269+ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
78270+ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
78271+ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
78272+ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
78273+ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
78274+ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
78275+ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
78276+ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
78277
78278 unsigned long jiffies_force_qs; /* Time at which to invoke */
78279 /* force_quiescent_state(). */
78280diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
78281index c1cc7e1..f62e436 100644
78282--- a/kernel/rcutree_plugin.h
78283+++ b/kernel/rcutree_plugin.h
78284@@ -892,7 +892,7 @@ void synchronize_rcu_expedited(void)
78285
78286 /* Clean up and exit. */
78287 smp_mb(); /* ensure expedited GP seen before counter increment. */
78288- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
78289+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
78290 unlock_mb_ret:
78291 mutex_unlock(&sync_rcu_preempt_exp_mutex);
78292 mb_ret:
78293@@ -1440,7 +1440,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
78294 free_cpumask_var(cm);
78295 }
78296
78297-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
78298+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
78299 .store = &rcu_cpu_kthread_task,
78300 .thread_should_run = rcu_cpu_kthread_should_run,
78301 .thread_fn = rcu_cpu_kthread,
78302@@ -2072,7 +2072,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
78303 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
78304 printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d %s\n",
78305 cpu, ticks_value, ticks_title,
78306- atomic_read(&rdtp->dynticks) & 0xfff,
78307+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
78308 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
78309 fast_no_hz);
78310 }
78311@@ -2192,7 +2192,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
78312
78313 /* Enqueue the callback on the nocb list and update counts. */
78314 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
78315- ACCESS_ONCE(*old_rhpp) = rhp;
78316+ ACCESS_ONCE_RW(*old_rhpp) = rhp;
78317 atomic_long_add(rhcount, &rdp->nocb_q_count);
78318 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
78319
78320@@ -2384,12 +2384,12 @@ static int rcu_nocb_kthread(void *arg)
78321 * Extract queued callbacks, update counts, and wait
78322 * for a grace period to elapse.
78323 */
78324- ACCESS_ONCE(rdp->nocb_head) = NULL;
78325+ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
78326 tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
78327 c = atomic_long_xchg(&rdp->nocb_q_count, 0);
78328 cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
78329- ACCESS_ONCE(rdp->nocb_p_count) += c;
78330- ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
78331+ ACCESS_ONCE_RW(rdp->nocb_p_count) += c;
78332+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) += cl;
78333 wait_rcu_gp(rdp->rsp->call_remote);
78334
78335 /* Each pass through the following loop invokes a callback. */
78336@@ -2411,8 +2411,8 @@ static int rcu_nocb_kthread(void *arg)
78337 list = next;
78338 }
78339 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
78340- ACCESS_ONCE(rdp->nocb_p_count) -= c;
78341- ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
78342+ ACCESS_ONCE_RW(rdp->nocb_p_count) -= c;
78343+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) -= cl;
78344 rdp->n_nocbs_invoked += c;
78345 }
78346 return 0;
78347@@ -2438,7 +2438,7 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
78348 rdp = per_cpu_ptr(rsp->rda, cpu);
78349 t = kthread_run(rcu_nocb_kthread, rdp, "rcuo%d", cpu);
78350 BUG_ON(IS_ERR(t));
78351- ACCESS_ONCE(rdp->nocb_kthread) = t;
78352+ ACCESS_ONCE_RW(rdp->nocb_kthread) = t;
78353 }
78354 }
78355
78356diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
78357index 93f8e8f..cf812ae 100644
78358--- a/kernel/rcutree_trace.c
78359+++ b/kernel/rcutree_trace.c
78360@@ -123,7 +123,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
78361 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
78362 rdp->passed_quiesce, rdp->qs_pending);
78363 seq_printf(m, " dt=%d/%llx/%d df=%lu",
78364- atomic_read(&rdp->dynticks->dynticks),
78365+ atomic_read_unchecked(&rdp->dynticks->dynticks),
78366 rdp->dynticks->dynticks_nesting,
78367 rdp->dynticks->dynticks_nmi_nesting,
78368 rdp->dynticks_fqs);
78369@@ -184,17 +184,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
78370 struct rcu_state *rsp = (struct rcu_state *)m->private;
78371
78372 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
78373- atomic_long_read(&rsp->expedited_start),
78374+ atomic_long_read_unchecked(&rsp->expedited_start),
78375 atomic_long_read(&rsp->expedited_done),
78376- atomic_long_read(&rsp->expedited_wrap),
78377- atomic_long_read(&rsp->expedited_tryfail),
78378- atomic_long_read(&rsp->expedited_workdone1),
78379- atomic_long_read(&rsp->expedited_workdone2),
78380- atomic_long_read(&rsp->expedited_normal),
78381- atomic_long_read(&rsp->expedited_stoppedcpus),
78382- atomic_long_read(&rsp->expedited_done_tries),
78383- atomic_long_read(&rsp->expedited_done_lost),
78384- atomic_long_read(&rsp->expedited_done_exit));
78385+ atomic_long_read_unchecked(&rsp->expedited_wrap),
78386+ atomic_long_read_unchecked(&rsp->expedited_tryfail),
78387+ atomic_long_read_unchecked(&rsp->expedited_workdone1),
78388+ atomic_long_read_unchecked(&rsp->expedited_workdone2),
78389+ atomic_long_read_unchecked(&rsp->expedited_normal),
78390+ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
78391+ atomic_long_read_unchecked(&rsp->expedited_done_tries),
78392+ atomic_long_read_unchecked(&rsp->expedited_done_lost),
78393+ atomic_long_read_unchecked(&rsp->expedited_done_exit));
78394 return 0;
78395 }
78396
78397diff --git a/kernel/resource.c b/kernel/resource.c
78398index 73f35d4..4684fc4 100644
78399--- a/kernel/resource.c
78400+++ b/kernel/resource.c
78401@@ -143,8 +143,18 @@ static const struct file_operations proc_iomem_operations = {
78402
78403 static int __init ioresources_init(void)
78404 {
78405+#ifdef CONFIG_GRKERNSEC_PROC_ADD
78406+#ifdef CONFIG_GRKERNSEC_PROC_USER
78407+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
78408+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
78409+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
78410+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
78411+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
78412+#endif
78413+#else
78414 proc_create("ioports", 0, NULL, &proc_ioports_operations);
78415 proc_create("iomem", 0, NULL, &proc_iomem_operations);
78416+#endif
78417 return 0;
78418 }
78419 __initcall(ioresources_init);
78420diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
78421index 7890b10..8b68605f 100644
78422--- a/kernel/rtmutex-tester.c
78423+++ b/kernel/rtmutex-tester.c
78424@@ -21,7 +21,7 @@
78425 #define MAX_RT_TEST_MUTEXES 8
78426
78427 static spinlock_t rttest_lock;
78428-static atomic_t rttest_event;
78429+static atomic_unchecked_t rttest_event;
78430
78431 struct test_thread_data {
78432 int opcode;
78433@@ -62,7 +62,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
78434
78435 case RTTEST_LOCKCONT:
78436 td->mutexes[td->opdata] = 1;
78437- td->event = atomic_add_return(1, &rttest_event);
78438+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78439 return 0;
78440
78441 case RTTEST_RESET:
78442@@ -75,7 +75,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
78443 return 0;
78444
78445 case RTTEST_RESETEVENT:
78446- atomic_set(&rttest_event, 0);
78447+ atomic_set_unchecked(&rttest_event, 0);
78448 return 0;
78449
78450 default:
78451@@ -92,9 +92,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
78452 return ret;
78453
78454 td->mutexes[id] = 1;
78455- td->event = atomic_add_return(1, &rttest_event);
78456+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78457 rt_mutex_lock(&mutexes[id]);
78458- td->event = atomic_add_return(1, &rttest_event);
78459+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78460 td->mutexes[id] = 4;
78461 return 0;
78462
78463@@ -105,9 +105,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
78464 return ret;
78465
78466 td->mutexes[id] = 1;
78467- td->event = atomic_add_return(1, &rttest_event);
78468+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78469 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
78470- td->event = atomic_add_return(1, &rttest_event);
78471+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78472 td->mutexes[id] = ret ? 0 : 4;
78473 return ret ? -EINTR : 0;
78474
78475@@ -116,9 +116,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
78476 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
78477 return ret;
78478
78479- td->event = atomic_add_return(1, &rttest_event);
78480+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78481 rt_mutex_unlock(&mutexes[id]);
78482- td->event = atomic_add_return(1, &rttest_event);
78483+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78484 td->mutexes[id] = 0;
78485 return 0;
78486
78487@@ -165,7 +165,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
78488 break;
78489
78490 td->mutexes[dat] = 2;
78491- td->event = atomic_add_return(1, &rttest_event);
78492+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78493 break;
78494
78495 default:
78496@@ -185,7 +185,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
78497 return;
78498
78499 td->mutexes[dat] = 3;
78500- td->event = atomic_add_return(1, &rttest_event);
78501+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78502 break;
78503
78504 case RTTEST_LOCKNOWAIT:
78505@@ -197,7 +197,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
78506 return;
78507
78508 td->mutexes[dat] = 1;
78509- td->event = atomic_add_return(1, &rttest_event);
78510+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78511 return;
78512
78513 default:
78514diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
78515index 64de5f8..7735e12 100644
78516--- a/kernel/sched/auto_group.c
78517+++ b/kernel/sched/auto_group.c
78518@@ -11,7 +11,7 @@
78519
78520 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
78521 static struct autogroup autogroup_default;
78522-static atomic_t autogroup_seq_nr;
78523+static atomic_unchecked_t autogroup_seq_nr;
78524
78525 void __init autogroup_init(struct task_struct *init_task)
78526 {
78527@@ -81,7 +81,7 @@ static inline struct autogroup *autogroup_create(void)
78528
78529 kref_init(&ag->kref);
78530 init_rwsem(&ag->lock);
78531- ag->id = atomic_inc_return(&autogroup_seq_nr);
78532+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
78533 ag->tg = tg;
78534 #ifdef CONFIG_RT_GROUP_SCHED
78535 /*
78536diff --git a/kernel/sched/core.c b/kernel/sched/core.c
78537index 67d0465..4cf9361 100644
78538--- a/kernel/sched/core.c
78539+++ b/kernel/sched/core.c
78540@@ -3406,7 +3406,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
78541 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
78542 * positive (at least 1, or number of jiffies left till timeout) if completed.
78543 */
78544-long __sched
78545+long __sched __intentional_overflow(-1)
78546 wait_for_completion_interruptible_timeout(struct completion *x,
78547 unsigned long timeout)
78548 {
78549@@ -3423,7 +3423,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
78550 *
78551 * The return value is -ERESTARTSYS if interrupted, 0 if completed.
78552 */
78553-int __sched wait_for_completion_killable(struct completion *x)
78554+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
78555 {
78556 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
78557 if (t == -ERESTARTSYS)
78558@@ -3444,7 +3444,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
78559 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
78560 * positive (at least 1, or number of jiffies left till timeout) if completed.
78561 */
78562-long __sched
78563+long __sched __intentional_overflow(-1)
78564 wait_for_completion_killable_timeout(struct completion *x,
78565 unsigned long timeout)
78566 {
78567@@ -3670,6 +3670,8 @@ int can_nice(const struct task_struct *p, const int nice)
78568 /* convert nice value [19,-20] to rlimit style value [1,40] */
78569 int nice_rlim = 20 - nice;
78570
78571+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
78572+
78573 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
78574 capable(CAP_SYS_NICE));
78575 }
78576@@ -3703,7 +3705,8 @@ SYSCALL_DEFINE1(nice, int, increment)
78577 if (nice > 19)
78578 nice = 19;
78579
78580- if (increment < 0 && !can_nice(current, nice))
78581+ if (increment < 0 && (!can_nice(current, nice) ||
78582+ gr_handle_chroot_nice()))
78583 return -EPERM;
78584
78585 retval = security_task_setnice(current, nice);
78586@@ -3857,6 +3860,7 @@ recheck:
78587 unsigned long rlim_rtprio =
78588 task_rlimit(p, RLIMIT_RTPRIO);
78589
78590+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
78591 /* can't set/change the rt policy */
78592 if (policy != p->policy && !rlim_rtprio)
78593 return -EPERM;
78594@@ -4954,7 +4958,7 @@ static void migrate_tasks(unsigned int dead_cpu)
78595
78596 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
78597
78598-static struct ctl_table sd_ctl_dir[] = {
78599+static ctl_table_no_const sd_ctl_dir[] __read_only = {
78600 {
78601 .procname = "sched_domain",
78602 .mode = 0555,
78603@@ -4971,17 +4975,17 @@ static struct ctl_table sd_ctl_root[] = {
78604 {}
78605 };
78606
78607-static struct ctl_table *sd_alloc_ctl_entry(int n)
78608+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
78609 {
78610- struct ctl_table *entry =
78611+ ctl_table_no_const *entry =
78612 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
78613
78614 return entry;
78615 }
78616
78617-static void sd_free_ctl_entry(struct ctl_table **tablep)
78618+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
78619 {
78620- struct ctl_table *entry;
78621+ ctl_table_no_const *entry;
78622
78623 /*
78624 * In the intermediate directories, both the child directory and
78625@@ -4989,22 +4993,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
78626 * will always be set. In the lowest directory the names are
78627 * static strings and all have proc handlers.
78628 */
78629- for (entry = *tablep; entry->mode; entry++) {
78630- if (entry->child)
78631- sd_free_ctl_entry(&entry->child);
78632+ for (entry = tablep; entry->mode; entry++) {
78633+ if (entry->child) {
78634+ sd_free_ctl_entry(entry->child);
78635+ pax_open_kernel();
78636+ entry->child = NULL;
78637+ pax_close_kernel();
78638+ }
78639 if (entry->proc_handler == NULL)
78640 kfree(entry->procname);
78641 }
78642
78643- kfree(*tablep);
78644- *tablep = NULL;
78645+ kfree(tablep);
78646 }
78647
78648 static int min_load_idx = 0;
78649 static int max_load_idx = CPU_LOAD_IDX_MAX-1;
78650
78651 static void
78652-set_table_entry(struct ctl_table *entry,
78653+set_table_entry(ctl_table_no_const *entry,
78654 const char *procname, void *data, int maxlen,
78655 umode_t mode, proc_handler *proc_handler,
78656 bool load_idx)
78657@@ -5024,7 +5031,7 @@ set_table_entry(struct ctl_table *entry,
78658 static struct ctl_table *
78659 sd_alloc_ctl_domain_table(struct sched_domain *sd)
78660 {
78661- struct ctl_table *table = sd_alloc_ctl_entry(13);
78662+ ctl_table_no_const *table = sd_alloc_ctl_entry(13);
78663
78664 if (table == NULL)
78665 return NULL;
78666@@ -5059,9 +5066,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
78667 return table;
78668 }
78669
78670-static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
78671+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
78672 {
78673- struct ctl_table *entry, *table;
78674+ ctl_table_no_const *entry, *table;
78675 struct sched_domain *sd;
78676 int domain_num = 0, i;
78677 char buf[32];
78678@@ -5088,11 +5095,13 @@ static struct ctl_table_header *sd_sysctl_header;
78679 static void register_sched_domain_sysctl(void)
78680 {
78681 int i, cpu_num = num_possible_cpus();
78682- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
78683+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
78684 char buf[32];
78685
78686 WARN_ON(sd_ctl_dir[0].child);
78687+ pax_open_kernel();
78688 sd_ctl_dir[0].child = entry;
78689+ pax_close_kernel();
78690
78691 if (entry == NULL)
78692 return;
78693@@ -5115,8 +5124,12 @@ static void unregister_sched_domain_sysctl(void)
78694 if (sd_sysctl_header)
78695 unregister_sysctl_table(sd_sysctl_header);
78696 sd_sysctl_header = NULL;
78697- if (sd_ctl_dir[0].child)
78698- sd_free_ctl_entry(&sd_ctl_dir[0].child);
78699+ if (sd_ctl_dir[0].child) {
78700+ sd_free_ctl_entry(sd_ctl_dir[0].child);
78701+ pax_open_kernel();
78702+ sd_ctl_dir[0].child = NULL;
78703+ pax_close_kernel();
78704+ }
78705 }
78706 #else
78707 static void register_sched_domain_sysctl(void)
78708@@ -5215,7 +5228,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
78709 * happens before everything else. This has to be lower priority than
78710 * the notifier in the perf_event subsystem, though.
78711 */
78712-static struct notifier_block __cpuinitdata migration_notifier = {
78713+static struct notifier_block migration_notifier = {
78714 .notifier_call = migration_call,
78715 .priority = CPU_PRI_MIGRATION,
78716 };
78717diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
78718index 7a33e59..2f7730c 100644
78719--- a/kernel/sched/fair.c
78720+++ b/kernel/sched/fair.c
78721@@ -830,7 +830,7 @@ void task_numa_fault(int node, int pages, bool migrated)
78722
78723 static void reset_ptenuma_scan(struct task_struct *p)
78724 {
78725- ACCESS_ONCE(p->mm->numa_scan_seq)++;
78726+ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
78727 p->mm->numa_scan_offset = 0;
78728 }
78729
78730@@ -5654,7 +5654,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
78731 * run_rebalance_domains is triggered when needed from the scheduler tick.
78732 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
78733 */
78734-static void run_rebalance_domains(struct softirq_action *h)
78735+static void run_rebalance_domains(void)
78736 {
78737 int this_cpu = smp_processor_id();
78738 struct rq *this_rq = cpu_rq(this_cpu);
78739diff --git a/kernel/signal.c b/kernel/signal.c
78740index 598dc06..471310a 100644
78741--- a/kernel/signal.c
78742+++ b/kernel/signal.c
78743@@ -50,12 +50,12 @@ static struct kmem_cache *sigqueue_cachep;
78744
78745 int print_fatal_signals __read_mostly;
78746
78747-static void __user *sig_handler(struct task_struct *t, int sig)
78748+static __sighandler_t sig_handler(struct task_struct *t, int sig)
78749 {
78750 return t->sighand->action[sig - 1].sa.sa_handler;
78751 }
78752
78753-static int sig_handler_ignored(void __user *handler, int sig)
78754+static int sig_handler_ignored(__sighandler_t handler, int sig)
78755 {
78756 /* Is it explicitly or implicitly ignored? */
78757 return handler == SIG_IGN ||
78758@@ -64,7 +64,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
78759
78760 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
78761 {
78762- void __user *handler;
78763+ __sighandler_t handler;
78764
78765 handler = sig_handler(t, sig);
78766
78767@@ -368,6 +368,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
78768 atomic_inc(&user->sigpending);
78769 rcu_read_unlock();
78770
78771+ if (!override_rlimit)
78772+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
78773+
78774 if (override_rlimit ||
78775 atomic_read(&user->sigpending) <=
78776 task_rlimit(t, RLIMIT_SIGPENDING)) {
78777@@ -495,7 +498,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
78778
78779 int unhandled_signal(struct task_struct *tsk, int sig)
78780 {
78781- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
78782+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
78783 if (is_global_init(tsk))
78784 return 1;
78785 if (handler != SIG_IGN && handler != SIG_DFL)
78786@@ -815,6 +818,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
78787 }
78788 }
78789
78790+ /* allow glibc communication via tgkill to other threads in our
78791+ thread group */
78792+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
78793+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
78794+ && gr_handle_signal(t, sig))
78795+ return -EPERM;
78796+
78797 return security_task_kill(t, info, sig, 0);
78798 }
78799
78800@@ -1197,7 +1207,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
78801 return send_signal(sig, info, p, 1);
78802 }
78803
78804-static int
78805+int
78806 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
78807 {
78808 return send_signal(sig, info, t, 0);
78809@@ -1234,6 +1244,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
78810 unsigned long int flags;
78811 int ret, blocked, ignored;
78812 struct k_sigaction *action;
78813+ int is_unhandled = 0;
78814
78815 spin_lock_irqsave(&t->sighand->siglock, flags);
78816 action = &t->sighand->action[sig-1];
78817@@ -1248,9 +1259,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
78818 }
78819 if (action->sa.sa_handler == SIG_DFL)
78820 t->signal->flags &= ~SIGNAL_UNKILLABLE;
78821+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
78822+ is_unhandled = 1;
78823 ret = specific_send_sig_info(sig, info, t);
78824 spin_unlock_irqrestore(&t->sighand->siglock, flags);
78825
78826+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
78827+ normal operation */
78828+ if (is_unhandled) {
78829+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
78830+ gr_handle_crash(t, sig);
78831+ }
78832+
78833 return ret;
78834 }
78835
78836@@ -1317,8 +1337,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
78837 ret = check_kill_permission(sig, info, p);
78838 rcu_read_unlock();
78839
78840- if (!ret && sig)
78841+ if (!ret && sig) {
78842 ret = do_send_sig_info(sig, info, p, true);
78843+ if (!ret)
78844+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
78845+ }
78846
78847 return ret;
78848 }
78849@@ -2923,7 +2946,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
78850 int error = -ESRCH;
78851
78852 rcu_read_lock();
78853- p = find_task_by_vpid(pid);
78854+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
78855+ /* allow glibc communication via tgkill to other threads in our
78856+ thread group */
78857+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
78858+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
78859+ p = find_task_by_vpid_unrestricted(pid);
78860+ else
78861+#endif
78862+ p = find_task_by_vpid(pid);
78863 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
78864 error = check_kill_permission(sig, info, p);
78865 /*
78866@@ -3237,8 +3268,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
78867 }
78868 seg = get_fs();
78869 set_fs(KERNEL_DS);
78870- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
78871- (stack_t __force __user *) &uoss,
78872+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
78873+ (stack_t __force_user *) &uoss,
78874 compat_user_stack_pointer());
78875 set_fs(seg);
78876 if (ret >= 0 && uoss_ptr) {
78877diff --git a/kernel/smp.c b/kernel/smp.c
78878index 8e451f3..8322029 100644
78879--- a/kernel/smp.c
78880+++ b/kernel/smp.c
78881@@ -73,7 +73,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
78882 return NOTIFY_OK;
78883 }
78884
78885-static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
78886+static struct notifier_block hotplug_cfd_notifier = {
78887 .notifier_call = hotplug_cfd,
78888 };
78889
78890diff --git a/kernel/smpboot.c b/kernel/smpboot.c
78891index 02fc5c9..e54c335 100644
78892--- a/kernel/smpboot.c
78893+++ b/kernel/smpboot.c
78894@@ -288,7 +288,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
78895 }
78896 smpboot_unpark_thread(plug_thread, cpu);
78897 }
78898- list_add(&plug_thread->list, &hotplug_threads);
78899+ pax_list_add(&plug_thread->list, &hotplug_threads);
78900 out:
78901 mutex_unlock(&smpboot_threads_lock);
78902 return ret;
78903@@ -305,7 +305,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
78904 {
78905 get_online_cpus();
78906 mutex_lock(&smpboot_threads_lock);
78907- list_del(&plug_thread->list);
78908+ pax_list_del(&plug_thread->list);
78909 smpboot_destroy_threads(plug_thread);
78910 mutex_unlock(&smpboot_threads_lock);
78911 put_online_cpus();
78912diff --git a/kernel/softirq.c b/kernel/softirq.c
78913index d93dcb1..1cd8a71 100644
78914--- a/kernel/softirq.c
78915+++ b/kernel/softirq.c
78916@@ -53,11 +53,11 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
78917 EXPORT_SYMBOL(irq_stat);
78918 #endif
78919
78920-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
78921+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
78922
78923 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
78924
78925-char *softirq_to_name[NR_SOFTIRQS] = {
78926+const char * const softirq_to_name[NR_SOFTIRQS] = {
78927 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
78928 "TASKLET", "SCHED", "HRTIMER", "RCU"
78929 };
78930@@ -250,7 +250,7 @@ restart:
78931 kstat_incr_softirqs_this_cpu(vec_nr);
78932
78933 trace_softirq_entry(vec_nr);
78934- h->action(h);
78935+ h->action();
78936 trace_softirq_exit(vec_nr);
78937 if (unlikely(prev_count != preempt_count())) {
78938 printk(KERN_ERR "huh, entered softirq %u %s %p"
78939@@ -396,7 +396,7 @@ void __raise_softirq_irqoff(unsigned int nr)
78940 or_softirq_pending(1UL << nr);
78941 }
78942
78943-void open_softirq(int nr, void (*action)(struct softirq_action *))
78944+void __init open_softirq(int nr, void (*action)(void))
78945 {
78946 softirq_vec[nr].action = action;
78947 }
78948@@ -452,7 +452,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
78949
78950 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
78951
78952-static void tasklet_action(struct softirq_action *a)
78953+static void tasklet_action(void)
78954 {
78955 struct tasklet_struct *list;
78956
78957@@ -487,7 +487,7 @@ static void tasklet_action(struct softirq_action *a)
78958 }
78959 }
78960
78961-static void tasklet_hi_action(struct softirq_action *a)
78962+static void tasklet_hi_action(void)
78963 {
78964 struct tasklet_struct *list;
78965
78966@@ -723,7 +723,7 @@ static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
78967 return NOTIFY_OK;
78968 }
78969
78970-static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
78971+static struct notifier_block remote_softirq_cpu_notifier = {
78972 .notifier_call = remote_softirq_cpu_notify,
78973 };
78974
78975@@ -840,11 +840,11 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
78976 return NOTIFY_OK;
78977 }
78978
78979-static struct notifier_block __cpuinitdata cpu_nfb = {
78980+static struct notifier_block cpu_nfb = {
78981 .notifier_call = cpu_callback
78982 };
78983
78984-static struct smp_hotplug_thread softirq_threads = {
78985+static struct smp_hotplug_thread softirq_threads __read_only = {
78986 .store = &ksoftirqd,
78987 .thread_should_run = ksoftirqd_should_run,
78988 .thread_fn = run_ksoftirqd,
78989diff --git a/kernel/srcu.c b/kernel/srcu.c
78990index 01d5ccb..cdcbee6 100644
78991--- a/kernel/srcu.c
78992+++ b/kernel/srcu.c
78993@@ -300,9 +300,9 @@ int __srcu_read_lock(struct srcu_struct *sp)
78994
78995 idx = ACCESS_ONCE(sp->completed) & 0x1;
78996 preempt_disable();
78997- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
78998+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
78999 smp_mb(); /* B */ /* Avoid leaking the critical section. */
79000- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
79001+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
79002 preempt_enable();
79003 return idx;
79004 }
79005diff --git a/kernel/sys.c b/kernel/sys.c
79006index e5f0aca..8d58b1f 100644
79007--- a/kernel/sys.c
79008+++ b/kernel/sys.c
79009@@ -158,6 +158,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
79010 error = -EACCES;
79011 goto out;
79012 }
79013+
79014+ if (gr_handle_chroot_setpriority(p, niceval)) {
79015+ error = -EACCES;
79016+ goto out;
79017+ }
79018+
79019 no_nice = security_task_setnice(p, niceval);
79020 if (no_nice) {
79021 error = no_nice;
79022@@ -621,6 +627,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
79023 goto error;
79024 }
79025
79026+ if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
79027+ goto error;
79028+
79029 if (rgid != (gid_t) -1 ||
79030 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
79031 new->sgid = new->egid;
79032@@ -656,6 +665,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
79033 old = current_cred();
79034
79035 retval = -EPERM;
79036+
79037+ if (gr_check_group_change(kgid, kgid, kgid))
79038+ goto error;
79039+
79040 if (nsown_capable(CAP_SETGID))
79041 new->gid = new->egid = new->sgid = new->fsgid = kgid;
79042 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
79043@@ -673,7 +686,7 @@ error:
79044 /*
79045 * change the user struct in a credentials set to match the new UID
79046 */
79047-static int set_user(struct cred *new)
79048+int set_user(struct cred *new)
79049 {
79050 struct user_struct *new_user;
79051
79052@@ -753,6 +766,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
79053 goto error;
79054 }
79055
79056+ if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
79057+ goto error;
79058+
79059 if (!uid_eq(new->uid, old->uid)) {
79060 retval = set_user(new);
79061 if (retval < 0)
79062@@ -803,6 +819,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
79063 old = current_cred();
79064
79065 retval = -EPERM;
79066+
79067+ if (gr_check_crash_uid(kuid))
79068+ goto error;
79069+ if (gr_check_user_change(kuid, kuid, kuid))
79070+ goto error;
79071+
79072 if (nsown_capable(CAP_SETUID)) {
79073 new->suid = new->uid = kuid;
79074 if (!uid_eq(kuid, old->uid)) {
79075@@ -872,6 +894,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
79076 goto error;
79077 }
79078
79079+ if (gr_check_user_change(kruid, keuid, INVALID_UID))
79080+ goto error;
79081+
79082 if (ruid != (uid_t) -1) {
79083 new->uid = kruid;
79084 if (!uid_eq(kruid, old->uid)) {
79085@@ -954,6 +979,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
79086 goto error;
79087 }
79088
79089+ if (gr_check_group_change(krgid, kegid, INVALID_GID))
79090+ goto error;
79091+
79092 if (rgid != (gid_t) -1)
79093 new->gid = krgid;
79094 if (egid != (gid_t) -1)
79095@@ -1015,12 +1043,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
79096 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
79097 nsown_capable(CAP_SETUID)) {
79098 if (!uid_eq(kuid, old->fsuid)) {
79099+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
79100+ goto error;
79101+
79102 new->fsuid = kuid;
79103 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
79104 goto change_okay;
79105 }
79106 }
79107
79108+error:
79109 abort_creds(new);
79110 return old_fsuid;
79111
79112@@ -1053,12 +1085,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
79113 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
79114 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
79115 nsown_capable(CAP_SETGID)) {
79116+ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
79117+ goto error;
79118+
79119 if (!gid_eq(kgid, old->fsgid)) {
79120 new->fsgid = kgid;
79121 goto change_okay;
79122 }
79123 }
79124
79125+error:
79126 abort_creds(new);
79127 return old_fsgid;
79128
79129@@ -1366,19 +1402,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
79130 return -EFAULT;
79131
79132 down_read(&uts_sem);
79133- error = __copy_to_user(&name->sysname, &utsname()->sysname,
79134+ error = __copy_to_user(name->sysname, &utsname()->sysname,
79135 __OLD_UTS_LEN);
79136 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
79137- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
79138+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
79139 __OLD_UTS_LEN);
79140 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
79141- error |= __copy_to_user(&name->release, &utsname()->release,
79142+ error |= __copy_to_user(name->release, &utsname()->release,
79143 __OLD_UTS_LEN);
79144 error |= __put_user(0, name->release + __OLD_UTS_LEN);
79145- error |= __copy_to_user(&name->version, &utsname()->version,
79146+ error |= __copy_to_user(name->version, &utsname()->version,
79147 __OLD_UTS_LEN);
79148 error |= __put_user(0, name->version + __OLD_UTS_LEN);
79149- error |= __copy_to_user(&name->machine, &utsname()->machine,
79150+ error |= __copy_to_user(name->machine, &utsname()->machine,
79151 __OLD_UTS_LEN);
79152 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
79153 up_read(&uts_sem);
79154@@ -1580,6 +1616,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
79155 */
79156 new_rlim->rlim_cur = 1;
79157 }
79158+ /* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
79159+ is changed to a lower value. Since tasks can be created by the same
79160+ user in between this limit change and an execve by this task, force
79161+ a recheck only for this task by setting PF_NPROC_EXCEEDED
79162+ */
79163+ if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
79164+ tsk->flags |= PF_NPROC_EXCEEDED;
79165 }
79166 if (!retval) {
79167 if (old_rlim)
79168diff --git a/kernel/sysctl.c b/kernel/sysctl.c
79169index afc1dc6..f6cf355 100644
79170--- a/kernel/sysctl.c
79171+++ b/kernel/sysctl.c
79172@@ -93,7 +93,6 @@
79173
79174
79175 #if defined(CONFIG_SYSCTL)
79176-
79177 /* External variables not in a header file. */
79178 extern int sysctl_overcommit_memory;
79179 extern int sysctl_overcommit_ratio;
79180@@ -120,18 +119,18 @@ extern int blk_iopoll_enabled;
79181
79182 /* Constants used for minimum and maximum */
79183 #ifdef CONFIG_LOCKUP_DETECTOR
79184-static int sixty = 60;
79185-static int neg_one = -1;
79186+static int sixty __read_only = 60;
79187 #endif
79188
79189-static int zero;
79190-static int __maybe_unused one = 1;
79191-static int __maybe_unused two = 2;
79192-static int __maybe_unused three = 3;
79193-static unsigned long one_ul = 1;
79194-static int one_hundred = 100;
79195+static int neg_one __read_only = -1;
79196+static int zero __read_only = 0;
79197+static int __maybe_unused one __read_only = 1;
79198+static int __maybe_unused two __read_only = 2;
79199+static int __maybe_unused three __read_only = 3;
79200+static unsigned long one_ul __read_only = 1;
79201+static int one_hundred __read_only = 100;
79202 #ifdef CONFIG_PRINTK
79203-static int ten_thousand = 10000;
79204+static int ten_thousand __read_only = 10000;
79205 #endif
79206
79207 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
79208@@ -178,10 +177,8 @@ static int proc_taint(struct ctl_table *table, int write,
79209 void __user *buffer, size_t *lenp, loff_t *ppos);
79210 #endif
79211
79212-#ifdef CONFIG_PRINTK
79213 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
79214 void __user *buffer, size_t *lenp, loff_t *ppos);
79215-#endif
79216
79217 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
79218 void __user *buffer, size_t *lenp, loff_t *ppos);
79219@@ -212,6 +209,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
79220
79221 #endif
79222
79223+extern struct ctl_table grsecurity_table[];
79224+
79225 static struct ctl_table kern_table[];
79226 static struct ctl_table vm_table[];
79227 static struct ctl_table fs_table[];
79228@@ -226,6 +225,20 @@ extern struct ctl_table epoll_table[];
79229 int sysctl_legacy_va_layout;
79230 #endif
79231
79232+#ifdef CONFIG_PAX_SOFTMODE
79233+static ctl_table pax_table[] = {
79234+ {
79235+ .procname = "softmode",
79236+ .data = &pax_softmode,
79237+ .maxlen = sizeof(unsigned int),
79238+ .mode = 0600,
79239+ .proc_handler = &proc_dointvec,
79240+ },
79241+
79242+ { }
79243+};
79244+#endif
79245+
79246 /* The default sysctl tables: */
79247
79248 static struct ctl_table sysctl_base_table[] = {
79249@@ -274,6 +287,22 @@ static int max_extfrag_threshold = 1000;
79250 #endif
79251
79252 static struct ctl_table kern_table[] = {
79253+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
79254+ {
79255+ .procname = "grsecurity",
79256+ .mode = 0500,
79257+ .child = grsecurity_table,
79258+ },
79259+#endif
79260+
79261+#ifdef CONFIG_PAX_SOFTMODE
79262+ {
79263+ .procname = "pax",
79264+ .mode = 0500,
79265+ .child = pax_table,
79266+ },
79267+#endif
79268+
79269 {
79270 .procname = "sched_child_runs_first",
79271 .data = &sysctl_sched_child_runs_first,
79272@@ -608,7 +637,7 @@ static struct ctl_table kern_table[] = {
79273 .data = &modprobe_path,
79274 .maxlen = KMOD_PATH_LEN,
79275 .mode = 0644,
79276- .proc_handler = proc_dostring,
79277+ .proc_handler = proc_dostring_modpriv,
79278 },
79279 {
79280 .procname = "modules_disabled",
79281@@ -775,16 +804,20 @@ static struct ctl_table kern_table[] = {
79282 .extra1 = &zero,
79283 .extra2 = &one,
79284 },
79285+#endif
79286 {
79287 .procname = "kptr_restrict",
79288 .data = &kptr_restrict,
79289 .maxlen = sizeof(int),
79290 .mode = 0644,
79291 .proc_handler = proc_dointvec_minmax_sysadmin,
79292+#ifdef CONFIG_GRKERNSEC_HIDESYM
79293+ .extra1 = &two,
79294+#else
79295 .extra1 = &zero,
79296+#endif
79297 .extra2 = &two,
79298 },
79299-#endif
79300 {
79301 .procname = "ngroups_max",
79302 .data = &ngroups_max,
79303@@ -1026,10 +1059,17 @@ static struct ctl_table kern_table[] = {
79304 */
79305 {
79306 .procname = "perf_event_paranoid",
79307- .data = &sysctl_perf_event_paranoid,
79308- .maxlen = sizeof(sysctl_perf_event_paranoid),
79309+ .data = &sysctl_perf_event_legitimately_concerned,
79310+ .maxlen = sizeof(sysctl_perf_event_legitimately_concerned),
79311 .mode = 0644,
79312- .proc_handler = proc_dointvec,
79313+ /* go ahead, be a hero */
79314+ .proc_handler = proc_dointvec_minmax_sysadmin,
79315+ .extra1 = &neg_one,
79316+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
79317+ .extra2 = &three,
79318+#else
79319+ .extra2 = &two,
79320+#endif
79321 },
79322 {
79323 .procname = "perf_event_mlock_kb",
79324@@ -1283,6 +1323,13 @@ static struct ctl_table vm_table[] = {
79325 .proc_handler = proc_dointvec_minmax,
79326 .extra1 = &zero,
79327 },
79328+ {
79329+ .procname = "heap_stack_gap",
79330+ .data = &sysctl_heap_stack_gap,
79331+ .maxlen = sizeof(sysctl_heap_stack_gap),
79332+ .mode = 0644,
79333+ .proc_handler = proc_doulongvec_minmax,
79334+ },
79335 #else
79336 {
79337 .procname = "nr_trim_pages",
79338@@ -1733,6 +1780,16 @@ int proc_dostring(struct ctl_table *table, int write,
79339 buffer, lenp, ppos);
79340 }
79341
79342+int proc_dostring_modpriv(struct ctl_table *table, int write,
79343+ void __user *buffer, size_t *lenp, loff_t *ppos)
79344+{
79345+ if (write && !capable(CAP_SYS_MODULE))
79346+ return -EPERM;
79347+
79348+ return _proc_do_string(table->data, table->maxlen, write,
79349+ buffer, lenp, ppos);
79350+}
79351+
79352 static size_t proc_skip_spaces(char **buf)
79353 {
79354 size_t ret;
79355@@ -1838,6 +1895,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
79356 len = strlen(tmp);
79357 if (len > *size)
79358 len = *size;
79359+ if (len > sizeof(tmp))
79360+ len = sizeof(tmp);
79361 if (copy_to_user(*buf, tmp, len))
79362 return -EFAULT;
79363 *size -= len;
79364@@ -2002,7 +2061,7 @@ int proc_dointvec(struct ctl_table *table, int write,
79365 static int proc_taint(struct ctl_table *table, int write,
79366 void __user *buffer, size_t *lenp, loff_t *ppos)
79367 {
79368- struct ctl_table t;
79369+ ctl_table_no_const t;
79370 unsigned long tmptaint = get_taint();
79371 int err;
79372
79373@@ -2030,7 +2089,6 @@ static int proc_taint(struct ctl_table *table, int write,
79374 return err;
79375 }
79376
79377-#ifdef CONFIG_PRINTK
79378 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
79379 void __user *buffer, size_t *lenp, loff_t *ppos)
79380 {
79381@@ -2039,7 +2097,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
79382
79383 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
79384 }
79385-#endif
79386
79387 struct do_proc_dointvec_minmax_conv_param {
79388 int *min;
79389@@ -2186,8 +2243,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
79390 *i = val;
79391 } else {
79392 val = convdiv * (*i) / convmul;
79393- if (!first)
79394+ if (!first) {
79395 err = proc_put_char(&buffer, &left, '\t');
79396+ if (err)
79397+ break;
79398+ }
79399 err = proc_put_long(&buffer, &left, val, false);
79400 if (err)
79401 break;
79402@@ -2579,6 +2639,12 @@ int proc_dostring(struct ctl_table *table, int write,
79403 return -ENOSYS;
79404 }
79405
79406+int proc_dostring_modpriv(struct ctl_table *table, int write,
79407+ void __user *buffer, size_t *lenp, loff_t *ppos)
79408+{
79409+ return -ENOSYS;
79410+}
79411+
79412 int proc_dointvec(struct ctl_table *table, int write,
79413 void __user *buffer, size_t *lenp, loff_t *ppos)
79414 {
79415@@ -2635,5 +2701,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
79416 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
79417 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
79418 EXPORT_SYMBOL(proc_dostring);
79419+EXPORT_SYMBOL(proc_dostring_modpriv);
79420 EXPORT_SYMBOL(proc_doulongvec_minmax);
79421 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
79422diff --git a/kernel/taskstats.c b/kernel/taskstats.c
79423index 145bb4d..b2aa969 100644
79424--- a/kernel/taskstats.c
79425+++ b/kernel/taskstats.c
79426@@ -28,9 +28,12 @@
79427 #include <linux/fs.h>
79428 #include <linux/file.h>
79429 #include <linux/pid_namespace.h>
79430+#include <linux/grsecurity.h>
79431 #include <net/genetlink.h>
79432 #include <linux/atomic.h>
79433
79434+extern int gr_is_taskstats_denied(int pid);
79435+
79436 /*
79437 * Maximum length of a cpumask that can be specified in
79438 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
79439@@ -570,6 +573,9 @@ err:
79440
79441 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
79442 {
79443+ if (gr_is_taskstats_denied(current->pid))
79444+ return -EACCES;
79445+
79446 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
79447 return cmd_attr_register_cpumask(info);
79448 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
79449diff --git a/kernel/time.c b/kernel/time.c
79450index f8342a4..288f13b 100644
79451--- a/kernel/time.c
79452+++ b/kernel/time.c
79453@@ -171,6 +171,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
79454 return error;
79455
79456 if (tz) {
79457+ /* we log in do_settimeofday called below, so don't log twice
79458+ */
79459+ if (!tv)
79460+ gr_log_timechange();
79461+
79462 sys_tz = *tz;
79463 update_vsyscall_tz();
79464 if (firsttime) {
79465@@ -501,7 +506,7 @@ EXPORT_SYMBOL(usecs_to_jiffies);
79466 * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
79467 * value to a scaled second value.
79468 */
79469-unsigned long
79470+unsigned long __intentional_overflow(-1)
79471 timespec_to_jiffies(const struct timespec *value)
79472 {
79473 unsigned long sec = value->tv_sec;
79474diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
79475index f11d83b..d016d91 100644
79476--- a/kernel/time/alarmtimer.c
79477+++ b/kernel/time/alarmtimer.c
79478@@ -750,7 +750,7 @@ static int __init alarmtimer_init(void)
79479 struct platform_device *pdev;
79480 int error = 0;
79481 int i;
79482- struct k_clock alarm_clock = {
79483+ static struct k_clock alarm_clock = {
79484 .clock_getres = alarm_clock_getres,
79485 .clock_get = alarm_clock_get,
79486 .timer_create = alarm_timer_create,
79487diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
79488index 90ad470..1814e9a 100644
79489--- a/kernel/time/tick-broadcast.c
79490+++ b/kernel/time/tick-broadcast.c
79491@@ -138,7 +138,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
79492 * then clear the broadcast bit.
79493 */
79494 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
79495- int cpu = smp_processor_id();
79496+ cpu = smp_processor_id();
79497 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
79498 tick_broadcast_clear_oneshot(cpu);
79499 } else {
79500diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
79501index 183df62..59b1442 100644
79502--- a/kernel/time/timekeeping.c
79503+++ b/kernel/time/timekeeping.c
79504@@ -15,6 +15,7 @@
79505 #include <linux/init.h>
79506 #include <linux/mm.h>
79507 #include <linux/sched.h>
79508+#include <linux/grsecurity.h>
79509 #include <linux/syscore_ops.h>
79510 #include <linux/clocksource.h>
79511 #include <linux/jiffies.h>
79512@@ -448,6 +449,8 @@ int do_settimeofday(const struct timespec *tv)
79513 if (!timespec_valid_strict(tv))
79514 return -EINVAL;
79515
79516+ gr_log_timechange();
79517+
79518 write_seqlock_irqsave(&tk->lock, flags);
79519
79520 timekeeping_forward_now(tk);
79521diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
79522index af5a7e9..715611a 100644
79523--- a/kernel/time/timer_list.c
79524+++ b/kernel/time/timer_list.c
79525@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
79526
79527 static void print_name_offset(struct seq_file *m, void *sym)
79528 {
79529+#ifdef CONFIG_GRKERNSEC_HIDESYM
79530+ SEQ_printf(m, "<%p>", NULL);
79531+#else
79532 char symname[KSYM_NAME_LEN];
79533
79534 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
79535 SEQ_printf(m, "<%pK>", sym);
79536 else
79537 SEQ_printf(m, "%s", symname);
79538+#endif
79539 }
79540
79541 static void
79542@@ -112,7 +116,11 @@ next_one:
79543 static void
79544 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
79545 {
79546+#ifdef CONFIG_GRKERNSEC_HIDESYM
79547+ SEQ_printf(m, " .base: %p\n", NULL);
79548+#else
79549 SEQ_printf(m, " .base: %pK\n", base);
79550+#endif
79551 SEQ_printf(m, " .index: %d\n",
79552 base->index);
79553 SEQ_printf(m, " .resolution: %Lu nsecs\n",
79554@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
79555 {
79556 struct proc_dir_entry *pe;
79557
79558+#ifdef CONFIG_GRKERNSEC_PROC_ADD
79559+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
79560+#else
79561 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
79562+#endif
79563 if (!pe)
79564 return -ENOMEM;
79565 return 0;
79566diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
79567index 0b537f2..40d6c20 100644
79568--- a/kernel/time/timer_stats.c
79569+++ b/kernel/time/timer_stats.c
79570@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
79571 static unsigned long nr_entries;
79572 static struct entry entries[MAX_ENTRIES];
79573
79574-static atomic_t overflow_count;
79575+static atomic_unchecked_t overflow_count;
79576
79577 /*
79578 * The entries are in a hash-table, for fast lookup:
79579@@ -140,7 +140,7 @@ static void reset_entries(void)
79580 nr_entries = 0;
79581 memset(entries, 0, sizeof(entries));
79582 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
79583- atomic_set(&overflow_count, 0);
79584+ atomic_set_unchecked(&overflow_count, 0);
79585 }
79586
79587 static struct entry *alloc_entry(void)
79588@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
79589 if (likely(entry))
79590 entry->count++;
79591 else
79592- atomic_inc(&overflow_count);
79593+ atomic_inc_unchecked(&overflow_count);
79594
79595 out_unlock:
79596 raw_spin_unlock_irqrestore(lock, flags);
79597@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
79598
79599 static void print_name_offset(struct seq_file *m, unsigned long addr)
79600 {
79601+#ifdef CONFIG_GRKERNSEC_HIDESYM
79602+ seq_printf(m, "<%p>", NULL);
79603+#else
79604 char symname[KSYM_NAME_LEN];
79605
79606 if (lookup_symbol_name(addr, symname) < 0)
79607- seq_printf(m, "<%p>", (void *)addr);
79608+ seq_printf(m, "<%pK>", (void *)addr);
79609 else
79610 seq_printf(m, "%s", symname);
79611+#endif
79612 }
79613
79614 static int tstats_show(struct seq_file *m, void *v)
79615@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
79616
79617 seq_puts(m, "Timer Stats Version: v0.2\n");
79618 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
79619- if (atomic_read(&overflow_count))
79620+ if (atomic_read_unchecked(&overflow_count))
79621 seq_printf(m, "Overflow: %d entries\n",
79622- atomic_read(&overflow_count));
79623+ atomic_read_unchecked(&overflow_count));
79624
79625 for (i = 0; i < nr_entries; i++) {
79626 entry = entries + i;
79627@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
79628 {
79629 struct proc_dir_entry *pe;
79630
79631+#ifdef CONFIG_GRKERNSEC_PROC_ADD
79632+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
79633+#else
79634 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
79635+#endif
79636 if (!pe)
79637 return -ENOMEM;
79638 return 0;
79639diff --git a/kernel/timer.c b/kernel/timer.c
79640index 1b399c8..90e1849 100644
79641--- a/kernel/timer.c
79642+++ b/kernel/timer.c
79643@@ -1363,7 +1363,7 @@ void update_process_times(int user_tick)
79644 /*
79645 * This function runs timers and the timer-tq in bottom half context.
79646 */
79647-static void run_timer_softirq(struct softirq_action *h)
79648+static void run_timer_softirq(void)
79649 {
79650 struct tvec_base *base = __this_cpu_read(tvec_bases);
79651
79652@@ -1481,7 +1481,7 @@ static void process_timeout(unsigned long __data)
79653 *
79654 * In all cases the return value is guaranteed to be non-negative.
79655 */
79656-signed long __sched schedule_timeout(signed long timeout)
79657+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
79658 {
79659 struct timer_list timer;
79660 unsigned long expire;
79661@@ -1772,7 +1772,7 @@ static int __cpuinit timer_cpu_notify(struct notifier_block *self,
79662 return NOTIFY_OK;
79663 }
79664
79665-static struct notifier_block __cpuinitdata timers_nb = {
79666+static struct notifier_block timers_nb = {
79667 .notifier_call = timer_cpu_notify,
79668 };
79669
79670diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
79671index 5a0f781..1497f95 100644
79672--- a/kernel/trace/blktrace.c
79673+++ b/kernel/trace/blktrace.c
79674@@ -317,7 +317,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
79675 struct blk_trace *bt = filp->private_data;
79676 char buf[16];
79677
79678- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
79679+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
79680
79681 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
79682 }
79683@@ -375,7 +375,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
79684 return 1;
79685
79686 bt = buf->chan->private_data;
79687- atomic_inc(&bt->dropped);
79688+ atomic_inc_unchecked(&bt->dropped);
79689 return 0;
79690 }
79691
79692@@ -476,7 +476,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
79693
79694 bt->dir = dir;
79695 bt->dev = dev;
79696- atomic_set(&bt->dropped, 0);
79697+ atomic_set_unchecked(&bt->dropped, 0);
79698
79699 ret = -EIO;
79700 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
79701diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
79702index 0a0e2a6..943495e 100644
79703--- a/kernel/trace/ftrace.c
79704+++ b/kernel/trace/ftrace.c
79705@@ -1909,12 +1909,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
79706 if (unlikely(ftrace_disabled))
79707 return 0;
79708
79709+ ret = ftrace_arch_code_modify_prepare();
79710+ FTRACE_WARN_ON(ret);
79711+ if (ret)
79712+ return 0;
79713+
79714 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
79715+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
79716 if (ret) {
79717 ftrace_bug(ret, ip);
79718- return 0;
79719 }
79720- return 1;
79721+ return ret ? 0 : 1;
79722 }
79723
79724 /*
79725@@ -2986,7 +2991,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
79726
79727 int
79728 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
79729- void *data)
79730+ void *data)
79731 {
79732 struct ftrace_func_probe *entry;
79733 struct ftrace_page *pg;
79734@@ -3854,8 +3859,10 @@ static int ftrace_process_locs(struct module *mod,
79735 if (!count)
79736 return 0;
79737
79738+ pax_open_kernel();
79739 sort(start, count, sizeof(*start),
79740 ftrace_cmp_ips, ftrace_swap_ips);
79741+ pax_close_kernel();
79742
79743 start_pg = ftrace_allocate_pages(count);
79744 if (!start_pg)
79745@@ -4574,8 +4581,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
79746 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
79747
79748 static int ftrace_graph_active;
79749-static struct notifier_block ftrace_suspend_notifier;
79750-
79751 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
79752 {
79753 return 0;
79754@@ -4719,6 +4724,10 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
79755 return NOTIFY_DONE;
79756 }
79757
79758+static struct notifier_block ftrace_suspend_notifier = {
79759+ .notifier_call = ftrace_suspend_notifier_call
79760+};
79761+
79762 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
79763 trace_func_graph_ent_t entryfunc)
79764 {
79765@@ -4732,7 +4741,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
79766 goto out;
79767 }
79768
79769- ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
79770 register_pm_notifier(&ftrace_suspend_notifier);
79771
79772 ftrace_graph_active++;
79773diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
79774index 6989df2..c2265cf 100644
79775--- a/kernel/trace/ring_buffer.c
79776+++ b/kernel/trace/ring_buffer.c
79777@@ -349,9 +349,9 @@ struct buffer_data_page {
79778 */
79779 struct buffer_page {
79780 struct list_head list; /* list of buffer pages */
79781- local_t write; /* index for next write */
79782+ local_unchecked_t write; /* index for next write */
79783 unsigned read; /* index for next read */
79784- local_t entries; /* entries on this page */
79785+ local_unchecked_t entries; /* entries on this page */
79786 unsigned long real_end; /* real end of data */
79787 struct buffer_data_page *page; /* Actual data page */
79788 };
79789@@ -464,8 +464,8 @@ struct ring_buffer_per_cpu {
79790 unsigned long last_overrun;
79791 local_t entries_bytes;
79792 local_t entries;
79793- local_t overrun;
79794- local_t commit_overrun;
79795+ local_unchecked_t overrun;
79796+ local_unchecked_t commit_overrun;
79797 local_t dropped_events;
79798 local_t committing;
79799 local_t commits;
79800@@ -864,8 +864,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
79801 *
79802 * We add a counter to the write field to denote this.
79803 */
79804- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
79805- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
79806+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
79807+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
79808
79809 /*
79810 * Just make sure we have seen our old_write and synchronize
79811@@ -893,8 +893,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
79812 * cmpxchg to only update if an interrupt did not already
79813 * do it for us. If the cmpxchg fails, we don't care.
79814 */
79815- (void)local_cmpxchg(&next_page->write, old_write, val);
79816- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
79817+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
79818+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
79819
79820 /*
79821 * No need to worry about races with clearing out the commit.
79822@@ -1253,12 +1253,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
79823
79824 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
79825 {
79826- return local_read(&bpage->entries) & RB_WRITE_MASK;
79827+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
79828 }
79829
79830 static inline unsigned long rb_page_write(struct buffer_page *bpage)
79831 {
79832- return local_read(&bpage->write) & RB_WRITE_MASK;
79833+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
79834 }
79835
79836 static int
79837@@ -1353,7 +1353,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
79838 * bytes consumed in ring buffer from here.
79839 * Increment overrun to account for the lost events.
79840 */
79841- local_add(page_entries, &cpu_buffer->overrun);
79842+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
79843 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
79844 }
79845
79846@@ -1909,7 +1909,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
79847 * it is our responsibility to update
79848 * the counters.
79849 */
79850- local_add(entries, &cpu_buffer->overrun);
79851+ local_add_unchecked(entries, &cpu_buffer->overrun);
79852 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
79853
79854 /*
79855@@ -2059,7 +2059,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
79856 if (tail == BUF_PAGE_SIZE)
79857 tail_page->real_end = 0;
79858
79859- local_sub(length, &tail_page->write);
79860+ local_sub_unchecked(length, &tail_page->write);
79861 return;
79862 }
79863
79864@@ -2094,7 +2094,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
79865 rb_event_set_padding(event);
79866
79867 /* Set the write back to the previous setting */
79868- local_sub(length, &tail_page->write);
79869+ local_sub_unchecked(length, &tail_page->write);
79870 return;
79871 }
79872
79873@@ -2106,7 +2106,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
79874
79875 /* Set write to end of buffer */
79876 length = (tail + length) - BUF_PAGE_SIZE;
79877- local_sub(length, &tail_page->write);
79878+ local_sub_unchecked(length, &tail_page->write);
79879 }
79880
79881 /*
79882@@ -2132,7 +2132,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
79883 * about it.
79884 */
79885 if (unlikely(next_page == commit_page)) {
79886- local_inc(&cpu_buffer->commit_overrun);
79887+ local_inc_unchecked(&cpu_buffer->commit_overrun);
79888 goto out_reset;
79889 }
79890
79891@@ -2188,7 +2188,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
79892 cpu_buffer->tail_page) &&
79893 (cpu_buffer->commit_page ==
79894 cpu_buffer->reader_page))) {
79895- local_inc(&cpu_buffer->commit_overrun);
79896+ local_inc_unchecked(&cpu_buffer->commit_overrun);
79897 goto out_reset;
79898 }
79899 }
79900@@ -2236,7 +2236,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
79901 length += RB_LEN_TIME_EXTEND;
79902
79903 tail_page = cpu_buffer->tail_page;
79904- write = local_add_return(length, &tail_page->write);
79905+ write = local_add_return_unchecked(length, &tail_page->write);
79906
79907 /* set write to only the index of the write */
79908 write &= RB_WRITE_MASK;
79909@@ -2253,7 +2253,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
79910 kmemcheck_annotate_bitfield(event, bitfield);
79911 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
79912
79913- local_inc(&tail_page->entries);
79914+ local_inc_unchecked(&tail_page->entries);
79915
79916 /*
79917 * If this is the first commit on the page, then update
79918@@ -2286,7 +2286,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
79919
79920 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
79921 unsigned long write_mask =
79922- local_read(&bpage->write) & ~RB_WRITE_MASK;
79923+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
79924 unsigned long event_length = rb_event_length(event);
79925 /*
79926 * This is on the tail page. It is possible that
79927@@ -2296,7 +2296,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
79928 */
79929 old_index += write_mask;
79930 new_index += write_mask;
79931- index = local_cmpxchg(&bpage->write, old_index, new_index);
79932+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
79933 if (index == old_index) {
79934 /* update counters */
79935 local_sub(event_length, &cpu_buffer->entries_bytes);
79936@@ -2670,7 +2670,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
79937
79938 /* Do the likely case first */
79939 if (likely(bpage->page == (void *)addr)) {
79940- local_dec(&bpage->entries);
79941+ local_dec_unchecked(&bpage->entries);
79942 return;
79943 }
79944
79945@@ -2682,7 +2682,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
79946 start = bpage;
79947 do {
79948 if (bpage->page == (void *)addr) {
79949- local_dec(&bpage->entries);
79950+ local_dec_unchecked(&bpage->entries);
79951 return;
79952 }
79953 rb_inc_page(cpu_buffer, &bpage);
79954@@ -2964,7 +2964,7 @@ static inline unsigned long
79955 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
79956 {
79957 return local_read(&cpu_buffer->entries) -
79958- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
79959+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
79960 }
79961
79962 /**
79963@@ -3053,7 +3053,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
79964 return 0;
79965
79966 cpu_buffer = buffer->buffers[cpu];
79967- ret = local_read(&cpu_buffer->overrun);
79968+ ret = local_read_unchecked(&cpu_buffer->overrun);
79969
79970 return ret;
79971 }
79972@@ -3076,7 +3076,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
79973 return 0;
79974
79975 cpu_buffer = buffer->buffers[cpu];
79976- ret = local_read(&cpu_buffer->commit_overrun);
79977+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
79978
79979 return ret;
79980 }
79981@@ -3161,7 +3161,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
79982 /* if you care about this being correct, lock the buffer */
79983 for_each_buffer_cpu(buffer, cpu) {
79984 cpu_buffer = buffer->buffers[cpu];
79985- overruns += local_read(&cpu_buffer->overrun);
79986+ overruns += local_read_unchecked(&cpu_buffer->overrun);
79987 }
79988
79989 return overruns;
79990@@ -3337,8 +3337,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
79991 /*
79992 * Reset the reader page to size zero.
79993 */
79994- local_set(&cpu_buffer->reader_page->write, 0);
79995- local_set(&cpu_buffer->reader_page->entries, 0);
79996+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
79997+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
79998 local_set(&cpu_buffer->reader_page->page->commit, 0);
79999 cpu_buffer->reader_page->real_end = 0;
80000
80001@@ -3372,7 +3372,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
80002 * want to compare with the last_overrun.
80003 */
80004 smp_mb();
80005- overwrite = local_read(&(cpu_buffer->overrun));
80006+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
80007
80008 /*
80009 * Here's the tricky part.
80010@@ -3942,8 +3942,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
80011
80012 cpu_buffer->head_page
80013 = list_entry(cpu_buffer->pages, struct buffer_page, list);
80014- local_set(&cpu_buffer->head_page->write, 0);
80015- local_set(&cpu_buffer->head_page->entries, 0);
80016+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
80017+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
80018 local_set(&cpu_buffer->head_page->page->commit, 0);
80019
80020 cpu_buffer->head_page->read = 0;
80021@@ -3953,14 +3953,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
80022
80023 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
80024 INIT_LIST_HEAD(&cpu_buffer->new_pages);
80025- local_set(&cpu_buffer->reader_page->write, 0);
80026- local_set(&cpu_buffer->reader_page->entries, 0);
80027+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
80028+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
80029 local_set(&cpu_buffer->reader_page->page->commit, 0);
80030 cpu_buffer->reader_page->read = 0;
80031
80032 local_set(&cpu_buffer->entries_bytes, 0);
80033- local_set(&cpu_buffer->overrun, 0);
80034- local_set(&cpu_buffer->commit_overrun, 0);
80035+ local_set_unchecked(&cpu_buffer->overrun, 0);
80036+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
80037 local_set(&cpu_buffer->dropped_events, 0);
80038 local_set(&cpu_buffer->entries, 0);
80039 local_set(&cpu_buffer->committing, 0);
80040@@ -4364,8 +4364,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
80041 rb_init_page(bpage);
80042 bpage = reader->page;
80043 reader->page = *data_page;
80044- local_set(&reader->write, 0);
80045- local_set(&reader->entries, 0);
80046+ local_set_unchecked(&reader->write, 0);
80047+ local_set_unchecked(&reader->entries, 0);
80048 reader->read = 0;
80049 *data_page = bpage;
80050
80051diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
80052index 3f28192..a29e8b0 100644
80053--- a/kernel/trace/trace.c
80054+++ b/kernel/trace/trace.c
80055@@ -2893,7 +2893,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
80056 return 0;
80057 }
80058
80059-int set_tracer_flag(unsigned int mask, int enabled)
80060+int set_tracer_flag(unsigned long mask, int enabled)
80061 {
80062 /* do nothing if flag is already set */
80063 if (!!(trace_flags & mask) == !!enabled)
80064@@ -4637,10 +4637,9 @@ static const struct file_operations tracing_dyn_info_fops = {
80065 };
80066 #endif
80067
80068-static struct dentry *d_tracer;
80069-
80070 struct dentry *tracing_init_dentry(void)
80071 {
80072+ static struct dentry *d_tracer;
80073 static int once;
80074
80075 if (d_tracer)
80076@@ -4660,10 +4659,9 @@ struct dentry *tracing_init_dentry(void)
80077 return d_tracer;
80078 }
80079
80080-static struct dentry *d_percpu;
80081-
80082 static struct dentry *tracing_dentry_percpu(void)
80083 {
80084+ static struct dentry *d_percpu;
80085 static int once;
80086 struct dentry *d_tracer;
80087
80088diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
80089index 2081971..09f861e 100644
80090--- a/kernel/trace/trace.h
80091+++ b/kernel/trace/trace.h
80092@@ -948,7 +948,7 @@ extern const char *__stop___trace_bprintk_fmt[];
80093 void trace_printk_init_buffers(void);
80094 void trace_printk_start_comm(void);
80095 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
80096-int set_tracer_flag(unsigned int mask, int enabled);
80097+int set_tracer_flag(unsigned long mask, int enabled);
80098
80099 #undef FTRACE_ENTRY
80100 #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \
80101diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
80102index 57e9b28..eebe41c 100644
80103--- a/kernel/trace/trace_events.c
80104+++ b/kernel/trace/trace_events.c
80105@@ -1329,10 +1329,6 @@ static LIST_HEAD(ftrace_module_file_list);
80106 struct ftrace_module_file_ops {
80107 struct list_head list;
80108 struct module *mod;
80109- struct file_operations id;
80110- struct file_operations enable;
80111- struct file_operations format;
80112- struct file_operations filter;
80113 };
80114
80115 static struct ftrace_module_file_ops *
80116@@ -1353,17 +1349,12 @@ trace_create_file_ops(struct module *mod)
80117
80118 file_ops->mod = mod;
80119
80120- file_ops->id = ftrace_event_id_fops;
80121- file_ops->id.owner = mod;
80122-
80123- file_ops->enable = ftrace_enable_fops;
80124- file_ops->enable.owner = mod;
80125-
80126- file_ops->filter = ftrace_event_filter_fops;
80127- file_ops->filter.owner = mod;
80128-
80129- file_ops->format = ftrace_event_format_fops;
80130- file_ops->format.owner = mod;
80131+ pax_open_kernel();
80132+ mod->trace_id.owner = mod;
80133+ mod->trace_enable.owner = mod;
80134+ mod->trace_filter.owner = mod;
80135+ mod->trace_format.owner = mod;
80136+ pax_close_kernel();
80137
80138 list_add(&file_ops->list, &ftrace_module_file_list);
80139
80140@@ -1387,8 +1378,8 @@ static void trace_module_add_events(struct module *mod)
80141
80142 for_each_event(call, start, end) {
80143 __trace_add_event_call(*call, mod,
80144- &file_ops->id, &file_ops->enable,
80145- &file_ops->filter, &file_ops->format);
80146+ &mod->trace_id, &mod->trace_enable,
80147+ &mod->trace_filter, &mod->trace_format);
80148 }
80149 }
80150
80151diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
80152index fd3c8aa..5f324a6 100644
80153--- a/kernel/trace/trace_mmiotrace.c
80154+++ b/kernel/trace/trace_mmiotrace.c
80155@@ -24,7 +24,7 @@ struct header_iter {
80156 static struct trace_array *mmio_trace_array;
80157 static bool overrun_detected;
80158 static unsigned long prev_overruns;
80159-static atomic_t dropped_count;
80160+static atomic_unchecked_t dropped_count;
80161
80162 static void mmio_reset_data(struct trace_array *tr)
80163 {
80164@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
80165
80166 static unsigned long count_overruns(struct trace_iterator *iter)
80167 {
80168- unsigned long cnt = atomic_xchg(&dropped_count, 0);
80169+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
80170 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
80171
80172 if (over > prev_overruns)
80173@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
80174 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
80175 sizeof(*entry), 0, pc);
80176 if (!event) {
80177- atomic_inc(&dropped_count);
80178+ atomic_inc_unchecked(&dropped_count);
80179 return;
80180 }
80181 entry = ring_buffer_event_data(event);
80182@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
80183 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
80184 sizeof(*entry), 0, pc);
80185 if (!event) {
80186- atomic_inc(&dropped_count);
80187+ atomic_inc_unchecked(&dropped_count);
80188 return;
80189 }
80190 entry = ring_buffer_event_data(event);
80191diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
80192index 697e88d..1a79993 100644
80193--- a/kernel/trace/trace_output.c
80194+++ b/kernel/trace/trace_output.c
80195@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
80196
80197 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
80198 if (!IS_ERR(p)) {
80199- p = mangle_path(s->buffer + s->len, p, "\n");
80200+ p = mangle_path(s->buffer + s->len, p, "\n\\");
80201 if (p) {
80202 s->len = p - s->buffer;
80203 return 1;
80204@@ -851,14 +851,16 @@ int register_ftrace_event(struct trace_event *event)
80205 goto out;
80206 }
80207
80208+ pax_open_kernel();
80209 if (event->funcs->trace == NULL)
80210- event->funcs->trace = trace_nop_print;
80211+ *(void **)&event->funcs->trace = trace_nop_print;
80212 if (event->funcs->raw == NULL)
80213- event->funcs->raw = trace_nop_print;
80214+ *(void **)&event->funcs->raw = trace_nop_print;
80215 if (event->funcs->hex == NULL)
80216- event->funcs->hex = trace_nop_print;
80217+ *(void **)&event->funcs->hex = trace_nop_print;
80218 if (event->funcs->binary == NULL)
80219- event->funcs->binary = trace_nop_print;
80220+ *(void **)&event->funcs->binary = trace_nop_print;
80221+ pax_close_kernel();
80222
80223 key = event->type & (EVENT_HASHSIZE - 1);
80224
80225diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
80226index b20428c..4845a10 100644
80227--- a/kernel/trace/trace_stack.c
80228+++ b/kernel/trace/trace_stack.c
80229@@ -68,7 +68,7 @@ check_stack(unsigned long ip, unsigned long *stack)
80230 return;
80231
80232 /* we do not handle interrupt stacks yet */
80233- if (!object_is_on_stack(stack))
80234+ if (!object_starts_on_stack(stack))
80235 return;
80236
80237 local_irq_save(flags);
80238diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
80239index e134d8f..a018cdd 100644
80240--- a/kernel/user_namespace.c
80241+++ b/kernel/user_namespace.c
80242@@ -853,7 +853,7 @@ static int userns_install(struct nsproxy *nsproxy, void *ns)
80243 if (atomic_read(&current->mm->mm_users) > 1)
80244 return -EINVAL;
80245
80246- if (current->fs->users != 1)
80247+ if (atomic_read(&current->fs->users) != 1)
80248 return -EINVAL;
80249
80250 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
80251diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
80252index 4f69f9a..7c6f8f8 100644
80253--- a/kernel/utsname_sysctl.c
80254+++ b/kernel/utsname_sysctl.c
80255@@ -47,7 +47,7 @@ static void put_uts(ctl_table *table, int write, void *which)
80256 static int proc_do_uts_string(ctl_table *table, int write,
80257 void __user *buffer, size_t *lenp, loff_t *ppos)
80258 {
80259- struct ctl_table uts_table;
80260+ ctl_table_no_const uts_table;
80261 int r;
80262 memcpy(&uts_table, table, sizeof(uts_table));
80263 uts_table.data = get_uts(table, write);
80264diff --git a/kernel/watchdog.c b/kernel/watchdog.c
80265index 4a94467..80a6f9c 100644
80266--- a/kernel/watchdog.c
80267+++ b/kernel/watchdog.c
80268@@ -526,7 +526,7 @@ int proc_dowatchdog(struct ctl_table *table, int write,
80269 }
80270 #endif /* CONFIG_SYSCTL */
80271
80272-static struct smp_hotplug_thread watchdog_threads = {
80273+static struct smp_hotplug_thread watchdog_threads __read_only = {
80274 .store = &softlockup_watchdog,
80275 .thread_should_run = watchdog_should_run,
80276 .thread_fn = watchdog,
80277diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
80278index 28be08c..47bab92 100644
80279--- a/lib/Kconfig.debug
80280+++ b/lib/Kconfig.debug
80281@@ -549,7 +549,7 @@ config DEBUG_MUTEXES
80282
80283 config DEBUG_LOCK_ALLOC
80284 bool "Lock debugging: detect incorrect freeing of live locks"
80285- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
80286+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
80287 select DEBUG_SPINLOCK
80288 select DEBUG_MUTEXES
80289 select LOCKDEP
80290@@ -563,7 +563,7 @@ config DEBUG_LOCK_ALLOC
80291
80292 config PROVE_LOCKING
80293 bool "Lock debugging: prove locking correctness"
80294- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
80295+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
80296 select LOCKDEP
80297 select DEBUG_SPINLOCK
80298 select DEBUG_MUTEXES
80299@@ -614,7 +614,7 @@ config LOCKDEP
80300
80301 config LOCK_STAT
80302 bool "Lock usage statistics"
80303- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
80304+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
80305 select LOCKDEP
80306 select DEBUG_SPINLOCK
80307 select DEBUG_MUTEXES
80308@@ -1282,6 +1282,7 @@ config LATENCYTOP
80309 depends on DEBUG_KERNEL
80310 depends on STACKTRACE_SUPPORT
80311 depends on PROC_FS
80312+ depends on !GRKERNSEC_HIDESYM
80313 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
80314 select KALLSYMS
80315 select KALLSYMS_ALL
80316@@ -1310,7 +1311,7 @@ config INTERVAL_TREE_TEST
80317
80318 config PROVIDE_OHCI1394_DMA_INIT
80319 bool "Remote debugging over FireWire early on boot"
80320- depends on PCI && X86
80321+ depends on PCI && X86 && !GRKERNSEC
80322 help
80323 If you want to debug problems which hang or crash the kernel early
80324 on boot and the crashing machine has a FireWire port, you can use
80325@@ -1339,7 +1340,7 @@ config PROVIDE_OHCI1394_DMA_INIT
80326
80327 config FIREWIRE_OHCI_REMOTE_DMA
80328 bool "Remote debugging over FireWire with firewire-ohci"
80329- depends on FIREWIRE_OHCI
80330+ depends on FIREWIRE_OHCI && !GRKERNSEC
80331 help
80332 This option lets you use the FireWire bus for remote debugging
80333 with help of the firewire-ohci driver. It enables unfiltered
80334diff --git a/lib/Makefile b/lib/Makefile
80335index 6e2cc56..9b13738 100644
80336--- a/lib/Makefile
80337+++ b/lib/Makefile
80338@@ -47,7 +47,7 @@ obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
80339
80340 obj-$(CONFIG_BTREE) += btree.o
80341 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
80342-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
80343+obj-y += list_debug.o
80344 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
80345
80346 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
80347diff --git a/lib/bitmap.c b/lib/bitmap.c
80348index 06f7e4f..f3cf2b0 100644
80349--- a/lib/bitmap.c
80350+++ b/lib/bitmap.c
80351@@ -422,7 +422,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
80352 {
80353 int c, old_c, totaldigits, ndigits, nchunks, nbits;
80354 u32 chunk;
80355- const char __user __force *ubuf = (const char __user __force *)buf;
80356+ const char __user *ubuf = (const char __force_user *)buf;
80357
80358 bitmap_zero(maskp, nmaskbits);
80359
80360@@ -507,7 +507,7 @@ int bitmap_parse_user(const char __user *ubuf,
80361 {
80362 if (!access_ok(VERIFY_READ, ubuf, ulen))
80363 return -EFAULT;
80364- return __bitmap_parse((const char __force *)ubuf,
80365+ return __bitmap_parse((const char __force_kernel *)ubuf,
80366 ulen, 1, maskp, nmaskbits);
80367
80368 }
80369@@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
80370 {
80371 unsigned a, b;
80372 int c, old_c, totaldigits;
80373- const char __user __force *ubuf = (const char __user __force *)buf;
80374+ const char __user *ubuf = (const char __force_user *)buf;
80375 int exp_digit, in_range;
80376
80377 totaldigits = c = 0;
80378@@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
80379 {
80380 if (!access_ok(VERIFY_READ, ubuf, ulen))
80381 return -EFAULT;
80382- return __bitmap_parselist((const char __force *)ubuf,
80383+ return __bitmap_parselist((const char __force_kernel *)ubuf,
80384 ulen, 1, maskp, nmaskbits);
80385 }
80386 EXPORT_SYMBOL(bitmap_parselist_user);
80387diff --git a/lib/bug.c b/lib/bug.c
80388index 1686034..a9c00c8 100644
80389--- a/lib/bug.c
80390+++ b/lib/bug.c
80391@@ -134,6 +134,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
80392 return BUG_TRAP_TYPE_NONE;
80393
80394 bug = find_bug(bugaddr);
80395+ if (!bug)
80396+ return BUG_TRAP_TYPE_NONE;
80397
80398 file = NULL;
80399 line = 0;
80400diff --git a/lib/debugobjects.c b/lib/debugobjects.c
80401index 37061ed..da83f48 100644
80402--- a/lib/debugobjects.c
80403+++ b/lib/debugobjects.c
80404@@ -286,7 +286,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
80405 if (limit > 4)
80406 return;
80407
80408- is_on_stack = object_is_on_stack(addr);
80409+ is_on_stack = object_starts_on_stack(addr);
80410 if (is_on_stack == onstack)
80411 return;
80412
80413diff --git a/lib/devres.c b/lib/devres.c
80414index 8235331..5881053 100644
80415--- a/lib/devres.c
80416+++ b/lib/devres.c
80417@@ -81,7 +81,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
80418 void devm_iounmap(struct device *dev, void __iomem *addr)
80419 {
80420 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
80421- (void *)addr));
80422+ (void __force *)addr));
80423 iounmap(addr);
80424 }
80425 EXPORT_SYMBOL(devm_iounmap);
80426@@ -224,7 +224,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
80427 {
80428 ioport_unmap(addr);
80429 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
80430- devm_ioport_map_match, (void *)addr));
80431+ devm_ioport_map_match, (void __force *)addr));
80432 }
80433 EXPORT_SYMBOL(devm_ioport_unmap);
80434 #endif /* CONFIG_HAS_IOPORT */
80435diff --git a/lib/div64.c b/lib/div64.c
80436index a163b6c..9618fa5 100644
80437--- a/lib/div64.c
80438+++ b/lib/div64.c
80439@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
80440 EXPORT_SYMBOL(__div64_32);
80441
80442 #ifndef div_s64_rem
80443-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
80444+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
80445 {
80446 u64 quotient;
80447
80448@@ -90,7 +90,7 @@ EXPORT_SYMBOL(div_s64_rem);
80449 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
80450 */
80451 #ifndef div64_u64
80452-u64 div64_u64(u64 dividend, u64 divisor)
80453+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
80454 {
80455 u32 high = divisor >> 32;
80456 u64 quot;
80457diff --git a/lib/dma-debug.c b/lib/dma-debug.c
80458index d87a17a..ac0d79a 100644
80459--- a/lib/dma-debug.c
80460+++ b/lib/dma-debug.c
80461@@ -768,7 +768,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
80462
80463 void dma_debug_add_bus(struct bus_type *bus)
80464 {
80465- struct notifier_block *nb;
80466+ notifier_block_no_const *nb;
80467
80468 if (global_disable)
80469 return;
80470@@ -945,7 +945,7 @@ static void check_unmap(struct dma_debug_entry *ref)
80471
80472 static void check_for_stack(struct device *dev, void *addr)
80473 {
80474- if (object_is_on_stack(addr))
80475+ if (object_starts_on_stack(addr))
80476 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
80477 "stack [addr=%p]\n", addr);
80478 }
80479diff --git a/lib/inflate.c b/lib/inflate.c
80480index 013a761..c28f3fc 100644
80481--- a/lib/inflate.c
80482+++ b/lib/inflate.c
80483@@ -269,7 +269,7 @@ static void free(void *where)
80484 malloc_ptr = free_mem_ptr;
80485 }
80486 #else
80487-#define malloc(a) kmalloc(a, GFP_KERNEL)
80488+#define malloc(a) kmalloc((a), GFP_KERNEL)
80489 #define free(a) kfree(a)
80490 #endif
80491
80492diff --git a/lib/ioremap.c b/lib/ioremap.c
80493index 0c9216c..863bd89 100644
80494--- a/lib/ioremap.c
80495+++ b/lib/ioremap.c
80496@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
80497 unsigned long next;
80498
80499 phys_addr -= addr;
80500- pmd = pmd_alloc(&init_mm, pud, addr);
80501+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
80502 if (!pmd)
80503 return -ENOMEM;
80504 do {
80505@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
80506 unsigned long next;
80507
80508 phys_addr -= addr;
80509- pud = pud_alloc(&init_mm, pgd, addr);
80510+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
80511 if (!pud)
80512 return -ENOMEM;
80513 do {
80514diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
80515index bd2bea9..6b3c95e 100644
80516--- a/lib/is_single_threaded.c
80517+++ b/lib/is_single_threaded.c
80518@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
80519 struct task_struct *p, *t;
80520 bool ret;
80521
80522+ if (!mm)
80523+ return true;
80524+
80525 if (atomic_read(&task->signal->live) != 1)
80526 return false;
80527
80528diff --git a/lib/kobject.c b/lib/kobject.c
80529index a654866..d8bb115 100644
80530--- a/lib/kobject.c
80531+++ b/lib/kobject.c
80532@@ -805,7 +805,7 @@ static struct kset *kset_create(const char *name,
80533 kset = kzalloc(sizeof(*kset), GFP_KERNEL);
80534 if (!kset)
80535 return NULL;
80536- retval = kobject_set_name(&kset->kobj, name);
80537+ retval = kobject_set_name(&kset->kobj, "%s", name);
80538 if (retval) {
80539 kfree(kset);
80540 return NULL;
80541@@ -859,9 +859,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
80542
80543
80544 static DEFINE_SPINLOCK(kobj_ns_type_lock);
80545-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
80546+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
80547
80548-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
80549+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
80550 {
80551 enum kobj_ns_type type = ops->type;
80552 int error;
80553diff --git a/lib/list_debug.c b/lib/list_debug.c
80554index c24c2f7..06e070b 100644
80555--- a/lib/list_debug.c
80556+++ b/lib/list_debug.c
80557@@ -11,7 +11,9 @@
80558 #include <linux/bug.h>
80559 #include <linux/kernel.h>
80560 #include <linux/rculist.h>
80561+#include <linux/mm.h>
80562
80563+#ifdef CONFIG_DEBUG_LIST
80564 /*
80565 * Insert a new entry between two known consecutive entries.
80566 *
80567@@ -19,21 +21,32 @@
80568 * the prev/next entries already!
80569 */
80570
80571-void __list_add(struct list_head *new,
80572- struct list_head *prev,
80573- struct list_head *next)
80574+static bool __list_add_debug(struct list_head *new,
80575+ struct list_head *prev,
80576+ struct list_head *next)
80577 {
80578- WARN(next->prev != prev,
80579+ if (WARN(next->prev != prev,
80580 "list_add corruption. next->prev should be "
80581 "prev (%p), but was %p. (next=%p).\n",
80582- prev, next->prev, next);
80583- WARN(prev->next != next,
80584+ prev, next->prev, next) ||
80585+ WARN(prev->next != next,
80586 "list_add corruption. prev->next should be "
80587 "next (%p), but was %p. (prev=%p).\n",
80588- next, prev->next, prev);
80589- WARN(new == prev || new == next,
80590- "list_add double add: new=%p, prev=%p, next=%p.\n",
80591- new, prev, next);
80592+ next, prev->next, prev) ||
80593+ WARN(new == prev || new == next,
80594+ "list_add double add: new=%p, prev=%p, next=%p.\n",
80595+ new, prev, next))
80596+ return false;
80597+ return true;
80598+}
80599+
80600+void __list_add(struct list_head *new,
80601+ struct list_head *prev,
80602+ struct list_head *next)
80603+{
80604+ if (!__list_add_debug(new, prev, next))
80605+ return;
80606+
80607 next->prev = new;
80608 new->next = next;
80609 new->prev = prev;
80610@@ -41,7 +54,7 @@ void __list_add(struct list_head *new,
80611 }
80612 EXPORT_SYMBOL(__list_add);
80613
80614-void __list_del_entry(struct list_head *entry)
80615+static bool __list_del_entry_debug(struct list_head *entry)
80616 {
80617 struct list_head *prev, *next;
80618
80619@@ -60,9 +73,16 @@ void __list_del_entry(struct list_head *entry)
80620 WARN(next->prev != entry,
80621 "list_del corruption. next->prev should be %p, "
80622 "but was %p\n", entry, next->prev))
80623+ return false;
80624+ return true;
80625+}
80626+
80627+void __list_del_entry(struct list_head *entry)
80628+{
80629+ if (!__list_del_entry_debug(entry))
80630 return;
80631
80632- __list_del(prev, next);
80633+ __list_del(entry->prev, entry->next);
80634 }
80635 EXPORT_SYMBOL(__list_del_entry);
80636
80637@@ -86,15 +106,85 @@ EXPORT_SYMBOL(list_del);
80638 void __list_add_rcu(struct list_head *new,
80639 struct list_head *prev, struct list_head *next)
80640 {
80641- WARN(next->prev != prev,
80642- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
80643- prev, next->prev, next);
80644- WARN(prev->next != next,
80645- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
80646- next, prev->next, prev);
80647+ if (!__list_add_debug(new, prev, next))
80648+ return;
80649+
80650 new->next = next;
80651 new->prev = prev;
80652 rcu_assign_pointer(list_next_rcu(prev), new);
80653 next->prev = new;
80654 }
80655 EXPORT_SYMBOL(__list_add_rcu);
80656+#endif
80657+
80658+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
80659+{
80660+#ifdef CONFIG_DEBUG_LIST
80661+ if (!__list_add_debug(new, prev, next))
80662+ return;
80663+#endif
80664+
80665+ pax_open_kernel();
80666+ next->prev = new;
80667+ new->next = next;
80668+ new->prev = prev;
80669+ prev->next = new;
80670+ pax_close_kernel();
80671+}
80672+EXPORT_SYMBOL(__pax_list_add);
80673+
80674+void pax_list_del(struct list_head *entry)
80675+{
80676+#ifdef CONFIG_DEBUG_LIST
80677+ if (!__list_del_entry_debug(entry))
80678+ return;
80679+#endif
80680+
80681+ pax_open_kernel();
80682+ __list_del(entry->prev, entry->next);
80683+ entry->next = LIST_POISON1;
80684+ entry->prev = LIST_POISON2;
80685+ pax_close_kernel();
80686+}
80687+EXPORT_SYMBOL(pax_list_del);
80688+
80689+void pax_list_del_init(struct list_head *entry)
80690+{
80691+ pax_open_kernel();
80692+ __list_del(entry->prev, entry->next);
80693+ INIT_LIST_HEAD(entry);
80694+ pax_close_kernel();
80695+}
80696+EXPORT_SYMBOL(pax_list_del_init);
80697+
80698+void __pax_list_add_rcu(struct list_head *new,
80699+ struct list_head *prev, struct list_head *next)
80700+{
80701+#ifdef CONFIG_DEBUG_LIST
80702+ if (!__list_add_debug(new, prev, next))
80703+ return;
80704+#endif
80705+
80706+ pax_open_kernel();
80707+ new->next = next;
80708+ new->prev = prev;
80709+ rcu_assign_pointer(list_next_rcu(prev), new);
80710+ next->prev = new;
80711+ pax_close_kernel();
80712+}
80713+EXPORT_SYMBOL(__pax_list_add_rcu);
80714+
80715+void pax_list_del_rcu(struct list_head *entry)
80716+{
80717+#ifdef CONFIG_DEBUG_LIST
80718+ if (!__list_del_entry_debug(entry))
80719+ return;
80720+#endif
80721+
80722+ pax_open_kernel();
80723+ __list_del(entry->prev, entry->next);
80724+ entry->next = LIST_POISON1;
80725+ entry->prev = LIST_POISON2;
80726+ pax_close_kernel();
80727+}
80728+EXPORT_SYMBOL(pax_list_del_rcu);
80729diff --git a/lib/radix-tree.c b/lib/radix-tree.c
80730index e796429..6e38f9f 100644
80731--- a/lib/radix-tree.c
80732+++ b/lib/radix-tree.c
80733@@ -92,7 +92,7 @@ struct radix_tree_preload {
80734 int nr;
80735 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
80736 };
80737-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
80738+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
80739
80740 static inline void *ptr_to_indirect(void *ptr)
80741 {
80742diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
80743index bb2b201..46abaf9 100644
80744--- a/lib/strncpy_from_user.c
80745+++ b/lib/strncpy_from_user.c
80746@@ -21,7 +21,7 @@
80747 */
80748 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
80749 {
80750- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
80751+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
80752 long res = 0;
80753
80754 /*
80755diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
80756index a28df52..3d55877 100644
80757--- a/lib/strnlen_user.c
80758+++ b/lib/strnlen_user.c
80759@@ -26,7 +26,7 @@
80760 */
80761 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
80762 {
80763- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
80764+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
80765 long align, res = 0;
80766 unsigned long c;
80767
80768diff --git a/lib/swiotlb.c b/lib/swiotlb.c
80769index d23762e..e21eab2 100644
80770--- a/lib/swiotlb.c
80771+++ b/lib/swiotlb.c
80772@@ -664,7 +664,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
80773
80774 void
80775 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
80776- dma_addr_t dev_addr)
80777+ dma_addr_t dev_addr, struct dma_attrs *attrs)
80778 {
80779 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
80780
80781diff --git a/lib/vsprintf.c b/lib/vsprintf.c
80782index 0d62fd7..b7bc911 100644
80783--- a/lib/vsprintf.c
80784+++ b/lib/vsprintf.c
80785@@ -16,6 +16,9 @@
80786 * - scnprintf and vscnprintf
80787 */
80788
80789+#ifdef CONFIG_GRKERNSEC_HIDESYM
80790+#define __INCLUDED_BY_HIDESYM 1
80791+#endif
80792 #include <stdarg.h>
80793 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
80794 #include <linux/types.h>
80795@@ -974,7 +977,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
80796 return number(buf, end, *(const netdev_features_t *)addr, spec);
80797 }
80798
80799+#ifdef CONFIG_GRKERNSEC_HIDESYM
80800+int kptr_restrict __read_mostly = 2;
80801+#else
80802 int kptr_restrict __read_mostly;
80803+#endif
80804
80805 /*
80806 * Show a '%p' thing. A kernel extension is that the '%p' is followed
80807@@ -988,6 +995,8 @@ int kptr_restrict __read_mostly;
80808 * - 'S' For symbolic direct pointers with offset
80809 * - 's' For symbolic direct pointers without offset
80810 * - 'B' For backtraced symbolic direct pointers with offset
80811+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
80812+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
80813 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
80814 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
80815 * - 'M' For a 6-byte MAC address, it prints the address in the
80816@@ -1044,12 +1053,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
80817
80818 if (!ptr && *fmt != 'K') {
80819 /*
80820- * Print (null) with the same width as a pointer so it makes
80821+ * Print (nil) with the same width as a pointer so it makes
80822 * tabular output look nice.
80823 */
80824 if (spec.field_width == -1)
80825 spec.field_width = default_width;
80826- return string(buf, end, "(null)", spec);
80827+ return string(buf, end, "(nil)", spec);
80828 }
80829
80830 switch (*fmt) {
80831@@ -1059,6 +1068,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
80832 /* Fallthrough */
80833 case 'S':
80834 case 's':
80835+#ifdef CONFIG_GRKERNSEC_HIDESYM
80836+ break;
80837+#else
80838+ return symbol_string(buf, end, ptr, spec, *fmt);
80839+#endif
80840+ case 'A':
80841 case 'B':
80842 return symbol_string(buf, end, ptr, spec, *fmt);
80843 case 'R':
80844@@ -1099,6 +1114,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
80845 va_end(va);
80846 return buf;
80847 }
80848+ case 'P':
80849+ break;
80850 case 'K':
80851 /*
80852 * %pK cannot be used in IRQ context because its test
80853@@ -1128,6 +1145,21 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
80854 return number(buf, end,
80855 (unsigned long long) *((phys_addr_t *)ptr), spec);
80856 }
80857+
80858+#ifdef CONFIG_GRKERNSEC_HIDESYM
80859+ /* 'P' = approved pointers to copy to userland,
80860+ as in the /proc/kallsyms case, as we make it display nothing
80861+ for non-root users, and the real contents for root users
80862+ Also ignore 'K' pointers, since we force their NULLing for non-root users
80863+ above
80864+ */
80865+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'K' && is_usercopy_object(buf)) {
80866+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
80867+ dump_stack();
80868+ ptr = NULL;
80869+ }
80870+#endif
80871+
80872 spec.flags |= SMALL;
80873 if (spec.field_width == -1) {
80874 spec.field_width = default_width;
80875@@ -1849,11 +1881,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
80876 typeof(type) value; \
80877 if (sizeof(type) == 8) { \
80878 args = PTR_ALIGN(args, sizeof(u32)); \
80879- *(u32 *)&value = *(u32 *)args; \
80880- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
80881+ *(u32 *)&value = *(const u32 *)args; \
80882+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
80883 } else { \
80884 args = PTR_ALIGN(args, sizeof(type)); \
80885- value = *(typeof(type) *)args; \
80886+ value = *(const typeof(type) *)args; \
80887 } \
80888 args += sizeof(type); \
80889 value; \
80890@@ -1916,7 +1948,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
80891 case FORMAT_TYPE_STR: {
80892 const char *str_arg = args;
80893 args += strlen(str_arg) + 1;
80894- str = string(str, end, (char *)str_arg, spec);
80895+ str = string(str, end, str_arg, spec);
80896 break;
80897 }
80898
80899diff --git a/localversion-grsec b/localversion-grsec
80900new file mode 100644
80901index 0000000..7cd6065
80902--- /dev/null
80903+++ b/localversion-grsec
80904@@ -0,0 +1 @@
80905+-grsec
80906diff --git a/mm/Kconfig b/mm/Kconfig
80907index 3bea74f..e821c99 100644
80908--- a/mm/Kconfig
80909+++ b/mm/Kconfig
80910@@ -311,10 +311,10 @@ config KSM
80911 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
80912
80913 config DEFAULT_MMAP_MIN_ADDR
80914- int "Low address space to protect from user allocation"
80915+ int "Low address space to protect from user allocation"
80916 depends on MMU
80917- default 4096
80918- help
80919+ default 65536
80920+ help
80921 This is the portion of low virtual memory which should be protected
80922 from userspace allocation. Keeping a user from writing to low pages
80923 can help reduce the impact of kernel NULL pointer bugs.
80924@@ -345,7 +345,7 @@ config MEMORY_FAILURE
80925
80926 config HWPOISON_INJECT
80927 tristate "HWPoison pages injector"
80928- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
80929+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
80930 select PROC_PAGE_MONITOR
80931
80932 config NOMMU_INITIAL_TRIM_EXCESS
80933diff --git a/mm/backing-dev.c b/mm/backing-dev.c
80934index 41733c5..d80d7a9 100644
80935--- a/mm/backing-dev.c
80936+++ b/mm/backing-dev.c
80937@@ -716,7 +716,6 @@ EXPORT_SYMBOL(bdi_destroy);
80938 int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
80939 unsigned int cap)
80940 {
80941- char tmp[32];
80942 int err;
80943
80944 bdi->name = name;
80945@@ -725,8 +724,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
80946 if (err)
80947 return err;
80948
80949- sprintf(tmp, "%.28s%s", name, "-%d");
80950- err = bdi_register(bdi, NULL, tmp, atomic_long_inc_return(&bdi_seq));
80951+ err = bdi_register(bdi, NULL, "%.28s-%ld", name, atomic_long_inc_return(&bdi_seq));
80952 if (err) {
80953 bdi_destroy(bdi);
80954 return err;
80955diff --git a/mm/filemap.c b/mm/filemap.c
80956index e1979fd..dda5120 100644
80957--- a/mm/filemap.c
80958+++ b/mm/filemap.c
80959@@ -1748,7 +1748,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
80960 struct address_space *mapping = file->f_mapping;
80961
80962 if (!mapping->a_ops->readpage)
80963- return -ENOEXEC;
80964+ return -ENODEV;
80965 file_accessed(file);
80966 vma->vm_ops = &generic_file_vm_ops;
80967 return 0;
80968@@ -2088,6 +2088,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
80969 *pos = i_size_read(inode);
80970
80971 if (limit != RLIM_INFINITY) {
80972+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
80973 if (*pos >= limit) {
80974 send_sig(SIGXFSZ, current, 0);
80975 return -EFBIG;
80976diff --git a/mm/fremap.c b/mm/fremap.c
80977index 87da359..3f41cb1 100644
80978--- a/mm/fremap.c
80979+++ b/mm/fremap.c
80980@@ -158,6 +158,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
80981 retry:
80982 vma = find_vma(mm, start);
80983
80984+#ifdef CONFIG_PAX_SEGMEXEC
80985+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
80986+ goto out;
80987+#endif
80988+
80989 /*
80990 * Make sure the vma is shared, that it supports prefaulting,
80991 * and that the remapped range is valid and fully within
80992diff --git a/mm/highmem.c b/mm/highmem.c
80993index b32b70c..e512eb0 100644
80994--- a/mm/highmem.c
80995+++ b/mm/highmem.c
80996@@ -138,8 +138,9 @@ static void flush_all_zero_pkmaps(void)
80997 * So no dangers, even with speculative execution.
80998 */
80999 page = pte_page(pkmap_page_table[i]);
81000+ pax_open_kernel();
81001 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
81002-
81003+ pax_close_kernel();
81004 set_page_address(page, NULL);
81005 need_flush = 1;
81006 }
81007@@ -198,9 +199,11 @@ start:
81008 }
81009 }
81010 vaddr = PKMAP_ADDR(last_pkmap_nr);
81011+
81012+ pax_open_kernel();
81013 set_pte_at(&init_mm, vaddr,
81014 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
81015-
81016+ pax_close_kernel();
81017 pkmap_count[last_pkmap_nr] = 1;
81018 set_page_address(page, (void *)vaddr);
81019
81020diff --git a/mm/hugetlb.c b/mm/hugetlb.c
81021index ce4cb19..93899ef 100644
81022--- a/mm/hugetlb.c
81023+++ b/mm/hugetlb.c
81024@@ -2005,15 +2005,17 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
81025 struct hstate *h = &default_hstate;
81026 unsigned long tmp;
81027 int ret;
81028+ ctl_table_no_const hugetlb_table;
81029
81030 tmp = h->max_huge_pages;
81031
81032 if (write && h->order >= MAX_ORDER)
81033 return -EINVAL;
81034
81035- table->data = &tmp;
81036- table->maxlen = sizeof(unsigned long);
81037- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
81038+ hugetlb_table = *table;
81039+ hugetlb_table.data = &tmp;
81040+ hugetlb_table.maxlen = sizeof(unsigned long);
81041+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
81042 if (ret)
81043 goto out;
81044
81045@@ -2070,15 +2072,17 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
81046 struct hstate *h = &default_hstate;
81047 unsigned long tmp;
81048 int ret;
81049+ ctl_table_no_const hugetlb_table;
81050
81051 tmp = h->nr_overcommit_huge_pages;
81052
81053 if (write && h->order >= MAX_ORDER)
81054 return -EINVAL;
81055
81056- table->data = &tmp;
81057- table->maxlen = sizeof(unsigned long);
81058- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
81059+ hugetlb_table = *table;
81060+ hugetlb_table.data = &tmp;
81061+ hugetlb_table.maxlen = sizeof(unsigned long);
81062+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
81063 if (ret)
81064 goto out;
81065
81066@@ -2512,6 +2516,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
81067 return 1;
81068 }
81069
81070+#ifdef CONFIG_PAX_SEGMEXEC
81071+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
81072+{
81073+ struct mm_struct *mm = vma->vm_mm;
81074+ struct vm_area_struct *vma_m;
81075+ unsigned long address_m;
81076+ pte_t *ptep_m;
81077+
81078+ vma_m = pax_find_mirror_vma(vma);
81079+ if (!vma_m)
81080+ return;
81081+
81082+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
81083+ address_m = address + SEGMEXEC_TASK_SIZE;
81084+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
81085+ get_page(page_m);
81086+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
81087+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
81088+}
81089+#endif
81090+
81091 /*
81092 * Hugetlb_cow() should be called with page lock of the original hugepage held.
81093 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
81094@@ -2630,6 +2655,11 @@ retry_avoidcopy:
81095 make_huge_pte(vma, new_page, 1));
81096 page_remove_rmap(old_page);
81097 hugepage_add_new_anon_rmap(new_page, vma, address);
81098+
81099+#ifdef CONFIG_PAX_SEGMEXEC
81100+ pax_mirror_huge_pte(vma, address, new_page);
81101+#endif
81102+
81103 /* Make the old page be freed below */
81104 new_page = old_page;
81105 }
81106@@ -2788,6 +2818,10 @@ retry:
81107 && (vma->vm_flags & VM_SHARED)));
81108 set_huge_pte_at(mm, address, ptep, new_pte);
81109
81110+#ifdef CONFIG_PAX_SEGMEXEC
81111+ pax_mirror_huge_pte(vma, address, page);
81112+#endif
81113+
81114 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
81115 /* Optimization, do the COW without a second fault */
81116 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
81117@@ -2817,6 +2851,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
81118 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
81119 struct hstate *h = hstate_vma(vma);
81120
81121+#ifdef CONFIG_PAX_SEGMEXEC
81122+ struct vm_area_struct *vma_m;
81123+#endif
81124+
81125 address &= huge_page_mask(h);
81126
81127 ptep = huge_pte_offset(mm, address);
81128@@ -2830,6 +2868,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
81129 VM_FAULT_SET_HINDEX(hstate_index(h));
81130 }
81131
81132+#ifdef CONFIG_PAX_SEGMEXEC
81133+ vma_m = pax_find_mirror_vma(vma);
81134+ if (vma_m) {
81135+ unsigned long address_m;
81136+
81137+ if (vma->vm_start > vma_m->vm_start) {
81138+ address_m = address;
81139+ address -= SEGMEXEC_TASK_SIZE;
81140+ vma = vma_m;
81141+ h = hstate_vma(vma);
81142+ } else
81143+ address_m = address + SEGMEXEC_TASK_SIZE;
81144+
81145+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
81146+ return VM_FAULT_OOM;
81147+ address_m &= HPAGE_MASK;
81148+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
81149+ }
81150+#endif
81151+
81152 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
81153 if (!ptep)
81154 return VM_FAULT_OOM;
81155diff --git a/mm/internal.h b/mm/internal.h
81156index 8562de0..7fdfe92 100644
81157--- a/mm/internal.h
81158+++ b/mm/internal.h
81159@@ -100,6 +100,7 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
81160 * in mm/page_alloc.c
81161 */
81162 extern void __free_pages_bootmem(struct page *page, unsigned int order);
81163+extern void free_compound_page(struct page *page);
81164 extern void prep_compound_page(struct page *page, unsigned long order);
81165 #ifdef CONFIG_MEMORY_FAILURE
81166 extern bool is_free_buddy_page(struct page *page);
81167diff --git a/mm/kmemleak.c b/mm/kmemleak.c
81168index c8d7f31..2dbeffd 100644
81169--- a/mm/kmemleak.c
81170+++ b/mm/kmemleak.c
81171@@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
81172
81173 for (i = 0; i < object->trace_len; i++) {
81174 void *ptr = (void *)object->trace[i];
81175- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
81176+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
81177 }
81178 }
81179
81180@@ -1851,7 +1851,7 @@ static int __init kmemleak_late_init(void)
81181 return -ENOMEM;
81182 }
81183
81184- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
81185+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
81186 &kmemleak_fops);
81187 if (!dentry)
81188 pr_warning("Failed to create the debugfs kmemleak file\n");
81189diff --git a/mm/maccess.c b/mm/maccess.c
81190index d53adf9..03a24bf 100644
81191--- a/mm/maccess.c
81192+++ b/mm/maccess.c
81193@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
81194 set_fs(KERNEL_DS);
81195 pagefault_disable();
81196 ret = __copy_from_user_inatomic(dst,
81197- (__force const void __user *)src, size);
81198+ (const void __force_user *)src, size);
81199 pagefault_enable();
81200 set_fs(old_fs);
81201
81202@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
81203
81204 set_fs(KERNEL_DS);
81205 pagefault_disable();
81206- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
81207+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
81208 pagefault_enable();
81209 set_fs(old_fs);
81210
81211diff --git a/mm/madvise.c b/mm/madvise.c
81212index c58c94b..86ec14e 100644
81213--- a/mm/madvise.c
81214+++ b/mm/madvise.c
81215@@ -51,6 +51,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
81216 pgoff_t pgoff;
81217 unsigned long new_flags = vma->vm_flags;
81218
81219+#ifdef CONFIG_PAX_SEGMEXEC
81220+ struct vm_area_struct *vma_m;
81221+#endif
81222+
81223 switch (behavior) {
81224 case MADV_NORMAL:
81225 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
81226@@ -126,6 +130,13 @@ success:
81227 /*
81228 * vm_flags is protected by the mmap_sem held in write mode.
81229 */
81230+
81231+#ifdef CONFIG_PAX_SEGMEXEC
81232+ vma_m = pax_find_mirror_vma(vma);
81233+ if (vma_m)
81234+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
81235+#endif
81236+
81237 vma->vm_flags = new_flags;
81238
81239 out:
81240@@ -274,6 +285,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
81241 struct vm_area_struct ** prev,
81242 unsigned long start, unsigned long end)
81243 {
81244+
81245+#ifdef CONFIG_PAX_SEGMEXEC
81246+ struct vm_area_struct *vma_m;
81247+#endif
81248+
81249 *prev = vma;
81250 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
81251 return -EINVAL;
81252@@ -286,6 +302,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
81253 zap_page_range(vma, start, end - start, &details);
81254 } else
81255 zap_page_range(vma, start, end - start, NULL);
81256+
81257+#ifdef CONFIG_PAX_SEGMEXEC
81258+ vma_m = pax_find_mirror_vma(vma);
81259+ if (vma_m) {
81260+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
81261+ struct zap_details details = {
81262+ .nonlinear_vma = vma_m,
81263+ .last_index = ULONG_MAX,
81264+ };
81265+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
81266+ } else
81267+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
81268+ }
81269+#endif
81270+
81271 return 0;
81272 }
81273
81274@@ -491,6 +522,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
81275 if (end < start)
81276 goto out;
81277
81278+#ifdef CONFIG_PAX_SEGMEXEC
81279+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
81280+ if (end > SEGMEXEC_TASK_SIZE)
81281+ goto out;
81282+ } else
81283+#endif
81284+
81285+ if (end > TASK_SIZE)
81286+ goto out;
81287+
81288 error = 0;
81289 if (end == start)
81290 goto out;
81291diff --git a/mm/memory-failure.c b/mm/memory-failure.c
81292index df0694c..bc95539 100644
81293--- a/mm/memory-failure.c
81294+++ b/mm/memory-failure.c
81295@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
81296
81297 int sysctl_memory_failure_recovery __read_mostly = 1;
81298
81299-atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
81300+atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
81301
81302 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
81303
81304@@ -202,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
81305 pfn, t->comm, t->pid);
81306 si.si_signo = SIGBUS;
81307 si.si_errno = 0;
81308- si.si_addr = (void *)addr;
81309+ si.si_addr = (void __user *)addr;
81310 #ifdef __ARCH_SI_TRAPNO
81311 si.si_trapno = trapno;
81312 #endif
81313@@ -760,7 +760,7 @@ static struct page_state {
81314 unsigned long res;
81315 char *msg;
81316 int (*action)(struct page *p, unsigned long pfn);
81317-} error_states[] = {
81318+} __do_const error_states[] = {
81319 { reserved, reserved, "reserved kernel", me_kernel },
81320 /*
81321 * free pages are specially detected outside this table:
81322@@ -1051,7 +1051,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
81323 nr_pages = 1 << compound_order(hpage);
81324 else /* normal page or thp */
81325 nr_pages = 1;
81326- atomic_long_add(nr_pages, &num_poisoned_pages);
81327+ atomic_long_add_unchecked(nr_pages, &num_poisoned_pages);
81328
81329 /*
81330 * We need/can do nothing about count=0 pages.
81331@@ -1081,7 +1081,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
81332 if (!PageHWPoison(hpage)
81333 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
81334 || (p != hpage && TestSetPageHWPoison(hpage))) {
81335- atomic_long_sub(nr_pages, &num_poisoned_pages);
81336+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
81337 return 0;
81338 }
81339 set_page_hwpoison_huge_page(hpage);
81340@@ -1148,7 +1148,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
81341 }
81342 if (hwpoison_filter(p)) {
81343 if (TestClearPageHWPoison(p))
81344- atomic_long_sub(nr_pages, &num_poisoned_pages);
81345+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
81346 unlock_page(hpage);
81347 put_page(hpage);
81348 return 0;
81349@@ -1350,7 +1350,7 @@ int unpoison_memory(unsigned long pfn)
81350 return 0;
81351 }
81352 if (TestClearPageHWPoison(p))
81353- atomic_long_sub(nr_pages, &num_poisoned_pages);
81354+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
81355 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
81356 return 0;
81357 }
81358@@ -1364,7 +1364,7 @@ int unpoison_memory(unsigned long pfn)
81359 */
81360 if (TestClearPageHWPoison(page)) {
81361 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
81362- atomic_long_sub(nr_pages, &num_poisoned_pages);
81363+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
81364 freeit = 1;
81365 if (PageHuge(page))
81366 clear_page_hwpoison_huge_page(page);
81367@@ -1491,7 +1491,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
81368 } else {
81369 set_page_hwpoison_huge_page(hpage);
81370 dequeue_hwpoisoned_huge_page(hpage);
81371- atomic_long_add(1 << compound_trans_order(hpage),
81372+ atomic_long_add_unchecked(1 << compound_trans_order(hpage),
81373 &num_poisoned_pages);
81374 }
81375 /* keep elevated page count for bad page */
81376@@ -1552,11 +1552,11 @@ int soft_offline_page(struct page *page, int flags)
81377 if (PageHuge(page)) {
81378 set_page_hwpoison_huge_page(hpage);
81379 dequeue_hwpoisoned_huge_page(hpage);
81380- atomic_long_add(1 << compound_trans_order(hpage),
81381+ atomic_long_add_unchecked(1 << compound_trans_order(hpage),
81382 &num_poisoned_pages);
81383 } else {
81384 SetPageHWPoison(page);
81385- atomic_long_inc(&num_poisoned_pages);
81386+ atomic_long_inc_unchecked(&num_poisoned_pages);
81387 }
81388 }
81389 /* keep elevated page count for bad page */
81390@@ -1596,7 +1596,7 @@ static int __soft_offline_page(struct page *page, int flags)
81391 put_page(page);
81392 pr_info("soft_offline: %#lx: invalidated\n", pfn);
81393 SetPageHWPoison(page);
81394- atomic_long_inc(&num_poisoned_pages);
81395+ atomic_long_inc_unchecked(&num_poisoned_pages);
81396 return 0;
81397 }
81398
81399@@ -1626,7 +1626,7 @@ static int __soft_offline_page(struct page *page, int flags)
81400 ret = -EIO;
81401 } else {
81402 SetPageHWPoison(page);
81403- atomic_long_inc(&num_poisoned_pages);
81404+ atomic_long_inc_unchecked(&num_poisoned_pages);
81405 }
81406 } else {
81407 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
81408diff --git a/mm/memory.c b/mm/memory.c
81409index ba94dec..08ffe0d 100644
81410--- a/mm/memory.c
81411+++ b/mm/memory.c
81412@@ -438,6 +438,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
81413 free_pte_range(tlb, pmd, addr);
81414 } while (pmd++, addr = next, addr != end);
81415
81416+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
81417 start &= PUD_MASK;
81418 if (start < floor)
81419 return;
81420@@ -452,6 +453,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
81421 pmd = pmd_offset(pud, start);
81422 pud_clear(pud);
81423 pmd_free_tlb(tlb, pmd, start);
81424+#endif
81425+
81426 }
81427
81428 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
81429@@ -471,6 +474,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
81430 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
81431 } while (pud++, addr = next, addr != end);
81432
81433+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
81434 start &= PGDIR_MASK;
81435 if (start < floor)
81436 return;
81437@@ -485,6 +489,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
81438 pud = pud_offset(pgd, start);
81439 pgd_clear(pgd);
81440 pud_free_tlb(tlb, pud, start);
81441+#endif
81442+
81443 }
81444
81445 /*
81446@@ -1644,12 +1650,6 @@ no_page_table:
81447 return page;
81448 }
81449
81450-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
81451-{
81452- return stack_guard_page_start(vma, addr) ||
81453- stack_guard_page_end(vma, addr+PAGE_SIZE);
81454-}
81455-
81456 /**
81457 * __get_user_pages() - pin user pages in memory
81458 * @tsk: task_struct of target task
81459@@ -1736,10 +1736,10 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
81460
81461 i = 0;
81462
81463- do {
81464+ while (nr_pages) {
81465 struct vm_area_struct *vma;
81466
81467- vma = find_extend_vma(mm, start);
81468+ vma = find_vma(mm, start);
81469 if (!vma && in_gate_area(mm, start)) {
81470 unsigned long pg = start & PAGE_MASK;
81471 pgd_t *pgd;
81472@@ -1788,7 +1788,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
81473 goto next_page;
81474 }
81475
81476- if (!vma ||
81477+ if (!vma || start < vma->vm_start ||
81478 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
81479 !(vm_flags & vma->vm_flags))
81480 return i ? : -EFAULT;
81481@@ -1817,11 +1817,6 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
81482 int ret;
81483 unsigned int fault_flags = 0;
81484
81485- /* For mlock, just skip the stack guard page. */
81486- if (foll_flags & FOLL_MLOCK) {
81487- if (stack_guard_page(vma, start))
81488- goto next_page;
81489- }
81490 if (foll_flags & FOLL_WRITE)
81491 fault_flags |= FAULT_FLAG_WRITE;
81492 if (nonblocking)
81493@@ -1901,7 +1896,7 @@ next_page:
81494 start += page_increm * PAGE_SIZE;
81495 nr_pages -= page_increm;
81496 } while (nr_pages && start < vma->vm_end);
81497- } while (nr_pages);
81498+ }
81499 return i;
81500 }
81501 EXPORT_SYMBOL(__get_user_pages);
81502@@ -2108,6 +2103,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
81503 page_add_file_rmap(page);
81504 set_pte_at(mm, addr, pte, mk_pte(page, prot));
81505
81506+#ifdef CONFIG_PAX_SEGMEXEC
81507+ pax_mirror_file_pte(vma, addr, page, ptl);
81508+#endif
81509+
81510 retval = 0;
81511 pte_unmap_unlock(pte, ptl);
81512 return retval;
81513@@ -2152,9 +2151,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
81514 if (!page_count(page))
81515 return -EINVAL;
81516 if (!(vma->vm_flags & VM_MIXEDMAP)) {
81517+
81518+#ifdef CONFIG_PAX_SEGMEXEC
81519+ struct vm_area_struct *vma_m;
81520+#endif
81521+
81522 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
81523 BUG_ON(vma->vm_flags & VM_PFNMAP);
81524 vma->vm_flags |= VM_MIXEDMAP;
81525+
81526+#ifdef CONFIG_PAX_SEGMEXEC
81527+ vma_m = pax_find_mirror_vma(vma);
81528+ if (vma_m)
81529+ vma_m->vm_flags |= VM_MIXEDMAP;
81530+#endif
81531+
81532 }
81533 return insert_page(vma, addr, page, vma->vm_page_prot);
81534 }
81535@@ -2237,6 +2248,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
81536 unsigned long pfn)
81537 {
81538 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
81539+ BUG_ON(vma->vm_mirror);
81540
81541 if (addr < vma->vm_start || addr >= vma->vm_end)
81542 return -EFAULT;
81543@@ -2484,7 +2496,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
81544
81545 BUG_ON(pud_huge(*pud));
81546
81547- pmd = pmd_alloc(mm, pud, addr);
81548+ pmd = (mm == &init_mm) ?
81549+ pmd_alloc_kernel(mm, pud, addr) :
81550+ pmd_alloc(mm, pud, addr);
81551 if (!pmd)
81552 return -ENOMEM;
81553 do {
81554@@ -2504,7 +2518,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
81555 unsigned long next;
81556 int err;
81557
81558- pud = pud_alloc(mm, pgd, addr);
81559+ pud = (mm == &init_mm) ?
81560+ pud_alloc_kernel(mm, pgd, addr) :
81561+ pud_alloc(mm, pgd, addr);
81562 if (!pud)
81563 return -ENOMEM;
81564 do {
81565@@ -2592,6 +2608,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
81566 copy_user_highpage(dst, src, va, vma);
81567 }
81568
81569+#ifdef CONFIG_PAX_SEGMEXEC
81570+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
81571+{
81572+ struct mm_struct *mm = vma->vm_mm;
81573+ spinlock_t *ptl;
81574+ pte_t *pte, entry;
81575+
81576+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
81577+ entry = *pte;
81578+ if (!pte_present(entry)) {
81579+ if (!pte_none(entry)) {
81580+ BUG_ON(pte_file(entry));
81581+ free_swap_and_cache(pte_to_swp_entry(entry));
81582+ pte_clear_not_present_full(mm, address, pte, 0);
81583+ }
81584+ } else {
81585+ struct page *page;
81586+
81587+ flush_cache_page(vma, address, pte_pfn(entry));
81588+ entry = ptep_clear_flush(vma, address, pte);
81589+ BUG_ON(pte_dirty(entry));
81590+ page = vm_normal_page(vma, address, entry);
81591+ if (page) {
81592+ update_hiwater_rss(mm);
81593+ if (PageAnon(page))
81594+ dec_mm_counter_fast(mm, MM_ANONPAGES);
81595+ else
81596+ dec_mm_counter_fast(mm, MM_FILEPAGES);
81597+ page_remove_rmap(page);
81598+ page_cache_release(page);
81599+ }
81600+ }
81601+ pte_unmap_unlock(pte, ptl);
81602+}
81603+
81604+/* PaX: if vma is mirrored, synchronize the mirror's PTE
81605+ *
81606+ * the ptl of the lower mapped page is held on entry and is not released on exit
81607+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
81608+ */
81609+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
81610+{
81611+ struct mm_struct *mm = vma->vm_mm;
81612+ unsigned long address_m;
81613+ spinlock_t *ptl_m;
81614+ struct vm_area_struct *vma_m;
81615+ pmd_t *pmd_m;
81616+ pte_t *pte_m, entry_m;
81617+
81618+ BUG_ON(!page_m || !PageAnon(page_m));
81619+
81620+ vma_m = pax_find_mirror_vma(vma);
81621+ if (!vma_m)
81622+ return;
81623+
81624+ BUG_ON(!PageLocked(page_m));
81625+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
81626+ address_m = address + SEGMEXEC_TASK_SIZE;
81627+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
81628+ pte_m = pte_offset_map(pmd_m, address_m);
81629+ ptl_m = pte_lockptr(mm, pmd_m);
81630+ if (ptl != ptl_m) {
81631+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
81632+ if (!pte_none(*pte_m))
81633+ goto out;
81634+ }
81635+
81636+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
81637+ page_cache_get(page_m);
81638+ page_add_anon_rmap(page_m, vma_m, address_m);
81639+ inc_mm_counter_fast(mm, MM_ANONPAGES);
81640+ set_pte_at(mm, address_m, pte_m, entry_m);
81641+ update_mmu_cache(vma_m, address_m, pte_m);
81642+out:
81643+ if (ptl != ptl_m)
81644+ spin_unlock(ptl_m);
81645+ pte_unmap(pte_m);
81646+ unlock_page(page_m);
81647+}
81648+
81649+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
81650+{
81651+ struct mm_struct *mm = vma->vm_mm;
81652+ unsigned long address_m;
81653+ spinlock_t *ptl_m;
81654+ struct vm_area_struct *vma_m;
81655+ pmd_t *pmd_m;
81656+ pte_t *pte_m, entry_m;
81657+
81658+ BUG_ON(!page_m || PageAnon(page_m));
81659+
81660+ vma_m = pax_find_mirror_vma(vma);
81661+ if (!vma_m)
81662+ return;
81663+
81664+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
81665+ address_m = address + SEGMEXEC_TASK_SIZE;
81666+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
81667+ pte_m = pte_offset_map(pmd_m, address_m);
81668+ ptl_m = pte_lockptr(mm, pmd_m);
81669+ if (ptl != ptl_m) {
81670+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
81671+ if (!pte_none(*pte_m))
81672+ goto out;
81673+ }
81674+
81675+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
81676+ page_cache_get(page_m);
81677+ page_add_file_rmap(page_m);
81678+ inc_mm_counter_fast(mm, MM_FILEPAGES);
81679+ set_pte_at(mm, address_m, pte_m, entry_m);
81680+ update_mmu_cache(vma_m, address_m, pte_m);
81681+out:
81682+ if (ptl != ptl_m)
81683+ spin_unlock(ptl_m);
81684+ pte_unmap(pte_m);
81685+}
81686+
81687+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
81688+{
81689+ struct mm_struct *mm = vma->vm_mm;
81690+ unsigned long address_m;
81691+ spinlock_t *ptl_m;
81692+ struct vm_area_struct *vma_m;
81693+ pmd_t *pmd_m;
81694+ pte_t *pte_m, entry_m;
81695+
81696+ vma_m = pax_find_mirror_vma(vma);
81697+ if (!vma_m)
81698+ return;
81699+
81700+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
81701+ address_m = address + SEGMEXEC_TASK_SIZE;
81702+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
81703+ pte_m = pte_offset_map(pmd_m, address_m);
81704+ ptl_m = pte_lockptr(mm, pmd_m);
81705+ if (ptl != ptl_m) {
81706+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
81707+ if (!pte_none(*pte_m))
81708+ goto out;
81709+ }
81710+
81711+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
81712+ set_pte_at(mm, address_m, pte_m, entry_m);
81713+out:
81714+ if (ptl != ptl_m)
81715+ spin_unlock(ptl_m);
81716+ pte_unmap(pte_m);
81717+}
81718+
81719+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
81720+{
81721+ struct page *page_m;
81722+ pte_t entry;
81723+
81724+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
81725+ goto out;
81726+
81727+ entry = *pte;
81728+ page_m = vm_normal_page(vma, address, entry);
81729+ if (!page_m)
81730+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
81731+ else if (PageAnon(page_m)) {
81732+ if (pax_find_mirror_vma(vma)) {
81733+ pte_unmap_unlock(pte, ptl);
81734+ lock_page(page_m);
81735+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
81736+ if (pte_same(entry, *pte))
81737+ pax_mirror_anon_pte(vma, address, page_m, ptl);
81738+ else
81739+ unlock_page(page_m);
81740+ }
81741+ } else
81742+ pax_mirror_file_pte(vma, address, page_m, ptl);
81743+
81744+out:
81745+ pte_unmap_unlock(pte, ptl);
81746+}
81747+#endif
81748+
81749 /*
81750 * This routine handles present pages, when users try to write
81751 * to a shared page. It is done by copying the page to a new address
81752@@ -2808,6 +3004,12 @@ gotten:
81753 */
81754 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
81755 if (likely(pte_same(*page_table, orig_pte))) {
81756+
81757+#ifdef CONFIG_PAX_SEGMEXEC
81758+ if (pax_find_mirror_vma(vma))
81759+ BUG_ON(!trylock_page(new_page));
81760+#endif
81761+
81762 if (old_page) {
81763 if (!PageAnon(old_page)) {
81764 dec_mm_counter_fast(mm, MM_FILEPAGES);
81765@@ -2859,6 +3061,10 @@ gotten:
81766 page_remove_rmap(old_page);
81767 }
81768
81769+#ifdef CONFIG_PAX_SEGMEXEC
81770+ pax_mirror_anon_pte(vma, address, new_page, ptl);
81771+#endif
81772+
81773 /* Free the old page.. */
81774 new_page = old_page;
81775 ret |= VM_FAULT_WRITE;
81776@@ -3134,6 +3340,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
81777 swap_free(entry);
81778 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
81779 try_to_free_swap(page);
81780+
81781+#ifdef CONFIG_PAX_SEGMEXEC
81782+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
81783+#endif
81784+
81785 unlock_page(page);
81786 if (page != swapcache) {
81787 /*
81788@@ -3157,6 +3368,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
81789
81790 /* No need to invalidate - it was non-present before */
81791 update_mmu_cache(vma, address, page_table);
81792+
81793+#ifdef CONFIG_PAX_SEGMEXEC
81794+ pax_mirror_anon_pte(vma, address, page, ptl);
81795+#endif
81796+
81797 unlock:
81798 pte_unmap_unlock(page_table, ptl);
81799 out:
81800@@ -3176,40 +3392,6 @@ out_release:
81801 }
81802
81803 /*
81804- * This is like a special single-page "expand_{down|up}wards()",
81805- * except we must first make sure that 'address{-|+}PAGE_SIZE'
81806- * doesn't hit another vma.
81807- */
81808-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
81809-{
81810- address &= PAGE_MASK;
81811- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
81812- struct vm_area_struct *prev = vma->vm_prev;
81813-
81814- /*
81815- * Is there a mapping abutting this one below?
81816- *
81817- * That's only ok if it's the same stack mapping
81818- * that has gotten split..
81819- */
81820- if (prev && prev->vm_end == address)
81821- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
81822-
81823- expand_downwards(vma, address - PAGE_SIZE);
81824- }
81825- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
81826- struct vm_area_struct *next = vma->vm_next;
81827-
81828- /* As VM_GROWSDOWN but s/below/above/ */
81829- if (next && next->vm_start == address + PAGE_SIZE)
81830- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
81831-
81832- expand_upwards(vma, address + PAGE_SIZE);
81833- }
81834- return 0;
81835-}
81836-
81837-/*
81838 * We enter with non-exclusive mmap_sem (to exclude vma changes,
81839 * but allow concurrent faults), and pte mapped but not yet locked.
81840 * We return with mmap_sem still held, but pte unmapped and unlocked.
81841@@ -3218,27 +3400,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
81842 unsigned long address, pte_t *page_table, pmd_t *pmd,
81843 unsigned int flags)
81844 {
81845- struct page *page;
81846+ struct page *page = NULL;
81847 spinlock_t *ptl;
81848 pte_t entry;
81849
81850- pte_unmap(page_table);
81851-
81852- /* Check if we need to add a guard page to the stack */
81853- if (check_stack_guard_page(vma, address) < 0)
81854- return VM_FAULT_SIGBUS;
81855-
81856- /* Use the zero-page for reads */
81857 if (!(flags & FAULT_FLAG_WRITE)) {
81858 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
81859 vma->vm_page_prot));
81860- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
81861+ ptl = pte_lockptr(mm, pmd);
81862+ spin_lock(ptl);
81863 if (!pte_none(*page_table))
81864 goto unlock;
81865 goto setpte;
81866 }
81867
81868 /* Allocate our own private page. */
81869+ pte_unmap(page_table);
81870+
81871 if (unlikely(anon_vma_prepare(vma)))
81872 goto oom;
81873 page = alloc_zeroed_user_highpage_movable(vma, address);
81874@@ -3257,6 +3435,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
81875 if (!pte_none(*page_table))
81876 goto release;
81877
81878+#ifdef CONFIG_PAX_SEGMEXEC
81879+ if (pax_find_mirror_vma(vma))
81880+ BUG_ON(!trylock_page(page));
81881+#endif
81882+
81883 inc_mm_counter_fast(mm, MM_ANONPAGES);
81884 page_add_new_anon_rmap(page, vma, address);
81885 setpte:
81886@@ -3264,6 +3447,12 @@ setpte:
81887
81888 /* No need to invalidate - it was non-present before */
81889 update_mmu_cache(vma, address, page_table);
81890+
81891+#ifdef CONFIG_PAX_SEGMEXEC
81892+ if (page)
81893+ pax_mirror_anon_pte(vma, address, page, ptl);
81894+#endif
81895+
81896 unlock:
81897 pte_unmap_unlock(page_table, ptl);
81898 return 0;
81899@@ -3407,6 +3596,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
81900 */
81901 /* Only go through if we didn't race with anybody else... */
81902 if (likely(pte_same(*page_table, orig_pte))) {
81903+
81904+#ifdef CONFIG_PAX_SEGMEXEC
81905+ if (anon && pax_find_mirror_vma(vma))
81906+ BUG_ON(!trylock_page(page));
81907+#endif
81908+
81909 flush_icache_page(vma, page);
81910 entry = mk_pte(page, vma->vm_page_prot);
81911 if (flags & FAULT_FLAG_WRITE)
81912@@ -3426,6 +3621,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
81913
81914 /* no need to invalidate: a not-present page won't be cached */
81915 update_mmu_cache(vma, address, page_table);
81916+
81917+#ifdef CONFIG_PAX_SEGMEXEC
81918+ if (anon)
81919+ pax_mirror_anon_pte(vma, address, page, ptl);
81920+ else
81921+ pax_mirror_file_pte(vma, address, page, ptl);
81922+#endif
81923+
81924 } else {
81925 if (cow_page)
81926 mem_cgroup_uncharge_page(cow_page);
81927@@ -3747,6 +3950,12 @@ int handle_pte_fault(struct mm_struct *mm,
81928 if (flags & FAULT_FLAG_WRITE)
81929 flush_tlb_fix_spurious_fault(vma, address);
81930 }
81931+
81932+#ifdef CONFIG_PAX_SEGMEXEC
81933+ pax_mirror_pte(vma, address, pte, pmd, ptl);
81934+ return 0;
81935+#endif
81936+
81937 unlock:
81938 pte_unmap_unlock(pte, ptl);
81939 return 0;
81940@@ -3763,6 +3972,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
81941 pmd_t *pmd;
81942 pte_t *pte;
81943
81944+#ifdef CONFIG_PAX_SEGMEXEC
81945+ struct vm_area_struct *vma_m;
81946+#endif
81947+
81948 __set_current_state(TASK_RUNNING);
81949
81950 count_vm_event(PGFAULT);
81951@@ -3774,6 +3987,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
81952 if (unlikely(is_vm_hugetlb_page(vma)))
81953 return hugetlb_fault(mm, vma, address, flags);
81954
81955+#ifdef CONFIG_PAX_SEGMEXEC
81956+ vma_m = pax_find_mirror_vma(vma);
81957+ if (vma_m) {
81958+ unsigned long address_m;
81959+ pgd_t *pgd_m;
81960+ pud_t *pud_m;
81961+ pmd_t *pmd_m;
81962+
81963+ if (vma->vm_start > vma_m->vm_start) {
81964+ address_m = address;
81965+ address -= SEGMEXEC_TASK_SIZE;
81966+ vma = vma_m;
81967+ } else
81968+ address_m = address + SEGMEXEC_TASK_SIZE;
81969+
81970+ pgd_m = pgd_offset(mm, address_m);
81971+ pud_m = pud_alloc(mm, pgd_m, address_m);
81972+ if (!pud_m)
81973+ return VM_FAULT_OOM;
81974+ pmd_m = pmd_alloc(mm, pud_m, address_m);
81975+ if (!pmd_m)
81976+ return VM_FAULT_OOM;
81977+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
81978+ return VM_FAULT_OOM;
81979+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
81980+ }
81981+#endif
81982+
81983 retry:
81984 pgd = pgd_offset(mm, address);
81985 pud = pud_alloc(mm, pgd, address);
81986@@ -3872,6 +4113,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
81987 spin_unlock(&mm->page_table_lock);
81988 return 0;
81989 }
81990+
81991+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
81992+{
81993+ pud_t *new = pud_alloc_one(mm, address);
81994+ if (!new)
81995+ return -ENOMEM;
81996+
81997+ smp_wmb(); /* See comment in __pte_alloc */
81998+
81999+ spin_lock(&mm->page_table_lock);
82000+ if (pgd_present(*pgd)) /* Another has populated it */
82001+ pud_free(mm, new);
82002+ else
82003+ pgd_populate_kernel(mm, pgd, new);
82004+ spin_unlock(&mm->page_table_lock);
82005+ return 0;
82006+}
82007 #endif /* __PAGETABLE_PUD_FOLDED */
82008
82009 #ifndef __PAGETABLE_PMD_FOLDED
82010@@ -3902,6 +4160,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
82011 spin_unlock(&mm->page_table_lock);
82012 return 0;
82013 }
82014+
82015+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
82016+{
82017+ pmd_t *new = pmd_alloc_one(mm, address);
82018+ if (!new)
82019+ return -ENOMEM;
82020+
82021+ smp_wmb(); /* See comment in __pte_alloc */
82022+
82023+ spin_lock(&mm->page_table_lock);
82024+#ifndef __ARCH_HAS_4LEVEL_HACK
82025+ if (pud_present(*pud)) /* Another has populated it */
82026+ pmd_free(mm, new);
82027+ else
82028+ pud_populate_kernel(mm, pud, new);
82029+#else
82030+ if (pgd_present(*pud)) /* Another has populated it */
82031+ pmd_free(mm, new);
82032+ else
82033+ pgd_populate_kernel(mm, pud, new);
82034+#endif /* __ARCH_HAS_4LEVEL_HACK */
82035+ spin_unlock(&mm->page_table_lock);
82036+ return 0;
82037+}
82038 #endif /* __PAGETABLE_PMD_FOLDED */
82039
82040 #if !defined(__HAVE_ARCH_GATE_AREA)
82041@@ -3915,7 +4197,7 @@ static int __init gate_vma_init(void)
82042 gate_vma.vm_start = FIXADDR_USER_START;
82043 gate_vma.vm_end = FIXADDR_USER_END;
82044 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
82045- gate_vma.vm_page_prot = __P101;
82046+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
82047
82048 return 0;
82049 }
82050@@ -4049,8 +4331,8 @@ out:
82051 return ret;
82052 }
82053
82054-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
82055- void *buf, int len, int write)
82056+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
82057+ void *buf, size_t len, int write)
82058 {
82059 resource_size_t phys_addr;
82060 unsigned long prot = 0;
82061@@ -4075,8 +4357,8 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
82062 * Access another process' address space as given in mm. If non-NULL, use the
82063 * given task for page fault accounting.
82064 */
82065-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
82066- unsigned long addr, void *buf, int len, int write)
82067+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
82068+ unsigned long addr, void *buf, size_t len, int write)
82069 {
82070 struct vm_area_struct *vma;
82071 void *old_buf = buf;
82072@@ -4084,7 +4366,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
82073 down_read(&mm->mmap_sem);
82074 /* ignore errors, just check how much was successfully transferred */
82075 while (len) {
82076- int bytes, ret, offset;
82077+ ssize_t bytes, ret, offset;
82078 void *maddr;
82079 struct page *page = NULL;
82080
82081@@ -4143,8 +4425,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
82082 *
82083 * The caller must hold a reference on @mm.
82084 */
82085-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
82086- void *buf, int len, int write)
82087+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
82088+ void *buf, size_t len, int write)
82089 {
82090 return __access_remote_vm(NULL, mm, addr, buf, len, write);
82091 }
82092@@ -4154,11 +4436,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
82093 * Source/target buffer must be kernel space,
82094 * Do not walk the page table directly, use get_user_pages
82095 */
82096-int access_process_vm(struct task_struct *tsk, unsigned long addr,
82097- void *buf, int len, int write)
82098+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
82099+ void *buf, size_t len, int write)
82100 {
82101 struct mm_struct *mm;
82102- int ret;
82103+ ssize_t ret;
82104
82105 mm = get_task_mm(tsk);
82106 if (!mm)
82107diff --git a/mm/mempolicy.c b/mm/mempolicy.c
82108index 7431001..0f8344e 100644
82109--- a/mm/mempolicy.c
82110+++ b/mm/mempolicy.c
82111@@ -708,6 +708,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
82112 unsigned long vmstart;
82113 unsigned long vmend;
82114
82115+#ifdef CONFIG_PAX_SEGMEXEC
82116+ struct vm_area_struct *vma_m;
82117+#endif
82118+
82119 vma = find_vma(mm, start);
82120 if (!vma || vma->vm_start > start)
82121 return -EFAULT;
82122@@ -744,9 +748,20 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
82123 if (err)
82124 goto out;
82125 }
82126+
82127 err = vma_replace_policy(vma, new_pol);
82128 if (err)
82129 goto out;
82130+
82131+#ifdef CONFIG_PAX_SEGMEXEC
82132+ vma_m = pax_find_mirror_vma(vma);
82133+ if (vma_m) {
82134+ err = vma_replace_policy(vma_m, new_pol);
82135+ if (err)
82136+ goto out;
82137+ }
82138+#endif
82139+
82140 }
82141
82142 out:
82143@@ -1202,6 +1217,17 @@ static long do_mbind(unsigned long start, unsigned long len,
82144
82145 if (end < start)
82146 return -EINVAL;
82147+
82148+#ifdef CONFIG_PAX_SEGMEXEC
82149+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
82150+ if (end > SEGMEXEC_TASK_SIZE)
82151+ return -EINVAL;
82152+ } else
82153+#endif
82154+
82155+ if (end > TASK_SIZE)
82156+ return -EINVAL;
82157+
82158 if (end == start)
82159 return 0;
82160
82161@@ -1430,8 +1456,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
82162 */
82163 tcred = __task_cred(task);
82164 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
82165- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
82166- !capable(CAP_SYS_NICE)) {
82167+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
82168 rcu_read_unlock();
82169 err = -EPERM;
82170 goto out_put;
82171@@ -1462,6 +1487,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
82172 goto out;
82173 }
82174
82175+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
82176+ if (mm != current->mm &&
82177+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
82178+ mmput(mm);
82179+ err = -EPERM;
82180+ goto out;
82181+ }
82182+#endif
82183+
82184 err = do_migrate_pages(mm, old, new,
82185 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
82186
82187diff --git a/mm/migrate.c b/mm/migrate.c
82188index c04d9af..0b41805 100644
82189--- a/mm/migrate.c
82190+++ b/mm/migrate.c
82191@@ -1395,8 +1395,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
82192 */
82193 tcred = __task_cred(task);
82194 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
82195- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
82196- !capable(CAP_SYS_NICE)) {
82197+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
82198 rcu_read_unlock();
82199 err = -EPERM;
82200 goto out;
82201diff --git a/mm/mlock.c b/mm/mlock.c
82202index 79b7cf7..9944291 100644
82203--- a/mm/mlock.c
82204+++ b/mm/mlock.c
82205@@ -13,6 +13,7 @@
82206 #include <linux/pagemap.h>
82207 #include <linux/mempolicy.h>
82208 #include <linux/syscalls.h>
82209+#include <linux/security.h>
82210 #include <linux/sched.h>
82211 #include <linux/export.h>
82212 #include <linux/rmap.h>
82213@@ -334,7 +335,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
82214 {
82215 unsigned long nstart, end, tmp;
82216 struct vm_area_struct * vma, * prev;
82217- int error;
82218+ int error = 0;
82219
82220 VM_BUG_ON(start & ~PAGE_MASK);
82221 VM_BUG_ON(len != PAGE_ALIGN(len));
82222@@ -343,6 +344,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
82223 return -EINVAL;
82224 if (end == start)
82225 return 0;
82226+ if (end > TASK_SIZE)
82227+ return -EINVAL;
82228+
82229 vma = find_vma(current->mm, start);
82230 if (!vma || vma->vm_start > start)
82231 return -ENOMEM;
82232@@ -354,6 +358,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
82233 for (nstart = start ; ; ) {
82234 vm_flags_t newflags;
82235
82236+#ifdef CONFIG_PAX_SEGMEXEC
82237+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
82238+ break;
82239+#endif
82240+
82241 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
82242
82243 newflags = vma->vm_flags & ~VM_LOCKED;
82244@@ -466,6 +475,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
82245 lock_limit >>= PAGE_SHIFT;
82246
82247 /* check against resource limits */
82248+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
82249 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
82250 error = do_mlock(start, len, 1);
82251 up_write(&current->mm->mmap_sem);
82252@@ -500,6 +510,11 @@ static int do_mlockall(int flags)
82253 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
82254 vm_flags_t newflags;
82255
82256+#ifdef CONFIG_PAX_SEGMEXEC
82257+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
82258+ break;
82259+#endif
82260+
82261 newflags = vma->vm_flags & ~VM_LOCKED;
82262 if (flags & MCL_CURRENT)
82263 newflags |= VM_LOCKED;
82264@@ -532,6 +547,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
82265 lock_limit >>= PAGE_SHIFT;
82266
82267 ret = -ENOMEM;
82268+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
82269 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
82270 capable(CAP_IPC_LOCK))
82271 ret = do_mlockall(flags);
82272diff --git a/mm/mmap.c b/mm/mmap.c
82273index 0dceed8..e7cfc40 100644
82274--- a/mm/mmap.c
82275+++ b/mm/mmap.c
82276@@ -33,6 +33,7 @@
82277 #include <linux/uprobes.h>
82278 #include <linux/rbtree_augmented.h>
82279 #include <linux/sched/sysctl.h>
82280+#include <linux/random.h>
82281
82282 #include <asm/uaccess.h>
82283 #include <asm/cacheflush.h>
82284@@ -49,6 +50,16 @@
82285 #define arch_rebalance_pgtables(addr, len) (addr)
82286 #endif
82287
82288+static inline void verify_mm_writelocked(struct mm_struct *mm)
82289+{
82290+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
82291+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
82292+ up_read(&mm->mmap_sem);
82293+ BUG();
82294+ }
82295+#endif
82296+}
82297+
82298 static void unmap_region(struct mm_struct *mm,
82299 struct vm_area_struct *vma, struct vm_area_struct *prev,
82300 unsigned long start, unsigned long end);
82301@@ -68,22 +79,32 @@ static void unmap_region(struct mm_struct *mm,
82302 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
82303 *
82304 */
82305-pgprot_t protection_map[16] = {
82306+pgprot_t protection_map[16] __read_only = {
82307 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
82308 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
82309 };
82310
82311-pgprot_t vm_get_page_prot(unsigned long vm_flags)
82312+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
82313 {
82314- return __pgprot(pgprot_val(protection_map[vm_flags &
82315+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
82316 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
82317 pgprot_val(arch_vm_get_page_prot(vm_flags)));
82318+
82319+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
82320+ if (!(__supported_pte_mask & _PAGE_NX) &&
82321+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
82322+ (vm_flags & (VM_READ | VM_WRITE)))
82323+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
82324+#endif
82325+
82326+ return prot;
82327 }
82328 EXPORT_SYMBOL(vm_get_page_prot);
82329
82330 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
82331 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
82332 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
82333+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
82334 /*
82335 * Make sure vm_committed_as in one cacheline and not cacheline shared with
82336 * other variables. It can be updated by several CPUs frequently.
82337@@ -239,6 +260,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
82338 struct vm_area_struct *next = vma->vm_next;
82339
82340 might_sleep();
82341+ BUG_ON(vma->vm_mirror);
82342 if (vma->vm_ops && vma->vm_ops->close)
82343 vma->vm_ops->close(vma);
82344 if (vma->vm_file)
82345@@ -283,6 +305,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
82346 * not page aligned -Ram Gupta
82347 */
82348 rlim = rlimit(RLIMIT_DATA);
82349+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
82350 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
82351 (mm->end_data - mm->start_data) > rlim)
82352 goto out;
82353@@ -897,6 +920,12 @@ static int
82354 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
82355 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
82356 {
82357+
82358+#ifdef CONFIG_PAX_SEGMEXEC
82359+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
82360+ return 0;
82361+#endif
82362+
82363 if (is_mergeable_vma(vma, file, vm_flags) &&
82364 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
82365 if (vma->vm_pgoff == vm_pgoff)
82366@@ -916,6 +945,12 @@ static int
82367 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
82368 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
82369 {
82370+
82371+#ifdef CONFIG_PAX_SEGMEXEC
82372+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
82373+ return 0;
82374+#endif
82375+
82376 if (is_mergeable_vma(vma, file, vm_flags) &&
82377 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
82378 pgoff_t vm_pglen;
82379@@ -958,13 +993,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
82380 struct vm_area_struct *vma_merge(struct mm_struct *mm,
82381 struct vm_area_struct *prev, unsigned long addr,
82382 unsigned long end, unsigned long vm_flags,
82383- struct anon_vma *anon_vma, struct file *file,
82384+ struct anon_vma *anon_vma, struct file *file,
82385 pgoff_t pgoff, struct mempolicy *policy)
82386 {
82387 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
82388 struct vm_area_struct *area, *next;
82389 int err;
82390
82391+#ifdef CONFIG_PAX_SEGMEXEC
82392+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
82393+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
82394+
82395+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
82396+#endif
82397+
82398 /*
82399 * We later require that vma->vm_flags == vm_flags,
82400 * so this tests vma->vm_flags & VM_SPECIAL, too.
82401@@ -980,6 +1022,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
82402 if (next && next->vm_end == end) /* cases 6, 7, 8 */
82403 next = next->vm_next;
82404
82405+#ifdef CONFIG_PAX_SEGMEXEC
82406+ if (prev)
82407+ prev_m = pax_find_mirror_vma(prev);
82408+ if (area)
82409+ area_m = pax_find_mirror_vma(area);
82410+ if (next)
82411+ next_m = pax_find_mirror_vma(next);
82412+#endif
82413+
82414 /*
82415 * Can it merge with the predecessor?
82416 */
82417@@ -999,9 +1050,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
82418 /* cases 1, 6 */
82419 err = vma_adjust(prev, prev->vm_start,
82420 next->vm_end, prev->vm_pgoff, NULL);
82421- } else /* cases 2, 5, 7 */
82422+
82423+#ifdef CONFIG_PAX_SEGMEXEC
82424+ if (!err && prev_m)
82425+ err = vma_adjust(prev_m, prev_m->vm_start,
82426+ next_m->vm_end, prev_m->vm_pgoff, NULL);
82427+#endif
82428+
82429+ } else { /* cases 2, 5, 7 */
82430 err = vma_adjust(prev, prev->vm_start,
82431 end, prev->vm_pgoff, NULL);
82432+
82433+#ifdef CONFIG_PAX_SEGMEXEC
82434+ if (!err && prev_m)
82435+ err = vma_adjust(prev_m, prev_m->vm_start,
82436+ end_m, prev_m->vm_pgoff, NULL);
82437+#endif
82438+
82439+ }
82440 if (err)
82441 return NULL;
82442 khugepaged_enter_vma_merge(prev);
82443@@ -1015,12 +1081,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
82444 mpol_equal(policy, vma_policy(next)) &&
82445 can_vma_merge_before(next, vm_flags,
82446 anon_vma, file, pgoff+pglen)) {
82447- if (prev && addr < prev->vm_end) /* case 4 */
82448+ if (prev && addr < prev->vm_end) { /* case 4 */
82449 err = vma_adjust(prev, prev->vm_start,
82450 addr, prev->vm_pgoff, NULL);
82451- else /* cases 3, 8 */
82452+
82453+#ifdef CONFIG_PAX_SEGMEXEC
82454+ if (!err && prev_m)
82455+ err = vma_adjust(prev_m, prev_m->vm_start,
82456+ addr_m, prev_m->vm_pgoff, NULL);
82457+#endif
82458+
82459+ } else { /* cases 3, 8 */
82460 err = vma_adjust(area, addr, next->vm_end,
82461 next->vm_pgoff - pglen, NULL);
82462+
82463+#ifdef CONFIG_PAX_SEGMEXEC
82464+ if (!err && area_m)
82465+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
82466+ next_m->vm_pgoff - pglen, NULL);
82467+#endif
82468+
82469+ }
82470 if (err)
82471 return NULL;
82472 khugepaged_enter_vma_merge(area);
82473@@ -1129,8 +1210,10 @@ none:
82474 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
82475 struct file *file, long pages)
82476 {
82477- const unsigned long stack_flags
82478- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
82479+
82480+#ifdef CONFIG_PAX_RANDMMAP
82481+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
82482+#endif
82483
82484 mm->total_vm += pages;
82485
82486@@ -1138,7 +1221,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
82487 mm->shared_vm += pages;
82488 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
82489 mm->exec_vm += pages;
82490- } else if (flags & stack_flags)
82491+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
82492 mm->stack_vm += pages;
82493 }
82494 #endif /* CONFIG_PROC_FS */
82495@@ -1177,7 +1260,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
82496 * (the exception is when the underlying filesystem is noexec
82497 * mounted, in which case we dont add PROT_EXEC.)
82498 */
82499- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
82500+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
82501 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
82502 prot |= PROT_EXEC;
82503
82504@@ -1203,7 +1286,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
82505 /* Obtain the address to map to. we verify (or select) it and ensure
82506 * that it represents a valid section of the address space.
82507 */
82508- addr = get_unmapped_area(file, addr, len, pgoff, flags);
82509+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
82510 if (addr & ~PAGE_MASK)
82511 return addr;
82512
82513@@ -1214,6 +1297,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
82514 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
82515 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
82516
82517+#ifdef CONFIG_PAX_MPROTECT
82518+ if (mm->pax_flags & MF_PAX_MPROTECT) {
82519+#ifndef CONFIG_PAX_MPROTECT_COMPAT
82520+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
82521+ gr_log_rwxmmap(file);
82522+
82523+#ifdef CONFIG_PAX_EMUPLT
82524+ vm_flags &= ~VM_EXEC;
82525+#else
82526+ return -EPERM;
82527+#endif
82528+
82529+ }
82530+
82531+ if (!(vm_flags & VM_EXEC))
82532+ vm_flags &= ~VM_MAYEXEC;
82533+#else
82534+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
82535+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
82536+#endif
82537+ else
82538+ vm_flags &= ~VM_MAYWRITE;
82539+ }
82540+#endif
82541+
82542+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
82543+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
82544+ vm_flags &= ~VM_PAGEEXEC;
82545+#endif
82546+
82547 if (flags & MAP_LOCKED)
82548 if (!can_do_mlock())
82549 return -EPERM;
82550@@ -1225,6 +1338,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
82551 locked += mm->locked_vm;
82552 lock_limit = rlimit(RLIMIT_MEMLOCK);
82553 lock_limit >>= PAGE_SHIFT;
82554+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
82555 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
82556 return -EAGAIN;
82557 }
82558@@ -1305,6 +1419,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
82559 vm_flags |= VM_NORESERVE;
82560 }
82561
82562+ if (!gr_acl_handle_mmap(file, prot))
82563+ return -EACCES;
82564+
82565 addr = mmap_region(file, addr, len, vm_flags, pgoff);
82566 if (!IS_ERR_VALUE(addr) &&
82567 ((vm_flags & VM_LOCKED) ||
82568@@ -1396,7 +1513,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
82569 vm_flags_t vm_flags = vma->vm_flags;
82570
82571 /* If it was private or non-writable, the write bit is already clear */
82572- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
82573+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
82574 return 0;
82575
82576 /* The backer wishes to know when pages are first written to? */
82577@@ -1444,16 +1561,30 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
82578 unsigned long charged = 0;
82579 struct inode *inode = file ? file_inode(file) : NULL;
82580
82581+#ifdef CONFIG_PAX_SEGMEXEC
82582+ struct vm_area_struct *vma_m = NULL;
82583+#endif
82584+
82585+ /*
82586+ * mm->mmap_sem is required to protect against another thread
82587+ * changing the mappings in case we sleep.
82588+ */
82589+ verify_mm_writelocked(mm);
82590+
82591 /* Clear old maps */
82592 error = -ENOMEM;
82593-munmap_back:
82594 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
82595 if (do_munmap(mm, addr, len))
82596 return -ENOMEM;
82597- goto munmap_back;
82598+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
82599 }
82600
82601 /* Check against address space limit. */
82602+
82603+#ifdef CONFIG_PAX_RANDMMAP
82604+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
82605+#endif
82606+
82607 if (!may_expand_vm(mm, len >> PAGE_SHIFT))
82608 return -ENOMEM;
82609
82610@@ -1485,6 +1616,16 @@ munmap_back:
82611 goto unacct_error;
82612 }
82613
82614+#ifdef CONFIG_PAX_SEGMEXEC
82615+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
82616+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
82617+ if (!vma_m) {
82618+ error = -ENOMEM;
82619+ goto free_vma;
82620+ }
82621+ }
82622+#endif
82623+
82624 vma->vm_mm = mm;
82625 vma->vm_start = addr;
82626 vma->vm_end = addr + len;
82627@@ -1509,6 +1650,13 @@ munmap_back:
82628 if (error)
82629 goto unmap_and_free_vma;
82630
82631+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
82632+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
82633+ vma->vm_flags |= VM_PAGEEXEC;
82634+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
82635+ }
82636+#endif
82637+
82638 /* Can addr have changed??
82639 *
82640 * Answer: Yes, several device drivers can do it in their
82641@@ -1547,6 +1695,11 @@ munmap_back:
82642 vma_link(mm, vma, prev, rb_link, rb_parent);
82643 file = vma->vm_file;
82644
82645+#ifdef CONFIG_PAX_SEGMEXEC
82646+ if (vma_m)
82647+ BUG_ON(pax_mirror_vma(vma_m, vma));
82648+#endif
82649+
82650 /* Once vma denies write, undo our temporary denial count */
82651 if (correct_wcount)
82652 atomic_inc(&inode->i_writecount);
82653@@ -1554,6 +1707,7 @@ out:
82654 perf_event_mmap(vma);
82655
82656 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
82657+ track_exec_limit(mm, addr, addr + len, vm_flags);
82658 if (vm_flags & VM_LOCKED) {
82659 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
82660 vma == get_gate_vma(current->mm)))
82661@@ -1577,6 +1731,12 @@ unmap_and_free_vma:
82662 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
82663 charged = 0;
82664 free_vma:
82665+
82666+#ifdef CONFIG_PAX_SEGMEXEC
82667+ if (vma_m)
82668+ kmem_cache_free(vm_area_cachep, vma_m);
82669+#endif
82670+
82671 kmem_cache_free(vm_area_cachep, vma);
82672 unacct_error:
82673 if (charged)
82674@@ -1584,6 +1744,62 @@ unacct_error:
82675 return error;
82676 }
82677
82678+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
82679+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
82680+{
82681+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
82682+ return (random32() & 0xFF) << PAGE_SHIFT;
82683+
82684+ return 0;
82685+}
82686+#endif
82687+
82688+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
82689+{
82690+ if (!vma) {
82691+#ifdef CONFIG_STACK_GROWSUP
82692+ if (addr > sysctl_heap_stack_gap)
82693+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
82694+ else
82695+ vma = find_vma(current->mm, 0);
82696+ if (vma && (vma->vm_flags & VM_GROWSUP))
82697+ return false;
82698+#endif
82699+ return true;
82700+ }
82701+
82702+ if (addr + len > vma->vm_start)
82703+ return false;
82704+
82705+ if (vma->vm_flags & VM_GROWSDOWN)
82706+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
82707+#ifdef CONFIG_STACK_GROWSUP
82708+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
82709+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
82710+#endif
82711+ else if (offset)
82712+ return offset <= vma->vm_start - addr - len;
82713+
82714+ return true;
82715+}
82716+
82717+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
82718+{
82719+ if (vma->vm_start < len)
82720+ return -ENOMEM;
82721+
82722+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
82723+ if (offset <= vma->vm_start - len)
82724+ return vma->vm_start - len - offset;
82725+ else
82726+ return -ENOMEM;
82727+ }
82728+
82729+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
82730+ return vma->vm_start - len - sysctl_heap_stack_gap;
82731+ return -ENOMEM;
82732+}
82733+
82734 unsigned long unmapped_area(struct vm_unmapped_area_info *info)
82735 {
82736 /*
82737@@ -1803,6 +2019,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
82738 struct mm_struct *mm = current->mm;
82739 struct vm_area_struct *vma;
82740 struct vm_unmapped_area_info info;
82741+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
82742
82743 if (len > TASK_SIZE)
82744 return -ENOMEM;
82745@@ -1810,29 +2027,45 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
82746 if (flags & MAP_FIXED)
82747 return addr;
82748
82749+#ifdef CONFIG_PAX_RANDMMAP
82750+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
82751+#endif
82752+
82753 if (addr) {
82754 addr = PAGE_ALIGN(addr);
82755 vma = find_vma(mm, addr);
82756- if (TASK_SIZE - len >= addr &&
82757- (!vma || addr + len <= vma->vm_start))
82758+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
82759 return addr;
82760 }
82761
82762 info.flags = 0;
82763 info.length = len;
82764 info.low_limit = TASK_UNMAPPED_BASE;
82765+
82766+#ifdef CONFIG_PAX_RANDMMAP
82767+ if (mm->pax_flags & MF_PAX_RANDMMAP)
82768+ info.low_limit += mm->delta_mmap;
82769+#endif
82770+
82771 info.high_limit = TASK_SIZE;
82772 info.align_mask = 0;
82773+ info.threadstack_offset = offset;
82774 return vm_unmapped_area(&info);
82775 }
82776 #endif
82777
82778 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
82779 {
82780+
82781+#ifdef CONFIG_PAX_SEGMEXEC
82782+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
82783+ return;
82784+#endif
82785+
82786 /*
82787 * Is this a new hole at the lowest possible address?
82788 */
82789- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
82790+ if (addr >= mm->mmap_base && addr < mm->free_area_cache)
82791 mm->free_area_cache = addr;
82792 }
82793
82794@@ -1850,6 +2083,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
82795 struct mm_struct *mm = current->mm;
82796 unsigned long addr = addr0;
82797 struct vm_unmapped_area_info info;
82798+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
82799
82800 /* requested length too big for entire address space */
82801 if (len > TASK_SIZE)
82802@@ -1858,12 +2092,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
82803 if (flags & MAP_FIXED)
82804 return addr;
82805
82806+#ifdef CONFIG_PAX_RANDMMAP
82807+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
82808+#endif
82809+
82810 /* requesting a specific address */
82811 if (addr) {
82812 addr = PAGE_ALIGN(addr);
82813 vma = find_vma(mm, addr);
82814- if (TASK_SIZE - len >= addr &&
82815- (!vma || addr + len <= vma->vm_start))
82816+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
82817 return addr;
82818 }
82819
82820@@ -1872,6 +2109,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
82821 info.low_limit = PAGE_SIZE;
82822 info.high_limit = mm->mmap_base;
82823 info.align_mask = 0;
82824+ info.threadstack_offset = offset;
82825 addr = vm_unmapped_area(&info);
82826
82827 /*
82828@@ -1884,6 +2122,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
82829 VM_BUG_ON(addr != -ENOMEM);
82830 info.flags = 0;
82831 info.low_limit = TASK_UNMAPPED_BASE;
82832+
82833+#ifdef CONFIG_PAX_RANDMMAP
82834+ if (mm->pax_flags & MF_PAX_RANDMMAP)
82835+ info.low_limit += mm->delta_mmap;
82836+#endif
82837+
82838 info.high_limit = TASK_SIZE;
82839 addr = vm_unmapped_area(&info);
82840 }
82841@@ -1894,6 +2138,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
82842
82843 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
82844 {
82845+
82846+#ifdef CONFIG_PAX_SEGMEXEC
82847+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
82848+ return;
82849+#endif
82850+
82851 /*
82852 * Is this a new hole at the highest possible address?
82853 */
82854@@ -1901,8 +2151,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
82855 mm->free_area_cache = addr;
82856
82857 /* dont allow allocations above current base */
82858- if (mm->free_area_cache > mm->mmap_base)
82859+ if (mm->free_area_cache > mm->mmap_base) {
82860 mm->free_area_cache = mm->mmap_base;
82861+ mm->cached_hole_size = ~0UL;
82862+ }
82863 }
82864
82865 unsigned long
82866@@ -2001,6 +2253,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
82867 return vma;
82868 }
82869
82870+#ifdef CONFIG_PAX_SEGMEXEC
82871+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
82872+{
82873+ struct vm_area_struct *vma_m;
82874+
82875+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
82876+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
82877+ BUG_ON(vma->vm_mirror);
82878+ return NULL;
82879+ }
82880+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
82881+ vma_m = vma->vm_mirror;
82882+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
82883+ BUG_ON(vma->vm_file != vma_m->vm_file);
82884+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
82885+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
82886+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
82887+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
82888+ return vma_m;
82889+}
82890+#endif
82891+
82892 /*
82893 * Verify that the stack growth is acceptable and
82894 * update accounting. This is shared with both the
82895@@ -2017,6 +2291,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
82896 return -ENOMEM;
82897
82898 /* Stack limit test */
82899+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
82900 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
82901 return -ENOMEM;
82902
82903@@ -2027,6 +2302,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
82904 locked = mm->locked_vm + grow;
82905 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
82906 limit >>= PAGE_SHIFT;
82907+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
82908 if (locked > limit && !capable(CAP_IPC_LOCK))
82909 return -ENOMEM;
82910 }
82911@@ -2056,37 +2332,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
82912 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
82913 * vma is the last one with address > vma->vm_end. Have to extend vma.
82914 */
82915+#ifndef CONFIG_IA64
82916+static
82917+#endif
82918 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
82919 {
82920 int error;
82921+ bool locknext;
82922
82923 if (!(vma->vm_flags & VM_GROWSUP))
82924 return -EFAULT;
82925
82926+ /* Also guard against wrapping around to address 0. */
82927+ if (address < PAGE_ALIGN(address+1))
82928+ address = PAGE_ALIGN(address+1);
82929+ else
82930+ return -ENOMEM;
82931+
82932 /*
82933 * We must make sure the anon_vma is allocated
82934 * so that the anon_vma locking is not a noop.
82935 */
82936 if (unlikely(anon_vma_prepare(vma)))
82937 return -ENOMEM;
82938+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
82939+ if (locknext && anon_vma_prepare(vma->vm_next))
82940+ return -ENOMEM;
82941 vma_lock_anon_vma(vma);
82942+ if (locknext)
82943+ vma_lock_anon_vma(vma->vm_next);
82944
82945 /*
82946 * vma->vm_start/vm_end cannot change under us because the caller
82947 * is required to hold the mmap_sem in read mode. We need the
82948- * anon_vma lock to serialize against concurrent expand_stacks.
82949- * Also guard against wrapping around to address 0.
82950+ * anon_vma locks to serialize against concurrent expand_stacks
82951+ * and expand_upwards.
82952 */
82953- if (address < PAGE_ALIGN(address+4))
82954- address = PAGE_ALIGN(address+4);
82955- else {
82956- vma_unlock_anon_vma(vma);
82957- return -ENOMEM;
82958- }
82959 error = 0;
82960
82961 /* Somebody else might have raced and expanded it already */
82962- if (address > vma->vm_end) {
82963+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
82964+ error = -ENOMEM;
82965+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
82966 unsigned long size, grow;
82967
82968 size = address - vma->vm_start;
82969@@ -2121,6 +2408,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
82970 }
82971 }
82972 }
82973+ if (locknext)
82974+ vma_unlock_anon_vma(vma->vm_next);
82975 vma_unlock_anon_vma(vma);
82976 khugepaged_enter_vma_merge(vma);
82977 validate_mm(vma->vm_mm);
82978@@ -2135,6 +2424,8 @@ int expand_downwards(struct vm_area_struct *vma,
82979 unsigned long address)
82980 {
82981 int error;
82982+ bool lockprev = false;
82983+ struct vm_area_struct *prev;
82984
82985 /*
82986 * We must make sure the anon_vma is allocated
82987@@ -2148,6 +2439,15 @@ int expand_downwards(struct vm_area_struct *vma,
82988 if (error)
82989 return error;
82990
82991+ prev = vma->vm_prev;
82992+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
82993+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
82994+#endif
82995+ if (lockprev && anon_vma_prepare(prev))
82996+ return -ENOMEM;
82997+ if (lockprev)
82998+ vma_lock_anon_vma(prev);
82999+
83000 vma_lock_anon_vma(vma);
83001
83002 /*
83003@@ -2157,9 +2457,17 @@ int expand_downwards(struct vm_area_struct *vma,
83004 */
83005
83006 /* Somebody else might have raced and expanded it already */
83007- if (address < vma->vm_start) {
83008+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
83009+ error = -ENOMEM;
83010+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
83011 unsigned long size, grow;
83012
83013+#ifdef CONFIG_PAX_SEGMEXEC
83014+ struct vm_area_struct *vma_m;
83015+
83016+ vma_m = pax_find_mirror_vma(vma);
83017+#endif
83018+
83019 size = vma->vm_end - address;
83020 grow = (vma->vm_start - address) >> PAGE_SHIFT;
83021
83022@@ -2184,13 +2492,27 @@ int expand_downwards(struct vm_area_struct *vma,
83023 vma->vm_pgoff -= grow;
83024 anon_vma_interval_tree_post_update_vma(vma);
83025 vma_gap_update(vma);
83026+
83027+#ifdef CONFIG_PAX_SEGMEXEC
83028+ if (vma_m) {
83029+ anon_vma_interval_tree_pre_update_vma(vma_m);
83030+ vma_m->vm_start -= grow << PAGE_SHIFT;
83031+ vma_m->vm_pgoff -= grow;
83032+ anon_vma_interval_tree_post_update_vma(vma_m);
83033+ vma_gap_update(vma_m);
83034+ }
83035+#endif
83036+
83037 spin_unlock(&vma->vm_mm->page_table_lock);
83038
83039+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
83040 perf_event_mmap(vma);
83041 }
83042 }
83043 }
83044 vma_unlock_anon_vma(vma);
83045+ if (lockprev)
83046+ vma_unlock_anon_vma(prev);
83047 khugepaged_enter_vma_merge(vma);
83048 validate_mm(vma->vm_mm);
83049 return error;
83050@@ -2288,6 +2610,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
83051 do {
83052 long nrpages = vma_pages(vma);
83053
83054+#ifdef CONFIG_PAX_SEGMEXEC
83055+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
83056+ vma = remove_vma(vma);
83057+ continue;
83058+ }
83059+#endif
83060+
83061 if (vma->vm_flags & VM_ACCOUNT)
83062 nr_accounted += nrpages;
83063 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
83064@@ -2333,6 +2662,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
83065 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
83066 vma->vm_prev = NULL;
83067 do {
83068+
83069+#ifdef CONFIG_PAX_SEGMEXEC
83070+ if (vma->vm_mirror) {
83071+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
83072+ vma->vm_mirror->vm_mirror = NULL;
83073+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
83074+ vma->vm_mirror = NULL;
83075+ }
83076+#endif
83077+
83078 vma_rb_erase(vma, &mm->mm_rb);
83079 mm->map_count--;
83080 tail_vma = vma;
83081@@ -2364,14 +2703,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
83082 struct vm_area_struct *new;
83083 int err = -ENOMEM;
83084
83085+#ifdef CONFIG_PAX_SEGMEXEC
83086+ struct vm_area_struct *vma_m, *new_m = NULL;
83087+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
83088+#endif
83089+
83090 if (is_vm_hugetlb_page(vma) && (addr &
83091 ~(huge_page_mask(hstate_vma(vma)))))
83092 return -EINVAL;
83093
83094+#ifdef CONFIG_PAX_SEGMEXEC
83095+ vma_m = pax_find_mirror_vma(vma);
83096+#endif
83097+
83098 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
83099 if (!new)
83100 goto out_err;
83101
83102+#ifdef CONFIG_PAX_SEGMEXEC
83103+ if (vma_m) {
83104+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
83105+ if (!new_m) {
83106+ kmem_cache_free(vm_area_cachep, new);
83107+ goto out_err;
83108+ }
83109+ }
83110+#endif
83111+
83112 /* most fields are the same, copy all, and then fixup */
83113 *new = *vma;
83114
83115@@ -2384,6 +2742,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
83116 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
83117 }
83118
83119+#ifdef CONFIG_PAX_SEGMEXEC
83120+ if (vma_m) {
83121+ *new_m = *vma_m;
83122+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
83123+ new_m->vm_mirror = new;
83124+ new->vm_mirror = new_m;
83125+
83126+ if (new_below)
83127+ new_m->vm_end = addr_m;
83128+ else {
83129+ new_m->vm_start = addr_m;
83130+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
83131+ }
83132+ }
83133+#endif
83134+
83135 pol = mpol_dup(vma_policy(vma));
83136 if (IS_ERR(pol)) {
83137 err = PTR_ERR(pol);
83138@@ -2406,6 +2780,36 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
83139 else
83140 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
83141
83142+#ifdef CONFIG_PAX_SEGMEXEC
83143+ if (!err && vma_m) {
83144+ if (anon_vma_clone(new_m, vma_m))
83145+ goto out_free_mpol;
83146+
83147+ mpol_get(pol);
83148+ vma_set_policy(new_m, pol);
83149+
83150+ if (new_m->vm_file)
83151+ get_file(new_m->vm_file);
83152+
83153+ if (new_m->vm_ops && new_m->vm_ops->open)
83154+ new_m->vm_ops->open(new_m);
83155+
83156+ if (new_below)
83157+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
83158+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
83159+ else
83160+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
83161+
83162+ if (err) {
83163+ if (new_m->vm_ops && new_m->vm_ops->close)
83164+ new_m->vm_ops->close(new_m);
83165+ if (new_m->vm_file)
83166+ fput(new_m->vm_file);
83167+ mpol_put(pol);
83168+ }
83169+ }
83170+#endif
83171+
83172 /* Success. */
83173 if (!err)
83174 return 0;
83175@@ -2415,10 +2819,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
83176 new->vm_ops->close(new);
83177 if (new->vm_file)
83178 fput(new->vm_file);
83179- unlink_anon_vmas(new);
83180 out_free_mpol:
83181 mpol_put(pol);
83182 out_free_vma:
83183+
83184+#ifdef CONFIG_PAX_SEGMEXEC
83185+ if (new_m) {
83186+ unlink_anon_vmas(new_m);
83187+ kmem_cache_free(vm_area_cachep, new_m);
83188+ }
83189+#endif
83190+
83191+ unlink_anon_vmas(new);
83192 kmem_cache_free(vm_area_cachep, new);
83193 out_err:
83194 return err;
83195@@ -2431,6 +2843,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
83196 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
83197 unsigned long addr, int new_below)
83198 {
83199+
83200+#ifdef CONFIG_PAX_SEGMEXEC
83201+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
83202+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
83203+ if (mm->map_count >= sysctl_max_map_count-1)
83204+ return -ENOMEM;
83205+ } else
83206+#endif
83207+
83208 if (mm->map_count >= sysctl_max_map_count)
83209 return -ENOMEM;
83210
83211@@ -2442,11 +2863,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
83212 * work. This now handles partial unmappings.
83213 * Jeremy Fitzhardinge <jeremy@goop.org>
83214 */
83215+#ifdef CONFIG_PAX_SEGMEXEC
83216 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
83217 {
83218+ int ret = __do_munmap(mm, start, len);
83219+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
83220+ return ret;
83221+
83222+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
83223+}
83224+
83225+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
83226+#else
83227+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
83228+#endif
83229+{
83230 unsigned long end;
83231 struct vm_area_struct *vma, *prev, *last;
83232
83233+ /*
83234+ * mm->mmap_sem is required to protect against another thread
83235+ * changing the mappings in case we sleep.
83236+ */
83237+ verify_mm_writelocked(mm);
83238+
83239 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
83240 return -EINVAL;
83241
83242@@ -2521,6 +2961,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
83243 /* Fix up all other VM information */
83244 remove_vma_list(mm, vma);
83245
83246+ track_exec_limit(mm, start, end, 0UL);
83247+
83248 return 0;
83249 }
83250
83251@@ -2529,6 +2971,13 @@ int vm_munmap(unsigned long start, size_t len)
83252 int ret;
83253 struct mm_struct *mm = current->mm;
83254
83255+
83256+#ifdef CONFIG_PAX_SEGMEXEC
83257+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
83258+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
83259+ return -EINVAL;
83260+#endif
83261+
83262 down_write(&mm->mmap_sem);
83263 ret = do_munmap(mm, start, len);
83264 up_write(&mm->mmap_sem);
83265@@ -2542,16 +2991,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
83266 return vm_munmap(addr, len);
83267 }
83268
83269-static inline void verify_mm_writelocked(struct mm_struct *mm)
83270-{
83271-#ifdef CONFIG_DEBUG_VM
83272- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
83273- WARN_ON(1);
83274- up_read(&mm->mmap_sem);
83275- }
83276-#endif
83277-}
83278-
83279 /*
83280 * this is really a simplified "do_mmap". it only handles
83281 * anonymous maps. eventually we may be able to do some
83282@@ -2565,6 +3004,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
83283 struct rb_node ** rb_link, * rb_parent;
83284 pgoff_t pgoff = addr >> PAGE_SHIFT;
83285 int error;
83286+ unsigned long charged;
83287
83288 len = PAGE_ALIGN(len);
83289 if (!len)
83290@@ -2572,16 +3012,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
83291
83292 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
83293
83294+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
83295+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
83296+ flags &= ~VM_EXEC;
83297+
83298+#ifdef CONFIG_PAX_MPROTECT
83299+ if (mm->pax_flags & MF_PAX_MPROTECT)
83300+ flags &= ~VM_MAYEXEC;
83301+#endif
83302+
83303+ }
83304+#endif
83305+
83306 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
83307 if (error & ~PAGE_MASK)
83308 return error;
83309
83310+ charged = len >> PAGE_SHIFT;
83311+
83312 /*
83313 * mlock MCL_FUTURE?
83314 */
83315 if (mm->def_flags & VM_LOCKED) {
83316 unsigned long locked, lock_limit;
83317- locked = len >> PAGE_SHIFT;
83318+ locked = charged;
83319 locked += mm->locked_vm;
83320 lock_limit = rlimit(RLIMIT_MEMLOCK);
83321 lock_limit >>= PAGE_SHIFT;
83322@@ -2598,21 +3052,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
83323 /*
83324 * Clear old maps. this also does some error checking for us
83325 */
83326- munmap_back:
83327 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
83328 if (do_munmap(mm, addr, len))
83329 return -ENOMEM;
83330- goto munmap_back;
83331+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
83332 }
83333
83334 /* Check against address space limits *after* clearing old maps... */
83335- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
83336+ if (!may_expand_vm(mm, charged))
83337 return -ENOMEM;
83338
83339 if (mm->map_count > sysctl_max_map_count)
83340 return -ENOMEM;
83341
83342- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
83343+ if (security_vm_enough_memory_mm(mm, charged))
83344 return -ENOMEM;
83345
83346 /* Can we just expand an old private anonymous mapping? */
83347@@ -2626,7 +3079,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
83348 */
83349 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
83350 if (!vma) {
83351- vm_unacct_memory(len >> PAGE_SHIFT);
83352+ vm_unacct_memory(charged);
83353 return -ENOMEM;
83354 }
83355
83356@@ -2640,9 +3093,10 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
83357 vma_link(mm, vma, prev, rb_link, rb_parent);
83358 out:
83359 perf_event_mmap(vma);
83360- mm->total_vm += len >> PAGE_SHIFT;
83361+ mm->total_vm += charged;
83362 if (flags & VM_LOCKED)
83363- mm->locked_vm += (len >> PAGE_SHIFT);
83364+ mm->locked_vm += charged;
83365+ track_exec_limit(mm, addr, addr + len, flags);
83366 return addr;
83367 }
83368
83369@@ -2704,6 +3158,7 @@ void exit_mmap(struct mm_struct *mm)
83370 while (vma) {
83371 if (vma->vm_flags & VM_ACCOUNT)
83372 nr_accounted += vma_pages(vma);
83373+ vma->vm_mirror = NULL;
83374 vma = remove_vma(vma);
83375 }
83376 vm_unacct_memory(nr_accounted);
83377@@ -2720,6 +3175,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
83378 struct vm_area_struct *prev;
83379 struct rb_node **rb_link, *rb_parent;
83380
83381+#ifdef CONFIG_PAX_SEGMEXEC
83382+ struct vm_area_struct *vma_m = NULL;
83383+#endif
83384+
83385+ if (security_mmap_addr(vma->vm_start))
83386+ return -EPERM;
83387+
83388 /*
83389 * The vm_pgoff of a purely anonymous vma should be irrelevant
83390 * until its first write fault, when page's anon_vma and index
83391@@ -2743,7 +3205,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
83392 security_vm_enough_memory_mm(mm, vma_pages(vma)))
83393 return -ENOMEM;
83394
83395+#ifdef CONFIG_PAX_SEGMEXEC
83396+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
83397+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
83398+ if (!vma_m)
83399+ return -ENOMEM;
83400+ }
83401+#endif
83402+
83403 vma_link(mm, vma, prev, rb_link, rb_parent);
83404+
83405+#ifdef CONFIG_PAX_SEGMEXEC
83406+ if (vma_m)
83407+ BUG_ON(pax_mirror_vma(vma_m, vma));
83408+#endif
83409+
83410 return 0;
83411 }
83412
83413@@ -2763,6 +3239,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
83414 struct mempolicy *pol;
83415 bool faulted_in_anon_vma = true;
83416
83417+ BUG_ON(vma->vm_mirror);
83418+
83419 /*
83420 * If anonymous vma has not yet been faulted, update new pgoff
83421 * to match new location, to increase its chance of merging.
83422@@ -2829,6 +3307,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
83423 return NULL;
83424 }
83425
83426+#ifdef CONFIG_PAX_SEGMEXEC
83427+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
83428+{
83429+ struct vm_area_struct *prev_m;
83430+ struct rb_node **rb_link_m, *rb_parent_m;
83431+ struct mempolicy *pol_m;
83432+
83433+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
83434+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
83435+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
83436+ *vma_m = *vma;
83437+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
83438+ if (anon_vma_clone(vma_m, vma))
83439+ return -ENOMEM;
83440+ pol_m = vma_policy(vma_m);
83441+ mpol_get(pol_m);
83442+ vma_set_policy(vma_m, pol_m);
83443+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
83444+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
83445+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
83446+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
83447+ if (vma_m->vm_file)
83448+ get_file(vma_m->vm_file);
83449+ if (vma_m->vm_ops && vma_m->vm_ops->open)
83450+ vma_m->vm_ops->open(vma_m);
83451+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
83452+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
83453+ vma_m->vm_mirror = vma;
83454+ vma->vm_mirror = vma_m;
83455+ return 0;
83456+}
83457+#endif
83458+
83459 /*
83460 * Return true if the calling process may expand its vm space by the passed
83461 * number of pages
83462@@ -2840,6 +3351,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
83463
83464 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
83465
83466+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
83467 if (cur + npages > lim)
83468 return 0;
83469 return 1;
83470@@ -2910,6 +3422,22 @@ int install_special_mapping(struct mm_struct *mm,
83471 vma->vm_start = addr;
83472 vma->vm_end = addr + len;
83473
83474+#ifdef CONFIG_PAX_MPROTECT
83475+ if (mm->pax_flags & MF_PAX_MPROTECT) {
83476+#ifndef CONFIG_PAX_MPROTECT_COMPAT
83477+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
83478+ return -EPERM;
83479+ if (!(vm_flags & VM_EXEC))
83480+ vm_flags &= ~VM_MAYEXEC;
83481+#else
83482+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
83483+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
83484+#endif
83485+ else
83486+ vm_flags &= ~VM_MAYWRITE;
83487+ }
83488+#endif
83489+
83490 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
83491 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
83492
83493diff --git a/mm/mprotect.c b/mm/mprotect.c
83494index 94722a4..07d9926 100644
83495--- a/mm/mprotect.c
83496+++ b/mm/mprotect.c
83497@@ -23,10 +23,18 @@
83498 #include <linux/mmu_notifier.h>
83499 #include <linux/migrate.h>
83500 #include <linux/perf_event.h>
83501+#include <linux/sched/sysctl.h>
83502+
83503+#ifdef CONFIG_PAX_MPROTECT
83504+#include <linux/elf.h>
83505+#include <linux/binfmts.h>
83506+#endif
83507+
83508 #include <asm/uaccess.h>
83509 #include <asm/pgtable.h>
83510 #include <asm/cacheflush.h>
83511 #include <asm/tlbflush.h>
83512+#include <asm/mmu_context.h>
83513
83514 #ifndef pgprot_modify
83515 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
83516@@ -233,6 +241,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
83517 return pages;
83518 }
83519
83520+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
83521+/* called while holding the mmap semaphor for writing except stack expansion */
83522+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
83523+{
83524+ unsigned long oldlimit, newlimit = 0UL;
83525+
83526+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
83527+ return;
83528+
83529+ spin_lock(&mm->page_table_lock);
83530+ oldlimit = mm->context.user_cs_limit;
83531+ if ((prot & VM_EXEC) && oldlimit < end)
83532+ /* USER_CS limit moved up */
83533+ newlimit = end;
83534+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
83535+ /* USER_CS limit moved down */
83536+ newlimit = start;
83537+
83538+ if (newlimit) {
83539+ mm->context.user_cs_limit = newlimit;
83540+
83541+#ifdef CONFIG_SMP
83542+ wmb();
83543+ cpus_clear(mm->context.cpu_user_cs_mask);
83544+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
83545+#endif
83546+
83547+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
83548+ }
83549+ spin_unlock(&mm->page_table_lock);
83550+ if (newlimit == end) {
83551+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
83552+
83553+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
83554+ if (is_vm_hugetlb_page(vma))
83555+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
83556+ else
83557+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
83558+ }
83559+}
83560+#endif
83561+
83562 int
83563 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
83564 unsigned long start, unsigned long end, unsigned long newflags)
83565@@ -245,11 +295,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
83566 int error;
83567 int dirty_accountable = 0;
83568
83569+#ifdef CONFIG_PAX_SEGMEXEC
83570+ struct vm_area_struct *vma_m = NULL;
83571+ unsigned long start_m, end_m;
83572+
83573+ start_m = start + SEGMEXEC_TASK_SIZE;
83574+ end_m = end + SEGMEXEC_TASK_SIZE;
83575+#endif
83576+
83577 if (newflags == oldflags) {
83578 *pprev = vma;
83579 return 0;
83580 }
83581
83582+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
83583+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
83584+
83585+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
83586+ return -ENOMEM;
83587+
83588+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
83589+ return -ENOMEM;
83590+ }
83591+
83592 /*
83593 * If we make a private mapping writable we increase our commit;
83594 * but (without finer accounting) cannot reduce our commit if we
83595@@ -266,6 +334,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
83596 }
83597 }
83598
83599+#ifdef CONFIG_PAX_SEGMEXEC
83600+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
83601+ if (start != vma->vm_start) {
83602+ error = split_vma(mm, vma, start, 1);
83603+ if (error)
83604+ goto fail;
83605+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
83606+ *pprev = (*pprev)->vm_next;
83607+ }
83608+
83609+ if (end != vma->vm_end) {
83610+ error = split_vma(mm, vma, end, 0);
83611+ if (error)
83612+ goto fail;
83613+ }
83614+
83615+ if (pax_find_mirror_vma(vma)) {
83616+ error = __do_munmap(mm, start_m, end_m - start_m);
83617+ if (error)
83618+ goto fail;
83619+ } else {
83620+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
83621+ if (!vma_m) {
83622+ error = -ENOMEM;
83623+ goto fail;
83624+ }
83625+ vma->vm_flags = newflags;
83626+ error = pax_mirror_vma(vma_m, vma);
83627+ if (error) {
83628+ vma->vm_flags = oldflags;
83629+ goto fail;
83630+ }
83631+ }
83632+ }
83633+#endif
83634+
83635 /*
83636 * First try to merge with previous and/or next vma.
83637 */
83638@@ -296,9 +400,21 @@ success:
83639 * vm_flags and vm_page_prot are protected by the mmap_sem
83640 * held in write mode.
83641 */
83642+
83643+#ifdef CONFIG_PAX_SEGMEXEC
83644+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
83645+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
83646+#endif
83647+
83648 vma->vm_flags = newflags;
83649+
83650+#ifdef CONFIG_PAX_MPROTECT
83651+ if (mm->binfmt && mm->binfmt->handle_mprotect)
83652+ mm->binfmt->handle_mprotect(vma, newflags);
83653+#endif
83654+
83655 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
83656- vm_get_page_prot(newflags));
83657+ vm_get_page_prot(vma->vm_flags));
83658
83659 if (vma_wants_writenotify(vma)) {
83660 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
83661@@ -337,6 +453,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
83662 end = start + len;
83663 if (end <= start)
83664 return -ENOMEM;
83665+
83666+#ifdef CONFIG_PAX_SEGMEXEC
83667+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
83668+ if (end > SEGMEXEC_TASK_SIZE)
83669+ return -EINVAL;
83670+ } else
83671+#endif
83672+
83673+ if (end > TASK_SIZE)
83674+ return -EINVAL;
83675+
83676 if (!arch_validate_prot(prot))
83677 return -EINVAL;
83678
83679@@ -344,7 +471,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
83680 /*
83681 * Does the application expect PROT_READ to imply PROT_EXEC:
83682 */
83683- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
83684+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
83685 prot |= PROT_EXEC;
83686
83687 vm_flags = calc_vm_prot_bits(prot);
83688@@ -376,6 +503,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
83689 if (start > vma->vm_start)
83690 prev = vma;
83691
83692+#ifdef CONFIG_PAX_MPROTECT
83693+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
83694+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
83695+#endif
83696+
83697 for (nstart = start ; ; ) {
83698 unsigned long newflags;
83699
83700@@ -386,6 +518,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
83701
83702 /* newflags >> 4 shift VM_MAY% in place of VM_% */
83703 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
83704+ if (prot & (PROT_WRITE | PROT_EXEC))
83705+ gr_log_rwxmprotect(vma->vm_file);
83706+
83707+ error = -EACCES;
83708+ goto out;
83709+ }
83710+
83711+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
83712 error = -EACCES;
83713 goto out;
83714 }
83715@@ -400,6 +540,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
83716 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
83717 if (error)
83718 goto out;
83719+
83720+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
83721+
83722 nstart = tmp;
83723
83724 if (nstart < prev->vm_end)
83725diff --git a/mm/mremap.c b/mm/mremap.c
83726index 463a257..c0c7a92 100644
83727--- a/mm/mremap.c
83728+++ b/mm/mremap.c
83729@@ -126,6 +126,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
83730 continue;
83731 pte = ptep_get_and_clear(mm, old_addr, old_pte);
83732 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
83733+
83734+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
83735+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
83736+ pte = pte_exprotect(pte);
83737+#endif
83738+
83739 set_pte_at(mm, new_addr, new_pte, pte);
83740 }
83741
83742@@ -318,6 +324,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
83743 if (is_vm_hugetlb_page(vma))
83744 goto Einval;
83745
83746+#ifdef CONFIG_PAX_SEGMEXEC
83747+ if (pax_find_mirror_vma(vma))
83748+ goto Einval;
83749+#endif
83750+
83751 /* We can't remap across vm area boundaries */
83752 if (old_len > vma->vm_end - addr)
83753 goto Efault;
83754@@ -373,20 +384,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
83755 unsigned long ret = -EINVAL;
83756 unsigned long charged = 0;
83757 unsigned long map_flags;
83758+ unsigned long pax_task_size = TASK_SIZE;
83759
83760 if (new_addr & ~PAGE_MASK)
83761 goto out;
83762
83763- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
83764+#ifdef CONFIG_PAX_SEGMEXEC
83765+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
83766+ pax_task_size = SEGMEXEC_TASK_SIZE;
83767+#endif
83768+
83769+ pax_task_size -= PAGE_SIZE;
83770+
83771+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
83772 goto out;
83773
83774 /* Check if the location we're moving into overlaps the
83775 * old location at all, and fail if it does.
83776 */
83777- if ((new_addr <= addr) && (new_addr+new_len) > addr)
83778- goto out;
83779-
83780- if ((addr <= new_addr) && (addr+old_len) > new_addr)
83781+ if (addr + old_len > new_addr && new_addr + new_len > addr)
83782 goto out;
83783
83784 ret = do_munmap(mm, new_addr, new_len);
83785@@ -455,6 +471,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
83786 unsigned long ret = -EINVAL;
83787 unsigned long charged = 0;
83788 bool locked = false;
83789+ unsigned long pax_task_size = TASK_SIZE;
83790
83791 down_write(&current->mm->mmap_sem);
83792
83793@@ -475,6 +492,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
83794 if (!new_len)
83795 goto out;
83796
83797+#ifdef CONFIG_PAX_SEGMEXEC
83798+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
83799+ pax_task_size = SEGMEXEC_TASK_SIZE;
83800+#endif
83801+
83802+ pax_task_size -= PAGE_SIZE;
83803+
83804+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
83805+ old_len > pax_task_size || addr > pax_task_size-old_len)
83806+ goto out;
83807+
83808 if (flags & MREMAP_FIXED) {
83809 if (flags & MREMAP_MAYMOVE)
83810 ret = mremap_to(addr, old_len, new_addr, new_len,
83811@@ -524,6 +552,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
83812 new_addr = addr;
83813 }
83814 ret = addr;
83815+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
83816 goto out;
83817 }
83818 }
83819@@ -547,7 +576,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
83820 goto out;
83821 }
83822
83823+ map_flags = vma->vm_flags;
83824 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
83825+ if (!(ret & ~PAGE_MASK)) {
83826+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
83827+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
83828+ }
83829 }
83830 out:
83831 if (ret & ~PAGE_MASK)
83832diff --git a/mm/nommu.c b/mm/nommu.c
83833index e001768..9b52b30 100644
83834--- a/mm/nommu.c
83835+++ b/mm/nommu.c
83836@@ -63,7 +63,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
83837 int sysctl_overcommit_ratio = 50; /* default is 50% */
83838 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
83839 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
83840-int heap_stack_gap = 0;
83841
83842 atomic_long_t mmap_pages_allocated;
83843
83844@@ -841,15 +840,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
83845 EXPORT_SYMBOL(find_vma);
83846
83847 /*
83848- * find a VMA
83849- * - we don't extend stack VMAs under NOMMU conditions
83850- */
83851-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
83852-{
83853- return find_vma(mm, addr);
83854-}
83855-
83856-/*
83857 * expand a stack to a given address
83858 * - not supported under NOMMU conditions
83859 */
83860@@ -1560,6 +1550,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
83861
83862 /* most fields are the same, copy all, and then fixup */
83863 *new = *vma;
83864+ INIT_LIST_HEAD(&new->anon_vma_chain);
83865 *region = *vma->vm_region;
83866 new->vm_region = region;
83867
83868@@ -1992,8 +1983,8 @@ int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
83869 }
83870 EXPORT_SYMBOL(generic_file_remap_pages);
83871
83872-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
83873- unsigned long addr, void *buf, int len, int write)
83874+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
83875+ unsigned long addr, void *buf, size_t len, int write)
83876 {
83877 struct vm_area_struct *vma;
83878
83879@@ -2034,8 +2025,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
83880 *
83881 * The caller must hold a reference on @mm.
83882 */
83883-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
83884- void *buf, int len, int write)
83885+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
83886+ void *buf, size_t len, int write)
83887 {
83888 return __access_remote_vm(NULL, mm, addr, buf, len, write);
83889 }
83890@@ -2044,7 +2035,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
83891 * Access another process' address space.
83892 * - source/target buffer must be kernel space
83893 */
83894-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
83895+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
83896 {
83897 struct mm_struct *mm;
83898
83899diff --git a/mm/page-writeback.c b/mm/page-writeback.c
83900index efe6814..64b4701 100644
83901--- a/mm/page-writeback.c
83902+++ b/mm/page-writeback.c
83903@@ -659,7 +659,7 @@ unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
83904 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
83905 * - the bdi dirty thresh drops quickly due to change of JBOD workload
83906 */
83907-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
83908+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
83909 unsigned long thresh,
83910 unsigned long bg_thresh,
83911 unsigned long dirty,
83912@@ -1634,7 +1634,7 @@ ratelimit_handler(struct notifier_block *self, unsigned long action,
83913 }
83914 }
83915
83916-static struct notifier_block __cpuinitdata ratelimit_nb = {
83917+static struct notifier_block ratelimit_nb = {
83918 .notifier_call = ratelimit_handler,
83919 .next = NULL,
83920 };
83921diff --git a/mm/page_alloc.c b/mm/page_alloc.c
83922index 0d4fef2..8870335 100644
83923--- a/mm/page_alloc.c
83924+++ b/mm/page_alloc.c
83925@@ -59,6 +59,7 @@
83926 #include <linux/migrate.h>
83927 #include <linux/page-debug-flags.h>
83928 #include <linux/sched/rt.h>
83929+#include <linux/random.h>
83930
83931 #include <asm/tlbflush.h>
83932 #include <asm/div64.h>
83933@@ -344,7 +345,7 @@ out:
83934 * This usage means that zero-order pages may not be compound.
83935 */
83936
83937-static void free_compound_page(struct page *page)
83938+void free_compound_page(struct page *page)
83939 {
83940 __free_pages_ok(page, compound_order(page));
83941 }
83942@@ -701,6 +702,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
83943 int i;
83944 int bad = 0;
83945
83946+#ifdef CONFIG_PAX_MEMORY_SANITIZE
83947+ unsigned long index = 1UL << order;
83948+#endif
83949+
83950 trace_mm_page_free(page, order);
83951 kmemcheck_free_shadow(page, order);
83952
83953@@ -716,6 +721,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
83954 debug_check_no_obj_freed(page_address(page),
83955 PAGE_SIZE << order);
83956 }
83957+
83958+#ifdef CONFIG_PAX_MEMORY_SANITIZE
83959+ for (; index; --index)
83960+ sanitize_highpage(page + index - 1);
83961+#endif
83962+
83963 arch_free_page(page, order);
83964 kernel_map_pages(page, 1 << order, 0);
83965
83966@@ -738,6 +749,19 @@ static void __free_pages_ok(struct page *page, unsigned int order)
83967 local_irq_restore(flags);
83968 }
83969
83970+#ifdef CONFIG_PAX_LATENT_ENTROPY
83971+bool __meminitdata extra_latent_entropy;
83972+
83973+static int __init setup_pax_extra_latent_entropy(char *str)
83974+{
83975+ extra_latent_entropy = true;
83976+ return 0;
83977+}
83978+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
83979+
83980+volatile u64 latent_entropy;
83981+#endif
83982+
83983 /*
83984 * Read access to zone->managed_pages is safe because it's unsigned long,
83985 * but we still need to serialize writers. Currently all callers of
83986@@ -760,6 +784,19 @@ void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
83987 set_page_count(p, 0);
83988 }
83989
83990+#ifdef CONFIG_PAX_LATENT_ENTROPY
83991+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
83992+ u64 hash = 0;
83993+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
83994+ const u64 *data = lowmem_page_address(page);
83995+
83996+ for (index = 0; index < end; index++)
83997+ hash ^= hash + data[index];
83998+ latent_entropy ^= hash;
83999+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
84000+ }
84001+#endif
84002+
84003 page_zone(page)->managed_pages += 1 << order;
84004 set_page_refcounted(page);
84005 __free_pages(page, order);
84006@@ -869,8 +906,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
84007 arch_alloc_page(page, order);
84008 kernel_map_pages(page, 1 << order, 1);
84009
84010+#ifndef CONFIG_PAX_MEMORY_SANITIZE
84011 if (gfp_flags & __GFP_ZERO)
84012 prep_zero_page(page, order, gfp_flags);
84013+#endif
84014
84015 if (order && (gfp_flags & __GFP_COMP))
84016 prep_compound_page(page, order);
84017diff --git a/mm/page_io.c b/mm/page_io.c
84018index 6182870..4bba6a2 100644
84019--- a/mm/page_io.c
84020+++ b/mm/page_io.c
84021@@ -205,7 +205,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
84022 struct file *swap_file = sis->swap_file;
84023 struct address_space *mapping = swap_file->f_mapping;
84024 struct iovec iov = {
84025- .iov_base = kmap(page),
84026+ .iov_base = (void __force_user *)kmap(page),
84027 .iov_len = PAGE_SIZE,
84028 };
84029
84030diff --git a/mm/percpu.c b/mm/percpu.c
84031index 8c8e08f..73a5cda 100644
84032--- a/mm/percpu.c
84033+++ b/mm/percpu.c
84034@@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
84035 static unsigned int pcpu_high_unit_cpu __read_mostly;
84036
84037 /* the address of the first chunk which starts with the kernel static area */
84038-void *pcpu_base_addr __read_mostly;
84039+void *pcpu_base_addr __read_only;
84040 EXPORT_SYMBOL_GPL(pcpu_base_addr);
84041
84042 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
84043diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
84044index fd26d04..0cea1b0 100644
84045--- a/mm/process_vm_access.c
84046+++ b/mm/process_vm_access.c
84047@@ -13,6 +13,7 @@
84048 #include <linux/uio.h>
84049 #include <linux/sched.h>
84050 #include <linux/highmem.h>
84051+#include <linux/security.h>
84052 #include <linux/ptrace.h>
84053 #include <linux/slab.h>
84054 #include <linux/syscalls.h>
84055@@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
84056 size_t iov_l_curr_offset = 0;
84057 ssize_t iov_len;
84058
84059+ return -ENOSYS; // PaX: until properly audited
84060+
84061 /*
84062 * Work out how many pages of struct pages we're going to need
84063 * when eventually calling get_user_pages
84064 */
84065 for (i = 0; i < riovcnt; i++) {
84066 iov_len = rvec[i].iov_len;
84067- if (iov_len > 0) {
84068- nr_pages_iov = ((unsigned long)rvec[i].iov_base
84069- + iov_len)
84070- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
84071- / PAGE_SIZE + 1;
84072- nr_pages = max(nr_pages, nr_pages_iov);
84073- }
84074+ if (iov_len <= 0)
84075+ continue;
84076+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
84077+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
84078+ nr_pages = max(nr_pages, nr_pages_iov);
84079 }
84080
84081 if (nr_pages == 0)
84082@@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
84083 goto free_proc_pages;
84084 }
84085
84086+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
84087+ rc = -EPERM;
84088+ goto put_task_struct;
84089+ }
84090+
84091 mm = mm_access(task, PTRACE_MODE_ATTACH);
84092 if (!mm || IS_ERR(mm)) {
84093 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
84094diff --git a/mm/rmap.c b/mm/rmap.c
84095index 807c96b..0e05279 100644
84096--- a/mm/rmap.c
84097+++ b/mm/rmap.c
84098@@ -163,6 +163,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
84099 struct anon_vma *anon_vma = vma->anon_vma;
84100 struct anon_vma_chain *avc;
84101
84102+#ifdef CONFIG_PAX_SEGMEXEC
84103+ struct anon_vma_chain *avc_m = NULL;
84104+#endif
84105+
84106 might_sleep();
84107 if (unlikely(!anon_vma)) {
84108 struct mm_struct *mm = vma->vm_mm;
84109@@ -172,6 +176,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
84110 if (!avc)
84111 goto out_enomem;
84112
84113+#ifdef CONFIG_PAX_SEGMEXEC
84114+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
84115+ if (!avc_m)
84116+ goto out_enomem_free_avc;
84117+#endif
84118+
84119 anon_vma = find_mergeable_anon_vma(vma);
84120 allocated = NULL;
84121 if (!anon_vma) {
84122@@ -185,6 +195,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
84123 /* page_table_lock to protect against threads */
84124 spin_lock(&mm->page_table_lock);
84125 if (likely(!vma->anon_vma)) {
84126+
84127+#ifdef CONFIG_PAX_SEGMEXEC
84128+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
84129+
84130+ if (vma_m) {
84131+ BUG_ON(vma_m->anon_vma);
84132+ vma_m->anon_vma = anon_vma;
84133+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
84134+ avc_m = NULL;
84135+ }
84136+#endif
84137+
84138 vma->anon_vma = anon_vma;
84139 anon_vma_chain_link(vma, avc, anon_vma);
84140 allocated = NULL;
84141@@ -195,12 +217,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
84142
84143 if (unlikely(allocated))
84144 put_anon_vma(allocated);
84145+
84146+#ifdef CONFIG_PAX_SEGMEXEC
84147+ if (unlikely(avc_m))
84148+ anon_vma_chain_free(avc_m);
84149+#endif
84150+
84151 if (unlikely(avc))
84152 anon_vma_chain_free(avc);
84153 }
84154 return 0;
84155
84156 out_enomem_free_avc:
84157+
84158+#ifdef CONFIG_PAX_SEGMEXEC
84159+ if (avc_m)
84160+ anon_vma_chain_free(avc_m);
84161+#endif
84162+
84163 anon_vma_chain_free(avc);
84164 out_enomem:
84165 return -ENOMEM;
84166@@ -236,7 +270,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
84167 * Attach the anon_vmas from src to dst.
84168 * Returns 0 on success, -ENOMEM on failure.
84169 */
84170-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
84171+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
84172 {
84173 struct anon_vma_chain *avc, *pavc;
84174 struct anon_vma *root = NULL;
84175@@ -269,7 +303,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
84176 * the corresponding VMA in the parent process is attached to.
84177 * Returns 0 on success, non-zero on failure.
84178 */
84179-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
84180+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
84181 {
84182 struct anon_vma_chain *avc;
84183 struct anon_vma *anon_vma;
84184diff --git a/mm/shmem.c b/mm/shmem.c
84185index 1c44af7..cefe9a6 100644
84186--- a/mm/shmem.c
84187+++ b/mm/shmem.c
84188@@ -31,7 +31,7 @@
84189 #include <linux/export.h>
84190 #include <linux/swap.h>
84191
84192-static struct vfsmount *shm_mnt;
84193+struct vfsmount *shm_mnt;
84194
84195 #ifdef CONFIG_SHMEM
84196 /*
84197@@ -75,7 +75,7 @@ static struct vfsmount *shm_mnt;
84198 #define BOGO_DIRENT_SIZE 20
84199
84200 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
84201-#define SHORT_SYMLINK_LEN 128
84202+#define SHORT_SYMLINK_LEN 64
84203
84204 /*
84205 * shmem_fallocate and shmem_writepage communicate via inode->i_private
84206@@ -2201,6 +2201,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
84207 static int shmem_xattr_validate(const char *name)
84208 {
84209 struct { const char *prefix; size_t len; } arr[] = {
84210+
84211+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
84212+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
84213+#endif
84214+
84215 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
84216 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
84217 };
84218@@ -2256,6 +2261,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
84219 if (err)
84220 return err;
84221
84222+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
84223+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
84224+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
84225+ return -EOPNOTSUPP;
84226+ if (size > 8)
84227+ return -EINVAL;
84228+ }
84229+#endif
84230+
84231 return simple_xattr_set(&info->xattrs, name, value, size, flags);
84232 }
84233
84234@@ -2568,8 +2582,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
84235 int err = -ENOMEM;
84236
84237 /* Round up to L1_CACHE_BYTES to resist false sharing */
84238- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
84239- L1_CACHE_BYTES), GFP_KERNEL);
84240+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
84241 if (!sbinfo)
84242 return -ENOMEM;
84243
84244diff --git a/mm/slab.c b/mm/slab.c
84245index 856e4a1..fafb820 100644
84246--- a/mm/slab.c
84247+++ b/mm/slab.c
84248@@ -306,7 +306,7 @@ struct kmem_list3 {
84249 * Need this for bootstrapping a per node allocator.
84250 */
84251 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
84252-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
84253+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
84254 #define CACHE_CACHE 0
84255 #define SIZE_AC MAX_NUMNODES
84256 #define SIZE_L3 (2 * MAX_NUMNODES)
84257@@ -407,10 +407,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
84258 if ((x)->max_freeable < i) \
84259 (x)->max_freeable = i; \
84260 } while (0)
84261-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
84262-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
84263-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
84264-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
84265+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
84266+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
84267+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
84268+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
84269 #else
84270 #define STATS_INC_ACTIVE(x) do { } while (0)
84271 #define STATS_DEC_ACTIVE(x) do { } while (0)
84272@@ -518,7 +518,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
84273 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
84274 */
84275 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
84276- const struct slab *slab, void *obj)
84277+ const struct slab *slab, const void *obj)
84278 {
84279 u32 offset = (obj - slab->s_mem);
84280 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
84281@@ -539,12 +539,13 @@ EXPORT_SYMBOL(malloc_sizes);
84282 struct cache_names {
84283 char *name;
84284 char *name_dma;
84285+ char *name_usercopy;
84286 };
84287
84288 static struct cache_names __initdata cache_names[] = {
84289-#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
84290+#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)", .name_usercopy = "size-" #x "(USERCOPY)" },
84291 #include <linux/kmalloc_sizes.h>
84292- {NULL,}
84293+ {NULL}
84294 #undef CACHE
84295 };
84296
84297@@ -729,6 +730,12 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
84298 if (unlikely(gfpflags & GFP_DMA))
84299 return csizep->cs_dmacachep;
84300 #endif
84301+
84302+#ifdef CONFIG_PAX_USERCOPY_SLABS
84303+ if (unlikely(gfpflags & GFP_USERCOPY))
84304+ return csizep->cs_usercopycachep;
84305+#endif
84306+
84307 return csizep->cs_cachep;
84308 }
84309
84310@@ -1482,7 +1489,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
84311 return notifier_from_errno(err);
84312 }
84313
84314-static struct notifier_block __cpuinitdata cpucache_notifier = {
84315+static struct notifier_block cpucache_notifier = {
84316 &cpuup_callback, NULL, 0
84317 };
84318
84319@@ -1667,12 +1674,12 @@ void __init kmem_cache_init(void)
84320 */
84321
84322 sizes[INDEX_AC].cs_cachep = create_kmalloc_cache(names[INDEX_AC].name,
84323- sizes[INDEX_AC].cs_size, ARCH_KMALLOC_FLAGS);
84324+ sizes[INDEX_AC].cs_size, ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
84325
84326 if (INDEX_AC != INDEX_L3)
84327 sizes[INDEX_L3].cs_cachep =
84328 create_kmalloc_cache(names[INDEX_L3].name,
84329- sizes[INDEX_L3].cs_size, ARCH_KMALLOC_FLAGS);
84330+ sizes[INDEX_L3].cs_size, ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
84331
84332 slab_early_init = 0;
84333
84334@@ -1686,13 +1693,20 @@ void __init kmem_cache_init(void)
84335 */
84336 if (!sizes->cs_cachep)
84337 sizes->cs_cachep = create_kmalloc_cache(names->name,
84338- sizes->cs_size, ARCH_KMALLOC_FLAGS);
84339+ sizes->cs_size, ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
84340
84341 #ifdef CONFIG_ZONE_DMA
84342 sizes->cs_dmacachep = create_kmalloc_cache(
84343 names->name_dma, sizes->cs_size,
84344 SLAB_CACHE_DMA|ARCH_KMALLOC_FLAGS);
84345 #endif
84346+
84347+#ifdef CONFIG_PAX_USERCOPY_SLABS
84348+ sizes->cs_usercopycachep = create_kmalloc_cache(
84349+ names->name_usercopy, sizes->cs_size,
84350+ ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
84351+#endif
84352+
84353 sizes++;
84354 names++;
84355 }
84356@@ -3924,6 +3938,7 @@ void kfree(const void *objp)
84357
84358 if (unlikely(ZERO_OR_NULL_PTR(objp)))
84359 return;
84360+ VM_BUG_ON(!virt_addr_valid(objp));
84361 local_irq_save(flags);
84362 kfree_debugcheck(objp);
84363 c = virt_to_cache(objp);
84364@@ -4365,10 +4380,10 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
84365 }
84366 /* cpu stats */
84367 {
84368- unsigned long allochit = atomic_read(&cachep->allochit);
84369- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
84370- unsigned long freehit = atomic_read(&cachep->freehit);
84371- unsigned long freemiss = atomic_read(&cachep->freemiss);
84372+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
84373+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
84374+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
84375+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
84376
84377 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
84378 allochit, allocmiss, freehit, freemiss);
84379@@ -4600,13 +4615,71 @@ static const struct file_operations proc_slabstats_operations = {
84380 static int __init slab_proc_init(void)
84381 {
84382 #ifdef CONFIG_DEBUG_SLAB_LEAK
84383- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
84384+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
84385 #endif
84386 return 0;
84387 }
84388 module_init(slab_proc_init);
84389 #endif
84390
84391+bool is_usercopy_object(const void *ptr)
84392+{
84393+ struct page *page;
84394+ struct kmem_cache *cachep;
84395+
84396+ if (ZERO_OR_NULL_PTR(ptr))
84397+ return false;
84398+
84399+ if (!slab_is_available())
84400+ return false;
84401+
84402+ if (!virt_addr_valid(ptr))
84403+ return false;
84404+
84405+ page = virt_to_head_page(ptr);
84406+
84407+ if (!PageSlab(page))
84408+ return false;
84409+
84410+ cachep = page->slab_cache;
84411+ return cachep->flags & SLAB_USERCOPY;
84412+}
84413+
84414+#ifdef CONFIG_PAX_USERCOPY
84415+const char *check_heap_object(const void *ptr, unsigned long n)
84416+{
84417+ struct page *page;
84418+ struct kmem_cache *cachep;
84419+ struct slab *slabp;
84420+ unsigned int objnr;
84421+ unsigned long offset;
84422+
84423+ if (ZERO_OR_NULL_PTR(ptr))
84424+ return "<null>";
84425+
84426+ if (!virt_addr_valid(ptr))
84427+ return NULL;
84428+
84429+ page = virt_to_head_page(ptr);
84430+
84431+ if (!PageSlab(page))
84432+ return NULL;
84433+
84434+ cachep = page->slab_cache;
84435+ if (!(cachep->flags & SLAB_USERCOPY))
84436+ return cachep->name;
84437+
84438+ slabp = page->slab_page;
84439+ objnr = obj_to_index(cachep, slabp, ptr);
84440+ BUG_ON(objnr >= cachep->num);
84441+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
84442+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
84443+ return NULL;
84444+
84445+ return cachep->name;
84446+}
84447+#endif
84448+
84449 /**
84450 * ksize - get the actual amount of memory allocated for a given object
84451 * @objp: Pointer to the object
84452diff --git a/mm/slab.h b/mm/slab.h
84453index 34a98d6..73633d1 100644
84454--- a/mm/slab.h
84455+++ b/mm/slab.h
84456@@ -58,7 +58,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
84457
84458 /* Legal flag mask for kmem_cache_create(), for various configurations */
84459 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
84460- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
84461+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | SLAB_USERCOPY)
84462
84463 #if defined(CONFIG_DEBUG_SLAB)
84464 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
84465@@ -220,6 +220,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
84466 return s;
84467
84468 page = virt_to_head_page(x);
84469+
84470+ BUG_ON(!PageSlab(page));
84471+
84472 cachep = page->slab_cache;
84473 if (slab_equal_or_root(cachep, s))
84474 return cachep;
84475diff --git a/mm/slab_common.c b/mm/slab_common.c
84476index 3f3cd97..93b0236 100644
84477--- a/mm/slab_common.c
84478+++ b/mm/slab_common.c
84479@@ -22,7 +22,7 @@
84480
84481 #include "slab.h"
84482
84483-enum slab_state slab_state;
84484+enum slab_state slab_state __read_only;
84485 LIST_HEAD(slab_caches);
84486 DEFINE_MUTEX(slab_mutex);
84487 struct kmem_cache *kmem_cache;
84488@@ -209,7 +209,7 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
84489
84490 err = __kmem_cache_create(s, flags);
84491 if (!err) {
84492- s->refcount = 1;
84493+ atomic_set(&s->refcount, 1);
84494 list_add(&s->list, &slab_caches);
84495 memcg_cache_list_add(memcg, s);
84496 } else {
84497@@ -255,8 +255,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
84498
84499 get_online_cpus();
84500 mutex_lock(&slab_mutex);
84501- s->refcount--;
84502- if (!s->refcount) {
84503+ if (atomic_dec_and_test(&s->refcount)) {
84504 list_del(&s->list);
84505
84506 if (!__kmem_cache_shutdown(s)) {
84507@@ -302,7 +301,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
84508 panic("Creation of kmalloc slab %s size=%zd failed. Reason %d\n",
84509 name, size, err);
84510
84511- s->refcount = -1; /* Exempt from merging for now */
84512+ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
84513 }
84514
84515 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
84516@@ -315,7 +314,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
84517
84518 create_boot_cache(s, name, size, flags);
84519 list_add(&s->list, &slab_caches);
84520- s->refcount = 1;
84521+ atomic_set(&s->refcount, 1);
84522 return s;
84523 }
84524
84525diff --git a/mm/slob.c b/mm/slob.c
84526index eeed4a0..6ee34ec 100644
84527--- a/mm/slob.c
84528+++ b/mm/slob.c
84529@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
84530 /*
84531 * Return the size of a slob block.
84532 */
84533-static slobidx_t slob_units(slob_t *s)
84534+static slobidx_t slob_units(const slob_t *s)
84535 {
84536 if (s->units > 0)
84537 return s->units;
84538@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
84539 /*
84540 * Return the next free slob block pointer after this one.
84541 */
84542-static slob_t *slob_next(slob_t *s)
84543+static slob_t *slob_next(const slob_t *s)
84544 {
84545 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
84546 slobidx_t next;
84547@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
84548 /*
84549 * Returns true if s is the last free block in its page.
84550 */
84551-static int slob_last(slob_t *s)
84552+static int slob_last(const slob_t *s)
84553 {
84554 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
84555 }
84556
84557-static void *slob_new_pages(gfp_t gfp, int order, int node)
84558+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
84559 {
84560- void *page;
84561+ struct page *page;
84562
84563 #ifdef CONFIG_NUMA
84564 if (node != NUMA_NO_NODE)
84565@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
84566 if (!page)
84567 return NULL;
84568
84569- return page_address(page);
84570+ __SetPageSlab(page);
84571+ return page;
84572 }
84573
84574-static void slob_free_pages(void *b, int order)
84575+static void slob_free_pages(struct page *sp, int order)
84576 {
84577 if (current->reclaim_state)
84578 current->reclaim_state->reclaimed_slab += 1 << order;
84579- free_pages((unsigned long)b, order);
84580+ __ClearPageSlab(sp);
84581+ reset_page_mapcount(sp);
84582+ sp->private = 0;
84583+ __free_pages(sp, order);
84584 }
84585
84586 /*
84587@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
84588
84589 /* Not enough space: must allocate a new page */
84590 if (!b) {
84591- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
84592- if (!b)
84593+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
84594+ if (!sp)
84595 return NULL;
84596- sp = virt_to_page(b);
84597- __SetPageSlab(sp);
84598+ b = page_address(sp);
84599
84600 spin_lock_irqsave(&slob_lock, flags);
84601 sp->units = SLOB_UNITS(PAGE_SIZE);
84602 sp->freelist = b;
84603+ sp->private = 0;
84604 INIT_LIST_HEAD(&sp->list);
84605 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
84606 set_slob_page_free(sp, slob_list);
84607@@ -359,9 +363,7 @@ static void slob_free(void *block, int size)
84608 if (slob_page_free(sp))
84609 clear_slob_page_free(sp);
84610 spin_unlock_irqrestore(&slob_lock, flags);
84611- __ClearPageSlab(sp);
84612- page_mapcount_reset(sp);
84613- slob_free_pages(b, 0);
84614+ slob_free_pages(sp, 0);
84615 return;
84616 }
84617
84618@@ -424,11 +426,10 @@ out:
84619 */
84620
84621 static __always_inline void *
84622-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
84623+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
84624 {
84625- unsigned int *m;
84626- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
84627- void *ret;
84628+ slob_t *m;
84629+ void *ret = NULL;
84630
84631 gfp &= gfp_allowed_mask;
84632
84633@@ -442,23 +443,41 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
84634
84635 if (!m)
84636 return NULL;
84637- *m = size;
84638+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
84639+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
84640+ m[0].units = size;
84641+ m[1].units = align;
84642 ret = (void *)m + align;
84643
84644 trace_kmalloc_node(caller, ret,
84645 size, size + align, gfp, node);
84646 } else {
84647 unsigned int order = get_order(size);
84648+ struct page *page;
84649
84650 if (likely(order))
84651 gfp |= __GFP_COMP;
84652- ret = slob_new_pages(gfp, order, node);
84653+ page = slob_new_pages(gfp, order, node);
84654+ if (page) {
84655+ ret = page_address(page);
84656+ page->private = size;
84657+ }
84658
84659 trace_kmalloc_node(caller, ret,
84660 size, PAGE_SIZE << order, gfp, node);
84661 }
84662
84663- kmemleak_alloc(ret, size, 1, gfp);
84664+ return ret;
84665+}
84666+
84667+static __always_inline void *
84668+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
84669+{
84670+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
84671+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
84672+
84673+ if (!ZERO_OR_NULL_PTR(ret))
84674+ kmemleak_alloc(ret, size, 1, gfp);
84675 return ret;
84676 }
84677
84678@@ -493,34 +512,112 @@ void kfree(const void *block)
84679 return;
84680 kmemleak_free(block);
84681
84682+ VM_BUG_ON(!virt_addr_valid(block));
84683 sp = virt_to_page(block);
84684- if (PageSlab(sp)) {
84685+ VM_BUG_ON(!PageSlab(sp));
84686+ if (!sp->private) {
84687 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
84688- unsigned int *m = (unsigned int *)(block - align);
84689- slob_free(m, *m + align);
84690- } else
84691+ slob_t *m = (slob_t *)(block - align);
84692+ slob_free(m, m[0].units + align);
84693+ } else {
84694+ __ClearPageSlab(sp);
84695+ reset_page_mapcount(sp);
84696+ sp->private = 0;
84697 __free_pages(sp, compound_order(sp));
84698+ }
84699 }
84700 EXPORT_SYMBOL(kfree);
84701
84702+bool is_usercopy_object(const void *ptr)
84703+{
84704+ if (!slab_is_available())
84705+ return false;
84706+
84707+ // PAX: TODO
84708+
84709+ return false;
84710+}
84711+
84712+#ifdef CONFIG_PAX_USERCOPY
84713+const char *check_heap_object(const void *ptr, unsigned long n)
84714+{
84715+ struct page *page;
84716+ const slob_t *free;
84717+ const void *base;
84718+ unsigned long flags;
84719+
84720+ if (ZERO_OR_NULL_PTR(ptr))
84721+ return "<null>";
84722+
84723+ if (!virt_addr_valid(ptr))
84724+ return NULL;
84725+
84726+ page = virt_to_head_page(ptr);
84727+ if (!PageSlab(page))
84728+ return NULL;
84729+
84730+ if (page->private) {
84731+ base = page;
84732+ if (base <= ptr && n <= page->private - (ptr - base))
84733+ return NULL;
84734+ return "<slob>";
84735+ }
84736+
84737+ /* some tricky double walking to find the chunk */
84738+ spin_lock_irqsave(&slob_lock, flags);
84739+ base = (void *)((unsigned long)ptr & PAGE_MASK);
84740+ free = page->freelist;
84741+
84742+ while (!slob_last(free) && (void *)free <= ptr) {
84743+ base = free + slob_units(free);
84744+ free = slob_next(free);
84745+ }
84746+
84747+ while (base < (void *)free) {
84748+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
84749+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
84750+ int offset;
84751+
84752+ if (ptr < base + align)
84753+ break;
84754+
84755+ offset = ptr - base - align;
84756+ if (offset >= m) {
84757+ base += size;
84758+ continue;
84759+ }
84760+
84761+ if (n > m - offset)
84762+ break;
84763+
84764+ spin_unlock_irqrestore(&slob_lock, flags);
84765+ return NULL;
84766+ }
84767+
84768+ spin_unlock_irqrestore(&slob_lock, flags);
84769+ return "<slob>";
84770+}
84771+#endif
84772+
84773 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
84774 size_t ksize(const void *block)
84775 {
84776 struct page *sp;
84777 int align;
84778- unsigned int *m;
84779+ slob_t *m;
84780
84781 BUG_ON(!block);
84782 if (unlikely(block == ZERO_SIZE_PTR))
84783 return 0;
84784
84785 sp = virt_to_page(block);
84786- if (unlikely(!PageSlab(sp)))
84787- return PAGE_SIZE << compound_order(sp);
84788+ VM_BUG_ON(!PageSlab(sp));
84789+ if (sp->private)
84790+ return sp->private;
84791
84792 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
84793- m = (unsigned int *)(block - align);
84794- return SLOB_UNITS(*m) * SLOB_UNIT;
84795+ m = (slob_t *)(block - align);
84796+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
84797 }
84798 EXPORT_SYMBOL(ksize);
84799
84800@@ -536,23 +633,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
84801
84802 void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
84803 {
84804- void *b;
84805+ void *b = NULL;
84806
84807 flags &= gfp_allowed_mask;
84808
84809 lockdep_trace_alloc(flags);
84810
84811+#ifdef CONFIG_PAX_USERCOPY_SLABS
84812+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
84813+#else
84814 if (c->size < PAGE_SIZE) {
84815 b = slob_alloc(c->size, flags, c->align, node);
84816 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
84817 SLOB_UNITS(c->size) * SLOB_UNIT,
84818 flags, node);
84819 } else {
84820- b = slob_new_pages(flags, get_order(c->size), node);
84821+ struct page *sp;
84822+
84823+ sp = slob_new_pages(flags, get_order(c->size), node);
84824+ if (sp) {
84825+ b = page_address(sp);
84826+ sp->private = c->size;
84827+ }
84828 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
84829 PAGE_SIZE << get_order(c->size),
84830 flags, node);
84831 }
84832+#endif
84833
84834 if (c->ctor)
84835 c->ctor(b);
84836@@ -564,10 +671,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
84837
84838 static void __kmem_cache_free(void *b, int size)
84839 {
84840- if (size < PAGE_SIZE)
84841+ struct page *sp;
84842+
84843+ sp = virt_to_page(b);
84844+ BUG_ON(!PageSlab(sp));
84845+ if (!sp->private)
84846 slob_free(b, size);
84847 else
84848- slob_free_pages(b, get_order(size));
84849+ slob_free_pages(sp, get_order(size));
84850 }
84851
84852 static void kmem_rcu_free(struct rcu_head *head)
84853@@ -580,17 +691,31 @@ static void kmem_rcu_free(struct rcu_head *head)
84854
84855 void kmem_cache_free(struct kmem_cache *c, void *b)
84856 {
84857+ int size = c->size;
84858+
84859+#ifdef CONFIG_PAX_USERCOPY_SLABS
84860+ if (size + c->align < PAGE_SIZE) {
84861+ size += c->align;
84862+ b -= c->align;
84863+ }
84864+#endif
84865+
84866 kmemleak_free_recursive(b, c->flags);
84867 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
84868 struct slob_rcu *slob_rcu;
84869- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
84870- slob_rcu->size = c->size;
84871+ slob_rcu = b + (size - sizeof(struct slob_rcu));
84872+ slob_rcu->size = size;
84873 call_rcu(&slob_rcu->head, kmem_rcu_free);
84874 } else {
84875- __kmem_cache_free(b, c->size);
84876+ __kmem_cache_free(b, size);
84877 }
84878
84879+#ifdef CONFIG_PAX_USERCOPY_SLABS
84880+ trace_kfree(_RET_IP_, b);
84881+#else
84882 trace_kmem_cache_free(_RET_IP_, b);
84883+#endif
84884+
84885 }
84886 EXPORT_SYMBOL(kmem_cache_free);
84887
84888diff --git a/mm/slub.c b/mm/slub.c
84889index 4aec537..8043df1 100644
84890--- a/mm/slub.c
84891+++ b/mm/slub.c
84892@@ -197,7 +197,7 @@ struct track {
84893
84894 enum track_item { TRACK_ALLOC, TRACK_FREE };
84895
84896-#ifdef CONFIG_SYSFS
84897+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
84898 static int sysfs_slab_add(struct kmem_cache *);
84899 static int sysfs_slab_alias(struct kmem_cache *, const char *);
84900 static void sysfs_slab_remove(struct kmem_cache *);
84901@@ -518,7 +518,7 @@ static void print_track(const char *s, struct track *t)
84902 if (!t->addr)
84903 return;
84904
84905- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
84906+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
84907 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
84908 #ifdef CONFIG_STACKTRACE
84909 {
84910@@ -2653,7 +2653,7 @@ static int slub_min_objects;
84911 * Merge control. If this is set then no merging of slab caches will occur.
84912 * (Could be removed. This was introduced to pacify the merge skeptics.)
84913 */
84914-static int slub_nomerge;
84915+static int slub_nomerge = 1;
84916
84917 /*
84918 * Calculate the order of allocation given an slab object size.
84919@@ -3181,6 +3181,10 @@ EXPORT_SYMBOL(kmalloc_caches);
84920 static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
84921 #endif
84922
84923+#ifdef CONFIG_PAX_USERCOPY_SLABS
84924+static struct kmem_cache *kmalloc_usercopy_caches[SLUB_PAGE_SHIFT];
84925+#endif
84926+
84927 static int __init setup_slub_min_order(char *str)
84928 {
84929 get_option(&str, &slub_min_order);
84930@@ -3272,6 +3276,13 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
84931 return kmalloc_dma_caches[index];
84932
84933 #endif
84934+
84935+#ifdef CONFIG_PAX_USERCOPY_SLABS
84936+ if (flags & SLAB_USERCOPY)
84937+ return kmalloc_usercopy_caches[index];
84938+
84939+#endif
84940+
84941 return kmalloc_caches[index];
84942 }
84943
84944@@ -3340,6 +3351,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
84945 EXPORT_SYMBOL(__kmalloc_node);
84946 #endif
84947
84948+bool is_usercopy_object(const void *ptr)
84949+{
84950+ struct page *page;
84951+ struct kmem_cache *s;
84952+
84953+ if (ZERO_OR_NULL_PTR(ptr))
84954+ return false;
84955+
84956+ if (!slab_is_available())
84957+ return false;
84958+
84959+ if (!virt_addr_valid(ptr))
84960+ return false;
84961+
84962+ page = virt_to_head_page(ptr);
84963+
84964+ if (!PageSlab(page))
84965+ return false;
84966+
84967+ s = page->slab_cache;
84968+ return s->flags & SLAB_USERCOPY;
84969+}
84970+
84971+#ifdef CONFIG_PAX_USERCOPY
84972+const char *check_heap_object(const void *ptr, unsigned long n)
84973+{
84974+ struct page *page;
84975+ struct kmem_cache *s;
84976+ unsigned long offset;
84977+
84978+ if (ZERO_OR_NULL_PTR(ptr))
84979+ return "<null>";
84980+
84981+ if (!virt_addr_valid(ptr))
84982+ return NULL;
84983+
84984+ page = virt_to_head_page(ptr);
84985+
84986+ if (!PageSlab(page))
84987+ return NULL;
84988+
84989+ s = page->slab_cache;
84990+ if (!(s->flags & SLAB_USERCOPY))
84991+ return s->name;
84992+
84993+ offset = (ptr - page_address(page)) % s->size;
84994+ if (offset <= s->object_size && n <= s->object_size - offset)
84995+ return NULL;
84996+
84997+ return s->name;
84998+}
84999+#endif
85000+
85001 size_t ksize(const void *object)
85002 {
85003 struct page *page;
85004@@ -3404,6 +3468,7 @@ void kfree(const void *x)
85005 if (unlikely(ZERO_OR_NULL_PTR(x)))
85006 return;
85007
85008+ VM_BUG_ON(!virt_addr_valid(x));
85009 page = virt_to_head_page(x);
85010 if (unlikely(!PageSlab(page))) {
85011 BUG_ON(!PageCompound(page));
85012@@ -3712,17 +3777,17 @@ void __init kmem_cache_init(void)
85013
85014 /* Caches that are not of the two-to-the-power-of size */
85015 if (KMALLOC_MIN_SIZE <= 32) {
85016- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
85017+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
85018 caches++;
85019 }
85020
85021 if (KMALLOC_MIN_SIZE <= 64) {
85022- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
85023+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
85024 caches++;
85025 }
85026
85027 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
85028- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
85029+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
85030 caches++;
85031 }
85032
85033@@ -3764,6 +3829,22 @@ void __init kmem_cache_init(void)
85034 }
85035 }
85036 #endif
85037+
85038+#ifdef CONFIG_PAX_USERCOPY_SLABS
85039+ for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
85040+ struct kmem_cache *s = kmalloc_caches[i];
85041+
85042+ if (s && s->size) {
85043+ char *name = kasprintf(GFP_NOWAIT,
85044+ "usercopy-kmalloc-%d", s->object_size);
85045+
85046+ BUG_ON(!name);
85047+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(name,
85048+ s->object_size, SLAB_USERCOPY);
85049+ }
85050+ }
85051+#endif
85052+
85053 printk(KERN_INFO
85054 "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
85055 " CPUs=%d, Nodes=%d\n",
85056@@ -3790,7 +3871,7 @@ static int slab_unmergeable(struct kmem_cache *s)
85057 /*
85058 * We may have set a slab to be unmergeable during bootstrap.
85059 */
85060- if (s->refcount < 0)
85061+ if (atomic_read(&s->refcount) < 0)
85062 return 1;
85063
85064 return 0;
85065@@ -3848,7 +3929,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
85066
85067 s = find_mergeable(memcg, size, align, flags, name, ctor);
85068 if (s) {
85069- s->refcount++;
85070+ atomic_inc(&s->refcount);
85071 /*
85072 * Adjust the object sizes so that we clear
85073 * the complete object on kzalloc.
85074@@ -3857,7 +3938,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
85075 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
85076
85077 if (sysfs_slab_alias(s, name)) {
85078- s->refcount--;
85079+ atomic_dec(&s->refcount);
85080 s = NULL;
85081 }
85082 }
85083@@ -3919,7 +4000,7 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
85084 return NOTIFY_OK;
85085 }
85086
85087-static struct notifier_block __cpuinitdata slab_notifier = {
85088+static struct notifier_block slab_notifier = {
85089 .notifier_call = slab_cpuup_callback
85090 };
85091
85092@@ -3977,7 +4058,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
85093 }
85094 #endif
85095
85096-#ifdef CONFIG_SYSFS
85097+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
85098 static int count_inuse(struct page *page)
85099 {
85100 return page->inuse;
85101@@ -4364,12 +4445,12 @@ static void resiliency_test(void)
85102 validate_slab_cache(kmalloc_caches[9]);
85103 }
85104 #else
85105-#ifdef CONFIG_SYSFS
85106+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
85107 static void resiliency_test(void) {};
85108 #endif
85109 #endif
85110
85111-#ifdef CONFIG_SYSFS
85112+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
85113 enum slab_stat_type {
85114 SL_ALL, /* All slabs */
85115 SL_PARTIAL, /* Only partially allocated slabs */
85116@@ -4613,7 +4694,7 @@ SLAB_ATTR_RO(ctor);
85117
85118 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
85119 {
85120- return sprintf(buf, "%d\n", s->refcount - 1);
85121+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
85122 }
85123 SLAB_ATTR_RO(aliases);
85124
85125@@ -5266,6 +5347,7 @@ static char *create_unique_id(struct kmem_cache *s)
85126 return name;
85127 }
85128
85129+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
85130 static int sysfs_slab_add(struct kmem_cache *s)
85131 {
85132 int err;
85133@@ -5289,7 +5371,7 @@ static int sysfs_slab_add(struct kmem_cache *s)
85134 }
85135
85136 s->kobj.kset = slab_kset;
85137- err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
85138+ err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
85139 if (err) {
85140 kobject_put(&s->kobj);
85141 return err;
85142@@ -5323,6 +5405,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
85143 kobject_del(&s->kobj);
85144 kobject_put(&s->kobj);
85145 }
85146+#endif
85147
85148 /*
85149 * Need to buffer aliases during bootup until sysfs becomes
85150@@ -5336,6 +5419,7 @@ struct saved_alias {
85151
85152 static struct saved_alias *alias_list;
85153
85154+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
85155 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
85156 {
85157 struct saved_alias *al;
85158@@ -5358,6 +5442,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
85159 alias_list = al;
85160 return 0;
85161 }
85162+#endif
85163
85164 static int __init slab_sysfs_init(void)
85165 {
85166diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
85167index 1b7e22a..3fcd4f3 100644
85168--- a/mm/sparse-vmemmap.c
85169+++ b/mm/sparse-vmemmap.c
85170@@ -128,7 +128,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
85171 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
85172 if (!p)
85173 return NULL;
85174- pud_populate(&init_mm, pud, p);
85175+ pud_populate_kernel(&init_mm, pud, p);
85176 }
85177 return pud;
85178 }
85179@@ -140,7 +140,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
85180 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
85181 if (!p)
85182 return NULL;
85183- pgd_populate(&init_mm, pgd, p);
85184+ pgd_populate_kernel(&init_mm, pgd, p);
85185 }
85186 return pgd;
85187 }
85188diff --git a/mm/sparse.c b/mm/sparse.c
85189index 7ca6dc8..6472aa1 100644
85190--- a/mm/sparse.c
85191+++ b/mm/sparse.c
85192@@ -783,7 +783,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
85193
85194 for (i = 0; i < PAGES_PER_SECTION; i++) {
85195 if (PageHWPoison(&memmap[i])) {
85196- atomic_long_sub(1, &num_poisoned_pages);
85197+ atomic_long_sub_unchecked(1, &num_poisoned_pages);
85198 ClearPageHWPoison(&memmap[i]);
85199 }
85200 }
85201diff --git a/mm/swap.c b/mm/swap.c
85202index 8a529a0..154ef26 100644
85203--- a/mm/swap.c
85204+++ b/mm/swap.c
85205@@ -30,6 +30,7 @@
85206 #include <linux/backing-dev.h>
85207 #include <linux/memcontrol.h>
85208 #include <linux/gfp.h>
85209+#include <linux/hugetlb.h>
85210
85211 #include "internal.h"
85212
85213@@ -72,6 +73,8 @@ static void __put_compound_page(struct page *page)
85214
85215 __page_cache_release(page);
85216 dtor = get_compound_page_dtor(page);
85217+ if (!PageHuge(page))
85218+ BUG_ON(dtor != free_compound_page);
85219 (*dtor)(page);
85220 }
85221
85222diff --git a/mm/swapfile.c b/mm/swapfile.c
85223index a1f7772..9e982ac 100644
85224--- a/mm/swapfile.c
85225+++ b/mm/swapfile.c
85226@@ -66,7 +66,7 @@ static DEFINE_MUTEX(swapon_mutex);
85227
85228 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
85229 /* Activity counter to indicate that a swapon or swapoff has occurred */
85230-static atomic_t proc_poll_event = ATOMIC_INIT(0);
85231+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
85232
85233 static inline unsigned char swap_count(unsigned char ent)
85234 {
85235@@ -1683,7 +1683,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
85236 }
85237 filp_close(swap_file, NULL);
85238 err = 0;
85239- atomic_inc(&proc_poll_event);
85240+ atomic_inc_unchecked(&proc_poll_event);
85241 wake_up_interruptible(&proc_poll_wait);
85242
85243 out_dput:
85244@@ -1700,8 +1700,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
85245
85246 poll_wait(file, &proc_poll_wait, wait);
85247
85248- if (seq->poll_event != atomic_read(&proc_poll_event)) {
85249- seq->poll_event = atomic_read(&proc_poll_event);
85250+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
85251+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
85252 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
85253 }
85254
85255@@ -1799,7 +1799,7 @@ static int swaps_open(struct inode *inode, struct file *file)
85256 return ret;
85257
85258 seq = file->private_data;
85259- seq->poll_event = atomic_read(&proc_poll_event);
85260+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
85261 return 0;
85262 }
85263
85264@@ -2142,7 +2142,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
85265 (frontswap_map) ? "FS" : "");
85266
85267 mutex_unlock(&swapon_mutex);
85268- atomic_inc(&proc_poll_event);
85269+ atomic_inc_unchecked(&proc_poll_event);
85270 wake_up_interruptible(&proc_poll_wait);
85271
85272 if (S_ISREG(inode->i_mode))
85273diff --git a/mm/util.c b/mm/util.c
85274index ab1424d..7c5bd5a 100644
85275--- a/mm/util.c
85276+++ b/mm/util.c
85277@@ -294,6 +294,12 @@ done:
85278 void arch_pick_mmap_layout(struct mm_struct *mm)
85279 {
85280 mm->mmap_base = TASK_UNMAPPED_BASE;
85281+
85282+#ifdef CONFIG_PAX_RANDMMAP
85283+ if (mm->pax_flags & MF_PAX_RANDMMAP)
85284+ mm->mmap_base += mm->delta_mmap;
85285+#endif
85286+
85287 mm->get_unmapped_area = arch_get_unmapped_area;
85288 mm->unmap_area = arch_unmap_area;
85289 }
85290diff --git a/mm/vmalloc.c b/mm/vmalloc.c
85291index 0f751f2..2bc3bd1 100644
85292--- a/mm/vmalloc.c
85293+++ b/mm/vmalloc.c
85294@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
85295
85296 pte = pte_offset_kernel(pmd, addr);
85297 do {
85298- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
85299- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
85300+
85301+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
85302+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
85303+ BUG_ON(!pte_exec(*pte));
85304+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
85305+ continue;
85306+ }
85307+#endif
85308+
85309+ {
85310+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
85311+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
85312+ }
85313 } while (pte++, addr += PAGE_SIZE, addr != end);
85314 }
85315
85316@@ -100,16 +111,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
85317 pte = pte_alloc_kernel(pmd, addr);
85318 if (!pte)
85319 return -ENOMEM;
85320+
85321+ pax_open_kernel();
85322 do {
85323 struct page *page = pages[*nr];
85324
85325- if (WARN_ON(!pte_none(*pte)))
85326+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
85327+ if (pgprot_val(prot) & _PAGE_NX)
85328+#endif
85329+
85330+ if (!pte_none(*pte)) {
85331+ pax_close_kernel();
85332+ WARN_ON(1);
85333 return -EBUSY;
85334- if (WARN_ON(!page))
85335+ }
85336+ if (!page) {
85337+ pax_close_kernel();
85338+ WARN_ON(1);
85339 return -ENOMEM;
85340+ }
85341 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
85342 (*nr)++;
85343 } while (pte++, addr += PAGE_SIZE, addr != end);
85344+ pax_close_kernel();
85345 return 0;
85346 }
85347
85348@@ -119,7 +143,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
85349 pmd_t *pmd;
85350 unsigned long next;
85351
85352- pmd = pmd_alloc(&init_mm, pud, addr);
85353+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
85354 if (!pmd)
85355 return -ENOMEM;
85356 do {
85357@@ -136,7 +160,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
85358 pud_t *pud;
85359 unsigned long next;
85360
85361- pud = pud_alloc(&init_mm, pgd, addr);
85362+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
85363 if (!pud)
85364 return -ENOMEM;
85365 do {
85366@@ -196,6 +220,12 @@ int is_vmalloc_or_module_addr(const void *x)
85367 if (addr >= MODULES_VADDR && addr < MODULES_END)
85368 return 1;
85369 #endif
85370+
85371+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
85372+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
85373+ return 1;
85374+#endif
85375+
85376 return is_vmalloc_addr(x);
85377 }
85378
85379@@ -216,8 +246,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
85380
85381 if (!pgd_none(*pgd)) {
85382 pud_t *pud = pud_offset(pgd, addr);
85383+#ifdef CONFIG_X86
85384+ if (!pud_large(*pud))
85385+#endif
85386 if (!pud_none(*pud)) {
85387 pmd_t *pmd = pmd_offset(pud, addr);
85388+#ifdef CONFIG_X86
85389+ if (!pmd_large(*pmd))
85390+#endif
85391 if (!pmd_none(*pmd)) {
85392 pte_t *ptep, pte;
85393
85394@@ -329,7 +365,7 @@ static void purge_vmap_area_lazy(void);
85395 * Allocate a region of KVA of the specified size and alignment, within the
85396 * vstart and vend.
85397 */
85398-static struct vmap_area *alloc_vmap_area(unsigned long size,
85399+static __size_overflow(1) struct vmap_area *alloc_vmap_area(unsigned long size,
85400 unsigned long align,
85401 unsigned long vstart, unsigned long vend,
85402 int node, gfp_t gfp_mask)
85403@@ -1328,6 +1364,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
85404 struct vm_struct *area;
85405
85406 BUG_ON(in_interrupt());
85407+
85408+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
85409+ if (flags & VM_KERNEXEC) {
85410+ if (start != VMALLOC_START || end != VMALLOC_END)
85411+ return NULL;
85412+ start = (unsigned long)MODULES_EXEC_VADDR;
85413+ end = (unsigned long)MODULES_EXEC_END;
85414+ }
85415+#endif
85416+
85417 if (flags & VM_IOREMAP) {
85418 int bit = fls(size);
85419
85420@@ -1569,6 +1615,11 @@ void *vmap(struct page **pages, unsigned int count,
85421 if (count > totalram_pages)
85422 return NULL;
85423
85424+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
85425+ if (!(pgprot_val(prot) & _PAGE_NX))
85426+ flags |= VM_KERNEXEC;
85427+#endif
85428+
85429 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
85430 __builtin_return_address(0));
85431 if (!area)
85432@@ -1670,6 +1721,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
85433 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
85434 goto fail;
85435
85436+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
85437+ if (!(pgprot_val(prot) & _PAGE_NX))
85438+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
85439+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
85440+ else
85441+#endif
85442+
85443 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
85444 start, end, node, gfp_mask, caller);
85445 if (!area)
85446@@ -1845,10 +1903,9 @@ EXPORT_SYMBOL(vzalloc_node);
85447 * For tight control over page level allocator and protection flags
85448 * use __vmalloc() instead.
85449 */
85450-
85451 void *vmalloc_exec(unsigned long size)
85452 {
85453- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
85454+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
85455 NUMA_NO_NODE, __builtin_return_address(0));
85456 }
85457
85458@@ -2139,6 +2196,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
85459 unsigned long uaddr = vma->vm_start;
85460 unsigned long usize = vma->vm_end - vma->vm_start;
85461
85462+ BUG_ON(vma->vm_mirror);
85463+
85464 if ((PAGE_SIZE-1) & (unsigned long)addr)
85465 return -EINVAL;
85466
85467@@ -2578,7 +2637,11 @@ static int s_show(struct seq_file *m, void *p)
85468 v->addr, v->addr + v->size, v->size);
85469
85470 if (v->caller)
85471+#ifdef CONFIG_GRKERNSEC_HIDESYM
85472+ seq_printf(m, " %pK", v->caller);
85473+#else
85474 seq_printf(m, " %pS", v->caller);
85475+#endif
85476
85477 if (v->nr_pages)
85478 seq_printf(m, " pages=%d", v->nr_pages);
85479diff --git a/mm/vmstat.c b/mm/vmstat.c
85480index e1d8ed1..253fa3c 100644
85481--- a/mm/vmstat.c
85482+++ b/mm/vmstat.c
85483@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
85484 *
85485 * vm_stat contains the global counters
85486 */
85487-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
85488+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
85489 EXPORT_SYMBOL(vm_stat);
85490
85491 #ifdef CONFIG_SMP
85492@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
85493 v = p->vm_stat_diff[i];
85494 p->vm_stat_diff[i] = 0;
85495 local_irq_restore(flags);
85496- atomic_long_add(v, &zone->vm_stat[i]);
85497+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
85498 global_diff[i] += v;
85499 #ifdef CONFIG_NUMA
85500 /* 3 seconds idle till flush */
85501@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
85502
85503 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
85504 if (global_diff[i])
85505- atomic_long_add(global_diff[i], &vm_stat[i]);
85506+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
85507 }
85508
85509 void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
85510@@ -503,8 +503,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
85511 if (pset->vm_stat_diff[i]) {
85512 int v = pset->vm_stat_diff[i];
85513 pset->vm_stat_diff[i] = 0;
85514- atomic_long_add(v, &zone->vm_stat[i]);
85515- atomic_long_add(v, &vm_stat[i]);
85516+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
85517+ atomic_long_add_unchecked(v, &vm_stat[i]);
85518 }
85519 }
85520 #endif
85521@@ -1224,7 +1224,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
85522 return NOTIFY_OK;
85523 }
85524
85525-static struct notifier_block __cpuinitdata vmstat_notifier =
85526+static struct notifier_block vmstat_notifier =
85527 { &vmstat_cpuup_callback, NULL, 0 };
85528 #endif
85529
85530@@ -1239,10 +1239,20 @@ static int __init setup_vmstat(void)
85531 start_cpu_timer(cpu);
85532 #endif
85533 #ifdef CONFIG_PROC_FS
85534- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
85535- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
85536- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
85537- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
85538+ {
85539+ mode_t gr_mode = S_IRUGO;
85540+#ifdef CONFIG_GRKERNSEC_PROC_ADD
85541+ gr_mode = S_IRUSR;
85542+#endif
85543+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
85544+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
85545+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
85546+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
85547+#else
85548+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
85549+#endif
85550+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
85551+ }
85552 #endif
85553 return 0;
85554 }
85555diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
85556index 85addcd..c429a13 100644
85557--- a/net/8021q/vlan.c
85558+++ b/net/8021q/vlan.c
85559@@ -114,6 +114,13 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
85560 if (vlan_id)
85561 vlan_vid_del(real_dev, vlan_id);
85562
85563+ /* Take it out of our own structures, but be sure to interlock with
85564+ * HW accelerating devices or SW vlan input packet processing if
85565+ * VLAN is not 0 (leave it there for 802.1p).
85566+ */
85567+ if (vlan_id)
85568+ vlan_vid_del(real_dev, vlan_id);
85569+
85570 /* Get rid of the vlan's reference to real_dev */
85571 dev_put(real_dev);
85572 }
85573@@ -496,7 +503,7 @@ out:
85574 return NOTIFY_DONE;
85575 }
85576
85577-static struct notifier_block vlan_notifier_block __read_mostly = {
85578+static struct notifier_block vlan_notifier_block = {
85579 .notifier_call = vlan_device_event,
85580 };
85581
85582@@ -571,8 +578,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
85583 err = -EPERM;
85584 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
85585 break;
85586- if ((args.u.name_type >= 0) &&
85587- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
85588+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
85589 struct vlan_net *vn;
85590
85591 vn = net_generic(net, vlan_net_id);
85592diff --git a/net/9p/mod.c b/net/9p/mod.c
85593index 6ab36ae..6f1841b 100644
85594--- a/net/9p/mod.c
85595+++ b/net/9p/mod.c
85596@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
85597 void v9fs_register_trans(struct p9_trans_module *m)
85598 {
85599 spin_lock(&v9fs_trans_lock);
85600- list_add_tail(&m->list, &v9fs_trans_list);
85601+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
85602 spin_unlock(&v9fs_trans_lock);
85603 }
85604 EXPORT_SYMBOL(v9fs_register_trans);
85605@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
85606 void v9fs_unregister_trans(struct p9_trans_module *m)
85607 {
85608 spin_lock(&v9fs_trans_lock);
85609- list_del_init(&m->list);
85610+ pax_list_del_init((struct list_head *)&m->list);
85611 spin_unlock(&v9fs_trans_lock);
85612 }
85613 EXPORT_SYMBOL(v9fs_unregister_trans);
85614diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
85615index 02efb25..41541a9 100644
85616--- a/net/9p/trans_fd.c
85617+++ b/net/9p/trans_fd.c
85618@@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
85619 oldfs = get_fs();
85620 set_fs(get_ds());
85621 /* The cast to a user pointer is valid due to the set_fs() */
85622- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
85623+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
85624 set_fs(oldfs);
85625
85626 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
85627diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
85628index 876fbe8..8bbea9f 100644
85629--- a/net/atm/atm_misc.c
85630+++ b/net/atm/atm_misc.c
85631@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
85632 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
85633 return 1;
85634 atm_return(vcc, truesize);
85635- atomic_inc(&vcc->stats->rx_drop);
85636+ atomic_inc_unchecked(&vcc->stats->rx_drop);
85637 return 0;
85638 }
85639 EXPORT_SYMBOL(atm_charge);
85640@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
85641 }
85642 }
85643 atm_return(vcc, guess);
85644- atomic_inc(&vcc->stats->rx_drop);
85645+ atomic_inc_unchecked(&vcc->stats->rx_drop);
85646 return NULL;
85647 }
85648 EXPORT_SYMBOL(atm_alloc_charge);
85649@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
85650
85651 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
85652 {
85653-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
85654+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
85655 __SONET_ITEMS
85656 #undef __HANDLE_ITEM
85657 }
85658@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
85659
85660 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
85661 {
85662-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
85663+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
85664 __SONET_ITEMS
85665 #undef __HANDLE_ITEM
85666 }
85667diff --git a/net/atm/lec.h b/net/atm/lec.h
85668index a86aff9..3a0d6f6 100644
85669--- a/net/atm/lec.h
85670+++ b/net/atm/lec.h
85671@@ -48,7 +48,7 @@ struct lane2_ops {
85672 const u8 *tlvs, u32 sizeoftlvs);
85673 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
85674 const u8 *tlvs, u32 sizeoftlvs);
85675-};
85676+} __no_const;
85677
85678 /*
85679 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
85680diff --git a/net/atm/proc.c b/net/atm/proc.c
85681index 6ac35ff..ac0e136 100644
85682--- a/net/atm/proc.c
85683+++ b/net/atm/proc.c
85684@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
85685 const struct k_atm_aal_stats *stats)
85686 {
85687 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
85688- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
85689- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
85690- atomic_read(&stats->rx_drop));
85691+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
85692+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
85693+ atomic_read_unchecked(&stats->rx_drop));
85694 }
85695
85696 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
85697diff --git a/net/atm/resources.c b/net/atm/resources.c
85698index 0447d5d..3cf4728 100644
85699--- a/net/atm/resources.c
85700+++ b/net/atm/resources.c
85701@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
85702 static void copy_aal_stats(struct k_atm_aal_stats *from,
85703 struct atm_aal_stats *to)
85704 {
85705-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
85706+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
85707 __AAL_STAT_ITEMS
85708 #undef __HANDLE_ITEM
85709 }
85710@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
85711 static void subtract_aal_stats(struct k_atm_aal_stats *from,
85712 struct atm_aal_stats *to)
85713 {
85714-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
85715+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
85716 __AAL_STAT_ITEMS
85717 #undef __HANDLE_ITEM
85718 }
85719diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
85720index d5744b7..506bae3 100644
85721--- a/net/ax25/sysctl_net_ax25.c
85722+++ b/net/ax25/sysctl_net_ax25.c
85723@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
85724 {
85725 char path[sizeof("net/ax25/") + IFNAMSIZ];
85726 int k;
85727- struct ctl_table *table;
85728+ ctl_table_no_const *table;
85729
85730 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
85731 if (!table)
85732diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
85733index a5bb0a7..e1d8b97 100644
85734--- a/net/batman-adv/bat_iv_ogm.c
85735+++ b/net/batman-adv/bat_iv_ogm.c
85736@@ -63,7 +63,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
85737
85738 /* randomize initial seqno to avoid collision */
85739 get_random_bytes(&random_seqno, sizeof(random_seqno));
85740- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
85741+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
85742
85743 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
85744 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
85745@@ -611,9 +611,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
85746 batadv_ogm_packet = (struct batadv_ogm_packet *)(*ogm_buff);
85747
85748 /* change sequence number to network order */
85749- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
85750+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
85751 batadv_ogm_packet->seqno = htonl(seqno);
85752- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
85753+ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
85754
85755 batadv_ogm_packet->ttvn = atomic_read(&bat_priv->tt.vn);
85756 batadv_ogm_packet->tt_crc = htons(bat_priv->tt.local_crc);
85757@@ -1013,7 +1013,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
85758 return;
85759
85760 /* could be changed by schedule_own_packet() */
85761- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
85762+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
85763
85764 if (batadv_ogm_packet->flags & BATADV_DIRECTLINK)
85765 has_directlink_flag = 1;
85766diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
85767index 368219e..53f56f9 100644
85768--- a/net/batman-adv/hard-interface.c
85769+++ b/net/batman-adv/hard-interface.c
85770@@ -370,7 +370,7 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
85771 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
85772 dev_add_pack(&hard_iface->batman_adv_ptype);
85773
85774- atomic_set(&hard_iface->frag_seqno, 1);
85775+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
85776 batadv_info(hard_iface->soft_iface, "Adding interface: %s\n",
85777 hard_iface->net_dev->name);
85778
85779@@ -514,7 +514,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
85780 /* This can't be called via a bat_priv callback because
85781 * we have no bat_priv yet.
85782 */
85783- atomic_set(&hard_iface->bat_iv.ogm_seqno, 1);
85784+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, 1);
85785 hard_iface->bat_iv.ogm_buff = NULL;
85786
85787 return hard_iface;
85788diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
85789index 2711e87..4ca48fa 100644
85790--- a/net/batman-adv/soft-interface.c
85791+++ b/net/batman-adv/soft-interface.c
85792@@ -252,7 +252,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
85793 primary_if->net_dev->dev_addr, ETH_ALEN);
85794
85795 /* set broadcast sequence number */
85796- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
85797+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
85798 bcast_packet->seqno = htonl(seqno);
85799
85800 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
85801@@ -527,7 +527,7 @@ struct net_device *batadv_softif_create(const char *name)
85802 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
85803
85804 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
85805- atomic_set(&bat_priv->bcast_seqno, 1);
85806+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
85807 atomic_set(&bat_priv->tt.vn, 0);
85808 atomic_set(&bat_priv->tt.local_changes, 0);
85809 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
85810diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
85811index 4cd87a0..348e705 100644
85812--- a/net/batman-adv/types.h
85813+++ b/net/batman-adv/types.h
85814@@ -51,7 +51,7 @@
85815 struct batadv_hard_iface_bat_iv {
85816 unsigned char *ogm_buff;
85817 int ogm_buff_len;
85818- atomic_t ogm_seqno;
85819+ atomic_unchecked_t ogm_seqno;
85820 };
85821
85822 /**
85823@@ -75,7 +75,7 @@ struct batadv_hard_iface {
85824 int16_t if_num;
85825 char if_status;
85826 struct net_device *net_dev;
85827- atomic_t frag_seqno;
85828+ atomic_unchecked_t frag_seqno;
85829 struct kobject *hardif_obj;
85830 atomic_t refcount;
85831 struct packet_type batman_adv_ptype;
85832@@ -495,7 +495,7 @@ struct batadv_priv {
85833 #ifdef CONFIG_BATMAN_ADV_DEBUG
85834 atomic_t log_level;
85835 #endif
85836- atomic_t bcast_seqno;
85837+ atomic_unchecked_t bcast_seqno;
85838 atomic_t bcast_queue_left;
85839 atomic_t batman_queue_left;
85840 char num_ifaces;
85841diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
85842index 50e079f..49ce2d2 100644
85843--- a/net/batman-adv/unicast.c
85844+++ b/net/batman-adv/unicast.c
85845@@ -270,7 +270,7 @@ int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
85846 frag1->flags = BATADV_UNI_FRAG_HEAD | large_tail;
85847 frag2->flags = large_tail;
85848
85849- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
85850+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
85851 frag1->seqno = htons(seqno - 1);
85852 frag2->seqno = htons(seqno);
85853
85854diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
85855index b88605f..958e3e2 100644
85856--- a/net/bluetooth/hci_core.c
85857+++ b/net/bluetooth/hci_core.c
85858@@ -1793,16 +1793,16 @@ int hci_register_dev(struct hci_dev *hdev)
85859 list_add(&hdev->list, &hci_dev_list);
85860 write_unlock(&hci_dev_list_lock);
85861
85862- hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
85863- WQ_MEM_RECLAIM, 1);
85864+ hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
85865+ WQ_MEM_RECLAIM, 1, hdev->name);
85866 if (!hdev->workqueue) {
85867 error = -ENOMEM;
85868 goto err;
85869 }
85870
85871- hdev->req_workqueue = alloc_workqueue(hdev->name,
85872+ hdev->req_workqueue = alloc_workqueue("%s",
85873 WQ_HIGHPRI | WQ_UNBOUND |
85874- WQ_MEM_RECLAIM, 1);
85875+ WQ_MEM_RECLAIM, 1, hdev->name);
85876 if (!hdev->req_workqueue) {
85877 destroy_workqueue(hdev->workqueue);
85878 error = -ENOMEM;
85879diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
85880index 6a93614..1415549 100644
85881--- a/net/bluetooth/hci_sock.c
85882+++ b/net/bluetooth/hci_sock.c
85883@@ -929,7 +929,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
85884 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
85885 }
85886
85887- len = min_t(unsigned int, len, sizeof(uf));
85888+ len = min((size_t)len, sizeof(uf));
85889 if (copy_from_user(&uf, optval, len)) {
85890 err = -EFAULT;
85891 break;
85892diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
85893index c5f9cd6..8d23158 100644
85894--- a/net/bluetooth/l2cap_core.c
85895+++ b/net/bluetooth/l2cap_core.c
85896@@ -3395,8 +3395,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
85897 break;
85898
85899 case L2CAP_CONF_RFC:
85900- if (olen == sizeof(rfc))
85901- memcpy(&rfc, (void *)val, olen);
85902+ if (olen != sizeof(rfc))
85903+ break;
85904+
85905+ memcpy(&rfc, (void *)val, olen);
85906
85907 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
85908 rfc.mode != chan->mode)
85909diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
85910index 1bcfb84..dad9f98 100644
85911--- a/net/bluetooth/l2cap_sock.c
85912+++ b/net/bluetooth/l2cap_sock.c
85913@@ -479,7 +479,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
85914 struct sock *sk = sock->sk;
85915 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
85916 struct l2cap_options opts;
85917- int len, err = 0;
85918+ int err = 0;
85919+ size_t len = optlen;
85920 u32 opt;
85921
85922 BT_DBG("sk %p", sk);
85923@@ -501,7 +502,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
85924 opts.max_tx = chan->max_tx;
85925 opts.txwin_size = chan->tx_win;
85926
85927- len = min_t(unsigned int, sizeof(opts), optlen);
85928+ len = min(sizeof(opts), len);
85929 if (copy_from_user((char *) &opts, optval, len)) {
85930 err = -EFAULT;
85931 break;
85932@@ -581,7 +582,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
85933 struct bt_security sec;
85934 struct bt_power pwr;
85935 struct l2cap_conn *conn;
85936- int len, err = 0;
85937+ int err = 0;
85938+ size_t len = optlen;
85939 u32 opt;
85940
85941 BT_DBG("sk %p", sk);
85942@@ -604,7 +606,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
85943
85944 sec.level = BT_SECURITY_LOW;
85945
85946- len = min_t(unsigned int, sizeof(sec), optlen);
85947+ len = min(sizeof(sec), len);
85948 if (copy_from_user((char *) &sec, optval, len)) {
85949 err = -EFAULT;
85950 break;
85951@@ -701,7 +703,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
85952
85953 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
85954
85955- len = min_t(unsigned int, sizeof(pwr), optlen);
85956+ len = min(sizeof(pwr), len);
85957 if (copy_from_user((char *) &pwr, optval, len)) {
85958 err = -EFAULT;
85959 break;
85960diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
85961index 7c9224b..381009e 100644
85962--- a/net/bluetooth/rfcomm/sock.c
85963+++ b/net/bluetooth/rfcomm/sock.c
85964@@ -666,7 +666,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
85965 struct sock *sk = sock->sk;
85966 struct bt_security sec;
85967 int err = 0;
85968- size_t len;
85969+ size_t len = optlen;
85970 u32 opt;
85971
85972 BT_DBG("sk %p", sk);
85973@@ -688,7 +688,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
85974
85975 sec.level = BT_SECURITY_LOW;
85976
85977- len = min_t(unsigned int, sizeof(sec), optlen);
85978+ len = min(sizeof(sec), len);
85979 if (copy_from_user((char *) &sec, optval, len)) {
85980 err = -EFAULT;
85981 break;
85982diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
85983index b6e44ad..5b0d514 100644
85984--- a/net/bluetooth/rfcomm/tty.c
85985+++ b/net/bluetooth/rfcomm/tty.c
85986@@ -309,7 +309,7 @@ static void rfcomm_dev_del(struct rfcomm_dev *dev)
85987 BUG_ON(test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags));
85988
85989 spin_lock_irqsave(&dev->port.lock, flags);
85990- if (dev->port.count > 0) {
85991+ if (atomic_read(&dev->port.count) > 0) {
85992 spin_unlock_irqrestore(&dev->port.lock, flags);
85993 return;
85994 }
85995@@ -659,10 +659,10 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
85996 return -ENODEV;
85997
85998 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
85999- dev->channel, dev->port.count);
86000+ dev->channel, atomic_read(&dev->port.count));
86001
86002 spin_lock_irqsave(&dev->port.lock, flags);
86003- if (++dev->port.count > 1) {
86004+ if (atomic_inc_return(&dev->port.count) > 1) {
86005 spin_unlock_irqrestore(&dev->port.lock, flags);
86006 return 0;
86007 }
86008@@ -727,10 +727,10 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
86009 return;
86010
86011 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
86012- dev->port.count);
86013+ atomic_read(&dev->port.count));
86014
86015 spin_lock_irqsave(&dev->port.lock, flags);
86016- if (!--dev->port.count) {
86017+ if (!atomic_dec_return(&dev->port.count)) {
86018 spin_unlock_irqrestore(&dev->port.lock, flags);
86019 if (dev->tty_dev->parent)
86020 device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST);
86021diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
86022index 8d493c9..3849e49 100644
86023--- a/net/bridge/netfilter/ebtables.c
86024+++ b/net/bridge/netfilter/ebtables.c
86025@@ -1525,7 +1525,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
86026 tmp.valid_hooks = t->table->valid_hooks;
86027 }
86028 mutex_unlock(&ebt_mutex);
86029- if (copy_to_user(user, &tmp, *len) != 0){
86030+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
86031 BUGPRINT("c2u Didn't work\n");
86032 ret = -EFAULT;
86033 break;
86034@@ -2331,7 +2331,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
86035 goto out;
86036 tmp.valid_hooks = t->valid_hooks;
86037
86038- if (copy_to_user(user, &tmp, *len) != 0) {
86039+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
86040 ret = -EFAULT;
86041 break;
86042 }
86043@@ -2342,7 +2342,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
86044 tmp.entries_size = t->table->entries_size;
86045 tmp.valid_hooks = t->table->valid_hooks;
86046
86047- if (copy_to_user(user, &tmp, *len) != 0) {
86048+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
86049 ret = -EFAULT;
86050 break;
86051 }
86052diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
86053index a376ec1..1fbd6be 100644
86054--- a/net/caif/cfctrl.c
86055+++ b/net/caif/cfctrl.c
86056@@ -10,6 +10,7 @@
86057 #include <linux/spinlock.h>
86058 #include <linux/slab.h>
86059 #include <linux/pkt_sched.h>
86060+#include <linux/sched.h>
86061 #include <net/caif/caif_layer.h>
86062 #include <net/caif/cfpkt.h>
86063 #include <net/caif/cfctrl.h>
86064@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
86065 memset(&dev_info, 0, sizeof(dev_info));
86066 dev_info.id = 0xff;
86067 cfsrvl_init(&this->serv, 0, &dev_info, false);
86068- atomic_set(&this->req_seq_no, 1);
86069- atomic_set(&this->rsp_seq_no, 1);
86070+ atomic_set_unchecked(&this->req_seq_no, 1);
86071+ atomic_set_unchecked(&this->rsp_seq_no, 1);
86072 this->serv.layer.receive = cfctrl_recv;
86073 sprintf(this->serv.layer.name, "ctrl");
86074 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
86075@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
86076 struct cfctrl_request_info *req)
86077 {
86078 spin_lock_bh(&ctrl->info_list_lock);
86079- atomic_inc(&ctrl->req_seq_no);
86080- req->sequence_no = atomic_read(&ctrl->req_seq_no);
86081+ atomic_inc_unchecked(&ctrl->req_seq_no);
86082+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
86083 list_add_tail(&req->list, &ctrl->list);
86084 spin_unlock_bh(&ctrl->info_list_lock);
86085 }
86086@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
86087 if (p != first)
86088 pr_warn("Requests are not received in order\n");
86089
86090- atomic_set(&ctrl->rsp_seq_no,
86091+ atomic_set_unchecked(&ctrl->rsp_seq_no,
86092 p->sequence_no);
86093 list_del(&p->list);
86094 goto out;
86095diff --git a/net/can/af_can.c b/net/can/af_can.c
86096index c48e522..1223690 100644
86097--- a/net/can/af_can.c
86098+++ b/net/can/af_can.c
86099@@ -870,7 +870,7 @@ static const struct net_proto_family can_family_ops = {
86100 };
86101
86102 /* notifier block for netdevice event */
86103-static struct notifier_block can_netdev_notifier __read_mostly = {
86104+static struct notifier_block can_netdev_notifier = {
86105 .notifier_call = can_notifier,
86106 };
86107
86108diff --git a/net/can/gw.c b/net/can/gw.c
86109index 117814a..ad4fb73 100644
86110--- a/net/can/gw.c
86111+++ b/net/can/gw.c
86112@@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
86113 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
86114
86115 static HLIST_HEAD(cgw_list);
86116-static struct notifier_block notifier;
86117
86118 static struct kmem_cache *cgw_cache __read_mostly;
86119
86120@@ -928,6 +927,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
86121 return err;
86122 }
86123
86124+static struct notifier_block notifier = {
86125+ .notifier_call = cgw_notifier
86126+};
86127+
86128 static __init int cgw_module_init(void)
86129 {
86130 /* sanitize given module parameter */
86131@@ -943,7 +946,6 @@ static __init int cgw_module_init(void)
86132 return -ENOMEM;
86133
86134 /* set notifier */
86135- notifier.notifier_call = cgw_notifier;
86136 register_netdevice_notifier(&notifier);
86137
86138 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
86139diff --git a/net/compat.c b/net/compat.c
86140index 79ae884..0541331 100644
86141--- a/net/compat.c
86142+++ b/net/compat.c
86143@@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
86144 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
86145 __get_user(kmsg->msg_flags, &umsg->msg_flags))
86146 return -EFAULT;
86147- kmsg->msg_name = compat_ptr(tmp1);
86148- kmsg->msg_iov = compat_ptr(tmp2);
86149- kmsg->msg_control = compat_ptr(tmp3);
86150+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
86151+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
86152+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
86153 return 0;
86154 }
86155
86156@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
86157
86158 if (kern_msg->msg_namelen) {
86159 if (mode == VERIFY_READ) {
86160- int err = move_addr_to_kernel(kern_msg->msg_name,
86161+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
86162 kern_msg->msg_namelen,
86163 kern_address);
86164 if (err < 0)
86165@@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
86166 kern_msg->msg_name = NULL;
86167
86168 tot_len = iov_from_user_compat_to_kern(kern_iov,
86169- (struct compat_iovec __user *)kern_msg->msg_iov,
86170+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
86171 kern_msg->msg_iovlen);
86172 if (tot_len >= 0)
86173 kern_msg->msg_iov = kern_iov;
86174@@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
86175
86176 #define CMSG_COMPAT_FIRSTHDR(msg) \
86177 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
86178- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
86179+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
86180 (struct compat_cmsghdr __user *)NULL)
86181
86182 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
86183 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
86184 (ucmlen) <= (unsigned long) \
86185 ((mhdr)->msg_controllen - \
86186- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
86187+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
86188
86189 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
86190 struct compat_cmsghdr __user *cmsg, int cmsg_len)
86191 {
86192 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
86193- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
86194+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
86195 msg->msg_controllen)
86196 return NULL;
86197 return (struct compat_cmsghdr __user *)ptr;
86198@@ -219,7 +219,7 @@ Efault:
86199
86200 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
86201 {
86202- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
86203+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
86204 struct compat_cmsghdr cmhdr;
86205 struct compat_timeval ctv;
86206 struct compat_timespec cts[3];
86207@@ -275,7 +275,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
86208
86209 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
86210 {
86211- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
86212+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
86213 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
86214 int fdnum = scm->fp->count;
86215 struct file **fp = scm->fp->fp;
86216@@ -363,7 +363,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
86217 return -EFAULT;
86218 old_fs = get_fs();
86219 set_fs(KERNEL_DS);
86220- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
86221+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
86222 set_fs(old_fs);
86223
86224 return err;
86225@@ -424,7 +424,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
86226 len = sizeof(ktime);
86227 old_fs = get_fs();
86228 set_fs(KERNEL_DS);
86229- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
86230+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
86231 set_fs(old_fs);
86232
86233 if (!err) {
86234@@ -567,7 +567,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
86235 case MCAST_JOIN_GROUP:
86236 case MCAST_LEAVE_GROUP:
86237 {
86238- struct compat_group_req __user *gr32 = (void *)optval;
86239+ struct compat_group_req __user *gr32 = (void __user *)optval;
86240 struct group_req __user *kgr =
86241 compat_alloc_user_space(sizeof(struct group_req));
86242 u32 interface;
86243@@ -588,7 +588,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
86244 case MCAST_BLOCK_SOURCE:
86245 case MCAST_UNBLOCK_SOURCE:
86246 {
86247- struct compat_group_source_req __user *gsr32 = (void *)optval;
86248+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
86249 struct group_source_req __user *kgsr = compat_alloc_user_space(
86250 sizeof(struct group_source_req));
86251 u32 interface;
86252@@ -609,7 +609,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
86253 }
86254 case MCAST_MSFILTER:
86255 {
86256- struct compat_group_filter __user *gf32 = (void *)optval;
86257+ struct compat_group_filter __user *gf32 = (void __user *)optval;
86258 struct group_filter __user *kgf;
86259 u32 interface, fmode, numsrc;
86260
86261@@ -647,7 +647,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
86262 char __user *optval, int __user *optlen,
86263 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
86264 {
86265- struct compat_group_filter __user *gf32 = (void *)optval;
86266+ struct compat_group_filter __user *gf32 = (void __user *)optval;
86267 struct group_filter __user *kgf;
86268 int __user *koptlen;
86269 u32 interface, fmode, numsrc;
86270@@ -734,19 +734,25 @@ static unsigned char nas[21] = {
86271
86272 asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags)
86273 {
86274- return sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
86275+ if (flags & MSG_CMSG_COMPAT)
86276+ return -EINVAL;
86277+ return __sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
86278 }
86279
86280 asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg,
86281 unsigned int vlen, unsigned int flags)
86282 {
86283+ if (flags & MSG_CMSG_COMPAT)
86284+ return -EINVAL;
86285 return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
86286 flags | MSG_CMSG_COMPAT);
86287 }
86288
86289 asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags)
86290 {
86291- return sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
86292+ if (flags & MSG_CMSG_COMPAT)
86293+ return -EINVAL;
86294+ return __sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
86295 }
86296
86297 asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len, unsigned int flags)
86298@@ -768,6 +774,9 @@ asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
86299 int datagrams;
86300 struct timespec ktspec;
86301
86302+ if (flags & MSG_CMSG_COMPAT)
86303+ return -EINVAL;
86304+
86305 if (COMPAT_USE_64BIT_TIME)
86306 return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
86307 flags | MSG_CMSG_COMPAT,
86308@@ -796,7 +805,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
86309
86310 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
86311 return -EINVAL;
86312- if (copy_from_user(a, args, nas[call]))
86313+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
86314 return -EFAULT;
86315 a0 = a[0];
86316 a1 = a[1];
86317diff --git a/net/core/datagram.c b/net/core/datagram.c
86318index 368f9c3..f82d4a3 100644
86319--- a/net/core/datagram.c
86320+++ b/net/core/datagram.c
86321@@ -289,7 +289,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
86322 }
86323
86324 kfree_skb(skb);
86325- atomic_inc(&sk->sk_drops);
86326+ atomic_inc_unchecked(&sk->sk_drops);
86327 sk_mem_reclaim_partial(sk);
86328
86329 return err;
86330diff --git a/net/core/dev.c b/net/core/dev.c
86331index 9a278e9..15f2b9e 100644
86332--- a/net/core/dev.c
86333+++ b/net/core/dev.c
86334@@ -1617,7 +1617,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
86335 {
86336 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
86337 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
86338- atomic_long_inc(&dev->rx_dropped);
86339+ atomic_long_inc_unchecked(&dev->rx_dropped);
86340 kfree_skb(skb);
86341 return NET_RX_DROP;
86342 }
86343@@ -1626,7 +1626,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
86344 skb_orphan(skb);
86345
86346 if (unlikely(!is_skb_forwardable(dev, skb))) {
86347- atomic_long_inc(&dev->rx_dropped);
86348+ atomic_long_inc_unchecked(&dev->rx_dropped);
86349 kfree_skb(skb);
86350 return NET_RX_DROP;
86351 }
86352@@ -2351,7 +2351,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
86353
86354 struct dev_gso_cb {
86355 void (*destructor)(struct sk_buff *skb);
86356-};
86357+} __no_const;
86358
86359 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
86360
86361@@ -3093,7 +3093,7 @@ enqueue:
86362
86363 local_irq_restore(flags);
86364
86365- atomic_long_inc(&skb->dev->rx_dropped);
86366+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
86367 kfree_skb(skb);
86368 return NET_RX_DROP;
86369 }
86370@@ -3165,7 +3165,7 @@ int netif_rx_ni(struct sk_buff *skb)
86371 }
86372 EXPORT_SYMBOL(netif_rx_ni);
86373
86374-static void net_tx_action(struct softirq_action *h)
86375+static void net_tx_action(void)
86376 {
86377 struct softnet_data *sd = &__get_cpu_var(softnet_data);
86378
86379@@ -3490,7 +3490,7 @@ ncls:
86380 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
86381 } else {
86382 drop:
86383- atomic_long_inc(&skb->dev->rx_dropped);
86384+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
86385 kfree_skb(skb);
86386 /* Jamal, now you will not able to escape explaining
86387 * me how you were going to use this. :-)
86388@@ -4095,7 +4095,7 @@ void netif_napi_del(struct napi_struct *napi)
86389 }
86390 EXPORT_SYMBOL(netif_napi_del);
86391
86392-static void net_rx_action(struct softirq_action *h)
86393+static void net_rx_action(void)
86394 {
86395 struct softnet_data *sd = &__get_cpu_var(softnet_data);
86396 unsigned long time_limit = jiffies + 2;
86397@@ -5522,7 +5522,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
86398 } else {
86399 netdev_stats_to_stats64(storage, &dev->stats);
86400 }
86401- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
86402+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
86403 return storage;
86404 }
86405 EXPORT_SYMBOL(dev_get_stats);
86406diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
86407index 6cc0481..59cfb00 100644
86408--- a/net/core/dev_ioctl.c
86409+++ b/net/core/dev_ioctl.c
86410@@ -376,9 +376,13 @@ void dev_load(struct net *net, const char *name)
86411 if (no_module && capable(CAP_NET_ADMIN))
86412 no_module = request_module("netdev-%s", name);
86413 if (no_module && capable(CAP_SYS_MODULE)) {
86414+#ifdef CONFIG_GRKERNSEC_MODHARDEN
86415+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
86416+#else
86417 if (!request_module("%s", name))
86418 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
86419 name);
86420+#endif
86421 }
86422 }
86423 EXPORT_SYMBOL(dev_load);
86424diff --git a/net/core/flow.c b/net/core/flow.c
86425index 2bfd081..53c6058 100644
86426--- a/net/core/flow.c
86427+++ b/net/core/flow.c
86428@@ -61,7 +61,7 @@ struct flow_cache {
86429 struct timer_list rnd_timer;
86430 };
86431
86432-atomic_t flow_cache_genid = ATOMIC_INIT(0);
86433+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
86434 EXPORT_SYMBOL(flow_cache_genid);
86435 static struct flow_cache flow_cache_global;
86436 static struct kmem_cache *flow_cachep __read_mostly;
86437@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
86438
86439 static int flow_entry_valid(struct flow_cache_entry *fle)
86440 {
86441- if (atomic_read(&flow_cache_genid) != fle->genid)
86442+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
86443 return 0;
86444 if (fle->object && !fle->object->ops->check(fle->object))
86445 return 0;
86446@@ -258,7 +258,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
86447 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
86448 fcp->hash_count++;
86449 }
86450- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
86451+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
86452 flo = fle->object;
86453 if (!flo)
86454 goto ret_object;
86455@@ -279,7 +279,7 @@ nocache:
86456 }
86457 flo = resolver(net, key, family, dir, flo, ctx);
86458 if (fle) {
86459- fle->genid = atomic_read(&flow_cache_genid);
86460+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
86461 if (!IS_ERR(flo))
86462 fle->object = flo;
86463 else
86464diff --git a/net/core/iovec.c b/net/core/iovec.c
86465index 7e7aeb0..2a998cb 100644
86466--- a/net/core/iovec.c
86467+++ b/net/core/iovec.c
86468@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
86469 if (m->msg_namelen) {
86470 if (mode == VERIFY_READ) {
86471 void __user *namep;
86472- namep = (void __user __force *) m->msg_name;
86473+ namep = (void __force_user *) m->msg_name;
86474 err = move_addr_to_kernel(namep, m->msg_namelen,
86475 address);
86476 if (err < 0)
86477@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
86478 }
86479
86480 size = m->msg_iovlen * sizeof(struct iovec);
86481- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
86482+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
86483 return -EFAULT;
86484
86485 m->msg_iov = iov;
86486diff --git a/net/core/neighbour.c b/net/core/neighbour.c
86487index 3863b8f..85c99a6 100644
86488--- a/net/core/neighbour.c
86489+++ b/net/core/neighbour.c
86490@@ -2778,7 +2778,7 @@ static int proc_unres_qlen(ctl_table *ctl, int write, void __user *buffer,
86491 size_t *lenp, loff_t *ppos)
86492 {
86493 int size, ret;
86494- ctl_table tmp = *ctl;
86495+ ctl_table_no_const tmp = *ctl;
86496
86497 tmp.extra1 = &zero;
86498 tmp.extra2 = &unres_qlen_max;
86499diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
86500index 3174f19..5810985 100644
86501--- a/net/core/net-procfs.c
86502+++ b/net/core/net-procfs.c
86503@@ -271,8 +271,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
86504 else
86505 seq_printf(seq, "%04x", ntohs(pt->type));
86506
86507+#ifdef CONFIG_GRKERNSEC_HIDESYM
86508+ seq_printf(seq, " %-8s %pF\n",
86509+ pt->dev ? pt->dev->name : "", NULL);
86510+#else
86511 seq_printf(seq, " %-8s %pF\n",
86512 pt->dev ? pt->dev->name : "", pt->func);
86513+#endif
86514 }
86515
86516 return 0;
86517diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
86518index 7427ab5..389f411 100644
86519--- a/net/core/net-sysfs.c
86520+++ b/net/core/net-sysfs.c
86521@@ -1321,7 +1321,7 @@ void netdev_class_remove_file(struct class_attribute *class_attr)
86522 }
86523 EXPORT_SYMBOL(netdev_class_remove_file);
86524
86525-int netdev_kobject_init(void)
86526+int __init netdev_kobject_init(void)
86527 {
86528 kobj_ns_type_register(&net_ns_type_operations);
86529 return class_register(&net_class);
86530diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
86531index 80e271d..2980cc2 100644
86532--- a/net/core/net_namespace.c
86533+++ b/net/core/net_namespace.c
86534@@ -442,7 +442,7 @@ static int __register_pernet_operations(struct list_head *list,
86535 int error;
86536 LIST_HEAD(net_exit_list);
86537
86538- list_add_tail(&ops->list, list);
86539+ pax_list_add_tail((struct list_head *)&ops->list, list);
86540 if (ops->init || (ops->id && ops->size)) {
86541 for_each_net(net) {
86542 error = ops_init(ops, net);
86543@@ -455,7 +455,7 @@ static int __register_pernet_operations(struct list_head *list,
86544
86545 out_undo:
86546 /* If I have an error cleanup all namespaces I initialized */
86547- list_del(&ops->list);
86548+ pax_list_del((struct list_head *)&ops->list);
86549 ops_exit_list(ops, &net_exit_list);
86550 ops_free_list(ops, &net_exit_list);
86551 return error;
86552@@ -466,7 +466,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
86553 struct net *net;
86554 LIST_HEAD(net_exit_list);
86555
86556- list_del(&ops->list);
86557+ pax_list_del((struct list_head *)&ops->list);
86558 for_each_net(net)
86559 list_add_tail(&net->exit_list, &net_exit_list);
86560 ops_exit_list(ops, &net_exit_list);
86561@@ -600,7 +600,7 @@ int register_pernet_device(struct pernet_operations *ops)
86562 mutex_lock(&net_mutex);
86563 error = register_pernet_operations(&pernet_list, ops);
86564 if (!error && (first_device == &pernet_list))
86565- first_device = &ops->list;
86566+ first_device = (struct list_head *)&ops->list;
86567 mutex_unlock(&net_mutex);
86568 return error;
86569 }
86570diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
86571index 23854b5..ff4fda4 100644
86572--- a/net/core/rtnetlink.c
86573+++ b/net/core/rtnetlink.c
86574@@ -58,7 +58,7 @@ struct rtnl_link {
86575 rtnl_doit_func doit;
86576 rtnl_dumpit_func dumpit;
86577 rtnl_calcit_func calcit;
86578-};
86579+} __no_const;
86580
86581 static DEFINE_MUTEX(rtnl_mutex);
86582
86583@@ -299,10 +299,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
86584 if (rtnl_link_ops_get(ops->kind))
86585 return -EEXIST;
86586
86587- if (!ops->dellink)
86588- ops->dellink = unregister_netdevice_queue;
86589+ if (!ops->dellink) {
86590+ pax_open_kernel();
86591+ *(void **)&ops->dellink = unregister_netdevice_queue;
86592+ pax_close_kernel();
86593+ }
86594
86595- list_add_tail(&ops->list, &link_ops);
86596+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
86597 return 0;
86598 }
86599 EXPORT_SYMBOL_GPL(__rtnl_link_register);
86600@@ -349,7 +352,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
86601 for_each_net(net) {
86602 __rtnl_kill_links(net, ops);
86603 }
86604- list_del(&ops->list);
86605+ pax_list_del((struct list_head *)&ops->list);
86606 }
86607 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
86608
86609diff --git a/net/core/scm.c b/net/core/scm.c
86610index 2dc6cda..2159524 100644
86611--- a/net/core/scm.c
86612+++ b/net/core/scm.c
86613@@ -226,7 +226,7 @@ EXPORT_SYMBOL(__scm_send);
86614 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
86615 {
86616 struct cmsghdr __user *cm
86617- = (__force struct cmsghdr __user *)msg->msg_control;
86618+ = (struct cmsghdr __force_user *)msg->msg_control;
86619 struct cmsghdr cmhdr;
86620 int cmlen = CMSG_LEN(len);
86621 int err;
86622@@ -249,7 +249,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
86623 err = -EFAULT;
86624 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
86625 goto out;
86626- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
86627+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
86628 goto out;
86629 cmlen = CMSG_SPACE(len);
86630 if (msg->msg_controllen < cmlen)
86631@@ -265,7 +265,7 @@ EXPORT_SYMBOL(put_cmsg);
86632 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
86633 {
86634 struct cmsghdr __user *cm
86635- = (__force struct cmsghdr __user*)msg->msg_control;
86636+ = (struct cmsghdr __force_user *)msg->msg_control;
86637
86638 int fdmax = 0;
86639 int fdnum = scm->fp->count;
86640@@ -285,7 +285,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
86641 if (fdnum < fdmax)
86642 fdmax = fdnum;
86643
86644- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
86645+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
86646 i++, cmfptr++)
86647 {
86648 struct socket *sock;
86649diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
86650index e61a8bb..6a2f13c 100644
86651--- a/net/core/secure_seq.c
86652+++ b/net/core/secure_seq.c
86653@@ -12,12 +12,10 @@
86654
86655 static u32 net_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
86656
86657-static int __init net_secret_init(void)
86658+void net_secret_init(void)
86659 {
86660 get_random_bytes(net_secret, sizeof(net_secret));
86661- return 0;
86662 }
86663-late_initcall(net_secret_init);
86664
86665 #ifdef CONFIG_INET
86666 static u32 seq_scale(u32 seq)
86667diff --git a/net/core/sock.c b/net/core/sock.c
86668index 1432266..1a0d4a1 100644
86669--- a/net/core/sock.c
86670+++ b/net/core/sock.c
86671@@ -390,7 +390,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
86672 struct sk_buff_head *list = &sk->sk_receive_queue;
86673
86674 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
86675- atomic_inc(&sk->sk_drops);
86676+ atomic_inc_unchecked(&sk->sk_drops);
86677 trace_sock_rcvqueue_full(sk, skb);
86678 return -ENOMEM;
86679 }
86680@@ -400,7 +400,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
86681 return err;
86682
86683 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
86684- atomic_inc(&sk->sk_drops);
86685+ atomic_inc_unchecked(&sk->sk_drops);
86686 return -ENOBUFS;
86687 }
86688
86689@@ -420,7 +420,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
86690 skb_dst_force(skb);
86691
86692 spin_lock_irqsave(&list->lock, flags);
86693- skb->dropcount = atomic_read(&sk->sk_drops);
86694+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
86695 __skb_queue_tail(list, skb);
86696 spin_unlock_irqrestore(&list->lock, flags);
86697
86698@@ -440,7 +440,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
86699 skb->dev = NULL;
86700
86701 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
86702- atomic_inc(&sk->sk_drops);
86703+ atomic_inc_unchecked(&sk->sk_drops);
86704 goto discard_and_relse;
86705 }
86706 if (nested)
86707@@ -458,7 +458,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
86708 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
86709 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
86710 bh_unlock_sock(sk);
86711- atomic_inc(&sk->sk_drops);
86712+ atomic_inc_unchecked(&sk->sk_drops);
86713 goto discard_and_relse;
86714 }
86715
86716@@ -942,12 +942,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
86717 struct timeval tm;
86718 } v;
86719
86720- int lv = sizeof(int);
86721- int len;
86722+ unsigned int lv = sizeof(int);
86723+ unsigned int len;
86724
86725 if (get_user(len, optlen))
86726 return -EFAULT;
86727- if (len < 0)
86728+ if (len > INT_MAX)
86729 return -EINVAL;
86730
86731 memset(&v, 0, sizeof(v));
86732@@ -1099,11 +1099,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
86733
86734 case SO_PEERNAME:
86735 {
86736- char address[128];
86737+ char address[_K_SS_MAXSIZE];
86738
86739 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
86740 return -ENOTCONN;
86741- if (lv < len)
86742+ if (lv < len || sizeof address < len)
86743 return -EINVAL;
86744 if (copy_to_user(optval, address, len))
86745 return -EFAULT;
86746@@ -1166,7 +1166,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
86747
86748 if (len > lv)
86749 len = lv;
86750- if (copy_to_user(optval, &v, len))
86751+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
86752 return -EFAULT;
86753 lenout:
86754 if (put_user(len, optlen))
86755@@ -2284,7 +2284,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
86756 */
86757 smp_wmb();
86758 atomic_set(&sk->sk_refcnt, 1);
86759- atomic_set(&sk->sk_drops, 0);
86760+ atomic_set_unchecked(&sk->sk_drops, 0);
86761 }
86762 EXPORT_SYMBOL(sock_init_data);
86763
86764diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
86765index a29e90c..922399c 100644
86766--- a/net/core/sock_diag.c
86767+++ b/net/core/sock_diag.c
86768@@ -9,26 +9,33 @@
86769 #include <linux/inet_diag.h>
86770 #include <linux/sock_diag.h>
86771
86772-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
86773+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
86774 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
86775 static DEFINE_MUTEX(sock_diag_table_mutex);
86776
86777 int sock_diag_check_cookie(void *sk, __u32 *cookie)
86778 {
86779+#ifndef CONFIG_GRKERNSEC_HIDESYM
86780 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
86781 cookie[1] != INET_DIAG_NOCOOKIE) &&
86782 ((u32)(unsigned long)sk != cookie[0] ||
86783 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
86784 return -ESTALE;
86785 else
86786+#endif
86787 return 0;
86788 }
86789 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
86790
86791 void sock_diag_save_cookie(void *sk, __u32 *cookie)
86792 {
86793+#ifdef CONFIG_GRKERNSEC_HIDESYM
86794+ cookie[0] = 0;
86795+ cookie[1] = 0;
86796+#else
86797 cookie[0] = (u32)(unsigned long)sk;
86798 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
86799+#endif
86800 }
86801 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
86802
86803@@ -75,8 +82,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
86804 mutex_lock(&sock_diag_table_mutex);
86805 if (sock_diag_handlers[hndl->family])
86806 err = -EBUSY;
86807- else
86808+ else {
86809+ pax_open_kernel();
86810 sock_diag_handlers[hndl->family] = hndl;
86811+ pax_close_kernel();
86812+ }
86813 mutex_unlock(&sock_diag_table_mutex);
86814
86815 return err;
86816@@ -92,7 +102,9 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
86817
86818 mutex_lock(&sock_diag_table_mutex);
86819 BUG_ON(sock_diag_handlers[family] != hnld);
86820+ pax_open_kernel();
86821 sock_diag_handlers[family] = NULL;
86822+ pax_close_kernel();
86823 mutex_unlock(&sock_diag_table_mutex);
86824 }
86825 EXPORT_SYMBOL_GPL(sock_diag_unregister);
86826diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
86827index cfdb46a..cef55e1 100644
86828--- a/net/core/sysctl_net_core.c
86829+++ b/net/core/sysctl_net_core.c
86830@@ -28,7 +28,7 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write,
86831 {
86832 unsigned int orig_size, size;
86833 int ret, i;
86834- ctl_table tmp = {
86835+ ctl_table_no_const tmp = {
86836 .data = &size,
86837 .maxlen = sizeof(size),
86838 .mode = table->mode
86839@@ -211,13 +211,12 @@ static struct ctl_table netns_core_table[] = {
86840
86841 static __net_init int sysctl_core_net_init(struct net *net)
86842 {
86843- struct ctl_table *tbl;
86844+ ctl_table_no_const *tbl = NULL;
86845
86846 net->core.sysctl_somaxconn = SOMAXCONN;
86847
86848- tbl = netns_core_table;
86849 if (!net_eq(net, &init_net)) {
86850- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
86851+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
86852 if (tbl == NULL)
86853 goto err_dup;
86854
86855@@ -227,17 +226,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
86856 if (net->user_ns != &init_user_ns) {
86857 tbl[0].procname = NULL;
86858 }
86859- }
86860-
86861- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
86862+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
86863+ } else
86864+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
86865 if (net->core.sysctl_hdr == NULL)
86866 goto err_reg;
86867
86868 return 0;
86869
86870 err_reg:
86871- if (tbl != netns_core_table)
86872- kfree(tbl);
86873+ kfree(tbl);
86874 err_dup:
86875 return -ENOMEM;
86876 }
86877@@ -252,7 +250,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
86878 kfree(tbl);
86879 }
86880
86881-static __net_initdata struct pernet_operations sysctl_core_ops = {
86882+static __net_initconst struct pernet_operations sysctl_core_ops = {
86883 .init = sysctl_core_net_init,
86884 .exit = sysctl_core_net_exit,
86885 };
86886diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
86887index c21f200..bc4565b 100644
86888--- a/net/decnet/af_decnet.c
86889+++ b/net/decnet/af_decnet.c
86890@@ -465,6 +465,7 @@ static struct proto dn_proto = {
86891 .sysctl_rmem = sysctl_decnet_rmem,
86892 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
86893 .obj_size = sizeof(struct dn_sock),
86894+ .slab_flags = SLAB_USERCOPY,
86895 };
86896
86897 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
86898diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
86899index a55eecc..dd8428c 100644
86900--- a/net/decnet/sysctl_net_decnet.c
86901+++ b/net/decnet/sysctl_net_decnet.c
86902@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
86903
86904 if (len > *lenp) len = *lenp;
86905
86906- if (copy_to_user(buffer, addr, len))
86907+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
86908 return -EFAULT;
86909
86910 *lenp = len;
86911@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
86912
86913 if (len > *lenp) len = *lenp;
86914
86915- if (copy_to_user(buffer, devname, len))
86916+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
86917 return -EFAULT;
86918
86919 *lenp = len;
86920diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
86921index c929d9c..df10cde 100644
86922--- a/net/ipv4/af_inet.c
86923+++ b/net/ipv4/af_inet.c
86924@@ -115,6 +115,7 @@
86925 #include <net/inet_common.h>
86926 #include <net/xfrm.h>
86927 #include <net/net_namespace.h>
86928+#include <net/secure_seq.h>
86929 #ifdef CONFIG_IP_MROUTE
86930 #include <linux/mroute.h>
86931 #endif
86932@@ -263,8 +264,10 @@ void build_ehash_secret(void)
86933 get_random_bytes(&rnd, sizeof(rnd));
86934 } while (rnd == 0);
86935
86936- if (cmpxchg(&inet_ehash_secret, 0, rnd) == 0)
86937+ if (cmpxchg(&inet_ehash_secret, 0, rnd) == 0) {
86938 get_random_bytes(&ipv6_hash_secret, sizeof(ipv6_hash_secret));
86939+ net_secret_init();
86940+ }
86941 }
86942 EXPORT_SYMBOL(build_ehash_secret);
86943
86944@@ -1699,13 +1702,9 @@ static int __init inet_init(void)
86945
86946 BUILD_BUG_ON(sizeof(struct inet_skb_parm) > FIELD_SIZEOF(struct sk_buff, cb));
86947
86948- sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
86949- if (!sysctl_local_reserved_ports)
86950- goto out;
86951-
86952 rc = proto_register(&tcp_prot, 1);
86953 if (rc)
86954- goto out_free_reserved_ports;
86955+ goto out;
86956
86957 rc = proto_register(&udp_prot, 1);
86958 if (rc)
86959@@ -1814,8 +1813,6 @@ out_unregister_udp_proto:
86960 proto_unregister(&udp_prot);
86961 out_unregister_tcp_proto:
86962 proto_unregister(&tcp_prot);
86963-out_free_reserved_ports:
86964- kfree(sysctl_local_reserved_ports);
86965 goto out;
86966 }
86967
86968diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
86969index 2e7f194..0fa4d6d 100644
86970--- a/net/ipv4/ah4.c
86971+++ b/net/ipv4/ah4.c
86972@@ -420,7 +420,7 @@ static void ah4_err(struct sk_buff *skb, u32 info)
86973 return;
86974
86975 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
86976- atomic_inc(&flow_cache_genid);
86977+ atomic_inc_unchecked(&flow_cache_genid);
86978 rt_genid_bump(net);
86979
86980 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0);
86981diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
86982index c6287cd..e9bc96a 100644
86983--- a/net/ipv4/devinet.c
86984+++ b/net/ipv4/devinet.c
86985@@ -1992,7 +1992,7 @@ static int ipv4_doint_and_flush(ctl_table *ctl, int write,
86986 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
86987 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
86988
86989-static struct devinet_sysctl_table {
86990+static const struct devinet_sysctl_table {
86991 struct ctl_table_header *sysctl_header;
86992 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
86993 } devinet_sysctl = {
86994@@ -2110,7 +2110,7 @@ static __net_init int devinet_init_net(struct net *net)
86995 int err;
86996 struct ipv4_devconf *all, *dflt;
86997 #ifdef CONFIG_SYSCTL
86998- struct ctl_table *tbl = ctl_forward_entry;
86999+ ctl_table_no_const *tbl = NULL;
87000 struct ctl_table_header *forw_hdr;
87001 #endif
87002
87003@@ -2128,7 +2128,7 @@ static __net_init int devinet_init_net(struct net *net)
87004 goto err_alloc_dflt;
87005
87006 #ifdef CONFIG_SYSCTL
87007- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
87008+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
87009 if (tbl == NULL)
87010 goto err_alloc_ctl;
87011
87012@@ -2148,7 +2148,10 @@ static __net_init int devinet_init_net(struct net *net)
87013 goto err_reg_dflt;
87014
87015 err = -ENOMEM;
87016- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
87017+ if (!net_eq(net, &init_net))
87018+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
87019+ else
87020+ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
87021 if (forw_hdr == NULL)
87022 goto err_reg_ctl;
87023 net->ipv4.forw_hdr = forw_hdr;
87024@@ -2164,8 +2167,7 @@ err_reg_ctl:
87025 err_reg_dflt:
87026 __devinet_sysctl_unregister(all);
87027 err_reg_all:
87028- if (tbl != ctl_forward_entry)
87029- kfree(tbl);
87030+ kfree(tbl);
87031 err_alloc_ctl:
87032 #endif
87033 if (dflt != &ipv4_devconf_dflt)
87034diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
87035index 4cfe34d..a6ba66e 100644
87036--- a/net/ipv4/esp4.c
87037+++ b/net/ipv4/esp4.c
87038@@ -503,7 +503,7 @@ static void esp4_err(struct sk_buff *skb, u32 info)
87039 return;
87040
87041 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
87042- atomic_inc(&flow_cache_genid);
87043+ atomic_inc_unchecked(&flow_cache_genid);
87044 rt_genid_bump(net);
87045
87046 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
87047diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
87048index eb4bb12..ee4ec7d 100644
87049--- a/net/ipv4/fib_frontend.c
87050+++ b/net/ipv4/fib_frontend.c
87051@@ -1017,12 +1017,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
87052 #ifdef CONFIG_IP_ROUTE_MULTIPATH
87053 fib_sync_up(dev);
87054 #endif
87055- atomic_inc(&net->ipv4.dev_addr_genid);
87056+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
87057 rt_cache_flush(dev_net(dev));
87058 break;
87059 case NETDEV_DOWN:
87060 fib_del_ifaddr(ifa, NULL);
87061- atomic_inc(&net->ipv4.dev_addr_genid);
87062+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
87063 if (ifa->ifa_dev->ifa_list == NULL) {
87064 /* Last address was deleted from this interface.
87065 * Disable IP.
87066@@ -1058,7 +1058,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
87067 #ifdef CONFIG_IP_ROUTE_MULTIPATH
87068 fib_sync_up(dev);
87069 #endif
87070- atomic_inc(&net->ipv4.dev_addr_genid);
87071+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
87072 rt_cache_flush(net);
87073 break;
87074 case NETDEV_DOWN:
87075diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
87076index 8f6cb7a..34507f9 100644
87077--- a/net/ipv4/fib_semantics.c
87078+++ b/net/ipv4/fib_semantics.c
87079@@ -765,7 +765,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
87080 nh->nh_saddr = inet_select_addr(nh->nh_dev,
87081 nh->nh_gw,
87082 nh->nh_parent->fib_scope);
87083- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
87084+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
87085
87086 return nh->nh_saddr;
87087 }
87088diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
87089index 786d97a..1889c0d 100644
87090--- a/net/ipv4/inet_connection_sock.c
87091+++ b/net/ipv4/inet_connection_sock.c
87092@@ -37,7 +37,7 @@ struct local_ports sysctl_local_ports __read_mostly = {
87093 .range = { 32768, 61000 },
87094 };
87095
87096-unsigned long *sysctl_local_reserved_ports;
87097+unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
87098 EXPORT_SYMBOL(sysctl_local_reserved_ports);
87099
87100 void inet_get_local_port_range(int *low, int *high)
87101diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
87102index 6af375a..c493c74 100644
87103--- a/net/ipv4/inet_hashtables.c
87104+++ b/net/ipv4/inet_hashtables.c
87105@@ -18,12 +18,15 @@
87106 #include <linux/sched.h>
87107 #include <linux/slab.h>
87108 #include <linux/wait.h>
87109+#include <linux/security.h>
87110
87111 #include <net/inet_connection_sock.h>
87112 #include <net/inet_hashtables.h>
87113 #include <net/secure_seq.h>
87114 #include <net/ip.h>
87115
87116+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
87117+
87118 /*
87119 * Allocate and initialize a new local port bind bucket.
87120 * The bindhash mutex for snum's hash chain must be held here.
87121@@ -554,6 +557,8 @@ ok:
87122 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
87123 spin_unlock(&head->lock);
87124
87125+ gr_update_task_in_ip_table(current, inet_sk(sk));
87126+
87127 if (tw) {
87128 inet_twsk_deschedule(tw, death_row);
87129 while (twrefcnt) {
87130diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
87131index 000e3d2..5472da3 100644
87132--- a/net/ipv4/inetpeer.c
87133+++ b/net/ipv4/inetpeer.c
87134@@ -503,8 +503,8 @@ relookup:
87135 if (p) {
87136 p->daddr = *daddr;
87137 atomic_set(&p->refcnt, 1);
87138- atomic_set(&p->rid, 0);
87139- atomic_set(&p->ip_id_count,
87140+ atomic_set_unchecked(&p->rid, 0);
87141+ atomic_set_unchecked(&p->ip_id_count,
87142 (daddr->family == AF_INET) ?
87143 secure_ip_id(daddr->addr.a4) :
87144 secure_ipv6_id(daddr->addr.a6));
87145diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
87146index 52c273e..579060b 100644
87147--- a/net/ipv4/ip_fragment.c
87148+++ b/net/ipv4/ip_fragment.c
87149@@ -311,7 +311,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
87150 return 0;
87151
87152 start = qp->rid;
87153- end = atomic_inc_return(&peer->rid);
87154+ end = atomic_inc_return_unchecked(&peer->rid);
87155 qp->rid = end;
87156
87157 rc = qp->q.fragments && (end - start) > max;
87158@@ -788,12 +788,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
87159
87160 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
87161 {
87162- struct ctl_table *table;
87163+ ctl_table_no_const *table = NULL;
87164 struct ctl_table_header *hdr;
87165
87166- table = ip4_frags_ns_ctl_table;
87167 if (!net_eq(net, &init_net)) {
87168- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
87169+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
87170 if (table == NULL)
87171 goto err_alloc;
87172
87173@@ -804,9 +803,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
87174 /* Don't export sysctls to unprivileged users */
87175 if (net->user_ns != &init_user_ns)
87176 table[0].procname = NULL;
87177- }
87178+ hdr = register_net_sysctl(net, "net/ipv4", table);
87179+ } else
87180+ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
87181
87182- hdr = register_net_sysctl(net, "net/ipv4", table);
87183 if (hdr == NULL)
87184 goto err_reg;
87185
87186@@ -814,8 +814,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
87187 return 0;
87188
87189 err_reg:
87190- if (!net_eq(net, &init_net))
87191- kfree(table);
87192+ kfree(table);
87193 err_alloc:
87194 return -ENOMEM;
87195 }
87196diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
87197index 91d66db..4af7d99 100644
87198--- a/net/ipv4/ip_gre.c
87199+++ b/net/ipv4/ip_gre.c
87200@@ -124,7 +124,7 @@ static bool log_ecn_error = true;
87201 module_param(log_ecn_error, bool, 0644);
87202 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
87203
87204-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
87205+static struct rtnl_link_ops ipgre_link_ops;
87206 static int ipgre_tunnel_init(struct net_device *dev);
87207 static void ipgre_tunnel_setup(struct net_device *dev);
87208 static int ipgre_tunnel_bind_dev(struct net_device *dev);
87209@@ -1823,7 +1823,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
87210 [IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
87211 };
87212
87213-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
87214+static struct rtnl_link_ops ipgre_link_ops = {
87215 .kind = "gre",
87216 .maxtype = IFLA_GRE_MAX,
87217 .policy = ipgre_policy,
87218@@ -1836,7 +1836,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
87219 .fill_info = ipgre_fill_info,
87220 };
87221
87222-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
87223+static struct rtnl_link_ops ipgre_tap_ops = {
87224 .kind = "gretap",
87225 .maxtype = IFLA_GRE_MAX,
87226 .policy = ipgre_policy,
87227diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
87228index d9c4f11..02b82dbc 100644
87229--- a/net/ipv4/ip_sockglue.c
87230+++ b/net/ipv4/ip_sockglue.c
87231@@ -1152,7 +1152,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
87232 len = min_t(unsigned int, len, opt->optlen);
87233 if (put_user(len, optlen))
87234 return -EFAULT;
87235- if (copy_to_user(optval, opt->__data, len))
87236+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
87237+ copy_to_user(optval, opt->__data, len))
87238 return -EFAULT;
87239 return 0;
87240 }
87241@@ -1283,7 +1284,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
87242 if (sk->sk_type != SOCK_STREAM)
87243 return -ENOPROTOOPT;
87244
87245- msg.msg_control = optval;
87246+ msg.msg_control = (void __force_kernel *)optval;
87247 msg.msg_controllen = len;
87248 msg.msg_flags = flags;
87249
87250diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
87251index c3a4233..7df5626 100644
87252--- a/net/ipv4/ip_vti.c
87253+++ b/net/ipv4/ip_vti.c
87254@@ -47,7 +47,7 @@
87255 #define HASH_SIZE 16
87256 #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&(HASH_SIZE-1))
87257
87258-static struct rtnl_link_ops vti_link_ops __read_mostly;
87259+static struct rtnl_link_ops vti_link_ops;
87260
87261 static int vti_net_id __read_mostly;
87262 struct vti_net {
87263@@ -399,8 +399,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
87264 tunnel->err_count = 0;
87265 }
87266
87267- IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
87268- IPSKB_REROUTED);
87269+ memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
87270 skb_dst_drop(skb);
87271 skb_dst_set(skb, &rt->dst);
87272 nf_reset(skb);
87273@@ -886,7 +885,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
87274 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
87275 };
87276
87277-static struct rtnl_link_ops vti_link_ops __read_mostly = {
87278+static struct rtnl_link_ops vti_link_ops = {
87279 .kind = "vti",
87280 .maxtype = IFLA_VTI_MAX,
87281 .policy = vti_policy,
87282diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
87283index f01d1b1..8fe03ad 100644
87284--- a/net/ipv4/ipcomp.c
87285+++ b/net/ipv4/ipcomp.c
87286@@ -48,7 +48,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
87287 return;
87288
87289 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
87290- atomic_inc(&flow_cache_genid);
87291+ atomic_inc_unchecked(&flow_cache_genid);
87292 rt_genid_bump(net);
87293
87294 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_COMP, 0);
87295diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
87296index bf6c5cf..ab2e9c6 100644
87297--- a/net/ipv4/ipconfig.c
87298+++ b/net/ipv4/ipconfig.c
87299@@ -323,7 +323,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
87300
87301 mm_segment_t oldfs = get_fs();
87302 set_fs(get_ds());
87303- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
87304+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
87305 set_fs(oldfs);
87306 return res;
87307 }
87308@@ -334,7 +334,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
87309
87310 mm_segment_t oldfs = get_fs();
87311 set_fs(get_ds());
87312- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
87313+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
87314 set_fs(oldfs);
87315 return res;
87316 }
87317@@ -345,7 +345,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
87318
87319 mm_segment_t oldfs = get_fs();
87320 set_fs(get_ds());
87321- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
87322+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
87323 set_fs(oldfs);
87324 return res;
87325 }
87326diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
87327index 8f024d4..8b3500c 100644
87328--- a/net/ipv4/ipip.c
87329+++ b/net/ipv4/ipip.c
87330@@ -138,7 +138,7 @@ struct ipip_net {
87331 static int ipip_tunnel_init(struct net_device *dev);
87332 static void ipip_tunnel_setup(struct net_device *dev);
87333 static void ipip_dev_free(struct net_device *dev);
87334-static struct rtnl_link_ops ipip_link_ops __read_mostly;
87335+static struct rtnl_link_ops ipip_link_ops;
87336
87337 static struct rtnl_link_stats64 *ipip_get_stats64(struct net_device *dev,
87338 struct rtnl_link_stats64 *tot)
87339@@ -974,7 +974,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
87340 [IFLA_IPTUN_PMTUDISC] = { .type = NLA_U8 },
87341 };
87342
87343-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
87344+static struct rtnl_link_ops ipip_link_ops = {
87345 .kind = "ipip",
87346 .maxtype = IFLA_IPTUN_MAX,
87347 .policy = ipip_policy,
87348diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
87349index 7dc6a97..229c61b 100644
87350--- a/net/ipv4/netfilter/arp_tables.c
87351+++ b/net/ipv4/netfilter/arp_tables.c
87352@@ -879,14 +879,14 @@ static int compat_table_info(const struct xt_table_info *info,
87353 #endif
87354
87355 static int get_info(struct net *net, void __user *user,
87356- const int *len, int compat)
87357+ int len, int compat)
87358 {
87359 char name[XT_TABLE_MAXNAMELEN];
87360 struct xt_table *t;
87361 int ret;
87362
87363- if (*len != sizeof(struct arpt_getinfo)) {
87364- duprintf("length %u != %Zu\n", *len,
87365+ if (len != sizeof(struct arpt_getinfo)) {
87366+ duprintf("length %u != %Zu\n", len,
87367 sizeof(struct arpt_getinfo));
87368 return -EINVAL;
87369 }
87370@@ -923,7 +923,7 @@ static int get_info(struct net *net, void __user *user,
87371 info.size = private->size;
87372 strcpy(info.name, name);
87373
87374- if (copy_to_user(user, &info, *len) != 0)
87375+ if (copy_to_user(user, &info, len) != 0)
87376 ret = -EFAULT;
87377 else
87378 ret = 0;
87379@@ -1682,7 +1682,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
87380
87381 switch (cmd) {
87382 case ARPT_SO_GET_INFO:
87383- ret = get_info(sock_net(sk), user, len, 1);
87384+ ret = get_info(sock_net(sk), user, *len, 1);
87385 break;
87386 case ARPT_SO_GET_ENTRIES:
87387 ret = compat_get_entries(sock_net(sk), user, len);
87388@@ -1727,7 +1727,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
87389
87390 switch (cmd) {
87391 case ARPT_SO_GET_INFO:
87392- ret = get_info(sock_net(sk), user, len, 0);
87393+ ret = get_info(sock_net(sk), user, *len, 0);
87394 break;
87395
87396 case ARPT_SO_GET_ENTRIES:
87397diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
87398index 3efcf87..5247916 100644
87399--- a/net/ipv4/netfilter/ip_tables.c
87400+++ b/net/ipv4/netfilter/ip_tables.c
87401@@ -1068,14 +1068,14 @@ static int compat_table_info(const struct xt_table_info *info,
87402 #endif
87403
87404 static int get_info(struct net *net, void __user *user,
87405- const int *len, int compat)
87406+ int len, int compat)
87407 {
87408 char name[XT_TABLE_MAXNAMELEN];
87409 struct xt_table *t;
87410 int ret;
87411
87412- if (*len != sizeof(struct ipt_getinfo)) {
87413- duprintf("length %u != %zu\n", *len,
87414+ if (len != sizeof(struct ipt_getinfo)) {
87415+ duprintf("length %u != %zu\n", len,
87416 sizeof(struct ipt_getinfo));
87417 return -EINVAL;
87418 }
87419@@ -1112,7 +1112,7 @@ static int get_info(struct net *net, void __user *user,
87420 info.size = private->size;
87421 strcpy(info.name, name);
87422
87423- if (copy_to_user(user, &info, *len) != 0)
87424+ if (copy_to_user(user, &info, len) != 0)
87425 ret = -EFAULT;
87426 else
87427 ret = 0;
87428@@ -1966,7 +1966,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
87429
87430 switch (cmd) {
87431 case IPT_SO_GET_INFO:
87432- ret = get_info(sock_net(sk), user, len, 1);
87433+ ret = get_info(sock_net(sk), user, *len, 1);
87434 break;
87435 case IPT_SO_GET_ENTRIES:
87436 ret = compat_get_entries(sock_net(sk), user, len);
87437@@ -2013,7 +2013,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
87438
87439 switch (cmd) {
87440 case IPT_SO_GET_INFO:
87441- ret = get_info(sock_net(sk), user, len, 0);
87442+ ret = get_info(sock_net(sk), user, *len, 0);
87443 break;
87444
87445 case IPT_SO_GET_ENTRIES:
87446diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
87447index 2e91006..f084394 100644
87448--- a/net/ipv4/ping.c
87449+++ b/net/ipv4/ping.c
87450@@ -844,7 +844,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
87451 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
87452 0, sock_i_ino(sp),
87453 atomic_read(&sp->sk_refcnt), sp,
87454- atomic_read(&sp->sk_drops), len);
87455+ atomic_read_unchecked(&sp->sk_drops), len);
87456 }
87457
87458 static int ping_seq_show(struct seq_file *seq, void *v)
87459diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
87460index dd44e0a..06dcca4 100644
87461--- a/net/ipv4/raw.c
87462+++ b/net/ipv4/raw.c
87463@@ -309,7 +309,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
87464 int raw_rcv(struct sock *sk, struct sk_buff *skb)
87465 {
87466 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
87467- atomic_inc(&sk->sk_drops);
87468+ atomic_inc_unchecked(&sk->sk_drops);
87469 kfree_skb(skb);
87470 return NET_RX_DROP;
87471 }
87472@@ -745,16 +745,20 @@ static int raw_init(struct sock *sk)
87473
87474 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
87475 {
87476+ struct icmp_filter filter;
87477+
87478 if (optlen > sizeof(struct icmp_filter))
87479 optlen = sizeof(struct icmp_filter);
87480- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
87481+ if (copy_from_user(&filter, optval, optlen))
87482 return -EFAULT;
87483+ raw_sk(sk)->filter = filter;
87484 return 0;
87485 }
87486
87487 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
87488 {
87489 int len, ret = -EFAULT;
87490+ struct icmp_filter filter;
87491
87492 if (get_user(len, optlen))
87493 goto out;
87494@@ -764,8 +768,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
87495 if (len > sizeof(struct icmp_filter))
87496 len = sizeof(struct icmp_filter);
87497 ret = -EFAULT;
87498- if (put_user(len, optlen) ||
87499- copy_to_user(optval, &raw_sk(sk)->filter, len))
87500+ filter = raw_sk(sk)->filter;
87501+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
87502 goto out;
87503 ret = 0;
87504 out: return ret;
87505@@ -994,7 +998,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
87506 0, 0L, 0,
87507 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
87508 0, sock_i_ino(sp),
87509- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
87510+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
87511 }
87512
87513 static int raw_seq_show(struct seq_file *seq, void *v)
87514diff --git a/net/ipv4/route.c b/net/ipv4/route.c
87515index 6e28514..5e1b055 100644
87516--- a/net/ipv4/route.c
87517+++ b/net/ipv4/route.c
87518@@ -2553,34 +2553,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
87519 .maxlen = sizeof(int),
87520 .mode = 0200,
87521 .proc_handler = ipv4_sysctl_rtcache_flush,
87522+ .extra1 = &init_net,
87523 },
87524 { },
87525 };
87526
87527 static __net_init int sysctl_route_net_init(struct net *net)
87528 {
87529- struct ctl_table *tbl;
87530+ ctl_table_no_const *tbl = NULL;
87531
87532- tbl = ipv4_route_flush_table;
87533 if (!net_eq(net, &init_net)) {
87534- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
87535+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
87536 if (tbl == NULL)
87537 goto err_dup;
87538
87539 /* Don't export sysctls to unprivileged users */
87540 if (net->user_ns != &init_user_ns)
87541 tbl[0].procname = NULL;
87542- }
87543- tbl[0].extra1 = net;
87544+ tbl[0].extra1 = net;
87545+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
87546+ } else
87547+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
87548
87549- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
87550 if (net->ipv4.route_hdr == NULL)
87551 goto err_reg;
87552 return 0;
87553
87554 err_reg:
87555- if (tbl != ipv4_route_flush_table)
87556- kfree(tbl);
87557+ kfree(tbl);
87558 err_dup:
87559 return -ENOMEM;
87560 }
87561@@ -2603,7 +2603,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
87562
87563 static __net_init int rt_genid_init(struct net *net)
87564 {
87565- atomic_set(&net->rt_genid, 0);
87566+ atomic_set_unchecked(&net->rt_genid, 0);
87567 get_random_bytes(&net->ipv4.dev_addr_genid,
87568 sizeof(net->ipv4.dev_addr_genid));
87569 return 0;
87570diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
87571index 960fd29..d55bf64 100644
87572--- a/net/ipv4/sysctl_net_ipv4.c
87573+++ b/net/ipv4/sysctl_net_ipv4.c
87574@@ -55,7 +55,7 @@ static int ipv4_local_port_range(ctl_table *table, int write,
87575 {
87576 int ret;
87577 int range[2];
87578- ctl_table tmp = {
87579+ ctl_table_no_const tmp = {
87580 .data = &range,
87581 .maxlen = sizeof(range),
87582 .mode = table->mode,
87583@@ -108,7 +108,7 @@ static int ipv4_ping_group_range(ctl_table *table, int write,
87584 int ret;
87585 gid_t urange[2];
87586 kgid_t low, high;
87587- ctl_table tmp = {
87588+ ctl_table_no_const tmp = {
87589 .data = &urange,
87590 .maxlen = sizeof(urange),
87591 .mode = table->mode,
87592@@ -139,7 +139,7 @@ static int proc_tcp_congestion_control(ctl_table *ctl, int write,
87593 void __user *buffer, size_t *lenp, loff_t *ppos)
87594 {
87595 char val[TCP_CA_NAME_MAX];
87596- ctl_table tbl = {
87597+ ctl_table_no_const tbl = {
87598 .data = val,
87599 .maxlen = TCP_CA_NAME_MAX,
87600 };
87601@@ -158,7 +158,7 @@ static int proc_tcp_available_congestion_control(ctl_table *ctl,
87602 void __user *buffer, size_t *lenp,
87603 loff_t *ppos)
87604 {
87605- ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
87606+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
87607 int ret;
87608
87609 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
87610@@ -175,7 +175,7 @@ static int proc_allowed_congestion_control(ctl_table *ctl,
87611 void __user *buffer, size_t *lenp,
87612 loff_t *ppos)
87613 {
87614- ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
87615+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
87616 int ret;
87617
87618 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
87619@@ -201,15 +201,17 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
87620 struct mem_cgroup *memcg;
87621 #endif
87622
87623- ctl_table tmp = {
87624+ ctl_table_no_const tmp = {
87625 .data = &vec,
87626 .maxlen = sizeof(vec),
87627 .mode = ctl->mode,
87628 };
87629
87630 if (!write) {
87631- ctl->data = &net->ipv4.sysctl_tcp_mem;
87632- return proc_doulongvec_minmax(ctl, write, buffer, lenp, ppos);
87633+ ctl_table_no_const tcp_mem = *ctl;
87634+
87635+ tcp_mem.data = &net->ipv4.sysctl_tcp_mem;
87636+ return proc_doulongvec_minmax(&tcp_mem, write, buffer, lenp, ppos);
87637 }
87638
87639 ret = proc_doulongvec_minmax(&tmp, write, buffer, lenp, ppos);
87640@@ -236,7 +238,7 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
87641 static int proc_tcp_fastopen_key(ctl_table *ctl, int write, void __user *buffer,
87642 size_t *lenp, loff_t *ppos)
87643 {
87644- ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
87645+ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
87646 struct tcp_fastopen_context *ctxt;
87647 int ret;
87648 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
87649@@ -477,7 +479,7 @@ static struct ctl_table ipv4_table[] = {
87650 },
87651 {
87652 .procname = "ip_local_reserved_ports",
87653- .data = NULL, /* initialized in sysctl_ipv4_init */
87654+ .data = sysctl_local_reserved_ports,
87655 .maxlen = 65536,
87656 .mode = 0644,
87657 .proc_handler = proc_do_large_bitmap,
87658@@ -856,11 +858,10 @@ static struct ctl_table ipv4_net_table[] = {
87659
87660 static __net_init int ipv4_sysctl_init_net(struct net *net)
87661 {
87662- struct ctl_table *table;
87663+ ctl_table_no_const *table = NULL;
87664
87665- table = ipv4_net_table;
87666 if (!net_eq(net, &init_net)) {
87667- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
87668+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
87669 if (table == NULL)
87670 goto err_alloc;
87671
87672@@ -895,15 +896,17 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
87673
87674 tcp_init_mem(net);
87675
87676- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
87677+ if (!net_eq(net, &init_net))
87678+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
87679+ else
87680+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
87681 if (net->ipv4.ipv4_hdr == NULL)
87682 goto err_reg;
87683
87684 return 0;
87685
87686 err_reg:
87687- if (!net_eq(net, &init_net))
87688- kfree(table);
87689+ kfree(table);
87690 err_alloc:
87691 return -ENOMEM;
87692 }
87693@@ -925,16 +928,6 @@ static __net_initdata struct pernet_operations ipv4_sysctl_ops = {
87694 static __init int sysctl_ipv4_init(void)
87695 {
87696 struct ctl_table_header *hdr;
87697- struct ctl_table *i;
87698-
87699- for (i = ipv4_table; i->procname; i++) {
87700- if (strcmp(i->procname, "ip_local_reserved_ports") == 0) {
87701- i->data = sysctl_local_reserved_ports;
87702- break;
87703- }
87704- }
87705- if (!i->procname)
87706- return -EINVAL;
87707
87708 hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table);
87709 if (hdr == NULL)
87710diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
87711index e220207..cdeb839 100644
87712--- a/net/ipv4/tcp.c
87713+++ b/net/ipv4/tcp.c
87714@@ -3383,8 +3383,11 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
87715
87716 for (i = 0; i < shi->nr_frags; ++i) {
87717 const struct skb_frag_struct *f = &shi->frags[i];
87718- struct page *page = skb_frag_page(f);
87719- sg_set_page(&sg, page, skb_frag_size(f), f->page_offset);
87720+ unsigned int offset = f->page_offset;
87721+ struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT);
87722+
87723+ sg_set_page(&sg, page, skb_frag_size(f),
87724+ offset_in_page(offset));
87725 if (crypto_hash_update(desc, &sg, skb_frag_size(f)))
87726 return 1;
87727 }
87728diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
87729index 13b9c08..d33a8d0 100644
87730--- a/net/ipv4/tcp_input.c
87731+++ b/net/ipv4/tcp_input.c
87732@@ -4724,7 +4724,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
87733 * simplifies code)
87734 */
87735 static void
87736-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
87737+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
87738 struct sk_buff *head, struct sk_buff *tail,
87739 u32 start, u32 end)
87740 {
87741@@ -5838,6 +5838,7 @@ discard:
87742 tcp_paws_reject(&tp->rx_opt, 0))
87743 goto discard_and_undo;
87744
87745+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
87746 if (th->syn) {
87747 /* We see SYN without ACK. It is attempt of
87748 * simultaneous connect with crossed SYNs.
87749@@ -5888,6 +5889,7 @@ discard:
87750 goto discard;
87751 #endif
87752 }
87753+#endif
87754 /* "fifth, if neither of the SYN or RST bits is set then
87755 * drop the segment and return."
87756 */
87757@@ -5932,7 +5934,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
87758 goto discard;
87759
87760 if (th->syn) {
87761- if (th->fin)
87762+ if (th->fin || th->urg || th->psh)
87763 goto discard;
87764 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
87765 return 1;
87766diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
87767index d09203c..fd5cc91 100644
87768--- a/net/ipv4/tcp_ipv4.c
87769+++ b/net/ipv4/tcp_ipv4.c
87770@@ -90,6 +90,10 @@ int sysctl_tcp_low_latency __read_mostly;
87771 EXPORT_SYMBOL(sysctl_tcp_low_latency);
87772
87773
87774+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87775+extern int grsec_enable_blackhole;
87776+#endif
87777+
87778 #ifdef CONFIG_TCP_MD5SIG
87779 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
87780 __be32 daddr, __be32 saddr, const struct tcphdr *th);
87781@@ -1897,6 +1901,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
87782 return 0;
87783
87784 reset:
87785+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87786+ if (!grsec_enable_blackhole)
87787+#endif
87788 tcp_v4_send_reset(rsk, skb);
87789 discard:
87790 kfree_skb(skb);
87791@@ -1996,12 +2003,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
87792 TCP_SKB_CB(skb)->sacked = 0;
87793
87794 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
87795- if (!sk)
87796+ if (!sk) {
87797+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87798+ ret = 1;
87799+#endif
87800 goto no_tcp_socket;
87801-
87802+ }
87803 process:
87804- if (sk->sk_state == TCP_TIME_WAIT)
87805+ if (sk->sk_state == TCP_TIME_WAIT) {
87806+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87807+ ret = 2;
87808+#endif
87809 goto do_time_wait;
87810+ }
87811
87812 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
87813 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
87814@@ -2052,6 +2066,10 @@ no_tcp_socket:
87815 bad_packet:
87816 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
87817 } else {
87818+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87819+ if (!grsec_enable_blackhole || (ret == 1 &&
87820+ (skb->dev->flags & IFF_LOOPBACK)))
87821+#endif
87822 tcp_v4_send_reset(NULL, skb);
87823 }
87824
87825diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
87826index 2f672e7..b8895e9 100644
87827--- a/net/ipv4/tcp_minisocks.c
87828+++ b/net/ipv4/tcp_minisocks.c
87829@@ -27,6 +27,10 @@
87830 #include <net/inet_common.h>
87831 #include <net/xfrm.h>
87832
87833+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87834+extern int grsec_enable_blackhole;
87835+#endif
87836+
87837 int sysctl_tcp_syncookies __read_mostly = 1;
87838 EXPORT_SYMBOL(sysctl_tcp_syncookies);
87839
87840@@ -749,7 +753,10 @@ embryonic_reset:
87841 * avoid becoming vulnerable to outside attack aiming at
87842 * resetting legit local connections.
87843 */
87844- req->rsk_ops->send_reset(sk, skb);
87845+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87846+ if (!grsec_enable_blackhole)
87847+#endif
87848+ req->rsk_ops->send_reset(sk, skb);
87849 } else if (fastopen) { /* received a valid RST pkt */
87850 reqsk_fastopen_remove(sk, req, true);
87851 tcp_reset(sk);
87852diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
87853index d4943f6..e7a74a5 100644
87854--- a/net/ipv4/tcp_probe.c
87855+++ b/net/ipv4/tcp_probe.c
87856@@ -204,7 +204,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
87857 if (cnt + width >= len)
87858 break;
87859
87860- if (copy_to_user(buf + cnt, tbuf, width))
87861+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
87862 return -EFAULT;
87863 cnt += width;
87864 }
87865diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
87866index b78aac3..e18230b 100644
87867--- a/net/ipv4/tcp_timer.c
87868+++ b/net/ipv4/tcp_timer.c
87869@@ -22,6 +22,10 @@
87870 #include <linux/gfp.h>
87871 #include <net/tcp.h>
87872
87873+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87874+extern int grsec_lastack_retries;
87875+#endif
87876+
87877 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
87878 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
87879 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
87880@@ -185,6 +189,13 @@ static int tcp_write_timeout(struct sock *sk)
87881 }
87882 }
87883
87884+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87885+ if ((sk->sk_state == TCP_LAST_ACK) &&
87886+ (grsec_lastack_retries > 0) &&
87887+ (grsec_lastack_retries < retry_until))
87888+ retry_until = grsec_lastack_retries;
87889+#endif
87890+
87891 if (retransmits_timed_out(sk, retry_until,
87892 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
87893 /* Has it gone just too far? */
87894diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
87895index 0a073a2..ddf6279 100644
87896--- a/net/ipv4/udp.c
87897+++ b/net/ipv4/udp.c
87898@@ -87,6 +87,7 @@
87899 #include <linux/types.h>
87900 #include <linux/fcntl.h>
87901 #include <linux/module.h>
87902+#include <linux/security.h>
87903 #include <linux/socket.h>
87904 #include <linux/sockios.h>
87905 #include <linux/igmp.h>
87906@@ -111,6 +112,10 @@
87907 #include <trace/events/skb.h>
87908 #include "udp_impl.h"
87909
87910+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87911+extern int grsec_enable_blackhole;
87912+#endif
87913+
87914 struct udp_table udp_table __read_mostly;
87915 EXPORT_SYMBOL(udp_table);
87916
87917@@ -594,6 +599,9 @@ found:
87918 return s;
87919 }
87920
87921+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
87922+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
87923+
87924 /*
87925 * This routine is called by the ICMP module when it gets some
87926 * sort of error condition. If err < 0 then the socket should
87927@@ -889,9 +897,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
87928 dport = usin->sin_port;
87929 if (dport == 0)
87930 return -EINVAL;
87931+
87932+ err = gr_search_udp_sendmsg(sk, usin);
87933+ if (err)
87934+ return err;
87935 } else {
87936 if (sk->sk_state != TCP_ESTABLISHED)
87937 return -EDESTADDRREQ;
87938+
87939+ err = gr_search_udp_sendmsg(sk, NULL);
87940+ if (err)
87941+ return err;
87942+
87943 daddr = inet->inet_daddr;
87944 dport = inet->inet_dport;
87945 /* Open fast path for connected socket.
87946@@ -1133,7 +1150,7 @@ static unsigned int first_packet_length(struct sock *sk)
87947 udp_lib_checksum_complete(skb)) {
87948 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
87949 IS_UDPLITE(sk));
87950- atomic_inc(&sk->sk_drops);
87951+ atomic_inc_unchecked(&sk->sk_drops);
87952 __skb_unlink(skb, rcvq);
87953 __skb_queue_tail(&list_kill, skb);
87954 }
87955@@ -1219,6 +1236,10 @@ try_again:
87956 if (!skb)
87957 goto out;
87958
87959+ err = gr_search_udp_recvmsg(sk, skb);
87960+ if (err)
87961+ goto out_free;
87962+
87963 ulen = skb->len - sizeof(struct udphdr);
87964 copied = len;
87965 if (copied > ulen)
87966@@ -1252,7 +1273,7 @@ try_again:
87967 if (unlikely(err)) {
87968 trace_kfree_skb(skb, udp_recvmsg);
87969 if (!peeked) {
87970- atomic_inc(&sk->sk_drops);
87971+ atomic_inc_unchecked(&sk->sk_drops);
87972 UDP_INC_STATS_USER(sock_net(sk),
87973 UDP_MIB_INERRORS, is_udplite);
87974 }
87975@@ -1535,7 +1556,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
87976
87977 drop:
87978 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
87979- atomic_inc(&sk->sk_drops);
87980+ atomic_inc_unchecked(&sk->sk_drops);
87981 kfree_skb(skb);
87982 return -1;
87983 }
87984@@ -1554,7 +1575,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
87985 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
87986
87987 if (!skb1) {
87988- atomic_inc(&sk->sk_drops);
87989+ atomic_inc_unchecked(&sk->sk_drops);
87990 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
87991 IS_UDPLITE(sk));
87992 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
87993@@ -1723,6 +1744,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
87994 goto csum_error;
87995
87996 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
87997+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87998+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
87999+#endif
88000 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
88001
88002 /*
88003@@ -2152,7 +2176,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
88004 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
88005 0, sock_i_ino(sp),
88006 atomic_read(&sp->sk_refcnt), sp,
88007- atomic_read(&sp->sk_drops), len);
88008+ atomic_read_unchecked(&sp->sk_drops), len);
88009 }
88010
88011 int udp4_seq_show(struct seq_file *seq, void *v)
88012diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
88013index 9a459be..086b866 100644
88014--- a/net/ipv4/xfrm4_policy.c
88015+++ b/net/ipv4/xfrm4_policy.c
88016@@ -264,19 +264,18 @@ static struct ctl_table xfrm4_policy_table[] = {
88017
88018 static int __net_init xfrm4_net_init(struct net *net)
88019 {
88020- struct ctl_table *table;
88021+ ctl_table_no_const *table = NULL;
88022 struct ctl_table_header *hdr;
88023
88024- table = xfrm4_policy_table;
88025 if (!net_eq(net, &init_net)) {
88026- table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
88027+ table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table), GFP_KERNEL);
88028 if (!table)
88029 goto err_alloc;
88030
88031 table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
88032- }
88033-
88034- hdr = register_net_sysctl(net, "net/ipv4", table);
88035+ hdr = register_net_sysctl(net, "net/ipv4", table);
88036+ } else
88037+ hdr = register_net_sysctl(net, "net/ipv4", xfrm4_policy_table);
88038 if (!hdr)
88039 goto err_reg;
88040
88041@@ -284,8 +283,7 @@ static int __net_init xfrm4_net_init(struct net *net)
88042 return 0;
88043
88044 err_reg:
88045- if (!net_eq(net, &init_net))
88046- kfree(table);
88047+ kfree(table);
88048 err_alloc:
88049 return -ENOMEM;
88050 }
88051diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
88052index dae802c..bfa4baa 100644
88053--- a/net/ipv6/addrconf.c
88054+++ b/net/ipv6/addrconf.c
88055@@ -2274,7 +2274,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
88056 p.iph.ihl = 5;
88057 p.iph.protocol = IPPROTO_IPV6;
88058 p.iph.ttl = 64;
88059- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
88060+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
88061
88062 if (ops->ndo_do_ioctl) {
88063 mm_segment_t oldfs = get_fs();
88064@@ -4410,7 +4410,7 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write,
88065 int *valp = ctl->data;
88066 int val = *valp;
88067 loff_t pos = *ppos;
88068- ctl_table lctl;
88069+ ctl_table_no_const lctl;
88070 int ret;
88071
88072 /*
88073@@ -4492,7 +4492,7 @@ int addrconf_sysctl_disable(ctl_table *ctl, int write,
88074 int *valp = ctl->data;
88075 int val = *valp;
88076 loff_t pos = *ppos;
88077- ctl_table lctl;
88078+ ctl_table_no_const lctl;
88079 int ret;
88080
88081 /*
88082diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
88083index fff5bdd..15194fb 100644
88084--- a/net/ipv6/icmp.c
88085+++ b/net/ipv6/icmp.c
88086@@ -973,7 +973,7 @@ ctl_table ipv6_icmp_table_template[] = {
88087
88088 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
88089 {
88090- struct ctl_table *table;
88091+ ctl_table_no_const *table;
88092
88093 table = kmemdup(ipv6_icmp_table_template,
88094 sizeof(ipv6_icmp_table_template),
88095diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
88096index 95d13c7..791fe2f 100644
88097--- a/net/ipv6/ip6_gre.c
88098+++ b/net/ipv6/ip6_gre.c
88099@@ -73,7 +73,7 @@ struct ip6gre_net {
88100 struct net_device *fb_tunnel_dev;
88101 };
88102
88103-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
88104+static struct rtnl_link_ops ip6gre_link_ops;
88105 static int ip6gre_tunnel_init(struct net_device *dev);
88106 static void ip6gre_tunnel_setup(struct net_device *dev);
88107 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
88108@@ -1337,7 +1337,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
88109 }
88110
88111
88112-static struct inet6_protocol ip6gre_protocol __read_mostly = {
88113+static struct inet6_protocol ip6gre_protocol = {
88114 .handler = ip6gre_rcv,
88115 .err_handler = ip6gre_err,
88116 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
88117@@ -1671,7 +1671,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
88118 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
88119 };
88120
88121-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
88122+static struct rtnl_link_ops ip6gre_link_ops = {
88123 .kind = "ip6gre",
88124 .maxtype = IFLA_GRE_MAX,
88125 .policy = ip6gre_policy,
88126@@ -1684,7 +1684,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
88127 .fill_info = ip6gre_fill_info,
88128 };
88129
88130-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
88131+static struct rtnl_link_ops ip6gre_tap_ops = {
88132 .kind = "ip6gretap",
88133 .maxtype = IFLA_GRE_MAX,
88134 .policy = ip6gre_policy,
88135diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
88136index 155eccf..851fdae 100644
88137--- a/net/ipv6/ip6_output.c
88138+++ b/net/ipv6/ip6_output.c
88139@@ -1147,7 +1147,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
88140 if (WARN_ON(np->cork.opt))
88141 return -EINVAL;
88142
88143- np->cork.opt = kmalloc(opt->tot_len, sk->sk_allocation);
88144+ np->cork.opt = kzalloc(opt->tot_len, sk->sk_allocation);
88145 if (unlikely(np->cork.opt == NULL))
88146 return -ENOBUFS;
88147
88148diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
88149index fff83cb..82d49dd 100644
88150--- a/net/ipv6/ip6_tunnel.c
88151+++ b/net/ipv6/ip6_tunnel.c
88152@@ -87,7 +87,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
88153
88154 static int ip6_tnl_dev_init(struct net_device *dev);
88155 static void ip6_tnl_dev_setup(struct net_device *dev);
88156-static struct rtnl_link_ops ip6_link_ops __read_mostly;
88157+static struct rtnl_link_ops ip6_link_ops;
88158
88159 static int ip6_tnl_net_id __read_mostly;
88160 struct ip6_tnl_net {
88161@@ -1684,7 +1684,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
88162 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
88163 };
88164
88165-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
88166+static struct rtnl_link_ops ip6_link_ops = {
88167 .kind = "ip6tnl",
88168 .maxtype = IFLA_IPTUN_MAX,
88169 .policy = ip6_tnl_policy,
88170diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
88171index d1e2e8e..51c19ae 100644
88172--- a/net/ipv6/ipv6_sockglue.c
88173+++ b/net/ipv6/ipv6_sockglue.c
88174@@ -991,7 +991,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
88175 if (sk->sk_type != SOCK_STREAM)
88176 return -ENOPROTOOPT;
88177
88178- msg.msg_control = optval;
88179+ msg.msg_control = (void __force_kernel *)optval;
88180 msg.msg_controllen = len;
88181 msg.msg_flags = flags;
88182
88183diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
88184index 341b54a..591e8ed 100644
88185--- a/net/ipv6/netfilter/ip6_tables.c
88186+++ b/net/ipv6/netfilter/ip6_tables.c
88187@@ -1076,14 +1076,14 @@ static int compat_table_info(const struct xt_table_info *info,
88188 #endif
88189
88190 static int get_info(struct net *net, void __user *user,
88191- const int *len, int compat)
88192+ int len, int compat)
88193 {
88194 char name[XT_TABLE_MAXNAMELEN];
88195 struct xt_table *t;
88196 int ret;
88197
88198- if (*len != sizeof(struct ip6t_getinfo)) {
88199- duprintf("length %u != %zu\n", *len,
88200+ if (len != sizeof(struct ip6t_getinfo)) {
88201+ duprintf("length %u != %zu\n", len,
88202 sizeof(struct ip6t_getinfo));
88203 return -EINVAL;
88204 }
88205@@ -1120,7 +1120,7 @@ static int get_info(struct net *net, void __user *user,
88206 info.size = private->size;
88207 strcpy(info.name, name);
88208
88209- if (copy_to_user(user, &info, *len) != 0)
88210+ if (copy_to_user(user, &info, len) != 0)
88211 ret = -EFAULT;
88212 else
88213 ret = 0;
88214@@ -1974,7 +1974,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
88215
88216 switch (cmd) {
88217 case IP6T_SO_GET_INFO:
88218- ret = get_info(sock_net(sk), user, len, 1);
88219+ ret = get_info(sock_net(sk), user, *len, 1);
88220 break;
88221 case IP6T_SO_GET_ENTRIES:
88222 ret = compat_get_entries(sock_net(sk), user, len);
88223@@ -2021,7 +2021,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
88224
88225 switch (cmd) {
88226 case IP6T_SO_GET_INFO:
88227- ret = get_info(sock_net(sk), user, len, 0);
88228+ ret = get_info(sock_net(sk), user, *len, 0);
88229 break;
88230
88231 case IP6T_SO_GET_ENTRIES:
88232diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
88233index 6700069..1e50f42 100644
88234--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
88235+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
88236@@ -89,12 +89,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
88237
88238 static int nf_ct_frag6_sysctl_register(struct net *net)
88239 {
88240- struct ctl_table *table;
88241+ ctl_table_no_const *table = NULL;
88242 struct ctl_table_header *hdr;
88243
88244- table = nf_ct_frag6_sysctl_table;
88245 if (!net_eq(net, &init_net)) {
88246- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
88247+ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
88248 GFP_KERNEL);
88249 if (table == NULL)
88250 goto err_alloc;
88251@@ -102,9 +101,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
88252 table[0].data = &net->nf_frag.frags.timeout;
88253 table[1].data = &net->nf_frag.frags.low_thresh;
88254 table[2].data = &net->nf_frag.frags.high_thresh;
88255- }
88256-
88257- hdr = register_net_sysctl(net, "net/netfilter", table);
88258+ hdr = register_net_sysctl(net, "net/netfilter", table);
88259+ } else
88260+ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
88261 if (hdr == NULL)
88262 goto err_reg;
88263
88264@@ -112,8 +111,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
88265 return 0;
88266
88267 err_reg:
88268- if (!net_eq(net, &init_net))
88269- kfree(table);
88270+ kfree(table);
88271 err_alloc:
88272 return -ENOMEM;
88273 }
88274diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
88275index 330b5e7..796fbf1 100644
88276--- a/net/ipv6/raw.c
88277+++ b/net/ipv6/raw.c
88278@@ -378,7 +378,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
88279 {
88280 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
88281 skb_checksum_complete(skb)) {
88282- atomic_inc(&sk->sk_drops);
88283+ atomic_inc_unchecked(&sk->sk_drops);
88284 kfree_skb(skb);
88285 return NET_RX_DROP;
88286 }
88287@@ -406,7 +406,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
88288 struct raw6_sock *rp = raw6_sk(sk);
88289
88290 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
88291- atomic_inc(&sk->sk_drops);
88292+ atomic_inc_unchecked(&sk->sk_drops);
88293 kfree_skb(skb);
88294 return NET_RX_DROP;
88295 }
88296@@ -430,7 +430,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
88297
88298 if (inet->hdrincl) {
88299 if (skb_checksum_complete(skb)) {
88300- atomic_inc(&sk->sk_drops);
88301+ atomic_inc_unchecked(&sk->sk_drops);
88302 kfree_skb(skb);
88303 return NET_RX_DROP;
88304 }
88305@@ -603,7 +603,7 @@ out:
88306 return err;
88307 }
88308
88309-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
88310+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
88311 struct flowi6 *fl6, struct dst_entry **dstp,
88312 unsigned int flags)
88313 {
88314@@ -915,12 +915,15 @@ do_confirm:
88315 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
88316 char __user *optval, int optlen)
88317 {
88318+ struct icmp6_filter filter;
88319+
88320 switch (optname) {
88321 case ICMPV6_FILTER:
88322 if (optlen > sizeof(struct icmp6_filter))
88323 optlen = sizeof(struct icmp6_filter);
88324- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
88325+ if (copy_from_user(&filter, optval, optlen))
88326 return -EFAULT;
88327+ raw6_sk(sk)->filter = filter;
88328 return 0;
88329 default:
88330 return -ENOPROTOOPT;
88331@@ -933,6 +936,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
88332 char __user *optval, int __user *optlen)
88333 {
88334 int len;
88335+ struct icmp6_filter filter;
88336
88337 switch (optname) {
88338 case ICMPV6_FILTER:
88339@@ -944,7 +948,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
88340 len = sizeof(struct icmp6_filter);
88341 if (put_user(len, optlen))
88342 return -EFAULT;
88343- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
88344+ filter = raw6_sk(sk)->filter;
88345+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
88346 return -EFAULT;
88347 return 0;
88348 default:
88349@@ -1252,7 +1257,7 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
88350 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
88351 0,
88352 sock_i_ino(sp),
88353- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
88354+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
88355 }
88356
88357 static int raw6_seq_show(struct seq_file *seq, void *v)
88358diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
88359index 0ba10e5..c14a4f6 100644
88360--- a/net/ipv6/reassembly.c
88361+++ b/net/ipv6/reassembly.c
88362@@ -602,12 +602,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
88363
88364 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
88365 {
88366- struct ctl_table *table;
88367+ ctl_table_no_const *table = NULL;
88368 struct ctl_table_header *hdr;
88369
88370- table = ip6_frags_ns_ctl_table;
88371 if (!net_eq(net, &init_net)) {
88372- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
88373+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
88374 if (table == NULL)
88375 goto err_alloc;
88376
88377@@ -618,9 +617,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
88378 /* Don't export sysctls to unprivileged users */
88379 if (net->user_ns != &init_user_ns)
88380 table[0].procname = NULL;
88381- }
88382+ hdr = register_net_sysctl(net, "net/ipv6", table);
88383+ } else
88384+ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
88385
88386- hdr = register_net_sysctl(net, "net/ipv6", table);
88387 if (hdr == NULL)
88388 goto err_reg;
88389
88390@@ -628,8 +628,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
88391 return 0;
88392
88393 err_reg:
88394- if (!net_eq(net, &init_net))
88395- kfree(table);
88396+ kfree(table);
88397 err_alloc:
88398 return -ENOMEM;
88399 }
88400diff --git a/net/ipv6/route.c b/net/ipv6/route.c
88401index e5fe004..9fe3e8e 100644
88402--- a/net/ipv6/route.c
88403+++ b/net/ipv6/route.c
88404@@ -2881,7 +2881,7 @@ ctl_table ipv6_route_table_template[] = {
88405
88406 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
88407 {
88408- struct ctl_table *table;
88409+ ctl_table_no_const *table;
88410
88411 table = kmemdup(ipv6_route_table_template,
88412 sizeof(ipv6_route_table_template),
88413diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
88414index 02f96dc..4a5a6e5 100644
88415--- a/net/ipv6/sit.c
88416+++ b/net/ipv6/sit.c
88417@@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev);
88418 static void ipip6_dev_free(struct net_device *dev);
88419 static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
88420 __be32 *v4dst);
88421-static struct rtnl_link_ops sit_link_ops __read_mostly;
88422+static struct rtnl_link_ops sit_link_ops;
88423
88424 static int sit_net_id __read_mostly;
88425 struct sit_net {
88426@@ -1486,7 +1486,7 @@ static const struct nla_policy ipip6_policy[IFLA_IPTUN_MAX + 1] = {
88427 #endif
88428 };
88429
88430-static struct rtnl_link_ops sit_link_ops __read_mostly = {
88431+static struct rtnl_link_ops sit_link_ops = {
88432 .kind = "sit",
88433 .maxtype = IFLA_IPTUN_MAX,
88434 .policy = ipip6_policy,
88435diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
88436index e85c48b..b8268d3 100644
88437--- a/net/ipv6/sysctl_net_ipv6.c
88438+++ b/net/ipv6/sysctl_net_ipv6.c
88439@@ -40,7 +40,7 @@ static ctl_table ipv6_rotable[] = {
88440
88441 static int __net_init ipv6_sysctl_net_init(struct net *net)
88442 {
88443- struct ctl_table *ipv6_table;
88444+ ctl_table_no_const *ipv6_table;
88445 struct ctl_table *ipv6_route_table;
88446 struct ctl_table *ipv6_icmp_table;
88447 int err;
88448diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
88449index 0fce928..c52a518 100644
88450--- a/net/ipv6/tcp_ipv6.c
88451+++ b/net/ipv6/tcp_ipv6.c
88452@@ -103,6 +103,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
88453 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
88454 }
88455
88456+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88457+extern int grsec_enable_blackhole;
88458+#endif
88459+
88460 static void tcp_v6_hash(struct sock *sk)
88461 {
88462 if (sk->sk_state != TCP_CLOSE) {
88463@@ -1446,6 +1450,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
88464 return 0;
88465
88466 reset:
88467+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88468+ if (!grsec_enable_blackhole)
88469+#endif
88470 tcp_v6_send_reset(sk, skb);
88471 discard:
88472 if (opt_skb)
88473@@ -1527,12 +1534,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
88474 TCP_SKB_CB(skb)->sacked = 0;
88475
88476 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
88477- if (!sk)
88478+ if (!sk) {
88479+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88480+ ret = 1;
88481+#endif
88482 goto no_tcp_socket;
88483+ }
88484
88485 process:
88486- if (sk->sk_state == TCP_TIME_WAIT)
88487+ if (sk->sk_state == TCP_TIME_WAIT) {
88488+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88489+ ret = 2;
88490+#endif
88491 goto do_time_wait;
88492+ }
88493
88494 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
88495 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
88496@@ -1581,6 +1596,10 @@ no_tcp_socket:
88497 bad_packet:
88498 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
88499 } else {
88500+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88501+ if (!grsec_enable_blackhole || (ret == 1 &&
88502+ (skb->dev->flags & IFF_LOOPBACK)))
88503+#endif
88504 tcp_v6_send_reset(NULL, skb);
88505 }
88506
88507diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
88508index 27f0f8e..949e7ee 100644
88509--- a/net/ipv6/udp.c
88510+++ b/net/ipv6/udp.c
88511@@ -52,6 +52,10 @@
88512 #include <trace/events/skb.h>
88513 #include "udp_impl.h"
88514
88515+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88516+extern int grsec_enable_blackhole;
88517+#endif
88518+
88519 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
88520 {
88521 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
88522@@ -419,7 +423,7 @@ try_again:
88523 if (unlikely(err)) {
88524 trace_kfree_skb(skb, udpv6_recvmsg);
88525 if (!peeked) {
88526- atomic_inc(&sk->sk_drops);
88527+ atomic_inc_unchecked(&sk->sk_drops);
88528 if (is_udp4)
88529 UDP_INC_STATS_USER(sock_net(sk),
88530 UDP_MIB_INERRORS,
88531@@ -657,7 +661,7 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
88532 return rc;
88533 drop:
88534 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
88535- atomic_inc(&sk->sk_drops);
88536+ atomic_inc_unchecked(&sk->sk_drops);
88537 kfree_skb(skb);
88538 return -1;
88539 }
88540@@ -715,7 +719,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
88541 if (likely(skb1 == NULL))
88542 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
88543 if (!skb1) {
88544- atomic_inc(&sk->sk_drops);
88545+ atomic_inc_unchecked(&sk->sk_drops);
88546 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
88547 IS_UDPLITE(sk));
88548 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
88549@@ -852,6 +856,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
88550 goto discard;
88551
88552 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
88553+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88554+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
88555+#endif
88556 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
88557
88558 kfree_skb(skb);
88559@@ -1377,7 +1384,7 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
88560 0,
88561 sock_i_ino(sp),
88562 atomic_read(&sp->sk_refcnt), sp,
88563- atomic_read(&sp->sk_drops));
88564+ atomic_read_unchecked(&sp->sk_drops));
88565 }
88566
88567 int udp6_seq_show(struct seq_file *seq, void *v)
88568diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
88569index 23ed03d..465a71d 100644
88570--- a/net/ipv6/xfrm6_policy.c
88571+++ b/net/ipv6/xfrm6_policy.c
88572@@ -324,19 +324,19 @@ static struct ctl_table xfrm6_policy_table[] = {
88573
88574 static int __net_init xfrm6_net_init(struct net *net)
88575 {
88576- struct ctl_table *table;
88577+ ctl_table_no_const *table = NULL;
88578 struct ctl_table_header *hdr;
88579
88580- table = xfrm6_policy_table;
88581 if (!net_eq(net, &init_net)) {
88582- table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
88583+ table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table), GFP_KERNEL);
88584 if (!table)
88585 goto err_alloc;
88586
88587 table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
88588- }
88589+ hdr = register_net_sysctl(net, "net/ipv6", table);
88590+ } else
88591+ hdr = register_net_sysctl(net, "net/ipv6", xfrm6_policy_table);
88592
88593- hdr = register_net_sysctl(net, "net/ipv6", table);
88594 if (!hdr)
88595 goto err_reg;
88596
88597@@ -344,8 +344,7 @@ static int __net_init xfrm6_net_init(struct net *net)
88598 return 0;
88599
88600 err_reg:
88601- if (!net_eq(net, &init_net))
88602- kfree(table);
88603+ kfree(table);
88604 err_alloc:
88605 return -ENOMEM;
88606 }
88607diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
88608index 362ba47..66196f4 100644
88609--- a/net/irda/ircomm/ircomm_tty.c
88610+++ b/net/irda/ircomm/ircomm_tty.c
88611@@ -319,11 +319,11 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
88612 add_wait_queue(&port->open_wait, &wait);
88613
88614 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
88615- __FILE__, __LINE__, tty->driver->name, port->count);
88616+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
88617
88618 spin_lock_irqsave(&port->lock, flags);
88619 if (!tty_hung_up_p(filp))
88620- port->count--;
88621+ atomic_dec(&port->count);
88622 port->blocked_open++;
88623 spin_unlock_irqrestore(&port->lock, flags);
88624
88625@@ -358,7 +358,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
88626 }
88627
88628 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
88629- __FILE__, __LINE__, tty->driver->name, port->count);
88630+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
88631
88632 schedule();
88633 }
88634@@ -368,12 +368,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
88635
88636 spin_lock_irqsave(&port->lock, flags);
88637 if (!tty_hung_up_p(filp))
88638- port->count++;
88639+ atomic_inc(&port->count);
88640 port->blocked_open--;
88641 spin_unlock_irqrestore(&port->lock, flags);
88642
88643 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
88644- __FILE__, __LINE__, tty->driver->name, port->count);
88645+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
88646
88647 if (!retval)
88648 port->flags |= ASYNC_NORMAL_ACTIVE;
88649@@ -447,12 +447,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
88650
88651 /* ++ is not atomic, so this should be protected - Jean II */
88652 spin_lock_irqsave(&self->port.lock, flags);
88653- self->port.count++;
88654+ atomic_inc(&self->port.count);
88655 spin_unlock_irqrestore(&self->port.lock, flags);
88656 tty_port_tty_set(&self->port, tty);
88657
88658 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
88659- self->line, self->port.count);
88660+ self->line, atomic_read(&self->port.count));
88661
88662 /* Not really used by us, but lets do it anyway */
88663 self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
88664@@ -989,7 +989,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
88665 tty_kref_put(port->tty);
88666 }
88667 port->tty = NULL;
88668- port->count = 0;
88669+ atomic_set(&port->count, 0);
88670 spin_unlock_irqrestore(&port->lock, flags);
88671
88672 wake_up_interruptible(&port->open_wait);
88673@@ -1346,7 +1346,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
88674 seq_putc(m, '\n');
88675
88676 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
88677- seq_printf(m, "Open count: %d\n", self->port.count);
88678+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
88679 seq_printf(m, "Max data size: %d\n", self->max_data_size);
88680 seq_printf(m, "Max header size: %d\n", self->max_header_size);
88681
88682diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c
88683index 8c00416..9ea0c93 100644
88684--- a/net/irda/irlap_frame.c
88685+++ b/net/irda/irlap_frame.c
88686@@ -544,7 +544,7 @@ static void irlap_recv_discovery_xid_cmd(struct irlap_cb *self,
88687 /*
88688 * We now have some discovery info to deliver!
88689 */
88690- discovery = kmalloc(sizeof(discovery_t), GFP_ATOMIC);
88691+ discovery = kzalloc(sizeof(discovery_t), GFP_ATOMIC);
88692 if (!discovery) {
88693 IRDA_WARNING("%s: unable to malloc!\n", __func__);
88694 return;
88695diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
88696index 206ce6d..cfb27cd 100644
88697--- a/net/iucv/af_iucv.c
88698+++ b/net/iucv/af_iucv.c
88699@@ -773,10 +773,10 @@ static int iucv_sock_autobind(struct sock *sk)
88700
88701 write_lock_bh(&iucv_sk_list.lock);
88702
88703- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
88704+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
88705 while (__iucv_get_sock_by_name(name)) {
88706 sprintf(name, "%08x",
88707- atomic_inc_return(&iucv_sk_list.autobind_name));
88708+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
88709 }
88710
88711 write_unlock_bh(&iucv_sk_list.lock);
88712diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
88713index 4fe76ff..426a904 100644
88714--- a/net/iucv/iucv.c
88715+++ b/net/iucv/iucv.c
88716@@ -690,7 +690,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
88717 return NOTIFY_OK;
88718 }
88719
88720-static struct notifier_block __refdata iucv_cpu_notifier = {
88721+static struct notifier_block iucv_cpu_notifier = {
88722 .notifier_call = iucv_cpu_notify,
88723 };
88724
88725diff --git a/net/key/af_key.c b/net/key/af_key.c
88726index 5b1e5af..2358147 100644
88727--- a/net/key/af_key.c
88728+++ b/net/key/af_key.c
88729@@ -3041,10 +3041,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
88730 static u32 get_acqseq(void)
88731 {
88732 u32 res;
88733- static atomic_t acqseq;
88734+ static atomic_unchecked_t acqseq;
88735
88736 do {
88737- res = atomic_inc_return(&acqseq);
88738+ res = atomic_inc_return_unchecked(&acqseq);
88739 } while (!res);
88740 return res;
88741 }
88742diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
88743index 637a341..8dec687 100644
88744--- a/net/l2tp/l2tp_ppp.c
88745+++ b/net/l2tp/l2tp_ppp.c
88746@@ -346,19 +346,19 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
88747 skb_put(skb, 2);
88748
88749 /* Copy user data into skb */
88750- error = memcpy_fromiovec(skb->data, m->msg_iov, total_len);
88751+ error = memcpy_fromiovec(skb_put(skb, total_len), m->msg_iov,
88752+ total_len);
88753 if (error < 0) {
88754 kfree_skb(skb);
88755 goto error_put_sess_tun;
88756 }
88757- skb_put(skb, total_len);
88758
88759 l2tp_xmit_skb(session, skb, session->hdr_len);
88760
88761 sock_put(ps->tunnel_sock);
88762 sock_put(sk);
88763
88764- return error;
88765+ return total_len;
88766
88767 error_put_sess_tun:
88768 sock_put(ps->tunnel_sock);
88769diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
88770index 843d8c4..cb04fa1 100644
88771--- a/net/mac80211/cfg.c
88772+++ b/net/mac80211/cfg.c
88773@@ -799,7 +799,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
88774 ret = ieee80211_vif_use_channel(sdata, chandef,
88775 IEEE80211_CHANCTX_EXCLUSIVE);
88776 }
88777- } else if (local->open_count == local->monitors) {
88778+ } else if (local_read(&local->open_count) == local->monitors) {
88779 local->_oper_channel = chandef->chan;
88780 local->_oper_channel_type = cfg80211_get_chandef_type(chandef);
88781 ieee80211_hw_config(local, 0);
88782@@ -2834,7 +2834,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
88783 else
88784 local->probe_req_reg--;
88785
88786- if (!local->open_count)
88787+ if (!local_read(&local->open_count))
88788 break;
88789
88790 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
88791@@ -3297,8 +3297,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
88792 if (chanctx_conf) {
88793 *chandef = chanctx_conf->def;
88794 ret = 0;
88795- } else if (local->open_count > 0 &&
88796- local->open_count == local->monitors &&
88797+ } else if (local_read(&local->open_count) > 0 &&
88798+ local_read(&local->open_count) == local->monitors &&
88799 sdata->vif.type == NL80211_IFTYPE_MONITOR) {
88800 if (local->use_chanctx)
88801 *chandef = local->monitor_chandef;
88802diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
88803index 5672533..6738c93 100644
88804--- a/net/mac80211/ieee80211_i.h
88805+++ b/net/mac80211/ieee80211_i.h
88806@@ -28,6 +28,7 @@
88807 #include <net/ieee80211_radiotap.h>
88808 #include <net/cfg80211.h>
88809 #include <net/mac80211.h>
88810+#include <asm/local.h>
88811 #include "key.h"
88812 #include "sta_info.h"
88813 #include "debug.h"
88814@@ -897,7 +898,7 @@ struct ieee80211_local {
88815 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
88816 spinlock_t queue_stop_reason_lock;
88817
88818- int open_count;
88819+ local_t open_count;
88820 int monitors, cooked_mntrs;
88821 /* number of interfaces with corresponding FIF_ flags */
88822 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
88823diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
88824index 9cbebc2..14879bb 100644
88825--- a/net/mac80211/iface.c
88826+++ b/net/mac80211/iface.c
88827@@ -495,7 +495,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
88828 break;
88829 }
88830
88831- if (local->open_count == 0) {
88832+ if (local_read(&local->open_count) == 0) {
88833 res = drv_start(local);
88834 if (res)
88835 goto err_del_bss;
88836@@ -540,7 +540,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
88837 break;
88838 }
88839
88840- if (local->monitors == 0 && local->open_count == 0) {
88841+ if (local->monitors == 0 && local_read(&local->open_count) == 0) {
88842 res = ieee80211_add_virtual_monitor(local);
88843 if (res)
88844 goto err_stop;
88845@@ -649,7 +649,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
88846 atomic_inc(&local->iff_promiscs);
88847
88848 if (coming_up)
88849- local->open_count++;
88850+ local_inc(&local->open_count);
88851
88852 if (hw_reconf_flags)
88853 ieee80211_hw_config(local, hw_reconf_flags);
88854@@ -663,7 +663,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
88855 err_del_interface:
88856 drv_remove_interface(local, sdata);
88857 err_stop:
88858- if (!local->open_count)
88859+ if (!local_read(&local->open_count))
88860 drv_stop(local);
88861 err_del_bss:
88862 sdata->bss = NULL;
88863@@ -806,7 +806,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
88864 }
88865
88866 if (going_down)
88867- local->open_count--;
88868+ local_dec(&local->open_count);
88869
88870 switch (sdata->vif.type) {
88871 case NL80211_IFTYPE_AP_VLAN:
88872@@ -871,7 +871,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
88873
88874 ieee80211_recalc_ps(local, -1);
88875
88876- if (local->open_count == 0) {
88877+ if (local_read(&local->open_count) == 0) {
88878 if (local->ops->napi_poll)
88879 napi_disable(&local->napi);
88880 ieee80211_clear_tx_pending(local);
88881@@ -897,7 +897,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
88882 }
88883 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
88884
88885- if (local->monitors == local->open_count && local->monitors > 0)
88886+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
88887 ieee80211_add_virtual_monitor(local);
88888 }
88889
88890diff --git a/net/mac80211/main.c b/net/mac80211/main.c
88891index 1a8591b..ef5db54 100644
88892--- a/net/mac80211/main.c
88893+++ b/net/mac80211/main.c
88894@@ -180,7 +180,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
88895 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
88896 IEEE80211_CONF_CHANGE_POWER);
88897
88898- if (changed && local->open_count) {
88899+ if (changed && local_read(&local->open_count)) {
88900 ret = drv_config(local, changed);
88901 /*
88902 * Goal:
88903diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
88904index 835584c..be46e67 100644
88905--- a/net/mac80211/pm.c
88906+++ b/net/mac80211/pm.c
88907@@ -33,7 +33,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
88908 struct sta_info *sta;
88909 struct ieee80211_chanctx *ctx;
88910
88911- if (!local->open_count)
88912+ if (!local_read(&local->open_count))
88913 goto suspend;
88914
88915 ieee80211_scan_cancel(local);
88916@@ -75,7 +75,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
88917 cancel_work_sync(&local->dynamic_ps_enable_work);
88918 del_timer_sync(&local->dynamic_ps_timer);
88919
88920- local->wowlan = wowlan && local->open_count;
88921+ local->wowlan = wowlan && local_read(&local->open_count);
88922 if (local->wowlan) {
88923 int err = drv_suspend(local, wowlan);
88924 if (err < 0) {
88925@@ -214,7 +214,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
88926 mutex_unlock(&local->chanctx_mtx);
88927
88928 /* stop hardware - this must stop RX */
88929- if (local->open_count)
88930+ if (local_read(&local->open_count))
88931 ieee80211_stop_device(local);
88932
88933 suspend:
88934diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
88935index dd88381..eef4dd6 100644
88936--- a/net/mac80211/rate.c
88937+++ b/net/mac80211/rate.c
88938@@ -493,7 +493,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
88939
88940 ASSERT_RTNL();
88941
88942- if (local->open_count)
88943+ if (local_read(&local->open_count))
88944 return -EBUSY;
88945
88946 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
88947diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
88948index c97a065..ff61928 100644
88949--- a/net/mac80211/rc80211_pid_debugfs.c
88950+++ b/net/mac80211/rc80211_pid_debugfs.c
88951@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
88952
88953 spin_unlock_irqrestore(&events->lock, status);
88954
88955- if (copy_to_user(buf, pb, p))
88956+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
88957 return -EFAULT;
88958
88959 return p;
88960diff --git a/net/mac80211/util.c b/net/mac80211/util.c
88961index 0f38f43..e53d4a8 100644
88962--- a/net/mac80211/util.c
88963+++ b/net/mac80211/util.c
88964@@ -1388,7 +1388,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
88965 }
88966 #endif
88967 /* everything else happens only if HW was up & running */
88968- if (!local->open_count)
88969+ if (!local_read(&local->open_count))
88970 goto wake_up;
88971
88972 /*
88973diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
88974index 56d22ca..87c778f 100644
88975--- a/net/netfilter/Kconfig
88976+++ b/net/netfilter/Kconfig
88977@@ -958,6 +958,16 @@ config NETFILTER_XT_MATCH_ESP
88978
88979 To compile it as a module, choose M here. If unsure, say N.
88980
88981+config NETFILTER_XT_MATCH_GRADM
88982+ tristate '"gradm" match support'
88983+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
88984+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
88985+ ---help---
88986+ The gradm match allows to match on grsecurity RBAC being enabled.
88987+ It is useful when iptables rules are applied early on bootup to
88988+ prevent connections to the machine (except from a trusted host)
88989+ while the RBAC system is disabled.
88990+
88991 config NETFILTER_XT_MATCH_HASHLIMIT
88992 tristate '"hashlimit" match support'
88993 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
88994diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
88995index a1abf87..dbcb7ee 100644
88996--- a/net/netfilter/Makefile
88997+++ b/net/netfilter/Makefile
88998@@ -112,6 +112,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
88999 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
89000 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
89001 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
89002+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
89003 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
89004 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
89005 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
89006diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
89007index 1ba9dbc..e39f4ca 100644
89008--- a/net/netfilter/ipset/ip_set_core.c
89009+++ b/net/netfilter/ipset/ip_set_core.c
89010@@ -1801,7 +1801,7 @@ done:
89011 return ret;
89012 }
89013
89014-static struct nf_sockopt_ops so_set __read_mostly = {
89015+static struct nf_sockopt_ops so_set = {
89016 .pf = PF_INET,
89017 .get_optmin = SO_IP_SET,
89018 .get_optmax = SO_IP_SET + 1,
89019diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
89020index 704e514..d644cc2 100644
89021--- a/net/netfilter/ipvs/ip_vs_conn.c
89022+++ b/net/netfilter/ipvs/ip_vs_conn.c
89023@@ -551,7 +551,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
89024 /* Increase the refcnt counter of the dest */
89025 atomic_inc(&dest->refcnt);
89026
89027- conn_flags = atomic_read(&dest->conn_flags);
89028+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
89029 if (cp->protocol != IPPROTO_UDP)
89030 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
89031 flags = cp->flags;
89032@@ -895,7 +895,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
89033 atomic_set(&cp->refcnt, 1);
89034
89035 atomic_set(&cp->n_control, 0);
89036- atomic_set(&cp->in_pkts, 0);
89037+ atomic_set_unchecked(&cp->in_pkts, 0);
89038
89039 atomic_inc(&ipvs->conn_count);
89040 if (flags & IP_VS_CONN_F_NO_CPORT)
89041@@ -1174,7 +1174,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
89042
89043 /* Don't drop the entry if its number of incoming packets is not
89044 located in [0, 8] */
89045- i = atomic_read(&cp->in_pkts);
89046+ i = atomic_read_unchecked(&cp->in_pkts);
89047 if (i > 8 || i < 0) return 0;
89048
89049 if (!todrop_rate[i]) return 0;
89050diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
89051index 61f49d2..6c8c5bc 100644
89052--- a/net/netfilter/ipvs/ip_vs_core.c
89053+++ b/net/netfilter/ipvs/ip_vs_core.c
89054@@ -559,7 +559,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
89055 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
89056 /* do not touch skb anymore */
89057
89058- atomic_inc(&cp->in_pkts);
89059+ atomic_inc_unchecked(&cp->in_pkts);
89060 ip_vs_conn_put(cp);
89061 return ret;
89062 }
89063@@ -1689,7 +1689,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
89064 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
89065 pkts = sysctl_sync_threshold(ipvs);
89066 else
89067- pkts = atomic_add_return(1, &cp->in_pkts);
89068+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
89069
89070 if (ipvs->sync_state & IP_VS_STATE_MASTER)
89071 ip_vs_sync_conn(net, cp, pkts);
89072diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
89073index 9e2d1cc..6ed0748 100644
89074--- a/net/netfilter/ipvs/ip_vs_ctl.c
89075+++ b/net/netfilter/ipvs/ip_vs_ctl.c
89076@@ -787,7 +787,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
89077 ip_vs_rs_hash(ipvs, dest);
89078 write_unlock_bh(&ipvs->rs_lock);
89079 }
89080- atomic_set(&dest->conn_flags, conn_flags);
89081+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
89082
89083 /* bind the service */
89084 if (!dest->svc) {
89085@@ -1688,7 +1688,7 @@ proc_do_sync_ports(ctl_table *table, int write,
89086 * align with netns init in ip_vs_control_net_init()
89087 */
89088
89089-static struct ctl_table vs_vars[] = {
89090+static ctl_table_no_const vs_vars[] __read_only = {
89091 {
89092 .procname = "amemthresh",
89093 .maxlen = sizeof(int),
89094@@ -2087,7 +2087,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
89095 " %-7s %-6d %-10d %-10d\n",
89096 &dest->addr.in6,
89097 ntohs(dest->port),
89098- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
89099+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
89100 atomic_read(&dest->weight),
89101 atomic_read(&dest->activeconns),
89102 atomic_read(&dest->inactconns));
89103@@ -2098,7 +2098,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
89104 "%-7s %-6d %-10d %-10d\n",
89105 ntohl(dest->addr.ip),
89106 ntohs(dest->port),
89107- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
89108+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
89109 atomic_read(&dest->weight),
89110 atomic_read(&dest->activeconns),
89111 atomic_read(&dest->inactconns));
89112@@ -2562,13 +2562,14 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
89113 struct ip_vs_dest *dest;
89114 struct ip_vs_dest_entry entry;
89115
89116+ memset(&entry, 0, sizeof(entry));
89117 list_for_each_entry(dest, &svc->destinations, n_list) {
89118 if (count >= get->num_dests)
89119 break;
89120
89121 entry.addr = dest->addr.ip;
89122 entry.port = dest->port;
89123- entry.conn_flags = atomic_read(&dest->conn_flags);
89124+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
89125 entry.weight = atomic_read(&dest->weight);
89126 entry.u_threshold = dest->u_threshold;
89127 entry.l_threshold = dest->l_threshold;
89128@@ -3104,7 +3105,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
89129 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
89130 nla_put_u16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
89131 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
89132- (atomic_read(&dest->conn_flags) &
89133+ (atomic_read_unchecked(&dest->conn_flags) &
89134 IP_VS_CONN_F_FWD_MASK)) ||
89135 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
89136 atomic_read(&dest->weight)) ||
89137@@ -3694,7 +3695,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
89138 {
89139 int idx;
89140 struct netns_ipvs *ipvs = net_ipvs(net);
89141- struct ctl_table *tbl;
89142+ ctl_table_no_const *tbl;
89143
89144 atomic_set(&ipvs->dropentry, 0);
89145 spin_lock_init(&ipvs->dropentry_lock);
89146diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
89147index fdd89b9..bd96aa9 100644
89148--- a/net/netfilter/ipvs/ip_vs_lblc.c
89149+++ b/net/netfilter/ipvs/ip_vs_lblc.c
89150@@ -115,7 +115,7 @@ struct ip_vs_lblc_table {
89151 * IPVS LBLC sysctl table
89152 */
89153 #ifdef CONFIG_SYSCTL
89154-static ctl_table vs_vars_table[] = {
89155+static ctl_table_no_const vs_vars_table[] __read_only = {
89156 {
89157 .procname = "lblc_expiration",
89158 .data = NULL,
89159diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
89160index c03b6a3..8ce3681 100644
89161--- a/net/netfilter/ipvs/ip_vs_lblcr.c
89162+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
89163@@ -288,7 +288,7 @@ struct ip_vs_lblcr_table {
89164 * IPVS LBLCR sysctl table
89165 */
89166
89167-static ctl_table vs_vars_table[] = {
89168+static ctl_table_no_const vs_vars_table[] __read_only = {
89169 {
89170 .procname = "lblcr_expiration",
89171 .data = NULL,
89172diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
89173index 44fd10c..2a163b3 100644
89174--- a/net/netfilter/ipvs/ip_vs_sync.c
89175+++ b/net/netfilter/ipvs/ip_vs_sync.c
89176@@ -596,7 +596,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
89177 cp = cp->control;
89178 if (cp) {
89179 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
89180- pkts = atomic_add_return(1, &cp->in_pkts);
89181+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
89182 else
89183 pkts = sysctl_sync_threshold(ipvs);
89184 ip_vs_sync_conn(net, cp->control, pkts);
89185@@ -758,7 +758,7 @@ control:
89186 if (!cp)
89187 return;
89188 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
89189- pkts = atomic_add_return(1, &cp->in_pkts);
89190+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
89191 else
89192 pkts = sysctl_sync_threshold(ipvs);
89193 goto sloop;
89194@@ -885,7 +885,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
89195
89196 if (opt)
89197 memcpy(&cp->in_seq, opt, sizeof(*opt));
89198- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
89199+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
89200 cp->state = state;
89201 cp->old_state = cp->state;
89202 /*
89203diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
89204index ee6b7a9..f9a89f6 100644
89205--- a/net/netfilter/ipvs/ip_vs_xmit.c
89206+++ b/net/netfilter/ipvs/ip_vs_xmit.c
89207@@ -1210,7 +1210,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
89208 else
89209 rc = NF_ACCEPT;
89210 /* do not touch skb anymore */
89211- atomic_inc(&cp->in_pkts);
89212+ atomic_inc_unchecked(&cp->in_pkts);
89213 goto out;
89214 }
89215
89216@@ -1332,7 +1332,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
89217 else
89218 rc = NF_ACCEPT;
89219 /* do not touch skb anymore */
89220- atomic_inc(&cp->in_pkts);
89221+ atomic_inc_unchecked(&cp->in_pkts);
89222 goto out;
89223 }
89224
89225diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
89226index 2d3030a..7ba1c0a 100644
89227--- a/net/netfilter/nf_conntrack_acct.c
89228+++ b/net/netfilter/nf_conntrack_acct.c
89229@@ -60,7 +60,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
89230 #ifdef CONFIG_SYSCTL
89231 static int nf_conntrack_acct_init_sysctl(struct net *net)
89232 {
89233- struct ctl_table *table;
89234+ ctl_table_no_const *table;
89235
89236 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
89237 GFP_KERNEL);
89238diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
89239index c8e001a..f842a8b 100644
89240--- a/net/netfilter/nf_conntrack_core.c
89241+++ b/net/netfilter/nf_conntrack_core.c
89242@@ -1594,6 +1594,10 @@ void nf_conntrack_init_end(void)
89243 #define DYING_NULLS_VAL ((1<<30)+1)
89244 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
89245
89246+#ifdef CONFIG_GRKERNSEC_HIDESYM
89247+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
89248+#endif
89249+
89250 int nf_conntrack_init_net(struct net *net)
89251 {
89252 int ret;
89253@@ -1608,7 +1612,11 @@ int nf_conntrack_init_net(struct net *net)
89254 goto err_stat;
89255 }
89256
89257+#ifdef CONFIG_GRKERNSEC_HIDESYM
89258+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08lx", atomic_inc_return_unchecked(&conntrack_cache_id));
89259+#else
89260 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
89261+#endif
89262 if (!net->ct.slabname) {
89263 ret = -ENOMEM;
89264 goto err_slabname;
89265diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
89266index b5d2eb8..61ef19a 100644
89267--- a/net/netfilter/nf_conntrack_ecache.c
89268+++ b/net/netfilter/nf_conntrack_ecache.c
89269@@ -186,7 +186,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
89270 #ifdef CONFIG_SYSCTL
89271 static int nf_conntrack_event_init_sysctl(struct net *net)
89272 {
89273- struct ctl_table *table;
89274+ ctl_table_no_const *table;
89275
89276 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
89277 GFP_KERNEL);
89278diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
89279index 94b4b98..97cf0ad 100644
89280--- a/net/netfilter/nf_conntrack_helper.c
89281+++ b/net/netfilter/nf_conntrack_helper.c
89282@@ -56,7 +56,7 @@ static struct ctl_table helper_sysctl_table[] = {
89283
89284 static int nf_conntrack_helper_init_sysctl(struct net *net)
89285 {
89286- struct ctl_table *table;
89287+ ctl_table_no_const *table;
89288
89289 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
89290 GFP_KERNEL);
89291diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
89292index 58ab405..50eb8d3 100644
89293--- a/net/netfilter/nf_conntrack_proto.c
89294+++ b/net/netfilter/nf_conntrack_proto.c
89295@@ -51,7 +51,7 @@ nf_ct_register_sysctl(struct net *net,
89296
89297 static void
89298 nf_ct_unregister_sysctl(struct ctl_table_header **header,
89299- struct ctl_table **table,
89300+ ctl_table_no_const **table,
89301 unsigned int users)
89302 {
89303 if (users > 0)
89304diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
89305index ba65b20..2a4d937 100644
89306--- a/net/netfilter/nf_conntrack_proto_dccp.c
89307+++ b/net/netfilter/nf_conntrack_proto_dccp.c
89308@@ -456,7 +456,7 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
89309
89310 out_invalid:
89311 if (LOG_INVALID(net, IPPROTO_DCCP))
89312- nf_log_packet(nf_ct_l3num(ct), 0, skb, NULL, NULL, NULL, msg);
89313+ nf_log_packet(nf_ct_l3num(ct), 0, skb, NULL, NULL, NULL, "%s", msg);
89314 return false;
89315 }
89316
89317@@ -613,7 +613,7 @@ static int dccp_error(struct net *net, struct nf_conn *tmpl,
89318
89319 out_invalid:
89320 if (LOG_INVALID(net, IPPROTO_DCCP))
89321- nf_log_packet(pf, 0, skb, NULL, NULL, NULL, msg);
89322+ nf_log_packet(pf, 0, skb, NULL, NULL, NULL, "%s", msg);
89323 return -NF_ACCEPT;
89324 }
89325
89326diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
89327index fedee39..d62a93d 100644
89328--- a/net/netfilter/nf_conntrack_standalone.c
89329+++ b/net/netfilter/nf_conntrack_standalone.c
89330@@ -470,7 +470,7 @@ static ctl_table nf_ct_netfilter_table[] = {
89331
89332 static int nf_conntrack_standalone_init_sysctl(struct net *net)
89333 {
89334- struct ctl_table *table;
89335+ ctl_table_no_const *table;
89336
89337 table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
89338 GFP_KERNEL);
89339diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
89340index 902fb0a..87f7fdb 100644
89341--- a/net/netfilter/nf_conntrack_timestamp.c
89342+++ b/net/netfilter/nf_conntrack_timestamp.c
89343@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
89344 #ifdef CONFIG_SYSCTL
89345 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
89346 {
89347- struct ctl_table *table;
89348+ ctl_table_no_const *table;
89349
89350 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
89351 GFP_KERNEL);
89352diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
89353index 9e31269..bc4c1b7 100644
89354--- a/net/netfilter/nf_log.c
89355+++ b/net/netfilter/nf_log.c
89356@@ -215,7 +215,7 @@ static const struct file_operations nflog_file_ops = {
89357
89358 #ifdef CONFIG_SYSCTL
89359 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
89360-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
89361+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
89362 static struct ctl_table_header *nf_log_dir_header;
89363
89364 static int nf_log_proc_dostring(ctl_table *table, int write,
89365@@ -246,14 +246,16 @@ static int nf_log_proc_dostring(ctl_table *table, int write,
89366 rcu_assign_pointer(nf_loggers[tindex], logger);
89367 mutex_unlock(&nf_log_mutex);
89368 } else {
89369+ ctl_table_no_const nf_log_table = *table;
89370+
89371 mutex_lock(&nf_log_mutex);
89372 logger = rcu_dereference_protected(nf_loggers[tindex],
89373 lockdep_is_held(&nf_log_mutex));
89374 if (!logger)
89375- table->data = "NONE";
89376+ nf_log_table.data = "NONE";
89377 else
89378- table->data = logger->name;
89379- r = proc_dostring(table, write, buffer, lenp, ppos);
89380+ nf_log_table.data = logger->name;
89381+ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
89382 mutex_unlock(&nf_log_mutex);
89383 }
89384
89385diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
89386index f042ae5..30ea486 100644
89387--- a/net/netfilter/nf_sockopt.c
89388+++ b/net/netfilter/nf_sockopt.c
89389@@ -45,7 +45,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
89390 }
89391 }
89392
89393- list_add(&reg->list, &nf_sockopts);
89394+ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
89395 out:
89396 mutex_unlock(&nf_sockopt_mutex);
89397 return ret;
89398@@ -55,7 +55,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
89399 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
89400 {
89401 mutex_lock(&nf_sockopt_mutex);
89402- list_del(&reg->list);
89403+ pax_list_del((struct list_head *)&reg->list);
89404 mutex_unlock(&nf_sockopt_mutex);
89405 }
89406 EXPORT_SYMBOL(nf_unregister_sockopt);
89407diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
89408index f248db5..3778ad9 100644
89409--- a/net/netfilter/nfnetlink_log.c
89410+++ b/net/netfilter/nfnetlink_log.c
89411@@ -72,7 +72,7 @@ struct nfulnl_instance {
89412 };
89413
89414 static DEFINE_SPINLOCK(instances_lock);
89415-static atomic_t global_seq;
89416+static atomic_unchecked_t global_seq;
89417
89418 #define INSTANCE_BUCKETS 16
89419 static struct hlist_head instance_table[INSTANCE_BUCKETS];
89420@@ -536,7 +536,7 @@ __build_packet_message(struct nfulnl_instance *inst,
89421 /* global sequence number */
89422 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
89423 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
89424- htonl(atomic_inc_return(&global_seq))))
89425+ htonl(atomic_inc_return_unchecked(&global_seq))))
89426 goto nla_put_failure;
89427
89428 if (data_len) {
89429diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
89430new file mode 100644
89431index 0000000..c566332
89432--- /dev/null
89433+++ b/net/netfilter/xt_gradm.c
89434@@ -0,0 +1,51 @@
89435+/*
89436+ * gradm match for netfilter
89437