1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index b89a739..b47493f 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -2,9 +2,11 @@
6 *.aux
7 *.bin
8 *.bz2
9+*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17@@ -14,6 +16,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -48,14 +51,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *.xz
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38-.*
39+.[^g]*
40+.gen*
41 .*.d
42 .mm
43 53c700_d.h
44@@ -69,6 +75,7 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48+PERF*
49 SCCS
50 System.map*
51 TAGS
52@@ -80,6 +87,7 @@ aic7*seq.h*
53 aicasm
54 aicdb.h*
55 altivec*.c
56+ashldi3.S
57 asm-offsets.h
58 asm_offsets.h
59 autoconf.h*
60@@ -92,19 +100,24 @@ bounds.h
61 bsetup
62 btfixupprep
63 build
64+builtin-policy.h
65 bvmlinux
66 bzImage*
67 capability_names.h
68 capflags.c
69 classlist.h*
70+clut_vga16.c
71+common-cmds.h
72 comp*.log
73 compile.h*
74 conf
75 config
76 config-*
77 config_data.h*
78+config.c
79 config.mak
80 config.mak.autogen
81+config.tmp
82 conmakehash
83 consolemap_deftbl.c*
84 cpustr.h
85@@ -115,9 +128,11 @@ devlist.h*
86 dnotify_test
87 docproc
88 dslm
89+dtc-lexer.lex.c
90 elf2ecoff
91 elfconfig.h*
92 evergreen_reg_safe.h
93+exception_policy.conf
94 fixdep
95 flask.h
96 fore200e_mkfirm
97@@ -125,12 +140,15 @@ fore200e_pca_fw.c*
98 gconf
99 gconf.glade.h
100 gen-devlist
101+gen-kdb_cmds.c
102 gen_crc32table
103 gen_init_cpio
104 generated
105 genheaders
106 genksyms
107 *_gray256.c
108+hash
109+hid-example
110 hpet_example
111 hugepage-mmap
112 hugepage-shm
113@@ -145,14 +163,14 @@ int32.c
114 int4.c
115 int8.c
116 kallsyms
117-kconfig
118+kern_constants.h
119 keywords.c
120 ksym.c*
121 ksym.h*
122 kxgettext
123 lex.c
124 lex.*.c
125-linux
126+lib1funcs.S
127 logo_*.c
128 logo_*_clut224.c
129 logo_*_mono.c
130@@ -162,14 +180,15 @@ mach-types.h
131 machtypes.h
132 map
133 map_hugetlb
134-media
135 mconf
136+mdp
137 miboot*
138 mk_elfconfig
139 mkboot
140 mkbugboot
141 mkcpustr
142 mkdep
143+mkpiggy
144 mkprep
145 mkregtable
146 mktables
147@@ -185,6 +204,8 @@ oui.c*
148 page-types
149 parse.c
150 parse.h
151+parse-events*
152+pasyms.h
153 patches*
154 pca200e.bin
155 pca200e_ecd.bin2
156@@ -194,6 +215,7 @@ perf-archive
157 piggyback
158 piggy.gzip
159 piggy.S
160+pmu-*
161 pnmtologo
162 ppc_defs.h*
163 pss_boot.h
164@@ -203,7 +225,10 @@ r200_reg_safe.h
165 r300_reg_safe.h
166 r420_reg_safe.h
167 r600_reg_safe.h
168+realmode.lds
169+realmode.relocs
170 recordmcount
171+regdb.c
172 relocs
173 rlim_names.h
174 rn50_reg_safe.h
175@@ -213,8 +238,12 @@ series
176 setup
177 setup.bin
178 setup.elf
179+signing_key*
180+size_overflow_hash.h
181 sImage
182+slabinfo
183 sm_tbl*
184+sortextable
185 split-include
186 syscalltab.h
187 tables.c
188@@ -224,6 +253,7 @@ tftpboot.img
189 timeconst.h
190 times.h*
191 trix_boot.h
192+user_constants.h
193 utsrelease.h*
194 vdso-syms.lds
195 vdso.lds
196@@ -235,13 +265,17 @@ vdso32.lds
197 vdso32.so.dbg
198 vdso64.lds
199 vdso64.so.dbg
200+vdsox32.lds
201+vdsox32-syms.lds
202 version.h*
203 vmImage
204 vmlinux
205 vmlinux-*
206 vmlinux.aout
207 vmlinux.bin.all
208+vmlinux.bin.bz2
209 vmlinux.lds
210+vmlinux.relocs
211 vmlinuz
212 voffset.h
213 vsyscall.lds
214@@ -249,9 +283,12 @@ vsyscall_32.lds
215 wanxlfw.inc
216 uImage
217 unifdef
218+utsrelease.h
219 wakeup.bin
220 wakeup.elf
221 wakeup.lds
222+x509*
223 zImage*
224 zconf.hash.c
225+zconf.lex.c
226 zoffset.h
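
(The dontdiff entries added above cover artifacts introduced by this patch's own build machinery: gcc-plugin outputs such as *_reg_safe.h and size_overflow_hash.h, module signing keys, and the new realmode/vdso intermediates. For context, dontdiff is consumed as an exclusion list when diffing kernel trees, e.g. "diff -urN -X Documentation/dontdiff vanilla-3.8.6/ grsec-3.8.6/", where the tree names are hypothetical.)
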
227diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
228index 986614d..e8bfedc 100644
229--- a/Documentation/kernel-parameters.txt
230+++ b/Documentation/kernel-parameters.txt
231@@ -922,6 +922,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
232 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
233 Default: 1024
234
235+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
236+ ignore grsecurity's /proc restrictions
237+
238+
239 hashdist= [KNL,NUMA] Large hashes allocated during boot
240 are distributed across NUMA nodes. Defaults on
241 for 64-bit NUMA, off otherwise.
242@@ -2121,6 +2125,18 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
243 the specified number of seconds. This is to be used if
244 your oopses keep scrolling off the screen.
245
246+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
247+ virtualization environments that don't cope well with the
248+ expand down segment used by UDEREF on X86-32 or the frequent
249+ page table updates on X86-64.
250+
251+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
252+
253+ pax_extra_latent_entropy
254+ Enable a very simple form of latent entropy extraction
255+ from the first 4GB of memory as the bootmem allocator
256+ passes the memory pages to the buddy allocator.
257+
258 pcbit= [HW,ISDN]
259
260 pcd. [PARIDE]
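
(All three PaX entries documented above are boot-time switches; together with the grsecurity one they might appear on a kernel command line as "pax_softmode=1 pax_extra_latent_entropy grsec_proc_gid=1001", where 1001 is a made-up GID standing for whichever group should bypass the /proc restrictions.)
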
261diff --git a/Makefile b/Makefile
262index 10075d6..8a01f3b 100644
263--- a/Makefile
264+++ b/Makefile
265@@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
266
267 HOSTCC = gcc
268 HOSTCXX = g++
269-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
270-HOSTCXXFLAGS = -O2
271+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
272+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
273+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
274
275 # Decide whether to build built-in, modular, or both.
276 # Normally, just do built-in.
277@@ -414,8 +415,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
278 # Rules shared between *config targets and build targets
279
280 # Basic helpers built in scripts/
281-PHONY += scripts_basic
282-scripts_basic:
283+PHONY += scripts_basic gcc-plugins
284+scripts_basic: gcc-plugins
285 $(Q)$(MAKE) $(build)=scripts/basic
286 $(Q)rm -f .tmp_quiet_recordmcount
287
288@@ -575,6 +576,65 @@ else
289 KBUILD_CFLAGS += -O2
290 endif
291
292+ifndef DISABLE_PAX_PLUGINS
293+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
294+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
295+else
296+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
297+endif
298+ifneq ($(PLUGINCC),)
299+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
300+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
301+endif
302+ifdef CONFIG_PAX_MEMORY_STACKLEAK
303+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
304+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
305+endif
306+ifdef CONFIG_KALLOCSTAT_PLUGIN
307+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
308+endif
309+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
310+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
311+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
312+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
313+endif
314+ifdef CONFIG_CHECKER_PLUGIN
315+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
316+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
317+endif
318+endif
319+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
320+ifdef CONFIG_PAX_SIZE_OVERFLOW
321+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
322+endif
323+ifdef CONFIG_PAX_LATENT_ENTROPY
324+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
325+endif
326+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
327+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
328+endif
329+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
330+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
331+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
332+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
333+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN
334+ifeq ($(KBUILD_EXTMOD),)
335+gcc-plugins:
336+ $(Q)$(MAKE) $(build)=tools/gcc
337+else
338+gcc-plugins: ;
339+endif
340+else
341+gcc-plugins:
342+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
343+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.)
344+else
345+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
346+endif
347+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
348+endif
349+endif
350+
351 include $(srctree)/arch/$(SRCARCH)/Makefile
352
353 ifdef CONFIG_READABLE_ASM
354@@ -731,7 +791,7 @@ export mod_sign_cmd
355
356
357 ifeq ($(KBUILD_EXTMOD),)
358-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
359+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
360
361 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
362 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
363@@ -778,6 +838,8 @@ endif
364
365 # The actual objects are generated when descending,
366 # make sure no implicit rule kicks in
367+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
368+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
369 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
370
371 # Handle descending into subdirectories listed in $(vmlinux-dirs)
372@@ -787,7 +849,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
373 # Error messages still appears in the original language
374
375 PHONY += $(vmlinux-dirs)
376-$(vmlinux-dirs): prepare scripts
377+$(vmlinux-dirs): gcc-plugins prepare scripts
378 $(Q)$(MAKE) $(build)=$@
379
380 # Store (new) KERNELRELASE string in include/config/kernel.release
381@@ -831,6 +893,7 @@ prepare0: archprepare FORCE
382 $(Q)$(MAKE) $(build)=.
383
384 # All the preparing..
385+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
386 prepare: prepare0
387
388 # Generate some files
389@@ -938,6 +1001,8 @@ all: modules
390 # using awk while concatenating to the final file.
391
392 PHONY += modules
393+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
394+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
395 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
396 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
397 @$(kecho) ' Building modules, stage 2.';
398@@ -953,7 +1018,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
399
400 # Target to prepare building external modules
401 PHONY += modules_prepare
402-modules_prepare: prepare scripts
403+modules_prepare: gcc-plugins prepare scripts
404
405 # Target to install modules
406 PHONY += modules_install
407@@ -1019,7 +1084,7 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
408 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
409 signing_key.priv signing_key.x509 x509.genkey \
410 extra_certificates signing_key.x509.keyid \
411- signing_key.x509.signer
412+ signing_key.x509.signer tools/gcc/size_overflow_hash.h
413
414 # clean - Delete most, but leave enough to build external modules
415 #
416@@ -1059,6 +1124,7 @@ distclean: mrproper
417 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
418 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
419 -o -name '.*.rej' \
420+ -o -name '.*.rej' -o -name '*.so' \
421 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
422 -type f -print | xargs rm -f
423
424@@ -1219,6 +1285,8 @@ PHONY += $(module-dirs) modules
425 $(module-dirs): crmodverdir $(objtree)/Module.symvers
426 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
427
428+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
429+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
430 modules: $(module-dirs)
431 @$(kecho) ' Building modules, stage 2.';
432 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
433@@ -1355,17 +1423,21 @@ else
434 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
435 endif
436
437-%.s: %.c prepare scripts FORCE
438+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
439+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
440+%.s: %.c gcc-plugins prepare scripts FORCE
441 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
442 %.i: %.c prepare scripts FORCE
443 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
444-%.o: %.c prepare scripts FORCE
445+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
446+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
447+%.o: %.c gcc-plugins prepare scripts FORCE
448 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
449 %.lst: %.c prepare scripts FORCE
450 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
451-%.s: %.S prepare scripts FORCE
452+%.s: %.S gcc-plugins prepare scripts FORCE
453 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
454-%.o: %.S prepare scripts FORCE
455+%.o: %.S gcc-plugins prepare scripts FORCE
456 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
457 %.symtypes: %.c prepare scripts FORCE
458 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
459@@ -1375,11 +1447,15 @@ endif
460 $(cmd_crmodverdir)
461 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
462 $(build)=$(build-dir)
463-%/: prepare scripts FORCE
464+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
465+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
466+%/: gcc-plugins prepare scripts FORCE
467 $(cmd_crmodverdir)
468 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
469 $(build)=$(build-dir)
470-%.ko: prepare scripts FORCE
471+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
472+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
473+%.ko: gcc-plugins prepare scripts FORCE
474 $(cmd_crmodverdir)
475 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
476 $(build)=$(build-dir) $(@:.ko=.o)
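
(The Makefile machinery above loads compiler plugins built under tools/gcc via -fplugin= and exports GCC_PLUGINS_CFLAGS/AFLAGS into every later compile, while the prepare target filters the flags back out so host-side generation is not instrumented. As orientation only, and not code from this patch, a minimal sketch of the entry point such a plugin must export under the stock GCC 4.5+ plugin API:)

        #include "gcc-plugin.h"
        #include "plugin-version.h"

        /* GCC refuses to load plugins that do not declare a compatible license. */
        int plugin_is_GPL_compatible;

        int plugin_init(struct plugin_name_args *plugin_info,
                        struct plugin_gcc_version *version)
        {
                /* reject a plugin built against a different compiler version */
                if (!plugin_default_version_check(version, &gcc_version))
                        return 1;
                /* a real plugin registers its passes/attributes here */
                return 0;
        }
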
477diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
478index c2cbe4f..f7264b4 100644
479--- a/arch/alpha/include/asm/atomic.h
480+++ b/arch/alpha/include/asm/atomic.h
481@@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
482 #define atomic_dec(v) atomic_sub(1,(v))
483 #define atomic64_dec(v) atomic64_sub(1,(v))
484
485+#define atomic64_read_unchecked(v) atomic64_read(v)
486+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
487+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
488+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
489+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
490+#define atomic64_inc_unchecked(v) atomic64_inc(v)
491+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
492+#define atomic64_dec_unchecked(v) atomic64_dec(v)
493+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
494+
495 #define smp_mb__before_atomic_dec() smp_mb()
496 #define smp_mb__after_atomic_dec() smp_mb()
497 #define smp_mb__before_atomic_inc() smp_mb()
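
(On alpha the *_unchecked aliases above are pure renames, since the architecture carries no PAX_REFCOUNT instrumentation; the split only changes behaviour on architectures where the checked forms trap on overflow, as in the ARM hunks further down. An illustrative use of the split, assuming the atomic64_unchecked_t typedef the patch introduces elsewhere:)

        static atomic64_t obj_refs = ATOMIC64_INIT(1);             /* overflow would be a bug */
        static atomic64_unchecked_t stat_bytes = ATOMIC64_INIT(0); /* free to wrap */

        static void get_and_account(long len)
        {
                atomic64_inc(&obj_refs);                  /* checked where REFCOUNT exists */
                atomic64_add_unchecked(len, &stat_bytes); /* deliberately opted out */
        }
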
498diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
499index ad368a9..fbe0f25 100644
500--- a/arch/alpha/include/asm/cache.h
501+++ b/arch/alpha/include/asm/cache.h
502@@ -4,19 +4,19 @@
503 #ifndef __ARCH_ALPHA_CACHE_H
504 #define __ARCH_ALPHA_CACHE_H
505
506+#include <linux/const.h>
507
508 /* Bytes per L1 (data) cache line. */
509 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
510-# define L1_CACHE_BYTES 64
511 # define L1_CACHE_SHIFT 6
512 #else
513 /* Both EV4 and EV5 are write-through, read-allocate,
514 direct-mapped, physical.
515 */
516-# define L1_CACHE_BYTES 32
517 # define L1_CACHE_SHIFT 5
518 #endif
519
520+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
521 #define SMP_CACHE_BYTES L1_CACHE_BYTES
522
523 #endif
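
(The reason L1_CACHE_BYTES is rebuilt from _AC() in <linux/const.h> is that cache.h is included from both C and assembler sources. A worked expansion for the EV6 case above, given as a comment rather than patch code:)

        /*
         * C:         _AC(1,UL) expands to 1UL, so L1_CACHE_BYTES == (1UL << 6) == 64UL
         * assembler: _AC(1,UL) expands to 1,   so L1_CACHE_BYTES == (1 << 6)   == 64
         *
         * The UL suffix would be a syntax error in .S files, while C code wants
         * the full unsigned-long width for alignment arithmetic; _AC() keeps a
         * single definition valid in both contexts.
         */
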
524diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
525index 968d999..d36b2df 100644
526--- a/arch/alpha/include/asm/elf.h
527+++ b/arch/alpha/include/asm/elf.h
528@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
529
530 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
531
532+#ifdef CONFIG_PAX_ASLR
533+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
534+
535+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
536+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
537+#endif
538+
539 /* $0 is set by ld.so to a pointer to a function which might be
540 registered using atexit. This provides a mean for the dynamic
541 linker to call DT_FINI functions for shared libraries that have
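
(Rough effect of the PAX_ASLR constants above, assuming the usual PaX formula in which a PAX_DELTA_*_LEN-bit random page count is shifted left by PAGE_SHIFT; that generic code is not part of this excerpt:)

        /*
         * alpha uses 8KB pages (PAGE_SHIFT == 13), so for a full 64-bit task:
         *   mmap base:  28 random bits of pages -> 2^28 << 13 == 2^41 bytes of spread
         *   stack base: 19 random bits of pages -> 2^19 << 13 == 2^32 bytes of spread
         * ADDR_LIMIT_32BIT tasks drop to 14 bits (2^27 bytes) for both.
         */
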
542diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
543index bc2a0da..8ad11ee 100644
544--- a/arch/alpha/include/asm/pgalloc.h
545+++ b/arch/alpha/include/asm/pgalloc.h
546@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
547 pgd_set(pgd, pmd);
548 }
549
550+static inline void
551+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
552+{
553+ pgd_populate(mm, pgd, pmd);
554+}
555+
556 extern pgd_t *pgd_alloc(struct mm_struct *mm);
557
558 static inline void
559diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
560index 81a4342..348b927 100644
561--- a/arch/alpha/include/asm/pgtable.h
562+++ b/arch/alpha/include/asm/pgtable.h
563@@ -102,6 +102,17 @@ struct vm_area_struct;
564 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
565 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
566 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
567+
568+#ifdef CONFIG_PAX_PAGEEXEC
569+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
570+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
571+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
572+#else
573+# define PAGE_SHARED_NOEXEC PAGE_SHARED
574+# define PAGE_COPY_NOEXEC PAGE_COPY
575+# define PAGE_READONLY_NOEXEC PAGE_READONLY
576+#endif
577+
578 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
579
580 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
581diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
582index 2fd00b7..cfd5069 100644
583--- a/arch/alpha/kernel/module.c
584+++ b/arch/alpha/kernel/module.c
585@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
586
587 /* The small sections were sorted to the end of the segment.
588 The following should definitely cover them. */
589- gp = (u64)me->module_core + me->core_size - 0x8000;
590+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
591 got = sechdrs[me->arch.gotsecindex].sh_addr;
592
593 for (i = 0; i < n; i++) {
594diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
595index 14db93e..47bed62 100644
596--- a/arch/alpha/kernel/osf_sys.c
597+++ b/arch/alpha/kernel/osf_sys.c
598@@ -1295,16 +1295,16 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
599 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
600
601 static unsigned long
602-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
603- unsigned long limit)
604+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
605+ unsigned long limit, unsigned long flags)
606 {
607 struct vm_area_struct *vma = find_vma(current->mm, addr);
608-
609+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
610 while (1) {
611 /* At this point: (!vma || addr < vma->vm_end). */
612 if (limit - len < addr)
613 return -ENOMEM;
614- if (!vma || addr + len <= vma->vm_start)
615+ if (check_heap_stack_gap(vma, addr, len, offset))
616 return addr;
617 addr = vma->vm_end;
618 vma = vma->vm_next;
619@@ -1340,20 +1340,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
620 merely specific addresses, but regions of memory -- perhaps
621 this feature should be incorporated into all ports? */
622
623+#ifdef CONFIG_PAX_RANDMMAP
624+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
625+#endif
626+
627 if (addr) {
628- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
629+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
630 if (addr != (unsigned long) -ENOMEM)
631 return addr;
632 }
633
634 /* Next, try allocating at TASK_UNMAPPED_BASE. */
635- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
636- len, limit);
637+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
638+
639 if (addr != (unsigned long) -ENOMEM)
640 return addr;
641
642 /* Finally, try allocating in low memory. */
643- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
644+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
645
646 return addr;
647 }
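
(The rewrite above swaps the open-coded "!vma || addr + len <= vma->vm_start" test for check_heap_stack_gap() plus a per-thread random offset from gr_rand_threadstack_offset(); neither helper is defined in this excerpt. A simplified sketch of the contract the search loop now relies on, with the caveat that the real helper must also honour grows-down stack guard gaps:)

        static bool check_heap_stack_gap_sketch(const struct vm_area_struct *vma,
                                                unsigned long addr, unsigned long len,
                                                unsigned long offset)
        {
                if (!vma)
                        return true;                         /* nothing mapped above addr */
                return addr + len + offset <= vma->vm_start; /* fits, including the gap */
        }
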
648diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
649index 0c4132d..88f0d53 100644
650--- a/arch/alpha/mm/fault.c
651+++ b/arch/alpha/mm/fault.c
652@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
653 __reload_thread(pcb);
654 }
655
656+#ifdef CONFIG_PAX_PAGEEXEC
657+/*
658+ * PaX: decide what to do with offenders (regs->pc = fault address)
659+ *
660+ * returns 1 when task should be killed
661+ * 2 when patched PLT trampoline was detected
662+ * 3 when unpatched PLT trampoline was detected
663+ */
664+static int pax_handle_fetch_fault(struct pt_regs *regs)
665+{
666+
667+#ifdef CONFIG_PAX_EMUPLT
668+ int err;
669+
670+ do { /* PaX: patched PLT emulation #1 */
671+ unsigned int ldah, ldq, jmp;
672+
673+ err = get_user(ldah, (unsigned int *)regs->pc);
674+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
675+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
676+
677+ if (err)
678+ break;
679+
680+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
681+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
682+ jmp == 0x6BFB0000U)
683+ {
684+ unsigned long r27, addr;
685+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
686+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
687+
688+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
689+ err = get_user(r27, (unsigned long *)addr);
690+ if (err)
691+ break;
692+
693+ regs->r27 = r27;
694+ regs->pc = r27;
695+ return 2;
696+ }
697+ } while (0);
698+
699+ do { /* PaX: patched PLT emulation #2 */
700+ unsigned int ldah, lda, br;
701+
702+ err = get_user(ldah, (unsigned int *)regs->pc);
703+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
704+ err |= get_user(br, (unsigned int *)(regs->pc+8));
705+
706+ if (err)
707+ break;
708+
709+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
710+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
711+ (br & 0xFFE00000U) == 0xC3E00000U)
712+ {
713+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
714+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
715+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
716+
717+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
718+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
719+ return 2;
720+ }
721+ } while (0);
722+
723+ do { /* PaX: unpatched PLT emulation */
724+ unsigned int br;
725+
726+ err = get_user(br, (unsigned int *)regs->pc);
727+
728+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
729+ unsigned int br2, ldq, nop, jmp;
730+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
731+
732+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
733+ err = get_user(br2, (unsigned int *)addr);
734+ err |= get_user(ldq, (unsigned int *)(addr+4));
735+ err |= get_user(nop, (unsigned int *)(addr+8));
736+ err |= get_user(jmp, (unsigned int *)(addr+12));
737+ err |= get_user(resolver, (unsigned long *)(addr+16));
738+
739+ if (err)
740+ break;
741+
742+ if (br2 == 0xC3600000U &&
743+ ldq == 0xA77B000CU &&
744+ nop == 0x47FF041FU &&
745+ jmp == 0x6B7B0000U)
746+ {
747+ regs->r28 = regs->pc+4;
748+ regs->r27 = addr+16;
749+ regs->pc = resolver;
750+ return 3;
751+ }
752+ }
753+ } while (0);
754+#endif
755+
756+ return 1;
757+}
758+
759+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
760+{
761+ unsigned long i;
762+
763+ printk(KERN_ERR "PAX: bytes at PC: ");
764+ for (i = 0; i < 5; i++) {
765+ unsigned int c;
766+ if (get_user(c, (unsigned int *)pc+i))
767+ printk(KERN_CONT "???????? ");
768+ else
769+ printk(KERN_CONT "%08x ", c);
770+ }
771+ printk("\n");
772+}
773+#endif
774
775 /*
776 * This routine handles page faults. It determines the address,
777@@ -133,8 +251,29 @@ retry:
778 good_area:
779 si_code = SEGV_ACCERR;
780 if (cause < 0) {
781- if (!(vma->vm_flags & VM_EXEC))
782+ if (!(vma->vm_flags & VM_EXEC)) {
783+
784+#ifdef CONFIG_PAX_PAGEEXEC
785+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
786+ goto bad_area;
787+
788+ up_read(&mm->mmap_sem);
789+ switch (pax_handle_fetch_fault(regs)) {
790+
791+#ifdef CONFIG_PAX_EMUPLT
792+ case 2:
793+ case 3:
794+ return;
795+#endif
796+
797+ }
798+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
799+ do_group_exit(SIGKILL);
800+#else
801 goto bad_area;
802+#endif
803+
804+ }
805 } else if (!cause) {
806 /* Allow reads even for write-only mappings */
807 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
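
(The magic constants in pax_handle_fetch_fault() are Alpha memory-format instructions with their 16-bit displacement masked off; the encoding is opcode[31:26] ra[25:21] rb[20:16] disp[15:0]. Decoding the first pattern as a worked example:)

        /*
         * 0x277B0000: 001001 11011 11011 ... -> ldah $27, disp($27)  (opcode 0x09)
         * 0xA77B0000: 101001 11011 11011 ... -> ldq  $27, disp($27)  (opcode 0x29)
         * 0x6BFB0000: 011010 11111 11011 ... -> jmp  $31, ($27)      (opcode 0x1A)
         *
         * This is the classic patched-PLT sequence: load the target address
         * into the procedure-value register $27 and jump through it.  Rather
         * than letting the non-executable fetch kill the task, the handler
         * emulates the sequence and returns 2 (patched PLT trampoline).
         */
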
808diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
809index 67874b8..9aa2d62 100644
810--- a/arch/arm/Kconfig
811+++ b/arch/arm/Kconfig
812@@ -1427,6 +1427,16 @@ config ARM_ERRATA_775420
813 to deadlock. This workaround puts DSB before executing ISB if
814 an abort may occur on cache maintenance.
815
816+config ARM_ERRATA_798181
817+ bool "ARM errata: TLBI/DSB failure on Cortex-A15"
818+ depends on CPU_V7 && SMP
819+ help
820+ On Cortex-A15 (r0p0..r3p2) the TLBI*IS/DSB operations are not
821+ adequately shooting down all use of the old entries. This
822+ option enables the Linux kernel workaround for this erratum
823+ which sends an IPI to the CPUs that are running the same ASID
824+ as the one being invalidated.
825+
826 endmenu
827
828 source "arch/arm/common/Kconfig"
829@@ -1813,7 +1823,7 @@ config ALIGNMENT_TRAP
830
831 config UACCESS_WITH_MEMCPY
832 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
833- depends on MMU
834+ depends on MMU && !PAX_MEMORY_UDEREF
835 default y if CPU_FEROCEON
836 help
837 Implement faster copy_to_user and clear_user methods for CPU
838diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
839index 87dfa902..3a523fc 100644
840--- a/arch/arm/common/gic.c
841+++ b/arch/arm/common/gic.c
842@@ -81,7 +81,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
843 * Supported arch specific GIC irq extension.
844 * Default make them NULL.
845 */
846-struct irq_chip gic_arch_extn = {
847+irq_chip_no_const gic_arch_extn __read_only = {
848 .irq_eoi = NULL,
849 .irq_mask = NULL,
850 .irq_unmask = NULL,
851@@ -329,7 +329,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
852 chained_irq_exit(chip, desc);
853 }
854
855-static struct irq_chip gic_chip = {
856+static irq_chip_no_const gic_chip __read_only = {
857 .name = "GIC",
858 .irq_mask = gic_mask_irq,
859 .irq_unmask = gic_unmask_irq,
860diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
861index c79f61f..9ac0642 100644
862--- a/arch/arm/include/asm/atomic.h
863+++ b/arch/arm/include/asm/atomic.h
864@@ -17,17 +17,35 @@
865 #include <asm/barrier.h>
866 #include <asm/cmpxchg.h>
867
868+#ifdef CONFIG_GENERIC_ATOMIC64
869+#include <asm-generic/atomic64.h>
870+#endif
871+
872 #define ATOMIC_INIT(i) { (i) }
873
874 #ifdef __KERNEL__
875
876+#define _ASM_EXTABLE(from, to) \
877+" .pushsection __ex_table,\"a\"\n"\
878+" .align 3\n" \
879+" .long " #from ", " #to"\n" \
880+" .popsection"
881+
882 /*
883 * On ARM, ordinary assignment (str instruction) doesn't clear the local
884 * strex/ldrex monitor on some implementations. The reason we can use it for
885 * atomic_set() is the clrex or dummy strex done on every exception return.
886 */
887 #define atomic_read(v) (*(volatile int *)&(v)->counter)
888+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
889+{
890+ return v->counter;
891+}
892 #define atomic_set(v,i) (((v)->counter) = (i))
893+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
894+{
895+ v->counter = i;
896+}
897
898 #if __LINUX_ARM_ARCH__ >= 6
899
900@@ -42,6 +60,35 @@ static inline void atomic_add(int i, atomic_t *v)
901 int result;
902
903 __asm__ __volatile__("@ atomic_add\n"
904+"1: ldrex %1, [%3]\n"
905+" adds %0, %1, %4\n"
906+
907+#ifdef CONFIG_PAX_REFCOUNT
908+" bvc 3f\n"
909+"2: bkpt 0xf103\n"
910+"3:\n"
911+#endif
912+
913+" strex %1, %0, [%3]\n"
914+" teq %1, #0\n"
915+" bne 1b"
916+
917+#ifdef CONFIG_PAX_REFCOUNT
918+"\n4:\n"
919+ _ASM_EXTABLE(2b, 4b)
920+#endif
921+
922+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
923+ : "r" (&v->counter), "Ir" (i)
924+ : "cc");
925+}
926+
927+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
928+{
929+ unsigned long tmp;
930+ int result;
931+
932+ __asm__ __volatile__("@ atomic_add_unchecked\n"
933 "1: ldrex %0, [%3]\n"
934 " add %0, %0, %4\n"
935 " strex %1, %0, [%3]\n"
936@@ -60,6 +107,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
937 smp_mb();
938
939 __asm__ __volatile__("@ atomic_add_return\n"
940+"1: ldrex %1, [%3]\n"
941+" adds %0, %1, %4\n"
942+
943+#ifdef CONFIG_PAX_REFCOUNT
944+" bvc 3f\n"
945+" mov %0, %1\n"
946+"2: bkpt 0xf103\n"
947+"3:\n"
948+#endif
949+
950+" strex %1, %0, [%3]\n"
951+" teq %1, #0\n"
952+" bne 1b"
953+
954+#ifdef CONFIG_PAX_REFCOUNT
955+"\n4:\n"
956+ _ASM_EXTABLE(2b, 4b)
957+#endif
958+
959+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
960+ : "r" (&v->counter), "Ir" (i)
961+ : "cc");
962+
963+ smp_mb();
964+
965+ return result;
966+}
967+
968+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
969+{
970+ unsigned long tmp;
971+ int result;
972+
973+ smp_mb();
974+
975+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
976 "1: ldrex %0, [%3]\n"
977 " add %0, %0, %4\n"
978 " strex %1, %0, [%3]\n"
979@@ -80,6 +163,35 @@ static inline void atomic_sub(int i, atomic_t *v)
980 int result;
981
982 __asm__ __volatile__("@ atomic_sub\n"
983+"1: ldrex %1, [%3]\n"
984+" subs %0, %1, %4\n"
985+
986+#ifdef CONFIG_PAX_REFCOUNT
987+" bvc 3f\n"
988+"2: bkpt 0xf103\n"
989+"3:\n"
990+#endif
991+
992+" strex %1, %0, [%3]\n"
993+" teq %1, #0\n"
994+" bne 1b"
995+
996+#ifdef CONFIG_PAX_REFCOUNT
997+"\n4:\n"
998+ _ASM_EXTABLE(2b, 4b)
999+#endif
1000+
1001+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1002+ : "r" (&v->counter), "Ir" (i)
1003+ : "cc");
1004+}
1005+
1006+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1007+{
1008+ unsigned long tmp;
1009+ int result;
1010+
1011+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
1012 "1: ldrex %0, [%3]\n"
1013 " sub %0, %0, %4\n"
1014 " strex %1, %0, [%3]\n"
1015@@ -98,11 +210,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1016 smp_mb();
1017
1018 __asm__ __volatile__("@ atomic_sub_return\n"
1019-"1: ldrex %0, [%3]\n"
1020-" sub %0, %0, %4\n"
1021+"1: ldrex %1, [%3]\n"
1022+" subs %0, %1, %4\n"
1023+
1024+#ifdef CONFIG_PAX_REFCOUNT
1025+" bvc 3f\n"
1026+" mov %0, %1\n"
1027+"2: bkpt 0xf103\n"
1028+"3:\n"
1029+#endif
1030+
1031 " strex %1, %0, [%3]\n"
1032 " teq %1, #0\n"
1033 " bne 1b"
1034+
1035+#ifdef CONFIG_PAX_REFCOUNT
1036+"\n4:\n"
1037+ _ASM_EXTABLE(2b, 4b)
1038+#endif
1039+
1040 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1041 : "r" (&v->counter), "Ir" (i)
1042 : "cc");
1043@@ -134,6 +260,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
1044 return oldval;
1045 }
1046
1047+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
1048+{
1049+ unsigned long oldval, res;
1050+
1051+ smp_mb();
1052+
1053+ do {
1054+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
1055+ "ldrex %1, [%3]\n"
1056+ "mov %0, #0\n"
1057+ "teq %1, %4\n"
1058+ "strexeq %0, %5, [%3]\n"
1059+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1060+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
1061+ : "cc");
1062+ } while (res);
1063+
1064+ smp_mb();
1065+
1066+ return oldval;
1067+}
1068+
1069 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1070 {
1071 unsigned long tmp, tmp2;
1072@@ -167,7 +315,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
1073
1074 return val;
1075 }
1076+
1077+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
1078+{
1079+ return atomic_add_return(i, v);
1080+}
1081+
1082 #define atomic_add(i, v) (void) atomic_add_return(i, v)
1083+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
1084+{
1085+ (void) atomic_add_return(i, v);
1086+}
1087
1088 static inline int atomic_sub_return(int i, atomic_t *v)
1089 {
1090@@ -182,6 +340,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1091 return val;
1092 }
1093 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
1094+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1095+{
1096+ (void) atomic_sub_return(i, v);
1097+}
1098
1099 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1100 {
1101@@ -197,6 +359,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1102 return ret;
1103 }
1104
1105+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1106+{
1107+ return atomic_cmpxchg(v, old, new);
1108+}
1109+
1110 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1111 {
1112 unsigned long flags;
1113@@ -209,6 +376,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1114 #endif /* __LINUX_ARM_ARCH__ */
1115
1116 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1117+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1118+{
1119+ return xchg(&v->counter, new);
1120+}
1121
1122 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1123 {
1124@@ -221,11 +392,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1125 }
1126
1127 #define atomic_inc(v) atomic_add(1, v)
1128+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1129+{
1130+ atomic_add_unchecked(1, v);
1131+}
1132 #define atomic_dec(v) atomic_sub(1, v)
1133+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1134+{
1135+ atomic_sub_unchecked(1, v);
1136+}
1137
1138 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1139+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1140+{
1141+ return atomic_add_return_unchecked(1, v) == 0;
1142+}
1143 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1144 #define atomic_inc_return(v) (atomic_add_return(1, v))
1145+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1146+{
1147+ return atomic_add_return_unchecked(1, v);
1148+}
1149 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1150 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1151
1152@@ -241,6 +428,14 @@ typedef struct {
1153 u64 __aligned(8) counter;
1154 } atomic64_t;
1155
1156+#ifdef CONFIG_PAX_REFCOUNT
1157+typedef struct {
1158+ u64 __aligned(8) counter;
1159+} atomic64_unchecked_t;
1160+#else
1161+typedef atomic64_t atomic64_unchecked_t;
1162+#endif
1163+
1164 #define ATOMIC64_INIT(i) { (i) }
1165
1166 static inline u64 atomic64_read(const atomic64_t *v)
1167@@ -256,6 +451,19 @@ static inline u64 atomic64_read(const atomic64_t *v)
1168 return result;
1169 }
1170
1171+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
1172+{
1173+ u64 result;
1174+
1175+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1176+" ldrexd %0, %H0, [%1]"
1177+ : "=&r" (result)
1178+ : "r" (&v->counter), "Qo" (v->counter)
1179+ );
1180+
1181+ return result;
1182+}
1183+
1184 static inline void atomic64_set(atomic64_t *v, u64 i)
1185 {
1186 u64 tmp;
1187@@ -270,6 +478,20 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1188 : "cc");
1189 }
1190
1191+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1192+{
1193+ u64 tmp;
1194+
1195+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1196+"1: ldrexd %0, %H0, [%2]\n"
1197+" strexd %0, %3, %H3, [%2]\n"
1198+" teq %0, #0\n"
1199+" bne 1b"
1200+ : "=&r" (tmp), "=Qo" (v->counter)
1201+ : "r" (&v->counter), "r" (i)
1202+ : "cc");
1203+}
1204+
1205 static inline void atomic64_add(u64 i, atomic64_t *v)
1206 {
1207 u64 result;
1208@@ -278,6 +500,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1209 __asm__ __volatile__("@ atomic64_add\n"
1210 "1: ldrexd %0, %H0, [%3]\n"
1211 " adds %0, %0, %4\n"
1212+" adcs %H0, %H0, %H4\n"
1213+
1214+#ifdef CONFIG_PAX_REFCOUNT
1215+" bvc 3f\n"
1216+"2: bkpt 0xf103\n"
1217+"3:\n"
1218+#endif
1219+
1220+" strexd %1, %0, %H0, [%3]\n"
1221+" teq %1, #0\n"
1222+" bne 1b"
1223+
1224+#ifdef CONFIG_PAX_REFCOUNT
1225+"\n4:\n"
1226+ _ASM_EXTABLE(2b, 4b)
1227+#endif
1228+
1229+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1230+ : "r" (&v->counter), "r" (i)
1231+ : "cc");
1232+}
1233+
1234+static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
1235+{
1236+ u64 result;
1237+ unsigned long tmp;
1238+
1239+ __asm__ __volatile__("@ atomic64_add_unchecked\n"
1240+"1: ldrexd %0, %H0, [%3]\n"
1241+" adds %0, %0, %4\n"
1242 " adc %H0, %H0, %H4\n"
1243 " strexd %1, %0, %H0, [%3]\n"
1244 " teq %1, #0\n"
1245@@ -289,12 +541,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1246
1247 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
1248 {
1249- u64 result;
1250- unsigned long tmp;
1251+ u64 result, tmp;
1252
1253 smp_mb();
1254
1255 __asm__ __volatile__("@ atomic64_add_return\n"
1256+"1: ldrexd %1, %H1, [%3]\n"
1257+" adds %0, %1, %4\n"
1258+" adcs %H0, %H1, %H4\n"
1259+
1260+#ifdef CONFIG_PAX_REFCOUNT
1261+" bvc 3f\n"
1262+" mov %0, %1\n"
1263+" mov %H0, %H1\n"
1264+"2: bkpt 0xf103\n"
1265+"3:\n"
1266+#endif
1267+
1268+" strexd %1, %0, %H0, [%3]\n"
1269+" teq %1, #0\n"
1270+" bne 1b"
1271+
1272+#ifdef CONFIG_PAX_REFCOUNT
1273+"\n4:\n"
1274+ _ASM_EXTABLE(2b, 4b)
1275+#endif
1276+
1277+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1278+ : "r" (&v->counter), "r" (i)
1279+ : "cc");
1280+
1281+ smp_mb();
1282+
1283+ return result;
1284+}
1285+
1286+static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
1287+{
1288+ u64 result;
1289+ unsigned long tmp;
1290+
1291+ smp_mb();
1292+
1293+ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1294 "1: ldrexd %0, %H0, [%3]\n"
1295 " adds %0, %0, %4\n"
1296 " adc %H0, %H0, %H4\n"
1297@@ -318,6 +607,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1298 __asm__ __volatile__("@ atomic64_sub\n"
1299 "1: ldrexd %0, %H0, [%3]\n"
1300 " subs %0, %0, %4\n"
1301+" sbcs %H0, %H0, %H4\n"
1302+
1303+#ifdef CONFIG_PAX_REFCOUNT
1304+" bvc 3f\n"
1305+"2: bkpt 0xf103\n"
1306+"3:\n"
1307+#endif
1308+
1309+" strexd %1, %0, %H0, [%3]\n"
1310+" teq %1, #0\n"
1311+" bne 1b"
1312+
1313+#ifdef CONFIG_PAX_REFCOUNT
1314+"\n4:\n"
1315+ _ASM_EXTABLE(2b, 4b)
1316+#endif
1317+
1318+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1319+ : "r" (&v->counter), "r" (i)
1320+ : "cc");
1321+}
1322+
1323+static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
1324+{
1325+ u64 result;
1326+ unsigned long tmp;
1327+
1328+ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1329+"1: ldrexd %0, %H0, [%3]\n"
1330+" subs %0, %0, %4\n"
1331 " sbc %H0, %H0, %H4\n"
1332 " strexd %1, %0, %H0, [%3]\n"
1333 " teq %1, #0\n"
1334@@ -329,18 +648,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1335
1336 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
1337 {
1338- u64 result;
1339- unsigned long tmp;
1340+ u64 result, tmp;
1341
1342 smp_mb();
1343
1344 __asm__ __volatile__("@ atomic64_sub_return\n"
1345-"1: ldrexd %0, %H0, [%3]\n"
1346-" subs %0, %0, %4\n"
1347-" sbc %H0, %H0, %H4\n"
1348+"1: ldrexd %1, %H1, [%3]\n"
1349+" subs %0, %1, %4\n"
1350+" sbcs %H0, %H1, %H4\n"
1351+
1352+#ifdef CONFIG_PAX_REFCOUNT
1353+" bvc 3f\n"
1354+" mov %0, %1\n"
1355+" mov %H0, %H1\n"
1356+"2: bkpt 0xf103\n"
1357+"3:\n"
1358+#endif
1359+
1360 " strexd %1, %0, %H0, [%3]\n"
1361 " teq %1, #0\n"
1362 " bne 1b"
1363+
1364+#ifdef CONFIG_PAX_REFCOUNT
1365+"\n4:\n"
1366+ _ASM_EXTABLE(2b, 4b)
1367+#endif
1368+
1369 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1370 : "r" (&v->counter), "r" (i)
1371 : "cc");
1372@@ -374,6 +707,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
1373 return oldval;
1374 }
1375
1376+static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
1377+{
1378+ u64 oldval;
1379+ unsigned long res;
1380+
1381+ smp_mb();
1382+
1383+ do {
1384+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1385+ "ldrexd %1, %H1, [%3]\n"
1386+ "mov %0, #0\n"
1387+ "teq %1, %4\n"
1388+ "teqeq %H1, %H4\n"
1389+ "strexdeq %0, %5, %H5, [%3]"
1390+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1391+ : "r" (&ptr->counter), "r" (old), "r" (new)
1392+ : "cc");
1393+ } while (res);
1394+
1395+ smp_mb();
1396+
1397+ return oldval;
1398+}
1399+
1400 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1401 {
1402 u64 result;
1403@@ -397,21 +754,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1404
1405 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
1406 {
1407- u64 result;
1408- unsigned long tmp;
1409+ u64 result, tmp;
1410
1411 smp_mb();
1412
1413 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1414-"1: ldrexd %0, %H0, [%3]\n"
1415-" subs %0, %0, #1\n"
1416-" sbc %H0, %H0, #0\n"
1417+"1: ldrexd %1, %H1, [%3]\n"
1418+" subs %0, %1, #1\n"
1419+" sbcs %H0, %H1, #0\n"
1420+
1421+#ifdef CONFIG_PAX_REFCOUNT
1422+" bvc 3f\n"
1423+" mov %0, %1\n"
1424+" mov %H0, %H1\n"
1425+"2: bkpt 0xf103\n"
1426+"3:\n"
1427+#endif
1428+
1429 " teq %H0, #0\n"
1430-" bmi 2f\n"
1431+" bmi 4f\n"
1432 " strexd %1, %0, %H0, [%3]\n"
1433 " teq %1, #0\n"
1434 " bne 1b\n"
1435-"2:"
1436+"4:\n"
1437+
1438+#ifdef CONFIG_PAX_REFCOUNT
1439+ _ASM_EXTABLE(2b, 4b)
1440+#endif
1441+
1442 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1443 : "r" (&v->counter)
1444 : "cc");
1445@@ -434,13 +804,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1446 " teq %0, %5\n"
1447 " teqeq %H0, %H5\n"
1448 " moveq %1, #0\n"
1449-" beq 2f\n"
1450+" beq 4f\n"
1451 " adds %0, %0, %6\n"
1452-" adc %H0, %H0, %H6\n"
1453+" adcs %H0, %H0, %H6\n"
1454+
1455+#ifdef CONFIG_PAX_REFCOUNT
1456+" bvc 3f\n"
1457+"2: bkpt 0xf103\n"
1458+"3:\n"
1459+#endif
1460+
1461 " strexd %2, %0, %H0, [%4]\n"
1462 " teq %2, #0\n"
1463 " bne 1b\n"
1464-"2:"
1465+"4:\n"
1466+
1467+#ifdef CONFIG_PAX_REFCOUNT
1468+ _ASM_EXTABLE(2b, 4b)
1469+#endif
1470+
1471 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1472 : "r" (&v->counter), "r" (u), "r" (a)
1473 : "cc");
1474@@ -453,10 +835,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1475
1476 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1477 #define atomic64_inc(v) atomic64_add(1LL, (v))
1478+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1479 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1480+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1481 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1482 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1483 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1484+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1485 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1486 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1487 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
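
(For orientation, the shape of the instrumented ARM atomic_add() above, restated as an annotated sketch of the asm already shown; the resume-at-label-4 behaviour is how the PaX trap handler is presumed to neutralize an overflowing update:)

        /*
         * 1:  ldrex   r1, [counter]    @ exclusive load of the old value
         *     adds    r0, r1, i        @ signed add, sets the V (overflow) flag
         *     bvc     3f               @ no overflow: continue to the store
         * 2:  bkpt    0xf103           @ overflow: trap so PaX can log/kill
         * 3:  strex   r1, r0, [counter]
         *     teq     r1, #0
         *     bne     1b               @ lost the exclusive monitor: retry
         * 4:                           @ _ASM_EXTABLE(2b, 4b) resumes here after
         *                              @ the trap, skipping the strex, so the
         *                              @ counter never actually wraps.
         */
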
1488diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1489index 75fe66b..ba3dee4 100644
1490--- a/arch/arm/include/asm/cache.h
1491+++ b/arch/arm/include/asm/cache.h
1492@@ -4,8 +4,10 @@
1493 #ifndef __ASMARM_CACHE_H
1494 #define __ASMARM_CACHE_H
1495
1496+#include <linux/const.h>
1497+
1498 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1499-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1500+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1501
1502 /*
1503 * Memory returned by kmalloc() may be used for DMA, so we must make
1504@@ -24,5 +26,6 @@
1505 #endif
1506
1507 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
1508+#define __read_only __attribute__ ((__section__(".data..read_only")))
1509
1510 #endif
1511diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1512index e1489c5..d418304 100644
1513--- a/arch/arm/include/asm/cacheflush.h
1514+++ b/arch/arm/include/asm/cacheflush.h
1515@@ -116,7 +116,7 @@ struct cpu_cache_fns {
1516 void (*dma_unmap_area)(const void *, size_t, int);
1517
1518 void (*dma_flush_range)(const void *, const void *);
1519-};
1520+} __no_const;
1521
1522 /*
1523 * Select the calling method
1524diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
1525index 6dcc164..b14d917 100644
1526--- a/arch/arm/include/asm/checksum.h
1527+++ b/arch/arm/include/asm/checksum.h
1528@@ -37,7 +37,19 @@ __wsum
1529 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
1530
1531 __wsum
1532-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1533+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1534+
1535+static inline __wsum
1536+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
1537+{
1538+ __wsum ret;
1539+ pax_open_userland();
1540+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
1541+ pax_close_userland();
1542+ return ret;
1543+}
1544+
1545+
1546
1547 /*
1548 * Fold a partial checksum without adding pseudo headers
1549diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1550index 7eb18c1..e38b6d2 100644
1551--- a/arch/arm/include/asm/cmpxchg.h
1552+++ b/arch/arm/include/asm/cmpxchg.h
1553@@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1554
1555 #define xchg(ptr,x) \
1556 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1557+#define xchg_unchecked(ptr,x) \
1558+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1559
1560 #include <asm-generic/cmpxchg-local.h>
1561
1562diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h
1563index 720799f..2f67631 100644
1564--- a/arch/arm/include/asm/delay.h
1565+++ b/arch/arm/include/asm/delay.h
1566@@ -25,9 +25,9 @@ extern struct arm_delay_ops {
1567 void (*const_udelay)(unsigned long);
1568 void (*udelay)(unsigned long);
1569 bool const_clock;
1570-} arm_delay_ops;
1571+} *arm_delay_ops;
1572
1573-#define __delay(n) arm_delay_ops.delay(n)
1574+#define __delay(n) arm_delay_ops->delay(n)
1575
1576 /*
1577 * This function intentionally does not exist; if you see references to
1578@@ -48,8 +48,8 @@ extern void __bad_udelay(void);
1579 * first constant multiplications gets optimized away if the delay is
1580 * a constant)
1581 */
1582-#define __udelay(n) arm_delay_ops.udelay(n)
1583-#define __const_udelay(n) arm_delay_ops.const_udelay(n)
1584+#define __udelay(n) arm_delay_ops->udelay(n)
1585+#define __const_udelay(n) arm_delay_ops->const_udelay(n)
1586
1587 #define udelay(n) \
1588 (__builtin_constant_p(n) ? \
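
(Turning arm_delay_ops from a struct instance into a pointer means generic callers indirect through one writable word while each ops table can live in read-only data; the matching arch/arm/lib/delay.c side is not in this excerpt, so the registration sketch below, including the __loop_* helper names, is an assumption:)

        static struct arm_delay_ops loop_ops = {
                .delay        = __loop_delay,
                .const_udelay = __loop_const_udelay,
                .udelay       = __loop_udelay,
        };

        struct arm_delay_ops *arm_delay_ops = &loop_ops; /* presumably __read_only in the patch */
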
1589diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
1590index 6ddbe44..b5e38b1 100644
1591--- a/arch/arm/include/asm/domain.h
1592+++ b/arch/arm/include/asm/domain.h
1593@@ -48,18 +48,37 @@
1594 * Domain types
1595 */
1596 #define DOMAIN_NOACCESS 0
1597-#define DOMAIN_CLIENT 1
1598 #ifdef CONFIG_CPU_USE_DOMAINS
1599+#define DOMAIN_USERCLIENT 1
1600+#define DOMAIN_KERNELCLIENT 1
1601 #define DOMAIN_MANAGER 3
1602+#define DOMAIN_VECTORS DOMAIN_USER
1603 #else
1604+
1605+#ifdef CONFIG_PAX_KERNEXEC
1606 #define DOMAIN_MANAGER 1
1607+#define DOMAIN_KERNEXEC 3
1608+#else
1609+#define DOMAIN_MANAGER 1
1610+#endif
1611+
1612+#ifdef CONFIG_PAX_MEMORY_UDEREF
1613+#define DOMAIN_USERCLIENT 0
1614+#define DOMAIN_UDEREF 1
1615+#define DOMAIN_VECTORS DOMAIN_KERNEL
1616+#else
1617+#define DOMAIN_USERCLIENT 1
1618+#define DOMAIN_VECTORS DOMAIN_USER
1619+#endif
1620+#define DOMAIN_KERNELCLIENT 1
1621+
1622 #endif
1623
1624 #define domain_val(dom,type) ((type) << (2*(dom)))
1625
1626 #ifndef __ASSEMBLY__
1627
1628-#ifdef CONFIG_CPU_USE_DOMAINS
1629+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1630 static inline void set_domain(unsigned val)
1631 {
1632 asm volatile(
1633@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
1634 isb();
1635 }
1636
1637-#define modify_domain(dom,type) \
1638- do { \
1639- struct thread_info *thread = current_thread_info(); \
1640- unsigned int domain = thread->cpu_domain; \
1641- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
1642- thread->cpu_domain = domain | domain_val(dom, type); \
1643- set_domain(thread->cpu_domain); \
1644- } while (0)
1645-
1646+extern void modify_domain(unsigned int dom, unsigned int type);
1647 #else
1648 static inline void set_domain(unsigned val) { }
1649 static inline void modify_domain(unsigned dom, unsigned type) { }
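
(domain_val() above packs one 2-bit access type per domain into the ARM DACR register. A worked expansion, assuming the mainline domain numbers DOMAIN_KERNEL == 0 and DOMAIN_USER == 1:)

        /*
         * domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER)    == 3 << 0 == 0x3
         * domain_val(DOMAIN_USER,   DOMAIN_USERCLIENT) == 1 << 2 == 0x4
         * domain_val(DOMAIN_USER,   DOMAIN_NOACCESS)   == 0 << 2 == 0x0
         *
         * Under PAX_MEMORY_UDEREF the user domain is kept at no-access
         * (DOMAIN_USERCLIENT == 0) while the kernel runs, so a stray kernel
         * dereference of a userland pointer faults instead of succeeding.
         */
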
1650diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1651index 38050b1..9d90e8b 100644
1652--- a/arch/arm/include/asm/elf.h
1653+++ b/arch/arm/include/asm/elf.h
1654@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1655 the loader. We need to make sure that it is out of the way of the program
1656 that it will "exec", and that there is sufficient room for the brk. */
1657
1658-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1659+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1660+
1661+#ifdef CONFIG_PAX_ASLR
1662+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1663+
1664+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1665+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1666+#endif
1667
1668 /* When the program starts, a1 contains a pointer to a function to be
1669 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1670@@ -126,8 +133,4 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1671 extern void elf_set_personality(const struct elf32_hdr *);
1672 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1673
1674-struct mm_struct;
1675-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1676-#define arch_randomize_brk arch_randomize_brk
1677-
1678 #endif
1679diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
1680index de53547..52b9a28 100644
1681--- a/arch/arm/include/asm/fncpy.h
1682+++ b/arch/arm/include/asm/fncpy.h
1683@@ -81,7 +81,9 @@
1684 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
1685 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
1686 \
1687+ pax_open_kernel(); \
1688 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
1689+ pax_close_kernel(); \
1690 flush_icache_range((unsigned long)(dest_buf), \
1691 (unsigned long)(dest_buf) + (size)); \
1692 \
1693diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
1694index e42cf59..7b94b8f 100644
1695--- a/arch/arm/include/asm/futex.h
1696+++ b/arch/arm/include/asm/futex.h
1697@@ -50,6 +50,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1698 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1699 return -EFAULT;
1700
1701+ pax_open_userland();
1702+
1703 smp_mb();
1704 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1705 "1: ldrex %1, [%4]\n"
1706@@ -65,6 +67,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1707 : "cc", "memory");
1708 smp_mb();
1709
1710+ pax_close_userland();
1711+
1712 *uval = val;
1713 return ret;
1714 }
1715@@ -95,6 +99,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1716 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1717 return -EFAULT;
1718
1719+ pax_open_userland();
1720+
1721 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1722 "1: " TUSER(ldr) " %1, [%4]\n"
1723 " teq %1, %2\n"
1724@@ -105,6 +111,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1725 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
1726 : "cc", "memory");
1727
1728+ pax_close_userland();
1729+
1730 *uval = val;
1731 return ret;
1732 }
1733@@ -127,6 +135,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1734 return -EFAULT;
1735
1736 pagefault_disable(); /* implies preempt_disable() */
1737+ pax_open_userland();
1738
1739 switch (op) {
1740 case FUTEX_OP_SET:
1741@@ -148,6 +157,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1742 ret = -ENOSYS;
1743 }
1744
1745+ pax_close_userland();
1746 pagefault_enable(); /* subsumes preempt_enable() */
1747
1748 if (!ret) {
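
(All three futex paths gain the same bracketing: with PAX_MEMORY_UDEREF the userland domain is normally inaccessible, so each inline-asm user access has to be wrapped. Schematically, with a hypothetical accessor standing in for the ldrex/strex and TUSER() sequences above:)

        pax_open_userland();            /* temporarily permit userland loads/stores */
        ret = __do_futex_access(uaddr); /* hypothetical: the asm sequences above */
        pax_close_userland();           /* revoke access before returning */
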
1749diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h
1750index 4b1ce6c..bea3f73 100644
1751--- a/arch/arm/include/asm/hardware/gic.h
1752+++ b/arch/arm/include/asm/hardware/gic.h
1753@@ -34,9 +34,10 @@
1754
1755 #ifndef __ASSEMBLY__
1756 #include <linux/irqdomain.h>
1757+#include <linux/irq.h>
1758 struct device_node;
1759
1760-extern struct irq_chip gic_arch_extn;
1761+extern irq_chip_no_const gic_arch_extn;
1762
1763 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
1764 u32 offset, struct device_node *);
1765diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
1766index 8c5e828..91b99ab 100644
1767--- a/arch/arm/include/asm/highmem.h
1768+++ b/arch/arm/include/asm/highmem.h
1769@@ -41,6 +41,13 @@ extern void kunmap_high(struct page *page);
1770 #endif
1771 #endif
1772
1773+/*
1774+ * Needed to be able to broadcast the TLB invalidation for kmap.
1775+ */
1776+#ifdef CONFIG_ARM_ERRATA_798181
1777+#undef ARCH_NEEDS_KMAP_HIGH_GET
1778+#endif
1779+
1780 #ifdef ARCH_NEEDS_KMAP_HIGH_GET
1781 extern void *kmap_high_get(struct page *page);
1782 #else
1783diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1784index 83eb2f7..ed77159 100644
1785--- a/arch/arm/include/asm/kmap_types.h
1786+++ b/arch/arm/include/asm/kmap_types.h
1787@@ -4,6 +4,6 @@
1788 /*
1789 * This is the "bare minimum". AIO seems to require this.
1790 */
1791-#define KM_TYPE_NR 16
1792+#define KM_TYPE_NR 17
1793
1794 #endif
1795diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
1796index 9e614a1..3302cca 100644
1797--- a/arch/arm/include/asm/mach/dma.h
1798+++ b/arch/arm/include/asm/mach/dma.h
1799@@ -22,7 +22,7 @@ struct dma_ops {
1800 int (*residue)(unsigned int, dma_t *); /* optional */
1801 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
1802 const char *type;
1803-};
1804+} __do_const;
1805
1806 struct dma_struct {
1807 void *addr; /* single DMA address */
1808diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1809index 2fe141f..192dc01 100644
1810--- a/arch/arm/include/asm/mach/map.h
1811+++ b/arch/arm/include/asm/mach/map.h
1812@@ -27,13 +27,16 @@ struct map_desc {
1813 #define MT_MINICLEAN 6
1814 #define MT_LOW_VECTORS 7
1815 #define MT_HIGH_VECTORS 8
1816-#define MT_MEMORY 9
1817+#define MT_MEMORY_RWX 9
1818 #define MT_ROM 10
1819-#define MT_MEMORY_NONCACHED 11
1820+#define MT_MEMORY_NONCACHED_RX 11
1821 #define MT_MEMORY_DTCM 12
1822 #define MT_MEMORY_ITCM 13
1823 #define MT_MEMORY_SO 14
1824 #define MT_MEMORY_DMA_READY 15
1825+#define MT_MEMORY_RW 16
1826+#define MT_MEMORY_RX 17
1827+#define MT_MEMORY_NONCACHED_RW 18
1828
1829 #ifdef CONFIG_MMU
1830 extern void iotable_init(struct map_desc *, int);
1831diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
1832index 863a661..a7b85e0 100644
1833--- a/arch/arm/include/asm/mmu_context.h
1834+++ b/arch/arm/include/asm/mmu_context.h
1835@@ -27,6 +27,8 @@ void __check_vmalloc_seq(struct mm_struct *mm);
1836 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
1837 #define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; })
1838
1839+DECLARE_PER_CPU(atomic64_t, active_asids);
1840+
1841 #else /* !CONFIG_CPU_HAS_ASID */
1842
1843 #ifdef CONFIG_MMU
1844diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1845index 53426c6..c7baff3 100644
1846--- a/arch/arm/include/asm/outercache.h
1847+++ b/arch/arm/include/asm/outercache.h
1848@@ -35,7 +35,7 @@ struct outer_cache_fns {
1849 #endif
1850 void (*set_debug)(unsigned long);
1851 void (*resume)(void);
1852-};
1853+} __no_const;
1854
1855 #ifdef CONFIG_OUTER_CACHE
1856
1857diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1858index 812a494..71fc0b6 100644
1859--- a/arch/arm/include/asm/page.h
1860+++ b/arch/arm/include/asm/page.h
1861@@ -114,7 +114,7 @@ struct cpu_user_fns {
1862 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1863 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1864 unsigned long vaddr, struct vm_area_struct *vma);
1865-};
1866+} __no_const;
1867
1868 #ifdef MULTI_USER
1869 extern struct cpu_user_fns cpu_user;
1870diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1871index 943504f..c37a730 100644
1872--- a/arch/arm/include/asm/pgalloc.h
1873+++ b/arch/arm/include/asm/pgalloc.h
1874@@ -17,6 +17,7 @@
1875 #include <asm/processor.h>
1876 #include <asm/cacheflush.h>
1877 #include <asm/tlbflush.h>
1878+#include <asm/system_info.h>
1879
1880 #define check_pgt_cache() do { } while (0)
1881
1882@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1883 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1884 }
1885
1886+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1887+{
1888+ pud_populate(mm, pud, pmd);
1889+}
1890+
1891 #else /* !CONFIG_ARM_LPAE */
1892
1893 /*
1894@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1895 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1896 #define pmd_free(mm, pmd) do { } while (0)
1897 #define pud_populate(mm,pmd,pte) BUG()
1898+#define pud_populate_kernel(mm,pmd,pte) BUG()
1899
1900 #endif /* CONFIG_ARM_LPAE */
1901
1902@@ -126,6 +133,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1903 __free_page(pte);
1904 }
1905
1906+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
1907+{
1908+#ifdef CONFIG_ARM_LPAE
1909+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1910+#else
1911+ if (addr & SECTION_SIZE)
1912+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
1913+ else
1914+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1915+#endif
1916+ flush_pmd_entry(pmdp);
1917+}
1918+
1919 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1920 pmdval_t prot)
1921 {
1922@@ -155,7 +175,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
1923 static inline void
1924 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
1925 {
1926- __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
1927+ __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE | __supported_pmd_mask);
1928 }
1929 #define pmd_pgtable(pmd) pmd_page(pmd)
1930
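
The __section_update() helper added above reflects a 2-level ARM detail: Linux folds two 1 MiB hardware sections into each 2 MiB pmd, so the helper writes either pmdp[0] or pmdp[1] depending on bit 20 of the address, while the LPAE branch has a single entry to patch. A small sketch of that index selection, using made-up addresses:

#include <stdio.h>

#define SECTION_SHIFT 20
#define SECTION_SIZE  (1UL << SECTION_SHIFT)   /* 1 MiB hardware section */

int main(void)
{
    /* on 2-level ARM a Linux pmd spans two sections, so the index
     * inside the pmdp[] pair is simply address bit 20 */
    unsigned long addrs[] = { 0xc0000000UL, 0xc0100000UL, 0xc0234567UL };
    unsigned i;

    for (i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++)
        printf("addr 0x%08lx -> pmdp[%d]\n",
               addrs[i], (addrs[i] & SECTION_SIZE) ? 1 : 0);
    return 0;
}
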
1931diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1932index 5cfba15..f415e1a 100644
1933--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1934+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1935@@ -20,12 +20,15 @@
1936 #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
1937 #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
1938 #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
1939+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
1940 #define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
1941 #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
1942 #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
1943+
1944 /*
1945 * - section
1946 */
1947+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1948 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1949 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1950 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1951@@ -37,6 +40,7 @@
1952 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1953 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1954 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1955+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
1956
1957 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1958 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1959@@ -66,6 +70,7 @@
1960 * - extended small page/tiny page
1961 */
1962 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
1963+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
1964 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
1965 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
1966 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
1967diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
1968index f97ee02..07f1be5 100644
1969--- a/arch/arm/include/asm/pgtable-2level.h
1970+++ b/arch/arm/include/asm/pgtable-2level.h
1971@@ -125,6 +125,7 @@
1972 #define L_PTE_XN (_AT(pteval_t, 1) << 9)
1973 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
1974 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
1975+#define L_PTE_PXN (_AT(pteval_t, 1) << 12) /* v7 */
1976
1977 /*
1978 * These are the memory types, defined to be compatible with
1979diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
1980index d795282..a43ea90 100644
1981--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
1982+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
1983@@ -32,15 +32,18 @@
1984 #define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0)
1985 #define PMD_BIT4 (_AT(pmdval_t, 0))
1986 #define PMD_DOMAIN(x) (_AT(pmdval_t, 0))
1987+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 59) /* PXNTable */
1988
1989 /*
1990 * - section
1991 */
1992 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1993 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1994+#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7)
1995 #define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
1996 #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
1997 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 11)
1998+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 53)
1999 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 54)
2000 #define PMD_SECT_AP_WRITE (_AT(pmdval_t, 0))
2001 #define PMD_SECT_AP_READ (_AT(pmdval_t, 0))
2002@@ -66,6 +69,7 @@
2003 #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
2004 #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
2005 #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
2006+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
2007 #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
2008
2009 /*
2010diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
2011index a3f3792..7b932a6 100644
2012--- a/arch/arm/include/asm/pgtable-3level.h
2013+++ b/arch/arm/include/asm/pgtable-3level.h
2014@@ -74,6 +74,7 @@
2015 #define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
2016 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
2017 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
2018+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
2019 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
2020 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */
2021 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
2022@@ -82,6 +83,7 @@
2023 /*
2024 * To be used in assembly code with the upper page attributes.
2025 */
2026+#define L_PTE_PXN_HIGH (1 << (53 - 32))
2027 #define L_PTE_XN_HIGH (1 << (54 - 32))
2028 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
2029
2030diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
2031index c094749..a6ff605 100644
2032--- a/arch/arm/include/asm/pgtable.h
2033+++ b/arch/arm/include/asm/pgtable.h
2034@@ -30,6 +30,9 @@
2035 #include <asm/pgtable-2level.h>
2036 #endif
2037
2038+#define ktla_ktva(addr) (addr)
2039+#define ktva_ktla(addr) (addr)
2040+
2041 /*
2042 * Just any arbitrary offset to the start of the vmalloc VM area: the
2043 * current 8MB value just means that there will be a 8MB "hole" after the
2044@@ -45,6 +48,9 @@
2045 #define LIBRARY_TEXT_START 0x0c000000
2046
2047 #ifndef __ASSEMBLY__
2048+extern pteval_t __supported_pte_mask;
2049+extern pmdval_t __supported_pmd_mask;
2050+
2051 extern void __pte_error(const char *file, int line, pte_t);
2052 extern void __pmd_error(const char *file, int line, pmd_t);
2053 extern void __pgd_error(const char *file, int line, pgd_t);
2054@@ -53,6 +59,50 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2055 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
2056 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
2057
2058+#define __HAVE_ARCH_PAX_OPEN_KERNEL
2059+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2060+
2061+#ifdef CONFIG_PAX_KERNEXEC
2062+#include <asm/domain.h>
2063+#include <linux/thread_info.h>
2064+#include <linux/preempt.h>
2065+#endif
2066+
2067+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2068+static inline int test_domain(int domain, int domaintype)
2069+{
2070+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2071+}
2072+#endif
2073+
2074+#ifdef CONFIG_PAX_KERNEXEC
2075+static inline unsigned long pax_open_kernel(void) {
2076+#ifdef CONFIG_ARM_LPAE
2077+ /* TODO */
2078+#else
2079+ preempt_disable();
2080+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2081+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2082+#endif
2083+ return 0;
2084+}
2085+
2086+static inline unsigned long pax_close_kernel(void) {
2087+#ifdef CONFIG_ARM_LPAE
2088+ /* TODO */
2089+#else
2090+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2091+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2092+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2093+ preempt_enable_no_resched();
2094+#endif
2095+ return 0;
2096+}
2097+#else
2098+static inline unsigned long pax_open_kernel(void) { return 0; }
2099+static inline unsigned long pax_close_kernel(void) { return 0; }
2100+#endif
2101+
2102 /*
2103 * This is the lowest virtual address we can permit any user space
2104 * mapping to be mapped at. This is particularly important for
2105@@ -63,8 +113,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2106 /*
2107 * The pgprot_* and protection_map entries will be fixed up in runtime
2108 * to include the cachable and bufferable bits based on memory policy,
2109- * as well as any architecture dependent bits like global/ASID and SMP
2110- * shared mapping bits.
2111+ * as well as any architecture dependent bits like global/ASID, PXN,
2112+ * and SMP shared mapping bits.
2113 */
2114 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2115
2116@@ -241,7 +291,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
2117 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2118 {
2119 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2120- L_PTE_NONE | L_PTE_VALID;
2121+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2122 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2123 return pte;
2124 }
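
pax_open_kernel() and pax_close_kernel() above work by rewriting one 2-bit field of the ARM Domain Access Control Register: the 32-bit DACR packs sixteen 2-bit domain types (0 = no access, 1 = client, 3 = manager), and domain_val(dom, type) is simply type shifted left by 2*dom. A user-space sketch of that bit manipulation; the domain numbers are illustrative, the real assignments live in asm/domain.h:

#include <stdio.h>

/* 2-bit access types held in the ARM Domain Access Control Register */
#define DOMAIN_NOACCESS 0   /* every access faults */
#define DOMAIN_CLIENT   1   /* accesses checked against page permissions */
#define DOMAIN_MANAGER  3   /* page permissions ignored entirely */

/* each of the 16 domains owns a 2-bit field of the 32-bit DACR */
#define domain_val(dom, type) ((unsigned)(type) << (2 * (dom)))

int main(void)
{
    /* illustrative domain numbers; the real ones live in asm/domain.h */
    enum { DOMAIN_KERNEL = 0, DOMAIN_USER = 1, DOMAIN_IO = 2 };

    unsigned dacr = domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT) |
                    domain_val(DOMAIN_USER,   DOMAIN_CLIENT) |
                    domain_val(DOMAIN_IO,     DOMAIN_CLIENT);

    printf("DACR before: 0x%08x\n", dacr);

    /* pax_open_kernel()-style switch: clear the field, set the new type */
    dacr &= ~domain_val(DOMAIN_KERNEL, 3);
    dacr |= domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER);

    printf("DACR after:  0x%08x\n", dacr);
    return 0;
}
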
2125diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
2126index f3628fb..a0672dd 100644
2127--- a/arch/arm/include/asm/proc-fns.h
2128+++ b/arch/arm/include/asm/proc-fns.h
2129@@ -75,7 +75,7 @@ extern struct processor {
2130 unsigned int suspend_size;
2131 void (*do_suspend)(void *);
2132 void (*do_resume)(void *);
2133-} processor;
2134+} __do_const processor;
2135
2136 #ifndef MULTI_CPU
2137 extern void cpu_proc_init(void);
2138diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
2139index 06e7d50..8a8e251 100644
2140--- a/arch/arm/include/asm/processor.h
2141+++ b/arch/arm/include/asm/processor.h
2142@@ -65,9 +65,8 @@ struct thread_struct {
2143 regs->ARM_cpsr |= PSR_ENDSTATE; \
2144 regs->ARM_pc = pc & ~1; /* pc */ \
2145 regs->ARM_sp = sp; /* sp */ \
2146- regs->ARM_r2 = stack[2]; /* r2 (envp) */ \
2147- regs->ARM_r1 = stack[1]; /* r1 (argv) */ \
2148- regs->ARM_r0 = stack[0]; /* r0 (argc) */ \
2149+ /* r2 (envp), r1 (argv), r0 (argc) */ \
2150+ (void)copy_from_user(&regs->ARM_r0, (const char __user *)stack, 3 * sizeof(unsigned long)); \
2151 nommu_start_thread(regs); \
2152 })
2153
2154diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2155index d3a22be..3a69ad5 100644
2156--- a/arch/arm/include/asm/smp.h
2157+++ b/arch/arm/include/asm/smp.h
2158@@ -107,7 +107,7 @@ struct smp_operations {
2159 int (*cpu_disable)(unsigned int cpu);
2160 #endif
2161 #endif
2162-};
2163+} __no_const;
2164
2165 /*
2166 * set platform specific SMP operations
2167diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2168index cddda1f..ff357f7 100644
2169--- a/arch/arm/include/asm/thread_info.h
2170+++ b/arch/arm/include/asm/thread_info.h
2171@@ -77,9 +77,9 @@ struct thread_info {
2172 .flags = 0, \
2173 .preempt_count = INIT_PREEMPT_COUNT, \
2174 .addr_limit = KERNEL_DS, \
2175- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2176- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2177- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2178+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2179+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2180+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2181 .restart_block = { \
2182 .fn = do_no_restart_syscall, \
2183 }, \
2184@@ -152,6 +152,12 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2185 #define TIF_SYSCALL_AUDIT 9
2186 #define TIF_SYSCALL_TRACEPOINT 10
2187 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2188+
2189+/* within 8 bits of TIF_SYSCALL_TRACE
2190+ * to meet flexible second operand requirements
2191+ */
2192+#define TIF_GRSEC_SETXID 12
2193+
2194 #define TIF_USING_IWMMXT 17
2195 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2196 #define TIF_RESTORE_SIGMASK 20
2197@@ -165,10 +171,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2198 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2199 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2200 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2201+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2202
2203 /* Checks for any syscall work in entry-common.S */
2204 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2205- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2206+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2207
2208 /*
2209 * Change these and you break ASM code in entry-common.S
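
The comment on TIF_GRSEC_SETXID above is about the ARM "flexible second operand": a data-processing immediate must be an 8-bit value rotated right by an even amount, so the tst in entry-common.S can test _TIF_SYSCALL_WORK in one instruction only if all of its bits fall inside such a window. Placing the new flag at bit 12 keeps the mask, bits 8 through 12, encodable. A quick host-side check of the encoding rule; the second mask is a made-up counterexample:

#include <stdint.h>
#include <stdio.h>

/* True if v is expressible as an 8-bit value rotated right by an
 * even amount, the ARMv7 data-processing immediate encoding. */
static int is_arm_immediate(uint32_t v)
{
    unsigned rot;

    for (rot = 0; rot < 32; rot += 2) {
        /* rotate left by rot to undo a rotate-right of the same amount */
        uint32_t b = rot ? ((v << rot) | (v >> (32 - rot))) : v;
        if (b <= 0xff)
            return 1;
    }
    return 0;
}

int main(void)
{
    uint32_t syscall_work = 0x1f00;   /* bits 8..12: TRACE..GRSEC_SETXID */
    uint32_t too_wide     = 0x20100;  /* bits 8 and 17: spans > 8 bits */

    printf("0x%05x encodable: %d\n", syscall_work, is_arm_immediate(syscall_work));
    printf("0x%05x encodable: %d\n", too_wide, is_arm_immediate(too_wide));
    return 0;
}
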
2210diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
2211index 6e924d3..a9f3ddf 100644
2212--- a/arch/arm/include/asm/tlbflush.h
2213+++ b/arch/arm/include/asm/tlbflush.h
2214@@ -430,6 +430,21 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
2215 }
2216 }
2217
2218+#ifdef CONFIG_ARM_ERRATA_798181
2219+static inline void dummy_flush_tlb_a15_erratum(void)
2220+{
2221+ /*
2222+ * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0.
2223+ */
2224+ asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
2225+ dsb();
2226+}
2227+#else
2228+static inline void dummy_flush_tlb_a15_erratum(void)
2229+{
2230+}
2231+#endif
2232+
2233 /*
2234 * flush_pmd_entry
2235 *
2236diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2237index 7e1f760..752fcb7 100644
2238--- a/arch/arm/include/asm/uaccess.h
2239+++ b/arch/arm/include/asm/uaccess.h
2240@@ -18,6 +18,7 @@
2241 #include <asm/domain.h>
2242 #include <asm/unified.h>
2243 #include <asm/compiler.h>
2244+#include <asm/pgtable.h>
2245
2246 #define VERIFY_READ 0
2247 #define VERIFY_WRITE 1
2248@@ -60,10 +61,34 @@ extern int __put_user_bad(void);
2249 #define USER_DS TASK_SIZE
2250 #define get_fs() (current_thread_info()->addr_limit)
2251
2252+static inline void pax_open_userland(void)
2253+{
2254+
2255+#ifdef CONFIG_PAX_MEMORY_UDEREF
2256+ if (get_fs() == USER_DS) {
2257+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2258+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2259+ }
2260+#endif
2261+
2262+}
2263+
2264+static inline void pax_close_userland(void)
2265+{
2266+
2267+#ifdef CONFIG_PAX_MEMORY_UDEREF
2268+ if (get_fs() == USER_DS) {
2269+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2270+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2271+ }
2272+#endif
2273+
2274+}
2275+
2276 static inline void set_fs(mm_segment_t fs)
2277 {
2278 current_thread_info()->addr_limit = fs;
2279- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2280+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2281 }
2282
2283 #define segment_eq(a,b) ((a) == (b))
2284@@ -143,8 +168,12 @@ extern int __get_user_4(void *);
2285
2286 #define get_user(x,p) \
2287 ({ \
2288+ int __e; \
2289 might_fault(); \
2290- __get_user_check(x,p); \
2291+ pax_open_userland(); \
2292+ __e = __get_user_check(x,p); \
2293+ pax_close_userland(); \
2294+ __e; \
2295 })
2296
2297 extern int __put_user_1(void *, unsigned int);
2298@@ -188,8 +217,12 @@ extern int __put_user_8(void *, unsigned long long);
2299
2300 #define put_user(x,p) \
2301 ({ \
2302+ int __e; \
2303 might_fault(); \
2304- __put_user_check(x,p); \
2305+ pax_open_userland(); \
2306+ __e = __put_user_check(x,p); \
2307+ pax_close_userland(); \
2308+ __e; \
2309 })
2310
2311 #else /* CONFIG_MMU */
2312@@ -230,13 +263,17 @@ static inline void set_fs(mm_segment_t fs)
2313 #define __get_user(x,ptr) \
2314 ({ \
2315 long __gu_err = 0; \
2316+ pax_open_userland(); \
2317 __get_user_err((x),(ptr),__gu_err); \
2318+ pax_close_userland(); \
2319 __gu_err; \
2320 })
2321
2322 #define __get_user_error(x,ptr,err) \
2323 ({ \
2324+ pax_open_userland(); \
2325 __get_user_err((x),(ptr),err); \
2326+ pax_close_userland(); \
2327 (void) 0; \
2328 })
2329
2330@@ -312,13 +349,17 @@ do { \
2331 #define __put_user(x,ptr) \
2332 ({ \
2333 long __pu_err = 0; \
2334+ pax_open_userland(); \
2335 __put_user_err((x),(ptr),__pu_err); \
2336+ pax_close_userland(); \
2337 __pu_err; \
2338 })
2339
2340 #define __put_user_error(x,ptr,err) \
2341 ({ \
2342+ pax_open_userland(); \
2343 __put_user_err((x),(ptr),err); \
2344+ pax_close_userland(); \
2345 (void) 0; \
2346 })
2347
2348@@ -418,11 +459,44 @@ do { \
2349
2350
2351 #ifdef CONFIG_MMU
2352-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2353-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2354+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2355+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2356+
2357+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2358+{
2359+ unsigned long ret;
2360+
2361+ check_object_size(to, n, false);
2362+ pax_open_userland();
2363+ ret = ___copy_from_user(to, from, n);
2364+ pax_close_userland();
2365+ return ret;
2366+}
2367+
2368+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2369+{
2370+ unsigned long ret;
2371+
2372+ check_object_size(from, n, true);
2373+ pax_open_userland();
2374+ ret = ___copy_to_user(to, from, n);
2375+ pax_close_userland();
2376+ return ret;
2377+}
2378+
2379 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2380-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2381+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2382 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2383+
2384+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2385+{
2386+ unsigned long ret;
2387+ pax_open_userland();
2388+ ret = ___clear_user(addr, n);
2389+ pax_close_userland();
2390+ return ret;
2391+}
2392+
2393 #else
2394 #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
2395 #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
2396@@ -431,6 +505,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2397
2398 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2399 {
2400+ if ((long)n < 0)
2401+ return n;
2402+
2403 if (access_ok(VERIFY_READ, from, n))
2404 n = __copy_from_user(to, from, n);
2405 else /* security hole - plug it */
2406@@ -440,6 +517,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2407
2408 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2409 {
2410+ if ((long)n < 0)
2411+ return n;
2412+
2413 if (access_ok(VERIFY_WRITE, to, n))
2414 n = __copy_to_user(to, from, n);
2415 return n;
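
The (long)n < 0 guard added to copy_from_user() and copy_to_user() above rejects lengths with the top bit set before access_ok() ever sees them: a negative signed length converted to size_t becomes an enormous copy request. A minimal sketch of the failure mode and the guard, with memcpy standing in for the real user copy:

#include <stdio.h>
#include <string.h>

/* models the patched copy_from_user(): reject sizes with the top bit
 * set, which typically come from a negative length cast to size_t */
static unsigned long checked_copy(void *to, const void *from, unsigned long n)
{
    if ((long)n < 0)        /* the guard added by the patch */
        return n;           /* report everything as "not copied" */
    memcpy(to, from, n);    /* stands in for the real user copy */
    return 0;
}

int main(void)
{
    char src[8] = "abcdefg", dst[8] = { 0 };
    long bad_len = -1;                        /* bogus length from userland */
    unsigned long n = (unsigned long)bad_len; /* converts to ULONG_MAX */

    printf("huge request: n=%lu leftover=%lu\n", n, checked_copy(dst, src, n));
    printf("sane request: leftover=%lu\n", checked_copy(dst, src, sizeof(src)));
    return 0;
}
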
2416diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2417index 96ee092..37f1844 100644
2418--- a/arch/arm/include/uapi/asm/ptrace.h
2419+++ b/arch/arm/include/uapi/asm/ptrace.h
2420@@ -73,7 +73,7 @@
2421 * ARMv7 groups of PSR bits
2422 */
2423 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2424-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2425+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2426 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2427 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2428
2429diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2430index 60d3b73..d27ee09 100644
2431--- a/arch/arm/kernel/armksyms.c
2432+++ b/arch/arm/kernel/armksyms.c
2433@@ -89,9 +89,9 @@ EXPORT_SYMBOL(__memzero);
2434 #ifdef CONFIG_MMU
2435 EXPORT_SYMBOL(copy_page);
2436
2437-EXPORT_SYMBOL(__copy_from_user);
2438-EXPORT_SYMBOL(__copy_to_user);
2439-EXPORT_SYMBOL(__clear_user);
2440+EXPORT_SYMBOL(___copy_from_user);
2441+EXPORT_SYMBOL(___copy_to_user);
2442+EXPORT_SYMBOL(___clear_user);
2443
2444 EXPORT_SYMBOL(__get_user_1);
2445 EXPORT_SYMBOL(__get_user_2);
2446diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2447index 0f82098..3dbd3ee 100644
2448--- a/arch/arm/kernel/entry-armv.S
2449+++ b/arch/arm/kernel/entry-armv.S
2450@@ -47,6 +47,87 @@
2451 9997:
2452 .endm
2453
2454+ .macro pax_enter_kernel
2455+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2456+ @ make aligned space for saved DACR
2457+ sub sp, sp, #8
2458+ @ save regs
2459+ stmdb sp!, {r1, r2}
2460+ @ read DACR from cpu_domain into r1
2461+ mov r2, sp
2462+ @ assume 8K pages, since we have to split the immediate in two
2463+ bic r2, r2, #(0x1fc0)
2464+ bic r2, r2, #(0x3f)
2465+ ldr r1, [r2, #TI_CPU_DOMAIN]
2466+ @ store old DACR on stack
2467+ str r1, [sp, #8]
2468+#ifdef CONFIG_PAX_KERNEXEC
2469+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2470+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2471+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2472+#endif
2473+#ifdef CONFIG_PAX_MEMORY_UDEREF
2474+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2475+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2476+#endif
2477+ @ write r1 to current_thread_info()->cpu_domain
2478+ str r1, [r2, #TI_CPU_DOMAIN]
2479+ @ write r1 to DACR
2480+ mcr p15, 0, r1, c3, c0, 0
2481+ @ instruction sync
2482+ instr_sync
2483+ @ restore regs
2484+ ldmia sp!, {r1, r2}
2485+#endif
2486+ .endm
2487+
2488+ .macro pax_open_userland
2489+#ifdef CONFIG_PAX_MEMORY_UDEREF
2490+ @ save regs
2491+ stmdb sp!, {r0, r1}
2492+ @ read DACR from cpu_domain into r1
2493+ mov r0, sp
2494+ @ assume 8K pages, since we have to split the immediate in two
2495+ bic r0, r0, #(0x1fc0)
2496+ bic r0, r0, #(0x3f)
2497+ ldr r1, [r0, #TI_CPU_DOMAIN]
2498+ @ set current DOMAIN_USER to DOMAIN_CLIENT
2499+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2500+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2501+ @ write r1 to current_thread_info()->cpu_domain
2502+ str r1, [r0, #TI_CPU_DOMAIN]
2503+ @ write r1 to DACR
2504+ mcr p15, 0, r1, c3, c0, 0
2505+ @ instruction sync
2506+ instr_sync
2507+ @ restore regs
2508+ ldmia sp!, {r0, r1}
2509+#endif
2510+ .endm
2511+
2512+ .macro pax_close_userland
2513+#ifdef CONFIG_PAX_MEMORY_UDEREF
2514+ @ save regs
2515+ stmdb sp!, {r0, r1}
2516+ @ read DACR from cpu_domain into r1
2517+ mov r0, sp
2518+ @ assume 8K pages, since we have to split the immediate in two
2519+ bic r0, r0, #(0x1fc0)
2520+ bic r0, r0, #(0x3f)
2521+ ldr r1, [r0, #TI_CPU_DOMAIN]
2522+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2523+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2524+ @ write r1 to current_thread_info()->cpu_domain
2525+ str r1, [r0, #TI_CPU_DOMAIN]
2526+ @ write r1 to DACR
2527+ mcr p15, 0, r1, c3, c0, 0
2528+ @ instruction sync
2529+ instr_sync
2530+ @ restore regs
2531+ ldmia sp!, {r0, r1}
2532+#endif
2533+ .endm
2534+
2535 .macro pabt_helper
2536 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2537 #ifdef MULTI_PABORT
2538@@ -89,11 +170,15 @@
2539 * Invalid mode handlers
2540 */
2541 .macro inv_entry, reason
2542+
2543+ pax_enter_kernel
2544+
2545 sub sp, sp, #S_FRAME_SIZE
2546 ARM( stmib sp, {r1 - lr} )
2547 THUMB( stmia sp, {r0 - r12} )
2548 THUMB( str sp, [sp, #S_SP] )
2549 THUMB( str lr, [sp, #S_LR] )
2550+
2551 mov r1, #\reason
2552 .endm
2553
2554@@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
2555 .macro svc_entry, stack_hole=0
2556 UNWIND(.fnstart )
2557 UNWIND(.save {r0 - pc} )
2558+
2559+ pax_enter_kernel
2560+
2561 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2562+
2563 #ifdef CONFIG_THUMB2_KERNEL
2564 SPFIX( str r0, [sp] ) @ temporarily saved
2565 SPFIX( mov r0, sp )
2566@@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
2567 ldmia r0, {r3 - r5}
2568 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2569 mov r6, #-1 @ "" "" "" ""
2570+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2571+ @ offset sp by 8 as done in pax_enter_kernel
2572+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2573+#else
2574 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2575+#endif
2576 SPFIX( addeq r2, r2, #4 )
2577 str r3, [sp, #-4]! @ save the "real" r0 copied
2578 @ from the exception stack
2579@@ -359,6 +453,9 @@ ENDPROC(__pabt_svc)
2580 .macro usr_entry
2581 UNWIND(.fnstart )
2582 UNWIND(.cantunwind ) @ don't unwind the user space
2583+
2584+ pax_enter_kernel_user
2585+
2586 sub sp, sp, #S_FRAME_SIZE
2587 ARM( stmib sp, {r1 - r12} )
2588 THUMB( stmia sp, {r0 - r12} )
2589@@ -456,7 +553,9 @@ __und_usr:
2590 tst r3, #PSR_T_BIT @ Thumb mode?
2591 bne __und_usr_thumb
2592 sub r4, r2, #4 @ ARM instr at LR - 4
2593+ pax_open_userland
2594 1: ldrt r0, [r4]
2595+ pax_close_userland
2596 #ifdef CONFIG_CPU_ENDIAN_BE8
2597 rev r0, r0 @ little endian instruction
2598 #endif
2599@@ -491,10 +590,14 @@ __und_usr_thumb:
2600 */
2601 .arch armv6t2
2602 #endif
2603+ pax_open_userland
2604 2: ldrht r5, [r4]
2605+ pax_close_userland
2606 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2607 blo __und_usr_fault_16 @ 16bit undefined instruction
2608+ pax_open_userland
2609 3: ldrht r0, [r2]
2610+ pax_close_userland
2611 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2612 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2613 orr r0, r0, r5, lsl #16
2614@@ -733,7 +836,7 @@ ENTRY(__switch_to)
2615 THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack
2616 THUMB( str sp, [ip], #4 )
2617 THUMB( str lr, [ip], #4 )
2618-#ifdef CONFIG_CPU_USE_DOMAINS
2619+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC)
2620 ldr r6, [r2, #TI_CPU_DOMAIN]
2621 #endif
2622 set_tls r3, r4, r5
2623@@ -742,7 +845,7 @@ ENTRY(__switch_to)
2624 ldr r8, =__stack_chk_guard
2625 ldr r7, [r7, #TSK_STACK_CANARY]
2626 #endif
2627-#ifdef CONFIG_CPU_USE_DOMAINS
2628+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC)
2629 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2630 #endif
2631 mov r5, r0
2632diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2633index a6c301e..908821b 100644
2634--- a/arch/arm/kernel/entry-common.S
2635+++ b/arch/arm/kernel/entry-common.S
2636@@ -10,18 +10,46 @@
2637
2638 #include <asm/unistd.h>
2639 #include <asm/ftrace.h>
2640+#include <asm/domain.h>
2641 #include <asm/unwind.h>
2642
2643+#include "entry-header.S"
2644+
2645 #ifdef CONFIG_NEED_RET_TO_USER
2646 #include <mach/entry-macro.S>
2647 #else
2648 .macro arch_ret_to_user, tmp1, tmp2
2649+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2650+ @ save regs
2651+ stmdb sp!, {r1, r2}
2652+ @ read DACR from cpu_domain into r1
2653+ mov r2, sp
2654+ @ assume 8K pages, since we have to split the immediate in two
2655+ bic r2, r2, #(0x1fc0)
2656+ bic r2, r2, #(0x3f)
2657+ ldr r1, [r2, #TI_CPU_DOMAIN]
2658+#ifdef CONFIG_PAX_KERNEXEC
2659+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2660+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2661+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2662+#endif
2663+#ifdef CONFIG_PAX_MEMORY_UDEREF
2664+ @ set current DOMAIN_USER to DOMAIN_UDEREF
2665+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2666+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2667+#endif
2668+ @ write r1 to current_thread_info()->cpu_domain
2669+ str r1, [r2, #TI_CPU_DOMAIN]
2670+ @ write r1 to DACR
2671+ mcr p15, 0, r1, c3, c0, 0
2672+ @ instruction sync
2673+ instr_sync
2674+ @ restore regs
2675+ ldmia sp!, {r1, r2}
2676+#endif
2677 .endm
2678 #endif
2679
2680-#include "entry-header.S"
2681-
2682-
2683 .align 5
2684 /*
2685 * This is the fast syscall return path. We do as little as
2686@@ -339,6 +367,7 @@ ENDPROC(ftrace_stub)
2687
2688 .align 5
2689 ENTRY(vector_swi)
2690+
2691 sub sp, sp, #S_FRAME_SIZE
2692 stmia sp, {r0 - r12} @ Calling r0 - r12
2693 ARM( add r8, sp, #S_PC )
2694@@ -388,6 +417,12 @@ ENTRY(vector_swi)
2695 ldr scno, [lr, #-4] @ get SWI instruction
2696 #endif
2697
2698+ /*
2699+ * do this here to avoid a performance hit of wrapping the code above
2700+ * that directly dereferences userland to parse the SWI instruction
2701+ */
2702+ pax_enter_kernel_user
2703+
2704 #ifdef CONFIG_ALIGNMENT_TRAP
2705 ldr ip, __cr_alignment
2706 ldr ip, [ip]
2707diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2708index 9a8531e..812e287 100644
2709--- a/arch/arm/kernel/entry-header.S
2710+++ b/arch/arm/kernel/entry-header.S
2711@@ -73,9 +73,66 @@
2712 msr cpsr_c, \rtemp @ switch back to the SVC mode
2713 .endm
2714
2715+ .macro pax_enter_kernel_user
2716+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2717+ @ save regs
2718+ stmdb sp!, {r0, r1}
2719+ @ read DACR from cpu_domain into r1
2720+ mov r0, sp
2721+ @ assume 8K pages, since we have to split the immediate in two
2722+ bic r0, r0, #(0x1fc0)
2723+ bic r0, r0, #(0x3f)
2724+ ldr r1, [r0, #TI_CPU_DOMAIN]
2725+#ifdef CONFIG_PAX_MEMORY_UDEREF
2726+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2727+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2728+#endif
2729+#ifdef CONFIG_PAX_KERNEXEC
2730+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2731+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2732+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2733+#endif
2734+ @ write r1 to current_thread_info()->cpu_domain
2735+ str r1, [r0, #TI_CPU_DOMAIN]
2736+ @ write r1 to DACR
2737+ mcr p15, 0, r1, c3, c0, 0
2738+ @ instruction sync
2739+ instr_sync
2740+ @ restore regs
2741+ ldmia sp!, {r0, r1}
2742+#endif
2743+ .endm
2744+
2745+ .macro pax_exit_kernel
2746+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2747+ @ save regs
2748+ stmdb sp!, {r0, r1}
2749+ @ read old DACR from stack into r1
2750+ ldr r1, [sp, #(8 + S_SP)]
2751+ sub r1, r1, #8
2752+ ldr r1, [r1]
2753+
2754+ @ write r1 to current_thread_info()->cpu_domain
2755+ mov r0, sp
2756+ @ assume 8K pages, since we have to split the immediate in two
2757+ bic r0, r0, #(0x1fc0)
2758+ bic r0, r0, #(0x3f)
2759+ str r1, [r0, #TI_CPU_DOMAIN]
2760+ @ write r1 to DACR
2761+ mcr p15, 0, r1, c3, c0, 0
2762+ @ instruction sync
2763+ instr_sync
2764+ @ restore regs
2765+ ldmia sp!, {r0, r1}
2766+#endif
2767+ .endm
2768+
2769 #ifndef CONFIG_THUMB2_KERNEL
2770 .macro svc_exit, rpsr
2771 msr spsr_cxsf, \rpsr
2772+
2773+ pax_exit_kernel
2774+
2775 #if defined(CONFIG_CPU_V6)
2776 ldr r0, [sp]
2777 strex r1, r2, [sp] @ clear the exclusive monitor
2778@@ -121,6 +178,9 @@
2779 .endm
2780 #else /* CONFIG_THUMB2_KERNEL */
2781 .macro svc_exit, rpsr
2782+
2783+ pax_exit_kernel
2784+
2785 ldr lr, [sp, #S_SP] @ top of the stack
2786 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2787 clrex @ clear the exclusive monitor
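
Each assembler macro above recovers current_thread_info() by clearing the low 13 bits of SP (the "assume 8K pages" comments), and splits the 0x1fff mask across two bic instructions because 13 contiguous set bits do not fit the 8-bit rotated immediate encoding, while 0x1fc0 and 0x3f each do. A host-side sketch showing the two-step mask is equivalent to the single one; the SP value is arbitrary:

#include <stdio.h>

#define THREAD_SIZE 8192UL   /* 8 KiB kernel stacks, thread_info at the base */

int main(void)
{
    unsigned long sp  = 0xc0a03f7cUL;                /* arbitrary in-stack SP */
    unsigned long ti  = sp & ~(THREAD_SIZE - 1);     /* the single 0x1fff mask */
    unsigned long ti2 = (sp & ~0x1fc0UL) & ~0x3fUL;  /* the two-bic split */

    /* 0x1fff is not one ARM immediate; 0x1fc0 (0x7f ror 26) and 0x3f are */
    printf("sp=0x%08lx ti=0x%08lx two-bic=0x%08lx match=%d\n",
           sp, ti, ti2, ti == ti2);
    return 0;
}
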
2788diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2789index 2adda11..7fbe958 100644
2790--- a/arch/arm/kernel/fiq.c
2791+++ b/arch/arm/kernel/fiq.c
2792@@ -82,7 +82,9 @@ void set_fiq_handler(void *start, unsigned int length)
2793 #if defined(CONFIG_CPU_USE_DOMAINS)
2794 memcpy((void *)0xffff001c, start, length);
2795 #else
2796+ pax_open_kernel();
2797 memcpy(vectors_page + 0x1c, start, length);
2798+ pax_close_kernel();
2799 #endif
2800 flush_icache_range(0xffff001c, 0xffff001c + length);
2801 if (!vectors_high())
2802diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2803index e0eb9a1..caee108 100644
2804--- a/arch/arm/kernel/head.S
2805+++ b/arch/arm/kernel/head.S
2806@@ -52,7 +52,9 @@
2807 .equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
2808
2809 .macro pgtbl, rd, phys
2810- add \rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE
2811+ mov \rd, #TEXT_OFFSET
2812+ sub \rd, #PG_DIR_SIZE
2813+ add \rd, \rd, \phys
2814 .endm
2815
2816 /*
2817@@ -267,7 +269,7 @@ __create_page_tables:
2818 addne r6, r6, #1 << SECTION_SHIFT
2819 strne r6, [r3]
2820
2821-#if defined(CONFIG_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
2822+#if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
2823 sub r4, r4, #4 @ Fixup page table pointer
2824 @ for 64-bit descriptors
2825 #endif
2826@@ -434,7 +436,7 @@ __enable_mmu:
2827 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2828 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2829 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2830- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2831+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2832 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2833 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2834 #endif
2835diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
2836index 5ff2e77..556d030 100644
2837--- a/arch/arm/kernel/hw_breakpoint.c
2838+++ b/arch/arm/kernel/hw_breakpoint.c
2839@@ -1011,7 +1011,7 @@ static int __cpuinit dbg_reset_notify(struct notifier_block *self,
2840 return NOTIFY_OK;
2841 }
2842
2843-static struct notifier_block __cpuinitdata dbg_reset_nb = {
2844+static struct notifier_block dbg_reset_nb = {
2845 .notifier_call = dbg_reset_notify,
2846 };
2847
2848diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2849index 1e9be5d..03edbc2 100644
2850--- a/arch/arm/kernel/module.c
2851+++ b/arch/arm/kernel/module.c
2852@@ -37,12 +37,37 @@
2853 #endif
2854
2855 #ifdef CONFIG_MMU
2856-void *module_alloc(unsigned long size)
2857+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2858 {
2859+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2860+ return NULL;
2861 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2862- GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
2863+ GFP_KERNEL, prot, -1,
2864 __builtin_return_address(0));
2865 }
2866+
2867+void *module_alloc(unsigned long size)
2868+{
2869+
2870+#ifdef CONFIG_PAX_KERNEXEC
2871+ return __module_alloc(size, PAGE_KERNEL);
2872+#else
2873+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2874+#endif
2875+
2876+}
2877+
2878+#ifdef CONFIG_PAX_KERNEXEC
2879+void module_free_exec(struct module *mod, void *module_region)
2880+{
2881+ module_free(mod, module_region);
2882+}
2883+
2884+void *module_alloc_exec(unsigned long size)
2885+{
2886+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2887+}
2888+#endif
2889 #endif
2890
2891 int
2892diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2893index 07314af..c46655c 100644
2894--- a/arch/arm/kernel/patch.c
2895+++ b/arch/arm/kernel/patch.c
2896@@ -18,6 +18,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2897 bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
2898 int size;
2899
2900+ pax_open_kernel();
2901 if (thumb2 && __opcode_is_thumb16(insn)) {
2902 *(u16 *)addr = __opcode_to_mem_thumb16(insn);
2903 size = sizeof(u16);
2904@@ -39,6 +40,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2905 *(u32 *)addr = insn;
2906 size = sizeof(u32);
2907 }
2908+ pax_close_kernel();
2909
2910 flush_icache_range((uintptr_t)(addr),
2911 (uintptr_t)(addr) + size);
2912diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
2913index 5f66206..dce492f 100644
2914--- a/arch/arm/kernel/perf_event_cpu.c
2915+++ b/arch/arm/kernel/perf_event_cpu.c
2916@@ -171,7 +171,7 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
2917 return NOTIFY_OK;
2918 }
2919
2920-static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = {
2921+static struct notifier_block cpu_pmu_hotplug_notifier = {
2922 .notifier_call = cpu_pmu_notify,
2923 };
2924
2925diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2926index c6dec5f..e0fddd1 100644
2927--- a/arch/arm/kernel/process.c
2928+++ b/arch/arm/kernel/process.c
2929@@ -28,7 +28,6 @@
2930 #include <linux/tick.h>
2931 #include <linux/utsname.h>
2932 #include <linux/uaccess.h>
2933-#include <linux/random.h>
2934 #include <linux/hw_breakpoint.h>
2935 #include <linux/cpuidle.h>
2936 #include <linux/leds.h>
2937@@ -256,9 +255,10 @@ void machine_power_off(void)
2938 machine_shutdown();
2939 if (pm_power_off)
2940 pm_power_off();
2941+ BUG();
2942 }
2943
2944-void machine_restart(char *cmd)
2945+__noreturn void machine_restart(char *cmd)
2946 {
2947 machine_shutdown();
2948
2949@@ -283,8 +283,8 @@ void __show_regs(struct pt_regs *regs)
2950 init_utsname()->release,
2951 (int)strcspn(init_utsname()->version, " "),
2952 init_utsname()->version);
2953- print_symbol("PC is at %s\n", instruction_pointer(regs));
2954- print_symbol("LR is at %s\n", regs->ARM_lr);
2955+ printk("PC is at %pA\n", instruction_pointer(regs));
2956+ printk("LR is at %pA\n", regs->ARM_lr);
2957 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2958 "sp : %08lx ip : %08lx fp : %08lx\n",
2959 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2960@@ -452,12 +452,6 @@ unsigned long get_wchan(struct task_struct *p)
2961 return 0;
2962 }
2963
2964-unsigned long arch_randomize_brk(struct mm_struct *mm)
2965-{
2966- unsigned long range_end = mm->brk + 0x02000000;
2967- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2968-}
2969-
2970 #ifdef CONFIG_MMU
2971 /*
2972 * The vectors page is always readable from user space for the
2973@@ -470,9 +464,8 @@ static int __init gate_vma_init(void)
2974 {
2975 gate_vma.vm_start = 0xffff0000;
2976 gate_vma.vm_end = 0xffff0000 + PAGE_SIZE;
2977- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
2978- gate_vma.vm_flags = VM_READ | VM_EXEC |
2979- VM_MAYREAD | VM_MAYEXEC;
2980+ gate_vma.vm_flags = VM_NONE;
2981+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
2982 return 0;
2983 }
2984 arch_initcall(gate_vma_init);
2985diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
2986index 03deeff..741ce88 100644
2987--- a/arch/arm/kernel/ptrace.c
2988+++ b/arch/arm/kernel/ptrace.c
2989@@ -937,10 +937,19 @@ static int tracehook_report_syscall(struct pt_regs *regs,
2990 return current_thread_info()->syscall;
2991 }
2992
2993+#ifdef CONFIG_GRKERNSEC_SETXID
2994+extern void gr_delayed_cred_worker(void);
2995+#endif
2996+
2997 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
2998 {
2999 current_thread_info()->syscall = scno;
3000
3001+#ifdef CONFIG_GRKERNSEC_SETXID
3002+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3003+ gr_delayed_cred_worker();
3004+#endif
3005+
3006 /* Do the secure computing check first; failures should be fast. */
3007 if (secure_computing(scno) == -1)
3008 return -1;
3009diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
3010index 3f6cbb2..39305c7 100644
3011--- a/arch/arm/kernel/setup.c
3012+++ b/arch/arm/kernel/setup.c
3013@@ -97,21 +97,23 @@ EXPORT_SYMBOL(system_serial_high);
3014 unsigned int elf_hwcap __read_mostly;
3015 EXPORT_SYMBOL(elf_hwcap);
3016
3017+pteval_t __supported_pte_mask __read_only;
3018+pmdval_t __supported_pmd_mask __read_only;
3019
3020 #ifdef MULTI_CPU
3021-struct processor processor __read_mostly;
3022+struct processor processor;
3023 #endif
3024 #ifdef MULTI_TLB
3025-struct cpu_tlb_fns cpu_tlb __read_mostly;
3026+struct cpu_tlb_fns cpu_tlb __read_only;
3027 #endif
3028 #ifdef MULTI_USER
3029-struct cpu_user_fns cpu_user __read_mostly;
3030+struct cpu_user_fns cpu_user __read_only;
3031 #endif
3032 #ifdef MULTI_CACHE
3033-struct cpu_cache_fns cpu_cache __read_mostly;
3034+struct cpu_cache_fns cpu_cache __read_only;
3035 #endif
3036 #ifdef CONFIG_OUTER_CACHE
3037-struct outer_cache_fns outer_cache __read_mostly;
3038+struct outer_cache_fns outer_cache __read_only;
3039 EXPORT_SYMBOL(outer_cache);
3040 #endif
3041
3042@@ -236,9 +238,13 @@ static int __get_cpu_architecture(void)
3043 asm("mrc p15, 0, %0, c0, c1, 4"
3044 : "=r" (mmfr0));
3045 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
3046- (mmfr0 & 0x000000f0) >= 0x00000030)
3047+ (mmfr0 & 0x000000f0) >= 0x00000030) {
3048 cpu_arch = CPU_ARCH_ARMv7;
3049- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3050+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
3051+ __supported_pte_mask |= L_PTE_PXN;
3052+ __supported_pmd_mask |= PMD_PXNTABLE;
3053+ }
3054+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3055 (mmfr0 & 0x000000f0) == 0x00000020)
3056 cpu_arch = CPU_ARCH_ARMv6;
3057 else
3058@@ -462,7 +468,7 @@ static void __init setup_processor(void)
3059 __cpu_architecture = __get_cpu_architecture();
3060
3061 #ifdef MULTI_CPU
3062- processor = *list->proc;
3063+ memcpy((void *)&processor, list->proc, sizeof processor);
3064 #endif
3065 #ifdef MULTI_TLB
3066 cpu_tlb = *list->tlb;
3067@@ -524,7 +530,7 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
3068 size -= start & ~PAGE_MASK;
3069 bank->start = PAGE_ALIGN(start);
3070
3071-#ifndef CONFIG_LPAE
3072+#ifndef CONFIG_ARM_LPAE
3073 if (bank->start + size < bank->start) {
3074 printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
3075 "32-bit physical address space\n", (long long)start);
3076diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3077index 56f72d2..6924200 100644
3078--- a/arch/arm/kernel/signal.c
3079+++ b/arch/arm/kernel/signal.c
3080@@ -433,22 +433,14 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka,
3081 __put_user(sigreturn_codes[idx+1], rc+1))
3082 return 1;
3083
3084- if (cpsr & MODE32_BIT) {
3085- /*
3086- * 32-bit code can use the new high-page
3087- * signal return code support.
3088- */
3089- retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb;
3090- } else {
3091- /*
3092- * Ensure that the instruction cache sees
3093- * the return code written onto the stack.
3094- */
3095- flush_icache_range((unsigned long)rc,
3096- (unsigned long)(rc + 2));
3097+ /*
3098+ * Ensure that the instruction cache sees
3099+ * the return code written onto the stack.
3100+ */
3101+ flush_icache_range((unsigned long)rc,
3102+ (unsigned long)(rc + 2));
3103
3104- retcode = ((unsigned long)rc) + thumb;
3105- }
3106+ retcode = ((unsigned long)rc) + thumb;
3107 }
3108
3109 regs->ARM_r0 = usig;
3110diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3111index 58af91c..343ce99 100644
3112--- a/arch/arm/kernel/smp.c
3113+++ b/arch/arm/kernel/smp.c
3114@@ -70,7 +70,7 @@ enum ipi_msg_type {
3115
3116 static DECLARE_COMPLETION(cpu_running);
3117
3118-static struct smp_operations smp_ops;
3119+static struct smp_operations smp_ops __read_only;
3120
3121 void __init smp_set_ops(struct smp_operations *ops)
3122 {
3123diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
3124index 02c5d2c..e5695ad 100644
3125--- a/arch/arm/kernel/smp_tlb.c
3126+++ b/arch/arm/kernel/smp_tlb.c
3127@@ -12,6 +12,7 @@
3128
3129 #include <asm/smp_plat.h>
3130 #include <asm/tlbflush.h>
3131+#include <asm/mmu_context.h>
3132
3133 /**********************************************************************/
3134
3135@@ -64,12 +65,72 @@ static inline void ipi_flush_tlb_kernel_range(void *arg)
3136 local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
3137 }
3138
3139+#ifdef CONFIG_ARM_ERRATA_798181
3140+static int erratum_a15_798181(void)
3141+{
3142+ unsigned int midr = read_cpuid_id();
3143+
3144+ /* Cortex-A15 r0p0..r3p2 affected */
3145+ if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
3146+ return 0;
3147+ return 1;
3148+}
3149+#else
3150+static int erratum_a15_798181(void)
3151+{
3152+ return 0;
3153+}
3154+#endif
3155+
3156+static void ipi_flush_tlb_a15_erratum(void *arg)
3157+{
3158+ dmb();
3159+}
3160+
3161+static void broadcast_tlb_a15_erratum(void)
3162+{
3163+ if (!erratum_a15_798181())
3164+ return;
3165+
3166+ dummy_flush_tlb_a15_erratum();
3167+ smp_call_function_many(cpu_online_mask, ipi_flush_tlb_a15_erratum,
3168+ NULL, 1);
3169+}
3170+
3171+static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
3172+{
3173+ int cpu;
3174+ cpumask_t mask = { CPU_BITS_NONE };
3175+
3176+ if (!erratum_a15_798181())
3177+ return;
3178+
3179+ dummy_flush_tlb_a15_erratum();
3180+ for_each_online_cpu(cpu) {
3181+ if (cpu == smp_processor_id())
3182+ continue;
3183+ /*
3184+ * We only need to send an IPI if the other CPUs are running
3185+ * the same ASID as the one being invalidated. There is no
3186+ * need for locking around the active_asids check since the
3187+ * switch_mm() function has at least one dmb() (as required by
3188+ * this workaround) in case a context switch happens on
3189+ * another CPU after the condition below.
3190+ */
3191+ if (atomic64_read(&mm->context.id) ==
3192+ atomic64_read(&per_cpu(active_asids, cpu)))
3193+ cpumask_set_cpu(cpu, &mask);
3194+ }
3195+ smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
3196+}
3197+
3198 void flush_tlb_all(void)
3199 {
3200 if (tlb_ops_need_broadcast())
3201 on_each_cpu(ipi_flush_tlb_all, NULL, 1);
3202 else
3203 local_flush_tlb_all();
3204+ broadcast_tlb_a15_erratum();
3205 }
3206
3207 void flush_tlb_mm(struct mm_struct *mm)
3208@@ -78,6 +139,7 @@ void flush_tlb_mm(struct mm_struct *mm)
3209 on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
3210 else
3211 local_flush_tlb_mm(mm);
3212+ broadcast_tlb_mm_a15_erratum(mm);
3213 }
3214
3215 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
3216@@ -90,6 +152,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
3217 &ta, 1);
3218 } else
3219 local_flush_tlb_page(vma, uaddr);
3220+ broadcast_tlb_mm_a15_erratum(vma->vm_mm);
3221 }
3222
3223 void flush_tlb_kernel_page(unsigned long kaddr)
3224@@ -100,6 +163,7 @@ void flush_tlb_kernel_page(unsigned long kaddr)
3225 on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
3226 } else
3227 local_flush_tlb_kernel_page(kaddr);
3228+ broadcast_tlb_a15_erratum();
3229 }
3230
3231 void flush_tlb_range(struct vm_area_struct *vma,
3232@@ -114,6 +178,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
3233 &ta, 1);
3234 } else
3235 local_flush_tlb_range(vma, start, end);
3236+ broadcast_tlb_mm_a15_erratum(vma->vm_mm);
3237 }
3238
3239 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
3240@@ -125,5 +190,6 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
3241 on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
3242 } else
3243 local_flush_tlb_kernel_range(start, end);
3244+ broadcast_tlb_a15_erratum();
3245 }
3246
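
erratum_a15_798181() above fingerprints the CPU via MIDR: the mask 0xff0ffff0 keeps the implementer, architecture and part-number fields while zeroing the variant (rN) and revision (pM) nibbles, and the follow-up comparison bounds those two nibbles to r0p0 through r3p2. A host-side sketch that decodes the fields and replays the test; the sample MIDR list is illustrative:

#include <stdint.h>
#include <stdio.h>

/* Same predicate as erratum_a15_798181() above, restated */
static int a15_798181_affected(uint32_t midr)
{
    if ((midr & 0xff0ffff0) != 0x410fc0f0)  /* ARM Cortex-A15 at all? */
        return 0;
    return midr <= 0x413fc0f2;              /* variant/revision r0p0..r3p2 */
}

int main(void)
{
    static const struct { uint32_t midr; const char *name; } t[] = {
        { 0x410fc0f0, "Cortex-A15 r0p0" },
        { 0x413fc0f2, "Cortex-A15 r3p2" },
        { 0x413fc0f3, "Cortex-A15 r3p3" },
        { 0x410fc090, "Cortex-A9  r0p0" },
    };
    unsigned i;

    for (i = 0; i < sizeof(t) / sizeof(t[0]); i++) {
        uint32_t m = t[i].midr;
        printf("%s: impl=0x%02x variant=r%u part=0x%03x rev=p%u -> %s\n",
               t[i].name, m >> 24, (m >> 20) & 0xf, (m >> 4) & 0xfff, m & 0xf,
               a15_798181_affected(m) ? "affected" : "not affected");
    }
    return 0;
}
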
3247diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3248index b0179b8..829510e 100644
3249--- a/arch/arm/kernel/traps.c
3250+++ b/arch/arm/kernel/traps.c
3251@@ -57,7 +57,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3252 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3253 {
3254 #ifdef CONFIG_KALLSYMS
3255- printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3256+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3257 #else
3258 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3259 #endif
3260@@ -266,6 +266,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3261 static int die_owner = -1;
3262 static unsigned int die_nest_count;
3263
3264+extern void gr_handle_kernel_exploit(void);
3265+
3266 static unsigned long oops_begin(void)
3267 {
3268 int cpu;
3269@@ -308,6 +310,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3270 panic("Fatal exception in interrupt");
3271 if (panic_on_oops)
3272 panic("Fatal exception");
3273+
3274+ gr_handle_kernel_exploit();
3275+
3276 if (signr)
3277 do_exit(signr);
3278 }
3279@@ -601,7 +606,9 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
3280 * The user helper at 0xffff0fe0 must be used instead.
3281 * (see entry-armv.S for details)
3282 */
3283+ pax_open_kernel();
3284 *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
3285+ pax_close_kernel();
3286 }
3287 return 0;
3288
3289@@ -841,13 +848,10 @@ void __init early_trap_init(void *vectors_base)
3290 */
3291 kuser_get_tls_init(vectors);
3292
3293- /*
3294- * Copy signal return handlers into the vector page, and
3295- * set sigreturn to be a pointer to these.
3296- */
3297- memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
3298- sigreturn_codes, sizeof(sigreturn_codes));
3299-
3300 flush_icache_range(vectors, vectors + PAGE_SIZE);
3301- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3302+
3303+#ifndef CONFIG_PAX_MEMORY_UDEREF
3304+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3305+#endif
3306+
3307 }
3308diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3309index 11c1785..c67d54c 100644
3310--- a/arch/arm/kernel/vmlinux.lds.S
3311+++ b/arch/arm/kernel/vmlinux.lds.S
3312@@ -8,7 +8,11 @@
3313 #include <asm/thread_info.h>
3314 #include <asm/memory.h>
3315 #include <asm/page.h>
3316-
3317+
3318+#ifdef CONFIG_PAX_KERNEXEC
3319+#include <asm/pgtable.h>
3320+#endif
3321+
3322 #define PROC_INFO \
3323 . = ALIGN(4); \
3324 VMLINUX_SYMBOL(__proc_info_begin) = .; \
3325@@ -90,6 +94,11 @@ SECTIONS
3326 _text = .;
3327 HEAD_TEXT
3328 }
3329+
3330+#ifdef CONFIG_PAX_KERNEXEC
3331+ . = ALIGN(1<<SECTION_SHIFT);
3332+#endif
3333+
3334 .text : { /* Real text segment */
3335 _stext = .; /* Text and read-only data */
3336 __exception_text_start = .;
3337@@ -144,6 +153,10 @@ SECTIONS
3338
3339 _etext = .; /* End of text and rodata section */
3340
3341+#ifdef CONFIG_PAX_KERNEXEC
3342+ . = ALIGN(1<<SECTION_SHIFT);
3343+#endif
3344+
3345 #ifndef CONFIG_XIP_KERNEL
3346 . = ALIGN(PAGE_SIZE);
3347 __init_begin = .;
3348@@ -203,6 +216,11 @@ SECTIONS
3349 . = PAGE_OFFSET + TEXT_OFFSET;
3350 #else
3351 __init_end = .;
3352+
3353+#ifdef CONFIG_PAX_KERNEXEC
3354+ . = ALIGN(1<<SECTION_SHIFT);
3355+#endif
3356+
3357 . = ALIGN(THREAD_SIZE);
3358 __data_loc = .;
3359 #endif
3360diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3361index 14a0d98..7771a7d 100644
3362--- a/arch/arm/lib/clear_user.S
3363+++ b/arch/arm/lib/clear_user.S
3364@@ -12,14 +12,14 @@
3365
3366 .text
3367
3368-/* Prototype: int __clear_user(void *addr, size_t sz)
3369+/* Prototype: int ___clear_user(void *addr, size_t sz)
3370 * Purpose : clear some user memory
3371 * Params : addr - user memory address to clear
3372 * : sz - number of bytes to clear
3373 * Returns : number of bytes NOT cleared
3374 */
3375 ENTRY(__clear_user_std)
3376-WEAK(__clear_user)
3377+WEAK(___clear_user)
3378 stmfd sp!, {r1, lr}
3379 mov r2, #0
3380 cmp r1, #4
3381@@ -44,7 +44,7 @@ WEAK(__clear_user)
3382 USER( strnebt r2, [r0])
3383 mov r0, #0
3384 ldmfd sp!, {r1, pc}
3385-ENDPROC(__clear_user)
3386+ENDPROC(___clear_user)
3387 ENDPROC(__clear_user_std)
3388
3389 .pushsection .fixup,"ax"
3390diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3391index 66a477a..bee61d3 100644
3392--- a/arch/arm/lib/copy_from_user.S
3393+++ b/arch/arm/lib/copy_from_user.S
3394@@ -16,7 +16,7 @@
3395 /*
3396 * Prototype:
3397 *
3398- * size_t __copy_from_user(void *to, const void *from, size_t n)
3399+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3400 *
3401 * Purpose:
3402 *
3403@@ -84,11 +84,11 @@
3404
3405 .text
3406
3407-ENTRY(__copy_from_user)
3408+ENTRY(___copy_from_user)
3409
3410 #include "copy_template.S"
3411
3412-ENDPROC(__copy_from_user)
3413+ENDPROC(___copy_from_user)
3414
3415 .pushsection .fixup,"ax"
3416 .align 0
3417diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3418index 6ee2f67..d1cce76 100644
3419--- a/arch/arm/lib/copy_page.S
3420+++ b/arch/arm/lib/copy_page.S
3421@@ -10,6 +10,7 @@
3422 * ASM optimised string functions
3423 */
3424 #include <linux/linkage.h>
3425+#include <linux/const.h>
3426 #include <asm/assembler.h>
3427 #include <asm/asm-offsets.h>
3428 #include <asm/cache.h>
3429diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3430index d066df6..df28194 100644
3431--- a/arch/arm/lib/copy_to_user.S
3432+++ b/arch/arm/lib/copy_to_user.S
3433@@ -16,7 +16,7 @@
3434 /*
3435 * Prototype:
3436 *
3437- * size_t __copy_to_user(void *to, const void *from, size_t n)
3438+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3439 *
3440 * Purpose:
3441 *
3442@@ -88,11 +88,11 @@
3443 .text
3444
3445 ENTRY(__copy_to_user_std)
3446-WEAK(__copy_to_user)
3447+WEAK(___copy_to_user)
3448
3449 #include "copy_template.S"
3450
3451-ENDPROC(__copy_to_user)
3452+ENDPROC(___copy_to_user)
3453 ENDPROC(__copy_to_user_std)
3454
3455 .pushsection .fixup,"ax"
3456diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3457index 7d08b43..f7ca7ea 100644
3458--- a/arch/arm/lib/csumpartialcopyuser.S
3459+++ b/arch/arm/lib/csumpartialcopyuser.S
3460@@ -57,8 +57,8 @@
3461 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3462 */
3463
3464-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3465-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3466+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3467+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3468
3469 #include "csumpartialcopygeneric.S"
3470
3471diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3472index 6b93f6a..1aa92d0 100644
3473--- a/arch/arm/lib/delay.c
3474+++ b/arch/arm/lib/delay.c
3475@@ -28,12 +28,15 @@
3476 /*
3477 * Default to the loop-based delay implementation.
3478 */
3479-struct arm_delay_ops arm_delay_ops = {
3480+static struct arm_delay_ops arm_loop_delay_ops = {
3481 .delay = __loop_delay,
3482 .const_udelay = __loop_const_udelay,
3483 .udelay = __loop_udelay,
3484+ .const_clock = false,
3485 };
3486
3487+struct arm_delay_ops *arm_delay_ops __read_only = &arm_loop_delay_ops;
3488+
3489 static const struct delay_timer *delay_timer;
3490 static bool delay_calibrated;
3491
3492@@ -67,6 +70,13 @@ static void __timer_udelay(unsigned long usecs)
3493 __timer_const_udelay(usecs * UDELAY_MULT);
3494 }
3495
3496+static struct arm_delay_ops arm_timer_delay_ops = {
3497+ .delay = __timer_delay,
3498+ .const_udelay = __timer_const_udelay,
3499+ .udelay = __timer_udelay,
3500+ .const_clock = true,
3501+};
3502+
3503 void __init register_current_timer_delay(const struct delay_timer *timer)
3504 {
3505 if (!delay_calibrated) {
3506@@ -74,10 +84,7 @@ void __init register_current_timer_delay(const struct delay_timer *timer)
3507 delay_timer = timer;
3508 lpj_fine = timer->freq / HZ;
3509 loops_per_jiffy = lpj_fine;
3510- arm_delay_ops.delay = __timer_delay;
3511- arm_delay_ops.const_udelay = __timer_const_udelay;
3512- arm_delay_ops.udelay = __timer_udelay;
3513- arm_delay_ops.const_clock = true;
3514+ arm_delay_ops = &arm_timer_delay_ops;
3515 delay_calibrated = true;
3516 } else {
3517 pr_info("Ignoring duplicate/late registration of read_current_timer delay\n");
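
The delay.c change above trades field-by-field runtime writes into a shared ops structure for a single pointer swap between two fully-initialized static tables, which is what allows arm_delay_ops to become an __read_only pointer while keeping the timer upgrade path. A minimal sketch of that shape, with all names invented for the example:

#include <stdio.h>

struct delay_ops {
    void (*delay)(unsigned long n);
};

static void loop_delay(unsigned long n)  { printf("loop delay %lu\n", n); }
static void timer_delay(unsigned long n) { printf("timer delay %lu\n", n); }

/* two complete, never-written ops tables */
static const struct delay_ops loop_ops  = { .delay = loop_delay };
static const struct delay_ops timer_ops = { .delay = timer_delay };

/* only this one pointer changes when a timer source is registered */
static const struct delay_ops *ops = &loop_ops;

int main(void)
{
    ops->delay(100);
    ops = &timer_ops;   /* models register_current_timer_delay() */
    ops->delay(100);
    return 0;
}

The same shape recurs in the gpmc.c hunk at the end of this section, where a statically initialized irq_chip replaces the runtime field assignments.
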
3518diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3519index 025f742..8432b08 100644
3520--- a/arch/arm/lib/uaccess_with_memcpy.c
3521+++ b/arch/arm/lib/uaccess_with_memcpy.c
3522@@ -104,7 +104,7 @@ out:
3523 }
3524
3525 unsigned long
3526-__copy_to_user(void __user *to, const void *from, unsigned long n)
3527+___copy_to_user(void __user *to, const void *from, unsigned long n)
3528 {
3529 /*
3530 * This test is stubbed out of the main function above to keep
3531diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
3532index bac21a5..b67ef8e 100644
3533--- a/arch/arm/mach-kirkwood/common.c
3534+++ b/arch/arm/mach-kirkwood/common.c
3535@@ -150,7 +150,16 @@ static void clk_gate_fn_disable(struct clk_hw *hw)
3536 clk_gate_ops.disable(hw);
3537 }
3538
3539-static struct clk_ops clk_gate_fn_ops;
3540+static int clk_gate_fn_is_enabled(struct clk_hw *hw)
3541+{
3542+ return clk_gate_ops.is_enabled(hw);
3543+}
3544+
3545+static struct clk_ops clk_gate_fn_ops = {
3546+ .enable = clk_gate_fn_enable,
3547+ .disable = clk_gate_fn_disable,
3548+ .is_enabled = clk_gate_fn_is_enabled,
3549+};
3550
3551 static struct clk __init *clk_register_gate_fn(struct device *dev,
3552 const char *name,
3553@@ -184,14 +193,6 @@ static struct clk __init *clk_register_gate_fn(struct device *dev,
3554 gate_fn->fn_en = fn_en;
3555 gate_fn->fn_dis = fn_dis;
3556
3557- /* ops is the gate ops, but with our enable/disable functions */
3558- if (clk_gate_fn_ops.enable != clk_gate_fn_enable ||
3559- clk_gate_fn_ops.disable != clk_gate_fn_disable) {
3560- clk_gate_fn_ops = clk_gate_ops;
3561- clk_gate_fn_ops.enable = clk_gate_fn_enable;
3562- clk_gate_fn_ops.disable = clk_gate_fn_disable;
3563- }
3564-
3565 clk = clk_register(dev, &gate_fn->gate.hw);
3566
3567 if (IS_ERR(clk))
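
The same hardening pattern recurs here and in the gpmc_irq_chip hunk further down: a lazily copied-and-patched ops table becomes a designated initializer, with a small trampoline (clk_gate_fn_is_enabled) so the generic helper can be named at compile time. A sketch of the shape, assuming a toy two-slot ops table:

    #include <stdio.h>

    struct ops { int (*enable)(void); int (*is_enabled)(void); };

    static int base_is_enabled(void) { return 1; }        /* generic helper */
    static int fn_enable(void)       { puts("enable"); return 0; }

    /* trampoline so the generic helper can appear in a static initializer */
    static int fn_is_enabled(void)   { return base_is_enabled(); }

    /* no runtime copy-and-patch: every slot is fixed at compile time,
     * so the table could live in read-only memory */
    static const struct ops gate_ops = {
        .enable     = fn_enable,
        .is_enabled = fn_is_enabled,
    };

    int main(void) { gate_ops.enable(); return !gate_ops.is_enabled(); }
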
3568diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3569index 0abb30f..54064da 100644
3570--- a/arch/arm/mach-omap2/board-n8x0.c
3571+++ b/arch/arm/mach-omap2/board-n8x0.c
3572@@ -631,7 +631,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3573 }
3574 #endif
3575
3576-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3577+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3578 .late_init = n8x0_menelaus_late_init,
3579 };
3580
3581diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
3582index 8033cb7..2f7cb62 100644
3583--- a/arch/arm/mach-omap2/gpmc.c
3584+++ b/arch/arm/mach-omap2/gpmc.c
3585@@ -139,7 +139,6 @@ struct omap3_gpmc_regs {
3586 };
3587
3588 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
3589-static struct irq_chip gpmc_irq_chip;
3590 static unsigned gpmc_irq_start;
3591
3592 static struct resource gpmc_mem_root;
3593@@ -700,6 +699,18 @@ static void gpmc_irq_noop(struct irq_data *data) { }
3594
3595 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
3596
3597+static struct irq_chip gpmc_irq_chip = {
3598+ .name = "gpmc",
3599+ .irq_startup = gpmc_irq_noop_ret,
3600+ .irq_enable = gpmc_irq_enable,
3601+ .irq_disable = gpmc_irq_disable,
3602+ .irq_shutdown = gpmc_irq_noop,
3603+ .irq_ack = gpmc_irq_noop,
3604+ .irq_mask = gpmc_irq_noop,
3605+ .irq_unmask = gpmc_irq_noop,
3606+
3607+};
3608+
3609 static int gpmc_setup_irq(void)
3610 {
3611 int i;
3612@@ -714,15 +725,6 @@ static int gpmc_setup_irq(void)
3613 return gpmc_irq_start;
3614 }
3615
3616- gpmc_irq_chip.name = "gpmc";
3617- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
3618- gpmc_irq_chip.irq_enable = gpmc_irq_enable;
3619- gpmc_irq_chip.irq_disable = gpmc_irq_disable;
3620- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
3621- gpmc_irq_chip.irq_ack = gpmc_irq_noop;
3622- gpmc_irq_chip.irq_mask = gpmc_irq_noop;
3623- gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
3624-
3625 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
3626 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
3627
3628diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3629index 5d3b4f4..ddba3c0 100644
3630--- a/arch/arm/mach-omap2/omap-wakeupgen.c
3631+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3632@@ -340,7 +340,7 @@ static int __cpuinit irq_cpu_hotplug_notify(struct notifier_block *self,
3633 return NOTIFY_OK;
3634 }
3635
3636-static struct notifier_block __refdata irq_hotplug_notifier = {
3637+static struct notifier_block irq_hotplug_notifier = {
3638 .notifier_call = irq_cpu_hotplug_notify,
3639 };
3640
3641diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3642index e065daa..7b1ad9b 100644
3643--- a/arch/arm/mach-omap2/omap_device.c
3644+++ b/arch/arm/mach-omap2/omap_device.c
3645@@ -686,7 +686,7 @@ void omap_device_delete(struct omap_device *od)
3646 * passes along the return value of omap_device_build_ss().
3647 */
3648 struct platform_device __init *omap_device_build(const char *pdev_name, int pdev_id,
3649- struct omap_hwmod *oh, void *pdata,
3650+ struct omap_hwmod *oh, const void *pdata,
3651 int pdata_len,
3652 struct omap_device_pm_latency *pm_lats,
3653 int pm_lats_cnt, int is_early_device)
3654@@ -720,7 +720,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name, int pdev
3655 */
3656 struct platform_device __init *omap_device_build_ss(const char *pdev_name, int pdev_id,
3657 struct omap_hwmod **ohs, int oh_cnt,
3658- void *pdata, int pdata_len,
3659+ const void *pdata, int pdata_len,
3660 struct omap_device_pm_latency *pm_lats,
3661 int pm_lats_cnt, int is_early_device)
3662 {
3663diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
3664index 0933c59..42b8e2d 100644
3665--- a/arch/arm/mach-omap2/omap_device.h
3666+++ b/arch/arm/mach-omap2/omap_device.h
3667@@ -91,14 +91,14 @@ int omap_device_shutdown(struct platform_device *pdev);
3668 /* Core code interface */
3669
3670 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
3671- struct omap_hwmod *oh, void *pdata,
3672+ struct omap_hwmod *oh, const void *pdata,
3673 int pdata_len,
3674 struct omap_device_pm_latency *pm_lats,
3675 int pm_lats_cnt, int is_early_device);
3676
3677 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
3678 struct omap_hwmod **oh, int oh_cnt,
3679- void *pdata, int pdata_len,
3680+ const void *pdata, int pdata_len,
3681 struct omap_device_pm_latency *pm_lats,
3682 int pm_lats_cnt, int is_early_device);
3683
3684diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3685index 4653efb..8c60bf7 100644
3686--- a/arch/arm/mach-omap2/omap_hwmod.c
3687+++ b/arch/arm/mach-omap2/omap_hwmod.c
3688@@ -189,10 +189,10 @@ struct omap_hwmod_soc_ops {
3689 int (*init_clkdm)(struct omap_hwmod *oh);
3690 void (*update_context_lost)(struct omap_hwmod *oh);
3691 int (*get_context_lost)(struct omap_hwmod *oh);
3692-};
3693+} __no_const;
3694
3695 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3696-static struct omap_hwmod_soc_ops soc_ops;
3697+static struct omap_hwmod_soc_ops soc_ops __read_only;
3698
3699 /* omap_hwmod_list contains all registered struct omap_hwmods */
3700 static LIST_HEAD(omap_hwmod_list);
3701diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3702index 7c2b4ed..b2ea51f 100644
3703--- a/arch/arm/mach-omap2/wd_timer.c
3704+++ b/arch/arm/mach-omap2/wd_timer.c
3705@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3706 struct omap_hwmod *oh;
3707 char *oh_name = "wd_timer2";
3708 char *dev_name = "omap_wdt";
3709- struct omap_wd_timer_platform_data pdata;
3710+ static struct omap_wd_timer_platform_data pdata = {
3711+ .read_reset_sources = prm_read_reset_sources
3712+ };
3713
3714 if (!cpu_class_is_omap2() || of_have_populated_dt())
3715 return 0;
3716@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3717 return -EINVAL;
3718 }
3719
3720- pdata.read_reset_sources = prm_read_reset_sources;
3721-
3722 pdev = omap_device_build(dev_name, id, oh, &pdata,
3723 sizeof(struct omap_wd_timer_platform_data),
3724 NULL, 0, 0);
3725diff --git a/arch/arm/mach-ux500/include/mach/setup.h b/arch/arm/mach-ux500/include/mach/setup.h
3726index 6be4c4d..32ac32a 100644
3727--- a/arch/arm/mach-ux500/include/mach/setup.h
3728+++ b/arch/arm/mach-ux500/include/mach/setup.h
3729@@ -38,13 +38,6 @@ extern struct sys_timer ux500_timer;
3730 .type = MT_DEVICE, \
3731 }
3732
3733-#define __MEM_DEV_DESC(x, sz) { \
3734- .virtual = IO_ADDRESS(x), \
3735- .pfn = __phys_to_pfn(x), \
3736- .length = sz, \
3737- .type = MT_MEMORY, \
3738-}
3739-
3740 extern struct smp_operations ux500_smp_ops;
3741 extern void ux500_cpu_die(unsigned int cpu);
3742
3743diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3744index 3fd629d..8b1aca9 100644
3745--- a/arch/arm/mm/Kconfig
3746+++ b/arch/arm/mm/Kconfig
3747@@ -425,7 +425,7 @@ config CPU_32v5
3748
3749 config CPU_32v6
3750 bool
3751- select CPU_USE_DOMAINS if CPU_V6 && MMU
3752+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC
3753 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3754
3755 config CPU_32v6K
3756@@ -577,6 +577,7 @@ config CPU_CP15_MPU
3757
3758 config CPU_USE_DOMAINS
3759 bool
3760+ depends on !ARM_LPAE && !PAX_KERNEXEC
3761 help
3762 This option enables or disables the use of domain switching
3763 via the set_fs() function.
3764diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
3765index db26e2e..ee44569 100644
3766--- a/arch/arm/mm/alignment.c
3767+++ b/arch/arm/mm/alignment.c
3768@@ -211,10 +211,12 @@ union offset_union {
3769 #define __get16_unaligned_check(ins,val,addr) \
3770 do { \
3771 unsigned int err = 0, v, a = addr; \
3772+ pax_open_userland(); \
3773 __get8_unaligned_check(ins,v,a,err); \
3774 val = v << ((BE) ? 8 : 0); \
3775 __get8_unaligned_check(ins,v,a,err); \
3776 val |= v << ((BE) ? 0 : 8); \
3777+ pax_close_userland(); \
3778 if (err) \
3779 goto fault; \
3780 } while (0)
3781@@ -228,6 +230,7 @@ union offset_union {
3782 #define __get32_unaligned_check(ins,val,addr) \
3783 do { \
3784 unsigned int err = 0, v, a = addr; \
3785+ pax_open_userland(); \
3786 __get8_unaligned_check(ins,v,a,err); \
3787 val = v << ((BE) ? 24 : 0); \
3788 __get8_unaligned_check(ins,v,a,err); \
3789@@ -236,6 +239,7 @@ union offset_union {
3790 val |= v << ((BE) ? 8 : 16); \
3791 __get8_unaligned_check(ins,v,a,err); \
3792 val |= v << ((BE) ? 0 : 24); \
3793+ pax_close_userland(); \
3794 if (err) \
3795 goto fault; \
3796 } while (0)
3797@@ -249,6 +253,7 @@ union offset_union {
3798 #define __put16_unaligned_check(ins,val,addr) \
3799 do { \
3800 unsigned int err = 0, v = val, a = addr; \
3801+ pax_open_userland(); \
3802 __asm__( FIRST_BYTE_16 \
3803 ARM( "1: "ins" %1, [%2], #1\n" ) \
3804 THUMB( "1: "ins" %1, [%2]\n" ) \
3805@@ -268,6 +273,7 @@ union offset_union {
3806 " .popsection\n" \
3807 : "=r" (err), "=&r" (v), "=&r" (a) \
3808 : "0" (err), "1" (v), "2" (a)); \
3809+ pax_close_userland(); \
3810 if (err) \
3811 goto fault; \
3812 } while (0)
3813@@ -281,6 +287,7 @@ union offset_union {
3814 #define __put32_unaligned_check(ins,val,addr) \
3815 do { \
3816 unsigned int err = 0, v = val, a = addr; \
3817+ pax_open_userland(); \
3818 __asm__( FIRST_BYTE_32 \
3819 ARM( "1: "ins" %1, [%2], #1\n" ) \
3820 THUMB( "1: "ins" %1, [%2]\n" ) \
3821@@ -310,6 +317,7 @@ union offset_union {
3822 " .popsection\n" \
3823 : "=r" (err), "=&r" (v), "=&r" (a) \
3824 : "0" (err), "1" (v), "2" (a)); \
3825+ pax_close_userland(); \
3826 if (err) \
3827 goto fault; \
3828 } while (0)
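
Under PAX_MEMORY_UDEREF, userland is normally inaccessible to the kernel, so each unaligned-access macro must bracket its byte loads and stores with pax_open_userland()/pax_close_userland(). A toy sketch of that windowing discipline; userland_open and get_user_byte are illustrative stand-ins for the domain-register machinery:

    #include <stdio.h>

    static int userland_open;                  /* stands in for the CPU domain state */

    static void open_userland(void)  { userland_open = 1; }
    static void close_userland(void) { userland_open = 0; }

    /* every user access must happen inside an open/close window */
    static int get_user_byte(const unsigned char *p, unsigned char *out)
    {
        if (!userland_open)
            return -1;                         /* access outside the window: fault */
        *out = *p;
        return 0;
    }

    int main(void)
    {
        unsigned char buf = 0x42, v = 0;
        open_userland();
        int err = get_user_byte(&buf, &v);     /* multi-byte loads would loop here */
        close_userland();
        printf("err=%d v=%#x\n", err, v);
        return err;
    }
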
3829diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
3830index d07df17..59d5493 100644
3831--- a/arch/arm/mm/context.c
3832+++ b/arch/arm/mm/context.c
3833@@ -45,7 +45,7 @@ static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
3834 static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3835 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
3836
3837-static DEFINE_PER_CPU(atomic64_t, active_asids);
3838+DEFINE_PER_CPU(atomic64_t, active_asids);
3839 static DEFINE_PER_CPU(u64, reserved_asids);
3840 static cpumask_t tlb_flush_pending;
3841
3842@@ -209,8 +209,10 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
3843 atomic64_set(&mm->context.id, asid);
3844 }
3845
3846- if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
3847+ if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
3848 local_flush_tlb_all();
3849+ dummy_flush_tlb_a15_erratum();
3850+ }
3851
3852 atomic64_set(&per_cpu(active_asids, cpu), asid);
3853 cpumask_set_cpu(cpu, mm_cpumask(mm));
3854diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
3855index 5dbf13f..1a60561 100644
3856--- a/arch/arm/mm/fault.c
3857+++ b/arch/arm/mm/fault.c
3858@@ -25,6 +25,7 @@
3859 #include <asm/system_misc.h>
3860 #include <asm/system_info.h>
3861 #include <asm/tlbflush.h>
3862+#include <asm/sections.h>
3863
3864 #include "fault.h"
3865
3866@@ -138,6 +139,20 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
3867 if (fixup_exception(regs))
3868 return;
3869
3870+#ifdef CONFIG_PAX_KERNEXEC
3871+ if ((fsr & FSR_WRITE) &&
3872+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
3873+ (MODULES_VADDR <= addr && addr < MODULES_END)))
3874+ {
3875+ if (current->signal->curr_ip)
3876+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3877+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3878+ else
3879+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
3880+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3881+ }
3882+#endif
3883+
3884 /*
3885 * No handler, we'll have to terminate things with extreme prejudice.
3886 */
3887@@ -174,6 +189,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
3888 }
3889 #endif
3890
3891+#ifdef CONFIG_PAX_PAGEEXEC
3892+ if (fsr & FSR_LNX_PF) {
3893+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
3894+ do_group_exit(SIGKILL);
3895+ }
3896+#endif
3897+
3898 tsk->thread.address = addr;
3899 tsk->thread.error_code = fsr;
3900 tsk->thread.trap_no = 14;
3901@@ -398,6 +420,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3902 }
3903 #endif /* CONFIG_MMU */
3904
3905+#ifdef CONFIG_PAX_PAGEEXEC
3906+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3907+{
3908+ long i;
3909+
3910+ printk(KERN_ERR "PAX: bytes at PC: ");
3911+ for (i = 0; i < 20; i++) {
3912+ unsigned char c;
3913+ if (get_user(c, (__force unsigned char __user *)pc+i))
3914+ printk(KERN_CONT "?? ");
3915+ else
3916+ printk(KERN_CONT "%02x ", c);
3917+ }
3918+ printk("\n");
3919+
3920+ printk(KERN_ERR "PAX: bytes at SP-4: ");
3921+ for (i = -1; i < 20; i++) {
3922+ unsigned long c;
3923+ if (get_user(c, (__force unsigned long __user *)sp+i))
3924+ printk(KERN_CONT "???????? ");
3925+ else
3926+ printk(KERN_CONT "%08lx ", c);
3927+ }
3928+ printk("\n");
3929+}
3930+#endif
3931+
3932 /*
3933 * First Level Translation Fault Handler
3934 *
3935@@ -543,9 +592,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3936 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
3937 struct siginfo info;
3938
3939+#ifdef CONFIG_PAX_MEMORY_UDEREF
3940+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
3941+ if (current->signal->curr_ip)
3942+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3943+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3944+ else
3945+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3946+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3947+ goto die;
3948+ }
3949+#endif
3950+
3951 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
3952 return;
3953
3954+die:
3955 printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
3956 inf->name, fsr, addr);
3957
3958@@ -575,9 +637,49 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
3959 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
3960 struct siginfo info;
3961
3962+ if (user_mode(regs)) {
3963+ if (addr == 0xffff0fe0UL) {
3964+ /*
3965+ * PaX: __kuser_get_tls emulation
3966+ */
3967+ regs->ARM_r0 = current_thread_info()->tp_value;
3968+ regs->ARM_pc = regs->ARM_lr;
3969+ return;
3970+ }
3971+ }
3972+
3973+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3974+ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
3975+ if (current->signal->curr_ip)
3976+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3977+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
3978+ addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr);
3979+ else
3980+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
3981+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
3982+ addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr);
3983+ goto die;
3984+ }
3985+#endif
3986+
3987+#ifdef CONFIG_PAX_REFCOUNT
3988+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
3989+ unsigned int bkpt;
3990+
3991+ if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
3992+ current->thread.error_code = ifsr;
3993+ current->thread.trap_no = 0;
3994+ pax_report_refcount_overflow(regs);
3995+ fixup_exception(regs);
3996+ return;
3997+ }
3998+ }
3999+#endif
4000+
4001 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
4002 return;
4003
4004+die:
4005 printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
4006 inf->name, ifsr, addr);
4007
4008diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
4009index cf08bdf..772656c 100644
4010--- a/arch/arm/mm/fault.h
4011+++ b/arch/arm/mm/fault.h
4012@@ -3,6 +3,7 @@
4013
4014 /*
4015 * Fault status register encodings. We steal bit 31 for our own purposes.
4016+ * Set when the FSR value is from an instruction fault.
4017 */
4018 #define FSR_LNX_PF (1 << 31)
4019 #define FSR_WRITE (1 << 11)
4020@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
4021 }
4022 #endif
4023
4024+/* valid for LPAE and !LPAE */
4025+static inline int is_xn_fault(unsigned int fsr)
4026+{
4027+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
4028+}
4029+
4030+static inline int is_domain_fault(unsigned int fsr)
4031+{
4032+ return ((fsr_fs(fsr) & 0xD) == 0x9);
4033+}
4034+
4035 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
4036 unsigned long search_exception_table(unsigned long addr);
4037
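
Both predicates work on the fault-status code returned by fsr_fs(). A standalone sketch of the decoding, with fsr_fs() simplified to a plain mask (the real helper also folds in FSR bit 10 on classic page tables):

    #include <stdio.h>

    /* simplified status extraction: real fsr_fs() also merges in bit 10 */
    static unsigned int fsr_fs(unsigned int fsr) { return fsr & 0x3f; }

    static int is_xn_fault(unsigned int fsr)     { return (fsr_fs(fsr) & 0x3c) == 0x0c; }
    static int is_domain_fault(unsigned int fsr) { return (fsr_fs(fsr) & 0x0d) == 0x09; }

    int main(void)
    {
        unsigned int samples[] = { 0x0d, 0x09, 0x05, 0x0b };
        for (unsigned i = 0; i < sizeof(samples)/sizeof(samples[0]); i++)
            printf("fsr=%#04x xn=%d domain=%d\n", samples[i],
                   is_xn_fault(samples[i]), is_domain_fault(samples[i]));
        return 0;
    }
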
4038diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
4039index ad722f1..763fdd3 100644
4040--- a/arch/arm/mm/init.c
4041+++ b/arch/arm/mm/init.c
4042@@ -30,6 +30,8 @@
4043 #include <asm/setup.h>
4044 #include <asm/tlb.h>
4045 #include <asm/fixmap.h>
4046+#include <asm/system_info.h>
4047+#include <asm/cp15.h>
4048
4049 #include <asm/mach/arch.h>
4050 #include <asm/mach/map.h>
4051@@ -736,7 +738,46 @@ void free_initmem(void)
4052 {
4053 #ifdef CONFIG_HAVE_TCM
4054 extern char __tcm_start, __tcm_end;
4055+#endif
4056
4057+#ifdef CONFIG_PAX_KERNEXEC
4058+ unsigned long addr;
4059+ pgd_t *pgd;
4060+ pud_t *pud;
4061+ pmd_t *pmd;
4062+ int cpu_arch = cpu_architecture();
4063+ unsigned int cr = get_cr();
4064+
4065+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
4066+ /* make page tables, etc. before .text NX */
4067+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
4068+ pgd = pgd_offset_k(addr);
4069+ pud = pud_offset(pgd, addr);
4070+ pmd = pmd_offset(pud, addr);
4071+ __section_update(pmd, addr, PMD_SECT_XN);
4072+ }
4073+ /* make init NX */
4074+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
4075+ pgd = pgd_offset_k(addr);
4076+ pud = pud_offset(pgd, addr);
4077+ pmd = pmd_offset(pud, addr);
4078+ __section_update(pmd, addr, PMD_SECT_XN);
4079+ }
4080+ /* make kernel code/rodata RX */
4081+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
4082+ pgd = pgd_offset_k(addr);
4083+ pud = pud_offset(pgd, addr);
4084+ pmd = pmd_offset(pud, addr);
4085+#ifdef CONFIG_ARM_LPAE
4086+ __section_update(pmd, addr, PMD_SECT_RDONLY);
4087+#else
4088+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
4089+#endif
4090+ }
4091+ }
4092+#endif
4093+
4094+#ifdef CONFIG_HAVE_TCM
4095 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
4096 totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
4097 __phys_to_pfn(__pa(&__tcm_end)),
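
free_initmem() walks three virtual ranges at SECTION_SIZE granularity, descending pgd->pud->pmd and updating each section: everything below .text and the whole init area become non-executable, while kernel code/rodata becomes read-only executable. A sketch of the walk's shape, with made-up addresses and a stub in place of the page-table descent:

    #include <stdio.h>

    #define SECTION_SIZE (1UL << 20)   /* 1 MiB sections on classic ARM */

    /* stands in for the pgd/pud/pmd walk plus __section_update() */
    static void section_update(unsigned long addr, const char *prot)
    {
        printf("section %#lx -> %s\n", addr, prot);
    }

    static void apply_range(unsigned long start, unsigned long end, const char *prot)
    {
        for (unsigned long a = start; a < end; a += SECTION_SIZE)
            section_update(a, prot);
    }

    int main(void)
    {
        /* illustrative layout, not real kernel addresses */
        unsigned long text = 0xc0100000UL, init = 0xc0500000UL, data = 0xc0600000UL;
        apply_range(0xc0000000UL, text, "XN");   /* page tables etc. before .text */
        apply_range(init, data, "XN");           /* init area */
        apply_range(text, init, "RX");           /* kernel code/rodata */
        return 0;
    }
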
4098diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
4099index 88fd86c..7a224ce 100644
4100--- a/arch/arm/mm/ioremap.c
4101+++ b/arch/arm/mm/ioremap.c
4102@@ -335,9 +335,9 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
4103 unsigned int mtype;
4104
4105 if (cached)
4106- mtype = MT_MEMORY;
4107+ mtype = MT_MEMORY_RX;
4108 else
4109- mtype = MT_MEMORY_NONCACHED;
4110+ mtype = MT_MEMORY_NONCACHED_RX;
4111
4112 return __arm_ioremap_caller(phys_addr, size, mtype,
4113 __builtin_return_address(0));
4114diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
4115index 10062ce..aa96dd7 100644
4116--- a/arch/arm/mm/mmap.c
4117+++ b/arch/arm/mm/mmap.c
4118@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4119 struct vm_area_struct *vma;
4120 int do_align = 0;
4121 int aliasing = cache_is_vipt_aliasing();
4122+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4123 struct vm_unmapped_area_info info;
4124
4125 /*
4126@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4127 if (len > TASK_SIZE)
4128 return -ENOMEM;
4129
4130+#ifdef CONFIG_PAX_RANDMMAP
4131+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4132+#endif
4133+
4134 if (addr) {
4135 if (do_align)
4136 addr = COLOUR_ALIGN(addr, pgoff);
4137@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4138 addr = PAGE_ALIGN(addr);
4139
4140 vma = find_vma(mm, addr);
4141- if (TASK_SIZE - len >= addr &&
4142- (!vma || addr + len <= vma->vm_start))
4143+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4144 return addr;
4145 }
4146
4147@@ -112,6 +116,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4148 unsigned long addr = addr0;
4149 int do_align = 0;
4150 int aliasing = cache_is_vipt_aliasing();
4151+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4152 struct vm_unmapped_area_info info;
4153
4154 /*
4155@@ -132,6 +137,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4156 return addr;
4157 }
4158
4159+#ifdef CONFIG_PAX_RANDMMAP
4160+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4161+#endif
4162+
4163 /* requesting a specific address */
4164 if (addr) {
4165 if (do_align)
4166@@ -139,8 +148,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4167 else
4168 addr = PAGE_ALIGN(addr);
4169 vma = find_vma(mm, addr);
4170- if (TASK_SIZE - len >= addr &&
4171- (!vma || addr + len <= vma->vm_start))
4172+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4173 return addr;
4174 }
4175
4176@@ -162,6 +170,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4177 VM_BUG_ON(addr != -ENOMEM);
4178 info.flags = 0;
4179 info.low_limit = mm->mmap_base;
4180+
4181+#ifdef CONFIG_PAX_RANDMMAP
4182+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4183+ info.low_limit += mm->delta_mmap;
4184+#endif
4185+
4186 info.high_limit = TASK_SIZE;
4187 addr = vm_unmapped_area(&info);
4188 }
4189@@ -173,6 +187,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4190 {
4191 unsigned long random_factor = 0UL;
4192
4193+#ifdef CONFIG_PAX_RANDMMAP
4194+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4195+#endif
4196+
4197 /* 8 bits of randomness in 20 address space bits */
4198 if ((current->flags & PF_RANDOMIZE) &&
4199 !(current->personality & ADDR_NO_RANDOMIZE))
4200@@ -180,10 +198,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4201
4202 if (mmap_is_legacy()) {
4203 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4204+
4205+#ifdef CONFIG_PAX_RANDMMAP
4206+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4207+ mm->mmap_base += mm->delta_mmap;
4208+#endif
4209+
4210 mm->get_unmapped_area = arch_get_unmapped_area;
4211 mm->unmap_area = arch_unmap_area;
4212 } else {
4213 mm->mmap_base = mmap_base(random_factor);
4214+
4215+#ifdef CONFIG_PAX_RANDMMAP
4216+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4217+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4218+#endif
4219+
4220 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4221 mm->unmap_area = arch_unmap_area_topdown;
4222 }
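
With PAX_RANDMMAP, the legacy bottom-up layout shifts mmap_base up by delta_mmap, while the top-down layout shifts it down by delta_mmap + delta_stack so mappings cannot creep into the randomized stack gap. A sketch of just that arithmetic; the constants and struct here are illustrative, not the kernel's:

    #include <stdio.h>

    #define TASK_UNMAPPED_BASE 0x40000000UL

    struct mm { unsigned long mmap_base, delta_mmap, delta_stack; int randmmap; };

    static unsigned long mmap_base(unsigned long rnd) { return 0xbf000000UL - rnd; }

    static void pick_layout(struct mm *mm, int legacy, unsigned long rnd)
    {
        if (legacy) {
            mm->mmap_base = TASK_UNMAPPED_BASE + rnd;
            if (mm->randmmap)
                mm->mmap_base += mm->delta_mmap;                   /* shift base up */
        } else {
            mm->mmap_base = mmap_base(rnd);
            if (mm->randmmap)
                mm->mmap_base -= mm->delta_mmap + mm->delta_stack; /* shift down */
        }
    }

    int main(void)
    {
        struct mm mm = { .delta_mmap = 0x01000000UL, .delta_stack = 0x00100000UL,
                         .randmmap = 1 };
        pick_layout(&mm, 1, 0x00200000UL);
        printf("legacy base  %#lx\n", mm.mmap_base);
        pick_layout(&mm, 0, 0x00200000UL);
        printf("topdown base %#lx\n", mm.mmap_base);
        return 0;
    }
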
4223diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
4224index ce328c7..35b88dc 100644
4225--- a/arch/arm/mm/mmu.c
4226+++ b/arch/arm/mm/mmu.c
4227@@ -35,6 +35,23 @@
4228
4229 #include "mm.h"
4230
4231+
4232+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4233+void modify_domain(unsigned int dom, unsigned int type)
4234+{
4235+ struct thread_info *thread = current_thread_info();
4236+ unsigned int domain = thread->cpu_domain;
4237+ /*
4238+ * DOMAIN_MANAGER might be defined to some other value,
4239+ * use the arch-defined constant
4240+ */
4241+ domain &= ~domain_val(dom, 3);
4242+ thread->cpu_domain = domain | domain_val(dom, type);
4243+ set_domain(thread->cpu_domain);
4244+}
4245+EXPORT_SYMBOL(modify_domain);
4246+#endif
4247+
4248 /*
4249 * empty_zero_page is a special page that is used for
4250 * zero-initialized data and COW.
4251@@ -195,10 +212,18 @@ void adjust_cr(unsigned long mask, unsigned long set)
4252 }
4253 #endif
4254
4255-#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
4256+#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY
4257 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4258
4259-static struct mem_type mem_types[] = {
4260+#ifdef CONFIG_PAX_KERNEXEC
4261+#define L_PTE_KERNEXEC L_PTE_RDONLY
4262+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4263+#else
4264+#define L_PTE_KERNEXEC L_PTE_DIRTY
4265+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4266+#endif
4267+
4268+static struct mem_type mem_types[] __read_only = {
4269 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4270 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4271 L_PTE_SHARED,
4272@@ -227,16 +252,16 @@ static struct mem_type mem_types[] = {
4273 [MT_UNCACHED] = {
4274 .prot_pte = PROT_PTE_DEVICE,
4275 .prot_l1 = PMD_TYPE_TABLE,
4276- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4277+ .prot_sect = PROT_SECT_DEVICE,
4278 .domain = DOMAIN_IO,
4279 },
4280 [MT_CACHECLEAN] = {
4281- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4282+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4283 .domain = DOMAIN_KERNEL,
4284 },
4285 #ifndef CONFIG_ARM_LPAE
4286 [MT_MINICLEAN] = {
4287- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4288+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_RDONLY,
4289 .domain = DOMAIN_KERNEL,
4290 },
4291 #endif
4292@@ -244,36 +269,54 @@ static struct mem_type mem_types[] = {
4293 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4294 L_PTE_RDONLY,
4295 .prot_l1 = PMD_TYPE_TABLE,
4296- .domain = DOMAIN_USER,
4297+ .domain = DOMAIN_VECTORS,
4298 },
4299 [MT_HIGH_VECTORS] = {
4300 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4301- L_PTE_USER | L_PTE_RDONLY,
4302+ L_PTE_RDONLY,
4303 .prot_l1 = PMD_TYPE_TABLE,
4304- .domain = DOMAIN_USER,
4305+ .domain = DOMAIN_VECTORS,
4306 },
4307- [MT_MEMORY] = {
4308+ [MT_MEMORY_RWX] = {
4309 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4310 .prot_l1 = PMD_TYPE_TABLE,
4311 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4312 .domain = DOMAIN_KERNEL,
4313 },
4314+ [MT_MEMORY_RW] = {
4315+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4316+ .prot_l1 = PMD_TYPE_TABLE,
4317+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4318+ .domain = DOMAIN_KERNEL,
4319+ },
4320+ [MT_MEMORY_RX] = {
4321+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4322+ .prot_l1 = PMD_TYPE_TABLE,
4323+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4324+ .domain = DOMAIN_KERNEL,
4325+ },
4326 [MT_ROM] = {
4327- .prot_sect = PMD_TYPE_SECT,
4328+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4329 .domain = DOMAIN_KERNEL,
4330 },
4331- [MT_MEMORY_NONCACHED] = {
4332+ [MT_MEMORY_NONCACHED_RW] = {
4333 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4334 L_PTE_MT_BUFFERABLE,
4335 .prot_l1 = PMD_TYPE_TABLE,
4336 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4337 .domain = DOMAIN_KERNEL,
4338 },
4339+ [MT_MEMORY_NONCACHED_RX] = {
4340+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4341+ L_PTE_MT_BUFFERABLE,
4342+ .prot_l1 = PMD_TYPE_TABLE,
4343+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4344+ .domain = DOMAIN_KERNEL,
4345+ },
4346 [MT_MEMORY_DTCM] = {
4347- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4348- L_PTE_XN,
4349+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4350 .prot_l1 = PMD_TYPE_TABLE,
4351- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4352+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4353 .domain = DOMAIN_KERNEL,
4354 },
4355 [MT_MEMORY_ITCM] = {
4356@@ -283,10 +326,10 @@ static struct mem_type mem_types[] = {
4357 },
4358 [MT_MEMORY_SO] = {
4359 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4360- L_PTE_MT_UNCACHED | L_PTE_XN,
4361+ L_PTE_MT_UNCACHED,
4362 .prot_l1 = PMD_TYPE_TABLE,
4363 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
4364- PMD_SECT_UNCACHED | PMD_SECT_XN,
4365+ PMD_SECT_UNCACHED,
4366 .domain = DOMAIN_KERNEL,
4367 },
4368 [MT_MEMORY_DMA_READY] = {
4369@@ -371,9 +414,35 @@ static void __init build_mem_type_table(void)
4370 * to prevent speculative instruction fetches.
4371 */
4372 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
4373+ mem_types[MT_DEVICE].prot_pte |= L_PTE_XN;
4374 mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
4375+ mem_types[MT_DEVICE_NONSHARED].prot_pte |= L_PTE_XN;
4376 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
4377+ mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_XN;
4378 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
4379+ mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_XN;
4380+
4381+ /* Mark other regions on ARMv6+ as execute-never */
4382+
4383+#ifdef CONFIG_PAX_KERNEXEC
4384+ mem_types[MT_UNCACHED].prot_sect |= PMD_SECT_XN;
4385+ mem_types[MT_UNCACHED].prot_pte |= L_PTE_XN;
4386+ mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_XN;
4387+ mem_types[MT_CACHECLEAN].prot_pte |= L_PTE_XN;
4388+#ifndef CONFIG_ARM_LPAE
4389+ mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_XN;
4390+ mem_types[MT_MINICLEAN].prot_pte |= L_PTE_XN;
4391+#endif
4392+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
4393+ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_XN;
4394+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_XN;
4395+ mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= L_PTE_XN;
4396+ mem_types[MT_MEMORY_DTCM].prot_sect |= PMD_SECT_XN;
4397+ mem_types[MT_MEMORY_DTCM].prot_pte |= L_PTE_XN;
4398+#endif
4399+
4400+ mem_types[MT_MEMORY_SO].prot_sect |= PMD_SECT_XN;
4401+ mem_types[MT_MEMORY_SO].prot_pte |= L_PTE_XN;
4402 }
4403 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4404 /*
4405@@ -432,6 +501,9 @@ static void __init build_mem_type_table(void)
4406 * from SVC mode and no access from userspace.
4407 */
4408 mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4409+#ifdef CONFIG_PAX_KERNEXEC
4410+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4411+#endif
4412 mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4413 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4414 #endif
4415@@ -448,11 +520,17 @@ static void __init build_mem_type_table(void)
4416 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
4417 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
4418 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
4419- mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
4420- mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
4421+ mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4422+ mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4423+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
4424+ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
4425+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
4426+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
4427 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
4428- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
4429- mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
4430+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_S;
4431+ mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= L_PTE_SHARED;
4432+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_S;
4433+ mem_types[MT_MEMORY_NONCACHED_RX].prot_pte |= L_PTE_SHARED;
4434 }
4435 }
4436
4437@@ -463,15 +541,20 @@ static void __init build_mem_type_table(void)
4438 if (cpu_arch >= CPU_ARCH_ARMv6) {
4439 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4440 /* Non-cacheable Normal is XCB = 001 */
4441- mem_types[MT_MEMORY_NONCACHED].prot_sect |=
4442+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
4443+ PMD_SECT_BUFFERED;
4444+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
4445 PMD_SECT_BUFFERED;
4446 } else {
4447 /* For both ARMv6 and non-TEX-remapping ARMv7 */
4448- mem_types[MT_MEMORY_NONCACHED].prot_sect |=
4449+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
4450+ PMD_SECT_TEX(1);
4451+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
4452 PMD_SECT_TEX(1);
4453 }
4454 } else {
4455- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4456+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_BUFFERABLE;
4457+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_BUFFERABLE;
4458 }
4459
4460 #ifdef CONFIG_ARM_LPAE
4461@@ -487,6 +570,8 @@ static void __init build_mem_type_table(void)
4462 vecs_pgprot |= PTE_EXT_AF;
4463 #endif
4464
4465+ user_pgprot |= __supported_pte_mask;
4466+
4467 for (i = 0; i < 16; i++) {
4468 pteval_t v = pgprot_val(protection_map[i]);
4469 protection_map[i] = __pgprot(v | user_pgprot);
4470@@ -501,10 +586,15 @@ static void __init build_mem_type_table(void)
4471
4472 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
4473 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
4474- mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
4475- mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
4476+ mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4477+ mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4478+ mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
4479+ mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
4480+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
4481+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
4482 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
4483- mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
4484+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= ecc_mask;
4485+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= ecc_mask;
4486 mem_types[MT_ROM].prot_sect |= cp->pmd;
4487
4488 switch (cp->pmd) {
4489@@ -1105,18 +1195,15 @@ void __init arm_mm_memblock_reserve(void)
4490 * called function. This means you can't use any function or debugging
4491 * method which may touch any device, otherwise the kernel _will_ crash.
4492 */
4493+
4494+static char vectors[PAGE_SIZE] __read_only __aligned(PAGE_SIZE);
4495+
4496 static void __init devicemaps_init(struct machine_desc *mdesc)
4497 {
4498 struct map_desc map;
4499 unsigned long addr;
4500- void *vectors;
4501
4502- /*
4503- * Allocate the vector page early.
4504- */
4505- vectors = early_alloc(PAGE_SIZE);
4506-
4507- early_trap_init(vectors);
4508+ early_trap_init(&vectors);
4509
4510 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4511 pmd_clear(pmd_off_k(addr));
4512@@ -1156,7 +1243,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
4513 * location (0xffff0000). If we aren't using high-vectors, also
4514 * create a mapping at the low-vectors virtual address.
4515 */
4516- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4517+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4518 map.virtual = 0xffff0000;
4519 map.length = PAGE_SIZE;
4520 map.type = MT_HIGH_VECTORS;
4521@@ -1214,8 +1301,39 @@ static void __init map_lowmem(void)
4522 map.pfn = __phys_to_pfn(start);
4523 map.virtual = __phys_to_virt(start);
4524 map.length = end - start;
4525- map.type = MT_MEMORY;
4526
4527+#ifdef CONFIG_PAX_KERNEXEC
4528+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4529+ struct map_desc kernel;
4530+ struct map_desc initmap;
4531+
4532+ /* when freeing initmem we will make this RW */
4533+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4534+ initmap.virtual = (unsigned long)__init_begin;
4535+ initmap.length = _sdata - __init_begin;
4536+ initmap.type = MT_MEMORY_RWX;
4537+ create_mapping(&initmap);
4538+
4539+ /* when freeing initmem we will make this RX */
4540+ kernel.pfn = __phys_to_pfn(__pa(_stext));
4541+ kernel.virtual = (unsigned long)_stext;
4542+ kernel.length = __init_begin - _stext;
4543+ kernel.type = MT_MEMORY_RWX;
4544+ create_mapping(&kernel);
4545+
4546+ if (map.virtual < (unsigned long)_stext) {
4547+ map.length = (unsigned long)_stext - map.virtual;
4548+ map.type = MT_MEMORY_RWX;
4549+ create_mapping(&map);
4550+ }
4551+
4552+ map.pfn = __phys_to_pfn(__pa(_sdata));
4553+ map.virtual = (unsigned long)_sdata;
4554+ map.length = end - __pa(_sdata);
4555+ }
4556+#endif
4557+
4558+ map.type = MT_MEMORY_RW;
4559 create_mapping(&map);
4560 }
4561 }
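
modify_domain() above is a read-modify-write on the thread's shadow of the ARM domain access control register: each domain owns a 2-bit field at bit position 2*dom, so the old field is masked out before the new access type is ORed in. The same arithmetic as a standalone sketch, with the hardware write stubbed out:

    #include <stdio.h>

    /* each domain owns a 2-bit field in the DACR: value = type << (2 * dom) */
    #define domain_val(dom, type) ((type) << (2 * (dom)))

    static unsigned int cpu_domain;    /* per-thread shadow of the register */

    static void modify_domain(unsigned int dom, unsigned int type)
    {
        unsigned int d = cpu_domain;
        d &= ~domain_val(dom, 3U);               /* clear the 2-bit field */
        cpu_domain = d | domain_val(dom, type);  /* install the new access type */
        /* real code also writes the hardware register via set_domain() */
    }

    int main(void)
    {
        modify_domain(0, 1);   /* domain 0: client */
        modify_domain(1, 3);   /* domain 1: manager */
        printf("DACR shadow = %#x\n", cpu_domain);  /* prints 0xd */
        return 0;
    }
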
4562diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
4563index 6d98c13..3cfb174 100644
4564--- a/arch/arm/mm/proc-v7-2level.S
4565+++ b/arch/arm/mm/proc-v7-2level.S
4566@@ -99,6 +99,9 @@ ENTRY(cpu_v7_set_pte_ext)
4567 tst r1, #L_PTE_XN
4568 orrne r3, r3, #PTE_EXT_XN
4569
4570+ tst r1, #L_PTE_PXN
4571+ orrne r3, r3, #PTE_EXT_PXN
4572+
4573 tst r1, #L_PTE_YOUNG
4574 tstne r1, #L_PTE_VALID
4575 #ifndef CONFIG_CPU_USE_DOMAINS
4576diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4577index a5bc92d..0bb4730 100644
4578--- a/arch/arm/plat-omap/sram.c
4579+++ b/arch/arm/plat-omap/sram.c
4580@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
4581 * Looks like we need to preserve some bootloader code at the
4582 * beginning of SRAM for jumping to flash for reboot to work...
4583 */
4584+ pax_open_kernel();
4585 memset_io(omap_sram_base + omap_sram_skip, 0,
4586 omap_sram_size - omap_sram_skip);
4587+ pax_close_kernel();
4588 }
4589diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
4590index f5144cd..71f6d1f 100644
4591--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
4592+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
4593@@ -47,7 +47,7 @@ struct samsung_dma_ops {
4594 int (*started)(unsigned ch);
4595 int (*flush)(unsigned ch);
4596 int (*stop)(unsigned ch);
4597-};
4598+} __no_const;
4599
4600 extern void *samsung_dmadev_get_ops(void);
4601 extern void *s3c_dma_get_ops(void);
4602diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
4603index 0c3ba9f..95722b3 100644
4604--- a/arch/arm64/kernel/debug-monitors.c
4605+++ b/arch/arm64/kernel/debug-monitors.c
4606@@ -151,7 +151,7 @@ static int __cpuinit os_lock_notify(struct notifier_block *self,
4607 return NOTIFY_OK;
4608 }
4609
4610-static struct notifier_block __cpuinitdata os_lock_nb = {
4611+static struct notifier_block os_lock_nb = {
4612 .notifier_call = os_lock_notify,
4613 };
4614
4615diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
4616index 5ab825c..96aaec8 100644
4617--- a/arch/arm64/kernel/hw_breakpoint.c
4618+++ b/arch/arm64/kernel/hw_breakpoint.c
4619@@ -831,7 +831,7 @@ static int __cpuinit hw_breakpoint_reset_notify(struct notifier_block *self,
4620 return NOTIFY_OK;
4621 }
4622
4623-static struct notifier_block __cpuinitdata hw_breakpoint_reset_nb = {
4624+static struct notifier_block hw_breakpoint_reset_nb = {
4625 .notifier_call = hw_breakpoint_reset_notify,
4626 };
4627
4628diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
4629index c3a58a1..78fbf54 100644
4630--- a/arch/avr32/include/asm/cache.h
4631+++ b/arch/avr32/include/asm/cache.h
4632@@ -1,8 +1,10 @@
4633 #ifndef __ASM_AVR32_CACHE_H
4634 #define __ASM_AVR32_CACHE_H
4635
4636+#include <linux/const.h>
4637+
4638 #define L1_CACHE_SHIFT 5
4639-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4640+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4641
4642 /*
4643 * Memory returned by kmalloc() may be used for DMA, so we must make
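
Defining L1_CACHE_BYTES via _AC(1,UL) gives the constant an unsigned long type in C while keeping it usable from assembly, where a UL suffix would not parse. A self-contained rendition of the <linux/const.h> mechanism:

    #include <stdio.h>

    /* paste the suffix when compiling C; drop it for the assembler,
     * where "1UL" is not a valid token */
    #ifdef __ASSEMBLY__
    #define _AC(X, Y)  X
    #else
    #define __AC(X, Y) (X##Y)
    #define _AC(X, Y)  __AC(X, Y)
    #endif

    #define L1_CACHE_SHIFT 5
    #define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)

    int main(void)
    {
        /* the constant is now unsigned long, so it cannot silently
         * truncate or sign-extend in mixed-width expressions */
        printf("%zu %lu\n", sizeof(L1_CACHE_BYTES), (unsigned long)L1_CACHE_BYTES);
        return 0;
    }
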
4644diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
4645index e2c3287..6c4f98c 100644
4646--- a/arch/avr32/include/asm/elf.h
4647+++ b/arch/avr32/include/asm/elf.h
4648@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
4649 the loader. We need to make sure that it is out of the way of the program
4650 that it will "exec", and that there is sufficient room for the brk. */
4651
4652-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
4653+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
4654
4655+#ifdef CONFIG_PAX_ASLR
4656+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
4657+
4658+#define PAX_DELTA_MMAP_LEN 15
4659+#define PAX_DELTA_STACK_LEN 15
4660+#endif
4661
4662 /* This yields a mask that user programs can use to figure out what
4663 instruction set this CPU supports. This could be done in user space,
4664diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
4665index 479330b..53717a8 100644
4666--- a/arch/avr32/include/asm/kmap_types.h
4667+++ b/arch/avr32/include/asm/kmap_types.h
4668@@ -2,9 +2,9 @@
4669 #define __ASM_AVR32_KMAP_TYPES_H
4670
4671 #ifdef CONFIG_DEBUG_HIGHMEM
4672-# define KM_TYPE_NR 29
4673+# define KM_TYPE_NR 30
4674 #else
4675-# define KM_TYPE_NR 14
4676+# define KM_TYPE_NR 15
4677 #endif
4678
4679 #endif /* __ASM_AVR32_KMAP_TYPES_H */
4680diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
4681index b2f2d2d..d1c85cb 100644
4682--- a/arch/avr32/mm/fault.c
4683+++ b/arch/avr32/mm/fault.c
4684@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
4685
4686 int exception_trace = 1;
4687
4688+#ifdef CONFIG_PAX_PAGEEXEC
4689+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4690+{
4691+ unsigned long i;
4692+
4693+ printk(KERN_ERR "PAX: bytes at PC: ");
4694+ for (i = 0; i < 20; i++) {
4695+ unsigned char c;
4696+ if (get_user(c, (unsigned char *)pc+i))
4697+ printk(KERN_CONT "???????? ");
4698+ else
4699+ printk(KERN_CONT "%02x ", c);
4700+ }
4701+ printk("\n");
4702+}
4703+#endif
4704+
4705 /*
4706 * This routine handles page faults. It determines the address and the
4707 * problem, and then passes it off to one of the appropriate routines.
4708@@ -174,6 +191,16 @@ bad_area:
4709 up_read(&mm->mmap_sem);
4710
4711 if (user_mode(regs)) {
4712+
4713+#ifdef CONFIG_PAX_PAGEEXEC
4714+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4715+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
4716+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
4717+ do_group_exit(SIGKILL);
4718+ }
4719+ }
4720+#endif
4721+
4722 if (exception_trace && printk_ratelimit())
4723 printk("%s%s[%d]: segfault at %08lx pc %08lx "
4724 "sp %08lx ecr %lu\n",
4725diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
4726index 568885a..f8008df 100644
4727--- a/arch/blackfin/include/asm/cache.h
4728+++ b/arch/blackfin/include/asm/cache.h
4729@@ -7,6 +7,7 @@
4730 #ifndef __ARCH_BLACKFIN_CACHE_H
4731 #define __ARCH_BLACKFIN_CACHE_H
4732
4733+#include <linux/const.h>
4734 #include <linux/linkage.h> /* for asmlinkage */
4735
4736 /*
4737@@ -14,7 +15,7 @@
4738 * Blackfin loads 32 bytes for cache
4739 */
4740 #define L1_CACHE_SHIFT 5
4741-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4742+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4743 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4744
4745 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
4746diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
4747index aea2718..3639a60 100644
4748--- a/arch/cris/include/arch-v10/arch/cache.h
4749+++ b/arch/cris/include/arch-v10/arch/cache.h
4750@@ -1,8 +1,9 @@
4751 #ifndef _ASM_ARCH_CACHE_H
4752 #define _ASM_ARCH_CACHE_H
4753
4754+#include <linux/const.h>
4755 /* Etrax 100LX have 32-byte cache-lines. */
4756-#define L1_CACHE_BYTES 32
4757 #define L1_CACHE_SHIFT 5
4758+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4759
4760 #endif /* _ASM_ARCH_CACHE_H */
4761diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
4762index 7caf25d..ee65ac5 100644
4763--- a/arch/cris/include/arch-v32/arch/cache.h
4764+++ b/arch/cris/include/arch-v32/arch/cache.h
4765@@ -1,11 +1,12 @@
4766 #ifndef _ASM_CRIS_ARCH_CACHE_H
4767 #define _ASM_CRIS_ARCH_CACHE_H
4768
4769+#include <linux/const.h>
4770 #include <arch/hwregs/dma.h>
4771
4772 /* A cache-line is 32 bytes. */
4773-#define L1_CACHE_BYTES 32
4774 #define L1_CACHE_SHIFT 5
4775+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4776
4777 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4778
4779diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
4780index b86329d..6709906 100644
4781--- a/arch/frv/include/asm/atomic.h
4782+++ b/arch/frv/include/asm/atomic.h
4783@@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v)
4784 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
4785 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
4786
4787+#define atomic64_read_unchecked(v) atomic64_read(v)
4788+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4789+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4790+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4791+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4792+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4793+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4794+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4795+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4796+
4797 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
4798 {
4799 int c, old;
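
frv has no overflow-checked atomic64 implementation, so the *_unchecked variants simply alias the plain operations; on architectures with PAX_REFCOUNT support, the plain ops trap on signed overflow while the unchecked ones are allowed to wrap. A rough userspace sketch of that split using GCC builtins — note the checked flavour below is written as load/check/store for readability and is not race-free; a real implementation would use a cmpxchg loop or inline asm:

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { volatile long counter; } atomic64_t;

    /* unchecked: plain wrapping add (what the frv aliases map to) */
    static void atomic64_add_unchecked(long i, atomic64_t *v)
    {
        __atomic_add_fetch(&v->counter, i, __ATOMIC_SEQ_CST);
    }

    /* checked flavour: abort on signed overflow (the REFCOUNT idea) */
    static void atomic64_add(long i, atomic64_t *v)
    {
        long old = __atomic_load_n(&v->counter, __ATOMIC_SEQ_CST), sum;
        if (__builtin_add_overflow(old, i, &sum)) {
            fprintf(stderr, "refcount overflow\n");
            abort();
        }
        __atomic_store_n(&v->counter, sum, __ATOMIC_SEQ_CST);
    }

    int main(void)
    {
        atomic64_t v = { 40 };
        atomic64_add(2, &v);             /* would abort on LONG_MAX overflow */
        atomic64_add_unchecked(0, &v);   /* wraps silently by design */
        printf("%ld\n", v.counter);
        return 0;
    }
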
4800diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
4801index 2797163..c2a401d 100644
4802--- a/arch/frv/include/asm/cache.h
4803+++ b/arch/frv/include/asm/cache.h
4804@@ -12,10 +12,11 @@
4805 #ifndef __ASM_CACHE_H
4806 #define __ASM_CACHE_H
4807
4808+#include <linux/const.h>
4809
4810 /* bytes per L1 cache line */
4811 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
4812-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4813+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4814
4815 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4816 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4817diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
4818index 43901f2..0d8b865 100644
4819--- a/arch/frv/include/asm/kmap_types.h
4820+++ b/arch/frv/include/asm/kmap_types.h
4821@@ -2,6 +2,6 @@
4822 #ifndef _ASM_KMAP_TYPES_H
4823 #define _ASM_KMAP_TYPES_H
4824
4825-#define KM_TYPE_NR 17
4826+#define KM_TYPE_NR 18
4827
4828 #endif
4829diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
4830index 385fd30..3aaf4fe 100644
4831--- a/arch/frv/mm/elf-fdpic.c
4832+++ b/arch/frv/mm/elf-fdpic.c
4833@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4834 {
4835 struct vm_area_struct *vma;
4836 unsigned long limit;
4837+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
4838
4839 if (len > TASK_SIZE)
4840 return -ENOMEM;
4841@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4842 if (addr) {
4843 addr = PAGE_ALIGN(addr);
4844 vma = find_vma(current->mm, addr);
4845- if (TASK_SIZE - len >= addr &&
4846- (!vma || addr + len <= vma->vm_start))
4847+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4848 goto success;
4849 }
4850
4851@@ -89,7 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4852 for (; vma; vma = vma->vm_next) {
4853 if (addr > limit)
4854 break;
4855- if (addr + len <= vma->vm_start)
4856+ if (check_heap_stack_gap(vma, addr, len, offset))
4857 goto success;
4858 addr = vma->vm_end;
4859 }
4860@@ -104,7 +104,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4861 for (; vma; vma = vma->vm_next) {
4862 if (addr > limit)
4863 break;
4864- if (addr + len <= vma->vm_start)
4865+ if (check_heap_stack_gap(vma, addr, len, offset))
4866 goto success;
4867 addr = vma->vm_end;
4868 }
4869diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
4870index f4ca594..adc72fd6 100644
4871--- a/arch/hexagon/include/asm/cache.h
4872+++ b/arch/hexagon/include/asm/cache.h
4873@@ -21,9 +21,11 @@
4874 #ifndef __ASM_CACHE_H
4875 #define __ASM_CACHE_H
4876
4877+#include <linux/const.h>
4878+
4879 /* Bytes per L1 cache line */
4880-#define L1_CACHE_SHIFT (5)
4881-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4882+#define L1_CACHE_SHIFT 5
4883+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4884
4885 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
4886 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
4887diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
4888index 6e6fe18..a6ae668 100644
4889--- a/arch/ia64/include/asm/atomic.h
4890+++ b/arch/ia64/include/asm/atomic.h
4891@@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
4892 #define atomic64_inc(v) atomic64_add(1, (v))
4893 #define atomic64_dec(v) atomic64_sub(1, (v))
4894
4895+#define atomic64_read_unchecked(v) atomic64_read(v)
4896+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4897+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4898+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4899+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4900+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4901+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4902+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4903+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4904+
4905 /* Atomic operations are already serializing */
4906 #define smp_mb__before_atomic_dec() barrier()
4907 #define smp_mb__after_atomic_dec() barrier()
4908diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
4909index 988254a..e1ee885 100644
4910--- a/arch/ia64/include/asm/cache.h
4911+++ b/arch/ia64/include/asm/cache.h
4912@@ -1,6 +1,7 @@
4913 #ifndef _ASM_IA64_CACHE_H
4914 #define _ASM_IA64_CACHE_H
4915
4916+#include <linux/const.h>
4917
4918 /*
4919 * Copyright (C) 1998-2000 Hewlett-Packard Co
4920@@ -9,7 +10,7 @@
4921
4922 /* Bytes per L1 (data) cache line. */
4923 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
4924-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4925+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4926
4927 #ifdef CONFIG_SMP
4928 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
4929diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
4930index b5298eb..67c6e62 100644
4931--- a/arch/ia64/include/asm/elf.h
4932+++ b/arch/ia64/include/asm/elf.h
4933@@ -42,6 +42,13 @@
4934 */
4935 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
4936
4937+#ifdef CONFIG_PAX_ASLR
4938+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
4939+
4940+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4941+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4942+#endif
4943+
4944 #define PT_IA_64_UNWIND 0x70000001
4945
4946 /* IA-64 relocations: */
4947diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
4948index 96a8d92..617a1cf 100644
4949--- a/arch/ia64/include/asm/pgalloc.h
4950+++ b/arch/ia64/include/asm/pgalloc.h
4951@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4952 pgd_val(*pgd_entry) = __pa(pud);
4953 }
4954
4955+static inline void
4956+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4957+{
4958+ pgd_populate(mm, pgd_entry, pud);
4959+}
4960+
4961 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
4962 {
4963 return quicklist_alloc(0, GFP_KERNEL, NULL);
4964@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
4965 pud_val(*pud_entry) = __pa(pmd);
4966 }
4967
4968+static inline void
4969+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
4970+{
4971+ pud_populate(mm, pud_entry, pmd);
4972+}
4973+
4974 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
4975 {
4976 return quicklist_alloc(0, GFP_KERNEL, NULL);
4977diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
4978index 815810c..d60bd4c 100644
4979--- a/arch/ia64/include/asm/pgtable.h
4980+++ b/arch/ia64/include/asm/pgtable.h
4981@@ -12,7 +12,7 @@
4982 * David Mosberger-Tang <davidm@hpl.hp.com>
4983 */
4984
4985-
4986+#include <linux/const.h>
4987 #include <asm/mman.h>
4988 #include <asm/page.h>
4989 #include <asm/processor.h>
4990@@ -142,6 +142,17 @@
4991 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4992 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4993 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
4994+
4995+#ifdef CONFIG_PAX_PAGEEXEC
4996+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
4997+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4998+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4999+#else
5000+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5001+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5002+# define PAGE_COPY_NOEXEC PAGE_COPY
5003+#endif
5004+
5005 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
5006 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
5007 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
5008diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
5009index 54ff557..70c88b7 100644
5010--- a/arch/ia64/include/asm/spinlock.h
5011+++ b/arch/ia64/include/asm/spinlock.h
5012@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
5013 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
5014
5015 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
5016- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
5017+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
5018 }
5019
5020 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
5021diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
5022index 449c8c0..18965fb 100644
5023--- a/arch/ia64/include/asm/uaccess.h
5024+++ b/arch/ia64/include/asm/uaccess.h
5025@@ -240,12 +240,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
5026 static inline unsigned long
5027 __copy_to_user (void __user *to, const void *from, unsigned long count)
5028 {
5029+ if (count > INT_MAX)
5030+ return count;
5031+
5032+ if (!__builtin_constant_p(count))
5033+ check_object_size(from, count, true);
5034+
5035 return __copy_user(to, (__force void __user *) from, count);
5036 }
5037
5038 static inline unsigned long
5039 __copy_from_user (void *to, const void __user *from, unsigned long count)
5040 {
5041+ if (count > INT_MAX)
5042+ return count;
5043+
5044+ if (!__builtin_constant_p(count))
5045+ check_object_size(to, count, false);
5046+
5047 return __copy_user((__force void __user *) to, from, count);
5048 }
5049
5050@@ -255,10 +267,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5051 ({ \
5052 void __user *__cu_to = (to); \
5053 const void *__cu_from = (from); \
5054- long __cu_len = (n); \
5055+ unsigned long __cu_len = (n); \
5056 \
5057- if (__access_ok(__cu_to, __cu_len, get_fs())) \
5058+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
5059+ if (!__builtin_constant_p(n)) \
5060+ check_object_size(__cu_from, __cu_len, true); \
5061 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
5062+ } \
5063 __cu_len; \
5064 })
5065
5066@@ -266,11 +281,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5067 ({ \
5068 void *__cu_to = (to); \
5069 const void __user *__cu_from = (from); \
5070- long __cu_len = (n); \
5071+ unsigned long __cu_len = (n); \
5072 \
5073 __chk_user_ptr(__cu_from); \
5074- if (__access_ok(__cu_from, __cu_len, get_fs())) \
5075+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
5076+ if (!__builtin_constant_p(n)) \
5077+ check_object_size(__cu_to, __cu_len, false); \
5078 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
5079+ } \
5080 __cu_len; \
5081 })
5082
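Both copy helpers and both copy macros above gain the same pair of guards: a count > INT_MAX bail-out, which catches negative sizes that were cast to unsigned long, and a check_object_size() call that lets the usercopy instrumentation validate non-constant lengths against the bounds of the kernel object. A minimal user-space sketch of the clamp (copy_clamped() is illustrative only; memcpy stands in for __copy_user):

#include <limits.h>
#include <string.h>

/* a negative size cast to unsigned long is > INT_MAX: refuse it up
 * front and report the whole count as not copied */
static unsigned long copy_clamped(void *to, const void *from, unsigned long count)
{
	if (count > INT_MAX)
		return count;		/* caller sees a complete failure */
	memcpy(to, from, count);	/* stands in for __copy_user() */
	return 0;			/* 0 bytes left uncopied */
}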
5083diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
5084index 2d67317..07d8bfa 100644
5085--- a/arch/ia64/kernel/err_inject.c
5086+++ b/arch/ia64/kernel/err_inject.c
5087@@ -256,7 +256,7 @@ static int __cpuinit err_inject_cpu_callback(struct notifier_block *nfb,
5088 return NOTIFY_OK;
5089 }
5090
5091-static struct notifier_block __cpuinitdata err_inject_cpu_notifier =
5092+static struct notifier_block err_inject_cpu_notifier =
5093 {
5094 .notifier_call = err_inject_cpu_callback,
5095 };
5096diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
5097index 65bf9cd..794f06b 100644
5098--- a/arch/ia64/kernel/mca.c
5099+++ b/arch/ia64/kernel/mca.c
5100@@ -1922,7 +1922,7 @@ static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
5101 return NOTIFY_OK;
5102 }
5103
5104-static struct notifier_block mca_cpu_notifier __cpuinitdata = {
5105+static struct notifier_block mca_cpu_notifier = {
5106 .notifier_call = mca_cpu_callback
5107 };
5108
5109diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
5110index 24603be..948052d 100644
5111--- a/arch/ia64/kernel/module.c
5112+++ b/arch/ia64/kernel/module.c
5113@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
5114 void
5115 module_free (struct module *mod, void *module_region)
5116 {
5117- if (mod && mod->arch.init_unw_table &&
5118- module_region == mod->module_init) {
5119+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
5120 unw_remove_unwind_table(mod->arch.init_unw_table);
5121 mod->arch.init_unw_table = NULL;
5122 }
5123@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
5124 }
5125
5126 static inline int
5127+in_init_rx (const struct module *mod, uint64_t addr)
5128+{
5129+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
5130+}
5131+
5132+static inline int
5133+in_init_rw (const struct module *mod, uint64_t addr)
5134+{
5135+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
5136+}
5137+
5138+static inline int
5139 in_init (const struct module *mod, uint64_t addr)
5140 {
5141- return addr - (uint64_t) mod->module_init < mod->init_size;
5142+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
5143+}
5144+
5145+static inline int
5146+in_core_rx (const struct module *mod, uint64_t addr)
5147+{
5148+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
5149+}
5150+
5151+static inline int
5152+in_core_rw (const struct module *mod, uint64_t addr)
5153+{
5154+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
5155 }
5156
5157 static inline int
5158 in_core (const struct module *mod, uint64_t addr)
5159 {
5160- return addr - (uint64_t) mod->module_core < mod->core_size;
5161+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
5162 }
5163
5164 static inline int
5165@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
5166 break;
5167
5168 case RV_BDREL:
5169- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
5170+ if (in_init_rx(mod, val))
5171+ val -= (uint64_t) mod->module_init_rx;
5172+ else if (in_init_rw(mod, val))
5173+ val -= (uint64_t) mod->module_init_rw;
5174+ else if (in_core_rx(mod, val))
5175+ val -= (uint64_t) mod->module_core_rx;
5176+ else if (in_core_rw(mod, val))
5177+ val -= (uint64_t) mod->module_core_rw;
5178 break;
5179
5180 case RV_LTV:
5181@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
5182 * addresses have been selected...
5183 */
5184 uint64_t gp;
5185- if (mod->core_size > MAX_LTOFF)
5186+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
5187 /*
5188 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
5189 * at the end of the module.
5190 */
5191- gp = mod->core_size - MAX_LTOFF / 2;
5192+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
5193 else
5194- gp = mod->core_size / 2;
5195- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
5196+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
5197+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
5198 mod->arch.gp = gp;
5199 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
5200 }
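The in_init_rx()/in_init_rw()/in_core_rx()/in_core_rw() predicates above all rely on the same single-comparison range test: because the subtraction is evaluated in uint64_t, an address below the base wraps around to a huge value and fails the < size test, so no explicit lower-bound check is needed. In isolation:

#include <stdint.h>

/* one unsigned compare covers both ends of [base, base + size) */
static int in_range(uint64_t addr, uint64_t base, uint64_t size)
{
	return addr - base < size;	/* wraps, and fails, when addr < base */
}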
5201diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
5202index 77597e5..189dd62f 100644
5203--- a/arch/ia64/kernel/palinfo.c
5204+++ b/arch/ia64/kernel/palinfo.c
5205@@ -977,7 +977,7 @@ create_palinfo_proc_entries(unsigned int cpu)
5206 struct proc_dir_entry **pdir;
5207 struct proc_dir_entry *cpu_dir;
5208 int j;
5209- char cpustr[sizeof(CPUSTR)];
5210+ char cpustr[3+4+1];
5211
5212
5213 /*
5214@@ -1045,7 +1045,7 @@ static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
5215 return NOTIFY_OK;
5216 }
5217
5218-static struct notifier_block __refdata palinfo_cpu_notifier =
5219+static struct notifier_block palinfo_cpu_notifier =
5220 {
5221 .notifier_call = palinfo_cpu_callback,
5222 .priority = 0,
5223diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
5224index 79802e5..1a89ec5 100644
5225--- a/arch/ia64/kernel/salinfo.c
5226+++ b/arch/ia64/kernel/salinfo.c
5227@@ -616,7 +616,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu
5228 return NOTIFY_OK;
5229 }
5230
5231-static struct notifier_block salinfo_cpu_notifier __cpuinitdata =
5232+static struct notifier_block salinfo_cpu_notifier =
5233 {
5234 .notifier_call = salinfo_cpu_callback,
5235 .priority = 0,
5236diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
5237index d9439ef..d0cac6b 100644
5238--- a/arch/ia64/kernel/sys_ia64.c
5239+++ b/arch/ia64/kernel/sys_ia64.c
5240@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5241 unsigned long start_addr, align_mask = PAGE_SIZE - 1;
5242 struct mm_struct *mm = current->mm;
5243 struct vm_area_struct *vma;
5244+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5245
5246 if (len > RGN_MAP_LIMIT)
5247 return -ENOMEM;
5248@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5249 if (REGION_NUMBER(addr) == RGN_HPAGE)
5250 addr = 0;
5251 #endif
5252+
5253+#ifdef CONFIG_PAX_RANDMMAP
5254+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5255+ addr = mm->free_area_cache;
5256+ else
5257+#endif
5258+
5259 if (!addr)
5260 addr = mm->free_area_cache;
5261
5262@@ -61,14 +69,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5263 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
5264 /* At this point: (!vma || addr < vma->vm_end). */
5265 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
5266- if (start_addr != TASK_UNMAPPED_BASE) {
5267+ if (start_addr != mm->mmap_base) {
5268 /* Start a new search --- just in case we missed some holes. */
5269- addr = TASK_UNMAPPED_BASE;
5270+ addr = mm->mmap_base;
5271 goto full_search;
5272 }
5273 return -ENOMEM;
5274 }
5275- if (!vma || addr + len <= vma->vm_start) {
5276+ if (check_heap_stack_gap(vma, addr, len, offset)) {
5277 /* Remember the address where we stopped this search: */
5278 mm->free_area_cache = addr + len;
5279 return addr;
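check_heap_stack_gap(), added to mm/mmap.c elsewhere in this patch, replaces the bare "fits before the next vma" test so that a guard gap, plus the per-thread random offset computed above via gr_rand_threadstack_offset(), is preserved between a new mapping and a following stack vma. A simplified sketch of the shape of that test (the real helper applies the gap only against VM_GROWSDOWN vmas):

/* simplified: require len plus a guard gap to fit below the next vma */
static int fits_with_gap(const struct vm_area_struct *vma,
			 unsigned long addr, unsigned long len,
			 unsigned long gap)
{
	if (!vma)
		return 1;			/* nothing mapped above */
	return addr + len + gap <= vma->vm_start;
}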
5280diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
5281index dc00b2c..cce53c2 100644
5282--- a/arch/ia64/kernel/topology.c
5283+++ b/arch/ia64/kernel/topology.c
5284@@ -445,7 +445,7 @@ static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
5285 return NOTIFY_OK;
5286 }
5287
5288-static struct notifier_block __cpuinitdata cache_cpu_notifier =
5289+static struct notifier_block cache_cpu_notifier =
5290 {
5291 .notifier_call = cache_cpu_callback
5292 };
5293diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5294index 0ccb28f..8992469 100644
5295--- a/arch/ia64/kernel/vmlinux.lds.S
5296+++ b/arch/ia64/kernel/vmlinux.lds.S
5297@@ -198,7 +198,7 @@ SECTIONS {
5298 /* Per-cpu data: */
5299 . = ALIGN(PERCPU_PAGE_SIZE);
5300 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5301- __phys_per_cpu_start = __per_cpu_load;
5302+ __phys_per_cpu_start = per_cpu_load;
5303 /*
5304 * ensure percpu data fits
5305 * into percpu page size
5306diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5307index 6cf0341..d352594 100644
5308--- a/arch/ia64/mm/fault.c
5309+++ b/arch/ia64/mm/fault.c
5310@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5311 return pte_present(pte);
5312 }
5313
5314+#ifdef CONFIG_PAX_PAGEEXEC
5315+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5316+{
5317+ unsigned long i;
5318+
5319+ printk(KERN_ERR "PAX: bytes at PC: ");
5320+ for (i = 0; i < 8; i++) {
5321+ unsigned int c;
5322+ if (get_user(c, (unsigned int *)pc+i))
5323+ printk(KERN_CONT "???????? ");
5324+ else
5325+ printk(KERN_CONT "%08x ", c);
5326+ }
5327+ printk("\n");
5328+}
5329+#endif
5330+
5331 # define VM_READ_BIT 0
5332 # define VM_WRITE_BIT 1
5333 # define VM_EXEC_BIT 2
5334@@ -149,8 +166,21 @@ retry:
5335 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5336 goto bad_area;
5337
5338- if ((vma->vm_flags & mask) != mask)
5339+ if ((vma->vm_flags & mask) != mask) {
5340+
5341+#ifdef CONFIG_PAX_PAGEEXEC
5342+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5343+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5344+ goto bad_area;
5345+
5346+ up_read(&mm->mmap_sem);
5347+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5348+ do_group_exit(SIGKILL);
5349+ }
5350+#endif
5351+
5352 goto bad_area;
5353+ }
5354
5355 /*
5356 * If for any reason at all we couldn't handle the fault, make
5357diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5358index 5ca674b..127c3cb 100644
5359--- a/arch/ia64/mm/hugetlbpage.c
5360+++ b/arch/ia64/mm/hugetlbpage.c
5361@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5362 unsigned long pgoff, unsigned long flags)
5363 {
5364 struct vm_area_struct *vmm;
5365+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
5366
5367 if (len > RGN_MAP_LIMIT)
5368 return -ENOMEM;
5369@@ -171,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5370 /* At this point: (!vmm || addr < vmm->vm_end). */
5371 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
5372 return -ENOMEM;
5373- if (!vmm || (addr + len) <= vmm->vm_start)
5374+ if (check_heap_stack_gap(vmm, addr, len, offset))
5375 return addr;
5376 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
5377 }
5378diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
5379index b755ea9..b9a969e 100644
5380--- a/arch/ia64/mm/init.c
5381+++ b/arch/ia64/mm/init.c
5382@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
5383 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
5384 vma->vm_end = vma->vm_start + PAGE_SIZE;
5385 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
5386+
5387+#ifdef CONFIG_PAX_PAGEEXEC
5388+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
5389+ vma->vm_flags &= ~VM_EXEC;
5390+
5391+#ifdef CONFIG_PAX_MPROTECT
5392+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
5393+ vma->vm_flags &= ~VM_MAYEXEC;
5394+#endif
5395+
5396+ }
5397+#endif
5398+
5399 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5400 down_write(&current->mm->mmap_sem);
5401 if (insert_vm_struct(current->mm, vma)) {
5402diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
5403index 40b3ee9..8c2c112 100644
5404--- a/arch/m32r/include/asm/cache.h
5405+++ b/arch/m32r/include/asm/cache.h
5406@@ -1,8 +1,10 @@
5407 #ifndef _ASM_M32R_CACHE_H
5408 #define _ASM_M32R_CACHE_H
5409
5410+#include <linux/const.h>
5411+
5412 /* L1 cache line size */
5413 #define L1_CACHE_SHIFT 4
5414-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5415+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5416
5417 #endif /* _ASM_M32R_CACHE_H */
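The same conversion is repeated below for m68k, microblaze, mips, mn10300, openrisc, parisc and powerpc: expressing L1_CACHE_BYTES through _AC() makes the constant usable from assembly as well as C, because <linux/const.h> drops the UL suffix when __ASSEMBLY__ is defined. For reference, the idiom itself (abridged from include/linux/const.h):

#ifdef __ASSEMBLY__
#define _AC(X,Y)	X
#else
#define __AC(X,Y)	(X##Y)
#define _AC(X,Y)	__AC(X,Y)
#endif

#define L1_CACHE_BYTES	(_AC(1,UL) << L1_CACHE_SHIFT)	/* 16UL in C, 16 in asm */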
5418diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
5419index 82abd15..d95ae5d 100644
5420--- a/arch/m32r/lib/usercopy.c
5421+++ b/arch/m32r/lib/usercopy.c
5422@@ -14,6 +14,9 @@
5423 unsigned long
5424 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5425 {
5426+ if ((long)n < 0)
5427+ return n;
5428+
5429 prefetch(from);
5430 if (access_ok(VERIFY_WRITE, to, n))
5431 __copy_user(to,from,n);
5432@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5433 unsigned long
5434 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
5435 {
5436+ if ((long)n < 0)
5437+ return n;
5438+
5439 prefetchw(to);
5440 if (access_ok(VERIFY_READ, from, n))
5441 __copy_user_zeroing(to,from,n);
5442diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
5443index 0395c51..5f26031 100644
5444--- a/arch/m68k/include/asm/cache.h
5445+++ b/arch/m68k/include/asm/cache.h
5446@@ -4,9 +4,11 @@
5447 #ifndef __ARCH_M68K_CACHE_H
5448 #define __ARCH_M68K_CACHE_H
5449
5450+#include <linux/const.h>
5451+
5452 /* bytes per L1 cache line */
5453 #define L1_CACHE_SHIFT 4
5454-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
5455+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5456
5457 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5458
5459diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5460index 4efe96a..60e8699 100644
5461--- a/arch/microblaze/include/asm/cache.h
5462+++ b/arch/microblaze/include/asm/cache.h
5463@@ -13,11 +13,12 @@
5464 #ifndef _ASM_MICROBLAZE_CACHE_H
5465 #define _ASM_MICROBLAZE_CACHE_H
5466
5467+#include <linux/const.h>
5468 #include <asm/registers.h>
5469
5470 #define L1_CACHE_SHIFT 5
5471 /* word-granular cache in microblaze */
5472-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5473+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5474
5475 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5476
5477diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
5478index 01cc6ba..bcb7a5d 100644
5479--- a/arch/mips/include/asm/atomic.h
5480+++ b/arch/mips/include/asm/atomic.h
5481@@ -21,6 +21,10 @@
5482 #include <asm/cmpxchg.h>
5483 #include <asm/war.h>
5484
5485+#ifdef CONFIG_GENERIC_ATOMIC64
5486+#include <asm-generic/atomic64.h>
5487+#endif
5488+
5489 #define ATOMIC_INIT(i) { (i) }
5490
5491 /*
5492@@ -759,6 +763,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
5493 */
5494 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
5495
5496+#define atomic64_read_unchecked(v) atomic64_read(v)
5497+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5498+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5499+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5500+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5501+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5502+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5503+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5504+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5505+
5506 #endif /* CONFIG_64BIT */
5507
5508 /*
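MIPS, like parisc and powerpc further down, implements no overflow-checked atomics, so the *_unchecked names are plain aliases: code shared with architectures that do trap reference-counter overflow (x86 under PAX_REFCOUNT) can then call the unchecked variants unconditionally for counters that are allowed to wrap. The fallback pattern, reduced to its shape:

/* sketch: with no checked atomics on this arch, the _unchecked API
 * collapses onto the ordinary one */
#ifndef atomic64_add_return_unchecked
#define atomic64_add_return_unchecked(a, v)	atomic64_add_return((a), (v))
#define atomic64_inc_unchecked(v)		atomic64_inc(v)
#endif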
5509diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
5510index b4db69f..8f3b093 100644
5511--- a/arch/mips/include/asm/cache.h
5512+++ b/arch/mips/include/asm/cache.h
5513@@ -9,10 +9,11 @@
5514 #ifndef _ASM_CACHE_H
5515 #define _ASM_CACHE_H
5516
5517+#include <linux/const.h>
5518 #include <kmalloc.h>
5519
5520 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
5521-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5522+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5523
5524 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
5525 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5526diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
5527index 455c0ac..ad65fbe 100644
5528--- a/arch/mips/include/asm/elf.h
5529+++ b/arch/mips/include/asm/elf.h
5530@@ -372,13 +372,16 @@ extern const char *__elf_platform;
5531 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
5532 #endif
5533
5534+#ifdef CONFIG_PAX_ASLR
5535+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
5536+
5537+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5538+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5539+#endif
5540+
5541 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
5542 struct linux_binprm;
5543 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
5544 int uses_interp);
5545
5546-struct mm_struct;
5547-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
5548-#define arch_randomize_brk arch_randomize_brk
5549-
5550 #endif /* _ASM_ELF_H */
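PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN give the number of random bits, above PAGE_SHIFT, that PAX_ASLR mixes into the mmap and stack bases, while PAX_ELF_ET_DYN_BASE fixes the load base for randomized ET_DYN binaries. A sketch of how the generic ELF loader (patched elsewhere) consumes the bit counts; pax_get_random_long() is the patch's own helper, named here by assumption:

/* mask pax_get_random_long() down to the configured number of bits,
 * then shift into page units */
mm->delta_mmap  = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1))
			<< PAGE_SHIFT;
mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN) - 1))
			<< PAGE_SHIFT;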
5551diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
5552index c1f6afa..38cc6e9 100644
5553--- a/arch/mips/include/asm/exec.h
5554+++ b/arch/mips/include/asm/exec.h
5555@@ -12,6 +12,6 @@
5556 #ifndef _ASM_EXEC_H
5557 #define _ASM_EXEC_H
5558
5559-extern unsigned long arch_align_stack(unsigned long sp);
5560+#define arch_align_stack(x) ((x) & ~0xfUL)
5561
5562 #endif /* _ASM_EXEC_H */
5563diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
5564index dbaec94..6a14935 100644
5565--- a/arch/mips/include/asm/page.h
5566+++ b/arch/mips/include/asm/page.h
5567@@ -96,7 +96,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
5568 #ifdef CONFIG_CPU_MIPS32
5569 typedef struct { unsigned long pte_low, pte_high; } pte_t;
5570 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
5571- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
5572+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
5573 #else
5574 typedef struct { unsigned long long pte; } pte_t;
5575 #define pte_val(x) ((x).pte)
5576diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
5577index 881d18b..cea38bc 100644
5578--- a/arch/mips/include/asm/pgalloc.h
5579+++ b/arch/mips/include/asm/pgalloc.h
5580@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
5581 {
5582 set_pud(pud, __pud((unsigned long)pmd));
5583 }
5584+
5585+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
5586+{
5587+ pud_populate(mm, pud, pmd);
5588+}
5589 #endif
5590
5591 /*
5592diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
5593index b2050b9..d71bb1b 100644
5594--- a/arch/mips/include/asm/thread_info.h
5595+++ b/arch/mips/include/asm/thread_info.h
5596@@ -111,6 +111,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
5597 #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
5598 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
5599 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
5600+/* li takes a 32bit immediate */
5601+#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
5602 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
5603
5604 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
5605@@ -126,15 +128,18 @@ register struct thread_info *__current_thread_info __asm__("$28");
5606 #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
5607 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
5608 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
5609+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
5610+
5611+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
5612
5613 /* work to do in syscall_trace_leave() */
5614-#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
5615+#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
5616
5617 /* work to do on interrupt/exception return */
5618 #define _TIF_WORK_MASK \
5619 (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME)
5620 /* work to do on any return to u-space */
5621-#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT)
5622+#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT | _TIF_GRSEC_SETXID)
5623
5624 #endif /* __KERNEL__ */
5625
5626diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
5627index 9fdd8bc..4bd7f1a 100644
5628--- a/arch/mips/kernel/binfmt_elfn32.c
5629+++ b/arch/mips/kernel/binfmt_elfn32.c
5630@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
5631 #undef ELF_ET_DYN_BASE
5632 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
5633
5634+#ifdef CONFIG_PAX_ASLR
5635+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
5636+
5637+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5638+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5639+#endif
5640+
5641 #include <asm/processor.h>
5642 #include <linux/module.h>
5643 #include <linux/elfcore.h>
5644diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
5645index ff44823..97f8906 100644
5646--- a/arch/mips/kernel/binfmt_elfo32.c
5647+++ b/arch/mips/kernel/binfmt_elfo32.c
5648@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
5649 #undef ELF_ET_DYN_BASE
5650 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
5651
5652+#ifdef CONFIG_PAX_ASLR
5653+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
5654+
5655+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5656+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5657+#endif
5658+
5659 #include <asm/processor.h>
5660
5661 /*
5662diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
5663index a11c6f9..be5e164 100644
5664--- a/arch/mips/kernel/process.c
5665+++ b/arch/mips/kernel/process.c
5666@@ -460,15 +460,3 @@ unsigned long get_wchan(struct task_struct *task)
5667 out:
5668 return pc;
5669 }
5670-
5671-/*
5672- * Don't forget that the stack pointer must be aligned on a 8 bytes
5673- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
5674- */
5675-unsigned long arch_align_stack(unsigned long sp)
5676-{
5677- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5678- sp -= get_random_int() & ~PAGE_MASK;
5679-
5680- return sp & ALMASK;
5681-}
5682diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
5683index 4812c6d..2069554 100644
5684--- a/arch/mips/kernel/ptrace.c
5685+++ b/arch/mips/kernel/ptrace.c
5686@@ -528,6 +528,10 @@ static inline int audit_arch(void)
5687 return arch;
5688 }
5689
5690+#ifdef CONFIG_GRKERNSEC_SETXID
5691+extern void gr_delayed_cred_worker(void);
5692+#endif
5693+
5694 /*
5695 * Notification of system call entry/exit
5696 * - triggered by current->work.syscall_trace
5697@@ -537,6 +541,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
5698 /* do the secure computing check first */
5699 secure_computing_strict(regs->regs[2]);
5700
5701+#ifdef CONFIG_GRKERNSEC_SETXID
5702+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5703+ gr_delayed_cred_worker();
5704+#endif
5705+
5706 if (!(current->ptrace & PT_PTRACED))
5707 goto out;
5708
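GRKERNSEC_SETXID defers credential changes across a thread group: the thread performing the setuid-style call raises TIF_GRSEC_SETXID on its siblings, and each sibling applies the pending credentials at its next syscall boundary, which is why the flag is folded into the _TIF_SYSCALL_WORK masks tested by the scall*.S entry paths below. A toy model of the handshake (apply_delayed_creds() stands in for gr_delayed_cred_worker(); the atomic flag stands in for the TIF bit):

#include <stdatomic.h>

static atomic_int setxid_pending;

static void apply_delayed_creds(void) { /* install the new creds */ }

static void on_syscall_entry(void)
{
	if (atomic_exchange(&setxid_pending, 0))	/* test_and_clear */
		apply_delayed_creds();
}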
5709diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
5710index d20a4bc..7096ae5 100644
5711--- a/arch/mips/kernel/scall32-o32.S
5712+++ b/arch/mips/kernel/scall32-o32.S
5713@@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
5714
5715 stack_done:
5716 lw t0, TI_FLAGS($28) # syscall tracing enabled?
5717- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5718+ li t1, _TIF_SYSCALL_WORK
5719 and t0, t1
5720 bnez t0, syscall_trace_entry # -> yes
5721
5722diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
5723index b64f642..0fe6eab 100644
5724--- a/arch/mips/kernel/scall64-64.S
5725+++ b/arch/mips/kernel/scall64-64.S
5726@@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
5727
5728 sd a3, PT_R26(sp) # save a3 for syscall restarting
5729
5730- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5731+ li t1, _TIF_SYSCALL_WORK
5732 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
5733 and t0, t1, t0
5734 bnez t0, syscall_trace_entry
5735diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
5736index c29ac19..c592d05 100644
5737--- a/arch/mips/kernel/scall64-n32.S
5738+++ b/arch/mips/kernel/scall64-n32.S
5739@@ -47,7 +47,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
5740
5741 sd a3, PT_R26(sp) # save a3 for syscall restarting
5742
5743- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5744+ li t1, _TIF_SYSCALL_WORK
5745 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
5746 and t0, t1, t0
5747 bnez t0, n32_syscall_trace_entry
5748diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
5749index cf3e75e..72e93fe 100644
5750--- a/arch/mips/kernel/scall64-o32.S
5751+++ b/arch/mips/kernel/scall64-o32.S
5752@@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
5753 PTR 4b, bad_stack
5754 .previous
5755
5756- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5757+ li t1, _TIF_SYSCALL_WORK
5758 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
5759 and t0, t1, t0
5760 bnez t0, trace_a_syscall
5761diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
5762index ddcec1e..c7f983e 100644
5763--- a/arch/mips/mm/fault.c
5764+++ b/arch/mips/mm/fault.c
5765@@ -27,6 +27,23 @@
5766 #include <asm/highmem.h> /* For VMALLOC_END */
5767 #include <linux/kdebug.h>
5768
5769+#ifdef CONFIG_PAX_PAGEEXEC
5770+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5771+{
5772+ unsigned long i;
5773+
5774+ printk(KERN_ERR "PAX: bytes at PC: ");
5775+ for (i = 0; i < 5; i++) {
5776+ unsigned int c;
5777+ if (get_user(c, (unsigned int *)pc+i))
5778+ printk(KERN_CONT "???????? ");
5779+ else
5780+ printk(KERN_CONT "%08x ", c);
5781+ }
5782+ printk("\n");
5783+}
5784+#endif
5785+
5786 /*
5787 * This routine handles page faults. It determines the address,
5788 * and the problem, and then passes it off to one of the appropriate
5789diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
5790index 7e5fe27..479a219 100644
5791--- a/arch/mips/mm/mmap.c
5792+++ b/arch/mips/mm/mmap.c
5793@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
5794 struct vm_area_struct *vma;
5795 unsigned long addr = addr0;
5796 int do_color_align;
5797+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5798 struct vm_unmapped_area_info info;
5799
5800 if (unlikely(len > TASK_SIZE))
5801@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
5802 do_color_align = 1;
5803
5804 /* requesting a specific address */
5805+
5806+#ifdef CONFIG_PAX_RANDMMAP
5807+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
5808+#endif
5809+
5810 if (addr) {
5811 if (do_color_align)
5812 addr = COLOUR_ALIGN(addr, pgoff);
5813@@ -91,8 +97,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
5814 addr = PAGE_ALIGN(addr);
5815
5816 vma = find_vma(mm, addr);
5817- if (TASK_SIZE - len >= addr &&
5818- (!vma || addr + len <= vma->vm_start))
5819+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
5820 return addr;
5821 }
5822
5823@@ -146,6 +151,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5824 {
5825 unsigned long random_factor = 0UL;
5826
5827+#ifdef CONFIG_PAX_RANDMMAP
5828+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5829+#endif
5830+
5831 if (current->flags & PF_RANDOMIZE) {
5832 random_factor = get_random_int();
5833 random_factor = random_factor << PAGE_SHIFT;
5834@@ -157,42 +166,27 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5835
5836 if (mmap_is_legacy()) {
5837 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5838+
5839+#ifdef CONFIG_PAX_RANDMMAP
5840+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5841+ mm->mmap_base += mm->delta_mmap;
5842+#endif
5843+
5844 mm->get_unmapped_area = arch_get_unmapped_area;
5845 mm->unmap_area = arch_unmap_area;
5846 } else {
5847 mm->mmap_base = mmap_base(random_factor);
5848+
5849+#ifdef CONFIG_PAX_RANDMMAP
5850+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5851+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5852+#endif
5853+
5854 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5855 mm->unmap_area = arch_unmap_area_topdown;
5856 }
5857 }
5858
5859-static inline unsigned long brk_rnd(void)
5860-{
5861- unsigned long rnd = get_random_int();
5862-
5863- rnd = rnd << PAGE_SHIFT;
5864- /* 8MB for 32bit, 256MB for 64bit */
5865- if (TASK_IS_32BIT_ADDR)
5866- rnd = rnd & 0x7ffffful;
5867- else
5868- rnd = rnd & 0xffffffful;
5869-
5870- return rnd;
5871-}
5872-
5873-unsigned long arch_randomize_brk(struct mm_struct *mm)
5874-{
5875- unsigned long base = mm->brk;
5876- unsigned long ret;
5877-
5878- ret = PAGE_ALIGN(base + brk_rnd());
5879-
5880- if (ret < mm->brk)
5881- return mm->brk;
5882-
5883- return ret;
5884-}
5885-
5886 int __virt_addr_valid(const volatile void *kaddr)
5887 {
5888 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
5889diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
5890index 967d144..db12197 100644
5891--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
5892+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
5893@@ -11,12 +11,14 @@
5894 #ifndef _ASM_PROC_CACHE_H
5895 #define _ASM_PROC_CACHE_H
5896
5897+#include <linux/const.h>
5898+
5899 /* L1 cache */
5900
5901 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
5902 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
5903-#define L1_CACHE_BYTES 16 /* bytes per entry */
5904 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
5905+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
5906 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
5907
5908 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
5909diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
5910index bcb5df2..84fabd2 100644
5911--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
5912+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
5913@@ -16,13 +16,15 @@
5914 #ifndef _ASM_PROC_CACHE_H
5915 #define _ASM_PROC_CACHE_H
5916
5917+#include <linux/const.h>
5918+
5919 /*
5920 * L1 cache
5921 */
5922 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
5923 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
5924-#define L1_CACHE_BYTES 32 /* bytes per entry */
5925 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
5926+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
5927 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
5928
5929 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
5930diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
5931index 4ce7a01..449202a 100644
5932--- a/arch/openrisc/include/asm/cache.h
5933+++ b/arch/openrisc/include/asm/cache.h
5934@@ -19,11 +19,13 @@
5935 #ifndef __ASM_OPENRISC_CACHE_H
5936 #define __ASM_OPENRISC_CACHE_H
5937
5938+#include <linux/const.h>
5939+
5940 /* FIXME: How can we replace these with values from the CPU...
5941 * they shouldn't be hard-coded!
5942 */
5943
5944-#define L1_CACHE_BYTES 16
5945 #define L1_CACHE_SHIFT 4
5946+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5947
5948 #endif /* __ASM_OPENRISC_CACHE_H */
5949diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
5950index af9cf30..2aae9b2 100644
5951--- a/arch/parisc/include/asm/atomic.h
5952+++ b/arch/parisc/include/asm/atomic.h
5953@@ -229,6 +229,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
5954
5955 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
5956
5957+#define atomic64_read_unchecked(v) atomic64_read(v)
5958+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5959+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5960+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5961+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5962+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5963+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5964+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5965+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5966+
5967 #endif /* !CONFIG_64BIT */
5968
5969
5970diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
5971index 47f11c7..3420df2 100644
5972--- a/arch/parisc/include/asm/cache.h
5973+++ b/arch/parisc/include/asm/cache.h
5974@@ -5,6 +5,7 @@
5975 #ifndef __ARCH_PARISC_CACHE_H
5976 #define __ARCH_PARISC_CACHE_H
5977
5978+#include <linux/const.h>
5979
5980 /*
5981 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
5982@@ -15,13 +16,13 @@
5983 * just ruin performance.
5984 */
5985 #ifdef CONFIG_PA20
5986-#define L1_CACHE_BYTES 64
5987 #define L1_CACHE_SHIFT 6
5988 #else
5989-#define L1_CACHE_BYTES 32
5990 #define L1_CACHE_SHIFT 5
5991 #endif
5992
5993+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5994+
5995 #ifndef __ASSEMBLY__
5996
5997 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5998diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
5999index 19f6cb1..6c78cf2 100644
6000--- a/arch/parisc/include/asm/elf.h
6001+++ b/arch/parisc/include/asm/elf.h
6002@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
6003
6004 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
6005
6006+#ifdef CONFIG_PAX_ASLR
6007+#define PAX_ELF_ET_DYN_BASE 0x10000UL
6008+
6009+#define PAX_DELTA_MMAP_LEN 16
6010+#define PAX_DELTA_STACK_LEN 16
6011+#endif
6012+
6013 /* This yields a mask that user programs can use to figure out what
6014 instruction set this CPU supports. This could be done in user space,
6015 but it's not easy, and we've already done it here. */
6016diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
6017index fc987a1..6e068ef 100644
6018--- a/arch/parisc/include/asm/pgalloc.h
6019+++ b/arch/parisc/include/asm/pgalloc.h
6020@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
6021 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
6022 }
6023
6024+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
6025+{
6026+ pgd_populate(mm, pgd, pmd);
6027+}
6028+
6029 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
6030 {
6031 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
6032@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
6033 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
6034 #define pmd_free(mm, x) do { } while (0)
6035 #define pgd_populate(mm, pmd, pte) BUG()
6036+#define pgd_populate_kernel(mm, pmd, pte) BUG()
6037
6038 #endif
6039
6040diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
6041index 7df49fa..38b62bf 100644
6042--- a/arch/parisc/include/asm/pgtable.h
6043+++ b/arch/parisc/include/asm/pgtable.h
6044@@ -218,6 +218,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
6045 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
6046 #define PAGE_COPY PAGE_EXECREAD
6047 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
6048+
6049+#ifdef CONFIG_PAX_PAGEEXEC
6050+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
6051+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
6052+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
6053+#else
6054+# define PAGE_SHARED_NOEXEC PAGE_SHARED
6055+# define PAGE_COPY_NOEXEC PAGE_COPY
6056+# define PAGE_READONLY_NOEXEC PAGE_READONLY
6057+#endif
6058+
6059 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
6060 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
6061 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
6062diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
6063index 4ba2c93..f5e3974 100644
6064--- a/arch/parisc/include/asm/uaccess.h
6065+++ b/arch/parisc/include/asm/uaccess.h
6066@@ -251,10 +251,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
6067 const void __user *from,
6068 unsigned long n)
6069 {
6070- int sz = __compiletime_object_size(to);
6071+ size_t sz = __compiletime_object_size(to);
6072 int ret = -EFAULT;
6073
6074- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
6075+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
6076 ret = __copy_from_user(to, from, n);
6077 else
6078 copy_from_user_overflow();
6079diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
6080index 2a625fb..9908930 100644
6081--- a/arch/parisc/kernel/module.c
6082+++ b/arch/parisc/kernel/module.c
6083@@ -98,16 +98,38 @@
6084
6085 /* three functions to determine where in the module core
6086 * or init pieces the location is */
6087+static inline int in_init_rx(struct module *me, void *loc)
6088+{
6089+ return (loc >= me->module_init_rx &&
6090+ loc < (me->module_init_rx + me->init_size_rx));
6091+}
6092+
6093+static inline int in_init_rw(struct module *me, void *loc)
6094+{
6095+ return (loc >= me->module_init_rw &&
6096+ loc < (me->module_init_rw + me->init_size_rw));
6097+}
6098+
6099 static inline int in_init(struct module *me, void *loc)
6100 {
6101- return (loc >= me->module_init &&
6102- loc <= (me->module_init + me->init_size));
6103+ return in_init_rx(me, loc) || in_init_rw(me, loc);
6104+}
6105+
6106+static inline int in_core_rx(struct module *me, void *loc)
6107+{
6108+ return (loc >= me->module_core_rx &&
6109+ loc < (me->module_core_rx + me->core_size_rx));
6110+}
6111+
6112+static inline int in_core_rw(struct module *me, void *loc)
6113+{
6114+ return (loc >= me->module_core_rw &&
6115+ loc < (me->module_core_rw + me->core_size_rw));
6116 }
6117
6118 static inline int in_core(struct module *me, void *loc)
6119 {
6120- return (loc >= me->module_core &&
6121- loc <= (me->module_core + me->core_size));
6122+ return in_core_rx(me, loc) || in_core_rw(me, loc);
6123 }
6124
6125 static inline int in_local(struct module *me, void *loc)
6126@@ -371,13 +393,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
6127 }
6128
6129 /* align things a bit */
6130- me->core_size = ALIGN(me->core_size, 16);
6131- me->arch.got_offset = me->core_size;
6132- me->core_size += gots * sizeof(struct got_entry);
6133+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
6134+ me->arch.got_offset = me->core_size_rw;
6135+ me->core_size_rw += gots * sizeof(struct got_entry);
6136
6137- me->core_size = ALIGN(me->core_size, 16);
6138- me->arch.fdesc_offset = me->core_size;
6139- me->core_size += fdescs * sizeof(Elf_Fdesc);
6140+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
6141+ me->arch.fdesc_offset = me->core_size_rw;
6142+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
6143
6144 me->arch.got_max = gots;
6145 me->arch.fdesc_max = fdescs;
6146@@ -395,7 +417,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
6147
6148 BUG_ON(value == 0);
6149
6150- got = me->module_core + me->arch.got_offset;
6151+ got = me->module_core_rw + me->arch.got_offset;
6152 for (i = 0; got[i].addr; i++)
6153 if (got[i].addr == value)
6154 goto out;
6155@@ -413,7 +435,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
6156 #ifdef CONFIG_64BIT
6157 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
6158 {
6159- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
6160+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
6161
6162 if (!value) {
6163 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
6164@@ -431,7 +453,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
6165
6166 /* Create new one */
6167 fdesc->addr = value;
6168- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
6169+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
6170 return (Elf_Addr)fdesc;
6171 }
6172 #endif /* CONFIG_64BIT */
6173@@ -843,7 +865,7 @@ register_unwind_table(struct module *me,
6174
6175 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
6176 end = table + sechdrs[me->arch.unwind_section].sh_size;
6177- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
6178+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
6179
6180 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
6181 me->arch.unwind_section, table, end, gp);
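With the GOT moved into the RW half of the module, get_got() still performs the same lookup: a linear scan over entries terminated by a zero address, claiming the first empty slot when the value is new. Stripped of the module bookkeeping and the capacity BUG_ON:

struct got_entry { unsigned long addr; };

/* return the index of value's slot, appending it if absent */
static unsigned long got_index(struct got_entry *got, unsigned long value)
{
	unsigned long i;

	for (i = 0; got[i].addr; i++)
		if (got[i].addr == value)
			return i;
	got[i].addr = value;		/* first empty slot: create entry */
	return i;
}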
6182diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
6183index f76c108..92bad82 100644
6184--- a/arch/parisc/kernel/sys_parisc.c
6185+++ b/arch/parisc/kernel/sys_parisc.c
6186@@ -33,9 +33,11 @@
6187 #include <linux/utsname.h>
6188 #include <linux/personality.h>
6189
6190-static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
6191+static unsigned long get_unshared_area(struct file *filp, unsigned long addr, unsigned long len,
6192+ unsigned long flags)
6193 {
6194 struct vm_area_struct *vma;
6195+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
6196
6197 addr = PAGE_ALIGN(addr);
6198
6199@@ -43,7 +45,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
6200 /* At this point: (!vma || addr < vma->vm_end). */
6201 if (TASK_SIZE - len < addr)
6202 return -ENOMEM;
6203- if (!vma || addr + len <= vma->vm_start)
6204+ if (check_heap_stack_gap(vma, addr, len, offset))
6205 return addr;
6206 addr = vma->vm_end;
6207 }
6208@@ -67,11 +69,12 @@ static int get_offset(struct address_space *mapping)
6209 return offset & 0x3FF000;
6210 }
6211
6212-static unsigned long get_shared_area(struct address_space *mapping,
6213- unsigned long addr, unsigned long len, unsigned long pgoff)
6214+static unsigned long get_shared_area(struct file *filp, struct address_space *mapping,
6215+ unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
6216 {
6217 struct vm_area_struct *vma;
6218 int offset = mapping ? get_offset(mapping) : 0;
6219+ unsigned long rand_offset = gr_rand_threadstack_offset(current->mm, filp, flags);
6220
6221 offset = (offset + (pgoff << PAGE_SHIFT)) & 0x3FF000;
6222
6223@@ -81,7 +84,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
6224 /* At this point: (!vma || addr < vma->vm_end). */
6225 if (TASK_SIZE - len < addr)
6226 return -ENOMEM;
6227- if (!vma || addr + len <= vma->vm_start)
6228+ if (check_heap_stack_gap(vma, addr, len, rand_offset))
6229 return addr;
6230 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
6231 if (addr < vma->vm_end) /* handle wraparound */
6232@@ -100,14 +103,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
6233 if (flags & MAP_FIXED)
6234 return addr;
6235 if (!addr)
6236- addr = TASK_UNMAPPED_BASE;
6237+ addr = current->mm->mmap_base;
6238
6239 if (filp) {
6240- addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
6241+ addr = get_shared_area(filp, filp->f_mapping, addr, len, pgoff, flags);
6242 } else if(flags & MAP_SHARED) {
6243- addr = get_shared_area(NULL, addr, len, pgoff);
6244+ addr = get_shared_area(filp, NULL, addr, len, pgoff, flags);
6245 } else {
6246- addr = get_unshared_area(addr, len);
6247+ addr = get_unshared_area(filp, addr, len, flags);
6248 }
6249 return addr;
6250 }
6251diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
6252index 45ba99f..8e22c33 100644
6253--- a/arch/parisc/kernel/traps.c
6254+++ b/arch/parisc/kernel/traps.c
6255@@ -732,9 +732,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
6256
6257 down_read(&current->mm->mmap_sem);
6258 vma = find_vma(current->mm,regs->iaoq[0]);
6259- if (vma && (regs->iaoq[0] >= vma->vm_start)
6260- && (vma->vm_flags & VM_EXEC)) {
6261-
6262+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
6263 fault_address = regs->iaoq[0];
6264 fault_space = regs->iasq[0];
6265
6266diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
6267index 18162ce..94de376 100644
6268--- a/arch/parisc/mm/fault.c
6269+++ b/arch/parisc/mm/fault.c
6270@@ -15,6 +15,7 @@
6271 #include <linux/sched.h>
6272 #include <linux/interrupt.h>
6273 #include <linux/module.h>
6274+#include <linux/unistd.h>
6275
6276 #include <asm/uaccess.h>
6277 #include <asm/traps.h>
6278@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
6279 static unsigned long
6280 parisc_acctyp(unsigned long code, unsigned int inst)
6281 {
6282- if (code == 6 || code == 16)
6283+ if (code == 6 || code == 7 || code == 16)
6284 return VM_EXEC;
6285
6286 switch (inst & 0xf0000000) {
6287@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
6288 }
6289 #endif
6290
6291+#ifdef CONFIG_PAX_PAGEEXEC
6292+/*
6293+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
6294+ *
6295+ * returns 1 when task should be killed
6296+ * 2 when rt_sigreturn trampoline was detected
6297+ * 3 when unpatched PLT trampoline was detected
6298+ */
6299+static int pax_handle_fetch_fault(struct pt_regs *regs)
6300+{
6301+
6302+#ifdef CONFIG_PAX_EMUPLT
6303+ int err;
6304+
6305+ do { /* PaX: unpatched PLT emulation */
6306+ unsigned int bl, depwi;
6307+
6308+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
6309+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
6310+
6311+ if (err)
6312+ break;
6313+
6314+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
6315+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
6316+
6317+ err = get_user(ldw, (unsigned int *)addr);
6318+ err |= get_user(bv, (unsigned int *)(addr+4));
6319+ err |= get_user(ldw2, (unsigned int *)(addr+8));
6320+
6321+ if (err)
6322+ break;
6323+
6324+ if (ldw == 0x0E801096U &&
6325+ bv == 0xEAC0C000U &&
6326+ ldw2 == 0x0E881095U)
6327+ {
6328+ unsigned int resolver, map;
6329+
6330+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
6331+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
6332+ if (err)
6333+ break;
6334+
6335+ regs->gr[20] = instruction_pointer(regs)+8;
6336+ regs->gr[21] = map;
6337+ regs->gr[22] = resolver;
6338+ regs->iaoq[0] = resolver | 3UL;
6339+ regs->iaoq[1] = regs->iaoq[0] + 4;
6340+ return 3;
6341+ }
6342+ }
6343+ } while (0);
6344+#endif
6345+
6346+#ifdef CONFIG_PAX_EMUTRAMP
6347+
6348+#ifndef CONFIG_PAX_EMUSIGRT
6349+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
6350+ return 1;
6351+#endif
6352+
6353+ do { /* PaX: rt_sigreturn emulation */
6354+ unsigned int ldi1, ldi2, bel, nop;
6355+
6356+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
6357+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
6358+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
6359+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
6360+
6361+ if (err)
6362+ break;
6363+
6364+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
6365+ ldi2 == 0x3414015AU &&
6366+ bel == 0xE4008200U &&
6367+ nop == 0x08000240U)
6368+ {
6369+ regs->gr[25] = (ldi1 & 2) >> 1;
6370+ regs->gr[20] = __NR_rt_sigreturn;
6371+ regs->gr[31] = regs->iaoq[1] + 16;
6372+ regs->sr[0] = regs->iasq[1];
6373+ regs->iaoq[0] = 0x100UL;
6374+ regs->iaoq[1] = regs->iaoq[0] + 4;
6375+ regs->iasq[0] = regs->sr[2];
6376+ regs->iasq[1] = regs->sr[2];
6377+ return 2;
6378+ }
6379+ } while (0);
6380+#endif
6381+
6382+ return 1;
6383+}
6384+
6385+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6386+{
6387+ unsigned long i;
6388+
6389+ printk(KERN_ERR "PAX: bytes at PC: ");
6390+ for (i = 0; i < 5; i++) {
6391+ unsigned int c;
6392+ if (get_user(c, (unsigned int *)pc+i))
6393+ printk(KERN_CONT "???????? ");
6394+ else
6395+ printk(KERN_CONT "%08x ", c);
6396+ }
6397+ printk("\n");
6398+}
6399+#endif
6400+
6401 int fixup_exception(struct pt_regs *regs)
6402 {
6403 const struct exception_table_entry *fix;
6404@@ -192,8 +303,33 @@ good_area:
6405
6406 acc_type = parisc_acctyp(code,regs->iir);
6407
6408- if ((vma->vm_flags & acc_type) != acc_type)
6409+ if ((vma->vm_flags & acc_type) != acc_type) {
6410+
6411+#ifdef CONFIG_PAX_PAGEEXEC
6412+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
6413+ (address & ~3UL) == instruction_pointer(regs))
6414+ {
6415+ up_read(&mm->mmap_sem);
6416+ switch (pax_handle_fetch_fault(regs)) {
6417+
6418+#ifdef CONFIG_PAX_EMUPLT
6419+ case 3:
6420+ return;
6421+#endif
6422+
6423+#ifdef CONFIG_PAX_EMUTRAMP
6424+ case 2:
6425+ return;
6426+#endif
6427+
6428+ }
6429+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
6430+ do_group_exit(SIGKILL);
6431+ }
6432+#endif
6433+
6434 goto bad_area;
6435+ }
6436
6437 /*
6438 * If for any reason at all we couldn't handle the fault, make
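pax_handle_fetch_fault() above distinguishes two legitimate instruction fetches from non-executable pages by reading the words around the fault address with get_user() and comparing them against fixed PA-RISC opcode patterns: an unpatched PLT stub (emulated by redirecting to the resolver) and the rt_sigreturn trampoline (emulated by loading the syscall registers directly); everything else is killed. The matching skeleton for the sigreturn case, with fetch_insn() as an assumed stand-in for get_user():

static int fetch_insn(unsigned long pc, unsigned int *insn);	/* assumed */

/* opcodes are the ones quoted in the hunk above */
static int looks_like_rt_sigreturn(unsigned long pc)
{
	unsigned int ldi1, ldi2, bel, nop;

	if (fetch_insn(pc,     &ldi1) || fetch_insn(pc + 4,  &ldi2) ||
	    fetch_insn(pc + 8, &bel)  || fetch_insn(pc + 12, &nop))
		return 0;	/* unreadable: no match */

	return (ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
	       ldi2 == 0x3414015AU &&
	       bel  == 0xE4008200U &&
	       nop  == 0x08000240U;
}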
6439diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
6440index e3b1d41..8e81edf 100644
6441--- a/arch/powerpc/include/asm/atomic.h
6442+++ b/arch/powerpc/include/asm/atomic.h
6443@@ -523,6 +523,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
6444 return t1;
6445 }
6446
6447+#define atomic64_read_unchecked(v) atomic64_read(v)
6448+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
6449+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
6450+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
6451+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
6452+#define atomic64_inc_unchecked(v) atomic64_inc(v)
6453+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
6454+#define atomic64_dec_unchecked(v) atomic64_dec(v)
6455+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
6456+
6457 #endif /* __powerpc64__ */
6458
6459 #endif /* __KERNEL__ */
6460diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
6461index 9e495c9..b6878e5 100644
6462--- a/arch/powerpc/include/asm/cache.h
6463+++ b/arch/powerpc/include/asm/cache.h
6464@@ -3,6 +3,7 @@
6465
6466 #ifdef __KERNEL__
6467
6468+#include <linux/const.h>
6469
6470 /* bytes per L1 cache line */
6471 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
6472@@ -22,7 +23,7 @@
6473 #define L1_CACHE_SHIFT 7
6474 #endif
6475
6476-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6477+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6478
6479 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6480
6481diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
6482index 6abf0a1..459d0f1 100644
6483--- a/arch/powerpc/include/asm/elf.h
6484+++ b/arch/powerpc/include/asm/elf.h
6485@@ -28,8 +28,19 @@
6486 the loader. We need to make sure that it is out of the way of the program
6487 that it will "exec", and that there is sufficient room for the brk. */
6488
6489-extern unsigned long randomize_et_dyn(unsigned long base);
6490-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
6491+#define ELF_ET_DYN_BASE (0x20000000)
6492+
6493+#ifdef CONFIG_PAX_ASLR
6494+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
6495+
6496+#ifdef __powerpc64__
6497+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
6498+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
6499+#else
6500+#define PAX_DELTA_MMAP_LEN 15
6501+#define PAX_DELTA_STACK_LEN 15
6502+#endif
6503+#endif
6504
6505 /*
6506 * Our registers are always unsigned longs, whether we're a 32 bit
6507@@ -124,10 +135,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6508 (0x7ff >> (PAGE_SHIFT - 12)) : \
6509 (0x3ffff >> (PAGE_SHIFT - 12)))
6510
6511-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
6512-#define arch_randomize_brk arch_randomize_brk
6513-
6514-
6515 #ifdef CONFIG_SPU_BASE
6516 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
6517 #define NT_SPU 1
6518diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
6519index 8196e9c..d83a9f3 100644
6520--- a/arch/powerpc/include/asm/exec.h
6521+++ b/arch/powerpc/include/asm/exec.h
6522@@ -4,6 +4,6 @@
6523 #ifndef _ASM_POWERPC_EXEC_H
6524 #define _ASM_POWERPC_EXEC_H
6525
6526-extern unsigned long arch_align_stack(unsigned long sp);
6527+#define arch_align_stack(x) ((x) & ~0xfUL)
6528
6529 #endif /* _ASM_POWERPC_EXEC_H */
6530diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
6531index 5acabbd..7ea14fa 100644
6532--- a/arch/powerpc/include/asm/kmap_types.h
6533+++ b/arch/powerpc/include/asm/kmap_types.h
6534@@ -10,7 +10,7 @@
6535 * 2 of the License, or (at your option) any later version.
6536 */
6537
6538-#define KM_TYPE_NR 16
6539+#define KM_TYPE_NR 17
6540
6541 #endif /* __KERNEL__ */
6542 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
6543diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
6544index 8565c25..2865190 100644
6545--- a/arch/powerpc/include/asm/mman.h
6546+++ b/arch/powerpc/include/asm/mman.h
6547@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
6548 }
6549 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
6550
6551-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
6552+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
6553 {
6554 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
6555 }
6556diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
6557index f072e97..b436dee 100644
6558--- a/arch/powerpc/include/asm/page.h
6559+++ b/arch/powerpc/include/asm/page.h
6560@@ -220,8 +220,9 @@ extern long long virt_phys_offset;
6561 * and needs to be executable. This means the whole heap ends
6562 * up being executable.
6563 */
6564-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
6565- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6566+#define VM_DATA_DEFAULT_FLAGS32 \
6567+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
6568+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6569
6570 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
6571 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6572@@ -249,6 +250,9 @@ extern long long virt_phys_offset;
6573 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
6574 #endif
6575
6576+#define ktla_ktva(addr) (addr)
6577+#define ktva_ktla(addr) (addr)
6578+
6579 /*
6580 * Use the top bit of the higher-level page table entries to indicate whether
6581 * the entries we point to contain hugepages. This works because we know that
6582diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
6583index cd915d6..c10cee8 100644
6584--- a/arch/powerpc/include/asm/page_64.h
6585+++ b/arch/powerpc/include/asm/page_64.h
6586@@ -154,15 +154,18 @@ do { \
6587 * stack by default, so in the absence of a PT_GNU_STACK program header
6588 * we turn execute permission off.
6589 */
6590-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
6591- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6592+#define VM_STACK_DEFAULT_FLAGS32 \
6593+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
6594+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6595
6596 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
6597 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6598
6599+#ifndef CONFIG_PAX_PAGEEXEC
6600 #define VM_STACK_DEFAULT_FLAGS \
6601 (is_32bit_task() ? \
6602 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
6603+#endif
6604
6605 #include <asm-generic/getorder.h>
6606
6607diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
6608index 292725c..f87ae14 100644
6609--- a/arch/powerpc/include/asm/pgalloc-64.h
6610+++ b/arch/powerpc/include/asm/pgalloc-64.h
6611@@ -50,6 +50,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
6612 #ifndef CONFIG_PPC_64K_PAGES
6613
6614 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
6615+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
6616
6617 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
6618 {
6619@@ -67,6 +68,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6620 pud_set(pud, (unsigned long)pmd);
6621 }
6622
6623+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6624+{
6625+ pud_populate(mm, pud, pmd);
6626+}
6627+
6628 #define pmd_populate(mm, pmd, pte_page) \
6629 pmd_populate_kernel(mm, pmd, page_address(pte_page))
6630 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
6631@@ -76,6 +82,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6632 #else /* CONFIG_PPC_64K_PAGES */
6633
6634 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
6635+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
6636
6637 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
6638 pte_t *pte)
6639diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
6640index a9cbd3b..3b67efa 100644
6641--- a/arch/powerpc/include/asm/pgtable.h
6642+++ b/arch/powerpc/include/asm/pgtable.h
6643@@ -2,6 +2,7 @@
6644 #define _ASM_POWERPC_PGTABLE_H
6645 #ifdef __KERNEL__
6646
6647+#include <linux/const.h>
6648 #ifndef __ASSEMBLY__
6649 #include <asm/processor.h> /* For TASK_SIZE */
6650 #include <asm/mmu.h>
6651diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
6652index 4aad413..85d86bf 100644
6653--- a/arch/powerpc/include/asm/pte-hash32.h
6654+++ b/arch/powerpc/include/asm/pte-hash32.h
6655@@ -21,6 +21,7 @@
6656 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
6657 #define _PAGE_USER 0x004 /* usermode access allowed */
6658 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
6659+#define _PAGE_EXEC _PAGE_GUARDED
6660 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
6661 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
6662 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
6663diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
6664index 3d5c9dc..62f8414 100644
6665--- a/arch/powerpc/include/asm/reg.h
6666+++ b/arch/powerpc/include/asm/reg.h
6667@@ -215,6 +215,7 @@
6668 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
6669 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
6670 #define DSISR_NOHPTE 0x40000000 /* no translation found */
6671+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
6672 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
6673 #define DSISR_ISSTORE 0x02000000 /* access was a store */
6674 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
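
Hash-based 32-bit PowerPC has no hardware NX bit, so the pte-hash32.h hunk aliases _PAGE_EXEC to the guarded-storage bit: instruction fetch from guarded storage raises a fault, and the new DSISR_GUARDED definition above lets the fault handler recognize that case as a PAGEEXEC violation (see the arch/powerpc/mm/fault.c hunk further down). A compilable sketch of that classification, with the DSISR constants copied from the patch:

    #include <stdio.h>

    #define DSISR_NOHPTE    0x40000000u /* no translation found */
    #define DSISR_GUARDED   0x10000000u /* fetch from guarded storage */
    #define DSISR_PROTFAULT 0x08000000u /* protection fault */

    /* Sketch only: is this fault an execution attempt that the guarded
     * bit turned into a fault, i.e. a PAGEEXEC-style NX violation? */
    static int is_nx_violation(unsigned int dsisr, int is_exec)
    {
        return is_exec && (dsisr & (DSISR_PROTFAULT | DSISR_GUARDED)) != 0;
    }

    int main(void)
    {
        printf("exec+guarded: %d, data fault: %d\n",
               is_nx_violation(DSISR_GUARDED, 1),
               is_nx_violation(DSISR_NOHPTE, 0));
        return 0;
    }
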
6675diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
6676index 406b7b9..af63426 100644
6677--- a/arch/powerpc/include/asm/thread_info.h
6678+++ b/arch/powerpc/include/asm/thread_info.h
6679@@ -97,7 +97,6 @@ static inline struct thread_info *current_thread_info(void)
6680 #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
6681 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
6682 #define TIF_SINGLESTEP 8 /* singlestepping active */
6683-#define TIF_MEMDIE 9 /* is terminating due to OOM killer */
6684 #define TIF_SECCOMP 10 /* secure computing */
6685 #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
6686 #define TIF_NOERROR 12 /* Force successful syscall return */
6687@@ -106,6 +105,9 @@ static inline struct thread_info *current_thread_info(void)
6688 #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
6689 #define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation
6690 for stack store? */
6691+#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
6692+/* mask must be expressible within 16 bits to satisfy 'andi' instruction reqs */
6693+#define TIF_GRSEC_SETXID 9 /* update credentials on syscall entry/exit */
6694
6695 /* as above, but as bit values */
6696 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
6697@@ -124,8 +126,10 @@ static inline struct thread_info *current_thread_info(void)
6698 #define _TIF_UPROBE (1<<TIF_UPROBE)
6699 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6700 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
6701+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6702 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
6703- _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
6704+ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
6705+ _TIF_GRSEC_SETXID)
6706
6707 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
6708 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
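
TIF_MEMDIE moves from bit 9 to bit 17 so that TIF_GRSEC_SETXID can take bit 9: the flag is folded into _TIF_SYSCALL_T_OR_A, which the syscall entry path tests with PowerPC's andi. instruction, and andi. only accepts a 16-bit unsigned immediate, so every bit in that mask must sit in positions 0-15. A quick check of the constraint:

    #include <stdio.h>

    int main(void)
    {
        unsigned long grsec_setxid = 1UL << 9;  /* fits andi.'s 16-bit immediate */
        unsigned long memdie       = 1UL << 17; /* does not -- fine off the fast path */
        printf("bit 9:  %#lx (fits 16 bits: %d)\n", grsec_setxid, grsec_setxid <= 0xffff);
        printf("bit 17: %#lx (fits 16 bits: %d)\n", memdie, memdie <= 0xffff);
        return 0;
    }
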
6709diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
6710index 4db4959..aba5c41 100644
6711--- a/arch/powerpc/include/asm/uaccess.h
6712+++ b/arch/powerpc/include/asm/uaccess.h
6713@@ -318,52 +318,6 @@ do { \
6714 extern unsigned long __copy_tofrom_user(void __user *to,
6715 const void __user *from, unsigned long size);
6716
6717-#ifndef __powerpc64__
6718-
6719-static inline unsigned long copy_from_user(void *to,
6720- const void __user *from, unsigned long n)
6721-{
6722- unsigned long over;
6723-
6724- if (access_ok(VERIFY_READ, from, n))
6725- return __copy_tofrom_user((__force void __user *)to, from, n);
6726- if ((unsigned long)from < TASK_SIZE) {
6727- over = (unsigned long)from + n - TASK_SIZE;
6728- return __copy_tofrom_user((__force void __user *)to, from,
6729- n - over) + over;
6730- }
6731- return n;
6732-}
6733-
6734-static inline unsigned long copy_to_user(void __user *to,
6735- const void *from, unsigned long n)
6736-{
6737- unsigned long over;
6738-
6739- if (access_ok(VERIFY_WRITE, to, n))
6740- return __copy_tofrom_user(to, (__force void __user *)from, n);
6741- if ((unsigned long)to < TASK_SIZE) {
6742- over = (unsigned long)to + n - TASK_SIZE;
6743- return __copy_tofrom_user(to, (__force void __user *)from,
6744- n - over) + over;
6745- }
6746- return n;
6747-}
6748-
6749-#else /* __powerpc64__ */
6750-
6751-#define __copy_in_user(to, from, size) \
6752- __copy_tofrom_user((to), (from), (size))
6753-
6754-extern unsigned long copy_from_user(void *to, const void __user *from,
6755- unsigned long n);
6756-extern unsigned long copy_to_user(void __user *to, const void *from,
6757- unsigned long n);
6758-extern unsigned long copy_in_user(void __user *to, const void __user *from,
6759- unsigned long n);
6760-
6761-#endif /* __powerpc64__ */
6762-
6763 static inline unsigned long __copy_from_user_inatomic(void *to,
6764 const void __user *from, unsigned long n)
6765 {
6766@@ -387,6 +341,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
6767 if (ret == 0)
6768 return 0;
6769 }
6770+
6771+ if (!__builtin_constant_p(n))
6772+ check_object_size(to, n, false);
6773+
6774 return __copy_tofrom_user((__force void __user *)to, from, n);
6775 }
6776
6777@@ -413,6 +371,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
6778 if (ret == 0)
6779 return 0;
6780 }
6781+
6782+ if (!__builtin_constant_p(n))
6783+ check_object_size(from, n, true);
6784+
6785 return __copy_tofrom_user(to, (__force const void __user *)from, n);
6786 }
6787
6788@@ -430,6 +392,92 @@ static inline unsigned long __copy_to_user(void __user *to,
6789 return __copy_to_user_inatomic(to, from, size);
6790 }
6791
6792+#ifndef __powerpc64__
6793+
6794+static inline unsigned long __must_check copy_from_user(void *to,
6795+ const void __user *from, unsigned long n)
6796+{
6797+ unsigned long over;
6798+
6799+ if ((long)n < 0)
6800+ return n;
6801+
6802+ if (access_ok(VERIFY_READ, from, n)) {
6803+ if (!__builtin_constant_p(n))
6804+ check_object_size(to, n, false);
6805+ return __copy_tofrom_user((__force void __user *)to, from, n);
6806+ }
6807+ if ((unsigned long)from < TASK_SIZE) {
6808+ over = (unsigned long)from + n - TASK_SIZE;
6809+ if (!__builtin_constant_p(n - over))
6810+ check_object_size(to, n - over, false);
6811+ return __copy_tofrom_user((__force void __user *)to, from,
6812+ n - over) + over;
6813+ }
6814+ return n;
6815+}
6816+
6817+static inline unsigned long __must_check copy_to_user(void __user *to,
6818+ const void *from, unsigned long n)
6819+{
6820+ unsigned long over;
6821+
6822+ if ((long)n < 0)
6823+ return n;
6824+
6825+ if (access_ok(VERIFY_WRITE, to, n)) {
6826+ if (!__builtin_constant_p(n))
6827+ check_object_size(from, n, true);
6828+ return __copy_tofrom_user(to, (__force void __user *)from, n);
6829+ }
6830+ if ((unsigned long)to < TASK_SIZE) {
6831+ over = (unsigned long)to + n - TASK_SIZE;
6832+ if (!__builtin_constant_p(n - over))
6833+ check_object_size(from, n - over, true);
6834+ return __copy_tofrom_user(to, (__force void __user *)from,
6835+ n - over) + over;
6836+ }
6837+ return n;
6838+}
6839+
6840+#else /* __powerpc64__ */
6841+
6842+#define __copy_in_user(to, from, size) \
6843+ __copy_tofrom_user((to), (from), (size))
6844+
6845+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
6846+{
6847+ if ((long)n < 0 || n > INT_MAX)
6848+ return n;
6849+
6850+ if (!__builtin_constant_p(n))
6851+ check_object_size(to, n, false);
6852+
6853+ if (likely(access_ok(VERIFY_READ, from, n)))
6854+ n = __copy_from_user(to, from, n);
6855+ else
6856+ memset(to, 0, n);
6857+ return n;
6858+}
6859+
6860+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
6861+{
6862+ if ((long)n < 0 || n > INT_MAX)
6863+ return n;
6864+
6865+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
6866+ if (!__builtin_constant_p(n))
6867+ check_object_size(from, n, true);
6868+ n = __copy_to_user(to, from, n);
6869+ }
6870+ return n;
6871+}
6872+
6873+extern unsigned long copy_in_user(void __user *to, const void __user *from,
6874+ unsigned long n);
6875+
6876+#endif /* __powerpc64__ */
6877+
6878 extern unsigned long __clear_user(void __user *addr, unsigned long size);
6879
6880 static inline unsigned long clear_user(void __user *addr, unsigned long size)
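
The rewritten copy helpers above layer three defenses onto the stock routines: a sign check rejects lengths that went negative through integer truncation, check_object_size() (PaX USERCOPY) validates non-constant sizes against the destination object, and 64-bit copy_from_user() now zeroes the buffer when access_ok() fails instead of leaving stale kernel data behind. A userspace sketch of the guard ordering, with check_object_size() stubbed out since it is kernel infrastructure:

    #include <limits.h>
    #include <stdio.h>
    #include <string.h>

    /* Stub: in the kernel this verifies that [to, to+n) stays inside one
     * slab object or stack frame; here it only marks the call site. */
    static void check_object_size(const void *ptr, unsigned long n, int is_source)
    {
        (void)ptr; (void)n; (void)is_source;
    }

    static unsigned long copy_from_user_sketch(void *to, const void *from,
                                               unsigned long n, int access_ok)
    {
        if ((long)n < 0 || n > INT_MAX)   /* truncated/overflowed length */
            return n;
        check_object_size(to, n, 0);
        if (access_ok) {
            memcpy(to, from, n);          /* stands in for __copy_from_user() */
            return 0;
        }
        memset(to, 0, n);                 /* never leak uninitialized bytes */
        return n;
    }

    int main(void)
    {
        char dst[8], src[8] = "secret";
        printf("bad len:   %lu left\n", copy_from_user_sketch(dst, src, -1UL, 1));
        printf("no access: %lu left, dst[0]=%d\n",
               copy_from_user_sketch(dst, src, sizeof(dst), 0), dst[0]);
        return 0;
    }
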
6881diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
6882index 4684e33..acc4d19e 100644
6883--- a/arch/powerpc/kernel/exceptions-64e.S
6884+++ b/arch/powerpc/kernel/exceptions-64e.S
6885@@ -715,6 +715,7 @@ storage_fault_common:
6886 std r14,_DAR(r1)
6887 std r15,_DSISR(r1)
6888 addi r3,r1,STACK_FRAME_OVERHEAD
6889+ bl .save_nvgprs
6890 mr r4,r14
6891 mr r5,r15
6892 ld r14,PACA_EXGEN+EX_R14(r13)
6893@@ -723,8 +724,7 @@ storage_fault_common:
6894 cmpdi r3,0
6895 bne- 1f
6896 b .ret_from_except_lite
6897-1: bl .save_nvgprs
6898- mr r5,r3
6899+1: mr r5,r3
6900 addi r3,r1,STACK_FRAME_OVERHEAD
6901 ld r4,_DAR(r1)
6902 bl .bad_page_fault
6903diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
6904index 3684cbd..bc89eab 100644
6905--- a/arch/powerpc/kernel/exceptions-64s.S
6906+++ b/arch/powerpc/kernel/exceptions-64s.S
6907@@ -1206,10 +1206,10 @@ handle_page_fault:
6908 11: ld r4,_DAR(r1)
6909 ld r5,_DSISR(r1)
6910 addi r3,r1,STACK_FRAME_OVERHEAD
6911+ bl .save_nvgprs
6912 bl .do_page_fault
6913 cmpdi r3,0
6914 beq+ 12f
6915- bl .save_nvgprs
6916 mr r5,r3
6917 addi r3,r1,STACK_FRAME_OVERHEAD
6918 lwz r4,_DAR(r1)
6919diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
6920index 2e3200c..72095ce 100644
6921--- a/arch/powerpc/kernel/module_32.c
6922+++ b/arch/powerpc/kernel/module_32.c
6923@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
6924 me->arch.core_plt_section = i;
6925 }
6926 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
6927- printk("Module doesn't contain .plt or .init.plt sections.\n");
6928+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
6929 return -ENOEXEC;
6930 }
6931
6932@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
6933
6934 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
6935 /* Init, or core PLT? */
6936- if (location >= mod->module_core
6937- && location < mod->module_core + mod->core_size)
6938+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
6939+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
6940 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
6941- else
6942+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
6943+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
6944 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
6945+ else {
6946+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
6947+ return ~0UL;
6948+ }
6949
6950 /* Find this entry, or if that fails, the next avail. entry */
6951 while (entry->jump[0]) {
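
Under PaX, a module's core image is split into a read-execute half and a read-write half (module_core_rx/module_core_rw with separate sizes), so do_plt_call() above must test four address ranges instead of two, and now rejects a relocation that points into neither with an error rather than silently assuming the init PLT. A sketch of the widened range test, with the layout reduced to plain integers:

    #include <stdint.h>
    #include <stdio.h>

    struct module_layout_sketch {   /* illustrative subset of struct module */
        uintptr_t core_rx, core_size_rx;
        uintptr_t core_rw, core_size_rw;
    };

    /* Mirrors the four-range membership test do_plt_call() now performs. */
    static int in_module_core(const struct module_layout_sketch *m, uintptr_t p)
    {
        return (p >= m->core_rx && p < m->core_rx + m->core_size_rx) ||
               (p >= m->core_rw && p < m->core_rw + m->core_size_rw);
    }

    int main(void)
    {
        struct module_layout_sketch m = { 0x1000, 0x100, 0x2000, 0x100 };
        printf("rx: %d rw: %d stray: %d\n",
               in_module_core(&m, 0x1080),
               in_module_core(&m, 0x2080),
               in_module_core(&m, 0x3000));
        return 0;
    }
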
6952diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
6953index 8143067..21ae55b 100644
6954--- a/arch/powerpc/kernel/process.c
6955+++ b/arch/powerpc/kernel/process.c
6956@@ -680,8 +680,8 @@ void show_regs(struct pt_regs * regs)
6957 * Lookup NIP late so we have the best change of getting the
6958 * above info out without failing
6959 */
6960- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
6961- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
6962+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
6963+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
6964 #endif
6965 show_stack(current, (unsigned long *) regs->gpr[1]);
6966 if (!user_mode(regs))
6967@@ -1129,10 +1129,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
6968 newsp = stack[0];
6969 ip = stack[STACK_FRAME_LR_SAVE];
6970 if (!firstframe || ip != lr) {
6971- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
6972+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
6973 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6974 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
6975- printk(" (%pS)",
6976+ printk(" (%pA)",
6977 (void *)current->ret_stack[curr_frame].ret);
6978 curr_frame--;
6979 }
6980@@ -1152,7 +1152,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
6981 struct pt_regs *regs = (struct pt_regs *)
6982 (sp + STACK_FRAME_OVERHEAD);
6983 lr = regs->link;
6984- printk("--- Exception: %lx at %pS\n LR = %pS\n",
6985+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
6986 regs->trap, (void *)regs->nip, (void *)lr);
6987 firstframe = 1;
6988 }
6989@@ -1194,58 +1194,3 @@ void __ppc64_runlatch_off(void)
6990 mtspr(SPRN_CTRLT, ctrl);
6991 }
6992 #endif /* CONFIG_PPC64 */
6993-
6994-unsigned long arch_align_stack(unsigned long sp)
6995-{
6996- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6997- sp -= get_random_int() & ~PAGE_MASK;
6998- return sp & ~0xf;
6999-}
7000-
7001-static inline unsigned long brk_rnd(void)
7002-{
7003- unsigned long rnd = 0;
7004-
7005- /* 8MB for 32bit, 1GB for 64bit */
7006- if (is_32bit_task())
7007- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
7008- else
7009- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
7010-
7011- return rnd << PAGE_SHIFT;
7012-}
7013-
7014-unsigned long arch_randomize_brk(struct mm_struct *mm)
7015-{
7016- unsigned long base = mm->brk;
7017- unsigned long ret;
7018-
7019-#ifdef CONFIG_PPC_STD_MMU_64
7020- /*
7021- * If we are using 1TB segments and we are allowed to randomise
7022- * the heap, we can put it above 1TB so it is backed by a 1TB
7023- * segment. Otherwise the heap will be in the bottom 1TB
7024- * which always uses 256MB segments and this may result in a
7025- * performance penalty.
7026- */
7027- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
7028- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
7029-#endif
7030-
7031- ret = PAGE_ALIGN(base + brk_rnd());
7032-
7033- if (ret < mm->brk)
7034- return mm->brk;
7035-
7036- return ret;
7037-}
7038-
7039-unsigned long randomize_et_dyn(unsigned long base)
7040-{
7041- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
7042-
7043- if (ret < base)
7044- return base;
7045-
7046- return ret;
7047-}
7048diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
7049index c497000..8fde506 100644
7050--- a/arch/powerpc/kernel/ptrace.c
7051+++ b/arch/powerpc/kernel/ptrace.c
7052@@ -1737,6 +1737,10 @@ long arch_ptrace(struct task_struct *child, long request,
7053 return ret;
7054 }
7055
7056+#ifdef CONFIG_GRKERNSEC_SETXID
7057+extern void gr_delayed_cred_worker(void);
7058+#endif
7059+
7060 /*
7061 * We must return the syscall number to actually look up in the table.
7062 * This can be -1L to skip running any syscall at all.
7063@@ -1747,6 +1751,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
7064
7065 secure_computing_strict(regs->gpr[0]);
7066
7067+#ifdef CONFIG_GRKERNSEC_SETXID
7068+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
7069+ gr_delayed_cred_worker();
7070+#endif
7071+
7072 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
7073 tracehook_report_syscall_entry(regs))
7074 /*
7075@@ -1781,6 +1790,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
7076 {
7077 int step;
7078
7079+#ifdef CONFIG_GRKERNSEC_SETXID
7080+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
7081+ gr_delayed_cred_worker();
7082+#endif
7083+
7084 audit_syscall_exit(regs);
7085
7086 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
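
GRKERNSEC_SETXID defers credential changes made by one thread until each sibling thread crosses a syscall boundary; TIF_GRSEC_SETXID is set on those threads and consumed here with an atomic test-and-clear, so gr_delayed_cred_worker() runs exactly once per flagging, on both syscall entry and exit. A C11 sketch of that consume-once pattern:

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic unsigned long thread_flags;
    #define TIF_GRSEC_SETXID 9

    /* Atomically clear the bit and report whether it was set, mirroring
     * test_and_clear_thread_flag(); the deferred work runs at most once. */
    static int test_and_clear_flag(int bit)
    {
        unsigned long old = atomic_fetch_and(&thread_flags, ~(1UL << bit));
        return (old >> bit) & 1;
    }

    int main(void)
    {
        atomic_fetch_or(&thread_flags, 1UL << TIF_GRSEC_SETXID);
        printf("first syscall entry:  %d\n", test_and_clear_flag(TIF_GRSEC_SETXID));
        printf("second syscall entry: %d\n", test_and_clear_flag(TIF_GRSEC_SETXID));
        return 0;
    }
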
7087diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
7088index 804e323..79181c1 100644
7089--- a/arch/powerpc/kernel/signal_32.c
7090+++ b/arch/powerpc/kernel/signal_32.c
7091@@ -851,7 +851,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
7092 /* Save user registers on the stack */
7093 frame = &rt_sf->uc.uc_mcontext;
7094 addr = frame;
7095- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
7096+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
7097 if (save_user_regs(regs, frame, 0, 1))
7098 goto badframe;
7099 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
7100diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
7101index 1ca045d..139c3f7 100644
7102--- a/arch/powerpc/kernel/signal_64.c
7103+++ b/arch/powerpc/kernel/signal_64.c
7104@@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
7105 current->thread.fpscr.val = 0;
7106
7107 /* Set up to return from userspace. */
7108- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
7109+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
7110 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
7111 } else {
7112 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
7113diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
7114index 3ce1f86..c30e629 100644
7115--- a/arch/powerpc/kernel/sysfs.c
7116+++ b/arch/powerpc/kernel/sysfs.c
7117@@ -522,7 +522,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
7118 return NOTIFY_OK;
7119 }
7120
7121-static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
7122+static struct notifier_block sysfs_cpu_nb = {
7123 .notifier_call = sysfs_cpu_notify,
7124 };
7125
7126diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
7127index 3251840..3f7c77a 100644
7128--- a/arch/powerpc/kernel/traps.c
7129+++ b/arch/powerpc/kernel/traps.c
7130@@ -133,6 +133,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
7131 return flags;
7132 }
7133
7134+extern void gr_handle_kernel_exploit(void);
7135+
7136 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
7137 int signr)
7138 {
7139@@ -182,6 +184,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
7140 panic("Fatal exception in interrupt");
7141 if (panic_on_oops)
7142 panic("Fatal exception");
7143+
7144+ gr_handle_kernel_exploit();
7145+
7146 do_exit(signr);
7147 }
7148
7149diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
7150index 1b2076f..835e4be 100644
7151--- a/arch/powerpc/kernel/vdso.c
7152+++ b/arch/powerpc/kernel/vdso.c
7153@@ -34,6 +34,7 @@
7154 #include <asm/firmware.h>
7155 #include <asm/vdso.h>
7156 #include <asm/vdso_datapage.h>
7157+#include <asm/mman.h>
7158
7159 #include "setup.h"
7160
7161@@ -218,7 +219,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
7162 vdso_base = VDSO32_MBASE;
7163 #endif
7164
7165- current->mm->context.vdso_base = 0;
7166+ current->mm->context.vdso_base = ~0UL;
7167
7168 /* vDSO has a problem and was disabled, just don't "enable" it for the
7169 * process
7170@@ -238,7 +239,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
7171 vdso_base = get_unmapped_area(NULL, vdso_base,
7172 (vdso_pages << PAGE_SHIFT) +
7173 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
7174- 0, 0);
7175+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
7176 if (IS_ERR_VALUE(vdso_base)) {
7177 rc = vdso_base;
7178 goto fail_mmapsem;
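
The signal_32.c, signal_64.c and vdso.c hunks above switch the "no vDSO" sentinel in context.vdso_base from 0 to ~0UL: with PaX RANDMMAP the vDSO can legitimately land at a low address, so 0 is no longer safe to reserve as "absent", whereas ~0UL can never be a valid mapping base. The same vdso.c hunk also passes MAP_PRIVATE | MAP_EXECUTABLE to get_unmapped_area() so the slot is chosen as an executable mapping. A tiny sketch of the sentinel check:

    #include <stdio.h>

    #define VDSO_UNMAPPED (~0UL)   /* new sentinel from the patch */

    static int have_vdso(unsigned long vdso_base)
    {
        return vdso_base != VDSO_UNMAPPED;  /* base 0 is now legal */
    }

    int main(void)
    {
        printf("base 0: %d, unmapped: %d\n", have_vdso(0), have_vdso(VDSO_UNMAPPED));
        return 0;
    }
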
7179diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
7180index 5eea6f3..5d10396 100644
7181--- a/arch/powerpc/lib/usercopy_64.c
7182+++ b/arch/powerpc/lib/usercopy_64.c
7183@@ -9,22 +9,6 @@
7184 #include <linux/module.h>
7185 #include <asm/uaccess.h>
7186
7187-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
7188-{
7189- if (likely(access_ok(VERIFY_READ, from, n)))
7190- n = __copy_from_user(to, from, n);
7191- else
7192- memset(to, 0, n);
7193- return n;
7194-}
7195-
7196-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
7197-{
7198- if (likely(access_ok(VERIFY_WRITE, to, n)))
7199- n = __copy_to_user(to, from, n);
7200- return n;
7201-}
7202-
7203 unsigned long copy_in_user(void __user *to, const void __user *from,
7204 unsigned long n)
7205 {
7206@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
7207 return n;
7208 }
7209
7210-EXPORT_SYMBOL(copy_from_user);
7211-EXPORT_SYMBOL(copy_to_user);
7212 EXPORT_SYMBOL(copy_in_user);
7213
7214diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
7215index 3a8489a..6a63b3b 100644
7216--- a/arch/powerpc/mm/fault.c
7217+++ b/arch/powerpc/mm/fault.c
7218@@ -32,6 +32,10 @@
7219 #include <linux/perf_event.h>
7220 #include <linux/magic.h>
7221 #include <linux/ratelimit.h>
7222+#include <linux/slab.h>
7223+#include <linux/pagemap.h>
7224+#include <linux/compiler.h>
7225+#include <linux/unistd.h>
7226
7227 #include <asm/firmware.h>
7228 #include <asm/page.h>
7229@@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
7230 }
7231 #endif
7232
7233+#ifdef CONFIG_PAX_PAGEEXEC
7234+/*
7235+ * PaX: decide what to do with offenders (regs->nip = fault address)
7236+ *
7237+ * returns 1 when task should be killed
7238+ */
7239+static int pax_handle_fetch_fault(struct pt_regs *regs)
7240+{
7241+ return 1;
7242+}
7243+
7244+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7245+{
7246+ unsigned long i;
7247+
7248+ printk(KERN_ERR "PAX: bytes at PC: ");
7249+ for (i = 0; i < 5; i++) {
7250+ unsigned int c;
7251+ if (get_user(c, (unsigned int __user *)pc+i))
7252+ printk(KERN_CONT "???????? ");
7253+ else
7254+ printk(KERN_CONT "%08x ", c);
7255+ }
7256+ printk("\n");
7257+}
7258+#endif
7259+
7260 /*
7261 * Check whether the instruction at regs->nip is a store using
7262 * an update addressing form which will update r1.
7263@@ -213,7 +244,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
7264 * indicate errors in DSISR but can validly be set in SRR1.
7265 */
7266 if (trap == 0x400)
7267- error_code &= 0x48200000;
7268+ error_code &= 0x58200000;
7269 else
7270 is_write = error_code & DSISR_ISSTORE;
7271 #else
7272@@ -364,7 +395,7 @@ good_area:
7273 * "undefined". Of those that can be set, this is the only
7274 * one which seems bad.
7275 */
7276- if (error_code & 0x10000000)
7277+ if (error_code & DSISR_GUARDED)
7278 /* Guarded storage error. */
7279 goto bad_area;
7280 #endif /* CONFIG_8xx */
7281@@ -379,7 +410,7 @@ good_area:
7282 * processors use the same I/D cache coherency mechanism
7283 * as embedded.
7284 */
7285- if (error_code & DSISR_PROTFAULT)
7286+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
7287 goto bad_area;
7288 #endif /* CONFIG_PPC_STD_MMU */
7289
7290@@ -462,6 +493,23 @@ bad_area:
7291 bad_area_nosemaphore:
7292 /* User mode accesses cause a SIGSEGV */
7293 if (user_mode(regs)) {
7294+
7295+#ifdef CONFIG_PAX_PAGEEXEC
7296+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
7297+#ifdef CONFIG_PPC_STD_MMU
7298+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
7299+#else
7300+ if (is_exec && regs->nip == address) {
7301+#endif
7302+ switch (pax_handle_fetch_fault(regs)) {
7303+ }
7304+
7305+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
7306+ do_group_exit(SIGKILL);
7307+ }
7308+ }
7309+#endif
7310+
7311 _exception(SIGSEGV, regs, code, address);
7312 return 0;
7313 }
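
In the fault.c hunk, pax_handle_fetch_fault() always returns 1 on PowerPC (kill the task; no trampoline emulation is attempted), the 0x400 error-code mask widens from 0x48200000 to 0x58200000 so DSISR_GUARDED survives into the handler, and pax_report_insns() logs the five 32-bit words at the faulting PC for post-mortem analysis, printing "????????" for words it cannot read back. A userspace analogue of that dump loop, with get_user() replaced by a direct read:

    #include <stdio.h>

    static void report_insns_sketch(const unsigned int *pc)
    {
        fprintf(stderr, "PAX: bytes at PC: ");
        for (unsigned long i = 0; i < 5; i++) {
            if (!pc)                       /* stands in for a get_user() failure */
                fprintf(stderr, "???????? ");
            else
                fprintf(stderr, "%08x ", pc[i]);
        }
        fprintf(stderr, "\n");
    }

    int main(void)
    {
        unsigned int code[5] = { 0x38600000, 0x4e800020, 0, 0, 0 };
        report_insns_sketch(code);
        report_insns_sketch(NULL);
        return 0;
    }
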
7314diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
7315index 67a42ed..cd463e0 100644
7316--- a/arch/powerpc/mm/mmap_64.c
7317+++ b/arch/powerpc/mm/mmap_64.c
7318@@ -57,6 +57,10 @@ static unsigned long mmap_rnd(void)
7319 {
7320 unsigned long rnd = 0;
7321
7322+#ifdef CONFIG_PAX_RANDMMAP
7323+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7324+#endif
7325+
7326 if (current->flags & PF_RANDOMIZE) {
7327 /* 8MB for 32bit, 1GB for 64bit */
7328 if (is_32bit_task())
7329@@ -91,10 +95,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7330 */
7331 if (mmap_is_legacy()) {
7332 mm->mmap_base = TASK_UNMAPPED_BASE;
7333+
7334+#ifdef CONFIG_PAX_RANDMMAP
7335+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7336+ mm->mmap_base += mm->delta_mmap;
7337+#endif
7338+
7339 mm->get_unmapped_area = arch_get_unmapped_area;
7340 mm->unmap_area = arch_unmap_area;
7341 } else {
7342 mm->mmap_base = mmap_base();
7343+
7344+#ifdef CONFIG_PAX_RANDMMAP
7345+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7346+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7347+#endif
7348+
7349 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7350 mm->unmap_area = arch_unmap_area_topdown;
7351 }
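
Under PAX_RANDMMAP the per-exec randomization is precomputed into mm->delta_mmap and mm->delta_stack, and the hunk above folds those deltas into the mmap base: upward in the legacy bottom-up layout, downward in the top-down layout, which must also clear the randomized stack gap. A sketch of the base arithmetic (field names as in the patch; the values are illustrative):

    #include <stdio.h>

    #define TASK_UNMAPPED_BASE 0x40000000UL   /* illustrative */

    static unsigned long mmap_base_legacy(unsigned long delta_mmap)
    {
        return TASK_UNMAPPED_BASE + delta_mmap;     /* bottom-up: shift up */
    }

    static unsigned long mmap_base_topdown(unsigned long base,
                                           unsigned long delta_mmap,
                                           unsigned long delta_stack)
    {
        return base - (delta_mmap + delta_stack);   /* top-down: shift down */
    }

    int main(void)
    {
        printf("legacy:  %#lx\n", mmap_base_legacy(0x1000000UL));
        printf("topdown: %#lx\n",
               mmap_base_topdown(0x7ffff0000000UL, 0x1000000UL, 0x200000UL));
        return 0;
    }
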
7352diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
7353index e779642..e5bb889 100644
7354--- a/arch/powerpc/mm/mmu_context_nohash.c
7355+++ b/arch/powerpc/mm/mmu_context_nohash.c
7356@@ -363,7 +363,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
7357 return NOTIFY_OK;
7358 }
7359
7360-static struct notifier_block __cpuinitdata mmu_context_cpu_nb = {
7361+static struct notifier_block mmu_context_cpu_nb = {
7362 .notifier_call = mmu_context_cpu_notify,
7363 };
7364
7365diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
7366index bba87ca..c346a33 100644
7367--- a/arch/powerpc/mm/numa.c
7368+++ b/arch/powerpc/mm/numa.c
7369@@ -932,7 +932,7 @@ static void __init *careful_zallocation(int nid, unsigned long size,
7370 return ret;
7371 }
7372
7373-static struct notifier_block __cpuinitdata ppc64_numa_nb = {
7374+static struct notifier_block ppc64_numa_nb = {
7375 .notifier_call = cpu_numa_callback,
7376 .priority = 1 /* Must run before sched domains notifier. */
7377 };
7378diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
7379index cf9dada..241529f 100644
7380--- a/arch/powerpc/mm/slice.c
7381+++ b/arch/powerpc/mm/slice.c
7382@@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
7383 if ((mm->task_size - len) < addr)
7384 return 0;
7385 vma = find_vma(mm, addr);
7386- return (!vma || (addr + len) <= vma->vm_start);
7387+ return check_heap_stack_gap(vma, addr, len, 0);
7388 }
7389
7390 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
7391@@ -272,7 +272,7 @@ full_search:
7392 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
7393 continue;
7394 }
7395- if (!vma || addr + len <= vma->vm_start) {
7396+ if (check_heap_stack_gap(vma, addr, len, 0)) {
7397 /*
7398 * Remember the place where we stopped the search:
7399 */
7400@@ -329,10 +329,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
7401 }
7402 }
7403
7404- addr = mm->mmap_base;
7405- while (addr > len) {
7406+ if (mm->mmap_base < len)
7407+ addr = -ENOMEM;
7408+ else
7409+ addr = mm->mmap_base - len;
7410+
7411+ while (!IS_ERR_VALUE(addr)) {
7412 /* Go down by chunk size */
7413- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
7414+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
7415
7416 /* Check for hit with different page size */
7417 mask = slice_range_to_mask(addr, len);
7418@@ -352,7 +356,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
7419 * return with success:
7420 */
7421 vma = find_vma(mm, addr);
7422- if (!vma || (addr + len) <= vma->vm_start) {
7423+ if (check_heap_stack_gap(vma, addr, len, 0)) {
7424 /* remember the address as a hint for next time */
7425 if (use_cache)
7426 mm->free_area_cache = addr;
7427@@ -364,7 +368,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
7428 mm->cached_hole_size = vma->vm_start - addr;
7429
7430 /* try just below the current vma->vm_start */
7431- addr = vma->vm_start;
7432+ addr = skip_heap_stack_gap(vma, len, 0);
7433 }
7434
7435 /*
7436@@ -442,6 +446,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
7437 if (fixed && addr > (mm->task_size - len))
7438 return -EINVAL;
7439
7440+#ifdef CONFIG_PAX_RANDMMAP
7441+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
7442+ addr = 0;
7443+#endif
7444+
7445 /* If hint, make sure it matches our alignment restrictions */
7446 if (!fixed && addr) {
7447 addr = _ALIGN_UP(addr, 1ul << pshift);
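
Each open-coded "!vma || addr + len <= vma->vm_start" test in the slice allocator is replaced by check_heap_stack_gap(), which additionally enforces a guard gap so a new mapping cannot be placed flush against a stack (or heap) VMA; the top-down walk is also reworked to step via skip_heap_stack_gap() and to bail out cleanly with -ENOMEM instead of underflowing. The real helper lives elsewhere in this patch (in the mm/ changes); a hedged sketch of the shape of the check:

    #include <stdio.h>

    struct vma_sketch { unsigned long vm_start, vm_end; int is_stack; };

    /* Sketch: the slot is free if there is no VMA, or if [addr, addr+len)
     * -- plus a guard gap before stack VMAs -- ends at or before vm_start. */
    static int check_heap_stack_gap_sketch(const struct vma_sketch *vma,
                                           unsigned long addr, unsigned long len,
                                           unsigned long gap)
    {
        if (!vma)
            return 1;
        if (addr + len > vma->vm_start)
            return 0;
        if (vma->is_stack && addr + len + gap > vma->vm_start)
            return 0;
        return 1;
    }

    int main(void)
    {
        struct vma_sketch stack = { 0x7000, 0x8000, 1 };
        printf("flush against stack: %d\n",
               check_heap_stack_gap_sketch(&stack, 0x6000, 0x1000, 0x100));
        printf("gap respected:       %d\n",
               check_heap_stack_gap_sketch(&stack, 0x6000, 0xe00, 0x100));
        return 0;
    }
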
7448diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
7449index 0cfece4..2f1a0e5 100644
7450--- a/arch/powerpc/platforms/cell/spufs/file.c
7451+++ b/arch/powerpc/platforms/cell/spufs/file.c
7452@@ -281,9 +281,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
7453 return VM_FAULT_NOPAGE;
7454 }
7455
7456-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
7457+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
7458 unsigned long address,
7459- void *buf, int len, int write)
7460+ void *buf, size_t len, int write)
7461 {
7462 struct spu_context *ctx = vma->vm_file->private_data;
7463 unsigned long offset = address - vma->vm_start;
7464diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
7465index bdb738a..49c9f95 100644
7466--- a/arch/powerpc/platforms/powermac/smp.c
7467+++ b/arch/powerpc/platforms/powermac/smp.c
7468@@ -885,7 +885,7 @@ static int smp_core99_cpu_notify(struct notifier_block *self,
7469 return NOTIFY_OK;
7470 }
7471
7472-static struct notifier_block __cpuinitdata smp_core99_cpu_nb = {
7473+static struct notifier_block smp_core99_cpu_nb = {
7474 .notifier_call = smp_core99_cpu_notify,
7475 };
7476 #endif /* CONFIG_HOTPLUG_CPU */
7477diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
7478index c797832..ce575c8 100644
7479--- a/arch/s390/include/asm/atomic.h
7480+++ b/arch/s390/include/asm/atomic.h
7481@@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
7482 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
7483 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
7484
7485+#define atomic64_read_unchecked(v) atomic64_read(v)
7486+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7487+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7488+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7489+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7490+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7491+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7492+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7493+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7494+
7495 #define smp_mb__before_atomic_dec() smp_mb()
7496 #define smp_mb__after_atomic_dec() smp_mb()
7497 #define smp_mb__before_atomic_inc() smp_mb()
7498diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
7499index 4d7ccac..d03d0ad 100644
7500--- a/arch/s390/include/asm/cache.h
7501+++ b/arch/s390/include/asm/cache.h
7502@@ -9,8 +9,10 @@
7503 #ifndef __ARCH_S390_CACHE_H
7504 #define __ARCH_S390_CACHE_H
7505
7506-#define L1_CACHE_BYTES 256
7507+#include <linux/const.h>
7508+
7509 #define L1_CACHE_SHIFT 8
7510+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7511 #define NET_SKB_PAD 32
7512
7513 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
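
This hunk (and its score, sh and sparc counterparts below) rewrites L1_CACHE_BYTES as (_AC(1,UL) << L1_CACHE_SHIFT): _AC from linux/const.h appends the UL suffix only when compiling C, so the constant stays usable unchanged from assembly while being unsigned long in C, keeping cache-size arithmetic out of signed-int range. A sketch of the two-faced macro, simplified from include/uapi/linux/const.h:

    #include <stdio.h>

    /* In assembly (__ASSEMBLY__) _AC(X,Y) expands to plain X;
     * in C it token-pastes the suffix on. */
    #ifdef __ASSEMBLY__
    #define _AC(X, Y)  X
    #else
    #define __AC(X, Y) (X##Y)
    #define _AC(X, Y)  __AC(X, Y)
    #endif

    #define L1_CACHE_SHIFT 8
    #define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)

    int main(void)
    {
        printf("L1_CACHE_BYTES = %lu (operand size %zu)\n",
               L1_CACHE_BYTES, sizeof(L1_CACHE_BYTES));
        return 0;
    }
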
7514diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
7515index 178ff96..8c93bd1 100644
7516--- a/arch/s390/include/asm/elf.h
7517+++ b/arch/s390/include/asm/elf.h
7518@@ -160,8 +160,14 @@ extern unsigned int vdso_enabled;
7519 the loader. We need to make sure that it is out of the way of the program
7520 that it will "exec", and that there is sufficient room for the brk. */
7521
7522-extern unsigned long randomize_et_dyn(unsigned long base);
7523-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
7524+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
7525+
7526+#ifdef CONFIG_PAX_ASLR
7527+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
7528+
7529+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
7530+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
7531+#endif
7532
7533 /* This yields a mask that user programs can use to figure out what
7534 instruction set this CPU supports. */
7535@@ -210,9 +216,6 @@ struct linux_binprm;
7536 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
7537 int arch_setup_additional_pages(struct linux_binprm *, int);
7538
7539-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7540-#define arch_randomize_brk arch_randomize_brk
7541-
7542 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
7543
7544 #endif
7545diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
7546index c4a93d6..4d2a9b4 100644
7547--- a/arch/s390/include/asm/exec.h
7548+++ b/arch/s390/include/asm/exec.h
7549@@ -7,6 +7,6 @@
7550 #ifndef __ASM_EXEC_H
7551 #define __ASM_EXEC_H
7552
7553-extern unsigned long arch_align_stack(unsigned long sp);
7554+#define arch_align_stack(x) ((x) & ~0xfUL)
7555
7556 #endif /* __ASM_EXEC_H */
7557diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
7558index 34268df..ea97318 100644
7559--- a/arch/s390/include/asm/uaccess.h
7560+++ b/arch/s390/include/asm/uaccess.h
7561@@ -252,6 +252,10 @@ static inline unsigned long __must_check
7562 copy_to_user(void __user *to, const void *from, unsigned long n)
7563 {
7564 might_fault();
7565+
7566+ if ((long)n < 0)
7567+ return n;
7568+
7569 if (access_ok(VERIFY_WRITE, to, n))
7570 n = __copy_to_user(to, from, n);
7571 return n;
7572@@ -277,6 +281,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
7573 static inline unsigned long __must_check
7574 __copy_from_user(void *to, const void __user *from, unsigned long n)
7575 {
7576+ if ((long)n < 0)
7577+ return n;
7578+
7579 if (__builtin_constant_p(n) && (n <= 256))
7580 return uaccess.copy_from_user_small(n, from, to);
7581 else
7582@@ -308,10 +315,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
7583 static inline unsigned long __must_check
7584 copy_from_user(void *to, const void __user *from, unsigned long n)
7585 {
7586- unsigned int sz = __compiletime_object_size(to);
7587+ size_t sz = __compiletime_object_size(to);
7588
7589 might_fault();
7590- if (unlikely(sz != -1 && sz < n)) {
7591+
7592+ if ((long)n < 0)
7593+ return n;
7594+
7595+ if (unlikely(sz != (size_t)-1 && sz < n)) {
7596 copy_from_user_overflow();
7597 return n;
7598 }
7599diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
7600index 4610dea..cf0af21 100644
7601--- a/arch/s390/kernel/module.c
7602+++ b/arch/s390/kernel/module.c
7603@@ -171,11 +171,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
7604
7605 /* Increase core size by size of got & plt and set start
7606 offsets for got and plt. */
7607- me->core_size = ALIGN(me->core_size, 4);
7608- me->arch.got_offset = me->core_size;
7609- me->core_size += me->arch.got_size;
7610- me->arch.plt_offset = me->core_size;
7611- me->core_size += me->arch.plt_size;
7612+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
7613+ me->arch.got_offset = me->core_size_rw;
7614+ me->core_size_rw += me->arch.got_size;
7615+ me->arch.plt_offset = me->core_size_rx;
7616+ me->core_size_rx += me->arch.plt_size;
7617 return 0;
7618 }
7619
7620@@ -252,7 +252,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7621 if (info->got_initialized == 0) {
7622 Elf_Addr *gotent;
7623
7624- gotent = me->module_core + me->arch.got_offset +
7625+ gotent = me->module_core_rw + me->arch.got_offset +
7626 info->got_offset;
7627 *gotent = val;
7628 info->got_initialized = 1;
7629@@ -276,7 +276,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7630 else if (r_type == R_390_GOTENT ||
7631 r_type == R_390_GOTPLTENT)
7632 *(unsigned int *) loc =
7633- (val + (Elf_Addr) me->module_core - loc) >> 1;
7634+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
7635 else if (r_type == R_390_GOT64 ||
7636 r_type == R_390_GOTPLT64)
7637 *(unsigned long *) loc = val;
7638@@ -290,7 +290,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7639 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
7640 if (info->plt_initialized == 0) {
7641 unsigned int *ip;
7642- ip = me->module_core + me->arch.plt_offset +
7643+ ip = me->module_core_rx + me->arch.plt_offset +
7644 info->plt_offset;
7645 #ifndef CONFIG_64BIT
7646 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
7647@@ -315,7 +315,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7648 val - loc + 0xffffUL < 0x1ffffeUL) ||
7649 (r_type == R_390_PLT32DBL &&
7650 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
7651- val = (Elf_Addr) me->module_core +
7652+ val = (Elf_Addr) me->module_core_rx +
7653 me->arch.plt_offset +
7654 info->plt_offset;
7655 val += rela->r_addend - loc;
7656@@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7657 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
7658 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
7659 val = val + rela->r_addend -
7660- ((Elf_Addr) me->module_core + me->arch.got_offset);
7661+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
7662 if (r_type == R_390_GOTOFF16)
7663 *(unsigned short *) loc = val;
7664 else if (r_type == R_390_GOTOFF32)
7665@@ -347,7 +347,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7666 break;
7667 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
7668 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
7669- val = (Elf_Addr) me->module_core + me->arch.got_offset +
7670+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
7671 rela->r_addend - loc;
7672 if (r_type == R_390_GOTPC)
7673 *(unsigned int *) loc = val;
7674diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
7675index 536d645..4a5bd9e 100644
7676--- a/arch/s390/kernel/process.c
7677+++ b/arch/s390/kernel/process.c
7678@@ -250,39 +250,3 @@ unsigned long get_wchan(struct task_struct *p)
7679 }
7680 return 0;
7681 }
7682-
7683-unsigned long arch_align_stack(unsigned long sp)
7684-{
7685- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7686- sp -= get_random_int() & ~PAGE_MASK;
7687- return sp & ~0xf;
7688-}
7689-
7690-static inline unsigned long brk_rnd(void)
7691-{
7692- /* 8MB for 32bit, 1GB for 64bit */
7693- if (is_32bit_task())
7694- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
7695- else
7696- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
7697-}
7698-
7699-unsigned long arch_randomize_brk(struct mm_struct *mm)
7700-{
7701- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
7702-
7703- if (ret < mm->brk)
7704- return mm->brk;
7705- return ret;
7706-}
7707-
7708-unsigned long randomize_et_dyn(unsigned long base)
7709-{
7710- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
7711-
7712- if (!(current->flags & PF_RANDOMIZE))
7713- return base;
7714- if (ret < base)
7715- return base;
7716- return ret;
7717-}
7718diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
7719index c59a5ef..3fae59c 100644
7720--- a/arch/s390/mm/mmap.c
7721+++ b/arch/s390/mm/mmap.c
7722@@ -90,10 +90,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7723 */
7724 if (mmap_is_legacy()) {
7725 mm->mmap_base = TASK_UNMAPPED_BASE;
7726+
7727+#ifdef CONFIG_PAX_RANDMMAP
7728+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7729+ mm->mmap_base += mm->delta_mmap;
7730+#endif
7731+
7732 mm->get_unmapped_area = arch_get_unmapped_area;
7733 mm->unmap_area = arch_unmap_area;
7734 } else {
7735 mm->mmap_base = mmap_base();
7736+
7737+#ifdef CONFIG_PAX_RANDMMAP
7738+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7739+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7740+#endif
7741+
7742 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7743 mm->unmap_area = arch_unmap_area_topdown;
7744 }
7745@@ -172,10 +184,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7746 */
7747 if (mmap_is_legacy()) {
7748 mm->mmap_base = TASK_UNMAPPED_BASE;
7749+
7750+#ifdef CONFIG_PAX_RANDMMAP
7751+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7752+ mm->mmap_base += mm->delta_mmap;
7753+#endif
7754+
7755 mm->get_unmapped_area = s390_get_unmapped_area;
7756 mm->unmap_area = arch_unmap_area;
7757 } else {
7758 mm->mmap_base = mmap_base();
7759+
7760+#ifdef CONFIG_PAX_RANDMMAP
7761+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7762+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7763+#endif
7764+
7765 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
7766 mm->unmap_area = arch_unmap_area_topdown;
7767 }
7768diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
7769index ae3d59f..f65f075 100644
7770--- a/arch/score/include/asm/cache.h
7771+++ b/arch/score/include/asm/cache.h
7772@@ -1,7 +1,9 @@
7773 #ifndef _ASM_SCORE_CACHE_H
7774 #define _ASM_SCORE_CACHE_H
7775
7776+#include <linux/const.h>
7777+
7778 #define L1_CACHE_SHIFT 4
7779-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7780+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7781
7782 #endif /* _ASM_SCORE_CACHE_H */
7783diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
7784index f9f3cd5..58ff438 100644
7785--- a/arch/score/include/asm/exec.h
7786+++ b/arch/score/include/asm/exec.h
7787@@ -1,6 +1,6 @@
7788 #ifndef _ASM_SCORE_EXEC_H
7789 #define _ASM_SCORE_EXEC_H
7790
7791-extern unsigned long arch_align_stack(unsigned long sp);
7792+#define arch_align_stack(x) (x)
7793
7794 #endif /* _ASM_SCORE_EXEC_H */
7795diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
7796index 7956846..5f37677 100644
7797--- a/arch/score/kernel/process.c
7798+++ b/arch/score/kernel/process.c
7799@@ -134,8 +134,3 @@ unsigned long get_wchan(struct task_struct *task)
7800
7801 return task_pt_regs(task)->cp0_epc;
7802 }
7803-
7804-unsigned long arch_align_stack(unsigned long sp)
7805-{
7806- return sp;
7807-}
7808diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
7809index ef9e555..331bd29 100644
7810--- a/arch/sh/include/asm/cache.h
7811+++ b/arch/sh/include/asm/cache.h
7812@@ -9,10 +9,11 @@
7813 #define __ASM_SH_CACHE_H
7814 #ifdef __KERNEL__
7815
7816+#include <linux/const.h>
7817 #include <linux/init.h>
7818 #include <cpu/cache.h>
7819
7820-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7821+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7822
7823 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
7824
7825diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
7826index 03f2b55..b027032 100644
7827--- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
7828+++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
7829@@ -143,7 +143,7 @@ shx3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
7830 return NOTIFY_OK;
7831 }
7832
7833-static struct notifier_block __cpuinitdata shx3_cpu_notifier = {
7834+static struct notifier_block shx3_cpu_notifier = {
7835 .notifier_call = shx3_cpu_callback,
7836 };
7837
7838diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
7839index 6777177..cb5e44f 100644
7840--- a/arch/sh/mm/mmap.c
7841+++ b/arch/sh/mm/mmap.c
7842@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7843 struct mm_struct *mm = current->mm;
7844 struct vm_area_struct *vma;
7845 int do_colour_align;
7846+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
7847 struct vm_unmapped_area_info info;
7848
7849 if (flags & MAP_FIXED) {
7850@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7851 if (filp || (flags & MAP_SHARED))
7852 do_colour_align = 1;
7853
7854+#ifdef CONFIG_PAX_RANDMMAP
7855+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7856+#endif
7857+
7858 if (addr) {
7859 if (do_colour_align)
7860 addr = COLOUR_ALIGN(addr, pgoff);
7861@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7862 addr = PAGE_ALIGN(addr);
7863
7864 vma = find_vma(mm, addr);
7865- if (TASK_SIZE - len >= addr &&
7866- (!vma || addr + len <= vma->vm_start))
7867+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
7868 return addr;
7869 }
7870
7871 info.flags = 0;
7872 info.length = len;
7873- info.low_limit = TASK_UNMAPPED_BASE;
7874+ info.low_limit = mm->mmap_base;
7875 info.high_limit = TASK_SIZE;
7876 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
7877 info.align_offset = pgoff << PAGE_SHIFT;
7878@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7879 struct mm_struct *mm = current->mm;
7880 unsigned long addr = addr0;
7881 int do_colour_align;
7882+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
7883 struct vm_unmapped_area_info info;
7884
7885 if (flags & MAP_FIXED) {
7886@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7887 if (filp || (flags & MAP_SHARED))
7888 do_colour_align = 1;
7889
7890+#ifdef CONFIG_PAX_RANDMMAP
7891+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7892+#endif
7893+
7894 /* requesting a specific address */
7895 if (addr) {
7896 if (do_colour_align)
7897@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7898 addr = PAGE_ALIGN(addr);
7899
7900 vma = find_vma(mm, addr);
7901- if (TASK_SIZE - len >= addr &&
7902- (!vma || addr + len <= vma->vm_start))
7903+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
7904 return addr;
7905 }
7906
7907@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7908 VM_BUG_ON(addr != -ENOMEM);
7909 info.flags = 0;
7910 info.low_limit = TASK_UNMAPPED_BASE;
7911+
7912+#ifdef CONFIG_PAX_RANDMMAP
7913+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7914+ info.low_limit += mm->delta_mmap;
7915+#endif
7916+
7917 info.high_limit = TASK_SIZE;
7918 addr = vm_unmapped_area(&info);
7919 }
7920diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
7921index be56a24..443328f 100644
7922--- a/arch/sparc/include/asm/atomic_64.h
7923+++ b/arch/sparc/include/asm/atomic_64.h
7924@@ -14,18 +14,40 @@
7925 #define ATOMIC64_INIT(i) { (i) }
7926
7927 #define atomic_read(v) (*(volatile int *)&(v)->counter)
7928+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7929+{
7930+ return v->counter;
7931+}
7932 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
7933+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7934+{
7935+ return v->counter;
7936+}
7937
7938 #define atomic_set(v, i) (((v)->counter) = i)
7939+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7940+{
7941+ v->counter = i;
7942+}
7943 #define atomic64_set(v, i) (((v)->counter) = i)
7944+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7945+{
7946+ v->counter = i;
7947+}
7948
7949 extern void atomic_add(int, atomic_t *);
7950+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
7951 extern void atomic64_add(long, atomic64_t *);
7952+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
7953 extern void atomic_sub(int, atomic_t *);
7954+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
7955 extern void atomic64_sub(long, atomic64_t *);
7956+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
7957
7958 extern int atomic_add_ret(int, atomic_t *);
7959+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
7960 extern long atomic64_add_ret(long, atomic64_t *);
7961+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
7962 extern int atomic_sub_ret(int, atomic_t *);
7963 extern long atomic64_sub_ret(long, atomic64_t *);
7964
7965@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
7966 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
7967
7968 #define atomic_inc_return(v) atomic_add_ret(1, v)
7969+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7970+{
7971+ return atomic_add_ret_unchecked(1, v);
7972+}
7973 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
7974+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7975+{
7976+ return atomic64_add_ret_unchecked(1, v);
7977+}
7978
7979 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
7980 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
7981
7982 #define atomic_add_return(i, v) atomic_add_ret(i, v)
7983+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7984+{
7985+ return atomic_add_ret_unchecked(i, v);
7986+}
7987 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
7988+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
7989+{
7990+ return atomic64_add_ret_unchecked(i, v);
7991+}
7992
7993 /*
7994 * atomic_inc_and_test - increment and test
7995@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
7996 * other cases.
7997 */
7998 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
7999+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
8000+{
8001+ return atomic_inc_return_unchecked(v) == 0;
8002+}
8003 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
8004
8005 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
8006@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
8007 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
8008
8009 #define atomic_inc(v) atomic_add(1, v)
8010+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
8011+{
8012+ atomic_add_unchecked(1, v);
8013+}
8014 #define atomic64_inc(v) atomic64_add(1, v)
8015+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
8016+{
8017+ atomic64_add_unchecked(1, v);
8018+}
8019
8020 #define atomic_dec(v) atomic_sub(1, v)
8021+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
8022+{
8023+ atomic_sub_unchecked(1, v);
8024+}
8025 #define atomic64_dec(v) atomic64_sub(1, v)
8026+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
8027+{
8028+ atomic64_sub_unchecked(1, v);
8029+}
8030
8031 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
8032 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
8033
8034 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
8035+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8036+{
8037+ return cmpxchg(&v->counter, old, new);
8038+}
8039 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
8040+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8041+{
8042+ return xchg(&v->counter, new);
8043+}
8044
8045 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
8046 {
8047- int c, old;
8048+ int c, old, new;
8049 c = atomic_read(v);
8050 for (;;) {
8051- if (unlikely(c == (u)))
8052+ if (unlikely(c == u))
8053 break;
8054- old = atomic_cmpxchg((v), c, c + (a));
8055+
8056+ asm volatile("addcc %2, %0, %0\n"
8057+
8058+#ifdef CONFIG_PAX_REFCOUNT
8059+ "tvs %%icc, 6\n"
8060+#endif
8061+
8062+ : "=r" (new)
8063+ : "0" (c), "ir" (a)
8064+ : "cc");
8065+
8066+ old = atomic_cmpxchg(v, c, new);
8067 if (likely(old == c))
8068 break;
8069 c = old;
8070@@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
8071 #define atomic64_cmpxchg(v, o, n) \
8072 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
8073 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
8074+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
8075+{
8076+ return xchg(&v->counter, new);
8077+}
8078
8079 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
8080 {
8081- long c, old;
8082+ long c, old, new;
8083 c = atomic64_read(v);
8084 for (;;) {
8085- if (unlikely(c == (u)))
8086+ if (unlikely(c == u))
8087 break;
8088- old = atomic64_cmpxchg((v), c, c + (a));
8089+
8090+ asm volatile("addcc %2, %0, %0\n"
8091+
8092+#ifdef CONFIG_PAX_REFCOUNT
8093+ "tvs %%xcc, 6\n"
8094+#endif
8095+
8096+ : "=r" (new)
8097+ : "0" (c), "ir" (a)
8098+ : "cc");
8099+
8100+ old = atomic64_cmpxchg(v, c, new);
8101 if (likely(old == c))
8102 break;
8103 c = old;
8104 }
8105- return c != (u);
8106+ return c != u;
8107 }
8108
8109 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
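
The sparc64 atomic hunks implement PAX_REFCOUNT: the checked paths (including the reworked __atomic_add_unless loops) perform the addition with addcc and follow it with "tvs %icc, 6" / "tvs %xcc, 6", trapping into the kernel's overflow handler when the signed result overflows, while the new *_unchecked variants keep the old wrapping semantics for counters that legitimately wrap. A portable sketch of the same policy, using a compiler builtin in place of the trap instruction:

    #include <stdio.h>
    #include <stdlib.h>

    /* Checked add: abort (standing in for the tvs trap) on signed overflow. */
    static int atomic_add_checked_sketch(int *counter, int a)
    {
        int result;
        if (__builtin_add_overflow(*counter, a, &result)) {
            fprintf(stderr, "refcount overflow detected\n");
            abort();                    /* the kernel would trap and report */
        }
        return *counter = result;
    }

    /* Unchecked add: deliberate wraparound, as atomic_add_unchecked() keeps. */
    static int atomic_add_unchecked_sketch(int *counter, int a)
    {
        return *counter = (int)((unsigned)*counter + (unsigned)a);
    }

    int main(void)
    {
        int c = 0x7fffffff;
        printf("unchecked wrap: %d\n", atomic_add_unchecked_sketch(&c, 1));
        c = 0x7fffffff;
        atomic_add_checked_sketch(&c, 1);   /* aborts here */
        return 0;
    }
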
8110diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
8111index 5bb6991..5c2132e 100644
8112--- a/arch/sparc/include/asm/cache.h
8113+++ b/arch/sparc/include/asm/cache.h
8114@@ -7,10 +7,12 @@
8115 #ifndef _SPARC_CACHE_H
8116 #define _SPARC_CACHE_H
8117
8118+#include <linux/const.h>
8119+
8120 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
8121
8122 #define L1_CACHE_SHIFT 5
8123-#define L1_CACHE_BYTES 32
8124+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8125
8126 #ifdef CONFIG_SPARC32
8127 #define SMP_CACHE_BYTES_SHIFT 5
8128diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
8129index ac74a2c..a9e58af 100644
8130--- a/arch/sparc/include/asm/elf_32.h
8131+++ b/arch/sparc/include/asm/elf_32.h
8132@@ -114,6 +114,13 @@ typedef struct {
8133
8134 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
8135
8136+#ifdef CONFIG_PAX_ASLR
8137+#define PAX_ELF_ET_DYN_BASE 0x10000UL
8138+
8139+#define PAX_DELTA_MMAP_LEN 16
8140+#define PAX_DELTA_STACK_LEN 16
8141+#endif
8142+
8143 /* This yields a mask that user programs can use to figure out what
8144 instruction set this cpu supports. This can NOT be done in userspace
8145 on Sparc. */
8146diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
8147index 370ca1e..d4f4a98 100644
8148--- a/arch/sparc/include/asm/elf_64.h
8149+++ b/arch/sparc/include/asm/elf_64.h
8150@@ -189,6 +189,13 @@ typedef struct {
8151 #define ELF_ET_DYN_BASE 0x0000010000000000UL
8152 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
8153
8154+#ifdef CONFIG_PAX_ASLR
8155+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
8156+
8157+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
8158+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
8159+#endif
8160+
8161 extern unsigned long sparc64_elf_hwcap;
8162 #define ELF_HWCAP sparc64_elf_hwcap
8163
8164diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
8165index 9b1c36d..209298b 100644
8166--- a/arch/sparc/include/asm/pgalloc_32.h
8167+++ b/arch/sparc/include/asm/pgalloc_32.h
8168@@ -33,6 +33,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
8169 }
8170
8171 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
8172+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
8173
8174 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
8175 unsigned long address)
8176diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
8177index bcfe063..b333142 100644
8178--- a/arch/sparc/include/asm/pgalloc_64.h
8179+++ b/arch/sparc/include/asm/pgalloc_64.h
8180@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
8181 }
8182
8183 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
8184+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
8185
8186 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
8187 {
8188diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
8189index 6fc1348..390c50a 100644
8190--- a/arch/sparc/include/asm/pgtable_32.h
8191+++ b/arch/sparc/include/asm/pgtable_32.h
8192@@ -50,6 +50,9 @@ extern unsigned long calc_highpages(void);
8193 #define PAGE_SHARED SRMMU_PAGE_SHARED
8194 #define PAGE_COPY SRMMU_PAGE_COPY
8195 #define PAGE_READONLY SRMMU_PAGE_RDONLY
8196+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
8197+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
8198+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
8199 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
8200
8201 /* Top-level page directory - dummy used by init-mm.
8202@@ -62,18 +65,18 @@ extern unsigned long ptr_in_current_pgd;
8203
8204 /* xwr */
8205 #define __P000 PAGE_NONE
8206-#define __P001 PAGE_READONLY
8207-#define __P010 PAGE_COPY
8208-#define __P011 PAGE_COPY
8209+#define __P001 PAGE_READONLY_NOEXEC
8210+#define __P010 PAGE_COPY_NOEXEC
8211+#define __P011 PAGE_COPY_NOEXEC
8212 #define __P100 PAGE_READONLY
8213 #define __P101 PAGE_READONLY
8214 #define __P110 PAGE_COPY
8215 #define __P111 PAGE_COPY
8216
8217 #define __S000 PAGE_NONE
8218-#define __S001 PAGE_READONLY
8219-#define __S010 PAGE_SHARED
8220-#define __S011 PAGE_SHARED
8221+#define __S001 PAGE_READONLY_NOEXEC
8222+#define __S010 PAGE_SHARED_NOEXEC
8223+#define __S011 PAGE_SHARED_NOEXEC
8224 #define __S100 PAGE_READONLY
8225 #define __S101 PAGE_READONLY
8226 #define __S110 PAGE_SHARED
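
The __P/__S rewrite above only touches the table slots whose index lacks the execute bit: the tables are indexed by the mapping's x/w/r bits, so any mapping created readable and/or writable but without PROT_EXEC now gets a *_NOEXEC descriptor. A sketch of that lookup, with table entries reduced to strings (the real kernel indexes protection_map[] with VM_* flags, which match PROT_* on Linux):

#include <stdio.h>
#include <sys/mman.h>

/* Private-mapping table in index order 0..7, mirroring __P000..__P111
 * after the patch. Bit 0 = read, bit 1 = write, bit 2 = exec. */
static const char *prot_private[8] = {
    "PAGE_NONE",            /* --- */
    "PAGE_READONLY_NOEXEC", /* r-- */
    "PAGE_COPY_NOEXEC",     /* -w- */
    "PAGE_COPY_NOEXEC",     /* rw- */
    "PAGE_READONLY",        /* --x */
    "PAGE_READONLY",        /* r-x */
    "PAGE_COPY",            /* -wx */
    "PAGE_COPY",            /* rwx */
};

int main(void)
{
    int prot = PROT_READ | PROT_WRITE;      /* typical data mapping */
    printf("%s\n", prot_private[prot & 7]); /* -> PAGE_COPY_NOEXEC */
    return 0;
}
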
8227diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
8228index 79da178..c2eede8 100644
8229--- a/arch/sparc/include/asm/pgtsrmmu.h
8230+++ b/arch/sparc/include/asm/pgtsrmmu.h
8231@@ -115,6 +115,11 @@
8232 SRMMU_EXEC | SRMMU_REF)
8233 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
8234 SRMMU_EXEC | SRMMU_REF)
8235+
8236+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
8237+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
8238+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
8239+
8240 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
8241 SRMMU_DIRTY | SRMMU_REF)
8242
8243diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
8244index 9689176..63c18ea 100644
8245--- a/arch/sparc/include/asm/spinlock_64.h
8246+++ b/arch/sparc/include/asm/spinlock_64.h
8247@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
8248
8249 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
8250
8251-static void inline arch_read_lock(arch_rwlock_t *lock)
8252+static inline void arch_read_lock(arch_rwlock_t *lock)
8253 {
8254 unsigned long tmp1, tmp2;
8255
8256 __asm__ __volatile__ (
8257 "1: ldsw [%2], %0\n"
8258 " brlz,pn %0, 2f\n"
8259-"4: add %0, 1, %1\n"
8260+"4: addcc %0, 1, %1\n"
8261+
8262+#ifdef CONFIG_PAX_REFCOUNT
8263+" tvs %%icc, 6\n"
8264+#endif
8265+
8266 " cas [%2], %0, %1\n"
8267 " cmp %0, %1\n"
8268 " bne,pn %%icc, 1b\n"
8269@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
8270 " .previous"
8271 : "=&r" (tmp1), "=&r" (tmp2)
8272 : "r" (lock)
8273- : "memory");
8274+ : "memory", "cc");
8275 }
8276
8277-static int inline arch_read_trylock(arch_rwlock_t *lock)
8278+static inline int arch_read_trylock(arch_rwlock_t *lock)
8279 {
8280 int tmp1, tmp2;
8281
8282@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
8283 "1: ldsw [%2], %0\n"
8284 " brlz,a,pn %0, 2f\n"
8285 " mov 0, %0\n"
8286-" add %0, 1, %1\n"
8287+" addcc %0, 1, %1\n"
8288+
8289+#ifdef CONFIG_PAX_REFCOUNT
8290+" tvs %%icc, 6\n"
8291+#endif
8292+
8293 " cas [%2], %0, %1\n"
8294 " cmp %0, %1\n"
8295 " bne,pn %%icc, 1b\n"
8296@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
8297 return tmp1;
8298 }
8299
8300-static void inline arch_read_unlock(arch_rwlock_t *lock)
8301+static inline void arch_read_unlock(arch_rwlock_t *lock)
8302 {
8303 unsigned long tmp1, tmp2;
8304
8305 __asm__ __volatile__(
8306 "1: lduw [%2], %0\n"
8307-" sub %0, 1, %1\n"
8308+" subcc %0, 1, %1\n"
8309+
8310+#ifdef CONFIG_PAX_REFCOUNT
8311+" tvs %%icc, 6\n"
8312+#endif
8313+
8314 " cas [%2], %0, %1\n"
8315 " cmp %0, %1\n"
8316 " bne,pn %%xcc, 1b\n"
8317@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
8318 : "memory");
8319 }
8320
8321-static void inline arch_write_lock(arch_rwlock_t *lock)
8322+static inline void arch_write_lock(arch_rwlock_t *lock)
8323 {
8324 unsigned long mask, tmp1, tmp2;
8325
8326@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
8327 : "memory");
8328 }
8329
8330-static void inline arch_write_unlock(arch_rwlock_t *lock)
8331+static inline void arch_write_unlock(arch_rwlock_t *lock)
8332 {
8333 __asm__ __volatile__(
8334 " stw %%g0, [%0]"
8335@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
8336 : "memory");
8337 }
8338
8339-static int inline arch_write_trylock(arch_rwlock_t *lock)
8340+static inline int arch_write_trylock(arch_rwlock_t *lock)
8341 {
8342 unsigned long mask, tmp1, tmp2, result;
8343
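
All of the rwlock fast paths above share one shape: load the counter, compute the new value, cas it back, retry on mismatch; the patch only swaps add/sub for addcc/subcc plus the overflow trap. A minimal C11 sketch of that acquire loop (type and function names are illustrative, and a real lock also needs the writer side and backoff):

#include <stdatomic.h>
#include <stdbool.h>

/* Reader count; negative means a writer holds the lock, matching the
 * "brlz,pn %0, 2f" writer test in arch_read_lock above. */
typedef struct { _Atomic int lock; } rwlock_sketch_t;

static bool read_trylock_sketch(rwlock_sketch_t *l)
{
    int old = atomic_load_explicit(&l->lock, memory_order_relaxed);
    for (;;) {
        if (old < 0)            /* writer active: fail */
            return false;
        /* The cas/cmp/bne retry, rolled into one C11 primitive;
         * on failure "old" is reloaded with the current value. */
        if (atomic_compare_exchange_weak_explicit(&l->lock, &old, old + 1,
                                                  memory_order_acquire,
                                                  memory_order_relaxed))
            return true;        /* one more reader now holds the lock */
    }
}

int main(void)
{
    rwlock_sketch_t l = { 0 };
    return read_trylock_sketch(&l) ? 0 : 1;
}
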
8344diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
8345index 25849ae..924c54b 100644
8346--- a/arch/sparc/include/asm/thread_info_32.h
8347+++ b/arch/sparc/include/asm/thread_info_32.h
8348@@ -49,6 +49,8 @@ struct thread_info {
8349 unsigned long w_saved;
8350
8351 struct restart_block restart_block;
8352+
8353+ unsigned long lowest_stack;
8354 };
8355
8356 /*
8357diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
8358index 269bd92..e46a9b8 100644
8359--- a/arch/sparc/include/asm/thread_info_64.h
8360+++ b/arch/sparc/include/asm/thread_info_64.h
8361@@ -63,6 +63,8 @@ struct thread_info {
8362 struct pt_regs *kern_una_regs;
8363 unsigned int kern_una_insn;
8364
8365+ unsigned long lowest_stack;
8366+
8367 unsigned long fpregs[0] __attribute__ ((aligned(64)));
8368 };
8369
8370@@ -192,10 +194,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
8371 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
8372 /* flag bit 6 is available */
8373 #define TIF_32BIT 7 /* 32-bit binary */
8374-/* flag bit 8 is available */
8375+#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */
8376 #define TIF_SECCOMP 9 /* secure computing */
8377 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
8378 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
8379+
8380 /* NOTE: Thread flags >= 12 should be ones we have no interest
8381 * in using in assembly, else we can't use the mask as
8382 * an immediate value in instructions such as andcc.
8383@@ -214,12 +217,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
8384 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
8385 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
8386 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
8387+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
8388
8389 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
8390 _TIF_DO_NOTIFY_RESUME_MASK | \
8391 _TIF_NEED_RESCHED)
8392 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
8393
8394+#define _TIF_WORK_SYSCALL \
8395+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
8396+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
8397+
8398+
8399 /*
8400 * Thread-synchronous status.
8401 *
8402diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
8403index 0167d26..767bb0c 100644
8404--- a/arch/sparc/include/asm/uaccess.h
8405+++ b/arch/sparc/include/asm/uaccess.h
8406@@ -1,5 +1,6 @@
8407 #ifndef ___ASM_SPARC_UACCESS_H
8408 #define ___ASM_SPARC_UACCESS_H
8409+
8410 #if defined(__sparc__) && defined(__arch64__)
8411 #include <asm/uaccess_64.h>
8412 #else
8413diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
8414index 53a28dd..50c38c3 100644
8415--- a/arch/sparc/include/asm/uaccess_32.h
8416+++ b/arch/sparc/include/asm/uaccess_32.h
8417@@ -250,27 +250,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
8418
8419 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
8420 {
8421- if (n && __access_ok((unsigned long) to, n))
8422+ if ((long)n < 0)
8423+ return n;
8424+
8425+ if (n && __access_ok((unsigned long) to, n)) {
8426+ if (!__builtin_constant_p(n))
8427+ check_object_size(from, n, true);
8428 return __copy_user(to, (__force void __user *) from, n);
8429- else
8430+ } else
8431 return n;
8432 }
8433
8434 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
8435 {
8436+ if ((long)n < 0)
8437+ return n;
8438+
8439+ if (!__builtin_constant_p(n))
8440+ check_object_size(from, n, true);
8441+
8442 return __copy_user(to, (__force void __user *) from, n);
8443 }
8444
8445 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
8446 {
8447- if (n && __access_ok((unsigned long) from, n))
8448+ if ((long)n < 0)
8449+ return n;
8450+
8451+ if (n && __access_ok((unsigned long) from, n)) {
8452+ if (!__builtin_constant_p(n))
8453+ check_object_size(to, n, false);
8454 return __copy_user((__force void __user *) to, from, n);
8455- else
8456+ } else
8457 return n;
8458 }
8459
8460 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
8461 {
8462+ if ((long)n < 0)
8463+ return n;
8464+
8465 return __copy_user((__force void __user *) to, from, n);
8466 }
8467
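
Each copy helper above now rejects (long)n < 0 up front: a negative signed length that reaches the unsigned parameter lands in the top half of the unsigned range, and returning n reports "n bytes left uncopied" rather than attempting an enormous transfer. A sketch of the failure mode being guarded against (copy_sketch is an illustrative stand-in, not the kernel helper):

#include <stdio.h>

static unsigned long copy_sketch(void *to, const void *from, unsigned long n)
{
    (void)to; (void)from;
    if ((long)n < 0)    /* the check the patch adds */
        return n;       /* total failure: nothing copied */
    /* ... access_ok()-style bounds check and the real copy go here ... */
    return 0;
}

int main(void)
{
    char buf[16];
    int len = -1;       /* signedness bug somewhere upstream */
    unsigned long left = copy_sketch(buf, "x", (unsigned long)len);
    printf("left = %lu (%#lx)\n", left, left);
    return 0;
}
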
8468diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
8469index e562d3c..191f176 100644
8470--- a/arch/sparc/include/asm/uaccess_64.h
8471+++ b/arch/sparc/include/asm/uaccess_64.h
8472@@ -10,6 +10,7 @@
8473 #include <linux/compiler.h>
8474 #include <linux/string.h>
8475 #include <linux/thread_info.h>
8476+#include <linux/kernel.h>
8477 #include <asm/asi.h>
8478 #include <asm/spitfire.h>
8479 #include <asm-generic/uaccess-unaligned.h>
8480@@ -214,8 +215,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
8481 static inline unsigned long __must_check
8482 copy_from_user(void *to, const void __user *from, unsigned long size)
8483 {
8484- unsigned long ret = ___copy_from_user(to, from, size);
8485+ unsigned long ret;
8486
8487+ if ((long)size < 0 || size > INT_MAX)
8488+ return size;
8489+
8490+ if (!__builtin_constant_p(size))
8491+ check_object_size(to, size, false);
8492+
8493+ ret = ___copy_from_user(to, from, size);
8494 if (unlikely(ret))
8495 ret = copy_from_user_fixup(to, from, size);
8496
8497@@ -231,8 +239,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
8498 static inline unsigned long __must_check
8499 copy_to_user(void __user *to, const void *from, unsigned long size)
8500 {
8501- unsigned long ret = ___copy_to_user(to, from, size);
8502+ unsigned long ret;
8503
8504+ if ((long)size < 0 || size > INT_MAX)
8505+ return size;
8506+
8507+ if (!__builtin_constant_p(size))
8508+ check_object_size(from, size, true);
8509+
8510+ ret = ___copy_to_user(to, from, size);
8511 if (unlikely(ret))
8512 ret = copy_to_user_fixup(to, from, size);
8513 return ret;
8514diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
8515index 6cf591b..b49e65a 100644
8516--- a/arch/sparc/kernel/Makefile
8517+++ b/arch/sparc/kernel/Makefile
8518@@ -3,7 +3,7 @@
8519 #
8520
8521 asflags-y := -ansi
8522-ccflags-y := -Werror
8523+#ccflags-y := -Werror
8524
8525 extra-y := head_$(BITS).o
8526
8527diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
8528index be8e862..5b50b12 100644
8529--- a/arch/sparc/kernel/process_32.c
8530+++ b/arch/sparc/kernel/process_32.c
8531@@ -126,14 +126,14 @@ void show_regs(struct pt_regs *r)
8532
8533 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
8534 r->psr, r->pc, r->npc, r->y, print_tainted());
8535- printk("PC: <%pS>\n", (void *) r->pc);
8536+ printk("PC: <%pA>\n", (void *) r->pc);
8537 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
8538 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
8539 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
8540 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
8541 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
8542 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
8543- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
8544+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
8545
8546 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
8547 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
8548@@ -168,7 +168,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
8549 rw = (struct reg_window32 *) fp;
8550 pc = rw->ins[7];
8551 printk("[%08lx : ", pc);
8552- printk("%pS ] ", (void *) pc);
8553+ printk("%pA ] ", (void *) pc);
8554 fp = rw->ins[6];
8555 } while (++count < 16);
8556 printk("\n");
8557diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
8558index cdb80b2..5ca141d 100644
8559--- a/arch/sparc/kernel/process_64.c
8560+++ b/arch/sparc/kernel/process_64.c
8561@@ -181,14 +181,14 @@ static void show_regwindow(struct pt_regs *regs)
8562 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
8563 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
8564 if (regs->tstate & TSTATE_PRIV)
8565- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
8566+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
8567 }
8568
8569 void show_regs(struct pt_regs *regs)
8570 {
8571 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
8572 regs->tpc, regs->tnpc, regs->y, print_tainted());
8573- printk("TPC: <%pS>\n", (void *) regs->tpc);
8574+ printk("TPC: <%pA>\n", (void *) regs->tpc);
8575 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
8576 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
8577 regs->u_regs[3]);
8578@@ -201,7 +201,7 @@ void show_regs(struct pt_regs *regs)
8579 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
8580 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
8581 regs->u_regs[15]);
8582- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
8583+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
8584 show_regwindow(regs);
8585 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
8586 }
8587@@ -290,7 +290,7 @@ void arch_trigger_all_cpu_backtrace(void)
8588 ((tp && tp->task) ? tp->task->pid : -1));
8589
8590 if (gp->tstate & TSTATE_PRIV) {
8591- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
8592+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
8593 (void *) gp->tpc,
8594 (void *) gp->o7,
8595 (void *) gp->i7,
8596diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
8597index 1303021..c2a6321 100644
8598--- a/arch/sparc/kernel/prom_common.c
8599+++ b/arch/sparc/kernel/prom_common.c
8600@@ -143,7 +143,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
8601
8602 unsigned int prom_early_allocated __initdata;
8603
8604-static struct of_pdt_ops prom_sparc_ops __initdata = {
8605+static struct of_pdt_ops prom_sparc_ops __initconst = {
8606 .nextprop = prom_common_nextprop,
8607 .getproplen = prom_getproplen,
8608 .getproperty = prom_getproperty,
8609diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
8610index 7ff45e4..a58f271 100644
8611--- a/arch/sparc/kernel/ptrace_64.c
8612+++ b/arch/sparc/kernel/ptrace_64.c
8613@@ -1057,6 +1057,10 @@ long arch_ptrace(struct task_struct *child, long request,
8614 return ret;
8615 }
8616
8617+#ifdef CONFIG_GRKERNSEC_SETXID
8618+extern void gr_delayed_cred_worker(void);
8619+#endif
8620+
8621 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
8622 {
8623 int ret = 0;
8624@@ -1064,6 +1068,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
8625 /* do the secure computing check first */
8626 secure_computing_strict(regs->u_regs[UREG_G1]);
8627
8628+#ifdef CONFIG_GRKERNSEC_SETXID
8629+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8630+ gr_delayed_cred_worker();
8631+#endif
8632+
8633 if (test_thread_flag(TIF_SYSCALL_TRACE))
8634 ret = tracehook_report_syscall_entry(regs);
8635
8636@@ -1084,6 +1093,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
8637
8638 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
8639 {
8640+#ifdef CONFIG_GRKERNSEC_SETXID
8641+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8642+ gr_delayed_cred_worker();
8643+#endif
8644+
8645 audit_syscall_exit(regs);
8646
8647 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
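
The GRKERNSEC_SETXID hook above is a deferred-work pattern: another path sets TIF_GRSEC_SETXID on the thread, and the next syscall entry or exit atomically test-and-clears it so gr_delayed_cred_worker() runs exactly once. A C11 sketch of that consume-once flag (the bit number is taken from the thread_info hunk; the worker body is illustrative):

#include <stdatomic.h>
#include <stdio.h>

#define TIF_SKETCH_SETXID (1u << 8)      /* bit 8, as in the patch */

static _Atomic unsigned int thread_flags;

static void delayed_cred_worker_sketch(void)
{
    puts("applying deferred credential change");
}

/* Called at the syscall boundary: fetch-and-clear is atomic, so two
 * concurrent callers cannot both observe the bit set. */
static void syscall_hook_sketch(void)
{
    unsigned int old = atomic_fetch_and(&thread_flags, ~TIF_SKETCH_SETXID);
    if (old & TIF_SKETCH_SETXID)
        delayed_cred_worker_sketch();
}

int main(void)
{
    atomic_fetch_or(&thread_flags, TIF_SKETCH_SETXID); /* e.g. setuid() arming it */
    syscall_hook_sketch();   /* runs the worker */
    syscall_hook_sketch();   /* flag already consumed: no-op */
    return 0;
}
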
8648diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
8649index 2da0bdc..79128d2 100644
8650--- a/arch/sparc/kernel/sys_sparc_32.c
8651+++ b/arch/sparc/kernel/sys_sparc_32.c
8652@@ -52,7 +52,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8653 if (len > TASK_SIZE - PAGE_SIZE)
8654 return -ENOMEM;
8655 if (!addr)
8656- addr = TASK_UNMAPPED_BASE;
8657+ addr = current->mm->mmap_base;
8658
8659 info.flags = 0;
8660 info.length = len;
8661diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
8662index 708bc29..f0129cb 100644
8663--- a/arch/sparc/kernel/sys_sparc_64.c
8664+++ b/arch/sparc/kernel/sys_sparc_64.c
8665@@ -90,13 +90,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8666 struct vm_area_struct * vma;
8667 unsigned long task_size = TASK_SIZE;
8668 int do_color_align;
8669+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8670 struct vm_unmapped_area_info info;
8671
8672 if (flags & MAP_FIXED) {
8673 /* We do not accept a shared mapping if it would violate
8674 * cache aliasing constraints.
8675 */
8676- if ((flags & MAP_SHARED) &&
8677+ if ((filp || (flags & MAP_SHARED)) &&
8678 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
8679 return -EINVAL;
8680 return addr;
8681@@ -111,6 +112,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8682 if (filp || (flags & MAP_SHARED))
8683 do_color_align = 1;
8684
8685+#ifdef CONFIG_PAX_RANDMMAP
8686+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8687+#endif
8688+
8689 if (addr) {
8690 if (do_color_align)
8691 addr = COLOR_ALIGN(addr, pgoff);
8692@@ -118,14 +123,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8693 addr = PAGE_ALIGN(addr);
8694
8695 vma = find_vma(mm, addr);
8696- if (task_size - len >= addr &&
8697- (!vma || addr + len <= vma->vm_start))
8698+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8699 return addr;
8700 }
8701
8702 info.flags = 0;
8703 info.length = len;
8704- info.low_limit = TASK_UNMAPPED_BASE;
8705+ info.low_limit = mm->mmap_base;
8706 info.high_limit = min(task_size, VA_EXCLUDE_START);
8707 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
8708 info.align_offset = pgoff << PAGE_SHIFT;
8709@@ -134,6 +138,12 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8710 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
8711 VM_BUG_ON(addr != -ENOMEM);
8712 info.low_limit = VA_EXCLUDE_END;
8713+
8714+#ifdef CONFIG_PAX_RANDMMAP
8715+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8716+ info.low_limit += mm->delta_mmap;
8717+#endif
8718+
8719 info.high_limit = task_size;
8720 addr = vm_unmapped_area(&info);
8721 }
8722@@ -151,6 +161,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8723 unsigned long task_size = STACK_TOP32;
8724 unsigned long addr = addr0;
8725 int do_color_align;
8726+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8727 struct vm_unmapped_area_info info;
8728
8729 /* This should only ever run for 32-bit processes. */
8730@@ -160,7 +171,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8731 /* We do not accept a shared mapping if it would violate
8732 * cache aliasing constraints.
8733 */
8734- if ((flags & MAP_SHARED) &&
8735+ if ((filp || (flags & MAP_SHARED)) &&
8736 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
8737 return -EINVAL;
8738 return addr;
8739@@ -173,6 +184,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8740 if (filp || (flags & MAP_SHARED))
8741 do_color_align = 1;
8742
8743+#ifdef CONFIG_PAX_RANDMMAP
8744+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8745+#endif
8746+
8747 /* requesting a specific address */
8748 if (addr) {
8749 if (do_color_align)
8750@@ -181,8 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8751 addr = PAGE_ALIGN(addr);
8752
8753 vma = find_vma(mm, addr);
8754- if (task_size - len >= addr &&
8755- (!vma || addr + len <= vma->vm_start))
8756+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8757 return addr;
8758 }
8759
8760@@ -204,6 +218,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8761 VM_BUG_ON(addr != -ENOMEM);
8762 info.flags = 0;
8763 info.low_limit = TASK_UNMAPPED_BASE;
8764+
8765+#ifdef CONFIG_PAX_RANDMMAP
8766+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8767+ info.low_limit += mm->delta_mmap;
8768+#endif
8769+
8770 info.high_limit = STACK_TOP32;
8771 addr = vm_unmapped_area(&info);
8772 }
8773@@ -264,6 +284,10 @@ static unsigned long mmap_rnd(void)
8774 {
8775 unsigned long rnd = 0UL;
8776
8777+#ifdef CONFIG_PAX_RANDMMAP
8778+	if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
8779+#endif
8780+
8781 if (current->flags & PF_RANDOMIZE) {
8782 unsigned long val = get_random_int();
8783 if (test_thread_flag(TIF_32BIT))
8784@@ -289,6 +313,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8785 gap == RLIM_INFINITY ||
8786 sysctl_legacy_va_layout) {
8787 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
8788+
8789+#ifdef CONFIG_PAX_RANDMMAP
8790+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8791+ mm->mmap_base += mm->delta_mmap;
8792+#endif
8793+
8794 mm->get_unmapped_area = arch_get_unmapped_area;
8795 mm->unmap_area = arch_unmap_area;
8796 } else {
8797@@ -301,6 +331,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8798 gap = (task_size / 6 * 5);
8799
8800 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
8801+
8802+#ifdef CONFIG_PAX_RANDMMAP
8803+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8804+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
8805+#endif
8806+
8807 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
8808 mm->unmap_area = arch_unmap_area_topdown;
8809 }
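
With MF_PAX_RANDMMAP set, the arch_pick_mmap_layout() changes above fold mm->delta_mmap (and, in the top-down layout, mm->delta_stack as well) into mmap_base itself, on top of the existing PF_RANDOMIZE factor. A sketch of the two layouts' base arithmetic (all constants are placeholders, random_factor is omitted, and the alignment is done downward here for simplicity):

#include <stdio.h>

#define PAGE_SHIFT  13                     /* placeholder */
#define TASK_SIZE   (1UL << 44)            /* placeholder */
#define TASK_UNMAPPED_BASE (TASK_SIZE / 3) /* placeholder */

int main(void)
{
    /* Per-exec random deltas, already page-shifted as PaX stores them. */
    unsigned long delta_mmap  = 0x12345UL << PAGE_SHIFT;
    unsigned long delta_stack = 0x00abcUL << PAGE_SHIFT;
    unsigned long gap = 8UL << 20;         /* stack rlimit gap */

    /* Bottom-up (legacy) layout, as patched above: */
    unsigned long base_up = TASK_UNMAPPED_BASE + delta_mmap;

    /* Top-down layout, as patched above: */
    unsigned long base_down = (TASK_SIZE - gap) & ~((1UL << PAGE_SHIFT) - 1);
    base_down -= delta_mmap + delta_stack;

    printf("bottom-up mmap_base: %#lx\n", base_up);
    printf("top-down  mmap_base: %#lx\n", base_down);
    return 0;
}
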
8810diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
8811index e0fed77..604a7e5 100644
8812--- a/arch/sparc/kernel/syscalls.S
8813+++ b/arch/sparc/kernel/syscalls.S
8814@@ -58,7 +58,7 @@ sys32_rt_sigreturn:
8815 #endif
8816 .align 32
8817 1: ldx [%g6 + TI_FLAGS], %l5
8818- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8819+ andcc %l5, _TIF_WORK_SYSCALL, %g0
8820 be,pt %icc, rtrap
8821 nop
8822 call syscall_trace_leave
8823@@ -190,7 +190,7 @@ linux_sparc_syscall32:
8824
8825 srl %i5, 0, %o5 ! IEU1
8826 srl %i2, 0, %o2 ! IEU0 Group
8827- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8828+ andcc %l0, _TIF_WORK_SYSCALL, %g0
8829 bne,pn %icc, linux_syscall_trace32 ! CTI
8830 mov %i0, %l5 ! IEU1
8831 call %l7 ! CTI Group brk forced
8832@@ -213,7 +213,7 @@ linux_sparc_syscall:
8833
8834 mov %i3, %o3 ! IEU1
8835 mov %i4, %o4 ! IEU0 Group
8836- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8837+ andcc %l0, _TIF_WORK_SYSCALL, %g0
8838 bne,pn %icc, linux_syscall_trace ! CTI Group
8839 mov %i0, %l5 ! IEU0
8840 2: call %l7 ! CTI Group brk forced
8841@@ -229,7 +229,7 @@ ret_sys_call:
8842
8843 cmp %o0, -ERESTART_RESTARTBLOCK
8844 bgeu,pn %xcc, 1f
8845- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8846+ andcc %l0, _TIF_WORK_SYSCALL, %g0
8847 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
8848
8849 2:
8850diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
8851index 654e8aa..45f431b 100644
8852--- a/arch/sparc/kernel/sysfs.c
8853+++ b/arch/sparc/kernel/sysfs.c
8854@@ -266,7 +266,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
8855 return NOTIFY_OK;
8856 }
8857
8858-static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
8859+static struct notifier_block sysfs_cpu_nb = {
8860 .notifier_call = sysfs_cpu_notify,
8861 };
8862
8863diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
8864index a5785ea..405c5f7 100644
8865--- a/arch/sparc/kernel/traps_32.c
8866+++ b/arch/sparc/kernel/traps_32.c
8867@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
8868 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
8869 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
8870
8871+extern void gr_handle_kernel_exploit(void);
8872+
8873 void die_if_kernel(char *str, struct pt_regs *regs)
8874 {
8875 static int die_counter;
8876@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
8877 count++ < 30 &&
8878 (((unsigned long) rw) >= PAGE_OFFSET) &&
8879 !(((unsigned long) rw) & 0x7)) {
8880- printk("Caller[%08lx]: %pS\n", rw->ins[7],
8881+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
8882 (void *) rw->ins[7]);
8883 rw = (struct reg_window32 *)rw->ins[6];
8884 }
8885 }
8886 printk("Instruction DUMP:");
8887 instruction_dump ((unsigned long *) regs->pc);
8888- if(regs->psr & PSR_PS)
8889+ if(regs->psr & PSR_PS) {
8890+ gr_handle_kernel_exploit();
8891 do_exit(SIGKILL);
8892+ }
8893 do_exit(SIGSEGV);
8894 }
8895
8896diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
8897index e7ecf15..6520e65 100644
8898--- a/arch/sparc/kernel/traps_64.c
8899+++ b/arch/sparc/kernel/traps_64.c
8900@@ -76,7 +76,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
8901 i + 1,
8902 p->trapstack[i].tstate, p->trapstack[i].tpc,
8903 p->trapstack[i].tnpc, p->trapstack[i].tt);
8904- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
8905+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
8906 }
8907 }
8908
8909@@ -96,6 +96,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
8910
8911 lvl -= 0x100;
8912 if (regs->tstate & TSTATE_PRIV) {
8913+
8914+#ifdef CONFIG_PAX_REFCOUNT
8915+ if (lvl == 6)
8916+ pax_report_refcount_overflow(regs);
8917+#endif
8918+
8919 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
8920 die_if_kernel(buffer, regs);
8921 }
8922@@ -114,11 +120,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
8923 void bad_trap_tl1(struct pt_regs *regs, long lvl)
8924 {
8925 char buffer[32];
8926-
8927+
8928 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
8929 0, lvl, SIGTRAP) == NOTIFY_STOP)
8930 return;
8931
8932+#ifdef CONFIG_PAX_REFCOUNT
8933+ if (lvl == 6)
8934+ pax_report_refcount_overflow(regs);
8935+#endif
8936+
8937 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
8938
8939 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
8940@@ -1142,7 +1153,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
8941 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
8942 printk("%s" "ERROR(%d): ",
8943 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
8944- printk("TPC<%pS>\n", (void *) regs->tpc);
8945+ printk("TPC<%pA>\n", (void *) regs->tpc);
8946 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
8947 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
8948 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
8949@@ -1749,7 +1760,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
8950 smp_processor_id(),
8951 (type & 0x1) ? 'I' : 'D',
8952 regs->tpc);
8953- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
8954+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
8955 panic("Irrecoverable Cheetah+ parity error.");
8956 }
8957
8958@@ -1757,7 +1768,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
8959 smp_processor_id(),
8960 (type & 0x1) ? 'I' : 'D',
8961 regs->tpc);
8962- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
8963+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
8964 }
8965
8966 struct sun4v_error_entry {
8967@@ -2104,9 +2115,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
8968
8969 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
8970 regs->tpc, tl);
8971- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
8972+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
8973 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
8974- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
8975+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
8976 (void *) regs->u_regs[UREG_I7]);
8977 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
8978 "pte[%lx] error[%lx]\n",
8979@@ -2128,9 +2139,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
8980
8981 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
8982 regs->tpc, tl);
8983- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
8984+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
8985 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
8986- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
8987+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
8988 (void *) regs->u_regs[UREG_I7]);
8989 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
8990 "pte[%lx] error[%lx]\n",
8991@@ -2336,13 +2347,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
8992 fp = (unsigned long)sf->fp + STACK_BIAS;
8993 }
8994
8995- printk(" [%016lx] %pS\n", pc, (void *) pc);
8996+ printk(" [%016lx] %pA\n", pc, (void *) pc);
8997 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
8998 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
8999 int index = tsk->curr_ret_stack;
9000 if (tsk->ret_stack && index >= graph) {
9001 pc = tsk->ret_stack[index - graph].ret;
9002- printk(" [%016lx] %pS\n", pc, (void *) pc);
9003+ printk(" [%016lx] %pA\n", pc, (void *) pc);
9004 graph++;
9005 }
9006 }
9007@@ -2367,6 +2378,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
9008 return (struct reg_window *) (fp + STACK_BIAS);
9009 }
9010
9011+extern void gr_handle_kernel_exploit(void);
9012+
9013 void die_if_kernel(char *str, struct pt_regs *regs)
9014 {
9015 static int die_counter;
9016@@ -2395,7 +2408,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
9017 while (rw &&
9018 count++ < 30 &&
9019 kstack_valid(tp, (unsigned long) rw)) {
9020- printk("Caller[%016lx]: %pS\n", rw->ins[7],
9021+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
9022 (void *) rw->ins[7]);
9023
9024 rw = kernel_stack_up(rw);
9025@@ -2408,8 +2421,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
9026 }
9027 user_instruction_dump ((unsigned int __user *) regs->tpc);
9028 }
9029- if (regs->tstate & TSTATE_PRIV)
9030+ if (regs->tstate & TSTATE_PRIV) {
9031+ gr_handle_kernel_exploit();
9032 do_exit(SIGKILL);
9033+ }
9034 do_exit(SIGSEGV);
9035 }
9036 EXPORT_SYMBOL(die_if_kernel);
9037diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
9038index 8201c25e..072a2a7 100644
9039--- a/arch/sparc/kernel/unaligned_64.c
9040+++ b/arch/sparc/kernel/unaligned_64.c
9041@@ -286,7 +286,7 @@ static void log_unaligned(struct pt_regs *regs)
9042 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
9043
9044 if (__ratelimit(&ratelimit)) {
9045- printk("Kernel unaligned access at TPC[%lx] %pS\n",
9046+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
9047 regs->tpc, (void *) regs->tpc);
9048 }
9049 }
9050diff --git a/arch/sparc/kernel/us3_cpufreq.c b/arch/sparc/kernel/us3_cpufreq.c
9051index eb1624b..f9f4ddb 100644
9052--- a/arch/sparc/kernel/us3_cpufreq.c
9053+++ b/arch/sparc/kernel/us3_cpufreq.c
9054@@ -18,14 +18,12 @@
9055 #include <asm/head.h>
9056 #include <asm/timer.h>
9057
9058-static struct cpufreq_driver *cpufreq_us3_driver;
9059-
9060 struct us3_freq_percpu_info {
9061 struct cpufreq_frequency_table table[4];
9062 };
9063
9064 /* Indexed by cpu number. */
9065-static struct us3_freq_percpu_info *us3_freq_table;
9066+static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
9067
9068 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
9069 * in the Safari config register.
9070@@ -191,12 +189,25 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
9071
9072 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
9073 {
9074- if (cpufreq_us3_driver)
9075- us3_set_cpu_divider_index(policy->cpu, 0);
9076+ us3_set_cpu_divider_index(policy->cpu, 0);
9077
9078 return 0;
9079 }
9080
9081+static int __init us3_freq_init(void);
9082+static void __exit us3_freq_exit(void);
9083+
9084+static struct cpufreq_driver cpufreq_us3_driver = {
9085+ .init = us3_freq_cpu_init,
9086+ .verify = us3_freq_verify,
9087+ .target = us3_freq_target,
9088+ .get = us3_freq_get,
9089+ .exit = us3_freq_cpu_exit,
9090+ .owner = THIS_MODULE,
9091+ .name = "UltraSPARC-III",
9092+
9093+};
9094+
9095 static int __init us3_freq_init(void)
9096 {
9097 unsigned long manuf, impl, ver;
9098@@ -213,57 +224,15 @@ static int __init us3_freq_init(void)
9099 (impl == CHEETAH_IMPL ||
9100 impl == CHEETAH_PLUS_IMPL ||
9101 impl == JAGUAR_IMPL ||
9102- impl == PANTHER_IMPL)) {
9103- struct cpufreq_driver *driver;
9104-
9105- ret = -ENOMEM;
9106- driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
9107- if (!driver)
9108- goto err_out;
9109-
9110- us3_freq_table = kzalloc(
9111- (NR_CPUS * sizeof(struct us3_freq_percpu_info)),
9112- GFP_KERNEL);
9113- if (!us3_freq_table)
9114- goto err_out;
9115-
9116- driver->init = us3_freq_cpu_init;
9117- driver->verify = us3_freq_verify;
9118- driver->target = us3_freq_target;
9119- driver->get = us3_freq_get;
9120- driver->exit = us3_freq_cpu_exit;
9121- driver->owner = THIS_MODULE,
9122- strcpy(driver->name, "UltraSPARC-III");
9123-
9124- cpufreq_us3_driver = driver;
9125- ret = cpufreq_register_driver(driver);
9126- if (ret)
9127- goto err_out;
9128-
9129- return 0;
9130-
9131-err_out:
9132- if (driver) {
9133- kfree(driver);
9134- cpufreq_us3_driver = NULL;
9135- }
9136- kfree(us3_freq_table);
9137- us3_freq_table = NULL;
9138- return ret;
9139- }
9140+ impl == PANTHER_IMPL))
9141+		return cpufreq_register_driver(&cpufreq_us3_driver);
9142
9143 return -ENODEV;
9144 }
9145
9146 static void __exit us3_freq_exit(void)
9147 {
9148- if (cpufreq_us3_driver) {
9149- cpufreq_unregister_driver(cpufreq_us3_driver);
9150- kfree(cpufreq_us3_driver);
9151- cpufreq_us3_driver = NULL;
9152- kfree(us3_freq_table);
9153- us3_freq_table = NULL;
9154- }
9155+	cpufreq_unregister_driver(&cpufreq_us3_driver);
9156 }
9157
9158 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
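
The us3_cpufreq rewrite above replaces a kzalloc'd cpufreq_driver filled in field by field at init time with one static, fully initialized object: registration just takes its address, and the allocation-failure and kfree() paths disappear. A generic sketch of the same refactor (all types and names here are illustrative, not the cpufreq API):

#include <stdio.h>

struct ops_sketch {
    int  (*init)(void);
    void (*exit)(void);
    const char *name;
};

static int  sketch_init(void) { puts("init"); return 0; }
static void sketch_exit(void) { puts("exit"); }

/* One static object, initialized at compile time; no error path for
 * allocation, nothing to free on unregister. */
static struct ops_sketch driver_sketch = {
    .init = sketch_init,
    .exit = sketch_exit,
    .name = "UltraSPARC-III",
};

static struct ops_sketch *registered;

static int register_sketch(struct ops_sketch *d)
{
    registered = d;
    return d->init();
}

static void unregister_sketch(struct ops_sketch *d)
{
    if (registered == d) {
        d->exit();
        registered = NULL;
    }
}

int main(void)
{
    register_sketch(&driver_sketch);
    unregister_sketch(&driver_sketch);
    return 0;
}
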
9159diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
9160index 8410065f2..4fd4ca22 100644
9161--- a/arch/sparc/lib/Makefile
9162+++ b/arch/sparc/lib/Makefile
9163@@ -2,7 +2,7 @@
9164 #
9165
9166 asflags-y := -ansi -DST_DIV0=0x02
9167-ccflags-y := -Werror
9168+#ccflags-y := -Werror
9169
9170 lib-$(CONFIG_SPARC32) += ashrdi3.o
9171 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
9172diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
9173index 85c233d..68500e0 100644
9174--- a/arch/sparc/lib/atomic_64.S
9175+++ b/arch/sparc/lib/atomic_64.S
9176@@ -17,7 +17,12 @@
9177 ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
9178 BACKOFF_SETUP(%o2)
9179 1: lduw [%o1], %g1
9180- add %g1, %o0, %g7
9181+ addcc %g1, %o0, %g7
9182+
9183+#ifdef CONFIG_PAX_REFCOUNT
9184+ tvs %icc, 6
9185+#endif
9186+
9187 cas [%o1], %g1, %g7
9188 cmp %g1, %g7
9189 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
9190@@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
9191 2: BACKOFF_SPIN(%o2, %o3, 1b)
9192 ENDPROC(atomic_add)
9193
9194+ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
9195+ BACKOFF_SETUP(%o2)
9196+1: lduw [%o1], %g1
9197+ add %g1, %o0, %g7
9198+ cas [%o1], %g1, %g7
9199+ cmp %g1, %g7
9200+ bne,pn %icc, 2f
9201+ nop
9202+ retl
9203+ nop
9204+2: BACKOFF_SPIN(%o2, %o3, 1b)
9205+ENDPROC(atomic_add_unchecked)
9206+
9207 ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
9208 BACKOFF_SETUP(%o2)
9209 1: lduw [%o1], %g1
9210- sub %g1, %o0, %g7
9211+ subcc %g1, %o0, %g7
9212+
9213+#ifdef CONFIG_PAX_REFCOUNT
9214+ tvs %icc, 6
9215+#endif
9216+
9217 cas [%o1], %g1, %g7
9218 cmp %g1, %g7
9219 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
9220@@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
9221 2: BACKOFF_SPIN(%o2, %o3, 1b)
9222 ENDPROC(atomic_sub)
9223
9224+ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
9225+ BACKOFF_SETUP(%o2)
9226+1: lduw [%o1], %g1
9227+ sub %g1, %o0, %g7
9228+ cas [%o1], %g1, %g7
9229+ cmp %g1, %g7
9230+ bne,pn %icc, 2f
9231+ nop
9232+ retl
9233+ nop
9234+2: BACKOFF_SPIN(%o2, %o3, 1b)
9235+ENDPROC(atomic_sub_unchecked)
9236+
9237 ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
9238 BACKOFF_SETUP(%o2)
9239 1: lduw [%o1], %g1
9240- add %g1, %o0, %g7
9241+ addcc %g1, %o0, %g7
9242+
9243+#ifdef CONFIG_PAX_REFCOUNT
9244+ tvs %icc, 6
9245+#endif
9246+
9247 cas [%o1], %g1, %g7
9248 cmp %g1, %g7
9249 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
9250@@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
9251 2: BACKOFF_SPIN(%o2, %o3, 1b)
9252 ENDPROC(atomic_add_ret)
9253
9254+ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
9255+ BACKOFF_SETUP(%o2)
9256+1: lduw [%o1], %g1
9257+ addcc %g1, %o0, %g7
9258+ cas [%o1], %g1, %g7
9259+ cmp %g1, %g7
9260+ bne,pn %icc, 2f
9261+ add %g7, %o0, %g7
9262+ sra %g7, 0, %o0
9263+ retl
9264+ nop
9265+2: BACKOFF_SPIN(%o2, %o3, 1b)
9266+ENDPROC(atomic_add_ret_unchecked)
9267+
9268 ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
9269 BACKOFF_SETUP(%o2)
9270 1: lduw [%o1], %g1
9271- sub %g1, %o0, %g7
9272+ subcc %g1, %o0, %g7
9273+
9274+#ifdef CONFIG_PAX_REFCOUNT
9275+ tvs %icc, 6
9276+#endif
9277+
9278 cas [%o1], %g1, %g7
9279 cmp %g1, %g7
9280 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
9281@@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret)
9282 ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
9283 BACKOFF_SETUP(%o2)
9284 1: ldx [%o1], %g1
9285- add %g1, %o0, %g7
9286+ addcc %g1, %o0, %g7
9287+
9288+#ifdef CONFIG_PAX_REFCOUNT
9289+ tvs %xcc, 6
9290+#endif
9291+
9292 casx [%o1], %g1, %g7
9293 cmp %g1, %g7
9294 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
9295@@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
9296 2: BACKOFF_SPIN(%o2, %o3, 1b)
9297 ENDPROC(atomic64_add)
9298
9299+ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
9300+ BACKOFF_SETUP(%o2)
9301+1: ldx [%o1], %g1
9302+ addcc %g1, %o0, %g7
9303+ casx [%o1], %g1, %g7
9304+ cmp %g1, %g7
9305+ bne,pn %xcc, 2f
9306+ nop
9307+ retl
9308+ nop
9309+2: BACKOFF_SPIN(%o2, %o3, 1b)
9310+ENDPROC(atomic64_add_unchecked)
9311+
9312 ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
9313 BACKOFF_SETUP(%o2)
9314 1: ldx [%o1], %g1
9315- sub %g1, %o0, %g7
9316+ subcc %g1, %o0, %g7
9317+
9318+#ifdef CONFIG_PAX_REFCOUNT
9319+ tvs %xcc, 6
9320+#endif
9321+
9322 casx [%o1], %g1, %g7
9323 cmp %g1, %g7
9324 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
9325@@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
9326 2: BACKOFF_SPIN(%o2, %o3, 1b)
9327 ENDPROC(atomic64_sub)
9328
9329+ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
9330+ BACKOFF_SETUP(%o2)
9331+1: ldx [%o1], %g1
9332+ subcc %g1, %o0, %g7
9333+ casx [%o1], %g1, %g7
9334+ cmp %g1, %g7
9335+ bne,pn %xcc, 2f
9336+ nop
9337+ retl
9338+ nop
9339+2: BACKOFF_SPIN(%o2, %o3, 1b)
9340+ENDPROC(atomic64_sub_unchecked)
9341+
9342 ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
9343 BACKOFF_SETUP(%o2)
9344 1: ldx [%o1], %g1
9345- add %g1, %o0, %g7
9346+ addcc %g1, %o0, %g7
9347+
9348+#ifdef CONFIG_PAX_REFCOUNT
9349+ tvs %xcc, 6
9350+#endif
9351+
9352 casx [%o1], %g1, %g7
9353 cmp %g1, %g7
9354 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
9355@@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
9356 2: BACKOFF_SPIN(%o2, %o3, 1b)
9357 ENDPROC(atomic64_add_ret)
9358
9359+ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
9360+ BACKOFF_SETUP(%o2)
9361+1: ldx [%o1], %g1
9362+ addcc %g1, %o0, %g7
9363+ casx [%o1], %g1, %g7
9364+ cmp %g1, %g7
9365+ bne,pn %xcc, 2f
9366+ add %g7, %o0, %g7
9367+ mov %g7, %o0
9368+ retl
9369+ nop
9370+2: BACKOFF_SPIN(%o2, %o3, 1b)
9371+ENDPROC(atomic64_add_ret_unchecked)
9372+
9373 ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
9374 BACKOFF_SETUP(%o2)
9375 1: ldx [%o1], %g1
9376- sub %g1, %o0, %g7
9377+ subcc %g1, %o0, %g7
9378+
9379+#ifdef CONFIG_PAX_REFCOUNT
9380+ tvs %xcc, 6
9381+#endif
9382+
9383 casx [%o1], %g1, %g7
9384 cmp %g1, %g7
9385 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
9386diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
9387index 0c4e35e..745d3e4 100644
9388--- a/arch/sparc/lib/ksyms.c
9389+++ b/arch/sparc/lib/ksyms.c
9390@@ -109,12 +109,18 @@ EXPORT_SYMBOL(__downgrade_write);
9391
9392 /* Atomic counter implementation. */
9393 EXPORT_SYMBOL(atomic_add);
9394+EXPORT_SYMBOL(atomic_add_unchecked);
9395 EXPORT_SYMBOL(atomic_add_ret);
9396+EXPORT_SYMBOL(atomic_add_ret_unchecked);
9397 EXPORT_SYMBOL(atomic_sub);
9398+EXPORT_SYMBOL(atomic_sub_unchecked);
9399 EXPORT_SYMBOL(atomic_sub_ret);
9400 EXPORT_SYMBOL(atomic64_add);
9401+EXPORT_SYMBOL(atomic64_add_unchecked);
9402 EXPORT_SYMBOL(atomic64_add_ret);
9403+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
9404 EXPORT_SYMBOL(atomic64_sub);
9405+EXPORT_SYMBOL(atomic64_sub_unchecked);
9406 EXPORT_SYMBOL(atomic64_sub_ret);
9407 EXPORT_SYMBOL(atomic64_dec_if_positive);
9408
9409diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
9410index 30c3ecc..736f015 100644
9411--- a/arch/sparc/mm/Makefile
9412+++ b/arch/sparc/mm/Makefile
9413@@ -2,7 +2,7 @@
9414 #
9415
9416 asflags-y := -ansi
9417-ccflags-y := -Werror
9418+#ccflags-y := -Werror
9419
9420 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
9421 obj-y += fault_$(BITS).o
9422diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
9423index e98bfda..ea8d221 100644
9424--- a/arch/sparc/mm/fault_32.c
9425+++ b/arch/sparc/mm/fault_32.c
9426@@ -21,6 +21,9 @@
9427 #include <linux/perf_event.h>
9428 #include <linux/interrupt.h>
9429 #include <linux/kdebug.h>
9430+#include <linux/slab.h>
9431+#include <linux/pagemap.h>
9432+#include <linux/compiler.h>
9433
9434 #include <asm/page.h>
9435 #include <asm/pgtable.h>
9436@@ -159,6 +162,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
9437 return safe_compute_effective_address(regs, insn);
9438 }
9439
9440+#ifdef CONFIG_PAX_PAGEEXEC
9441+#ifdef CONFIG_PAX_DLRESOLVE
9442+static void pax_emuplt_close(struct vm_area_struct *vma)
9443+{
9444+ vma->vm_mm->call_dl_resolve = 0UL;
9445+}
9446+
9447+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9448+{
9449+ unsigned int *kaddr;
9450+
9451+ vmf->page = alloc_page(GFP_HIGHUSER);
9452+ if (!vmf->page)
9453+ return VM_FAULT_OOM;
9454+
9455+ kaddr = kmap(vmf->page);
9456+ memset(kaddr, 0, PAGE_SIZE);
9457+ kaddr[0] = 0x9DE3BFA8U; /* save */
9458+ flush_dcache_page(vmf->page);
9459+ kunmap(vmf->page);
9460+ return VM_FAULT_MAJOR;
9461+}
9462+
9463+static const struct vm_operations_struct pax_vm_ops = {
9464+ .close = pax_emuplt_close,
9465+ .fault = pax_emuplt_fault
9466+};
9467+
9468+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
9469+{
9470+ int ret;
9471+
9472+ INIT_LIST_HEAD(&vma->anon_vma_chain);
9473+ vma->vm_mm = current->mm;
9474+ vma->vm_start = addr;
9475+ vma->vm_end = addr + PAGE_SIZE;
9476+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
9477+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
9478+ vma->vm_ops = &pax_vm_ops;
9479+
9480+ ret = insert_vm_struct(current->mm, vma);
9481+ if (ret)
9482+ return ret;
9483+
9484+ ++current->mm->total_vm;
9485+ return 0;
9486+}
9487+#endif
9488+
9489+/*
9490+ * PaX: decide what to do with offenders (regs->pc = fault address)
9491+ *
9492+ * returns 1 when task should be killed
9493+ * 2 when patched PLT trampoline was detected
9494+ * 3 when unpatched PLT trampoline was detected
9495+ */
9496+static int pax_handle_fetch_fault(struct pt_regs *regs)
9497+{
9498+
9499+#ifdef CONFIG_PAX_EMUPLT
9500+ int err;
9501+
9502+ do { /* PaX: patched PLT emulation #1 */
9503+ unsigned int sethi1, sethi2, jmpl;
9504+
9505+ err = get_user(sethi1, (unsigned int *)regs->pc);
9506+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
9507+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
9508+
9509+ if (err)
9510+ break;
9511+
9512+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
9513+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
9514+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
9515+ {
9516+ unsigned int addr;
9517+
9518+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
9519+ addr = regs->u_regs[UREG_G1];
9520+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
9521+ regs->pc = addr;
9522+ regs->npc = addr+4;
9523+ return 2;
9524+ }
9525+ } while (0);
9526+
9527+ do { /* PaX: patched PLT emulation #2 */
9528+ unsigned int ba;
9529+
9530+ err = get_user(ba, (unsigned int *)regs->pc);
9531+
9532+ if (err)
9533+ break;
9534+
9535+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
9536+ unsigned int addr;
9537+
9538+ if ((ba & 0xFFC00000U) == 0x30800000U)
9539+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
9540+ else
9541+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
9542+ regs->pc = addr;
9543+ regs->npc = addr+4;
9544+ return 2;
9545+ }
9546+ } while (0);
9547+
9548+ do { /* PaX: patched PLT emulation #3 */
9549+ unsigned int sethi, bajmpl, nop;
9550+
9551+ err = get_user(sethi, (unsigned int *)regs->pc);
9552+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
9553+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
9554+
9555+ if (err)
9556+ break;
9557+
9558+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9559+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
9560+ nop == 0x01000000U)
9561+ {
9562+ unsigned int addr;
9563+
9564+ addr = (sethi & 0x003FFFFFU) << 10;
9565+ regs->u_regs[UREG_G1] = addr;
9566+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
9567+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
9568+ else
9569+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
9570+ regs->pc = addr;
9571+ regs->npc = addr+4;
9572+ return 2;
9573+ }
9574+ } while (0);
9575+
9576+ do { /* PaX: unpatched PLT emulation step 1 */
9577+ unsigned int sethi, ba, nop;
9578+
9579+ err = get_user(sethi, (unsigned int *)regs->pc);
9580+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
9581+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
9582+
9583+ if (err)
9584+ break;
9585+
9586+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9587+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
9588+ nop == 0x01000000U)
9589+ {
9590+ unsigned int addr, save, call;
9591+
9592+ if ((ba & 0xFFC00000U) == 0x30800000U)
9593+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
9594+ else
9595+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
9596+
9597+ err = get_user(save, (unsigned int *)addr);
9598+ err |= get_user(call, (unsigned int *)(addr+4));
9599+ err |= get_user(nop, (unsigned int *)(addr+8));
9600+ if (err)
9601+ break;
9602+
9603+#ifdef CONFIG_PAX_DLRESOLVE
9604+ if (save == 0x9DE3BFA8U &&
9605+ (call & 0xC0000000U) == 0x40000000U &&
9606+ nop == 0x01000000U)
9607+ {
9608+ struct vm_area_struct *vma;
9609+ unsigned long call_dl_resolve;
9610+
9611+ down_read(&current->mm->mmap_sem);
9612+ call_dl_resolve = current->mm->call_dl_resolve;
9613+ up_read(&current->mm->mmap_sem);
9614+ if (likely(call_dl_resolve))
9615+ goto emulate;
9616+
9617+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
9618+
9619+ down_write(&current->mm->mmap_sem);
9620+ if (current->mm->call_dl_resolve) {
9621+ call_dl_resolve = current->mm->call_dl_resolve;
9622+ up_write(&current->mm->mmap_sem);
9623+ if (vma)
9624+ kmem_cache_free(vm_area_cachep, vma);
9625+ goto emulate;
9626+ }
9627+
9628+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
9629+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
9630+ up_write(&current->mm->mmap_sem);
9631+ if (vma)
9632+ kmem_cache_free(vm_area_cachep, vma);
9633+ return 1;
9634+ }
9635+
9636+ if (pax_insert_vma(vma, call_dl_resolve)) {
9637+ up_write(&current->mm->mmap_sem);
9638+ kmem_cache_free(vm_area_cachep, vma);
9639+ return 1;
9640+ }
9641+
9642+ current->mm->call_dl_resolve = call_dl_resolve;
9643+ up_write(&current->mm->mmap_sem);
9644+
9645+emulate:
9646+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9647+ regs->pc = call_dl_resolve;
9648+ regs->npc = addr+4;
9649+ return 3;
9650+ }
9651+#endif
9652+
9653+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
9654+ if ((save & 0xFFC00000U) == 0x05000000U &&
9655+ (call & 0xFFFFE000U) == 0x85C0A000U &&
9656+ nop == 0x01000000U)
9657+ {
9658+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9659+ regs->u_regs[UREG_G2] = addr + 4;
9660+ addr = (save & 0x003FFFFFU) << 10;
9661+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
9662+ regs->pc = addr;
9663+ regs->npc = addr+4;
9664+ return 3;
9665+ }
9666+ }
9667+ } while (0);
9668+
9669+ do { /* PaX: unpatched PLT emulation step 2 */
9670+ unsigned int save, call, nop;
9671+
9672+ err = get_user(save, (unsigned int *)(regs->pc-4));
9673+ err |= get_user(call, (unsigned int *)regs->pc);
9674+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
9675+ if (err)
9676+ break;
9677+
9678+ if (save == 0x9DE3BFA8U &&
9679+ (call & 0xC0000000U) == 0x40000000U &&
9680+ nop == 0x01000000U)
9681+ {
9682+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
9683+
9684+ regs->u_regs[UREG_RETPC] = regs->pc;
9685+ regs->pc = dl_resolve;
9686+ regs->npc = dl_resolve+4;
9687+ return 3;
9688+ }
9689+ } while (0);
9690+#endif
9691+
9692+ return 1;
9693+}
9694+
9695+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9696+{
9697+ unsigned long i;
9698+
9699+ printk(KERN_ERR "PAX: bytes at PC: ");
9700+ for (i = 0; i < 8; i++) {
9701+ unsigned int c;
9702+ if (get_user(c, (unsigned int *)pc+i))
9703+ printk(KERN_CONT "???????? ");
9704+ else
9705+ printk(KERN_CONT "%08x ", c);
9706+ }
9707+ printk("\n");
9708+}
9709+#endif
9710+
9711 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
9712 int text_fault)
9713 {
9714@@ -230,6 +504,24 @@ good_area:
9715 if (!(vma->vm_flags & VM_WRITE))
9716 goto bad_area;
9717 } else {
9718+
9719+#ifdef CONFIG_PAX_PAGEEXEC
9720+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
9721+ up_read(&mm->mmap_sem);
9722+ switch (pax_handle_fetch_fault(regs)) {
9723+
9724+#ifdef CONFIG_PAX_EMUPLT
9725+ case 2:
9726+ case 3:
9727+ return;
9728+#endif
9729+
9730+ }
9731+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
9732+ do_group_exit(SIGKILL);
9733+ }
9734+#endif
9735+
9736 /* Allow reads even for write-only mappings */
9737 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
9738 goto bad_area;
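
The PLT emulation above is mostly bit surgery on fetched instruction words: sethi's 22-bit immediate supplies bits 31..10 of a value (the (sethi & 0x003FFFFFU) << 10 expression), and the recurring ((x | mask) ^ signbit) + signbit expression sign-extends a branch or jmpl displacement. A sketch of both decodes (the example encodings are arbitrary):

#include <stdio.h>
#include <stdint.h>

/* sethi: opcode bits 0x03000000 select %g1 as destination; the low 22
 * bits are the immediate that lands in bits 31..10 of the result. */
static uint32_t sethi_value(uint32_t insn)
{
    return (insn & 0x003FFFFFu) << 10;
}

/* jmpl simm13: sign-extend the 13-bit immediate exactly the way the
 * emulation does with ((x | 0xFFFFE000) ^ 0x1000) + 0x1000; the add
 * wraps for positive values and leaves the high bits set for negative
 * ones. */
static int32_t simm13(uint32_t insn)
{
    uint32_t x = insn | 0xFFFFE000u;       /* force the high bits on */
    return (int32_t)((x ^ 0x00001000u) + 0x00001000u);
}

int main(void)
{
    uint32_t sethi = 0x03000000u | 0x12345u;  /* sethi 0x12345, %g1 */
    uint32_t jmpl  = 0x81C06000u | 0x1FF0u;   /* simm13 == -16 */
    printf("sethi contribution: %#x\n", sethi_value(sethi));
    printf("jmpl displacement:  %d\n", simm13(jmpl));
    return 0;
}
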
9739diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
9740index 5062ff3..e0b75f3 100644
9741--- a/arch/sparc/mm/fault_64.c
9742+++ b/arch/sparc/mm/fault_64.c
9743@@ -21,6 +21,9 @@
9744 #include <linux/kprobes.h>
9745 #include <linux/kdebug.h>
9746 #include <linux/percpu.h>
9747+#include <linux/slab.h>
9748+#include <linux/pagemap.h>
9749+#include <linux/compiler.h>
9750
9751 #include <asm/page.h>
9752 #include <asm/pgtable.h>
9753@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
9754 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
9755 regs->tpc);
9756 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
9757- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
9758+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
9759 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
9760 dump_stack();
9761 unhandled_fault(regs->tpc, current, regs);
9762@@ -270,6 +273,466 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
9763 show_regs(regs);
9764 }
9765
9766+#ifdef CONFIG_PAX_PAGEEXEC
9767+#ifdef CONFIG_PAX_DLRESOLVE
9768+static void pax_emuplt_close(struct vm_area_struct *vma)
9769+{
9770+ vma->vm_mm->call_dl_resolve = 0UL;
9771+}
9772+
9773+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9774+{
9775+ unsigned int *kaddr;
9776+
9777+ vmf->page = alloc_page(GFP_HIGHUSER);
9778+ if (!vmf->page)
9779+ return VM_FAULT_OOM;
9780+
9781+ kaddr = kmap(vmf->page);
9782+ memset(kaddr, 0, PAGE_SIZE);
9783+ kaddr[0] = 0x9DE3BFA8U; /* save */
9784+ flush_dcache_page(vmf->page);
9785+ kunmap(vmf->page);
9786+ return VM_FAULT_MAJOR;
9787+}
9788+
9789+static const struct vm_operations_struct pax_vm_ops = {
9790+ .close = pax_emuplt_close,
9791+ .fault = pax_emuplt_fault
9792+};
9793+
9794+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
9795+{
9796+ int ret;
9797+
9798+ INIT_LIST_HEAD(&vma->anon_vma_chain);
9799+ vma->vm_mm = current->mm;
9800+ vma->vm_start = addr;
9801+ vma->vm_end = addr + PAGE_SIZE;
9802+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
9803+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
9804+ vma->vm_ops = &pax_vm_ops;
9805+
9806+ ret = insert_vm_struct(current->mm, vma);
9807+ if (ret)
9808+ return ret;
9809+
9810+ ++current->mm->total_vm;
9811+ return 0;
9812+}
9813+#endif
9814+
9815+/*
9816+ * PaX: decide what to do with offenders (regs->tpc = fault address)
9817+ *
9818+ * returns 1 when task should be killed
9819+ * 2 when patched PLT trampoline was detected
9820+ * 3 when unpatched PLT trampoline was detected
9821+ */
9822+static int pax_handle_fetch_fault(struct pt_regs *regs)
9823+{
9824+
9825+#ifdef CONFIG_PAX_EMUPLT
9826+ int err;
9827+
9828+ do { /* PaX: patched PLT emulation #1 */
9829+ unsigned int sethi1, sethi2, jmpl;
9830+
9831+ err = get_user(sethi1, (unsigned int *)regs->tpc);
9832+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
9833+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
9834+
9835+ if (err)
9836+ break;
9837+
9838+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
9839+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
9840+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
9841+ {
9842+ unsigned long addr;
9843+
9844+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
9845+ addr = regs->u_regs[UREG_G1];
9846+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
9847+
9848+ if (test_thread_flag(TIF_32BIT))
9849+ addr &= 0xFFFFFFFFUL;
9850+
9851+ regs->tpc = addr;
9852+ regs->tnpc = addr+4;
9853+ return 2;
9854+ }
9855+ } while (0);
9856+
9857+ do { /* PaX: patched PLT emulation #2 */
9858+ unsigned int ba;
9859+
9860+ err = get_user(ba, (unsigned int *)regs->tpc);
9861+
9862+ if (err)
9863+ break;
9864+
9865+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
9866+ unsigned long addr;
9867+
9868+ if ((ba & 0xFFC00000U) == 0x30800000U)
9869+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
9870+ else
9871+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
9872+
9873+ if (test_thread_flag(TIF_32BIT))
9874+ addr &= 0xFFFFFFFFUL;
9875+
9876+ regs->tpc = addr;
9877+ regs->tnpc = addr+4;
9878+ return 2;
9879+ }
9880+ } while (0);
9881+
9882+ do { /* PaX: patched PLT emulation #3 */
9883+ unsigned int sethi, bajmpl, nop;
9884+
9885+ err = get_user(sethi, (unsigned int *)regs->tpc);
9886+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
9887+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
9888+
9889+ if (err)
9890+ break;
9891+
9892+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9893+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
9894+ nop == 0x01000000U)
9895+ {
9896+ unsigned long addr;
9897+
9898+ addr = (sethi & 0x003FFFFFU) << 10;
9899+ regs->u_regs[UREG_G1] = addr;
9900+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
9901+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
9902+ else
9903+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
9904+
9905+ if (test_thread_flag(TIF_32BIT))
9906+ addr &= 0xFFFFFFFFUL;
9907+
9908+ regs->tpc = addr;
9909+ regs->tnpc = addr+4;
9910+ return 2;
9911+ }
9912+ } while (0);
9913+
9914+ do { /* PaX: patched PLT emulation #4 */
9915+ unsigned int sethi, mov1, call, mov2;
9916+
9917+ err = get_user(sethi, (unsigned int *)regs->tpc);
9918+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
9919+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
9920+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
9921+
9922+ if (err)
9923+ break;
9924+
9925+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9926+ mov1 == 0x8210000FU &&
9927+ (call & 0xC0000000U) == 0x40000000U &&
9928+ mov2 == 0x9E100001U)
9929+ {
9930+ unsigned long addr;
9931+
9932+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
9933+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
9934+
9935+ if (test_thread_flag(TIF_32BIT))
9936+ addr &= 0xFFFFFFFFUL;
9937+
9938+ regs->tpc = addr;
9939+ regs->tnpc = addr+4;
9940+ return 2;
9941+ }
9942+ } while (0);
9943+
9944+ do { /* PaX: patched PLT emulation #5 */
9945+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
9946+
9947+ err = get_user(sethi, (unsigned int *)regs->tpc);
9948+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
9949+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
9950+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
9951+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
9952+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
9953+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
9954+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
9955+
9956+ if (err)
9957+ break;
9958+
9959+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9960+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
9961+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
9962+ (or1 & 0xFFFFE000U) == 0x82106000U &&
9963+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
9964+ sllx == 0x83287020U &&
9965+ jmpl == 0x81C04005U &&
9966+ nop == 0x01000000U)
9967+ {
9968+ unsigned long addr;
9969+
9970+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
9971+ regs->u_regs[UREG_G1] <<= 32;
9972+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
9973+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
9974+ regs->tpc = addr;
9975+ regs->tnpc = addr+4;
9976+ return 2;
9977+ }
9978+ } while (0);
9979+
9980+ do { /* PaX: patched PLT emulation #6 */
9981+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
9982+
9983+ err = get_user(sethi, (unsigned int *)regs->tpc);
9984+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
9985+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
9986+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
9987+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
9988+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
9989+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
9990+
9991+ if (err)
9992+ break;
9993+
9994+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9995+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
9996+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
9997+ sllx == 0x83287020U &&
9998+ (or & 0xFFFFE000U) == 0x8A116000U &&
9999+ jmpl == 0x81C04005U &&
10000+ nop == 0x01000000U)
10001+ {
10002+ unsigned long addr;
10003+
10004+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
10005+ regs->u_regs[UREG_G1] <<= 32;
10006+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
10007+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
10008+ regs->tpc = addr;
10009+ regs->tnpc = addr+4;
10010+ return 2;
10011+ }
10012+ } while (0);
10013+
10014+ do { /* PaX: unpatched PLT emulation step 1 */
10015+ unsigned int sethi, ba, nop;
10016+
10017+ err = get_user(sethi, (unsigned int *)regs->tpc);
10018+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
10019+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
10020+
10021+ if (err)
10022+ break;
10023+
10024+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10025+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
10026+ nop == 0x01000000U)
10027+ {
10028+ unsigned long addr;
10029+ unsigned int save, call;
10030+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
10031+
10032+ if ((ba & 0xFFC00000U) == 0x30800000U)
10033+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
10034+ else
10035+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
10036+
10037+ if (test_thread_flag(TIF_32BIT))
10038+ addr &= 0xFFFFFFFFUL;
10039+
10040+ err = get_user(save, (unsigned int *)addr);
10041+ err |= get_user(call, (unsigned int *)(addr+4));
10042+ err |= get_user(nop, (unsigned int *)(addr+8));
10043+ if (err)
10044+ break;
10045+
10046+#ifdef CONFIG_PAX_DLRESOLVE
10047+ if (save == 0x9DE3BFA8U &&
10048+ (call & 0xC0000000U) == 0x40000000U &&
10049+ nop == 0x01000000U)
10050+ {
10051+ struct vm_area_struct *vma;
10052+ unsigned long call_dl_resolve;
10053+
10054+ down_read(&current->mm->mmap_sem);
10055+ call_dl_resolve = current->mm->call_dl_resolve;
10056+ up_read(&current->mm->mmap_sem);
10057+ if (likely(call_dl_resolve))
10058+ goto emulate;
10059+
10060+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
10061+
10062+ down_write(&current->mm->mmap_sem);
10063+ if (current->mm->call_dl_resolve) {
10064+ call_dl_resolve = current->mm->call_dl_resolve;
10065+ up_write(&current->mm->mmap_sem);
10066+ if (vma)
10067+ kmem_cache_free(vm_area_cachep, vma);
10068+ goto emulate;
10069+ }
10070+
10071+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
10072+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
10073+ up_write(&current->mm->mmap_sem);
10074+ if (vma)
10075+ kmem_cache_free(vm_area_cachep, vma);
10076+ return 1;
10077+ }
10078+
10079+ if (pax_insert_vma(vma, call_dl_resolve)) {
10080+ up_write(&current->mm->mmap_sem);
10081+ kmem_cache_free(vm_area_cachep, vma);
10082+ return 1;
10083+ }
10084+
10085+ current->mm->call_dl_resolve = call_dl_resolve;
10086+ up_write(&current->mm->mmap_sem);
10087+
10088+emulate:
10089+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
10090+ regs->tpc = call_dl_resolve;
10091+ regs->tnpc = addr+4;
10092+ return 3;
10093+ }
10094+#endif
10095+
10096+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
10097+ if ((save & 0xFFC00000U) == 0x05000000U &&
10098+ (call & 0xFFFFE000U) == 0x85C0A000U &&
10099+ nop == 0x01000000U)
10100+ {
10101+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
10102+ regs->u_regs[UREG_G2] = addr + 4;
10103+ addr = (save & 0x003FFFFFU) << 10;
10104+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
10105+
10106+ if (test_thread_flag(TIF_32BIT))
10107+ addr &= 0xFFFFFFFFUL;
10108+
10109+ regs->tpc = addr;
10110+ regs->tnpc = addr+4;
10111+ return 3;
10112+ }
10113+
10114+ /* PaX: 64-bit PLT stub */
10115+ err = get_user(sethi1, (unsigned int *)addr);
10116+ err |= get_user(sethi2, (unsigned int *)(addr+4));
10117+ err |= get_user(or1, (unsigned int *)(addr+8));
10118+ err |= get_user(or2, (unsigned int *)(addr+12));
10119+ err |= get_user(sllx, (unsigned int *)(addr+16));
10120+ err |= get_user(add, (unsigned int *)(addr+20));
10121+ err |= get_user(jmpl, (unsigned int *)(addr+24));
10122+ err |= get_user(nop, (unsigned int *)(addr+28));
10123+ if (err)
10124+ break;
10125+
10126+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
10127+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
10128+ (or1 & 0xFFFFE000U) == 0x88112000U &&
10129+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
10130+ sllx == 0x89293020U &&
10131+ add == 0x8A010005U &&
10132+ jmpl == 0x89C14000U &&
10133+ nop == 0x01000000U)
10134+ {
10135+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
10136+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
10137+ regs->u_regs[UREG_G4] <<= 32;
10138+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
10139+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
10140+ regs->u_regs[UREG_G4] = addr + 24;
10141+ addr = regs->u_regs[UREG_G5];
10142+ regs->tpc = addr;
10143+ regs->tnpc = addr+4;
10144+ return 3;
10145+ }
10146+ }
10147+ } while (0);
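
The CONFIG_PAX_DLRESOLVE branch above creates the per-mm trampoline page lazily: check under the read side of mmap_sem, allocate with no lock held, then re-check under the write side so that a racing thread's page wins and the loser frees its own allocation. A minimal userspace sketch of that double-checked pattern (pthread names and the demo_mm type are illustrative, not kernel API):

#include <pthread.h>
#include <stdlib.h>

struct demo_mm {
        pthread_rwlock_t lock;  /* stands in for mmap_sem */
        void *lazy;             /* stands in for call_dl_resolve */
};

static void *get_lazy(struct demo_mm *mm)
{
        void *p, *fresh;

        pthread_rwlock_rdlock(&mm->lock);       /* fast path: read lock */
        p = mm->lazy;
        pthread_rwlock_unlock(&mm->lock);
        if (p)
                return p;

        fresh = malloc(64);                     /* allocate outside the lock */
        if (!fresh)
                return NULL;

        pthread_rwlock_wrlock(&mm->lock);
        if (mm->lazy) {                         /* lost the race: keep theirs */
                p = mm->lazy;
                pthread_rwlock_unlock(&mm->lock);
                free(fresh);
                return p;
        }
        mm->lazy = fresh;                       /* won the race: publish ours */
        pthread_rwlock_unlock(&mm->lock);
        return fresh;
}

int main(void)
{
        struct demo_mm mm = { PTHREAD_RWLOCK_INITIALIZER, NULL };
        void *a = get_lazy(&mm), *b = get_lazy(&mm);
        int ok = (a == b);                      /* second call reuses the first page */
        free(a);
        return ok ? 0 : 1;
}
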
10148+
10149+#ifdef CONFIG_PAX_DLRESOLVE
10150+ do { /* PaX: unpatched PLT emulation step 2 */
10151+ unsigned int save, call, nop;
10152+
10153+ err = get_user(save, (unsigned int *)(regs->tpc-4));
10154+ err |= get_user(call, (unsigned int *)regs->tpc);
10155+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
10156+ if (err)
10157+ break;
10158+
10159+ if (save == 0x9DE3BFA8U &&
10160+ (call & 0xC0000000U) == 0x40000000U &&
10161+ nop == 0x01000000U)
10162+ {
10163+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
10164+
10165+ if (test_thread_flag(TIF_32BIT))
10166+ dl_resolve &= 0xFFFFFFFFUL;
10167+
10168+ regs->u_regs[UREG_RETPC] = regs->tpc;
10169+ regs->tpc = dl_resolve;
10170+ regs->tnpc = dl_resolve+4;
10171+ return 3;
10172+ }
10173+ } while (0);
10174+#endif
10175+
10176+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
10177+ unsigned int sethi, ba, nop;
10178+
10179+ err = get_user(sethi, (unsigned int *)regs->tpc);
10180+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
10181+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
10182+
10183+ if (err)
10184+ break;
10185+
10186+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10187+ (ba & 0xFFF00000U) == 0x30600000U &&
10188+ nop == 0x01000000U)
10189+ {
10190+ unsigned long addr;
10191+
10192+ addr = (sethi & 0x003FFFFFU) << 10;
10193+ regs->u_regs[UREG_G1] = addr;
10194+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
10195+
10196+ if (test_thread_flag(TIF_32BIT))
10197+ addr &= 0xFFFFFFFFUL;
10198+
10199+ regs->tpc = addr;
10200+ regs->tnpc = addr+4;
10201+ return 2;
10202+ }
10203+ } while (0);
10204+
10205+#endif
10206+
10207+ return 1;
10208+}
10209+
10210+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
10211+{
10212+ unsigned long i;
10213+
10214+ printk(KERN_ERR "PAX: bytes at PC: ");
10215+ for (i = 0; i < 8; i++) {
10216+ unsigned int c;
10217+ if (get_user(c, (unsigned int *)pc+i))
10218+ printk(KERN_CONT "???????? ");
10219+ else
10220+ printk(KERN_CONT "%08x ", c);
10221+ }
10222+ printk("\n");
10223+}
10224+#endif
10225+
10226 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
10227 {
10228 struct mm_struct *mm = current->mm;
10229@@ -341,6 +804,29 @@ retry:
10230 if (!vma)
10231 goto bad_area;
10232
10233+#ifdef CONFIG_PAX_PAGEEXEC
10234+ /* PaX: detect ITLB misses on non-exec pages */
10235+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
10236+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
10237+ {
10238+ if (address != regs->tpc)
10239+ goto good_area;
10240+
10241+ up_read(&mm->mmap_sem);
10242+ switch (pax_handle_fetch_fault(regs)) {
10243+
10244+#ifdef CONFIG_PAX_EMUPLT
10245+ case 2:
10246+ case 3:
10247+ return;
10248+#endif
10249+
10250+ }
10251+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
10252+ do_group_exit(SIGKILL);
10253+ }
10254+#endif
10255+
10256 /* Pure DTLB misses do not tell us whether the fault causing
10257 * load/store/atomic was a write or not, it only says that there
10258 * was no match. So in such a case we (carefully) read the
10259diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
10260index d2b5944..bd813f2 100644
10261--- a/arch/sparc/mm/hugetlbpage.c
10262+++ b/arch/sparc/mm/hugetlbpage.c
10263@@ -38,7 +38,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
10264
10265 info.flags = 0;
10266 info.length = len;
10267- info.low_limit = TASK_UNMAPPED_BASE;
10268+ info.low_limit = mm->mmap_base;
10269 info.high_limit = min(task_size, VA_EXCLUDE_START);
10270 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
10271 info.align_offset = 0;
10272@@ -47,6 +47,12 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
10273 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
10274 VM_BUG_ON(addr != -ENOMEM);
10275 info.low_limit = VA_EXCLUDE_END;
10276+
10277+#ifdef CONFIG_PAX_RANDMMAP
10278+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10279+ info.low_limit += mm->delta_mmap;
10280+#endif
10281+
10282 info.high_limit = task_size;
10283 addr = vm_unmapped_area(&info);
10284 }
10285@@ -85,6 +91,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10286 VM_BUG_ON(addr != -ENOMEM);
10287 info.flags = 0;
10288 info.low_limit = TASK_UNMAPPED_BASE;
10289+
10290+#ifdef CONFIG_PAX_RANDMMAP
10291+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10292+ info.low_limit += mm->delta_mmap;
10293+#endif
10294+
10295 info.high_limit = STACK_TOP32;
10296 addr = vm_unmapped_area(&info);
10297 }
10298@@ -99,6 +111,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
10299 struct mm_struct *mm = current->mm;
10300 struct vm_area_struct *vma;
10301 unsigned long task_size = TASK_SIZE;
10302+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
10303
10304 if (test_thread_flag(TIF_32BIT))
10305 task_size = STACK_TOP32;
10306@@ -114,11 +127,14 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
10307 return addr;
10308 }
10309
10310+#ifdef CONFIG_PAX_RANDMMAP
10311+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10312+#endif
10313+
10314 if (addr) {
10315 addr = ALIGN(addr, HPAGE_SIZE);
10316 vma = find_vma(mm, addr);
10317- if (task_size - len >= addr &&
10318- (!vma || addr + len <= vma->vm_start))
10319+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10320 return addr;
10321 }
10322 if (mm->get_unmapped_area == arch_get_unmapped_area)
10323diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
10324index f4500c6..889656c 100644
10325--- a/arch/tile/include/asm/atomic_64.h
10326+++ b/arch/tile/include/asm/atomic_64.h
10327@@ -143,6 +143,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
10328
10329 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
10330
10331+#define atomic64_read_unchecked(v) atomic64_read(v)
10332+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
10333+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
10334+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
10335+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
10336+#define atomic64_inc_unchecked(v) atomic64_inc(v)
10337+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
10338+#define atomic64_dec_unchecked(v) atomic64_dec(v)
10339+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
10340+
10341 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
10342 #define smp_mb__before_atomic_dec() smp_mb()
10343 #define smp_mb__after_atomic_dec() smp_mb()
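
These *_unchecked aliases exist because PaX's overflow-checking atomics (the REFCOUNT feature) need an escape hatch for counters that may legitimately wrap; on architectures like tile with no checked implementation, both flavors collapse to the plain operations. A conceptual C11 sketch of the intended split, not the PaX implementation (names illustrative):

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

typedef struct { _Atomic long counter; } demo_atomic64_t;

/* Checked flavor: refuse to complete an add that would overflow. */
static void demo_atomic64_add(long i, demo_atomic64_t *v)
{
        long old = atomic_load(&v->counter), sum;

        do {
                if (__builtin_add_overflow(old, i, &sum)) {
                        fprintf(stderr, "refcount overflow blocked\n");
                        return;         /* PaX raises a trap instead */
                }
        } while (!atomic_compare_exchange_weak(&v->counter, &old, sum));
}

/* Unchecked flavor: a plain wrapping add, which is all the aliases
 * above select on tile. */
static void demo_atomic64_add_unchecked(long i, demo_atomic64_t *v)
{
        atomic_fetch_add(&v->counter, i);
}

int main(void)
{
        demo_atomic64_t v = { LONG_MAX };
        demo_atomic64_add(1, &v);               /* blocked */
        demo_atomic64_add_unchecked(1, &v);     /* wraps silently */
        return 0;
}
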
10344diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
10345index a9a5299..0fce79e 100644
10346--- a/arch/tile/include/asm/cache.h
10347+++ b/arch/tile/include/asm/cache.h
10348@@ -15,11 +15,12 @@
10349 #ifndef _ASM_TILE_CACHE_H
10350 #define _ASM_TILE_CACHE_H
10351
10352+#include <linux/const.h>
10353 #include <arch/chip.h>
10354
10355 /* bytes per L1 data cache line */
10356 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
10357-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10358+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10359
10360 /* bytes per L2 cache line */
10361 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
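
The recurring (1 << L1_CACHE_SHIFT) to (_AC(1,UL) << L1_CACHE_SHIFT) change gives the cache-line constant unsigned long type in C while keeping the header includable from assembly. _AC comes from linux/const.h; a sketch of its definition (paraphrased, worth verifying against the tree):

#ifdef __ASSEMBLY__
#define _AC(X, Y)       X               /* the assembler sees a bare 1 */
#else
#define __AC(X, Y)      (X##Y)
#define _AC(X, Y)       __AC(X, Y)      /* C sees (1UL) */
#endif

/* In C, L1_CACHE_BYTES is then (1UL << L1_CACHE_SHIFT), so mask
 * arithmetic such as (addr & ~(L1_CACHE_BYTES - 1)) is done in
 * unsigned long rather than int, with no surprise sign extension. */
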
10362diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
10363index 9ab078a..d6635c2 100644
10364--- a/arch/tile/include/asm/uaccess.h
10365+++ b/arch/tile/include/asm/uaccess.h
10366@@ -403,9 +403,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
10367 const void __user *from,
10368 unsigned long n)
10369 {
10370- int sz = __compiletime_object_size(to);
10371+ size_t sz = __compiletime_object_size(to);
10372
10373- if (likely(sz == -1 || sz >= n))
10374+ if (likely(sz == (size_t)-1 || sz >= n))
10375 n = _copy_from_user(to, from, n);
10376 else
10377 copy_from_user_overflow();
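
The hunk above keeps the compile-time object size in a size_t and spells the "size unknown" sentinel as (size_t)-1, rather than letting an int -1 take part in a mixed signed/unsigned comparison. A small standalone sketch of the sentinel convention (illustrative names, not the kernel helpers):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define OBJSZ_UNKNOWN ((size_t)-1)

/* Clamp a copy to a known compile-time object size, if there is one. */
static size_t bounded_copy(void *to, size_t objsz, const void *from, size_t n)
{
        if (objsz != OBJSZ_UNKNOWN && n > objsz) {
                fprintf(stderr, "copy overflow: %zu > %zu\n", n, objsz);
                n = objsz;      /* the kernel calls copy_from_user_overflow() */
        }
        memcpy(to, from, n);
        return n;
}

int main(void)
{
        char dst[8];
        const char src[16] = "0123456789abcde";
        return bounded_copy(dst, sizeof(dst), src, sizeof(src)) == 8 ? 0 : 1;
}
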
10378diff --git a/arch/um/Makefile b/arch/um/Makefile
10379index 133f7de..1d6f2f1 100644
10380--- a/arch/um/Makefile
10381+++ b/arch/um/Makefile
10382@@ -62,6 +62,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
10383 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
10384 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
10385
10386+ifdef CONSTIFY_PLUGIN
10387+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
10388+endif
10389+
10390 #This will adjust *FLAGS accordingly to the platform.
10391 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
10392
10393diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
10394index 19e1bdd..3665b77 100644
10395--- a/arch/um/include/asm/cache.h
10396+++ b/arch/um/include/asm/cache.h
10397@@ -1,6 +1,7 @@
10398 #ifndef __UM_CACHE_H
10399 #define __UM_CACHE_H
10400
10401+#include <linux/const.h>
10402
10403 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
10404 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
10405@@ -12,6 +13,6 @@
10406 # define L1_CACHE_SHIFT 5
10407 #endif
10408
10409-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10410+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10411
10412 #endif
10413diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
10414index 2e0a6b1..a64d0f5 100644
10415--- a/arch/um/include/asm/kmap_types.h
10416+++ b/arch/um/include/asm/kmap_types.h
10417@@ -8,6 +8,6 @@
10418
10419 /* No more #include "asm/arch/kmap_types.h" ! */
10420
10421-#define KM_TYPE_NR 14
10422+#define KM_TYPE_NR 15
10423
10424 #endif
10425diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
10426index 5ff53d9..5850cdf 100644
10427--- a/arch/um/include/asm/page.h
10428+++ b/arch/um/include/asm/page.h
10429@@ -14,6 +14,9 @@
10430 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
10431 #define PAGE_MASK (~(PAGE_SIZE-1))
10432
10433+#define ktla_ktva(addr) (addr)
10434+#define ktva_ktla(addr) (addr)
10435+
10436 #ifndef __ASSEMBLY__
10437
10438 struct page;
10439diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
10440index 0032f92..cd151e0 100644
10441--- a/arch/um/include/asm/pgtable-3level.h
10442+++ b/arch/um/include/asm/pgtable-3level.h
10443@@ -58,6 +58,7 @@
10444 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
10445 #define pud_populate(mm, pud, pmd) \
10446 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
10447+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
10448
10449 #ifdef CONFIG_64BIT
10450 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
10451diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
10452index b462b13..e7a19aa 100644
10453--- a/arch/um/kernel/process.c
10454+++ b/arch/um/kernel/process.c
10455@@ -386,22 +386,6 @@ int singlestepping(void * t)
10456 return 2;
10457 }
10458
10459-/*
10460- * Only x86 and x86_64 have an arch_align_stack().
10461- * All other arches have "#define arch_align_stack(x) (x)"
10462- * in their asm/system.h
10463- * As this is included in UML from asm-um/system-generic.h,
10464- * we can use it to behave as the subarch does.
10465- */
10466-#ifndef arch_align_stack
10467-unsigned long arch_align_stack(unsigned long sp)
10468-{
10469- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
10470- sp -= get_random_int() % 8192;
10471- return sp & ~0xf;
10472-}
10473-#endif
10474-
10475 unsigned long get_wchan(struct task_struct *p)
10476 {
10477 unsigned long stack_page, sp, ip;
10478diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
10479index ad8f795..2c7eec6 100644
10480--- a/arch/unicore32/include/asm/cache.h
10481+++ b/arch/unicore32/include/asm/cache.h
10482@@ -12,8 +12,10 @@
10483 #ifndef __UNICORE_CACHE_H__
10484 #define __UNICORE_CACHE_H__
10485
10486-#define L1_CACHE_SHIFT (5)
10487-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10488+#include <linux/const.h>
10489+
10490+#define L1_CACHE_SHIFT 5
10491+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10492
10493 /*
10494 * Memory returned by kmalloc() may be used for DMA, so we must make
10495diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
10496index 0694d09..b58b3aa 100644
10497--- a/arch/x86/Kconfig
10498+++ b/arch/x86/Kconfig
10499@@ -238,7 +238,7 @@ config X86_HT
10500
10501 config X86_32_LAZY_GS
10502 def_bool y
10503- depends on X86_32 && !CC_STACKPROTECTOR
10504+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10505
10506 config ARCH_HWEIGHT_CFLAGS
10507 string
10508@@ -1031,6 +1031,7 @@ config MICROCODE_OLD_INTERFACE
10509
10510 config X86_MSR
10511 tristate "/dev/cpu/*/msr - Model-specific register support"
10512+ depends on !GRKERNSEC_KMEM
10513 ---help---
10514 This device gives privileged processes access to the x86
10515 Model-Specific Registers (MSRs). It is a character device with
10516@@ -1054,7 +1055,7 @@ choice
10517
10518 config NOHIGHMEM
10519 bool "off"
10520- depends on !X86_NUMAQ
10521+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10522 ---help---
10523 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
10524 However, the address space of 32-bit x86 processors is only 4
10525@@ -1091,7 +1092,7 @@ config NOHIGHMEM
10526
10527 config HIGHMEM4G
10528 bool "4GB"
10529- depends on !X86_NUMAQ
10530+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10531 ---help---
10532 Select this if you have a 32-bit processor and between 1 and 4
10533 gigabytes of physical RAM.
10534@@ -1145,7 +1146,7 @@ config PAGE_OFFSET
10535 hex
10536 default 0xB0000000 if VMSPLIT_3G_OPT
10537 default 0x80000000 if VMSPLIT_2G
10538- default 0x78000000 if VMSPLIT_2G_OPT
10539+ default 0x70000000 if VMSPLIT_2G_OPT
10540 default 0x40000000 if VMSPLIT_1G
10541 default 0xC0000000
10542 depends on X86_32
10543@@ -1542,6 +1543,7 @@ config SECCOMP
10544
10545 config CC_STACKPROTECTOR
10546 bool "Enable -fstack-protector buffer overflow detection"
10547+ depends on X86_64 || !PAX_MEMORY_UDEREF
10548 ---help---
10549 This option turns on the -fstack-protector GCC feature. This
10550 feature puts, at the beginning of functions, a canary value on
10551@@ -1599,6 +1601,7 @@ config KEXEC_JUMP
10552 config PHYSICAL_START
10553 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
10554 default "0x1000000"
10555+ range 0x400000 0x40000000
10556 ---help---
10557 This gives the physical address where the kernel is loaded.
10558
10559@@ -1662,6 +1665,7 @@ config X86_NEED_RELOCS
10560 config PHYSICAL_ALIGN
10561 hex "Alignment value to which kernel should be aligned" if X86_32
10562 default "0x1000000"
10563+ range 0x400000 0x1000000 if PAX_KERNEXEC
10564 range 0x2000 0x1000000
10565 ---help---
10566 This value puts the alignment restrictions on physical address
10567@@ -1737,9 +1741,10 @@ config DEBUG_HOTPLUG_CPU0
10568 If unsure, say N.
10569
10570 config COMPAT_VDSO
10571- def_bool y
10572+ def_bool n
10573 prompt "Compat VDSO support"
10574 depends on X86_32 || IA32_EMULATION
10575+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
10576 ---help---
10577 Map the 32-bit VDSO to the predictable old-style address too.
10578
10579diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
10580index c026cca..14657ae 100644
10581--- a/arch/x86/Kconfig.cpu
10582+++ b/arch/x86/Kconfig.cpu
10583@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
10584
10585 config X86_F00F_BUG
10586 def_bool y
10587- depends on M586MMX || M586TSC || M586 || M486
10588+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
10589
10590 config X86_INVD_BUG
10591 def_bool y
10592@@ -327,7 +327,7 @@ config X86_INVD_BUG
10593
10594 config X86_ALIGNMENT_16
10595 def_bool y
10596- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10597+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10598
10599 config X86_INTEL_USERCOPY
10600 def_bool y
10601@@ -373,7 +373,7 @@ config X86_CMPXCHG64
10602 # generates cmov.
10603 config X86_CMOV
10604 def_bool y
10605- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10606+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10607
10608 config X86_MINIMUM_CPU_FAMILY
10609 int
10610diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
10611index b322f12..652d0d9 100644
10612--- a/arch/x86/Kconfig.debug
10613+++ b/arch/x86/Kconfig.debug
10614@@ -84,7 +84,7 @@ config X86_PTDUMP
10615 config DEBUG_RODATA
10616 bool "Write protect kernel read-only data structures"
10617 default y
10618- depends on DEBUG_KERNEL
10619+ depends on DEBUG_KERNEL && BROKEN
10620 ---help---
10621 Mark the kernel read-only data as write-protected in the pagetables,
10622 in order to catch accidental (and incorrect) writes to such const
10623@@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
10624
10625 config DEBUG_SET_MODULE_RONX
10626 bool "Set loadable kernel module data as NX and text as RO"
10627- depends on MODULES
10628+ depends on MODULES && BROKEN
10629 ---help---
10630 This option helps catch unintended modifications to loadable
10631 kernel module's text and read-only data. It also prevents execution
10632@@ -294,7 +294,7 @@ config OPTIMIZE_INLINING
10633
10634 config DEBUG_STRICT_USER_COPY_CHECKS
10635 bool "Strict copy size checks"
10636- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
10637+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
10638 ---help---
10639 Enabling this option turns a certain set of sanity checks for user
10640 copy operations into compile time failures.
10641diff --git a/arch/x86/Makefile b/arch/x86/Makefile
10642index e71fc42..7829607 100644
10643--- a/arch/x86/Makefile
10644+++ b/arch/x86/Makefile
10645@@ -50,6 +50,7 @@ else
10646 UTS_MACHINE := x86_64
10647 CHECKFLAGS += -D__x86_64__ -m64
10648
10649+ biarch := $(call cc-option,-m64)
10650 KBUILD_AFLAGS += -m64
10651 KBUILD_CFLAGS += -m64
10652
10653@@ -230,3 +231,12 @@ define archhelp
10654 echo ' FDARGS="..." arguments for the booted kernel'
10655 echo ' FDINITRD=file initrd for the booted kernel'
10656 endef
10657+
10658+define OLD_LD
10659+
10660+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
10661+*** Please upgrade your binutils to 2.18 or newer
10662+endef
10663+
10664+archprepare:
10665+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
10666diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
10667index 379814b..add62ce 100644
10668--- a/arch/x86/boot/Makefile
10669+++ b/arch/x86/boot/Makefile
10670@@ -65,6 +65,9 @@ KBUILD_CFLAGS := $(USERINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
10671 $(call cc-option, -fno-stack-protector) \
10672 $(call cc-option, -mpreferred-stack-boundary=2)
10673 KBUILD_CFLAGS += $(call cc-option, -m32)
10674+ifdef CONSTIFY_PLUGIN
10675+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
10676+endif
10677 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10678 GCOV_PROFILE := n
10679
10680diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
10681index 878e4b9..20537ab 100644
10682--- a/arch/x86/boot/bitops.h
10683+++ b/arch/x86/boot/bitops.h
10684@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
10685 u8 v;
10686 const u32 *p = (const u32 *)addr;
10687
10688- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
10689+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
10690 return v;
10691 }
10692
10693@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
10694
10695 static inline void set_bit(int nr, void *addr)
10696 {
10697- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
10698+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
10699 }
10700
10701 #endif /* BOOT_BITOPS_H */
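
This hunk, and the boot.h and cpucheck.c hunks below, add volatile to asm statements whose side effects the compiler cannot see. A plain asm with outputs is treated as a pure function of its inputs, so GCC may delete it when the result looks unused or merge two identical instances; volatile pins each instance in place. A minimal sketch of the distinction (rdtsc chosen only because it is unprivileged):

#include <stdio.h>

static inline unsigned long long tsc_demo(void)
{
        unsigned int lo, hi;

        /* Without "volatile" the two calls in main() could legally be
         * combined into a single rdtsc, making the delta always zero. */
        asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
        return ((unsigned long long)hi << 32) | lo;
}

int main(void)
{
        unsigned long long a = tsc_demo(), b = tsc_demo();
        printf("delta = %llu cycles\n", b - a);
        return 0;
}
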
10702diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
10703index 18997e5..83d9c67 100644
10704--- a/arch/x86/boot/boot.h
10705+++ b/arch/x86/boot/boot.h
10706@@ -85,7 +85,7 @@ static inline void io_delay(void)
10707 static inline u16 ds(void)
10708 {
10709 u16 seg;
10710- asm("movw %%ds,%0" : "=rm" (seg));
10711+ asm volatile("movw %%ds,%0" : "=rm" (seg));
10712 return seg;
10713 }
10714
10715@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
10716 static inline int memcmp(const void *s1, const void *s2, size_t len)
10717 {
10718 u8 diff;
10719- asm("repe; cmpsb; setnz %0"
10720+ asm volatile("repe; cmpsb; setnz %0"
10721 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
10722 return diff;
10723 }
10724diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
10725index 8a84501..b2d165f 100644
10726--- a/arch/x86/boot/compressed/Makefile
10727+++ b/arch/x86/boot/compressed/Makefile
10728@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
10729 KBUILD_CFLAGS += $(cflags-y)
10730 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
10731 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
10732+ifdef CONSTIFY_PLUGIN
10733+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
10734+endif
10735
10736 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10737 GCOV_PROFILE := n
10738diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
10739index c205035..5853587 100644
10740--- a/arch/x86/boot/compressed/eboot.c
10741+++ b/arch/x86/boot/compressed/eboot.c
10742@@ -150,7 +150,6 @@ again:
10743 *addr = max_addr;
10744 }
10745
10746-free_pool:
10747 efi_call_phys1(sys_table->boottime->free_pool, map);
10748
10749 fail:
10750@@ -214,7 +213,6 @@ static efi_status_t low_alloc(unsigned long size, unsigned long align,
10751 if (i == map_size / desc_size)
10752 status = EFI_NOT_FOUND;
10753
10754-free_pool:
10755 efi_call_phys1(sys_table->boottime->free_pool, map);
10756 fail:
10757 return status;
10758diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
10759index 1e3184f..0d11e2e 100644
10760--- a/arch/x86/boot/compressed/head_32.S
10761+++ b/arch/x86/boot/compressed/head_32.S
10762@@ -118,7 +118,7 @@ preferred_addr:
10763 notl %eax
10764 andl %eax, %ebx
10765 #else
10766- movl $LOAD_PHYSICAL_ADDR, %ebx
10767+ movl $____LOAD_PHYSICAL_ADDR, %ebx
10768 #endif
10769
10770 /* Target address to relocate to for decompression */
10771@@ -204,7 +204,7 @@ relocated:
10772 * and where it was actually loaded.
10773 */
10774 movl %ebp, %ebx
10775- subl $LOAD_PHYSICAL_ADDR, %ebx
10776+ subl $____LOAD_PHYSICAL_ADDR, %ebx
10777 jz 2f /* Nothing to be done if loaded at compiled addr. */
10778 /*
10779 * Process relocations.
10780@@ -212,8 +212,7 @@ relocated:
10781
10782 1: subl $4, %edi
10783 movl (%edi), %ecx
10784- testl %ecx, %ecx
10785- jz 2f
10786+ jecxz 2f
10787 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
10788 jmp 1b
10789 2:
10790diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
10791index f5d1aaa..cce11dc 100644
10792--- a/arch/x86/boot/compressed/head_64.S
10793+++ b/arch/x86/boot/compressed/head_64.S
10794@@ -91,7 +91,7 @@ ENTRY(startup_32)
10795 notl %eax
10796 andl %eax, %ebx
10797 #else
10798- movl $LOAD_PHYSICAL_ADDR, %ebx
10799+ movl $____LOAD_PHYSICAL_ADDR, %ebx
10800 #endif
10801
10802 /* Target address to relocate to for decompression */
10803@@ -273,7 +273,7 @@ preferred_addr:
10804 notq %rax
10805 andq %rax, %rbp
10806 #else
10807- movq $LOAD_PHYSICAL_ADDR, %rbp
10808+ movq $____LOAD_PHYSICAL_ADDR, %rbp
10809 #endif
10810
10811 /* Target address to relocate to for decompression */
10812diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
10813index 88f7ff6..ed695dd 100644
10814--- a/arch/x86/boot/compressed/misc.c
10815+++ b/arch/x86/boot/compressed/misc.c
10816@@ -303,7 +303,7 @@ static void parse_elf(void *output)
10817 case PT_LOAD:
10818 #ifdef CONFIG_RELOCATABLE
10819 dest = output;
10820- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
10821+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
10822 #else
10823 dest = (void *)(phdr->p_paddr);
10824 #endif
10825@@ -352,7 +352,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
10826 error("Destination address too large");
10827 #endif
10828 #ifndef CONFIG_RELOCATABLE
10829- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
10830+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
10831 error("Wrong destination address");
10832 #endif
10833
10834diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
10835index 4d3ff03..e4972ff 100644
10836--- a/arch/x86/boot/cpucheck.c
10837+++ b/arch/x86/boot/cpucheck.c
10838@@ -74,7 +74,7 @@ static int has_fpu(void)
10839 u16 fcw = -1, fsw = -1;
10840 u32 cr0;
10841
10842- asm("movl %%cr0,%0" : "=r" (cr0));
10843+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
10844 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
10845 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
10846 asm volatile("movl %0,%%cr0" : : "r" (cr0));
10847@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
10848 {
10849 u32 f0, f1;
10850
10851- asm("pushfl ; "
10852+ asm volatile("pushfl ; "
10853 "pushfl ; "
10854 "popl %0 ; "
10855 "movl %0,%1 ; "
10856@@ -115,7 +115,7 @@ static void get_flags(void)
10857 set_bit(X86_FEATURE_FPU, cpu.flags);
10858
10859 if (has_eflag(X86_EFLAGS_ID)) {
10860- asm("cpuid"
10861+ asm volatile("cpuid"
10862 : "=a" (max_intel_level),
10863 "=b" (cpu_vendor[0]),
10864 "=d" (cpu_vendor[1]),
10865@@ -124,7 +124,7 @@ static void get_flags(void)
10866
10867 if (max_intel_level >= 0x00000001 &&
10868 max_intel_level <= 0x0000ffff) {
10869- asm("cpuid"
10870+ asm volatile("cpuid"
10871 : "=a" (tfms),
10872 "=c" (cpu.flags[4]),
10873 "=d" (cpu.flags[0])
10874@@ -136,7 +136,7 @@ static void get_flags(void)
10875 cpu.model += ((tfms >> 16) & 0xf) << 4;
10876 }
10877
10878- asm("cpuid"
10879+ asm volatile("cpuid"
10880 : "=a" (max_amd_level)
10881 : "a" (0x80000000)
10882 : "ebx", "ecx", "edx");
10883@@ -144,7 +144,7 @@ static void get_flags(void)
10884 if (max_amd_level >= 0x80000001 &&
10885 max_amd_level <= 0x8000ffff) {
10886 u32 eax = 0x80000001;
10887- asm("cpuid"
10888+ asm volatile("cpuid"
10889 : "+a" (eax),
10890 "=c" (cpu.flags[6]),
10891 "=d" (cpu.flags[1])
10892@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
10893 u32 ecx = MSR_K7_HWCR;
10894 u32 eax, edx;
10895
10896- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10897+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10898 eax &= ~(1 << 15);
10899- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10900+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10901
10902 get_flags(); /* Make sure it really did something */
10903 err = check_flags();
10904@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
10905 u32 ecx = MSR_VIA_FCR;
10906 u32 eax, edx;
10907
10908- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10909+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10910 eax |= (1<<1)|(1<<7);
10911- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10912+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10913
10914 set_bit(X86_FEATURE_CX8, cpu.flags);
10915 err = check_flags();
10916@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
10917 u32 eax, edx;
10918 u32 level = 1;
10919
10920- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10921- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
10922- asm("cpuid"
10923+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10924+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
10925+ asm volatile("cpuid"
10926 : "+a" (level), "=d" (cpu.flags[0])
10927 : : "ecx", "ebx");
10928- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10929+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10930
10931 err = check_flags();
10932 }
10933diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
10934index 944ce59..87ee37a 100644
10935--- a/arch/x86/boot/header.S
10936+++ b/arch/x86/boot/header.S
10937@@ -401,10 +401,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
10938 # single linked list of
10939 # struct setup_data
10940
10941-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
10942+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
10943
10944 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
10945+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
10946+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
10947+#else
10948 #define VO_INIT_SIZE (VO__end - VO__text)
10949+#endif
10950 #if ZO_INIT_SIZE > VO_INIT_SIZE
10951 #define INIT_SIZE ZO_INIT_SIZE
10952 #else
10953diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
10954index db75d07..8e6d0af 100644
10955--- a/arch/x86/boot/memory.c
10956+++ b/arch/x86/boot/memory.c
10957@@ -19,7 +19,7 @@
10958
10959 static int detect_memory_e820(void)
10960 {
10961- int count = 0;
10962+ unsigned int count = 0;
10963 struct biosregs ireg, oreg;
10964 struct e820entry *desc = boot_params.e820_map;
10965 static struct e820entry buf; /* static so it is zeroed */
10966diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
10967index 11e8c6e..fdbb1ed 100644
10968--- a/arch/x86/boot/video-vesa.c
10969+++ b/arch/x86/boot/video-vesa.c
10970@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
10971
10972 boot_params.screen_info.vesapm_seg = oreg.es;
10973 boot_params.screen_info.vesapm_off = oreg.di;
10974+ boot_params.screen_info.vesapm_size = oreg.cx;
10975 }
10976
10977 /*
10978diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
10979index 43eda28..5ab5fdb 100644
10980--- a/arch/x86/boot/video.c
10981+++ b/arch/x86/boot/video.c
10982@@ -96,7 +96,7 @@ static void store_mode_params(void)
10983 static unsigned int get_entry(void)
10984 {
10985 char entry_buf[4];
10986- int i, len = 0;
10987+ unsigned int i, len = 0;
10988 int key;
10989 unsigned int v;
10990
10991diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
10992index 5b577d5..3c1fed4 100644
10993--- a/arch/x86/crypto/aes-x86_64-asm_64.S
10994+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
10995@@ -8,6 +8,8 @@
10996 * including this sentence is retained in full.
10997 */
10998
10999+#include <asm/alternative-asm.h>
11000+
11001 .extern crypto_ft_tab
11002 .extern crypto_it_tab
11003 .extern crypto_fl_tab
11004@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
11005 je B192; \
11006 leaq 32(r9),r9;
11007
11008+#define ret pax_force_retaddr 0, 1; ret
11009+
11010 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
11011 movq r1,r2; \
11012 movq r3,r4; \
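
The #define ret line above works because .S files are run through the C preprocessor: every literal ret in the remainder of the file expands to the PaX return-address fixup followed by the real return, with no per-site edits. A C analogue of the same token-interposition trick (names illustrative):

#include <stdio.h>

static void retaddr_fixup(void)         /* stand-in for pax_force_retaddr */
{
        puts("fixup runs first");
}

/* From here on, every literal RET in this file is interposed. */
#define RET retaddr_fixup(); return

static int demo(void)
{
        RET 42;         /* expands to: retaddr_fixup(); return 42; */
}

int main(void)
{
        printf("%d\n", demo());
        return 0;
}

Like the asm version, the macro is only safe where a bare statement sequence is legal, which is why it suits the straight-line assembly in these files.
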
11013diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
11014index 3470624..201259d 100644
11015--- a/arch/x86/crypto/aesni-intel_asm.S
11016+++ b/arch/x86/crypto/aesni-intel_asm.S
11017@@ -31,6 +31,7 @@
11018
11019 #include <linux/linkage.h>
11020 #include <asm/inst.h>
11021+#include <asm/alternative-asm.h>
11022
11023 #ifdef __x86_64__
11024 .data
11025@@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
11026 pop %r14
11027 pop %r13
11028 pop %r12
11029+ pax_force_retaddr 0, 1
11030 ret
11031+ENDPROC(aesni_gcm_dec)
11032
11033
11034 /*****************************************************************************
11035@@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
11036 pop %r14
11037 pop %r13
11038 pop %r12
11039+ pax_force_retaddr 0, 1
11040 ret
11041+ENDPROC(aesni_gcm_enc)
11042
11043 #endif
11044
11045@@ -1714,6 +1719,7 @@ _key_expansion_256a:
11046 pxor %xmm1, %xmm0
11047 movaps %xmm0, (TKEYP)
11048 add $0x10, TKEYP
11049+ pax_force_retaddr_bts
11050 ret
11051
11052 .align 4
11053@@ -1738,6 +1744,7 @@ _key_expansion_192a:
11054 shufps $0b01001110, %xmm2, %xmm1
11055 movaps %xmm1, 0x10(TKEYP)
11056 add $0x20, TKEYP
11057+ pax_force_retaddr_bts
11058 ret
11059
11060 .align 4
11061@@ -1757,6 +1764,7 @@ _key_expansion_192b:
11062
11063 movaps %xmm0, (TKEYP)
11064 add $0x10, TKEYP
11065+ pax_force_retaddr_bts
11066 ret
11067
11068 .align 4
11069@@ -1769,6 +1777,7 @@ _key_expansion_256b:
11070 pxor %xmm1, %xmm2
11071 movaps %xmm2, (TKEYP)
11072 add $0x10, TKEYP
11073+ pax_force_retaddr_bts
11074 ret
11075
11076 /*
11077@@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
11078 #ifndef __x86_64__
11079 popl KEYP
11080 #endif
11081+ pax_force_retaddr 0, 1
11082 ret
11083+ENDPROC(aesni_set_key)
11084
11085 /*
11086 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
11087@@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
11088 popl KLEN
11089 popl KEYP
11090 #endif
11091+ pax_force_retaddr 0, 1
11092 ret
11093+ENDPROC(aesni_enc)
11094
11095 /*
11096 * _aesni_enc1: internal ABI
11097@@ -1959,6 +1972,7 @@ _aesni_enc1:
11098 AESENC KEY STATE
11099 movaps 0x70(TKEYP), KEY
11100 AESENCLAST KEY STATE
11101+ pax_force_retaddr_bts
11102 ret
11103
11104 /*
11105@@ -2067,6 +2081,7 @@ _aesni_enc4:
11106 AESENCLAST KEY STATE2
11107 AESENCLAST KEY STATE3
11108 AESENCLAST KEY STATE4
11109+ pax_force_retaddr_bts
11110 ret
11111
11112 /*
11113@@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
11114 popl KLEN
11115 popl KEYP
11116 #endif
11117+ pax_force_retaddr 0, 1
11118 ret
11119+ENDPROC(aesni_dec)
11120
11121 /*
11122 * _aesni_dec1: internal ABI
11123@@ -2146,6 +2163,7 @@ _aesni_dec1:
11124 AESDEC KEY STATE
11125 movaps 0x70(TKEYP), KEY
11126 AESDECLAST KEY STATE
11127+ pax_force_retaddr_bts
11128 ret
11129
11130 /*
11131@@ -2254,6 +2272,7 @@ _aesni_dec4:
11132 AESDECLAST KEY STATE2
11133 AESDECLAST KEY STATE3
11134 AESDECLAST KEY STATE4
11135+ pax_force_retaddr_bts
11136 ret
11137
11138 /*
11139@@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
11140 popl KEYP
11141 popl LEN
11142 #endif
11143+ pax_force_retaddr 0, 1
11144 ret
11145+ENDPROC(aesni_ecb_enc)
11146
11147 /*
11148 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
11149@@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
11150 popl KEYP
11151 popl LEN
11152 #endif
11153+ pax_force_retaddr 0, 1
11154 ret
11155+ENDPROC(aesni_ecb_dec)
11156
11157 /*
11158 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
11159@@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
11160 popl LEN
11161 popl IVP
11162 #endif
11163+ pax_force_retaddr 0, 1
11164 ret
11165+ENDPROC(aesni_cbc_enc)
11166
11167 /*
11168 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
11169@@ -2500,7 +2525,9 @@ ENTRY(aesni_cbc_dec)
11170 popl LEN
11171 popl IVP
11172 #endif
11173+ pax_force_retaddr 0, 1
11174 ret
11175+ENDPROC(aesni_cbc_dec)
11176
11177 #ifdef __x86_64__
11178 .align 16
11179@@ -2526,6 +2553,7 @@ _aesni_inc_init:
11180 mov $1, TCTR_LOW
11181 MOVQ_R64_XMM TCTR_LOW INC
11182 MOVQ_R64_XMM CTR TCTR_LOW
11183+ pax_force_retaddr_bts
11184 ret
11185
11186 /*
11187@@ -2554,6 +2582,7 @@ _aesni_inc:
11188 .Linc_low:
11189 movaps CTR, IV
11190 PSHUFB_XMM BSWAP_MASK IV
11191+ pax_force_retaddr_bts
11192 ret
11193
11194 /*
11195@@ -2614,5 +2643,7 @@ ENTRY(aesni_ctr_enc)
11196 .Lctr_enc_ret:
11197 movups IV, (IVP)
11198 .Lctr_enc_just_ret:
11199+ pax_force_retaddr 0, 1
11200 ret
11201+ENDPROC(aesni_ctr_enc)
11202 #endif
11203diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
11204index 391d245..67f35c2 100644
11205--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
11206+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
11207@@ -20,6 +20,8 @@
11208 *
11209 */
11210
11211+#include <asm/alternative-asm.h>
11212+
11213 .file "blowfish-x86_64-asm.S"
11214 .text
11215
11216@@ -151,9 +153,11 @@ __blowfish_enc_blk:
11217 jnz __enc_xor;
11218
11219 write_block();
11220+ pax_force_retaddr 0, 1
11221 ret;
11222 __enc_xor:
11223 xor_block();
11224+ pax_force_retaddr 0, 1
11225 ret;
11226
11227 .align 8
11228@@ -188,6 +192,7 @@ blowfish_dec_blk:
11229
11230 movq %r11, %rbp;
11231
11232+ pax_force_retaddr 0, 1
11233 ret;
11234
11235 /**********************************************************************
11236@@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
11237
11238 popq %rbx;
11239 popq %rbp;
11240+ pax_force_retaddr 0, 1
11241 ret;
11242
11243 __enc_xor4:
11244@@ -349,6 +355,7 @@ __enc_xor4:
11245
11246 popq %rbx;
11247 popq %rbp;
11248+ pax_force_retaddr 0, 1
11249 ret;
11250
11251 .align 8
11252@@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
11253 popq %rbx;
11254 popq %rbp;
11255
11256+ pax_force_retaddr 0, 1
11257 ret;
11258
11259diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
11260index 0b33743..7a56206 100644
11261--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
11262+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
11263@@ -20,6 +20,8 @@
11264 *
11265 */
11266
11267+#include <asm/alternative-asm.h>
11268+
11269 .file "camellia-x86_64-asm_64.S"
11270 .text
11271
11272@@ -229,12 +231,14 @@ __enc_done:
11273 enc_outunpack(mov, RT1);
11274
11275 movq RRBP, %rbp;
11276+ pax_force_retaddr 0, 1
11277 ret;
11278
11279 __enc_xor:
11280 enc_outunpack(xor, RT1);
11281
11282 movq RRBP, %rbp;
11283+ pax_force_retaddr 0, 1
11284 ret;
11285
11286 .global camellia_dec_blk;
11287@@ -275,6 +279,7 @@ __dec_rounds16:
11288 dec_outunpack();
11289
11290 movq RRBP, %rbp;
11291+ pax_force_retaddr 0, 1
11292 ret;
11293
11294 /**********************************************************************
11295@@ -468,6 +473,7 @@ __enc2_done:
11296
11297 movq RRBP, %rbp;
11298 popq %rbx;
11299+ pax_force_retaddr 0, 1
11300 ret;
11301
11302 __enc2_xor:
11303@@ -475,6 +481,7 @@ __enc2_xor:
11304
11305 movq RRBP, %rbp;
11306 popq %rbx;
11307+ pax_force_retaddr 0, 1
11308 ret;
11309
11310 .global camellia_dec_blk_2way;
11311@@ -517,4 +524,5 @@ __dec2_rounds16:
11312
11313 movq RRBP, %rbp;
11314 movq RXOR, %rbx;
11315+ pax_force_retaddr 0, 1
11316 ret;
11317diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
11318index 15b00ac..2071784 100644
11319--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
11320+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
11321@@ -23,6 +23,8 @@
11322 *
11323 */
11324
11325+#include <asm/alternative-asm.h>
11326+
11327 .file "cast5-avx-x86_64-asm_64.S"
11328
11329 .extern cast_s1
11330@@ -281,6 +283,7 @@ __skip_enc:
11331 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
11332 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
11333
11334+ pax_force_retaddr 0, 1
11335 ret;
11336
11337 .align 16
11338@@ -353,6 +356,7 @@ __dec_tail:
11339 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
11340 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
11341
11342+ pax_force_retaddr 0, 1
11343 ret;
11344
11345 __skip_dec:
11346@@ -392,6 +396,7 @@ cast5_ecb_enc_16way:
11347 vmovdqu RR4, (6*4*4)(%r11);
11348 vmovdqu RL4, (7*4*4)(%r11);
11349
11350+ pax_force_retaddr
11351 ret;
11352
11353 .align 16
11354@@ -427,6 +432,7 @@ cast5_ecb_dec_16way:
11355 vmovdqu RR4, (6*4*4)(%r11);
11356 vmovdqu RL4, (7*4*4)(%r11);
11357
11358+ pax_force_retaddr
11359 ret;
11360
11361 .align 16
11362@@ -479,6 +485,7 @@ cast5_cbc_dec_16way:
11363
11364 popq %r12;
11365
11366+ pax_force_retaddr
11367 ret;
11368
11369 .align 16
11370@@ -555,4 +562,5 @@ cast5_ctr_16way:
11371
11372 popq %r12;
11373
11374+ pax_force_retaddr
11375 ret;
11376diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
11377index 2569d0d..637c289 100644
11378--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
11379+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
11380@@ -23,6 +23,8 @@
11381 *
11382 */
11383
11384+#include <asm/alternative-asm.h>
11385+
11386 #include "glue_helper-asm-avx.S"
11387
11388 .file "cast6-avx-x86_64-asm_64.S"
11389@@ -294,6 +296,7 @@ __cast6_enc_blk8:
11390 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
11391 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
11392
11393+ pax_force_retaddr 0, 1
11394 ret;
11395
11396 .align 8
11397@@ -340,6 +343,7 @@ __cast6_dec_blk8:
11398 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
11399 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
11400
11401+ pax_force_retaddr 0, 1
11402 ret;
11403
11404 .align 8
11405@@ -361,6 +365,7 @@ cast6_ecb_enc_8way:
11406
11407 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11408
11409+ pax_force_retaddr
11410 ret;
11411
11412 .align 8
11413@@ -382,6 +387,7 @@ cast6_ecb_dec_8way:
11414
11415 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11416
11417+ pax_force_retaddr
11418 ret;
11419
11420 .align 8
11421@@ -408,6 +414,7 @@ cast6_cbc_dec_8way:
11422
11423 popq %r12;
11424
11425+ pax_force_retaddr
11426 ret;
11427
11428 .align 8
11429@@ -436,4 +443,5 @@ cast6_ctr_8way:
11430
11431 popq %r12;
11432
11433+ pax_force_retaddr
11434 ret;
11435diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
11436index 6214a9b..1f4fc9a 100644
11437--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
11438+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
11439@@ -1,3 +1,5 @@
11440+#include <asm/alternative-asm.h>
11441+
11442 # enter ECRYPT_encrypt_bytes
11443 .text
11444 .p2align 5
11445@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
11446 add %r11,%rsp
11447 mov %rdi,%rax
11448 mov %rsi,%rdx
11449+ pax_force_retaddr 0, 1
11450 ret
11451 # bytesatleast65:
11452 ._bytesatleast65:
11453@@ -891,6 +894,7 @@ ECRYPT_keysetup:
11454 add %r11,%rsp
11455 mov %rdi,%rax
11456 mov %rsi,%rdx
11457+ pax_force_retaddr
11458 ret
11459 # enter ECRYPT_ivsetup
11460 .text
11461@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
11462 add %r11,%rsp
11463 mov %rdi,%rax
11464 mov %rsi,%rdx
11465+ pax_force_retaddr
11466 ret
11467diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
11468index 02b0e9f..cf4cf5c 100644
11469--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
11470+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
11471@@ -24,6 +24,8 @@
11472 *
11473 */
11474
11475+#include <asm/alternative-asm.h>
11476+
11477 #include "glue_helper-asm-avx.S"
11478
11479 .file "serpent-avx-x86_64-asm_64.S"
11480@@ -618,6 +620,7 @@ __serpent_enc_blk8_avx:
11481 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
11482 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
11483
11484+ pax_force_retaddr
11485 ret;
11486
11487 .align 8
11488@@ -673,6 +676,7 @@ __serpent_dec_blk8_avx:
11489 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
11490 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
11491
11492+ pax_force_retaddr
11493 ret;
11494
11495 .align 8
11496@@ -692,6 +696,7 @@ serpent_ecb_enc_8way_avx:
11497
11498 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11499
11500+ pax_force_retaddr
11501 ret;
11502
11503 .align 8
11504@@ -711,6 +716,7 @@ serpent_ecb_dec_8way_avx:
11505
11506 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
11507
11508+ pax_force_retaddr
11509 ret;
11510
11511 .align 8
11512@@ -730,6 +736,7 @@ serpent_cbc_dec_8way_avx:
11513
11514 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
11515
11516+ pax_force_retaddr
11517 ret;
11518
11519 .align 8
11520@@ -751,4 +758,5 @@ serpent_ctr_8way_avx:
11521
11522 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11523
11524+ pax_force_retaddr
11525 ret;
11526diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
11527index 3ee1ff0..cbc568b 100644
11528--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
11529+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
11530@@ -24,6 +24,8 @@
11531 *
11532 */
11533
11534+#include <asm/alternative-asm.h>
11535+
11536 .file "serpent-sse2-x86_64-asm_64.S"
11537 .text
11538
11539@@ -692,12 +694,14 @@ __serpent_enc_blk_8way:
11540 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
11541 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
11542
11543+ pax_force_retaddr
11544 ret;
11545
11546 __enc_xor8:
11547 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
11548 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
11549
11550+ pax_force_retaddr
11551 ret;
11552
11553 .align 8
11554@@ -755,4 +759,5 @@ serpent_dec_blk_8way:
11555 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
11556 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
11557
11558+ pax_force_retaddr
11559 ret;
11560diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
11561index 49d6987..df66bd4 100644
11562--- a/arch/x86/crypto/sha1_ssse3_asm.S
11563+++ b/arch/x86/crypto/sha1_ssse3_asm.S
11564@@ -28,6 +28,8 @@
11565 * (at your option) any later version.
11566 */
11567
11568+#include <asm/alternative-asm.h>
11569+
11570 #define CTX %rdi // arg1
11571 #define BUF %rsi // arg2
11572 #define CNT %rdx // arg3
11573@@ -104,6 +106,7 @@
11574 pop %r12
11575 pop %rbp
11576 pop %rbx
11577+ pax_force_retaddr 0, 1
11578 ret
11579
11580 .size \name, .-\name
11581diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
11582index ebac16b..8092eb9 100644
11583--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
11584+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
11585@@ -23,6 +23,8 @@
11586 *
11587 */
11588
11589+#include <asm/alternative-asm.h>
11590+
11591 #include "glue_helper-asm-avx.S"
11592
11593 .file "twofish-avx-x86_64-asm_64.S"
11594@@ -283,6 +285,7 @@ __twofish_enc_blk8:
11595 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
11596 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
11597
11598+ pax_force_retaddr 0, 1
11599 ret;
11600
11601 .align 8
11602@@ -324,6 +327,7 @@ __twofish_dec_blk8:
11603 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
11604 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
11605
11606+ pax_force_retaddr 0, 1
11607 ret;
11608
11609 .align 8
11610@@ -345,6 +349,7 @@ twofish_ecb_enc_8way:
11611
11612 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
11613
11614+ pax_force_retaddr 0, 1
11615 ret;
11616
11617 .align 8
11618@@ -366,6 +371,7 @@ twofish_ecb_dec_8way:
11619
11620 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11621
11622+ pax_force_retaddr 0, 1
11623 ret;
11624
11625 .align 8
11626@@ -392,6 +398,7 @@ twofish_cbc_dec_8way:
11627
11628 popq %r12;
11629
11630+ pax_force_retaddr 0, 1
11631 ret;
11632
11633 .align 8
11634@@ -420,4 +427,5 @@ twofish_ctr_8way:
11635
11636 popq %r12;
11637
11638+ pax_force_retaddr 0, 1
11639 ret;
11640diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
11641index 5b012a2..36d5364 100644
11642--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
11643+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
11644@@ -20,6 +20,8 @@
11645 *
11646 */
11647
11648+#include <asm/alternative-asm.h>
11649+
11650 .file "twofish-x86_64-asm-3way.S"
11651 .text
11652
11653@@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
11654 popq %r13;
11655 popq %r14;
11656 popq %r15;
11657+ pax_force_retaddr 0, 1
11658 ret;
11659
11660 __enc_xor3:
11661@@ -271,6 +274,7 @@ __enc_xor3:
11662 popq %r13;
11663 popq %r14;
11664 popq %r15;
11665+ pax_force_retaddr 0, 1
11666 ret;
11667
11668 .global twofish_dec_blk_3way
11669@@ -312,5 +316,6 @@ twofish_dec_blk_3way:
11670 popq %r13;
11671 popq %r14;
11672 popq %r15;
11673+ pax_force_retaddr 0, 1
11674 ret;
11675
11676diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
11677index 7bcf3fc..f53832f 100644
11678--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
11679+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
11680@@ -21,6 +21,7 @@
11681 .text
11682
11683 #include <asm/asm-offsets.h>
11684+#include <asm/alternative-asm.h>
11685
11686 #define a_offset 0
11687 #define b_offset 4
11688@@ -268,6 +269,7 @@ twofish_enc_blk:
11689
11690 popq R1
11691 movq $1,%rax
11692+ pax_force_retaddr 0, 1
11693 ret
11694
11695 twofish_dec_blk:
11696@@ -319,4 +321,5 @@ twofish_dec_blk:
11697
11698 popq R1
11699 movq $1,%rax
11700+ pax_force_retaddr 0, 1
11701 ret
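
Every crypto hunk above follows the same pattern: a pax_force_retaddr is placed in front of each ret in the hand-written assembly. The PaX KERNEXEC gcc plugin can only instrument compiler-generated code, so .S files have to be annotated by hand, one macro per return site. A minimal C sketch of the underlying idea (illustrative only, not code from the patch):

    /*
     * Kernel text lives at negative addresses (bit 63 set), so forcing
     * bit 63 on the saved return address is a no-op for a legitimate
     * return, while an overwritten return target pointing at userland
     * (bit 63 clear) becomes non-canonical and faults instead of
     * executing.
     */
    static inline unsigned long kernexec_tag_retaddr(unsigned long ret)
    {
        return ret | (1UL << 63);   /* what "btsq $63,(%rsp)" does in place */
    }
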
11702diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
11703index a703af1..f5b9c36 100644
11704--- a/arch/x86/ia32/ia32_aout.c
11705+++ b/arch/x86/ia32/ia32_aout.c
11706@@ -159,6 +159,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
11707 unsigned long dump_start, dump_size;
11708 struct user32 dump;
11709
11710+ memset(&dump, 0, sizeof(dump));
11711+
11712 fs = get_fs();
11713 set_fs(KERNEL_DS);
11714 has_dumped = 1;
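
The ia32_aout.c change is an information-leak fix: struct user32 dump sits on the kernel stack and is only partially filled before being written out to the core file, so padding and any never-assigned fields would otherwise carry stale kernel stack bytes into a user-readable file. Zeroing the structure up front closes the leak. The same pattern in a generic, self-contained sketch (struct and names made up):

    #include <string.h>

    struct report {            /* stand-in for a partially-filled struct */
        int id;
        char reserved[60];     /* never written below */
    };

    static void fill_report(struct report *r, int id)
    {
        memset(r, 0, sizeof(*r));  /* scrub stack garbage first            */
        r->id = id;                /* partial fill is now safe to copy out */
    }
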
11715diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
11716index a1daf4a..f8c4537 100644
11717--- a/arch/x86/ia32/ia32_signal.c
11718+++ b/arch/x86/ia32/ia32_signal.c
11719@@ -348,7 +348,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
11720 sp -= frame_size;
11721 /* Align the stack pointer according to the i386 ABI,
11722 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
11723- sp = ((sp + 4) & -16ul) - 4;
11724+ sp = ((sp - 12) & -16ul) - 4;
11725 return (void __user *) sp;
11726 }
11727
11728@@ -406,7 +406,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
11729 * These are actually not used anymore, but left because some
11730 * gdb versions depend on them as a marker.
11731 */
11732- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
11733+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
11734 } put_user_catch(err);
11735
11736 if (err)
11737@@ -448,7 +448,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
11738 0xb8,
11739 __NR_ia32_rt_sigreturn,
11740 0x80cd,
11741- 0,
11742+ 0
11743 };
11744
11745 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
11746@@ -471,16 +471,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
11747
11748 if (ka->sa.sa_flags & SA_RESTORER)
11749 restorer = ka->sa.sa_restorer;
11750+ else if (current->mm->context.vdso)
11751+ /* Return stub is in 32bit vsyscall page */
11752+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
11753 else
11754- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
11755- rt_sigreturn);
11756+ restorer = &frame->retcode;
11757 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
11758
11759 /*
11760 * Not actually used anymore, but left because some gdb
11761 * versions need it.
11762 */
11763- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
11764+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
11765 } put_user_catch(err);
11766
11767 err |= copy_siginfo_to_user32(&frame->info, info);
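
Two things happen in the ia32_signal.c hunks. The sigframe placement ((sp + 4) & -16ul) - 4 becomes ((sp - 12) & -16ul) - 4: the operands differ by exactly 16, so both keep the i386 ABI entry invariant ((sp + 4) & 15) == 0, and the new form simply lands the frame 16 bytes lower, strictly below the already-adjusted stack pointer. Separately, when a task has no vdso mapped, the rt_sigreturn restorer now falls back to &frame->retcode on the user stack instead of unconditionally reading VDSO32_SYMBOL(). The alignment claim is easy to check mechanically (a throwaway test, not from the patch; -16ull is used so the mask is 64-bit on any host):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        for (uint64_t sp = 0; sp < 64; sp++) {
            uint64_t before = ((sp + 4) & -16ull) - 4;
            uint64_t after  = ((sp - 12) & -16ull) - 4;
            assert(((before + 4) & 15) == 0);  /* i386 ABI entry alignment  */
            assert(((after  + 4) & 15) == 0);  /* preserved by the change   */
            assert(after == before - 16);      /* frame moved 16 bytes down */
        }
        return 0;
    }
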
11768diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
11769index 142c4ce..19b683f 100644
11770--- a/arch/x86/ia32/ia32entry.S
11771+++ b/arch/x86/ia32/ia32entry.S
11772@@ -15,8 +15,10 @@
11773 #include <asm/irqflags.h>
11774 #include <asm/asm.h>
11775 #include <asm/smap.h>
11776+#include <asm/pgtable.h>
11777 #include <linux/linkage.h>
11778 #include <linux/err.h>
11779+#include <asm/alternative-asm.h>
11780
11781 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
11782 #include <linux/elf-em.h>
11783@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
11784 ENDPROC(native_irq_enable_sysexit)
11785 #endif
11786
11787+ .macro pax_enter_kernel_user
11788+ pax_set_fptr_mask
11789+#ifdef CONFIG_PAX_MEMORY_UDEREF
11790+ call pax_enter_kernel_user
11791+#endif
11792+ .endm
11793+
11794+ .macro pax_exit_kernel_user
11795+#ifdef CONFIG_PAX_MEMORY_UDEREF
11796+ call pax_exit_kernel_user
11797+#endif
11798+#ifdef CONFIG_PAX_RANDKSTACK
11799+ pushq %rax
11800+ pushq %r11
11801+ call pax_randomize_kstack
11802+ popq %r11
11803+ popq %rax
11804+#endif
11805+ .endm
11806+
11807+.macro pax_erase_kstack
11808+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11809+ call pax_erase_kstack
11810+#endif
11811+.endm
11812+
11813 /*
11814 * 32bit SYSENTER instruction entry.
11815 *
11816@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
11817 CFI_REGISTER rsp,rbp
11818 SWAPGS_UNSAFE_STACK
11819 movq PER_CPU_VAR(kernel_stack), %rsp
11820- addq $(KERNEL_STACK_OFFSET),%rsp
11821- /*
11822- * No need to follow this irqs on/off section: the syscall
11823- * disabled irqs, here we enable it straight after entry:
11824- */
11825- ENABLE_INTERRUPTS(CLBR_NONE)
11826 movl %ebp,%ebp /* zero extension */
11827 pushq_cfi $__USER32_DS
11828 /*CFI_REL_OFFSET ss,0*/
11829@@ -135,24 +157,44 @@ ENTRY(ia32_sysenter_target)
11830 CFI_REL_OFFSET rsp,0
11831 pushfq_cfi
11832 /*CFI_REL_OFFSET rflags,0*/
11833- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
11834- CFI_REGISTER rip,r10
11835+ orl $X86_EFLAGS_IF,(%rsp)
11836+ GET_THREAD_INFO(%r11)
11837+ movl TI_sysenter_return(%r11), %r11d
11838+ CFI_REGISTER rip,r11
11839 pushq_cfi $__USER32_CS
11840 /*CFI_REL_OFFSET cs,0*/
11841 movl %eax, %eax
11842- pushq_cfi %r10
11843+ pushq_cfi %r11
11844 CFI_REL_OFFSET rip,0
11845 pushq_cfi %rax
11846 cld
11847 SAVE_ARGS 0,1,0
11848+ pax_enter_kernel_user
11849+
11850+#ifdef CONFIG_PAX_RANDKSTACK
11851+ pax_erase_kstack
11852+#endif
11853+
11854+ /*
11855+ * No need to follow this irqs on/off section: the syscall
11856+ * disabled irqs, here we enable it straight after entry:
11857+ */
11858+ ENABLE_INTERRUPTS(CLBR_NONE)
11859 /* no need to do an access_ok check here because rbp has been
11860 32bit zero extended */
11861+
11862+#ifdef CONFIG_PAX_MEMORY_UDEREF
11863+ mov $PAX_USER_SHADOW_BASE,%r11
11864+ add %r11,%rbp
11865+#endif
11866+
11867 ASM_STAC
11868 1: movl (%rbp),%ebp
11869 _ASM_EXTABLE(1b,ia32_badarg)
11870 ASM_CLAC
11871- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11872- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11873+ GET_THREAD_INFO(%r11)
11874+ orl $TS_COMPAT,TI_status(%r11)
11875+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
11876 CFI_REMEMBER_STATE
11877 jnz sysenter_tracesys
11878 cmpq $(IA32_NR_syscalls-1),%rax
11879@@ -162,12 +204,15 @@ sysenter_do_call:
11880 sysenter_dispatch:
11881 call *ia32_sys_call_table(,%rax,8)
11882 movq %rax,RAX-ARGOFFSET(%rsp)
11883+ GET_THREAD_INFO(%r11)
11884 DISABLE_INTERRUPTS(CLBR_NONE)
11885 TRACE_IRQS_OFF
11886- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11887+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
11888 jnz sysexit_audit
11889 sysexit_from_sys_call:
11890- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11891+ pax_exit_kernel_user
11892+ pax_erase_kstack
11893+ andl $~TS_COMPAT,TI_status(%r11)
11894 /* clear IF, that popfq doesn't enable interrupts early */
11895 andl $~0x200,EFLAGS-R11(%rsp)
11896 movl RIP-R11(%rsp),%edx /* User %eip */
11897@@ -193,6 +238,9 @@ sysexit_from_sys_call:
11898 movl %eax,%esi /* 2nd arg: syscall number */
11899 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
11900 call __audit_syscall_entry
11901+
11902+ pax_erase_kstack
11903+
11904 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
11905 cmpq $(IA32_NR_syscalls-1),%rax
11906 ja ia32_badsys
11907@@ -204,7 +252,7 @@ sysexit_from_sys_call:
11908 .endm
11909
11910 .macro auditsys_exit exit
11911- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11912+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
11913 jnz ia32_ret_from_sys_call
11914 TRACE_IRQS_ON
11915 ENABLE_INTERRUPTS(CLBR_NONE)
11916@@ -215,11 +263,12 @@ sysexit_from_sys_call:
11917 1: setbe %al /* 1 if error, 0 if not */
11918 movzbl %al,%edi /* zero-extend that into %edi */
11919 call __audit_syscall_exit
11920+ GET_THREAD_INFO(%r11)
11921 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
11922 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
11923 DISABLE_INTERRUPTS(CLBR_NONE)
11924 TRACE_IRQS_OFF
11925- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11926+ testl %edi,TI_flags(%r11)
11927 jz \exit
11928 CLEAR_RREGS -ARGOFFSET
11929 jmp int_with_check
11930@@ -237,7 +286,7 @@ sysexit_audit:
11931
11932 sysenter_tracesys:
11933 #ifdef CONFIG_AUDITSYSCALL
11934- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11935+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
11936 jz sysenter_auditsys
11937 #endif
11938 SAVE_REST
11939@@ -249,6 +298,9 @@ sysenter_tracesys:
11940 RESTORE_REST
11941 cmpq $(IA32_NR_syscalls-1),%rax
11942 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
11943+
11944+ pax_erase_kstack
11945+
11946 jmp sysenter_do_call
11947 CFI_ENDPROC
11948 ENDPROC(ia32_sysenter_target)
11949@@ -276,19 +328,25 @@ ENDPROC(ia32_sysenter_target)
11950 ENTRY(ia32_cstar_target)
11951 CFI_STARTPROC32 simple
11952 CFI_SIGNAL_FRAME
11953- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
11954+ CFI_DEF_CFA rsp,0
11955 CFI_REGISTER rip,rcx
11956 /*CFI_REGISTER rflags,r11*/
11957 SWAPGS_UNSAFE_STACK
11958 movl %esp,%r8d
11959 CFI_REGISTER rsp,r8
11960 movq PER_CPU_VAR(kernel_stack),%rsp
11961+ SAVE_ARGS 8*6,0,0
11962+ pax_enter_kernel_user
11963+
11964+#ifdef CONFIG_PAX_RANDKSTACK
11965+ pax_erase_kstack
11966+#endif
11967+
11968 /*
11969 * No need to follow this irqs on/off section: the syscall
11970 * disabled irqs and here we enable it straight after entry:
11971 */
11972 ENABLE_INTERRUPTS(CLBR_NONE)
11973- SAVE_ARGS 8,0,0
11974 movl %eax,%eax /* zero extension */
11975 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
11976 movq %rcx,RIP-ARGOFFSET(%rsp)
11977@@ -304,12 +362,19 @@ ENTRY(ia32_cstar_target)
11978 /* no need to do an access_ok check here because r8 has been
11979 32bit zero extended */
11980 /* hardware stack frame is complete now */
11981+
11982+#ifdef CONFIG_PAX_MEMORY_UDEREF
11983+ mov $PAX_USER_SHADOW_BASE,%r11
11984+ add %r11,%r8
11985+#endif
11986+
11987 ASM_STAC
11988 1: movl (%r8),%r9d
11989 _ASM_EXTABLE(1b,ia32_badarg)
11990 ASM_CLAC
11991- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11992- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11993+ GET_THREAD_INFO(%r11)
11994+ orl $TS_COMPAT,TI_status(%r11)
11995+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
11996 CFI_REMEMBER_STATE
11997 jnz cstar_tracesys
11998 cmpq $IA32_NR_syscalls-1,%rax
11999@@ -319,12 +384,15 @@ cstar_do_call:
12000 cstar_dispatch:
12001 call *ia32_sys_call_table(,%rax,8)
12002 movq %rax,RAX-ARGOFFSET(%rsp)
12003+ GET_THREAD_INFO(%r11)
12004 DISABLE_INTERRUPTS(CLBR_NONE)
12005 TRACE_IRQS_OFF
12006- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12007+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
12008 jnz sysretl_audit
12009 sysretl_from_sys_call:
12010- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12011+ pax_exit_kernel_user
12012+ pax_erase_kstack
12013+ andl $~TS_COMPAT,TI_status(%r11)
12014 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
12015 movl RIP-ARGOFFSET(%rsp),%ecx
12016 CFI_REGISTER rip,rcx
12017@@ -352,7 +420,7 @@ sysretl_audit:
12018
12019 cstar_tracesys:
12020 #ifdef CONFIG_AUDITSYSCALL
12021- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12022+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
12023 jz cstar_auditsys
12024 #endif
12025 xchgl %r9d,%ebp
12026@@ -366,6 +434,9 @@ cstar_tracesys:
12027 xchgl %ebp,%r9d
12028 cmpq $(IA32_NR_syscalls-1),%rax
12029 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
12030+
12031+ pax_erase_kstack
12032+
12033 jmp cstar_do_call
12034 END(ia32_cstar_target)
12035
12036@@ -407,19 +478,26 @@ ENTRY(ia32_syscall)
12037 CFI_REL_OFFSET rip,RIP-RIP
12038 PARAVIRT_ADJUST_EXCEPTION_FRAME
12039 SWAPGS
12040- /*
12041- * No need to follow this irqs on/off section: the syscall
12042- * disabled irqs and here we enable it straight after entry:
12043- */
12044- ENABLE_INTERRUPTS(CLBR_NONE)
12045 movl %eax,%eax
12046 pushq_cfi %rax
12047 cld
12048 /* note the registers are not zero extended to the sf.
12049 this could be a problem. */
12050 SAVE_ARGS 0,1,0
12051- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12052- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12053+ pax_enter_kernel_user
12054+
12055+#ifdef CONFIG_PAX_RANDKSTACK
12056+ pax_erase_kstack
12057+#endif
12058+
12059+ /*
12060+ * No need to follow this irqs on/off section: the syscall
12061+ * disabled irqs and here we enable it straight after entry:
12062+ */
12063+ ENABLE_INTERRUPTS(CLBR_NONE)
12064+ GET_THREAD_INFO(%r11)
12065+ orl $TS_COMPAT,TI_status(%r11)
12066+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
12067 jnz ia32_tracesys
12068 cmpq $(IA32_NR_syscalls-1),%rax
12069 ja ia32_badsys
12070@@ -442,6 +520,9 @@ ia32_tracesys:
12071 RESTORE_REST
12072 cmpq $(IA32_NR_syscalls-1),%rax
12073 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
12074+
12075+ pax_erase_kstack
12076+
12077 jmp ia32_do_call
12078 END(ia32_syscall)
12079
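
The ia32entry.S rework wires all three 32-bit entry paths (sysenter, the AMD syscall/cstar path, and int 0x80) into the PaX hooks: pax_enter_kernel_user / pax_exit_kernel_user for PAX_MEMORY_UDEREF, pax_erase_kstack for PAX_MEMORY_STACKLEAK, and pax_randomize_kstack for PAX_RANDKSTACK. It also drops the KERNEL_STACK_OFFSET shortcut, switches thread-info access from %rsp arithmetic to an explicit GET_THREAD_INFO(%r11), and, before the single user-memory read each path performs, rebases the user pointer into the UDEREF shadow area. A rough C rendering of that last step (the constant is illustrative; the real value comes from the PaX headers):

    #define PAX_USER_SHADOW_BASE_SKETCH 0x0000100000000000UL  /* illustrative */

    /* Mirrors "mov $PAX_USER_SHADOW_BASE,%r11; add %r11,%rbp": user
     * pointers are rebased into a shadow alias of userland before the
     * kernel touches them, so a stray kernel dereference through a raw,
     * unadjusted user pointer faults under UDEREF. */
    static inline unsigned long uderef_adjust(unsigned long user_addr)
    {
        return user_addr + PAX_USER_SHADOW_BASE_SKETCH;
    }
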
12080diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
12081index d0b689b..6811ddc 100644
12082--- a/arch/x86/ia32/sys_ia32.c
12083+++ b/arch/x86/ia32/sys_ia32.c
12084@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
12085 */
12086 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
12087 {
12088- typeof(ubuf->st_uid) uid = 0;
12089- typeof(ubuf->st_gid) gid = 0;
12090+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
12091+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
12092 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
12093 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
12094 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
12095@@ -303,7 +303,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
12096 mm_segment_t old_fs = get_fs();
12097
12098 set_fs(KERNEL_DS);
12099- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
12100+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
12101 set_fs(old_fs);
12102 if (put_compat_timespec(&t, interval))
12103 return -EFAULT;
12104@@ -313,13 +313,13 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
12105 asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
12106 compat_size_t sigsetsize)
12107 {
12108- sigset_t s;
12109+ sigset_t s = { };
12110 compat_sigset_t s32;
12111 int ret;
12112 mm_segment_t old_fs = get_fs();
12113
12114 set_fs(KERNEL_DS);
12115- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
12116+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
12117 set_fs(old_fs);
12118 if (!ret) {
12119 switch (_NSIG_WORDS) {
12120@@ -344,7 +344,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
12121 if (copy_siginfo_from_user32(&info, uinfo))
12122 return -EFAULT;
12123 set_fs(KERNEL_DS);
12124- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
12125+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
12126 set_fs(old_fs);
12127 return ret;
12128 }
12129@@ -376,7 +376,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
12130 return -EFAULT;
12131
12132 set_fs(KERNEL_DS);
12133- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
12134+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
12135 count);
12136 set_fs(old_fs);
12137
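
The sys_ia32.c hunks are annotation and hygiene fixes rather than logic changes: the __force_user casts mark the deliberate address-space cast inside the set_fs(KERNEL_DS) pattern for the static checker, sigset_t s = { } zero-initializes a stack buffer that is later copied out, and typeof(ubuf->st_uid) becomes typeof(((struct stat64 *)0)->st_uid), naming the member's type without going through the __user-qualified pointer. The typeof idiom relies on its operand never being evaluated (the same trick offsetof uses); a standalone sketch:

    struct pkt {                 /* illustrative struct, not from the patch */
        unsigned short uid;
        unsigned int len;
    };

    /* No object and no runtime dereference is needed: the null-pointer
     * member access only exists so typeof can read its type. */
    typeof(((struct pkt *)0)->uid) uid_copy = 0;   /* an unsigned short */
    typeof(((struct pkt *)0)->len) len_copy = 0;   /* an unsigned int   */
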
12138diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
12139index 372231c..a5aa1a1 100644
12140--- a/arch/x86/include/asm/alternative-asm.h
12141+++ b/arch/x86/include/asm/alternative-asm.h
12142@@ -18,6 +18,45 @@
12143 .endm
12144 #endif
12145
12146+#ifdef KERNEXEC_PLUGIN
12147+ .macro pax_force_retaddr_bts rip=0
12148+ btsq $63,\rip(%rsp)
12149+ .endm
12150+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
12151+ .macro pax_force_retaddr rip=0, reload=0
12152+ btsq $63,\rip(%rsp)
12153+ .endm
12154+ .macro pax_force_fptr ptr
12155+ btsq $63,\ptr
12156+ .endm
12157+ .macro pax_set_fptr_mask
12158+ .endm
12159+#endif
12160+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
12161+ .macro pax_force_retaddr rip=0, reload=0
12162+ .if \reload
12163+ pax_set_fptr_mask
12164+ .endif
12165+ orq %r10,\rip(%rsp)
12166+ .endm
12167+ .macro pax_force_fptr ptr
12168+ orq %r10,\ptr
12169+ .endm
12170+ .macro pax_set_fptr_mask
12171+ movabs $0x8000000000000000,%r10
12172+ .endm
12173+#endif
12174+#else
12175+ .macro pax_force_retaddr rip=0, reload=0
12176+ .endm
12177+ .macro pax_force_fptr ptr
12178+ .endm
12179+ .macro pax_force_retaddr_bts rip=0
12180+ .endm
12181+ .macro pax_set_fptr_mask
12182+ .endm
12183+#endif
12184+
12185 .macro altinstruction_entry orig alt feature orig_len alt_len
12186 .long \orig - .
12187 .long \alt - .
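
This alternative-asm.h hunk is where the pax_force_retaddr family used throughout the crypto files above is defined, with one expansion per KERNEXEC plugin method. The BTS method sets bit 63 directly with btsq $63,\rip(%rsp); the OR method keeps the 0x8000000000000000 mask resident in %r10 (loaded by pax_set_fptr_mask) and ORs it in, which is cheaper per return site at the cost of a reserved scratch register, with the reload argument re-running pax_set_fptr_mask after calls that may have clobbered %r10. Without the plugin, every macro expands to nothing. The two methods side by side, as C (illustrative, not from the patch):

    #define KERNEXEC_MASK 0x8000000000000000UL

    /* BTS method: one instruction per return site, no reserved register. */
    static inline void force_retaddr_bts(unsigned long *slot)
    {
        *slot |= (1UL << 63);   /* btsq $63,(%rsp) */
    }

    /* OR method: the mask is pre-loaded once into a scratch register,
     * then each return site is a single memory OR against it. */
    static inline void force_retaddr_or(unsigned long *slot, unsigned long mask)
    {
        *slot |= mask;          /* orq %r10,(%rsp), %r10 holding KERNEXEC_MASK */
    }
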
12188diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
12189index 58ed6d9..f1cbe58 100644
12190--- a/arch/x86/include/asm/alternative.h
12191+++ b/arch/x86/include/asm/alternative.h
12192@@ -105,7 +105,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
12193 ".pushsection .discard,\"aw\",@progbits\n" \
12194 DISCARD_ENTRY(1) \
12195 ".popsection\n" \
12196- ".pushsection .altinstr_replacement, \"ax\"\n" \
12197+ ".pushsection .altinstr_replacement, \"a\"\n" \
12198 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
12199 ".popsection"
12200
12201@@ -119,7 +119,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
12202 DISCARD_ENTRY(1) \
12203 DISCARD_ENTRY(2) \
12204 ".popsection\n" \
12205- ".pushsection .altinstr_replacement, \"ax\"\n" \
12206+ ".pushsection .altinstr_replacement, \"a\"\n" \
12207 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
12208 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
12209 ".popsection"
12210diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
12211index 3388034..050f0b9 100644
12212--- a/arch/x86/include/asm/apic.h
12213+++ b/arch/x86/include/asm/apic.h
12214@@ -44,7 +44,7 @@ static inline void generic_apic_probe(void)
12215
12216 #ifdef CONFIG_X86_LOCAL_APIC
12217
12218-extern unsigned int apic_verbosity;
12219+extern int apic_verbosity;
12220 extern int local_apic_timer_c2_ok;
12221
12222 extern int disable_apic;
12223diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
12224index 20370c6..a2eb9b0 100644
12225--- a/arch/x86/include/asm/apm.h
12226+++ b/arch/x86/include/asm/apm.h
12227@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
12228 __asm__ __volatile__(APM_DO_ZERO_SEGS
12229 "pushl %%edi\n\t"
12230 "pushl %%ebp\n\t"
12231- "lcall *%%cs:apm_bios_entry\n\t"
12232+ "lcall *%%ss:apm_bios_entry\n\t"
12233 "setc %%al\n\t"
12234 "popl %%ebp\n\t"
12235 "popl %%edi\n\t"
12236@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
12237 __asm__ __volatile__(APM_DO_ZERO_SEGS
12238 "pushl %%edi\n\t"
12239 "pushl %%ebp\n\t"
12240- "lcall *%%cs:apm_bios_entry\n\t"
12241+ "lcall *%%ss:apm_bios_entry\n\t"
12242 "setc %%bl\n\t"
12243 "popl %%ebp\n\t"
12244 "popl %%edi\n\t"
12245diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
12246index 722aa3b..3a0bb27 100644
12247--- a/arch/x86/include/asm/atomic.h
12248+++ b/arch/x86/include/asm/atomic.h
12249@@ -22,7 +22,18 @@
12250 */
12251 static inline int atomic_read(const atomic_t *v)
12252 {
12253- return (*(volatile int *)&(v)->counter);
12254+ return (*(volatile const int *)&(v)->counter);
12255+}
12256+
12257+/**
12258+ * atomic_read_unchecked - read atomic variable
12259+ * @v: pointer of type atomic_unchecked_t
12260+ *
12261+ * Atomically reads the value of @v.
12262+ */
12263+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
12264+{
12265+ return (*(volatile const int *)&(v)->counter);
12266 }
12267
12268 /**
12269@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
12270 }
12271
12272 /**
12273+ * atomic_set_unchecked - set atomic variable
12274+ * @v: pointer of type atomic_unchecked_t
12275+ * @i: required value
12276+ *
12277+ * Atomically sets the value of @v to @i.
12278+ */
12279+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
12280+{
12281+ v->counter = i;
12282+}
12283+
12284+/**
12285 * atomic_add - add integer to atomic variable
12286 * @i: integer value to add
12287 * @v: pointer of type atomic_t
12288@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
12289 */
12290 static inline void atomic_add(int i, atomic_t *v)
12291 {
12292- asm volatile(LOCK_PREFIX "addl %1,%0"
12293+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
12294+
12295+#ifdef CONFIG_PAX_REFCOUNT
12296+ "jno 0f\n"
12297+ LOCK_PREFIX "subl %1,%0\n"
12298+ "int $4\n0:\n"
12299+ _ASM_EXTABLE(0b, 0b)
12300+#endif
12301+
12302+ : "+m" (v->counter)
12303+ : "ir" (i));
12304+}
12305+
12306+/**
12307+ * atomic_add_unchecked - add integer to atomic variable
12308+ * @i: integer value to add
12309+ * @v: pointer of type atomic_unchecked_t
12310+ *
12311+ * Atomically adds @i to @v.
12312+ */
12313+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
12314+{
12315+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
12316 : "+m" (v->counter)
12317 : "ir" (i));
12318 }
12319@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
12320 */
12321 static inline void atomic_sub(int i, atomic_t *v)
12322 {
12323- asm volatile(LOCK_PREFIX "subl %1,%0"
12324+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
12325+
12326+#ifdef CONFIG_PAX_REFCOUNT
12327+ "jno 0f\n"
12328+ LOCK_PREFIX "addl %1,%0\n"
12329+ "int $4\n0:\n"
12330+ _ASM_EXTABLE(0b, 0b)
12331+#endif
12332+
12333+ : "+m" (v->counter)
12334+ : "ir" (i));
12335+}
12336+
12337+/**
12338+ * atomic_sub_unchecked - subtract integer from atomic variable
12339+ * @i: integer value to subtract
12340+ * @v: pointer of type atomic_unchecked_t
12341+ *
12342+ * Atomically subtracts @i from @v.
12343+ */
12344+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
12345+{
12346+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
12347 : "+m" (v->counter)
12348 : "ir" (i));
12349 }
12350@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
12351 {
12352 unsigned char c;
12353
12354- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
12355+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
12356+
12357+#ifdef CONFIG_PAX_REFCOUNT
12358+ "jno 0f\n"
12359+ LOCK_PREFIX "addl %2,%0\n"
12360+ "int $4\n0:\n"
12361+ _ASM_EXTABLE(0b, 0b)
12362+#endif
12363+
12364+ "sete %1\n"
12365 : "+m" (v->counter), "=qm" (c)
12366 : "ir" (i) : "memory");
12367 return c;
12368@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
12369 */
12370 static inline void atomic_inc(atomic_t *v)
12371 {
12372- asm volatile(LOCK_PREFIX "incl %0"
12373+ asm volatile(LOCK_PREFIX "incl %0\n"
12374+
12375+#ifdef CONFIG_PAX_REFCOUNT
12376+ "jno 0f\n"
12377+ LOCK_PREFIX "decl %0\n"
12378+ "int $4\n0:\n"
12379+ _ASM_EXTABLE(0b, 0b)
12380+#endif
12381+
12382+ : "+m" (v->counter));
12383+}
12384+
12385+/**
12386+ * atomic_inc_unchecked - increment atomic variable
12387+ * @v: pointer of type atomic_unchecked_t
12388+ *
12389+ * Atomically increments @v by 1.
12390+ */
12391+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
12392+{
12393+ asm volatile(LOCK_PREFIX "incl %0\n"
12394 : "+m" (v->counter));
12395 }
12396
12397@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
12398 */
12399 static inline void atomic_dec(atomic_t *v)
12400 {
12401- asm volatile(LOCK_PREFIX "decl %0"
12402+ asm volatile(LOCK_PREFIX "decl %0\n"
12403+
12404+#ifdef CONFIG_PAX_REFCOUNT
12405+ "jno 0f\n"
12406+ LOCK_PREFIX "incl %0\n"
12407+ "int $4\n0:\n"
12408+ _ASM_EXTABLE(0b, 0b)
12409+#endif
12410+
12411+ : "+m" (v->counter));
12412+}
12413+
12414+/**
12415+ * atomic_dec_unchecked - decrement atomic variable
12416+ * @v: pointer of type atomic_unchecked_t
12417+ *
12418+ * Atomically decrements @v by 1.
12419+ */
12420+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
12421+{
12422+ asm volatile(LOCK_PREFIX "decl %0\n"
12423 : "+m" (v->counter));
12424 }
12425
12426@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
12427 {
12428 unsigned char c;
12429
12430- asm volatile(LOCK_PREFIX "decl %0; sete %1"
12431+ asm volatile(LOCK_PREFIX "decl %0\n"
12432+
12433+#ifdef CONFIG_PAX_REFCOUNT
12434+ "jno 0f\n"
12435+ LOCK_PREFIX "incl %0\n"
12436+ "int $4\n0:\n"
12437+ _ASM_EXTABLE(0b, 0b)
12438+#endif
12439+
12440+ "sete %1\n"
12441 : "+m" (v->counter), "=qm" (c)
12442 : : "memory");
12443 return c != 0;
12444@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
12445 {
12446 unsigned char c;
12447
12448- asm volatile(LOCK_PREFIX "incl %0; sete %1"
12449+ asm volatile(LOCK_PREFIX "incl %0\n"
12450+
12451+#ifdef CONFIG_PAX_REFCOUNT
12452+ "jno 0f\n"
12453+ LOCK_PREFIX "decl %0\n"
12454+ "int $4\n0:\n"
12455+ _ASM_EXTABLE(0b, 0b)
12456+#endif
12457+
12458+ "sete %1\n"
12459+ : "+m" (v->counter), "=qm" (c)
12460+ : : "memory");
12461+ return c != 0;
12462+}
12463+
12464+/**
12465+ * atomic_inc_and_test_unchecked - increment and test
12466+ * @v: pointer of type atomic_unchecked_t
12467+ *
12468+ * Atomically increments @v by 1
12469+ * and returns true if the result is zero, or false for all
12470+ * other cases.
12471+ */
12472+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
12473+{
12474+ unsigned char c;
12475+
12476+ asm volatile(LOCK_PREFIX "incl %0\n"
12477+ "sete %1\n"
12478 : "+m" (v->counter), "=qm" (c)
12479 : : "memory");
12480 return c != 0;
12481@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
12482 {
12483 unsigned char c;
12484
12485- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
12486+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
12487+
12488+#ifdef CONFIG_PAX_REFCOUNT
12489+ "jno 0f\n"
12490+ LOCK_PREFIX "subl %2,%0\n"
12491+ "int $4\n0:\n"
12492+ _ASM_EXTABLE(0b, 0b)
12493+#endif
12494+
12495+ "sets %1\n"
12496 : "+m" (v->counter), "=qm" (c)
12497 : "ir" (i) : "memory");
12498 return c;
12499@@ -172,6 +334,18 @@ static inline int atomic_add_negative(int i, atomic_t *v)
12500 */
12501 static inline int atomic_add_return(int i, atomic_t *v)
12502 {
12503+ return i + xadd_check_overflow(&v->counter, i);
12504+}
12505+
12506+/**
12507+ * atomic_add_return_unchecked - add integer and return
12508+ * @i: integer value to add
12509+ * @v: pointer of type atomic_unchecked_t
12510+ *
12511+ * Atomically adds @i to @v and returns @i + *@v
12512+ */
12513+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
12514+{
12515 return i + xadd(&v->counter, i);
12516 }
12517
12518@@ -188,6 +362,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
12519 }
12520
12521 #define atomic_inc_return(v) (atomic_add_return(1, v))
12522+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
12523+{
12524+ return atomic_add_return_unchecked(1, v);
12525+}
12526 #define atomic_dec_return(v) (atomic_sub_return(1, v))
12527
12528 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
12529@@ -195,11 +373,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
12530 return cmpxchg(&v->counter, old, new);
12531 }
12532
12533+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
12534+{
12535+ return cmpxchg(&v->counter, old, new);
12536+}
12537+
12538 static inline int atomic_xchg(atomic_t *v, int new)
12539 {
12540 return xchg(&v->counter, new);
12541 }
12542
12543+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
12544+{
12545+ return xchg(&v->counter, new);
12546+}
12547+
12548 /**
12549 * __atomic_add_unless - add unless the number is already a given value
12550 * @v: pointer of type atomic_t
12551@@ -211,12 +399,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
12552 */
12553 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
12554 {
12555- int c, old;
12556+ int c, old, new;
12557 c = atomic_read(v);
12558 for (;;) {
12559- if (unlikely(c == (u)))
12560+ if (unlikely(c == u))
12561 break;
12562- old = atomic_cmpxchg((v), c, c + (a));
12563+
12564+ asm volatile("addl %2,%0\n"
12565+
12566+#ifdef CONFIG_PAX_REFCOUNT
12567+ "jno 0f\n"
12568+ "subl %2,%0\n"
12569+ "int $4\n0:\n"
12570+ _ASM_EXTABLE(0b, 0b)
12571+#endif
12572+
12573+ : "=r" (new)
12574+ : "0" (c), "ir" (a));
12575+
12576+ old = atomic_cmpxchg(v, c, new);
12577 if (likely(old == c))
12578 break;
12579 c = old;
12580@@ -225,6 +426,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
12581 }
12582
12583 /**
12584+ * atomic_inc_not_zero_hint - increment if not null
12585+ * @v: pointer of type atomic_t
12586+ * @hint: probable value of the atomic before the increment
12587+ *
12588+ * This version of atomic_inc_not_zero() gives a hint of probable
12589+ * value of the atomic. This helps processor to not read the memory
12590+ * before doing the atomic read/modify/write cycle, lowering
12591+ * number of bus transactions on some arches.
12592+ *
12593+ * Returns: 0 if increment was not done, 1 otherwise.
12594+ */
12595+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
12596+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
12597+{
12598+ int val, c = hint, new;
12599+
12600+ /* sanity test, should be removed by compiler if hint is a constant */
12601+ if (!hint)
12602+ return __atomic_add_unless(v, 1, 0);
12603+
12604+ do {
12605+ asm volatile("incl %0\n"
12606+
12607+#ifdef CONFIG_PAX_REFCOUNT
12608+ "jno 0f\n"
12609+ "decl %0\n"
12610+ "int $4\n0:\n"
12611+ _ASM_EXTABLE(0b, 0b)
12612+#endif
12613+
12614+ : "=r" (new)
12615+ : "0" (c));
12616+
12617+ val = atomic_cmpxchg(v, c, new);
12618+ if (val == c)
12619+ return 1;
12620+ c = val;
12621+ } while (c);
12622+
12623+ return 0;
12624+}
12625+
12626+/**
12627 * atomic_inc_short - increment of a short integer
12628 * @v: pointer to type int
12629 *
12630@@ -253,14 +497,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
12631 #endif
12632
12633 /* These are x86-specific, used by some header files */
12634-#define atomic_clear_mask(mask, addr) \
12635- asm volatile(LOCK_PREFIX "andl %0,%1" \
12636- : : "r" (~(mask)), "m" (*(addr)) : "memory")
12637+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
12638+{
12639+ asm volatile(LOCK_PREFIX "andl %1,%0"
12640+ : "+m" (v->counter)
12641+ : "r" (~(mask))
12642+ : "memory");
12643+}
12644
12645-#define atomic_set_mask(mask, addr) \
12646- asm volatile(LOCK_PREFIX "orl %0,%1" \
12647- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
12648- : "memory")
12649+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
12650+{
12651+ asm volatile(LOCK_PREFIX "andl %1,%0"
12652+ : "+m" (v->counter)
12653+ : "r" (~(mask))
12654+ : "memory");
12655+}
12656+
12657+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
12658+{
12659+ asm volatile(LOCK_PREFIX "orl %1,%0"
12660+ : "+m" (v->counter)
12661+ : "r" (mask)
12662+ : "memory");
12663+}
12664+
12665+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
12666+{
12667+ asm volatile(LOCK_PREFIX "orl %1,%0"
12668+ : "+m" (v->counter)
12669+ : "r" (mask)
12670+ : "memory");
12671+}
12672
12673 /* Atomic operations are already serializing on x86 */
12674 #define smp_mb__before_atomic_dec() barrier()
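
The atomic.h rework is the heart of PAX_REFCOUNT: every checked operation gains a jno-guarded epilogue that, on signed overflow, undoes the operation and raises int $4 (the overflow exception), which the PaX trap handler turns into a refcount-overflow report; _ASM_EXTABLE(0b, 0b) makes execution resume right after the trap. Each operation also gets an *_unchecked twin on atomic_unchecked_t for counters that are genuinely allowed to wrap (statistics and the like), and the macro forms of atomic_clear_mask/atomic_set_mask become type-safe inline functions. The pattern boiled down to one self-contained function (a sketch assuming x86-64 GCC; the exception-table entry is reduced to the 0: resume label):

    static inline void refcount_inc_sketch(int *counter)
    {
        asm volatile("lock incl %0\n\t"
                     "jno 0f\n\t"        /* no signed overflow: done      */
                     "lock decl %0\n\t"  /* roll back, pinning at INT_MAX */
                     "int $4\n"          /* #OF trap: report the offender */
                     "0:\n"
                     : "+m" (*counter)
                     :
                     : "memory", "cc");
    }

Note that the rollback is not atomic with the increment, so a racing reader can briefly observe the wrapped value; the mechanism is about detecting and containing overflows, not about making them impossible.
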
12675diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
12676index b154de7..aadebd8 100644
12677--- a/arch/x86/include/asm/atomic64_32.h
12678+++ b/arch/x86/include/asm/atomic64_32.h
12679@@ -12,6 +12,14 @@ typedef struct {
12680 u64 __aligned(8) counter;
12681 } atomic64_t;
12682
12683+#ifdef CONFIG_PAX_REFCOUNT
12684+typedef struct {
12685+ u64 __aligned(8) counter;
12686+} atomic64_unchecked_t;
12687+#else
12688+typedef atomic64_t atomic64_unchecked_t;
12689+#endif
12690+
12691 #define ATOMIC64_INIT(val) { (val) }
12692
12693 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
12694@@ -37,21 +45,31 @@ typedef struct {
12695 ATOMIC64_DECL_ONE(sym##_386)
12696
12697 ATOMIC64_DECL_ONE(add_386);
12698+ATOMIC64_DECL_ONE(add_unchecked_386);
12699 ATOMIC64_DECL_ONE(sub_386);
12700+ATOMIC64_DECL_ONE(sub_unchecked_386);
12701 ATOMIC64_DECL_ONE(inc_386);
12702+ATOMIC64_DECL_ONE(inc_unchecked_386);
12703 ATOMIC64_DECL_ONE(dec_386);
12704+ATOMIC64_DECL_ONE(dec_unchecked_386);
12705 #endif
12706
12707 #define alternative_atomic64(f, out, in...) \
12708 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
12709
12710 ATOMIC64_DECL(read);
12711+ATOMIC64_DECL(read_unchecked);
12712 ATOMIC64_DECL(set);
12713+ATOMIC64_DECL(set_unchecked);
12714 ATOMIC64_DECL(xchg);
12715 ATOMIC64_DECL(add_return);
12716+ATOMIC64_DECL(add_return_unchecked);
12717 ATOMIC64_DECL(sub_return);
12718+ATOMIC64_DECL(sub_return_unchecked);
12719 ATOMIC64_DECL(inc_return);
12720+ATOMIC64_DECL(inc_return_unchecked);
12721 ATOMIC64_DECL(dec_return);
12722+ATOMIC64_DECL(dec_return_unchecked);
12723 ATOMIC64_DECL(dec_if_positive);
12724 ATOMIC64_DECL(inc_not_zero);
12725 ATOMIC64_DECL(add_unless);
12726@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
12727 }
12728
12729 /**
12730+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
12731+ * @v: pointer to type atomic64_unchecked_t
12732+ * @o: expected value
12733+ * @n: new value
12734+ *
12735+ * Atomically sets @v to @n if it was equal to @o and returns
12736+ * the old value.
12737+ */
12738+
12739+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
12740+{
12741+ return cmpxchg64(&v->counter, o, n);
12742+}
12743+
12744+/**
12745 * atomic64_xchg - xchg atomic64 variable
12746 * @v: pointer to type atomic64_t
12747 * @n: value to assign
12748@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
12749 }
12750
12751 /**
12752+ * atomic64_set_unchecked - set atomic64 variable
12753+ * @v: pointer to type atomic64_unchecked_t
12754+ * @n: value to assign
12755+ *
12756+ * Atomically sets the value of @v to @n.
12757+ */
12758+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
12759+{
12760+ unsigned high = (unsigned)(i >> 32);
12761+ unsigned low = (unsigned)i;
12762+ alternative_atomic64(set, /* no output */,
12763+ "S" (v), "b" (low), "c" (high)
12764+ : "eax", "edx", "memory");
12765+}
12766+
12767+/**
12768 * atomic64_read - read atomic64 variable
12769 * @v: pointer to type atomic64_t
12770 *
12771@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
12772 }
12773
12774 /**
12775+ * atomic64_read_unchecked - read atomic64 variable
12776+ * @v: pointer to type atomic64_unchecked_t
12777+ *
12778+ * Atomically reads the value of @v and returns it.
12779+ */
12780+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
12781+{
12782+ long long r;
12783+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
12784+ return r;
12785+}
12786+
12787+/**
12788 * atomic64_add_return - add and return
12789 * @i: integer value to add
12790 * @v: pointer to type atomic64_t
12791@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
12792 return i;
12793 }
12794
12795+/**
12796+ * atomic64_add_return_unchecked - add and return
12797+ * @i: integer value to add
12798+ * @v: pointer to type atomic64_unchecked_t
12799+ *
12800+ * Atomically adds @i to @v and returns @i + *@v
12801+ */
12802+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
12803+{
12804+ alternative_atomic64(add_return_unchecked,
12805+ ASM_OUTPUT2("+A" (i), "+c" (v)),
12806+ ASM_NO_INPUT_CLOBBER("memory"));
12807+ return i;
12808+}
12809+
12810 /*
12811 * Other variants with different arithmetic operators:
12812 */
12813@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
12814 return a;
12815 }
12816
12817+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
12818+{
12819+ long long a;
12820+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
12821+ "S" (v) : "memory", "ecx");
12822+ return a;
12823+}
12824+
12825 static inline long long atomic64_dec_return(atomic64_t *v)
12826 {
12827 long long a;
12828@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
12829 }
12830
12831 /**
12832+ * atomic64_add_unchecked - add integer to atomic64 variable
12833+ * @i: integer value to add
12834+ * @v: pointer to type atomic64_unchecked_t
12835+ *
12836+ * Atomically adds @i to @v.
12837+ */
12838+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
12839+{
12840+ __alternative_atomic64(add_unchecked, add_return_unchecked,
12841+ ASM_OUTPUT2("+A" (i), "+c" (v)),
12842+ ASM_NO_INPUT_CLOBBER("memory"));
12843+ return i;
12844+}
12845+
12846+/**
12847 * atomic64_sub - subtract the atomic64 variable
12848 * @i: integer value to subtract
12849 * @v: pointer to type atomic64_t
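
On 32-bit x86 the 64-bit atomics are out-of-line helpers dispatched through the alternatives mechanism (a cmpxchg8b implementation with a plain-i386 fallback), so every checked operation needs a parallel *_unchecked / *_unchecked_386 symbol declared here rather than just an inline twin. Typical use of the unchecked type, on either word size, looks like this (a made-up example, not from the patch):

    /* A wrapping byte counter: overflow is benign here, so opting out
     * of PAX_REFCOUNT's trap with the _unchecked API is appropriate. */
    static atomic64_unchecked_t tx_bytes = ATOMIC64_INIT(0);

    static void account_tx(unsigned int len)
    {
        atomic64_add_unchecked(len, &tx_bytes);
    }

    static long long tx_bytes_snapshot(void)
    {
        return atomic64_read_unchecked(&tx_bytes);
    }
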
12850diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
12851index 0e1cbfc..5623683 100644
12852--- a/arch/x86/include/asm/atomic64_64.h
12853+++ b/arch/x86/include/asm/atomic64_64.h
12854@@ -18,7 +18,19 @@
12855 */
12856 static inline long atomic64_read(const atomic64_t *v)
12857 {
12858- return (*(volatile long *)&(v)->counter);
12859+ return (*(volatile const long *)&(v)->counter);
12860+}
12861+
12862+/**
12863+ * atomic64_read_unchecked - read atomic64 variable
12864+ * @v: pointer of type atomic64_unchecked_t
12865+ *
12866+ * Atomically reads the value of @v.
12867+ * Doesn't imply a read memory barrier.
12868+ */
12869+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
12870+{
12871+ return (*(volatile const long *)&(v)->counter);
12872 }
12873
12874 /**
12875@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
12876 }
12877
12878 /**
12879+ * atomic64_set_unchecked - set atomic64 variable
12880+ * @v: pointer to type atomic64_unchecked_t
12881+ * @i: required value
12882+ *
12883+ * Atomically sets the value of @v to @i.
12884+ */
12885+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
12886+{
12887+ v->counter = i;
12888+}
12889+
12890+/**
12891 * atomic64_add - add integer to atomic64 variable
12892 * @i: integer value to add
12893 * @v: pointer to type atomic64_t
12894@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
12895 */
12896 static inline void atomic64_add(long i, atomic64_t *v)
12897 {
12898+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
12899+
12900+#ifdef CONFIG_PAX_REFCOUNT
12901+ "jno 0f\n"
12902+ LOCK_PREFIX "subq %1,%0\n"
12903+ "int $4\n0:\n"
12904+ _ASM_EXTABLE(0b, 0b)
12905+#endif
12906+
12907+ : "=m" (v->counter)
12908+ : "er" (i), "m" (v->counter));
12909+}
12910+
12911+/**
12912+ * atomic64_add_unchecked - add integer to atomic64 variable
12913+ * @i: integer value to add
12914+ * @v: pointer to type atomic64_unchecked_t
12915+ *
12916+ * Atomically adds @i to @v.
12917+ */
12918+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
12919+{
12920 asm volatile(LOCK_PREFIX "addq %1,%0"
12921 : "=m" (v->counter)
12922 : "er" (i), "m" (v->counter));
12923@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
12924 */
12925 static inline void atomic64_sub(long i, atomic64_t *v)
12926 {
12927- asm volatile(LOCK_PREFIX "subq %1,%0"
12928+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
12929+
12930+#ifdef CONFIG_PAX_REFCOUNT
12931+ "jno 0f\n"
12932+ LOCK_PREFIX "addq %1,%0\n"
12933+ "int $4\n0:\n"
12934+ _ASM_EXTABLE(0b, 0b)
12935+#endif
12936+
12937+ : "=m" (v->counter)
12938+ : "er" (i), "m" (v->counter));
12939+}
12940+
12941+/**
12942+ * atomic64_sub_unchecked - subtract the atomic64 variable
12943+ * @i: integer value to subtract
12944+ * @v: pointer to type atomic64_unchecked_t
12945+ *
12946+ * Atomically subtracts @i from @v.
12947+ */
12948+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
12949+{
12950+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
12951 : "=m" (v->counter)
12952 : "er" (i), "m" (v->counter));
12953 }
12954@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
12955 {
12956 unsigned char c;
12957
12958- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
12959+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
12960+
12961+#ifdef CONFIG_PAX_REFCOUNT
12962+ "jno 0f\n"
12963+ LOCK_PREFIX "addq %2,%0\n"
12964+ "int $4\n0:\n"
12965+ _ASM_EXTABLE(0b, 0b)
12966+#endif
12967+
12968+ "sete %1\n"
12969 : "=m" (v->counter), "=qm" (c)
12970 : "er" (i), "m" (v->counter) : "memory");
12971 return c;
12972@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
12973 */
12974 static inline void atomic64_inc(atomic64_t *v)
12975 {
12976+ asm volatile(LOCK_PREFIX "incq %0\n"
12977+
12978+#ifdef CONFIG_PAX_REFCOUNT
12979+ "jno 0f\n"
12980+ LOCK_PREFIX "decq %0\n"
12981+ "int $4\n0:\n"
12982+ _ASM_EXTABLE(0b, 0b)
12983+#endif
12984+
12985+ : "=m" (v->counter)
12986+ : "m" (v->counter));
12987+}
12988+
12989+/**
12990+ * atomic64_inc_unchecked - increment atomic64 variable
12991+ * @v: pointer to type atomic64_unchecked_t
12992+ *
12993+ * Atomically increments @v by 1.
12994+ */
12995+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
12996+{
12997 asm volatile(LOCK_PREFIX "incq %0"
12998 : "=m" (v->counter)
12999 : "m" (v->counter));
13000@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
13001 */
13002 static inline void atomic64_dec(atomic64_t *v)
13003 {
13004- asm volatile(LOCK_PREFIX "decq %0"
13005+ asm volatile(LOCK_PREFIX "decq %0\n"
13006+
13007+#ifdef CONFIG_PAX_REFCOUNT
13008+ "jno 0f\n"
13009+ LOCK_PREFIX "incq %0\n"
13010+ "int $4\n0:\n"
13011+ _ASM_EXTABLE(0b, 0b)
13012+#endif
13013+
13014+ : "=m" (v->counter)
13015+ : "m" (v->counter));
13016+}
13017+
13018+/**
13019+ * atomic64_dec_unchecked - decrement atomic64 variable
13020+ * @v: pointer to type atomic64_unchecked_t
13021+ *
13022+ * Atomically decrements @v by 1.
13023+ */
13024+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
13025+{
13026+ asm volatile(LOCK_PREFIX "decq %0\n"
13027 : "=m" (v->counter)
13028 : "m" (v->counter));
13029 }
13030@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
13031 {
13032 unsigned char c;
13033
13034- asm volatile(LOCK_PREFIX "decq %0; sete %1"
13035+ asm volatile(LOCK_PREFIX "decq %0\n"
13036+
13037+#ifdef CONFIG_PAX_REFCOUNT
13038+ "jno 0f\n"
13039+ LOCK_PREFIX "incq %0\n"
13040+ "int $4\n0:\n"
13041+ _ASM_EXTABLE(0b, 0b)
13042+#endif
13043+
13044+ "sete %1\n"
13045 : "=m" (v->counter), "=qm" (c)
13046 : "m" (v->counter) : "memory");
13047 return c != 0;
13048@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
13049 {
13050 unsigned char c;
13051
13052- asm volatile(LOCK_PREFIX "incq %0; sete %1"
13053+ asm volatile(LOCK_PREFIX "incq %0\n"
13054+
13055+#ifdef CONFIG_PAX_REFCOUNT
13056+ "jno 0f\n"
13057+ LOCK_PREFIX "decq %0\n"
13058+ "int $4\n0:\n"
13059+ _ASM_EXTABLE(0b, 0b)
13060+#endif
13061+
13062+ "sete %1\n"
13063 : "=m" (v->counter), "=qm" (c)
13064 : "m" (v->counter) : "memory");
13065 return c != 0;
13066@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
13067 {
13068 unsigned char c;
13069
13070- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
13071+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
13072+
13073+#ifdef CONFIG_PAX_REFCOUNT
13074+ "jno 0f\n"
13075+ LOCK_PREFIX "subq %2,%0\n"
13076+ "int $4\n0:\n"
13077+ _ASM_EXTABLE(0b, 0b)
13078+#endif
13079+
13080+ "sets %1\n"
13081 : "=m" (v->counter), "=qm" (c)
13082 : "er" (i), "m" (v->counter) : "memory");
13083 return c;
13084@@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
13085 */
13086 static inline long atomic64_add_return(long i, atomic64_t *v)
13087 {
13088+ return i + xadd_check_overflow(&v->counter, i);
13089+}
13090+
13091+/**
13092+ * atomic64_add_return_unchecked - add and return
13093+ * @i: integer value to add
13094+ * @v: pointer to type atomic64_unchecked_t
13095+ *
13096+ * Atomically adds @i to @v and returns @i + *@v
13097+ */
13098+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
13099+{
13100 return i + xadd(&v->counter, i);
13101 }
13102
13103@@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
13104 }
13105
13106 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
13107+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
13108+{
13109+ return atomic64_add_return_unchecked(1, v);
13110+}
13111 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
13112
13113 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
13114@@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
13115 return cmpxchg(&v->counter, old, new);
13116 }
13117
13118+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
13119+{
13120+ return cmpxchg(&v->counter, old, new);
13121+}
13122+
13123 static inline long atomic64_xchg(atomic64_t *v, long new)
13124 {
13125 return xchg(&v->counter, new);
13126@@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
13127 */
13128 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
13129 {
13130- long c, old;
13131+ long c, old, new;
13132 c = atomic64_read(v);
13133 for (;;) {
13134- if (unlikely(c == (u)))
13135+ if (unlikely(c == u))
13136 break;
13137- old = atomic64_cmpxchg((v), c, c + (a));
13138+
13139+ asm volatile("add %2,%0\n"
13140+
13141+#ifdef CONFIG_PAX_REFCOUNT
13142+ "jno 0f\n"
13143+ "sub %2,%0\n"
13144+ "int $4\n0:\n"
13145+ _ASM_EXTABLE(0b, 0b)
13146+#endif
13147+
13148+ : "=r" (new)
13149+ : "0" (c), "ir" (a));
13150+
13151+ old = atomic64_cmpxchg(v, c, new);
13152 if (likely(old == c))
13153 break;
13154 c = old;
13155 }
13156- return c != (u);
13157+ return c != u;
13158 }
13159
13160 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
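
atomic64_add_unless() cannot keep the plain c + a expression once overflow checking is in play, so the patch computes the candidate value through the same jno / int $4 sequence in a scratch register before handing it to cmpxchg (the cosmetic changes drop redundant parentheses around u, a and v). The control flow in plain C, with the checked add reduced to an ordinary one (GCC's __sync builtin stands in for the kernel's cmpxchg):

    /* C-level sketch of the patched atomic64_add_unless() loop. */
    static inline int add_unless_sketch(long *v, long a, long u)
    {
        long c = *v, old, new;

        for (;;) {
            if (c == u)
                break;             /* excluded value: do not add        */
            new = c + a;           /* the jno-checked add in the patch  */
            old = __sync_val_compare_and_swap(v, c, new);
            if (old == c)
                break;             /* our update won the race           */
            c = old;               /* lost the race: retry with fresh c */
        }
        return c != u;             /* true iff the addition happened    */
    }
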
13161diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
13162index 6dfd019..28e188d 100644
13163--- a/arch/x86/include/asm/bitops.h
13164+++ b/arch/x86/include/asm/bitops.h
13165@@ -40,7 +40,7 @@
13166 * a mask operation on a byte.
13167 */
13168 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
13169-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
13170+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
13171 #define CONST_MASK(nr) (1 << ((nr) & 7))
13172
13173 /**
13174@@ -486,7 +486,7 @@ static inline int fls(int x)
13175 * at position 64.
13176 */
13177 #ifdef CONFIG_X86_64
13178-static __always_inline int fls64(__u64 x)
13179+static __always_inline long fls64(__u64 x)
13180 {
13181 int bitpos = -1;
13182 /*
13183diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
13184index 4fa687a..60f2d39 100644
13185--- a/arch/x86/include/asm/boot.h
13186+++ b/arch/x86/include/asm/boot.h
13187@@ -6,10 +6,15 @@
13188 #include <uapi/asm/boot.h>
13189
13190 /* Physical address where kernel should be loaded. */
13191-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
13192+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
13193 + (CONFIG_PHYSICAL_ALIGN - 1)) \
13194 & ~(CONFIG_PHYSICAL_ALIGN - 1))
13195
13196+#ifndef __ASSEMBLY__
13197+extern unsigned char __LOAD_PHYSICAL_ADDR[];
13198+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
13199+#endif
13200+
13201 /* Minimum kernel alignment, as a power of two */
13202 #ifdef CONFIG_X86_64
13203 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
13204diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
13205index 48f99f1..d78ebf9 100644
13206--- a/arch/x86/include/asm/cache.h
13207+++ b/arch/x86/include/asm/cache.h
13208@@ -5,12 +5,13 @@
13209
13210 /* L1 cache line size */
13211 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
13212-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
13213+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
13214
13215 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
13216+#define __read_only __attribute__((__section__(".data..read_only")))
13217
13218 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
13219-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
13220+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
13221
13222 #ifdef CONFIG_X86_VSMP
13223 #ifdef CONFIG_SMP
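
cache.h introduces __read_only, which places a variable in a dedicated .data..read_only section that PaX maps read-only after init; the _AC(1,UL) change merely makes the cache-size constants unsigned long in both C and assembly contexts. Data placed this way can still be updated at controlled points through the pax_open_kernel()/pax_close_kernel() pair that appears in the desc.h hunks further down. A usage sketch (the variable and setup function are hypothetical):

    /* Set once during boot, enforced read-only afterwards by section
     * placement rather than by the compiler. */
    static int widget_limit __read_only = 16;   /* hypothetical tunable */

    static void __init widget_limit_setup(int n)
    {
        pax_open_kernel();   /* temporarily permit writes to read-only data */
        widget_limit = n;    /* the only sanctioned write after early boot  */
        pax_close_kernel();
    }
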
13224diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
13225index 9863ee3..4a1f8e1 100644
13226--- a/arch/x86/include/asm/cacheflush.h
13227+++ b/arch/x86/include/asm/cacheflush.h
13228@@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
13229 unsigned long pg_flags = pg->flags & _PGMT_MASK;
13230
13231 if (pg_flags == _PGMT_DEFAULT)
13232- return -1;
13233+ return ~0UL;
13234 else if (pg_flags == _PGMT_WC)
13235 return _PAGE_CACHE_WC;
13236 else if (pg_flags == _PGMT_UC_MINUS)
13237diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
13238index 46fc474..b02b0f9 100644
13239--- a/arch/x86/include/asm/checksum_32.h
13240+++ b/arch/x86/include/asm/checksum_32.h
13241@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
13242 int len, __wsum sum,
13243 int *src_err_ptr, int *dst_err_ptr);
13244
13245+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
13246+ int len, __wsum sum,
13247+ int *src_err_ptr, int *dst_err_ptr);
13248+
13249+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
13250+ int len, __wsum sum,
13251+ int *src_err_ptr, int *dst_err_ptr);
13252+
13253 /*
13254 * Note: when you get a NULL pointer exception here this means someone
13255 * passed in an incorrect kernel address to one of these functions.
13256@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
13257 int *err_ptr)
13258 {
13259 might_sleep();
13260- return csum_partial_copy_generic((__force void *)src, dst,
13261+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
13262 len, sum, err_ptr, NULL);
13263 }
13264
13265@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
13266 {
13267 might_sleep();
13268 if (access_ok(VERIFY_WRITE, dst, len))
13269- return csum_partial_copy_generic(src, (__force void *)dst,
13270+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
13271 len, sum, NULL, err_ptr);
13272
13273 if (len)
13274diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
13275index 8d871ea..c1a0dc9 100644
13276--- a/arch/x86/include/asm/cmpxchg.h
13277+++ b/arch/x86/include/asm/cmpxchg.h
13278@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
13279 __compiletime_error("Bad argument size for cmpxchg");
13280 extern void __xadd_wrong_size(void)
13281 __compiletime_error("Bad argument size for xadd");
13282+extern void __xadd_check_overflow_wrong_size(void)
13283+ __compiletime_error("Bad argument size for xadd_check_overflow");
13284 extern void __add_wrong_size(void)
13285 __compiletime_error("Bad argument size for add");
13286+extern void __add_check_overflow_wrong_size(void)
13287+ __compiletime_error("Bad argument size for add_check_overflow");
13288
13289 /*
13290 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
13291@@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
13292 __ret; \
13293 })
13294
13295+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
13296+ ({ \
13297+ __typeof__ (*(ptr)) __ret = (arg); \
13298+ switch (sizeof(*(ptr))) { \
13299+ case __X86_CASE_L: \
13300+ asm volatile (lock #op "l %0, %1\n" \
13301+ "jno 0f\n" \
13302+ "mov %0,%1\n" \
13303+ "int $4\n0:\n" \
13304+ _ASM_EXTABLE(0b, 0b) \
13305+ : "+r" (__ret), "+m" (*(ptr)) \
13306+ : : "memory", "cc"); \
13307+ break; \
13308+ case __X86_CASE_Q: \
13309+ asm volatile (lock #op "q %q0, %1\n" \
13310+ "jno 0f\n" \
13311+ "mov %0,%1\n" \
13312+ "int $4\n0:\n" \
13313+ _ASM_EXTABLE(0b, 0b) \
13314+ : "+r" (__ret), "+m" (*(ptr)) \
13315+ : : "memory", "cc"); \
13316+ break; \
13317+ default: \
13318+ __ ## op ## _check_overflow_wrong_size(); \
13319+ } \
13320+ __ret; \
13321+ })
13322+
13323 /*
13324 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
13325 * Since this is generally used to protect other memory information, we
13326@@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
13327 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
13328 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
13329
13330+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
13331+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
13332+
13333 #define __add(ptr, inc, lock) \
13334 ({ \
13335 __typeof__ (*(ptr)) __ret = (inc); \
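
xadd_check_overflow is the building block behind the new atomic_add_return() above: xadd exchanges-and-adds, returning the old value, so old + i is the post-add result, and the _check_overflow variant wraps the same instruction in the jno / int $4 recovery used elsewhere (here the rollback is mov %0,%1, storing back the pre-add value still held in the register). The semantics of the unchecked base case, for reference:

    /* What "return i + xadd(&v->counter, i)" computes: xadd yields the
     * old value, so old + i is the counter's value after the addition. */
    static inline int add_return_sketch(int *counter, int i)
    {
        int old = __sync_fetch_and_add(counter, i);   /* lock xadd */
        return old + i;
    }
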
13336diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
13337index 59c6c40..5e0b22c 100644
13338--- a/arch/x86/include/asm/compat.h
13339+++ b/arch/x86/include/asm/compat.h
13340@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
13341 typedef u32 compat_uint_t;
13342 typedef u32 compat_ulong_t;
13343 typedef u64 __attribute__((aligned(4))) compat_u64;
13344-typedef u32 compat_uptr_t;
13345+typedef u32 __user compat_uptr_t;
13346
13347 struct compat_timespec {
13348 compat_time_t tv_sec;
13349diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
13350index 2d9075e..b75a844 100644
13351--- a/arch/x86/include/asm/cpufeature.h
13352+++ b/arch/x86/include/asm/cpufeature.h
13353@@ -206,7 +206,7 @@
13354 #define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
13355 #define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
13356 #define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
13357-#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
13358+#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Prevention */
13359 #define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
13360 #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
13361 #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
13362@@ -375,7 +375,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
13363 ".section .discard,\"aw\",@progbits\n"
13364 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
13365 ".previous\n"
13366- ".section .altinstr_replacement,\"ax\"\n"
13367+ ".section .altinstr_replacement,\"a\"\n"
13368 "3: movb $1,%0\n"
13369 "4:\n"
13370 ".previous\n"
13371diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
13372index 8bf1c06..b6ae785 100644
13373--- a/arch/x86/include/asm/desc.h
13374+++ b/arch/x86/include/asm/desc.h
13375@@ -4,6 +4,7 @@
13376 #include <asm/desc_defs.h>
13377 #include <asm/ldt.h>
13378 #include <asm/mmu.h>
13379+#include <asm/pgtable.h>
13380
13381 #include <linux/smp.h>
13382 #include <linux/percpu.h>
13383@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
13384
13385 desc->type = (info->read_exec_only ^ 1) << 1;
13386 desc->type |= info->contents << 2;
13387+ desc->type |= info->seg_not_present ^ 1;
13388
13389 desc->s = 1;
13390 desc->dpl = 0x3;
13391@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
13392 }
13393
13394 extern struct desc_ptr idt_descr;
13395-extern gate_desc idt_table[];
13396 extern struct desc_ptr nmi_idt_descr;
13397-extern gate_desc nmi_idt_table[];
13398-
13399-struct gdt_page {
13400- struct desc_struct gdt[GDT_ENTRIES];
13401-} __attribute__((aligned(PAGE_SIZE)));
13402-
13403-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
13404+extern gate_desc idt_table[256];
13405+extern gate_desc nmi_idt_table[256];
13406
13407+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
13408 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
13409 {
13410- return per_cpu(gdt_page, cpu).gdt;
13411+ return cpu_gdt_table[cpu];
13412 }
13413
13414 #ifdef CONFIG_X86_64
13415@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
13416 unsigned long base, unsigned dpl, unsigned flags,
13417 unsigned short seg)
13418 {
13419- gate->a = (seg << 16) | (base & 0xffff);
13420- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
13421+ gate->gate.offset_low = base;
13422+ gate->gate.seg = seg;
13423+ gate->gate.reserved = 0;
13424+ gate->gate.type = type;
13425+ gate->gate.s = 0;
13426+ gate->gate.dpl = dpl;
13427+ gate->gate.p = 1;
13428+ gate->gate.offset_high = base >> 16;
13429 }
13430
13431 #endif
13432@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
13433
13434 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
13435 {
13436+ pax_open_kernel();
13437 memcpy(&idt[entry], gate, sizeof(*gate));
13438+ pax_close_kernel();
13439 }
13440
13441 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
13442 {
13443+ pax_open_kernel();
13444 memcpy(&ldt[entry], desc, 8);
13445+ pax_close_kernel();
13446 }
13447
13448 static inline void
13449@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
13450 default: size = sizeof(*gdt); break;
13451 }
13452
13453+ pax_open_kernel();
13454 memcpy(&gdt[entry], desc, size);
13455+ pax_close_kernel();
13456 }
13457
13458 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
13459@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
13460
13461 static inline void native_load_tr_desc(void)
13462 {
13463+ pax_open_kernel();
13464 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
13465+ pax_close_kernel();
13466 }
13467
13468 static inline void native_load_gdt(const struct desc_ptr *dtr)
13469@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
13470 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
13471 unsigned int i;
13472
13473+ pax_open_kernel();
13474 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
13475 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
13476+ pax_close_kernel();
13477 }
13478
13479 #define _LDT_empty(info) \
13480@@ -287,7 +300,7 @@ static inline void load_LDT(mm_context_t *pc)
13481 preempt_enable();
13482 }
13483
13484-static inline unsigned long get_desc_base(const struct desc_struct *desc)
13485+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
13486 {
13487 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
13488 }
13489@@ -311,7 +324,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
13490 }
13491
13492 #ifdef CONFIG_X86_64
13493-static inline void set_nmi_gate(int gate, void *addr)
13494+static inline void set_nmi_gate(int gate, const void *addr)
13495 {
13496 gate_desc s;
13497
13498@@ -320,7 +333,7 @@ static inline void set_nmi_gate(int gate, void *addr)
13499 }
13500 #endif
13501
13502-static inline void _set_gate(int gate, unsigned type, void *addr,
13503+static inline void _set_gate(int gate, unsigned type, const void *addr,
13504 unsigned dpl, unsigned ist, unsigned seg)
13505 {
13506 gate_desc s;
13507@@ -339,7 +352,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
13508 * Pentium F0 0F bugfix can have resulted in the mapped
13509 * IDT being write-protected.
13510 */
13511-static inline void set_intr_gate(unsigned int n, void *addr)
13512+static inline void set_intr_gate(unsigned int n, const void *addr)
13513 {
13514 BUG_ON((unsigned)n > 0xFF);
13515 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
13516@@ -369,19 +382,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
13517 /*
13518 * This routine sets up an interrupt gate at descriptor privilege level 3.
13519 */
13520-static inline void set_system_intr_gate(unsigned int n, void *addr)
13521+static inline void set_system_intr_gate(unsigned int n, const void *addr)
13522 {
13523 BUG_ON((unsigned)n > 0xFF);
13524 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
13525 }
13526
13527-static inline void set_system_trap_gate(unsigned int n, void *addr)
13528+static inline void set_system_trap_gate(unsigned int n, const void *addr)
13529 {
13530 BUG_ON((unsigned)n > 0xFF);
13531 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
13532 }
13533
13534-static inline void set_trap_gate(unsigned int n, void *addr)
13535+static inline void set_trap_gate(unsigned int n, const void *addr)
13536 {
13537 BUG_ON((unsigned)n > 0xFF);
13538 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
13539@@ -390,19 +403,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
13540 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
13541 {
13542 BUG_ON((unsigned)n > 0xFF);
13543- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
13544+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
13545 }
13546
13547-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
13548+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
13549 {
13550 BUG_ON((unsigned)n > 0xFF);
13551 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
13552 }
13553
13554-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
13555+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
13556 {
13557 BUG_ON((unsigned)n > 0xFF);
13558 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
13559 }
13560
13561+#ifdef CONFIG_X86_32
13562+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
13563+{
13564+ struct desc_struct d;
13565+
13566+ if (likely(limit))
13567+ limit = (limit - 1UL) >> PAGE_SHIFT;
13568+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
13569+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
13570+}
13571+#endif
13572+
13573 #endif /* _ASM_X86_DESC_H */
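
[Editor's sketch] The set_user_cs() helper added above is how PAGEEXEC/SEGMEXEC shrink the user code segment on i386; the byte limit becomes page-granular because the descriptor is packed with G=1. The 0xFB/0xC magic passed to pack_descriptor() decodes as follows (stand-alone user-space illustration, not part of the patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned type = 0xFB;	/* access byte: P, DPL, S, TYPE */
		unsigned flags = 0xC;	/* AVL, L, D, G nibble */

		printf("present=%u dpl=%u code=%u readable=%u accessed=%u\n",
		       (type >> 7) & 1, (type >> 5) & 3,
		       (type >> 3) & 1, (type >> 1) & 1, type & 1);
		printf("granularity_4k=%u default_32bit=%u\n",
		       (flags >> 3) & 1, (flags >> 2) & 1);
		return 0;
	}

That is: a present, DPL 3, readable 32-bit code segment with 4 KiB granularity, which matches the `(limit - 1UL) >> PAGE_SHIFT` conversion in the helper.
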
13574diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
13575index 278441f..b95a174 100644
13576--- a/arch/x86/include/asm/desc_defs.h
13577+++ b/arch/x86/include/asm/desc_defs.h
13578@@ -31,6 +31,12 @@ struct desc_struct {
13579 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
13580 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
13581 };
13582+ struct {
13583+ u16 offset_low;
13584+ u16 seg;
13585+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
13586+ unsigned offset_high: 16;
13587+ } gate;
13588 };
13589 } __attribute__((packed));
13590
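
[Editor's sketch] The `gate` view added to the union above lets pack_gate() in desc.h assign named fields instead of hand-assembling the two descriptor words. A quick self-contained check that the bitfield layout encodes the same bytes as the arithmetic it replaces (assumes GCC little-endian bitfield layout, as the kernel header itself does; the selector value is illustrative):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	typedef union {
		struct { uint32_t a, b; };		/* old word view */
		struct {				/* new named view */
			uint16_t offset_low;
			uint16_t seg;
			unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
			unsigned offset_high: 16;
		} gate;
	} gate_desc32;

	int main(void)
	{
		unsigned long base = 0xc0123456UL;
		unsigned type = 0xE, dpl = 0;	/* 32-bit interrupt gate */
		uint16_t seg = 0x60;		/* illustrative selector */
		gate_desc32 g = {0};

		g.gate.offset_low = (uint16_t)base;
		g.gate.seg = seg;
		g.gate.type = type;
		g.gate.dpl = dpl;
		g.gate.p = 1;
		g.gate.offset_high = base >> 16;

		/* the encoding pack_gate() used to compute by hand */
		uint32_t a = ((uint32_t)seg << 16) | (base & 0xffff);
		uint32_t b = (base & 0xffff0000) |
			     (((0x80 | type | (dpl << 5)) & 0xff) << 8);

		assert(g.a == a && g.b == b);
		printf("encodings agree: %08x %08x\n", g.a, g.b);
		return 0;
	}
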
13591diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
13592index ced283a..ffe04cc 100644
13593--- a/arch/x86/include/asm/div64.h
13594+++ b/arch/x86/include/asm/div64.h
13595@@ -39,7 +39,7 @@
13596 __mod; \
13597 })
13598
13599-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
13600+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
13601 {
13602 union {
13603 u64 v64;
13604diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
13605index 9c999c1..3860cb8 100644
13606--- a/arch/x86/include/asm/elf.h
13607+++ b/arch/x86/include/asm/elf.h
13608@@ -243,7 +243,25 @@ extern int force_personality32;
13609 the loader. We need to make sure that it is out of the way of the program
13610 that it will "exec", and that there is sufficient room for the brk. */
13611
13612+#ifdef CONFIG_PAX_SEGMEXEC
13613+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
13614+#else
13615 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
13616+#endif
13617+
13618+#ifdef CONFIG_PAX_ASLR
13619+#ifdef CONFIG_X86_32
13620+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
13621+
13622+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
13623+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
13624+#else
13625+#define PAX_ELF_ET_DYN_BASE 0x400000UL
13626+
13627+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
13628+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
13629+#endif
13630+#endif
13631
13632 /* This yields a mask that user programs can use to figure out what
13633 instruction set this CPU supports. This could be done in user space,
13634@@ -296,16 +314,12 @@ do { \
13635
13636 #define ARCH_DLINFO \
13637 do { \
13638- if (vdso_enabled) \
13639- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
13640- (unsigned long)current->mm->context.vdso); \
13641+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
13642 } while (0)
13643
13644 #define ARCH_DLINFO_X32 \
13645 do { \
13646- if (vdso_enabled) \
13647- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
13648- (unsigned long)current->mm->context.vdso); \
13649+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
13650 } while (0)
13651
13652 #define AT_SYSINFO 32
13653@@ -320,7 +334,7 @@ else \
13654
13655 #endif /* !CONFIG_X86_32 */
13656
13657-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
13658+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
13659
13660 #define VDSO_ENTRY \
13661 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
13662@@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
13663 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
13664 #define compat_arch_setup_additional_pages syscall32_setup_pages
13665
13666-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
13667-#define arch_randomize_brk arch_randomize_brk
13668-
13669 /*
13670 * True on X86_32 or when emulating IA32 on X86_64
13671 */
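
[Editor's sketch] The PAX_DELTA_* values above are randomization widths in bits, applied page-aligned. With the usual constants (PAGE_SHIFT 12 and, on amd64, the default CONFIG_TASK_SIZE_MAX_SHIFT of 47; both are assumptions for this demo, not exported by the hunk) the arithmetic works out as follows:

	#include <stdio.h>

	int main(void)
	{
		int task_size_max_shift = 47, page_shift = 12;

		/* native 64-bit: TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3 */
		printf("64-bit mmap/stack randomization: %d bits\n",
		       task_size_max_shift - page_shift - 3);	/* 32 */
		/* 32-bit (TIF_ADDR32) and i386 non-SEGMEXEC: 16 bits,
		 * i386 SEGMEXEC: 15 bits, per the macros above */
		return 0;
	}

Compared with vanilla 3.8, which randomizes mmap in roughly 8 bits on i386 and 28 on amd64, this is a substantial widening.
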
13672diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
13673index 75ce3f4..882e801 100644
13674--- a/arch/x86/include/asm/emergency-restart.h
13675+++ b/arch/x86/include/asm/emergency-restart.h
13676@@ -13,6 +13,6 @@ enum reboot_type {
13677
13678 extern enum reboot_type reboot_type;
13679
13680-extern void machine_emergency_restart(void);
13681+extern void machine_emergency_restart(void) __noreturn;
13682
13683 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
13684diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
13685index 41ab26e..a88c9e6 100644
13686--- a/arch/x86/include/asm/fpu-internal.h
13687+++ b/arch/x86/include/asm/fpu-internal.h
13688@@ -126,7 +126,9 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
13689 ({ \
13690 int err; \
13691 asm volatile(ASM_STAC "\n" \
13692- "1:" #insn "\n\t" \
13693+ "1:" \
13694+ __copyuser_seg \
13695+ #insn "\n\t" \
13696 "2: " ASM_CLAC "\n" \
13697 ".section .fixup,\"ax\"\n" \
13698 "3: movl $-1,%[err]\n" \
13699@@ -299,7 +301,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
13700 "emms\n\t" /* clear stack tags */
13701 "fildl %P[addr]", /* set F?P to defined value */
13702 X86_FEATURE_FXSAVE_LEAK,
13703- [addr] "m" (tsk->thread.fpu.has_fpu));
13704+ [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
13705
13706 return fpu_restore_checking(&tsk->thread.fpu);
13707 }
13708diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
13709index be27ba1..8f13ff9 100644
13710--- a/arch/x86/include/asm/futex.h
13711+++ b/arch/x86/include/asm/futex.h
13712@@ -12,6 +12,7 @@
13713 #include <asm/smap.h>
13714
13715 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
13716+ typecheck(u32 __user *, uaddr); \
13717 asm volatile("\t" ASM_STAC "\n" \
13718 "1:\t" insn "\n" \
13719 "2:\t" ASM_CLAC "\n" \
13720@@ -20,15 +21,16 @@
13721 "\tjmp\t2b\n" \
13722 "\t.previous\n" \
13723 _ASM_EXTABLE(1b, 3b) \
13724- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
13725+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
13726 : "i" (-EFAULT), "0" (oparg), "1" (0))
13727
13728 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
13729+ typecheck(u32 __user *, uaddr); \
13730 asm volatile("\t" ASM_STAC "\n" \
13731 "1:\tmovl %2, %0\n" \
13732 "\tmovl\t%0, %3\n" \
13733 "\t" insn "\n" \
13734- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
13735+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
13736 "\tjnz\t1b\n" \
13737 "3:\t" ASM_CLAC "\n" \
13738 "\t.section .fixup,\"ax\"\n" \
13739@@ -38,7 +40,7 @@
13740 _ASM_EXTABLE(1b, 4b) \
13741 _ASM_EXTABLE(2b, 4b) \
13742 : "=&a" (oldval), "=&r" (ret), \
13743- "+m" (*uaddr), "=&r" (tem) \
13744+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
13745 : "r" (oparg), "i" (-EFAULT), "1" (0))
13746
13747 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
13748@@ -59,10 +61,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
13749
13750 switch (op) {
13751 case FUTEX_OP_SET:
13752- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
13753+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
13754 break;
13755 case FUTEX_OP_ADD:
13756- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
13757+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
13758 uaddr, oparg);
13759 break;
13760 case FUTEX_OP_OR:
13761@@ -116,14 +118,14 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
13762 return -EFAULT;
13763
13764 asm volatile("\t" ASM_STAC "\n"
13765- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
13766+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
13767 "2:\t" ASM_CLAC "\n"
13768 "\t.section .fixup, \"ax\"\n"
13769 "3:\tmov %3, %0\n"
13770 "\tjmp 2b\n"
13771 "\t.previous\n"
13772 _ASM_EXTABLE(1b, 3b)
13773- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
13774+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
13775 : "i" (-EFAULT), "r" (newval), "1" (oldval)
13776 : "memory"
13777 );
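
[Editor's note] Two things happen in the futex hunks above: the asm operands are retargeted through ____m() with an explicit `u32 __user *` cast (so UDEREF's segment-based user access via __copyuser_seg still typechecks), and each macro gains a typecheck() so callers can no longer pass a non-user pointer unnoticed. typecheck() is the stock helper from include/linux/typecheck.h, reproduced here for reference:

	#define typecheck(type, x) \
	({	type __dummy; \
		typeof(x) __dummy2; \
		(void)(&__dummy == &__dummy2); \
		1; \
	})

Comparing the addresses of two differently-typed dummies costs nothing at run time but makes the compiler warn whenever `x` is not of `type`.
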
13778diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
13779index eb92a6e..b98b2f4 100644
13780--- a/arch/x86/include/asm/hw_irq.h
13781+++ b/arch/x86/include/asm/hw_irq.h
13782@@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
13783 extern void enable_IO_APIC(void);
13784
13785 /* Statistics */
13786-extern atomic_t irq_err_count;
13787-extern atomic_t irq_mis_count;
13788+extern atomic_unchecked_t irq_err_count;
13789+extern atomic_unchecked_t irq_mis_count;
13790
13791 /* EISA */
13792 extern void eisa_set_level_irq(unsigned int irq);
13793diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
13794index a203659..9889f1c 100644
13795--- a/arch/x86/include/asm/i8259.h
13796+++ b/arch/x86/include/asm/i8259.h
13797@@ -62,7 +62,7 @@ struct legacy_pic {
13798 void (*init)(int auto_eoi);
13799 int (*irq_pending)(unsigned int irq);
13800 void (*make_irq)(unsigned int irq);
13801-};
13802+} __do_const;
13803
13804 extern struct legacy_pic *legacy_pic;
13805 extern struct legacy_pic null_legacy_pic;
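
[Editor's sketch] __do_const above is an annotation consumed by the PaX constify GCC plugin: structures consisting of function pointers and set-once data are forced const, so every instance is emitted into .rodata. The effect, approximated in plain C (illustrative types, not the plugin mechanics):

	struct pic_ops {
		void (*mask)(unsigned int irq);
		int (*irq_pending)(unsigned int irq);
	};

	static void no_mask(unsigned int irq) { (void)irq; }
	static int no_pending(unsigned int irq) { (void)irq; return 0; }

	/* const instance lands in .rodata: overwriting .irq_pending
	 * through a corrupted pointer now faults instead of handing an
	 * attacker kernel control flow */
	static const struct pic_ops null_pic_ops = {
		.mask		= no_mask,
		.irq_pending	= no_pending,
	};

	int main(void) { return null_pic_ops.irq_pending(0); }
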
13806diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
13807index d8e8eef..1765f78 100644
13808--- a/arch/x86/include/asm/io.h
13809+++ b/arch/x86/include/asm/io.h
13810@@ -51,12 +51,12 @@ static inline void name(type val, volatile void __iomem *addr) \
13811 "m" (*(volatile type __force *)addr) barrier); }
13812
13813 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
13814-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
13815-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
13816+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
13817+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
13818
13819 build_mmio_read(__readb, "b", unsigned char, "=q", )
13820-build_mmio_read(__readw, "w", unsigned short, "=r", )
13821-build_mmio_read(__readl, "l", unsigned int, "=r", )
13822+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
13823+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
13824
13825 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
13826 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
13827@@ -184,7 +184,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
13828 return ioremap_nocache(offset, size);
13829 }
13830
13831-extern void iounmap(volatile void __iomem *addr);
13832+extern void iounmap(const volatile void __iomem *addr);
13833
13834 extern void set_iounmap_nonlazy(void);
13835
13836@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
13837
13838 #include <linux/vmalloc.h>
13839
13840+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
13841+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
13842+{
13843+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
13844+}
13845+
13846+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
13847+{
13848+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
13849+}
13850+
13851 /*
13852 * Convert a virtual cached pointer to an uncached pointer
13853 */
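
[Editor's sketch] The two helpers added to io.h bound /dev/mem-style accesses to the physically addressable range. A user-space rendering of the same check, with 36 physical address bits standing in for boot_cpu_data.x86_phys_bits (an assumption for the demo):

	#include <stdio.h>

	#define PHYS_BITS	36
	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)

	static int valid_phys_addr_range(unsigned long addr, unsigned long count)
	{
		/* round the end of the range up to a page boundary, then
		 * compare frame numbers against the last addressable frame */
		return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) <
		       (1ULL << (PHYS_BITS - PAGE_SHIFT)) ? 1 : 0;
	}

	int main(void)
	{
		printf("%d\n", valid_phys_addr_range(0x1000, 0x2000));	/* 1 */
		printf("%d\n", valid_phys_addr_range(0xfffffffffUL, 0x1000)); /* 0 */
		return 0;
	}
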
13854diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
13855index bba3cf8..06bc8da 100644
13856--- a/arch/x86/include/asm/irqflags.h
13857+++ b/arch/x86/include/asm/irqflags.h
13858@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
13859 sti; \
13860 sysexit
13861
13862+#define GET_CR0_INTO_RDI mov %cr0, %rdi
13863+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
13864+#define GET_CR3_INTO_RDI mov %cr3, %rdi
13865+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
13866+
13867 #else
13868 #define INTERRUPT_RETURN iret
13869 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
13870diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
13871index d3ddd17..c9fb0cc 100644
13872--- a/arch/x86/include/asm/kprobes.h
13873+++ b/arch/x86/include/asm/kprobes.h
13874@@ -38,13 +38,8 @@ typedef u8 kprobe_opcode_t;
13875 #define RELATIVEJUMP_SIZE 5
13876 #define RELATIVECALL_OPCODE 0xe8
13877 #define RELATIVE_ADDR_SIZE 4
13878-#define MAX_STACK_SIZE 64
13879-#define MIN_STACK_SIZE(ADDR) \
13880- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
13881- THREAD_SIZE - (unsigned long)(ADDR))) \
13882- ? (MAX_STACK_SIZE) \
13883- : (((unsigned long)current_thread_info()) + \
13884- THREAD_SIZE - (unsigned long)(ADDR)))
13885+#define MAX_STACK_SIZE 64UL
13886+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
13887
13888 #define flush_insn_slot(p) do { } while (0)
13889
13890diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
13891index dc87b65..85039f9 100644
13892--- a/arch/x86/include/asm/kvm_host.h
13893+++ b/arch/x86/include/asm/kvm_host.h
13894@@ -419,8 +419,8 @@ struct kvm_vcpu_arch {
13895 gpa_t time;
13896 struct pvclock_vcpu_time_info hv_clock;
13897 unsigned int hw_tsc_khz;
13898- unsigned int time_offset;
13899- struct page *time_page;
13900+ struct gfn_to_hva_cache pv_time;
13901+ bool pv_time_enabled;
13902 /* set guest stopped flag in pvclock flags field */
13903 bool pvclock_set_guest_stopped_request;
13904
13905diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
13906index 2d89e39..baee879 100644
13907--- a/arch/x86/include/asm/local.h
13908+++ b/arch/x86/include/asm/local.h
13909@@ -10,33 +10,97 @@ typedef struct {
13910 atomic_long_t a;
13911 } local_t;
13912
13913+typedef struct {
13914+ atomic_long_unchecked_t a;
13915+} local_unchecked_t;
13916+
13917 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
13918
13919 #define local_read(l) atomic_long_read(&(l)->a)
13920+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
13921 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
13922+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
13923
13924 static inline void local_inc(local_t *l)
13925 {
13926- asm volatile(_ASM_INC "%0"
13927+ asm volatile(_ASM_INC "%0\n"
13928+
13929+#ifdef CONFIG_PAX_REFCOUNT
13930+ "jno 0f\n"
13931+ _ASM_DEC "%0\n"
13932+ "int $4\n0:\n"
13933+ _ASM_EXTABLE(0b, 0b)
13934+#endif
13935+
13936+ : "+m" (l->a.counter));
13937+}
13938+
13939+static inline void local_inc_unchecked(local_unchecked_t *l)
13940+{
13941+ asm volatile(_ASM_INC "%0\n"
13942 : "+m" (l->a.counter));
13943 }
13944
13945 static inline void local_dec(local_t *l)
13946 {
13947- asm volatile(_ASM_DEC "%0"
13948+ asm volatile(_ASM_DEC "%0\n"
13949+
13950+#ifdef CONFIG_PAX_REFCOUNT
13951+ "jno 0f\n"
13952+ _ASM_INC "%0\n"
13953+ "int $4\n0:\n"
13954+ _ASM_EXTABLE(0b, 0b)
13955+#endif
13956+
13957+ : "+m" (l->a.counter));
13958+}
13959+
13960+static inline void local_dec_unchecked(local_unchecked_t *l)
13961+{
13962+ asm volatile(_ASM_DEC "%0\n"
13963 : "+m" (l->a.counter));
13964 }
13965
13966 static inline void local_add(long i, local_t *l)
13967 {
13968- asm volatile(_ASM_ADD "%1,%0"
13969+ asm volatile(_ASM_ADD "%1,%0\n"
13970+
13971+#ifdef CONFIG_PAX_REFCOUNT
13972+ "jno 0f\n"
13973+ _ASM_SUB "%1,%0\n"
13974+ "int $4\n0:\n"
13975+ _ASM_EXTABLE(0b, 0b)
13976+#endif
13977+
13978+ : "+m" (l->a.counter)
13979+ : "ir" (i));
13980+}
13981+
13982+static inline void local_add_unchecked(long i, local_unchecked_t *l)
13983+{
13984+ asm volatile(_ASM_ADD "%1,%0\n"
13985 : "+m" (l->a.counter)
13986 : "ir" (i));
13987 }
13988
13989 static inline void local_sub(long i, local_t *l)
13990 {
13991- asm volatile(_ASM_SUB "%1,%0"
13992+ asm volatile(_ASM_SUB "%1,%0\n"
13993+
13994+#ifdef CONFIG_PAX_REFCOUNT
13995+ "jno 0f\n"
13996+ _ASM_ADD "%1,%0\n"
13997+ "int $4\n0:\n"
13998+ _ASM_EXTABLE(0b, 0b)
13999+#endif
14000+
14001+ : "+m" (l->a.counter)
14002+ : "ir" (i));
14003+}
14004+
14005+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
14006+{
14007+ asm volatile(_ASM_SUB "%1,%0\n"
14008 : "+m" (l->a.counter)
14009 : "ir" (i));
14010 }
14011@@ -54,7 +118,16 @@ static inline int local_sub_and_test(long i, local_t *l)
14012 {
14013 unsigned char c;
14014
14015- asm volatile(_ASM_SUB "%2,%0; sete %1"
14016+ asm volatile(_ASM_SUB "%2,%0\n"
14017+
14018+#ifdef CONFIG_PAX_REFCOUNT
14019+ "jno 0f\n"
14020+ _ASM_ADD "%2,%0\n"
14021+ "int $4\n0:\n"
14022+ _ASM_EXTABLE(0b, 0b)
14023+#endif
14024+
14025+ "sete %1\n"
14026 : "+m" (l->a.counter), "=qm" (c)
14027 : "ir" (i) : "memory");
14028 return c;
14029@@ -72,7 +145,16 @@ static inline int local_dec_and_test(local_t *l)
14030 {
14031 unsigned char c;
14032
14033- asm volatile(_ASM_DEC "%0; sete %1"
14034+ asm volatile(_ASM_DEC "%0\n"
14035+
14036+#ifdef CONFIG_PAX_REFCOUNT
14037+ "jno 0f\n"
14038+ _ASM_INC "%0\n"
14039+ "int $4\n0:\n"
14040+ _ASM_EXTABLE(0b, 0b)
14041+#endif
14042+
14043+ "sete %1\n"
14044 : "+m" (l->a.counter), "=qm" (c)
14045 : : "memory");
14046 return c != 0;
14047@@ -90,7 +172,16 @@ static inline int local_inc_and_test(local_t *l)
14048 {
14049 unsigned char c;
14050
14051- asm volatile(_ASM_INC "%0; sete %1"
14052+ asm volatile(_ASM_INC "%0\n"
14053+
14054+#ifdef CONFIG_PAX_REFCOUNT
14055+ "jno 0f\n"
14056+ _ASM_DEC "%0\n"
14057+ "int $4\n0:\n"
14058+ _ASM_EXTABLE(0b, 0b)
14059+#endif
14060+
14061+ "sete %1\n"
14062 : "+m" (l->a.counter), "=qm" (c)
14063 : : "memory");
14064 return c != 0;
14065@@ -109,7 +200,16 @@ static inline int local_add_negative(long i, local_t *l)
14066 {
14067 unsigned char c;
14068
14069- asm volatile(_ASM_ADD "%2,%0; sets %1"
14070+ asm volatile(_ASM_ADD "%2,%0\n"
14071+
14072+#ifdef CONFIG_PAX_REFCOUNT
14073+ "jno 0f\n"
14074+ _ASM_SUB "%2,%0\n"
14075+ "int $4\n0:\n"
14076+ _ASM_EXTABLE(0b, 0b)
14077+#endif
14078+
14079+ "sets %1\n"
14080 : "+m" (l->a.counter), "=qm" (c)
14081 : "ir" (i) : "memory");
14082 return c;
14083@@ -125,6 +225,30 @@ static inline int local_add_negative(long i, local_t *l)
14084 static inline long local_add_return(long i, local_t *l)
14085 {
14086 long __i = i;
14087+ asm volatile(_ASM_XADD "%0, %1\n"
14088+
14089+#ifdef CONFIG_PAX_REFCOUNT
14090+ "jno 0f\n"
14091+ _ASM_MOV "%0,%1\n"
14092+ "int $4\n0:\n"
14093+ _ASM_EXTABLE(0b, 0b)
14094+#endif
14095+
14096+ : "+r" (i), "+m" (l->a.counter)
14097+ : : "memory");
14098+ return i + __i;
14099+}
14100+
14101+/**
14102+ * local_add_return_unchecked - add and return
14103+ * @i: integer value to add
14104+ * @l: pointer to type local_unchecked_t
14105+ *
14106+ * Atomically adds @i to @l and returns @i + @l
14107+ */
14108+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
14109+{
14110+ long __i = i;
14111 asm volatile(_ASM_XADD "%0, %1;"
14112 : "+r" (i), "+m" (l->a.counter)
14113 : : "memory");
14114@@ -141,6 +265,8 @@ static inline long local_sub_return(long i, local_t *l)
14115
14116 #define local_cmpxchg(l, o, n) \
14117 (cmpxchg_local(&((l)->a.counter), (o), (n)))
14118+#define local_cmpxchg_unchecked(l, o, n) \
14119+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
14120 /* Always has a lock prefix */
14121 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
14122
14123diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
14124new file mode 100644
14125index 0000000..2bfd3ba
14126--- /dev/null
14127+++ b/arch/x86/include/asm/mman.h
14128@@ -0,0 +1,15 @@
14129+#ifndef _X86_MMAN_H
14130+#define _X86_MMAN_H
14131+
14132+#include <uapi/asm/mman.h>
14133+
14134+#ifdef __KERNEL__
14135+#ifndef __ASSEMBLY__
14136+#ifdef CONFIG_X86_32
14137+#define arch_mmap_check i386_mmap_check
14138+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
14139+#endif
14140+#endif
14141+#endif
14142+
14143+#endif /* X86_MMAN_H */
14144diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
14145index 5f55e69..e20bfb1 100644
14146--- a/arch/x86/include/asm/mmu.h
14147+++ b/arch/x86/include/asm/mmu.h
14148@@ -9,7 +9,7 @@
14149 * we put the segment information here.
14150 */
14151 typedef struct {
14152- void *ldt;
14153+ struct desc_struct *ldt;
14154 int size;
14155
14156 #ifdef CONFIG_X86_64
14157@@ -18,7 +18,19 @@ typedef struct {
14158 #endif
14159
14160 struct mutex lock;
14161- void *vdso;
14162+ unsigned long vdso;
14163+
14164+#ifdef CONFIG_X86_32
14165+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
14166+ unsigned long user_cs_base;
14167+ unsigned long user_cs_limit;
14168+
14169+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
14170+ cpumask_t cpu_user_cs_mask;
14171+#endif
14172+
14173+#endif
14174+#endif
14175 } mm_context_t;
14176
14177 #ifdef CONFIG_SMP
14178diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
14179index cdbf367..adb37ac 100644
14180--- a/arch/x86/include/asm/mmu_context.h
14181+++ b/arch/x86/include/asm/mmu_context.h
14182@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
14183
14184 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
14185 {
14186+
14187+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
14188+ unsigned int i;
14189+ pgd_t *pgd;
14190+
14191+ pax_open_kernel();
14192+ pgd = get_cpu_pgd(smp_processor_id());
14193+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
14194+ set_pgd_batched(pgd+i, native_make_pgd(0));
14195+ pax_close_kernel();
14196+#endif
14197+
14198 #ifdef CONFIG_SMP
14199 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
14200 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
14201@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
14202 struct task_struct *tsk)
14203 {
14204 unsigned cpu = smp_processor_id();
14205+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
14206+ int tlbstate = TLBSTATE_OK;
14207+#endif
14208
14209 if (likely(prev != next)) {
14210 #ifdef CONFIG_SMP
14211+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
14212+ tlbstate = this_cpu_read(cpu_tlbstate.state);
14213+#endif
14214 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
14215 this_cpu_write(cpu_tlbstate.active_mm, next);
14216 #endif
14217 cpumask_set_cpu(cpu, mm_cpumask(next));
14218
14219 /* Re-load page tables */
14220+#ifdef CONFIG_PAX_PER_CPU_PGD
14221+ pax_open_kernel();
14222+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
14223+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
14224+ pax_close_kernel();
14225+ load_cr3(get_cpu_pgd(cpu));
14226+#else
14227 load_cr3(next->pgd);
14228+#endif
14229
14230 /* stop flush ipis for the previous mm */
14231 cpumask_clear_cpu(cpu, mm_cpumask(prev));
14232@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
14233 */
14234 if (unlikely(prev->context.ldt != next->context.ldt))
14235 load_LDT_nolock(&next->context);
14236- }
14237+
14238+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
14239+ if (!(__supported_pte_mask & _PAGE_NX)) {
14240+ smp_mb__before_clear_bit();
14241+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
14242+ smp_mb__after_clear_bit();
14243+ cpu_set(cpu, next->context.cpu_user_cs_mask);
14244+ }
14245+#endif
14246+
14247+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
14248+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
14249+ prev->context.user_cs_limit != next->context.user_cs_limit))
14250+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
14251 #ifdef CONFIG_SMP
14252+ else if (unlikely(tlbstate != TLBSTATE_OK))
14253+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
14254+#endif
14255+#endif
14256+
14257+ }
14258 else {
14259+
14260+#ifdef CONFIG_PAX_PER_CPU_PGD
14261+ pax_open_kernel();
14262+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
14263+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
14264+ pax_close_kernel();
14265+ load_cr3(get_cpu_pgd(cpu));
14266+#endif
14267+
14268+#ifdef CONFIG_SMP
14269 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
14270 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
14271
14272@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
14273 * tlb flush IPI delivery. We must reload CR3
14274 * to make sure to use no freed page tables.
14275 */
14276+
14277+#ifndef CONFIG_PAX_PER_CPU_PGD
14278 load_cr3(next->pgd);
14279+#endif
14280+
14281 load_LDT_nolock(&next->context);
14282+
14283+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
14284+ if (!(__supported_pte_mask & _PAGE_NX))
14285+ cpu_set(cpu, next->context.cpu_user_cs_mask);
14286+#endif
14287+
14288+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
14289+#ifdef CONFIG_PAX_PAGEEXEC
14290+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
14291+#endif
14292+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
14293+#endif
14294+
14295 }
14296+#endif
14297 }
14298-#endif
14299 }
14300
14301 #define activate_mm(prev, next) \
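
[Editor's sketch] With PAX_PER_CPU_PGD each CPU runs on its own private top-level page table (cpu_pgd[] in the pgtable.h hunk below): switch_mm() no longer points CR3 at the incoming mm's pgd but refreshes the user half of the per-CPU copy and reloads that. A toy model of the data flow (user-space sketch; the real code copies under pax_open_kernel() and uses set_pgd_batched()):

	#include <stdio.h>
	#include <string.h>

	#define PTRS_PER_PGD	512
	#define USER_PGD_PTRS	256	/* 1 << (47 - 39), default config */
	#define NR_CPUS		4

	typedef struct { unsigned long pgd; } pgd_t;

	static pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];

	static void model_switch_mm(const pgd_t *next_pgd, int cpu)
	{
		/* __clone_user_pgds(): user half comes from the new mm */
		memcpy(cpu_pgd[cpu], next_pgd, USER_PGD_PTRS * sizeof(pgd_t));
		/* with UDEREF, __shadow_user_pgds() also mirrors these
		 * entries into the upper slots; the kernel half of
		 * cpu_pgd[cpu] is never touched here, and the real code
		 * finishes with load_cr3(get_cpu_pgd(cpu)) */
	}

	int main(void)
	{
		pgd_t mm_pgd[PTRS_PER_PGD] = { { 0x1067 } };

		model_switch_mm(mm_pgd, 0);
		printf("cpu0 entry0: %#lx\n", cpu_pgd[0][0].pgd);
		return 0;
	}

The enter_lazy_tlb() hunk is the counterpart: on a lazy switch the shadow user entries are zeroed, so a CPU idling in the kernel holds no user mappings.
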
14302diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
14303index e3b7819..b257c64 100644
14304--- a/arch/x86/include/asm/module.h
14305+++ b/arch/x86/include/asm/module.h
14306@@ -5,6 +5,7 @@
14307
14308 #ifdef CONFIG_X86_64
14309 /* X86_64 does not define MODULE_PROC_FAMILY */
14310+#define MODULE_PROC_FAMILY ""
14311 #elif defined CONFIG_M486
14312 #define MODULE_PROC_FAMILY "486 "
14313 #elif defined CONFIG_M586
14314@@ -57,8 +58,20 @@
14315 #error unknown processor family
14316 #endif
14317
14318-#ifdef CONFIG_X86_32
14319-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
14320+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
14321+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
14322+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
14323+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
14324+#else
14325+#define MODULE_PAX_KERNEXEC ""
14326 #endif
14327
14328+#ifdef CONFIG_PAX_MEMORY_UDEREF
14329+#define MODULE_PAX_UDEREF "UDEREF "
14330+#else
14331+#define MODULE_PAX_UDEREF ""
14332+#endif
14333+
14334+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
14335+
14336 #endif /* _ASM_X86_MODULE_H */
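
[Editor's sketch] Folding the PaX options into MODULE_ARCH_VERMAGIC means a module is accepted only if it was built with the same KERNEXEC/UDEREF configuration as the running kernel; a mismatch is rejected at load time instead of misbehaving later. What the composed string looks like for one assumed configuration:

	#include <stdio.h>

	/* assumed config for the demo: x86_64 ("" proc family),
	 * KERNEXEC with the OR plugin method, UDEREF enabled */
	#define MODULE_PROC_FAMILY	""
	#define MODULE_PAX_KERNEXEC	"KERNEXEC_OR "
	#define MODULE_PAX_UDEREF	"UDEREF "
	#define MODULE_ARCH_VERMAGIC \
		MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF

	int main(void)
	{
		/* prints: arch vermagic: "KERNEXEC_OR UDEREF " */
		printf("arch vermagic: \"%s\"\n", MODULE_ARCH_VERMAGIC);
		return 0;
	}

Note the hunk also defines MODULE_PROC_FAMILY as "" on x86_64, which vanilla leaves undefined because nothing referenced it there.
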
14337diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
14338index c0fa356..07a498a 100644
14339--- a/arch/x86/include/asm/nmi.h
14340+++ b/arch/x86/include/asm/nmi.h
14341@@ -42,11 +42,11 @@ struct nmiaction {
14342 nmi_handler_t handler;
14343 unsigned long flags;
14344 const char *name;
14345-};
14346+} __do_const;
14347
14348 #define register_nmi_handler(t, fn, fg, n, init...) \
14349 ({ \
14350- static struct nmiaction init fn##_na = { \
14351+ static const struct nmiaction init fn##_na = { \
14352 .handler = (fn), \
14353 .name = (n), \
14354 .flags = (fg), \
14355@@ -54,7 +54,7 @@ struct nmiaction {
14356 __register_nmi_handler((t), &fn##_na); \
14357 })
14358
14359-int __register_nmi_handler(unsigned int, struct nmiaction *);
14360+int __register_nmi_handler(unsigned int, const struct nmiaction *);
14361
14362 void unregister_nmi_handler(unsigned int, const char *);
14363
14364diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
14365index 320f7bb..e89f8f8 100644
14366--- a/arch/x86/include/asm/page_64_types.h
14367+++ b/arch/x86/include/asm/page_64_types.h
14368@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
14369
14370 /* duplicated to the one in bootmem.h */
14371 extern unsigned long max_pfn;
14372-extern unsigned long phys_base;
14373+extern const unsigned long phys_base;
14374
14375 extern unsigned long __phys_addr(unsigned long);
14376 #define __phys_reloc_hide(x) (x)
14377diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
14378index 5edd174..c395822 100644
14379--- a/arch/x86/include/asm/paravirt.h
14380+++ b/arch/x86/include/asm/paravirt.h
14381@@ -564,7 +564,7 @@ static inline pmd_t __pmd(pmdval_t val)
14382 return (pmd_t) { ret };
14383 }
14384
14385-static inline pmdval_t pmd_val(pmd_t pmd)
14386+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
14387 {
14388 pmdval_t ret;
14389
14390@@ -630,6 +630,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
14391 val);
14392 }
14393
14394+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
14395+{
14396+ pgdval_t val = native_pgd_val(pgd);
14397+
14398+ if (sizeof(pgdval_t) > sizeof(long))
14399+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
14400+ val, (u64)val >> 32);
14401+ else
14402+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
14403+ val);
14404+}
14405+
14406 static inline void pgd_clear(pgd_t *pgdp)
14407 {
14408 set_pgd(pgdp, __pgd(0));
14409@@ -711,6 +723,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
14410 pv_mmu_ops.set_fixmap(idx, phys, flags);
14411 }
14412
14413+#ifdef CONFIG_PAX_KERNEXEC
14414+static inline unsigned long pax_open_kernel(void)
14415+{
14416+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
14417+}
14418+
14419+static inline unsigned long pax_close_kernel(void)
14420+{
14421+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
14422+}
14423+#else
14424+static inline unsigned long pax_open_kernel(void) { return 0; }
14425+static inline unsigned long pax_close_kernel(void) { return 0; }
14426+#endif
14427+
14428 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
14429
14430 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
14431@@ -927,7 +954,7 @@ extern void default_banner(void);
14432
14433 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
14434 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
14435-#define PARA_INDIRECT(addr) *%cs:addr
14436+#define PARA_INDIRECT(addr) *%ss:addr
14437 #endif
14438
14439 #define INTERRUPT_RETURN \
14440@@ -1002,6 +1029,21 @@ extern void default_banner(void);
14441 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
14442 CLBR_NONE, \
14443 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
14444+
14445+#define GET_CR0_INTO_RDI \
14446+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
14447+ mov %rax,%rdi
14448+
14449+#define SET_RDI_INTO_CR0 \
14450+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
14451+
14452+#define GET_CR3_INTO_RDI \
14453+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
14454+ mov %rax,%rdi
14455+
14456+#define SET_RDI_INTO_CR3 \
14457+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
14458+
14459 #endif /* CONFIG_X86_32 */
14460
14461 #endif /* __ASSEMBLY__ */
14462diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
14463index 142236e..5446ffbc 100644
14464--- a/arch/x86/include/asm/paravirt_types.h
14465+++ b/arch/x86/include/asm/paravirt_types.h
14466@@ -84,7 +84,7 @@ struct pv_init_ops {
14467 */
14468 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
14469 unsigned long addr, unsigned len);
14470-};
14471+} __no_const;
14472
14473
14474 struct pv_lazy_ops {
14475@@ -97,7 +97,7 @@ struct pv_time_ops {
14476 unsigned long long (*sched_clock)(void);
14477 unsigned long long (*steal_clock)(int cpu);
14478 unsigned long (*get_tsc_khz)(void);
14479-};
14480+} __no_const;
14481
14482 struct pv_cpu_ops {
14483 /* hooks for various privileged instructions */
14484@@ -191,7 +191,7 @@ struct pv_cpu_ops {
14485
14486 void (*start_context_switch)(struct task_struct *prev);
14487 void (*end_context_switch)(struct task_struct *next);
14488-};
14489+} __no_const;
14490
14491 struct pv_irq_ops {
14492 /*
14493@@ -222,7 +222,7 @@ struct pv_apic_ops {
14494 unsigned long start_eip,
14495 unsigned long start_esp);
14496 #endif
14497-};
14498+} __no_const;
14499
14500 struct pv_mmu_ops {
14501 unsigned long (*read_cr2)(void);
14502@@ -312,6 +312,7 @@ struct pv_mmu_ops {
14503 struct paravirt_callee_save make_pud;
14504
14505 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
14506+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
14507 #endif /* PAGETABLE_LEVELS == 4 */
14508 #endif /* PAGETABLE_LEVELS >= 3 */
14509
14510@@ -323,6 +324,12 @@ struct pv_mmu_ops {
14511 an mfn. We can tell which is which from the index. */
14512 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
14513 phys_addr_t phys, pgprot_t flags);
14514+
14515+#ifdef CONFIG_PAX_KERNEXEC
14516+ unsigned long (*pax_open_kernel)(void);
14517+ unsigned long (*pax_close_kernel)(void);
14518+#endif
14519+
14520 };
14521
14522 struct arch_spinlock;
14523@@ -333,7 +340,7 @@ struct pv_lock_ops {
14524 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
14525 int (*spin_trylock)(struct arch_spinlock *lock);
14526 void (*spin_unlock)(struct arch_spinlock *lock);
14527-};
14528+} __no_const;
14529
14530 /* This contains all the paravirt structures: we get a convenient
14531 * number for each function using the offset which we use to indicate
14532diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
14533index b4389a4..7024269 100644
14534--- a/arch/x86/include/asm/pgalloc.h
14535+++ b/arch/x86/include/asm/pgalloc.h
14536@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
14537 pmd_t *pmd, pte_t *pte)
14538 {
14539 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
14540+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
14541+}
14542+
14543+static inline void pmd_populate_user(struct mm_struct *mm,
14544+ pmd_t *pmd, pte_t *pte)
14545+{
14546+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
14547 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
14548 }
14549
14550@@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
14551
14552 #ifdef CONFIG_X86_PAE
14553 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
14554+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
14555+{
14556+ pud_populate(mm, pudp, pmd);
14557+}
14558 #else /* !CONFIG_X86_PAE */
14559 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
14560 {
14561 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
14562 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
14563 }
14564+
14565+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
14566+{
14567+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
14568+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
14569+}
14570 #endif /* CONFIG_X86_PAE */
14571
14572 #if PAGETABLE_LEVELS > 3
14573@@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
14574 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
14575 }
14576
14577+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
14578+{
14579+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
14580+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
14581+}
14582+
14583 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
14584 {
14585 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
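
[Editor's sketch] The split introduced above is between _PAGE_TABLE and _KERNPG_TABLE: the new *_kernel populate helpers omit _PAGE_USER from upper-level entries so kernel page tables are never reachable with user permissions, while pmd_populate_user() keeps the user bit for process mappings. The bit-level difference (values as in pgtable_types.h; compare the 0x063/0x067 PDE_IDENT_ATTR hunk later in this patch):

	#include <stdio.h>

	#define _PAGE_PRESENT	0x001UL
	#define _PAGE_RW	0x002UL
	#define _PAGE_USER	0x004UL
	#define _PAGE_ACCESSED	0x020UL
	#define _PAGE_DIRTY	0x040UL
	#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
	#define _PAGE_TABLE	(_KERNPG_TABLE | _PAGE_USER)

	int main(void)
	{
		printf("_KERNPG_TABLE = %#05lx (no _PAGE_USER)\n", _KERNPG_TABLE);
		printf("_PAGE_TABLE   = %#05lx\n", _PAGE_TABLE);
		return 0;
	}
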
14586diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
14587index f2b489c..4f7e2e5 100644
14588--- a/arch/x86/include/asm/pgtable-2level.h
14589+++ b/arch/x86/include/asm/pgtable-2level.h
14590@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
14591
14592 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
14593 {
14594+ pax_open_kernel();
14595 *pmdp = pmd;
14596+ pax_close_kernel();
14597 }
14598
14599 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
14600diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
14601index 4cc9f2b..5fd9226 100644
14602--- a/arch/x86/include/asm/pgtable-3level.h
14603+++ b/arch/x86/include/asm/pgtable-3level.h
14604@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
14605
14606 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
14607 {
14608+ pax_open_kernel();
14609 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
14610+ pax_close_kernel();
14611 }
14612
14613 static inline void native_set_pud(pud_t *pudp, pud_t pud)
14614 {
14615+ pax_open_kernel();
14616 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
14617+ pax_close_kernel();
14618 }
14619
14620 /*
14621diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
14622index 1c1a955..50f828c 100644
14623--- a/arch/x86/include/asm/pgtable.h
14624+++ b/arch/x86/include/asm/pgtable.h
14625@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
14626
14627 #ifndef __PAGETABLE_PUD_FOLDED
14628 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
14629+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
14630 #define pgd_clear(pgd) native_pgd_clear(pgd)
14631 #endif
14632
14633@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
14634
14635 #define arch_end_context_switch(prev) do {} while(0)
14636
14637+#define pax_open_kernel() native_pax_open_kernel()
14638+#define pax_close_kernel() native_pax_close_kernel()
14639 #endif /* CONFIG_PARAVIRT */
14640
14641+#define __HAVE_ARCH_PAX_OPEN_KERNEL
14642+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
14643+
14644+#ifdef CONFIG_PAX_KERNEXEC
14645+static inline unsigned long native_pax_open_kernel(void)
14646+{
14647+ unsigned long cr0;
14648+
14649+ preempt_disable();
14650+ barrier();
14651+ cr0 = read_cr0() ^ X86_CR0_WP;
14652+ BUG_ON(cr0 & X86_CR0_WP);
14653+ write_cr0(cr0);
14654+ return cr0 ^ X86_CR0_WP;
14655+}
14656+
14657+static inline unsigned long native_pax_close_kernel(void)
14658+{
14659+ unsigned long cr0;
14660+
14661+ cr0 = read_cr0() ^ X86_CR0_WP;
14662+ BUG_ON(!(cr0 & X86_CR0_WP));
14663+ write_cr0(cr0);
14664+ barrier();
14665+ preempt_enable_no_resched();
14666+ return cr0 ^ X86_CR0_WP;
14667+}
14668+#else
14669+static inline unsigned long native_pax_open_kernel(void) { return 0; }
14670+static inline unsigned long native_pax_close_kernel(void) { return 0; }
14671+#endif
14672+
14673 /*
14674 * The following only work if pte_present() is true.
14675 * Undefined behaviour if not..
14676 */
14677+static inline int pte_user(pte_t pte)
14678+{
14679+ return pte_val(pte) & _PAGE_USER;
14680+}
14681+
14682 static inline int pte_dirty(pte_t pte)
14683 {
14684 return pte_flags(pte) & _PAGE_DIRTY;
14685@@ -200,9 +240,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
14686 return pte_clear_flags(pte, _PAGE_RW);
14687 }
14688
14689+static inline pte_t pte_mkread(pte_t pte)
14690+{
14691+ return __pte(pte_val(pte) | _PAGE_USER);
14692+}
14693+
14694 static inline pte_t pte_mkexec(pte_t pte)
14695 {
14696- return pte_clear_flags(pte, _PAGE_NX);
14697+#ifdef CONFIG_X86_PAE
14698+ if (__supported_pte_mask & _PAGE_NX)
14699+ return pte_clear_flags(pte, _PAGE_NX);
14700+ else
14701+#endif
14702+ return pte_set_flags(pte, _PAGE_USER);
14703+}
14704+
14705+static inline pte_t pte_exprotect(pte_t pte)
14706+{
14707+#ifdef CONFIG_X86_PAE
14708+ if (__supported_pte_mask & _PAGE_NX)
14709+ return pte_set_flags(pte, _PAGE_NX);
14710+ else
14711+#endif
14712+ return pte_clear_flags(pte, _PAGE_USER);
14713 }
14714
14715 static inline pte_t pte_mkdirty(pte_t pte)
14716@@ -394,6 +454,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
14717 #endif
14718
14719 #ifndef __ASSEMBLY__
14720+
14721+#ifdef CONFIG_PAX_PER_CPU_PGD
14722+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
14723+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
14724+{
14725+ return cpu_pgd[cpu];
14726+}
14727+#endif
14728+
14729 #include <linux/mm_types.h>
14730
14731 static inline int pte_none(pte_t pte)
14732@@ -583,7 +652,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
14733
14734 static inline int pgd_bad(pgd_t pgd)
14735 {
14736- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
14737+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
14738 }
14739
14740 static inline int pgd_none(pgd_t pgd)
14741@@ -606,7 +675,12 @@ static inline int pgd_none(pgd_t pgd)
14742 * pgd_offset() returns a (pgd_t *)
14743 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
14744 */
14745-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
14746+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
14747+
14748+#ifdef CONFIG_PAX_PER_CPU_PGD
14749+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
14750+#endif
14751+
14752 /*
14753 * a shortcut which implies the use of the kernel's pgd, instead
14754 * of a process's
14755@@ -617,6 +691,20 @@ static inline int pgd_none(pgd_t pgd)
14756 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
14757 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
14758
14759+#ifdef CONFIG_X86_32
14760+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
14761+#else
14762+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
14763+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
14764+
14765+#ifdef CONFIG_PAX_MEMORY_UDEREF
14766+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
14767+#else
14768+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
14769+#endif
14770+
14771+#endif
14772+
14773 #ifndef __ASSEMBLY__
14774
14775 extern int direct_gbpages;
14776@@ -781,11 +869,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
14777 * dst and src can be on the same page, but the range must not overlap,
14778 * and must not cross a page boundary.
14779 */
14780-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
14781+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
14782 {
14783- memcpy(dst, src, count * sizeof(pgd_t));
14784+ pax_open_kernel();
14785+ while (count--)
14786+ *dst++ = *src++;
14787+ pax_close_kernel();
14788 }
14789
14790+#ifdef CONFIG_PAX_PER_CPU_PGD
14791+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
14792+#endif
14793+
14794+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
14795+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
14796+#else
14797+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
14798+#endif
14799
14800 #include <asm-generic/pgtable.h>
14801 #endif /* __ASSEMBLY__ */
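
[Editor's sketch] native_pax_open_kernel()/native_pax_close_kernel() above are the heart of KERNEXEC's write protection: kernel data that normally stays read-only is written through a short window in which CR0.WP is cleared, with preemption disabled for the duration and the BUG_ON()s catching unbalanced or nested open/close pairs. Every pax_open_kernel()/pax_close_kernel() pair sprinkled through this patch expands to this when CONFIG_PAX_KERNEXEC is set, and to a no-op otherwise. The usage shape, as a hypothetical kernel-side sketch (not a hunk from this patch):

	/* update an object that lives in read-only memory; keep the
	 * unprotected window as small as possible */
	static void update_protected_word(unsigned long *p, unsigned long v)
	{
		pax_open_kernel();	/* CR0.WP cleared, preemption off */
		*p = v;			/* write bypasses write protection */
		pax_close_kernel();	/* CR0.WP restored, preemption on */
	}

Under paravirt the same names route through pv_mmu_ops.pax_open_kernel/pax_close_kernel, added in the paravirt hunks earlier.
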
14802diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
14803index 8faa215..a8a17ea 100644
14804--- a/arch/x86/include/asm/pgtable_32.h
14805+++ b/arch/x86/include/asm/pgtable_32.h
14806@@ -25,9 +25,6 @@
14807 struct mm_struct;
14808 struct vm_area_struct;
14809
14810-extern pgd_t swapper_pg_dir[1024];
14811-extern pgd_t initial_page_table[1024];
14812-
14813 static inline void pgtable_cache_init(void) { }
14814 static inline void check_pgt_cache(void) { }
14815 void paging_init(void);
14816@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
14817 # include <asm/pgtable-2level.h>
14818 #endif
14819
14820+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
14821+extern pgd_t initial_page_table[PTRS_PER_PGD];
14822+#ifdef CONFIG_X86_PAE
14823+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
14824+#endif
14825+
14826 #if defined(CONFIG_HIGHPTE)
14827 #define pte_offset_map(dir, address) \
14828 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
14829@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
14830 /* Clear a kernel PTE and flush it from the TLB */
14831 #define kpte_clear_flush(ptep, vaddr) \
14832 do { \
14833+ pax_open_kernel(); \
14834 pte_clear(&init_mm, (vaddr), (ptep)); \
14835+ pax_close_kernel(); \
14836 __flush_tlb_one((vaddr)); \
14837 } while (0)
14838
14839@@ -75,6 +80,9 @@ do { \
14840
14841 #endif /* !__ASSEMBLY__ */
14842
14843+#define HAVE_ARCH_UNMAPPED_AREA
14844+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
14845+
14846 /*
14847 * kern_addr_valid() is (1) for FLATMEM and (0) for
14848 * SPARSEMEM and DISCONTIGMEM
14849diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
14850index ed5903b..c7fe163 100644
14851--- a/arch/x86/include/asm/pgtable_32_types.h
14852+++ b/arch/x86/include/asm/pgtable_32_types.h
14853@@ -8,7 +8,7 @@
14854 */
14855 #ifdef CONFIG_X86_PAE
14856 # include <asm/pgtable-3level_types.h>
14857-# define PMD_SIZE (1UL << PMD_SHIFT)
14858+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
14859 # define PMD_MASK (~(PMD_SIZE - 1))
14860 #else
14861 # include <asm/pgtable-2level_types.h>
14862@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
14863 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
14864 #endif
14865
14866+#ifdef CONFIG_PAX_KERNEXEC
14867+#ifndef __ASSEMBLY__
14868+extern unsigned char MODULES_EXEC_VADDR[];
14869+extern unsigned char MODULES_EXEC_END[];
14870+#endif
14871+#include <asm/boot.h>
14872+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
14873+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
14874+#else
14875+#define ktla_ktva(addr) (addr)
14876+#define ktva_ktla(addr) (addr)
14877+#endif
14878+
14879 #define MODULES_VADDR VMALLOC_START
14880 #define MODULES_END VMALLOC_END
14881 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
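
[Editor's sketch] ktla_ktva()/ktva_ktla() translate between the two aliases KERNEXEC keeps of the i386 kernel text (roughly, the linear mapping and the relocated executable mapping); without KERNEXEC, and on amd64 (see pgtable_64_types.h below), they collapse to the identity. The two macros are inverse 32-bit offsets, which a minimal check makes clear (PAGE_OFFSET 0xc0000000 and LOAD_PHYSICAL_ADDR 0x1000000 are illustrative defaults):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_OFFSET		0xc0000000u
	#define LOAD_PHYSICAL_ADDR	0x1000000u
	#define ktla_ktva(addr) ((uint32_t)((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET))
	#define ktva_ktla(addr) ((uint32_t)((addr) - LOAD_PHYSICAL_ADDR - PAGE_OFFSET))

	int main(void)
	{
		uint32_t addr = 0xc1234567u;

		assert(ktva_ktla(ktla_ktva(addr)) == addr);
		printf("%#x <-> %#x\n", addr, ktla_ktva(addr));
		return 0;
	}
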
14882diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
14883index 47356f9..deb94a2 100644
14884--- a/arch/x86/include/asm/pgtable_64.h
14885+++ b/arch/x86/include/asm/pgtable_64.h
14886@@ -16,10 +16,14 @@
14887
14888 extern pud_t level3_kernel_pgt[512];
14889 extern pud_t level3_ident_pgt[512];
14890+extern pud_t level3_vmalloc_start_pgt[512];
14891+extern pud_t level3_vmalloc_end_pgt[512];
14892+extern pud_t level3_vmemmap_pgt[512];
14893+extern pud_t level2_vmemmap_pgt[512];
14894 extern pmd_t level2_kernel_pgt[512];
14895 extern pmd_t level2_fixmap_pgt[512];
14896-extern pmd_t level2_ident_pgt[512];
14897-extern pgd_t init_level4_pgt[];
14898+extern pmd_t level2_ident_pgt[512*2];
14899+extern pgd_t init_level4_pgt[512];
14900
14901 #define swapper_pg_dir init_level4_pgt
14902
14903@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
14904
14905 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
14906 {
14907+ pax_open_kernel();
14908 *pmdp = pmd;
14909+ pax_close_kernel();
14910 }
14911
14912 static inline void native_pmd_clear(pmd_t *pmd)
14913@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
14914
14915 static inline void native_set_pud(pud_t *pudp, pud_t pud)
14916 {
14917+ pax_open_kernel();
14918 *pudp = pud;
14919+ pax_close_kernel();
14920 }
14921
14922 static inline void native_pud_clear(pud_t *pud)
14923@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
14924
14925 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
14926 {
14927+ pax_open_kernel();
14928+ *pgdp = pgd;
14929+ pax_close_kernel();
14930+}
14931+
14932+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
14933+{
14934 *pgdp = pgd;
14935 }
14936
14937diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
14938index 766ea16..5b96cb3 100644
14939--- a/arch/x86/include/asm/pgtable_64_types.h
14940+++ b/arch/x86/include/asm/pgtable_64_types.h
14941@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
14942 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
14943 #define MODULES_END _AC(0xffffffffff000000, UL)
14944 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
14945+#define MODULES_EXEC_VADDR MODULES_VADDR
14946+#define MODULES_EXEC_END MODULES_END
14947+
14948+#define ktla_ktva(addr) (addr)
14949+#define ktva_ktla(addr) (addr)
14950
14951 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
14952diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
14953index 3c32db8..1ddccf5 100644
14954--- a/arch/x86/include/asm/pgtable_types.h
14955+++ b/arch/x86/include/asm/pgtable_types.h
14956@@ -16,13 +16,12 @@
14957 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
14958 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
14959 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
14960-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
14961+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
14962 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
14963 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
14964 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
14965-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
14966-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
14967-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
14968+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
14969+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
14970 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
14971
14972 /* If _PAGE_BIT_PRESENT is clear, we use these: */
14973@@ -40,7 +39,6 @@
14974 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
14975 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
14976 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
14977-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
14978 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
14979 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
14980 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
14981@@ -57,8 +55,10 @@
14982
14983 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
14984 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
14985-#else
14986+#elif defined(CONFIG_KMEMCHECK)
14987 #define _PAGE_NX (_AT(pteval_t, 0))
14988+#else
14989+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
14990 #endif
14991
14992 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
14993@@ -116,6 +116,9 @@
14994 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
14995 _PAGE_ACCESSED)
14996
14997+#define PAGE_READONLY_NOEXEC PAGE_READONLY
14998+#define PAGE_SHARED_NOEXEC PAGE_SHARED
14999+
15000 #define __PAGE_KERNEL_EXEC \
15001 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
15002 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
15003@@ -126,7 +129,7 @@
15004 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
15005 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
15006 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
15007-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
15008+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
15009 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
15010 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
15011 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
15012@@ -188,8 +191,8 @@
15013 * bits are combined, this will allow the user to access the high address mapped
15014 * VDSO in the presence of CONFIG_COMPAT_VDSO
15015 */
15016-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
15017-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
15018+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
15019+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
15020 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
15021 #endif
15022
15023@@ -227,7 +230,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
15024 {
15025 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
15026 }
15027+#endif
15028
15029+#if PAGETABLE_LEVELS == 3
15030+#include <asm-generic/pgtable-nopud.h>
15031+#endif
15032+
15033+#if PAGETABLE_LEVELS == 2
15034+#include <asm-generic/pgtable-nopmd.h>
15035+#endif
15036+
15037+#ifndef __ASSEMBLY__
15038 #if PAGETABLE_LEVELS > 3
15039 typedef struct { pudval_t pud; } pud_t;
15040
15041@@ -241,8 +254,6 @@ static inline pudval_t native_pud_val(pud_t pud)
15042 return pud.pud;
15043 }
15044 #else
15045-#include <asm-generic/pgtable-nopud.h>
15046-
15047 static inline pudval_t native_pud_val(pud_t pud)
15048 {
15049 return native_pgd_val(pud.pgd);
15050@@ -262,8 +273,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
15051 return pmd.pmd;
15052 }
15053 #else
15054-#include <asm-generic/pgtable-nopmd.h>
15055-
15056 static inline pmdval_t native_pmd_val(pmd_t pmd)
15057 {
15058 return native_pgd_val(pmd.pud.pgd);
15059@@ -303,7 +312,6 @@ typedef struct page *pgtable_t;
15060
15061 extern pteval_t __supported_pte_mask;
15062 extern void set_nx(void);
15063-extern int nx_enabled;
15064
15065 #define pgprot_writecombine pgprot_writecombine
15066 extern pgprot_t pgprot_writecombine(pgprot_t prot);
15067diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
15068index 888184b..a07ac89 100644
15069--- a/arch/x86/include/asm/processor.h
15070+++ b/arch/x86/include/asm/processor.h
15071@@ -287,7 +287,7 @@ struct tss_struct {
15072
15073 } ____cacheline_aligned;
15074
15075-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
15076+extern struct tss_struct init_tss[NR_CPUS];
15077
15078 /*
15079 * Save the original ist values for checking stack pointers during debugging
15080@@ -827,11 +827,18 @@ static inline void spin_lock_prefetch(const void *x)
15081 */
15082 #define TASK_SIZE PAGE_OFFSET
15083 #define TASK_SIZE_MAX TASK_SIZE
15084+
15085+#ifdef CONFIG_PAX_SEGMEXEC
15086+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
15087+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
15088+#else
15089 #define STACK_TOP TASK_SIZE
15090-#define STACK_TOP_MAX STACK_TOP
15091+#endif
15092+
15093+#define STACK_TOP_MAX TASK_SIZE
15094
15095 #define INIT_THREAD { \
15096- .sp0 = sizeof(init_stack) + (long)&init_stack, \
15097+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
15098 .vm86_info = NULL, \
15099 .sysenter_cs = __KERNEL_CS, \
15100 .io_bitmap_ptr = NULL, \
15101@@ -845,7 +852,7 @@ static inline void spin_lock_prefetch(const void *x)
15102 */
15103 #define INIT_TSS { \
15104 .x86_tss = { \
15105- .sp0 = sizeof(init_stack) + (long)&init_stack, \
15106+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
15107 .ss0 = __KERNEL_DS, \
15108 .ss1 = __KERNEL_CS, \
15109 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
15110@@ -856,11 +863,7 @@ static inline void spin_lock_prefetch(const void *x)
15111 extern unsigned long thread_saved_pc(struct task_struct *tsk);
15112
15113 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
15114-#define KSTK_TOP(info) \
15115-({ \
15116- unsigned long *__ptr = (unsigned long *)(info); \
15117- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
15118-})
15119+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
15120
15121 /*
15122 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
15123@@ -875,7 +878,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
15124 #define task_pt_regs(task) \
15125 ({ \
15126 struct pt_regs *__regs__; \
15127- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
15128+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
15129 __regs__ - 1; \
15130 })
15131
15132@@ -885,13 +888,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
15133 /*
15134 * User space process size. 47bits minus one guard page.
15135 */
15136-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
15137+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
15138
15139 /* This decides where the kernel will search for a free chunk of vm
15140 * space during mmap's.
15141 */
15142 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
15143- 0xc0000000 : 0xFFFFe000)
15144+ 0xc0000000 : 0xFFFFf000)
15145
15146 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
15147 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
15148@@ -902,11 +905,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
15149 #define STACK_TOP_MAX TASK_SIZE_MAX
15150
15151 #define INIT_THREAD { \
15152- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
15153+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
15154 }
15155
15156 #define INIT_TSS { \
15157- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
15158+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
15159 }
15160
15161 /*
15162@@ -934,6 +937,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
15163 */
15164 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
15165
15166+#ifdef CONFIG_PAX_SEGMEXEC
15167+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
15168+#endif
15169+
15170 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
15171
15172 /* Get/set a process' ability to use the timestamp counter instruction */
15173@@ -994,12 +1001,12 @@ extern bool cpu_has_amd_erratum(const int *);
15174 #define cpu_has_amd_erratum(x) (false)
15175 #endif /* CONFIG_CPU_SUP_AMD */
15176
15177-extern unsigned long arch_align_stack(unsigned long sp);
15178+#define arch_align_stack(x) ((x) & ~0xfUL)
15179 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
15180
15181 void default_idle(void);
15182 bool set_pm_idle_to_default(void);
15183
15184-void stop_this_cpu(void *dummy);
15185+void stop_this_cpu(void *dummy) __noreturn;
15186
15187 #endif /* _ASM_X86_PROCESSOR_H */
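
The arch_align_stack() replacement in the processor.h hunk above is now a pure constant expression: it rounds the stack pointer down to a 16-byte boundary instead of calling the old randomizing helper. A minimal user-space sketch of the arithmetic (the sample address is made up):

#include <stdio.h>

int main(void)
{
        unsigned long sp = 0x7fffffffe12bUL;    /* arbitrary example value */
        unsigned long aligned = sp & ~0xfUL;    /* what the new macro computes */
        printf("%#lx -> %#lx\n", sp, aligned);  /* ...e12b -> ...e120 */
        return 0;
}
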
15188diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
15189index 942a086..6c26446 100644
15190--- a/arch/x86/include/asm/ptrace.h
15191+++ b/arch/x86/include/asm/ptrace.h
15192@@ -85,28 +85,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
15193 }
15194
15195 /*
15196- * user_mode_vm(regs) determines whether a register set came from user mode.
15197+ * user_mode(regs) determines whether a register set came from user mode.
15198 * This is true if V8086 mode was enabled OR if the register set was from
15199 * protected mode with RPL-3 CS value. This tricky test checks that with
15200 * one comparison. Many places in the kernel can bypass this full check
15201- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
15202+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
15203+ * be used.
15204 */
15205-static inline int user_mode(struct pt_regs *regs)
15206+static inline int user_mode_novm(struct pt_regs *regs)
15207 {
15208 #ifdef CONFIG_X86_32
15209 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
15210 #else
15211- return !!(regs->cs & 3);
15212+ return !!(regs->cs & SEGMENT_RPL_MASK);
15213 #endif
15214 }
15215
15216-static inline int user_mode_vm(struct pt_regs *regs)
15217+static inline int user_mode(struct pt_regs *regs)
15218 {
15219 #ifdef CONFIG_X86_32
15220 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
15221 USER_RPL;
15222 #else
15223- return user_mode(regs);
15224+ return user_mode_novm(regs);
15225 #endif
15226 }
15227
15228@@ -122,15 +123,16 @@ static inline int v8086_mode(struct pt_regs *regs)
15229 #ifdef CONFIG_X86_64
15230 static inline bool user_64bit_mode(struct pt_regs *regs)
15231 {
15232+ unsigned long cs = regs->cs & 0xffff;
15233 #ifndef CONFIG_PARAVIRT
15234 /*
15235 * On non-paravirt systems, this is the only long mode CPL 3
15236 * selector. We do not allow long mode selectors in the LDT.
15237 */
15238- return regs->cs == __USER_CS;
15239+ return cs == __USER_CS;
15240 #else
15241 /* Headers are too twisted for this to go in paravirt.h. */
15242- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
15243+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
15244 #endif
15245 }
15246
15247@@ -181,9 +183,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
15248 * Traps from the kernel do not save sp and ss.
15249 * Use the helper function to retrieve sp.
15250 */
15251- if (offset == offsetof(struct pt_regs, sp) &&
15252- regs->cs == __KERNEL_CS)
15253- return kernel_stack_pointer(regs);
15254+ if (offset == offsetof(struct pt_regs, sp)) {
15255+ unsigned long cs = regs->cs & 0xffff;
15256+ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
15257+ return kernel_stack_pointer(regs);
15258+ }
15259 #endif
15260 return *(unsigned long *)((unsigned long)regs + offset);
15261 }
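
The ptrace.h hunk above renames the helpers and, on x86-64, masks %cs to its low 16 bits before comparing, since the pushed cs slot may carry padding above the selector. A hedged user-space model of the resulting RPL test (the selector values are illustrative only):

#include <stdio.h>

#define SEGMENT_RPL_MASK 0x3UL                  /* requested privilege level bits */

static int user_mode_novm_like(unsigned long cs_slot)
{
        unsigned long cs = cs_slot & 0xffff;    /* drop any padding bits */
        return (cs & SEGMENT_RPL_MASK) != 0;    /* nonzero RPL => user mode */
}

int main(void)
{
        printf("%d\n", user_mode_novm_like(0xdead0033UL)); /* padded user CS */
        printf("%d\n", user_mode_novm_like(0x0010UL));     /* kernel CS */
        return 0;
}
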
15262diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
15263index fe1ec5b..dc5c3fe 100644
15264--- a/arch/x86/include/asm/realmode.h
15265+++ b/arch/x86/include/asm/realmode.h
15266@@ -22,16 +22,14 @@ struct real_mode_header {
15267 #endif
15268 /* APM/BIOS reboot */
15269 u32 machine_real_restart_asm;
15270-#ifdef CONFIG_X86_64
15271 u32 machine_real_restart_seg;
15272-#endif
15273 };
15274
15275 /* This must match data at trampoline_32/64.S */
15276 struct trampoline_header {
15277 #ifdef CONFIG_X86_32
15278 u32 start;
15279- u16 gdt_pad;
15280+ u16 boot_cs;
15281 u16 gdt_limit;
15282 u32 gdt_base;
15283 #else
15284diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
15285index a82c4f1..ac45053 100644
15286--- a/arch/x86/include/asm/reboot.h
15287+++ b/arch/x86/include/asm/reboot.h
15288@@ -6,13 +6,13 @@
15289 struct pt_regs;
15290
15291 struct machine_ops {
15292- void (*restart)(char *cmd);
15293- void (*halt)(void);
15294- void (*power_off)(void);
15295+ void (* __noreturn restart)(char *cmd);
15296+ void (* __noreturn halt)(void);
15297+ void (* __noreturn power_off)(void);
15298 void (*shutdown)(void);
15299 void (*crash_shutdown)(struct pt_regs *);
15300- void (*emergency_restart)(void);
15301-};
15302+ void (* __noreturn emergency_restart)(void);
15303+} __no_const;
15304
15305 extern struct machine_ops machine_ops;
15306
15307diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
15308index 2dbe4a7..ce1db00 100644
15309--- a/arch/x86/include/asm/rwsem.h
15310+++ b/arch/x86/include/asm/rwsem.h
15311@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
15312 {
15313 asm volatile("# beginning down_read\n\t"
15314 LOCK_PREFIX _ASM_INC "(%1)\n\t"
15315+
15316+#ifdef CONFIG_PAX_REFCOUNT
15317+ "jno 0f\n"
15318+ LOCK_PREFIX _ASM_DEC "(%1)\n"
15319+ "int $4\n0:\n"
15320+ _ASM_EXTABLE(0b, 0b)
15321+#endif
15322+
15323 /* adds 0x00000001 */
15324 " jns 1f\n"
15325 " call call_rwsem_down_read_failed\n"
15326@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
15327 "1:\n\t"
15328 " mov %1,%2\n\t"
15329 " add %3,%2\n\t"
15330+
15331+#ifdef CONFIG_PAX_REFCOUNT
15332+ "jno 0f\n"
15333+ "sub %3,%2\n"
15334+ "int $4\n0:\n"
15335+ _ASM_EXTABLE(0b, 0b)
15336+#endif
15337+
15338 " jle 2f\n\t"
15339 LOCK_PREFIX " cmpxchg %2,%0\n\t"
15340 " jnz 1b\n\t"
15341@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
15342 long tmp;
15343 asm volatile("# beginning down_write\n\t"
15344 LOCK_PREFIX " xadd %1,(%2)\n\t"
15345+
15346+#ifdef CONFIG_PAX_REFCOUNT
15347+ "jno 0f\n"
15348+ "mov %1,(%2)\n"
15349+ "int $4\n0:\n"
15350+ _ASM_EXTABLE(0b, 0b)
15351+#endif
15352+
15353 /* adds 0xffff0001, returns the old value */
15354 " test %1,%1\n\t"
15355 /* was the count 0 before? */
15356@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
15357 long tmp;
15358 asm volatile("# beginning __up_read\n\t"
15359 LOCK_PREFIX " xadd %1,(%2)\n\t"
15360+
15361+#ifdef CONFIG_PAX_REFCOUNT
15362+ "jno 0f\n"
15363+ "mov %1,(%2)\n"
15364+ "int $4\n0:\n"
15365+ _ASM_EXTABLE(0b, 0b)
15366+#endif
15367+
15368 /* subtracts 1, returns the old value */
15369 " jns 1f\n\t"
15370 " call call_rwsem_wake\n" /* expects old value in %edx */
15371@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
15372 long tmp;
15373 asm volatile("# beginning __up_write\n\t"
15374 LOCK_PREFIX " xadd %1,(%2)\n\t"
15375+
15376+#ifdef CONFIG_PAX_REFCOUNT
15377+ "jno 0f\n"
15378+ "mov %1,(%2)\n"
15379+ "int $4\n0:\n"
15380+ _ASM_EXTABLE(0b, 0b)
15381+#endif
15382+
15383 /* subtracts 0xffff0001, returns the old value */
15384 " jns 1f\n\t"
15385 " call call_rwsem_wake\n" /* expects old value in %edx */
15386@@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
15387 {
15388 asm volatile("# beginning __downgrade_write\n\t"
15389 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
15390+
15391+#ifdef CONFIG_PAX_REFCOUNT
15392+ "jno 0f\n"
15393+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
15394+ "int $4\n0:\n"
15395+ _ASM_EXTABLE(0b, 0b)
15396+#endif
15397+
15398 /*
15399 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
15400 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
15401@@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
15402 */
15403 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
15404 {
15405- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
15406+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
15407+
15408+#ifdef CONFIG_PAX_REFCOUNT
15409+ "jno 0f\n"
15410+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
15411+ "int $4\n0:\n"
15412+ _ASM_EXTABLE(0b, 0b)
15413+#endif
15414+
15415 : "+m" (sem->count)
15416 : "er" (delta));
15417 }
15418@@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
15419 */
15420 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
15421 {
15422- return delta + xadd(&sem->count, delta);
15423+ return delta + xadd_check_overflow(&sem->count, delta);
15424 }
15425
15426 #endif /* __KERNEL__ */
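
Every PAX_REFCOUNT hunk in the rwsem.h diff above follows the same shape: perform the atomic update, skip ahead with jno if the overflow flag is clear, otherwise undo the update and raise int $4 (the _ASM_EXTABLE entry lets execution resume after the trap). A rough user-space model of that pattern — x86 GCC inline asm only, with a flag variable standing in for the trap:

#include <stdio.h>
#include <limits.h>

static int checked_inc(int *counter)
{
        int overflowed = 0;
        asm volatile("lock incl %0\n\t"
                     "jno 1f\n\t"
                     "lock decl %0\n\t" /* undo the update, as the patch does */
                     "movl $1, %1\n"    /* stand-in for the "int $4" trap */
                     "1:"
                     : "+m" (*counter), "+r" (overflowed));
        return overflowed;
}

int main(void)
{
        int c = INT_MAX;                /* the next increment overflows */
        printf("overflow=%d value=%d\n", checked_inc(&c), c);
        return 0;
}
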
15427diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
15428index c48a950..c6d7468 100644
15429--- a/arch/x86/include/asm/segment.h
15430+++ b/arch/x86/include/asm/segment.h
15431@@ -64,10 +64,15 @@
15432 * 26 - ESPFIX small SS
15433 * 27 - per-cpu [ offset to per-cpu data area ]
15434 * 28 - stack_canary-20 [ for stack protector ]
15435- * 29 - unused
15436- * 30 - unused
15437+ * 29 - PCI BIOS CS
15438+ * 30 - PCI BIOS DS
15439 * 31 - TSS for double fault handler
15440 */
15441+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
15442+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
15443+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
15444+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
15445+
15446 #define GDT_ENTRY_TLS_MIN 6
15447 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
15448
15449@@ -79,6 +84,8 @@
15450
15451 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
15452
15453+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
15454+
15455 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
15456
15457 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
15458@@ -104,6 +111,12 @@
15459 #define __KERNEL_STACK_CANARY 0
15460 #endif
15461
15462+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
15463+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
15464+
15465+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
15466+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
15467+
15468 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
15469
15470 /*
15471@@ -141,7 +154,7 @@
15472 */
15473
15474 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
15475-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
15476+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
15477
15478
15479 #else
15480@@ -165,6 +178,8 @@
15481 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
15482 #define __USER32_DS __USER_DS
15483
15484+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
15485+
15486 #define GDT_ENTRY_TSS 8 /* needs two entries */
15487 #define GDT_ENTRY_LDT 10 /* needs two entries */
15488 #define GDT_ENTRY_TLS_MIN 12
15489@@ -185,6 +200,7 @@
15490 #endif
15491
15492 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
15493+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
15494 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
15495 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
15496 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
15497@@ -265,7 +281,7 @@ static inline unsigned long get_limit(unsigned long segment)
15498 {
15499 unsigned long __limit;
15500 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
15501- return __limit + 1;
15502+ return __limit;
15503 }
15504
15505 #endif /* !__ASSEMBLY__ */
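
All the new constants in the segment.h hunk above come from one encoding: an x86 segment selector is the GDT descriptor index shifted left by three, with the requested privilege level in the low two bits — hence the *8 for kernel selectors and *8+3 for user ones. A small sketch of the arithmetic, assuming the x86-32 GDT_ENTRY_KERNEL_BASE value of 12:

#include <stdio.h>

static unsigned int selector(unsigned int index, unsigned int rpl)
{
        return (index << 3) | rpl;      /* descriptor index, TI=0, RPL */
}

int main(void)
{
        unsigned int kernel_base = 12;  /* GDT_ENTRY_KERNEL_BASE, x86-32 */
        printf("__PCIBIOS_CS = 0x%x\n", selector(kernel_base + 17, 0));
        printf("__USER_CS    = 0x%x\n", selector(14, 3)); /* 0x73 on x86-32 */
        return 0;
}
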
15506diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
15507index b073aae..39f9bdd 100644
15508--- a/arch/x86/include/asm/smp.h
15509+++ b/arch/x86/include/asm/smp.h
15510@@ -36,7 +36,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
15511 /* cpus sharing the last level cache: */
15512 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
15513 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
15514-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
15515+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
15516
15517 static inline struct cpumask *cpu_sibling_mask(int cpu)
15518 {
15519@@ -79,7 +79,7 @@ struct smp_ops {
15520
15521 void (*send_call_func_ipi)(const struct cpumask *mask);
15522 void (*send_call_func_single_ipi)(int cpu);
15523-};
15524+} __no_const;
15525
15526 /* Globals due to paravirt */
15527 extern void set_cpu_sibling_map(int cpu);
15528@@ -191,14 +191,8 @@ extern unsigned disabled_cpus __cpuinitdata;
15529 extern int safe_smp_processor_id(void);
15530
15531 #elif defined(CONFIG_X86_64_SMP)
15532-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
15533-
15534-#define stack_smp_processor_id() \
15535-({ \
15536- struct thread_info *ti; \
15537- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
15538- ti->cpu; \
15539-})
15540+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
15541+#define stack_smp_processor_id() raw_smp_processor_id()
15542 #define safe_smp_processor_id() smp_processor_id()
15543
15544 #endif
15545diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
15546index 33692ea..350a534 100644
15547--- a/arch/x86/include/asm/spinlock.h
15548+++ b/arch/x86/include/asm/spinlock.h
15549@@ -172,6 +172,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
15550 static inline void arch_read_lock(arch_rwlock_t *rw)
15551 {
15552 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
15553+
15554+#ifdef CONFIG_PAX_REFCOUNT
15555+ "jno 0f\n"
15556+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
15557+ "int $4\n0:\n"
15558+ _ASM_EXTABLE(0b, 0b)
15559+#endif
15560+
15561 "jns 1f\n"
15562 "call __read_lock_failed\n\t"
15563 "1:\n"
15564@@ -181,6 +189,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
15565 static inline void arch_write_lock(arch_rwlock_t *rw)
15566 {
15567 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
15568+
15569+#ifdef CONFIG_PAX_REFCOUNT
15570+ "jno 0f\n"
15571+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
15572+ "int $4\n0:\n"
15573+ _ASM_EXTABLE(0b, 0b)
15574+#endif
15575+
15576 "jz 1f\n"
15577 "call __write_lock_failed\n\t"
15578 "1:\n"
15579@@ -210,13 +226,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
15580
15581 static inline void arch_read_unlock(arch_rwlock_t *rw)
15582 {
15583- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
15584+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
15585+
15586+#ifdef CONFIG_PAX_REFCOUNT
15587+ "jno 0f\n"
15588+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
15589+ "int $4\n0:\n"
15590+ _ASM_EXTABLE(0b, 0b)
15591+#endif
15592+
15593 :"+m" (rw->lock) : : "memory");
15594 }
15595
15596 static inline void arch_write_unlock(arch_rwlock_t *rw)
15597 {
15598- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
15599+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
15600+
15601+#ifdef CONFIG_PAX_REFCOUNT
15602+ "jno 0f\n"
15603+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
15604+ "int $4\n0:\n"
15605+ _ASM_EXTABLE(0b, 0b)
15606+#endif
15607+
15608 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
15609 }
15610
15611diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
15612index 6a99859..03cb807 100644
15613--- a/arch/x86/include/asm/stackprotector.h
15614+++ b/arch/x86/include/asm/stackprotector.h
15615@@ -47,7 +47,7 @@
15616 * head_32 for boot CPU and setup_per_cpu_areas() for others.
15617 */
15618 #define GDT_STACK_CANARY_INIT \
15619- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
15620+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
15621
15622 /*
15623 * Initialize the stackprotector canary value.
15624@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
15625
15626 static inline void load_stack_canary_segment(void)
15627 {
15628-#ifdef CONFIG_X86_32
15629+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
15630 asm volatile ("mov %0, %%gs" : : "r" (0));
15631 #endif
15632 }
15633diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
15634index 70bbe39..4ae2bd4 100644
15635--- a/arch/x86/include/asm/stacktrace.h
15636+++ b/arch/x86/include/asm/stacktrace.h
15637@@ -11,28 +11,20 @@
15638
15639 extern int kstack_depth_to_print;
15640
15641-struct thread_info;
15642+struct task_struct;
15643 struct stacktrace_ops;
15644
15645-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
15646- unsigned long *stack,
15647- unsigned long bp,
15648- const struct stacktrace_ops *ops,
15649- void *data,
15650- unsigned long *end,
15651- int *graph);
15652+typedef unsigned long walk_stack_t(struct task_struct *task,
15653+ void *stack_start,
15654+ unsigned long *stack,
15655+ unsigned long bp,
15656+ const struct stacktrace_ops *ops,
15657+ void *data,
15658+ unsigned long *end,
15659+ int *graph);
15660
15661-extern unsigned long
15662-print_context_stack(struct thread_info *tinfo,
15663- unsigned long *stack, unsigned long bp,
15664- const struct stacktrace_ops *ops, void *data,
15665- unsigned long *end, int *graph);
15666-
15667-extern unsigned long
15668-print_context_stack_bp(struct thread_info *tinfo,
15669- unsigned long *stack, unsigned long bp,
15670- const struct stacktrace_ops *ops, void *data,
15671- unsigned long *end, int *graph);
15672+extern walk_stack_t print_context_stack;
15673+extern walk_stack_t print_context_stack_bp;
15674
15675 /* Generic stack tracer with callbacks */
15676
15677@@ -40,7 +32,7 @@ struct stacktrace_ops {
15678 void (*address)(void *data, unsigned long address, int reliable);
15679 /* On negative return stop dumping */
15680 int (*stack)(void *data, char *name);
15681- walk_stack_t walk_stack;
15682+ walk_stack_t *walk_stack;
15683 };
15684
15685 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
15686diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
15687index 4ec45b3..a4f0a8a 100644
15688--- a/arch/x86/include/asm/switch_to.h
15689+++ b/arch/x86/include/asm/switch_to.h
15690@@ -108,7 +108,7 @@ do { \
15691 "call __switch_to\n\t" \
15692 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
15693 __switch_canary \
15694- "movq %P[thread_info](%%rsi),%%r8\n\t" \
15695+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
15696 "movq %%rax,%%rdi\n\t" \
15697 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
15698 "jnz ret_from_fork\n\t" \
15699@@ -119,7 +119,7 @@ do { \
15700 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
15701 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
15702 [_tif_fork] "i" (_TIF_FORK), \
15703- [thread_info] "i" (offsetof(struct task_struct, stack)), \
15704+ [thread_info] "m" (current_tinfo), \
15705 [current_task] "m" (current_task) \
15706 __switch_canary_iparam \
15707 : "memory", "cc" __EXTRA_CLOBBER)
15708diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
15709index 2d946e6..e453ec4 100644
15710--- a/arch/x86/include/asm/thread_info.h
15711+++ b/arch/x86/include/asm/thread_info.h
15712@@ -10,6 +10,7 @@
15713 #include <linux/compiler.h>
15714 #include <asm/page.h>
15715 #include <asm/types.h>
15716+#include <asm/percpu.h>
15717
15718 /*
15719 * low level task data that entry.S needs immediate access to
15720@@ -24,7 +25,6 @@ struct exec_domain;
15721 #include <linux/atomic.h>
15722
15723 struct thread_info {
15724- struct task_struct *task; /* main task structure */
15725 struct exec_domain *exec_domain; /* execution domain */
15726 __u32 flags; /* low level flags */
15727 __u32 status; /* thread synchronous flags */
15728@@ -34,19 +34,13 @@ struct thread_info {
15729 mm_segment_t addr_limit;
15730 struct restart_block restart_block;
15731 void __user *sysenter_return;
15732-#ifdef CONFIG_X86_32
15733- unsigned long previous_esp; /* ESP of the previous stack in
15734- case of nested (IRQ) stacks
15735- */
15736- __u8 supervisor_stack[0];
15737-#endif
15738+ unsigned long lowest_stack;
15739 unsigned int sig_on_uaccess_error:1;
15740 unsigned int uaccess_err:1; /* uaccess failed */
15741 };
15742
15743-#define INIT_THREAD_INFO(tsk) \
15744+#define INIT_THREAD_INFO \
15745 { \
15746- .task = &tsk, \
15747 .exec_domain = &default_exec_domain, \
15748 .flags = 0, \
15749 .cpu = 0, \
15750@@ -57,7 +51,7 @@ struct thread_info {
15751 }, \
15752 }
15753
15754-#define init_thread_info (init_thread_union.thread_info)
15755+#define init_thread_info (init_thread_union.stack)
15756 #define init_stack (init_thread_union.stack)
15757
15758 #else /* !__ASSEMBLY__ */
15759@@ -98,6 +92,7 @@ struct thread_info {
15760 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
15761 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
15762 #define TIF_X32 30 /* 32-bit native x86-64 binary */
15763+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
15764
15765 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
15766 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
15767@@ -122,17 +117,18 @@ struct thread_info {
15768 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
15769 #define _TIF_ADDR32 (1 << TIF_ADDR32)
15770 #define _TIF_X32 (1 << TIF_X32)
15771+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
15772
15773 /* work to do in syscall_trace_enter() */
15774 #define _TIF_WORK_SYSCALL_ENTRY \
15775 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
15776 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
15777- _TIF_NOHZ)
15778+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
15779
15780 /* work to do in syscall_trace_leave() */
15781 #define _TIF_WORK_SYSCALL_EXIT \
15782 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
15783- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
15784+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
15785
15786 /* work to do on interrupt/exception return */
15787 #define _TIF_WORK_MASK \
15788@@ -143,7 +139,7 @@ struct thread_info {
15789 /* work to do on any return to user space */
15790 #define _TIF_ALLWORK_MASK \
15791 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
15792- _TIF_NOHZ)
15793+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
15794
15795 /* Only used for 64 bit */
15796 #define _TIF_DO_NOTIFY_MASK \
15797@@ -159,45 +155,40 @@ struct thread_info {
15798
15799 #define PREEMPT_ACTIVE 0x10000000
15800
15801-#ifdef CONFIG_X86_32
15802-
15803-#define STACK_WARN (THREAD_SIZE/8)
15804-/*
15805- * macros/functions for gaining access to the thread information structure
15806- *
15807- * preempt_count needs to be 1 initially, until the scheduler is functional.
15808- */
15809-#ifndef __ASSEMBLY__
15810-
15811-
15812-/* how to get the current stack pointer from C */
15813-register unsigned long current_stack_pointer asm("esp") __used;
15814-
15815-/* how to get the thread information struct from C */
15816-static inline struct thread_info *current_thread_info(void)
15817-{
15818- return (struct thread_info *)
15819- (current_stack_pointer & ~(THREAD_SIZE - 1));
15820-}
15821-
15822-#else /* !__ASSEMBLY__ */
15823-
15824+#ifdef __ASSEMBLY__
15825 /* how to get the thread information struct from ASM */
15826 #define GET_THREAD_INFO(reg) \
15827- movl $-THREAD_SIZE, reg; \
15828- andl %esp, reg
15829+ mov PER_CPU_VAR(current_tinfo), reg
15830
15831 /* use this one if reg already contains %esp */
15832-#define GET_THREAD_INFO_WITH_ESP(reg) \
15833- andl $-THREAD_SIZE, reg
15834+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
15835+#else
15836+/* how to get the thread information struct from C */
15837+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
15838+
15839+static __always_inline struct thread_info *current_thread_info(void)
15840+{
15841+ return this_cpu_read_stable(current_tinfo);
15842+}
15843+#endif
15844+
15845+#ifdef CONFIG_X86_32
15846+
15847+#define STACK_WARN (THREAD_SIZE/8)
15848+/*
15849+ * macros/functions for gaining access to the thread information structure
15850+ *
15851+ * preempt_count needs to be 1 initially, until the scheduler is functional.
15852+ */
15853+#ifndef __ASSEMBLY__
15854+
15855+/* how to get the current stack pointer from C */
15856+register unsigned long current_stack_pointer asm("esp") __used;
15857
15858 #endif
15859
15860 #else /* X86_32 */
15861
15862-#include <asm/percpu.h>
15863-#define KERNEL_STACK_OFFSET (5*8)
15864-
15865 /*
15866 * macros/functions for gaining access to the thread information structure
15867 * preempt_count needs to be 1 initially, until the scheduler is functional.
15868@@ -205,27 +196,8 @@ static inline struct thread_info *current_thread_info(void)
15869 #ifndef __ASSEMBLY__
15870 DECLARE_PER_CPU(unsigned long, kernel_stack);
15871
15872-static inline struct thread_info *current_thread_info(void)
15873-{
15874- struct thread_info *ti;
15875- ti = (void *)(this_cpu_read_stable(kernel_stack) +
15876- KERNEL_STACK_OFFSET - THREAD_SIZE);
15877- return ti;
15878-}
15879-
15880-#else /* !__ASSEMBLY__ */
15881-
15882-/* how to get the thread information struct from ASM */
15883-#define GET_THREAD_INFO(reg) \
15884- movq PER_CPU_VAR(kernel_stack),reg ; \
15885- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
15886-
15887-/*
15888- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
15889- * a certain register (to be used in assembler memory operands).
15890- */
15891-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
15892-
15893+/* how to get the current stack pointer from C */
15894+register unsigned long current_stack_pointer asm("rsp") __used;
15895 #endif
15896
15897 #endif /* !X86_32 */
15898@@ -286,5 +258,12 @@ static inline bool is_ia32_task(void)
15899 extern void arch_task_cache_init(void);
15900 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
15901 extern void arch_release_task_struct(struct task_struct *tsk);
15902+
15903+#define __HAVE_THREAD_FUNCTIONS
15904+#define task_thread_info(task) (&(task)->tinfo)
15905+#define task_stack_page(task) ((task)->stack)
15906+#define setup_thread_stack(p, org) do {} while (0)
15907+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
15908+
15909 #endif
15910 #endif /* _ASM_X86_THREAD_INFO_H */
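
The thread_info.h rework above swaps the classic "mask the stack pointer" lookup for a pointer read from per-CPU storage, so thread_info no longer has to live at the stack base. A hedged sketch contrasting the two lookups, with a plain global standing in for the per-CPU current_tinfo slot:

#include <stdio.h>
#include <stdint.h>

#define THREAD_SIZE 8192UL              /* assumed two-page kernel stack */

struct thread_info { int cpu; };

/* old scheme: thread_info sits at the base of the current stack */
static struct thread_info *by_masking(uintptr_t sp)
{
        return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
}

static struct thread_info boot_tinfo;
static struct thread_info *current_tinfo = &boot_tinfo; /* per-CPU stand-in */

/* new scheme: read the saved pointer, independent of stack layout */
static struct thread_info *by_pointer(void)
{
        return current_tinfo;
}

int main(void)
{
        uintptr_t fake_sp = 0x12345678UL;       /* made-up stack pointer */
        printf("masked: %p\n", (void *)by_masking(fake_sp));
        printf("stored: %p\n", (void *)by_pointer());
        return 0;
}
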
15911diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
15912index 1709801..0a60f2f 100644
15913--- a/arch/x86/include/asm/uaccess.h
15914+++ b/arch/x86/include/asm/uaccess.h
15915@@ -7,6 +7,7 @@
15916 #include <linux/compiler.h>
15917 #include <linux/thread_info.h>
15918 #include <linux/string.h>
15919+#include <linux/sched.h>
15920 #include <asm/asm.h>
15921 #include <asm/page.h>
15922 #include <asm/smap.h>
15923@@ -29,7 +30,12 @@
15924
15925 #define get_ds() (KERNEL_DS)
15926 #define get_fs() (current_thread_info()->addr_limit)
15927+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15928+void __set_fs(mm_segment_t x);
15929+void set_fs(mm_segment_t x);
15930+#else
15931 #define set_fs(x) (current_thread_info()->addr_limit = (x))
15932+#endif
15933
15934 #define segment_eq(a, b) ((a).seg == (b).seg)
15935
15936@@ -77,8 +83,33 @@
15937 * checks that the pointer is in the user space range - after calling
15938 * this function, memory access functions may still return -EFAULT.
15939 */
15940-#define access_ok(type, addr, size) \
15941- (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
15942+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
15943+#define access_ok(type, addr, size) \
15944+({ \
15945+ long __size = size; \
15946+ unsigned long __addr = (unsigned long)addr; \
15947+ unsigned long __addr_ao = __addr & PAGE_MASK; \
15948+ unsigned long __end_ao = __addr + __size - 1; \
15949+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
15950+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
15951+ while(__addr_ao <= __end_ao) { \
15952+ char __c_ao; \
15953+ __addr_ao += PAGE_SIZE; \
15954+ if (__size > PAGE_SIZE) \
15955+ cond_resched(); \
15956+ if (__get_user(__c_ao, (char __user *)__addr)) \
15957+ break; \
15958+ if (type != VERIFY_WRITE) { \
15959+ __addr = __addr_ao; \
15960+ continue; \
15961+ } \
15962+ if (__put_user(__c_ao, (char __user *)__addr)) \
15963+ break; \
15964+ __addr = __addr_ao; \
15965+ } \
15966+ } \
15967+ __ret_ao; \
15968+})
15969
15970 /*
15971 * The exception table consists of pairs of addresses relative to the
15972@@ -189,13 +220,21 @@ extern int __get_user_bad(void);
15973 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
15974 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
15975
15976-
15977+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15978+#define __copyuser_seg "gs;"
15979+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
15980+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
15981+#else
15982+#define __copyuser_seg
15983+#define __COPYUSER_SET_ES
15984+#define __COPYUSER_RESTORE_ES
15985+#endif
15986
15987 #ifdef CONFIG_X86_32
15988 #define __put_user_asm_u64(x, addr, err, errret) \
15989 asm volatile(ASM_STAC "\n" \
15990- "1: movl %%eax,0(%2)\n" \
15991- "2: movl %%edx,4(%2)\n" \
15992+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
15993+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
15994 "3: " ASM_CLAC "\n" \
15995 ".section .fixup,\"ax\"\n" \
15996 "4: movl %3,%0\n" \
15997@@ -208,8 +247,8 @@ extern int __get_user_bad(void);
15998
15999 #define __put_user_asm_ex_u64(x, addr) \
16000 asm volatile(ASM_STAC "\n" \
16001- "1: movl %%eax,0(%1)\n" \
16002- "2: movl %%edx,4(%1)\n" \
16003+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
16004+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
16005 "3: " ASM_CLAC "\n" \
16006 _ASM_EXTABLE_EX(1b, 2b) \
16007 _ASM_EXTABLE_EX(2b, 3b) \
16008@@ -259,7 +298,7 @@ extern void __put_user_8(void);
16009 __typeof__(*(ptr)) __pu_val; \
16010 __chk_user_ptr(ptr); \
16011 might_fault(); \
16012- __pu_val = x; \
16013+ __pu_val = (x); \
16014 switch (sizeof(*(ptr))) { \
16015 case 1: \
16016 __put_user_x(1, __pu_val, ptr, __ret_pu); \
16017@@ -358,7 +397,7 @@ do { \
16018
16019 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
16020 asm volatile(ASM_STAC "\n" \
16021- "1: mov"itype" %2,%"rtype"1\n" \
16022+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
16023 "2: " ASM_CLAC "\n" \
16024 ".section .fixup,\"ax\"\n" \
16025 "3: mov %3,%0\n" \
16026@@ -366,7 +405,7 @@ do { \
16027 " jmp 2b\n" \
16028 ".previous\n" \
16029 _ASM_EXTABLE(1b, 3b) \
16030- : "=r" (err), ltype(x) \
16031+ : "=r" (err), ltype (x) \
16032 : "m" (__m(addr)), "i" (errret), "0" (err))
16033
16034 #define __get_user_size_ex(x, ptr, size) \
16035@@ -391,7 +430,7 @@ do { \
16036 } while (0)
16037
16038 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
16039- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
16040+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
16041 "2:\n" \
16042 _ASM_EXTABLE_EX(1b, 2b) \
16043 : ltype(x) : "m" (__m(addr)))
16044@@ -408,13 +447,24 @@ do { \
16045 int __gu_err; \
16046 unsigned long __gu_val; \
16047 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
16048- (x) = (__force __typeof__(*(ptr)))__gu_val; \
16049+ (x) = (__typeof__(*(ptr)))__gu_val; \
16050 __gu_err; \
16051 })
16052
16053 /* FIXME: this hack is definitely wrong -AK */
16054 struct __large_struct { unsigned long buf[100]; };
16055-#define __m(x) (*(struct __large_struct __user *)(x))
16056+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16057+#define ____m(x) \
16058+({ \
16059+ unsigned long ____x = (unsigned long)(x); \
16060+ if (____x < PAX_USER_SHADOW_BASE) \
16061+ ____x += PAX_USER_SHADOW_BASE; \
16062+ (void __user *)____x; \
16063+})
16064+#else
16065+#define ____m(x) (x)
16066+#endif
16067+#define __m(x) (*(struct __large_struct __user *)____m(x))
16068
16069 /*
16070 * Tell gcc we read from memory instead of writing: this is because
16071@@ -423,7 +473,7 @@ struct __large_struct { unsigned long buf[100]; };
16072 */
16073 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
16074 asm volatile(ASM_STAC "\n" \
16075- "1: mov"itype" %"rtype"1,%2\n" \
16076+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
16077 "2: " ASM_CLAC "\n" \
16078 ".section .fixup,\"ax\"\n" \
16079 "3: mov %3,%0\n" \
16080@@ -431,10 +481,10 @@ struct __large_struct { unsigned long buf[100]; };
16081 ".previous\n" \
16082 _ASM_EXTABLE(1b, 3b) \
16083 : "=r"(err) \
16084- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
16085+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
16086
16087 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
16088- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
16089+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
16090 "2:\n" \
16091 _ASM_EXTABLE_EX(1b, 2b) \
16092 : : ltype(x), "m" (__m(addr)))
16093@@ -473,8 +523,12 @@ struct __large_struct { unsigned long buf[100]; };
16094 * On error, the variable @x is set to zero.
16095 */
16096
16097+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16098+#define __get_user(x, ptr) get_user((x), (ptr))
16099+#else
16100 #define __get_user(x, ptr) \
16101 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
16102+#endif
16103
16104 /**
16105 * __put_user: - Write a simple value into user space, with less checking.
16106@@ -496,8 +550,12 @@ struct __large_struct { unsigned long buf[100]; };
16107 * Returns zero on success, or -EFAULT on error.
16108 */
16109
16110+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16111+#define __put_user(x, ptr) put_user((x), (ptr))
16112+#else
16113 #define __put_user(x, ptr) \
16114 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
16115+#endif
16116
16117 #define __get_user_unaligned __get_user
16118 #define __put_user_unaligned __put_user
16119@@ -515,7 +573,7 @@ struct __large_struct { unsigned long buf[100]; };
16120 #define get_user_ex(x, ptr) do { \
16121 unsigned long __gue_val; \
16122 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
16123- (x) = (__force __typeof__(*(ptr)))__gue_val; \
16124+ (x) = (__typeof__(*(ptr)))__gue_val; \
16125 } while (0)
16126
16127 #define put_user_try uaccess_try
16128@@ -532,8 +590,8 @@ strncpy_from_user(char *dst, const char __user *src, long count);
16129 extern __must_check long strlen_user(const char __user *str);
16130 extern __must_check long strnlen_user(const char __user *str, long n);
16131
16132-unsigned long __must_check clear_user(void __user *mem, unsigned long len);
16133-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
16134+unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
16135+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
16136
16137 /*
16138 * movsl can be slow when source and dest are not both 8-byte aligned
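
The replacement access_ok() in the uaccess.h hunk above does more than a range check: when the region spans a page boundary it walks it page by page, reading (and, for writes, rewriting) one byte per page so any fault surfaces eagerly. A simplified user-space model — the probe is mocked out and the loop only approximates the macro's bookkeeping:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

static int probe_byte(unsigned long addr) { (void)addr; return 0; } /* mock */

static int prefault_range(unsigned long addr, unsigned long size)
{
        unsigned long page = addr & PAGE_MASK;
        unsigned long end  = addr + size - 1;

        if (((end ^ page) & PAGE_MASK) == 0)
                return 0;               /* single page: range check suffices */

        while (page <= end) {
                page += PAGE_SIZE;      /* advance to the next page... */
                if (probe_byte(addr))   /* ...after touching the current one */
                        return -1;
                addr = page;
        }
        return 0;
}

int main(void)
{
        printf("%d\n", prefault_range(0x10000f00UL, 0x300)); /* crosses a page */
        return 0;
}
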
16139diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
16140index 7f760a9..04b1c65 100644
16141--- a/arch/x86/include/asm/uaccess_32.h
16142+++ b/arch/x86/include/asm/uaccess_32.h
16143@@ -11,15 +11,15 @@
16144 #include <asm/page.h>
16145
16146 unsigned long __must_check __copy_to_user_ll
16147- (void __user *to, const void *from, unsigned long n);
16148+ (void __user *to, const void *from, unsigned long n) __size_overflow(3);
16149 unsigned long __must_check __copy_from_user_ll
16150- (void *to, const void __user *from, unsigned long n);
16151+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
16152 unsigned long __must_check __copy_from_user_ll_nozero
16153- (void *to, const void __user *from, unsigned long n);
16154+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
16155 unsigned long __must_check __copy_from_user_ll_nocache
16156- (void *to, const void __user *from, unsigned long n);
16157+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
16158 unsigned long __must_check __copy_from_user_ll_nocache_nozero
16159- (void *to, const void __user *from, unsigned long n);
16160+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
16161
16162 /**
16163 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
16164@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
16165 static __always_inline unsigned long __must_check
16166 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
16167 {
16168+ if ((long)n < 0)
16169+ return n;
16170+
16171+ check_object_size(from, n, true);
16172+
16173 if (__builtin_constant_p(n)) {
16174 unsigned long ret;
16175
16176@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
16177 __copy_to_user(void __user *to, const void *from, unsigned long n)
16178 {
16179 might_fault();
16180+
16181 return __copy_to_user_inatomic(to, from, n);
16182 }
16183
16184 static __always_inline unsigned long
16185 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
16186 {
16187+ if ((long)n < 0)
16188+ return n;
16189+
16190 /* Avoid zeroing the tail if the copy fails..
16191 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
16192 * but as the zeroing behaviour is only significant when n is not
16193@@ -137,6 +146,12 @@ static __always_inline unsigned long
16194 __copy_from_user(void *to, const void __user *from, unsigned long n)
16195 {
16196 might_fault();
16197+
16198+ if ((long)n < 0)
16199+ return n;
16200+
16201+ check_object_size(to, n, false);
16202+
16203 if (__builtin_constant_p(n)) {
16204 unsigned long ret;
16205
16206@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
16207 const void __user *from, unsigned long n)
16208 {
16209 might_fault();
16210+
16211+ if ((long)n < 0)
16212+ return n;
16213+
16214 if (__builtin_constant_p(n)) {
16215 unsigned long ret;
16216
16217@@ -181,15 +200,19 @@ static __always_inline unsigned long
16218 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
16219 unsigned long n)
16220 {
16221- return __copy_from_user_ll_nocache_nozero(to, from, n);
16222+ if ((long)n < 0)
16223+ return n;
16224+
16225+ return __copy_from_user_ll_nocache_nozero(to, from, n);
16226 }
16227
16228-unsigned long __must_check copy_to_user(void __user *to,
16229- const void *from, unsigned long n);
16230-unsigned long __must_check _copy_from_user(void *to,
16231- const void __user *from,
16232- unsigned long n);
16233-
16234+extern void copy_to_user_overflow(void)
16235+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
16236+ __compiletime_error("copy_to_user() buffer size is not provably correct")
16237+#else
16238+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
16239+#endif
16240+;
16241
16242 extern void copy_from_user_overflow(void)
16243 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
16244@@ -199,17 +222,60 @@ extern void copy_from_user_overflow(void)
16245 #endif
16246 ;
16247
16248-static inline unsigned long __must_check copy_from_user(void *to,
16249- const void __user *from,
16250- unsigned long n)
16251+/**
16252+ * copy_to_user: - Copy a block of data into user space.
16253+ * @to: Destination address, in user space.
16254+ * @from: Source address, in kernel space.
16255+ * @n: Number of bytes to copy.
16256+ *
16257+ * Context: User context only. This function may sleep.
16258+ *
16259+ * Copy data from kernel space to user space.
16260+ *
16261+ * Returns number of bytes that could not be copied.
16262+ * On success, this will be zero.
16263+ */
16264+static inline unsigned long __must_check
16265+copy_to_user(void __user *to, const void *from, unsigned long n)
16266 {
16267- int sz = __compiletime_object_size(to);
16268+ size_t sz = __compiletime_object_size(from);
16269
16270- if (likely(sz == -1 || sz >= n))
16271- n = _copy_from_user(to, from, n);
16272- else
16273+ if (unlikely(sz != (size_t)-1 && sz < n))
16274+ copy_to_user_overflow();
16275+ else if (access_ok(VERIFY_WRITE, to, n))
16276+ n = __copy_to_user(to, from, n);
16277+ return n;
16278+}
16279+
16280+/**
16281+ * copy_from_user: - Copy a block of data from user space.
16282+ * @to: Destination address, in kernel space.
16283+ * @from: Source address, in user space.
16284+ * @n: Number of bytes to copy.
16285+ *
16286+ * Context: User context only. This function may sleep.
16287+ *
16288+ * Copy data from user space to kernel space.
16289+ *
16290+ * Returns number of bytes that could not be copied.
16291+ * On success, this will be zero.
16292+ *
16293+ * If some data could not be copied, this function will pad the copied
16294+ * data to the requested size using zero bytes.
16295+ */
16296+static inline unsigned long __must_check
16297+copy_from_user(void *to, const void __user *from, unsigned long n)
16298+{
16299+ size_t sz = __compiletime_object_size(to);
16300+
16301+ check_object_size(to, n, false);
16302+
16303+ if (unlikely(sz != (size_t)-1 && sz < n))
16304 copy_from_user_overflow();
16305-
16306+ else if (access_ok(VERIFY_READ, from, n))
16307+ n = __copy_from_user(to, from, n);
16308+ else if ((long)n > 0)
16309+ memset(to, 0, n);
16310 return n;
16311 }
16312
16313diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
16314index 142810c..1f2a0a7 100644
16315--- a/arch/x86/include/asm/uaccess_64.h
16316+++ b/arch/x86/include/asm/uaccess_64.h
16317@@ -10,6 +10,9 @@
16318 #include <asm/alternative.h>
16319 #include <asm/cpufeature.h>
16320 #include <asm/page.h>
16321+#include <asm/pgtable.h>
16322+
16323+#define set_fs(x) (current_thread_info()->addr_limit = (x))
16324
16325 /*
16326 * Copy To/From Userspace
16327@@ -17,13 +20,13 @@
16328
16329 /* Handles exceptions in both to and from, but doesn't do access_ok */
16330 __must_check unsigned long
16331-copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
16332+copy_user_enhanced_fast_string(void *to, const void *from, unsigned len) __size_overflow(3);
16333 __must_check unsigned long
16334-copy_user_generic_string(void *to, const void *from, unsigned len);
16335+copy_user_generic_string(void *to, const void *from, unsigned len) __size_overflow(3);
16336 __must_check unsigned long
16337-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
16338+copy_user_generic_unrolled(void *to, const void *from, unsigned len) __size_overflow(3);
16339
16340-static __always_inline __must_check unsigned long
16341+static __always_inline __must_check __size_overflow(3) unsigned long
16342 copy_user_generic(void *to, const void *from, unsigned len)
16343 {
16344 unsigned ret;
16345@@ -41,142 +44,204 @@ copy_user_generic(void *to, const void *from, unsigned len)
16346 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
16347 "=d" (len)),
16348 "1" (to), "2" (from), "3" (len)
16349- : "memory", "rcx", "r8", "r9", "r10", "r11");
16350+ : "memory", "rcx", "r8", "r9", "r11");
16351 return ret;
16352 }
16353
16354+static __always_inline __must_check unsigned long
16355+__copy_to_user(void __user *to, const void *from, unsigned long len);
16356+static __always_inline __must_check unsigned long
16357+__copy_from_user(void *to, const void __user *from, unsigned long len);
16358 __must_check unsigned long
16359-_copy_to_user(void __user *to, const void *from, unsigned len);
16360-__must_check unsigned long
16361-_copy_from_user(void *to, const void __user *from, unsigned len);
16362-__must_check unsigned long
16363-copy_in_user(void __user *to, const void __user *from, unsigned len);
16364+copy_in_user(void __user *to, const void __user *from, unsigned long len);
16365+
16366+extern void copy_to_user_overflow(void)
16367+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
16368+ __compiletime_error("copy_to_user() buffer size is not provably correct")
16369+#else
16370+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
16371+#endif
16372+;
16373+
16374+extern void copy_from_user_overflow(void)
16375+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
16376+ __compiletime_error("copy_from_user() buffer size is not provably correct")
16377+#else
16378+ __compiletime_warning("copy_from_user() buffer size is not provably correct")
16379+#endif
16380+;
16381
16382 static inline unsigned long __must_check copy_from_user(void *to,
16383 const void __user *from,
16384 unsigned long n)
16385 {
16386- int sz = __compiletime_object_size(to);
16387-
16388 might_fault();
16389- if (likely(sz == -1 || sz >= n))
16390- n = _copy_from_user(to, from, n);
16391-#ifdef CONFIG_DEBUG_VM
16392- else
16393- WARN(1, "Buffer overflow detected!\n");
16394-#endif
16395+
16396+ check_object_size(to, n, false);
16397+
16398+ if (access_ok(VERIFY_READ, from, n))
16399+ n = __copy_from_user(to, from, n);
16400+ else if (n < INT_MAX)
16401+ memset(to, 0, n);
16402 return n;
16403 }
16404
16405 static __always_inline __must_check
16406-int copy_to_user(void __user *dst, const void *src, unsigned size)
16407+int copy_to_user(void __user *dst, const void *src, unsigned long size)
16408 {
16409 might_fault();
16410
16411- return _copy_to_user(dst, src, size);
16412+ if (access_ok(VERIFY_WRITE, dst, size))
16413+ size = __copy_to_user(dst, src, size);
16414+ return size;
16415 }
16416
16417 static __always_inline __must_check
16418-int __copy_from_user(void *dst, const void __user *src, unsigned size)
16419+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
16420 {
16421- int ret = 0;
16422+ size_t sz = __compiletime_object_size(dst);
16423+ unsigned ret = 0;
16424
16425 might_fault();
16426+
16427+ if (size > INT_MAX)
16428+ return size;
16429+
16430+ check_object_size(dst, size, false);
16431+
16432+#ifdef CONFIG_PAX_MEMORY_UDEREF
16433+ if (!__access_ok(VERIFY_READ, src, size))
16434+ return size;
16435+#endif
16436+
16437+ if (unlikely(sz != (size_t)-1 && sz < size)) {
16438+ copy_from_user_overflow();
16439+ return size;
16440+ }
16441+
16442 if (!__builtin_constant_p(size))
16443- return copy_user_generic(dst, (__force void *)src, size);
16444+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
16445 switch (size) {
16446- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
16447+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
16448 ret, "b", "b", "=q", 1);
16449 return ret;
16450- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
16451+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
16452 ret, "w", "w", "=r", 2);
16453 return ret;
16454- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
16455+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
16456 ret, "l", "k", "=r", 4);
16457 return ret;
16458- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
16459+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
16460 ret, "q", "", "=r", 8);
16461 return ret;
16462 case 10:
16463- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
16464+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
16465 ret, "q", "", "=r", 10);
16466 if (unlikely(ret))
16467 return ret;
16468 __get_user_asm(*(u16 *)(8 + (char *)dst),
16469- (u16 __user *)(8 + (char __user *)src),
16470+ (const u16 __user *)(8 + (const char __user *)src),
16471 ret, "w", "w", "=r", 2);
16472 return ret;
16473 case 16:
16474- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
16475+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
16476 ret, "q", "", "=r", 16);
16477 if (unlikely(ret))
16478 return ret;
16479 __get_user_asm(*(u64 *)(8 + (char *)dst),
16480- (u64 __user *)(8 + (char __user *)src),
16481+ (const u64 __user *)(8 + (const char __user *)src),
16482 ret, "q", "", "=r", 8);
16483 return ret;
16484 default:
16485- return copy_user_generic(dst, (__force void *)src, size);
16486+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
16487 }
16488 }
16489
16490 static __always_inline __must_check
16491-int __copy_to_user(void __user *dst, const void *src, unsigned size)
16492+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
16493 {
16494- int ret = 0;
16495+ size_t sz = __compiletime_object_size(src);
16496+ unsigned ret = 0;
16497
16498 might_fault();
16499+
16500+ if (size > INT_MAX)
16501+ return size;
16502+
16503+ check_object_size(src, size, true);
16504+
16505+#ifdef CONFIG_PAX_MEMORY_UDEREF
16506+ if (!__access_ok(VERIFY_WRITE, dst, size))
16507+ return size;
16508+#endif
16509+
16510+ if (unlikely(sz != (size_t)-1 && sz < size)) {
16511+ copy_to_user_overflow();
16512+ return size;
16513+ }
16514+
16515 if (!__builtin_constant_p(size))
16516- return copy_user_generic((__force void *)dst, src, size);
16517+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
16518 switch (size) {
16519- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
16520+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
16521 ret, "b", "b", "iq", 1);
16522 return ret;
16523- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
16524+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
16525 ret, "w", "w", "ir", 2);
16526 return ret;
16527- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
16528+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
16529 ret, "l", "k", "ir", 4);
16530 return ret;
16531- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
16532+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
16533 ret, "q", "", "er", 8);
16534 return ret;
16535 case 10:
16536- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
16537+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
16538 ret, "q", "", "er", 10);
16539 if (unlikely(ret))
16540 return ret;
16541 asm("":::"memory");
16542- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
16543+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
16544 ret, "w", "w", "ir", 2);
16545 return ret;
16546 case 16:
16547- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
16548+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
16549 ret, "q", "", "er", 16);
16550 if (unlikely(ret))
16551 return ret;
16552 asm("":::"memory");
16553- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
16554+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
16555 ret, "q", "", "er", 8);
16556 return ret;
16557 default:
16558- return copy_user_generic((__force void *)dst, src, size);
16559+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
16560 }
16561 }
16562
16563 static __always_inline __must_check
16564-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
16565+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
16566 {
16567- int ret = 0;
16568+ unsigned ret = 0;
16569
16570 might_fault();
16571+
16572+ if (size > INT_MAX)
16573+ return size;
16574+
16575+#ifdef CONFIG_PAX_MEMORY_UDEREF
16576+ if (!__access_ok(VERIFY_READ, src, size))
16577+ return size;
16578+ if (!__access_ok(VERIFY_WRITE, dst, size))
16579+ return size;
16580+#endif
16581+
16582 if (!__builtin_constant_p(size))
16583- return copy_user_generic((__force void *)dst,
16584- (__force void *)src, size);
16585+ return copy_user_generic((__force_kernel void *)____m(dst),
16586+ (__force_kernel const void *)____m(src), size);
16587 switch (size) {
16588 case 1: {
16589 u8 tmp;
16590- __get_user_asm(tmp, (u8 __user *)src,
16591+ __get_user_asm(tmp, (const u8 __user *)src,
16592 ret, "b", "b", "=q", 1);
16593 if (likely(!ret))
16594 __put_user_asm(tmp, (u8 __user *)dst,
16595@@ -185,7 +250,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
16596 }
16597 case 2: {
16598 u16 tmp;
16599- __get_user_asm(tmp, (u16 __user *)src,
16600+ __get_user_asm(tmp, (const u16 __user *)src,
16601 ret, "w", "w", "=r", 2);
16602 if (likely(!ret))
16603 __put_user_asm(tmp, (u16 __user *)dst,
16604@@ -195,7 +260,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
16605
16606 case 4: {
16607 u32 tmp;
16608- __get_user_asm(tmp, (u32 __user *)src,
16609+ __get_user_asm(tmp, (const u32 __user *)src,
16610 ret, "l", "k", "=r", 4);
16611 if (likely(!ret))
16612 __put_user_asm(tmp, (u32 __user *)dst,
16613@@ -204,7 +269,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
16614 }
16615 case 8: {
16616 u64 tmp;
16617- __get_user_asm(tmp, (u64 __user *)src,
16618+ __get_user_asm(tmp, (const u64 __user *)src,
16619 ret, "q", "", "=r", 8);
16620 if (likely(!ret))
16621 __put_user_asm(tmp, (u64 __user *)dst,
16622@@ -212,41 +277,72 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
16623 return ret;
16624 }
16625 default:
16626- return copy_user_generic((__force void *)dst,
16627- (__force void *)src, size);
16628+ return copy_user_generic((__force_kernel void *)____m(dst),
16629+ (__force_kernel const void *)____m(src), size);
16630 }
16631 }
16632
16633 static __must_check __always_inline int
16634-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
16635+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
16636 {
16637- return copy_user_generic(dst, (__force const void *)src, size);
16638+ if (size > INT_MAX)
16639+ return size;
16640+
16641+#ifdef CONFIG_PAX_MEMORY_UDEREF
16642+ if (!__access_ok(VERIFY_READ, src, size))
16643+ return size;
16644+#endif
16645+
16646+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
16647 }
16648
16649-static __must_check __always_inline int
16650-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
16651+static __must_check __always_inline unsigned long
16652+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
16653 {
16654- return copy_user_generic((__force void *)dst, src, size);
16655+ if (size > INT_MAX)
16656+ return size;
16657+
16658+#ifdef CONFIG_PAX_MEMORY_UDEREF
16659+ if (!__access_ok(VERIFY_WRITE, dst, size))
16660+ return size;
16661+#endif
16662+
16663+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
16664 }
16665
16666-extern long __copy_user_nocache(void *dst, const void __user *src,
16667- unsigned size, int zerorest);
16668+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
16669+ unsigned long size, int zerorest) __size_overflow(3);
16670
16671-static inline int
16672-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
16673+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
16674 {
16675 might_sleep();
16676+
16677+ if (size > INT_MAX)
16678+ return size;
16679+
16680+#ifdef CONFIG_PAX_MEMORY_UDEREF
16681+ if (!__access_ok(VERIFY_READ, src, size))
16682+ return size;
16683+#endif
16684+
16685 return __copy_user_nocache(dst, src, size, 1);
16686 }
16687
16688-static inline int
16689-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
16690- unsigned size)
16691+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
16692+ unsigned long size)
16693 {
16694+ if (size > INT_MAX)
16695+ return size;
16696+
16697+#ifdef CONFIG_PAX_MEMORY_UDEREF
16698+ if (!__access_ok(VERIFY_READ, src, size))
16699+ return size;
16700+#endif
16701+
16702 return __copy_user_nocache(dst, src, size, 0);
16703 }
16704
16705-unsigned long
16706-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
16707+extern unsigned long
16708+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
16709
16710 #endif /* _ASM_X86_UACCESS_64_H */
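
The hunks above give every 64-bit user-copy helper the same hardened prologue: lengths above INT_MAX are rejected outright, compile-time object sizes are checked against the requested copy, and under PAX_MEMORY_UDEREF the userland range is re-validated with __access_ok(). A minimal sketch of that prologue, assuming kernel context (INT_MAX, __access_ok() and the VERIFY_* flags are the 3.8-era interfaces; the helper name and its factoring into a separate function are hypothetical):

    #include <linux/kernel.h>	/* INT_MAX */

    /*
     * Hypothetical helper mirroring the guard the patch inserts at the
     * top of __copy_to_user()/__copy_from_user_*(): reject absurd
     * lengths and, with UDEREF, verify the range really lies in the
     * userland segment. Returning "size" means "size bytes left
     * uncopied", which callers already treat as total failure.
     */
    static inline unsigned long
    user_copy_guard(const void __user *uaddr, unsigned long size, int write)
    {
    	if (size > INT_MAX)	/* >2GiB is never a legitimate copy */
    		return size;

    #ifdef CONFIG_PAX_MEMORY_UDEREF
    	if (!__access_ok(write ? VERIFY_WRITE : VERIFY_READ, uaddr, size))
    		return size;	/* range escapes the userland segment */
    #endif

    	return 0;		/* all checks passed; proceed with the copy */
    }
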
16711diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
16712index 5b238981..77fdd78 100644
16713--- a/arch/x86/include/asm/word-at-a-time.h
16714+++ b/arch/x86/include/asm/word-at-a-time.h
16715@@ -11,7 +11,7 @@
16716 * and shift, for example.
16717 */
16718 struct word_at_a_time {
16719- const unsigned long one_bits, high_bits;
16720+ unsigned long one_bits, high_bits;
16721 };
16722
16723 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
16724diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
16725index 5769349..a3d3e2a 100644
16726--- a/arch/x86/include/asm/x86_init.h
16727+++ b/arch/x86/include/asm/x86_init.h
16728@@ -141,7 +141,7 @@ struct x86_init_ops {
16729 struct x86_init_timers timers;
16730 struct x86_init_iommu iommu;
16731 struct x86_init_pci pci;
16732-};
16733+} __no_const;
16734
16735 /**
16736 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
16737@@ -152,7 +152,7 @@ struct x86_cpuinit_ops {
16738 void (*setup_percpu_clockev)(void);
16739 void (*early_percpu_clock_init)(void);
16740 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
16741-};
16742+} __no_const;
16743
16744 /**
16745 * struct x86_platform_ops - platform specific runtime functions
16746@@ -178,7 +178,7 @@ struct x86_platform_ops {
16747 void (*save_sched_clock_state)(void);
16748 void (*restore_sched_clock_state)(void);
16749 void (*apic_post_init)(void);
16750-};
16751+} __no_const;
16752
16753 struct pci_dev;
16754
16755@@ -187,14 +187,14 @@ struct x86_msi_ops {
16756 void (*teardown_msi_irq)(unsigned int irq);
16757 void (*teardown_msi_irqs)(struct pci_dev *dev);
16758 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
16759-};
16760+} __no_const;
16761
16762 struct x86_io_apic_ops {
16763 void (*init) (void);
16764 unsigned int (*read) (unsigned int apic, unsigned int reg);
16765 void (*write) (unsigned int apic, unsigned int reg, unsigned int value);
16766 void (*modify)(unsigned int apic, unsigned int reg, unsigned int value);
16767-};
16768+} __no_const;
16769
16770 extern struct x86_init_ops x86_init;
16771 extern struct x86_cpuinit_ops x86_cpuinit;
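
Appending __no_const here is the flip side of grsecurity's constify GCC plugin: structures consisting only of function pointers are force-qualified const and moved to read-only memory, and __no_const opts out the few ops tables (like these x86_init hooks) that legitimately get rewritten at boot. A sketch of the idea, with the attribute stubbed out since it only exists when the plugin runs:

    #include <stddef.h>	/* NULL */

    #ifndef __no_const
    #define __no_const	/* supplied by the constify GCC plugin when enabled */
    #endif

    /* All-function-pointer struct: the plugin treats it as implicitly
     * const, so instances land in .rodata and cannot be redirected. */
    struct frob_ops {
    	int  (*probe)(void);
    	void (*run)(void);
    };

    /* This table is patched during early boot, so it must stay writable. */
    struct frob_boot_ops {
    	void (*fixup_cpu)(int cpu);
    } __no_const;

    static const struct frob_ops default_frob_ops = {	/* read-only */
    	.probe = NULL,
    	.run   = NULL,
    };
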
16772diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
16773index 0415cda..b43d877 100644
16774--- a/arch/x86/include/asm/xsave.h
16775+++ b/arch/x86/include/asm/xsave.h
16776@@ -71,7 +71,9 @@ static inline int xsave_user(struct xsave_struct __user *buf)
16777 return -EFAULT;
16778
16779 __asm__ __volatile__(ASM_STAC "\n"
16780- "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
16781+ "1:"
16782+ __copyuser_seg
16783+ ".byte " REX_PREFIX "0x0f,0xae,0x27\n"
16784 "2: " ASM_CLAC "\n"
16785 ".section .fixup,\"ax\"\n"
16786 "3: movl $-1,%[err]\n"
16787@@ -87,12 +89,14 @@ static inline int xsave_user(struct xsave_struct __user *buf)
16788 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
16789 {
16790 int err;
16791- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
16792+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
16793 u32 lmask = mask;
16794 u32 hmask = mask >> 32;
16795
16796 __asm__ __volatile__(ASM_STAC "\n"
16797- "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
16798+ "1:"
16799+ __copyuser_seg
16800+ ".byte " REX_PREFIX "0x0f,0xae,0x2f\n"
16801 "2: " ASM_CLAC "\n"
16802 ".section .fixup,\"ax\"\n"
16803 "3: movl $-1,%[err]\n"
16804diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
16805index bbae024..e1528f9 100644
16806--- a/arch/x86/include/uapi/asm/e820.h
16807+++ b/arch/x86/include/uapi/asm/e820.h
16808@@ -63,7 +63,7 @@ struct e820map {
16809 #define ISA_START_ADDRESS 0xa0000
16810 #define ISA_END_ADDRESS 0x100000
16811
16812-#define BIOS_BEGIN 0x000a0000
16813+#define BIOS_BEGIN 0x000c0000
16814 #define BIOS_END 0x00100000
16815
16816 #define BIOS_ROM_BASE 0xffe00000
16817diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
16818index 34e923a..0c6bb6e 100644
16819--- a/arch/x86/kernel/Makefile
16820+++ b/arch/x86/kernel/Makefile
16821@@ -22,7 +22,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
16822 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
16823 obj-$(CONFIG_IRQ_WORK) += irq_work.o
16824 obj-y += probe_roms.o
16825-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
16826+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
16827 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
16828 obj-y += syscall_$(BITS).o
16829 obj-$(CONFIG_X86_64) += vsyscall_64.o
16830diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
16831index bacf4b0..4ede72e 100644
16832--- a/arch/x86/kernel/acpi/boot.c
16833+++ b/arch/x86/kernel/acpi/boot.c
16834@@ -1358,7 +1358,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
16835 * If your system is blacklisted here, but you find that acpi=force
16836 * works for you, please contact linux-acpi@vger.kernel.org
16837 */
16838-static struct dmi_system_id __initdata acpi_dmi_table[] = {
16839+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
16840 /*
16841 * Boxes that need ACPI disabled
16842 */
16843@@ -1433,7 +1433,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
16844 };
16845
16846 /* second table for DMI checks that should run after early-quirks */
16847-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
16848+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
16849 /*
16850 * HP laptops which use a DSDT reporting as HP/SB400/10000,
16851 * which includes some code which overrides all temperature
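
The acpi_dmi_table changes are a recurring pattern in this patch: DMI quirk tables are consulted exactly once during init, so they can be both const (write-protected) and __initconst (discarded with the init sections). A self-contained example of such a table using the stock <linux/dmi.h> interface; the vendor strings and callback are made up:

    #include <linux/dmi.h>
    #include <linux/init.h>
    #include <linux/kernel.h>

    static int __init frob_quirk_cb(const struct dmi_system_id *d)
    {
    	pr_info("applying quirk for %s\n", d->ident);
    	return 0;
    }

    /* const + __initconst: read-only while in use, freed after boot. */
    static const struct dmi_system_id __initconst frob_quirk_table[] = {
    	{
    		.callback = frob_quirk_cb,
    		.ident = "Example Vendor Box",	/* hypothetical entry */
    		.matches = {
    			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
    			DMI_MATCH(DMI_PRODUCT_NAME, "Box 1000"),
    		},
    	},
    	{ }	/* zeroed terminator, required by dmi_check_system() */
    };

    /* consumed once during boot: dmi_check_system(frob_quirk_table); */
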
16852diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
16853index d5e0d71..6533e08 100644
16854--- a/arch/x86/kernel/acpi/sleep.c
16855+++ b/arch/x86/kernel/acpi/sleep.c
16856@@ -74,8 +74,12 @@ int acpi_suspend_lowlevel(void)
16857 #else /* CONFIG_64BIT */
16858 #ifdef CONFIG_SMP
16859 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
16860+
16861+ pax_open_kernel();
16862 early_gdt_descr.address =
16863 (unsigned long)get_cpu_gdt_table(smp_processor_id());
16864+ pax_close_kernel();
16865+
16866 initial_gs = per_cpu_offset(smp_processor_id());
16867 #endif
16868 initial_code = (unsigned long)wakeup_long64;
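
pax_open_kernel()/pax_close_kernel(), which this patch wraps around every legitimate write to otherwise read-only kernel data (the GDT descriptor here, the IDT and ops tables elsewhere), open a short per-CPU window in which write protection is suspended. A minimal sketch of the x86 mechanism under KERNEXEC, with hypothetical names; the real primitives are per-arch and also juggle UDEREF state:

    #include <linux/compiler.h>		/* barrier() */
    #include <linux/preempt.h>
    #include <asm/processor-flags.h>	/* X86_CR0_WP */
    #include <asm/special_insns.h>		/* read_cr0()/write_cr0() */

    static inline unsigned long my_open_kernel(void)
    {
    	unsigned long cr0;

    	preempt_disable();	/* keep the window confined to this CPU */
    	barrier();
    	cr0 = read_cr0();
    	write_cr0(cr0 & ~X86_CR0_WP);	/* ring-0 stores now ignore R/O PTEs */
    	return cr0;
    }

    static inline void my_close_kernel(unsigned long cr0)
    {
    	write_cr0(cr0 | X86_CR0_WP);	/* re-arm write protection */
    	barrier();
    	preempt_enable();
    }
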
16869diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
16870index 13ab720..95d5442 100644
16871--- a/arch/x86/kernel/acpi/wakeup_32.S
16872+++ b/arch/x86/kernel/acpi/wakeup_32.S
16873@@ -30,13 +30,11 @@ wakeup_pmode_return:
16874 # and restore the stack ... but you need gdt for this to work
16875 movl saved_context_esp, %esp
16876
16877- movl %cs:saved_magic, %eax
16878- cmpl $0x12345678, %eax
16879+ cmpl $0x12345678, saved_magic
16880 jne bogus_magic
16881
16882 # jump to place where we left off
16883- movl saved_eip, %eax
16884- jmp *%eax
16885+ jmp *(saved_eip)
16886
16887 bogus_magic:
16888 jmp bogus_magic
16889diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
16890index ef5ccca..bd83949 100644
16891--- a/arch/x86/kernel/alternative.c
16892+++ b/arch/x86/kernel/alternative.c
16893@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
16894 */
16895 for (a = start; a < end; a++) {
16896 instr = (u8 *)&a->instr_offset + a->instr_offset;
16897+
16898+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16899+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16900+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
16901+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16902+#endif
16903+
16904 replacement = (u8 *)&a->repl_offset + a->repl_offset;
16905 BUG_ON(a->replacementlen > a->instrlen);
16906 BUG_ON(a->instrlen > sizeof(insnbuf));
16907@@ -299,10 +306,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
16908 for (poff = start; poff < end; poff++) {
16909 u8 *ptr = (u8 *)poff + *poff;
16910
16911+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16912+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16913+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
16914+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16915+#endif
16916+
16917 if (!*poff || ptr < text || ptr >= text_end)
16918 continue;
16919 /* turn DS segment override prefix into lock prefix */
16920- if (*ptr == 0x3e)
16921+ if (*ktla_ktva(ptr) == 0x3e)
16922 text_poke(ptr, ((unsigned char []){0xf0}), 1);
16923 }
16924 mutex_unlock(&text_mutex);
16925@@ -317,10 +330,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
16926 for (poff = start; poff < end; poff++) {
16927 u8 *ptr = (u8 *)poff + *poff;
16928
16929+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16930+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16931+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
16932+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16933+#endif
16934+
16935 if (!*poff || ptr < text || ptr >= text_end)
16936 continue;
16937 /* turn lock prefix into DS segment override prefix */
16938- if (*ptr == 0xf0)
16939+ if (*ktla_ktva(ptr) == 0xf0)
16940 text_poke(ptr, ((unsigned char []){0x3E}), 1);
16941 }
16942 mutex_unlock(&text_mutex);
16943@@ -468,7 +487,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
16944
16945 BUG_ON(p->len > MAX_PATCH_LEN);
16946 /* prep the buffer with the original instructions */
16947- memcpy(insnbuf, p->instr, p->len);
16948+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
16949 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
16950 (unsigned long)p->instr, p->len);
16951
16952@@ -515,7 +534,7 @@ void __init alternative_instructions(void)
16953 if (!uniproc_patched || num_possible_cpus() == 1)
16954 free_init_pages("SMP alternatives",
16955 (unsigned long)__smp_locks,
16956- (unsigned long)__smp_locks_end);
16957+ PAGE_ALIGN((unsigned long)__smp_locks_end));
16958 #endif
16959
16960 apply_paravirt(__parainstructions, __parainstructions_end);
16961@@ -535,13 +554,17 @@ void __init alternative_instructions(void)
16962 * instructions. And on the local CPU you need to be protected again NMI or MCE
16963 * handlers seeing an inconsistent instruction while you patch.
16964 */
16965-void *__init_or_module text_poke_early(void *addr, const void *opcode,
16966+void *__kprobes text_poke_early(void *addr, const void *opcode,
16967 size_t len)
16968 {
16969 unsigned long flags;
16970 local_irq_save(flags);
16971- memcpy(addr, opcode, len);
16972+
16973+ pax_open_kernel();
16974+ memcpy(ktla_ktva(addr), opcode, len);
16975 sync_core();
16976+ pax_close_kernel();
16977+
16978 local_irq_restore(flags);
16979 /* Could also do a CLFLUSH here to speed up CPU recovery; but
16980 that causes hangs on some VIA CPUs. */
16981@@ -563,36 +586,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
16982 */
16983 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
16984 {
16985- unsigned long flags;
16986- char *vaddr;
16987+ unsigned char *vaddr = ktla_ktva(addr);
16988 struct page *pages[2];
16989- int i;
16990+ size_t i;
16991
16992 if (!core_kernel_text((unsigned long)addr)) {
16993- pages[0] = vmalloc_to_page(addr);
16994- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
16995+ pages[0] = vmalloc_to_page(vaddr);
16996+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
16997 } else {
16998- pages[0] = virt_to_page(addr);
16999+ pages[0] = virt_to_page(vaddr);
17000 WARN_ON(!PageReserved(pages[0]));
17001- pages[1] = virt_to_page(addr + PAGE_SIZE);
17002+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
17003 }
17004 BUG_ON(!pages[0]);
17005- local_irq_save(flags);
17006- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
17007- if (pages[1])
17008- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
17009- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
17010- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
17011- clear_fixmap(FIX_TEXT_POKE0);
17012- if (pages[1])
17013- clear_fixmap(FIX_TEXT_POKE1);
17014- local_flush_tlb();
17015- sync_core();
17016- /* Could also do a CLFLUSH here to speed up CPU recovery; but
17017- that causes hangs on some VIA CPUs. */
17018+ text_poke_early(addr, opcode, len);
17019 for (i = 0; i < len; i++)
17020- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
17021- local_irq_restore(flags);
17022+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
17023 return addr;
17024 }
17025
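
The ktla_ktva() calls sprinkled through alternative.c come from the i386 KERNEXEC layout, where kernel text is mapped twice: the executing, read-only mapping and a writable alias a fixed offset away. Reads and pokes of instruction bytes go through the alias, which is also what lets the rewritten text_poke() collapse onto text_poke_early(). Sketched from the hunks above; treat the exact arithmetic as illustrative:

    #if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
    /* text address -> writable alias, and back */
    #define ktla_ktva(addr)	((addr) + __KERNEL_TEXT_OFFSET)
    #define ktva_ktla(addr)	((addr) - __KERNEL_TEXT_OFFSET)
    #else
    /* no split mapping: both views are the same address */
    #define ktla_ktva(addr)	(addr)
    #define ktva_ktla(addr)	(addr)
    #endif
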
17026diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
17027index cbf5121..812b537 100644
17028--- a/arch/x86/kernel/apic/apic.c
17029+++ b/arch/x86/kernel/apic/apic.c
17030@@ -189,7 +189,7 @@ int first_system_vector = 0xfe;
17031 /*
17032 * Debug level, exported for io_apic.c
17033 */
17034-unsigned int apic_verbosity;
17035+int apic_verbosity;
17036
17037 int pic_mode;
17038
17039@@ -1956,7 +1956,7 @@ void smp_error_interrupt(struct pt_regs *regs)
17040 apic_write(APIC_ESR, 0);
17041 v1 = apic_read(APIC_ESR);
17042 ack_APIC_irq();
17043- atomic_inc(&irq_err_count);
17044+ atomic_inc_unchecked(&irq_err_count);
17045
17046 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
17047 smp_processor_id(), v0 , v1);
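
atomic_unchecked_t, used here for irq_err_count and below for irq_mis_count, exists because PAX_REFCOUNT instruments ordinary atomic_t increments with an overflow trap (a wrapped reference count is exploitable). Pure statistics counters opt back into silent wraparound. A sketch of the x86 shape of the type, assuming the PaX semantics just described:

    typedef struct {
    	int counter;
    } atomic_unchecked_t;

    /* deliberately no overflow check: wrapping is harmless for statistics */
    static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
    	asm volatile("lock; incl %0" : "+m" (v->counter));
    }

    static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
    {
    	return (*(volatile const int *)&v->counter);
    }
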
17048diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
17049index 00c77cf..2dc6a2d 100644
17050--- a/arch/x86/kernel/apic/apic_flat_64.c
17051+++ b/arch/x86/kernel/apic/apic_flat_64.c
17052@@ -157,7 +157,7 @@ static int flat_probe(void)
17053 return 1;
17054 }
17055
17056-static struct apic apic_flat = {
17057+static struct apic apic_flat __read_only = {
17058 .name = "flat",
17059 .probe = flat_probe,
17060 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
17061@@ -271,7 +271,7 @@ static int physflat_probe(void)
17062 return 0;
17063 }
17064
17065-static struct apic apic_physflat = {
17066+static struct apic apic_physflat __read_only = {
17067
17068 .name = "physical flat",
17069 .probe = physflat_probe,
17070diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
17071index e145f28..2752888 100644
17072--- a/arch/x86/kernel/apic/apic_noop.c
17073+++ b/arch/x86/kernel/apic/apic_noop.c
17074@@ -119,7 +119,7 @@ static void noop_apic_write(u32 reg, u32 v)
17075 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
17076 }
17077
17078-struct apic apic_noop = {
17079+struct apic apic_noop __read_only = {
17080 .name = "noop",
17081 .probe = noop_probe,
17082 .acpi_madt_oem_check = NULL,
17083diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
17084index d50e364..543bee3 100644
17085--- a/arch/x86/kernel/apic/bigsmp_32.c
17086+++ b/arch/x86/kernel/apic/bigsmp_32.c
17087@@ -152,7 +152,7 @@ static int probe_bigsmp(void)
17088 return dmi_bigsmp;
17089 }
17090
17091-static struct apic apic_bigsmp = {
17092+static struct apic apic_bigsmp __read_only = {
17093
17094 .name = "bigsmp",
17095 .probe = probe_bigsmp,
17096diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
17097index 0874799..a7a7892 100644
17098--- a/arch/x86/kernel/apic/es7000_32.c
17099+++ b/arch/x86/kernel/apic/es7000_32.c
17100@@ -608,8 +608,7 @@ static int es7000_mps_oem_check_cluster(struct mpc_table *mpc, char *oem,
17101 return ret && es7000_apic_is_cluster();
17102 }
17103
17104-/* We've been warned by a false positive warning.Use __refdata to keep calm. */
17105-static struct apic __refdata apic_es7000_cluster = {
17106+static struct apic apic_es7000_cluster __read_only = {
17107
17108 .name = "es7000",
17109 .probe = probe_es7000,
17110@@ -675,7 +674,7 @@ static struct apic __refdata apic_es7000_cluster = {
17111 .x86_32_early_logical_apicid = es7000_early_logical_apicid,
17112 };
17113
17114-static struct apic __refdata apic_es7000 = {
17115+static struct apic apic_es7000 __read_only = {
17116
17117 .name = "es7000",
17118 .probe = probe_es7000,
17119diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
17120index b739d39..aebc14c 100644
17121--- a/arch/x86/kernel/apic/io_apic.c
17122+++ b/arch/x86/kernel/apic/io_apic.c
17123@@ -1084,7 +1084,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
17124 }
17125 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
17126
17127-void lock_vector_lock(void)
17128+void lock_vector_lock(void) __acquires(vector_lock)
17129 {
17130 /* Used to the online set of cpus does not change
17131 * during assign_irq_vector.
17132@@ -1092,7 +1092,7 @@ void lock_vector_lock(void)
17133 raw_spin_lock(&vector_lock);
17134 }
17135
17136-void unlock_vector_lock(void)
17137+void unlock_vector_lock(void) __releases(vector_lock)
17138 {
17139 raw_spin_unlock(&vector_lock);
17140 }
17141@@ -2399,7 +2399,7 @@ static void ack_apic_edge(struct irq_data *data)
17142 ack_APIC_irq();
17143 }
17144
17145-atomic_t irq_mis_count;
17146+atomic_unchecked_t irq_mis_count;
17147
17148 #ifdef CONFIG_GENERIC_PENDING_IRQ
17149 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
17150@@ -2540,7 +2540,7 @@ static void ack_apic_level(struct irq_data *data)
17151 * at the cpu.
17152 */
17153 if (!(v & (1 << (i & 0x1f)))) {
17154- atomic_inc(&irq_mis_count);
17155+ atomic_inc_unchecked(&irq_mis_count);
17156
17157 eoi_ioapic_irq(irq, cfg);
17158 }
17159@@ -2567,11 +2567,13 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
17160
17161 static void irq_remap_modify_chip_defaults(struct irq_chip *chip)
17162 {
17163- chip->irq_print_chip = ir_print_prefix;
17164- chip->irq_ack = ir_ack_apic_edge;
17165- chip->irq_eoi = ir_ack_apic_level;
17166+ pax_open_kernel();
17167+ *(void **)&chip->irq_print_chip = ir_print_prefix;
17168+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
17169+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
17170
17171- chip->irq_set_affinity = set_remapped_irq_affinity;
17172+ *(void **)&chip->irq_set_affinity = set_remapped_irq_affinity;
17173+ pax_close_kernel();
17174 }
17175 #endif /* CONFIG_IRQ_REMAP */
17176
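
The __acquires()/__releases() annotations added to lock_vector_lock()/unlock_vector_lock() are pure static-analysis metadata: under sparse they record that the function changes the lock's context, so imbalanced lock/unlock pairs are flagged by "make C=1"; under a normal compile they vanish. This mirrors the stock definitions in <linux/compiler.h>:

    #ifdef __CHECKER__
    # define __acquires(x)	__attribute__((context(x, 0, 1)))
    # define __releases(x)	__attribute__((context(x, 1, 0)))
    #else
    # define __acquires(x)
    # define __releases(x)
    #endif

    /* sparse now checks that callers pair these up correctly */
    void lock_vector_lock(void) __acquires(vector_lock);
    void unlock_vector_lock(void) __releases(vector_lock);
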
17177diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
17178index d661ee9..791fd33 100644
17179--- a/arch/x86/kernel/apic/numaq_32.c
17180+++ b/arch/x86/kernel/apic/numaq_32.c
17181@@ -455,8 +455,7 @@ static void numaq_setup_portio_remap(void)
17182 (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
17183 }
17184
17185-/* Use __refdata to keep false positive warning calm. */
17186-static struct apic __refdata apic_numaq = {
17187+static struct apic apic_numaq __read_only = {
17188
17189 .name = "NUMAQ",
17190 .probe = probe_numaq,
17191diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
17192index eb35ef9..f184a21 100644
17193--- a/arch/x86/kernel/apic/probe_32.c
17194+++ b/arch/x86/kernel/apic/probe_32.c
17195@@ -72,7 +72,7 @@ static int probe_default(void)
17196 return 1;
17197 }
17198
17199-static struct apic apic_default = {
17200+static struct apic apic_default __read_only = {
17201
17202 .name = "default",
17203 .probe = probe_default,
17204diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
17205index 77c95c0..434f8a4 100644
17206--- a/arch/x86/kernel/apic/summit_32.c
17207+++ b/arch/x86/kernel/apic/summit_32.c
17208@@ -486,7 +486,7 @@ void setup_summit(void)
17209 }
17210 #endif
17211
17212-static struct apic apic_summit = {
17213+static struct apic apic_summit __read_only = {
17214
17215 .name = "summit",
17216 .probe = probe_summit,
17217diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
17218index c88baa4..757aee1 100644
17219--- a/arch/x86/kernel/apic/x2apic_cluster.c
17220+++ b/arch/x86/kernel/apic/x2apic_cluster.c
17221@@ -183,7 +183,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
17222 return notifier_from_errno(err);
17223 }
17224
17225-static struct notifier_block __refdata x2apic_cpu_notifier = {
17226+static struct notifier_block x2apic_cpu_notifier = {
17227 .notifier_call = update_clusterinfo,
17228 };
17229
17230@@ -235,7 +235,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
17231 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
17232 }
17233
17234-static struct apic apic_x2apic_cluster = {
17235+static struct apic apic_x2apic_cluster __read_only = {
17236
17237 .name = "cluster x2apic",
17238 .probe = x2apic_cluster_probe,
17239diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
17240index 562a76d..a003c0f 100644
17241--- a/arch/x86/kernel/apic/x2apic_phys.c
17242+++ b/arch/x86/kernel/apic/x2apic_phys.c
17243@@ -89,7 +89,7 @@ static int x2apic_phys_probe(void)
17244 return apic == &apic_x2apic_phys;
17245 }
17246
17247-static struct apic apic_x2apic_phys = {
17248+static struct apic apic_x2apic_phys __read_only = {
17249
17250 .name = "physical x2apic",
17251 .probe = x2apic_phys_probe,
17252diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
17253index 8cfade9..b9d04fc 100644
17254--- a/arch/x86/kernel/apic/x2apic_uv_x.c
17255+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
17256@@ -333,7 +333,7 @@ static int uv_probe(void)
17257 return apic == &apic_x2apic_uv_x;
17258 }
17259
17260-static struct apic __refdata apic_x2apic_uv_x = {
17261+static struct apic apic_x2apic_uv_x __read_only = {
17262
17263 .name = "UV large system",
17264 .probe = uv_probe,
17265diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
17266index d65464e..1035d31 100644
17267--- a/arch/x86/kernel/apm_32.c
17268+++ b/arch/x86/kernel/apm_32.c
17269@@ -412,7 +412,7 @@ static DEFINE_MUTEX(apm_mutex);
17270 * This is for buggy BIOS's that refer to (real mode) segment 0x40
17271 * even though they are called in protected mode.
17272 */
17273-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
17274+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
17275 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
17276
17277 static const char driver_version[] = "1.16ac"; /* no spaces */
17278@@ -590,7 +590,10 @@ static long __apm_bios_call(void *_call)
17279 BUG_ON(cpu != 0);
17280 gdt = get_cpu_gdt_table(cpu);
17281 save_desc_40 = gdt[0x40 / 8];
17282+
17283+ pax_open_kernel();
17284 gdt[0x40 / 8] = bad_bios_desc;
17285+ pax_close_kernel();
17286
17287 apm_irq_save(flags);
17288 APM_DO_SAVE_SEGS;
17289@@ -599,7 +602,11 @@ static long __apm_bios_call(void *_call)
17290 &call->esi);
17291 APM_DO_RESTORE_SEGS;
17292 apm_irq_restore(flags);
17293+
17294+ pax_open_kernel();
17295 gdt[0x40 / 8] = save_desc_40;
17296+ pax_close_kernel();
17297+
17298 put_cpu();
17299
17300 return call->eax & 0xff;
17301@@ -666,7 +673,10 @@ static long __apm_bios_call_simple(void *_call)
17302 BUG_ON(cpu != 0);
17303 gdt = get_cpu_gdt_table(cpu);
17304 save_desc_40 = gdt[0x40 / 8];
17305+
17306+ pax_open_kernel();
17307 gdt[0x40 / 8] = bad_bios_desc;
17308+ pax_close_kernel();
17309
17310 apm_irq_save(flags);
17311 APM_DO_SAVE_SEGS;
17312@@ -674,7 +684,11 @@ static long __apm_bios_call_simple(void *_call)
17313 &call->eax);
17314 APM_DO_RESTORE_SEGS;
17315 apm_irq_restore(flags);
17316+
17317+ pax_open_kernel();
17318 gdt[0x40 / 8] = save_desc_40;
17319+ pax_close_kernel();
17320+
17321 put_cpu();
17322 return error;
17323 }
17324@@ -2345,12 +2359,15 @@ static int __init apm_init(void)
17325 * code to that CPU.
17326 */
17327 gdt = get_cpu_gdt_table(0);
17328+
17329+ pax_open_kernel();
17330 set_desc_base(&gdt[APM_CS >> 3],
17331 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
17332 set_desc_base(&gdt[APM_CS_16 >> 3],
17333 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
17334 set_desc_base(&gdt[APM_DS >> 3],
17335 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
17336+ pax_close_kernel();
17337
17338 proc_create("apm", 0, NULL, &apm_file_ops);
17339
17340diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
17341index 2861082..6d4718e 100644
17342--- a/arch/x86/kernel/asm-offsets.c
17343+++ b/arch/x86/kernel/asm-offsets.c
17344@@ -33,6 +33,8 @@ void common(void) {
17345 OFFSET(TI_status, thread_info, status);
17346 OFFSET(TI_addr_limit, thread_info, addr_limit);
17347 OFFSET(TI_preempt_count, thread_info, preempt_count);
17348+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
17349+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
17350
17351 BLANK();
17352 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
17353@@ -53,8 +55,26 @@ void common(void) {
17354 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
17355 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
17356 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
17357+
17358+#ifdef CONFIG_PAX_KERNEXEC
17359+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
17360 #endif
17361
17362+#ifdef CONFIG_PAX_MEMORY_UDEREF
17363+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
17364+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
17365+#ifdef CONFIG_X86_64
17366+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
17367+#endif
17368+#endif
17369+
17370+#endif
17371+
17372+ BLANK();
17373+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
17374+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
17375+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
17376+
17377 #ifdef CONFIG_XEN
17378 BLANK();
17379 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
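
For readers unfamiliar with asm-offsets.c: the file is never linked. It is compiled to assembly only, and each OFFSET()/DEFINE() line (including the TI_lowest_stack and PAGE_SIZE_asm entries the patch adds) emits a marker that the build scripts turn into a #define in generated/asm-offsets.h for consumption by entry code. The machinery, as in <linux/kbuild.h>:

    #include <stddef.h>	/* offsetof() */

    /* Emits "->SYM value comment" into the .s output; a sed script later
     * rewrites each marker into "#define SYM value" in asm-offsets.h. */
    #define DEFINE(sym, val) \
    	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

    #define OFFSET(sym, str, mem) \
    	DEFINE(sym, offsetof(struct str, mem))

    #define BLANK() asm volatile("\n->" : : )
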
17380diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
17381index 1b4754f..fbb4227 100644
17382--- a/arch/x86/kernel/asm-offsets_64.c
17383+++ b/arch/x86/kernel/asm-offsets_64.c
17384@@ -76,6 +76,7 @@ int main(void)
17385 BLANK();
17386 #undef ENTRY
17387
17388+ DEFINE(TSS_size, sizeof(struct tss_struct));
17389 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
17390 BLANK();
17391
17392diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
17393index a0e067d..9c7db16 100644
17394--- a/arch/x86/kernel/cpu/Makefile
17395+++ b/arch/x86/kernel/cpu/Makefile
17396@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
17397 CFLAGS_REMOVE_perf_event.o = -pg
17398 endif
17399
17400-# Make sure load_percpu_segment has no stackprotector
17401-nostackp := $(call cc-option, -fno-stack-protector)
17402-CFLAGS_common.o := $(nostackp)
17403-
17404 obj-y := intel_cacheinfo.o scattered.o topology.o
17405 obj-y += proc.o capflags.o powerflags.o common.o
17406 obj-y += vmware.o hypervisor.o mshyperv.o
17407diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
17408index 15239ff..e23e04e 100644
17409--- a/arch/x86/kernel/cpu/amd.c
17410+++ b/arch/x86/kernel/cpu/amd.c
17411@@ -733,7 +733,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
17412 unsigned int size)
17413 {
17414 /* AMD errata T13 (order #21922) */
17415- if ((c->x86 == 6)) {
17416+ if (c->x86 == 6) {
17417 /* Duron Rev A0 */
17418 if (c->x86_model == 3 && c->x86_mask == 0)
17419 size = 64;
17420diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
17421index 9c3ab43..51e6366 100644
17422--- a/arch/x86/kernel/cpu/common.c
17423+++ b/arch/x86/kernel/cpu/common.c
17424@@ -86,60 +86,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
17425
17426 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
17427
17428-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
17429-#ifdef CONFIG_X86_64
17430- /*
17431- * We need valid kernel segments for data and code in long mode too
17432- * IRET will check the segment types kkeil 2000/10/28
17433- * Also sysret mandates a special GDT layout
17434- *
17435- * TLS descriptors are currently at a different place compared to i386.
17436- * Hopefully nobody expects them at a fixed place (Wine?)
17437- */
17438- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
17439- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
17440- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
17441- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
17442- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
17443- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
17444-#else
17445- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
17446- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
17447- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
17448- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
17449- /*
17450- * Segments used for calling PnP BIOS have byte granularity.
17451- * They code segments and data segments have fixed 64k limits,
17452- * the transfer segment sizes are set at run time.
17453- */
17454- /* 32-bit code */
17455- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
17456- /* 16-bit code */
17457- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
17458- /* 16-bit data */
17459- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
17460- /* 16-bit data */
17461- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
17462- /* 16-bit data */
17463- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
17464- /*
17465- * The APM segments have byte granularity and their bases
17466- * are set at run time. All have 64k limits.
17467- */
17468- /* 32-bit code */
17469- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
17470- /* 16-bit code */
17471- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
17472- /* data */
17473- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
17474-
17475- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
17476- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
17477- GDT_STACK_CANARY_INIT
17478-#endif
17479-} };
17480-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
17481-
17482 static int __init x86_xsave_setup(char *s)
17483 {
17484 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
17485@@ -389,7 +335,7 @@ void switch_to_new_gdt(int cpu)
17486 {
17487 struct desc_ptr gdt_descr;
17488
17489- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
17490+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
17491 gdt_descr.size = GDT_SIZE - 1;
17492 load_gdt(&gdt_descr);
17493 /* Reload the per-cpu base */
17494@@ -885,6 +831,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
17495 /* Filter out anything that depends on CPUID levels we don't have */
17496 filter_cpuid_features(c, true);
17497
17498+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
17499+ setup_clear_cpu_cap(X86_FEATURE_SEP);
17500+#endif
17501+
17502 /* If the model name is still unset, do table lookup. */
17503 if (!c->x86_model_id[0]) {
17504 const char *p;
17505@@ -1068,10 +1018,12 @@ static __init int setup_disablecpuid(char *arg)
17506 }
17507 __setup("clearcpuid=", setup_disablecpuid);
17508
17509+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
17510+EXPORT_PER_CPU_SYMBOL(current_tinfo);
17511+
17512 #ifdef CONFIG_X86_64
17513 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
17514-struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
17515- (unsigned long) nmi_idt_table };
17516+struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
17517
17518 DEFINE_PER_CPU_FIRST(union irq_stack_union,
17519 irq_stack_union) __aligned(PAGE_SIZE);
17520@@ -1085,7 +1037,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
17521 EXPORT_PER_CPU_SYMBOL(current_task);
17522
17523 DEFINE_PER_CPU(unsigned long, kernel_stack) =
17524- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
17525+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
17526 EXPORT_PER_CPU_SYMBOL(kernel_stack);
17527
17528 DEFINE_PER_CPU(char *, irq_stack_ptr) =
17529@@ -1224,7 +1176,7 @@ void __cpuinit cpu_init(void)
17530 int i;
17531
17532 cpu = stack_smp_processor_id();
17533- t = &per_cpu(init_tss, cpu);
17534+ t = init_tss + cpu;
17535 oist = &per_cpu(orig_ist, cpu);
17536
17537 #ifdef CONFIG_NUMA
17538@@ -1250,7 +1202,7 @@ void __cpuinit cpu_init(void)
17539 switch_to_new_gdt(cpu);
17540 loadsegment(fs, 0);
17541
17542- load_idt((const struct desc_ptr *)&idt_descr);
17543+ load_idt(&idt_descr);
17544
17545 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
17546 syscall_init();
17547@@ -1259,7 +1211,6 @@ void __cpuinit cpu_init(void)
17548 wrmsrl(MSR_KERNEL_GS_BASE, 0);
17549 barrier();
17550
17551- x86_configure_nx();
17552 enable_x2apic();
17553
17554 /*
17555@@ -1311,7 +1262,7 @@ void __cpuinit cpu_init(void)
17556 {
17557 int cpu = smp_processor_id();
17558 struct task_struct *curr = current;
17559- struct tss_struct *t = &per_cpu(init_tss, cpu);
17560+ struct tss_struct *t = init_tss + cpu;
17561 struct thread_struct *thread = &curr->thread;
17562
17563 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
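
The big deletion at the top of this common.c hunk removes the writable per-CPU GDT definition; elsewhere the patch re-creates it in read-only memory, which is why every GDT writer touched by this patch (APM's segment-0x40 shuffle, the ACPI wakeup path) is bracketed with pax_open_kernel(). Call sites keep using the stock accessor from <asm/desc.h>, shown here for orientation:

    struct gdt_page {
    	struct desc_struct gdt[GDT_ENTRIES];
    } __attribute__((aligned(PAGE_SIZE)));

    DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);

    /* Per-CPU GDT lookup; with this patch the returned memory is
     * read-only, so writers must open a pax_open_kernel() window. */
    static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
    {
    	return per_cpu(gdt_page, cpu).gdt;
    }
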
17564diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
17565index fcaabd0..7b55a26 100644
17566--- a/arch/x86/kernel/cpu/intel.c
17567+++ b/arch/x86/kernel/cpu/intel.c
17568@@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
17569 * Update the IDT descriptor and reload the IDT so that
17570 * it uses the read-only mapped virtual address.
17571 */
17572- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
17573+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
17574 load_idt(&idt_descr);
17575 }
17576 #endif
17577diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
17578index 84c1309..39b7224 100644
17579--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
17580+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
17581@@ -1017,6 +1017,22 @@ static struct attribute *default_attrs[] = {
17582 };
17583
17584 #ifdef CONFIG_AMD_NB
17585+static struct attribute *default_attrs_amd_nb[] = {
17586+ &type.attr,
17587+ &level.attr,
17588+ &coherency_line_size.attr,
17589+ &physical_line_partition.attr,
17590+ &ways_of_associativity.attr,
17591+ &number_of_sets.attr,
17592+ &size.attr,
17593+ &shared_cpu_map.attr,
17594+ &shared_cpu_list.attr,
17595+ NULL,
17596+ NULL,
17597+ NULL,
17598+ NULL
17599+};
17600+
17601 static struct attribute ** __cpuinit amd_l3_attrs(void)
17602 {
17603 static struct attribute **attrs;
17604@@ -1027,18 +1043,7 @@ static struct attribute ** __cpuinit amd_l3_attrs(void)
17605
17606 n = ARRAY_SIZE(default_attrs);
17607
17608- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
17609- n += 2;
17610-
17611- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
17612- n += 1;
17613-
17614- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
17615- if (attrs == NULL)
17616- return attrs = default_attrs;
17617-
17618- for (n = 0; default_attrs[n]; n++)
17619- attrs[n] = default_attrs[n];
17620+ attrs = default_attrs_amd_nb;
17621
17622 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
17623 attrs[n++] = &cache_disable_0.attr;
17624@@ -1089,6 +1094,13 @@ static struct kobj_type ktype_cache = {
17625 .default_attrs = default_attrs,
17626 };
17627
17628+#ifdef CONFIG_AMD_NB
17629+static struct kobj_type ktype_cache_amd_nb = {
17630+ .sysfs_ops = &sysfs_ops,
17631+ .default_attrs = default_attrs_amd_nb,
17632+};
17633+#endif
17634+
17635 static struct kobj_type ktype_percpu_entry = {
17636 .sysfs_ops = &sysfs_ops,
17637 };
17638@@ -1154,20 +1166,26 @@ static int __cpuinit cache_add_dev(struct device *dev)
17639 return retval;
17640 }
17641
17642+#ifdef CONFIG_AMD_NB
17643+ amd_l3_attrs();
17644+#endif
17645+
17646 for (i = 0; i < num_cache_leaves; i++) {
17647+ struct kobj_type *ktype;
17648+
17649 this_object = INDEX_KOBJECT_PTR(cpu, i);
17650 this_object->cpu = cpu;
17651 this_object->index = i;
17652
17653 this_leaf = CPUID4_INFO_IDX(cpu, i);
17654
17655- ktype_cache.default_attrs = default_attrs;
17656+ ktype = &ktype_cache;
17657 #ifdef CONFIG_AMD_NB
17658 if (this_leaf->base.nb)
17659- ktype_cache.default_attrs = amd_l3_attrs();
17660+ ktype = &ktype_cache_amd_nb;
17661 #endif
17662 retval = kobject_init_and_add(&(this_object->kobj),
17663- &ktype_cache,
17664+ ktype,
17665 per_cpu(ici_cache_kobject, cpu),
17666 "index%1lu", i);
17667 if (unlikely(retval)) {
17668@@ -1222,7 +1240,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
17669 return NOTIFY_OK;
17670 }
17671
17672-static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
17673+static struct notifier_block cacheinfo_cpu_notifier = {
17674 .notifier_call = cacheinfo_cpu_callback,
17675 };
17676
17677diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
17678index 80dbda8..be16652 100644
17679--- a/arch/x86/kernel/cpu/mcheck/mce.c
17680+++ b/arch/x86/kernel/cpu/mcheck/mce.c
17681@@ -45,6 +45,7 @@
17682 #include <asm/processor.h>
17683 #include <asm/mce.h>
17684 #include <asm/msr.h>
17685+#include <asm/local.h>
17686
17687 #include "mce-internal.h"
17688
17689@@ -246,7 +247,7 @@ static void print_mce(struct mce *m)
17690 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
17691 m->cs, m->ip);
17692
17693- if (m->cs == __KERNEL_CS)
17694+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
17695 print_symbol("{%s}", m->ip);
17696 pr_cont("\n");
17697 }
17698@@ -279,10 +280,10 @@ static void print_mce(struct mce *m)
17699
17700 #define PANIC_TIMEOUT 5 /* 5 seconds */
17701
17702-static atomic_t mce_paniced;
17703+static atomic_unchecked_t mce_paniced;
17704
17705 static int fake_panic;
17706-static atomic_t mce_fake_paniced;
17707+static atomic_unchecked_t mce_fake_paniced;
17708
17709 /* Panic in progress. Enable interrupts and wait for final IPI */
17710 static void wait_for_panic(void)
17711@@ -306,7 +307,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
17712 /*
17713 * Make sure only one CPU runs in machine check panic
17714 */
17715- if (atomic_inc_return(&mce_paniced) > 1)
17716+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
17717 wait_for_panic();
17718 barrier();
17719
17720@@ -314,7 +315,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
17721 console_verbose();
17722 } else {
17723 /* Don't log too much for fake panic */
17724- if (atomic_inc_return(&mce_fake_paniced) > 1)
17725+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
17726 return;
17727 }
17728 /* First print corrected ones that are still unlogged */
17729@@ -686,7 +687,7 @@ static int mce_timed_out(u64 *t)
17730 * might have been modified by someone else.
17731 */
17732 rmb();
17733- if (atomic_read(&mce_paniced))
17734+ if (atomic_read_unchecked(&mce_paniced))
17735 wait_for_panic();
17736 if (!mca_cfg.monarch_timeout)
17737 goto out;
17738@@ -1662,7 +1663,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
17739 }
17740
17741 /* Call the installed machine check handler for this CPU setup. */
17742-void (*machine_check_vector)(struct pt_regs *, long error_code) =
17743+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
17744 unexpected_machine_check;
17745
17746 /*
17747@@ -1685,7 +1686,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
17748 return;
17749 }
17750
17751+ pax_open_kernel();
17752 machine_check_vector = do_machine_check;
17753+ pax_close_kernel();
17754
17755 __mcheck_cpu_init_generic();
17756 __mcheck_cpu_init_vendor(c);
17757@@ -1699,7 +1702,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
17758 */
17759
17760 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
17761-static int mce_chrdev_open_count; /* #times opened */
17762+static local_t mce_chrdev_open_count; /* #times opened */
17763 static int mce_chrdev_open_exclu; /* already open exclusive? */
17764
17765 static int mce_chrdev_open(struct inode *inode, struct file *file)
17766@@ -1707,7 +1710,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
17767 spin_lock(&mce_chrdev_state_lock);
17768
17769 if (mce_chrdev_open_exclu ||
17770- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
17771+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
17772 spin_unlock(&mce_chrdev_state_lock);
17773
17774 return -EBUSY;
17775@@ -1715,7 +1718,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
17776
17777 if (file->f_flags & O_EXCL)
17778 mce_chrdev_open_exclu = 1;
17779- mce_chrdev_open_count++;
17780+ local_inc(&mce_chrdev_open_count);
17781
17782 spin_unlock(&mce_chrdev_state_lock);
17783
17784@@ -1726,7 +1729,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
17785 {
17786 spin_lock(&mce_chrdev_state_lock);
17787
17788- mce_chrdev_open_count--;
17789+ local_dec(&mce_chrdev_open_count);
17790 mce_chrdev_open_exclu = 0;
17791
17792 spin_unlock(&mce_chrdev_state_lock);
17793@@ -2372,7 +2375,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
17794 return NOTIFY_OK;
17795 }
17796
17797-static struct notifier_block mce_cpu_notifier __cpuinitdata = {
17798+static struct notifier_block mce_cpu_notifier = {
17799 .notifier_call = mce_cpu_callback,
17800 };
17801
17802@@ -2382,7 +2385,7 @@ static __init void mce_init_banks(void)
17803
17804 for (i = 0; i < mca_cfg.banks; i++) {
17805 struct mce_bank *b = &mce_banks[i];
17806- struct device_attribute *a = &b->attr;
17807+ device_attribute_no_const *a = &b->attr;
17808
17809 sysfs_attr_init(&a->attr);
17810 a->attr.name = b->attrname;
17811@@ -2450,7 +2453,7 @@ struct dentry *mce_get_debugfs_dir(void)
17812 static void mce_reset(void)
17813 {
17814 cpu_missing = 0;
17815- atomic_set(&mce_fake_paniced, 0);
17816+ atomic_set_unchecked(&mce_fake_paniced, 0);
17817 atomic_set(&mce_executing, 0);
17818 atomic_set(&mce_callin, 0);
17819 atomic_set(&global_nwo, 0);
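
The /dev/mcelog open counter switches from a plain int to local_t. The spinlock already serializes open/release, so this is not about races; the apparent motivation is keeping the counter outside the overflow-checked integer paths while keeping updates cheap (local_t needs no LOCK prefix). The <asm/local.h> interface in brief, with illustrative wrapper names:

    #include <asm/local.h>

    static local_t open_count = LOCAL_INIT(0);

    static void track_open(void)
    {
    	local_inc(&open_count);	/* cheap: no bus-locked cycle needed */
    }

    static void track_release(void)
    {
    	local_dec(&open_count);
    }

    static long opens_outstanding(void)
    {
    	return local_read(&open_count);
    }
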
17820diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
17821index 2d5454c..51987eb 100644
17822--- a/arch/x86/kernel/cpu/mcheck/p5.c
17823+++ b/arch/x86/kernel/cpu/mcheck/p5.c
17824@@ -11,6 +11,7 @@
17825 #include <asm/processor.h>
17826 #include <asm/mce.h>
17827 #include <asm/msr.h>
17828+#include <asm/pgtable.h>
17829
17830 /* By default disabled */
17831 int mce_p5_enabled __read_mostly;
17832@@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
17833 if (!cpu_has(c, X86_FEATURE_MCE))
17834 return;
17835
17836+ pax_open_kernel();
17837 machine_check_vector = pentium_machine_check;
17838+ pax_close_kernel();
17839 /* Make sure the vector pointer is visible before we enable MCEs: */
17840 wmb();
17841
17842diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
17843index 47a1870..8c019a7 100644
17844--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
17845+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
17846@@ -288,7 +288,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
17847 return notifier_from_errno(err);
17848 }
17849
17850-static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
17851+static struct notifier_block thermal_throttle_cpu_notifier =
17852 {
17853 .notifier_call = thermal_throttle_cpu_callback,
17854 };
17855diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
17856index 2d7998f..17c9de1 100644
17857--- a/arch/x86/kernel/cpu/mcheck/winchip.c
17858+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
17859@@ -10,6 +10,7 @@
17860 #include <asm/processor.h>
17861 #include <asm/mce.h>
17862 #include <asm/msr.h>
17863+#include <asm/pgtable.h>
17864
17865 /* Machine check handler for WinChip C6: */
17866 static void winchip_machine_check(struct pt_regs *regs, long error_code)
17867@@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
17868 {
17869 u32 lo, hi;
17870
17871+ pax_open_kernel();
17872 machine_check_vector = winchip_machine_check;
17873+ pax_close_kernel();
17874 /* Make sure the vector pointer is visible before we enable MCEs: */
17875 wmb();
17876
17877diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
17878index 726bf96..81f0526 100644
17879--- a/arch/x86/kernel/cpu/mtrr/main.c
17880+++ b/arch/x86/kernel/cpu/mtrr/main.c
17881@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
17882 u64 size_or_mask, size_and_mask;
17883 static bool mtrr_aps_delayed_init;
17884
17885-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
17886+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
17887
17888 const struct mtrr_ops *mtrr_if;
17889
17890diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
17891index df5e41f..816c719 100644
17892--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
17893+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
17894@@ -25,7 +25,7 @@ struct mtrr_ops {
17895 int (*validate_add_page)(unsigned long base, unsigned long size,
17896 unsigned int type);
17897 int (*have_wrcomb)(void);
17898-};
17899+} __do_const;
17900
17901 extern int generic_get_free_region(unsigned long base, unsigned long size,
17902 int replace_reg);
17903diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
17904index 6774c17..72c1b22 100644
17905--- a/arch/x86/kernel/cpu/perf_event.c
17906+++ b/arch/x86/kernel/cpu/perf_event.c
17907@@ -1305,7 +1305,7 @@ static void __init pmu_check_apic(void)
17908 pr_info("no hardware sampling interrupt available.\n");
17909 }
17910
17911-static struct attribute_group x86_pmu_format_group = {
17912+static attribute_group_no_const x86_pmu_format_group = {
17913 .name = "format",
17914 .attrs = NULL,
17915 };
17916@@ -1313,7 +1313,7 @@ static struct attribute_group x86_pmu_format_group = {
17917 struct perf_pmu_events_attr {
17918 struct device_attribute attr;
17919 u64 id;
17920-};
17921+} __do_const;
17922
17923 /*
17924 * Remove all undefined events (x86_pmu.event_map(id) == 0)
17925@@ -1381,7 +1381,7 @@ static struct attribute *events_attr[] = {
17926 NULL,
17927 };
17928
17929-static struct attribute_group x86_pmu_events_group = {
17930+static attribute_group_no_const x86_pmu_events_group = {
17931 .name = "events",
17932 .attrs = events_attr,
17933 };
17934@@ -1880,7 +1880,7 @@ static unsigned long get_segment_base(unsigned int segment)
17935 if (idx > GDT_ENTRIES)
17936 return 0;
17937
17938- desc = __this_cpu_ptr(&gdt_page.gdt[0]);
17939+ desc = get_cpu_gdt_table(smp_processor_id());
17940 }
17941
17942 return get_desc_base(desc + idx);
17943@@ -1970,7 +1970,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
17944 break;
17945
17946 perf_callchain_store(entry, frame.return_address);
17947- fp = frame.next_frame;
17948+ fp = (const void __force_user *)frame.next_frame;
17949 }
17950 }
17951
17952diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
17953index 4914e94..60b06e3 100644
17954--- a/arch/x86/kernel/cpu/perf_event_intel.c
17955+++ b/arch/x86/kernel/cpu/perf_event_intel.c
17956@@ -1958,10 +1958,10 @@ __init int intel_pmu_init(void)
17957 * v2 and above have a perf capabilities MSR
17958 */
17959 if (version > 1) {
17960- u64 capabilities;
17961+ u64 capabilities = x86_pmu.intel_cap.capabilities;
17962
17963- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
17964- x86_pmu.intel_cap.capabilities = capabilities;
17965+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
17966+ x86_pmu.intel_cap.capabilities = capabilities;
17967 }
17968
17969 intel_ds_init();
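
The intel_pmu_init() hunk is a robustness fix rather than a hardening one: a raw rdmsrl() of MSR_IA32_PERF_CAPABILITIES takes a general-protection fault on hypervisors that advertise architectural perfmon v2 but hide the MSR, while rdmsrl_safe() absorbs the fault through the exception table and returns nonzero so the zeroed default survives. The pattern in isolation, with an illustrative wrapper name:

    #include <asm/msr.h>	/* rdmsrl_safe(), MSR_IA32_PERF_CAPABILITIES */

    /* Read the capabilities MSR, falling back to a caller-supplied
     * default if the MSR is absent or the read faults. */
    static u64 read_perf_capabilities_or(u64 fallback)
    {
    	u64 caps;

    	if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &caps))
    		caps = fallback;
    	return caps;
    }
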
17970diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
17971index b43200d..d235b3e 100644
17972--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
17973+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
17974@@ -2428,7 +2428,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
17975 static int __init uncore_type_init(struct intel_uncore_type *type)
17976 {
17977 struct intel_uncore_pmu *pmus;
17978- struct attribute_group *events_group;
17979+ attribute_group_no_const *attr_group;
17980 struct attribute **attrs;
17981 int i, j;
17982
17983@@ -2455,19 +2455,19 @@ static int __init uncore_type_init(struct intel_uncore_type *type)
17984 while (type->event_descs[i].attr.attr.name)
17985 i++;
17986
17987- events_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
17988- sizeof(*events_group), GFP_KERNEL);
17989- if (!events_group)
17990+ attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
17991+ sizeof(*attr_group), GFP_KERNEL);
17992+ if (!attr_group)
17993 goto fail;
17994
17995- attrs = (struct attribute **)(events_group + 1);
17996- events_group->name = "events";
17997- events_group->attrs = attrs;
17998+ attrs = (struct attribute **)(attr_group + 1);
17999+ attr_group->name = "events";
18000+ attr_group->attrs = attrs;
18001
18002 for (j = 0; j < i; j++)
18003 attrs[j] = &type->event_descs[j].attr.attr;
18004
18005- type->events_group = events_group;
18006+ type->events_group = attr_group;
18007 }
18008
18009 type->pmu_group = &uncore_pmu_attr_group;
18010@@ -2826,7 +2826,7 @@ static int
18011 return NOTIFY_OK;
18012 }
18013
18014-static struct notifier_block uncore_cpu_nb __cpuinitdata = {
18015+static struct notifier_block uncore_cpu_nb = {
18016 .notifier_call = uncore_cpu_notifier,
18017 /*
18018 * to migrate uncore events, our notifier should be executed
18019diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
18020index e68a455..975a932 100644
18021--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
18022+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
18023@@ -428,7 +428,7 @@ struct intel_uncore_box {
18024 struct uncore_event_desc {
18025 struct kobj_attribute attr;
18026 const char *config;
18027-};
18028+} __do_const;
18029
18030 #define INTEL_UNCORE_EVENT_DESC(_name, _config) \
18031 { \
18032diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
18033index 60c7891..9e911d3 100644
18034--- a/arch/x86/kernel/cpuid.c
18035+++ b/arch/x86/kernel/cpuid.c
18036@@ -171,7 +171,7 @@ static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
18037 return notifier_from_errno(err);
18038 }
18039
18040-static struct notifier_block __refdata cpuid_class_cpu_notifier =
18041+static struct notifier_block cpuid_class_cpu_notifier =
18042 {
18043 .notifier_call = cpuid_class_cpu_callback,
18044 };
18045diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
18046index 74467fe..18793d5 100644
18047--- a/arch/x86/kernel/crash.c
18048+++ b/arch/x86/kernel/crash.c
18049@@ -58,10 +58,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
18050 {
18051 #ifdef CONFIG_X86_32
18052 struct pt_regs fixed_regs;
18053-#endif
18054
18055-#ifdef CONFIG_X86_32
18056- if (!user_mode_vm(regs)) {
18057+ if (!user_mode(regs)) {
18058 crash_fixup_ss_esp(&fixed_regs, regs);
18059 regs = &fixed_regs;
18060 }
18061diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
18062index 37250fe..bf2ec74 100644
18063--- a/arch/x86/kernel/doublefault_32.c
18064+++ b/arch/x86/kernel/doublefault_32.c
18065@@ -11,7 +11,7 @@
18066
18067 #define DOUBLEFAULT_STACKSIZE (1024)
18068 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
18069-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
18070+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
18071
18072 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
18073
18074@@ -21,7 +21,7 @@ static void doublefault_fn(void)
18075 unsigned long gdt, tss;
18076
18077 store_gdt(&gdt_desc);
18078- gdt = gdt_desc.address;
18079+ gdt = (unsigned long)gdt_desc.address;
18080
18081 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
18082
18083@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
18084 /* 0x2 bit is always set */
18085 .flags = X86_EFLAGS_SF | 0x2,
18086 .sp = STACK_START,
18087- .es = __USER_DS,
18088+ .es = __KERNEL_DS,
18089 .cs = __KERNEL_CS,
18090 .ss = __KERNEL_DS,
18091- .ds = __USER_DS,
18092+ .ds = __KERNEL_DS,
18093 .fs = __KERNEL_PERCPU,
18094
18095 .__cr3 = __pa_nodebug(swapper_pg_dir),
18096diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
18097index ae42418b..787c16b 100644
18098--- a/arch/x86/kernel/dumpstack.c
18099+++ b/arch/x86/kernel/dumpstack.c
18100@@ -2,6 +2,9 @@
18101 * Copyright (C) 1991, 1992 Linus Torvalds
18102 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
18103 */
18104+#ifdef CONFIG_GRKERNSEC_HIDESYM
18105+#define __INCLUDED_BY_HIDESYM 1
18106+#endif
18107 #include <linux/kallsyms.h>
18108 #include <linux/kprobes.h>
18109 #include <linux/uaccess.h>
18110@@ -35,16 +38,14 @@ void printk_address(unsigned long address, int reliable)
18111 static void
18112 print_ftrace_graph_addr(unsigned long addr, void *data,
18113 const struct stacktrace_ops *ops,
18114- struct thread_info *tinfo, int *graph)
18115+ struct task_struct *task, int *graph)
18116 {
18117- struct task_struct *task;
18118 unsigned long ret_addr;
18119 int index;
18120
18121 if (addr != (unsigned long)return_to_handler)
18122 return;
18123
18124- task = tinfo->task;
18125 index = task->curr_ret_stack;
18126
18127 if (!task->ret_stack || index < *graph)
18128@@ -61,7 +62,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
18129 static inline void
18130 print_ftrace_graph_addr(unsigned long addr, void *data,
18131 const struct stacktrace_ops *ops,
18132- struct thread_info *tinfo, int *graph)
18133+ struct task_struct *task, int *graph)
18134 { }
18135 #endif
18136
18137@@ -72,10 +73,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
18138 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
18139 */
18140
18141-static inline int valid_stack_ptr(struct thread_info *tinfo,
18142- void *p, unsigned int size, void *end)
18143+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
18144 {
18145- void *t = tinfo;
18146 if (end) {
18147 if (p < end && p >= (end-THREAD_SIZE))
18148 return 1;
18149@@ -86,14 +85,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
18150 }
18151
18152 unsigned long
18153-print_context_stack(struct thread_info *tinfo,
18154+print_context_stack(struct task_struct *task, void *stack_start,
18155 unsigned long *stack, unsigned long bp,
18156 const struct stacktrace_ops *ops, void *data,
18157 unsigned long *end, int *graph)
18158 {
18159 struct stack_frame *frame = (struct stack_frame *)bp;
18160
18161- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
18162+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
18163 unsigned long addr;
18164
18165 addr = *stack;
18166@@ -105,7 +104,7 @@ print_context_stack(struct thread_info *tinfo,
18167 } else {
18168 ops->address(data, addr, 0);
18169 }
18170- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
18171+ print_ftrace_graph_addr(addr, data, ops, task, graph);
18172 }
18173 stack++;
18174 }
18175@@ -114,7 +113,7 @@ print_context_stack(struct thread_info *tinfo,
18176 EXPORT_SYMBOL_GPL(print_context_stack);
18177
18178 unsigned long
18179-print_context_stack_bp(struct thread_info *tinfo,
18180+print_context_stack_bp(struct task_struct *task, void *stack_start,
18181 unsigned long *stack, unsigned long bp,
18182 const struct stacktrace_ops *ops, void *data,
18183 unsigned long *end, int *graph)
18184@@ -122,7 +121,7 @@ print_context_stack_bp(struct thread_info *tinfo,
18185 struct stack_frame *frame = (struct stack_frame *)bp;
18186 unsigned long *ret_addr = &frame->return_address;
18187
18188- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
18189+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
18190 unsigned long addr = *ret_addr;
18191
18192 if (!__kernel_text_address(addr))
18193@@ -131,7 +130,7 @@ print_context_stack_bp(struct thread_info *tinfo,
18194 ops->address(data, addr, 1);
18195 frame = frame->next_frame;
18196 ret_addr = &frame->return_address;
18197- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
18198+ print_ftrace_graph_addr(addr, data, ops, task, graph);
18199 }
18200
18201 return (unsigned long)frame;
18202@@ -189,7 +188,7 @@ void dump_stack(void)
18203
18204 bp = stack_frame(current, NULL);
18205 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
18206- current->pid, current->comm, print_tainted(),
18207+ task_pid_nr(current), current->comm, print_tainted(),
18208 init_utsname()->release,
18209 (int)strcspn(init_utsname()->version, " "),
18210 init_utsname()->version);
18211@@ -225,6 +224,8 @@ unsigned __kprobes long oops_begin(void)
18212 }
18213 EXPORT_SYMBOL_GPL(oops_begin);
18214
18215+extern void gr_handle_kernel_exploit(void);
18216+
18217 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
18218 {
18219 if (regs && kexec_should_crash(current))
18220@@ -246,7 +247,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
18221 panic("Fatal exception in interrupt");
18222 if (panic_on_oops)
18223 panic("Fatal exception");
18224- do_exit(signr);
18225+
18226+ gr_handle_kernel_exploit();
18227+
18228+ do_group_exit(signr);
18229 }
18230
18231 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
18232@@ -274,7 +278,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
18233 print_modules();
18234 show_regs(regs);
18235 #ifdef CONFIG_X86_32
18236- if (user_mode_vm(regs)) {
18237+ if (user_mode(regs)) {
18238 sp = regs->sp;
18239 ss = regs->ss & 0xffff;
18240 } else {
18241@@ -302,7 +306,7 @@ void die(const char *str, struct pt_regs *regs, long err)
18242 unsigned long flags = oops_begin();
18243 int sig = SIGSEGV;
18244
18245- if (!user_mode_vm(regs))
18246+ if (!user_mode(regs))
18247 report_bug(regs->ip, regs);
18248
18249 if (__die(str, regs, err))
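
The dumpstack rework threads an explicit `(task, stack_start)` pair through the stack walkers instead of a `thread_info` pointer: under this patch set `thread_info` can no longer be assumed to sit at the base of the kernel stack, so callers compute the base themselves. Two smaller hardening changes ride along: `task_pid_nr()` replaces the raw `->pid` so the printed PID is namespace-aware, and the oops path calls `gr_handle_kernel_exploit()` (grsecurity's active response to a kernel oops) and then `do_group_exit()` so the whole thread group dies, not just the faulting thread. The bounds check the walkers rely on, restated in plain C (simplified: the kernel version also honors an explicit `end` for sub-stacks; the THREAD_SIZE value is assumed):

    #include <stdbool.h>
    #include <stdio.h>

    #define THREAD_SIZE 8192UL

    /* p..p+size must lie inside [stack_start, stack_start + THREAD_SIZE) */
    static bool valid_stack_ptr(void *stack_start, void *p, unsigned int size)
    {
        return (char *)p >= (char *)stack_start &&
               (char *)p + size <= (char *)stack_start + THREAD_SIZE;
    }

    int main(void)
    {
        static char stack[THREAD_SIZE];
        printf("%d %d\n",
               valid_stack_ptr(stack, stack + 16, 8),                 /* 1 */
               valid_stack_ptr(stack, stack + THREAD_SIZE - 4, 8));   /* 0 */
        return 0;
    }
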
18250diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
18251index 1038a41..db2c12b 100644
18252--- a/arch/x86/kernel/dumpstack_32.c
18253+++ b/arch/x86/kernel/dumpstack_32.c
18254@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18255 bp = stack_frame(task, regs);
18256
18257 for (;;) {
18258- struct thread_info *context;
18259+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
18260
18261- context = (struct thread_info *)
18262- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
18263- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
18264+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
18265
18266- stack = (unsigned long *)context->previous_esp;
18267- if (!stack)
18268+ if (stack_start == task_stack_page(task))
18269 break;
18270+ stack = *(unsigned long **)stack_start;
18271 if (ops->stack(data, "IRQ") < 0)
18272 break;
18273 touch_nmi_watchdog();
18274@@ -86,7 +84,7 @@ void show_regs(struct pt_regs *regs)
18275 {
18276 int i;
18277
18278- __show_regs(regs, !user_mode_vm(regs));
18279+ __show_regs(regs, !user_mode(regs));
18280
18281 pr_emerg("Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n",
18282 TASK_COMM_LEN, current->comm, task_pid_nr(current),
18283@@ -95,21 +93,22 @@ void show_regs(struct pt_regs *regs)
18284 * When in-kernel, we also print out the stack and code at the
18285 * time of the fault..
18286 */
18287- if (!user_mode_vm(regs)) {
18288+ if (!user_mode(regs)) {
18289 unsigned int code_prologue = code_bytes * 43 / 64;
18290 unsigned int code_len = code_bytes;
18291 unsigned char c;
18292 u8 *ip;
18293+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
18294
18295 pr_emerg("Stack:\n");
18296 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
18297
18298 pr_emerg("Code:");
18299
18300- ip = (u8 *)regs->ip - code_prologue;
18301+ ip = (u8 *)regs->ip - code_prologue + cs_base;
18302 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
18303 /* try starting at IP */
18304- ip = (u8 *)regs->ip;
18305+ ip = (u8 *)regs->ip + cs_base;
18306 code_len = code_len - code_prologue + 1;
18307 }
18308 for (i = 0; i < code_len; i++, ip++) {
18309@@ -118,7 +117,7 @@ void show_regs(struct pt_regs *regs)
18310 pr_cont(" Bad EIP value.");
18311 break;
18312 }
18313- if (ip == (u8 *)regs->ip)
18314+ if (ip == (u8 *)regs->ip + cs_base)
18315 pr_cont(" <%02x>", c);
18316 else
18317 pr_cont(" %02x", c);
18318@@ -131,6 +130,7 @@ int is_valid_bugaddr(unsigned long ip)
18319 {
18320 unsigned short ud2;
18321
18322+ ip = ktla_ktva(ip);
18323 if (ip < PAGE_OFFSET)
18324 return 0;
18325 if (probe_kernel_address((unsigned short *)ip, ud2))
18326@@ -138,3 +138,15 @@ int is_valid_bugaddr(unsigned long ip)
18327
18328 return ud2 == 0x0b0f;
18329 }
18330+
18331+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
18332+void pax_check_alloca(unsigned long size)
18333+{
18334+ unsigned long sp = (unsigned long)&sp, stack_left;
18335+
18336+ /* all kernel stacks are of the same size */
18337+ stack_left = sp & (THREAD_SIZE - 1);
18338+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
18339+}
18340+EXPORT_SYMBOL(pax_check_alloca);
18341+#endif
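
Three 32-bit specifics land in this file. `show_regs()` now adds the CS base fetched from the GDT when dumping code bytes, since under KERNEXEC the kernel can execute with a non-zero code-segment base and `regs->ip` alone is then not a linear address; `is_valid_bugaddr()` translates through `ktla_ktva()` (kernel text linear-to-virtual address) for the same reason; and `pax_check_alloca()` is STACKLEAK's runtime guard, checking every large on-stack allocation against the space actually left on the current stack. Because kernel stacks are THREAD_SIZE-aligned, the low bits of the stack pointer directly give the remaining room. The guard in plain C (userspace model; THREAD_SIZE value assumed):

    #include <assert.h>
    #include <stdio.h>

    #define THREAD_SIZE 8192UL

    static void check_alloca(unsigned long sp, unsigned long size)
    {
        /* offset of sp above the aligned stack base == bytes still free below us */
        unsigned long stack_left = sp & (THREAD_SIZE - 1);
        /* refuse if the allocation would leave less than a 256-byte guard band */
        assert(stack_left >= 256 && size < stack_left - 256);
    }

    int main(void)
    {
        unsigned long sp = 0x2000 + 0x700;  /* pretend sp, 0x700 bytes above an 8 KiB-aligned base */
        check_alloca(sp, 128);              /* accepted: plenty of room */
        printf("128-byte alloca accepted with %lu bytes left\n", sp & (THREAD_SIZE - 1));
        return 0;
    }
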
18342diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
18343index b653675..51cc8c0 100644
18344--- a/arch/x86/kernel/dumpstack_64.c
18345+++ b/arch/x86/kernel/dumpstack_64.c
18346@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18347 unsigned long *irq_stack_end =
18348 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
18349 unsigned used = 0;
18350- struct thread_info *tinfo;
18351 int graph = 0;
18352 unsigned long dummy;
18353+ void *stack_start;
18354
18355 if (!task)
18356 task = current;
18357@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18358 * current stack address. If the stacks consist of nested
18359 * exceptions
18360 */
18361- tinfo = task_thread_info(task);
18362 for (;;) {
18363 char *id;
18364 unsigned long *estack_end;
18365+
18366 estack_end = in_exception_stack(cpu, (unsigned long)stack,
18367 &used, &id);
18368
18369@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18370 if (ops->stack(data, id) < 0)
18371 break;
18372
18373- bp = ops->walk_stack(tinfo, stack, bp, ops,
18374+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
18375 data, estack_end, &graph);
18376 ops->stack(data, "<EOE>");
18377 /*
18378@@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18379 * second-to-last pointer (index -2 to end) in the
18380 * exception stack:
18381 */
18382+ if ((u16)estack_end[-1] != __KERNEL_DS)
18383+ goto out;
18384 stack = (unsigned long *) estack_end[-2];
18385 continue;
18386 }
18387@@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18388 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
18389 if (ops->stack(data, "IRQ") < 0)
18390 break;
18391- bp = ops->walk_stack(tinfo, stack, bp,
18392+ bp = ops->walk_stack(task, irq_stack, stack, bp,
18393 ops, data, irq_stack_end, &graph);
18394 /*
18395 * We link to the next stack (which would be
18396@@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18397 /*
18398 * This handles the process stack:
18399 */
18400- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
18401+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
18402+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
18403+out:
18404 put_cpu();
18405 }
18406 EXPORT_SYMBOL(dump_trace);
18407@@ -249,7 +253,7 @@ void show_regs(struct pt_regs *regs)
18408 {
18409 int i;
18410 unsigned long sp;
18411- const int cpu = smp_processor_id();
18412+ const int cpu = raw_smp_processor_id();
18413 struct task_struct *cur = current;
18414
18415 sp = regs->sp;
18416@@ -304,3 +308,50 @@ int is_valid_bugaddr(unsigned long ip)
18417
18418 return ud2 == 0x0b0f;
18419 }
18420+
18421+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
18422+void pax_check_alloca(unsigned long size)
18423+{
18424+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
18425+ unsigned cpu, used;
18426+ char *id;
18427+
18428+ /* check the process stack first */
18429+ stack_start = (unsigned long)task_stack_page(current);
18430+ stack_end = stack_start + THREAD_SIZE;
18431+ if (likely(stack_start <= sp && sp < stack_end)) {
18432+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
18433+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
18434+ return;
18435+ }
18436+
18437+ cpu = get_cpu();
18438+
18439+ /* check the irq stacks */
18440+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
18441+ stack_start = stack_end - IRQ_STACK_SIZE;
18442+ if (stack_start <= sp && sp < stack_end) {
18443+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
18444+ put_cpu();
18445+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
18446+ return;
18447+ }
18448+
18449+ /* check the exception stacks */
18450+ used = 0;
18451+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
18452+ stack_start = stack_end - EXCEPTION_STKSZ;
18453+ if (stack_end && stack_start <= sp && sp < stack_end) {
18454+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
18455+ put_cpu();
18456+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
18457+ return;
18458+ }
18459+
18460+ put_cpu();
18461+
18462+ /* unknown stack */
18463+ BUG();
18464+}
18465+EXPORT_SYMBOL(pax_check_alloca);
18466+#endif
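
The 64-bit `pax_check_alloca()` has to cope with three stack families, the per-task process stack, the per-CPU IRQ stack, and the per-CPU exception stacks, so it classifies the current stack pointer first and then applies the same 256-byte guard-band test with that stack's size, calling BUG() if the pointer matches none of them. `dump_trace()` also gains a sanity check, apparently validating that the saved selector at the top of an exception stack is `__KERNEL_DS` before chasing the linked stack pointer stored next to it. The classification step, schematically (ranges and sizes illustrative):

    #include <stdio.h>

    #define THREAD_SIZE    8192UL
    #define IRQ_STACK_SIZE 16384UL

    struct range { unsigned long start, end; const char *name; };

    static const char *classify(unsigned long sp, const struct range *r, int n)
    {
        for (int i = 0; i < n; i++)
            if (r[i].start <= sp && sp < r[i].end)
                return r[i].name;
        return "unknown";   /* the kernel version BUG()s here */
    }

    int main(void)
    {
        struct range ranges[] = {
            { 0x10000, 0x10000 + THREAD_SIZE,    "process" },
            { 0x40000, 0x40000 + IRQ_STACK_SIZE, "irq"     },
        };
        printf("%s\n", classify(0x10100, ranges, 2));   /* "process" */
        return 0;
    }
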
18467diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
18468index 9b9f18b..9fcaa04 100644
18469--- a/arch/x86/kernel/early_printk.c
18470+++ b/arch/x86/kernel/early_printk.c
18471@@ -7,6 +7,7 @@
18472 #include <linux/pci_regs.h>
18473 #include <linux/pci_ids.h>
18474 #include <linux/errno.h>
18475+#include <linux/sched.h>
18476 #include <asm/io.h>
18477 #include <asm/processor.h>
18478 #include <asm/fcntl.h>
18479diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
18480index 6ed91d9..6cc365b 100644
18481--- a/arch/x86/kernel/entry_32.S
18482+++ b/arch/x86/kernel/entry_32.S
18483@@ -177,13 +177,153 @@
18484 /*CFI_REL_OFFSET gs, PT_GS*/
18485 .endm
18486 .macro SET_KERNEL_GS reg
18487+
18488+#ifdef CONFIG_CC_STACKPROTECTOR
18489 movl $(__KERNEL_STACK_CANARY), \reg
18490+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
18491+ movl $(__USER_DS), \reg
18492+#else
18493+ xorl \reg, \reg
18494+#endif
18495+
18496 movl \reg, %gs
18497 .endm
18498
18499 #endif /* CONFIG_X86_32_LAZY_GS */
18500
18501-.macro SAVE_ALL
18502+.macro pax_enter_kernel
18503+#ifdef CONFIG_PAX_KERNEXEC
18504+ call pax_enter_kernel
18505+#endif
18506+.endm
18507+
18508+.macro pax_exit_kernel
18509+#ifdef CONFIG_PAX_KERNEXEC
18510+ call pax_exit_kernel
18511+#endif
18512+.endm
18513+
18514+#ifdef CONFIG_PAX_KERNEXEC
18515+ENTRY(pax_enter_kernel)
18516+#ifdef CONFIG_PARAVIRT
18517+ pushl %eax
18518+ pushl %ecx
18519+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
18520+ mov %eax, %esi
18521+#else
18522+ mov %cr0, %esi
18523+#endif
18524+ bts $16, %esi
18525+ jnc 1f
18526+ mov %cs, %esi
18527+ cmp $__KERNEL_CS, %esi
18528+ jz 3f
18529+ ljmp $__KERNEL_CS, $3f
18530+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
18531+2:
18532+#ifdef CONFIG_PARAVIRT
18533+ mov %esi, %eax
18534+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
18535+#else
18536+ mov %esi, %cr0
18537+#endif
18538+3:
18539+#ifdef CONFIG_PARAVIRT
18540+ popl %ecx
18541+ popl %eax
18542+#endif
18543+ ret
18544+ENDPROC(pax_enter_kernel)
18545+
18546+ENTRY(pax_exit_kernel)
18547+#ifdef CONFIG_PARAVIRT
18548+ pushl %eax
18549+ pushl %ecx
18550+#endif
18551+ mov %cs, %esi
18552+ cmp $__KERNEXEC_KERNEL_CS, %esi
18553+ jnz 2f
18554+#ifdef CONFIG_PARAVIRT
18555+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
18556+ mov %eax, %esi
18557+#else
18558+ mov %cr0, %esi
18559+#endif
18560+ btr $16, %esi
18561+ ljmp $__KERNEL_CS, $1f
18562+1:
18563+#ifdef CONFIG_PARAVIRT
18564+ mov %esi, %eax
18565+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
18566+#else
18567+ mov %esi, %cr0
18568+#endif
18569+2:
18570+#ifdef CONFIG_PARAVIRT
18571+ popl %ecx
18572+ popl %eax
18573+#endif
18574+ ret
18575+ENDPROC(pax_exit_kernel)
18576+#endif
18577+
18578+.macro pax_erase_kstack
18579+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
18580+ call pax_erase_kstack
18581+#endif
18582+.endm
18583+
18584+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
18585+/*
18586+ * ebp: thread_info
18587+ */
18588+ENTRY(pax_erase_kstack)
18589+ pushl %edi
18590+ pushl %ecx
18591+ pushl %eax
18592+
18593+ mov TI_lowest_stack(%ebp), %edi
18594+ mov $-0xBEEF, %eax
18595+ std
18596+
18597+1: mov %edi, %ecx
18598+ and $THREAD_SIZE_asm - 1, %ecx
18599+ shr $2, %ecx
18600+ repne scasl
18601+ jecxz 2f
18602+
18603+ cmp $2*16, %ecx
18604+ jc 2f
18605+
18606+ mov $2*16, %ecx
18607+ repe scasl
18608+ jecxz 2f
18609+ jne 1b
18610+
18611+2: cld
18612+ mov %esp, %ecx
18613+ sub %edi, %ecx
18614+
18615+ cmp $THREAD_SIZE_asm, %ecx
18616+ jb 3f
18617+ ud2
18618+3:
18619+
18620+ shr $2, %ecx
18621+ rep stosl
18622+
18623+ mov TI_task_thread_sp0(%ebp), %edi
18624+ sub $128, %edi
18625+ mov %edi, TI_lowest_stack(%ebp)
18626+
18627+ popl %eax
18628+ popl %ecx
18629+ popl %edi
18630+ ret
18631+ENDPROC(pax_erase_kstack)
18632+#endif
18633+
18634+.macro __SAVE_ALL _DS
18635 cld
18636 PUSH_GS
18637 pushl_cfi %fs
18638@@ -206,7 +346,7 @@
18639 CFI_REL_OFFSET ecx, 0
18640 pushl_cfi %ebx
18641 CFI_REL_OFFSET ebx, 0
18642- movl $(__USER_DS), %edx
18643+ movl $\_DS, %edx
18644 movl %edx, %ds
18645 movl %edx, %es
18646 movl $(__KERNEL_PERCPU), %edx
18647@@ -214,6 +354,15 @@
18648 SET_KERNEL_GS %edx
18649 .endm
18650
18651+.macro SAVE_ALL
18652+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
18653+ __SAVE_ALL __KERNEL_DS
18654+ pax_enter_kernel
18655+#else
18656+ __SAVE_ALL __USER_DS
18657+#endif
18658+.endm
18659+
18660 .macro RESTORE_INT_REGS
18661 popl_cfi %ebx
18662 CFI_RESTORE ebx
18663@@ -297,7 +446,7 @@ ENTRY(ret_from_fork)
18664 popfl_cfi
18665 jmp syscall_exit
18666 CFI_ENDPROC
18667-END(ret_from_fork)
18668+ENDPROC(ret_from_fork)
18669
18670 ENTRY(ret_from_kernel_thread)
18671 CFI_STARTPROC
18672@@ -344,7 +493,15 @@ ret_from_intr:
18673 andl $SEGMENT_RPL_MASK, %eax
18674 #endif
18675 cmpl $USER_RPL, %eax
18676+
18677+#ifdef CONFIG_PAX_KERNEXEC
18678+ jae resume_userspace
18679+
18680+ pax_exit_kernel
18681+ jmp resume_kernel
18682+#else
18683 jb resume_kernel # not returning to v8086 or userspace
18684+#endif
18685
18686 ENTRY(resume_userspace)
18687 LOCKDEP_SYS_EXIT
18688@@ -356,8 +513,8 @@ ENTRY(resume_userspace)
18689 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
18690 # int/exception return?
18691 jne work_pending
18692- jmp restore_all
18693-END(ret_from_exception)
18694+ jmp restore_all_pax
18695+ENDPROC(ret_from_exception)
18696
18697 #ifdef CONFIG_PREEMPT
18698 ENTRY(resume_kernel)
18699@@ -372,7 +529,7 @@ need_resched:
18700 jz restore_all
18701 call preempt_schedule_irq
18702 jmp need_resched
18703-END(resume_kernel)
18704+ENDPROC(resume_kernel)
18705 #endif
18706 CFI_ENDPROC
18707 /*
18708@@ -406,30 +563,45 @@ sysenter_past_esp:
18709 /*CFI_REL_OFFSET cs, 0*/
18710 /*
18711 * Push current_thread_info()->sysenter_return to the stack.
18712- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
18713- * pushed above; +8 corresponds to copy_thread's esp0 setting.
18714 */
18715- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
18716+ pushl_cfi $0
18717 CFI_REL_OFFSET eip, 0
18718
18719 pushl_cfi %eax
18720 SAVE_ALL
18721+ GET_THREAD_INFO(%ebp)
18722+ movl TI_sysenter_return(%ebp),%ebp
18723+ movl %ebp,PT_EIP(%esp)
18724 ENABLE_INTERRUPTS(CLBR_NONE)
18725
18726 /*
18727 * Load the potential sixth argument from user stack.
18728 * Careful about security.
18729 */
18730+ movl PT_OLDESP(%esp),%ebp
18731+
18732+#ifdef CONFIG_PAX_MEMORY_UDEREF
18733+ mov PT_OLDSS(%esp),%ds
18734+1: movl %ds:(%ebp),%ebp
18735+ push %ss
18736+ pop %ds
18737+#else
18738 cmpl $__PAGE_OFFSET-3,%ebp
18739 jae syscall_fault
18740 ASM_STAC
18741 1: movl (%ebp),%ebp
18742 ASM_CLAC
18743+#endif
18744+
18745 movl %ebp,PT_EBP(%esp)
18746 _ASM_EXTABLE(1b,syscall_fault)
18747
18748 GET_THREAD_INFO(%ebp)
18749
18750+#ifdef CONFIG_PAX_RANDKSTACK
18751+ pax_erase_kstack
18752+#endif
18753+
18754 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
18755 jnz sysenter_audit
18756 sysenter_do_call:
18757@@ -444,12 +616,24 @@ sysenter_do_call:
18758 testl $_TIF_ALLWORK_MASK, %ecx
18759 jne sysexit_audit
18760 sysenter_exit:
18761+
18762+#ifdef CONFIG_PAX_RANDKSTACK
18763+ pushl_cfi %eax
18764+ movl %esp, %eax
18765+ call pax_randomize_kstack
18766+ popl_cfi %eax
18767+#endif
18768+
18769+ pax_erase_kstack
18770+
18771 /* if something modifies registers it must also disable sysexit */
18772 movl PT_EIP(%esp), %edx
18773 movl PT_OLDESP(%esp), %ecx
18774 xorl %ebp,%ebp
18775 TRACE_IRQS_ON
18776 1: mov PT_FS(%esp), %fs
18777+2: mov PT_DS(%esp), %ds
18778+3: mov PT_ES(%esp), %es
18779 PTGS_TO_GS
18780 ENABLE_INTERRUPTS_SYSEXIT
18781
18782@@ -466,6 +650,9 @@ sysenter_audit:
18783 movl %eax,%edx /* 2nd arg: syscall number */
18784 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
18785 call __audit_syscall_entry
18786+
18787+ pax_erase_kstack
18788+
18789 pushl_cfi %ebx
18790 movl PT_EAX(%esp),%eax /* reload syscall number */
18791 jmp sysenter_do_call
18792@@ -491,10 +678,16 @@ sysexit_audit:
18793
18794 CFI_ENDPROC
18795 .pushsection .fixup,"ax"
18796-2: movl $0,PT_FS(%esp)
18797+4: movl $0,PT_FS(%esp)
18798+ jmp 1b
18799+5: movl $0,PT_DS(%esp)
18800+ jmp 1b
18801+6: movl $0,PT_ES(%esp)
18802 jmp 1b
18803 .popsection
18804- _ASM_EXTABLE(1b,2b)
18805+ _ASM_EXTABLE(1b,4b)
18806+ _ASM_EXTABLE(2b,5b)
18807+ _ASM_EXTABLE(3b,6b)
18808 PTGS_TO_GS_EX
18809 ENDPROC(ia32_sysenter_target)
18810
18811@@ -509,6 +702,11 @@ ENTRY(system_call)
18812 pushl_cfi %eax # save orig_eax
18813 SAVE_ALL
18814 GET_THREAD_INFO(%ebp)
18815+
18816+#ifdef CONFIG_PAX_RANDKSTACK
18817+ pax_erase_kstack
18818+#endif
18819+
18820 # system call tracing in operation / emulation
18821 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
18822 jnz syscall_trace_entry
18823@@ -527,6 +725,15 @@ syscall_exit:
18824 testl $_TIF_ALLWORK_MASK, %ecx # current->work
18825 jne syscall_exit_work
18826
18827+restore_all_pax:
18828+
18829+#ifdef CONFIG_PAX_RANDKSTACK
18830+ movl %esp, %eax
18831+ call pax_randomize_kstack
18832+#endif
18833+
18834+ pax_erase_kstack
18835+
18836 restore_all:
18837 TRACE_IRQS_IRET
18838 restore_all_notrace:
18839@@ -583,14 +790,34 @@ ldt_ss:
18840 * compensating for the offset by changing to the ESPFIX segment with
18841 * a base address that matches for the difference.
18842 */
18843-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
18844+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
18845 mov %esp, %edx /* load kernel esp */
18846 mov PT_OLDESP(%esp), %eax /* load userspace esp */
18847 mov %dx, %ax /* eax: new kernel esp */
18848 sub %eax, %edx /* offset (low word is 0) */
18849+#ifdef CONFIG_SMP
18850+ movl PER_CPU_VAR(cpu_number), %ebx
18851+ shll $PAGE_SHIFT_asm, %ebx
18852+ addl $cpu_gdt_table, %ebx
18853+#else
18854+ movl $cpu_gdt_table, %ebx
18855+#endif
18856 shr $16, %edx
18857- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
18858- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
18859+
18860+#ifdef CONFIG_PAX_KERNEXEC
18861+ mov %cr0, %esi
18862+ btr $16, %esi
18863+ mov %esi, %cr0
18864+#endif
18865+
18866+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
18867+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
18868+
18869+#ifdef CONFIG_PAX_KERNEXEC
18870+ bts $16, %esi
18871+ mov %esi, %cr0
18872+#endif
18873+
18874 pushl_cfi $__ESPFIX_SS
18875 pushl_cfi %eax /* new kernel esp */
18876 /* Disable interrupts, but do not irqtrace this section: we
18877@@ -619,20 +846,18 @@ work_resched:
18878 movl TI_flags(%ebp), %ecx
18879 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
18880 # than syscall tracing?
18881- jz restore_all
18882+ jz restore_all_pax
18883 testb $_TIF_NEED_RESCHED, %cl
18884 jnz work_resched
18885
18886 work_notifysig: # deal with pending signals and
18887 # notify-resume requests
18888+ movl %esp, %eax
18889 #ifdef CONFIG_VM86
18890 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
18891- movl %esp, %eax
18892 jne work_notifysig_v86 # returning to kernel-space or
18893 # vm86-space
18894 1:
18895-#else
18896- movl %esp, %eax
18897 #endif
18898 TRACE_IRQS_ON
18899 ENABLE_INTERRUPTS(CLBR_NONE)
18900@@ -653,7 +878,7 @@ work_notifysig_v86:
18901 movl %eax, %esp
18902 jmp 1b
18903 #endif
18904-END(work_pending)
18905+ENDPROC(work_pending)
18906
18907 # perform syscall exit tracing
18908 ALIGN
18909@@ -661,11 +886,14 @@ syscall_trace_entry:
18910 movl $-ENOSYS,PT_EAX(%esp)
18911 movl %esp, %eax
18912 call syscall_trace_enter
18913+
18914+ pax_erase_kstack
18915+
18916 /* What it returned is what we'll actually use. */
18917 cmpl $(NR_syscalls), %eax
18918 jnae syscall_call
18919 jmp syscall_exit
18920-END(syscall_trace_entry)
18921+ENDPROC(syscall_trace_entry)
18922
18923 # perform syscall exit tracing
18924 ALIGN
18925@@ -678,21 +906,25 @@ syscall_exit_work:
18926 movl %esp, %eax
18927 call syscall_trace_leave
18928 jmp resume_userspace
18929-END(syscall_exit_work)
18930+ENDPROC(syscall_exit_work)
18931 CFI_ENDPROC
18932
18933 RING0_INT_FRAME # can't unwind into user space anyway
18934 syscall_fault:
18935+#ifdef CONFIG_PAX_MEMORY_UDEREF
18936+ push %ss
18937+ pop %ds
18938+#endif
18939 ASM_CLAC
18940 GET_THREAD_INFO(%ebp)
18941 movl $-EFAULT,PT_EAX(%esp)
18942 jmp resume_userspace
18943-END(syscall_fault)
18944+ENDPROC(syscall_fault)
18945
18946 syscall_badsys:
18947 movl $-ENOSYS,PT_EAX(%esp)
18948 jmp resume_userspace
18949-END(syscall_badsys)
18950+ENDPROC(syscall_badsys)
18951 CFI_ENDPROC
18952 /*
18953 * End of kprobes section
18954@@ -753,8 +985,15 @@ PTREGSCALL1(vm86old)
18955 * normal stack and adjusts ESP with the matching offset.
18956 */
18957 /* fixup the stack */
18958- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
18959- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
18960+#ifdef CONFIG_SMP
18961+ movl PER_CPU_VAR(cpu_number), %ebx
18962+ shll $PAGE_SHIFT_asm, %ebx
18963+ addl $cpu_gdt_table, %ebx
18964+#else
18965+ movl $cpu_gdt_table, %ebx
18966+#endif
18967+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
18968+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
18969 shl $16, %eax
18970 addl %esp, %eax /* the adjusted stack pointer */
18971 pushl_cfi $__KERNEL_DS
18972@@ -807,7 +1046,7 @@ vector=vector+1
18973 .endr
18974 2: jmp common_interrupt
18975 .endr
18976-END(irq_entries_start)
18977+ENDPROC(irq_entries_start)
18978
18979 .previous
18980 END(interrupt)
18981@@ -858,7 +1097,7 @@ ENTRY(coprocessor_error)
18982 pushl_cfi $do_coprocessor_error
18983 jmp error_code
18984 CFI_ENDPROC
18985-END(coprocessor_error)
18986+ENDPROC(coprocessor_error)
18987
18988 ENTRY(simd_coprocessor_error)
18989 RING0_INT_FRAME
18990@@ -880,7 +1119,7 @@ ENTRY(simd_coprocessor_error)
18991 #endif
18992 jmp error_code
18993 CFI_ENDPROC
18994-END(simd_coprocessor_error)
18995+ENDPROC(simd_coprocessor_error)
18996
18997 ENTRY(device_not_available)
18998 RING0_INT_FRAME
18999@@ -889,18 +1128,18 @@ ENTRY(device_not_available)
19000 pushl_cfi $do_device_not_available
19001 jmp error_code
19002 CFI_ENDPROC
19003-END(device_not_available)
19004+ENDPROC(device_not_available)
19005
19006 #ifdef CONFIG_PARAVIRT
19007 ENTRY(native_iret)
19008 iret
19009 _ASM_EXTABLE(native_iret, iret_exc)
19010-END(native_iret)
19011+ENDPROC(native_iret)
19012
19013 ENTRY(native_irq_enable_sysexit)
19014 sti
19015 sysexit
19016-END(native_irq_enable_sysexit)
19017+ENDPROC(native_irq_enable_sysexit)
19018 #endif
19019
19020 ENTRY(overflow)
19021@@ -910,7 +1149,7 @@ ENTRY(overflow)
19022 pushl_cfi $do_overflow
19023 jmp error_code
19024 CFI_ENDPROC
19025-END(overflow)
19026+ENDPROC(overflow)
19027
19028 ENTRY(bounds)
19029 RING0_INT_FRAME
19030@@ -919,7 +1158,7 @@ ENTRY(bounds)
19031 pushl_cfi $do_bounds
19032 jmp error_code
19033 CFI_ENDPROC
19034-END(bounds)
19035+ENDPROC(bounds)
19036
19037 ENTRY(invalid_op)
19038 RING0_INT_FRAME
19039@@ -928,7 +1167,7 @@ ENTRY(invalid_op)
19040 pushl_cfi $do_invalid_op
19041 jmp error_code
19042 CFI_ENDPROC
19043-END(invalid_op)
19044+ENDPROC(invalid_op)
19045
19046 ENTRY(coprocessor_segment_overrun)
19047 RING0_INT_FRAME
19048@@ -937,7 +1176,7 @@ ENTRY(coprocessor_segment_overrun)
19049 pushl_cfi $do_coprocessor_segment_overrun
19050 jmp error_code
19051 CFI_ENDPROC
19052-END(coprocessor_segment_overrun)
19053+ENDPROC(coprocessor_segment_overrun)
19054
19055 ENTRY(invalid_TSS)
19056 RING0_EC_FRAME
19057@@ -945,7 +1184,7 @@ ENTRY(invalid_TSS)
19058 pushl_cfi $do_invalid_TSS
19059 jmp error_code
19060 CFI_ENDPROC
19061-END(invalid_TSS)
19062+ENDPROC(invalid_TSS)
19063
19064 ENTRY(segment_not_present)
19065 RING0_EC_FRAME
19066@@ -953,7 +1192,7 @@ ENTRY(segment_not_present)
19067 pushl_cfi $do_segment_not_present
19068 jmp error_code
19069 CFI_ENDPROC
19070-END(segment_not_present)
19071+ENDPROC(segment_not_present)
19072
19073 ENTRY(stack_segment)
19074 RING0_EC_FRAME
19075@@ -961,7 +1200,7 @@ ENTRY(stack_segment)
19076 pushl_cfi $do_stack_segment
19077 jmp error_code
19078 CFI_ENDPROC
19079-END(stack_segment)
19080+ENDPROC(stack_segment)
19081
19082 ENTRY(alignment_check)
19083 RING0_EC_FRAME
19084@@ -969,7 +1208,7 @@ ENTRY(alignment_check)
19085 pushl_cfi $do_alignment_check
19086 jmp error_code
19087 CFI_ENDPROC
19088-END(alignment_check)
19089+ENDPROC(alignment_check)
19090
19091 ENTRY(divide_error)
19092 RING0_INT_FRAME
19093@@ -978,7 +1217,7 @@ ENTRY(divide_error)
19094 pushl_cfi $do_divide_error
19095 jmp error_code
19096 CFI_ENDPROC
19097-END(divide_error)
19098+ENDPROC(divide_error)
19099
19100 #ifdef CONFIG_X86_MCE
19101 ENTRY(machine_check)
19102@@ -988,7 +1227,7 @@ ENTRY(machine_check)
19103 pushl_cfi machine_check_vector
19104 jmp error_code
19105 CFI_ENDPROC
19106-END(machine_check)
19107+ENDPROC(machine_check)
19108 #endif
19109
19110 ENTRY(spurious_interrupt_bug)
19111@@ -998,7 +1237,7 @@ ENTRY(spurious_interrupt_bug)
19112 pushl_cfi $do_spurious_interrupt_bug
19113 jmp error_code
19114 CFI_ENDPROC
19115-END(spurious_interrupt_bug)
19116+ENDPROC(spurious_interrupt_bug)
19117 /*
19118 * End of kprobes section
19119 */
19120@@ -1101,7 +1340,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
19121
19122 ENTRY(mcount)
19123 ret
19124-END(mcount)
19125+ENDPROC(mcount)
19126
19127 ENTRY(ftrace_caller)
19128 cmpl $0, function_trace_stop
19129@@ -1134,7 +1373,7 @@ ftrace_graph_call:
19130 .globl ftrace_stub
19131 ftrace_stub:
19132 ret
19133-END(ftrace_caller)
19134+ENDPROC(ftrace_caller)
19135
19136 ENTRY(ftrace_regs_caller)
19137 pushf /* push flags before compare (in cs location) */
19138@@ -1235,7 +1474,7 @@ trace:
19139 popl %ecx
19140 popl %eax
19141 jmp ftrace_stub
19142-END(mcount)
19143+ENDPROC(mcount)
19144 #endif /* CONFIG_DYNAMIC_FTRACE */
19145 #endif /* CONFIG_FUNCTION_TRACER */
19146
19147@@ -1253,7 +1492,7 @@ ENTRY(ftrace_graph_caller)
19148 popl %ecx
19149 popl %eax
19150 ret
19151-END(ftrace_graph_caller)
19152+ENDPROC(ftrace_graph_caller)
19153
19154 .globl return_to_handler
19155 return_to_handler:
19156@@ -1309,15 +1548,18 @@ error_code:
19157 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
19158 REG_TO_PTGS %ecx
19159 SET_KERNEL_GS %ecx
19160- movl $(__USER_DS), %ecx
19161+ movl $(__KERNEL_DS), %ecx
19162 movl %ecx, %ds
19163 movl %ecx, %es
19164+
19165+ pax_enter_kernel
19166+
19167 TRACE_IRQS_OFF
19168 movl %esp,%eax # pt_regs pointer
19169 call *%edi
19170 jmp ret_from_exception
19171 CFI_ENDPROC
19172-END(page_fault)
19173+ENDPROC(page_fault)
19174
19175 /*
19176 * Debug traps and NMI can happen at the one SYSENTER instruction
19177@@ -1360,7 +1602,7 @@ debug_stack_correct:
19178 call do_debug
19179 jmp ret_from_exception
19180 CFI_ENDPROC
19181-END(debug)
19182+ENDPROC(debug)
19183
19184 /*
19185 * NMI is doubly nasty. It can happen _while_ we're handling
19186@@ -1398,6 +1640,9 @@ nmi_stack_correct:
19187 xorl %edx,%edx # zero error code
19188 movl %esp,%eax # pt_regs pointer
19189 call do_nmi
19190+
19191+ pax_exit_kernel
19192+
19193 jmp restore_all_notrace
19194 CFI_ENDPROC
19195
19196@@ -1434,12 +1679,15 @@ nmi_espfix_stack:
19197 FIXUP_ESPFIX_STACK # %eax == %esp
19198 xorl %edx,%edx # zero error code
19199 call do_nmi
19200+
19201+ pax_exit_kernel
19202+
19203 RESTORE_REGS
19204 lss 12+4(%esp), %esp # back to espfix stack
19205 CFI_ADJUST_CFA_OFFSET -24
19206 jmp irq_return
19207 CFI_ENDPROC
19208-END(nmi)
19209+ENDPROC(nmi)
19210
19211 ENTRY(int3)
19212 RING0_INT_FRAME
19213@@ -1452,14 +1700,14 @@ ENTRY(int3)
19214 call do_int3
19215 jmp ret_from_exception
19216 CFI_ENDPROC
19217-END(int3)
19218+ENDPROC(int3)
19219
19220 ENTRY(general_protection)
19221 RING0_EC_FRAME
19222 pushl_cfi $do_general_protection
19223 jmp error_code
19224 CFI_ENDPROC
19225-END(general_protection)
19226+ENDPROC(general_protection)
19227
19228 #ifdef CONFIG_KVM_GUEST
19229 ENTRY(async_page_fault)
19230@@ -1468,7 +1716,7 @@ ENTRY(async_page_fault)
19231 pushl_cfi $do_async_page_fault
19232 jmp error_code
19233 CFI_ENDPROC
19234-END(async_page_fault)
19235+ENDPROC(async_page_fault)
19236 #endif
19237
19238 /*
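
The entry_32.S changes are the core of the patch. `SAVE_ALL` becomes a wrapper over `__SAVE_ALL` that loads `__KERNEL_DS` (rather than `__USER_DS`) into `ds`/`es` whenever KERNEXEC, PAGEEXEC, SEGMEXEC or UDEREF is enabled, then runs `pax_enter_kernel`, which sets CR0.WP and switches to the dedicated `__KERNEXEC_KERNEL_CS` code segment; the sysenter path stops reading `sysenter_return` through a stack offset and reloads it from `thread_info` after registers are saved; the ESPFIX fixup indexes the per-CPU GDT explicitly instead of via `PER_CPU_VAR`; `END()` becomes `ENDPROC()` throughout so the symbols carry function type annotations; and `pax_erase_kstack` is STACKLEAK's exit-time scrub: scan down from the lowest stack address the thread has used, find the boundary of still-untouched poison words, and re-poison everything up to the current stack pointer. The scrub restated in C (simplified userspace model; the assembly scans with `scasl`/`stosl` and requires a run of consecutive poison words before it stops):

    #include <stdio.h>

    #define WORDS  64
    #define POISON 0xFFFF4111UL   /* 32-bit two's-complement encoding of $-0xBEEF */

    static unsigned long stack[WORDS];

    /* Erase whatever the last syscall may have written between the lowest
     * previously-used slot and the current stack pointer. */
    static void erase_kstack(int lowest_idx, int sp_idx)
    {
        int i = lowest_idx;
        while (i < sp_idx && stack[i] == POISON)
            i++;                   /* still poisoned: never written, skip */
        for (; i < sp_idx; i++)
            stack[i] = POISON;     /* dirtied: overwrite with poison */
    }

    int main(void)
    {
        for (int i = 0; i < WORDS; i++)
            stack[i] = POISON;
        stack[10] = 0xdeadbeef;    /* pretend one slot was dirtied */
        erase_kstack(8, 32);
        printf("slot repoisoned: %d\n", stack[10] == POISON);
        return 0;
    }

This is why `TI_lowest_stack` is maintained: the scrub only has to cover the range a syscall could actually have touched, not the whole stack page.
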
19239diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
19240index cb3c591..bc63707 100644
19241--- a/arch/x86/kernel/entry_64.S
19242+++ b/arch/x86/kernel/entry_64.S
19243@@ -59,6 +59,8 @@
19244 #include <asm/context_tracking.h>
19245 #include <asm/smap.h>
19246 #include <linux/err.h>
19247+#include <asm/pgtable.h>
19248+#include <asm/alternative-asm.h>
19249
19250 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
19251 #include <linux/elf-em.h>
19252@@ -80,8 +82,9 @@
19253 #ifdef CONFIG_DYNAMIC_FTRACE
19254
19255 ENTRY(function_hook)
19256+ pax_force_retaddr
19257 retq
19258-END(function_hook)
19259+ENDPROC(function_hook)
19260
19261 /* skip is set if stack has been adjusted */
19262 .macro ftrace_caller_setup skip=0
19263@@ -122,8 +125,9 @@ GLOBAL(ftrace_graph_call)
19264 #endif
19265
19266 GLOBAL(ftrace_stub)
19267+ pax_force_retaddr
19268 retq
19269-END(ftrace_caller)
19270+ENDPROC(ftrace_caller)
19271
19272 ENTRY(ftrace_regs_caller)
19273 /* Save the current flags before compare (in SS location)*/
19274@@ -191,7 +195,7 @@ ftrace_restore_flags:
19275 popfq
19276 jmp ftrace_stub
19277
19278-END(ftrace_regs_caller)
19279+ENDPROC(ftrace_regs_caller)
19280
19281
19282 #else /* ! CONFIG_DYNAMIC_FTRACE */
19283@@ -212,6 +216,7 @@ ENTRY(function_hook)
19284 #endif
19285
19286 GLOBAL(ftrace_stub)
19287+ pax_force_retaddr
19288 retq
19289
19290 trace:
19291@@ -225,12 +230,13 @@ trace:
19292 #endif
19293 subq $MCOUNT_INSN_SIZE, %rdi
19294
19295+ pax_force_fptr ftrace_trace_function
19296 call *ftrace_trace_function
19297
19298 MCOUNT_RESTORE_FRAME
19299
19300 jmp ftrace_stub
19301-END(function_hook)
19302+ENDPROC(function_hook)
19303 #endif /* CONFIG_DYNAMIC_FTRACE */
19304 #endif /* CONFIG_FUNCTION_TRACER */
19305
19306@@ -252,8 +258,9 @@ ENTRY(ftrace_graph_caller)
19307
19308 MCOUNT_RESTORE_FRAME
19309
19310+ pax_force_retaddr
19311 retq
19312-END(ftrace_graph_caller)
19313+ENDPROC(ftrace_graph_caller)
19314
19315 GLOBAL(return_to_handler)
19316 subq $24, %rsp
19317@@ -269,7 +276,9 @@ GLOBAL(return_to_handler)
19318 movq 8(%rsp), %rdx
19319 movq (%rsp), %rax
19320 addq $24, %rsp
19321+ pax_force_fptr %rdi
19322 jmp *%rdi
19323+ENDPROC(return_to_handler)
19324 #endif
19325
19326
19327@@ -284,6 +293,273 @@ ENTRY(native_usergs_sysret64)
19328 ENDPROC(native_usergs_sysret64)
19329 #endif /* CONFIG_PARAVIRT */
19330
19331+ .macro ljmpq sel, off
19332+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
19333+ .byte 0x48; ljmp *1234f(%rip)
19334+ .pushsection .rodata
19335+ .align 16
19336+ 1234: .quad \off; .word \sel
19337+ .popsection
19338+#else
19339+ pushq $\sel
19340+ pushq $\off
19341+ lretq
19342+#endif
19343+ .endm
19344+
19345+ .macro pax_enter_kernel
19346+ pax_set_fptr_mask
19347+#ifdef CONFIG_PAX_KERNEXEC
19348+ call pax_enter_kernel
19349+#endif
19350+ .endm
19351+
19352+ .macro pax_exit_kernel
19353+#ifdef CONFIG_PAX_KERNEXEC
19354+ call pax_exit_kernel
19355+#endif
19356+ .endm
19357+
19358+#ifdef CONFIG_PAX_KERNEXEC
19359+ENTRY(pax_enter_kernel)
19360+ pushq %rdi
19361+
19362+#ifdef CONFIG_PARAVIRT
19363+ PV_SAVE_REGS(CLBR_RDI)
19364+#endif
19365+
19366+ GET_CR0_INTO_RDI
19367+ bts $16,%rdi
19368+ jnc 3f
19369+ mov %cs,%edi
19370+ cmp $__KERNEL_CS,%edi
19371+ jnz 2f
19372+1:
19373+
19374+#ifdef CONFIG_PARAVIRT
19375+ PV_RESTORE_REGS(CLBR_RDI)
19376+#endif
19377+
19378+ popq %rdi
19379+ pax_force_retaddr
19380+ retq
19381+
19382+2: ljmpq __KERNEL_CS,1f
19383+3: ljmpq __KERNEXEC_KERNEL_CS,4f
19384+4: SET_RDI_INTO_CR0
19385+ jmp 1b
19386+ENDPROC(pax_enter_kernel)
19387+
19388+ENTRY(pax_exit_kernel)
19389+ pushq %rdi
19390+
19391+#ifdef CONFIG_PARAVIRT
19392+ PV_SAVE_REGS(CLBR_RDI)
19393+#endif
19394+
19395+ mov %cs,%rdi
19396+ cmp $__KERNEXEC_KERNEL_CS,%edi
19397+ jz 2f
19398+1:
19399+
19400+#ifdef CONFIG_PARAVIRT
19401+ PV_RESTORE_REGS(CLBR_RDI);
19402+#endif
19403+
19404+ popq %rdi
19405+ pax_force_retaddr
19406+ retq
19407+
19408+2: GET_CR0_INTO_RDI
19409+ btr $16,%rdi
19410+ ljmpq __KERNEL_CS,3f
19411+3: SET_RDI_INTO_CR0
19412+ jmp 1b
19413+ENDPROC(pax_exit_kernel)
19414+#endif
19415+
19416+ .macro pax_enter_kernel_user
19417+ pax_set_fptr_mask
19418+#ifdef CONFIG_PAX_MEMORY_UDEREF
19419+ call pax_enter_kernel_user
19420+#endif
19421+ .endm
19422+
19423+ .macro pax_exit_kernel_user
19424+#ifdef CONFIG_PAX_MEMORY_UDEREF
19425+ call pax_exit_kernel_user
19426+#endif
19427+#ifdef CONFIG_PAX_RANDKSTACK
19428+ pushq %rax
19429+ call pax_randomize_kstack
19430+ popq %rax
19431+#endif
19432+ .endm
19433+
19434+#ifdef CONFIG_PAX_MEMORY_UDEREF
19435+ENTRY(pax_enter_kernel_user)
19436+ pushq %rdi
19437+ pushq %rbx
19438+
19439+#ifdef CONFIG_PARAVIRT
19440+ PV_SAVE_REGS(CLBR_RDI)
19441+#endif
19442+
19443+ GET_CR3_INTO_RDI
19444+ mov %rdi,%rbx
19445+ add $__START_KERNEL_map,%rbx
19446+ sub phys_base(%rip),%rbx
19447+
19448+#ifdef CONFIG_PARAVIRT
19449+ pushq %rdi
19450+ cmpl $0, pv_info+PARAVIRT_enabled
19451+ jz 1f
19452+ i = 0
19453+ .rept USER_PGD_PTRS
19454+ mov i*8(%rbx),%rsi
19455+ mov $0,%sil
19456+ lea i*8(%rbx),%rdi
19457+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
19458+ i = i + 1
19459+ .endr
19460+ jmp 2f
19461+1:
19462+#endif
19463+
19464+ i = 0
19465+ .rept USER_PGD_PTRS
19466+ movb $0,i*8(%rbx)
19467+ i = i + 1
19468+ .endr
19469+
19470+#ifdef CONFIG_PARAVIRT
19471+2: popq %rdi
19472+#endif
19473+ SET_RDI_INTO_CR3
19474+
19475+#ifdef CONFIG_PAX_KERNEXEC
19476+ GET_CR0_INTO_RDI
19477+ bts $16,%rdi
19478+ SET_RDI_INTO_CR0
19479+#endif
19480+
19481+#ifdef CONFIG_PARAVIRT
19482+ PV_RESTORE_REGS(CLBR_RDI)
19483+#endif
19484+
19485+ popq %rbx
19486+ popq %rdi
19487+ pax_force_retaddr
19488+ retq
19489+ENDPROC(pax_enter_kernel_user)
19490+
19491+ENTRY(pax_exit_kernel_user)
19492+ push %rdi
19493+
19494+#ifdef CONFIG_PARAVIRT
19495+ pushq %rbx
19496+ PV_SAVE_REGS(CLBR_RDI)
19497+#endif
19498+
19499+#ifdef CONFIG_PAX_KERNEXEC
19500+ GET_CR0_INTO_RDI
19501+ btr $16,%rdi
19502+ SET_RDI_INTO_CR0
19503+#endif
19504+
19505+ GET_CR3_INTO_RDI
19506+ add $__START_KERNEL_map,%rdi
19507+ sub phys_base(%rip),%rdi
19508+
19509+#ifdef CONFIG_PARAVIRT
19510+ cmpl $0, pv_info+PARAVIRT_enabled
19511+ jz 1f
19512+ mov %rdi,%rbx
19513+ i = 0
19514+ .rept USER_PGD_PTRS
19515+ mov i*8(%rbx),%rsi
19516+ mov $0x67,%sil
19517+ lea i*8(%rbx),%rdi
19518+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
19519+ i = i + 1
19520+ .endr
19521+ jmp 2f
19522+1:
19523+#endif
19524+
19525+ i = 0
19526+ .rept USER_PGD_PTRS
19527+ movb $0x67,i*8(%rdi)
19528+ i = i + 1
19529+ .endr
19530+
19531+#ifdef CONFIG_PARAVIRT
19532+2: PV_RESTORE_REGS(CLBR_RDI)
19533+ popq %rbx
19534+#endif
19535+
19536+ popq %rdi
19537+ pax_force_retaddr
19538+ retq
19539+ENDPROC(pax_exit_kernel_user)
19540+#endif
19541+
19542+.macro pax_erase_kstack
19543+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
19544+ call pax_erase_kstack
19545+#endif
19546+.endm
19547+
19548+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
19549+ENTRY(pax_erase_kstack)
19550+ pushq %rdi
19551+ pushq %rcx
19552+ pushq %rax
19553+ pushq %r11
19554+
19555+ GET_THREAD_INFO(%r11)
19556+ mov TI_lowest_stack(%r11), %rdi
19557+ mov $-0xBEEF, %rax
19558+ std
19559+
19560+1: mov %edi, %ecx
19561+ and $THREAD_SIZE_asm - 1, %ecx
19562+ shr $3, %ecx
19563+ repne scasq
19564+ jecxz 2f
19565+
19566+ cmp $2*8, %ecx
19567+ jc 2f
19568+
19569+ mov $2*8, %ecx
19570+ repe scasq
19571+ jecxz 2f
19572+ jne 1b
19573+
19574+2: cld
19575+ mov %esp, %ecx
19576+ sub %edi, %ecx
19577+
19578+ cmp $THREAD_SIZE_asm, %rcx
19579+ jb 3f
19580+ ud2
19581+3:
19582+
19583+ shr $3, %ecx
19584+ rep stosq
19585+
19586+ mov TI_task_thread_sp0(%r11), %rdi
19587+ sub $256, %rdi
19588+ mov %rdi, TI_lowest_stack(%r11)
19589+
19590+ popq %r11
19591+ popq %rax
19592+ popq %rcx
19593+ popq %rdi
19594+ pax_force_retaddr
19595+ ret
19596+ENDPROC(pax_erase_kstack)
19597+#endif
19598
19599 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
19600 #ifdef CONFIG_TRACE_IRQFLAGS
19601@@ -375,8 +651,8 @@ ENDPROC(native_usergs_sysret64)
19602 .endm
19603
19604 .macro UNFAKE_STACK_FRAME
19605- addq $8*6, %rsp
19606- CFI_ADJUST_CFA_OFFSET -(6*8)
19607+ addq $8*6 + ARG_SKIP, %rsp
19608+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
19609 .endm
19610
19611 /*
19612@@ -463,7 +739,7 @@ ENDPROC(native_usergs_sysret64)
19613 movq %rsp, %rsi
19614
19615 leaq -RBP(%rsp),%rdi /* arg1 for handler */
19616- testl $3, CS-RBP(%rsi)
19617+ testb $3, CS-RBP(%rsi)
19618 je 1f
19619 SWAPGS
19620 /*
19621@@ -498,9 +774,10 @@ ENTRY(save_rest)
19622 movq_cfi r15, R15+16
19623 movq %r11, 8(%rsp) /* return address */
19624 FIXUP_TOP_OF_STACK %r11, 16
19625+ pax_force_retaddr
19626 ret
19627 CFI_ENDPROC
19628-END(save_rest)
19629+ENDPROC(save_rest)
19630
19631 /* save complete stack frame */
19632 .pushsection .kprobes.text, "ax"
19633@@ -529,9 +806,10 @@ ENTRY(save_paranoid)
19634 js 1f /* negative -> in kernel */
19635 SWAPGS
19636 xorl %ebx,%ebx
19637-1: ret
19638+1: pax_force_retaddr_bts
19639+ ret
19640 CFI_ENDPROC
19641-END(save_paranoid)
19642+ENDPROC(save_paranoid)
19643 .popsection
19644
19645 /*
19646@@ -553,7 +831,7 @@ ENTRY(ret_from_fork)
19647
19648 RESTORE_REST
19649
19650- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
19651+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
19652 jz 1f
19653
19654 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
19655@@ -571,7 +849,7 @@ ENTRY(ret_from_fork)
19656 RESTORE_REST
19657 jmp int_ret_from_sys_call
19658 CFI_ENDPROC
19659-END(ret_from_fork)
19660+ENDPROC(ret_from_fork)
19661
19662 /*
19663 * System call entry. Up to 6 arguments in registers are supported.
19664@@ -608,7 +886,7 @@ END(ret_from_fork)
19665 ENTRY(system_call)
19666 CFI_STARTPROC simple
19667 CFI_SIGNAL_FRAME
19668- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
19669+ CFI_DEF_CFA rsp,0
19670 CFI_REGISTER rip,rcx
19671 /*CFI_REGISTER rflags,r11*/
19672 SWAPGS_UNSAFE_STACK
19673@@ -621,16 +899,23 @@ GLOBAL(system_call_after_swapgs)
19674
19675 movq %rsp,PER_CPU_VAR(old_rsp)
19676 movq PER_CPU_VAR(kernel_stack),%rsp
19677+ SAVE_ARGS 8*6,0
19678+ pax_enter_kernel_user
19679+
19680+#ifdef CONFIG_PAX_RANDKSTACK
19681+ pax_erase_kstack
19682+#endif
19683+
19684 /*
19685 * No need to follow this irqs off/on section - it's straight
19686 * and short:
19687 */
19688 ENABLE_INTERRUPTS(CLBR_NONE)
19689- SAVE_ARGS 8,0
19690 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
19691 movq %rcx,RIP-ARGOFFSET(%rsp)
19692 CFI_REL_OFFSET rip,RIP-ARGOFFSET
19693- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
19694+ GET_THREAD_INFO(%rcx)
19695+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
19696 jnz tracesys
19697 system_call_fastpath:
19698 #if __SYSCALL_MASK == ~0
19699@@ -640,7 +925,7 @@ system_call_fastpath:
19700 cmpl $__NR_syscall_max,%eax
19701 #endif
19702 ja badsys
19703- movq %r10,%rcx
19704+ movq R10-ARGOFFSET(%rsp),%rcx
19705 call *sys_call_table(,%rax,8) # XXX: rip relative
19706 movq %rax,RAX-ARGOFFSET(%rsp)
19707 /*
19708@@ -654,10 +939,13 @@ sysret_check:
19709 LOCKDEP_SYS_EXIT
19710 DISABLE_INTERRUPTS(CLBR_NONE)
19711 TRACE_IRQS_OFF
19712- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
19713+ GET_THREAD_INFO(%rcx)
19714+ movl TI_flags(%rcx),%edx
19715 andl %edi,%edx
19716 jnz sysret_careful
19717 CFI_REMEMBER_STATE
19718+ pax_exit_kernel_user
19719+ pax_erase_kstack
19720 /*
19721 * sysretq will re-enable interrupts:
19722 */
19723@@ -709,14 +997,18 @@ badsys:
19724 * jump back to the normal fast path.
19725 */
19726 auditsys:
19727- movq %r10,%r9 /* 6th arg: 4th syscall arg */
19728+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
19729 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
19730 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
19731 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
19732 movq %rax,%rsi /* 2nd arg: syscall number */
19733 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
19734 call __audit_syscall_entry
19735+
19736+ pax_erase_kstack
19737+
19738 LOAD_ARGS 0 /* reload call-clobbered registers */
19739+ pax_set_fptr_mask
19740 jmp system_call_fastpath
19741
19742 /*
19743@@ -737,7 +1029,7 @@ sysret_audit:
19744 /* Do syscall tracing */
19745 tracesys:
19746 #ifdef CONFIG_AUDITSYSCALL
19747- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
19748+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
19749 jz auditsys
19750 #endif
19751 SAVE_REST
19752@@ -745,12 +1037,16 @@ tracesys:
19753 FIXUP_TOP_OF_STACK %rdi
19754 movq %rsp,%rdi
19755 call syscall_trace_enter
19756+
19757+ pax_erase_kstack
19758+
19759 /*
19760 * Reload arg registers from stack in case ptrace changed them.
19761 * We don't reload %rax because syscall_trace_enter() returned
19762 * the value it wants us to use in the table lookup.
19763 */
19764 LOAD_ARGS ARGOFFSET, 1
19765+ pax_set_fptr_mask
19766 RESTORE_REST
19767 #if __SYSCALL_MASK == ~0
19768 cmpq $__NR_syscall_max,%rax
19769@@ -759,7 +1055,7 @@ tracesys:
19770 cmpl $__NR_syscall_max,%eax
19771 #endif
19772 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
19773- movq %r10,%rcx /* fixup for C */
19774+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
19775 call *sys_call_table(,%rax,8)
19776 movq %rax,RAX-ARGOFFSET(%rsp)
19777 /* Use IRET because user could have changed frame */
19778@@ -780,7 +1076,9 @@ GLOBAL(int_with_check)
19779 andl %edi,%edx
19780 jnz int_careful
19781 andl $~TS_COMPAT,TI_status(%rcx)
19782- jmp retint_swapgs
19783+ pax_exit_kernel_user
19784+ pax_erase_kstack
19785+ jmp retint_swapgs_pax
19786
19787 /* Either reschedule or signal or syscall exit tracking needed. */
19788 /* First do a reschedule test. */
19789@@ -826,7 +1124,7 @@ int_restore_rest:
19790 TRACE_IRQS_OFF
19791 jmp int_with_check
19792 CFI_ENDPROC
19793-END(system_call)
19794+ENDPROC(system_call)
19795
19796 /*
19797 * Certain special system calls that need to save a complete full stack frame.
19798@@ -842,7 +1140,7 @@ ENTRY(\label)
19799 call \func
19800 jmp ptregscall_common
19801 CFI_ENDPROC
19802-END(\label)
19803+ENDPROC(\label)
19804 .endm
19805
19806 .macro FORK_LIKE func
19807@@ -856,9 +1154,10 @@ ENTRY(stub_\func)
19808 DEFAULT_FRAME 0 8 /* offset 8: return address */
19809 call sys_\func
19810 RESTORE_TOP_OF_STACK %r11, 8
19811+ pax_force_retaddr
19812 ret $REST_SKIP /* pop extended registers */
19813 CFI_ENDPROC
19814-END(stub_\func)
19815+ENDPROC(stub_\func)
19816 .endm
19817
19818 FORK_LIKE clone
19819@@ -875,9 +1174,10 @@ ENTRY(ptregscall_common)
19820 movq_cfi_restore R12+8, r12
19821 movq_cfi_restore RBP+8, rbp
19822 movq_cfi_restore RBX+8, rbx
19823+ pax_force_retaddr
19824 ret $REST_SKIP /* pop extended registers */
19825 CFI_ENDPROC
19826-END(ptregscall_common)
19827+ENDPROC(ptregscall_common)
19828
19829 ENTRY(stub_execve)
19830 CFI_STARTPROC
19831@@ -891,7 +1191,7 @@ ENTRY(stub_execve)
19832 RESTORE_REST
19833 jmp int_ret_from_sys_call
19834 CFI_ENDPROC
19835-END(stub_execve)
19836+ENDPROC(stub_execve)
19837
19838 /*
19839 * sigreturn is special because it needs to restore all registers on return.
19840@@ -909,7 +1209,7 @@ ENTRY(stub_rt_sigreturn)
19841 RESTORE_REST
19842 jmp int_ret_from_sys_call
19843 CFI_ENDPROC
19844-END(stub_rt_sigreturn)
19845+ENDPROC(stub_rt_sigreturn)
19846
19847 #ifdef CONFIG_X86_X32_ABI
19848 ENTRY(stub_x32_rt_sigreturn)
19849@@ -975,7 +1275,7 @@ vector=vector+1
19850 2: jmp common_interrupt
19851 .endr
19852 CFI_ENDPROC
19853-END(irq_entries_start)
19854+ENDPROC(irq_entries_start)
19855
19856 .previous
19857 END(interrupt)
19858@@ -995,6 +1295,16 @@ END(interrupt)
19859 subq $ORIG_RAX-RBP, %rsp
19860 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
19861 SAVE_ARGS_IRQ
19862+#ifdef CONFIG_PAX_MEMORY_UDEREF
19863+ testb $3, CS(%rdi)
19864+ jnz 1f
19865+ pax_enter_kernel
19866+ jmp 2f
19867+1: pax_enter_kernel_user
19868+2:
19869+#else
19870+ pax_enter_kernel
19871+#endif
19872 call \func
19873 .endm
19874
19875@@ -1027,7 +1337,7 @@ ret_from_intr:
19876
19877 exit_intr:
19878 GET_THREAD_INFO(%rcx)
19879- testl $3,CS-ARGOFFSET(%rsp)
19880+ testb $3,CS-ARGOFFSET(%rsp)
19881 je retint_kernel
19882
19883 /* Interrupt came from user space */
19884@@ -1049,12 +1359,16 @@ retint_swapgs: /* return to user-space */
19885 * The iretq could re-enable interrupts:
19886 */
19887 DISABLE_INTERRUPTS(CLBR_ANY)
19888+ pax_exit_kernel_user
19889+retint_swapgs_pax:
19890 TRACE_IRQS_IRETQ
19891 SWAPGS
19892 jmp restore_args
19893
19894 retint_restore_args: /* return to kernel space */
19895 DISABLE_INTERRUPTS(CLBR_ANY)
19896+ pax_exit_kernel
19897+ pax_force_retaddr (RIP-ARGOFFSET)
19898 /*
19899 * The iretq could re-enable interrupts:
19900 */
19901@@ -1137,7 +1451,7 @@ ENTRY(retint_kernel)
19902 #endif
19903
19904 CFI_ENDPROC
19905-END(common_interrupt)
19906+ENDPROC(common_interrupt)
19907 /*
19908 * End of kprobes section
19909 */
19910@@ -1155,7 +1469,7 @@ ENTRY(\sym)
19911 interrupt \do_sym
19912 jmp ret_from_intr
19913 CFI_ENDPROC
19914-END(\sym)
19915+ENDPROC(\sym)
19916 .endm
19917
19918 #ifdef CONFIG_SMP
19919@@ -1211,12 +1525,22 @@ ENTRY(\sym)
19920 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
19921 call error_entry
19922 DEFAULT_FRAME 0
19923+#ifdef CONFIG_PAX_MEMORY_UDEREF
19924+ testb $3, CS(%rsp)
19925+ jnz 1f
19926+ pax_enter_kernel
19927+ jmp 2f
19928+1: pax_enter_kernel_user
19929+2:
19930+#else
19931+ pax_enter_kernel
19932+#endif
19933 movq %rsp,%rdi /* pt_regs pointer */
19934 xorl %esi,%esi /* no error code */
19935 call \do_sym
19936 jmp error_exit /* %ebx: no swapgs flag */
19937 CFI_ENDPROC
19938-END(\sym)
19939+ENDPROC(\sym)
19940 .endm
19941
19942 .macro paranoidzeroentry sym do_sym
19943@@ -1229,15 +1553,25 @@ ENTRY(\sym)
19944 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
19945 call save_paranoid
19946 TRACE_IRQS_OFF
19947+#ifdef CONFIG_PAX_MEMORY_UDEREF
19948+ testb $3, CS(%rsp)
19949+ jnz 1f
19950+ pax_enter_kernel
19951+ jmp 2f
19952+1: pax_enter_kernel_user
19953+2:
19954+#else
19955+ pax_enter_kernel
19956+#endif
19957 movq %rsp,%rdi /* pt_regs pointer */
19958 xorl %esi,%esi /* no error code */
19959 call \do_sym
19960 jmp paranoid_exit /* %ebx: no swapgs flag */
19961 CFI_ENDPROC
19962-END(\sym)
19963+ENDPROC(\sym)
19964 .endm
19965
19966-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
19967+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
19968 .macro paranoidzeroentry_ist sym do_sym ist
19969 ENTRY(\sym)
19970 INTR_FRAME
19971@@ -1248,14 +1582,30 @@ ENTRY(\sym)
19972 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
19973 call save_paranoid
19974 TRACE_IRQS_OFF_DEBUG
19975+#ifdef CONFIG_PAX_MEMORY_UDEREF
19976+ testb $3, CS(%rsp)
19977+ jnz 1f
19978+ pax_enter_kernel
19979+ jmp 2f
19980+1: pax_enter_kernel_user
19981+2:
19982+#else
19983+ pax_enter_kernel
19984+#endif
19985 movq %rsp,%rdi /* pt_regs pointer */
19986 xorl %esi,%esi /* no error code */
19987+#ifdef CONFIG_SMP
19988+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
19989+ lea init_tss(%r12), %r12
19990+#else
19991+ lea init_tss(%rip), %r12
19992+#endif
19993 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
19994 call \do_sym
19995 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
19996 jmp paranoid_exit /* %ebx: no swapgs flag */
19997 CFI_ENDPROC
19998-END(\sym)
19999+ENDPROC(\sym)
20000 .endm
20001
20002 .macro errorentry sym do_sym
20003@@ -1267,13 +1617,23 @@ ENTRY(\sym)
20004 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
20005 call error_entry
20006 DEFAULT_FRAME 0
20007+#ifdef CONFIG_PAX_MEMORY_UDEREF
20008+ testb $3, CS(%rsp)
20009+ jnz 1f
20010+ pax_enter_kernel
20011+ jmp 2f
20012+1: pax_enter_kernel_user
20013+2:
20014+#else
20015+ pax_enter_kernel
20016+#endif
20017 movq %rsp,%rdi /* pt_regs pointer */
20018 movq ORIG_RAX(%rsp),%rsi /* get error code */
20019 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
20020 call \do_sym
20021 jmp error_exit /* %ebx: no swapgs flag */
20022 CFI_ENDPROC
20023-END(\sym)
20024+ENDPROC(\sym)
20025 .endm
20026
20027 /* error code is on the stack already */
20028@@ -1287,13 +1647,23 @@ ENTRY(\sym)
20029 call save_paranoid
20030 DEFAULT_FRAME 0
20031 TRACE_IRQS_OFF
20032+#ifdef CONFIG_PAX_MEMORY_UDEREF
20033+ testb $3, CS(%rsp)
20034+ jnz 1f
20035+ pax_enter_kernel
20036+ jmp 2f
20037+1: pax_enter_kernel_user
20038+2:
20039+#else
20040+ pax_enter_kernel
20041+#endif
20042 movq %rsp,%rdi /* pt_regs pointer */
20043 movq ORIG_RAX(%rsp),%rsi /* get error code */
20044 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
20045 call \do_sym
20046 jmp paranoid_exit /* %ebx: no swapgs flag */
20047 CFI_ENDPROC
20048-END(\sym)
20049+ENDPROC(\sym)
20050 .endm
20051
20052 zeroentry divide_error do_divide_error
20053@@ -1323,9 +1693,10 @@ gs_change:
20054 2: mfence /* workaround */
20055 SWAPGS
20056 popfq_cfi
20057+ pax_force_retaddr
20058 ret
20059 CFI_ENDPROC
20060-END(native_load_gs_index)
20061+ENDPROC(native_load_gs_index)
20062
20063 _ASM_EXTABLE(gs_change,bad_gs)
20064 .section .fixup,"ax"
20065@@ -1353,9 +1724,10 @@ ENTRY(call_softirq)
20066 CFI_DEF_CFA_REGISTER rsp
20067 CFI_ADJUST_CFA_OFFSET -8
20068 decl PER_CPU_VAR(irq_count)
20069+ pax_force_retaddr
20070 ret
20071 CFI_ENDPROC
20072-END(call_softirq)
20073+ENDPROC(call_softirq)
20074
20075 #ifdef CONFIG_XEN
20076 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
20077@@ -1393,7 +1765,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
20078 decl PER_CPU_VAR(irq_count)
20079 jmp error_exit
20080 CFI_ENDPROC
20081-END(xen_do_hypervisor_callback)
20082+ENDPROC(xen_do_hypervisor_callback)
20083
20084 /*
20085 * Hypervisor uses this for application faults while it executes.
20086@@ -1452,7 +1824,7 @@ ENTRY(xen_failsafe_callback)
20087 SAVE_ALL
20088 jmp error_exit
20089 CFI_ENDPROC
20090-END(xen_failsafe_callback)
20091+ENDPROC(xen_failsafe_callback)
20092
20093 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
20094 xen_hvm_callback_vector xen_evtchn_do_upcall
20095@@ -1501,16 +1873,31 @@ ENTRY(paranoid_exit)
20096 TRACE_IRQS_OFF_DEBUG
20097 testl %ebx,%ebx /* swapgs needed? */
20098 jnz paranoid_restore
20099- testl $3,CS(%rsp)
20100+ testb $3,CS(%rsp)
20101 jnz paranoid_userspace
20102+#ifdef CONFIG_PAX_MEMORY_UDEREF
20103+ pax_exit_kernel
20104+ TRACE_IRQS_IRETQ 0
20105+ SWAPGS_UNSAFE_STACK
20106+ RESTORE_ALL 8
20107+ pax_force_retaddr_bts
20108+ jmp irq_return
20109+#endif
20110 paranoid_swapgs:
20111+#ifdef CONFIG_PAX_MEMORY_UDEREF
20112+ pax_exit_kernel_user
20113+#else
20114+ pax_exit_kernel
20115+#endif
20116 TRACE_IRQS_IRETQ 0
20117 SWAPGS_UNSAFE_STACK
20118 RESTORE_ALL 8
20119 jmp irq_return
20120 paranoid_restore:
20121+ pax_exit_kernel
20122 TRACE_IRQS_IRETQ_DEBUG 0
20123 RESTORE_ALL 8
20124+ pax_force_retaddr_bts
20125 jmp irq_return
20126 paranoid_userspace:
20127 GET_THREAD_INFO(%rcx)
20128@@ -1539,7 +1926,7 @@ paranoid_schedule:
20129 TRACE_IRQS_OFF
20130 jmp paranoid_userspace
20131 CFI_ENDPROC
20132-END(paranoid_exit)
20133+ENDPROC(paranoid_exit)
20134
20135 /*
20136 * Exception entry point. This expects an error code/orig_rax on the stack.
20137@@ -1566,12 +1953,13 @@ ENTRY(error_entry)
20138 movq_cfi r14, R14+8
20139 movq_cfi r15, R15+8
20140 xorl %ebx,%ebx
20141- testl $3,CS+8(%rsp)
20142+ testb $3,CS+8(%rsp)
20143 je error_kernelspace
20144 error_swapgs:
20145 SWAPGS
20146 error_sti:
20147 TRACE_IRQS_OFF
20148+ pax_force_retaddr_bts
20149 ret
20150
20151 /*
20152@@ -1598,7 +1986,7 @@ bstep_iret:
20153 movq %rcx,RIP+8(%rsp)
20154 jmp error_swapgs
20155 CFI_ENDPROC
20156-END(error_entry)
20157+ENDPROC(error_entry)
20158
20159
20160 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
20161@@ -1618,7 +2006,7 @@ ENTRY(error_exit)
20162 jnz retint_careful
20163 jmp retint_swapgs
20164 CFI_ENDPROC
20165-END(error_exit)
20166+ENDPROC(error_exit)
20167
20168 /*
20169 * Test if a given stack is an NMI stack or not.
20170@@ -1676,9 +2064,11 @@ ENTRY(nmi)
20171 * If %cs was not the kernel segment, then the NMI triggered in user
20172 * space, which means it is definitely not nested.
20173 */
20174+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
20175+ je 1f
20176 cmpl $__KERNEL_CS, 16(%rsp)
20177 jne first_nmi
20178-
20179+1:
20180 /*
20181 * Check the special variable on the stack to see if NMIs are
20182 * executing.
20183@@ -1847,6 +2237,17 @@ end_repeat_nmi:
20184 */
20185 movq %cr2, %r12
20186
20187+#ifdef CONFIG_PAX_MEMORY_UDEREF
20188+ testb $3, CS(%rsp)
20189+ jnz 1f
20190+ pax_enter_kernel
20191+ jmp 2f
20192+1: pax_enter_kernel_user
20193+2:
20194+#else
20195+ pax_enter_kernel
20196+#endif
20197+
20198 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
20199 movq %rsp,%rdi
20200 movq $-1,%rsi
20201@@ -1862,23 +2263,34 @@ end_repeat_nmi:
20202 testl %ebx,%ebx /* swapgs needed? */
20203 jnz nmi_restore
20204 nmi_swapgs:
20205+#ifdef CONFIG_PAX_MEMORY_UDEREF
20206+ pax_exit_kernel_user
20207+#else
20208+ pax_exit_kernel
20209+#endif
20210 SWAPGS_UNSAFE_STACK
20211+ RESTORE_ALL 6*8
20212+ /* Clear the NMI executing stack variable */
20213+ movq $0, 5*8(%rsp)
20214+ jmp irq_return
20215 nmi_restore:
20216+ pax_exit_kernel
20217 /* Pop the extra iret frame at once */
20218 RESTORE_ALL 6*8
20219+ pax_force_retaddr_bts
20220
20221 /* Clear the NMI executing stack variable */
20222 movq $0, 5*8(%rsp)
20223 jmp irq_return
20224 CFI_ENDPROC
20225-END(nmi)
20226+ENDPROC(nmi)
20227
20228 ENTRY(ignore_sysret)
20229 CFI_STARTPROC
20230 mov $-ENOSYS,%eax
20231 sysret
20232 CFI_ENDPROC
20233-END(ignore_sysret)
20234+ENDPROC(ignore_sysret)
20235
20236 /*
20237 * End of kprobes section
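The entry_64.S hunks above follow two recurring patterns: END() becomes ENDPROC() so the assembler types each symbol as a function, and pax_force_retaddr / pax_force_retaddr_bts land just before ret instructions as part of KERNEXEC's return-address protection. The saved return address gets bit 63 forced on (the kretprobe trampoline hunk further down shows the raw form, btsq $63,(%rsp)), so a corrupted return can only target kernel space. A minimal C sketch of the masking idea, assuming the bts method; the real patch does this in asm on the stack slot, not in C:

/*
 * Hedged sketch of the KERNEXEC return-address masking behind
 * pax_force_retaddr: userland addresses have bit 63 clear, so forcing
 * it on turns a hijacked user-space target into a non-canonical
 * address that faults instead of executing attacker-mapped code,
 * while leaving genuine kernel addresses unchanged.
 */
#include <stdint.h>

static inline uint64_t kernexec_mask_retaddr(uint64_t retaddr)
{
	return retaddr | (1ULL << 63);
}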
20238diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
20239index 1d41402..af9a46a 100644
20240--- a/arch/x86/kernel/ftrace.c
20241+++ b/arch/x86/kernel/ftrace.c
20242@@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
20243 {
20244 unsigned char replaced[MCOUNT_INSN_SIZE];
20245
20246+ ip = ktla_ktva(ip);
20247+
20248 /*
20249 * Note: Due to modules and __init, code can
20250 * disappear and change, we need to protect against faulting
20251@@ -227,7 +229,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
20252 unsigned char old[MCOUNT_INSN_SIZE], *new;
20253 int ret;
20254
20255- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
20256+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
20257 new = ftrace_call_replace(ip, (unsigned long)func);
20258
20259 /* See comment above by declaration of modifying_ftrace_code */
20260@@ -238,7 +240,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
20261 /* Also update the regs callback function */
20262 if (!ret) {
20263 ip = (unsigned long)(&ftrace_regs_call);
20264- memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE);
20265+ memcpy(old, ktla_ktva((void *)&ftrace_regs_call), MCOUNT_INSN_SIZE);
20266 new = ftrace_call_replace(ip, (unsigned long)func);
20267 ret = ftrace_modify_code(ip, old, new);
20268 }
20269@@ -279,7 +281,7 @@ static int ftrace_write(unsigned long ip, const char *val, int size)
20270 * kernel identity mapping to modify code.
20271 */
20272 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
20273- ip = (unsigned long)__va(__pa(ip));
20274+ ip = (unsigned long)__va(__pa(ktla_ktva(ip)));
20275
20276 return probe_kernel_write((void *)ip, val, size);
20277 }
20278@@ -289,7 +291,7 @@ static int add_break(unsigned long ip, const char *old)
20279 unsigned char replaced[MCOUNT_INSN_SIZE];
20280 unsigned char brk = BREAKPOINT_INSTRUCTION;
20281
20282- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
20283+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
20284 return -EFAULT;
20285
20286 /* Make sure it is what we expect it to be */
20287@@ -637,7 +639,7 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
20288 return ret;
20289
20290 fail_update:
20291- probe_kernel_write((void *)ip, &old_code[0], 1);
20292+ probe_kernel_write((void *)ktla_ktva(ip), &old_code[0], 1);
20293 goto out;
20294 }
20295
20296@@ -670,6 +672,8 @@ static int ftrace_mod_jmp(unsigned long ip,
20297 {
20298 unsigned char code[MCOUNT_INSN_SIZE];
20299
20300+ ip = ktla_ktva(ip);
20301+
20302 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
20303 return -EFAULT;
20304
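Every ftrace hunk above funnels code addresses through ktla_ktva() (or its inverse ktva_ktla()) before reading or patching instructions. Under KERNEXEC on i386 the kernel image is reachable at two addresses — the one it executes from and a shifted alias used for reads and writes — and ktla_ktva() maps a kernel text logical address to that accessible alias (it is the identity where the split doesn't apply). A sketch with a made-up shift; KERNEXEC_TEXT_SHIFT and the _sketch names are placeholders, not the patch's real definitions:

/*
 * Hedged sketch of the ktla_ktva()/ktva_ktla() pair.  The offset is a
 * placeholder; the real value derives from the link/load addresses.
 */
#define KERNEXEC_TEXT_SHIFT	0x10000000UL	/* hypothetical */

static inline unsigned long ktla_ktva_sketch(unsigned long addr)
{
	return addr + KERNEXEC_TEXT_SHIFT;	/* text logical -> virtual */
}

static inline unsigned long ktva_ktla_sketch(unsigned long addr)
{
	return addr - KERNEXEC_TEXT_SHIFT;	/* virtual -> text logical */
}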
20305diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
20306index c18f59d..9c0c9f6 100644
20307--- a/arch/x86/kernel/head32.c
20308+++ b/arch/x86/kernel/head32.c
20309@@ -18,6 +18,7 @@
20310 #include <asm/io_apic.h>
20311 #include <asm/bios_ebda.h>
20312 #include <asm/tlbflush.h>
20313+#include <asm/boot.h>
20314
20315 static void __init i386_default_early_setup(void)
20316 {
20317@@ -30,8 +31,7 @@ static void __init i386_default_early_setup(void)
20318
20319 void __init i386_start_kernel(void)
20320 {
20321- memblock_reserve(__pa_symbol(&_text),
20322- __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
20323+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop) - LOAD_PHYSICAL_ADDR);
20324
20325 #ifdef CONFIG_BLK_DEV_INITRD
20326 /* Reserve INITRD */
20327diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
20328index c8932c7..d56b622 100644
20329--- a/arch/x86/kernel/head_32.S
20330+++ b/arch/x86/kernel/head_32.S
20331@@ -26,6 +26,12 @@
20332 /* Physical address */
20333 #define pa(X) ((X) - __PAGE_OFFSET)
20334
20335+#ifdef CONFIG_PAX_KERNEXEC
20336+#define ta(X) (X)
20337+#else
20338+#define ta(X) ((X) - __PAGE_OFFSET)
20339+#endif
20340+
20341 /*
20342 * References to members of the new_cpu_data structure.
20343 */
20344@@ -55,11 +61,7 @@
20345 * and small than max_low_pfn, otherwise will waste some page table entries
20346 */
20347
20348-#if PTRS_PER_PMD > 1
20349-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
20350-#else
20351-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
20352-#endif
20353+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
20354
20355 /* Number of possible pages in the lowmem region */
20356 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
20357@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
20358 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
20359
20360 /*
20361+ * Real beginning of normal "text" segment
20362+ */
20363+ENTRY(stext)
20364+ENTRY(_stext)
20365+
20366+/*
20367 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
20368 * %esi points to the real-mode code as a 32-bit pointer.
20369 * CS and DS must be 4 GB flat segments, but we don't depend on
20370@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
20371 * can.
20372 */
20373 __HEAD
20374+
20375+#ifdef CONFIG_PAX_KERNEXEC
20376+ jmp startup_32
20377+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
20378+.fill PAGE_SIZE-5,1,0xcc
20379+#endif
20380+
20381 ENTRY(startup_32)
20382 movl pa(stack_start),%ecx
20383
20384@@ -106,6 +121,59 @@ ENTRY(startup_32)
20385 2:
20386 leal -__PAGE_OFFSET(%ecx),%esp
20387
20388+#ifdef CONFIG_SMP
20389+ movl $pa(cpu_gdt_table),%edi
20390+ movl $__per_cpu_load,%eax
20391+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
20392+ rorl $16,%eax
20393+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
20394+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
20395+ movl $__per_cpu_end - 1,%eax
20396+ subl $__per_cpu_start,%eax
20397+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
20398+#endif
20399+
20400+#ifdef CONFIG_PAX_MEMORY_UDEREF
20401+ movl $NR_CPUS,%ecx
20402+ movl $pa(cpu_gdt_table),%edi
20403+1:
20404+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
20405+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
20406+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
20407+ addl $PAGE_SIZE_asm,%edi
20408+ loop 1b
20409+#endif
20410+
20411+#ifdef CONFIG_PAX_KERNEXEC
20412+ movl $pa(boot_gdt),%edi
20413+ movl $__LOAD_PHYSICAL_ADDR,%eax
20414+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
20415+ rorl $16,%eax
20416+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
20417+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
20418+ rorl $16,%eax
20419+
20420+ ljmp $(__BOOT_CS),$1f
20421+1:
20422+
20423+ movl $NR_CPUS,%ecx
20424+ movl $pa(cpu_gdt_table),%edi
20425+ addl $__PAGE_OFFSET,%eax
20426+1:
20427+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
20428+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
20429+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
20430+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
20431+ rorl $16,%eax
20432+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
20433+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
20434+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
20435+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
20436+ rorl $16,%eax
20437+ addl $PAGE_SIZE_asm,%edi
20438+ loop 1b
20439+#endif
20440+
20441 /*
20442 * Clear BSS first so that there are no surprises...
20443 */
20444@@ -196,8 +264,11 @@ ENTRY(startup_32)
20445 movl %eax, pa(max_pfn_mapped)
20446
20447 /* Do early initialization of the fixmap area */
20448- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
20449- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
20450+#ifdef CONFIG_COMPAT_VDSO
20451+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
20452+#else
20453+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
20454+#endif
20455 #else /* Not PAE */
20456
20457 page_pde_offset = (__PAGE_OFFSET >> 20);
20458@@ -227,8 +298,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
20459 movl %eax, pa(max_pfn_mapped)
20460
20461 /* Do early initialization of the fixmap area */
20462- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
20463- movl %eax,pa(initial_page_table+0xffc)
20464+#ifdef CONFIG_COMPAT_VDSO
20465+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
20466+#else
20467+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
20468+#endif
20469 #endif
20470
20471 #ifdef CONFIG_PARAVIRT
20472@@ -242,9 +316,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
20473 cmpl $num_subarch_entries, %eax
20474 jae bad_subarch
20475
20476- movl pa(subarch_entries)(,%eax,4), %eax
20477- subl $__PAGE_OFFSET, %eax
20478- jmp *%eax
20479+ jmp *pa(subarch_entries)(,%eax,4)
20480
20481 bad_subarch:
20482 WEAK(lguest_entry)
20483@@ -256,10 +328,10 @@ WEAK(xen_entry)
20484 __INITDATA
20485
20486 subarch_entries:
20487- .long default_entry /* normal x86/PC */
20488- .long lguest_entry /* lguest hypervisor */
20489- .long xen_entry /* Xen hypervisor */
20490- .long default_entry /* Moorestown MID */
20491+ .long ta(default_entry) /* normal x86/PC */
20492+ .long ta(lguest_entry) /* lguest hypervisor */
20493+ .long ta(xen_entry) /* Xen hypervisor */
20494+ .long ta(default_entry) /* Moorestown MID */
20495 num_subarch_entries = (. - subarch_entries) / 4
20496 .previous
20497 #else
20498@@ -335,6 +407,7 @@ default_entry:
20499 movl pa(mmu_cr4_features),%eax
20500 movl %eax,%cr4
20501
20502+#ifdef CONFIG_X86_PAE
20503 testb $X86_CR4_PAE, %al # check if PAE is enabled
20504 jz 6f
20505
20506@@ -363,6 +436,9 @@ default_entry:
20507 /* Make changes effective */
20508 wrmsr
20509
20510+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
20511+#endif
20512+
20513 6:
20514
20515 /*
20516@@ -460,14 +536,20 @@ is386: movl $2,%ecx # set MP
20517 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
20518 movl %eax,%ss # after changing gdt.
20519
20520- movl $(__USER_DS),%eax # DS/ES contains default USER segment
20521+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
20522 movl %eax,%ds
20523 movl %eax,%es
20524
20525 movl $(__KERNEL_PERCPU), %eax
20526 movl %eax,%fs # set this cpu's percpu
20527
20528+#ifdef CONFIG_CC_STACKPROTECTOR
20529 movl $(__KERNEL_STACK_CANARY),%eax
20530+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
20531+ movl $(__USER_DS),%eax
20532+#else
20533+ xorl %eax,%eax
20534+#endif
20535 movl %eax,%gs
20536
20537 xorl %eax,%eax # Clear LDT
20538@@ -544,8 +626,11 @@ setup_once:
20539 * relocation. Manually set base address in stack canary
20540 * segment descriptor.
20541 */
20542- movl $gdt_page,%eax
20543+ movl $cpu_gdt_table,%eax
20544 movl $stack_canary,%ecx
20545+#ifdef CONFIG_SMP
20546+ addl $__per_cpu_load,%ecx
20547+#endif
20548 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
20549 shrl $16, %ecx
20550 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
20551@@ -576,7 +661,7 @@ ENDPROC(early_idt_handlers)
20552 /* This is global to keep gas from relaxing the jumps */
20553 ENTRY(early_idt_handler)
20554 cld
20555- cmpl $2,%ss:early_recursion_flag
20556+ cmpl $1,%ss:early_recursion_flag
20557 je hlt_loop
20558 incl %ss:early_recursion_flag
20559
20560@@ -614,8 +699,8 @@ ENTRY(early_idt_handler)
20561 pushl (20+6*4)(%esp) /* trapno */
20562 pushl $fault_msg
20563 call printk
20564-#endif
20565 call dump_stack
20566+#endif
20567 hlt_loop:
20568 hlt
20569 jmp hlt_loop
20570@@ -634,8 +719,11 @@ ENDPROC(early_idt_handler)
20571 /* This is the default interrupt "handler" :-) */
20572 ALIGN
20573 ignore_int:
20574- cld
20575 #ifdef CONFIG_PRINTK
20576+ cmpl $2,%ss:early_recursion_flag
20577+ je hlt_loop
20578+ incl %ss:early_recursion_flag
20579+ cld
20580 pushl %eax
20581 pushl %ecx
20582 pushl %edx
20583@@ -644,9 +732,6 @@ ignore_int:
20584 movl $(__KERNEL_DS),%eax
20585 movl %eax,%ds
20586 movl %eax,%es
20587- cmpl $2,early_recursion_flag
20588- je hlt_loop
20589- incl early_recursion_flag
20590 pushl 16(%esp)
20591 pushl 24(%esp)
20592 pushl 32(%esp)
20593@@ -680,29 +765,43 @@ ENTRY(setup_once_ref)
20594 /*
20595 * BSS section
20596 */
20597-__PAGE_ALIGNED_BSS
20598- .align PAGE_SIZE
20599 #ifdef CONFIG_X86_PAE
20600+.section .initial_pg_pmd,"a",@progbits
20601 initial_pg_pmd:
20602 .fill 1024*KPMDS,4,0
20603 #else
20604+.section .initial_page_table,"a",@progbits
20605 ENTRY(initial_page_table)
20606 .fill 1024,4,0
20607 #endif
20608+.section .initial_pg_fixmap,"a",@progbits
20609 initial_pg_fixmap:
20610 .fill 1024,4,0
20611+.section .empty_zero_page,"a",@progbits
20612 ENTRY(empty_zero_page)
20613 .fill 4096,1,0
20614+.section .swapper_pg_dir,"a",@progbits
20615 ENTRY(swapper_pg_dir)
20616+#ifdef CONFIG_X86_PAE
20617+ .fill 4,8,0
20618+#else
20619 .fill 1024,4,0
20620+#endif
20621+
20622+/*
20623+ * The IDT has to be page-aligned to simplify the Pentium
20624+ * F0 0F bug workaround.. We have a special link segment
20625+ * for this.
20626+ */
20627+.section .idt,"a",@progbits
20628+ENTRY(idt_table)
20629+ .fill 256,8,0
20630
20631 /*
20632 * This starts the data section.
20633 */
20634 #ifdef CONFIG_X86_PAE
20635-__PAGE_ALIGNED_DATA
20636- /* Page-aligned for the benefit of paravirt? */
20637- .align PAGE_SIZE
20638+.section .initial_page_table,"a",@progbits
20639 ENTRY(initial_page_table)
20640 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
20641 # if KPMDS == 3
20642@@ -721,12 +820,20 @@ ENTRY(initial_page_table)
20643 # error "Kernel PMDs should be 1, 2 or 3"
20644 # endif
20645 .align PAGE_SIZE /* needs to be page-sized too */
20646+
20647+#ifdef CONFIG_PAX_PER_CPU_PGD
20648+ENTRY(cpu_pgd)
20649+ .rept NR_CPUS
20650+ .fill 4,8,0
20651+ .endr
20652+#endif
20653+
20654 #endif
20655
20656 .data
20657 .balign 4
20658 ENTRY(stack_start)
20659- .long init_thread_union+THREAD_SIZE
20660+ .long init_thread_union+THREAD_SIZE-8
20661
20662 __INITRODATA
20663 int_msg:
20664@@ -754,7 +861,7 @@ fault_msg:
20665 * segment size, and 32-bit linear address value:
20666 */
20667
20668- .data
20669+.section .rodata,"a",@progbits
20670 .globl boot_gdt_descr
20671 .globl idt_descr
20672
20673@@ -763,7 +870,7 @@ fault_msg:
20674 .word 0 # 32 bit align gdt_desc.address
20675 boot_gdt_descr:
20676 .word __BOOT_DS+7
20677- .long boot_gdt - __PAGE_OFFSET
20678+ .long pa(boot_gdt)
20679
20680 .word 0 # 32-bit align idt_desc.address
20681 idt_descr:
20682@@ -774,7 +881,7 @@ idt_descr:
20683 .word 0 # 32 bit align gdt_desc.address
20684 ENTRY(early_gdt_descr)
20685 .word GDT_ENTRIES*8-1
20686- .long gdt_page /* Overwritten for secondary CPUs */
20687+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
20688
20689 /*
20690 * The boot_gdt must mirror the equivalent in setup.S and is
20691@@ -783,5 +890,65 @@ ENTRY(early_gdt_descr)
20692 .align L1_CACHE_BYTES
20693 ENTRY(boot_gdt)
20694 .fill GDT_ENTRY_BOOT_CS,8,0
20695- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
20696- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
20697+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
20698+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
20699+
20700+ .align PAGE_SIZE_asm
20701+ENTRY(cpu_gdt_table)
20702+ .rept NR_CPUS
20703+ .quad 0x0000000000000000 /* NULL descriptor */
20704+ .quad 0x0000000000000000 /* 0x0b reserved */
20705+ .quad 0x0000000000000000 /* 0x13 reserved */
20706+ .quad 0x0000000000000000 /* 0x1b reserved */
20707+
20708+#ifdef CONFIG_PAX_KERNEXEC
20709+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
20710+#else
20711+ .quad 0x0000000000000000 /* 0x20 unused */
20712+#endif
20713+
20714+ .quad 0x0000000000000000 /* 0x28 unused */
20715+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
20716+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
20717+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
20718+ .quad 0x0000000000000000 /* 0x4b reserved */
20719+ .quad 0x0000000000000000 /* 0x53 reserved */
20720+ .quad 0x0000000000000000 /* 0x5b reserved */
20721+
20722+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
20723+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
20724+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
20725+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
20726+
20727+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
20728+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
20729+
20730+ /*
20731+ * Segments used for calling PnP BIOS have byte granularity.
20732+ * The code segments and data segments have fixed 64k limits,
20733+ * the transfer segment sizes are set at run time.
20734+ */
20735+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
20736+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
20737+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
20738+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
20739+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
20740+
20741+ /*
20742+ * The APM segments have byte granularity and their bases
20743+ * are set at run time. All have 64k limits.
20744+ */
20745+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
20746+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
20747+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
20748+
20749+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
20750+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
20751+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
20752+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
20753+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
20754+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
20755+
20756+ /* Be sure this is zeroed to avoid false validations in Xen */
20757+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
20758+ .endr
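The new cpu_gdt_table spells each segment out as a raw .quad, and the boot_gdt entries change from 0x...9a/0x...92 to 0x...9b/0x...93 — the accessed bit is pre-set so the CPU never needs to write a descriptor in what the patch turns into a read-only GDT. Decoding one value makes the table reviewable: 0x00cf9b000000ffff unpacks to base 0, limit 0xfffff in 4KB pages (4GB), access byte 0x9b (present, DPL 0, code, execute/read, accessed), flags 0xc (4KB granularity, 32-bit). A standalone decoder following the Intel SDM descriptor layout:

/* Decode a 64-bit GDT descriptor; plain userspace C, useful for
 * auditing the raw .quad values in the hunk above. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t d = 0x00cf9b000000ffffULL;	/* kernel 4GB code segment */

	unsigned int limit  = (unsigned int)(d & 0xffff) | (unsigned int)((d >> 32) & 0xf0000);
	unsigned int base   = (unsigned int)((d >> 16) & 0xffffff) |
			      (unsigned int)(((d >> 56) & 0xff) << 24);
	unsigned int access = (unsigned int)((d >> 40) & 0xff);	/* 0x9b */
	unsigned int flags  = (unsigned int)((d >> 52) & 0x0f);	/* 0xc */

	printf("base=%#x limit=%#x access=%#x flags=%#x\n", base, limit, access, flags);
	return 0;
}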
20759diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
20760index 980053c..74d3b44 100644
20761--- a/arch/x86/kernel/head_64.S
20762+++ b/arch/x86/kernel/head_64.S
20763@@ -20,6 +20,8 @@
20764 #include <asm/processor-flags.h>
20765 #include <asm/percpu.h>
20766 #include <asm/nops.h>
20767+#include <asm/cpufeature.h>
20768+#include <asm/alternative-asm.h>
20769
20770 #ifdef CONFIG_PARAVIRT
20771 #include <asm/asm-offsets.h>
20772@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
20773 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
20774 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
20775 L3_START_KERNEL = pud_index(__START_KERNEL_map)
20776+L4_VMALLOC_START = pgd_index(VMALLOC_START)
20777+L3_VMALLOC_START = pud_index(VMALLOC_START)
20778+L4_VMALLOC_END = pgd_index(VMALLOC_END)
20779+L3_VMALLOC_END = pud_index(VMALLOC_END)
20780+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
20781+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
20782
20783 .text
20784 __HEAD
20785@@ -88,35 +96,23 @@ startup_64:
20786 */
20787 addq %rbp, init_level4_pgt + 0(%rip)
20788 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
20789+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
20790+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
20791+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
20792 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
20793
20794 addq %rbp, level3_ident_pgt + 0(%rip)
20795+#ifndef CONFIG_XEN
20796+ addq %rbp, level3_ident_pgt + 8(%rip)
20797+#endif
20798
20799- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
20800- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
20801+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
20802+
20803+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
20804+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
20805
20806 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
20807-
20808- /* Add an Identity mapping if I am above 1G */
20809- leaq _text(%rip), %rdi
20810- andq $PMD_PAGE_MASK, %rdi
20811-
20812- movq %rdi, %rax
20813- shrq $PUD_SHIFT, %rax
20814- andq $(PTRS_PER_PUD - 1), %rax
20815- jz ident_complete
20816-
20817- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
20818- leaq level3_ident_pgt(%rip), %rbx
20819- movq %rdx, 0(%rbx, %rax, 8)
20820-
20821- movq %rdi, %rax
20822- shrq $PMD_SHIFT, %rax
20823- andq $(PTRS_PER_PMD - 1), %rax
20824- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
20825- leaq level2_spare_pgt(%rip), %rbx
20826- movq %rdx, 0(%rbx, %rax, 8)
20827-ident_complete:
20828+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
20829
20830 /*
20831 * Fixup the kernel text+data virtual addresses. Note that
20832@@ -159,8 +155,8 @@ ENTRY(secondary_startup_64)
20833 * after the boot processor executes this code.
20834 */
20835
20836- /* Enable PAE mode and PGE */
20837- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
20838+ /* Enable PAE mode and PSE/PGE */
20839+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
20840 movq %rax, %cr4
20841
20842 /* Setup early boot stage 4 level pagetables. */
20843@@ -182,9 +178,17 @@ ENTRY(secondary_startup_64)
20844 movl $MSR_EFER, %ecx
20845 rdmsr
20846 btsl $_EFER_SCE, %eax /* Enable System Call */
20847- btl $20,%edi /* No Execute supported? */
20848+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
20849 jnc 1f
20850 btsl $_EFER_NX, %eax
20851+ leaq init_level4_pgt(%rip), %rdi
20852+#ifndef CONFIG_EFI
20853+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
20854+#endif
20855+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
20856+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
20857+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
20858+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
20859 1: wrmsr /* Make changes effective */
20860
20861 /* Setup cr0 */
20862@@ -246,6 +250,7 @@ ENTRY(secondary_startup_64)
20863 * jump. In addition we need to ensure %cs is set so we make this
20864 * a far return.
20865 */
20866+ pax_set_fptr_mask
20867 movq initial_code(%rip),%rax
20868 pushq $0 # fake return address to stop unwinder
20869 pushq $__KERNEL_CS # set correct cs
20870@@ -284,7 +289,7 @@ ENDPROC(start_cpu0)
20871 bad_address:
20872 jmp bad_address
20873
20874- .section ".init.text","ax"
20875+ __INIT
20876 .globl early_idt_handlers
20877 early_idt_handlers:
20878 # 104(%rsp) %rflags
20879@@ -343,7 +348,7 @@ ENTRY(early_idt_handler)
20880 call dump_stack
20881 #ifdef CONFIG_KALLSYMS
20882 leaq early_idt_ripmsg(%rip),%rdi
20883- movq 40(%rsp),%rsi # %rip again
20884+ movq 88(%rsp),%rsi # %rip again
20885 call __print_symbol
20886 #endif
20887 #endif /* EARLY_PRINTK */
20888@@ -363,11 +368,15 @@ ENTRY(early_idt_handler)
20889 addq $16,%rsp # drop vector number and error code
20890 decl early_recursion_flag(%rip)
20891 INTERRUPT_RETURN
20892+ .previous
20893
20894+ __INITDATA
20895 .balign 4
20896 early_recursion_flag:
20897 .long 0
20898+ .previous
20899
20900+ .section .rodata,"a",@progbits
20901 #ifdef CONFIG_EARLY_PRINTK
20902 early_idt_msg:
20903 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
20904@@ -376,6 +385,7 @@ early_idt_ripmsg:
20905 #endif /* CONFIG_EARLY_PRINTK */
20906 .previous
20907
20908+ .section .rodata,"a",@progbits
20909 #define NEXT_PAGE(name) \
20910 .balign PAGE_SIZE; \
20911 ENTRY(name)
20912@@ -388,7 +398,6 @@ ENTRY(name)
20913 i = i + 1 ; \
20914 .endr
20915
20916- .data
20917 /*
20918 * This default setting generates an ident mapping at address 0x100000
20919 * and a mapping for the kernel that precisely maps virtual address
20920@@ -399,13 +408,41 @@ NEXT_PAGE(init_level4_pgt)
20921 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
20922 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
20923 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
20924+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
20925+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
20926+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
20927+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
20928+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
20929+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
20930 .org init_level4_pgt + L4_START_KERNEL*8, 0
20931 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
20932 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
20933
20934+#ifdef CONFIG_PAX_PER_CPU_PGD
20935+NEXT_PAGE(cpu_pgd)
20936+ .rept NR_CPUS
20937+ .fill 512,8,0
20938+ .endr
20939+#endif
20940+
20941 NEXT_PAGE(level3_ident_pgt)
20942 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
20943+#ifdef CONFIG_XEN
20944 .fill 511,8,0
20945+#else
20946+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
20947+ .fill 510,8,0
20948+#endif
20949+
20950+NEXT_PAGE(level3_vmalloc_start_pgt)
20951+ .fill 512,8,0
20952+
20953+NEXT_PAGE(level3_vmalloc_end_pgt)
20954+ .fill 512,8,0
20955+
20956+NEXT_PAGE(level3_vmemmap_pgt)
20957+ .fill L3_VMEMMAP_START,8,0
20958+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
20959
20960 NEXT_PAGE(level3_kernel_pgt)
20961 .fill L3_START_KERNEL,8,0
20962@@ -413,20 +450,23 @@ NEXT_PAGE(level3_kernel_pgt)
20963 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
20964 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
20965
20966+NEXT_PAGE(level2_vmemmap_pgt)
20967+ .fill 512,8,0
20968+
20969 NEXT_PAGE(level2_fixmap_pgt)
20970- .fill 506,8,0
20971- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
20972- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
20973- .fill 5,8,0
20974+ .fill 507,8,0
20975+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
20976+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
20977+ .fill 4,8,0
20978
20979-NEXT_PAGE(level1_fixmap_pgt)
20980+NEXT_PAGE(level1_vsyscall_pgt)
20981 .fill 512,8,0
20982
20983-NEXT_PAGE(level2_ident_pgt)
20984- /* Since I easily can, map the first 1G.
20985+ /* Since I easily can, map the first 2G.
20986 * Don't set NX because code runs from these pages.
20987 */
20988- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
20989+NEXT_PAGE(level2_ident_pgt)
20990+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
20991
20992 NEXT_PAGE(level2_kernel_pgt)
20993 /*
20994@@ -439,37 +479,59 @@ NEXT_PAGE(level2_kernel_pgt)
20995 * If you want to increase this then increase MODULES_VADDR
20996 * too.)
20997 */
20998- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
20999- KERNEL_IMAGE_SIZE/PMD_SIZE)
21000-
21001-NEXT_PAGE(level2_spare_pgt)
21002- .fill 512, 8, 0
21003+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
21004
21005 #undef PMDS
21006 #undef NEXT_PAGE
21007
21008- .data
21009+ .align PAGE_SIZE
21010+ENTRY(cpu_gdt_table)
21011+ .rept NR_CPUS
21012+ .quad 0x0000000000000000 /* NULL descriptor */
21013+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
21014+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
21015+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
21016+ .quad 0x00cffb000000ffff /* __USER32_CS */
21017+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
21018+ .quad 0x00affb000000ffff /* __USER_CS */
21019+
21020+#ifdef CONFIG_PAX_KERNEXEC
21021+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
21022+#else
21023+ .quad 0x0 /* unused */
21024+#endif
21025+
21026+ .quad 0,0 /* TSS */
21027+ .quad 0,0 /* LDT */
21028+ .quad 0,0,0 /* three TLS descriptors */
21029+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
21030+ /* asm/segment.h:GDT_ENTRIES must match this */
21031+
21032+ /* zero the remaining page */
21033+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
21034+ .endr
21035+
21036 .align 16
21037 .globl early_gdt_descr
21038 early_gdt_descr:
21039 .word GDT_ENTRIES*8-1
21040 early_gdt_descr_base:
21041- .quad INIT_PER_CPU_VAR(gdt_page)
21042+ .quad cpu_gdt_table
21043
21044 ENTRY(phys_base)
21045 /* This must match the first entry in level2_kernel_pgt */
21046 .quad 0x0000000000000000
21047
21048 #include "../../x86/xen/xen-head.S"
21049-
21050- .section .bss, "aw", @nobits
21051+
21052+ .section .rodata,"a",@progbits
21053 .align L1_CACHE_BYTES
21054 ENTRY(idt_table)
21055- .skip IDT_ENTRIES * 16
21056+ .fill 512,8,0
21057
21058 .align L1_CACHE_BYTES
21059 ENTRY(nmi_idt_table)
21060- .skip IDT_ENTRIES * 16
21061+ .fill 512,8,0
21062
21063 __PAGE_ALIGNED_BSS
21064 .align PAGE_SIZE
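head_64.S grows dedicated L4/L3 page-table slots for the vmalloc and vmemmap regions so they exist at boot and can be NX-marked by the btsq sequence in secondary_startup_64. The L4_* constants are just pgd_index() of each region's start: with 4-level paging a PGD slot covers 2^39 bytes (512GB) and the index is bits 39–47 of the virtual address. A sketch, plus the value it yields for 3.8's VMALLOC_START:

/* pgd_index() as used for the new L4_VMALLOC_START and friends. */
#include <stdint.h>
#include <stdio.h>

#define PGDIR_SHIFT	39
#define PTRS_PER_PGD	512

static uint64_t pgd_index(uint64_t address)
{
	return (address >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
}

int main(void)
{
	/* VMALLOC_START on 3.8/x86-64 */
	printf("%llu\n", (unsigned long long)pgd_index(0xffffc90000000000ULL));	/* 402 */
	return 0;
}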
21065diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
21066index 9c3bd4a..e1d9b35 100644
21067--- a/arch/x86/kernel/i386_ksyms_32.c
21068+++ b/arch/x86/kernel/i386_ksyms_32.c
21069@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
21070 EXPORT_SYMBOL(cmpxchg8b_emu);
21071 #endif
21072
21073+EXPORT_SYMBOL_GPL(cpu_gdt_table);
21074+
21075 /* Networking helper routines. */
21076 EXPORT_SYMBOL(csum_partial_copy_generic);
21077+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
21078+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
21079
21080 EXPORT_SYMBOL(__get_user_1);
21081 EXPORT_SYMBOL(__get_user_2);
21082@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
21083
21084 EXPORT_SYMBOL(csum_partial);
21085 EXPORT_SYMBOL(empty_zero_page);
21086+
21087+#ifdef CONFIG_PAX_KERNEXEC
21088+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
21089+#endif
21090diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
21091index 245a71d..89d9ce4 100644
21092--- a/arch/x86/kernel/i387.c
21093+++ b/arch/x86/kernel/i387.c
21094@@ -55,7 +55,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
21095 static inline bool interrupted_user_mode(void)
21096 {
21097 struct pt_regs *regs = get_irq_regs();
21098- return regs && user_mode_vm(regs);
21099+ return regs && user_mode(regs);
21100 }
21101
21102 /*
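The one-line i387.c change — user_mode_vm() to user_mode() — repeats throughout the patch. Upstream 3.8 had two predicates on i386: user_mode() checked only the CS RPL, while user_mode_vm() also counted VM86 mode as user mode; the PaX tree folds the VM86 check into user_mode() and retires the _vm variant. Roughly, with stand-in types (the struct and helper name are mine; the flag values match arch/x86):

/* Hedged sketch of the unified predicate. */
#include <stdbool.h>

#define X86_EFLAGS_VM	0x00020000UL
#define USER_RPL	3UL

struct regs_sketch { unsigned long cs, flags; };

static bool user_mode_sketch(const struct regs_sketch *regs)
{
	return (regs->cs & 3) == USER_RPL || (regs->flags & X86_EFLAGS_VM);
}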
21103diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
21104index 9a5c460..b332a4b 100644
21105--- a/arch/x86/kernel/i8259.c
21106+++ b/arch/x86/kernel/i8259.c
21107@@ -209,7 +209,7 @@ spurious_8259A_irq:
21108 "spurious 8259A interrupt: IRQ%d.\n", irq);
21109 spurious_irq_mask |= irqmask;
21110 }
21111- atomic_inc(&irq_err_count);
21112+ atomic_inc_unchecked(&irq_err_count);
21113 /*
21114 * Theoretically we do not have to handle this IRQ,
21115 * but in Linux this does not cause problems and is
21116@@ -333,14 +333,16 @@ static void init_8259A(int auto_eoi)
21117 /* (slave's support for AEOI in flat mode is to be investigated) */
21118 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
21119
21120+ pax_open_kernel();
21121 if (auto_eoi)
21122 /*
21123 * In AEOI mode we just have to mask the interrupt
21124 * when acking.
21125 */
21126- i8259A_chip.irq_mask_ack = disable_8259A_irq;
21127+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
21128 else
21129- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
21130+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
21131+ pax_close_kernel();
21132
21133 udelay(100); /* wait for 8259A to initialize */
21134
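init_8259A() assigns a function pointer at runtime, but under KERNEXEC structures like irq_chip are moved to read-only memory, so the store is bracketed with pax_open_kernel()/pax_close_kernel() and written through a *(void **)& cast to strip the constness added elsewhere in the patch. On x86 the open/close pair amounts to toggling CR0.WP with preemption off; a bare sketch (the _sketch names are mine, and the real helpers also deal with paravirt and barriers):

/* Hedged sketch of the CR0.WP toggle behind pax_open_kernel(). */
#define X86_CR0_WP	(1UL << 16)

static inline unsigned long pax_open_kernel_sketch(void)
{
	unsigned long cr0;

	asm volatile("mov %%cr0,%0" : "=r" (cr0));
	asm volatile("mov %0,%%cr0" : : "r" (cr0 & ~X86_CR0_WP));
	return cr0;			/* caller restores this */
}

static inline void pax_close_kernel_sketch(unsigned long cr0)
{
	asm volatile("mov %0,%%cr0" : : "r" (cr0));
}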
21135diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
21136index a979b5b..1d6db75 100644
21137--- a/arch/x86/kernel/io_delay.c
21138+++ b/arch/x86/kernel/io_delay.c
21139@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
21140 * Quirk table for systems that misbehave (lock up, etc.) if port
21141 * 0x80 is used:
21142 */
21143-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
21144+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
21145 {
21146 .callback = dmi_io_delay_0xed_port,
21147 .ident = "Compaq Presario V6000",
21148diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
21149index 8c96897..be66bfa 100644
21150--- a/arch/x86/kernel/ioport.c
21151+++ b/arch/x86/kernel/ioport.c
21152@@ -6,6 +6,7 @@
21153 #include <linux/sched.h>
21154 #include <linux/kernel.h>
21155 #include <linux/capability.h>
21156+#include <linux/security.h>
21157 #include <linux/errno.h>
21158 #include <linux/types.h>
21159 #include <linux/ioport.h>
21160@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
21161
21162 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
21163 return -EINVAL;
21164+#ifdef CONFIG_GRKERNSEC_IO
21165+ if (turn_on && grsec_disable_privio) {
21166+ gr_handle_ioperm();
21167+ return -EPERM;
21168+ }
21169+#endif
21170 if (turn_on && !capable(CAP_SYS_RAWIO))
21171 return -EPERM;
21172
21173@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
21174 * because the ->io_bitmap_max value must match the bitmap
21175 * contents:
21176 */
21177- tss = &per_cpu(init_tss, get_cpu());
21178+ tss = init_tss + get_cpu();
21179
21180 if (turn_on)
21181 bitmap_clear(t->io_bitmap_ptr, from, num);
21182@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
21183 return -EINVAL;
21184 /* Trying to gain more privileges? */
21185 if (level > old) {
21186+#ifdef CONFIG_GRKERNSEC_IO
21187+ if (grsec_disable_privio) {
21188+ gr_handle_iopl();
21189+ return -EPERM;
21190+ }
21191+#endif
21192 if (!capable(CAP_SYS_RAWIO))
21193 return -EPERM;
21194 }
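The ioport.c hunks put sys_ioperm() and sys_iopl() behind GRKERNSEC_IO: when grsec_disable_privio is set, even a CAP_SYS_RAWIO process gets EPERM, with gr_handle_ioperm()/gr_handle_iopl() logging the attempt. The effect is easy to observe from userspace (plain glibc; nothing grsecurity-specific in the code itself):

#include <stdio.h>
#include <sys/io.h>

int main(void)
{
	if (iopl(3) != 0)
		perror("iopl");		/* EPERM when grsec_disable_privio is set */
	else
		printf("raw port I/O granted\n");
	return 0;
}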
21195diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
21196index e4595f1..ee3bfb8 100644
21197--- a/arch/x86/kernel/irq.c
21198+++ b/arch/x86/kernel/irq.c
21199@@ -18,7 +18,7 @@
21200 #include <asm/mce.h>
21201 #include <asm/hw_irq.h>
21202
21203-atomic_t irq_err_count;
21204+atomic_unchecked_t irq_err_count;
21205
21206 /* Function pointer for generic interrupt vector handling */
21207 void (*x86_platform_ipi_callback)(void) = NULL;
21208@@ -122,9 +122,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
21209 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
21210 seq_printf(p, " Machine check polls\n");
21211 #endif
21212- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
21213+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
21214 #if defined(CONFIG_X86_IO_APIC)
21215- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
21216+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
21217 #endif
21218 return 0;
21219 }
21220@@ -164,10 +164,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
21221
21222 u64 arch_irq_stat(void)
21223 {
21224- u64 sum = atomic_read(&irq_err_count);
21225+ u64 sum = atomic_read_unchecked(&irq_err_count);
21226
21227 #ifdef CONFIG_X86_IO_APIC
21228- sum += atomic_read(&irq_mis_count);
21229+ sum += atomic_read_unchecked(&irq_mis_count);
21230 #endif
21231 return sum;
21232 }
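irq_err_count moving from atomic_t to atomic_unchecked_t is the PaX REFCOUNT opt-out: with REFCOUNT enabled, ordinary atomic_t arithmetic is instrumented to detect signed overflow (catching reference-count bugs), and the *_unchecked variants exempt pure statistics counters, where wrap-around is harmless. The opt-out type is shaped like this (names suffixed _sketch are mine, not the kernel's):

/* Hedged sketch: same layout as atomic_t, but the helpers carry no
 * overflow instrumentation. */
typedef struct { int counter; } atomic_unchecked_sketch_t;

static inline void atomic_inc_unchecked_sketch(atomic_unchecked_sketch_t *v)
{
	__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}

static inline int atomic_read_unchecked_sketch(const atomic_unchecked_sketch_t *v)
{
	return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}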
21233diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
21234index 344faf8..355f60d 100644
21235--- a/arch/x86/kernel/irq_32.c
21236+++ b/arch/x86/kernel/irq_32.c
21237@@ -39,7 +39,7 @@ static int check_stack_overflow(void)
21238 __asm__ __volatile__("andl %%esp,%0" :
21239 "=r" (sp) : "0" (THREAD_SIZE - 1));
21240
21241- return sp < (sizeof(struct thread_info) + STACK_WARN);
21242+ return sp < STACK_WARN;
21243 }
21244
21245 static void print_stack_overflow(void)
21246@@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
21247 * per-CPU IRQ handling contexts (thread information and stack)
21248 */
21249 union irq_ctx {
21250- struct thread_info tinfo;
21251- u32 stack[THREAD_SIZE/sizeof(u32)];
21252+ unsigned long previous_esp;
21253+ u32 stack[THREAD_SIZE/sizeof(u32)];
21254 } __attribute__((aligned(THREAD_SIZE)));
21255
21256 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
21257@@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
21258 static inline int
21259 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
21260 {
21261- union irq_ctx *curctx, *irqctx;
21262+ union irq_ctx *irqctx;
21263 u32 *isp, arg1, arg2;
21264
21265- curctx = (union irq_ctx *) current_thread_info();
21266 irqctx = __this_cpu_read(hardirq_ctx);
21267
21268 /*
21269@@ -92,16 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
21270 * handler) we can't do that and just have to keep using the
21271 * current stack (which is the irq stack already after all)
21272 */
21273- if (unlikely(curctx == irqctx))
21274+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
21275 return 0;
21276
21277 /* build the stack frame on the IRQ stack */
21278- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
21279- irqctx->tinfo.task = curctx->tinfo.task;
21280- irqctx->tinfo.previous_esp = current_stack_pointer;
21281+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
21282+ irqctx->previous_esp = current_stack_pointer;
21283
21284- /* Copy the preempt_count so that the [soft]irq checks work. */
21285- irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
21286+#ifdef CONFIG_PAX_MEMORY_UDEREF
21287+ __set_fs(MAKE_MM_SEG(0));
21288+#endif
21289
21290 if (unlikely(overflow))
21291 call_on_stack(print_stack_overflow, isp);
21292@@ -113,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
21293 : "0" (irq), "1" (desc), "2" (isp),
21294 "D" (desc->handle_irq)
21295 : "memory", "cc", "ecx");
21296+
21297+#ifdef CONFIG_PAX_MEMORY_UDEREF
21298+ __set_fs(current_thread_info()->addr_limit);
21299+#endif
21300+
21301 return 1;
21302 }
21303
21304@@ -121,29 +125,14 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
21305 */
21306 void __cpuinit irq_ctx_init(int cpu)
21307 {
21308- union irq_ctx *irqctx;
21309-
21310 if (per_cpu(hardirq_ctx, cpu))
21311 return;
21312
21313- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
21314- THREADINFO_GFP,
21315- THREAD_SIZE_ORDER));
21316- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
21317- irqctx->tinfo.cpu = cpu;
21318- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
21319- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
21320-
21321- per_cpu(hardirq_ctx, cpu) = irqctx;
21322-
21323- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
21324- THREADINFO_GFP,
21325- THREAD_SIZE_ORDER));
21326- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
21327- irqctx->tinfo.cpu = cpu;
21328- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
21329-
21330- per_cpu(softirq_ctx, cpu) = irqctx;
21331+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
21332+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
21333+
21334+ printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
21335+ cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
21336
21337 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
21338 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
21339@@ -152,7 +141,6 @@ void __cpuinit irq_ctx_init(int cpu)
21340 asmlinkage void do_softirq(void)
21341 {
21342 unsigned long flags;
21343- struct thread_info *curctx;
21344 union irq_ctx *irqctx;
21345 u32 *isp;
21346
21347@@ -162,15 +150,22 @@ asmlinkage void do_softirq(void)
21348 local_irq_save(flags);
21349
21350 if (local_softirq_pending()) {
21351- curctx = current_thread_info();
21352 irqctx = __this_cpu_read(softirq_ctx);
21353- irqctx->tinfo.task = curctx->task;
21354- irqctx->tinfo.previous_esp = current_stack_pointer;
21355+ irqctx->previous_esp = current_stack_pointer;
21356
21357 /* build the stack frame on the softirq stack */
21358- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
21359+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
21360+
21361+#ifdef CONFIG_PAX_MEMORY_UDEREF
21362+ __set_fs(MAKE_MM_SEG(0));
21363+#endif
21364
21365 call_on_stack(__do_softirq, isp);
21366+
21367+#ifdef CONFIG_PAX_MEMORY_UDEREF
21368+ __set_fs(current_thread_info()->addr_limit);
21369+#endif
21370+
21371 /*
21372 * Shouldn't happen, we returned above if in_interrupt():
21373 */
21374@@ -191,7 +186,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
21375 if (unlikely(!desc))
21376 return false;
21377
21378- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
21379+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
21380 if (unlikely(overflow))
21381 print_stack_overflow();
21382 desc->handle_irq(irq, desc);
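Two related changes in irq_32.c: the thread_info copy that used to sit at the bottom of each IRQ stack disappears (only previous_esp survives, so an IRQ stack no longer exposes a forgeable thread_info), and the "already on the IRQ stack?" test becomes a pointer-range check. Because IRQ contexts are THREAD_SIZE-aligned blocks, a single unsigned subtraction covers both bounds:

/* Hedged sketch of the nesting test: if sp lies below irqctx the
 * unsigned difference wraps to a huge value, so one comparison
 * checks "irqctx <= sp < irqctx + THREAD_SIZE". */
#include <stdbool.h>
#include <stdint.h>

#define THREAD_SIZE	8192UL	/* typical i386 value */

static bool on_this_irq_stack(uintptr_t sp, uintptr_t irqctx)
{
	return sp - irqctx < THREAD_SIZE;
}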
21383diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
21384index d04d3ec..ea4b374 100644
21385--- a/arch/x86/kernel/irq_64.c
21386+++ b/arch/x86/kernel/irq_64.c
21387@@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
21388 u64 estack_top, estack_bottom;
21389 u64 curbase = (u64)task_stack_page(current);
21390
21391- if (user_mode_vm(regs))
21392+ if (user_mode(regs))
21393 return;
21394
21395 if (regs->sp >= curbase + sizeof(struct thread_info) +
21396diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
21397index dc1404b..bbc43e7 100644
21398--- a/arch/x86/kernel/kdebugfs.c
21399+++ b/arch/x86/kernel/kdebugfs.c
21400@@ -27,7 +27,7 @@ struct setup_data_node {
21401 u32 len;
21402 };
21403
21404-static ssize_t setup_data_read(struct file *file, char __user *user_buf,
21405+static ssize_t __size_overflow(3) setup_data_read(struct file *file, char __user *user_buf,
21406 size_t count, loff_t *ppos)
21407 {
21408 struct setup_data_node *node = file->private_data;
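The kdebugfs.c hunk is annotation only: __size_overflow(3) tags setup_data_read()'s third parameter (count) for the PaX size_overflow GCC plugin, which recomputes the expressions feeding that argument in a wider type and flags any overflow. When the plugin isn't loaded the marker has to compile away; a plausible wiring (the SIZE_OVERFLOW_PLUGIN guard is illustrative, not the patch's actual config symbol):

#ifdef SIZE_OVERFLOW_PLUGIN
# define __size_overflow(...)	__attribute__((size_overflow(__VA_ARGS__)))
#else
# define __size_overflow(...)
#endif

/* marks the 2nd parameter for overflow checking */
static long read_sketch(char *buf, unsigned long count) __size_overflow(2);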
21409diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
21410index 836f832..a8bda67 100644
21411--- a/arch/x86/kernel/kgdb.c
21412+++ b/arch/x86/kernel/kgdb.c
21413@@ -127,11 +127,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
21414 #ifdef CONFIG_X86_32
21415 switch (regno) {
21416 case GDB_SS:
21417- if (!user_mode_vm(regs))
21418+ if (!user_mode(regs))
21419 *(unsigned long *)mem = __KERNEL_DS;
21420 break;
21421 case GDB_SP:
21422- if (!user_mode_vm(regs))
21423+ if (!user_mode(regs))
21424 *(unsigned long *)mem = kernel_stack_pointer(regs);
21425 break;
21426 case GDB_GS:
21427@@ -229,7 +229,10 @@ static void kgdb_correct_hw_break(void)
21428 bp->attr.bp_addr = breakinfo[breakno].addr;
21429 bp->attr.bp_len = breakinfo[breakno].len;
21430 bp->attr.bp_type = breakinfo[breakno].type;
21431- info->address = breakinfo[breakno].addr;
21432+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
21433+ info->address = ktla_ktva(breakinfo[breakno].addr);
21434+ else
21435+ info->address = breakinfo[breakno].addr;
21436 info->len = breakinfo[breakno].len;
21437 info->type = breakinfo[breakno].type;
21438 val = arch_install_hw_breakpoint(bp);
21439@@ -476,12 +479,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
21440 case 'k':
21441 /* clear the trace bit */
21442 linux_regs->flags &= ~X86_EFLAGS_TF;
21443- atomic_set(&kgdb_cpu_doing_single_step, -1);
21444+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
21445
21446 /* set the trace bit if we're stepping */
21447 if (remcomInBuffer[0] == 's') {
21448 linux_regs->flags |= X86_EFLAGS_TF;
21449- atomic_set(&kgdb_cpu_doing_single_step,
21450+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
21451 raw_smp_processor_id());
21452 }
21453
21454@@ -546,7 +549,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
21455
21456 switch (cmd) {
21457 case DIE_DEBUG:
21458- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
21459+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
21460 if (user_mode(regs))
21461 return single_step_cont(regs, args);
21462 break;
21463@@ -751,11 +754,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
21464 #endif /* CONFIG_DEBUG_RODATA */
21465
21466 bpt->type = BP_BREAKPOINT;
21467- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
21468+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
21469 BREAK_INSTR_SIZE);
21470 if (err)
21471 return err;
21472- err = probe_kernel_write((char *)bpt->bpt_addr,
21473+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
21474 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
21475 #ifdef CONFIG_DEBUG_RODATA
21476 if (!err)
21477@@ -768,7 +771,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
21478 return -EBUSY;
21479 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
21480 BREAK_INSTR_SIZE);
21481- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
21482+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
21483 if (err)
21484 return err;
21485 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
21486@@ -793,13 +796,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
21487 if (mutex_is_locked(&text_mutex))
21488 goto knl_write;
21489 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
21490- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
21491+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
21492 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
21493 goto knl_write;
21494 return err;
21495 knl_write:
21496 #endif /* CONFIG_DEBUG_RODATA */
21497- return probe_kernel_write((char *)bpt->bpt_addr,
21498+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
21499 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
21500 }
21501
21502diff --git a/arch/x86/kernel/kprobes-opt.c b/arch/x86/kernel/kprobes-opt.c
21503index c5e410e..ed5a7f0 100644
21504--- a/arch/x86/kernel/kprobes-opt.c
21505+++ b/arch/x86/kernel/kprobes-opt.c
21506@@ -338,7 +338,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
21507 * Verify if the address gap is in 2GB range, because this uses
21508 * a relative jump.
21509 */
21510- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
21511+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
21512 if (abs(rel) > 0x7fffffff)
21513 return -ERANGE;
21514
21515@@ -353,16 +353,16 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
21516 op->optinsn.size = ret;
21517
21518 /* Copy arch-dep-instance from template */
21519- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
21520+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
21521
21522 /* Set probe information */
21523 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
21524
21525 /* Set probe function call */
21526- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
21527+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
21528
21529 /* Set returning jmp instruction at the tail of out-of-line buffer */
21530- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
21531+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
21532 (u8 *)op->kp.addr + op->optinsn.size);
21533
21534 flush_icache_range((unsigned long) buf,
21535@@ -385,7 +385,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
21536 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
21537
21538 /* Backup instructions which will be replaced by jump address */
21539- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
21540+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
21541 RELATIVE_ADDR_SIZE);
21542
21543 insn_buf[0] = RELATIVEJUMP_OPCODE;
21544@@ -483,7 +483,7 @@ setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
21545 /* This kprobe is really able to run optimized path. */
21546 op = container_of(p, struct optimized_kprobe, kp);
21547 /* Detour through copied instructions */
21548- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
21549+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
21550 if (!reenter)
21551 reset_current_kprobe();
21552 preempt_enable_no_resched();
21553diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
21554index 57916c0..9e0b9d0 100644
21555--- a/arch/x86/kernel/kprobes.c
21556+++ b/arch/x86/kernel/kprobes.c
21557@@ -119,9 +119,12 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
21558 s32 raddr;
21559 } __attribute__((packed)) *insn;
21560
21561- insn = (struct __arch_relative_insn *)from;
21562+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
21563+
21564+ pax_open_kernel();
21565 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
21566 insn->op = op;
21567+ pax_close_kernel();
21568 }
21569
21570 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
21571@@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t *opcodes)
21572 kprobe_opcode_t opcode;
21573 kprobe_opcode_t *orig_opcodes = opcodes;
21574
21575- if (search_exception_tables((unsigned long)opcodes))
21576+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
21577 return 0; /* Page fault may occur on this address. */
21578
21579 retry:
21580@@ -238,9 +241,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
21581 * for the first byte, we can recover the original instruction
21582 * from it and kp->opcode.
21583 */
21584- memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
21585+ memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
21586 buf[0] = kp->opcode;
21587- return (unsigned long)buf;
21588+ return ktva_ktla((unsigned long)buf);
21589 }
21590
21591 /*
21592@@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
21593 /* Another subsystem puts a breakpoint, failed to recover */
21594 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
21595 return 0;
21596+ pax_open_kernel();
21597 memcpy(dest, insn.kaddr, insn.length);
21598+ pax_close_kernel();
21599
21600 #ifdef CONFIG_X86_64
21601 if (insn_rip_relative(&insn)) {
21602@@ -355,7 +360,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
21603 newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
21604 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
21605 disp = (u8 *) dest + insn_offset_displacement(&insn);
21606+ pax_open_kernel();
21607 *(s32 *) disp = (s32) newdisp;
21608+ pax_close_kernel();
21609 }
21610 #endif
21611 return insn.length;
21612@@ -485,7 +492,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
21613 * nor set current_kprobe, because it doesn't use single
21614 * stepping.
21615 */
21616- regs->ip = (unsigned long)p->ainsn.insn;
21617+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
21618 preempt_enable_no_resched();
21619 return;
21620 }
21621@@ -502,9 +509,9 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
21622 regs->flags &= ~X86_EFLAGS_IF;
21623 /* single step inline if the instruction is an int3 */
21624 if (p->opcode == BREAKPOINT_INSTRUCTION)
21625- regs->ip = (unsigned long)p->addr;
21626+ regs->ip = ktla_ktva((unsigned long)p->addr);
21627 else
21628- regs->ip = (unsigned long)p->ainsn.insn;
21629+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
21630 }
21631
21632 /*
21633@@ -600,7 +607,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
21634 setup_singlestep(p, regs, kcb, 0);
21635 return 1;
21636 }
21637- } else if (*addr != BREAKPOINT_INSTRUCTION) {
21638+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
21639 /*
21640 * The breakpoint instruction was removed right
21641 * after we hit it. Another cpu has removed
21642@@ -651,6 +658,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
21643 " movq %rax, 152(%rsp)\n"
21644 RESTORE_REGS_STRING
21645 " popfq\n"
21646+#ifdef KERNEXEC_PLUGIN
21647+ " btsq $63,(%rsp)\n"
21648+#endif
21649 #else
21650 " pushf\n"
21651 SAVE_REGS_STRING
21652@@ -788,7 +798,7 @@ static void __kprobes
21653 resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
21654 {
21655 unsigned long *tos = stack_addr(regs);
21656- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
21657+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
21658 unsigned long orig_ip = (unsigned long)p->addr;
21659 kprobe_opcode_t *insn = p->ainsn.insn;
21660
21661@@ -970,7 +980,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
21662 struct die_args *args = data;
21663 int ret = NOTIFY_DONE;
21664
21665- if (args->regs && user_mode_vm(args->regs))
21666+ if (args->regs && user_mode(args->regs))
21667 return ret;
21668
21669 switch (val) {
21670diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
21671index 9c2bd8b..bb1131c 100644
21672--- a/arch/x86/kernel/kvm.c
21673+++ b/arch/x86/kernel/kvm.c
21674@@ -452,7 +452,7 @@ static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
21675 return NOTIFY_OK;
21676 }
21677
21678-static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
21679+static struct notifier_block kvm_cpu_notifier = {
21680 .notifier_call = kvm_cpu_notify,
21681 };
21682 #endif
21683diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
21684index ebc9873..1b9724b 100644
21685--- a/arch/x86/kernel/ldt.c
21686+++ b/arch/x86/kernel/ldt.c
21687@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
21688 if (reload) {
21689 #ifdef CONFIG_SMP
21690 preempt_disable();
21691- load_LDT(pc);
21692+ load_LDT_nolock(pc);
21693 if (!cpumask_equal(mm_cpumask(current->mm),
21694 cpumask_of(smp_processor_id())))
21695 smp_call_function(flush_ldt, current->mm, 1);
21696 preempt_enable();
21697 #else
21698- load_LDT(pc);
21699+ load_LDT_nolock(pc);
21700 #endif
21701 }
21702 if (oldsize) {
21703@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
21704 return err;
21705
21706 for (i = 0; i < old->size; i++)
21707- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
21708+ write_ldt_entry(new->ldt, i, old->ldt + i);
21709 return 0;
21710 }
21711
21712@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
21713 retval = copy_ldt(&mm->context, &old_mm->context);
21714 mutex_unlock(&old_mm->context.lock);
21715 }
21716+
21717+ if (tsk == current) {
21718+ mm->context.vdso = 0;
21719+
21720+#ifdef CONFIG_X86_32
21721+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21722+ mm->context.user_cs_base = 0UL;
21723+ mm->context.user_cs_limit = ~0UL;
21724+
21725+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
21726+ cpus_clear(mm->context.cpu_user_cs_mask);
21727+#endif
21728+
21729+#endif
21730+#endif
21731+
21732+ }
21733+
21734 return retval;
21735 }
21736
21737@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
21738 }
21739 }
21740
21741+#ifdef CONFIG_PAX_SEGMEXEC
21742+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
21743+ error = -EINVAL;
21744+ goto out_unlock;
21745+ }
21746+#endif
21747+
21748 fill_ldt(&ldt, &ldt_info);
21749 if (oldmode)
21750 ldt.avl = 0;
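The write_ldt() check enforces a SEGMEXEC invariant: SEGMEXEC halves the user address space with segment limits, so a task must not be able to smuggle in its own code segment via modify_ldt() and escape the split; such requests now fail with EINVAL. What the rejected call looks like from userspace (failure expected only under this patch, for a SEGMEXEC task):

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <asm/ldt.h>

int main(void)
{
	struct user_desc d = {
		.entry_number	= 0,
		.base_addr	= 0,
		.limit		= 0xfffff,
		.seg_32bit	= 1,
		.contents	= MODIFY_LDT_CONTENTS_CODE,	/* the rejected case */
		.limit_in_pages	= 1,
		.useable	= 1,
	};

	if (syscall(SYS_modify_ldt, 1, &d, sizeof(d)) < 0)
		perror("modify_ldt");	/* EINVAL for SEGMEXEC tasks here */
	return 0;
}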
21751diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
21752index 5b19e4d..6476a76 100644
21753--- a/arch/x86/kernel/machine_kexec_32.c
21754+++ b/arch/x86/kernel/machine_kexec_32.c
21755@@ -26,7 +26,7 @@
21756 #include <asm/cacheflush.h>
21757 #include <asm/debugreg.h>
21758
21759-static void set_idt(void *newidt, __u16 limit)
21760+static void set_idt(struct desc_struct *newidt, __u16 limit)
21761 {
21762 struct desc_ptr curidt;
21763
21764@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
21765 }
21766
21767
21768-static void set_gdt(void *newgdt, __u16 limit)
21769+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
21770 {
21771 struct desc_ptr curgdt;
21772
21773@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
21774 }
21775
21776 control_page = page_address(image->control_code_page);
21777- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
21778+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
21779
21780 relocate_kernel_ptr = control_page;
21781 page_list[PA_CONTROL_PAGE] = __pa(control_page);
21782diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
21783index 3a04b22..1d2eb09 100644
21784--- a/arch/x86/kernel/microcode_core.c
21785+++ b/arch/x86/kernel/microcode_core.c
21786@@ -512,7 +512,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
21787 return NOTIFY_OK;
21788 }
21789
21790-static struct notifier_block __refdata mc_cpu_notifier = {
21791+static struct notifier_block mc_cpu_notifier = {
21792 .notifier_call = mc_cpu_callback,
21793 };
21794
21795diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
21796index 3544aed..01ddc1c 100644
21797--- a/arch/x86/kernel/microcode_intel.c
21798+++ b/arch/x86/kernel/microcode_intel.c
21799@@ -431,13 +431,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
21800
21801 static int get_ucode_user(void *to, const void *from, size_t n)
21802 {
21803- return copy_from_user(to, from, n);
21804+ return copy_from_user(to, (const void __force_user *)from, n);
21805 }
21806
21807 static enum ucode_state
21808 request_microcode_user(int cpu, const void __user *buf, size_t size)
21809 {
21810- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
21811+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
21812 }
21813
21814 static void microcode_fini_cpu(int cpu)
21815diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
21816index 216a4d7..228255a 100644
21817--- a/arch/x86/kernel/module.c
21818+++ b/arch/x86/kernel/module.c
21819@@ -43,15 +43,60 @@ do { \
21820 } while (0)
21821 #endif
21822
21823-void *module_alloc(unsigned long size)
21824+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
21825 {
21826- if (PAGE_ALIGN(size) > MODULES_LEN)
21827+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
21828 return NULL;
21829 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
21830- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
21831+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
21832 -1, __builtin_return_address(0));
21833 }
21834
21835+void *module_alloc(unsigned long size)
21836+{
21837+
21838+#ifdef CONFIG_PAX_KERNEXEC
21839+ return __module_alloc(size, PAGE_KERNEL);
21840+#else
21841+ return __module_alloc(size, PAGE_KERNEL_EXEC);
21842+#endif
21843+
21844+}
21845+
21846+#ifdef CONFIG_PAX_KERNEXEC
21847+#ifdef CONFIG_X86_32
21848+void *module_alloc_exec(unsigned long size)
21849+{
21850+ struct vm_struct *area;
21851+
21852+ if (size == 0)
21853+ return NULL;
21854+
21855+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
21856+ return area ? area->addr : NULL;
21857+}
21858+EXPORT_SYMBOL(module_alloc_exec);
21859+
21860+void module_free_exec(struct module *mod, void *module_region)
21861+{
21862+ vunmap(module_region);
21863+}
21864+EXPORT_SYMBOL(module_free_exec);
21865+#else
21866+void module_free_exec(struct module *mod, void *module_region)
21867+{
21868+ module_free(mod, module_region);
21869+}
21870+EXPORT_SYMBOL(module_free_exec);
21871+
21872+void *module_alloc_exec(unsigned long size)
21873+{
21874+ return __module_alloc(size, PAGE_KERNEL_RX);
21875+}
21876+EXPORT_SYMBOL(module_alloc_exec);
21877+#endif
21878+#endif
21879+
21880 #ifdef CONFIG_X86_32
21881 int apply_relocate(Elf32_Shdr *sechdrs,
21882 const char *strtab,
21883@@ -62,14 +107,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
21884 unsigned int i;
21885 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
21886 Elf32_Sym *sym;
21887- uint32_t *location;
21888+ uint32_t *plocation, location;
21889
21890 DEBUGP("Applying relocate section %u to %u\n",
21891 relsec, sechdrs[relsec].sh_info);
21892 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
21893 /* This is where to make the change */
21894- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
21895- + rel[i].r_offset;
21896+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
21897+ location = (uint32_t)plocation;
21898+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
21899+ plocation = ktla_ktva((void *)plocation);
21900 /* This is the symbol it is referring to. Note that all
21901 undefined symbols have been resolved. */
21902 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
21903@@ -78,11 +125,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
21904 switch (ELF32_R_TYPE(rel[i].r_info)) {
21905 case R_386_32:
21906 /* We add the value into the location given */
21907- *location += sym->st_value;
21908+ pax_open_kernel();
21909+ *plocation += sym->st_value;
21910+ pax_close_kernel();
21911 break;
21912 case R_386_PC32:
21913 /* Add the value, subtract its position */
21914- *location += sym->st_value - (uint32_t)location;
21915+ pax_open_kernel();
21916+ *plocation += sym->st_value - location;
21917+ pax_close_kernel();
21918 break;
21919 default:
21920 pr_err("%s: Unknown relocation: %u\n",
21921@@ -127,21 +178,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
21922 case R_X86_64_NONE:
21923 break;
21924 case R_X86_64_64:
21925+ pax_open_kernel();
21926 *(u64 *)loc = val;
21927+ pax_close_kernel();
21928 break;
21929 case R_X86_64_32:
21930+ pax_open_kernel();
21931 *(u32 *)loc = val;
21932+ pax_close_kernel();
21933 if (val != *(u32 *)loc)
21934 goto overflow;
21935 break;
21936 case R_X86_64_32S:
21937+ pax_open_kernel();
21938 *(s32 *)loc = val;
21939+ pax_close_kernel();
21940 if ((s64)val != *(s32 *)loc)
21941 goto overflow;
21942 break;
21943 case R_X86_64_PC32:
21944 val -= (u64)loc;
21945+ pax_open_kernel();
21946 *(u32 *)loc = val;
21947+ pax_close_kernel();
21948+
21949 #if 0
21950 if ((s64)val != *(s32 *)loc)
21951 goto overflow;
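Taken together, the module.c changes enforce W^X for module memory under KERNEXEC: module_alloc() hands back non-executable RW memory, module_alloc_exec() supplies a separate executable region (PAGE_KERNEL_RX on 64-bit, a vmap area between MODULES_EXEC_VADDR and MODULES_EXEC_END on 32-bit), and every relocation write into executable pages is bracketed by pax_open_kernel()/pax_close_kernel(). A userspace sketch of the same discipline, assuming x86-64 and using mprotect() in place of the kernel primitives:

/* Allocate RW, write the code, then flip to RX before running it. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;
	unsigned char ret_insn = 0xc3;   /* x86-64 near return */

	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) { perror("mmap"); return 1; }

	memcpy(buf, &ret_insn, 1);       /* "apply relocations" while RW */

	if (mprotect(buf, len, PROT_READ | PROT_EXEC)) { /* drop write, gain exec */
		perror("mprotect");
		return 1;
	}

	((void (*)(void))buf)();         /* run from the now-RX page */
	puts("executed RX page");
	return 0;
}

At no point is the page writable and executable at the same time, which is exactly the property the split allocators preserve for modules.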
21952diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
21953index 4929502..686c291 100644
21954--- a/arch/x86/kernel/msr.c
21955+++ b/arch/x86/kernel/msr.c
21956@@ -234,7 +234,7 @@ static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb,
21957 return notifier_from_errno(err);
21958 }
21959
21960-static struct notifier_block __refdata msr_class_cpu_notifier = {
21961+static struct notifier_block msr_class_cpu_notifier = {
21962 .notifier_call = msr_class_cpu_callback,
21963 };
21964
21965diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
21966index f84f5c5..f404e81 100644
21967--- a/arch/x86/kernel/nmi.c
21968+++ b/arch/x86/kernel/nmi.c
21969@@ -105,7 +105,7 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2
21970 return handled;
21971 }
21972
21973-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
21974+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
21975 {
21976 struct nmi_desc *desc = nmi_to_desc(type);
21977 unsigned long flags;
21978@@ -129,9 +129,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
21979 * event confuses some handlers (kdump uses this flag)
21980 */
21981 if (action->flags & NMI_FLAG_FIRST)
21982- list_add_rcu(&action->list, &desc->head);
21983+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
21984 else
21985- list_add_tail_rcu(&action->list, &desc->head);
21986+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
21987
21988 spin_unlock_irqrestore(&desc->lock, flags);
21989 return 0;
21990@@ -154,7 +154,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
21991 if (!strcmp(n->name, name)) {
21992 WARN(in_nmi(),
21993 "Trying to free NMI (%s) from NMI context!\n", n->name);
21994- list_del_rcu(&n->list);
21995+ pax_list_del_rcu((struct list_head *)&n->list);
21996 break;
21997 }
21998 }
21999@@ -479,6 +479,17 @@ static inline void nmi_nesting_postprocess(void)
22000 dotraplinkage notrace __kprobes void
22001 do_nmi(struct pt_regs *regs, long error_code)
22002 {
22003+
22004+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
22005+ if (!user_mode(regs)) {
22006+ unsigned long cs = regs->cs & 0xFFFF;
22007+ unsigned long ip = ktva_ktla(regs->ip);
22008+
22009+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
22010+ regs->ip = ip;
22011+ }
22012+#endif
22013+
22014 nmi_nesting_preprocess(regs);
22015
22016 nmi_enter();
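Two ideas are layered in the nmi.c hunks. First, registered nmiaction descriptors become const so they can live in read-only memory; their list linkage is then updated through pax_list_add_rcu()/pax_list_del_rcu(), which open a brief kernel-write window around the one field that must change. Second, under 32-bit KERNEXEC the saved NMI instruction pointer is translated from the kernel-text alias back to the canonical address (ktva_ktla) before further processing. A rough standalone model of the first idea, with our own names standing in for the pax_list helpers:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

struct nmiaction_like {
	struct list_head list;   /* the only mutable field */
	const char *name;
};

static void guarded_list_add(struct list_head *entry, struct list_head *head)
{
	/* In the kernel this write happens between pax_open_kernel()
	 * and pax_close_kernel(); here it is a plain insertion. */
	entry->next = head->next;
	entry->prev = head;
	head->next->prev = entry;
	head->next = entry;
}

int main(void)
{
	static struct list_head head = { &head, &head };
	/* A truly .rodata descriptor would fault on write without the
	 * open/close window the patch provides around this one field. */
	static struct nmiaction_like act = { .name = "demo" };

	guarded_list_add(&act.list, &head);
	printf("registered: %s\n", ((struct nmiaction_like *)head.next)->name);
	return 0;
}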
22017diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
22018index 6d9582e..f746287 100644
22019--- a/arch/x86/kernel/nmi_selftest.c
22020+++ b/arch/x86/kernel/nmi_selftest.c
22021@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
22022 {
22023 /* trap all the unknown NMIs we may generate */
22024 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
22025- __initdata);
22026+ __initconst);
22027 }
22028
22029 static void __init cleanup_nmi_testsuite(void)
22030@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
22031 unsigned long timeout;
22032
22033 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
22034- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
22035+ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
22036 nmi_fail = FAILURE;
22037 return;
22038 }
22039diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
22040index 676b8c7..870ba04 100644
22041--- a/arch/x86/kernel/paravirt-spinlocks.c
22042+++ b/arch/x86/kernel/paravirt-spinlocks.c
22043@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
22044 arch_spin_lock(lock);
22045 }
22046
22047-struct pv_lock_ops pv_lock_ops = {
22048+struct pv_lock_ops pv_lock_ops __read_only = {
22049 #ifdef CONFIG_SMP
22050 .spin_is_locked = __ticket_spin_is_locked,
22051 .spin_is_contended = __ticket_spin_is_contended,
22052diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
22053index 17fff18..5cfa0f4 100644
22054--- a/arch/x86/kernel/paravirt.c
22055+++ b/arch/x86/kernel/paravirt.c
22056@@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
22057 {
22058 return x;
22059 }
22060+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
22061+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
22062+#endif
22063
22064 void __init default_banner(void)
22065 {
22066@@ -147,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
22067 if (opfunc == NULL)
22068 /* If there's no function, patch it with a ud2a (BUG) */
22069 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
22070- else if (opfunc == _paravirt_nop)
22071+ else if (opfunc == (void *)_paravirt_nop)
22072 /* If the operation is a nop, then nop the callsite */
22073 ret = paravirt_patch_nop();
22074
22075 /* identity functions just return their single argument */
22076- else if (opfunc == _paravirt_ident_32)
22077+ else if (opfunc == (void *)_paravirt_ident_32)
22078 ret = paravirt_patch_ident_32(insnbuf, len);
22079- else if (opfunc == _paravirt_ident_64)
22080+ else if (opfunc == (void *)_paravirt_ident_64)
22081 ret = paravirt_patch_ident_64(insnbuf, len);
22082+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
22083+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
22084+ ret = paravirt_patch_ident_64(insnbuf, len);
22085+#endif
22086
22087 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
22088 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
22089@@ -180,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
22090 if (insn_len > len || start == NULL)
22091 insn_len = len;
22092 else
22093- memcpy(insnbuf, start, insn_len);
22094+ memcpy(insnbuf, ktla_ktva(start), insn_len);
22095
22096 return insn_len;
22097 }
22098@@ -304,7 +311,7 @@ void arch_flush_lazy_mmu_mode(void)
22099 preempt_enable();
22100 }
22101
22102-struct pv_info pv_info = {
22103+struct pv_info pv_info __read_only = {
22104 .name = "bare hardware",
22105 .paravirt_enabled = 0,
22106 .kernel_rpl = 0,
22107@@ -315,16 +322,16 @@ struct pv_info pv_info = {
22108 #endif
22109 };
22110
22111-struct pv_init_ops pv_init_ops = {
22112+struct pv_init_ops pv_init_ops __read_only = {
22113 .patch = native_patch,
22114 };
22115
22116-struct pv_time_ops pv_time_ops = {
22117+struct pv_time_ops pv_time_ops __read_only = {
22118 .sched_clock = native_sched_clock,
22119 .steal_clock = native_steal_clock,
22120 };
22121
22122-struct pv_irq_ops pv_irq_ops = {
22123+struct pv_irq_ops pv_irq_ops __read_only = {
22124 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
22125 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
22126 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
22127@@ -336,7 +343,7 @@ struct pv_irq_ops pv_irq_ops = {
22128 #endif
22129 };
22130
22131-struct pv_cpu_ops pv_cpu_ops = {
22132+struct pv_cpu_ops pv_cpu_ops __read_only = {
22133 .cpuid = native_cpuid,
22134 .get_debugreg = native_get_debugreg,
22135 .set_debugreg = native_set_debugreg,
22136@@ -395,21 +402,26 @@ struct pv_cpu_ops pv_cpu_ops = {
22137 .end_context_switch = paravirt_nop,
22138 };
22139
22140-struct pv_apic_ops pv_apic_ops = {
22141+struct pv_apic_ops pv_apic_ops __read_only = {
22142 #ifdef CONFIG_X86_LOCAL_APIC
22143 .startup_ipi_hook = paravirt_nop,
22144 #endif
22145 };
22146
22147-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
22148+#ifdef CONFIG_X86_32
22149+#ifdef CONFIG_X86_PAE
22150+/* 64-bit pagetable entries */
22151+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
22152+#else
22153 /* 32-bit pagetable entries */
22154 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
22155+#endif
22156 #else
22157 /* 64-bit pagetable entries */
22158 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
22159 #endif
22160
22161-struct pv_mmu_ops pv_mmu_ops = {
22162+struct pv_mmu_ops pv_mmu_ops __read_only = {
22163
22164 .read_cr2 = native_read_cr2,
22165 .write_cr2 = native_write_cr2,
22166@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
22167 .make_pud = PTE_IDENT,
22168
22169 .set_pgd = native_set_pgd,
22170+ .set_pgd_batched = native_set_pgd_batched,
22171 #endif
22172 #endif /* PAGETABLE_LEVELS >= 3 */
22173
22174@@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
22175 },
22176
22177 .set_fixmap = native_set_fixmap,
22178+
22179+#ifdef CONFIG_PAX_KERNEXEC
22180+ .pax_open_kernel = native_pax_open_kernel,
22181+ .pax_close_kernel = native_pax_close_kernel,
22182+#endif
22183+
22184 };
22185
22186 EXPORT_SYMBOL_GPL(pv_time_ops);
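The recurring change in paravirt.c is marking every pv_*_ops dispatch table __read_only: these are single, global structs of function pointers consulted on hot paths, so leaving them writable gives an attacker one-stop control over interrupts, MMU operations, and CPU state. The PAE hunks additionally give _paravirt_ident_64 a callee-save thunk so 64-bit PTE identity ops keep working once the tables are frozen. A small model of the freeze, using C const in place of __read_only:

#include <stdio.h>

struct pv_ops_like {
	unsigned long (*read_cr2)(void);
	void (*write_cr2)(unsigned long);
};

static unsigned long fake_cr2;
static unsigned long native_read_cr2(void)    { return fake_cr2; }
static void native_write_cr2(unsigned long v) { fake_cr2 = v; }

/* "const" here plays the role of __read_only in the patch: the
 * dispatch pointers land in read-only storage at link time. */
static const struct pv_ops_like pv_mmu_like = {
	.read_cr2  = native_read_cr2,
	.write_cr2 = native_write_cr2,
};

int main(void)
{
	pv_mmu_like.write_cr2(0x1234);
	printf("cr2 = %#lx\n", pv_mmu_like.read_cr2());
	/* pv_mmu_like.read_cr2 = NULL;  -- would not compile: const */
	return 0;
}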
22187diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
22188index 35ccf75..7a15747 100644
22189--- a/arch/x86/kernel/pci-iommu_table.c
22190+++ b/arch/x86/kernel/pci-iommu_table.c
22191@@ -2,7 +2,7 @@
22192 #include <asm/iommu_table.h>
22193 #include <linux/string.h>
22194 #include <linux/kallsyms.h>
22195-
22196+#include <linux/sched.h>
22197
22198 #define DEBUG 1
22199
22200diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
22201index 6c483ba..d10ce2f 100644
22202--- a/arch/x86/kernel/pci-swiotlb.c
22203+++ b/arch/x86/kernel/pci-swiotlb.c
22204@@ -32,7 +32,7 @@ static void x86_swiotlb_free_coherent(struct device *dev, size_t size,
22205 void *vaddr, dma_addr_t dma_addr,
22206 struct dma_attrs *attrs)
22207 {
22208- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
22209+ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
22210 }
22211
22212 static struct dma_map_ops swiotlb_dma_ops = {
22213diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
22214index 2ed787f..f70c9f6 100644
22215--- a/arch/x86/kernel/process.c
22216+++ b/arch/x86/kernel/process.c
22217@@ -36,7 +36,8 @@
22218 * section. Since TSS's are completely CPU-local, we want them
22219 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
22220 */
22221-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
22222+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
22223+EXPORT_SYMBOL(init_tss);
22224
22225 #ifdef CONFIG_X86_64
22226 static DEFINE_PER_CPU(unsigned char, is_idle);
22227@@ -92,7 +93,7 @@ void arch_task_cache_init(void)
22228 task_xstate_cachep =
22229 kmem_cache_create("task_xstate", xstate_size,
22230 __alignof__(union thread_xstate),
22231- SLAB_PANIC | SLAB_NOTRACK, NULL);
22232+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
22233 }
22234
22235 /*
22236@@ -105,7 +106,7 @@ void exit_thread(void)
22237 unsigned long *bp = t->io_bitmap_ptr;
22238
22239 if (bp) {
22240- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
22241+ struct tss_struct *tss = init_tss + get_cpu();
22242
22243 t->io_bitmap_ptr = NULL;
22244 clear_thread_flag(TIF_IO_BITMAP);
22245@@ -136,7 +137,7 @@ void show_regs_common(void)
22246 board = dmi_get_system_info(DMI_BOARD_NAME);
22247
22248 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s %s%s%s\n",
22249- current->pid, current->comm, print_tainted(),
22250+ task_pid_nr(current), current->comm, print_tainted(),
22251 init_utsname()->release,
22252 (int)strcspn(init_utsname()->version, " "),
22253 init_utsname()->version,
22254@@ -149,6 +150,9 @@ void flush_thread(void)
22255 {
22256 struct task_struct *tsk = current;
22257
22258+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
22259+ loadsegment(gs, 0);
22260+#endif
22261 flush_ptrace_hw_breakpoint(tsk);
22262 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
22263 drop_init_fpu(tsk);
22264@@ -301,7 +305,7 @@ static void __exit_idle(void)
22265 void exit_idle(void)
22266 {
22267 /* idle loop has pid 0 */
22268- if (current->pid)
22269+ if (task_pid_nr(current))
22270 return;
22271 __exit_idle();
22272 }
22273@@ -404,7 +408,7 @@ bool set_pm_idle_to_default(void)
22274
22275 return ret;
22276 }
22277-void stop_this_cpu(void *dummy)
22278+__noreturn void stop_this_cpu(void *dummy)
22279 {
22280 local_irq_disable();
22281 /*
22282@@ -632,16 +636,37 @@ static int __init idle_setup(char *str)
22283 }
22284 early_param("idle", idle_setup);
22285
22286-unsigned long arch_align_stack(unsigned long sp)
22287+#ifdef CONFIG_PAX_RANDKSTACK
22288+void pax_randomize_kstack(struct pt_regs *regs)
22289 {
22290- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
22291- sp -= get_random_int() % 8192;
22292- return sp & ~0xf;
22293-}
22294+ struct thread_struct *thread = &current->thread;
22295+ unsigned long time;
22296
22297-unsigned long arch_randomize_brk(struct mm_struct *mm)
22298-{
22299- unsigned long range_end = mm->brk + 0x02000000;
22300- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
22301-}
22302+ if (!randomize_va_space)
22303+ return;
22304+
22305+ if (v8086_mode(regs))
22306+ return;
22307
22308+ rdtscl(time);
22309+
22310+ /* P4 seems to return a 0 LSB, ignore it */
22311+#ifdef CONFIG_MPENTIUM4
22312+ time &= 0x3EUL;
22313+ time <<= 2;
22314+#elif defined(CONFIG_X86_64)
22315+ time &= 0xFUL;
22316+ time <<= 4;
22317+#else
22318+ time &= 0x1FUL;
22319+ time <<= 3;
22320+#endif
22321+
22322+ thread->sp0 ^= time;
22323+ load_sp0(init_tss + smp_processor_id(), thread);
22324+
22325+#ifdef CONFIG_X86_64
22326+ this_cpu_write(kernel_stack, thread->sp0);
22327+#endif
22328+}
22329+#endif
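In place of the generic arch_align_stack()/arch_randomize_brk(), PAX_RANDKSTACK re-randomizes the kernel stack top on the way out of each syscall: a few low TSC bits are masked and shifted so the perturbation stays 16-byte aligned, then XORed into thread->sp0 (with a wider shift on Pentium 4, whose TSC LSB tends to read 0). A sketch of the arithmetic, assuming an x86 compiler and using a plain rdtsc in place of the kernel's rdtscl():

#include <stdio.h>
#include <stdint.h>

static uint64_t rdtsc(void)
{
	uint32_t lo, hi;
	__asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi));
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	unsigned long sp0 = 0xffff880000100000UL;   /* illustrative stack top */

	unsigned long t = (unsigned long)rdtsc();
	t &= 0xFUL;   /* x86-64 variant from the patch: 4 bits of entropy */
	t <<= 4;      /* keep the bottom 4 bits zero -> 16-byte alignment */

	printf("sp0 before: %#lx\n", sp0);
	printf("sp0 after:  %#lx (offset %#lx)\n", sp0 ^ t, t);
	return 0;
}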
22330diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
22331index b5a8905..d9cacac 100644
22332--- a/arch/x86/kernel/process_32.c
22333+++ b/arch/x86/kernel/process_32.c
22334@@ -65,6 +65,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
22335 unsigned long thread_saved_pc(struct task_struct *tsk)
22336 {
22337 return ((unsigned long *)tsk->thread.sp)[3];
22338+//XXX return tsk->thread.eip;
22339 }
22340
22341 void __show_regs(struct pt_regs *regs, int all)
22342@@ -74,21 +75,20 @@ void __show_regs(struct pt_regs *regs, int all)
22343 unsigned long sp;
22344 unsigned short ss, gs;
22345
22346- if (user_mode_vm(regs)) {
22347+ if (user_mode(regs)) {
22348 sp = regs->sp;
22349 ss = regs->ss & 0xffff;
22350- gs = get_user_gs(regs);
22351 } else {
22352 sp = kernel_stack_pointer(regs);
22353 savesegment(ss, ss);
22354- savesegment(gs, gs);
22355 }
22356+ gs = get_user_gs(regs);
22357
22358 show_regs_common();
22359
22360 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
22361 (u16)regs->cs, regs->ip, regs->flags,
22362- smp_processor_id());
22363+ raw_smp_processor_id());
22364 print_symbol("EIP is at %s\n", regs->ip);
22365
22366 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
22367@@ -130,20 +130,21 @@ void release_thread(struct task_struct *dead_task)
22368 int copy_thread(unsigned long clone_flags, unsigned long sp,
22369 unsigned long arg, struct task_struct *p)
22370 {
22371- struct pt_regs *childregs = task_pt_regs(p);
22372+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
22373 struct task_struct *tsk;
22374 int err;
22375
22376 p->thread.sp = (unsigned long) childregs;
22377 p->thread.sp0 = (unsigned long) (childregs+1);
22378+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
22379
22380 if (unlikely(p->flags & PF_KTHREAD)) {
22381 /* kernel thread */
22382 memset(childregs, 0, sizeof(struct pt_regs));
22383 p->thread.ip = (unsigned long) ret_from_kernel_thread;
22384- task_user_gs(p) = __KERNEL_STACK_CANARY;
22385- childregs->ds = __USER_DS;
22386- childregs->es = __USER_DS;
22387+ savesegment(gs, childregs->gs);
22388+ childregs->ds = __KERNEL_DS;
22389+ childregs->es = __KERNEL_DS;
22390 childregs->fs = __KERNEL_PERCPU;
22391 childregs->bx = sp; /* function */
22392 childregs->bp = arg;
22393@@ -250,7 +251,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22394 struct thread_struct *prev = &prev_p->thread,
22395 *next = &next_p->thread;
22396 int cpu = smp_processor_id();
22397- struct tss_struct *tss = &per_cpu(init_tss, cpu);
22398+ struct tss_struct *tss = init_tss + cpu;
22399 fpu_switch_t fpu;
22400
22401 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
22402@@ -274,6 +275,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22403 */
22404 lazy_save_gs(prev->gs);
22405
22406+#ifdef CONFIG_PAX_MEMORY_UDEREF
22407+ __set_fs(task_thread_info(next_p)->addr_limit);
22408+#endif
22409+
22410 /*
22411 * Load the per-thread Thread-Local Storage descriptor.
22412 */
22413@@ -304,6 +309,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22414 */
22415 arch_end_context_switch(next_p);
22416
22417+ this_cpu_write(current_task, next_p);
22418+ this_cpu_write(current_tinfo, &next_p->tinfo);
22419+
22420 /*
22421 * Restore %gs if needed (which is common)
22422 */
22423@@ -312,8 +320,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22424
22425 switch_fpu_finish(next_p, fpu);
22426
22427- this_cpu_write(current_task, next_p);
22428-
22429 return prev_p;
22430 }
22431
22432@@ -343,4 +349,3 @@ unsigned long get_wchan(struct task_struct *p)
22433 } while (count++ < 16);
22434 return 0;
22435 }
22436-
22437diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
22438index 6e68a61..955a9a5 100644
22439--- a/arch/x86/kernel/process_64.c
22440+++ b/arch/x86/kernel/process_64.c
22441@@ -152,10 +152,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
22442 struct pt_regs *childregs;
22443 struct task_struct *me = current;
22444
22445- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
22446+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
22447 childregs = task_pt_regs(p);
22448 p->thread.sp = (unsigned long) childregs;
22449 p->thread.usersp = me->thread.usersp;
22450+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
22451 set_tsk_thread_flag(p, TIF_FORK);
22452 p->fpu_counter = 0;
22453 p->thread.io_bitmap_ptr = NULL;
22454@@ -274,7 +275,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22455 struct thread_struct *prev = &prev_p->thread;
22456 struct thread_struct *next = &next_p->thread;
22457 int cpu = smp_processor_id();
22458- struct tss_struct *tss = &per_cpu(init_tss, cpu);
22459+ struct tss_struct *tss = init_tss + cpu;
22460 unsigned fsindex, gsindex;
22461 fpu_switch_t fpu;
22462
22463@@ -356,10 +357,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22464 prev->usersp = this_cpu_read(old_rsp);
22465 this_cpu_write(old_rsp, next->usersp);
22466 this_cpu_write(current_task, next_p);
22467+ this_cpu_write(current_tinfo, &next_p->tinfo);
22468
22469- this_cpu_write(kernel_stack,
22470- (unsigned long)task_stack_page(next_p) +
22471- THREAD_SIZE - KERNEL_STACK_OFFSET);
22472+ this_cpu_write(kernel_stack, next->sp0);
22473
22474 /*
22475 * Now maybe reload the debug registers and handle I/O bitmaps
22476@@ -428,12 +428,11 @@ unsigned long get_wchan(struct task_struct *p)
22477 if (!p || p == current || p->state == TASK_RUNNING)
22478 return 0;
22479 stack = (unsigned long)task_stack_page(p);
22480- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
22481+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
22482 return 0;
22483 fp = *(u64 *)(p->thread.sp);
22484 do {
22485- if (fp < (unsigned long)stack ||
22486- fp >= (unsigned long)stack+THREAD_SIZE)
22487+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
22488 return 0;
22489 ip = *(u64 *)(fp+8);
22490 if (!in_sched_functions(ip))
22491diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
22492index b629bbe..0fa615a 100644
22493--- a/arch/x86/kernel/ptrace.c
22494+++ b/arch/x86/kernel/ptrace.c
22495@@ -184,14 +184,13 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
22496 {
22497 unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
22498 unsigned long sp = (unsigned long)&regs->sp;
22499- struct thread_info *tinfo;
22500
22501- if (context == (sp & ~(THREAD_SIZE - 1)))
22502+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
22503 return sp;
22504
22505- tinfo = (struct thread_info *)context;
22506- if (tinfo->previous_esp)
22507- return tinfo->previous_esp;
22508+ sp = *(unsigned long *)context;
22509+ if (sp)
22510+ return sp;
22511
22512 return (unsigned long)regs;
22513 }
22514@@ -588,7 +587,7 @@ static void ptrace_triggered(struct perf_event *bp,
22515 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
22516 {
22517 int i;
22518- int dr7 = 0;
22519+ unsigned long dr7 = 0;
22520 struct arch_hw_breakpoint *info;
22521
22522 for (i = 0; i < HBP_NUM; i++) {
22523@@ -856,7 +855,7 @@ long arch_ptrace(struct task_struct *child, long request,
22524 unsigned long addr, unsigned long data)
22525 {
22526 int ret;
22527- unsigned long __user *datap = (unsigned long __user *)data;
22528+ unsigned long __user *datap = (__force unsigned long __user *)data;
22529
22530 switch (request) {
22531 /* read the word at location addr in the USER area. */
22532@@ -941,14 +940,14 @@ long arch_ptrace(struct task_struct *child, long request,
22533 if ((int) addr < 0)
22534 return -EIO;
22535 ret = do_get_thread_area(child, addr,
22536- (struct user_desc __user *)data);
22537+ (__force struct user_desc __user *) data);
22538 break;
22539
22540 case PTRACE_SET_THREAD_AREA:
22541 if ((int) addr < 0)
22542 return -EIO;
22543 ret = do_set_thread_area(child, addr,
22544- (struct user_desc __user *)data, 0);
22545+ (__force struct user_desc __user *) data, 0);
22546 break;
22547 #endif
22548
22549@@ -1326,7 +1325,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
22550
22551 #ifdef CONFIG_X86_64
22552
22553-static struct user_regset x86_64_regsets[] __read_mostly = {
22554+static user_regset_no_const x86_64_regsets[] __read_only = {
22555 [REGSET_GENERAL] = {
22556 .core_note_type = NT_PRSTATUS,
22557 .n = sizeof(struct user_regs_struct) / sizeof(long),
22558@@ -1367,7 +1366,7 @@ static const struct user_regset_view user_x86_64_view = {
22559 #endif /* CONFIG_X86_64 */
22560
22561 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
22562-static struct user_regset x86_32_regsets[] __read_mostly = {
22563+static user_regset_no_const x86_32_regsets[] __read_only = {
22564 [REGSET_GENERAL] = {
22565 .core_note_type = NT_PRSTATUS,
22566 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
22567@@ -1420,7 +1419,7 @@ static const struct user_regset_view user_x86_32_view = {
22568 */
22569 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
22570
22571-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
22572+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
22573 {
22574 #ifdef CONFIG_X86_64
22575 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
22576@@ -1455,7 +1454,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
22577 memset(info, 0, sizeof(*info));
22578 info->si_signo = SIGTRAP;
22579 info->si_code = si_code;
22580- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
22581+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
22582 }
22583
22584 void user_single_step_siginfo(struct task_struct *tsk,
22585@@ -1484,6 +1483,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
22586 # define IS_IA32 0
22587 #endif
22588
22589+#ifdef CONFIG_GRKERNSEC_SETXID
22590+extern void gr_delayed_cred_worker(void);
22591+#endif
22592+
22593 /*
22594 * We must return the syscall number to actually look up in the table.
22595 * This can be -1L to skip running any syscall at all.
22596@@ -1494,6 +1497,11 @@ long syscall_trace_enter(struct pt_regs *regs)
22597
22598 user_exit();
22599
22600+#ifdef CONFIG_GRKERNSEC_SETXID
22601+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
22602+ gr_delayed_cred_worker();
22603+#endif
22604+
22605 /*
22606 * If we stepped into a sysenter/syscall insn, it trapped in
22607 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
22608@@ -1549,6 +1557,11 @@ void syscall_trace_leave(struct pt_regs *regs)
22609 */
22610 user_exit();
22611
22612+#ifdef CONFIG_GRKERNSEC_SETXID
22613+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
22614+ gr_delayed_cred_worker();
22615+#endif
22616+
22617 audit_syscall_exit(regs);
22618
22619 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
22620diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
22621index 2cb9470..ff1fd80 100644
22622--- a/arch/x86/kernel/pvclock.c
22623+++ b/arch/x86/kernel/pvclock.c
22624@@ -43,11 +43,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
22625 return pv_tsc_khz;
22626 }
22627
22628-static atomic64_t last_value = ATOMIC64_INIT(0);
22629+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
22630
22631 void pvclock_resume(void)
22632 {
22633- atomic64_set(&last_value, 0);
22634+ atomic64_set_unchecked(&last_value, 0);
22635 }
22636
22637 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
22638@@ -92,11 +92,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
22639 * updating at the same time, and one of them could be slightly behind,
22640 * making the assumption that last_value always go forward fail to hold.
22641 */
22642- last = atomic64_read(&last_value);
22643+ last = atomic64_read_unchecked(&last_value);
22644 do {
22645 if (ret < last)
22646 return last;
22647- last = atomic64_cmpxchg(&last_value, last, ret);
22648+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
22649 } while (unlikely(last != ret));
22650
22651 return ret;
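last_value exists to keep the paravirt clock monotonic when several CPUs read slightly skewed time sources: each reader publishes its timestamp with a compare-and-swap loop and returns whichever of its own reading and the published value is later. The switch to the atomic64_*_unchecked variants merely opts this counter out of PaX's REFCOUNT overflow instrumentation, which is meant for reference counts. A standalone C11 model of the loop:

#include <stdio.h>
#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t last_value;

static uint64_t monotonic_read(uint64_t raw)
{
	uint64_t last = atomic_load(&last_value);
	do {
		if (raw < last)
			return last;   /* someone already saw a later time */
	} while (!atomic_compare_exchange_weak(&last_value, &last, raw));
	return raw;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)monotonic_read(100)); /* 100 */
	printf("%llu\n", (unsigned long long)monotonic_read(90));  /* still 100 */
	printf("%llu\n", (unsigned long long)monotonic_read(150)); /* 150 */
	return 0;
}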
22652diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
22653index 76fa1e9..abf09ea 100644
22654--- a/arch/x86/kernel/reboot.c
22655+++ b/arch/x86/kernel/reboot.c
22656@@ -36,7 +36,7 @@ void (*pm_power_off)(void);
22657 EXPORT_SYMBOL(pm_power_off);
22658
22659 static const struct desc_ptr no_idt = {};
22660-static int reboot_mode;
22661+static unsigned short reboot_mode;
22662 enum reboot_type reboot_type = BOOT_ACPI;
22663 int reboot_force;
22664
22665@@ -157,6 +157,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
22666
22667 void __noreturn machine_real_restart(unsigned int type)
22668 {
22669+
22670+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
22671+ struct desc_struct *gdt;
22672+#endif
22673+
22674 local_irq_disable();
22675
22676 /*
22677@@ -184,7 +189,29 @@ void __noreturn machine_real_restart(unsigned int type)
22678
22679 /* Jump to the identity-mapped low memory code */
22680 #ifdef CONFIG_X86_32
22681- asm volatile("jmpl *%0" : :
22682+
22683+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22684+ gdt = get_cpu_gdt_table(smp_processor_id());
22685+ pax_open_kernel();
22686+#ifdef CONFIG_PAX_MEMORY_UDEREF
22687+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
22688+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
22689+ loadsegment(ds, __KERNEL_DS);
22690+ loadsegment(es, __KERNEL_DS);
22691+ loadsegment(ss, __KERNEL_DS);
22692+#endif
22693+#ifdef CONFIG_PAX_KERNEXEC
22694+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
22695+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
22696+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
22697+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
22698+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
22699+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
22700+#endif
22701+ pax_close_kernel();
22702+#endif
22703+
22704+ asm volatile("ljmpl *%0" : :
22705 "rm" (real_mode_header->machine_real_restart_asm),
22706 "a" (type));
22707 #else
22708@@ -531,7 +558,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
22709 * try to force a triple fault and then cycle between hitting the keyboard
22710 * controller and doing that
22711 */
22712-static void native_machine_emergency_restart(void)
22713+static void __noreturn native_machine_emergency_restart(void)
22714 {
22715 int i;
22716 int attempt = 0;
22717@@ -654,13 +681,13 @@ void native_machine_shutdown(void)
22718 #endif
22719 }
22720
22721-static void __machine_emergency_restart(int emergency)
22722+static void __noreturn __machine_emergency_restart(int emergency)
22723 {
22724 reboot_emergency = emergency;
22725 machine_ops.emergency_restart();
22726 }
22727
22728-static void native_machine_restart(char *__unused)
22729+static void __noreturn native_machine_restart(char *__unused)
22730 {
22731 pr_notice("machine restart\n");
22732
22733@@ -669,7 +696,7 @@ static void native_machine_restart(char *__unused)
22734 __machine_emergency_restart(0);
22735 }
22736
22737-static void native_machine_halt(void)
22738+static void __noreturn native_machine_halt(void)
22739 {
22740 /* Stop other cpus and apics */
22741 machine_shutdown();
22742@@ -679,7 +706,7 @@ static void native_machine_halt(void)
22743 stop_this_cpu(NULL);
22744 }
22745
22746-static void native_machine_power_off(void)
22747+static void __noreturn native_machine_power_off(void)
22748 {
22749 if (pm_power_off) {
22750 if (!reboot_force)
22751@@ -688,9 +715,10 @@ static void native_machine_power_off(void)
22752 }
22753 /* A fallback in case there is no PM info available */
22754 tboot_shutdown(TB_SHUTDOWN_HALT);
22755+ unreachable();
22756 }
22757
22758-struct machine_ops machine_ops = {
22759+struct machine_ops machine_ops __read_only = {
22760 .power_off = native_machine_power_off,
22761 .shutdown = native_machine_shutdown,
22762 .emergency_restart = native_machine_emergency_restart,
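Before jumping into the identity-mapped real-mode trampoline, a KERNEXEC/UDEREF kernel must first undo its segmentation tricks: the UDEREF branch restores expand-up 4 GiB kernel data segments and reloads %ds/%es/%ss, the KERNEXEC branch rewrites the kernel code descriptor back to a flat base and limit, and the jump becomes a far `ljmpl` so %cs is reloaded from the repaired descriptor. The restart, halt, and power-off paths also gain __noreturn, and machine_ops becomes __read_only. A sketch of the code-segment rebuild, using a bitfield that mirrors x86's desc_struct layout (values are the flat ones the hunk writes back):

#include <stdio.h>

struct desc {
	unsigned limit0 : 16;
	unsigned base0  : 16;
	unsigned base1  : 8;
	unsigned type   : 4;
	unsigned s      : 1;
	unsigned dpl    : 2;
	unsigned p      : 1;
	unsigned limit  : 4;
	unsigned avl    : 1;
	unsigned l      : 1;
	unsigned d      : 1;
	unsigned g      : 1;
	unsigned base2  : 8;
};

int main(void)
{
	struct desc cs = { 0 };

	cs.base0 = cs.base1 = cs.base2 = 0;   /* flat base */
	cs.limit0 = 0xffff;                   /* low 16 limit bits */
	cs.limit  = 0xf;                      /* high 4 limit bits */
	cs.g      = 1;                        /* 4 KiB granularity -> 4 GiB */

	printf("limit field: %#x pages\n", (unsigned)((cs.limit << 16) | cs.limit0));
	return 0;
}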
22763diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
22764index 7a6f3b3..bed145d7 100644
22765--- a/arch/x86/kernel/relocate_kernel_64.S
22766+++ b/arch/x86/kernel/relocate_kernel_64.S
22767@@ -11,6 +11,7 @@
22768 #include <asm/kexec.h>
22769 #include <asm/processor-flags.h>
22770 #include <asm/pgtable_types.h>
22771+#include <asm/alternative-asm.h>
22772
22773 /*
22774 * Must be relocatable PIC code callable as a C function
22775@@ -160,13 +161,14 @@ identity_mapped:
22776 xorq %rbp, %rbp
22777 xorq %r8, %r8
22778 xorq %r9, %r9
22779- xorq %r10, %r9
22780+ xorq %r10, %r10
22781 xorq %r11, %r11
22782 xorq %r12, %r12
22783 xorq %r13, %r13
22784 xorq %r14, %r14
22785 xorq %r15, %r15
22786
22787+ pax_force_retaddr 0, 1
22788 ret
22789
22790 1:
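Besides appending pax_force_retaddr (the return-address hardening macro pulled in via alternative-asm.h), this hunk fixes a genuine upstream typo in the register-scrubbing sequence: in AT&T syntax `xorq %r10, %r9` means r9 ^= r10, which re-dirties the just-cleared %r9 with %r10's stale contents and leaves %r10 itself untouched, while the corrected `xorq %r10, %r10` zeroes %r10 as the surrounding lines intend. In C terms:

#include <stdio.h>

int main(void)
{
	unsigned long r9 = 0, r10 = 0xdeadbeefUL;

	/* typo:   r9 ^= r10;   -- copies stale data into r9, r10 unscrubbed */
	r10 ^= r10;              /* fix: x ^ x == 0, r10 is scrubbed */

	printf("r9=%#lx r10=%#lx\n", r9, r10);
	return 0;
}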
22791diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
22792index 8b24289..d37b58b 100644
22793--- a/arch/x86/kernel/setup.c
22794+++ b/arch/x86/kernel/setup.c
22795@@ -437,7 +437,7 @@ static void __init parse_setup_data(void)
22796
22797 switch (data->type) {
22798 case SETUP_E820_EXT:
22799- parse_e820_ext(data);
22800+ parse_e820_ext((struct setup_data __force_kernel *)data);
22801 break;
22802 case SETUP_DTB:
22803 add_dtb(pa_data);
22804@@ -706,7 +706,7 @@ static void __init trim_bios_range(void)
22805 * area (640->1Mb) as ram even though it is not.
22806 * take them out.
22807 */
22808- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
22809+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
22810
22811 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
22812 }
22813@@ -830,14 +830,14 @@ void __init setup_arch(char **cmdline_p)
22814
22815 if (!boot_params.hdr.root_flags)
22816 root_mountflags &= ~MS_RDONLY;
22817- init_mm.start_code = (unsigned long) _text;
22818- init_mm.end_code = (unsigned long) _etext;
22819+ init_mm.start_code = ktla_ktva((unsigned long) _text);
22820+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
22821 init_mm.end_data = (unsigned long) _edata;
22822 init_mm.brk = _brk_end;
22823
22824- code_resource.start = virt_to_phys(_text);
22825- code_resource.end = virt_to_phys(_etext)-1;
22826- data_resource.start = virt_to_phys(_etext);
22827+ code_resource.start = virt_to_phys(ktla_ktva(_text));
22828+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
22829+ data_resource.start = virt_to_phys(_sdata);
22830 data_resource.end = virt_to_phys(_edata)-1;
22831 bss_resource.start = virt_to_phys(&__bss_start);
22832 bss_resource.end = virt_to_phys(&__bss_stop)-1;
22833diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
22834index 5cdff03..80fa283 100644
22835--- a/arch/x86/kernel/setup_percpu.c
22836+++ b/arch/x86/kernel/setup_percpu.c
22837@@ -21,19 +21,17 @@
22838 #include <asm/cpu.h>
22839 #include <asm/stackprotector.h>
22840
22841-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
22842+#ifdef CONFIG_SMP
22843+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
22844 EXPORT_PER_CPU_SYMBOL(cpu_number);
22845+#endif
22846
22847-#ifdef CONFIG_X86_64
22848 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
22849-#else
22850-#define BOOT_PERCPU_OFFSET 0
22851-#endif
22852
22853 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
22854 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
22855
22856-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
22857+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
22858 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
22859 };
22860 EXPORT_SYMBOL(__per_cpu_offset);
22861@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
22862 {
22863 #ifdef CONFIG_NEED_MULTIPLE_NODES
22864 pg_data_t *last = NULL;
22865- unsigned int cpu;
22866+ int cpu;
22867
22868 for_each_possible_cpu(cpu) {
22869 int node = early_cpu_to_node(cpu);
22870@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
22871 {
22872 #ifdef CONFIG_X86_32
22873 struct desc_struct gdt;
22874+ unsigned long base = per_cpu_offset(cpu);
22875
22876- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
22877- 0x2 | DESCTYPE_S, 0x8);
22878- gdt.s = 1;
22879+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
22880+ 0x83 | DESCTYPE_S, 0xC);
22881 write_gdt_entry(get_cpu_gdt_table(cpu),
22882 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
22883 #endif
22884@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
22885 /* alrighty, percpu areas up and running */
22886 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
22887 for_each_possible_cpu(cpu) {
22888+#ifdef CONFIG_CC_STACKPROTECTOR
22889+#ifdef CONFIG_X86_32
22890+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
22891+#endif
22892+#endif
22893 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
22894 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
22895 per_cpu(cpu_number, cpu) = cpu;
22896@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
22897 */
22898 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
22899 #endif
22900+#ifdef CONFIG_CC_STACKPROTECTOR
22901+#ifdef CONFIG_X86_32
22902+ if (!cpu)
22903+ per_cpu(stack_canary.canary, cpu) = canary;
22904+#endif
22905+#endif
22906 /*
22907 * Up to this point, the boot CPU has been using .init.data
22908 * area. Reload any changed state for the boot CPU.
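Two hardening details ride along with the percpu setup changes. The per-CPU GDT segment is repacked with an explicit base and a page-granular limit derived from VMALLOC_END instead of a flat 4 GiB one, so segment-relative accesses cannot reach past the mapped area; and because __per_cpu_offset becomes __read_only, the boot CPU's stack-protector canary is captured before the percpu area switch and written back afterwards. A sketch of the limit computation, with illustrative addresses:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long base        = 0xc1000000UL;   /* assumed per-cpu base */
	unsigned long vmalloc_end = 0xf7fe0000UL;   /* assumed VMALLOC_END  */

	/* With 4 KiB granularity the descriptor limit field holds
	 * ((top - base - 1) >> PAGE_SHIFT), as pack_descriptor() is
	 * given above. */
	unsigned long limit = (vmalloc_end - base - 1) >> PAGE_SHIFT;

	printf("granular limit field: %#lx (%lu pages)\n", limit, limit + 1);
	return 0;
}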
22909diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
22910index d6bf1f3..3ffce5a 100644
22911--- a/arch/x86/kernel/signal.c
22912+++ b/arch/x86/kernel/signal.c
22913@@ -196,7 +196,7 @@ static unsigned long align_sigframe(unsigned long sp)
22914 * Align the stack pointer according to the i386 ABI,
22915 * i.e. so that on function entry ((sp + 4) & 15) == 0.
22916 */
22917- sp = ((sp + 4) & -16ul) - 4;
22918+ sp = ((sp - 12) & -16ul) - 4;
22919 #else /* !CONFIG_X86_32 */
22920 sp = round_down(sp, 16) - 8;
22921 #endif
22922@@ -304,9 +304,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
22923 }
22924
22925 if (current->mm->context.vdso)
22926- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
22927+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
22928 else
22929- restorer = &frame->retcode;
22930+ restorer = (void __user *)&frame->retcode;
22931 if (ka->sa.sa_flags & SA_RESTORER)
22932 restorer = ka->sa.sa_restorer;
22933
22934@@ -320,7 +320,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
22935 * reasons and because gdb uses it as a signature to notice
22936 * signal handler stack frames.
22937 */
22938- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
22939+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
22940
22941 if (err)
22942 return -EFAULT;
22943@@ -367,7 +367,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
22944 err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
22945
22946 /* Set up to return from userspace. */
22947- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
22948+ if (current->mm->context.vdso)
22949+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
22950+ else
22951+ restorer = (void __user *)&frame->retcode;
22952 if (ka->sa.sa_flags & SA_RESTORER)
22953 restorer = ka->sa.sa_restorer;
22954 put_user_ex(restorer, &frame->pretcode);
22955@@ -379,7 +382,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
22956 * reasons and because gdb uses it as a signature to notice
22957 * signal handler stack frames.
22958 */
22959- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
22960+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
22961 } put_user_catch(err);
22962
22963 err |= copy_siginfo_to_user(&frame->info, info);
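Beyond making __setup_rt_frame fall back to &frame->retcode when there is no vdso (mirroring __setup_frame), the subtle change here is the alignment formula. Both forms yield a frame pointer satisfying the i386 entry invariant ((sp + 4) & 15) == 0, but the original `((sp + 4) & -16ul) - 4` can hand back sp itself when sp is already suitably aligned, whereas `((sp - 12) & -16ul) - 4` always lands strictly below sp, guaranteeing headroom for the frame. A quick demonstration:

#include <stdio.h>

int main(void)
{
	for (unsigned long sp = 0x1000; sp < 0x1010; sp++) {
		unsigned long old_sp = ((sp + 4) & -16ul) - 4;    /* may equal sp */
		unsigned long new_sp = ((sp - 12) & -16ul) - 4;   /* always < sp  */
		printf("sp=%#lx old=%#lx new=%#lx aligned=%d/%d\n",
		       sp, old_sp, new_sp,
		       ((old_sp + 4) & 15) == 0, ((new_sp + 4) & 15) == 0);
	}
	return 0;
}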
22964diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
22965index 48d2b7d..90d328a 100644
22966--- a/arch/x86/kernel/smp.c
22967+++ b/arch/x86/kernel/smp.c
22968@@ -285,7 +285,7 @@ static int __init nonmi_ipi_setup(char *str)
22969
22970 __setup("nonmi_ipi", nonmi_ipi_setup);
22971
22972-struct smp_ops smp_ops = {
22973+struct smp_ops smp_ops __read_only = {
22974 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
22975 .smp_prepare_cpus = native_smp_prepare_cpus,
22976 .smp_cpus_done = native_smp_cpus_done,
22977diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
22978index ed0fe38..87fc692 100644
22979--- a/arch/x86/kernel/smpboot.c
22980+++ b/arch/x86/kernel/smpboot.c
22981@@ -748,6 +748,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
22982 idle->thread.sp = (unsigned long) (((struct pt_regs *)
22983 (THREAD_SIZE + task_stack_page(idle))) - 1);
22984 per_cpu(current_task, cpu) = idle;
22985+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
22986
22987 #ifdef CONFIG_X86_32
22988 /* Stack for startup_32 can be just as for start_secondary onwards */
22989@@ -755,11 +756,13 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
22990 #else
22991 clear_tsk_thread_flag(idle, TIF_FORK);
22992 initial_gs = per_cpu_offset(cpu);
22993- per_cpu(kernel_stack, cpu) =
22994- (unsigned long)task_stack_page(idle) -
22995- KERNEL_STACK_OFFSET + THREAD_SIZE;
22996+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
22997 #endif
22998+
22999+ pax_open_kernel();
23000 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
23001+ pax_close_kernel();
23002+
23003 initial_code = (unsigned long)start_secondary;
23004 stack_start = idle->thread.sp;
23005
23006@@ -908,6 +911,12 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
23007 /* the FPU context is blank, nobody can own it */
23008 __cpu_disable_lazy_restore(cpu);
23009
23010+#ifdef CONFIG_PAX_PER_CPU_PGD
23011+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
23012+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
23013+ KERNEL_PGD_PTRS);
23014+#endif
23015+
23019 err = do_boot_cpu(apicid, cpu, tidle);
23020 if (err) {
23021 pr_debug("do_boot_cpu failed %d\n", err);
23022diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
23023index 9b4d51d..5d28b58 100644
23024--- a/arch/x86/kernel/step.c
23025+++ b/arch/x86/kernel/step.c
23026@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
23027 struct desc_struct *desc;
23028 unsigned long base;
23029
23030- seg &= ~7UL;
23031+ seg >>= 3;
23032
23033 mutex_lock(&child->mm->context.lock);
23034- if (unlikely((seg >> 3) >= child->mm->context.size))
23035+ if (unlikely(seg >= child->mm->context.size))
23036 addr = -1L; /* bogus selector, access would fault */
23037 else {
23038 desc = child->mm->context.ldt + seg;
23039@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
23040 addr += base;
23041 }
23042 mutex_unlock(&child->mm->context.lock);
23043- }
23044+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
23045+ addr = ktla_ktva(addr);
23046
23047 return addr;
23048 }
23049@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
23050 unsigned char opcode[15];
23051 unsigned long addr = convert_ip_to_linear(child, regs);
23052
23053+ if (addr == -EINVAL)
23054+ return 0;
23055+
23056 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
23057 for (i = 0; i < copied; i++) {
23058 switch (opcode[i]) {
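The selector handling above is tidied: an x86 segment selector packs RPL in bits 0..1, the table indicator in bit 2, and the descriptor index in bits 3..15, so `seg >>= 3` turns the selector into the index once and for all, and both the context.size bound check and the context.ldt indexing use it directly (the old code kept a byte offset via `seg &= ~7UL` and shifted only at the comparison). The new KERNEXEC branch also maps kernel-CS addresses back through ktla_ktva(). The selector arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned short sel = 0x77;   /* illustrative LDT selector: index 14, TI=1, RPL=3 */

	unsigned idx = sel >> 3;     /* descriptor index (new form)        */
	unsigned off = sel & ~7u;    /* byte offset == idx * 8 (old form)  */
	unsigned rpl = sel & 3;
	unsigned ti  = (sel >> 2) & 1;

	printf("index=%u byte-offset=%u rpl=%u ti=%u\n", idx, off, rpl, ti);
	return 0;
}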
23059diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
23060new file mode 100644
23061index 0000000..207bec6
23062--- /dev/null
23063+++ b/arch/x86/kernel/sys_i386_32.c
23064@@ -0,0 +1,250 @@
23065+/*
23066+ * This file contains various random system calls that
23067+ * have a non-standard calling sequence on the Linux/i386
23068+ * platform.
23069+ */
23070+
23071+#include <linux/errno.h>
23072+#include <linux/sched.h>
23073+#include <linux/mm.h>
23074+#include <linux/fs.h>
23075+#include <linux/smp.h>
23076+#include <linux/sem.h>
23077+#include <linux/msg.h>
23078+#include <linux/shm.h>
23079+#include <linux/stat.h>
23080+#include <linux/syscalls.h>
23081+#include <linux/mman.h>
23082+#include <linux/file.h>
23083+#include <linux/utsname.h>
23084+#include <linux/ipc.h>
23085+
23086+#include <linux/uaccess.h>
23087+#include <linux/unistd.h>
23088+
23089+#include <asm/syscalls.h>
23090+
23091+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
23092+{
23093+ unsigned long pax_task_size = TASK_SIZE;
23094+
23095+#ifdef CONFIG_PAX_SEGMEXEC
23096+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
23097+ pax_task_size = SEGMEXEC_TASK_SIZE;
23098+#endif
23099+
23100+ if (flags & MAP_FIXED)
23101+ if (len > pax_task_size || addr > pax_task_size - len)
23102+ return -EINVAL;
23103+
23104+ return 0;
23105+}
23106+
23107+unsigned long
23108+arch_get_unmapped_area(struct file *filp, unsigned long addr,
23109+ unsigned long len, unsigned long pgoff, unsigned long flags)
23110+{
23111+ struct mm_struct *mm = current->mm;
23112+ struct vm_area_struct *vma;
23113+ unsigned long start_addr, pax_task_size = TASK_SIZE;
23114+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
23115+
23116+#ifdef CONFIG_PAX_SEGMEXEC
23117+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
23118+ pax_task_size = SEGMEXEC_TASK_SIZE;
23119+#endif
23120+
23121+ pax_task_size -= PAGE_SIZE;
23122+
23123+ if (len > pax_task_size)
23124+ return -ENOMEM;
23125+
23126+ if (flags & MAP_FIXED)
23127+ return addr;
23128+
23129+#ifdef CONFIG_PAX_RANDMMAP
23130+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
23131+#endif
23132+
23133+ if (addr) {
23134+ addr = PAGE_ALIGN(addr);
23135+ if (pax_task_size - len >= addr) {
23136+ vma = find_vma(mm, addr);
23137+ if (check_heap_stack_gap(vma, addr, len, offset))
23138+ return addr;
23139+ }
23140+ }
23141+ if (len > mm->cached_hole_size) {
23142+ start_addr = addr = mm->free_area_cache;
23143+ } else {
23144+ start_addr = addr = mm->mmap_base;
23145+ mm->cached_hole_size = 0;
23146+ }
23147+
23148+#ifdef CONFIG_PAX_PAGEEXEC
23149+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
23150+ start_addr = 0x00110000UL;
23151+
23152+#ifdef CONFIG_PAX_RANDMMAP
23153+ if (mm->pax_flags & MF_PAX_RANDMMAP)
23154+ start_addr += mm->delta_mmap & 0x03FFF000UL;
23155+#endif
23156+
23157+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
23158+ start_addr = addr = mm->mmap_base;
23159+ else
23160+ addr = start_addr;
23161+ }
23162+#endif
23163+
23164+full_search:
23165+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
23166+ /* At this point: (!vma || addr < vma->vm_end). */
23167+ if (pax_task_size - len < addr) {
23168+ /*
23169+ * Start a new search - just in case we missed
23170+ * some holes.
23171+ */
23172+ if (start_addr != mm->mmap_base) {
23173+ start_addr = addr = mm->mmap_base;
23174+ mm->cached_hole_size = 0;
23175+ goto full_search;
23176+ }
23177+ return -ENOMEM;
23178+ }
23179+ if (check_heap_stack_gap(vma, addr, len, offset))
23180+ break;
23181+ if (addr + mm->cached_hole_size < vma->vm_start)
23182+ mm->cached_hole_size = vma->vm_start - addr;
23183+ addr = vma->vm_end;
23184+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
23185+ start_addr = addr = mm->mmap_base;
23186+ mm->cached_hole_size = 0;
23187+ goto full_search;
23188+ }
23189+ }
23190+
23191+ /*
23192+ * Remember the place where we stopped the search:
23193+ */
23194+ mm->free_area_cache = addr + len;
23195+ return addr;
23196+}
23197+
23198+unsigned long
23199+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
23200+ const unsigned long len, const unsigned long pgoff,
23201+ const unsigned long flags)
23202+{
23203+ struct vm_area_struct *vma;
23204+ struct mm_struct *mm = current->mm;
23205+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
23206+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
23207+
23208+#ifdef CONFIG_PAX_SEGMEXEC
23209+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
23210+ pax_task_size = SEGMEXEC_TASK_SIZE;
23211+#endif
23212+
23213+ pax_task_size -= PAGE_SIZE;
23214+
23215+ /* requested length too big for entire address space */
23216+ if (len > pax_task_size)
23217+ return -ENOMEM;
23218+
23219+ if (flags & MAP_FIXED)
23220+ return addr;
23221+
23222+#ifdef CONFIG_PAX_PAGEEXEC
23223+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
23224+ goto bottomup;
23225+#endif
23226+
23227+#ifdef CONFIG_PAX_RANDMMAP
23228+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
23229+#endif
23230+
23231+ /* requesting a specific address */
23232+ if (addr) {
23233+ addr = PAGE_ALIGN(addr);
23234+ if (pax_task_size - len >= addr) {
23235+ vma = find_vma(mm, addr);
23236+ if (check_heap_stack_gap(vma, addr, len, offset))
23237+ return addr;
23238+ }
23239+ }
23240+
23241+ /* check if free_area_cache is useful for us */
23242+ if (len <= mm->cached_hole_size) {
23243+ mm->cached_hole_size = 0;
23244+ mm->free_area_cache = mm->mmap_base;
23245+ }
23246+
23247+ /* either no address requested or can't fit in requested address hole */
23248+ addr = mm->free_area_cache;
23249+
23250+ /* make sure it can fit in the remaining address space */
23251+ if (addr > len) {
23252+ vma = find_vma(mm, addr-len);
23253+ if (check_heap_stack_gap(vma, addr - len, len, offset))
23254+ /* remember the address as a hint for next time */
23255+ return (mm->free_area_cache = addr-len);
23256+ }
23257+
23258+ if (mm->mmap_base < len)
23259+ goto bottomup;
23260+
23261+ addr = mm->mmap_base-len;
23262+
23263+ do {
23264+ /*
23265+ * Lookup failure means no vma is above this address,
23266+ * else if new region fits below vma->vm_start,
23267+ * return with success:
23268+ */
23269+ vma = find_vma(mm, addr);
23270+ if (check_heap_stack_gap(vma, addr, len, offset))
23271+ /* remember the address as a hint for next time */
23272+ return (mm->free_area_cache = addr);
23273+
23274+ /* remember the largest hole we saw so far */
23275+ if (addr + mm->cached_hole_size < vma->vm_start)
23276+ mm->cached_hole_size = vma->vm_start - addr;
23277+
23278+ /* try just below the current vma->vm_start */
23279+ addr = skip_heap_stack_gap(vma, len, offset);
23280+ } while (!IS_ERR_VALUE(addr));
23281+
23282+bottomup:
23283+ /*
23284+ * A failed mmap() very likely causes application failure,
23285+ * so fall back to the bottom-up function here. This scenario
23286+ * can happen with large stack limits and large mmap()
23287+ * allocations.
23288+ */
23289+
23290+#ifdef CONFIG_PAX_SEGMEXEC
23291+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
23292+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
23293+ else
23294+#endif
23295+
23296+ mm->mmap_base = TASK_UNMAPPED_BASE;
23297+
23298+#ifdef CONFIG_PAX_RANDMMAP
23299+ if (mm->pax_flags & MF_PAX_RANDMMAP)
23300+ mm->mmap_base += mm->delta_mmap;
23301+#endif
23302+
23303+ mm->free_area_cache = mm->mmap_base;
23304+ mm->cached_hole_size = ~0UL;
23305+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
23306+ /*
23307+ * Restore the topdown base:
23308+ */
23309+ mm->mmap_base = base;
23310+ mm->free_area_cache = base;
23311+ mm->cached_hole_size = ~0UL;
23312+
23313+ return addr;
23314+}
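This new file replaces the generic i386 mmap searchers with PaX-aware ones: the effective task size shrinks under SEGMEXEC, the caller's address hint is ignored when RANDMMAP is active (so mappings cannot be pinned at predictable addresses), and every candidate range must pass check_heap_stack_gap(), a helper introduced elsewhere in this patch that keeps a guard gap between new mappings and neighbouring stack or heap VMAs. A much-simplified standalone model of that vetting (the real helper also handles per-VMA growth direction and a randomized per-file offset):

#include <stdio.h>

struct vma { unsigned long start, end; int grows_down; };

static int gap_ok(const struct vma *next, unsigned long addr,
		  unsigned long len, unsigned long gap)
{
	if (!next)                        /* nothing above: fits */
		return 1;
	if (addr + len <= next->start)    /* plain non-overlap... */
		return !next->grows_down ||   /* ...plus a guard gap below stacks */
		       addr + len + gap <= next->start;
	return 0;
}

int main(void)
{
	struct vma stack = { 0x60000000, 0x60100000, 1 };

	printf("%d\n", gap_ok(&stack, 0x5fff0000, 0x10000, 0x1000)); /* 0: touches stack */
	printf("%d\n", gap_ok(&stack, 0x5ffe0000, 0x10000, 0x1000)); /* 1: gap preserved */
	return 0;
}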
23315diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
23316index 97ef74b..57a1882 100644
23317--- a/arch/x86/kernel/sys_x86_64.c
23318+++ b/arch/x86/kernel/sys_x86_64.c
23319@@ -81,8 +81,8 @@ out:
23320 return error;
23321 }
23322
23323-static void find_start_end(unsigned long flags, unsigned long *begin,
23324- unsigned long *end)
23325+static void find_start_end(struct mm_struct *mm, unsigned long flags,
23326+ unsigned long *begin, unsigned long *end)
23327 {
23328 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
23329 unsigned long new_begin;
23330@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
23331 *begin = new_begin;
23332 }
23333 } else {
23334- *begin = TASK_UNMAPPED_BASE;
23335+ *begin = mm->mmap_base;
23336 *end = TASK_SIZE;
23337 }
23338 }
23339@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
23340 struct vm_area_struct *vma;
23341 struct vm_unmapped_area_info info;
23342 unsigned long begin, end;
23343+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
23344
23345 if (flags & MAP_FIXED)
23346 return addr;
23347
23348- find_start_end(flags, &begin, &end);
23349+ find_start_end(mm, flags, &begin, &end);
23350
23351 if (len > end)
23352 return -ENOMEM;
23353
23354+#ifdef CONFIG_PAX_RANDMMAP
23355+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
23356+#endif
23357+
23358 if (addr) {
23359 addr = PAGE_ALIGN(addr);
23360 vma = find_vma(mm, addr);
23361- if (end - len >= addr &&
23362- (!vma || addr + len <= vma->vm_start))
23363+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
23364 return addr;
23365 }
23366
23367@@ -161,6 +165,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
23368 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
23369 goto bottomup;
23370
23371+#ifdef CONFIG_PAX_RANDMMAP
23372+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
23373+#endif
23374+
23375 /* requesting a specific address */
23376 if (addr) {
23377 addr = PAGE_ALIGN(addr);
23378diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
23379index f84fe00..f41d9f1 100644
23380--- a/arch/x86/kernel/tboot.c
23381+++ b/arch/x86/kernel/tboot.c
23382@@ -220,7 +220,7 @@ static int tboot_setup_sleep(void)
23383
23384 void tboot_shutdown(u32 shutdown_type)
23385 {
23386- void (*shutdown)(void);
23387+ void (* __noreturn shutdown)(void);
23388
23389 if (!tboot_enabled())
23390 return;
23391@@ -242,7 +242,7 @@ void tboot_shutdown(u32 shutdown_type)
23392
23393 switch_to_tboot_pt();
23394
23395- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
23396+ shutdown = (void *)tboot->shutdown_entry;
23397 shutdown();
23398
23399 /* should not reach here */
23400@@ -300,7 +300,7 @@ static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
23401 return 0;
23402 }
23403
23404-static atomic_t ap_wfs_count;
23405+static atomic_unchecked_t ap_wfs_count;
23406
23407 static int tboot_wait_for_aps(int num_aps)
23408 {
23409@@ -324,16 +324,16 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
23410 {
23411 switch (action) {
23412 case CPU_DYING:
23413- atomic_inc(&ap_wfs_count);
23414+ atomic_inc_unchecked(&ap_wfs_count);
23415 if (num_online_cpus() == 1)
23416- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
23417+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
23418 return NOTIFY_BAD;
23419 break;
23420 }
23421 return NOTIFY_OK;
23422 }
23423
23424-static struct notifier_block tboot_cpu_notifier __cpuinitdata =
23425+static struct notifier_block tboot_cpu_notifier =
23426 {
23427 .notifier_call = tboot_cpu_callback,
23428 };
23429@@ -345,7 +345,7 @@ static __init int tboot_late_init(void)
23430
23431 tboot_create_trampoline();
23432
23433- atomic_set(&ap_wfs_count, 0);
23434+ atomic_set_unchecked(&ap_wfs_count, 0);
23435 register_hotcpu_notifier(&tboot_cpu_notifier);
23436
23437 acpi_os_set_prepare_sleep(&tboot_sleep);
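
ap_wfs_count above is a plain statistic, so it moves to atomic_unchecked_t: under PAX_REFCOUNT the regular atomic ops trap on signed overflow, and counters that may legitimately wrap must opt out. A hedged, non-atomic C model of the split, using the GCC overflow builtin in place of the asm trap:

#include <stdlib.h>

typedef struct { volatile int counter; } demo_atomic_t;
typedef struct { volatile int counter; } demo_atomic_unchecked_t;

void demo_inc_checked(demo_atomic_t *v)
{
	int new;

	if (__builtin_add_overflow(v->counter, 1, &new))
		abort();	/* models the PAX_REFCOUNT overflow trap */
	v->counter = new;
}

void demo_inc_unchecked(demo_atomic_unchecked_t *v)
{
	v->counter++;		/* free to wrap, no instrumentation */
}
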
23438diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
23439index 24d3c91..d06b473 100644
23440--- a/arch/x86/kernel/time.c
23441+++ b/arch/x86/kernel/time.c
23442@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
23443 {
23444 unsigned long pc = instruction_pointer(regs);
23445
23446- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
23447+ if (!user_mode(regs) && in_lock_functions(pc)) {
23448 #ifdef CONFIG_FRAME_POINTER
23449- return *(unsigned long *)(regs->bp + sizeof(long));
23450+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
23451 #else
23452 unsigned long *sp =
23453 (unsigned long *)kernel_stack_pointer(regs);
23454@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
23455 * or above a saved flags. Eflags has bits 22-31 zero,
23456 * kernel addresses don't.
23457 */
23458+
23459+#ifdef CONFIG_PAX_KERNEXEC
23460+ return ktla_ktva(sp[0]);
23461+#else
23462 if (sp[0] >> 22)
23463 return sp[0];
23464 if (sp[1] >> 22)
23465 return sp[1];
23466 #endif
23467+
23468+#endif
23469 }
23470 return pc;
23471 }
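
profile_pc() now filters saved return addresses through ktla_ktva(). Under 32-bit KERNEXEC the kernel text is mapped twice and the two views differ by a constant displacement; ktla_ktva()/ktva_ktla() convert between them. A heavily hedged sketch with a placeholder offset (the real displacement derives from the load address, not this value):

#define DEMO_TEXT_OFFSET 0x1000000UL	/* placeholder displacement */

unsigned long demo_ktla_ktva(unsigned long addr)	/* linear -> virtual */
{
	return addr + DEMO_TEXT_OFFSET;
}

unsigned long demo_ktva_ktla(unsigned long addr)	/* virtual -> linear */
{
	return addr - DEMO_TEXT_OFFSET;
}
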
23472diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
23473index 9d9d2f9..cad418a 100644
23474--- a/arch/x86/kernel/tls.c
23475+++ b/arch/x86/kernel/tls.c
23476@@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
23477 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
23478 return -EINVAL;
23479
23480+#ifdef CONFIG_PAX_SEGMEXEC
23481+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
23482+ return -EINVAL;
23483+#endif
23484+
23485 set_tls_desc(p, idx, &info, 1);
23486
23487 return 0;
23488@@ -204,7 +209,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
23489
23490 if (kbuf)
23491 info = kbuf;
23492- else if (__copy_from_user(infobuf, ubuf, count))
23493+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
23494 return -EFAULT;
23495 else
23496 info = infobuf;
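
The regset_tls_set() change above bounds the user-controlled count against the destination buffer before copying. A hedged userspace model of the pattern, with memcpy standing in for __copy_from_user():

#include <errno.h>
#include <string.h>

int demo_copy_bounded(void *dst, size_t dst_size,
		      const void *src, size_t count)
{
	if (count > dst_size)		/* the added bound */
		return -EFAULT;
	memcpy(dst, src, count);	/* models __copy_from_user() */
	return 0;
}
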
23497diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
23498index ecffca1..95c4d13 100644
23499--- a/arch/x86/kernel/traps.c
23500+++ b/arch/x86/kernel/traps.c
23501@@ -68,12 +68,6 @@
23502 #include <asm/setup.h>
23503
23504 asmlinkage int system_call(void);
23505-
23506-/*
23507- * The IDT has to be page-aligned to simplify the Pentium
23508- * F0 0F bug workaround.
23509- */
23510-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
23511 #endif
23512
23513 DECLARE_BITMAP(used_vectors, NR_VECTORS);
23514@@ -106,11 +100,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
23515 }
23516
23517 static int __kprobes
23518-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
23519+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
23520 struct pt_regs *regs, long error_code)
23521 {
23522 #ifdef CONFIG_X86_32
23523- if (regs->flags & X86_VM_MASK) {
23524+ if (v8086_mode(regs)) {
23525 /*
23526 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
23527 * On nmi (interrupt 2), do_trap should not be called.
23528@@ -123,12 +117,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
23529 return -1;
23530 }
23531 #endif
23532- if (!user_mode(regs)) {
23533+ if (!user_mode_novm(regs)) {
23534 if (!fixup_exception(regs)) {
23535 tsk->thread.error_code = error_code;
23536 tsk->thread.trap_nr = trapnr;
23537+
23538+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23539+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
23540+ str = "PAX: suspicious stack segment fault";
23541+#endif
23542+
23543 die(str, regs, error_code);
23544 }
23545+
23546+#ifdef CONFIG_PAX_REFCOUNT
23547+ if (trapnr == 4)
23548+ pax_report_refcount_overflow(regs);
23549+#endif
23550+
23551 return 0;
23552 }
23553
23554@@ -136,7 +142,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
23555 }
23556
23557 static void __kprobes
23558-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
23559+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
23560 long error_code, siginfo_t *info)
23561 {
23562 struct task_struct *tsk = current;
23563@@ -160,7 +166,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
23564 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
23565 printk_ratelimit()) {
23566 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
23567- tsk->comm, tsk->pid, str,
23568+ tsk->comm, task_pid_nr(tsk), str,
23569 regs->ip, regs->sp, error_code);
23570 print_vma_addr(" in ", regs->ip);
23571 pr_cont("\n");
23572@@ -266,7 +272,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
23573 conditional_sti(regs);
23574
23575 #ifdef CONFIG_X86_32
23576- if (regs->flags & X86_VM_MASK) {
23577+ if (v8086_mode(regs)) {
23578 local_irq_enable();
23579 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
23580 goto exit;
23581@@ -274,18 +280,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
23582 #endif
23583
23584 tsk = current;
23585- if (!user_mode(regs)) {
23586+ if (!user_mode_novm(regs)) {
23587 if (fixup_exception(regs))
23588 goto exit;
23589
23590 tsk->thread.error_code = error_code;
23591 tsk->thread.trap_nr = X86_TRAP_GP;
23592 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
23593- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
23594+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
23595+
23596+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23597+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
23598+ die("PAX: suspicious general protection fault", regs, error_code);
23599+ else
23600+#endif
23601+
23602 die("general protection fault", regs, error_code);
23603+ }
23604 goto exit;
23605 }
23606
23607+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23608+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
23609+ struct mm_struct *mm = tsk->mm;
23610+ unsigned long limit;
23611+
23612+ down_write(&mm->mmap_sem);
23613+ limit = mm->context.user_cs_limit;
23614+ if (limit < TASK_SIZE) {
23615+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
23616+ up_write(&mm->mmap_sem);
23617+ return;
23618+ }
23619+ up_write(&mm->mmap_sem);
23620+ }
23621+#endif
23622+
23623 tsk->thread.error_code = error_code;
23624 tsk->thread.trap_nr = X86_TRAP_GP;
23625
23626@@ -440,7 +470,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
23627 /* It's safe to allow irq's after DR6 has been saved */
23628 preempt_conditional_sti(regs);
23629
23630- if (regs->flags & X86_VM_MASK) {
23631+ if (v8086_mode(regs)) {
23632 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
23633 X86_TRAP_DB);
23634 preempt_conditional_cli(regs);
23635@@ -455,7 +485,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
23636 * We already checked v86 mode above, so we can check for kernel mode
23637 * by just checking the CPL of CS.
23638 */
23639- if ((dr6 & DR_STEP) && !user_mode(regs)) {
23640+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
23641 tsk->thread.debugreg6 &= ~DR_STEP;
23642 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
23643 regs->flags &= ~X86_EFLAGS_TF;
23644@@ -487,7 +517,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
23645 return;
23646 conditional_sti(regs);
23647
23648- if (!user_mode_vm(regs))
23649+ if (!user_mode(regs))
23650 {
23651 if (!fixup_exception(regs)) {
23652 task->thread.error_code = error_code;
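
Several traps.c hunks above reclassify kernel-mode faults as suspicious when %cs holds one of the kernel code selectors, since under KERNEXEC those should never fault this way. A small sketch of the selector test (the selector values are illustrative, not the real GDT layout):

#include <stdbool.h>

#define DEMO_KERNEL_CS		0x60	/* illustrative selectors */
#define DEMO_KERNEXEC_KERNEL_CS	0x68

bool demo_is_kernel_text_cs(unsigned long cs)
{
	cs &= 0xffff;			/* low 16 bits carry the selector */
	return cs == DEMO_KERNEL_CS || cs == DEMO_KERNEXEC_KERNEL_CS;
}
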
23653diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
23654index c71025b..b117501 100644
23655--- a/arch/x86/kernel/uprobes.c
23656+++ b/arch/x86/kernel/uprobes.c
23657@@ -629,7 +629,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
23658 int ret = NOTIFY_DONE;
23659
23660 /* We are only interested in userspace traps */
23661- if (regs && !user_mode_vm(regs))
23662+ if (regs && !user_mode(regs))
23663 return NOTIFY_DONE;
23664
23665 switch (val) {
23666diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
23667index b9242ba..50c5edd 100644
23668--- a/arch/x86/kernel/verify_cpu.S
23669+++ b/arch/x86/kernel/verify_cpu.S
23670@@ -20,6 +20,7 @@
23671 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
23672 * arch/x86/kernel/trampoline_64.S: secondary processor verification
23673 * arch/x86/kernel/head_32.S: processor startup
23674+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
23675 *
23676 * verify_cpu, returns the status of longmode and SSE in register %eax.
23677 * 0: Success 1: Failure
23678diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
23679index 1dfe69c..a3df6f6 100644
23680--- a/arch/x86/kernel/vm86_32.c
23681+++ b/arch/x86/kernel/vm86_32.c
23682@@ -43,6 +43,7 @@
23683 #include <linux/ptrace.h>
23684 #include <linux/audit.h>
23685 #include <linux/stddef.h>
23686+#include <linux/grsecurity.h>
23687
23688 #include <asm/uaccess.h>
23689 #include <asm/io.h>
23690@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
23691 do_exit(SIGSEGV);
23692 }
23693
23694- tss = &per_cpu(init_tss, get_cpu());
23695+ tss = init_tss + get_cpu();
23696 current->thread.sp0 = current->thread.saved_sp0;
23697 current->thread.sysenter_cs = __KERNEL_CS;
23698 load_sp0(tss, &current->thread);
23699@@ -212,6 +213,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
23700 struct task_struct *tsk;
23701 int tmp, ret = -EPERM;
23702
23703+#ifdef CONFIG_GRKERNSEC_VM86
23704+ if (!capable(CAP_SYS_RAWIO)) {
23705+ gr_handle_vm86();
23706+ goto out;
23707+ }
23708+#endif
23709+
23710 tsk = current;
23711 if (tsk->thread.saved_sp0)
23712 goto out;
23713@@ -242,6 +250,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
23714 int tmp, ret;
23715 struct vm86plus_struct __user *v86;
23716
23717+#ifdef CONFIG_GRKERNSEC_VM86
23718+ if (!capable(CAP_SYS_RAWIO)) {
23719+ gr_handle_vm86();
23720+ ret = -EPERM;
23721+ goto out;
23722+ }
23723+#endif
23724+
23725 tsk = current;
23726 switch (cmd) {
23727 case VM86_REQUEST_IRQ:
23728@@ -328,7 +344,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
23729 tsk->thread.saved_fs = info->regs32->fs;
23730 tsk->thread.saved_gs = get_user_gs(info->regs32);
23731
23732- tss = &per_cpu(init_tss, get_cpu());
23733+ tss = init_tss + get_cpu();
23734 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
23735 if (cpu_has_sep)
23736 tsk->thread.sysenter_cs = 0;
23737@@ -535,7 +551,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
23738 goto cannot_handle;
23739 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
23740 goto cannot_handle;
23741- intr_ptr = (unsigned long __user *) (i << 2);
23742+ intr_ptr = (__force unsigned long __user *) (i << 2);
23743 if (get_user(segoffs, intr_ptr))
23744 goto cannot_handle;
23745 if ((segoffs >> 16) == BIOSSEG)
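
Both vm86 entry points above gain the same gate: without CAP_SYS_RAWIO the call is logged and refused. A hedged model of the shape, where demo_capable() and demo_audit() stand in for capable() and gr_handle_vm86():

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

bool demo_capable(void) { return false; }	/* stand-in for capable() */
void demo_audit(void) { fprintf(stderr, "vm86 use denied\n"); }

int demo_vm86_entry(void)
{
	if (!demo_capable()) {
		demo_audit();
		return -EPERM;
	}
	/* real vm86 setup would continue here */
	return 0;
}
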
23746diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
23747index 22a1530..8fbaaad 100644
23748--- a/arch/x86/kernel/vmlinux.lds.S
23749+++ b/arch/x86/kernel/vmlinux.lds.S
23750@@ -26,6 +26,13 @@
23751 #include <asm/page_types.h>
23752 #include <asm/cache.h>
23753 #include <asm/boot.h>
23754+#include <asm/segment.h>
23755+
23756+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23757+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
23758+#else
23759+#define __KERNEL_TEXT_OFFSET 0
23760+#endif
23761
23762 #undef i386 /* in case the preprocessor is a 32bit one */
23763
23764@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
23765
23766 PHDRS {
23767 text PT_LOAD FLAGS(5); /* R_E */
23768+#ifdef CONFIG_X86_32
23769+ module PT_LOAD FLAGS(5); /* R_E */
23770+#endif
23771+#ifdef CONFIG_XEN
23772+ rodata PT_LOAD FLAGS(5); /* R_E */
23773+#else
23774+ rodata PT_LOAD FLAGS(4); /* R__ */
23775+#endif
23776 data PT_LOAD FLAGS(6); /* RW_ */
23777-#ifdef CONFIG_X86_64
23778+ init.begin PT_LOAD FLAGS(6); /* RW_ */
23779 #ifdef CONFIG_SMP
23780 percpu PT_LOAD FLAGS(6); /* RW_ */
23781 #endif
23782+ text.init PT_LOAD FLAGS(5); /* R_E */
23783+ text.exit PT_LOAD FLAGS(5); /* R_E */
23784 init PT_LOAD FLAGS(7); /* RWE */
23785-#endif
23786 note PT_NOTE FLAGS(0); /* ___ */
23787 }
23788
23789 SECTIONS
23790 {
23791 #ifdef CONFIG_X86_32
23792- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
23793- phys_startup_32 = startup_32 - LOAD_OFFSET;
23794+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
23795 #else
23796- . = __START_KERNEL;
23797- phys_startup_64 = startup_64 - LOAD_OFFSET;
23798+ . = __START_KERNEL;
23799 #endif
23800
23801 /* Text and read-only data */
23802- .text : AT(ADDR(.text) - LOAD_OFFSET) {
23803- _text = .;
23804+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
23805 /* bootstrapping code */
23806+#ifdef CONFIG_X86_32
23807+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
23808+#else
23809+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
23810+#endif
23811+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
23812+ _text = .;
23813 HEAD_TEXT
23814 #ifdef CONFIG_X86_32
23815 . = ALIGN(PAGE_SIZE);
23816@@ -108,13 +128,48 @@ SECTIONS
23817 IRQENTRY_TEXT
23818 *(.fixup)
23819 *(.gnu.warning)
23820- /* End of text section */
23821- _etext = .;
23822 } :text = 0x9090
23823
23824- NOTES :text :note
23825+ . += __KERNEL_TEXT_OFFSET;
23826
23827- EXCEPTION_TABLE(16) :text = 0x9090
23828+#ifdef CONFIG_X86_32
23829+ . = ALIGN(PAGE_SIZE);
23830+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
23831+
23832+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
23833+ MODULES_EXEC_VADDR = .;
23834+ BYTE(0)
23835+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
23836+ . = ALIGN(HPAGE_SIZE) - 1;
23837+ MODULES_EXEC_END = .;
23838+#endif
23839+
23840+ } :module
23841+#endif
23842+
23843+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
23844+ /* End of text section */
23845+ BYTE(0)
23846+ _etext = . - __KERNEL_TEXT_OFFSET;
23847+ }
23848+
23849+#ifdef CONFIG_X86_32
23850+ . = ALIGN(PAGE_SIZE);
23851+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
23852+ *(.idt)
23853+ . = ALIGN(PAGE_SIZE);
23854+ *(.empty_zero_page)
23855+ *(.initial_pg_fixmap)
23856+ *(.initial_pg_pmd)
23857+ *(.initial_page_table)
23858+ *(.swapper_pg_dir)
23859+ } :rodata
23860+#endif
23861+
23862+ . = ALIGN(PAGE_SIZE);
23863+ NOTES :rodata :note
23864+
23865+ EXCEPTION_TABLE(16) :rodata
23866
23867 #if defined(CONFIG_DEBUG_RODATA)
23868 /* .text should occupy whole number of pages */
23869@@ -126,16 +181,20 @@ SECTIONS
23870
23871 /* Data */
23872 .data : AT(ADDR(.data) - LOAD_OFFSET) {
23873+
23874+#ifdef CONFIG_PAX_KERNEXEC
23875+ . = ALIGN(HPAGE_SIZE);
23876+#else
23877+ . = ALIGN(PAGE_SIZE);
23878+#endif
23879+
23880 /* Start of data section */
23881 _sdata = .;
23882
23883 /* init_task */
23884 INIT_TASK_DATA(THREAD_SIZE)
23885
23886-#ifdef CONFIG_X86_32
23887- /* 32 bit has nosave before _edata */
23888 NOSAVE_DATA
23889-#endif
23890
23891 PAGE_ALIGNED_DATA(PAGE_SIZE)
23892
23893@@ -176,12 +235,19 @@ SECTIONS
23894 #endif /* CONFIG_X86_64 */
23895
23896 /* Init code and data - will be freed after init */
23897- . = ALIGN(PAGE_SIZE);
23898 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
23899+ BYTE(0)
23900+
23901+#ifdef CONFIG_PAX_KERNEXEC
23902+ . = ALIGN(HPAGE_SIZE);
23903+#else
23904+ . = ALIGN(PAGE_SIZE);
23905+#endif
23906+
23907 __init_begin = .; /* paired with __init_end */
23908- }
23909+ } :init.begin
23910
23911-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
23912+#ifdef CONFIG_SMP
23913 /*
23914 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
23915 * output PHDR, so the next output section - .init.text - should
23916@@ -190,12 +256,27 @@ SECTIONS
23917 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
23918 #endif
23919
23920- INIT_TEXT_SECTION(PAGE_SIZE)
23921-#ifdef CONFIG_X86_64
23922- :init
23923-#endif
23924+ . = ALIGN(PAGE_SIZE);
23925+ init_begin = .;
23926+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
23927+ VMLINUX_SYMBOL(_sinittext) = .;
23928+ INIT_TEXT
23929+ VMLINUX_SYMBOL(_einittext) = .;
23930+ . = ALIGN(PAGE_SIZE);
23931+ } :text.init
23932
23933- INIT_DATA_SECTION(16)
23934+ /*
23935+	 * .exit.text is discarded at runtime, not link time, to deal with
23936+ * references from .altinstructions and .eh_frame
23937+ */
23938+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
23939+ EXIT_TEXT
23940+ . = ALIGN(16);
23941+ } :text.exit
23942+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
23943+
23944+ . = ALIGN(PAGE_SIZE);
23945+ INIT_DATA_SECTION(16) :init
23946
23947 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
23948 __x86_cpu_dev_start = .;
23949@@ -257,19 +338,12 @@ SECTIONS
23950 }
23951
23952 . = ALIGN(8);
23953- /*
23954- * .exit.text is discard at runtime, not link time, to deal with
23955- * references from .altinstructions and .eh_frame
23956- */
23957- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
23958- EXIT_TEXT
23959- }
23960
23961 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
23962 EXIT_DATA
23963 }
23964
23965-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
23966+#ifndef CONFIG_SMP
23967 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
23968 #endif
23969
23970@@ -288,16 +362,10 @@ SECTIONS
23971 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
23972 __smp_locks = .;
23973 *(.smp_locks)
23974- . = ALIGN(PAGE_SIZE);
23975 __smp_locks_end = .;
23976+ . = ALIGN(PAGE_SIZE);
23977 }
23978
23979-#ifdef CONFIG_X86_64
23980- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
23981- NOSAVE_DATA
23982- }
23983-#endif
23984-
23985 /* BSS */
23986 . = ALIGN(PAGE_SIZE);
23987 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
23988@@ -313,6 +381,7 @@ SECTIONS
23989 __brk_base = .;
23990 . += 64 * 1024; /* 64k alignment slop space */
23991 *(.brk_reservation) /* areas brk users have reserved */
23992+ . = ALIGN(HPAGE_SIZE);
23993 __brk_limit = .;
23994 }
23995
23996@@ -339,13 +408,12 @@ SECTIONS
23997 * for the boot processor.
23998 */
23999 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
24000-INIT_PER_CPU(gdt_page);
24001 INIT_PER_CPU(irq_stack_union);
24002
24003 /*
24004 * Build-time check on the image size:
24005 */
24006-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
24007+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
24008 "kernel image bigger than KERNEL_IMAGE_SIZE");
24009
24010 #ifdef CONFIG_SMP
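
The linker-script hunks above repeatedly round boundaries up with ALIGN(HPAGE_SIZE) so KERNEXEC can give text, rodata and data distinct large-page permissions. The rounding itself is plain power-of-two arithmetic; a one-function sketch (the 2 MiB size is illustrative):

#define DEMO_HPAGE_SIZE (2UL * 1024 * 1024)	/* illustrative */

unsigned long demo_align_up(unsigned long x, unsigned long a)
{
	return (x + a - 1) & ~(a - 1);	/* a must be a power of two */
}
/* e.g. demo_align_up(0x123456, DEMO_HPAGE_SIZE) == 0x200000 */
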
24011diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
24012index 9a907a6..f83f921 100644
24013--- a/arch/x86/kernel/vsyscall_64.c
24014+++ b/arch/x86/kernel/vsyscall_64.c
24015@@ -56,15 +56,13 @@
24016 DEFINE_VVAR(int, vgetcpu_mode);
24017 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
24018
24019-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
24020+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
24021
24022 static int __init vsyscall_setup(char *str)
24023 {
24024 if (str) {
24025 if (!strcmp("emulate", str))
24026 vsyscall_mode = EMULATE;
24027- else if (!strcmp("native", str))
24028- vsyscall_mode = NATIVE;
24029 else if (!strcmp("none", str))
24030 vsyscall_mode = NONE;
24031 else
24032@@ -323,8 +321,7 @@ do_ret:
24033 return true;
24034
24035 sigsegv:
24036- force_sig(SIGSEGV, current);
24037- return true;
24038+ do_group_exit(SIGKILL);
24039 }
24040
24041 /*
24042@@ -377,10 +374,7 @@ void __init map_vsyscall(void)
24043 extern char __vvar_page;
24044 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
24045
24046- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
24047- vsyscall_mode == NATIVE
24048- ? PAGE_KERNEL_VSYSCALL
24049- : PAGE_KERNEL_VVAR);
24050+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
24051 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
24052 (unsigned long)VSYSCALL_START);
24053
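
After the hunk above, vsyscall=native is gone and only emulate/none parse. A direct userspace model of the reduced option parser:

#include <string.h>

enum demo_vsyscall_mode { DEMO_EMULATE, DEMO_NONE };

int demo_vsyscall_setup(const char *str, enum demo_vsyscall_mode *mode)
{
	if (!str)
		return -1;
	if (!strcmp(str, "emulate"))
		*mode = DEMO_EMULATE;
	else if (!strcmp(str, "none"))
		*mode = DEMO_NONE;
	else
		return -1;
	return 0;
}
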
24054diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
24055index 1330dd1..d220b99 100644
24056--- a/arch/x86/kernel/x8664_ksyms_64.c
24057+++ b/arch/x86/kernel/x8664_ksyms_64.c
24058@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
24059 EXPORT_SYMBOL(copy_user_generic_unrolled);
24060 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
24061 EXPORT_SYMBOL(__copy_user_nocache);
24062-EXPORT_SYMBOL(_copy_from_user);
24063-EXPORT_SYMBOL(_copy_to_user);
24064
24065 EXPORT_SYMBOL(copy_page);
24066 EXPORT_SYMBOL(clear_page);
24067diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
24068index 7a3d075..6cb373d 100644
24069--- a/arch/x86/kernel/x86_init.c
24070+++ b/arch/x86/kernel/x86_init.c
24071@@ -88,7 +88,7 @@ struct x86_init_ops x86_init __initdata = {
24072 },
24073 };
24074
24075-struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
24076+struct x86_cpuinit_ops x86_cpuinit __cpuinitconst = {
24077 .early_percpu_clock_init = x86_init_noop,
24078 .setup_percpu_clockev = setup_secondary_APIC_clock,
24079 };
24080@@ -96,7 +96,7 @@ struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
24081 static void default_nmi_init(void) { };
24082 static int default_i8042_detect(void) { return 1; };
24083
24084-struct x86_platform_ops x86_platform = {
24085+struct x86_platform_ops x86_platform __read_only = {
24086 .calibrate_tsc = native_calibrate_tsc,
24087 .get_wallclock = mach_get_cmos_time,
24088 .set_wallclock = mach_set_rtc_mmss,
24089@@ -110,14 +110,14 @@ struct x86_platform_ops x86_platform = {
24090 };
24091
24092 EXPORT_SYMBOL_GPL(x86_platform);
24093-struct x86_msi_ops x86_msi = {
24094+struct x86_msi_ops x86_msi __read_only = {
24095 .setup_msi_irqs = native_setup_msi_irqs,
24096 .teardown_msi_irq = native_teardown_msi_irq,
24097 .teardown_msi_irqs = default_teardown_msi_irqs,
24098 .restore_msi_irqs = default_restore_msi_irqs,
24099 };
24100
24101-struct x86_io_apic_ops x86_io_apic_ops = {
24102+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
24103 .init = native_io_apic_init_mappings,
24104 .read = native_io_apic_read,
24105 .write = native_io_apic_write,
24106diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
24107index ada87a3..afea76d 100644
24108--- a/arch/x86/kernel/xsave.c
24109+++ b/arch/x86/kernel/xsave.c
24110@@ -199,6 +199,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
24111 {
24112 int err;
24113
24114+ buf = (struct xsave_struct __user *)____m(buf);
24115 if (use_xsave())
24116 err = xsave_user(buf);
24117 else if (use_fxsr())
24118@@ -311,6 +312,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
24119 */
24120 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
24121 {
24122+ buf = (void __user *)____m(buf);
24123 if (use_xsave()) {
24124 if ((unsigned long)buf % 64 || fx_only) {
24125 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
24126diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
24127index a20ecb5..d0e2194 100644
24128--- a/arch/x86/kvm/cpuid.c
24129+++ b/arch/x86/kvm/cpuid.c
24130@@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
24131 struct kvm_cpuid2 *cpuid,
24132 struct kvm_cpuid_entry2 __user *entries)
24133 {
24134- int r;
24135+ int r, i;
24136
24137 r = -E2BIG;
24138 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
24139 goto out;
24140 r = -EFAULT;
24141- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
24142- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
24143+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
24144 goto out;
24145+ for (i = 0; i < cpuid->nent; ++i) {
24146+ struct kvm_cpuid_entry2 cpuid_entry;
24147+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
24148+ goto out;
24149+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
24150+ }
24151 vcpu->arch.cpuid_nent = cpuid->nent;
24152 kvm_apic_set_version(vcpu);
24153 kvm_x86_ops->cpuid_update(vcpu);
24154@@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
24155 struct kvm_cpuid2 *cpuid,
24156 struct kvm_cpuid_entry2 __user *entries)
24157 {
24158- int r;
24159+ int r, i;
24160
24161 r = -E2BIG;
24162 if (cpuid->nent < vcpu->arch.cpuid_nent)
24163 goto out;
24164 r = -EFAULT;
24165- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
24166- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
24167+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
24168 goto out;
24169+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
24170+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
24171+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
24172+ goto out;
24173+ }
24174 return 0;
24175
24176 out:
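
The cpuid ioctl hunks above replace one large copy_from_user()/copy_to_user() with an access_ok() check plus a fixed-size copy per entry, so the multiplied length never feeds a single unbounded copy. A hedged userspace model, with memcpy standing in for __copy_from_user() and an illustrative struct and limit:

#include <errno.h>
#include <string.h>

struct demo_entry { unsigned int function, index, flags; };

#define DEMO_MAX_ENTRIES 80	/* models KVM_MAX_CPUID_ENTRIES */

int demo_copy_entries(struct demo_entry *dst,
		      const struct demo_entry *user_src, unsigned int nent)
{
	unsigned int i;

	if (nent > DEMO_MAX_ENTRIES)
		return -E2BIG;
	for (i = 0; i < nent; i++) {
		struct demo_entry tmp;

		memcpy(&tmp, &user_src[i], sizeof(tmp));
		dst[i] = tmp;
	}
	return 0;
}
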
24177diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
24178index a27e763..54bfe43 100644
24179--- a/arch/x86/kvm/emulate.c
24180+++ b/arch/x86/kvm/emulate.c
24181@@ -292,6 +292,7 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
24182
24183 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
24184 do { \
24185+ unsigned long _tmp; \
24186 __asm__ __volatile__ ( \
24187 _PRE_EFLAGS("0", "4", "2") \
24188 _op _suffix " %"_x"3,%1; " \
24189@@ -306,8 +307,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
24190 /* Raw emulation: instruction has two explicit operands. */
24191 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
24192 do { \
24193- unsigned long _tmp; \
24194- \
24195 switch ((ctxt)->dst.bytes) { \
24196 case 2: \
24197 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
24198@@ -323,7 +322,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
24199
24200 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
24201 do { \
24202- unsigned long _tmp; \
24203 switch ((ctxt)->dst.bytes) { \
24204 case 1: \
24205 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
24206diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
24207index 9392f52..0e56d77 100644
24208--- a/arch/x86/kvm/lapic.c
24209+++ b/arch/x86/kvm/lapic.c
24210@@ -55,7 +55,7 @@
24211 #define APIC_BUS_CYCLE_NS 1
24212
24213 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
24214-#define apic_debug(fmt, arg...)
24215+#define apic_debug(fmt, arg...) do {} while (0)
24216
24217 #define APIC_LVT_NUM 6
24218 /* 14 is the version for Xeon and Pentium 8.4.8*/
24219diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
24220index 891eb6d..e027900 100644
24221--- a/arch/x86/kvm/paging_tmpl.h
24222+++ b/arch/x86/kvm/paging_tmpl.h
24223@@ -208,7 +208,7 @@ retry_walk:
24224 if (unlikely(kvm_is_error_hva(host_addr)))
24225 goto error;
24226
24227- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
24228+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
24229 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
24230 goto error;
24231 walker->ptep_user[walker->level - 1] = ptep_user;
24232diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
24233index d29d3cd..ec9d522 100644
24234--- a/arch/x86/kvm/svm.c
24235+++ b/arch/x86/kvm/svm.c
24236@@ -3507,7 +3507,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
24237 int cpu = raw_smp_processor_id();
24238
24239 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
24240+
24241+ pax_open_kernel();
24242 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
24243+ pax_close_kernel();
24244+
24245 load_TR_desc();
24246 }
24247
24248@@ -3881,6 +3885,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
24249 #endif
24250 #endif
24251
24252+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
24253+ __set_fs(current_thread_info()->addr_limit);
24254+#endif
24255+
24256 reload_tss(vcpu);
24257
24258 local_irq_disable();
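
reload_tss() here (and again in vmx.c below) wraps a single write to a now read-only descriptor in pax_open_kernel()/pax_close_kernel(). A hedged model of the bracket; on x86 the open/close would typically toggle CR0.WP, which demo_set_wp() only pretends to do:

void demo_set_wp(int on) { (void)on; /* would toggle CR0.WP */ }

struct demo_desc { unsigned type : 4; };

void demo_reload_tss(struct demo_desc *tss_desc)
{
	demo_set_wp(0);		/* pax_open_kernel() */
	tss_desc->type = 9;	/* available 32/64-bit TSS */
	demo_set_wp(1);		/* pax_close_kernel() */
}
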
24259diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
24260index 9120ae1..238abc0 100644
24261--- a/arch/x86/kvm/vmx.c
24262+++ b/arch/x86/kvm/vmx.c
24263@@ -1370,7 +1370,11 @@ static void reload_tss(void)
24264 struct desc_struct *descs;
24265
24266 descs = (void *)gdt->address;
24267+
24268+ pax_open_kernel();
24269 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
24270+ pax_close_kernel();
24271+
24272 load_TR_desc();
24273 }
24274
24275@@ -1594,6 +1598,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
24276 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
24277 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
24278
24279+#ifdef CONFIG_PAX_PER_CPU_PGD
24280+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
24281+#endif
24282+
24283 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
24284 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
24285 vmx->loaded_vmcs->cpu = cpu;
24286@@ -2738,8 +2746,11 @@ static __init int hardware_setup(void)
24287 if (!cpu_has_vmx_flexpriority())
24288 flexpriority_enabled = 0;
24289
24290- if (!cpu_has_vmx_tpr_shadow())
24291- kvm_x86_ops->update_cr8_intercept = NULL;
24292+ if (!cpu_has_vmx_tpr_shadow()) {
24293+ pax_open_kernel();
24294+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
24295+ pax_close_kernel();
24296+ }
24297
24298 if (enable_ept && !cpu_has_vmx_ept_2m_page())
24299 kvm_disable_largepages();
24300@@ -3782,7 +3793,10 @@ static void vmx_set_constant_host_state(void)
24301
24302 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
24303 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
24304+
24305+#ifndef CONFIG_PAX_PER_CPU_PGD
24306 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
24307+#endif
24308
24309 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
24310 #ifdef CONFIG_X86_64
24311@@ -3803,7 +3817,7 @@ static void vmx_set_constant_host_state(void)
24312 native_store_idt(&dt);
24313 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
24314
24315- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
24316+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
24317
24318 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
24319 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
24320@@ -6355,6 +6369,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
24321 "jmp 2f \n\t"
24322 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
24323 "2: "
24324+
24325+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
24326+ "ljmp %[cs],$3f\n\t"
24327+ "3: "
24328+#endif
24329+
24330 /* Save guest registers, load host registers, keep flags */
24331 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
24332 "pop %0 \n\t"
24333@@ -6407,6 +6427,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
24334 #endif
24335 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
24336 [wordsize]"i"(sizeof(ulong))
24337+
24338+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
24339+ ,[cs]"i"(__KERNEL_CS)
24340+#endif
24341+
24342 : "cc", "memory"
24343 #ifdef CONFIG_X86_64
24344 , "rax", "rbx", "rdi", "rsi"
24345@@ -6420,7 +6445,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
24346 if (debugctlmsr)
24347 update_debugctlmsr(debugctlmsr);
24348
24349-#ifndef CONFIG_X86_64
24350+#ifdef CONFIG_X86_32
24351 /*
24352 * The sysexit path does not restore ds/es, so we must set them to
24353 * a reasonable value ourselves.
24354@@ -6429,8 +6454,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
24355 * may be executed in interrupt context, which saves and restore segments
24356 * around it, nullifying its effect.
24357 */
24358- loadsegment(ds, __USER_DS);
24359- loadsegment(es, __USER_DS);
24360+ loadsegment(ds, __KERNEL_DS);
24361+ loadsegment(es, __KERNEL_DS);
24362+ loadsegment(ss, __KERNEL_DS);
24363+
24364+#ifdef CONFIG_PAX_KERNEXEC
24365+ loadsegment(fs, __KERNEL_PERCPU);
24366+#endif
24367+
24368+#ifdef CONFIG_PAX_MEMORY_UDEREF
24369+ __set_fs(current_thread_info()->addr_limit);
24370+#endif
24371+
24372 #endif
24373
24374 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
24375diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
24376index c243b81..b692af3 100644
24377--- a/arch/x86/kvm/x86.c
24378+++ b/arch/x86/kvm/x86.c
24379@@ -1408,10 +1408,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
24380 unsigned long flags, this_tsc_khz;
24381 struct kvm_vcpu_arch *vcpu = &v->arch;
24382 struct kvm_arch *ka = &v->kvm->arch;
24383- void *shared_kaddr;
24384 s64 kernel_ns, max_kernel_ns;
24385 u64 tsc_timestamp, host_tsc;
24386- struct pvclock_vcpu_time_info *guest_hv_clock;
24387+ struct pvclock_vcpu_time_info guest_hv_clock;
24388 u8 pvclock_flags;
24389 bool use_master_clock;
24390
24391@@ -1465,7 +1464,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
24392
24393 local_irq_restore(flags);
24394
24395- if (!vcpu->time_page)
24396+ if (!vcpu->pv_time_enabled)
24397 return 0;
24398
24399 /*
24400@@ -1527,12 +1526,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
24401 */
24402 vcpu->hv_clock.version += 2;
24403
24404- shared_kaddr = kmap_atomic(vcpu->time_page);
24405-
24406- guest_hv_clock = shared_kaddr + vcpu->time_offset;
24407+ if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
24408+ &guest_hv_clock, sizeof(guest_hv_clock))))
24409+ return 0;
24410
24411 /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
24412- pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
24413+ pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
24414
24415 if (vcpu->pvclock_set_guest_stopped_request) {
24416 pvclock_flags |= PVCLOCK_GUEST_STOPPED;
24417@@ -1545,12 +1544,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
24418
24419 vcpu->hv_clock.flags = pvclock_flags;
24420
24421- memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
24422- sizeof(vcpu->hv_clock));
24423-
24424- kunmap_atomic(shared_kaddr);
24425-
24426- mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
24427+ kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
24428+ &vcpu->hv_clock,
24429+ sizeof(vcpu->hv_clock));
24430 return 0;
24431 }
24432
24433@@ -1692,8 +1688,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
24434 {
24435 struct kvm *kvm = vcpu->kvm;
24436 int lm = is_long_mode(vcpu);
24437- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
24438- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
24439+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
24440+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
24441 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
24442 : kvm->arch.xen_hvm_config.blob_size_32;
24443 u32 page_num = data & ~PAGE_MASK;
24444@@ -1839,10 +1835,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
24445
24446 static void kvmclock_reset(struct kvm_vcpu *vcpu)
24447 {
24448- if (vcpu->arch.time_page) {
24449- kvm_release_page_dirty(vcpu->arch.time_page);
24450- vcpu->arch.time_page = NULL;
24451- }
24452+ vcpu->arch.pv_time_enabled = false;
24453 }
24454
24455 static void accumulate_steal_time(struct kvm_vcpu *vcpu)
24456@@ -1948,6 +1941,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
24457 break;
24458 case MSR_KVM_SYSTEM_TIME_NEW:
24459 case MSR_KVM_SYSTEM_TIME: {
24460+ u64 gpa_offset;
24461 kvmclock_reset(vcpu);
24462
24463 vcpu->arch.time = data;
24464@@ -1957,14 +1951,17 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
24465 if (!(data & 1))
24466 break;
24467
24468- /* ...but clean it before doing the actual write */
24469- vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
24470+ gpa_offset = data & ~(PAGE_MASK | 1);
24471
24472- vcpu->arch.time_page =
24473- gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
24474+ /* Check that the address is 32-byte aligned. */
24475+ if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
24476+ break;
24477
24478- if (is_error_page(vcpu->arch.time_page))
24479- vcpu->arch.time_page = NULL;
24480+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
24481+ &vcpu->arch.pv_time, data & ~1ULL))
24482+ vcpu->arch.pv_time_enabled = false;
24483+ else
24484+ vcpu->arch.pv_time_enabled = true;
24485
24486 break;
24487 }
24488@@ -2571,6 +2568,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
24489 if (n < msr_list.nmsrs)
24490 goto out;
24491 r = -EFAULT;
24492+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
24493+ goto out;
24494 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
24495 num_msrs_to_save * sizeof(u32)))
24496 goto out;
24497@@ -2700,7 +2699,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
24498 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
24499 struct kvm_interrupt *irq)
24500 {
24501- if (irq->irq < 0 || irq->irq >= KVM_NR_INTERRUPTS)
24502+ if (irq->irq >= KVM_NR_INTERRUPTS)
24503 return -EINVAL;
24504 if (irqchip_in_kernel(vcpu->kvm))
24505 return -ENXIO;
24506@@ -2967,7 +2966,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
24507 */
24508 static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
24509 {
24510- if (!vcpu->arch.time_page)
24511+ if (!vcpu->arch.pv_time_enabled)
24512 return -EINVAL;
24513 vcpu->arch.pvclock_set_guest_stopped_request = true;
24514 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
24515@@ -5213,7 +5212,7 @@ static struct notifier_block pvclock_gtod_notifier = {
24516 };
24517 #endif
24518
24519-int kvm_arch_init(void *opaque)
24520+int kvm_arch_init(const void *opaque)
24521 {
24522 int r;
24523 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
24524@@ -6661,6 +6660,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
24525 goto fail_free_wbinvd_dirty_mask;
24526
24527 vcpu->arch.ia32_tsc_adjust_msr = 0x0;
24528+ vcpu->arch.pv_time_enabled = false;
24529 kvm_async_pf_hash_reset(vcpu);
24530 kvm_pmu_init(vcpu);
24531
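
The MSR_KVM_SYSTEM_TIME hunk above rejects guest physical addresses whose in-page offset is not a multiple of sizeof(struct pvclock_vcpu_time_info), so the structure cannot straddle a page. A hedged model of that test; 4 KiB pages and the 32-byte size are assumptions matching the hunk:

#include <stdbool.h>

#define DEMO_PAGE_MASK		(~0xfffULL)	/* 4 KiB pages assumed */
#define DEMO_PVCLOCK_SIZE	32ULL	/* sizeof(pvclock_vcpu_time_info) */

bool demo_kvmclock_gpa_ok(unsigned long long data)
{
	/* bit 0 of `data` is the enable flag, so mask it out too */
	unsigned long long off = data & ~(DEMO_PAGE_MASK | 1);

	return (off & (DEMO_PVCLOCK_SIZE - 1)) == 0;
}
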
24532diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
24533index df4176c..23ce092 100644
24534--- a/arch/x86/lguest/boot.c
24535+++ b/arch/x86/lguest/boot.c
24536@@ -1200,9 +1200,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
24537 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
24538 * Launcher to reboot us.
24539 */
24540-static void lguest_restart(char *reason)
24541+static __noreturn void lguest_restart(char *reason)
24542 {
24543 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
24544+ BUG();
24545 }
24546
24547 /*G:050
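
lguest_restart() above is annotated __noreturn and gains a BUG() so the promise holds even if the hypercall comes back. The same pattern in plain C, with abort() in place of BUG():

#include <stdlib.h>

void demo_hypercall_restart(void) { /* stand-in for hcall() */ }

__attribute__((noreturn)) void demo_restart(void)
{
	demo_hypercall_restart();
	abort();	/* models BUG(): never fall through */
}
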
24548diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
24549index 00933d5..3a64af9 100644
24550--- a/arch/x86/lib/atomic64_386_32.S
24551+++ b/arch/x86/lib/atomic64_386_32.S
24552@@ -48,6 +48,10 @@ BEGIN(read)
24553 movl (v), %eax
24554 movl 4(v), %edx
24555 RET_ENDP
24556+BEGIN(read_unchecked)
24557+ movl (v), %eax
24558+ movl 4(v), %edx
24559+RET_ENDP
24560 #undef v
24561
24562 #define v %esi
24563@@ -55,6 +59,10 @@ BEGIN(set)
24564 movl %ebx, (v)
24565 movl %ecx, 4(v)
24566 RET_ENDP
24567+BEGIN(set_unchecked)
24568+ movl %ebx, (v)
24569+ movl %ecx, 4(v)
24570+RET_ENDP
24571 #undef v
24572
24573 #define v %esi
24574@@ -70,6 +78,20 @@ RET_ENDP
24575 BEGIN(add)
24576 addl %eax, (v)
24577 adcl %edx, 4(v)
24578+
24579+#ifdef CONFIG_PAX_REFCOUNT
24580+ jno 0f
24581+ subl %eax, (v)
24582+ sbbl %edx, 4(v)
24583+ int $4
24584+0:
24585+ _ASM_EXTABLE(0b, 0b)
24586+#endif
24587+
24588+RET_ENDP
24589+BEGIN(add_unchecked)
24590+ addl %eax, (v)
24591+ adcl %edx, 4(v)
24592 RET_ENDP
24593 #undef v
24594
24595@@ -77,6 +99,24 @@ RET_ENDP
24596 BEGIN(add_return)
24597 addl (v), %eax
24598 adcl 4(v), %edx
24599+
24600+#ifdef CONFIG_PAX_REFCOUNT
24601+ into
24602+1234:
24603+ _ASM_EXTABLE(1234b, 2f)
24604+#endif
24605+
24606+ movl %eax, (v)
24607+ movl %edx, 4(v)
24608+
24609+#ifdef CONFIG_PAX_REFCOUNT
24610+2:
24611+#endif
24612+
24613+RET_ENDP
24614+BEGIN(add_return_unchecked)
24615+ addl (v), %eax
24616+ adcl 4(v), %edx
24617 movl %eax, (v)
24618 movl %edx, 4(v)
24619 RET_ENDP
24620@@ -86,6 +126,20 @@ RET_ENDP
24621 BEGIN(sub)
24622 subl %eax, (v)
24623 sbbl %edx, 4(v)
24624+
24625+#ifdef CONFIG_PAX_REFCOUNT
24626+ jno 0f
24627+ addl %eax, (v)
24628+ adcl %edx, 4(v)
24629+ int $4
24630+0:
24631+ _ASM_EXTABLE(0b, 0b)
24632+#endif
24633+
24634+RET_ENDP
24635+BEGIN(sub_unchecked)
24636+ subl %eax, (v)
24637+ sbbl %edx, 4(v)
24638 RET_ENDP
24639 #undef v
24640
24641@@ -96,6 +150,27 @@ BEGIN(sub_return)
24642 sbbl $0, %edx
24643 addl (v), %eax
24644 adcl 4(v), %edx
24645+
24646+#ifdef CONFIG_PAX_REFCOUNT
24647+ into
24648+1234:
24649+ _ASM_EXTABLE(1234b, 2f)
24650+#endif
24651+
24652+ movl %eax, (v)
24653+ movl %edx, 4(v)
24654+
24655+#ifdef CONFIG_PAX_REFCOUNT
24656+2:
24657+#endif
24658+
24659+RET_ENDP
24660+BEGIN(sub_return_unchecked)
24661+ negl %edx
24662+ negl %eax
24663+ sbbl $0, %edx
24664+ addl (v), %eax
24665+ adcl 4(v), %edx
24666 movl %eax, (v)
24667 movl %edx, 4(v)
24668 RET_ENDP
24669@@ -105,6 +180,20 @@ RET_ENDP
24670 BEGIN(inc)
24671 addl $1, (v)
24672 adcl $0, 4(v)
24673+
24674+#ifdef CONFIG_PAX_REFCOUNT
24675+ jno 0f
24676+ subl $1, (v)
24677+ sbbl $0, 4(v)
24678+ int $4
24679+0:
24680+ _ASM_EXTABLE(0b, 0b)
24681+#endif
24682+
24683+RET_ENDP
24684+BEGIN(inc_unchecked)
24685+ addl $1, (v)
24686+ adcl $0, 4(v)
24687 RET_ENDP
24688 #undef v
24689
24690@@ -114,6 +203,26 @@ BEGIN(inc_return)
24691 movl 4(v), %edx
24692 addl $1, %eax
24693 adcl $0, %edx
24694+
24695+#ifdef CONFIG_PAX_REFCOUNT
24696+ into
24697+1234:
24698+ _ASM_EXTABLE(1234b, 2f)
24699+#endif
24700+
24701+ movl %eax, (v)
24702+ movl %edx, 4(v)
24703+
24704+#ifdef CONFIG_PAX_REFCOUNT
24705+2:
24706+#endif
24707+
24708+RET_ENDP
24709+BEGIN(inc_return_unchecked)
24710+ movl (v), %eax
24711+ movl 4(v), %edx
24712+ addl $1, %eax
24713+ adcl $0, %edx
24714 movl %eax, (v)
24715 movl %edx, 4(v)
24716 RET_ENDP
24717@@ -123,6 +232,20 @@ RET_ENDP
24718 BEGIN(dec)
24719 subl $1, (v)
24720 sbbl $0, 4(v)
24721+
24722+#ifdef CONFIG_PAX_REFCOUNT
24723+ jno 0f
24724+ addl $1, (v)
24725+ adcl $0, 4(v)
24726+ int $4
24727+0:
24728+ _ASM_EXTABLE(0b, 0b)
24729+#endif
24730+
24731+RET_ENDP
24732+BEGIN(dec_unchecked)
24733+ subl $1, (v)
24734+ sbbl $0, 4(v)
24735 RET_ENDP
24736 #undef v
24737
24738@@ -132,6 +255,26 @@ BEGIN(dec_return)
24739 movl 4(v), %edx
24740 subl $1, %eax
24741 sbbl $0, %edx
24742+
24743+#ifdef CONFIG_PAX_REFCOUNT
24744+ into
24745+1234:
24746+ _ASM_EXTABLE(1234b, 2f)
24747+#endif
24748+
24749+ movl %eax, (v)
24750+ movl %edx, 4(v)
24751+
24752+#ifdef CONFIG_PAX_REFCOUNT
24753+2:
24754+#endif
24755+
24756+RET_ENDP
24757+BEGIN(dec_return_unchecked)
24758+ movl (v), %eax
24759+ movl 4(v), %edx
24760+ subl $1, %eax
24761+ sbbl $0, %edx
24762 movl %eax, (v)
24763 movl %edx, 4(v)
24764 RET_ENDP
24765@@ -143,6 +286,13 @@ BEGIN(add_unless)
24766 adcl %edx, %edi
24767 addl (v), %eax
24768 adcl 4(v), %edx
24769+
24770+#ifdef CONFIG_PAX_REFCOUNT
24771+ into
24772+1234:
24773+ _ASM_EXTABLE(1234b, 2f)
24774+#endif
24775+
24776 cmpl %eax, %ecx
24777 je 3f
24778 1:
24779@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
24780 1:
24781 addl $1, %eax
24782 adcl $0, %edx
24783+
24784+#ifdef CONFIG_PAX_REFCOUNT
24785+ into
24786+1234:
24787+ _ASM_EXTABLE(1234b, 2f)
24788+#endif
24789+
24790 movl %eax, (v)
24791 movl %edx, 4(v)
24792 movl $1, %eax
24793@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
24794 movl 4(v), %edx
24795 subl $1, %eax
24796 sbbl $0, %edx
24797+
24798+#ifdef CONFIG_PAX_REFCOUNT
24799+ into
24800+1234:
24801+ _ASM_EXTABLE(1234b, 1f)
24802+#endif
24803+
24804 js 1f
24805 movl %eax, (v)
24806 movl %edx, 4(v)
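
In the *_return paths above, `into` raises the overflow trap and the exception-table entry resumes execution past the write-back, so an overflowed value is reported but never stored. A hedged single-threaded C model of that control flow (the real code is the asm above):

#include <stdint.h>

int64_t demo_add_return_checked(int64_t *v, int64_t delta)
{
	int64_t new;

	if (__builtin_add_overflow(*v, delta, &new))
		return *v;	/* overflow: report path, skip the store */
	*v = new;		/* normal path: write back */
	return new;
}
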
24807diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
24808index f5cc9eb..51fa319 100644
24809--- a/arch/x86/lib/atomic64_cx8_32.S
24810+++ b/arch/x86/lib/atomic64_cx8_32.S
24811@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
24812 CFI_STARTPROC
24813
24814 read64 %ecx
24815+ pax_force_retaddr
24816 ret
24817 CFI_ENDPROC
24818 ENDPROC(atomic64_read_cx8)
24819
24820+ENTRY(atomic64_read_unchecked_cx8)
24821+ CFI_STARTPROC
24822+
24823+ read64 %ecx
24824+ pax_force_retaddr
24825+ ret
24826+ CFI_ENDPROC
24827+ENDPROC(atomic64_read_unchecked_cx8)
24828+
24829 ENTRY(atomic64_set_cx8)
24830 CFI_STARTPROC
24831
24832@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
24833 cmpxchg8b (%esi)
24834 jne 1b
24835
24836+ pax_force_retaddr
24837 ret
24838 CFI_ENDPROC
24839 ENDPROC(atomic64_set_cx8)
24840
24841+ENTRY(atomic64_set_unchecked_cx8)
24842+ CFI_STARTPROC
24843+
24844+1:
24845+/* we don't need LOCK_PREFIX since aligned 64-bit writes
24846+ * are atomic on 586 and newer */
24847+ cmpxchg8b (%esi)
24848+ jne 1b
24849+
24850+ pax_force_retaddr
24851+ ret
24852+ CFI_ENDPROC
24853+ENDPROC(atomic64_set_unchecked_cx8)
24854+
24855 ENTRY(atomic64_xchg_cx8)
24856 CFI_STARTPROC
24857
24858@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
24859 cmpxchg8b (%esi)
24860 jne 1b
24861
24862+ pax_force_retaddr
24863 ret
24864 CFI_ENDPROC
24865 ENDPROC(atomic64_xchg_cx8)
24866
24867-.macro addsub_return func ins insc
24868-ENTRY(atomic64_\func\()_return_cx8)
24869+.macro addsub_return func ins insc unchecked=""
24870+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
24871 CFI_STARTPROC
24872 SAVE ebp
24873 SAVE ebx
24874@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
24875 movl %edx, %ecx
24876 \ins\()l %esi, %ebx
24877 \insc\()l %edi, %ecx
24878+
24879+.ifb \unchecked
24880+#ifdef CONFIG_PAX_REFCOUNT
24881+ into
24882+2:
24883+ _ASM_EXTABLE(2b, 3f)
24884+#endif
24885+.endif
24886+
24887 LOCK_PREFIX
24888 cmpxchg8b (%ebp)
24889 jne 1b
24890-
24891-10:
24892 movl %ebx, %eax
24893 movl %ecx, %edx
24894+
24895+.ifb \unchecked
24896+#ifdef CONFIG_PAX_REFCOUNT
24897+3:
24898+#endif
24899+.endif
24900+
24901 RESTORE edi
24902 RESTORE esi
24903 RESTORE ebx
24904 RESTORE ebp
24905+ pax_force_retaddr
24906 ret
24907 CFI_ENDPROC
24908-ENDPROC(atomic64_\func\()_return_cx8)
24909+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
24910 .endm
24911
24912 addsub_return add add adc
24913 addsub_return sub sub sbb
24914+addsub_return add add adc _unchecked
24915+addsub_return sub sub sbb _unchecked
24916
24917-.macro incdec_return func ins insc
24918-ENTRY(atomic64_\func\()_return_cx8)
24919+.macro incdec_return func ins insc unchecked=""
24920+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
24921 CFI_STARTPROC
24922 SAVE ebx
24923
24924@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
24925 movl %edx, %ecx
24926 \ins\()l $1, %ebx
24927 \insc\()l $0, %ecx
24928+
24929+.ifb \unchecked
24930+#ifdef CONFIG_PAX_REFCOUNT
24931+ into
24932+2:
24933+ _ASM_EXTABLE(2b, 3f)
24934+#endif
24935+.endif
24936+
24937 LOCK_PREFIX
24938 cmpxchg8b (%esi)
24939 jne 1b
24940
24941-10:
24942 movl %ebx, %eax
24943 movl %ecx, %edx
24944+
24945+.ifb \unchecked
24946+#ifdef CONFIG_PAX_REFCOUNT
24947+3:
24948+#endif
24949+.endif
24950+
24951 RESTORE ebx
24952+ pax_force_retaddr
24953 ret
24954 CFI_ENDPROC
24955-ENDPROC(atomic64_\func\()_return_cx8)
24956+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
24957 .endm
24958
24959 incdec_return inc add adc
24960 incdec_return dec sub sbb
24961+incdec_return inc add adc _unchecked
24962+incdec_return dec sub sbb _unchecked
24963
24964 ENTRY(atomic64_dec_if_positive_cx8)
24965 CFI_STARTPROC
24966@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
24967 movl %edx, %ecx
24968 subl $1, %ebx
24969 sbb $0, %ecx
24970+
24971+#ifdef CONFIG_PAX_REFCOUNT
24972+ into
24973+1234:
24974+ _ASM_EXTABLE(1234b, 2f)
24975+#endif
24976+
24977 js 2f
24978 LOCK_PREFIX
24979 cmpxchg8b (%esi)
24980@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
24981 movl %ebx, %eax
24982 movl %ecx, %edx
24983 RESTORE ebx
24984+ pax_force_retaddr
24985 ret
24986 CFI_ENDPROC
24987 ENDPROC(atomic64_dec_if_positive_cx8)
24988@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
24989 movl %edx, %ecx
24990 addl %ebp, %ebx
24991 adcl %edi, %ecx
24992+
24993+#ifdef CONFIG_PAX_REFCOUNT
24994+ into
24995+1234:
24996+ _ASM_EXTABLE(1234b, 3f)
24997+#endif
24998+
24999 LOCK_PREFIX
25000 cmpxchg8b (%esi)
25001 jne 1b
25002@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
25003 CFI_ADJUST_CFA_OFFSET -8
25004 RESTORE ebx
25005 RESTORE ebp
25006+ pax_force_retaddr
25007 ret
25008 4:
25009 cmpl %edx, 4(%esp)
25010@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
25011 xorl %ecx, %ecx
25012 addl $1, %ebx
25013 adcl %edx, %ecx
25014+
25015+#ifdef CONFIG_PAX_REFCOUNT
25016+ into
25017+1234:
25018+ _ASM_EXTABLE(1234b, 3f)
25019+#endif
25020+
25021 LOCK_PREFIX
25022 cmpxchg8b (%esi)
25023 jne 1b
25024@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
25025 movl $1, %eax
25026 3:
25027 RESTORE ebx
25028+ pax_force_retaddr
25029 ret
25030 CFI_ENDPROC
25031 ENDPROC(atomic64_inc_not_zero_cx8)
25032diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
25033index 2af5df3..62b1a5a 100644
25034--- a/arch/x86/lib/checksum_32.S
25035+++ b/arch/x86/lib/checksum_32.S
25036@@ -29,7 +29,8 @@
25037 #include <asm/dwarf2.h>
25038 #include <asm/errno.h>
25039 #include <asm/asm.h>
25040-
25041+#include <asm/segment.h>
25042+
25043 /*
25044 * computes a partial checksum, e.g. for TCP/UDP fragments
25045 */
25046@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
25047
25048 #define ARGBASE 16
25049 #define FP 12
25050-
25051-ENTRY(csum_partial_copy_generic)
25052+
25053+ENTRY(csum_partial_copy_generic_to_user)
25054 CFI_STARTPROC
25055+
25056+#ifdef CONFIG_PAX_MEMORY_UDEREF
25057+ pushl_cfi %gs
25058+ popl_cfi %es
25059+ jmp csum_partial_copy_generic
25060+#endif
25061+
25062+ENTRY(csum_partial_copy_generic_from_user)
25063+
25064+#ifdef CONFIG_PAX_MEMORY_UDEREF
25065+ pushl_cfi %gs
25066+ popl_cfi %ds
25067+#endif
25068+
25069+ENTRY(csum_partial_copy_generic)
25070 subl $4,%esp
25071 CFI_ADJUST_CFA_OFFSET 4
25072 pushl_cfi %edi
25073@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
25074 jmp 4f
25075 SRC(1: movw (%esi), %bx )
25076 addl $2, %esi
25077-DST( movw %bx, (%edi) )
25078+DST( movw %bx, %es:(%edi) )
25079 addl $2, %edi
25080 addw %bx, %ax
25081 adcl $0, %eax
25082@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
25083 SRC(1: movl (%esi), %ebx )
25084 SRC( movl 4(%esi), %edx )
25085 adcl %ebx, %eax
25086-DST( movl %ebx, (%edi) )
25087+DST( movl %ebx, %es:(%edi) )
25088 adcl %edx, %eax
25089-DST( movl %edx, 4(%edi) )
25090+DST( movl %edx, %es:4(%edi) )
25091
25092 SRC( movl 8(%esi), %ebx )
25093 SRC( movl 12(%esi), %edx )
25094 adcl %ebx, %eax
25095-DST( movl %ebx, 8(%edi) )
25096+DST( movl %ebx, %es:8(%edi) )
25097 adcl %edx, %eax
25098-DST( movl %edx, 12(%edi) )
25099+DST( movl %edx, %es:12(%edi) )
25100
25101 SRC( movl 16(%esi), %ebx )
25102 SRC( movl 20(%esi), %edx )
25103 adcl %ebx, %eax
25104-DST( movl %ebx, 16(%edi) )
25105+DST( movl %ebx, %es:16(%edi) )
25106 adcl %edx, %eax
25107-DST( movl %edx, 20(%edi) )
25108+DST( movl %edx, %es:20(%edi) )
25109
25110 SRC( movl 24(%esi), %ebx )
25111 SRC( movl 28(%esi), %edx )
25112 adcl %ebx, %eax
25113-DST( movl %ebx, 24(%edi) )
25114+DST( movl %ebx, %es:24(%edi) )
25115 adcl %edx, %eax
25116-DST( movl %edx, 28(%edi) )
25117+DST( movl %edx, %es:28(%edi) )
25118
25119 lea 32(%esi), %esi
25120 lea 32(%edi), %edi
25121@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
25122 shrl $2, %edx # This clears CF
25123 SRC(3: movl (%esi), %ebx )
25124 adcl %ebx, %eax
25125-DST( movl %ebx, (%edi) )
25126+DST( movl %ebx, %es:(%edi) )
25127 lea 4(%esi), %esi
25128 lea 4(%edi), %edi
25129 dec %edx
25130@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
25131 jb 5f
25132 SRC( movw (%esi), %cx )
25133 leal 2(%esi), %esi
25134-DST( movw %cx, (%edi) )
25135+DST( movw %cx, %es:(%edi) )
25136 leal 2(%edi), %edi
25137 je 6f
25138 shll $16,%ecx
25139 SRC(5: movb (%esi), %cl )
25140-DST( movb %cl, (%edi) )
25141+DST( movb %cl, %es:(%edi) )
25142 6: addl %ecx, %eax
25143 adcl $0, %eax
25144 7:
25145@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
25146
25147 6001:
25148 movl ARGBASE+20(%esp), %ebx # src_err_ptr
25149- movl $-EFAULT, (%ebx)
25150+ movl $-EFAULT, %ss:(%ebx)
25151
25152 # zero the complete destination - computing the rest
25153 # is too much work
25154@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
25155
25156 6002:
25157 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
25158- movl $-EFAULT,(%ebx)
25159+ movl $-EFAULT,%ss:(%ebx)
25160 jmp 5000b
25161
25162 .previous
25163
25164+ pushl_cfi %ss
25165+ popl_cfi %ds
25166+ pushl_cfi %ss
25167+ popl_cfi %es
25168 popl_cfi %ebx
25169 CFI_RESTORE ebx
25170 popl_cfi %esi
25171@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
25172 popl_cfi %ecx # equivalent to addl $4,%esp
25173 ret
25174 CFI_ENDPROC
25175-ENDPROC(csum_partial_copy_generic)
25176+ENDPROC(csum_partial_copy_generic_to_user)
25177
25178 #else
25179
25180 /* Version for PentiumII/PPro */
25181
25182 #define ROUND1(x) \
25183+ nop; nop; nop; \
25184 SRC(movl x(%esi), %ebx ) ; \
25185 addl %ebx, %eax ; \
25186- DST(movl %ebx, x(%edi) ) ;
25187+ DST(movl %ebx, %es:x(%edi)) ;
25188
25189 #define ROUND(x) \
25190+ nop; nop; nop; \
25191 SRC(movl x(%esi), %ebx ) ; \
25192 adcl %ebx, %eax ; \
25193- DST(movl %ebx, x(%edi) ) ;
25194+ DST(movl %ebx, %es:x(%edi)) ;
25195
25196 #define ARGBASE 12
25197-
25198-ENTRY(csum_partial_copy_generic)
25199+
25200+ENTRY(csum_partial_copy_generic_to_user)
25201 CFI_STARTPROC
25202+
25203+#ifdef CONFIG_PAX_MEMORY_UDEREF
25204+ pushl_cfi %gs
25205+ popl_cfi %es
25206+ jmp csum_partial_copy_generic
25207+#endif
25208+
25209+ENTRY(csum_partial_copy_generic_from_user)
25210+
25211+#ifdef CONFIG_PAX_MEMORY_UDEREF
25212+ pushl_cfi %gs
25213+ popl_cfi %ds
25214+#endif
25215+
25216+ENTRY(csum_partial_copy_generic)
25217 pushl_cfi %ebx
25218 CFI_REL_OFFSET ebx, 0
25219 pushl_cfi %edi
25220@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
25221 subl %ebx, %edi
25222 lea -1(%esi),%edx
25223 andl $-32,%edx
25224- lea 3f(%ebx,%ebx), %ebx
25225+ lea 3f(%ebx,%ebx,2), %ebx
25226 testl %esi, %esi
25227 jmp *%ebx
25228 1: addl $64,%esi
25229@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
25230 jb 5f
25231 SRC( movw (%esi), %dx )
25232 leal 2(%esi), %esi
25233-DST( movw %dx, (%edi) )
25234+DST( movw %dx, %es:(%edi) )
25235 leal 2(%edi), %edi
25236 je 6f
25237 shll $16,%edx
25238 5:
25239 SRC( movb (%esi), %dl )
25240-DST( movb %dl, (%edi) )
25241+DST( movb %dl, %es:(%edi) )
25242 6: addl %edx, %eax
25243 adcl $0, %eax
25244 7:
25245 .section .fixup, "ax"
25246 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
25247- movl $-EFAULT, (%ebx)
25248+ movl $-EFAULT, %ss:(%ebx)
25249 # zero the complete destination (computing the rest is too much work)
25250 movl ARGBASE+8(%esp),%edi # dst
25251 movl ARGBASE+12(%esp),%ecx # len
25252@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
25253 rep; stosb
25254 jmp 7b
25255 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
25256- movl $-EFAULT, (%ebx)
25257+ movl $-EFAULT, %ss:(%ebx)
25258 jmp 7b
25259 .previous
25260
25261+#ifdef CONFIG_PAX_MEMORY_UDEREF
25262+ pushl_cfi %ss
25263+ popl_cfi %ds
25264+ pushl_cfi %ss
25265+ popl_cfi %es
25266+#endif
25267+
25268 popl_cfi %esi
25269 CFI_RESTORE esi
25270 popl_cfi %edi
25271@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
25272 CFI_RESTORE ebx
25273 ret
25274 CFI_ENDPROC
25275-ENDPROC(csum_partial_copy_generic)
25276+ENDPROC(csum_partial_copy_generic_to_user)
25277
25278 #undef ROUND
25279 #undef ROUND1
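
The asm above folds a 16-bit ones'-complement checksum while copying, with UDEREF selecting the user segment for whichever side may fault. A hedged C model of just the checksum-and-copy core, little-endian and without the fault handling:

#include <stdint.h>
#include <string.h>

uint32_t demo_csum_copy(uint8_t *dst, const uint8_t *src,
			size_t len, uint32_t sum)
{
	size_t i = 0;

	for (; i + 1 < len; i += 2) {
		sum += (uint16_t)(src[i] | (src[i + 1] << 8));
		sum = (sum & 0xffff) + (sum >> 16);	/* fold carry */
	}
	if (i < len)
		sum += src[i];		/* trailing odd byte */
	sum = (sum & 0xffff) + (sum >> 16);
	memcpy(dst, src, len);
	return sum;
}
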
25280diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
25281index f2145cf..cea889d 100644
25282--- a/arch/x86/lib/clear_page_64.S
25283+++ b/arch/x86/lib/clear_page_64.S
25284@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
25285 movl $4096/8,%ecx
25286 xorl %eax,%eax
25287 rep stosq
25288+ pax_force_retaddr
25289 ret
25290 CFI_ENDPROC
25291 ENDPROC(clear_page_c)
25292@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
25293 movl $4096,%ecx
25294 xorl %eax,%eax
25295 rep stosb
25296+ pax_force_retaddr
25297 ret
25298 CFI_ENDPROC
25299 ENDPROC(clear_page_c_e)
25300@@ -43,6 +45,7 @@ ENTRY(clear_page)
25301 leaq 64(%rdi),%rdi
25302 jnz .Lloop
25303 nop
25304+ pax_force_retaddr
25305 ret
25306 CFI_ENDPROC
25307 .Lclear_page_end:
25308@@ -58,7 +61,7 @@ ENDPROC(clear_page)
25309
25310 #include <asm/cpufeature.h>
25311
25312- .section .altinstr_replacement,"ax"
25313+ .section .altinstr_replacement,"a"
25314 1: .byte 0xeb /* jmp <disp8> */
25315 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
25316 2: .byte 0xeb /* jmp <disp8> */
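
[Two transformations debut here and recur through the rest of these lib/ hunks. First, pax_force_retaddr is emitted before every ret: under the KERNEXEC gcc-plugin instrumentation it masks the saved return address so a value overwritten with a userland pointer can no longer be returned to. Second, .altinstr_replacement drops its executable flag ("ax" becomes "a"): the replacement bytes are only ever copied over their target by the alternatives patcher, never executed in place, so they need not be mapped executable. A rough C-level rendition of the masking, assuming the plugin's "bts" variant (the real macro is a single instruction, roughly "btsq $63,(%rsp)", emitted in assembly just before ret):

/* illustrative only */
static inline void pax_force_retaddr_sketch(unsigned long *retaddr_slot)
{
	/* kernel return addresses already have bit 63 set; a slot that
	 * was overwritten with a userland pointer becomes non-canonical
	 * and faults on "ret" instead of transferring control */
	*retaddr_slot |= 1UL << 63;
}
]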
25317diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
25318index 1e572c5..2a162cd 100644
25319--- a/arch/x86/lib/cmpxchg16b_emu.S
25320+++ b/arch/x86/lib/cmpxchg16b_emu.S
25321@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
25322
25323 popf
25324 mov $1, %al
25325+ pax_force_retaddr
25326 ret
25327
25328 not_same:
25329 popf
25330 xor %al,%al
25331+ pax_force_retaddr
25332 ret
25333
25334 CFI_ENDPROC
25335diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
25336index 176cca6..1166c50 100644
25337--- a/arch/x86/lib/copy_page_64.S
25338+++ b/arch/x86/lib/copy_page_64.S
25339@@ -9,6 +9,7 @@ copy_page_rep:
25340 CFI_STARTPROC
25341 movl $4096/8, %ecx
25342 rep movsq
25343+ pax_force_retaddr
25344 ret
25345 CFI_ENDPROC
25346 ENDPROC(copy_page_rep)
25347@@ -20,12 +21,14 @@ ENDPROC(copy_page_rep)
25348
25349 ENTRY(copy_page)
25350 CFI_STARTPROC
25351- subq $2*8, %rsp
25352- CFI_ADJUST_CFA_OFFSET 2*8
25353+ subq $3*8, %rsp
25354+ CFI_ADJUST_CFA_OFFSET 3*8
25355 movq %rbx, (%rsp)
25356 CFI_REL_OFFSET rbx, 0
25357 movq %r12, 1*8(%rsp)
25358 CFI_REL_OFFSET r12, 1*8
25359+ movq %r13, 2*8(%rsp)
25360+ CFI_REL_OFFSET r13, 2*8
25361
25362 movl $(4096/64)-5, %ecx
25363 .p2align 4
25364@@ -36,7 +39,7 @@ ENTRY(copy_page)
25365 movq 0x8*2(%rsi), %rdx
25366 movq 0x8*3(%rsi), %r8
25367 movq 0x8*4(%rsi), %r9
25368- movq 0x8*5(%rsi), %r10
25369+ movq 0x8*5(%rsi), %r13
25370 movq 0x8*6(%rsi), %r11
25371 movq 0x8*7(%rsi), %r12
25372
25373@@ -47,7 +50,7 @@ ENTRY(copy_page)
25374 movq %rdx, 0x8*2(%rdi)
25375 movq %r8, 0x8*3(%rdi)
25376 movq %r9, 0x8*4(%rdi)
25377- movq %r10, 0x8*5(%rdi)
25378+ movq %r13, 0x8*5(%rdi)
25379 movq %r11, 0x8*6(%rdi)
25380 movq %r12, 0x8*7(%rdi)
25381
25382@@ -66,7 +69,7 @@ ENTRY(copy_page)
25383 movq 0x8*2(%rsi), %rdx
25384 movq 0x8*3(%rsi), %r8
25385 movq 0x8*4(%rsi), %r9
25386- movq 0x8*5(%rsi), %r10
25387+ movq 0x8*5(%rsi), %r13
25388 movq 0x8*6(%rsi), %r11
25389 movq 0x8*7(%rsi), %r12
25390
25391@@ -75,7 +78,7 @@ ENTRY(copy_page)
25392 movq %rdx, 0x8*2(%rdi)
25393 movq %r8, 0x8*3(%rdi)
25394 movq %r9, 0x8*4(%rdi)
25395- movq %r10, 0x8*5(%rdi)
25396+ movq %r13, 0x8*5(%rdi)
25397 movq %r11, 0x8*6(%rdi)
25398 movq %r12, 0x8*7(%rdi)
25399
25400@@ -87,8 +90,11 @@ ENTRY(copy_page)
25401 CFI_RESTORE rbx
25402 movq 1*8(%rsp), %r12
25403 CFI_RESTORE r12
25404- addq $2*8, %rsp
25405- CFI_ADJUST_CFA_OFFSET -2*8
25406+ movq 2*8(%rsp), %r13
25407+ CFI_RESTORE r13
25408+ addq $3*8, %rsp
25409+ CFI_ADJUST_CFA_OFFSET -3*8
25410+ pax_force_retaddr
25411 ret
25412 .Lcopy_page_end:
25413 CFI_ENDPROC
25414@@ -99,7 +105,7 @@ ENDPROC(copy_page)
25415
25416 #include <asm/cpufeature.h>
25417
25418- .section .altinstr_replacement,"ax"
25419+ .section .altinstr_replacement,"a"
25420 1: .byte 0xeb /* jmp <disp8> */
25421 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
25422 2:
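
[The %r10 -> %r13 substitution above (and the %r10 -> %rcx/%r11/%r9 renames in the memcpy/memmove/memset/msr-reg hunks below) is there because the KERNEXEC plugin's "or" variant reserves %r10 as the return-address mask register (it ORs %r10 into the saved return address), so hand-written assembly must stop using it as scratch. %r13 is callee-saved, hence the extra 8-byte spill slot and the matching CFI annotations. For reference, what the unrolled body computes, as a plain-C sketch:

/* 4096 bytes, eight 64-bit words per iteration, prefetching ahead
 * (prefetch distance illustrative) */
static void copy_page_sketch(unsigned long *to, const unsigned long *from)
{
	int i;

	for (i = 0; i < 4096 / 64; i++, to += 8, from += 8) {
		__builtin_prefetch(from + 8 * 5);
		to[0] = from[0]; to[1] = from[1];
		to[2] = from[2]; to[3] = from[3];
		to[4] = from[4]; to[5] = from[5];
		to[6] = from[6]; to[7] = from[7];
	}
}
]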
25423diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
25424index a30ca15..d25fab6 100644
25425--- a/arch/x86/lib/copy_user_64.S
25426+++ b/arch/x86/lib/copy_user_64.S
25427@@ -18,6 +18,7 @@
25428 #include <asm/alternative-asm.h>
25429 #include <asm/asm.h>
25430 #include <asm/smap.h>
25431+#include <asm/pgtable.h>
25432
25433 /*
25434 * By placing feature2 after feature1 in altinstructions section, we logically
25435@@ -31,7 +32,7 @@
25436 .byte 0xe9 /* 32bit jump */
25437 .long \orig-1f /* by default jump to orig */
25438 1:
25439- .section .altinstr_replacement,"ax"
25440+ .section .altinstr_replacement,"a"
25441 2: .byte 0xe9 /* near jump with 32bit immediate */
25442 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
25443 3: .byte 0xe9 /* near jump with 32bit immediate */
25444@@ -70,47 +71,20 @@
25445 #endif
25446 .endm
25447
25448-/* Standard copy_to_user with segment limit checking */
25449-ENTRY(_copy_to_user)
25450- CFI_STARTPROC
25451- GET_THREAD_INFO(%rax)
25452- movq %rdi,%rcx
25453- addq %rdx,%rcx
25454- jc bad_to_user
25455- cmpq TI_addr_limit(%rax),%rcx
25456- ja bad_to_user
25457- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
25458- copy_user_generic_unrolled,copy_user_generic_string, \
25459- copy_user_enhanced_fast_string
25460- CFI_ENDPROC
25461-ENDPROC(_copy_to_user)
25462-
25463-/* Standard copy_from_user with segment limit checking */
25464-ENTRY(_copy_from_user)
25465- CFI_STARTPROC
25466- GET_THREAD_INFO(%rax)
25467- movq %rsi,%rcx
25468- addq %rdx,%rcx
25469- jc bad_from_user
25470- cmpq TI_addr_limit(%rax),%rcx
25471- ja bad_from_user
25472- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
25473- copy_user_generic_unrolled,copy_user_generic_string, \
25474- copy_user_enhanced_fast_string
25475- CFI_ENDPROC
25476-ENDPROC(_copy_from_user)
25477-
25478 .section .fixup,"ax"
25479 /* must zero dest */
25480 ENTRY(bad_from_user)
25481 bad_from_user:
25482 CFI_STARTPROC
25483+ testl %edx,%edx
25484+ js bad_to_user
25485 movl %edx,%ecx
25486 xorl %eax,%eax
25487 rep
25488 stosb
25489 bad_to_user:
25490 movl %edx,%eax
25491+ pax_force_retaddr
25492 ret
25493 CFI_ENDPROC
25494 ENDPROC(bad_from_user)
25495@@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
25496 jz 17f
25497 1: movq (%rsi),%r8
25498 2: movq 1*8(%rsi),%r9
25499-3: movq 2*8(%rsi),%r10
25500+3: movq 2*8(%rsi),%rax
25501 4: movq 3*8(%rsi),%r11
25502 5: movq %r8,(%rdi)
25503 6: movq %r9,1*8(%rdi)
25504-7: movq %r10,2*8(%rdi)
25505+7: movq %rax,2*8(%rdi)
25506 8: movq %r11,3*8(%rdi)
25507 9: movq 4*8(%rsi),%r8
25508 10: movq 5*8(%rsi),%r9
25509-11: movq 6*8(%rsi),%r10
25510+11: movq 6*8(%rsi),%rax
25511 12: movq 7*8(%rsi),%r11
25512 13: movq %r8,4*8(%rdi)
25513 14: movq %r9,5*8(%rdi)
25514-15: movq %r10,6*8(%rdi)
25515+15: movq %rax,6*8(%rdi)
25516 16: movq %r11,7*8(%rdi)
25517 leaq 64(%rsi),%rsi
25518 leaq 64(%rdi),%rdi
25519@@ -180,6 +154,7 @@ ENTRY(copy_user_generic_unrolled)
25520 jnz 21b
25521 23: xor %eax,%eax
25522 ASM_CLAC
25523+ pax_force_retaddr
25524 ret
25525
25526 .section .fixup,"ax"
25527@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
25528 movsb
25529 4: xorl %eax,%eax
25530 ASM_CLAC
25531+ pax_force_retaddr
25532 ret
25533
25534 .section .fixup,"ax"
25535@@ -286,6 +262,7 @@ ENTRY(copy_user_enhanced_fast_string)
25536 movsb
25537 2: xorl %eax,%eax
25538 ASM_CLAC
25539+ pax_force_retaddr
25540 ret
25541
25542 .section .fixup,"ax"
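
[Beyond the usual pax_force_retaddr additions, this hunk removes the assembly _copy_to_user/_copy_from_user wrappers (their limit checks move into the C/header layer in other parts of the patch) and hardens bad_from_user: the new "testl %edx,%edx; js bad_to_user" refuses to zero the destination when the residual count has its sign bit set, which under the size_overflow instrumentation marks an overflowed length, so a bogus multi-gigabyte rep stosb can no longer be triggered from the fixup path. In C terms, roughly:

#include <string.h>

/* sketch of the hardened fault fixup: zero what was not copied, but
 * only when the residual count is sane; a "negative" count signals an
 * overflowed size and is returned without touching memory */
static long bad_from_user_sketch(void *dst, long remaining)
{
	if (remaining >= 0)		/* testl %edx,%edx; js bad_to_user */
		memset(dst, 0, remaining);
	return remaining;		/* bytes not copied */
}
]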
25543diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
25544index 6a4f43c..f5f9e26 100644
25545--- a/arch/x86/lib/copy_user_nocache_64.S
25546+++ b/arch/x86/lib/copy_user_nocache_64.S
25547@@ -8,6 +8,7 @@
25548
25549 #include <linux/linkage.h>
25550 #include <asm/dwarf2.h>
25551+#include <asm/alternative-asm.h>
25552
25553 #define FIX_ALIGNMENT 1
25554
25555@@ -16,6 +17,7 @@
25556 #include <asm/thread_info.h>
25557 #include <asm/asm.h>
25558 #include <asm/smap.h>
25559+#include <asm/pgtable.h>
25560
25561 .macro ALIGN_DESTINATION
25562 #ifdef FIX_ALIGNMENT
25563@@ -49,6 +51,15 @@
25564 */
25565 ENTRY(__copy_user_nocache)
25566 CFI_STARTPROC
25567+
25568+#ifdef CONFIG_PAX_MEMORY_UDEREF
25569+ mov $PAX_USER_SHADOW_BASE,%rcx
25570+ cmp %rcx,%rsi
25571+ jae 1f
25572+ add %rcx,%rsi
25573+1:
25574+#endif
25575+
25576 ASM_STAC
25577 cmpl $8,%edx
25578 jb 20f /* less then 8 bytes, go to byte copy loop */
25579@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
25580 jz 17f
25581 1: movq (%rsi),%r8
25582 2: movq 1*8(%rsi),%r9
25583-3: movq 2*8(%rsi),%r10
25584+3: movq 2*8(%rsi),%rax
25585 4: movq 3*8(%rsi),%r11
25586 5: movnti %r8,(%rdi)
25587 6: movnti %r9,1*8(%rdi)
25588-7: movnti %r10,2*8(%rdi)
25589+7: movnti %rax,2*8(%rdi)
25590 8: movnti %r11,3*8(%rdi)
25591 9: movq 4*8(%rsi),%r8
25592 10: movq 5*8(%rsi),%r9
25593-11: movq 6*8(%rsi),%r10
25594+11: movq 6*8(%rsi),%rax
25595 12: movq 7*8(%rsi),%r11
25596 13: movnti %r8,4*8(%rdi)
25597 14: movnti %r9,5*8(%rdi)
25598-15: movnti %r10,6*8(%rdi)
25599+15: movnti %rax,6*8(%rdi)
25600 16: movnti %r11,7*8(%rdi)
25601 leaq 64(%rsi),%rsi
25602 leaq 64(%rdi),%rdi
25603@@ -99,6 +110,7 @@ ENTRY(__copy_user_nocache)
25604 23: xorl %eax,%eax
25605 ASM_CLAC
25606 sfence
25607+ pax_force_retaddr
25608 ret
25609
25610 .section .fixup,"ax"
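
[The new prologue above is the amd64 face of PAX_MEMORY_UDEREF: userland is reachable from kernel mode only through a linearly shifted shadow mapping, so a raw user pointer below PAX_USER_SHADOW_BASE is relocated into that mapping before the copy, while a pointer that was already translated passes through unchanged. The same check as a minimal C sketch:

/* sketch, assuming the UDEREF/amd64 meaning of PAX_USER_SHADOW_BASE */
static inline unsigned long uderef_shadow_sketch(unsigned long uaddr)
{
	if (uaddr < PAX_USER_SHADOW_BASE)	/* cmp %rcx,%rsi; jae 1f */
		uaddr += PAX_USER_SHADOW_BASE;	/* add %rcx,%rsi */
	return uaddr;
}
]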
25611diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
25612index 2419d5f..953ee51 100644
25613--- a/arch/x86/lib/csum-copy_64.S
25614+++ b/arch/x86/lib/csum-copy_64.S
25615@@ -9,6 +9,7 @@
25616 #include <asm/dwarf2.h>
25617 #include <asm/errno.h>
25618 #include <asm/asm.h>
25619+#include <asm/alternative-asm.h>
25620
25621 /*
25622 * Checksum copy with exception handling.
25623@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
25624 CFI_RESTORE rbp
25625 addq $7*8, %rsp
25626 CFI_ADJUST_CFA_OFFSET -7*8
25627+ pax_force_retaddr 0, 1
25628 ret
25629 CFI_RESTORE_STATE
25630
25631diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
25632index 25b7ae8..169fafc 100644
25633--- a/arch/x86/lib/csum-wrappers_64.c
25634+++ b/arch/x86/lib/csum-wrappers_64.c
25635@@ -52,7 +52,7 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
25636 len -= 2;
25637 }
25638 }
25639- isum = csum_partial_copy_generic((__force const void *)src,
25640+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
25641 dst, len, isum, errp, NULL);
25642 if (unlikely(*errp))
25643 goto out_err;
25644@@ -105,7 +105,7 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
25645 }
25646
25647 *errp = 0;
25648- return csum_partial_copy_generic(src, (void __force *)dst,
25649+ return csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
25650 len, isum, NULL, errp);
25651 }
25652 EXPORT_SYMBOL(csum_partial_copy_to_user);
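
[The C wrappers get the matching treatment: ____m() is the C-side counterpart of the assembly shadow-base fixup just seen, and __force_kernel is this patch's sparse annotation for a deliberate user-to-kernel address-space cast. A hedged sketch of what ____m() is assumed to expand to (not quoted from this hunk):

#ifdef CONFIG_PAX_MEMORY_UDEREF
#define ____m_sketch(p) ({						\
	unsigned long __addr = (unsigned long)(p);			\
	if (__addr < PAX_USER_SHADOW_BASE)				\
		__addr += PAX_USER_SHADOW_BASE;				\
	(typeof(p))__addr;						\
})
#else
#define ____m_sketch(p) (p)
#endif
]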
25653diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
25654index 156b9c8..b144132 100644
25655--- a/arch/x86/lib/getuser.S
25656+++ b/arch/x86/lib/getuser.S
25657@@ -34,17 +34,40 @@
25658 #include <asm/thread_info.h>
25659 #include <asm/asm.h>
25660 #include <asm/smap.h>
25661+#include <asm/segment.h>
25662+#include <asm/pgtable.h>
25663+#include <asm/alternative-asm.h>
25664+
25665+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
25666+#define __copyuser_seg gs;
25667+#else
25668+#define __copyuser_seg
25669+#endif
25670
25671 .text
25672 ENTRY(__get_user_1)
25673 CFI_STARTPROC
25674+
25675+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
25676 GET_THREAD_INFO(%_ASM_DX)
25677 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
25678 jae bad_get_user
25679 ASM_STAC
25680-1: movzb (%_ASM_AX),%edx
25681+
25682+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25683+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
25684+ cmp %_ASM_DX,%_ASM_AX
25685+ jae 1234f
25686+ add %_ASM_DX,%_ASM_AX
25687+1234:
25688+#endif
25689+
25690+#endif
25691+
25692+1: __copyuser_seg movzb (%_ASM_AX),%edx
25693 xor %eax,%eax
25694 ASM_CLAC
25695+ pax_force_retaddr
25696 ret
25697 CFI_ENDPROC
25698 ENDPROC(__get_user_1)
25699@@ -52,14 +75,28 @@ ENDPROC(__get_user_1)
25700 ENTRY(__get_user_2)
25701 CFI_STARTPROC
25702 add $1,%_ASM_AX
25703+
25704+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
25705 jc bad_get_user
25706 GET_THREAD_INFO(%_ASM_DX)
25707 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
25708 jae bad_get_user
25709 ASM_STAC
25710-2: movzwl -1(%_ASM_AX),%edx
25711+
25712+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25713+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
25714+ cmp %_ASM_DX,%_ASM_AX
25715+ jae 1234f
25716+ add %_ASM_DX,%_ASM_AX
25717+1234:
25718+#endif
25719+
25720+#endif
25721+
25722+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
25723 xor %eax,%eax
25724 ASM_CLAC
25725+ pax_force_retaddr
25726 ret
25727 CFI_ENDPROC
25728 ENDPROC(__get_user_2)
25729@@ -67,14 +104,28 @@ ENDPROC(__get_user_2)
25730 ENTRY(__get_user_4)
25731 CFI_STARTPROC
25732 add $3,%_ASM_AX
25733+
25734+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
25735 jc bad_get_user
25736 GET_THREAD_INFO(%_ASM_DX)
25737 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
25738 jae bad_get_user
25739 ASM_STAC
25740-3: mov -3(%_ASM_AX),%edx
25741+
25742+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25743+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
25744+ cmp %_ASM_DX,%_ASM_AX
25745+ jae 1234f
25746+ add %_ASM_DX,%_ASM_AX
25747+1234:
25748+#endif
25749+
25750+#endif
25751+
25752+3: __copyuser_seg mov -3(%_ASM_AX),%edx
25753 xor %eax,%eax
25754 ASM_CLAC
25755+ pax_force_retaddr
25756 ret
25757 CFI_ENDPROC
25758 ENDPROC(__get_user_4)
25759@@ -87,10 +138,20 @@ ENTRY(__get_user_8)
25760 GET_THREAD_INFO(%_ASM_DX)
25761 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
25762 jae bad_get_user
25763+
25764+#ifdef CONFIG_PAX_MEMORY_UDEREF
25765+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
25766+ cmp %_ASM_DX,%_ASM_AX
25767+ jae 1234f
25768+ add %_ASM_DX,%_ASM_AX
25769+1234:
25770+#endif
25771+
25772 ASM_STAC
25773 4: movq -7(%_ASM_AX),%_ASM_DX
25774 xor %eax,%eax
25775 ASM_CLAC
25776+ pax_force_retaddr
25777 ret
25778 CFI_ENDPROC
25779 ENDPROC(__get_user_8)
25780@@ -101,6 +162,7 @@ bad_get_user:
25781 xor %edx,%edx
25782 mov $(-EFAULT),%_ASM_AX
25783 ASM_CLAC
25784+ pax_force_retaddr
25785 ret
25786 CFI_ENDPROC
25787 END(bad_get_user)
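
[getuser.S shows both UDEREF mechanisms side by side: on i386 the access itself is redirected through %gs (the userland segment) via the __copyuser_seg prefix, which is why the addr_limit comparison can be compiled out entirely, while on amd64 the pointer is shifted into the shadow mapping after the limit check, exactly as in __copy_user_nocache above. The numeric "1234" labels are just collision-free local labels for the injected branches. The transformed __get_user_1, approximately, in C (kernel context assumed; addr_limit as in the 3.8 thread_info):

static inline int get_user_1_sketch(unsigned char *dst, unsigned long uaddr)
{
#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
	if (uaddr >= current_thread_info()->addr_limit.seg)
		return -EFAULT;			/* jae bad_get_user */
#endif
#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
	if (uaddr < PAX_USER_SHADOW_BASE)
		uaddr += PAX_USER_SHADOW_BASE;
#endif
	*dst = *(const unsigned char *)uaddr;	/* gs-prefixed movzb on i386 */
	return 0;
}
]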
25788diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
25789index 54fcffe..7be149e 100644
25790--- a/arch/x86/lib/insn.c
25791+++ b/arch/x86/lib/insn.c
25792@@ -20,8 +20,10 @@
25793
25794 #ifdef __KERNEL__
25795 #include <linux/string.h>
25796+#include <asm/pgtable_types.h>
25797 #else
25798 #include <string.h>
25799+#define ktla_ktva(addr) addr
25800 #endif
25801 #include <asm/inat.h>
25802 #include <asm/insn.h>
25803@@ -53,8 +55,8 @@
25804 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
25805 {
25806 memset(insn, 0, sizeof(*insn));
25807- insn->kaddr = kaddr;
25808- insn->next_byte = kaddr;
25809+ insn->kaddr = ktla_ktva(kaddr);
25810+ insn->next_byte = ktla_ktva(kaddr);
25811 insn->x86_64 = x86_64 ? 1 : 0;
25812 insn->opnd_bytes = 4;
25813 if (x86_64)
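
[ktla_ktva ("kernel text linear address to virtual address") accounts for KERNEXEC on i386 mapping kernel text at a different address than the one code nominally executes at; the instruction decoder must read the bytes through the alias where they are actually readable. In userland builds the translation is an identity, as the #else branch shows. A sketch of the shape of the kernel-side definition (KERNEXEC_TEXT_DELTA below is a hypothetical stand-in for the real constant offset):

#ifdef CONFIG_PAX_KERNEXEC
#define ktla_ktva_sketch(addr) \
	((void *)((unsigned long)(addr) + KERNEXEC_TEXT_DELTA))
#else
#define ktla_ktva_sketch(addr)	(addr)
#endif
]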
25814diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
25815index 05a95e7..326f2fa 100644
25816--- a/arch/x86/lib/iomap_copy_64.S
25817+++ b/arch/x86/lib/iomap_copy_64.S
25818@@ -17,6 +17,7 @@
25819
25820 #include <linux/linkage.h>
25821 #include <asm/dwarf2.h>
25822+#include <asm/alternative-asm.h>
25823
25824 /*
25825 * override generic version in lib/iomap_copy.c
25826@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
25827 CFI_STARTPROC
25828 movl %edx,%ecx
25829 rep movsd
25830+ pax_force_retaddr
25831 ret
25832 CFI_ENDPROC
25833 ENDPROC(__iowrite32_copy)
25834diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
25835index 1c273be..da9cc0e 100644
25836--- a/arch/x86/lib/memcpy_64.S
25837+++ b/arch/x86/lib/memcpy_64.S
25838@@ -33,6 +33,7 @@
25839 rep movsq
25840 movl %edx, %ecx
25841 rep movsb
25842+ pax_force_retaddr
25843 ret
25844 .Lmemcpy_e:
25845 .previous
25846@@ -49,6 +50,7 @@
25847 movq %rdi, %rax
25848 movq %rdx, %rcx
25849 rep movsb
25850+ pax_force_retaddr
25851 ret
25852 .Lmemcpy_e_e:
25853 .previous
25854@@ -76,13 +78,13 @@ ENTRY(memcpy)
25855 */
25856 movq 0*8(%rsi), %r8
25857 movq 1*8(%rsi), %r9
25858- movq 2*8(%rsi), %r10
25859+ movq 2*8(%rsi), %rcx
25860 movq 3*8(%rsi), %r11
25861 leaq 4*8(%rsi), %rsi
25862
25863 movq %r8, 0*8(%rdi)
25864 movq %r9, 1*8(%rdi)
25865- movq %r10, 2*8(%rdi)
25866+ movq %rcx, 2*8(%rdi)
25867 movq %r11, 3*8(%rdi)
25868 leaq 4*8(%rdi), %rdi
25869 jae .Lcopy_forward_loop
25870@@ -105,12 +107,12 @@ ENTRY(memcpy)
25871 subq $0x20, %rdx
25872 movq -1*8(%rsi), %r8
25873 movq -2*8(%rsi), %r9
25874- movq -3*8(%rsi), %r10
25875+ movq -3*8(%rsi), %rcx
25876 movq -4*8(%rsi), %r11
25877 leaq -4*8(%rsi), %rsi
25878 movq %r8, -1*8(%rdi)
25879 movq %r9, -2*8(%rdi)
25880- movq %r10, -3*8(%rdi)
25881+ movq %rcx, -3*8(%rdi)
25882 movq %r11, -4*8(%rdi)
25883 leaq -4*8(%rdi), %rdi
25884 jae .Lcopy_backward_loop
25885@@ -130,12 +132,13 @@ ENTRY(memcpy)
25886 */
25887 movq 0*8(%rsi), %r8
25888 movq 1*8(%rsi), %r9
25889- movq -2*8(%rsi, %rdx), %r10
25890+ movq -2*8(%rsi, %rdx), %rcx
25891 movq -1*8(%rsi, %rdx), %r11
25892 movq %r8, 0*8(%rdi)
25893 movq %r9, 1*8(%rdi)
25894- movq %r10, -2*8(%rdi, %rdx)
25895+ movq %rcx, -2*8(%rdi, %rdx)
25896 movq %r11, -1*8(%rdi, %rdx)
25897+ pax_force_retaddr
25898 retq
25899 .p2align 4
25900 .Lless_16bytes:
25901@@ -148,6 +151,7 @@ ENTRY(memcpy)
25902 movq -1*8(%rsi, %rdx), %r9
25903 movq %r8, 0*8(%rdi)
25904 movq %r9, -1*8(%rdi, %rdx)
25905+ pax_force_retaddr
25906 retq
25907 .p2align 4
25908 .Lless_8bytes:
25909@@ -161,6 +165,7 @@ ENTRY(memcpy)
25910 movl -4(%rsi, %rdx), %r8d
25911 movl %ecx, (%rdi)
25912 movl %r8d, -4(%rdi, %rdx)
25913+ pax_force_retaddr
25914 retq
25915 .p2align 4
25916 .Lless_3bytes:
25917@@ -179,6 +184,7 @@ ENTRY(memcpy)
25918 movb %cl, (%rdi)
25919
25920 .Lend:
25921+ pax_force_retaddr
25922 retq
25923 CFI_ENDPROC
25924 ENDPROC(memcpy)
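
[Here the %r10 eviction costs nothing: %rcx is caller-clobbered and unused outside the rep-string paths, so it simply takes over. Also worth noting in the block the middle hunk touches: lengths from 16 to 32 bytes are handled with two pairs of possibly-overlapping 8-byte moves, no loop and no branch on the exact length. The same trick in C:

#include <string.h>

/* overlapping head/tail copy for 16 <= n <= 32 (sketch of the asm) */
static void memcpy_16to32_sketch(unsigned char *d, const unsigned char *s,
				 unsigned long n)
{
	unsigned long h0, h1, t0, t1;

	memcpy(&h0, s, 8);		/* movq 0*8(%rsi), %r8  */
	memcpy(&h1, s + 8, 8);		/* movq 1*8(%rsi), %r9  */
	memcpy(&t0, s + n - 16, 8);	/* movq -2*8(%rsi,%rdx), %rcx */
	memcpy(&t1, s + n - 8, 8);	/* movq -1*8(%rsi,%rdx), %r11 */
	memcpy(d, &h0, 8);
	memcpy(d + 8, &h1, 8);
	memcpy(d + n - 16, &t0, 8);
	memcpy(d + n - 8, &t1, 8);
}
]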
25925diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
25926index ee16461..c39c199 100644
25927--- a/arch/x86/lib/memmove_64.S
25928+++ b/arch/x86/lib/memmove_64.S
25929@@ -61,13 +61,13 @@ ENTRY(memmove)
25930 5:
25931 sub $0x20, %rdx
25932 movq 0*8(%rsi), %r11
25933- movq 1*8(%rsi), %r10
25934+ movq 1*8(%rsi), %rcx
25935 movq 2*8(%rsi), %r9
25936 movq 3*8(%rsi), %r8
25937 leaq 4*8(%rsi), %rsi
25938
25939 movq %r11, 0*8(%rdi)
25940- movq %r10, 1*8(%rdi)
25941+ movq %rcx, 1*8(%rdi)
25942 movq %r9, 2*8(%rdi)
25943 movq %r8, 3*8(%rdi)
25944 leaq 4*8(%rdi), %rdi
25945@@ -81,10 +81,10 @@ ENTRY(memmove)
25946 4:
25947 movq %rdx, %rcx
25948 movq -8(%rsi, %rdx), %r11
25949- lea -8(%rdi, %rdx), %r10
25950+ lea -8(%rdi, %rdx), %r9
25951 shrq $3, %rcx
25952 rep movsq
25953- movq %r11, (%r10)
25954+ movq %r11, (%r9)
25955 jmp 13f
25956 .Lmemmove_end_forward:
25957
25958@@ -95,14 +95,14 @@ ENTRY(memmove)
25959 7:
25960 movq %rdx, %rcx
25961 movq (%rsi), %r11
25962- movq %rdi, %r10
25963+ movq %rdi, %r9
25964 leaq -8(%rsi, %rdx), %rsi
25965 leaq -8(%rdi, %rdx), %rdi
25966 shrq $3, %rcx
25967 std
25968 rep movsq
25969 cld
25970- movq %r11, (%r10)
25971+ movq %r11, (%r9)
25972 jmp 13f
25973
25974 /*
25975@@ -127,13 +127,13 @@ ENTRY(memmove)
25976 8:
25977 subq $0x20, %rdx
25978 movq -1*8(%rsi), %r11
25979- movq -2*8(%rsi), %r10
25980+ movq -2*8(%rsi), %rcx
25981 movq -3*8(%rsi), %r9
25982 movq -4*8(%rsi), %r8
25983 leaq -4*8(%rsi), %rsi
25984
25985 movq %r11, -1*8(%rdi)
25986- movq %r10, -2*8(%rdi)
25987+ movq %rcx, -2*8(%rdi)
25988 movq %r9, -3*8(%rdi)
25989 movq %r8, -4*8(%rdi)
25990 leaq -4*8(%rdi), %rdi
25991@@ -151,11 +151,11 @@ ENTRY(memmove)
25992 * Move data from 16 bytes to 31 bytes.
25993 */
25994 movq 0*8(%rsi), %r11
25995- movq 1*8(%rsi), %r10
25996+ movq 1*8(%rsi), %rcx
25997 movq -2*8(%rsi, %rdx), %r9
25998 movq -1*8(%rsi, %rdx), %r8
25999 movq %r11, 0*8(%rdi)
26000- movq %r10, 1*8(%rdi)
26001+ movq %rcx, 1*8(%rdi)
26002 movq %r9, -2*8(%rdi, %rdx)
26003 movq %r8, -1*8(%rdi, %rdx)
26004 jmp 13f
26005@@ -167,9 +167,9 @@ ENTRY(memmove)
26006 * Move data from 8 bytes to 15 bytes.
26007 */
26008 movq 0*8(%rsi), %r11
26009- movq -1*8(%rsi, %rdx), %r10
26010+ movq -1*8(%rsi, %rdx), %r9
26011 movq %r11, 0*8(%rdi)
26012- movq %r10, -1*8(%rdi, %rdx)
26013+ movq %r9, -1*8(%rdi, %rdx)
26014 jmp 13f
26015 10:
26016 cmpq $4, %rdx
26017@@ -178,9 +178,9 @@ ENTRY(memmove)
26018 * Move data from 4 bytes to 7 bytes.
26019 */
26020 movl (%rsi), %r11d
26021- movl -4(%rsi, %rdx), %r10d
26022+ movl -4(%rsi, %rdx), %r9d
26023 movl %r11d, (%rdi)
26024- movl %r10d, -4(%rdi, %rdx)
26025+ movl %r9d, -4(%rdi, %rdx)
26026 jmp 13f
26027 11:
26028 cmp $2, %rdx
26029@@ -189,9 +189,9 @@ ENTRY(memmove)
26030 * Move data from 2 bytes to 3 bytes.
26031 */
26032 movw (%rsi), %r11w
26033- movw -2(%rsi, %rdx), %r10w
26034+ movw -2(%rsi, %rdx), %r9w
26035 movw %r11w, (%rdi)
26036- movw %r10w, -2(%rdi, %rdx)
26037+ movw %r9w, -2(%rdi, %rdx)
26038 jmp 13f
26039 12:
26040 cmp $1, %rdx
26041@@ -202,6 +202,7 @@ ENTRY(memmove)
26042 movb (%rsi), %r11b
26043 movb %r11b, (%rdi)
26044 13:
26045+ pax_force_retaddr
26046 retq
26047 CFI_ENDPROC
26048
26049@@ -210,6 +211,7 @@ ENTRY(memmove)
26050 /* Forward moving data. */
26051 movq %rdx, %rcx
26052 rep movsb
26053+ pax_force_retaddr
26054 retq
26055 .Lmemmove_end_forward_efs:
26056 .previous
26057diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
26058index 2dcb380..963660a 100644
26059--- a/arch/x86/lib/memset_64.S
26060+++ b/arch/x86/lib/memset_64.S
26061@@ -30,6 +30,7 @@
26062 movl %edx,%ecx
26063 rep stosb
26064 movq %r9,%rax
26065+ pax_force_retaddr
26066 ret
26067 .Lmemset_e:
26068 .previous
26069@@ -52,6 +53,7 @@
26070 movq %rdx,%rcx
26071 rep stosb
26072 movq %r9,%rax
26073+ pax_force_retaddr
26074 ret
26075 .Lmemset_e_e:
26076 .previous
26077@@ -59,7 +61,7 @@
26078 ENTRY(memset)
26079 ENTRY(__memset)
26080 CFI_STARTPROC
26081- movq %rdi,%r10
26082+ movq %rdi,%r11
26083
26084 /* expand byte value */
26085 movzbl %sil,%ecx
26086@@ -117,7 +119,8 @@ ENTRY(__memset)
26087 jnz .Lloop_1
26088
26089 .Lende:
26090- movq %r10,%rax
26091+ movq %r11,%rax
26092+ pax_force_retaddr
26093 ret
26094
26095 CFI_RESTORE_STATE
26096diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
26097index c9f2d9b..e7fd2c0 100644
26098--- a/arch/x86/lib/mmx_32.c
26099+++ b/arch/x86/lib/mmx_32.c
26100@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
26101 {
26102 void *p;
26103 int i;
26104+ unsigned long cr0;
26105
26106 if (unlikely(in_interrupt()))
26107 return __memcpy(to, from, len);
26108@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
26109 kernel_fpu_begin();
26110
26111 __asm__ __volatile__ (
26112- "1: prefetch (%0)\n" /* This set is 28 bytes */
26113- " prefetch 64(%0)\n"
26114- " prefetch 128(%0)\n"
26115- " prefetch 192(%0)\n"
26116- " prefetch 256(%0)\n"
26117+ "1: prefetch (%1)\n" /* This set is 28 bytes */
26118+ " prefetch 64(%1)\n"
26119+ " prefetch 128(%1)\n"
26120+ " prefetch 192(%1)\n"
26121+ " prefetch 256(%1)\n"
26122 "2: \n"
26123 ".section .fixup, \"ax\"\n"
26124- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
26125+ "3: \n"
26126+
26127+#ifdef CONFIG_PAX_KERNEXEC
26128+ " movl %%cr0, %0\n"
26129+ " movl %0, %%eax\n"
26130+ " andl $0xFFFEFFFF, %%eax\n"
26131+ " movl %%eax, %%cr0\n"
26132+#endif
26133+
26134+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
26135+
26136+#ifdef CONFIG_PAX_KERNEXEC
26137+ " movl %0, %%cr0\n"
26138+#endif
26139+
26140 " jmp 2b\n"
26141 ".previous\n"
26142 _ASM_EXTABLE(1b, 3b)
26143- : : "r" (from));
26144+ : "=&r" (cr0) : "r" (from) : "ax");
26145
26146 for ( ; i > 5; i--) {
26147 __asm__ __volatile__ (
26148- "1: prefetch 320(%0)\n"
26149- "2: movq (%0), %%mm0\n"
26150- " movq 8(%0), %%mm1\n"
26151- " movq 16(%0), %%mm2\n"
26152- " movq 24(%0), %%mm3\n"
26153- " movq %%mm0, (%1)\n"
26154- " movq %%mm1, 8(%1)\n"
26155- " movq %%mm2, 16(%1)\n"
26156- " movq %%mm3, 24(%1)\n"
26157- " movq 32(%0), %%mm0\n"
26158- " movq 40(%0), %%mm1\n"
26159- " movq 48(%0), %%mm2\n"
26160- " movq 56(%0), %%mm3\n"
26161- " movq %%mm0, 32(%1)\n"
26162- " movq %%mm1, 40(%1)\n"
26163- " movq %%mm2, 48(%1)\n"
26164- " movq %%mm3, 56(%1)\n"
26165+ "1: prefetch 320(%1)\n"
26166+ "2: movq (%1), %%mm0\n"
26167+ " movq 8(%1), %%mm1\n"
26168+ " movq 16(%1), %%mm2\n"
26169+ " movq 24(%1), %%mm3\n"
26170+ " movq %%mm0, (%2)\n"
26171+ " movq %%mm1, 8(%2)\n"
26172+ " movq %%mm2, 16(%2)\n"
26173+ " movq %%mm3, 24(%2)\n"
26174+ " movq 32(%1), %%mm0\n"
26175+ " movq 40(%1), %%mm1\n"
26176+ " movq 48(%1), %%mm2\n"
26177+ " movq 56(%1), %%mm3\n"
26178+ " movq %%mm0, 32(%2)\n"
26179+ " movq %%mm1, 40(%2)\n"
26180+ " movq %%mm2, 48(%2)\n"
26181+ " movq %%mm3, 56(%2)\n"
26182 ".section .fixup, \"ax\"\n"
26183- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
26184+ "3:\n"
26185+
26186+#ifdef CONFIG_PAX_KERNEXEC
26187+ " movl %%cr0, %0\n"
26188+ " movl %0, %%eax\n"
26189+ " andl $0xFFFEFFFF, %%eax\n"
26190+ " movl %%eax, %%cr0\n"
26191+#endif
26192+
26193+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
26194+
26195+#ifdef CONFIG_PAX_KERNEXEC
26196+ " movl %0, %%cr0\n"
26197+#endif
26198+
26199 " jmp 2b\n"
26200 ".previous\n"
26201 _ASM_EXTABLE(1b, 3b)
26202- : : "r" (from), "r" (to) : "memory");
26203+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
26204
26205 from += 64;
26206 to += 64;
26207@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
26208 static void fast_copy_page(void *to, void *from)
26209 {
26210 int i;
26211+ unsigned long cr0;
26212
26213 kernel_fpu_begin();
26214
26215@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
26216 * but that is for later. -AV
26217 */
26218 __asm__ __volatile__(
26219- "1: prefetch (%0)\n"
26220- " prefetch 64(%0)\n"
26221- " prefetch 128(%0)\n"
26222- " prefetch 192(%0)\n"
26223- " prefetch 256(%0)\n"
26224+ "1: prefetch (%1)\n"
26225+ " prefetch 64(%1)\n"
26226+ " prefetch 128(%1)\n"
26227+ " prefetch 192(%1)\n"
26228+ " prefetch 256(%1)\n"
26229 "2: \n"
26230 ".section .fixup, \"ax\"\n"
26231- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
26232+ "3: \n"
26233+
26234+#ifdef CONFIG_PAX_KERNEXEC
26235+ " movl %%cr0, %0\n"
26236+ " movl %0, %%eax\n"
26237+ " andl $0xFFFEFFFF, %%eax\n"
26238+ " movl %%eax, %%cr0\n"
26239+#endif
26240+
26241+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
26242+
26243+#ifdef CONFIG_PAX_KERNEXEC
26244+ " movl %0, %%cr0\n"
26245+#endif
26246+
26247 " jmp 2b\n"
26248 ".previous\n"
26249- _ASM_EXTABLE(1b, 3b) : : "r" (from));
26250+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
26251
26252 for (i = 0; i < (4096-320)/64; i++) {
26253 __asm__ __volatile__ (
26254- "1: prefetch 320(%0)\n"
26255- "2: movq (%0), %%mm0\n"
26256- " movntq %%mm0, (%1)\n"
26257- " movq 8(%0), %%mm1\n"
26258- " movntq %%mm1, 8(%1)\n"
26259- " movq 16(%0), %%mm2\n"
26260- " movntq %%mm2, 16(%1)\n"
26261- " movq 24(%0), %%mm3\n"
26262- " movntq %%mm3, 24(%1)\n"
26263- " movq 32(%0), %%mm4\n"
26264- " movntq %%mm4, 32(%1)\n"
26265- " movq 40(%0), %%mm5\n"
26266- " movntq %%mm5, 40(%1)\n"
26267- " movq 48(%0), %%mm6\n"
26268- " movntq %%mm6, 48(%1)\n"
26269- " movq 56(%0), %%mm7\n"
26270- " movntq %%mm7, 56(%1)\n"
26271+ "1: prefetch 320(%1)\n"
26272+ "2: movq (%1), %%mm0\n"
26273+ " movntq %%mm0, (%2)\n"
26274+ " movq 8(%1), %%mm1\n"
26275+ " movntq %%mm1, 8(%2)\n"
26276+ " movq 16(%1), %%mm2\n"
26277+ " movntq %%mm2, 16(%2)\n"
26278+ " movq 24(%1), %%mm3\n"
26279+ " movntq %%mm3, 24(%2)\n"
26280+ " movq 32(%1), %%mm4\n"
26281+ " movntq %%mm4, 32(%2)\n"
26282+ " movq 40(%1), %%mm5\n"
26283+ " movntq %%mm5, 40(%2)\n"
26284+ " movq 48(%1), %%mm6\n"
26285+ " movntq %%mm6, 48(%2)\n"
26286+ " movq 56(%1), %%mm7\n"
26287+ " movntq %%mm7, 56(%2)\n"
26288 ".section .fixup, \"ax\"\n"
26289- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
26290+ "3:\n"
26291+
26292+#ifdef CONFIG_PAX_KERNEXEC
26293+ " movl %%cr0, %0\n"
26294+ " movl %0, %%eax\n"
26295+ " andl $0xFFFEFFFF, %%eax\n"
26296+ " movl %%eax, %%cr0\n"
26297+#endif
26298+
26299+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
26300+
26301+#ifdef CONFIG_PAX_KERNEXEC
26302+ " movl %0, %%cr0\n"
26303+#endif
26304+
26305 " jmp 2b\n"
26306 ".previous\n"
26307- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
26308+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
26309
26310 from += 64;
26311 to += 64;
26312@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
26313 static void fast_copy_page(void *to, void *from)
26314 {
26315 int i;
26316+ unsigned long cr0;
26317
26318 kernel_fpu_begin();
26319
26320 __asm__ __volatile__ (
26321- "1: prefetch (%0)\n"
26322- " prefetch 64(%0)\n"
26323- " prefetch 128(%0)\n"
26324- " prefetch 192(%0)\n"
26325- " prefetch 256(%0)\n"
26326+ "1: prefetch (%1)\n"
26327+ " prefetch 64(%1)\n"
26328+ " prefetch 128(%1)\n"
26329+ " prefetch 192(%1)\n"
26330+ " prefetch 256(%1)\n"
26331 "2: \n"
26332 ".section .fixup, \"ax\"\n"
26333- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
26334+ "3: \n"
26335+
26336+#ifdef CONFIG_PAX_KERNEXEC
26337+ " movl %%cr0, %0\n"
26338+ " movl %0, %%eax\n"
26339+ " andl $0xFFFEFFFF, %%eax\n"
26340+ " movl %%eax, %%cr0\n"
26341+#endif
26342+
26343+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
26344+
26345+#ifdef CONFIG_PAX_KERNEXEC
26346+ " movl %0, %%cr0\n"
26347+#endif
26348+
26349 " jmp 2b\n"
26350 ".previous\n"
26351- _ASM_EXTABLE(1b, 3b) : : "r" (from));
26352+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
26353
26354 for (i = 0; i < 4096/64; i++) {
26355 __asm__ __volatile__ (
26356- "1: prefetch 320(%0)\n"
26357- "2: movq (%0), %%mm0\n"
26358- " movq 8(%0), %%mm1\n"
26359- " movq 16(%0), %%mm2\n"
26360- " movq 24(%0), %%mm3\n"
26361- " movq %%mm0, (%1)\n"
26362- " movq %%mm1, 8(%1)\n"
26363- " movq %%mm2, 16(%1)\n"
26364- " movq %%mm3, 24(%1)\n"
26365- " movq 32(%0), %%mm0\n"
26366- " movq 40(%0), %%mm1\n"
26367- " movq 48(%0), %%mm2\n"
26368- " movq 56(%0), %%mm3\n"
26369- " movq %%mm0, 32(%1)\n"
26370- " movq %%mm1, 40(%1)\n"
26371- " movq %%mm2, 48(%1)\n"
26372- " movq %%mm3, 56(%1)\n"
26373+ "1: prefetch 320(%1)\n"
26374+ "2: movq (%1), %%mm0\n"
26375+ " movq 8(%1), %%mm1\n"
26376+ " movq 16(%1), %%mm2\n"
26377+ " movq 24(%1), %%mm3\n"
26378+ " movq %%mm0, (%2)\n"
26379+ " movq %%mm1, 8(%2)\n"
26380+ " movq %%mm2, 16(%2)\n"
26381+ " movq %%mm3, 24(%2)\n"
26382+ " movq 32(%1), %%mm0\n"
26383+ " movq 40(%1), %%mm1\n"
26384+ " movq 48(%1), %%mm2\n"
26385+ " movq 56(%1), %%mm3\n"
26386+ " movq %%mm0, 32(%2)\n"
26387+ " movq %%mm1, 40(%2)\n"
26388+ " movq %%mm2, 48(%2)\n"
26389+ " movq %%mm3, 56(%2)\n"
26390 ".section .fixup, \"ax\"\n"
26391- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
26392+ "3:\n"
26393+
26394+#ifdef CONFIG_PAX_KERNEXEC
26395+ " movl %%cr0, %0\n"
26396+ " movl %0, %%eax\n"
26397+ " andl $0xFFFEFFFF, %%eax\n"
26398+ " movl %%eax, %%cr0\n"
26399+#endif
26400+
26401+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
26402+
26403+#ifdef CONFIG_PAX_KERNEXEC
26404+ " movl %0, %%cr0\n"
26405+#endif
26406+
26407 " jmp 2b\n"
26408 ".previous\n"
26409 _ASM_EXTABLE(1b, 3b)
26410- : : "r" (from), "r" (to) : "memory");
26411+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
26412
26413 from += 64;
26414 to += 64;
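
[The mmx_32.c fixup paths patch a short jmp over the faulting prefetch at runtime (for CPUs whose prefetch traps), i.e. they write into kernel text. With PAX_KERNEXEC keeping text read-only, the patch brackets that one movw with an open-coded CR0.WP toggle, saved into the new cr0 output operand; that is also why every operand number shifts by one and "ax" joins the clobber lists. The toggle as ordinary kernel C:

#include <asm/special_insns.h>		/* read_cr0()/write_cr0() */
#include <asm/processor-flags.h>	/* X86_CR0_WP == 1 << 16 */

/* sketch of the open-coded sequence in the fixups above */
static inline unsigned long wp_disable_sketch(void)
{
	unsigned long cr0 = read_cr0();

	write_cr0(cr0 & ~X86_CR0_WP);	/* andl $0xFFFEFFFF, %eax */
	return cr0;
}

static inline void wp_restore_sketch(unsigned long cr0)
{
	write_cr0(cr0);			/* movl %0, %%cr0 */
}
]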
26415diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
26416index f6d13ee..aca5f0b 100644
26417--- a/arch/x86/lib/msr-reg.S
26418+++ b/arch/x86/lib/msr-reg.S
26419@@ -3,6 +3,7 @@
26420 #include <asm/dwarf2.h>
26421 #include <asm/asm.h>
26422 #include <asm/msr.h>
26423+#include <asm/alternative-asm.h>
26424
26425 #ifdef CONFIG_X86_64
26426 /*
26427@@ -16,7 +17,7 @@ ENTRY(\op\()_safe_regs)
26428 CFI_STARTPROC
26429 pushq_cfi %rbx
26430 pushq_cfi %rbp
26431- movq %rdi, %r10 /* Save pointer */
26432+ movq %rdi, %r9 /* Save pointer */
26433 xorl %r11d, %r11d /* Return value */
26434 movl (%rdi), %eax
26435 movl 4(%rdi), %ecx
26436@@ -27,16 +28,17 @@ ENTRY(\op\()_safe_regs)
26437 movl 28(%rdi), %edi
26438 CFI_REMEMBER_STATE
26439 1: \op
26440-2: movl %eax, (%r10)
26441+2: movl %eax, (%r9)
26442 movl %r11d, %eax /* Return value */
26443- movl %ecx, 4(%r10)
26444- movl %edx, 8(%r10)
26445- movl %ebx, 12(%r10)
26446- movl %ebp, 20(%r10)
26447- movl %esi, 24(%r10)
26448- movl %edi, 28(%r10)
26449+ movl %ecx, 4(%r9)
26450+ movl %edx, 8(%r9)
26451+ movl %ebx, 12(%r9)
26452+ movl %ebp, 20(%r9)
26453+ movl %esi, 24(%r9)
26454+ movl %edi, 28(%r9)
26455 popq_cfi %rbp
26456 popq_cfi %rbx
26457+ pax_force_retaddr
26458 ret
26459 3:
26460 CFI_RESTORE_STATE
26461diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
26462index fc6ba17..04471c5 100644
26463--- a/arch/x86/lib/putuser.S
26464+++ b/arch/x86/lib/putuser.S
26465@@ -16,7 +16,9 @@
26466 #include <asm/errno.h>
26467 #include <asm/asm.h>
26468 #include <asm/smap.h>
26469-
26470+#include <asm/segment.h>
26471+#include <asm/pgtable.h>
26472+#include <asm/alternative-asm.h>
26473
26474 /*
26475 * __put_user_X
26476@@ -30,57 +32,125 @@
26477 * as they get called from within inline assembly.
26478 */
26479
26480-#define ENTER CFI_STARTPROC ; \
26481- GET_THREAD_INFO(%_ASM_BX)
26482-#define EXIT ASM_CLAC ; \
26483- ret ; \
26484+#define ENTER CFI_STARTPROC
26485+#define EXIT ASM_CLAC ; \
26486+ pax_force_retaddr ; \
26487+ ret ; \
26488 CFI_ENDPROC
26489
26490+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26491+#define _DEST %_ASM_CX,%_ASM_BX
26492+#else
26493+#define _DEST %_ASM_CX
26494+#endif
26495+
26496+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
26497+#define __copyuser_seg gs;
26498+#else
26499+#define __copyuser_seg
26500+#endif
26501+
26502 .text
26503 ENTRY(__put_user_1)
26504 ENTER
26505+
26506+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
26507+ GET_THREAD_INFO(%_ASM_BX)
26508 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
26509 jae bad_put_user
26510 ASM_STAC
26511-1: movb %al,(%_ASM_CX)
26512+
26513+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26514+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
26515+ cmp %_ASM_BX,%_ASM_CX
26516+ jb 1234f
26517+ xor %ebx,%ebx
26518+1234:
26519+#endif
26520+
26521+#endif
26522+
26523+1: __copyuser_seg movb %al,(_DEST)
26524 xor %eax,%eax
26525 EXIT
26526 ENDPROC(__put_user_1)
26527
26528 ENTRY(__put_user_2)
26529 ENTER
26530+
26531+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
26532+ GET_THREAD_INFO(%_ASM_BX)
26533 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
26534 sub $1,%_ASM_BX
26535 cmp %_ASM_BX,%_ASM_CX
26536 jae bad_put_user
26537 ASM_STAC
26538-2: movw %ax,(%_ASM_CX)
26539+
26540+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26541+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
26542+ cmp %_ASM_BX,%_ASM_CX
26543+ jb 1234f
26544+ xor %ebx,%ebx
26545+1234:
26546+#endif
26547+
26548+#endif
26549+
26550+2: __copyuser_seg movw %ax,(_DEST)
26551 xor %eax,%eax
26552 EXIT
26553 ENDPROC(__put_user_2)
26554
26555 ENTRY(__put_user_4)
26556 ENTER
26557+
26558+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
26559+ GET_THREAD_INFO(%_ASM_BX)
26560 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
26561 sub $3,%_ASM_BX
26562 cmp %_ASM_BX,%_ASM_CX
26563 jae bad_put_user
26564 ASM_STAC
26565-3: movl %eax,(%_ASM_CX)
26566+
26567+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26568+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
26569+ cmp %_ASM_BX,%_ASM_CX
26570+ jb 1234f
26571+ xor %ebx,%ebx
26572+1234:
26573+#endif
26574+
26575+#endif
26576+
26577+3: __copyuser_seg movl %eax,(_DEST)
26578 xor %eax,%eax
26579 EXIT
26580 ENDPROC(__put_user_4)
26581
26582 ENTRY(__put_user_8)
26583 ENTER
26584+
26585+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
26586+ GET_THREAD_INFO(%_ASM_BX)
26587 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
26588 sub $7,%_ASM_BX
26589 cmp %_ASM_BX,%_ASM_CX
26590 jae bad_put_user
26591 ASM_STAC
26592-4: mov %_ASM_AX,(%_ASM_CX)
26593+
26594+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26595+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
26596+ cmp %_ASM_BX,%_ASM_CX
26597+ jb 1234f
26598+ xor %ebx,%ebx
26599+1234:
26600+#endif
26601+
26602+#endif
26603+
26604+4: __copyuser_seg mov %_ASM_AX,(_DEST)
26605 #ifdef CONFIG_X86_32
26606-5: movl %edx,4(%_ASM_CX)
26607+5: __copyuser_seg movl %edx,4(_DEST)
26608 #endif
26609 xor %eax,%eax
26610 EXIT
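
[putuser.S takes a slightly different amd64 route than getuser.S: instead of rewriting the pointer, _DEST turns every store into a base+index access (%_ASM_CX,%_ASM_BX), with %rbx preloaded to either PAX_USER_SHADOW_BASE (raw userland pointer, below the base) or zero (pointer already translated), so the store itself needs no extra branch. Roughly:

/* sketch of the bias selection feeding the (%rcx,%rbx) store */
static inline void put_user_1_sketch(unsigned char val, unsigned long uaddr)
{
	unsigned long bias = PAX_USER_SHADOW_BASE;	/* mov $...,%rbx */

	if (uaddr >= bias)			/* cmp %rbx,%rcx; jb 1234f */
		bias = 0;			/* xor %ebx,%ebx */
	*(unsigned char *)(uaddr + bias) = val;	/* movb %al,(%rcx,%rbx) */
}
]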
26611diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
26612index 1cad221..de671ee 100644
26613--- a/arch/x86/lib/rwlock.S
26614+++ b/arch/x86/lib/rwlock.S
26615@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
26616 FRAME
26617 0: LOCK_PREFIX
26618 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
26619+
26620+#ifdef CONFIG_PAX_REFCOUNT
26621+ jno 1234f
26622+ LOCK_PREFIX
26623+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
26624+ int $4
26625+1234:
26626+ _ASM_EXTABLE(1234b, 1234b)
26627+#endif
26628+
26629 1: rep; nop
26630 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
26631 jne 1b
26632 LOCK_PREFIX
26633 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
26634+
26635+#ifdef CONFIG_PAX_REFCOUNT
26636+ jno 1234f
26637+ LOCK_PREFIX
26638+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
26639+ int $4
26640+1234:
26641+ _ASM_EXTABLE(1234b, 1234b)
26642+#endif
26643+
26644 jnz 0b
26645 ENDFRAME
26646+ pax_force_retaddr
26647 ret
26648 CFI_ENDPROC
26649 END(__write_lock_failed)
26650@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
26651 FRAME
26652 0: LOCK_PREFIX
26653 READ_LOCK_SIZE(inc) (%__lock_ptr)
26654+
26655+#ifdef CONFIG_PAX_REFCOUNT
26656+ jno 1234f
26657+ LOCK_PREFIX
26658+ READ_LOCK_SIZE(dec) (%__lock_ptr)
26659+ int $4
26660+1234:
26661+ _ASM_EXTABLE(1234b, 1234b)
26662+#endif
26663+
26664 1: rep; nop
26665 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
26666 js 1b
26667 LOCK_PREFIX
26668 READ_LOCK_SIZE(dec) (%__lock_ptr)
26669+
26670+#ifdef CONFIG_PAX_REFCOUNT
26671+ jno 1234f
26672+ LOCK_PREFIX
26673+ READ_LOCK_SIZE(inc) (%__lock_ptr)
26674+ int $4
26675+1234:
26676+ _ASM_EXTABLE(1234b, 1234b)
26677+#endif
26678+
26679 js 0b
26680 ENDFRAME
26681+ pax_force_retaddr
26682 ret
26683 CFI_ENDPROC
26684 END(__read_lock_failed)
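
[These rwlock hunks are the PAX_REFCOUNT pattern in its raw form: after each locked add/inc/dec, jno falls through on the common path; on signed overflow the operation is undone and int $4 raises the overflow exception, whose handler (paired with the _ASM_EXTABLE(1234b, 1234b) entry so execution resumes in place) reports the event rather than letting the count wrap into an exploitable state. A generic C shape of the same idea:

#include <limits.h>

/* sketch: trapping increment instead of silent wraparound */
static inline void atomic_inc_checked_sketch(int *v)
{
	int old = __atomic_fetch_add(v, 1, __ATOMIC_SEQ_CST);

	if (old == INT_MAX) {				/* jno not taken */
		__atomic_fetch_sub(v, 1, __ATOMIC_SEQ_CST);	/* undo */
		__builtin_trap();			/* stands in for "int $4" */
	}
}
]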
26685diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
26686index 5dff5f0..cadebf4 100644
26687--- a/arch/x86/lib/rwsem.S
26688+++ b/arch/x86/lib/rwsem.S
26689@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
26690 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
26691 CFI_RESTORE __ASM_REG(dx)
26692 restore_common_regs
26693+ pax_force_retaddr
26694 ret
26695 CFI_ENDPROC
26696 ENDPROC(call_rwsem_down_read_failed)
26697@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
26698 movq %rax,%rdi
26699 call rwsem_down_write_failed
26700 restore_common_regs
26701+ pax_force_retaddr
26702 ret
26703 CFI_ENDPROC
26704 ENDPROC(call_rwsem_down_write_failed)
26705@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
26706 movq %rax,%rdi
26707 call rwsem_wake
26708 restore_common_regs
26709-1: ret
26710+1: pax_force_retaddr
26711+ ret
26712 CFI_ENDPROC
26713 ENDPROC(call_rwsem_wake)
26714
26715@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
26716 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
26717 CFI_RESTORE __ASM_REG(dx)
26718 restore_common_regs
26719+ pax_force_retaddr
26720 ret
26721 CFI_ENDPROC
26722 ENDPROC(call_rwsem_downgrade_wake)
26723diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
26724index a63efd6..ccecad8 100644
26725--- a/arch/x86/lib/thunk_64.S
26726+++ b/arch/x86/lib/thunk_64.S
26727@@ -8,6 +8,7 @@
26728 #include <linux/linkage.h>
26729 #include <asm/dwarf2.h>
26730 #include <asm/calling.h>
26731+#include <asm/alternative-asm.h>
26732
26733 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
26734 .macro THUNK name, func, put_ret_addr_in_rdi=0
26735@@ -41,5 +42,6 @@
26736 SAVE_ARGS
26737 restore:
26738 RESTORE_ARGS
26739+ pax_force_retaddr
26740 ret
26741 CFI_ENDPROC
26742diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
26743index f0312d7..9c39d63 100644
26744--- a/arch/x86/lib/usercopy_32.c
26745+++ b/arch/x86/lib/usercopy_32.c
26746@@ -42,11 +42,13 @@ do { \
26747 int __d0; \
26748 might_fault(); \
26749 __asm__ __volatile__( \
26750+ __COPYUSER_SET_ES \
26751 ASM_STAC "\n" \
26752 "0: rep; stosl\n" \
26753 " movl %2,%0\n" \
26754 "1: rep; stosb\n" \
26755 "2: " ASM_CLAC "\n" \
26756+ __COPYUSER_RESTORE_ES \
26757 ".section .fixup,\"ax\"\n" \
26758 "3: lea 0(%2,%0,4),%0\n" \
26759 " jmp 2b\n" \
26760@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
26761
26762 #ifdef CONFIG_X86_INTEL_USERCOPY
26763 static unsigned long
26764-__copy_user_intel(void __user *to, const void *from, unsigned long size)
26765+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
26766 {
26767 int d0, d1;
26768 __asm__ __volatile__(
26769@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
26770 " .align 2,0x90\n"
26771 "3: movl 0(%4), %%eax\n"
26772 "4: movl 4(%4), %%edx\n"
26773- "5: movl %%eax, 0(%3)\n"
26774- "6: movl %%edx, 4(%3)\n"
26775+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
26776+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
26777 "7: movl 8(%4), %%eax\n"
26778 "8: movl 12(%4),%%edx\n"
26779- "9: movl %%eax, 8(%3)\n"
26780- "10: movl %%edx, 12(%3)\n"
26781+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
26782+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
26783 "11: movl 16(%4), %%eax\n"
26784 "12: movl 20(%4), %%edx\n"
26785- "13: movl %%eax, 16(%3)\n"
26786- "14: movl %%edx, 20(%3)\n"
26787+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
26788+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
26789 "15: movl 24(%4), %%eax\n"
26790 "16: movl 28(%4), %%edx\n"
26791- "17: movl %%eax, 24(%3)\n"
26792- "18: movl %%edx, 28(%3)\n"
26793+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
26794+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
26795 "19: movl 32(%4), %%eax\n"
26796 "20: movl 36(%4), %%edx\n"
26797- "21: movl %%eax, 32(%3)\n"
26798- "22: movl %%edx, 36(%3)\n"
26799+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
26800+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
26801 "23: movl 40(%4), %%eax\n"
26802 "24: movl 44(%4), %%edx\n"
26803- "25: movl %%eax, 40(%3)\n"
26804- "26: movl %%edx, 44(%3)\n"
26805+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
26806+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
26807 "27: movl 48(%4), %%eax\n"
26808 "28: movl 52(%4), %%edx\n"
26809- "29: movl %%eax, 48(%3)\n"
26810- "30: movl %%edx, 52(%3)\n"
26811+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
26812+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
26813 "31: movl 56(%4), %%eax\n"
26814 "32: movl 60(%4), %%edx\n"
26815- "33: movl %%eax, 56(%3)\n"
26816- "34: movl %%edx, 60(%3)\n"
26817+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
26818+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
26819 " addl $-64, %0\n"
26820 " addl $64, %4\n"
26821 " addl $64, %3\n"
26822@@ -149,10 +151,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
26823 " shrl $2, %0\n"
26824 " andl $3, %%eax\n"
26825 " cld\n"
26826+ __COPYUSER_SET_ES
26827 "99: rep; movsl\n"
26828 "36: movl %%eax, %0\n"
26829 "37: rep; movsb\n"
26830 "100:\n"
26831+ __COPYUSER_RESTORE_ES
26832 ".section .fixup,\"ax\"\n"
26833 "101: lea 0(%%eax,%0,4),%0\n"
26834 " jmp 100b\n"
26835@@ -202,46 +206,150 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
26836 }
26837
26838 static unsigned long
26839+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
26840+{
26841+ int d0, d1;
26842+ __asm__ __volatile__(
26843+ " .align 2,0x90\n"
26844+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
26845+ " cmpl $67, %0\n"
26846+ " jbe 3f\n"
26847+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
26848+ " .align 2,0x90\n"
26849+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
26850+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
26851+ "5: movl %%eax, 0(%3)\n"
26852+ "6: movl %%edx, 4(%3)\n"
26853+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
26854+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
26855+ "9: movl %%eax, 8(%3)\n"
26856+ "10: movl %%edx, 12(%3)\n"
26857+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
26858+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
26859+ "13: movl %%eax, 16(%3)\n"
26860+ "14: movl %%edx, 20(%3)\n"
26861+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
26862+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
26863+ "17: movl %%eax, 24(%3)\n"
26864+ "18: movl %%edx, 28(%3)\n"
26865+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
26866+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
26867+ "21: movl %%eax, 32(%3)\n"
26868+ "22: movl %%edx, 36(%3)\n"
26869+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
26870+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
26871+ "25: movl %%eax, 40(%3)\n"
26872+ "26: movl %%edx, 44(%3)\n"
26873+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
26874+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
26875+ "29: movl %%eax, 48(%3)\n"
26876+ "30: movl %%edx, 52(%3)\n"
26877+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
26878+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
26879+ "33: movl %%eax, 56(%3)\n"
26880+ "34: movl %%edx, 60(%3)\n"
26881+ " addl $-64, %0\n"
26882+ " addl $64, %4\n"
26883+ " addl $64, %3\n"
26884+ " cmpl $63, %0\n"
26885+ " ja 1b\n"
26886+ "35: movl %0, %%eax\n"
26887+ " shrl $2, %0\n"
26888+ " andl $3, %%eax\n"
26889+ " cld\n"
26890+ "99: rep; "__copyuser_seg" movsl\n"
26891+ "36: movl %%eax, %0\n"
26892+ "37: rep; "__copyuser_seg" movsb\n"
26893+ "100:\n"
26894+ ".section .fixup,\"ax\"\n"
26895+ "101: lea 0(%%eax,%0,4),%0\n"
26896+ " jmp 100b\n"
26897+ ".previous\n"
26898+ _ASM_EXTABLE(1b,100b)
26899+ _ASM_EXTABLE(2b,100b)
26900+ _ASM_EXTABLE(3b,100b)
26901+ _ASM_EXTABLE(4b,100b)
26902+ _ASM_EXTABLE(5b,100b)
26903+ _ASM_EXTABLE(6b,100b)
26904+ _ASM_EXTABLE(7b,100b)
26905+ _ASM_EXTABLE(8b,100b)
26906+ _ASM_EXTABLE(9b,100b)
26907+ _ASM_EXTABLE(10b,100b)
26908+ _ASM_EXTABLE(11b,100b)
26909+ _ASM_EXTABLE(12b,100b)
26910+ _ASM_EXTABLE(13b,100b)
26911+ _ASM_EXTABLE(14b,100b)
26912+ _ASM_EXTABLE(15b,100b)
26913+ _ASM_EXTABLE(16b,100b)
26914+ _ASM_EXTABLE(17b,100b)
26915+ _ASM_EXTABLE(18b,100b)
26916+ _ASM_EXTABLE(19b,100b)
26917+ _ASM_EXTABLE(20b,100b)
26918+ _ASM_EXTABLE(21b,100b)
26919+ _ASM_EXTABLE(22b,100b)
26920+ _ASM_EXTABLE(23b,100b)
26921+ _ASM_EXTABLE(24b,100b)
26922+ _ASM_EXTABLE(25b,100b)
26923+ _ASM_EXTABLE(26b,100b)
26924+ _ASM_EXTABLE(27b,100b)
26925+ _ASM_EXTABLE(28b,100b)
26926+ _ASM_EXTABLE(29b,100b)
26927+ _ASM_EXTABLE(30b,100b)
26928+ _ASM_EXTABLE(31b,100b)
26929+ _ASM_EXTABLE(32b,100b)
26930+ _ASM_EXTABLE(33b,100b)
26931+ _ASM_EXTABLE(34b,100b)
26932+ _ASM_EXTABLE(35b,100b)
26933+ _ASM_EXTABLE(36b,100b)
26934+ _ASM_EXTABLE(37b,100b)
26935+ _ASM_EXTABLE(99b,101b)
26936+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
26937+ : "1"(to), "2"(from), "0"(size)
26938+ : "eax", "edx", "memory");
26939+ return size;
26940+}
26941+
26942+static unsigned long __size_overflow(3)
26943 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
26944 {
26945 int d0, d1;
26946 __asm__ __volatile__(
26947 " .align 2,0x90\n"
26948- "0: movl 32(%4), %%eax\n"
26949+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
26950 " cmpl $67, %0\n"
26951 " jbe 2f\n"
26952- "1: movl 64(%4), %%eax\n"
26953+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
26954 " .align 2,0x90\n"
26955- "2: movl 0(%4), %%eax\n"
26956- "21: movl 4(%4), %%edx\n"
26957+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
26958+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
26959 " movl %%eax, 0(%3)\n"
26960 " movl %%edx, 4(%3)\n"
26961- "3: movl 8(%4), %%eax\n"
26962- "31: movl 12(%4),%%edx\n"
26963+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
26964+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
26965 " movl %%eax, 8(%3)\n"
26966 " movl %%edx, 12(%3)\n"
26967- "4: movl 16(%4), %%eax\n"
26968- "41: movl 20(%4), %%edx\n"
26969+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
26970+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
26971 " movl %%eax, 16(%3)\n"
26972 " movl %%edx, 20(%3)\n"
26973- "10: movl 24(%4), %%eax\n"
26974- "51: movl 28(%4), %%edx\n"
26975+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
26976+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
26977 " movl %%eax, 24(%3)\n"
26978 " movl %%edx, 28(%3)\n"
26979- "11: movl 32(%4), %%eax\n"
26980- "61: movl 36(%4), %%edx\n"
26981+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
26982+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
26983 " movl %%eax, 32(%3)\n"
26984 " movl %%edx, 36(%3)\n"
26985- "12: movl 40(%4), %%eax\n"
26986- "71: movl 44(%4), %%edx\n"
26987+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
26988+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
26989 " movl %%eax, 40(%3)\n"
26990 " movl %%edx, 44(%3)\n"
26991- "13: movl 48(%4), %%eax\n"
26992- "81: movl 52(%4), %%edx\n"
26993+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
26994+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
26995 " movl %%eax, 48(%3)\n"
26996 " movl %%edx, 52(%3)\n"
26997- "14: movl 56(%4), %%eax\n"
26998- "91: movl 60(%4), %%edx\n"
26999+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
27000+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
27001 " movl %%eax, 56(%3)\n"
27002 " movl %%edx, 60(%3)\n"
27003 " addl $-64, %0\n"
27004@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
27005 " shrl $2, %0\n"
27006 " andl $3, %%eax\n"
27007 " cld\n"
27008- "6: rep; movsl\n"
27009+ "6: rep; "__copyuser_seg" movsl\n"
27010 " movl %%eax,%0\n"
27011- "7: rep; movsb\n"
27012+ "7: rep; "__copyuser_seg" movsb\n"
27013 "8:\n"
27014 ".section .fixup,\"ax\"\n"
27015 "9: lea 0(%%eax,%0,4),%0\n"
27016@@ -298,48 +406,48 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
27017 * hyoshiok@miraclelinux.com
27018 */
27019
27020-static unsigned long __copy_user_zeroing_intel_nocache(void *to,
27021+static unsigned long __size_overflow(3) __copy_user_zeroing_intel_nocache(void *to,
27022 const void __user *from, unsigned long size)
27023 {
27024 int d0, d1;
27025
27026 __asm__ __volatile__(
27027 " .align 2,0x90\n"
27028- "0: movl 32(%4), %%eax\n"
27029+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
27030 " cmpl $67, %0\n"
27031 " jbe 2f\n"
27032- "1: movl 64(%4), %%eax\n"
27033+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
27034 " .align 2,0x90\n"
27035- "2: movl 0(%4), %%eax\n"
27036- "21: movl 4(%4), %%edx\n"
27037+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
27038+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
27039 " movnti %%eax, 0(%3)\n"
27040 " movnti %%edx, 4(%3)\n"
27041- "3: movl 8(%4), %%eax\n"
27042- "31: movl 12(%4),%%edx\n"
27043+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
27044+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
27045 " movnti %%eax, 8(%3)\n"
27046 " movnti %%edx, 12(%3)\n"
27047- "4: movl 16(%4), %%eax\n"
27048- "41: movl 20(%4), %%edx\n"
27049+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
27050+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
27051 " movnti %%eax, 16(%3)\n"
27052 " movnti %%edx, 20(%3)\n"
27053- "10: movl 24(%4), %%eax\n"
27054- "51: movl 28(%4), %%edx\n"
27055+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
27056+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
27057 " movnti %%eax, 24(%3)\n"
27058 " movnti %%edx, 28(%3)\n"
27059- "11: movl 32(%4), %%eax\n"
27060- "61: movl 36(%4), %%edx\n"
27061+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
27062+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
27063 " movnti %%eax, 32(%3)\n"
27064 " movnti %%edx, 36(%3)\n"
27065- "12: movl 40(%4), %%eax\n"
27066- "71: movl 44(%4), %%edx\n"
27067+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
27068+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
27069 " movnti %%eax, 40(%3)\n"
27070 " movnti %%edx, 44(%3)\n"
27071- "13: movl 48(%4), %%eax\n"
27072- "81: movl 52(%4), %%edx\n"
27073+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
27074+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
27075 " movnti %%eax, 48(%3)\n"
27076 " movnti %%edx, 52(%3)\n"
27077- "14: movl 56(%4), %%eax\n"
27078- "91: movl 60(%4), %%edx\n"
27079+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
27080+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
27081 " movnti %%eax, 56(%3)\n"
27082 " movnti %%edx, 60(%3)\n"
27083 " addl $-64, %0\n"
27084@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
27085 " shrl $2, %0\n"
27086 " andl $3, %%eax\n"
27087 " cld\n"
27088- "6: rep; movsl\n"
27089+ "6: rep; "__copyuser_seg" movsl\n"
27090 " movl %%eax,%0\n"
27091- "7: rep; movsb\n"
27092+ "7: rep; "__copyuser_seg" movsb\n"
27093 "8:\n"
27094 ".section .fixup,\"ax\"\n"
27095 "9: lea 0(%%eax,%0,4),%0\n"
27096@@ -392,48 +500,48 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
27097 return size;
27098 }
27099
27100-static unsigned long __copy_user_intel_nocache(void *to,
27101+static unsigned long __size_overflow(3) __copy_user_intel_nocache(void *to,
27102 const void __user *from, unsigned long size)
27103 {
27104 int d0, d1;
27105
27106 __asm__ __volatile__(
27107 " .align 2,0x90\n"
27108- "0: movl 32(%4), %%eax\n"
27109+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
27110 " cmpl $67, %0\n"
27111 " jbe 2f\n"
27112- "1: movl 64(%4), %%eax\n"
27113+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
27114 " .align 2,0x90\n"
27115- "2: movl 0(%4), %%eax\n"
27116- "21: movl 4(%4), %%edx\n"
27117+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
27118+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
27119 " movnti %%eax, 0(%3)\n"
27120 " movnti %%edx, 4(%3)\n"
27121- "3: movl 8(%4), %%eax\n"
27122- "31: movl 12(%4),%%edx\n"
27123+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
27124+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
27125 " movnti %%eax, 8(%3)\n"
27126 " movnti %%edx, 12(%3)\n"
27127- "4: movl 16(%4), %%eax\n"
27128- "41: movl 20(%4), %%edx\n"
27129+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
27130+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
27131 " movnti %%eax, 16(%3)\n"
27132 " movnti %%edx, 20(%3)\n"
27133- "10: movl 24(%4), %%eax\n"
27134- "51: movl 28(%4), %%edx\n"
27135+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
27136+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
27137 " movnti %%eax, 24(%3)\n"
27138 " movnti %%edx, 28(%3)\n"
27139- "11: movl 32(%4), %%eax\n"
27140- "61: movl 36(%4), %%edx\n"
27141+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
27142+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
27143 " movnti %%eax, 32(%3)\n"
27144 " movnti %%edx, 36(%3)\n"
27145- "12: movl 40(%4), %%eax\n"
27146- "71: movl 44(%4), %%edx\n"
27147+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
27148+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
27149 " movnti %%eax, 40(%3)\n"
27150 " movnti %%edx, 44(%3)\n"
27151- "13: movl 48(%4), %%eax\n"
27152- "81: movl 52(%4), %%edx\n"
27153+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
27154+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
27155 " movnti %%eax, 48(%3)\n"
27156 " movnti %%edx, 52(%3)\n"
27157- "14: movl 56(%4), %%eax\n"
27158- "91: movl 60(%4), %%edx\n"
27159+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
27160+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
27161 " movnti %%eax, 56(%3)\n"
27162 " movnti %%edx, 60(%3)\n"
27163 " addl $-64, %0\n"
27164@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
27165 " shrl $2, %0\n"
27166 " andl $3, %%eax\n"
27167 " cld\n"
27168- "6: rep; movsl\n"
27169+ "6: rep; "__copyuser_seg" movsl\n"
27170 " movl %%eax,%0\n"
27171- "7: rep; movsb\n"
27172+ "7: rep; "__copyuser_seg" movsb\n"
27173 "8:\n"
27174 ".section .fixup,\"ax\"\n"
27175 "9: lea 0(%%eax,%0,4),%0\n"
27176@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
27177 */
27178 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
27179 unsigned long size);
27180-unsigned long __copy_user_intel(void __user *to, const void *from,
27181+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
27182+ unsigned long size);
27183+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
27184 unsigned long size);
27185 unsigned long __copy_user_zeroing_intel_nocache(void *to,
27186 const void __user *from, unsigned long size);
27187 #endif /* CONFIG_X86_INTEL_USERCOPY */
27188
27189 /* Generic arbitrary sized copy. */
27190-#define __copy_user(to, from, size) \
27191+#define __copy_user(to, from, size, prefix, set, restore) \
27192 do { \
27193 int __d0, __d1, __d2; \
27194 __asm__ __volatile__( \
27195+ set \
27196 " cmp $7,%0\n" \
27197 " jbe 1f\n" \
27198 " movl %1,%0\n" \
27199 " negl %0\n" \
27200 " andl $7,%0\n" \
27201 " subl %0,%3\n" \
27202- "4: rep; movsb\n" \
27203+ "4: rep; "prefix"movsb\n" \
27204 " movl %3,%0\n" \
27205 " shrl $2,%0\n" \
27206 " andl $3,%3\n" \
27207 " .align 2,0x90\n" \
27208- "0: rep; movsl\n" \
27209+ "0: rep; "prefix"movsl\n" \
27210 " movl %3,%0\n" \
27211- "1: rep; movsb\n" \
27212+ "1: rep; "prefix"movsb\n" \
27213 "2:\n" \
27214+ restore \
27215 ".section .fixup,\"ax\"\n" \
27216 "5: addl %3,%0\n" \
27217 " jmp 2b\n" \
27218@@ -538,14 +650,14 @@ do { \
27219 " negl %0\n" \
27220 " andl $7,%0\n" \
27221 " subl %0,%3\n" \
27222- "4: rep; movsb\n" \
27223+ "4: rep; "__copyuser_seg"movsb\n" \
27224 " movl %3,%0\n" \
27225 " shrl $2,%0\n" \
27226 " andl $3,%3\n" \
27227 " .align 2,0x90\n" \
27228- "0: rep; movsl\n" \
27229+ "0: rep; "__copyuser_seg"movsl\n" \
27230 " movl %3,%0\n" \
27231- "1: rep; movsb\n" \
27232+ "1: rep; "__copyuser_seg"movsb\n" \
27233 "2:\n" \
27234 ".section .fixup,\"ax\"\n" \
27235 "5: addl %3,%0\n" \
27236@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
27237 {
27238 stac();
27239 if (movsl_is_ok(to, from, n))
27240- __copy_user(to, from, n);
27241+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
27242 else
27243- n = __copy_user_intel(to, from, n);
27244+ n = __generic_copy_to_user_intel(to, from, n);
27245 clac();
27246 return n;
27247 }
27248@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
27249 {
27250 stac();
27251 if (movsl_is_ok(to, from, n))
27252- __copy_user(to, from, n);
27253+ __copy_user(to, from, n, __copyuser_seg, "", "");
27254 else
27255- n = __copy_user_intel((void __user *)to,
27256- (const void *)from, n);
27257+ n = __generic_copy_from_user_intel(to, from, n);
27258 clac();
27259 return n;
27260 }
27261@@ -632,66 +743,51 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
27262 if (n > 64 && cpu_has_xmm2)
27263 n = __copy_user_intel_nocache(to, from, n);
27264 else
27265- __copy_user(to, from, n);
27266+ __copy_user(to, from, n, __copyuser_seg, "", "");
27267 #else
27268- __copy_user(to, from, n);
27269+ __copy_user(to, from, n, __copyuser_seg, "", "");
27270 #endif
27271 clac();
27272 return n;
27273 }
27274 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
27275
27276-/**
27277- * copy_to_user: - Copy a block of data into user space.
27278- * @to: Destination address, in user space.
27279- * @from: Source address, in kernel space.
27280- * @n: Number of bytes to copy.
27281- *
27282- * Context: User context only. This function may sleep.
27283- *
27284- * Copy data from kernel space to user space.
27285- *
27286- * Returns number of bytes that could not be copied.
27287- * On success, this will be zero.
27288- */
27289-unsigned long
27290-copy_to_user(void __user *to, const void *from, unsigned long n)
27291-{
27292- if (access_ok(VERIFY_WRITE, to, n))
27293- n = __copy_to_user(to, from, n);
27294- return n;
27295-}
27296-EXPORT_SYMBOL(copy_to_user);
27297-
27298-/**
27299- * copy_from_user: - Copy a block of data from user space.
27300- * @to: Destination address, in kernel space.
27301- * @from: Source address, in user space.
27302- * @n: Number of bytes to copy.
27303- *
27304- * Context: User context only. This function may sleep.
27305- *
27306- * Copy data from user space to kernel space.
27307- *
27308- * Returns number of bytes that could not be copied.
27309- * On success, this will be zero.
27310- *
27311- * If some data could not be copied, this function will pad the copied
27312- * data to the requested size using zero bytes.
27313- */
27314-unsigned long
27315-_copy_from_user(void *to, const void __user *from, unsigned long n)
27316-{
27317- if (access_ok(VERIFY_READ, from, n))
27318- n = __copy_from_user(to, from, n);
27319- else
27320- memset(to, 0, n);
27321- return n;
27322-}
27323-EXPORT_SYMBOL(_copy_from_user);
27324-
27325 void copy_from_user_overflow(void)
27326 {
27327 WARN(1, "Buffer overflow detected!\n");
27328 }
27329 EXPORT_SYMBOL(copy_from_user_overflow);
27330+
27331+void copy_to_user_overflow(void)
27332+{
27333+ WARN(1, "Buffer overflow detected!\n");
27334+}
27335+EXPORT_SYMBOL(copy_to_user_overflow);
27336+
27337+#ifdef CONFIG_PAX_MEMORY_UDEREF
27338+void __set_fs(mm_segment_t x)
27339+{
27340+ switch (x.seg) {
27341+ case 0:
27342+ loadsegment(gs, 0);
27343+ break;
27344+ case TASK_SIZE_MAX:
27345+ loadsegment(gs, __USER_DS);
27346+ break;
27347+ case -1UL:
27348+ loadsegment(gs, __KERNEL_DS);
27349+ break;
27350+ default:
27351+ BUG();
27352+ }
27353+ return;
27354+}
27355+EXPORT_SYMBOL(__set_fs);
27356+
27357+void set_fs(mm_segment_t x)
27358+{
27359+ current_thread_info()->addr_limit = x;
27360+ __set_fs(x);
27361+}
27362+EXPORT_SYMBOL(set_fs);
27363+#endif
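Note on the usercopy_32.c hunks above: they splice a segment-override string into every user-space access. A minimal sketch of the splicing, assuming __copyuser_seg expands to "gs;" in the UDEREF configuration (the real definition lives elsewhere in this patch): adjacent C string literals concatenate, so the override becomes part of the emitted instruction.

	/* sketch only: assumes __copyuser_seg is "gs;" under UDEREF */
	#define __copyuser_seg "gs;"

	static unsigned char read_user_byte(const unsigned char *p)
	{
		unsigned char c;

		/* emits "gs; movb (%reg), %al": the load goes through %gs,
		 * which __set_fs() above points at user or kernel space */
		asm volatile(__copyuser_seg" movb (%1), %0" : "=q" (c) : "r" (p));
		return c;
	}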
27364diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
27365index 906fea3..ee8a097 100644
27366--- a/arch/x86/lib/usercopy_64.c
27367+++ b/arch/x86/lib/usercopy_64.c
27368@@ -39,7 +39,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
27369 _ASM_EXTABLE(0b,3b)
27370 _ASM_EXTABLE(1b,2b)
27371 : [size8] "=&c"(size), [dst] "=&D" (__d0)
27372- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
27373+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
27374 [zero] "r" (0UL), [eight] "r" (8UL));
27375 clac();
27376 return size;
27377@@ -54,12 +54,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
27378 }
27379 EXPORT_SYMBOL(clear_user);
27380
27381-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
27382+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
27383 {
27384- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
27385- return copy_user_generic((__force void *)to, (__force void *)from, len);
27386- }
27387- return len;
27388+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
27389+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
27390+ return len;
27391 }
27392 EXPORT_SYMBOL(copy_in_user);
27393
27394@@ -69,7 +68,7 @@ EXPORT_SYMBOL(copy_in_user);
27395 * it is not necessary to optimize tail handling.
27396 */
27397 unsigned long
27398-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
27399+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
27400 {
27401 char c;
27402 unsigned zero_len;
27403@@ -87,3 +86,15 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
27404 clac();
27405 return len;
27406 }
27407+
27408+void copy_from_user_overflow(void)
27409+{
27410+ WARN(1, "Buffer overflow detected!\n");
27411+}
27412+EXPORT_SYMBOL(copy_from_user_overflow);
27413+
27414+void copy_to_user_overflow(void)
27415+{
27416+ WARN(1, "Buffer overflow detected!\n");
27417+}
27418+EXPORT_SYMBOL(copy_to_user_overflow);
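The copy_*_overflow() stubs exported above are the WARN targets of the patch's compile-time copy-size checking. A hedged sketch of the calling pattern; checked_copy_from_user() is illustrative, not this patch's actual wrapper:

	static inline unsigned long
	checked_copy_from_user(void *to, const void __user *from, unsigned long n)
	{
		/* -1 when the destination size is unknown at compile time */
		size_t sz = __builtin_object_size(to, 0);

		if (sz != (size_t)-1 && sz < n) {
			copy_from_user_overflow();	/* WARN(1, ...) as defined above */
			return n;			/* report nothing copied */
		}
		return _copy_from_user(to, from, n);
	}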
27419diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
27420index 903ec1e..c4166b2 100644
27421--- a/arch/x86/mm/extable.c
27422+++ b/arch/x86/mm/extable.c
27423@@ -6,12 +6,24 @@
27424 static inline unsigned long
27425 ex_insn_addr(const struct exception_table_entry *x)
27426 {
27427- return (unsigned long)&x->insn + x->insn;
27428+ unsigned long reloc = 0;
27429+
27430+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27431+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
27432+#endif
27433+
27434+ return (unsigned long)&x->insn + x->insn + reloc;
27435 }
27436 static inline unsigned long
27437 ex_fixup_addr(const struct exception_table_entry *x)
27438 {
27439- return (unsigned long)&x->fixup + x->fixup;
27440+ unsigned long reloc = 0;
27441+
27442+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27443+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
27444+#endif
27445+
27446+ return (unsigned long)&x->fixup + x->fixup + reloc;
27447 }
27448
27449 int fixup_exception(struct pt_regs *regs)
27450@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
27451 unsigned long new_ip;
27452
27453 #ifdef CONFIG_PNPBIOS
27454- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
27455+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
27456 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
27457 extern u32 pnp_bios_is_utter_crap;
27458 pnp_bios_is_utter_crap = 1;
27459@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
27460 i += 4;
27461 p->fixup -= i;
27462 i += 4;
27463+
27464+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27465+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
27466+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
27467+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
27468+#endif
27469+
27470 }
27471 }
27472
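The extable.c hunks above rebase both self-relative offsets when KERNEXEC shifts the i386 kernel image. A compact sketch of the arithmetic, with the entry layout simplified (the real type is the kernel's exception_table_entry):

	struct rel_entry { int insn; int fixup; };	/* 32-bit self-relative */

	static unsigned long decode(const int *field, unsigned long reloc)
	{
		/* the stored value is target - &field; if the text was moved
		 * by reloc at boot, the recovered address moves with it */
		return (unsigned long)field + *field + reloc;
	}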
27473diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
27474index fb674fd..223a693 100644
27475--- a/arch/x86/mm/fault.c
27476+++ b/arch/x86/mm/fault.c
27477@@ -13,12 +13,19 @@
27478 #include <linux/perf_event.h> /* perf_sw_event */
27479 #include <linux/hugetlb.h> /* hstate_index_to_shift */
27480 #include <linux/prefetch.h> /* prefetchw */
27481+#include <linux/unistd.h>
27482+#include <linux/compiler.h>
27483
27484 #include <asm/traps.h> /* dotraplinkage, ... */
27485 #include <asm/pgalloc.h> /* pgd_*(), ... */
27486 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
27487 #include <asm/fixmap.h> /* VSYSCALL_START */
27488 #include <asm/context_tracking.h> /* exception_enter(), ... */
27489+#include <asm/tlbflush.h>
27490+
27491+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
27492+#include <asm/stacktrace.h>
27493+#endif
27494
27495 /*
27496 * Page fault error code bits:
27497@@ -56,7 +63,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
27498 int ret = 0;
27499
27500 /* kprobe_running() needs smp_processor_id() */
27501- if (kprobes_built_in() && !user_mode_vm(regs)) {
27502+ if (kprobes_built_in() && !user_mode(regs)) {
27503 preempt_disable();
27504 if (kprobe_running() && kprobe_fault_handler(regs, 14))
27505 ret = 1;
27506@@ -117,7 +124,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
27507 return !instr_lo || (instr_lo>>1) == 1;
27508 case 0x00:
27509 /* Prefetch instruction is 0x0F0D or 0x0F18 */
27510- if (probe_kernel_address(instr, opcode))
27511+ if (user_mode(regs)) {
27512+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
27513+ return 0;
27514+ } else if (probe_kernel_address(instr, opcode))
27515 return 0;
27516
27517 *prefetch = (instr_lo == 0xF) &&
27518@@ -151,7 +161,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
27519 while (instr < max_instr) {
27520 unsigned char opcode;
27521
27522- if (probe_kernel_address(instr, opcode))
27523+ if (user_mode(regs)) {
27524+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
27525+ break;
27526+ } else if (probe_kernel_address(instr, opcode))
27527 break;
27528
27529 instr++;
27530@@ -182,6 +195,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
27531 force_sig_info(si_signo, &info, tsk);
27532 }
27533
27534+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
27535+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
27536+#endif
27537+
27538+#ifdef CONFIG_PAX_EMUTRAMP
27539+static int pax_handle_fetch_fault(struct pt_regs *regs);
27540+#endif
27541+
27542+#ifdef CONFIG_PAX_PAGEEXEC
27543+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
27544+{
27545+ pgd_t *pgd;
27546+ pud_t *pud;
27547+ pmd_t *pmd;
27548+
27549+ pgd = pgd_offset(mm, address);
27550+ if (!pgd_present(*pgd))
27551+ return NULL;
27552+ pud = pud_offset(pgd, address);
27553+ if (!pud_present(*pud))
27554+ return NULL;
27555+ pmd = pmd_offset(pud, address);
27556+ if (!pmd_present(*pmd))
27557+ return NULL;
27558+ return pmd;
27559+}
27560+#endif
27561+
27562 DEFINE_SPINLOCK(pgd_lock);
27563 LIST_HEAD(pgd_list);
27564
27565@@ -232,10 +273,22 @@ void vmalloc_sync_all(void)
27566 for (address = VMALLOC_START & PMD_MASK;
27567 address >= TASK_SIZE && address < FIXADDR_TOP;
27568 address += PMD_SIZE) {
27569+
27570+#ifdef CONFIG_PAX_PER_CPU_PGD
27571+ unsigned long cpu;
27572+#else
27573 struct page *page;
27574+#endif
27575
27576 spin_lock(&pgd_lock);
27577+
27578+#ifdef CONFIG_PAX_PER_CPU_PGD
27579+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
27580+ pgd_t *pgd = get_cpu_pgd(cpu);
27581+ pmd_t *ret;
27582+#else
27583 list_for_each_entry(page, &pgd_list, lru) {
27584+ pgd_t *pgd;
27585 spinlock_t *pgt_lock;
27586 pmd_t *ret;
27587
27588@@ -243,8 +296,14 @@ void vmalloc_sync_all(void)
27589 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
27590
27591 spin_lock(pgt_lock);
27592- ret = vmalloc_sync_one(page_address(page), address);
27593+ pgd = page_address(page);
27594+#endif
27595+
27596+ ret = vmalloc_sync_one(pgd, address);
27597+
27598+#ifndef CONFIG_PAX_PER_CPU_PGD
27599 spin_unlock(pgt_lock);
27600+#endif
27601
27602 if (!ret)
27603 break;
27604@@ -278,6 +337,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
27605 * an interrupt in the middle of a task switch..
27606 */
27607 pgd_paddr = read_cr3();
27608+
27609+#ifdef CONFIG_PAX_PER_CPU_PGD
27610+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
27611+#endif
27612+
27613 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
27614 if (!pmd_k)
27615 return -1;
27616@@ -373,7 +437,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
27617 * happen within a race in page table update. In the later
27618 * case just flush:
27619 */
27620+
27621+#ifdef CONFIG_PAX_PER_CPU_PGD
27622+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
27623+ pgd = pgd_offset_cpu(smp_processor_id(), address);
27624+#else
27625 pgd = pgd_offset(current->active_mm, address);
27626+#endif
27627+
27628 pgd_ref = pgd_offset_k(address);
27629 if (pgd_none(*pgd_ref))
27630 return -1;
27631@@ -541,7 +612,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
27632 static int is_errata100(struct pt_regs *regs, unsigned long address)
27633 {
27634 #ifdef CONFIG_X86_64
27635- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
27636+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
27637 return 1;
27638 #endif
27639 return 0;
27640@@ -568,7 +639,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
27641 }
27642
27643 static const char nx_warning[] = KERN_CRIT
27644-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
27645+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
27646
27647 static void
27648 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
27649@@ -577,15 +648,27 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
27650 if (!oops_may_print())
27651 return;
27652
27653- if (error_code & PF_INSTR) {
27654+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
27655 unsigned int level;
27656
27657 pte_t *pte = lookup_address(address, &level);
27658
27659 if (pte && pte_present(*pte) && !pte_exec(*pte))
27660- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
27661+ printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
27662 }
27663
27664+#ifdef CONFIG_PAX_KERNEXEC
27665+ if (init_mm.start_code <= address && address < init_mm.end_code) {
27666+ if (current->signal->curr_ip)
27667+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
27668+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
27669+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
27670+ else
27671+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
27672+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
27673+ }
27674+#endif
27675+
27676 printk(KERN_ALERT "BUG: unable to handle kernel ");
27677 if (address < PAGE_SIZE)
27678 printk(KERN_CONT "NULL pointer dereference");
27679@@ -748,6 +831,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
27680 return;
27681 }
27682 #endif
27683+
27684+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
27685+ if (pax_is_fetch_fault(regs, error_code, address)) {
27686+
27687+#ifdef CONFIG_PAX_EMUTRAMP
27688+ switch (pax_handle_fetch_fault(regs)) {
27689+ case 2:
27690+ return;
27691+ }
27692+#endif
27693+
27694+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
27695+ do_group_exit(SIGKILL);
27696+ }
27697+#endif
27698+
27699 /* Kernel addresses are always protection faults: */
27700 if (address >= TASK_SIZE)
27701 error_code |= PF_PROT;
27702@@ -833,7 +932,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
27703 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
27704 printk(KERN_ERR
27705 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
27706- tsk->comm, tsk->pid, address);
27707+ tsk->comm, task_pid_nr(tsk), address);
27708 code = BUS_MCEERR_AR;
27709 }
27710 #endif
27711@@ -896,6 +995,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
27712 return 1;
27713 }
27714
27715+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
27716+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
27717+{
27718+ pte_t *pte;
27719+ pmd_t *pmd;
27720+ spinlock_t *ptl;
27721+ unsigned char pte_mask;
27722+
27723+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
27724+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
27725+ return 0;
27726+
27727+ /* PaX: it's our fault, let's handle it if we can */
27728+
27729+ /* PaX: take a look at read faults before acquiring any locks */
27730+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
27731+ /* instruction fetch attempt from a protected page in user mode */
27732+ up_read(&mm->mmap_sem);
27733+
27734+#ifdef CONFIG_PAX_EMUTRAMP
27735+ switch (pax_handle_fetch_fault(regs)) {
27736+ case 2:
27737+ return 1;
27738+ }
27739+#endif
27740+
27741+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
27742+ do_group_exit(SIGKILL);
27743+ }
27744+
27745+ pmd = pax_get_pmd(mm, address);
27746+ if (unlikely(!pmd))
27747+ return 0;
27748+
27749+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
27750+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
27751+ pte_unmap_unlock(pte, ptl);
27752+ return 0;
27753+ }
27754+
27755+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
27756+ /* write attempt to a protected page in user mode */
27757+ pte_unmap_unlock(pte, ptl);
27758+ return 0;
27759+ }
27760+
27761+#ifdef CONFIG_SMP
27762+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
27763+#else
27764+ if (likely(address > get_limit(regs->cs)))
27765+#endif
27766+ {
27767+ set_pte(pte, pte_mkread(*pte));
27768+ __flush_tlb_one(address);
27769+ pte_unmap_unlock(pte, ptl);
27770+ up_read(&mm->mmap_sem);
27771+ return 1;
27772+ }
27773+
27774+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
27775+
27776+ /*
27777+ * PaX: fill DTLB with user rights and retry
27778+ */
27779+ __asm__ __volatile__ (
27780+ "orb %2,(%1)\n"
27781+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
27782+/*
27783+ * PaX: let this uncommented 'invlpg' remind us of the behaviour of Intel's
27784+ * (and AMD's) TLBs. Namely, they do not cache PTEs that would raise *any*
27785+ * page fault when examined during a TLB load attempt. This is true not only
27786+ * for PTEs holding a non-present entry but also for present entries that will
27787+ * raise a page fault (such as those set up by PaX, or by the copy-on-write
27788+ * mechanism). In effect it means that we do *not* need to flush the TLBs
27789+ * for our target pages, since their PTEs are simply not in the TLBs at all.
27790+ *
27791+ * The best thing about omitting the flush is that we gain around 15-20% speed
27792+ * in the fast path of the page fault handler and can get rid of tracing, since
27793+ * we can no longer flush unintended entries.
27794+ */
27795+ "invlpg (%0)\n"
27796+#endif
27797+ __copyuser_seg"testb $0,(%0)\n"
27798+ "xorb %3,(%1)\n"
27799+ :
27800+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
27801+ : "memory", "cc");
27802+ pte_unmap_unlock(pte, ptl);
27803+ up_read(&mm->mmap_sem);
27804+ return 1;
27805+}
27806+#endif
27807+
27808 /*
27809 * Handle a spurious fault caused by a stale TLB entry.
27810 *
27811@@ -968,6 +1160,9 @@ int show_unhandled_signals = 1;
27812 static inline int
27813 access_error(unsigned long error_code, struct vm_area_struct *vma)
27814 {
27815+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
27816+ return 1;
27817+
27818 if (error_code & PF_WRITE) {
27819 /* write, present and write, not present: */
27820 if (unlikely(!(vma->vm_flags & VM_WRITE)))
27821@@ -996,7 +1191,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
27822 if (error_code & PF_USER)
27823 return false;
27824
27825- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
27826+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
27827 return false;
27828
27829 return true;
27830@@ -1012,18 +1207,33 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
27831 {
27832 struct vm_area_struct *vma;
27833 struct task_struct *tsk;
27834- unsigned long address;
27835 struct mm_struct *mm;
27836 int fault;
27837 int write = error_code & PF_WRITE;
27838 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
27839 (write ? FAULT_FLAG_WRITE : 0);
27840
27841- tsk = current;
27842- mm = tsk->mm;
27843-
27844 /* Get the faulting address: */
27845- address = read_cr2();
27846+ unsigned long address = read_cr2();
27847+
27848+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
27849+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
27850+ if (!search_exception_tables(regs->ip)) {
27851+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
27852+ bad_area_nosemaphore(regs, error_code, address);
27853+ return;
27854+ }
27855+ if (address < PAX_USER_SHADOW_BASE) {
27856+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
27857+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
27858+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
27859+ } else
27860+ address -= PAX_USER_SHADOW_BASE;
27861+ }
27862+#endif
27863+
27864+ tsk = current;
27865+ mm = tsk->mm;
27866
27867 /*
27868 * Detect and handle instructions that would cause a page fault for
27869@@ -1084,7 +1294,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
27870 * User-mode registers count as a user access even for any
27871 * potential system fault or CPU buglet:
27872 */
27873- if (user_mode_vm(regs)) {
27874+ if (user_mode(regs)) {
27875 local_irq_enable();
27876 error_code |= PF_USER;
27877 } else {
27878@@ -1146,6 +1356,11 @@ retry:
27879 might_sleep();
27880 }
27881
27882+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
27883+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
27884+ return;
27885+#endif
27886+
27887 vma = find_vma(mm, address);
27888 if (unlikely(!vma)) {
27889 bad_area(regs, error_code, address);
27890@@ -1157,18 +1372,24 @@ retry:
27891 bad_area(regs, error_code, address);
27892 return;
27893 }
27894- if (error_code & PF_USER) {
27895- /*
27896- * Accessing the stack below %sp is always a bug.
27897- * The large cushion allows instructions like enter
27898- * and pusha to work. ("enter $65535, $31" pushes
27899- * 32 pointers and then decrements %sp by 65535.)
27900- */
27901- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
27902- bad_area(regs, error_code, address);
27903- return;
27904- }
27905+ /*
27906+ * Accessing the stack below %sp is always a bug.
27907+ * The large cushion allows instructions like enter
27908+ * and pusha to work. ("enter $65535, $31" pushes
27909+ * 32 pointers and then decrements %sp by 65535.)
27910+ */
27911+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
27912+ bad_area(regs, error_code, address);
27913+ return;
27914 }
27915+
27916+#ifdef CONFIG_PAX_SEGMEXEC
27917+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
27918+ bad_area(regs, error_code, address);
27919+ return;
27920+ }
27921+#endif
27922+
27923 if (unlikely(expand_stack(vma, address))) {
27924 bad_area(regs, error_code, address);
27925 return;
27926@@ -1232,3 +1453,292 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
27927 __do_page_fault(regs, error_code);
27928 exception_exit(regs);
27929 }
27930+
27931+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
27932+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
27933+{
27934+ struct mm_struct *mm = current->mm;
27935+ unsigned long ip = regs->ip;
27936+
27937+ if (v8086_mode(regs))
27938+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
27939+
27940+#ifdef CONFIG_PAX_PAGEEXEC
27941+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
27942+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
27943+ return true;
27944+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
27945+ return true;
27946+ return false;
27947+ }
27948+#endif
27949+
27950+#ifdef CONFIG_PAX_SEGMEXEC
27951+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
27952+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
27953+ return true;
27954+ return false;
27955+ }
27956+#endif
27957+
27958+ return false;
27959+}
27960+#endif
27961+
27962+#ifdef CONFIG_PAX_EMUTRAMP
27963+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
27964+{
27965+ int err;
27966+
27967+ do { /* PaX: libffi trampoline emulation */
27968+ unsigned char mov, jmp;
27969+ unsigned int addr1, addr2;
27970+
27971+#ifdef CONFIG_X86_64
27972+ if ((regs->ip + 9) >> 32)
27973+ break;
27974+#endif
27975+
27976+ err = get_user(mov, (unsigned char __user *)regs->ip);
27977+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
27978+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
27979+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
27980+
27981+ if (err)
27982+ break;
27983+
27984+ if (mov == 0xB8 && jmp == 0xE9) {
27985+ regs->ax = addr1;
27986+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
27987+ return 2;
27988+ }
27989+ } while (0);
27990+
27991+ do { /* PaX: gcc trampoline emulation #1 */
27992+ unsigned char mov1, mov2;
27993+ unsigned short jmp;
27994+ unsigned int addr1, addr2;
27995+
27996+#ifdef CONFIG_X86_64
27997+ if ((regs->ip + 11) >> 32)
27998+ break;
27999+#endif
28000+
28001+ err = get_user(mov1, (unsigned char __user *)regs->ip);
28002+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
28003+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
28004+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
28005+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
28006+
28007+ if (err)
28008+ break;
28009+
28010+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
28011+ regs->cx = addr1;
28012+ regs->ax = addr2;
28013+ regs->ip = addr2;
28014+ return 2;
28015+ }
28016+ } while (0);
28017+
28018+ do { /* PaX: gcc trampoline emulation #2 */
28019+ unsigned char mov, jmp;
28020+ unsigned int addr1, addr2;
28021+
28022+#ifdef CONFIG_X86_64
28023+ if ((regs->ip + 9) >> 32)
28024+ break;
28025+#endif
28026+
28027+ err = get_user(mov, (unsigned char __user *)regs->ip);
28028+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
28029+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
28030+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
28031+
28032+ if (err)
28033+ break;
28034+
28035+ if (mov == 0xB9 && jmp == 0xE9) {
28036+ regs->cx = addr1;
28037+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
28038+ return 2;
28039+ }
28040+ } while (0);
28041+
28042+ return 1; /* PaX in action */
28043+}
28044+
28045+#ifdef CONFIG_X86_64
28046+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
28047+{
28048+ int err;
28049+
28050+ do { /* PaX: libffi trampoline emulation */
28051+ unsigned short mov1, mov2, jmp1;
28052+ unsigned char stcclc, jmp2;
28053+ unsigned long addr1, addr2;
28054+
28055+ err = get_user(mov1, (unsigned short __user *)regs->ip);
28056+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
28057+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
28058+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
28059+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
28060+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
28061+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
28062+
28063+ if (err)
28064+ break;
28065+
28066+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
28067+ regs->r11 = addr1;
28068+ regs->r10 = addr2;
28069+ if (stcclc == 0xF8)
28070+ regs->flags &= ~X86_EFLAGS_CF;
28071+ else
28072+ regs->flags |= X86_EFLAGS_CF;
28073+ regs->ip = addr1;
28074+ return 2;
28075+ }
28076+ } while (0);
28077+
28078+ do { /* PaX: gcc trampoline emulation #1 */
28079+ unsigned short mov1, mov2, jmp1;
28080+ unsigned char jmp2;
28081+ unsigned int addr1;
28082+ unsigned long addr2;
28083+
28084+ err = get_user(mov1, (unsigned short __user *)regs->ip);
28085+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
28086+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
28087+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
28088+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
28089+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
28090+
28091+ if (err)
28092+ break;
28093+
28094+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
28095+ regs->r11 = addr1;
28096+ regs->r10 = addr2;
28097+ regs->ip = addr1;
28098+ return 2;
28099+ }
28100+ } while (0);
28101+
28102+ do { /* PaX: gcc trampoline emulation #2 */
28103+ unsigned short mov1, mov2, jmp1;
28104+ unsigned char jmp2;
28105+ unsigned long addr1, addr2;
28106+
28107+ err = get_user(mov1, (unsigned short __user *)regs->ip);
28108+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
28109+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
28110+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
28111+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
28112+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
28113+
28114+ if (err)
28115+ break;
28116+
28117+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
28118+ regs->r11 = addr1;
28119+ regs->r10 = addr2;
28120+ regs->ip = addr1;
28121+ return 2;
28122+ }
28123+ } while (0);
28124+
28125+ return 1; /* PaX in action */
28126+}
28127+#endif
28128+
28129+/*
28130+ * PaX: decide what to do with offenders (regs->ip = fault address)
28131+ *
28132+ * returns 1 when the task should be killed,
28133+ * 2 when a gcc trampoline was detected
28134+ */
28135+static int pax_handle_fetch_fault(struct pt_regs *regs)
28136+{
28137+ if (v8086_mode(regs))
28138+ return 1;
28139+
28140+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
28141+ return 1;
28142+
28143+#ifdef CONFIG_X86_32
28144+ return pax_handle_fetch_fault_32(regs);
28145+#else
28146+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
28147+ return pax_handle_fetch_fault_32(regs);
28148+ else
28149+ return pax_handle_fetch_fault_64(regs);
28150+#endif
28151+}
28152+#endif
28153+
28154+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
28155+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
28156+{
28157+ long i;
28158+
28159+ printk(KERN_ERR "PAX: bytes at PC: ");
28160+ for (i = 0; i < 20; i++) {
28161+ unsigned char c;
28162+ if (get_user(c, (unsigned char __force_user *)pc+i))
28163+ printk(KERN_CONT "?? ");
28164+ else
28165+ printk(KERN_CONT "%02x ", c);
28166+ }
28167+ printk("\n");
28168+
28169+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
28170+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
28171+ unsigned long c;
28172+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
28173+#ifdef CONFIG_X86_32
28174+ printk(KERN_CONT "???????? ");
28175+#else
28176+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
28177+ printk(KERN_CONT "???????? ???????? ");
28178+ else
28179+ printk(KERN_CONT "???????????????? ");
28180+#endif
28181+ } else {
28182+#ifdef CONFIG_X86_64
28183+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
28184+ printk(KERN_CONT "%08x ", (unsigned int)c);
28185+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
28186+ } else
28187+#endif
28188+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
28189+ }
28190+ }
28191+ printk("\n");
28192+}
28193+#endif
28194+
28195+/**
28196+ * probe_kernel_write(): safely attempt to write to a location
28197+ * @dst: address to write to
28198+ * @src: pointer to the data that shall be written
28199+ * @size: size of the data chunk
28200+ *
28201+ * Safely write to address @dst from the buffer at @src. If a kernel fault
28202+ * happens, handle that and return -EFAULT.
28203+ */
28204+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
28205+{
28206+ long ret;
28207+ mm_segment_t old_fs = get_fs();
28208+
28209+ set_fs(KERNEL_DS);
28210+ pagefault_disable();
28211+ pax_open_kernel();
28212+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
28213+ pax_close_kernel();
28214+ pagefault_enable();
28215+ set_fs(old_fs);
28216+
28217+ return ret ? -EFAULT : 0;
28218+}
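A short usage sketch for the probe_kernel_write() added above: callers can poke possibly write-protected kernel memory and get -EFAULT back instead of an oops when the exception fixup fires. patch_byte() is illustrative only:

	static int patch_byte(void *addr, unsigned char val)
	{
		long err = probe_kernel_write(addr, &val, 1);

		return err;	/* 0 on success, -EFAULT if the fixup ran */
	}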
28219diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
28220index dd74e46..7d26398 100644
28221--- a/arch/x86/mm/gup.c
28222+++ b/arch/x86/mm/gup.c
28223@@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
28224 addr = start;
28225 len = (unsigned long) nr_pages << PAGE_SHIFT;
28226 end = start + len;
28227- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
28228+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
28229 (void __user *)start, len)))
28230 return 0;
28231
28232diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
28233index 6f31ee5..8ee4164 100644
28234--- a/arch/x86/mm/highmem_32.c
28235+++ b/arch/x86/mm/highmem_32.c
28236@@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
28237 idx = type + KM_TYPE_NR*smp_processor_id();
28238 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
28239 BUG_ON(!pte_none(*(kmap_pte-idx)));
28240+
28241+ pax_open_kernel();
28242 set_pte(kmap_pte-idx, mk_pte(page, prot));
28243+ pax_close_kernel();
28244+
28245 arch_flush_lazy_mmu_mode();
28246
28247 return (void *)vaddr;
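The highmem_32.c hunk brackets set_pte() with pax_open_kernel()/pax_close_kernel(). A hedged sketch of what the open side amounts to under KERNEXEC on i386; the form below (toggling CR0.WP) is an assumption, and the patch defines the real primitives elsewhere:

	static inline unsigned long pax_open_kernel_sketch(void)
	{
		unsigned long cr0 = read_cr0();

		write_cr0(cr0 & ~X86_CR0_WP);	/* permit writes to read-only pages */
		return cr0;			/* restored by the matching close */
	}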
28248diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
28249index ae1aa71..56316db 100644
28250--- a/arch/x86/mm/hugetlbpage.c
28251+++ b/arch/x86/mm/hugetlbpage.c
28252@@ -279,6 +279,12 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
28253 info.flags = 0;
28254 info.length = len;
28255 info.low_limit = TASK_UNMAPPED_BASE;
28256+
28257+#ifdef CONFIG_PAX_RANDMMAP
28258+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
28259+ info.low_limit += current->mm->delta_mmap;
28260+#endif
28261+
28262 info.high_limit = TASK_SIZE;
28263 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
28264 info.align_offset = 0;
28265@@ -311,6 +317,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
28266 VM_BUG_ON(addr != -ENOMEM);
28267 info.flags = 0;
28268 info.low_limit = TASK_UNMAPPED_BASE;
28269+
28270+#ifdef CONFIG_PAX_RANDMMAP
28271+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
28272+ info.low_limit += current->mm->delta_mmap;
28273+#endif
28274+
28275 info.high_limit = TASK_SIZE;
28276 addr = vm_unmapped_area(&info);
28277 }
28278@@ -325,10 +337,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
28279 struct hstate *h = hstate_file(file);
28280 struct mm_struct *mm = current->mm;
28281 struct vm_area_struct *vma;
28282+ unsigned long pax_task_size = TASK_SIZE;
28283+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
28284
28285 if (len & ~huge_page_mask(h))
28286 return -EINVAL;
28287- if (len > TASK_SIZE)
28288+
28289+#ifdef CONFIG_PAX_SEGMEXEC
28290+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
28291+ pax_task_size = SEGMEXEC_TASK_SIZE;
28292+#endif
28293+
28294+ pax_task_size -= PAGE_SIZE;
28295+
28296+ if (len > pax_task_size)
28297 return -ENOMEM;
28298
28299 if (flags & MAP_FIXED) {
28300@@ -337,11 +359,14 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
28301 return addr;
28302 }
28303
28304+#ifdef CONFIG_PAX_RANDMMAP
28305+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
28306+#endif
28307+
28308 if (addr) {
28309 addr = ALIGN(addr, huge_page_size(h));
28310 vma = find_vma(mm, addr);
28311- if (TASK_SIZE - len >= addr &&
28312- (!vma || addr + len <= vma->vm_start))
28313+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
28314 return addr;
28315 }
28316 if (mm->get_unmapped_area == arch_get_unmapped_area)
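The hugetlbpage.c hunks replace the open-coded vma fit test with check_heap_stack_gap(). A hedged approximation of what the new predicate enforces; fits_with_gap() is illustrative and only models the extra gap requirement:

	static bool fits_with_gap(struct vm_area_struct *vma, unsigned long addr,
				  unsigned long len, unsigned long gap)
	{
		if (!vma)
			return true;
		/* unlike the old test, leave "gap" bytes before the next vma */
		return addr + len + gap <= vma->vm_start;
	}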
28317diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
28318index d7aea41..0fc945b 100644
28319--- a/arch/x86/mm/init.c
28320+++ b/arch/x86/mm/init.c
28321@@ -4,6 +4,7 @@
28322 #include <linux/swap.h>
28323 #include <linux/memblock.h>
28324 #include <linux/bootmem.h> /* for max_low_pfn */
28325+#include <linux/tboot.h>
28326
28327 #include <asm/cacheflush.h>
28328 #include <asm/e820.h>
28329@@ -16,6 +17,8 @@
28330 #include <asm/tlb.h>
28331 #include <asm/proto.h>
28332 #include <asm/dma.h> /* for MAX_DMA_PFN */
28333+#include <asm/desc.h>
28334+#include <asm/bios_ebda.h>
28335
28336 unsigned long __initdata pgt_buf_start;
28337 unsigned long __meminitdata pgt_buf_end;
28338@@ -44,7 +47,7 @@ static void __init find_early_table_space(struct map_range *mr, int nr_range)
28339 {
28340 int i;
28341 unsigned long puds = 0, pmds = 0, ptes = 0, tables;
28342- unsigned long start = 0, good_end;
28343+ unsigned long start = 0x100000, good_end;
28344 phys_addr_t base;
28345
28346 for (i = 0; i < nr_range; i++) {
28347@@ -321,10 +324,40 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
28348 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
28349 * mmio resources as well as potential bios/acpi data regions.
28350 */
28351+
28352+#ifdef CONFIG_GRKERNSEC_KMEM
28353+static unsigned int ebda_start __read_only;
28354+static unsigned int ebda_end __read_only;
28355+#endif
28356+
28357 int devmem_is_allowed(unsigned long pagenr)
28358 {
28359- if (pagenr < 256)
28360+#ifdef CONFIG_GRKERNSEC_KMEM
28361+ /* allow BDA */
28362+ if (!pagenr)
28363 return 1;
28364+ /* allow EBDA */
28365+ if (pagenr >= ebda_start && pagenr < ebda_end)
28366+ return 1;
28367+ /* if tboot is in use, allow access to its hardcoded serial log range */
28368+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
28369+ return 1;
28370+#else
28371+ if (!pagenr)
28372+ return 1;
28373+#ifdef CONFIG_VM86
28374+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
28375+ return 1;
28376+#endif
28377+#endif
28378+
28379+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
28380+ return 1;
28381+#ifdef CONFIG_GRKERNSEC_KMEM
28382+ /* throw out everything else below 1MB */
28383+ if (pagenr <= 256)
28384+ return 0;
28385+#endif
28386 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
28387 return 0;
28388 if (!page_is_ram(pagenr))
28389@@ -381,8 +414,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
28390 #endif
28391 }
28392
28393+#ifdef CONFIG_GRKERNSEC_KMEM
28394+static inline void gr_init_ebda(void)
28395+{
28396+ unsigned int ebda_addr;
28397+ unsigned int ebda_size = 0;
28398+
28399+ ebda_addr = get_bios_ebda();
28400+ if (ebda_addr) {
28401+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
28402+ ebda_size <<= 10;
28403+ }
28404+ if (ebda_addr && ebda_size) {
28405+ ebda_start = ebda_addr >> PAGE_SHIFT;
28406+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
28407+ } else {
28408+ ebda_start = 0x9f000 >> PAGE_SHIFT;
28409+ ebda_end = 0xa0000 >> PAGE_SHIFT;
28410+ }
28411+}
28412+#else
28413+static inline void gr_init_ebda(void) { }
28414+#endif
28415+
28416 void free_initmem(void)
28417 {
28418+#ifdef CONFIG_PAX_KERNEXEC
28419+#ifdef CONFIG_X86_32
28420+ /* PaX: limit KERNEL_CS to actual size */
28421+ unsigned long addr, limit;
28422+ struct desc_struct d;
28423+ int cpu;
28424+#else
28425+ pgd_t *pgd;
28426+ pud_t *pud;
28427+ pmd_t *pmd;
28428+ unsigned long addr, end;
28429+#endif
28430+#endif
28431+
28432+ gr_init_ebda();
28433+
28434+#ifdef CONFIG_PAX_KERNEXEC
28435+#ifdef CONFIG_X86_32
28436+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
28437+ limit = (limit - 1UL) >> PAGE_SHIFT;
28438+
28439+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
28440+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
28441+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
28442+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
28443+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
28444+ }
28445+
28446+ /* PaX: make KERNEL_CS read-only */
28447+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
28448+ if (!paravirt_enabled())
28449+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
28450+/*
28451+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
28452+ pgd = pgd_offset_k(addr);
28453+ pud = pud_offset(pgd, addr);
28454+ pmd = pmd_offset(pud, addr);
28455+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
28456+ }
28457+*/
28458+#ifdef CONFIG_X86_PAE
28459+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
28460+/*
28461+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
28462+ pgd = pgd_offset_k(addr);
28463+ pud = pud_offset(pgd, addr);
28464+ pmd = pmd_offset(pud, addr);
28465+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
28466+ }
28467+*/
28468+#endif
28469+
28470+#ifdef CONFIG_MODULES
28471+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
28472+#endif
28473+
28474+#else
28475+ /* PaX: make kernel code/rodata read-only, rest non-executable */
28476+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
28477+ pgd = pgd_offset_k(addr);
28478+ pud = pud_offset(pgd, addr);
28479+ pmd = pmd_offset(pud, addr);
28480+ if (!pmd_present(*pmd))
28481+ continue;
28482+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
28483+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
28484+ else
28485+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
28486+ }
28487+
28488+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
28489+ end = addr + KERNEL_IMAGE_SIZE;
28490+ for (; addr < end; addr += PMD_SIZE) {
28491+ pgd = pgd_offset_k(addr);
28492+ pud = pud_offset(pgd, addr);
28493+ pmd = pmd_offset(pud, addr);
28494+ if (!pmd_present(*pmd))
28495+ continue;
28496+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
28497+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
28498+ }
28499+#endif
28500+
28501+ flush_tlb_all();
28502+#endif
28503+
28504 free_init_pages("unused kernel memory",
28505 (unsigned long)(&__init_begin),
28506 (unsigned long)(&__init_end));
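With CONFIG_GRKERNSEC_KMEM, the devmem_is_allowed() above whitelists only a few low-memory windows for /dev/mem. A condensed restatement of the policy; low_mem_allowed() and its layout are illustrative, while the ranges come from the hunk:

	int low_mem_allowed(unsigned long pagenr)
	{
		if (pagenr == 0)
			return 1;				/* BDA */
		if (pagenr >= ebda_start && pagenr < ebda_end)
			return 1;				/* probed EBDA */
		if (tboot_enabled() &&
		    pagenr >= (0x60000 >> PAGE_SHIFT) &&
		    pagenr <  (0x68000 >> PAGE_SHIFT))
			return 1;				/* tboot serial log */
		if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
		    pagenr <  (ISA_END_ADDRESS >> PAGE_SHIFT))
			return 1;				/* legacy ISA hole */
		return 0;					/* all else below 1MB */
	}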
28507diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
28508index 745d66b..56bf568 100644
28509--- a/arch/x86/mm/init_32.c
28510+++ b/arch/x86/mm/init_32.c
28511@@ -73,36 +73,6 @@ static __init void *alloc_low_page(void)
28512 }
28513
28514 /*
28515- * Creates a middle page table and puts a pointer to it in the
28516- * given global directory entry. This only returns the gd entry
28517- * in non-PAE compilation mode, since the middle layer is folded.
28518- */
28519-static pmd_t * __init one_md_table_init(pgd_t *pgd)
28520-{
28521- pud_t *pud;
28522- pmd_t *pmd_table;
28523-
28524-#ifdef CONFIG_X86_PAE
28525- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
28526- if (after_bootmem)
28527- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
28528- else
28529- pmd_table = (pmd_t *)alloc_low_page();
28530- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
28531- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
28532- pud = pud_offset(pgd, 0);
28533- BUG_ON(pmd_table != pmd_offset(pud, 0));
28534-
28535- return pmd_table;
28536- }
28537-#endif
28538- pud = pud_offset(pgd, 0);
28539- pmd_table = pmd_offset(pud, 0);
28540-
28541- return pmd_table;
28542-}
28543-
28544-/*
28545 * Create a page table and place a pointer to it in a middle page
28546 * directory entry:
28547 */
28548@@ -122,13 +92,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
28549 page_table = (pte_t *)alloc_low_page();
28550
28551 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
28552+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
28553+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
28554+#else
28555 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
28556+#endif
28557 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
28558 }
28559
28560 return pte_offset_kernel(pmd, 0);
28561 }
28562
28563+static pmd_t * __init one_md_table_init(pgd_t *pgd)
28564+{
28565+ pud_t *pud;
28566+ pmd_t *pmd_table;
28567+
28568+ pud = pud_offset(pgd, 0);
28569+ pmd_table = pmd_offset(pud, 0);
28570+
28571+ return pmd_table;
28572+}
28573+
28574 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
28575 {
28576 int pgd_idx = pgd_index(vaddr);
28577@@ -202,6 +187,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
28578 int pgd_idx, pmd_idx;
28579 unsigned long vaddr;
28580 pgd_t *pgd;
28581+ pud_t *pud;
28582 pmd_t *pmd;
28583 pte_t *pte = NULL;
28584
28585@@ -211,8 +197,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
28586 pgd = pgd_base + pgd_idx;
28587
28588 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
28589- pmd = one_md_table_init(pgd);
28590- pmd = pmd + pmd_index(vaddr);
28591+ pud = pud_offset(pgd, vaddr);
28592+ pmd = pmd_offset(pud, vaddr);
28593+
28594+#ifdef CONFIG_X86_PAE
28595+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
28596+#endif
28597+
28598 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
28599 pmd++, pmd_idx++) {
28600 pte = page_table_kmap_check(one_page_table_init(pmd),
28601@@ -224,11 +215,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
28602 }
28603 }
28604
28605-static inline int is_kernel_text(unsigned long addr)
28606+static inline int is_kernel_text(unsigned long start, unsigned long end)
28607 {
28608- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
28609- return 1;
28610- return 0;
28611+ if ((start > ktla_ktva((unsigned long)_etext) ||
28612+ end <= ktla_ktva((unsigned long)_stext)) &&
28613+ (start > ktla_ktva((unsigned long)_einittext) ||
28614+ end <= ktla_ktva((unsigned long)_sinittext)) &&
28615+
28616+#ifdef CONFIG_ACPI_SLEEP
28617+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
28618+#endif
28619+
28620+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
28621+ return 0;
28622+ return 1;
28623 }
28624
28625 /*
28626@@ -245,9 +245,10 @@ kernel_physical_mapping_init(unsigned long start,
28627 unsigned long last_map_addr = end;
28628 unsigned long start_pfn, end_pfn;
28629 pgd_t *pgd_base = swapper_pg_dir;
28630- int pgd_idx, pmd_idx, pte_ofs;
28631+ unsigned int pgd_idx, pmd_idx, pte_ofs;
28632 unsigned long pfn;
28633 pgd_t *pgd;
28634+ pud_t *pud;
28635 pmd_t *pmd;
28636 pte_t *pte;
28637 unsigned pages_2m, pages_4k;
28638@@ -280,8 +281,13 @@ repeat:
28639 pfn = start_pfn;
28640 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
28641 pgd = pgd_base + pgd_idx;
28642- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
28643- pmd = one_md_table_init(pgd);
28644+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
28645+ pud = pud_offset(pgd, 0);
28646+ pmd = pmd_offset(pud, 0);
28647+
28648+#ifdef CONFIG_X86_PAE
28649+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
28650+#endif
28651
28652 if (pfn >= end_pfn)
28653 continue;
28654@@ -293,14 +299,13 @@ repeat:
28655 #endif
28656 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
28657 pmd++, pmd_idx++) {
28658- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
28659+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
28660
28661 /*
28662 * Map with big pages if possible, otherwise
28663 * create normal page tables:
28664 */
28665 if (use_pse) {
28666- unsigned int addr2;
28667 pgprot_t prot = PAGE_KERNEL_LARGE;
28668 /*
28669 * first pass will use the same initial
28670@@ -310,11 +315,7 @@ repeat:
28671 __pgprot(PTE_IDENT_ATTR |
28672 _PAGE_PSE);
28673
28674- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
28675- PAGE_OFFSET + PAGE_SIZE-1;
28676-
28677- if (is_kernel_text(addr) ||
28678- is_kernel_text(addr2))
28679+ if (is_kernel_text(address, address + PMD_SIZE))
28680 prot = PAGE_KERNEL_LARGE_EXEC;
28681
28682 pages_2m++;
28683@@ -331,7 +332,7 @@ repeat:
28684 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
28685 pte += pte_ofs;
28686 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
28687- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
28688+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
28689 pgprot_t prot = PAGE_KERNEL;
28690 /*
28691 * first pass will use the same initial
28692@@ -339,7 +340,7 @@ repeat:
28693 */
28694 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
28695
28696- if (is_kernel_text(addr))
28697+ if (is_kernel_text(address, address + PAGE_SIZE))
28698 prot = PAGE_KERNEL_EXEC;
28699
28700 pages_4k++;
28701@@ -465,7 +466,7 @@ void __init native_pagetable_init(void)
28702
28703 pud = pud_offset(pgd, va);
28704 pmd = pmd_offset(pud, va);
28705- if (!pmd_present(*pmd))
28706+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
28707 break;
28708
28709 pte = pte_offset_kernel(pmd, va);
28710@@ -514,12 +515,10 @@ void __init early_ioremap_page_table_range_init(void)
28711
28712 static void __init pagetable_init(void)
28713 {
28714- pgd_t *pgd_base = swapper_pg_dir;
28715-
28716- permanent_kmaps_init(pgd_base);
28717+ permanent_kmaps_init(swapper_pg_dir);
28718 }
28719
28720-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
28721+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
28722 EXPORT_SYMBOL_GPL(__supported_pte_mask);
28723
28724 /* user-defined highmem size */
28725@@ -728,6 +727,12 @@ void __init mem_init(void)
28726
28727 pci_iommu_alloc();
28728
28729+#ifdef CONFIG_PAX_PER_CPU_PGD
28730+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
28731+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
28732+ KERNEL_PGD_PTRS);
28733+#endif
28734+
28735 #ifdef CONFIG_FLATMEM
28736 BUG_ON(!mem_map);
28737 #endif
28738@@ -754,7 +759,7 @@ void __init mem_init(void)
28739 reservedpages++;
28740
28741 codesize = (unsigned long) &_etext - (unsigned long) &_text;
28742- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
28743+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
28744 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
28745
28746 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
28747@@ -795,10 +800,10 @@ void __init mem_init(void)
28748 ((unsigned long)&__init_end -
28749 (unsigned long)&__init_begin) >> 10,
28750
28751- (unsigned long)&_etext, (unsigned long)&_edata,
28752- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
28753+ (unsigned long)&_sdata, (unsigned long)&_edata,
28754+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
28755
28756- (unsigned long)&_text, (unsigned long)&_etext,
28757+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
28758 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
28759
28760 /*
28761@@ -876,6 +881,7 @@ void set_kernel_text_rw(void)
28762 if (!kernel_set_to_readonly)
28763 return;
28764
28765+ start = ktla_ktva(start);
28766 pr_debug("Set kernel text: %lx - %lx for read write\n",
28767 start, start+size);
28768
28769@@ -890,6 +896,7 @@ void set_kernel_text_ro(void)
28770 if (!kernel_set_to_readonly)
28771 return;
28772
28773+ start = ktla_ktva(start);
28774 pr_debug("Set kernel text: %lx - %lx for read only\n",
28775 start, start+size);
28776
28777@@ -918,6 +925,7 @@ void mark_rodata_ro(void)
28778 unsigned long start = PFN_ALIGN(_text);
28779 unsigned long size = PFN_ALIGN(_etext) - start;
28780
28781+ start = ktla_ktva(start);
28782 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
28783 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
28784 size >> 10);
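Several init_32.c hunks route text addresses through ktla_ktva() before treating them as data. A hedged sketch of the translation; the exact delta is defined elsewhere in this patch, and the form below is an assumption:

	/* KERNEXEC/i386: the kernel runs in a shifted code segment, so the
	 * text's linear and virtual addresses differ by a constant */
	#define ktla_ktva(addr) ((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)	/* assumed */
	#define ktva_ktla(addr) ((addr) - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)	/* assumed */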
28785diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
28786index 75c9a6a..498d677 100644
28787--- a/arch/x86/mm/init_64.c
28788+++ b/arch/x86/mm/init_64.c
28789@@ -74,7 +74,7 @@ early_param("gbpages", parse_direct_gbpages_on);
28790 * around without checking the pgd every time.
28791 */
28792
28793-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
28794+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
28795 EXPORT_SYMBOL_GPL(__supported_pte_mask);
28796
28797 int force_personality32;
28798@@ -107,12 +107,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
28799
28800 for (address = start; address <= end; address += PGDIR_SIZE) {
28801 const pgd_t *pgd_ref = pgd_offset_k(address);
28802+
28803+#ifdef CONFIG_PAX_PER_CPU_PGD
28804+ unsigned long cpu;
28805+#else
28806 struct page *page;
28807+#endif
28808
28809 if (pgd_none(*pgd_ref))
28810 continue;
28811
28812 spin_lock(&pgd_lock);
28813+
28814+#ifdef CONFIG_PAX_PER_CPU_PGD
28815+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
28816+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
28817+#else
28818 list_for_each_entry(page, &pgd_list, lru) {
28819 pgd_t *pgd;
28820 spinlock_t *pgt_lock;
28821@@ -121,6 +131,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
28822 /* the pgt_lock only for Xen */
28823 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
28824 spin_lock(pgt_lock);
28825+#endif
28826
28827 if (pgd_none(*pgd))
28828 set_pgd(pgd, *pgd_ref);
28829@@ -128,7 +139,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
28830 BUG_ON(pgd_page_vaddr(*pgd)
28831 != pgd_page_vaddr(*pgd_ref));
28832
28833+#ifndef CONFIG_PAX_PER_CPU_PGD
28834 spin_unlock(pgt_lock);
28835+#endif
28836+
28837 }
28838 spin_unlock(&pgd_lock);
28839 }
28840@@ -161,7 +175,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
28841 {
28842 if (pgd_none(*pgd)) {
28843 pud_t *pud = (pud_t *)spp_getpage();
28844- pgd_populate(&init_mm, pgd, pud);
28845+ pgd_populate_kernel(&init_mm, pgd, pud);
28846 if (pud != pud_offset(pgd, 0))
28847 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
28848 pud, pud_offset(pgd, 0));
28849@@ -173,7 +187,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
28850 {
28851 if (pud_none(*pud)) {
28852 pmd_t *pmd = (pmd_t *) spp_getpage();
28853- pud_populate(&init_mm, pud, pmd);
28854+ pud_populate_kernel(&init_mm, pud, pmd);
28855 if (pmd != pmd_offset(pud, 0))
28856 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
28857 pmd, pmd_offset(pud, 0));
28858@@ -202,7 +216,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
28859 pmd = fill_pmd(pud, vaddr);
28860 pte = fill_pte(pmd, vaddr);
28861
28862+ pax_open_kernel();
28863 set_pte(pte, new_pte);
28864+ pax_close_kernel();
28865
28866 /*
28867 * It's enough to flush this one mapping.
28868@@ -261,14 +277,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
28869 pgd = pgd_offset_k((unsigned long)__va(phys));
28870 if (pgd_none(*pgd)) {
28871 pud = (pud_t *) spp_getpage();
28872- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
28873- _PAGE_USER));
28874+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
28875 }
28876 pud = pud_offset(pgd, (unsigned long)__va(phys));
28877 if (pud_none(*pud)) {
28878 pmd = (pmd_t *) spp_getpage();
28879- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
28880- _PAGE_USER));
28881+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
28882 }
28883 pmd = pmd_offset(pud, phys);
28884 BUG_ON(!pmd_none(*pmd));
28885@@ -329,7 +343,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
28886 if (pfn >= pgt_buf_top)
28887 panic("alloc_low_page: ran out of memory");
28888
28889- adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
28890+ adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
28891 clear_page(adr);
28892 *phys = pfn * PAGE_SIZE;
28893 return adr;
28894@@ -345,7 +359,7 @@ static __ref void *map_low_page(void *virt)
28895
28896 phys = __pa(virt);
28897 left = phys & (PAGE_SIZE - 1);
28898- adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
28899+ adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
28900 adr = (void *)(((unsigned long)adr) | left);
28901
28902 return adr;
28903@@ -553,7 +567,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
28904 unmap_low_page(pmd);
28905
28906 spin_lock(&init_mm.page_table_lock);
28907- pud_populate(&init_mm, pud, __va(pmd_phys));
28908+ pud_populate_kernel(&init_mm, pud, __va(pmd_phys));
28909 spin_unlock(&init_mm.page_table_lock);
28910 }
28911 __flush_tlb_all();
28912@@ -599,7 +613,7 @@ kernel_physical_mapping_init(unsigned long start,
28913 unmap_low_page(pud);
28914
28915 spin_lock(&init_mm.page_table_lock);
28916- pgd_populate(&init_mm, pgd, __va(pud_phys));
28917+ pgd_populate_kernel(&init_mm, pgd, __va(pud_phys));
28918 spin_unlock(&init_mm.page_table_lock);
28919 pgd_changed = true;
28920 }
28921@@ -693,6 +707,12 @@ void __init mem_init(void)
28922
28923 pci_iommu_alloc();
28924
28925+#ifdef CONFIG_PAX_PER_CPU_PGD
28926+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
28927+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
28928+ KERNEL_PGD_PTRS);
28929+#endif
28930+
28931 /* clear_bss() already clear the empty_zero_page */
28932
28933 reservedpages = 0;
28934@@ -856,8 +876,8 @@ int kern_addr_valid(unsigned long addr)
28935 static struct vm_area_struct gate_vma = {
28936 .vm_start = VSYSCALL_START,
28937 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
28938- .vm_page_prot = PAGE_READONLY_EXEC,
28939- .vm_flags = VM_READ | VM_EXEC
28940+ .vm_page_prot = PAGE_READONLY,
28941+ .vm_flags = VM_READ
28942 };
28943
28944 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
28945@@ -891,7 +911,7 @@ int in_gate_area_no_mm(unsigned long addr)
28946
28947 const char *arch_vma_name(struct vm_area_struct *vma)
28948 {
28949- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
28950+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
28951 return "[vdso]";
28952 if (vma == &gate_vma)
28953 return "[vsyscall]";
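
The init_64.c hunks above replace the global pgd_list walk with a loop over per-CPU page directories (CONFIG_PAX_PER_CPU_PGD) and, in mem_init(), clone the kernel half of swapper_pg_dir into each per-CPU pgd. A minimal userspace sketch of the clone_pgd_range() idiom, assuming illustrative slot counts (the real values live in the kernel's pgtable headers):

#include <stdio.h>
#include <string.h>

typedef unsigned long pgd_t;            /* stand-in for the kernel type */
#define PTRS_PER_PGD        512
#define KERNEL_PGD_BOUNDARY 256         /* first kernel-half slot, x86-64 style */
#define KERNEL_PGD_PTRS     (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

/* copy a contiguous run of top-level slots from the reference directory */
static void clone_pgd_range(pgd_t *dst, const pgd_t *src, unsigned int count)
{
        memcpy(dst, src, count * sizeof(pgd_t));
}

int main(void)
{
        static pgd_t swapper_pg_dir[PTRS_PER_PGD];
        static pgd_t cpu_pgd[PTRS_PER_PGD];

        swapper_pg_dir[KERNEL_PGD_BOUNDARY] = 0x1000;   /* fake kernel entry */
        clone_pgd_range(cpu_pgd + KERNEL_PGD_BOUNDARY,
                        swapper_pg_dir + KERNEL_PGD_BOUNDARY,
                        KERNEL_PGD_PTRS);
        printf("%#lx\n", cpu_pgd[KERNEL_PGD_BOUNDARY]);
        return 0;
}

The design point is that kernel mappings must stay identical in every per-CPU directory, which is why sync_global_pgds() now iterates all CPUs instead of walking pgd_list.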
28954diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
28955index 7b179b4..6bd17777 100644
28956--- a/arch/x86/mm/iomap_32.c
28957+++ b/arch/x86/mm/iomap_32.c
28958@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
28959 type = kmap_atomic_idx_push();
28960 idx = type + KM_TYPE_NR * smp_processor_id();
28961 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
28962+
28963+ pax_open_kernel();
28964 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
28965+ pax_close_kernel();
28966+
28967 arch_flush_lazy_mmu_mode();
28968
28969 return (void *)vaddr;
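
The pax_open_kernel()/pax_close_kernel() pair bracketing set_pte() above temporarily lifts kernel write protection around a single store. As a userspace analogy only (an assumption about intent, not PaX's actual mechanism, which is not shown in this patch), the pattern resembles flipping a read-only page writable for one write and back:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        unsigned char *page = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (page == MAP_FAILED)
                return 1;
        mprotect(page, pagesz, PROT_READ);              /* normally read-only */

        mprotect(page, pagesz, PROT_READ | PROT_WRITE); /* pax_open_kernel()  */
        page[0] = 0x42;                                 /* the set_pte() step */
        mprotect(page, pagesz, PROT_READ);              /* pax_close_kernel() */

        printf("%#x\n", page[0]);
        munmap(page, pagesz);
        return 0;
}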
28970diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
28971index 78fe3f1..73b95e2 100644
28972--- a/arch/x86/mm/ioremap.c
28973+++ b/arch/x86/mm/ioremap.c
28974@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
28975 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
28976 int is_ram = page_is_ram(pfn);
28977
28978- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
28979+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
28980 return NULL;
28981 WARN_ON_ONCE(is_ram);
28982 }
28983@@ -256,7 +256,7 @@ EXPORT_SYMBOL(ioremap_prot);
28984 *
28985 * Caller must ensure there is only one unmapping for the same pointer.
28986 */
28987-void iounmap(volatile void __iomem *addr)
28988+void iounmap(const volatile void __iomem *addr)
28989 {
28990 struct vm_struct *p, *o;
28991
28992@@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
28993
28994 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
28995 if (page_is_ram(start >> PAGE_SHIFT))
28996+#ifdef CONFIG_HIGHMEM
28997+ if ((start >> PAGE_SHIFT) < max_low_pfn)
28998+#endif
28999 return __va(phys);
29000
29001 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
29002@@ -327,6 +330,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
29003 void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
29004 {
29005 if (page_is_ram(phys >> PAGE_SHIFT))
29006+#ifdef CONFIG_HIGHMEM
29007+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
29008+#endif
29009 return;
29010
29011 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
29012@@ -344,7 +350,7 @@ static int __init early_ioremap_debug_setup(char *str)
29013 early_param("early_ioremap_debug", early_ioremap_debug_setup);
29014
29015 static __initdata int after_paging_init;
29016-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
29017+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
29018
29019 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
29020 {
29021@@ -381,8 +387,7 @@ void __init early_ioremap_init(void)
29022 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
29023
29024 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
29025- memset(bm_pte, 0, sizeof(bm_pte));
29026- pmd_populate_kernel(&init_mm, pmd, bm_pte);
29027+ pmd_populate_user(&init_mm, pmd, bm_pte);
29028
29029 /*
29030 * The boot-ioremap range spans multiple pmds, for which
29031diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
29032index d87dd6d..bf3fa66 100644
29033--- a/arch/x86/mm/kmemcheck/kmemcheck.c
29034+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
29035@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
29036 * memory (e.g. tracked pages)? For now, we need this to avoid
29037 * invoking kmemcheck for PnP BIOS calls.
29038 */
29039- if (regs->flags & X86_VM_MASK)
29040+ if (v8086_mode(regs))
29041 return false;
29042- if (regs->cs != __KERNEL_CS)
29043+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
29044 return false;
29045
29046 pte = kmemcheck_pte_lookup(address);
29047diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
29048index 845df68..1d8d29f 100644
29049--- a/arch/x86/mm/mmap.c
29050+++ b/arch/x86/mm/mmap.c
29051@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
29052 * Leave an at least ~128 MB hole with possible stack randomization.
29053 */
29054 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
29055-#define MAX_GAP (TASK_SIZE/6*5)
29056+#define MAX_GAP (pax_task_size/6*5)
29057
29058 static int mmap_is_legacy(void)
29059 {
29060@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
29061 return rnd << PAGE_SHIFT;
29062 }
29063
29064-static unsigned long mmap_base(void)
29065+static unsigned long mmap_base(struct mm_struct *mm)
29066 {
29067 unsigned long gap = rlimit(RLIMIT_STACK);
29068+ unsigned long pax_task_size = TASK_SIZE;
29069+
29070+#ifdef CONFIG_PAX_SEGMEXEC
29071+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
29072+ pax_task_size = SEGMEXEC_TASK_SIZE;
29073+#endif
29074
29075 if (gap < MIN_GAP)
29076 gap = MIN_GAP;
29077 else if (gap > MAX_GAP)
29078 gap = MAX_GAP;
29079
29080- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
29081+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
29082 }
29083
29084 /*
29085 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
29086 * does, but not when emulating X86_32
29087 */
29088-static unsigned long mmap_legacy_base(void)
29089+static unsigned long mmap_legacy_base(struct mm_struct *mm)
29090 {
29091- if (mmap_is_ia32())
29092+ if (mmap_is_ia32()) {
29093+
29094+#ifdef CONFIG_PAX_SEGMEXEC
29095+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
29096+ return SEGMEXEC_TASK_UNMAPPED_BASE;
29097+ else
29098+#endif
29099+
29100 return TASK_UNMAPPED_BASE;
29101- else
29102+ } else
29103 return TASK_UNMAPPED_BASE + mmap_rnd();
29104 }
29105
29106@@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
29107 void arch_pick_mmap_layout(struct mm_struct *mm)
29108 {
29109 if (mmap_is_legacy()) {
29110- mm->mmap_base = mmap_legacy_base();
29111+ mm->mmap_base = mmap_legacy_base(mm);
29112+
29113+#ifdef CONFIG_PAX_RANDMMAP
29114+ if (mm->pax_flags & MF_PAX_RANDMMAP)
29115+ mm->mmap_base += mm->delta_mmap;
29116+#endif
29117+
29118 mm->get_unmapped_area = arch_get_unmapped_area;
29119 mm->unmap_area = arch_unmap_area;
29120 } else {
29121- mm->mmap_base = mmap_base();
29122+ mm->mmap_base = mmap_base(mm);
29123+
29124+#ifdef CONFIG_PAX_RANDMMAP
29125+ if (mm->pax_flags & MF_PAX_RANDMMAP)
29126+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
29127+#endif
29128+
29129 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
29130 mm->unmap_area = arch_unmap_area_topdown;
29131 }
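
The reworked mmap_base() above keeps the stock layout math but parameterizes the task size so SEGMEXEC's smaller address space is honored, and arch_pick_mmap_layout() then shifts the base by the per-mm random deltas under RANDMMAP. The visible arithmetic, restated as a self-contained sketch (MIN_GAP/MAX_GAP taken from the hunk, page size assumed 4 KiB, and the stack_maxrandom_size() term omitted):

#include <stdio.h>

#define PAGE_SIZE     4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

static unsigned long mmap_base(unsigned long task_size,
                               unsigned long stack_rlimit,
                               unsigned long rnd)
{
        unsigned long min_gap = 128UL << 20;    /* MIN_GAP: ~128 MB hole */
        unsigned long max_gap = task_size / 6 * 5;
        unsigned long gap = stack_rlimit;

        if (gap < min_gap)
                gap = min_gap;
        else if (gap > max_gap)
                gap = max_gap;
        return PAGE_ALIGN(task_size - gap - rnd);
}

int main(void)
{
        /* 8 MB stack rlimit, 0x12000 of randomization, x86-64 task size */
        printf("%#lx\n", mmap_base(0x7ffffffff000UL, 8UL << 20, 0x12000));
        return 0;
}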
29132diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
29133index dc0b727..f612039 100644
29134--- a/arch/x86/mm/mmio-mod.c
29135+++ b/arch/x86/mm/mmio-mod.c
29136@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
29137 break;
29138 default:
29139 {
29140- unsigned char *ip = (unsigned char *)instptr;
29141+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
29142 my_trace->opcode = MMIO_UNKNOWN_OP;
29143 my_trace->width = 0;
29144 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
29145@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
29146 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
29147 void __iomem *addr)
29148 {
29149- static atomic_t next_id;
29150+ static atomic_unchecked_t next_id;
29151 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
29152 /* These are page-unaligned. */
29153 struct mmiotrace_map map = {
29154@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
29155 .private = trace
29156 },
29157 .phys = offset,
29158- .id = atomic_inc_return(&next_id)
29159+ .id = atomic_inc_return_unchecked(&next_id)
29160 };
29161 map.map_id = trace->id;
29162
29163@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
29164 ioremap_trace_core(offset, size, addr);
29165 }
29166
29167-static void iounmap_trace_core(volatile void __iomem *addr)
29168+static void iounmap_trace_core(const volatile void __iomem *addr)
29169 {
29170 struct mmiotrace_map map = {
29171 .phys = 0,
29172@@ -328,7 +328,7 @@ not_enabled:
29173 }
29174 }
29175
29176-void mmiotrace_iounmap(volatile void __iomem *addr)
29177+void mmiotrace_iounmap(const volatile void __iomem *addr)
29178 {
29179 might_sleep();
29180 if (is_enabled()) /* recheck and proper locking in *_core() */
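
next_id above is demoted from atomic_t to atomic_unchecked_t because, on my reading of the rename (the option itself is defined elsewhere in the patch), PaX's PAX_REFCOUNT hardening makes plain atomic_t increments trap on overflow; a trace id that may legitimately wrap has to opt out. A C11 rendering of the unchecked increment, showing only the semantics, not the kernel types:

#include <stdio.h>
#include <stdatomic.h>

static atomic_uint next_id;     /* wraparound here is harmless by design */

static unsigned int new_trace_id(void)
{
        /* mirrors atomic_inc_return_unchecked(): no overflow check */
        return atomic_fetch_add_explicit(&next_id, 1,
                                         memory_order_relaxed) + 1;
}

int main(void)
{
        printf("%u %u\n", new_trace_id(), new_trace_id());
        return 0;
}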
29181diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
29182index 8504f36..5fc68f2 100644
29183--- a/arch/x86/mm/numa.c
29184+++ b/arch/x86/mm/numa.c
29185@@ -478,7 +478,7 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
29186 return true;
29187 }
29188
29189-static int __init numa_register_memblks(struct numa_meminfo *mi)
29190+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
29191 {
29192 unsigned long uninitialized_var(pfn_align);
29193 int i, nid;
29194diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
29195index b008656..773eac2 100644
29196--- a/arch/x86/mm/pageattr-test.c
29197+++ b/arch/x86/mm/pageattr-test.c
29198@@ -36,7 +36,7 @@ enum {
29199
29200 static int pte_testbit(pte_t pte)
29201 {
29202- return pte_flags(pte) & _PAGE_UNUSED1;
29203+ return pte_flags(pte) & _PAGE_CPA_TEST;
29204 }
29205
29206 struct split_state {
29207diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
29208index a718e0d..77419bc 100644
29209--- a/arch/x86/mm/pageattr.c
29210+++ b/arch/x86/mm/pageattr.c
29211@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
29212 */
29213 #ifdef CONFIG_PCI_BIOS
29214 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
29215- pgprot_val(forbidden) |= _PAGE_NX;
29216+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
29217 #endif
29218
29219 /*
29220@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
29221 * Does not cover __inittext since that is gone later on. On
29222 * 64bit we do not enforce !NX on the low mapping
29223 */
29224- if (within(address, (unsigned long)_text, (unsigned long)_etext))
29225- pgprot_val(forbidden) |= _PAGE_NX;
29226+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
29227+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
29228
29229+#ifdef CONFIG_DEBUG_RODATA
29230 /*
29231 * The .rodata section needs to be read-only. Using the pfn
29232 * catches all aliases.
29233@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
29234 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
29235 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
29236 pgprot_val(forbidden) |= _PAGE_RW;
29237+#endif
29238
29239 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
29240 /*
29241@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
29242 }
29243 #endif
29244
29245+#ifdef CONFIG_PAX_KERNEXEC
29246+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
29247+ pgprot_val(forbidden) |= _PAGE_RW;
29248+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
29249+ }
29250+#endif
29251+
29252 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
29253
29254 return prot;
29255@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
29256 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
29257 {
29258 /* change init_mm */
29259+ pax_open_kernel();
29260 set_pte_atomic(kpte, pte);
29261+
29262 #ifdef CONFIG_X86_32
29263 if (!SHARED_KERNEL_PMD) {
29264+
29265+#ifdef CONFIG_PAX_PER_CPU_PGD
29266+ unsigned long cpu;
29267+#else
29268 struct page *page;
29269+#endif
29270
29271+#ifdef CONFIG_PAX_PER_CPU_PGD
29272+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
29273+ pgd_t *pgd = get_cpu_pgd(cpu);
29274+#else
29275 list_for_each_entry(page, &pgd_list, lru) {
29276- pgd_t *pgd;
29277+ pgd_t *pgd = (pgd_t *)page_address(page);
29278+#endif
29279+
29280 pud_t *pud;
29281 pmd_t *pmd;
29282
29283- pgd = (pgd_t *)page_address(page) + pgd_index(address);
29284+ pgd += pgd_index(address);
29285 pud = pud_offset(pgd, address);
29286 pmd = pmd_offset(pud, address);
29287 set_pte_atomic((pte_t *)pmd, pte);
29288 }
29289 }
29290 #endif
29291+ pax_close_kernel();
29292 }
29293
29294 static int
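
static_protections() works by accumulating, in forbidden, the protection bits that must not survive for a given address, then masking them out of the requested protection; the KERNEXEC hunk above extends that to the whole _text.._sdata range. The pattern, reduced to a toy with made-up flag values:

#include <stdio.h>

#define PG_RW 0x2UL
#define PG_NX 0x4UL     /* illustrative; the real NX bit is bit 63 */

static int within(unsigned long addr, unsigned long start, unsigned long end)
{
        return addr >= start && addr < end;
}

static unsigned long protect(unsigned long prot, unsigned long addr,
                             unsigned long text_start, unsigned long text_end)
{
        unsigned long forbidden = 0;

        if (within(addr, text_start, text_end))
                forbidden |= PG_RW | PG_NX;   /* text stays RO and executable */
        return prot & ~forbidden;
}

int main(void)
{
        printf("%#lx\n", protect(PG_RW | PG_NX, 0x1000, 0x1000, 0x2000));
        return 0;
}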
29295diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
29296index 0eb572e..92f5c1e 100644
29297--- a/arch/x86/mm/pat.c
29298+++ b/arch/x86/mm/pat.c
29299@@ -376,7 +376,7 @@ int free_memtype(u64 start, u64 end)
29300
29301 if (!entry) {
29302 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
29303- current->comm, current->pid, start, end - 1);
29304+ current->comm, task_pid_nr(current), start, end - 1);
29305 return -EINVAL;
29306 }
29307
29308@@ -506,8 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29309
29310 while (cursor < to) {
29311 if (!devmem_is_allowed(pfn)) {
29312- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
29313- current->comm, from, to - 1);
29314+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
29315+ current->comm, from, to - 1, cursor);
29316 return 0;
29317 }
29318 cursor += PAGE_SIZE;
29319@@ -570,7 +570,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
29320 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
29321 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
29322 "for [mem %#010Lx-%#010Lx]\n",
29323- current->comm, current->pid,
29324+ current->comm, task_pid_nr(current),
29325 cattr_name(flags),
29326 base, (unsigned long long)(base + size-1));
29327 return -EINVAL;
29328@@ -605,7 +605,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
29329 flags = lookup_memtype(paddr);
29330 if (want_flags != flags) {
29331 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
29332- current->comm, current->pid,
29333+ current->comm, task_pid_nr(current),
29334 cattr_name(want_flags),
29335 (unsigned long long)paddr,
29336 (unsigned long long)(paddr + size - 1),
29337@@ -627,7 +627,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
29338 free_memtype(paddr, paddr + size);
29339 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
29340 " for [mem %#010Lx-%#010Lx], got %s\n",
29341- current->comm, current->pid,
29342+ current->comm, task_pid_nr(current),
29343 cattr_name(want_flags),
29344 (unsigned long long)paddr,
29345 (unsigned long long)(paddr + size - 1),
29346diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
29347index 9f0614d..92ae64a 100644
29348--- a/arch/x86/mm/pf_in.c
29349+++ b/arch/x86/mm/pf_in.c
29350@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
29351 int i;
29352 enum reason_type rv = OTHERS;
29353
29354- p = (unsigned char *)ins_addr;
29355+ p = (unsigned char *)ktla_ktva(ins_addr);
29356 p += skip_prefix(p, &prf);
29357 p += get_opcode(p, &opcode);
29358
29359@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
29360 struct prefix_bits prf;
29361 int i;
29362
29363- p = (unsigned char *)ins_addr;
29364+ p = (unsigned char *)ktla_ktva(ins_addr);
29365 p += skip_prefix(p, &prf);
29366 p += get_opcode(p, &opcode);
29367
29368@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
29369 struct prefix_bits prf;
29370 int i;
29371
29372- p = (unsigned char *)ins_addr;
29373+ p = (unsigned char *)ktla_ktva(ins_addr);
29374 p += skip_prefix(p, &prf);
29375 p += get_opcode(p, &opcode);
29376
29377@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
29378 struct prefix_bits prf;
29379 int i;
29380
29381- p = (unsigned char *)ins_addr;
29382+ p = (unsigned char *)ktla_ktva(ins_addr);
29383 p += skip_prefix(p, &prf);
29384 p += get_opcode(p, &opcode);
29385 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
29386@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
29387 struct prefix_bits prf;
29388 int i;
29389
29390- p = (unsigned char *)ins_addr;
29391+ p = (unsigned char *)ktla_ktva(ins_addr);
29392 p += skip_prefix(p, &prf);
29393 p += get_opcode(p, &opcode);
29394 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
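
Each decoder entry point in pf_in.c now reads instruction bytes through ktla_ktva(), whose definition is outside this hunk. The sketch below assumes it is a constant-offset translation from the kernel-text linear address to the virtual alias the text remains readable through under KERNEXEC; treat the offset as a placeholder:

#include <stdio.h>

static const unsigned long kernel_text_delta = 0xc0000000UL; /* assumed */

static unsigned long ktla_ktva_sketch(unsigned long ktla)
{
        return ktla + kernel_text_delta;        /* linear to virtual alias */
}

int main(void)
{
        unsigned long ins_addr = 0x00100000UL;  /* faulting instruction */
        printf("decode bytes via alias %#lx\n", ktla_ktva_sketch(ins_addr));
        return 0;
}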
29395diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
29396index e27fbf8..8b56dc9 100644
29397--- a/arch/x86/mm/pgtable.c
29398+++ b/arch/x86/mm/pgtable.c
29399@@ -84,10 +84,64 @@ static inline void pgd_list_del(pgd_t *pgd)
29400 list_del(&page->lru);
29401 }
29402
29403-#define UNSHARED_PTRS_PER_PGD \
29404- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
29405+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29406+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
29407
29408+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
29409+{
29410+ unsigned int count = USER_PGD_PTRS;
29411
29412+ while (count--)
29413+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
29414+}
29415+#endif
29416+
29417+#ifdef CONFIG_PAX_PER_CPU_PGD
29418+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
29419+{
29420+ unsigned int count = USER_PGD_PTRS;
29421+
29422+ while (count--) {
29423+ pgd_t pgd;
29424+
29425+#ifdef CONFIG_X86_64
29426+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
29427+#else
29428+ pgd = *src++;
29429+#endif
29430+
29431+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29432+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
29433+#endif
29434+
29435+ *dst++ = pgd;
29436+ }
29437+
29438+}
29439+#endif
29440+
29441+#ifdef CONFIG_X86_64
29442+#define pxd_t pud_t
29443+#define pyd_t pgd_t
29444+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
29445+#define pxd_free(mm, pud) pud_free((mm), (pud))
29446+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
29447+#define pyd_offset(mm, address) pgd_offset((mm), (address))
29448+#define PYD_SIZE PGDIR_SIZE
29449+#else
29450+#define pxd_t pmd_t
29451+#define pyd_t pud_t
29452+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
29453+#define pxd_free(mm, pud) pmd_free((mm), (pud))
29454+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
29455+#define pyd_offset(mm, address) pud_offset((mm), (address))
29456+#define PYD_SIZE PUD_SIZE
29457+#endif
29458+
29459+#ifdef CONFIG_PAX_PER_CPU_PGD
29460+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
29461+static inline void pgd_dtor(pgd_t *pgd) {}
29462+#else
29463 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
29464 {
29465 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
29466@@ -128,6 +182,7 @@ static void pgd_dtor(pgd_t *pgd)
29467 pgd_list_del(pgd);
29468 spin_unlock(&pgd_lock);
29469 }
29470+#endif
29471
29472 /*
29473 * List of all pgd's needed for non-PAE so it can invalidate entries
29474@@ -140,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
29475 * -- nyc
29476 */
29477
29478-#ifdef CONFIG_X86_PAE
29479+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
29480 /*
29481 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
29482 * updating the top-level pagetable entries to guarantee the
29483@@ -152,7 +207,7 @@ static void pgd_dtor(pgd_t *pgd)
29484 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
29485 * and initialize the kernel pmds here.
29486 */
29487-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
29488+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
29489
29490 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
29491 {
29492@@ -170,36 +225,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
29493 */
29494 flush_tlb_mm(mm);
29495 }
29496+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
29497+#define PREALLOCATED_PXDS USER_PGD_PTRS
29498 #else /* !CONFIG_X86_PAE */
29499
29500 /* No need to prepopulate any pagetable entries in non-PAE modes. */
29501-#define PREALLOCATED_PMDS 0
29502+#define PREALLOCATED_PXDS 0
29503
29504 #endif /* CONFIG_X86_PAE */
29505
29506-static void free_pmds(pmd_t *pmds[])
29507+static void free_pxds(pxd_t *pxds[])
29508 {
29509 int i;
29510
29511- for(i = 0; i < PREALLOCATED_PMDS; i++)
29512- if (pmds[i])
29513- free_page((unsigned long)pmds[i]);
29514+ for(i = 0; i < PREALLOCATED_PXDS; i++)
29515+ if (pxds[i])
29516+ free_page((unsigned long)pxds[i]);
29517 }
29518
29519-static int preallocate_pmds(pmd_t *pmds[])
29520+static int preallocate_pxds(pxd_t *pxds[])
29521 {
29522 int i;
29523 bool failed = false;
29524
29525- for(i = 0; i < PREALLOCATED_PMDS; i++) {
29526- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
29527- if (pmd == NULL)
29528+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
29529+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
29530+ if (pxd == NULL)
29531 failed = true;
29532- pmds[i] = pmd;
29533+ pxds[i] = pxd;
29534 }
29535
29536 if (failed) {
29537- free_pmds(pmds);
29538+ free_pxds(pxds);
29539 return -ENOMEM;
29540 }
29541
29542@@ -212,51 +269,55 @@ static int preallocate_pmds(pmd_t *pmds[])
29543 * preallocate which never got a corresponding vma will need to be
29544 * freed manually.
29545 */
29546-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
29547+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
29548 {
29549 int i;
29550
29551- for(i = 0; i < PREALLOCATED_PMDS; i++) {
29552+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
29553 pgd_t pgd = pgdp[i];
29554
29555 if (pgd_val(pgd) != 0) {
29556- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
29557+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
29558
29559- pgdp[i] = native_make_pgd(0);
29560+ set_pgd(pgdp + i, native_make_pgd(0));
29561
29562- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
29563- pmd_free(mm, pmd);
29564+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
29565+ pxd_free(mm, pxd);
29566 }
29567 }
29568 }
29569
29570-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
29571+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
29572 {
29573- pud_t *pud;
29574+ pyd_t *pyd;
29575 unsigned long addr;
29576 int i;
29577
29578- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
29579+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
29580 return;
29581
29582- pud = pud_offset(pgd, 0);
29583+#ifdef CONFIG_X86_64
29584+ pyd = pyd_offset(mm, 0L);
29585+#else
29586+ pyd = pyd_offset(pgd, 0L);
29587+#endif
29588
29589- for (addr = i = 0; i < PREALLOCATED_PMDS;
29590- i++, pud++, addr += PUD_SIZE) {
29591- pmd_t *pmd = pmds[i];
29592+ for (addr = i = 0; i < PREALLOCATED_PXDS;
29593+ i++, pyd++, addr += PYD_SIZE) {
29594+ pxd_t *pxd = pxds[i];
29595
29596 if (i >= KERNEL_PGD_BOUNDARY)
29597- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
29598- sizeof(pmd_t) * PTRS_PER_PMD);
29599+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
29600+ sizeof(pxd_t) * PTRS_PER_PMD);
29601
29602- pud_populate(mm, pud, pmd);
29603+ pyd_populate(mm, pyd, pxd);
29604 }
29605 }
29606
29607 pgd_t *pgd_alloc(struct mm_struct *mm)
29608 {
29609 pgd_t *pgd;
29610- pmd_t *pmds[PREALLOCATED_PMDS];
29611+ pxd_t *pxds[PREALLOCATED_PXDS];
29612
29613 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
29614
29615@@ -265,11 +326,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
29616
29617 mm->pgd = pgd;
29618
29619- if (preallocate_pmds(pmds) != 0)
29620+ if (preallocate_pxds(pxds) != 0)
29621 goto out_free_pgd;
29622
29623 if (paravirt_pgd_alloc(mm) != 0)
29624- goto out_free_pmds;
29625+ goto out_free_pxds;
29626
29627 /*
29628 * Make sure that pre-populating the pmds is atomic with
29629@@ -279,14 +340,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
29630 spin_lock(&pgd_lock);
29631
29632 pgd_ctor(mm, pgd);
29633- pgd_prepopulate_pmd(mm, pgd, pmds);
29634+ pgd_prepopulate_pxd(mm, pgd, pxds);
29635
29636 spin_unlock(&pgd_lock);
29637
29638 return pgd;
29639
29640-out_free_pmds:
29641- free_pmds(pmds);
29642+out_free_pxds:
29643+ free_pxds(pxds);
29644 out_free_pgd:
29645 free_page((unsigned long)pgd);
29646 out:
29647@@ -295,7 +356,7 @@ out:
29648
29649 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
29650 {
29651- pgd_mop_up_pmds(mm, pgd);
29652+ pgd_mop_up_pxds(mm, pgd);
29653 pgd_dtor(pgd);
29654 paravirt_pgd_free(mm, pgd);
29655 free_page((unsigned long)pgd);
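
The pgtable.c rework renames the pmd-preallocation paths to pxd/pyd so one body serves both layouts: on 32-bit PAE the preallocated level is the pmd under a pud, on x86-64 with per-CPU pgds it is the pud under a pgd. The same compile-time aliasing trick, as a standalone toy (types and span sizes are placeholders):

#include <stdio.h>

#ifdef CONFIG_64BIT
typedef struct { unsigned long v; } pud_t;
#define pxd_t    pud_t
#define PYD_SIZE (1UL << 39)            /* span of one pgd slot, x86-64 */
#else
typedef struct { unsigned long v; } pmd_t;
#define pxd_t    pmd_t
#define PYD_SIZE (1UL << 30)            /* span of one pud slot, PAE */
#endif

int main(void)
{
        printf("entry %zu bytes, spans %#lx\n", sizeof(pxd_t), PYD_SIZE);
        return 0;
}

Building with and without -DCONFIG_64BIT flips which level the shared code manipulates, which is exactly what the #ifdef CONFIG_X86_64 block in the hunk does for preallocate_pxds() and friends.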
29656diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
29657index a69bcb8..19068ab 100644
29658--- a/arch/x86/mm/pgtable_32.c
29659+++ b/arch/x86/mm/pgtable_32.c
29660@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
29661 return;
29662 }
29663 pte = pte_offset_kernel(pmd, vaddr);
29664+
29665+ pax_open_kernel();
29666 if (pte_val(pteval))
29667 set_pte_at(&init_mm, vaddr, pte, pteval);
29668 else
29669 pte_clear(&init_mm, vaddr, pte);
29670+ pax_close_kernel();
29671
29672 /*
29673 * It's enough to flush this one mapping.
29674diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
29675index d2e2735..5c6586f 100644
29676--- a/arch/x86/mm/physaddr.c
29677+++ b/arch/x86/mm/physaddr.c
29678@@ -8,7 +8,7 @@
29679
29680 #ifdef CONFIG_X86_64
29681
29682-unsigned long __phys_addr(unsigned long x)
29683+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
29684 {
29685 if (x >= __START_KERNEL_map) {
29686 x -= __START_KERNEL_map;
29687@@ -45,7 +45,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
29688 #else
29689
29690 #ifdef CONFIG_DEBUG_VIRTUAL
29691-unsigned long __phys_addr(unsigned long x)
29692+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
29693 {
29694 /* VMALLOC_* aren't constants */
29695 VIRTUAL_BUG_ON(x < PAGE_OFFSET);
29696diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
29697index 410531d..0f16030 100644
29698--- a/arch/x86/mm/setup_nx.c
29699+++ b/arch/x86/mm/setup_nx.c
29700@@ -5,8 +5,10 @@
29701 #include <asm/pgtable.h>
29702 #include <asm/proto.h>
29703
29704+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
29705 static int disable_nx __cpuinitdata;
29706
29707+#ifndef CONFIG_PAX_PAGEEXEC
29708 /*
29709 * noexec = on|off
29710 *
29711@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
29712 return 0;
29713 }
29714 early_param("noexec", noexec_setup);
29715+#endif
29716+
29717+#endif
29718
29719 void __cpuinit x86_configure_nx(void)
29720 {
29721+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
29722 if (cpu_has_nx && !disable_nx)
29723 __supported_pte_mask |= _PAGE_NX;
29724 else
29725+#endif
29726 __supported_pte_mask &= ~_PAGE_NX;
29727 }
29728
29729diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
29730index 13a6b29..c2fff23 100644
29731--- a/arch/x86/mm/tlb.c
29732+++ b/arch/x86/mm/tlb.c
29733@@ -48,7 +48,11 @@ void leave_mm(int cpu)
29734 BUG();
29735 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
29736 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
29737+
29738+#ifndef CONFIG_PAX_PER_CPU_PGD
29739 load_cr3(swapper_pg_dir);
29740+#endif
29741+
29742 }
29743 }
29744 EXPORT_SYMBOL_GPL(leave_mm);
29745diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
29746index 877b9a1..a8ecf42 100644
29747--- a/arch/x86/net/bpf_jit.S
29748+++ b/arch/x86/net/bpf_jit.S
29749@@ -9,6 +9,7 @@
29750 */
29751 #include <linux/linkage.h>
29752 #include <asm/dwarf2.h>
29753+#include <asm/alternative-asm.h>
29754
29755 /*
29756 * Calling convention :
29757@@ -35,6 +36,7 @@ sk_load_word_positive_offset:
29758 jle bpf_slow_path_word
29759 mov (SKBDATA,%rsi),%eax
29760 bswap %eax /* ntohl() */
29761+ pax_force_retaddr
29762 ret
29763
29764 sk_load_half:
29765@@ -52,6 +54,7 @@ sk_load_half_positive_offset:
29766 jle bpf_slow_path_half
29767 movzwl (SKBDATA,%rsi),%eax
29768 rol $8,%ax # ntohs()
29769+ pax_force_retaddr
29770 ret
29771
29772 sk_load_byte:
29773@@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
29774 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
29775 jle bpf_slow_path_byte
29776 movzbl (SKBDATA,%rsi),%eax
29777+ pax_force_retaddr
29778 ret
29779
29780 /**
29781@@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
29782 movzbl (SKBDATA,%rsi),%ebx
29783 and $15,%bl
29784 shl $2,%bl
29785+ pax_force_retaddr
29786 ret
29787
29788 /* rsi contains offset and can be scratched */
29789@@ -109,6 +114,7 @@ bpf_slow_path_word:
29790 js bpf_error
29791 mov -12(%rbp),%eax
29792 bswap %eax
29793+ pax_force_retaddr
29794 ret
29795
29796 bpf_slow_path_half:
29797@@ -117,12 +123,14 @@ bpf_slow_path_half:
29798 mov -12(%rbp),%ax
29799 rol $8,%ax
29800 movzwl %ax,%eax
29801+ pax_force_retaddr
29802 ret
29803
29804 bpf_slow_path_byte:
29805 bpf_slow_path_common(1)
29806 js bpf_error
29807 movzbl -12(%rbp),%eax
29808+ pax_force_retaddr
29809 ret
29810
29811 bpf_slow_path_byte_msh:
29812@@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
29813 and $15,%al
29814 shl $2,%al
29815 xchg %eax,%ebx
29816+ pax_force_retaddr
29817 ret
29818
29819 #define sk_negative_common(SIZE) \
29820@@ -157,6 +166,7 @@ sk_load_word_negative_offset:
29821 sk_negative_common(4)
29822 mov (%rax), %eax
29823 bswap %eax
29824+ pax_force_retaddr
29825 ret
29826
29827 bpf_slow_path_half_neg:
29828@@ -168,6 +178,7 @@ sk_load_half_negative_offset:
29829 mov (%rax),%ax
29830 rol $8,%ax
29831 movzwl %ax,%eax
29832+ pax_force_retaddr
29833 ret
29834
29835 bpf_slow_path_byte_neg:
29836@@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
29837 .globl sk_load_byte_negative_offset
29838 sk_negative_common(1)
29839 movzbl (%rax), %eax
29840+ pax_force_retaddr
29841 ret
29842
29843 bpf_slow_path_byte_msh_neg:
29844@@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
29845 and $15,%al
29846 shl $2,%al
29847 xchg %eax,%ebx
29848+ pax_force_retaddr
29849 ret
29850
29851 bpf_error:
29852@@ -197,4 +210,5 @@ bpf_error:
29853 xor %eax,%eax
29854 mov -8(%rbp),%rbx
29855 leaveq
29856+ pax_force_retaddr
29857 ret
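
Every ret in the JIT helper stubs gains a pax_force_retaddr, whose expansion is not part of this hunk. My understanding (an assumption, not something the patch confirms) is that it constrains the saved return address so a corrupted value cannot redirect execution into userland. The general bit-forcing idea, as a toy:

#include <stdio.h>
#include <stdint.h>

#define KERNEL_HALF 0xffff800000000000ULL   /* illustrative x86-64 split */

static uint64_t force_retaddr(uint64_t ra)
{
        return ra | KERNEL_HALF;    /* result always lands in the kernel half */
}

int main(void)
{
        printf("%#llx\n", (unsigned long long)force_retaddr(0x400000ULL));
        return 0;
}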
29858diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
29859index d11a470..3f9adff3 100644
29860--- a/arch/x86/net/bpf_jit_comp.c
29861+++ b/arch/x86/net/bpf_jit_comp.c
29862@@ -12,6 +12,7 @@
29863 #include <linux/netdevice.h>
29864 #include <linux/filter.h>
29865 #include <linux/if_vlan.h>
29866+#include <linux/random.h>
29867
29868 /*
29869 * Conventions :
29870@@ -49,13 +50,87 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
29871 return ptr + len;
29872 }
29873
29874+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29875+#define MAX_INSTR_CODE_SIZE 96
29876+#else
29877+#define MAX_INSTR_CODE_SIZE 64
29878+#endif
29879+
29880 #define EMIT(bytes, len) do { prog = emit_code(prog, bytes, len); } while (0)
29881
29882 #define EMIT1(b1) EMIT(b1, 1)
29883 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
29884 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
29885 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
29886+
29887+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29888+/* original constant will appear in ecx */
29889+#define DILUTE_CONST_SEQUENCE(_off, _key) \
29890+do { \
29891+ /* mov ecx, randkey */ \
29892+ EMIT1(0xb9); \
29893+ EMIT(_key, 4); \
29894+ /* xor ecx, randkey ^ off */ \
29895+ EMIT2(0x81, 0xf1); \
29896+ EMIT((_key) ^ (_off), 4); \
29897+} while (0)
29898+
29899+#define EMIT1_off32(b1, _off) \
29900+do { \
29901+ switch (b1) { \
29902+ case 0x05: /* add eax, imm32 */ \
29903+ case 0x2d: /* sub eax, imm32 */ \
29904+ case 0x25: /* and eax, imm32 */ \
29905+ case 0x0d: /* or eax, imm32 */ \
29906+ case 0xb8: /* mov eax, imm32 */ \
29907+ case 0x3d: /* cmp eax, imm32 */ \
29908+ case 0xa9: /* test eax, imm32 */ \
29909+ DILUTE_CONST_SEQUENCE(_off, randkey); \
29910+ EMIT2((b1) - 4, 0xc8); /* convert imm instruction to eax, ecx */\
29911+ break; \
29912+ case 0xbb: /* mov ebx, imm32 */ \
29913+ DILUTE_CONST_SEQUENCE(_off, randkey); \
29914+ /* mov ebx, ecx */ \
29915+ EMIT2(0x89, 0xcb); \
29916+ break; \
29917+ case 0xbe: /* mov esi, imm32 */ \
29918+ DILUTE_CONST_SEQUENCE(_off, randkey); \
29919+ /* mov esi, ecx */ \
29920+ EMIT2(0x89, 0xce); \
29921+ break; \
29922+ case 0xe9: /* jmp rel imm32 */ \
29923+ EMIT1(b1); \
29924+ EMIT(_off, 4); \
29925+ /* prevent fall-through, we're not called if off = 0 */ \
29926+ EMIT(0xcccccccc, 4); \
29927+ EMIT(0xcccccccc, 4); \
29928+ break; \
29929+ default: \
29930+ EMIT1(b1); \
29931+ EMIT(_off, 4); \
29932+ } \
29933+} while (0)
29934+
29935+#define EMIT2_off32(b1, b2, _off) \
29936+do { \
29937+ if ((b1) == 0x8d && (b2) == 0xb3) { /* lea esi, [rbx+imm32] */ \
29938+ EMIT2(0x8d, 0xb3); /* lea esi, [rbx+randkey] */ \
29939+ EMIT(randkey, 4); \
29940+ EMIT2(0x8d, 0xb6); /* lea esi, [esi+off-randkey] */ \
29941+ EMIT((_off) - randkey, 4); \
29942+ } else if ((b1) == 0x69 && (b2) == 0xc0) { /* imul eax, imm32 */\
29943+ DILUTE_CONST_SEQUENCE(_off, randkey); \
29944+ /* imul eax, ecx */ \
29945+ EMIT3(0x0f, 0xaf, 0xc1); \
29946+ } else { \
29947+ EMIT2(b1, b2); \
29948+ EMIT(_off, 4); \
29949+ } \
29950+} while (0)
29951+#else
29952 #define EMIT1_off32(b1, off) do { EMIT1(b1); EMIT(off, 4);} while (0)
29953+#define EMIT2_off32(b1, b2, off) do { EMIT2(b1, b2); EMIT(off, 4);} while (0)
29954+#endif
29955
29956 #define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
29957 #define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
29958@@ -90,6 +165,24 @@ do { \
29959 #define X86_JBE 0x76
29960 #define X86_JA 0x77
29961
29962+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29963+#define APPEND_FLOW_VERIFY() \
29964+do { \
29965+ /* mov ecx, randkey */ \
29966+ EMIT1(0xb9); \
29967+ EMIT(randkey, 4); \
29968+ /* cmp ecx, randkey */ \
29969+ EMIT2(0x81, 0xf9); \
29970+ EMIT(randkey, 4); \
29971+ /* jz after 8 int 3s */ \
29972+ EMIT2(0x74, 0x08); \
29973+ EMIT(0xcccccccc, 4); \
29974+ EMIT(0xcccccccc, 4); \
29975+} while (0)
29976+#else
29977+#define APPEND_FLOW_VERIFY() do { } while (0)
29978+#endif
29979+
29980 #define EMIT_COND_JMP(op, offset) \
29981 do { \
29982 if (is_near(offset)) \
29983@@ -97,6 +190,7 @@ do { \
29984 else { \
29985 EMIT2(0x0f, op + 0x10); \
29986 EMIT(offset, 4); /* jxx .+off32 */ \
29987+ APPEND_FLOW_VERIFY(); \
29988 } \
29989 } while (0)
29990
29991@@ -121,12 +215,17 @@ static inline void bpf_flush_icache(void *start, void *end)
29992 set_fs(old_fs);
29993 }
29994
29995+struct bpf_jit_work {
29996+ struct work_struct work;
29997+ void *image;
29998+};
29999+
30000 #define CHOOSE_LOAD_FUNC(K, func) \
30001 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
30002
30003 void bpf_jit_compile(struct sk_filter *fp)
30004 {
30005- u8 temp[64];
30006+ u8 temp[MAX_INSTR_CODE_SIZE];
30007 u8 *prog;
30008 unsigned int proglen, oldproglen = 0;
30009 int ilen, i;
30010@@ -139,6 +238,9 @@ void bpf_jit_compile(struct sk_filter *fp)
30011 unsigned int *addrs;
30012 const struct sock_filter *filter = fp->insns;
30013 int flen = fp->len;
30014+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
30015+ unsigned int randkey;
30016+#endif
30017
30018 if (!bpf_jit_enable)
30019 return;
30020@@ -147,11 +249,19 @@ void bpf_jit_compile(struct sk_filter *fp)
30021 if (addrs == NULL)
30022 return;
30023
30024+ fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
30025+ if (!fp->work)
30026+ goto out;
30027+
30028+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
30029+ randkey = get_random_int();
30030+#endif
30031+
30032 /* Before first pass, make a rough estimation of addrs[]
30033- * each bpf instruction is translated to less than 64 bytes
30034+ * each bpf instruction is translated to less than MAX_INSTR_CODE_SIZE bytes
30035 */
30036 for (proglen = 0, i = 0; i < flen; i++) {
30037- proglen += 64;
30038+ proglen += MAX_INSTR_CODE_SIZE;
30039 addrs[i] = proglen;
30040 }
30041 cleanup_addr = proglen; /* epilogue address */
30042@@ -261,10 +371,8 @@ void bpf_jit_compile(struct sk_filter *fp)
30043 case BPF_S_ALU_MUL_K: /* A *= K */
30044 if (is_imm8(K))
30045 EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
30046- else {
30047- EMIT2(0x69, 0xc0); /* imul imm32,%eax */
30048- EMIT(K, 4);
30049- }
30050+ else
30051+ EMIT2_off32(0x69, 0xc0, K); /* imul imm32,%eax */
30052 break;
30053 case BPF_S_ALU_DIV_X: /* A /= X; */
30054 seen |= SEEN_XREG;
30055@@ -304,13 +412,23 @@ void bpf_jit_compile(struct sk_filter *fp)
30056 break;
30057 case BPF_S_ALU_MOD_K: /* A %= K; */
30058 EMIT2(0x31, 0xd2); /* xor %edx,%edx */
30059+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
30060+ DILUTE_CONST_SEQUENCE(K, randkey);
30061+#else
30062 EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
30063+#endif
30064 EMIT2(0xf7, 0xf1); /* div %ecx */
30065 EMIT2(0x89, 0xd0); /* mov %edx,%eax */
30066 break;
30067 case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
30068+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
30069+ DILUTE_CONST_SEQUENCE(K, randkey);
30070+ // imul rax, rcx
30071+ EMIT4(0x48, 0x0f, 0xaf, 0xc1);
30072+#else
30073 EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
30074 EMIT(K, 4);
30075+#endif
30076 EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */
30077 break;
30078 case BPF_S_ALU_AND_X:
30079@@ -564,8 +682,7 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
30080 if (is_imm8(K)) {
30081 EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
30082 } else {
30083- EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
30084- EMIT(K, 4);
30085+ EMIT2_off32(0x8d, 0xb3, K); /* lea imm32(%rbx),%esi */
30086 }
30087 } else {
30088 EMIT2(0x89,0xde); /* mov %ebx,%esi */
30089@@ -648,17 +765,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
30090 break;
30091 default:
30092 /* hmm, too complex filter, give up with jit compiler */
30093- goto out;
30094+ goto error;
30095 }
30096 ilen = prog - temp;
30097 if (image) {
30098 if (unlikely(proglen + ilen > oldproglen)) {
30099 pr_err("bpb_jit_compile fatal error\n");
30100- kfree(addrs);
30101- module_free(NULL, image);
30102- return;
30103+ module_free_exec(NULL, image);
30104+ goto error;
30105 }
30106+ pax_open_kernel();
30107 memcpy(image + proglen, temp, ilen);
30108+ pax_close_kernel();
30109 }
30110 proglen += ilen;
30111 addrs[i] = proglen;
30112@@ -679,11 +797,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
30113 break;
30114 }
30115 if (proglen == oldproglen) {
30116- image = module_alloc(max_t(unsigned int,
30117- proglen,
30118- sizeof(struct work_struct)));
30119+ image = module_alloc_exec(proglen);
30120 if (!image)
30121- goto out;
30122+ goto error;
30123 }
30124 oldproglen = proglen;
30125 }
30126@@ -699,7 +815,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
30127 bpf_flush_icache(image, image + proglen);
30128
30129 fp->bpf_func = (void *)image;
30130- }
30131+ } else
30132+error:
30133+ kfree(fp->work);
30134+
30135 out:
30136 kfree(addrs);
30137 return;
30138@@ -707,18 +826,20 @@ out:
30139
30140 static void jit_free_defer(struct work_struct *arg)
30141 {
30142- module_free(NULL, arg);
30143+ module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
30144+ kfree(arg);
30145 }
30146
30147 /* run from softirq, we must use a work_struct to call
30148- * module_free() from process context
30149+ * module_free_exec() from process context
30150 */
30151 void bpf_jit_free(struct sk_filter *fp)
30152 {
30153 if (fp->bpf_func != sk_run_filter) {
30154- struct work_struct *work = (struct work_struct *)fp->bpf_func;
30155+ struct work_struct *work = &fp->work->work;
30156
30157 INIT_WORK(work, jit_free_defer);
30158+ fp->work->image = fp->bpf_func;
30159 schedule_work(work);
30160 }
30161 }
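
The GRKERNSEC_JIT_HARDEN machinery above is constant blinding: DILUTE_CONST_SEQUENCE never emits an attacker-supplied immediate verbatim, only the pair (randkey, randkey ^ imm), which the generated mov/xor recombines at run time, so a filter cannot smuggle chosen byte sequences into executable JIT memory. The transformation in plain C:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <time.h>

int main(void)
{
        uint32_t imm = 0xdeadbeef;          /* attacker-chosen filter constant */
        srand((unsigned)time(NULL));
        uint32_t key = (uint32_t)rand();    /* fresh key per compile, as in
                                               randkey = get_random_int() */

        uint32_t emitted1 = key;            /* mov ecx, randkey        */
        uint32_t emitted2 = key ^ imm;      /* xor ecx, randkey ^ imm  */

        uint32_t ecx = emitted1 ^ emitted2; /* what the JITed code computes */
        printf("recovered %#x from %#x and %#x\n", ecx, emitted1, emitted2);
        return 0;
}

MAX_INSTR_CODE_SIZE grows from 64 to 96 bytes per instruction for the same reason: each blinded immediate costs extra mov/xor bytes.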
30162diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
30163index d6aa6e8..266395a 100644
30164--- a/arch/x86/oprofile/backtrace.c
30165+++ b/arch/x86/oprofile/backtrace.c
30166@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
30167 struct stack_frame_ia32 *fp;
30168 unsigned long bytes;
30169
30170- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
30171+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
30172 if (bytes != sizeof(bufhead))
30173 return NULL;
30174
30175- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
30176+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
30177
30178 oprofile_add_trace(bufhead[0].return_address);
30179
30180@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
30181 struct stack_frame bufhead[2];
30182 unsigned long bytes;
30183
30184- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
30185+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
30186 if (bytes != sizeof(bufhead))
30187 return NULL;
30188
30189@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
30190 {
30191 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
30192
30193- if (!user_mode_vm(regs)) {
30194+ if (!user_mode(regs)) {
30195 unsigned long stack = kernel_stack_pointer(regs);
30196 if (depth)
30197 dump_trace(NULL, regs, (unsigned long *)stack, 0,
30198diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
30199index 48768df..ba9143c 100644
30200--- a/arch/x86/oprofile/nmi_int.c
30201+++ b/arch/x86/oprofile/nmi_int.c
30202@@ -23,6 +23,7 @@
30203 #include <asm/nmi.h>
30204 #include <asm/msr.h>
30205 #include <asm/apic.h>
30206+#include <asm/pgtable.h>
30207
30208 #include "op_counter.h"
30209 #include "op_x86_model.h"
30210@@ -774,8 +775,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
30211 if (ret)
30212 return ret;
30213
30214- if (!model->num_virt_counters)
30215- model->num_virt_counters = model->num_counters;
30216+ if (!model->num_virt_counters) {
30217+ pax_open_kernel();
30218+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
30219+ pax_close_kernel();
30220+ }
30221
30222 mux_init(ops);
30223
30224diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
30225index b2b9443..be58856 100644
30226--- a/arch/x86/oprofile/op_model_amd.c
30227+++ b/arch/x86/oprofile/op_model_amd.c
30228@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
30229 num_counters = AMD64_NUM_COUNTERS;
30230 }
30231
30232- op_amd_spec.num_counters = num_counters;
30233- op_amd_spec.num_controls = num_counters;
30234- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
30235+ pax_open_kernel();
30236+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
30237+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
30238+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
30239+ pax_close_kernel();
30240
30241 return 0;
30242 }
30243diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
30244index d90528e..0127e2b 100644
30245--- a/arch/x86/oprofile/op_model_ppro.c
30246+++ b/arch/x86/oprofile/op_model_ppro.c
30247@@ -19,6 +19,7 @@
30248 #include <asm/msr.h>
30249 #include <asm/apic.h>
30250 #include <asm/nmi.h>
30251+#include <asm/pgtable.h>
30252
30253 #include "op_x86_model.h"
30254 #include "op_counter.h"
30255@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
30256
30257 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
30258
30259- op_arch_perfmon_spec.num_counters = num_counters;
30260- op_arch_perfmon_spec.num_controls = num_counters;
30261+ pax_open_kernel();
30262+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
30263+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
30264+ pax_close_kernel();
30265 }
30266
30267 static int arch_perfmon_init(struct oprofile_operations *ignore)
30268diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
30269index 71e8a67..6a313bb 100644
30270--- a/arch/x86/oprofile/op_x86_model.h
30271+++ b/arch/x86/oprofile/op_x86_model.h
30272@@ -52,7 +52,7 @@ struct op_x86_model_spec {
30273 void (*switch_ctrl)(struct op_x86_model_spec const *model,
30274 struct op_msrs const * const msrs);
30275 #endif
30276-};
30277+} __do_const;
30278
30279 struct op_counter_config;
30280
30281diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
30282index e9e6ed5..e47ae67 100644
30283--- a/arch/x86/pci/amd_bus.c
30284+++ b/arch/x86/pci/amd_bus.c
30285@@ -337,7 +337,7 @@ static int __cpuinit amd_cpu_notify(struct notifier_block *self,
30286 return NOTIFY_OK;
30287 }
30288
30289-static struct notifier_block __cpuinitdata amd_cpu_notifier = {
30290+static struct notifier_block amd_cpu_notifier = {
30291 .notifier_call = amd_cpu_notify,
30292 };
30293
30294diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
30295index 372e9b8..e775a6c 100644
30296--- a/arch/x86/pci/irq.c
30297+++ b/arch/x86/pci/irq.c
30298@@ -50,7 +50,7 @@ struct irq_router {
30299 struct irq_router_handler {
30300 u16 vendor;
30301 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
30302-};
30303+} __do_const;
30304
30305 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
30306 void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL;
30307@@ -794,7 +794,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
30308 return 0;
30309 }
30310
30311-static __initdata struct irq_router_handler pirq_routers[] = {
30312+static __initconst const struct irq_router_handler pirq_routers[] = {
30313 { PCI_VENDOR_ID_INTEL, intel_router_probe },
30314 { PCI_VENDOR_ID_AL, ali_router_probe },
30315 { PCI_VENDOR_ID_ITE, ite_router_probe },
30316@@ -821,7 +821,7 @@ static struct pci_dev *pirq_router_dev;
30317 static void __init pirq_find_router(struct irq_router *r)
30318 {
30319 struct irq_routing_table *rt = pirq_table;
30320- struct irq_router_handler *h;
30321+ const struct irq_router_handler *h;
30322
30323 #ifdef CONFIG_PCI_BIOS
30324 if (!rt->signature) {
30325@@ -1094,7 +1094,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
30326 return 0;
30327 }
30328
30329-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
30330+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
30331 {
30332 .callback = fix_broken_hp_bios_irq9,
30333 .ident = "HP Pavilion N5400 Series Laptop",
30334diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
30335index 6eb18c4..20d83de 100644
30336--- a/arch/x86/pci/mrst.c
30337+++ b/arch/x86/pci/mrst.c
30338@@ -238,7 +238,9 @@ int __init pci_mrst_init(void)
30339 printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n");
30340 pci_mmcfg_late_init();
30341 pcibios_enable_irq = mrst_pci_irq_enable;
30342- pci_root_ops = pci_mrst_ops;
30343+ pax_open_kernel();
30344+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
30345+ pax_close_kernel();
30346 pci_soc_mode = 1;
30347 /* Continue with standard init */
30348 return 1;
30349diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
30350index c77b24a..c979855 100644
30351--- a/arch/x86/pci/pcbios.c
30352+++ b/arch/x86/pci/pcbios.c
30353@@ -79,7 +79,7 @@ union bios32 {
30354 static struct {
30355 unsigned long address;
30356 unsigned short segment;
30357-} bios32_indirect = { 0, __KERNEL_CS };
30358+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
30359
30360 /*
30361 * Returns the entry point for the given service, NULL on error
30362@@ -92,37 +92,80 @@ static unsigned long bios32_service(unsigned long service)
30363 unsigned long length; /* %ecx */
30364 unsigned long entry; /* %edx */
30365 unsigned long flags;
30366+ struct desc_struct d, *gdt;
30367
30368 local_irq_save(flags);
30369- __asm__("lcall *(%%edi); cld"
30370+
30371+ gdt = get_cpu_gdt_table(smp_processor_id());
30372+
30373+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
30374+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
30375+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
30376+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
30377+
30378+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
30379 : "=a" (return_code),
30380 "=b" (address),
30381 "=c" (length),
30382 "=d" (entry)
30383 : "0" (service),
30384 "1" (0),
30385- "D" (&bios32_indirect));
30386+ "D" (&bios32_indirect),
30387+ "r"(__PCIBIOS_DS)
30388+ : "memory");
30389+
30390+ pax_open_kernel();
30391+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
30392+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
30393+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
30394+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
30395+ pax_close_kernel();
30396+
30397 local_irq_restore(flags);
30398
30399 switch (return_code) {
30400- case 0:
30401- return address + entry;
30402- case 0x80: /* Not present */
30403- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
30404- return 0;
30405- default: /* Shouldn't happen */
30406- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
30407- service, return_code);
30408+ case 0: {
30409+ int cpu;
30410+ unsigned char flags;
30411+
30412+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
30413+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
30414+ printk(KERN_WARNING "bios32_service: not valid\n");
30415 return 0;
30416+ }
30417+ address = address + PAGE_OFFSET;
30418+ length += 16UL; /* some BIOSs underreport this... */
30419+ flags = 4;
30420+ if (length >= 64*1024*1024) {
30421+ length >>= PAGE_SHIFT;
30422+ flags |= 8;
30423+ }
30424+
30425+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
30426+ gdt = get_cpu_gdt_table(cpu);
30427+ pack_descriptor(&d, address, length, 0x9b, flags);
30428+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
30429+ pack_descriptor(&d, address, length, 0x93, flags);
30430+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
30431+ }
30432+ return entry;
30433+ }
30434+ case 0x80: /* Not present */
30435+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
30436+ return 0;
30437+ default: /* Shouldn't happen */
30438+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
30439+ service, return_code);
30440+ return 0;
30441 }
30442 }
30443
30444 static struct {
30445 unsigned long address;
30446 unsigned short segment;
30447-} pci_indirect = { 0, __KERNEL_CS };
30448+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
30449
30450-static int pci_bios_present;
30451+static int pci_bios_present __read_only;
30452
30453 static int check_pcibios(void)
30454 {
30455@@ -131,11 +174,13 @@ static int check_pcibios(void)
30456 unsigned long flags, pcibios_entry;
30457
30458 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
30459- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
30460+ pci_indirect.address = pcibios_entry;
30461
30462 local_irq_save(flags);
30463- __asm__(
30464- "lcall *(%%edi); cld\n\t"
30465+ __asm__("movw %w6, %%ds\n\t"
30466+ "lcall *%%ss:(%%edi); cld\n\t"
30467+ "push %%ss\n\t"
30468+ "pop %%ds\n\t"
30469 "jc 1f\n\t"
30470 "xor %%ah, %%ah\n"
30471 "1:"
30472@@ -144,7 +189,8 @@ static int check_pcibios(void)
30473 "=b" (ebx),
30474 "=c" (ecx)
30475 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
30476- "D" (&pci_indirect)
30477+ "D" (&pci_indirect),
30478+ "r" (__PCIBIOS_DS)
30479 : "memory");
30480 local_irq_restore(flags);
30481
30482@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30483
30484 switch (len) {
30485 case 1:
30486- __asm__("lcall *(%%esi); cld\n\t"
30487+ __asm__("movw %w6, %%ds\n\t"
30488+ "lcall *%%ss:(%%esi); cld\n\t"
30489+ "push %%ss\n\t"
30490+ "pop %%ds\n\t"
30491 "jc 1f\n\t"
30492 "xor %%ah, %%ah\n"
30493 "1:"
30494@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30495 : "1" (PCIBIOS_READ_CONFIG_BYTE),
30496 "b" (bx),
30497 "D" ((long)reg),
30498- "S" (&pci_indirect));
30499+ "S" (&pci_indirect),
30500+ "r" (__PCIBIOS_DS));
30501 /*
30502 * Zero-extend the result beyond 8 bits, do not trust the
30503 * BIOS having done it:
30504@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30505 *value &= 0xff;
30506 break;
30507 case 2:
30508- __asm__("lcall *(%%esi); cld\n\t"
30509+ __asm__("movw %w6, %%ds\n\t"
30510+ "lcall *%%ss:(%%esi); cld\n\t"
30511+ "push %%ss\n\t"
30512+ "pop %%ds\n\t"
30513 "jc 1f\n\t"
30514 "xor %%ah, %%ah\n"
30515 "1:"
30516@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30517 : "1" (PCIBIOS_READ_CONFIG_WORD),
30518 "b" (bx),
30519 "D" ((long)reg),
30520- "S" (&pci_indirect));
30521+ "S" (&pci_indirect),
30522+ "r" (__PCIBIOS_DS));
30523 /*
30524 * Zero-extend the result beyond 16 bits, do not trust the
30525 * BIOS having done it:
30526@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30527 *value &= 0xffff;
30528 break;
30529 case 4:
30530- __asm__("lcall *(%%esi); cld\n\t"
30531+ __asm__("movw %w6, %%ds\n\t"
30532+ "lcall *%%ss:(%%esi); cld\n\t"
30533+ "push %%ss\n\t"
30534+ "pop %%ds\n\t"
30535 "jc 1f\n\t"
30536 "xor %%ah, %%ah\n"
30537 "1:"
30538@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30539 : "1" (PCIBIOS_READ_CONFIG_DWORD),
30540 "b" (bx),
30541 "D" ((long)reg),
30542- "S" (&pci_indirect));
30543+ "S" (&pci_indirect),
30544+ "r" (__PCIBIOS_DS));
30545 break;
30546 }
30547
30548@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
30549
30550 switch (len) {
30551 case 1:
30552- __asm__("lcall *(%%esi); cld\n\t"
30553+ __asm__("movw %w6, %%ds\n\t"
30554+ "lcall *%%ss:(%%esi); cld\n\t"
30555+ "push %%ss\n\t"
30556+ "pop %%ds\n\t"
30557 "jc 1f\n\t"
30558 "xor %%ah, %%ah\n"
30559 "1:"
30560@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
30561 "c" (value),
30562 "b" (bx),
30563 "D" ((long)reg),
30564- "S" (&pci_indirect));
30565+ "S" (&pci_indirect),
30566+ "r" (__PCIBIOS_DS));
30567 break;
30568 case 2:
30569- __asm__("lcall *(%%esi); cld\n\t"
30570+ __asm__("movw %w6, %%ds\n\t"
30571+ "lcall *%%ss:(%%esi); cld\n\t"
30572+ "push %%ss\n\t"
30573+ "pop %%ds\n\t"
30574 "jc 1f\n\t"
30575 "xor %%ah, %%ah\n"
30576 "1:"
30577@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
30578 "c" (value),
30579 "b" (bx),
30580 "D" ((long)reg),
30581- "S" (&pci_indirect));
30582+ "S" (&pci_indirect),
30583+ "r" (__PCIBIOS_DS));
30584 break;
30585 case 4:
30586- __asm__("lcall *(%%esi); cld\n\t"
30587+ __asm__("movw %w6, %%ds\n\t"
30588+ "lcall *%%ss:(%%esi); cld\n\t"
30589+ "push %%ss\n\t"
30590+ "pop %%ds\n\t"
30591 "jc 1f\n\t"
30592 "xor %%ah, %%ah\n"
30593 "1:"
30594@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
30595 "c" (value),
30596 "b" (bx),
30597 "D" ((long)reg),
30598- "S" (&pci_indirect));
30599+ "S" (&pci_indirect),
30600+ "r" (__PCIBIOS_DS));
30601 break;
30602 }
30603
30604@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
30605
30606 DBG("PCI: Fetching IRQ routing table... ");
30607 __asm__("push %%es\n\t"
30608+ "movw %w8, %%ds\n\t"
30609 "push %%ds\n\t"
30610 "pop %%es\n\t"
30611- "lcall *(%%esi); cld\n\t"
30612+ "lcall *%%ss:(%%esi); cld\n\t"
30613 "pop %%es\n\t"
30614+ "push %%ss\n\t"
30615+ "pop %%ds\n"
30616 "jc 1f\n\t"
30617 "xor %%ah, %%ah\n"
30618 "1:"
30619@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
30620 "1" (0),
30621 "D" ((long) &opt),
30622 "S" (&pci_indirect),
30623- "m" (opt)
30624+ "m" (opt),
30625+ "r" (__PCIBIOS_DS)
30626 : "memory");
30627 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
30628 if (ret & 0xff00)
30629@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
30630 {
30631 int ret;
30632
30633- __asm__("lcall *(%%esi); cld\n\t"
30634+ __asm__("movw %w5, %%ds\n\t"
30635+ "lcall *%%ss:(%%esi); cld\n\t"
30636+ "push %%ss\n\t"
30637+ "pop %%ds\n"
30638 "jc 1f\n\t"
30639 "xor %%ah, %%ah\n"
30640 "1:"
30641@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
30642 : "0" (PCIBIOS_SET_PCI_HW_INT),
30643 "b" ((dev->bus->number << 8) | dev->devfn),
30644 "c" ((irq << 8) | (pin + 10)),
30645- "S" (&pci_indirect));
30646+ "S" (&pci_indirect),
30647+ "r" (__PCIBIOS_DS));
30648 return !(ret & 0xff00);
30649 }
30650 EXPORT_SYMBOL(pcibios_set_irq_routing);
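[annotation] Every pcbios.c hunk above applies one pattern: under PaX's segmentation-based hardening the kernel %ds no longer spans the BIOS ranges, so each lcall through pci_indirect is bracketed by loading a dedicated __PCIBIOS_DS selector into %ds and restoring the flat kernel data segment (via push/pop of %ss) afterwards. pci_indirect itself is addressed through %ss because %ds points elsewhere during the call, and the new "r" (__PCIBIOS_DS) input is what the %w5/%w6/%w8 operand references resolve to. A minimal sketch of how the operand numbering works (illustrative only: 32-bit, ring-0; the selector and entry values are stand-ins, not real kernel symbols):

    /* Sketch of the hardened BIOS-call shape.  Operand numbering:
     * %0 = ret (eax), %1 ties the input 0 to %0, %2 = sel, %3 = entry. */
    static inline int pcibios_call_sketch(unsigned long entry, unsigned short sel)
    {
        int ret;
        __asm__("movw %w2, %%ds\n\t"           /* switch %ds to the BIOS selector */
                "lcall *%%ss:(%%esi); cld\n\t" /* far call via %ss while %ds is away */
                "push %%ss\n\t"
                "pop %%ds\n\t"                 /* restore the flat kernel %ds */
                : "=a" (ret)
                : "0" (0), "r" (sel), "S" (entry)
                : "memory");
        return ret;
    }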
30651diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
30652index 40e4469..1ab536e 100644
30653--- a/arch/x86/platform/efi/efi_32.c
30654+++ b/arch/x86/platform/efi/efi_32.c
30655@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
30656 {
30657 struct desc_ptr gdt_descr;
30658
30659+#ifdef CONFIG_PAX_KERNEXEC
30660+ struct desc_struct d;
30661+#endif
30662+
30663 local_irq_save(efi_rt_eflags);
30664
30665 load_cr3(initial_page_table);
30666 __flush_tlb_all();
30667
30668+#ifdef CONFIG_PAX_KERNEXEC
30669+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
30670+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
30671+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
30672+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
30673+#endif
30674+
30675 gdt_descr.address = __pa(get_cpu_gdt_table(0));
30676 gdt_descr.size = GDT_SIZE - 1;
30677 load_gdt(&gdt_descr);
30678@@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
30679 {
30680 struct desc_ptr gdt_descr;
30681
30682+#ifdef CONFIG_PAX_KERNEXEC
30683+ struct desc_struct d;
30684+
30685+ memset(&d, 0, sizeof d);
30686+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
30687+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
30688+#endif
30689+
30690 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
30691 gdt_descr.size = GDT_SIZE - 1;
30692 load_gdt(&gdt_descr);
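[annotation] Under CONFIG_PAX_KERNEXEC the kernel code segment is not flat, but EFI runtime services expect flat 1:1 descriptors; the prelog therefore installs temporary flat GDT entries (GDT_ENTRY_KERNEXEC_EFI_CS/DS) and the epilog zeroes them again so the window only exists around the call. Decoding the pack_descriptor() arguments (signature as in arch/x86/include/asm/desc.h of this era; annotation only, not standalone code):

    pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
    /* base 0, limit 0xFFFFF:  type 0x9B = present | ring0 | code,
     * execute/read;  flags 0xC = 32-bit | 4KiB granularity
     * => a flat 0..4GiB code segment for the EFI thunk            */
    pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
    /* type 0x93 = present | ring0 | data, read/write: the matching
     * flat data segment                                            */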
30693diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
30694index fbe66e6..eae5e38 100644
30695--- a/arch/x86/platform/efi/efi_stub_32.S
30696+++ b/arch/x86/platform/efi/efi_stub_32.S
30697@@ -6,7 +6,9 @@
30698 */
30699
30700 #include <linux/linkage.h>
30701+#include <linux/init.h>
30702 #include <asm/page_types.h>
30703+#include <asm/segment.h>
30704
30705 /*
30706 * efi_call_phys(void *, ...) is a function with variable parameters.
30707@@ -20,7 +22,7 @@
30708 * service functions will comply with gcc calling convention, too.
30709 */
30710
30711-.text
30712+__INIT
30713 ENTRY(efi_call_phys)
30714 /*
30715 * 0. The function can only be called in Linux kernel. So CS has been
30716@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
30717 * The mapping of lower virtual memory has been created in prelog and
30718 * epilog.
30719 */
30720- movl $1f, %edx
30721- subl $__PAGE_OFFSET, %edx
30722- jmp *%edx
30723+#ifdef CONFIG_PAX_KERNEXEC
30724+ movl $(__KERNEXEC_EFI_DS), %edx
30725+ mov %edx, %ds
30726+ mov %edx, %es
30727+ mov %edx, %ss
30728+ addl $2f,(1f)
30729+ ljmp *(1f)
30730+
30731+__INITDATA
30732+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
30733+.previous
30734+
30735+2:
30736+ subl $2b,(1b)
30737+#else
30738+ jmp 1f-__PAGE_OFFSET
30739 1:
30740+#endif
30741
30742 /*
30743 * 2. Now on the top of stack is the return
30744@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
30745 * parameter 2, ..., param n. To make things easy, we save the return
30746 * address of efi_call_phys in a global variable.
30747 */
30748- popl %edx
30749- movl %edx, saved_return_addr
30750- /* get the function pointer into ECX*/
30751- popl %ecx
30752- movl %ecx, efi_rt_function_ptr
30753- movl $2f, %edx
30754- subl $__PAGE_OFFSET, %edx
30755- pushl %edx
30756+ popl (saved_return_addr)
30757+ popl (efi_rt_function_ptr)
30758
30759 /*
30760 * 3. Clear PG bit in %CR0.
30761@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
30762 /*
30763 * 5. Call the physical function.
30764 */
30765- jmp *%ecx
30766+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
30767
30768-2:
30769 /*
30770 * 6. After EFI runtime service returns, control will return to
30771 * following instruction. We'd better readjust stack pointer first.
30772@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
30773 movl %cr0, %edx
30774 orl $0x80000000, %edx
30775 movl %edx, %cr0
30776- jmp 1f
30777-1:
30778+
30779 /*
30780 * 8. Now restore the virtual mode from flat mode by
30781 * adding EIP with PAGE_OFFSET.
30782 */
30783- movl $1f, %edx
30784- jmp *%edx
30785+#ifdef CONFIG_PAX_KERNEXEC
30786+ movl $(__KERNEL_DS), %edx
30787+ mov %edx, %ds
30788+ mov %edx, %es
30789+ mov %edx, %ss
30790+ ljmp $(__KERNEL_CS),$1f
30791+#else
30792+ jmp 1f+__PAGE_OFFSET
30793+#endif
30794 1:
30795
30796 /*
30797 * 9. Balance the stack. And because EAX contain the return value,
30798 * we'd better not clobber it.
30799 */
30800- leal efi_rt_function_ptr, %edx
30801- movl (%edx), %ecx
30802- pushl %ecx
30803+ pushl (efi_rt_function_ptr)
30804
30805 /*
30806- * 10. Push the saved return address onto the stack and return.
30807+ * 10. Return to the saved return address.
30808 */
30809- leal saved_return_addr, %edx
30810- movl (%edx), %ecx
30811- pushl %ecx
30812- ret
30813+ jmpl *(saved_return_addr)
30814 ENDPROC(efi_call_phys)
30815 .previous
30816
30817-.data
30818+__INITDATA
30819 saved_return_addr:
30820 .long 0
30821 efi_rt_function_ptr:
30822diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
30823index 4c07cca..2c8427d 100644
30824--- a/arch/x86/platform/efi/efi_stub_64.S
30825+++ b/arch/x86/platform/efi/efi_stub_64.S
30826@@ -7,6 +7,7 @@
30827 */
30828
30829 #include <linux/linkage.h>
30830+#include <asm/alternative-asm.h>
30831
30832 #define SAVE_XMM \
30833 mov %rsp, %rax; \
30834@@ -40,6 +41,7 @@ ENTRY(efi_call0)
30835 call *%rdi
30836 addq $32, %rsp
30837 RESTORE_XMM
30838+ pax_force_retaddr 0, 1
30839 ret
30840 ENDPROC(efi_call0)
30841
30842@@ -50,6 +52,7 @@ ENTRY(efi_call1)
30843 call *%rdi
30844 addq $32, %rsp
30845 RESTORE_XMM
30846+ pax_force_retaddr 0, 1
30847 ret
30848 ENDPROC(efi_call1)
30849
30850@@ -60,6 +63,7 @@ ENTRY(efi_call2)
30851 call *%rdi
30852 addq $32, %rsp
30853 RESTORE_XMM
30854+ pax_force_retaddr 0, 1
30855 ret
30856 ENDPROC(efi_call2)
30857
30858@@ -71,6 +75,7 @@ ENTRY(efi_call3)
30859 call *%rdi
30860 addq $32, %rsp
30861 RESTORE_XMM
30862+ pax_force_retaddr 0, 1
30863 ret
30864 ENDPROC(efi_call3)
30865
30866@@ -83,6 +88,7 @@ ENTRY(efi_call4)
30867 call *%rdi
30868 addq $32, %rsp
30869 RESTORE_XMM
30870+ pax_force_retaddr 0, 1
30871 ret
30872 ENDPROC(efi_call4)
30873
30874@@ -96,6 +102,7 @@ ENTRY(efi_call5)
30875 call *%rdi
30876 addq $48, %rsp
30877 RESTORE_XMM
30878+ pax_force_retaddr 0, 1
30879 ret
30880 ENDPROC(efi_call5)
30881
30882@@ -112,5 +119,6 @@ ENTRY(efi_call6)
30883 call *%rdi
30884 addq $48, %rsp
30885 RESTORE_XMM
30886+ pax_force_retaddr 0, 1
30887 ret
30888 ENDPROC(efi_call6)
30889diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
30890index e31bcd8..f12dc46 100644
30891--- a/arch/x86/platform/mrst/mrst.c
30892+++ b/arch/x86/platform/mrst/mrst.c
30893@@ -78,13 +78,15 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
30894 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
30895 int sfi_mrtc_num;
30896
30897-static void mrst_power_off(void)
30898+static __noreturn void mrst_power_off(void)
30899 {
30900+ BUG();
30901 }
30902
30903-static void mrst_reboot(void)
30904+static __noreturn void mrst_reboot(void)
30905 {
30906 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
30907+ BUG();
30908 }
30909
30910 /* parse all the mtimer info to a static mtimer array */
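[annotation] Marking the power-off/reboot hooks __noreturn documents that control never comes back, and the added BUG() makes the promise hold even if the SCU IPC command returns: a function declared noreturn must not fall off its end, or behavior is undefined. A runnable userspace analogue, with abort() standing in for the kernel's BUG():

    #include <stdio.h>
    #include <stdlib.h>

    /* A noreturn handler must end in something that truly does not
     * return; without the abort(), falling off the end would be UB. */
    static __attribute__((noreturn)) void power_off_stub(void)
    {
        fprintf(stderr, "power-off request had no effect\n");
        abort();
    }

    int main(void)
    {
        power_off_stub();
        /* unreachable: the compiler can now prove this */
    }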
30911diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
30912index d6ee929..3637cb5 100644
30913--- a/arch/x86/platform/olpc/olpc_dt.c
30914+++ b/arch/x86/platform/olpc/olpc_dt.c
30915@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
30916 return res;
30917 }
30918
30919-static struct of_pdt_ops prom_olpc_ops __initdata = {
30920+static struct of_pdt_ops prom_olpc_ops __initconst = {
30921 .nextprop = olpc_dt_nextprop,
30922 .getproplen = olpc_dt_getproplen,
30923 .getproperty = olpc_dt_getproperty,
30924diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
30925index 3c68768..07e82b8 100644
30926--- a/arch/x86/power/cpu.c
30927+++ b/arch/x86/power/cpu.c
30928@@ -134,7 +134,7 @@ static void do_fpu_end(void)
30929 static void fix_processor_context(void)
30930 {
30931 int cpu = smp_processor_id();
30932- struct tss_struct *t = &per_cpu(init_tss, cpu);
30933+ struct tss_struct *t = init_tss + cpu;
30934
30935 set_tss_desc(cpu, t); /*
30936 * This just modifies memory; should not be
30937@@ -144,8 +144,6 @@ static void fix_processor_context(void)
30938 */
30939
30940 #ifdef CONFIG_X86_64
30941- get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
30942-
30943 syscall_init(); /* This sets MSR_*STAR and related */
30944 #endif
30945 load_TR_desc(); /* This does ltr */
30946diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
30947index cbca565..bae7133 100644
30948--- a/arch/x86/realmode/init.c
30949+++ b/arch/x86/realmode/init.c
30950@@ -62,7 +62,13 @@ void __init setup_real_mode(void)
30951 __va(real_mode_header->trampoline_header);
30952
30953 #ifdef CONFIG_X86_32
30954- trampoline_header->start = __pa(startup_32_smp);
30955+ trampoline_header->start = __pa(ktla_ktva(startup_32_smp));
30956+
30957+#ifdef CONFIG_PAX_KERNEXEC
30958+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
30959+#endif
30960+
30961+ trampoline_header->boot_cs = __BOOT_CS;
30962 trampoline_header->gdt_limit = __BOOT_DS + 7;
30963 trampoline_header->gdt_base = __pa(boot_gdt);
30964 #else
30965diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
30966index 8869287..d577672 100644
30967--- a/arch/x86/realmode/rm/Makefile
30968+++ b/arch/x86/realmode/rm/Makefile
30969@@ -78,5 +78,8 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
30970 $(call cc-option, -fno-unit-at-a-time)) \
30971 $(call cc-option, -fno-stack-protector) \
30972 $(call cc-option, -mpreferred-stack-boundary=2)
30973+ifdef CONSTIFY_PLUGIN
30974+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
30975+endif
30976 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
30977 GCOV_PROFILE := n
30978diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
30979index a28221d..93c40f1 100644
30980--- a/arch/x86/realmode/rm/header.S
30981+++ b/arch/x86/realmode/rm/header.S
30982@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
30983 #endif
30984 /* APM/BIOS reboot */
30985 .long pa_machine_real_restart_asm
30986-#ifdef CONFIG_X86_64
30987+#ifdef CONFIG_X86_32
30988+ .long __KERNEL_CS
30989+#else
30990 .long __KERNEL32_CS
30991 #endif
30992 END(real_mode_header)
30993diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
30994index c1b2791..f9e31c7 100644
30995--- a/arch/x86/realmode/rm/trampoline_32.S
30996+++ b/arch/x86/realmode/rm/trampoline_32.S
30997@@ -25,6 +25,12 @@
30998 #include <asm/page_types.h>
30999 #include "realmode.h"
31000
31001+#ifdef CONFIG_PAX_KERNEXEC
31002+#define ta(X) (X)
31003+#else
31004+#define ta(X) (pa_ ## X)
31005+#endif
31006+
31007 .text
31008 .code16
31009
31010@@ -39,8 +45,6 @@ ENTRY(trampoline_start)
31011
31012 cli # We should be safe anyway
31013
31014- movl tr_start, %eax # where we need to go
31015-
31016 movl $0xA5A5A5A5, trampoline_status
31017 # write marker for master knows we're running
31018
31019@@ -56,7 +60,7 @@ ENTRY(trampoline_start)
31020 movw $1, %dx # protected mode (PE) bit
31021 lmsw %dx # into protected mode
31022
31023- ljmpl $__BOOT_CS, $pa_startup_32
31024+ ljmpl *(trampoline_header)
31025
31026 .section ".text32","ax"
31027 .code32
31028@@ -67,7 +71,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
31029 .balign 8
31030 GLOBAL(trampoline_header)
31031 tr_start: .space 4
31032- tr_gdt_pad: .space 2
31033+ tr_boot_cs: .space 2
31034 tr_gdt: .space 6
31035 END(trampoline_header)
31036
31037diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
31038index bb360dc..3e5945f 100644
31039--- a/arch/x86/realmode/rm/trampoline_64.S
31040+++ b/arch/x86/realmode/rm/trampoline_64.S
31041@@ -107,7 +107,7 @@ ENTRY(startup_32)
31042 wrmsr
31043
31044 # Enable paging and in turn activate Long Mode
31045- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
31046+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
31047 movl %eax, %cr0
31048
31049 /*
31050diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
31051index 79d67bd..c7e1b90 100644
31052--- a/arch/x86/tools/relocs.c
31053+++ b/arch/x86/tools/relocs.c
31054@@ -12,10 +12,13 @@
31055 #include <regex.h>
31056 #include <tools/le_byteshift.h>
31057
31058+#include "../../../include/generated/autoconf.h"
31059+
31060 static void die(char *fmt, ...);
31061
31062 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
31063 static Elf32_Ehdr ehdr;
31064+static Elf32_Phdr *phdr;
31065 static unsigned long reloc_count, reloc_idx;
31066 static unsigned long *relocs;
31067 static unsigned long reloc16_count, reloc16_idx;
31068@@ -330,9 +333,39 @@ static void read_ehdr(FILE *fp)
31069 }
31070 }
31071
31072+static void read_phdrs(FILE *fp)
31073+{
31074+ unsigned int i;
31075+
31076+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
31077+ if (!phdr) {
31078+ die("Unable to allocate %d program headers\n",
31079+ ehdr.e_phnum);
31080+ }
31081+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
31082+ die("Seek to %d failed: %s\n",
31083+ ehdr.e_phoff, strerror(errno));
31084+ }
31085+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
31086+ die("Cannot read ELF program headers: %s\n",
31087+ strerror(errno));
31088+ }
31089+ for(i = 0; i < ehdr.e_phnum; i++) {
31090+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
31091+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
31092+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
31093+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
31094+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
31095+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
31096+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
31097+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
31098+ }
31099+
31100+}
31101+
31102 static void read_shdrs(FILE *fp)
31103 {
31104- int i;
31105+ unsigned int i;
31106 Elf32_Shdr shdr;
31107
31108 secs = calloc(ehdr.e_shnum, sizeof(struct section));
31109@@ -367,7 +400,7 @@ static void read_shdrs(FILE *fp)
31110
31111 static void read_strtabs(FILE *fp)
31112 {
31113- int i;
31114+ unsigned int i;
31115 for (i = 0; i < ehdr.e_shnum; i++) {
31116 struct section *sec = &secs[i];
31117 if (sec->shdr.sh_type != SHT_STRTAB) {
31118@@ -392,7 +425,7 @@ static void read_strtabs(FILE *fp)
31119
31120 static void read_symtabs(FILE *fp)
31121 {
31122- int i,j;
31123+ unsigned int i,j;
31124 for (i = 0; i < ehdr.e_shnum; i++) {
31125 struct section *sec = &secs[i];
31126 if (sec->shdr.sh_type != SHT_SYMTAB) {
31127@@ -423,9 +456,11 @@ static void read_symtabs(FILE *fp)
31128 }
31129
31130
31131-static void read_relocs(FILE *fp)
31132+static void read_relocs(FILE *fp, int use_real_mode)
31133 {
31134- int i,j;
31135+ unsigned int i,j;
31136+ uint32_t base;
31137+
31138 for (i = 0; i < ehdr.e_shnum; i++) {
31139 struct section *sec = &secs[i];
31140 if (sec->shdr.sh_type != SHT_REL) {
31141@@ -445,9 +480,22 @@ static void read_relocs(FILE *fp)
31142 die("Cannot read symbol table: %s\n",
31143 strerror(errno));
31144 }
31145+ base = 0;
31146+
31147+#ifdef CONFIG_X86_32
31148+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
31149+ if (phdr[j].p_type != PT_LOAD )
31150+ continue;
31151+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
31152+ continue;
31153+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
31154+ break;
31155+ }
31156+#endif
31157+
31158 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
31159 Elf32_Rel *rel = &sec->reltab[j];
31160- rel->r_offset = elf32_to_cpu(rel->r_offset);
31161+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
31162 rel->r_info = elf32_to_cpu(rel->r_info);
31163 }
31164 }
31165@@ -456,13 +504,13 @@ static void read_relocs(FILE *fp)
31166
31167 static void print_absolute_symbols(void)
31168 {
31169- int i;
31170+ unsigned int i;
31171 printf("Absolute symbols\n");
31172 printf(" Num: Value Size Type Bind Visibility Name\n");
31173 for (i = 0; i < ehdr.e_shnum; i++) {
31174 struct section *sec = &secs[i];
31175 char *sym_strtab;
31176- int j;
31177+ unsigned int j;
31178
31179 if (sec->shdr.sh_type != SHT_SYMTAB) {
31180 continue;
31181@@ -489,14 +537,14 @@ static void print_absolute_symbols(void)
31182
31183 static void print_absolute_relocs(void)
31184 {
31185- int i, printed = 0;
31186+ unsigned int i, printed = 0;
31187
31188 for (i = 0; i < ehdr.e_shnum; i++) {
31189 struct section *sec = &secs[i];
31190 struct section *sec_applies, *sec_symtab;
31191 char *sym_strtab;
31192 Elf32_Sym *sh_symtab;
31193- int j;
31194+ unsigned int j;
31195 if (sec->shdr.sh_type != SHT_REL) {
31196 continue;
31197 }
31198@@ -558,13 +606,13 @@ static void print_absolute_relocs(void)
31199 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
31200 int use_real_mode)
31201 {
31202- int i;
31203+ unsigned int i;
31204 /* Walk through the relocations */
31205 for (i = 0; i < ehdr.e_shnum; i++) {
31206 char *sym_strtab;
31207 Elf32_Sym *sh_symtab;
31208 struct section *sec_applies, *sec_symtab;
31209- int j;
31210+ unsigned int j;
31211 struct section *sec = &secs[i];
31212
31213 if (sec->shdr.sh_type != SHT_REL) {
31214@@ -588,6 +636,24 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
31215 sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
31216 r_type = ELF32_R_TYPE(rel->r_info);
31217
31218+ if (!use_real_mode) {
31219+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
31220+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
31221+ continue;
31222+
31223+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
31224+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
31225+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
31226+ continue;
31227+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
31228+ continue;
31229+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
31230+ continue;
31231+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
31232+ continue;
31233+#endif
31234+ }
31235+
31236 shn_abs = sym->st_shndx == SHN_ABS;
31237
31238 switch (r_type) {
31239@@ -681,7 +747,7 @@ static int write32(unsigned int v, FILE *f)
31240
31241 static void emit_relocs(int as_text, int use_real_mode)
31242 {
31243- int i;
31244+ unsigned int i;
31245 /* Count how many relocations I have and allocate space for them. */
31246 reloc_count = 0;
31247 walk_relocs(count_reloc, use_real_mode);
31248@@ -808,10 +874,11 @@ int main(int argc, char **argv)
31249 fname, strerror(errno));
31250 }
31251 read_ehdr(fp);
31252+ read_phdrs(fp);
31253 read_shdrs(fp);
31254 read_strtabs(fp);
31255 read_symtabs(fp);
31256- read_relocs(fp);
31257+ read_relocs(fp, use_real_mode);
31258 if (show_absolute_syms) {
31259 print_absolute_symbols();
31260 goto out;
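[annotation] relocs is a host-side build tool, so these additions are ordinary userspace C: read_phdrs() mirrors read_shdrs(), pulling in the program headers with fseek/fread and byte-swapping each field, and read_relocs() then rebases r_offset by the PT_LOAD segment's paddr-vaddr delta plus CONFIG_PAGE_OFFSET so KERNEXEC's shifted virtual layout still yields correct relocation addresses. The int-to-unsigned-int loop-index changes match the unsigned e_shnum/e_phnum ELF fields. A minimal runnable sketch of the header-reading half (assumes a little-endian ELF32 input and omits the byte-swap helpers the real tool uses):

    #include <elf.h>
    #include <stdio.h>
    #include <stdlib.h>

    static Elf32_Phdr *load_phdrs(FILE *fp, const Elf32_Ehdr *eh)
    {
        Elf32_Phdr *ph = calloc(eh->e_phnum, sizeof(*ph));

        if (!ph || fseek(fp, eh->e_phoff, SEEK_SET) < 0 ||
            fread(ph, sizeof(*ph), eh->e_phnum, fp) != eh->e_phnum) {
            free(ph);
            return NULL;
        }
        return ph;
    }

    int main(int argc, char **argv)
    {
        FILE *fp = argc > 1 ? fopen(argv[1], "rb") : NULL;
        Elf32_Ehdr eh;

        if (!fp || fread(&eh, sizeof(eh), 1, fp) != 1)
            return 1;
        Elf32_Phdr *ph = load_phdrs(fp, &eh);
        for (unsigned int i = 0; ph && i < eh.e_phnum; i++)
            if (ph[i].p_type == PT_LOAD)
                printf("LOAD vaddr=%#x paddr=%#x\n",
                       (unsigned int)ph[i].p_vaddr,
                       (unsigned int)ph[i].p_paddr);
        return 0;
    }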
31261diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
31262index fd14be1..e3c79c0 100644
31263--- a/arch/x86/vdso/Makefile
31264+++ b/arch/x86/vdso/Makefile
31265@@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO $@
31266 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
31267 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
31268
31269-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
31270+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
31271 GCOV_PROFILE := n
31272
31273 #
31274diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
31275index 0faad64..39ef157 100644
31276--- a/arch/x86/vdso/vdso32-setup.c
31277+++ b/arch/x86/vdso/vdso32-setup.c
31278@@ -25,6 +25,7 @@
31279 #include <asm/tlbflush.h>
31280 #include <asm/vdso.h>
31281 #include <asm/proto.h>
31282+#include <asm/mman.h>
31283
31284 enum {
31285 VDSO_DISABLED = 0,
31286@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
31287 void enable_sep_cpu(void)
31288 {
31289 int cpu = get_cpu();
31290- struct tss_struct *tss = &per_cpu(init_tss, cpu);
31291+ struct tss_struct *tss = init_tss + cpu;
31292
31293 if (!boot_cpu_has(X86_FEATURE_SEP)) {
31294 put_cpu();
31295@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
31296 gate_vma.vm_start = FIXADDR_USER_START;
31297 gate_vma.vm_end = FIXADDR_USER_END;
31298 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
31299- gate_vma.vm_page_prot = __P101;
31300+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
31301
31302 return 0;
31303 }
31304@@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
31305 if (compat)
31306 addr = VDSO_HIGH_BASE;
31307 else {
31308- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
31309+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
31310 if (IS_ERR_VALUE(addr)) {
31311 ret = addr;
31312 goto up_fail;
31313 }
31314 }
31315
31316- current->mm->context.vdso = (void *)addr;
31317+ current->mm->context.vdso = addr;
31318
31319 if (compat_uses_vma || !compat) {
31320 /*
31321@@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
31322 }
31323
31324 current_thread_info()->sysenter_return =
31325- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
31326+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
31327
31328 up_fail:
31329 if (ret)
31330- current->mm->context.vdso = NULL;
31331+ current->mm->context.vdso = 0;
31332
31333 up_write(&mm->mmap_sem);
31334
31335@@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init);
31336
31337 const char *arch_vma_name(struct vm_area_struct *vma)
31338 {
31339- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
31340+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
31341 return "[vdso]";
31342+
31343+#ifdef CONFIG_PAX_SEGMEXEC
31344+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
31345+ return "[vdso]";
31346+#endif
31347+
31348 return NULL;
31349 }
31350
31351@@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
31352 * Check to see if the corresponding task was created in compat vdso
31353 * mode.
31354 */
31355- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
31356+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
31357 return &gate_vma;
31358 return NULL;
31359 }
31360diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
31361index 431e875..cbb23f3 100644
31362--- a/arch/x86/vdso/vma.c
31363+++ b/arch/x86/vdso/vma.c
31364@@ -16,8 +16,6 @@
31365 #include <asm/vdso.h>
31366 #include <asm/page.h>
31367
31368-unsigned int __read_mostly vdso_enabled = 1;
31369-
31370 extern char vdso_start[], vdso_end[];
31371 extern unsigned short vdso_sync_cpuid;
31372
31373@@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
31374 * unaligned here as a result of stack start randomization.
31375 */
31376 addr = PAGE_ALIGN(addr);
31377- addr = align_vdso_addr(addr);
31378
31379 return addr;
31380 }
31381@@ -154,30 +151,31 @@ static int setup_additional_pages(struct linux_binprm *bprm,
31382 unsigned size)
31383 {
31384 struct mm_struct *mm = current->mm;
31385- unsigned long addr;
31386+ unsigned long addr = 0;
31387 int ret;
31388
31389- if (!vdso_enabled)
31390- return 0;
31391-
31392 down_write(&mm->mmap_sem);
31393+
31394+#ifdef CONFIG_PAX_RANDMMAP
31395+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
31396+#endif
31397+
31398 addr = vdso_addr(mm->start_stack, size);
31399+ addr = align_vdso_addr(addr);
31400 addr = get_unmapped_area(NULL, addr, size, 0, 0);
31401 if (IS_ERR_VALUE(addr)) {
31402 ret = addr;
31403 goto up_fail;
31404 }
31405
31406- current->mm->context.vdso = (void *)addr;
31407+ mm->context.vdso = addr;
31408
31409 ret = install_special_mapping(mm, addr, size,
31410 VM_READ|VM_EXEC|
31411 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
31412 pages);
31413- if (ret) {
31414- current->mm->context.vdso = NULL;
31415- goto up_fail;
31416- }
31417+ if (ret)
31418+ mm->context.vdso = 0;
31419
31420 up_fail:
31421 up_write(&mm->mmap_sem);
31422@@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
31423 vdsox32_size);
31424 }
31425 #endif
31426-
31427-static __init int vdso_setup(char *s)
31428-{
31429- vdso_enabled = simple_strtoul(s, NULL, 0);
31430- return 0;
31431-}
31432-__setup("vdso=", vdso_setup);
31433diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
31434index 2262003..f229ced 100644
31435--- a/arch/x86/xen/enlighten.c
31436+++ b/arch/x86/xen/enlighten.c
31437@@ -100,8 +100,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
31438
31439 struct shared_info xen_dummy_shared_info;
31440
31441-void *xen_initial_gdt;
31442-
31443 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
31444 __read_mostly int xen_have_vector_callback;
31445 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
31446@@ -496,8 +494,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
31447 {
31448 unsigned long va = dtr->address;
31449 unsigned int size = dtr->size + 1;
31450- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
31451- unsigned long frames[pages];
31452+ unsigned long frames[65536 / PAGE_SIZE];
31453 int f;
31454
31455 /*
31456@@ -545,8 +542,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
31457 {
31458 unsigned long va = dtr->address;
31459 unsigned int size = dtr->size + 1;
31460- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
31461- unsigned long frames[pages];
31462+ unsigned long frames[65536 / PAGE_SIZE];
31463 int f;
31464
31465 /*
31466@@ -939,7 +935,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
31467 return 0;
31468 }
31469
31470-static void set_xen_basic_apic_ops(void)
31471+static void __init set_xen_basic_apic_ops(void)
31472 {
31473 apic->read = xen_apic_read;
31474 apic->write = xen_apic_write;
31475@@ -1245,30 +1241,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
31476 #endif
31477 };
31478
31479-static void xen_reboot(int reason)
31480+static __noreturn void xen_reboot(int reason)
31481 {
31482 struct sched_shutdown r = { .reason = reason };
31483
31484- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
31485- BUG();
31486+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
31487+ BUG();
31488 }
31489
31490-static void xen_restart(char *msg)
31491+static __noreturn void xen_restart(char *msg)
31492 {
31493 xen_reboot(SHUTDOWN_reboot);
31494 }
31495
31496-static void xen_emergency_restart(void)
31497+static __noreturn void xen_emergency_restart(void)
31498 {
31499 xen_reboot(SHUTDOWN_reboot);
31500 }
31501
31502-static void xen_machine_halt(void)
31503+static __noreturn void xen_machine_halt(void)
31504 {
31505 xen_reboot(SHUTDOWN_poweroff);
31506 }
31507
31508-static void xen_machine_power_off(void)
31509+static __noreturn void xen_machine_power_off(void)
31510 {
31511 if (pm_power_off)
31512 pm_power_off();
31513@@ -1370,7 +1366,17 @@ asmlinkage void __init xen_start_kernel(void)
31514 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
31515
31516 /* Work out if we support NX */
31517- x86_configure_nx();
31518+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
31519+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
31520+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
31521+ unsigned l, h;
31522+
31523+ __supported_pte_mask |= _PAGE_NX;
31524+ rdmsr(MSR_EFER, l, h);
31525+ l |= EFER_NX;
31526+ wrmsr(MSR_EFER, l, h);
31527+ }
31528+#endif
31529
31530 xen_setup_features();
31531
31532@@ -1399,14 +1405,7 @@ asmlinkage void __init xen_start_kernel(void)
31533 pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
31534 }
31535
31536- machine_ops = xen_machine_ops;
31537-
31538- /*
31539- * The only reliable way to retain the initial address of the
31540- * percpu gdt_page is to remember it here, so we can go and
31541- * mark it RW later, when the initial percpu area is freed.
31542- */
31543- xen_initial_gdt = &per_cpu(gdt_page, 0);
31544+ memcpy((void *)&machine_ops, &xen_machine_ops, sizeof machine_ops);
31545
31546 xen_smp_init();
31547
31548@@ -1598,7 +1597,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
31549 return NOTIFY_OK;
31550 }
31551
31552-static struct notifier_block xen_hvm_cpu_notifier __cpuinitdata = {
31553+static struct notifier_block xen_hvm_cpu_notifier = {
31554 .notifier_call = xen_hvm_cpu_notify,
31555 };
31556
31557diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
31558index 01de35c..0bda07b 100644
31559--- a/arch/x86/xen/mmu.c
31560+++ b/arch/x86/xen/mmu.c
31561@@ -1881,6 +1881,9 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
31562 /* L3_k[510] -> level2_kernel_pgt
31563 * L3_i[511] -> level2_fixmap_pgt */
31564 convert_pfn_mfn(level3_kernel_pgt);
31565+ convert_pfn_mfn(level3_vmalloc_start_pgt);
31566+ convert_pfn_mfn(level3_vmalloc_end_pgt);
31567+ convert_pfn_mfn(level3_vmemmap_pgt);
31568
31569 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
31570 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
31571@@ -1910,8 +1913,12 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
31572 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
31573 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
31574 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
31575+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
31576+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
31577+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
31578 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
31579 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
31580+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
31581 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
31582 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
31583
31584@@ -2097,6 +2104,7 @@ static void __init xen_post_allocator_init(void)
31585 pv_mmu_ops.set_pud = xen_set_pud;
31586 #if PAGETABLE_LEVELS == 4
31587 pv_mmu_ops.set_pgd = xen_set_pgd;
31588+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
31589 #endif
31590
31591 /* This will work as long as patching hasn't happened yet
31592@@ -2178,6 +2186,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
31593 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
31594 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
31595 .set_pgd = xen_set_pgd_hyper,
31596+ .set_pgd_batched = xen_set_pgd_hyper,
31597
31598 .alloc_pud = xen_alloc_pmd_init,
31599 .release_pud = xen_release_pmd_init,
31600diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
31601index 34bc4ce..c34aa24 100644
31602--- a/arch/x86/xen/smp.c
31603+++ b/arch/x86/xen/smp.c
31604@@ -229,11 +229,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
31605 {
31606 BUG_ON(smp_processor_id() != 0);
31607 native_smp_prepare_boot_cpu();
31608-
31609- /* We've switched to the "real" per-cpu gdt, so make sure the
31610- old memory can be recycled */
31611- make_lowmem_page_readwrite(xen_initial_gdt);
31612-
31613 xen_filter_cpu_maps();
31614 xen_setup_vcpu_info_placement();
31615 }
31616@@ -300,12 +295,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
31617 gdt = get_cpu_gdt_table(cpu);
31618
31619 ctxt->flags = VGCF_IN_KERNEL;
31620- ctxt->user_regs.ds = __USER_DS;
31621- ctxt->user_regs.es = __USER_DS;
31622+ ctxt->user_regs.ds = __KERNEL_DS;
31623+ ctxt->user_regs.es = __KERNEL_DS;
31624 ctxt->user_regs.ss = __KERNEL_DS;
31625 #ifdef CONFIG_X86_32
31626 ctxt->user_regs.fs = __KERNEL_PERCPU;
31627- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
31628+ savesegment(gs, ctxt->user_regs.gs);
31629 #else
31630 ctxt->gs_base_kernel = per_cpu_offset(cpu);
31631 #endif
31632@@ -355,13 +350,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle)
31633 int rc;
31634
31635 per_cpu(current_task, cpu) = idle;
31636+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
31637 #ifdef CONFIG_X86_32
31638 irq_ctx_init(cpu);
31639 #else
31640 clear_tsk_thread_flag(idle, TIF_FORK);
31641- per_cpu(kernel_stack, cpu) =
31642- (unsigned long)task_stack_page(idle) -
31643- KERNEL_STACK_OFFSET + THREAD_SIZE;
31644+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
31645 #endif
31646 xen_setup_runstate_info(cpu);
31647 xen_setup_timer(cpu);
31648@@ -630,7 +624,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
31649
31650 void __init xen_smp_init(void)
31651 {
31652- smp_ops = xen_smp_ops;
31653+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
31654 xen_fill_possible_map();
31655 xen_init_spinlocks();
31656 }
31657diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
31658index 33ca6e4..0ded929 100644
31659--- a/arch/x86/xen/xen-asm_32.S
31660+++ b/arch/x86/xen/xen-asm_32.S
31661@@ -84,14 +84,14 @@ ENTRY(xen_iret)
31662 ESP_OFFSET=4 # bytes pushed onto stack
31663
31664 /*
31665- * Store vcpu_info pointer for easy access. Do it this way to
31666- * avoid having to reload %fs
31667+ * Store vcpu_info pointer for easy access.
31668 */
31669 #ifdef CONFIG_SMP
31670- GET_THREAD_INFO(%eax)
31671- movl %ss:TI_cpu(%eax), %eax
31672- movl %ss:__per_cpu_offset(,%eax,4), %eax
31673- mov %ss:xen_vcpu(%eax), %eax
31674+ push %fs
31675+ mov $(__KERNEL_PERCPU), %eax
31676+ mov %eax, %fs
31677+ mov PER_CPU_VAR(xen_vcpu), %eax
31678+ pop %fs
31679 #else
31680 movl %ss:xen_vcpu, %eax
31681 #endif
31682diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
31683index 7faed58..ba4427c 100644
31684--- a/arch/x86/xen/xen-head.S
31685+++ b/arch/x86/xen/xen-head.S
31686@@ -19,6 +19,17 @@ ENTRY(startup_xen)
31687 #ifdef CONFIG_X86_32
31688 mov %esi,xen_start_info
31689 mov $init_thread_union+THREAD_SIZE,%esp
31690+#ifdef CONFIG_SMP
31691+ movl $cpu_gdt_table,%edi
31692+ movl $__per_cpu_load,%eax
31693+ movw %ax,__KERNEL_PERCPU + 2(%edi)
31694+ rorl $16,%eax
31695+ movb %al,__KERNEL_PERCPU + 4(%edi)
31696+ movb %ah,__KERNEL_PERCPU + 7(%edi)
31697+ movl $__per_cpu_end - 1,%eax
31698+ subl $__per_cpu_start,%eax
31699+ movw %ax,__KERNEL_PERCPU + 0(%edi)
31700+#endif
31701 #else
31702 mov %rsi,xen_start_info
31703 mov $init_thread_union+THREAD_SIZE,%rsp
31704diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
31705index a95b417..b6dbd0b 100644
31706--- a/arch/x86/xen/xen-ops.h
31707+++ b/arch/x86/xen/xen-ops.h
31708@@ -10,8 +10,6 @@
31709 extern const char xen_hypervisor_callback[];
31710 extern const char xen_failsafe_callback[];
31711
31712-extern void *xen_initial_gdt;
31713-
31714 struct trap_info;
31715 void xen_copy_trap_info(struct trap_info *traps);
31716
31717diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
31718index 525bd3d..ef888b1 100644
31719--- a/arch/xtensa/variants/dc232b/include/variant/core.h
31720+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
31721@@ -119,9 +119,9 @@
31722 ----------------------------------------------------------------------*/
31723
31724 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
31725-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
31726 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
31727 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
31728+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
31729
31730 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
31731 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
31732diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
31733index 2f33760..835e50a 100644
31734--- a/arch/xtensa/variants/fsf/include/variant/core.h
31735+++ b/arch/xtensa/variants/fsf/include/variant/core.h
31736@@ -11,6 +11,7 @@
31737 #ifndef _XTENSA_CORE_H
31738 #define _XTENSA_CORE_H
31739
31740+#include <linux/const.h>
31741
31742 /****************************************************************************
31743 Parameters Useful for Any Code, USER or PRIVILEGED
31744@@ -112,9 +113,9 @@
31745 ----------------------------------------------------------------------*/
31746
31747 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
31748-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
31749 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
31750 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
31751+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
31752
31753 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
31754 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
31755diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
31756index af00795..2bb8105 100644
31757--- a/arch/xtensa/variants/s6000/include/variant/core.h
31758+++ b/arch/xtensa/variants/s6000/include/variant/core.h
31759@@ -11,6 +11,7 @@
31760 #ifndef _XTENSA_CORE_CONFIGURATION_H
31761 #define _XTENSA_CORE_CONFIGURATION_H
31762
31763+#include <linux/const.h>
31764
31765 /****************************************************************************
31766 Parameters Useful for Any Code, USER or PRIVILEGED
31767@@ -118,9 +119,9 @@
31768 ----------------------------------------------------------------------*/
31769
31770 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
31771-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
31772 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
31773 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
31774+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
31775
31776 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
31777 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
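[annotation] All three xtensa variants get the same fix: XCHAL_DCACHE_LINESIZE is rederived from XCHAL_DCACHE_LINEWIDTH as _AC(1,UL) << width, so the constant carries an unsigned long type in C while _AC keeps it usable from assembly (hence the new <linux/const.h> includes). The numeric value is unchanged; only the type changes, which matters once the line size feeds alignment masks on unsigned long addresses. A runnable illustration (the _AC stand-in here only models the C side; the real macro drops the suffix in assembly):

    #include <stdio.h>

    #define _AC(x, y)  x##y
    #define LINEWIDTH  5
    #define LINESIZE   (_AC(1, UL) << LINEWIDTH)

    int main(void)
    {
        unsigned long p = 0x1234567UL;

        /* with an unsigned-long line size the rounding mask neither
         * sign-extends nor truncates: */
        printf("%#lx -> %#lx\n", p, p & ~(LINESIZE - 1));
        return 0;
    }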
31778diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
31779index 58916af..eb9dbcf6 100644
31780--- a/block/blk-iopoll.c
31781+++ b/block/blk-iopoll.c
31782@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
31783 }
31784 EXPORT_SYMBOL(blk_iopoll_complete);
31785
31786-static void blk_iopoll_softirq(struct softirq_action *h)
31787+static void blk_iopoll_softirq(void)
31788 {
31789 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
31790 int rearm = 0, budget = blk_iopoll_budget;
31791@@ -209,7 +209,7 @@ static int __cpuinit blk_iopoll_cpu_notify(struct notifier_block *self,
31792 return NOTIFY_OK;
31793 }
31794
31795-static struct notifier_block __cpuinitdata blk_iopoll_cpu_notifier = {
31796+static struct notifier_block blk_iopoll_cpu_notifier = {
31797 .notifier_call = blk_iopoll_cpu_notify,
31798 };
31799
31800diff --git a/block/blk-map.c b/block/blk-map.c
31801index 623e1cd..ca1e109 100644
31802--- a/block/blk-map.c
31803+++ b/block/blk-map.c
31804@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
31805 if (!len || !kbuf)
31806 return -EINVAL;
31807
31808- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
31809+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
31810 if (do_copy)
31811 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
31812 else
31813diff --git a/block/blk-softirq.c b/block/blk-softirq.c
31814index 467c8de..f3628c5 100644
31815--- a/block/blk-softirq.c
31816+++ b/block/blk-softirq.c
31817@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
31818 * Softirq action handler - move entries to local list and loop over them
31819 * while passing them to the queue registered handler.
31820 */
31821-static void blk_done_softirq(struct softirq_action *h)
31822+static void blk_done_softirq(void)
31823 {
31824 struct list_head *cpu_list, local_list;
31825
31826@@ -98,7 +98,7 @@ static int __cpuinit blk_cpu_notify(struct notifier_block *self,
31827 return NOTIFY_OK;
31828 }
31829
31830-static struct notifier_block __cpuinitdata blk_cpu_notifier = {
31831+static struct notifier_block blk_cpu_notifier = {
31832 .notifier_call = blk_cpu_notify,
31833 };
31834
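[annotation] Both block softirq handlers lose their unused struct softirq_action * argument; the rest of this patch narrows the softirq vector to plain void (*)(void) handlers to match, which both removes dead parameters and shrinks what a corrupted handler pointer can usefully be aimed at. A toy sketch of the narrowed dispatch shape (names are illustrative, not kernel API):

    #include <stdio.h>

    typedef void (*softirq_handler_t)(void);

    static void blk_done_demo(void) { puts("blk-done"); }
    static void blk_poll_demo(void) { puts("blk-poll"); }

    static softirq_handler_t vec[2];

    static void open_softirq_demo(int nr, softirq_handler_t h)
    {
        vec[nr] = h;   /* only void(void) functions fit the slot */
    }

    int main(void)
    {
        open_softirq_demo(0, blk_done_demo);
        open_softirq_demo(1, blk_poll_demo);
        for (int i = 0; i < 2; i++)
            vec[i]();
        return 0;
    }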
31835diff --git a/block/bsg.c b/block/bsg.c
31836index ff64ae3..593560c 100644
31837--- a/block/bsg.c
31838+++ b/block/bsg.c
31839@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
31840 struct sg_io_v4 *hdr, struct bsg_device *bd,
31841 fmode_t has_write_perm)
31842 {
31843+ unsigned char tmpcmd[sizeof(rq->__cmd)];
31844+ unsigned char *cmdptr;
31845+
31846 if (hdr->request_len > BLK_MAX_CDB) {
31847 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
31848 if (!rq->cmd)
31849 return -ENOMEM;
31850- }
31851+ cmdptr = rq->cmd;
31852+ } else
31853+ cmdptr = tmpcmd;
31854
31855- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
31856+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
31857 hdr->request_len))
31858 return -EFAULT;
31859
31860+ if (cmdptr != rq->cmd)
31861+ memcpy(rq->cmd, cmdptr, hdr->request_len);
31862+
31863 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
31864 if (blk_verify_command(rq->cmd, has_write_perm))
31865 return -EPERM;
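[annotation] blk_fill_sgv4_hdr_rq() previously did copy_from_user() straight into rq->cmd; the patch stages the copy through a stack buffer (or the freshly kzalloc'd command for oversized CDBs) and only memcpy()s into the request once the copy has fully succeeded, so a faulting copy can never leave a half-written command behind. The same staging appears in scsi_ioctl.c below. A kernel-style sketch of the pattern (won't build standalone; BUF_LEN is a hypothetical size for the live buffer):

    static int fill_cmd(unsigned char *dst, const void __user *src, size_t len)
    {
        unsigned char tmp[BUF_LEN];

        if (len > sizeof(tmp))
            return -EINVAL;
        if (copy_from_user(tmp, src, len))
            return -EFAULT;        /* dst never sees partial data */
        memcpy(dst, tmp, len);
        return 0;
    }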
31866diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
31867index 7c668c8..db3521c 100644
31868--- a/block/compat_ioctl.c
31869+++ b/block/compat_ioctl.c
31870@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
31871 err |= __get_user(f->spec1, &uf->spec1);
31872 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
31873 err |= __get_user(name, &uf->name);
31874- f->name = compat_ptr(name);
31875+ f->name = (void __force_kernel *)compat_ptr(name);
31876 if (err) {
31877 err = -EFAULT;
31878 goto out;
31879diff --git a/block/partitions/efi.c b/block/partitions/efi.c
31880index b62fb88..bdab4c4 100644
31881--- a/block/partitions/efi.c
31882+++ b/block/partitions/efi.c
31883@@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
31884 if (!gpt)
31885 return NULL;
31886
31887+ if (!le32_to_cpu(gpt->num_partition_entries))
31888+ return NULL;
31889+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
31890+ if (!pte)
31891+ return NULL;
31892+
31893 count = le32_to_cpu(gpt->num_partition_entries) *
31894 le32_to_cpu(gpt->sizeof_partition_entry);
31895- if (!count)
31896- return NULL;
31897- pte = kzalloc(count, GFP_KERNEL);
31898- if (!pte)
31899- return NULL;
31900-
31901 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
31902 (u8 *) pte,
31903 count) < count) {
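[annotation] The old GPT code computed count = num_partition_entries * sizeof_partition_entry first and only then checked for zero, so a crafted header could wrap the multiply, allocate a short buffer, and let read_lba() overrun it. Moving the allocation to kcalloc() puts the multiply behind kcalloc's overflow check, and the explicit zero-entries test keeps that case rejected. A runnable demonstration of the wrap (calloc plays the role of kcalloc; on a 64-bit host it may instead attempt the genuine 4 GiB request):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        uint32_t nr = 0x10000001, sz = 0x10;

        /* 0x10000001 * 0x10 = 0x100000010 truncates to 0x10 in
         * 32-bit arithmetic: a 16-byte buffer for ~268M entries. */
        uint32_t count = nr * sz;
        printf("entries=%u size=%u -> count=%u bytes\n", nr, sz, count);

        /* calloc(), like kcalloc(), checks the product instead: */
        void *p = calloc(nr, sz);
        printf("calloc: %s\n", p ? "satisfied the full request"
                                 : "rejected nr * sz");
        free(p);
        return 0;
    }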
31904diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
31905index 9a87daa..fb17486 100644
31906--- a/block/scsi_ioctl.c
31907+++ b/block/scsi_ioctl.c
31908@@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
31909 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
31910 struct sg_io_hdr *hdr, fmode_t mode)
31911 {
31912- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
31913+ unsigned char tmpcmd[sizeof(rq->__cmd)];
31914+ unsigned char *cmdptr;
31915+
31916+ if (rq->cmd != rq->__cmd)
31917+ cmdptr = rq->cmd;
31918+ else
31919+ cmdptr = tmpcmd;
31920+
31921+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
31922 return -EFAULT;
31923+
31924+ if (cmdptr != rq->cmd)
31925+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
31926+
31927 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
31928 return -EPERM;
31929
31930@@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
31931 int err;
31932 unsigned int in_len, out_len, bytes, opcode, cmdlen;
31933 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
31934+ unsigned char tmpcmd[sizeof(rq->__cmd)];
31935+ unsigned char *cmdptr;
31936
31937 if (!sic)
31938 return -EINVAL;
31939@@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
31940 */
31941 err = -EFAULT;
31942 rq->cmd_len = cmdlen;
31943- if (copy_from_user(rq->cmd, sic->data, cmdlen))
31944+
31945+ if (rq->cmd != rq->__cmd)
31946+ cmdptr = rq->cmd;
31947+ else
31948+ cmdptr = tmpcmd;
31949+
31950+ if (copy_from_user(cmdptr, sic->data, cmdlen))
31951 goto error;
31952
31953+ if (rq->cmd != cmdptr)
31954+ memcpy(rq->cmd, cmdptr, cmdlen);
31955+
31956 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
31957 goto error;
31958
31959diff --git a/crypto/cryptd.c b/crypto/cryptd.c
31960index 7bdd61b..afec999 100644
31961--- a/crypto/cryptd.c
31962+++ b/crypto/cryptd.c
31963@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
31964
31965 struct cryptd_blkcipher_request_ctx {
31966 crypto_completion_t complete;
31967-};
31968+} __no_const;
31969
31970 struct cryptd_hash_ctx {
31971 struct crypto_shash *child;
31972@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
31973
31974 struct cryptd_aead_request_ctx {
31975 crypto_completion_t complete;
31976-};
31977+} __no_const;
31978
31979 static void cryptd_queue_worker(struct work_struct *work);
31980
31981diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
31982index f6d9baf..dfd511f 100644
31983--- a/crypto/crypto_user.c
31984+++ b/crypto/crypto_user.c
31985@@ -30,6 +30,8 @@
31986
31987 #include "internal.h"
31988
31989+#define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x))
31990+
31991 static DEFINE_MUTEX(crypto_cfg_mutex);
31992
31993 /* The crypto netlink socket */
31994@@ -196,7 +198,10 @@ static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
31995 struct crypto_dump_info info;
31996 int err;
31997
31998- if (!p->cru_driver_name)
31999+ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
32000+ return -EINVAL;
32001+
32002+ if (!p->cru_driver_name[0])
32003 return -EINVAL;
32004
32005 alg = crypto_alg_match(p, 1);
32006@@ -260,6 +265,9 @@ static int crypto_update_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
32007 struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];
32008 LIST_HEAD(list);
32009
32010+ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
32011+ return -EINVAL;
32012+
32013 if (priority && !strlen(p->cru_driver_name))
32014 return -EINVAL;
32015
32016@@ -287,6 +295,9 @@ static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
32017 struct crypto_alg *alg;
32018 struct crypto_user_alg *p = nlmsg_data(nlh);
32019
32020+ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
32021+ return -EINVAL;
32022+
32023 alg = crypto_alg_match(p, 1);
32024 if (!alg)
32025 return -ENOENT;
32026@@ -368,6 +379,9 @@ static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
32027 struct crypto_user_alg *p = nlmsg_data(nlh);
32028 struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];
32029
32030+ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
32031+ return -EINVAL;
32032+
32033 if (strlen(p->cru_driver_name))
32034 exact = 1;
32035
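[annotation] cru_name and cru_driver_name arrive over netlink as fixed-size char arrays with no guarantee of a terminating NUL; null_terminated() uses strnlen(x, sizeof(x)) < sizeof(x), which is true only if a NUL occurs inside the array, so the later strlen()/strcmp() calls cannot read past the field. (The old !p->cru_driver_name compared an array's address and was always false; !p->cru_driver_name[0] is the real emptiness test.) Runnable check:

    #define _POSIX_C_SOURCE 200809L
    #include <stdio.h>
    #include <string.h>

    #define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x))

    struct msg { char name[8]; };

    int main(void)
    {
        struct msg ok = { "aes" };
        struct msg bad;

        memset(bad.name, 'A', sizeof(bad.name));     /* no NUL anywhere */
        printf("ok:  %d\n", null_terminated(ok.name));   /* 1 */
        printf("bad: %d\n", null_terminated(bad.name));  /* 0 */
        return 0;
    }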
32036diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
32037index f220d64..d359ad6 100644
32038--- a/drivers/acpi/apei/apei-internal.h
32039+++ b/drivers/acpi/apei/apei-internal.h
32040@@ -20,7 +20,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
32041 struct apei_exec_ins_type {
32042 u32 flags;
32043 apei_exec_ins_func_t run;
32044-};
32045+} __do_const;
32046
32047 struct apei_exec_context {
32048 u32 ip;
32049diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
32050index e6defd8..c26a225 100644
32051--- a/drivers/acpi/apei/cper.c
32052+++ b/drivers/acpi/apei/cper.c
32053@@ -38,12 +38,12 @@
32054 */
32055 u64 cper_next_record_id(void)
32056 {
32057- static atomic64_t seq;
32058+ static atomic64_unchecked_t seq;
32059
32060- if (!atomic64_read(&seq))
32061- atomic64_set(&seq, ((u64)get_seconds()) << 32);
32062+ if (!atomic64_read_unchecked(&seq))
32063+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
32064
32065- return atomic64_inc_return(&seq);
32066+ return atomic64_inc_return_unchecked(&seq);
32067 }
32068 EXPORT_SYMBOL_GPL(cper_next_record_id);
32069
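[annotation] Under PaX REFCOUNT, plain atomic64_t operations trap on overflow to stop reference-count wraps; cper_next_record_id()'s sequence counter is meant to increase forever and a wrap is harmless, so it switches to the *_unchecked variants that keep the old unguarded semantics. The same substitution recurs throughout the ATM drivers below for pure statistics counters. A userspace analogue of the seeded, ever-increasing id with C11 atomics (the CAS makes the seeding race-free, which the original does not attempt):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    static _Atomic uint64_t seq;

    static uint64_t next_record_id(void)
    {
        uint64_t zero = 0;

        /* seed once: seconds in the high 32 bits, counter below */
        atomic_compare_exchange_strong(&seq, &zero,
                                       (uint64_t)time(NULL) << 32);
        return atomic_fetch_add(&seq, 1) + 1;
    }

    int main(void)
    {
        printf("%llu\n", (unsigned long long)next_record_id());
        printf("%llu\n", (unsigned long long)next_record_id());
        return 0;
    }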
32070diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
32071index be60399..778b33e8 100644
32072--- a/drivers/acpi/bgrt.c
32073+++ b/drivers/acpi/bgrt.c
32074@@ -87,8 +87,10 @@ static int __init bgrt_init(void)
32075 return -ENODEV;
32076
32077 sysfs_bin_attr_init(&image_attr);
32078- image_attr.private = bgrt_image;
32079- image_attr.size = bgrt_image_size;
32080+ pax_open_kernel();
32081+ *(void **)&image_attr.private = bgrt_image;
32082+ *(size_t *)&image_attr.size = bgrt_image_size;
32083+ pax_close_kernel();
32084
32085 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
32086 if (!bgrt_kobj)
32087diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
32088index cb96296..b81293b 100644
32089--- a/drivers/acpi/blacklist.c
32090+++ b/drivers/acpi/blacklist.c
32091@@ -52,7 +52,7 @@ struct acpi_blacklist_item {
32092 u32 is_critical_error;
32093 };
32094
32095-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
32096+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
32097
32098 /*
32099 * POLICY: If *anything* doesn't work, put it on the blacklist.
32100@@ -193,7 +193,7 @@ static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
32101 return 0;
32102 }
32103
32104-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
32105+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
32106 {
32107 .callback = dmi_disable_osi_vista,
32108 .ident = "Fujitsu Siemens",
32109diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
32110index 7586544..636a2f0 100644
32111--- a/drivers/acpi/ec_sys.c
32112+++ b/drivers/acpi/ec_sys.c
32113@@ -12,6 +12,7 @@
32114 #include <linux/acpi.h>
32115 #include <linux/debugfs.h>
32116 #include <linux/module.h>
32117+#include <linux/uaccess.h>
32118 #include "internal.h"
32119
32120 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
32121@@ -34,7 +35,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
32122 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
32123 */
32124 unsigned int size = EC_SPACE_SIZE;
32125- u8 *data = (u8 *) buf;
32126+ u8 data;
32127 loff_t init_off = *off;
32128 int err = 0;
32129
32130@@ -47,9 +48,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
32131 size = count;
32132
32133 while (size) {
32134- err = ec_read(*off, &data[*off - init_off]);
32135+ err = ec_read(*off, &data);
32136 if (err)
32137 return err;
32138+ if (put_user(data, &buf[*off - init_off]))
32139+ return -EFAULT;
32140 *off += 1;
32141 size--;
32142 }
32143@@ -65,7 +68,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
32144
32145 unsigned int size = count;
32146 loff_t init_off = *off;
32147- u8 *data = (u8 *) buf;
32148 int err = 0;
32149
32150 if (*off >= EC_SPACE_SIZE)
32151@@ -76,7 +78,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
32152 }
32153
32154 while (size) {
32155- u8 byte_write = data[*off - init_off];
32156+ u8 byte_write;
32157+ if (get_user(byte_write, &buf[*off - init_off]))
32158+ return -EFAULT;
32159 err = ec_write(*off, byte_write);
32160 if (err)
32161 return err;
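[annotation] acpi_ec_read_io()/acpi_ec_write_io() cast the __user buffer to u8 * and dereferenced it directly from kernel context, exactly the class of bug UDEREF exists to catch. The fix keeps each byte in a kernel local and crosses the boundary with put_user()/get_user(), which also supplies the previously missing -EFAULT handling. A kernel-style sketch of the fixed read path (assumes ec_read() as in drivers/acpi; won't build outside a kernel tree):

    static ssize_t ec_read_sketch(char __user *buf, size_t count, loff_t off)
    {
        size_t i;

        for (i = 0; i < count; i++) {
            u8 byte;
            int err = ec_read(off + i, &byte);  /* kernel-only buffer */

            if (err)
                return err;
            if (put_user(byte, &buf[i]))        /* checked user store */
                return -EFAULT;
        }
        return count;
    }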
32162diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
32163index e83311b..142b5cc 100644
32164--- a/drivers/acpi/processor_driver.c
32165+++ b/drivers/acpi/processor_driver.c
32166@@ -558,7 +558,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
32167 return 0;
32168 #endif
32169
32170- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
32171+ BUG_ON(pr->id >= nr_cpu_ids);
32172
32173 /*
32174 * Buggy BIOS check
32175diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
32176index ed9a1cc..f4a354c 100644
32177--- a/drivers/acpi/processor_idle.c
32178+++ b/drivers/acpi/processor_idle.c
32179@@ -1005,7 +1005,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
32180 {
32181 int i, count = CPUIDLE_DRIVER_STATE_START;
32182 struct acpi_processor_cx *cx;
32183- struct cpuidle_state *state;
32184+ cpuidle_state_no_const *state;
32185 struct cpuidle_driver *drv = &acpi_idle_driver;
32186
32187 if (!pr->flags.power_setup_done)
32188diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
32189index ea61ca9..3fdd70d 100644
32190--- a/drivers/acpi/sysfs.c
32191+++ b/drivers/acpi/sysfs.c
32192@@ -420,11 +420,11 @@ static u32 num_counters;
32193 static struct attribute **all_attrs;
32194 static u32 acpi_gpe_count;
32195
32196-static struct attribute_group interrupt_stats_attr_group = {
32197+static attribute_group_no_const interrupt_stats_attr_group = {
32198 .name = "interrupts",
32199 };
32200
32201-static struct kobj_attribute *counter_attrs;
32202+static kobj_attribute_no_const *counter_attrs;
32203
32204 static void delete_gpe_attr_array(void)
32205 {
32206diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
32207index 6cd7805..07facb3 100644
32208--- a/drivers/ata/libahci.c
32209+++ b/drivers/ata/libahci.c
32210@@ -1230,7 +1230,7 @@ int ahci_kick_engine(struct ata_port *ap)
32211 }
32212 EXPORT_SYMBOL_GPL(ahci_kick_engine);
32213
32214-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
32215+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
32216 struct ata_taskfile *tf, int is_cmd, u16 flags,
32217 unsigned long timeout_msec)
32218 {
32219diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
32220index 46cd3f4..0871ad0 100644
32221--- a/drivers/ata/libata-core.c
32222+++ b/drivers/ata/libata-core.c
32223@@ -4780,7 +4780,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
32224 struct ata_port *ap;
32225 unsigned int tag;
32226
32227- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
32228+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
32229 ap = qc->ap;
32230
32231 qc->flags = 0;
32232@@ -4796,7 +4796,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
32233 struct ata_port *ap;
32234 struct ata_link *link;
32235
32236- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
32237+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
32238 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
32239 ap = qc->ap;
32240 link = qc->dev->link;
32241@@ -5892,6 +5892,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
32242 return;
32243
32244 spin_lock(&lock);
32245+ pax_open_kernel();
32246
32247 for (cur = ops->inherits; cur; cur = cur->inherits) {
32248 void **inherit = (void **)cur;
32249@@ -5905,8 +5906,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
32250 if (IS_ERR(*pp))
32251 *pp = NULL;
32252
32253- ops->inherits = NULL;
32254+ *(struct ata_port_operations **)&ops->inherits = NULL;
32255
32256+ pax_close_kernel();
32257 spin_unlock(&lock);
32258 }
32259
32260diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
32261index 405022d..fb70e53 100644
32262--- a/drivers/ata/pata_arasan_cf.c
32263+++ b/drivers/ata/pata_arasan_cf.c
32264@@ -864,7 +864,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
32265 /* Handle platform specific quirks */
32266 if (pdata->quirk) {
32267 if (pdata->quirk & CF_BROKEN_PIO) {
32268- ap->ops->set_piomode = NULL;
32269+ pax_open_kernel();
32270+ *(void **)&ap->ops->set_piomode = NULL;
32271+ pax_close_kernel();
32272 ap->pio_mask = 0;
32273 }
32274 if (pdata->quirk & CF_BROKEN_MWDMA)
32275diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
32276index f9b983a..887b9d8 100644
32277--- a/drivers/atm/adummy.c
32278+++ b/drivers/atm/adummy.c
32279@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
32280 vcc->pop(vcc, skb);
32281 else
32282 dev_kfree_skb_any(skb);
32283- atomic_inc(&vcc->stats->tx);
32284+ atomic_inc_unchecked(&vcc->stats->tx);
32285
32286 return 0;
32287 }
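
From here on, most of the ATM driver churn is a single recurring pattern. PaX's REFCOUNT feature makes atomic_t detect and saturate on overflow so that a wrapped reference count cannot turn into a use-after-free; counters that may legitimately wrap, such as the vcc->stats packet counters here, are converted to atomic_unchecked_t and the *_unchecked accessors, which keep the stock wrapping semantics. A compact userspace model of the two behaviours (the real kernel versions are per-architecture assembly; the wrap goes through unsigned math to keep the sketch well-defined):

#include <limits.h>
#include <stdio.h>

typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile int counter; } atomic_unchecked_t;

/* REFCOUNT-protected increment: refuse to wrap, saturate and report */
static void atomic_inc(atomic_t *v)
{
        if (v->counter == INT_MAX) {
                fprintf(stderr, "refcount overflow, saturating\n");
                return;   /* the kernel would also report and kill */
        }
        v->counter++;
}

/* opted-out increment for statistics: plain wrapping arithmetic */
static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
        v->counter = (int)((unsigned int)v->counter + 1u);
}

int main(void)
{
        atomic_t ref = { INT_MAX };
        atomic_unchecked_t stat = { INT_MAX };

        atomic_inc(&ref);             /* stays at INT_MAX */
        atomic_inc_unchecked(&stat);  /* wraps, as a packet counter may */
        printf("ref=%d stat=%d\n", ref.counter, stat.counter);
        return 0;
}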
32288diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
32289index 77a7480..05cde58 100644
32290--- a/drivers/atm/ambassador.c
32291+++ b/drivers/atm/ambassador.c
32292@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
32293 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
32294
32295 // VC layer stats
32296- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
32297+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
32298
32299 // free the descriptor
32300 kfree (tx_descr);
32301@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
32302 dump_skb ("<<<", vc, skb);
32303
32304 // VC layer stats
32305- atomic_inc(&atm_vcc->stats->rx);
32306+ atomic_inc_unchecked(&atm_vcc->stats->rx);
32307 __net_timestamp(skb);
32308 // end of our responsibility
32309 atm_vcc->push (atm_vcc, skb);
32310@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
32311 } else {
32312 PRINTK (KERN_INFO, "dropped over-size frame");
32313 // should we count this?
32314- atomic_inc(&atm_vcc->stats->rx_drop);
32315+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
32316 }
32317
32318 } else {
32319@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
32320 }
32321
32322 if (check_area (skb->data, skb->len)) {
32323- atomic_inc(&atm_vcc->stats->tx_err);
32324+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
32325 return -ENOMEM; // ?
32326 }
32327
32328diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
32329index b22d71c..d6e1049 100644
32330--- a/drivers/atm/atmtcp.c
32331+++ b/drivers/atm/atmtcp.c
32332@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
32333 if (vcc->pop) vcc->pop(vcc,skb);
32334 else dev_kfree_skb(skb);
32335 if (dev_data) return 0;
32336- atomic_inc(&vcc->stats->tx_err);
32337+ atomic_inc_unchecked(&vcc->stats->tx_err);
32338 return -ENOLINK;
32339 }
32340 size = skb->len+sizeof(struct atmtcp_hdr);
32341@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
32342 if (!new_skb) {
32343 if (vcc->pop) vcc->pop(vcc,skb);
32344 else dev_kfree_skb(skb);
32345- atomic_inc(&vcc->stats->tx_err);
32346+ atomic_inc_unchecked(&vcc->stats->tx_err);
32347 return -ENOBUFS;
32348 }
32349 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
32350@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
32351 if (vcc->pop) vcc->pop(vcc,skb);
32352 else dev_kfree_skb(skb);
32353 out_vcc->push(out_vcc,new_skb);
32354- atomic_inc(&vcc->stats->tx);
32355- atomic_inc(&out_vcc->stats->rx);
32356+ atomic_inc_unchecked(&vcc->stats->tx);
32357+ atomic_inc_unchecked(&out_vcc->stats->rx);
32358 return 0;
32359 }
32360
32361@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
32362 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
32363 read_unlock(&vcc_sklist_lock);
32364 if (!out_vcc) {
32365- atomic_inc(&vcc->stats->tx_err);
32366+ atomic_inc_unchecked(&vcc->stats->tx_err);
32367 goto done;
32368 }
32369 skb_pull(skb,sizeof(struct atmtcp_hdr));
32370@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
32371 __net_timestamp(new_skb);
32372 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
32373 out_vcc->push(out_vcc,new_skb);
32374- atomic_inc(&vcc->stats->tx);
32375- atomic_inc(&out_vcc->stats->rx);
32376+ atomic_inc_unchecked(&vcc->stats->tx);
32377+ atomic_inc_unchecked(&out_vcc->stats->rx);
32378 done:
32379 if (vcc->pop) vcc->pop(vcc,skb);
32380 else dev_kfree_skb(skb);
32381diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
32382index c1eb6fa..4c71be9 100644
32383--- a/drivers/atm/eni.c
32384+++ b/drivers/atm/eni.c
32385@@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
32386 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
32387 vcc->dev->number);
32388 length = 0;
32389- atomic_inc(&vcc->stats->rx_err);
32390+ atomic_inc_unchecked(&vcc->stats->rx_err);
32391 }
32392 else {
32393 length = ATM_CELL_SIZE-1; /* no HEC */
32394@@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
32395 size);
32396 }
32397 eff = length = 0;
32398- atomic_inc(&vcc->stats->rx_err);
32399+ atomic_inc_unchecked(&vcc->stats->rx_err);
32400 }
32401 else {
32402 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
32403@@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
32404 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
32405 vcc->dev->number,vcc->vci,length,size << 2,descr);
32406 length = eff = 0;
32407- atomic_inc(&vcc->stats->rx_err);
32408+ atomic_inc_unchecked(&vcc->stats->rx_err);
32409 }
32410 }
32411 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
32412@@ -767,7 +767,7 @@ rx_dequeued++;
32413 vcc->push(vcc,skb);
32414 pushed++;
32415 }
32416- atomic_inc(&vcc->stats->rx);
32417+ atomic_inc_unchecked(&vcc->stats->rx);
32418 }
32419 wake_up(&eni_dev->rx_wait);
32420 }
32421@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
32422 PCI_DMA_TODEVICE);
32423 if (vcc->pop) vcc->pop(vcc,skb);
32424 else dev_kfree_skb_irq(skb);
32425- atomic_inc(&vcc->stats->tx);
32426+ atomic_inc_unchecked(&vcc->stats->tx);
32427 wake_up(&eni_dev->tx_wait);
32428 dma_complete++;
32429 }
32430diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
32431index b41c948..a002b17 100644
32432--- a/drivers/atm/firestream.c
32433+++ b/drivers/atm/firestream.c
32434@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
32435 }
32436 }
32437
32438- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
32439+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
32440
32441 fs_dprintk (FS_DEBUG_TXMEM, "i");
32442 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
32443@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
32444 #endif
32445 skb_put (skb, qe->p1 & 0xffff);
32446 ATM_SKB(skb)->vcc = atm_vcc;
32447- atomic_inc(&atm_vcc->stats->rx);
32448+ atomic_inc_unchecked(&atm_vcc->stats->rx);
32449 __net_timestamp(skb);
32450 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
32451 atm_vcc->push (atm_vcc, skb);
32452@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
32453 kfree (pe);
32454 }
32455 if (atm_vcc)
32456- atomic_inc(&atm_vcc->stats->rx_drop);
32457+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
32458 break;
32459 case 0x1f: /* Reassembly abort: no buffers. */
32460 /* Silently increment error counter. */
32461 if (atm_vcc)
32462- atomic_inc(&atm_vcc->stats->rx_drop);
32463+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
32464 break;
32465 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
32466 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
32467diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
32468index 204814e..cede831 100644
32469--- a/drivers/atm/fore200e.c
32470+++ b/drivers/atm/fore200e.c
32471@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
32472 #endif
32473 /* check error condition */
32474 if (*entry->status & STATUS_ERROR)
32475- atomic_inc(&vcc->stats->tx_err);
32476+ atomic_inc_unchecked(&vcc->stats->tx_err);
32477 else
32478- atomic_inc(&vcc->stats->tx);
32479+ atomic_inc_unchecked(&vcc->stats->tx);
32480 }
32481 }
32482
32483@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
32484 if (skb == NULL) {
32485 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
32486
32487- atomic_inc(&vcc->stats->rx_drop);
32488+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32489 return -ENOMEM;
32490 }
32491
32492@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
32493
32494 dev_kfree_skb_any(skb);
32495
32496- atomic_inc(&vcc->stats->rx_drop);
32497+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32498 return -ENOMEM;
32499 }
32500
32501 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
32502
32503 vcc->push(vcc, skb);
32504- atomic_inc(&vcc->stats->rx);
32505+ atomic_inc_unchecked(&vcc->stats->rx);
32506
32507 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
32508
32509@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
32510 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
32511 fore200e->atm_dev->number,
32512 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
32513- atomic_inc(&vcc->stats->rx_err);
32514+ atomic_inc_unchecked(&vcc->stats->rx_err);
32515 }
32516 }
32517
32518@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
32519 goto retry_here;
32520 }
32521
32522- atomic_inc(&vcc->stats->tx_err);
32523+ atomic_inc_unchecked(&vcc->stats->tx_err);
32524
32525 fore200e->tx_sat++;
32526 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
32527diff --git a/drivers/atm/he.c b/drivers/atm/he.c
32528index 72b6960..cf9167a 100644
32529--- a/drivers/atm/he.c
32530+++ b/drivers/atm/he.c
32531@@ -1699,7 +1699,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
32532
32533 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
32534 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
32535- atomic_inc(&vcc->stats->rx_drop);
32536+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32537 goto return_host_buffers;
32538 }
32539
32540@@ -1726,7 +1726,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
32541 RBRQ_LEN_ERR(he_dev->rbrq_head)
32542 ? "LEN_ERR" : "",
32543 vcc->vpi, vcc->vci);
32544- atomic_inc(&vcc->stats->rx_err);
32545+ atomic_inc_unchecked(&vcc->stats->rx_err);
32546 goto return_host_buffers;
32547 }
32548
32549@@ -1778,7 +1778,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
32550 vcc->push(vcc, skb);
32551 spin_lock(&he_dev->global_lock);
32552
32553- atomic_inc(&vcc->stats->rx);
32554+ atomic_inc_unchecked(&vcc->stats->rx);
32555
32556 return_host_buffers:
32557 ++pdus_assembled;
32558@@ -2104,7 +2104,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
32559 tpd->vcc->pop(tpd->vcc, tpd->skb);
32560 else
32561 dev_kfree_skb_any(tpd->skb);
32562- atomic_inc(&tpd->vcc->stats->tx_err);
32563+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
32564 }
32565 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
32566 return;
32567@@ -2516,7 +2516,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
32568 vcc->pop(vcc, skb);
32569 else
32570 dev_kfree_skb_any(skb);
32571- atomic_inc(&vcc->stats->tx_err);
32572+ atomic_inc_unchecked(&vcc->stats->tx_err);
32573 return -EINVAL;
32574 }
32575
32576@@ -2527,7 +2527,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
32577 vcc->pop(vcc, skb);
32578 else
32579 dev_kfree_skb_any(skb);
32580- atomic_inc(&vcc->stats->tx_err);
32581+ atomic_inc_unchecked(&vcc->stats->tx_err);
32582 return -EINVAL;
32583 }
32584 #endif
32585@@ -2539,7 +2539,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
32586 vcc->pop(vcc, skb);
32587 else
32588 dev_kfree_skb_any(skb);
32589- atomic_inc(&vcc->stats->tx_err);
32590+ atomic_inc_unchecked(&vcc->stats->tx_err);
32591 spin_unlock_irqrestore(&he_dev->global_lock, flags);
32592 return -ENOMEM;
32593 }
32594@@ -2581,7 +2581,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
32595 vcc->pop(vcc, skb);
32596 else
32597 dev_kfree_skb_any(skb);
32598- atomic_inc(&vcc->stats->tx_err);
32599+ atomic_inc_unchecked(&vcc->stats->tx_err);
32600 spin_unlock_irqrestore(&he_dev->global_lock, flags);
32601 return -ENOMEM;
32602 }
32603@@ -2612,7 +2612,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
32604 __enqueue_tpd(he_dev, tpd, cid);
32605 spin_unlock_irqrestore(&he_dev->global_lock, flags);
32606
32607- atomic_inc(&vcc->stats->tx);
32608+ atomic_inc_unchecked(&vcc->stats->tx);
32609
32610 return 0;
32611 }
32612diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
32613index 1dc0519..1aadaf7 100644
32614--- a/drivers/atm/horizon.c
32615+++ b/drivers/atm/horizon.c
32616@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
32617 {
32618 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
32619 // VC layer stats
32620- atomic_inc(&vcc->stats->rx);
32621+ atomic_inc_unchecked(&vcc->stats->rx);
32622 __net_timestamp(skb);
32623 // end of our responsibility
32624 vcc->push (vcc, skb);
32625@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
32626 dev->tx_iovec = NULL;
32627
32628 // VC layer stats
32629- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
32630+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
32631
32632 // free the skb
32633 hrz_kfree_skb (skb);
32634diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
32635index 272f009..a18ba55 100644
32636--- a/drivers/atm/idt77252.c
32637+++ b/drivers/atm/idt77252.c
32638@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
32639 else
32640 dev_kfree_skb(skb);
32641
32642- atomic_inc(&vcc->stats->tx);
32643+ atomic_inc_unchecked(&vcc->stats->tx);
32644 }
32645
32646 atomic_dec(&scq->used);
32647@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32648 if ((sb = dev_alloc_skb(64)) == NULL) {
32649 printk("%s: Can't allocate buffers for aal0.\n",
32650 card->name);
32651- atomic_add(i, &vcc->stats->rx_drop);
32652+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
32653 break;
32654 }
32655 if (!atm_charge(vcc, sb->truesize)) {
32656 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
32657 card->name);
32658- atomic_add(i - 1, &vcc->stats->rx_drop);
32659+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
32660 dev_kfree_skb(sb);
32661 break;
32662 }
32663@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32664 ATM_SKB(sb)->vcc = vcc;
32665 __net_timestamp(sb);
32666 vcc->push(vcc, sb);
32667- atomic_inc(&vcc->stats->rx);
32668+ atomic_inc_unchecked(&vcc->stats->rx);
32669
32670 cell += ATM_CELL_PAYLOAD;
32671 }
32672@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32673 "(CDC: %08x)\n",
32674 card->name, len, rpp->len, readl(SAR_REG_CDC));
32675 recycle_rx_pool_skb(card, rpp);
32676- atomic_inc(&vcc->stats->rx_err);
32677+ atomic_inc_unchecked(&vcc->stats->rx_err);
32678 return;
32679 }
32680 if (stat & SAR_RSQE_CRC) {
32681 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
32682 recycle_rx_pool_skb(card, rpp);
32683- atomic_inc(&vcc->stats->rx_err);
32684+ atomic_inc_unchecked(&vcc->stats->rx_err);
32685 return;
32686 }
32687 if (skb_queue_len(&rpp->queue) > 1) {
32688@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32689 RXPRINTK("%s: Can't alloc RX skb.\n",
32690 card->name);
32691 recycle_rx_pool_skb(card, rpp);
32692- atomic_inc(&vcc->stats->rx_err);
32693+ atomic_inc_unchecked(&vcc->stats->rx_err);
32694 return;
32695 }
32696 if (!atm_charge(vcc, skb->truesize)) {
32697@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32698 __net_timestamp(skb);
32699
32700 vcc->push(vcc, skb);
32701- atomic_inc(&vcc->stats->rx);
32702+ atomic_inc_unchecked(&vcc->stats->rx);
32703
32704 return;
32705 }
32706@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32707 __net_timestamp(skb);
32708
32709 vcc->push(vcc, skb);
32710- atomic_inc(&vcc->stats->rx);
32711+ atomic_inc_unchecked(&vcc->stats->rx);
32712
32713 if (skb->truesize > SAR_FB_SIZE_3)
32714 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
32715@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
32716 if (vcc->qos.aal != ATM_AAL0) {
32717 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
32718 card->name, vpi, vci);
32719- atomic_inc(&vcc->stats->rx_drop);
32720+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32721 goto drop;
32722 }
32723
32724 if ((sb = dev_alloc_skb(64)) == NULL) {
32725 printk("%s: Can't allocate buffers for AAL0.\n",
32726 card->name);
32727- atomic_inc(&vcc->stats->rx_err);
32728+ atomic_inc_unchecked(&vcc->stats->rx_err);
32729 goto drop;
32730 }
32731
32732@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
32733 ATM_SKB(sb)->vcc = vcc;
32734 __net_timestamp(sb);
32735 vcc->push(vcc, sb);
32736- atomic_inc(&vcc->stats->rx);
32737+ atomic_inc_unchecked(&vcc->stats->rx);
32738
32739 drop:
32740 skb_pull(queue, 64);
32741@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
32742
32743 if (vc == NULL) {
32744 printk("%s: NULL connection in send().\n", card->name);
32745- atomic_inc(&vcc->stats->tx_err);
32746+ atomic_inc_unchecked(&vcc->stats->tx_err);
32747 dev_kfree_skb(skb);
32748 return -EINVAL;
32749 }
32750 if (!test_bit(VCF_TX, &vc->flags)) {
32751 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
32752- atomic_inc(&vcc->stats->tx_err);
32753+ atomic_inc_unchecked(&vcc->stats->tx_err);
32754 dev_kfree_skb(skb);
32755 return -EINVAL;
32756 }
32757@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
32758 break;
32759 default:
32760 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
32761- atomic_inc(&vcc->stats->tx_err);
32762+ atomic_inc_unchecked(&vcc->stats->tx_err);
32763 dev_kfree_skb(skb);
32764 return -EINVAL;
32765 }
32766
32767 if (skb_shinfo(skb)->nr_frags != 0) {
32768 printk("%s: No scatter-gather yet.\n", card->name);
32769- atomic_inc(&vcc->stats->tx_err);
32770+ atomic_inc_unchecked(&vcc->stats->tx_err);
32771 dev_kfree_skb(skb);
32772 return -EINVAL;
32773 }
32774@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
32775
32776 err = queue_skb(card, vc, skb, oam);
32777 if (err) {
32778- atomic_inc(&vcc->stats->tx_err);
32779+ atomic_inc_unchecked(&vcc->stats->tx_err);
32780 dev_kfree_skb(skb);
32781 return err;
32782 }
32783@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
32784 skb = dev_alloc_skb(64);
32785 if (!skb) {
32786 printk("%s: Out of memory in send_oam().\n", card->name);
32787- atomic_inc(&vcc->stats->tx_err);
32788+ atomic_inc_unchecked(&vcc->stats->tx_err);
32789 return -ENOMEM;
32790 }
32791 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
32792diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
32793index 4217f29..88f547a 100644
32794--- a/drivers/atm/iphase.c
32795+++ b/drivers/atm/iphase.c
32796@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
32797 status = (u_short) (buf_desc_ptr->desc_mode);
32798 if (status & (RX_CER | RX_PTE | RX_OFL))
32799 {
32800- atomic_inc(&vcc->stats->rx_err);
32801+ atomic_inc_unchecked(&vcc->stats->rx_err);
32802 IF_ERR(printk("IA: bad packet, dropping it");)
32803 if (status & RX_CER) {
32804 IF_ERR(printk(" cause: packet CRC error\n");)
32805@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
32806 len = dma_addr - buf_addr;
32807 if (len > iadev->rx_buf_sz) {
32808 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
32809- atomic_inc(&vcc->stats->rx_err);
32810+ atomic_inc_unchecked(&vcc->stats->rx_err);
32811 goto out_free_desc;
32812 }
32813
32814@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
32815 ia_vcc = INPH_IA_VCC(vcc);
32816 if (ia_vcc == NULL)
32817 {
32818- atomic_inc(&vcc->stats->rx_err);
32819+ atomic_inc_unchecked(&vcc->stats->rx_err);
32820 atm_return(vcc, skb->truesize);
32821 dev_kfree_skb_any(skb);
32822 goto INCR_DLE;
32823@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
32824 if ((length > iadev->rx_buf_sz) || (length >
32825 (skb->len - sizeof(struct cpcs_trailer))))
32826 {
32827- atomic_inc(&vcc->stats->rx_err);
32828+ atomic_inc_unchecked(&vcc->stats->rx_err);
32829 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
32830 length, skb->len);)
32831 atm_return(vcc, skb->truesize);
32832@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
32833
32834 IF_RX(printk("rx_dle_intr: skb push");)
32835 vcc->push(vcc,skb);
32836- atomic_inc(&vcc->stats->rx);
32837+ atomic_inc_unchecked(&vcc->stats->rx);
32838 iadev->rx_pkt_cnt++;
32839 }
32840 INCR_DLE:
32841@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
32842 {
32843 struct k_sonet_stats *stats;
32844 stats = &PRIV(_ia_dev[board])->sonet_stats;
32845- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
32846- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
32847- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
32848- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
32849- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
32850- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
32851- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
32852- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
32853- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
32854+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
32855+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
32856+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
32857+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
32858+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
32859+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
32860+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
32861+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
32862+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
32863 }
32864 ia_cmds.status = 0;
32865 break;
32866@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
32867 if ((desc == 0) || (desc > iadev->num_tx_desc))
32868 {
32869 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
32870- atomic_inc(&vcc->stats->tx);
32871+ atomic_inc_unchecked(&vcc->stats->tx);
32872 if (vcc->pop)
32873 vcc->pop(vcc, skb);
32874 else
32875@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
32876 ATM_DESC(skb) = vcc->vci;
32877 skb_queue_tail(&iadev->tx_dma_q, skb);
32878
32879- atomic_inc(&vcc->stats->tx);
32880+ atomic_inc_unchecked(&vcc->stats->tx);
32881 iadev->tx_pkt_cnt++;
32882 /* Increment transaction counter */
32883 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
32884
32885 #if 0
32886 /* add flow control logic */
32887- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
32888+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
32889 if (iavcc->vc_desc_cnt > 10) {
32890 vcc->tx_quota = vcc->tx_quota * 3 / 4;
32891 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
32892diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
32893index fa7d701..1e404c7 100644
32894--- a/drivers/atm/lanai.c
32895+++ b/drivers/atm/lanai.c
32896@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
32897 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
32898 lanai_endtx(lanai, lvcc);
32899 lanai_free_skb(lvcc->tx.atmvcc, skb);
32900- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
32901+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
32902 }
32903
32904 /* Try to fill the buffer - don't call unless there is backlog */
32905@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
32906 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
32907 __net_timestamp(skb);
32908 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
32909- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
32910+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
32911 out:
32912 lvcc->rx.buf.ptr = end;
32913 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
32914@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
32915 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
32916 "vcc %d\n", lanai->number, (unsigned int) s, vci);
32917 lanai->stats.service_rxnotaal5++;
32918- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
32919+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
32920 return 0;
32921 }
32922 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
32923@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
32924 int bytes;
32925 read_unlock(&vcc_sklist_lock);
32926 DPRINTK("got trashed rx pdu on vci %d\n", vci);
32927- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
32928+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
32929 lvcc->stats.x.aal5.service_trash++;
32930 bytes = (SERVICE_GET_END(s) * 16) -
32931 (((unsigned long) lvcc->rx.buf.ptr) -
32932@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
32933 }
32934 if (s & SERVICE_STREAM) {
32935 read_unlock(&vcc_sklist_lock);
32936- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
32937+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
32938 lvcc->stats.x.aal5.service_stream++;
32939 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
32940 "PDU on VCI %d!\n", lanai->number, vci);
32941@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
32942 return 0;
32943 }
32944 DPRINTK("got rx crc error on vci %d\n", vci);
32945- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
32946+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
32947 lvcc->stats.x.aal5.service_rxcrc++;
32948 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
32949 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
32950diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
32951index ed1d2b7..8cffc1f 100644
32952--- a/drivers/atm/nicstar.c
32953+++ b/drivers/atm/nicstar.c
32954@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
32955 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
32956 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
32957 card->index);
32958- atomic_inc(&vcc->stats->tx_err);
32959+ atomic_inc_unchecked(&vcc->stats->tx_err);
32960 dev_kfree_skb_any(skb);
32961 return -EINVAL;
32962 }
32963@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
32964 if (!vc->tx) {
32965 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
32966 card->index);
32967- atomic_inc(&vcc->stats->tx_err);
32968+ atomic_inc_unchecked(&vcc->stats->tx_err);
32969 dev_kfree_skb_any(skb);
32970 return -EINVAL;
32971 }
32972@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
32973 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
32974 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
32975 card->index);
32976- atomic_inc(&vcc->stats->tx_err);
32977+ atomic_inc_unchecked(&vcc->stats->tx_err);
32978 dev_kfree_skb_any(skb);
32979 return -EINVAL;
32980 }
32981
32982 if (skb_shinfo(skb)->nr_frags != 0) {
32983 printk("nicstar%d: No scatter-gather yet.\n", card->index);
32984- atomic_inc(&vcc->stats->tx_err);
32985+ atomic_inc_unchecked(&vcc->stats->tx_err);
32986 dev_kfree_skb_any(skb);
32987 return -EINVAL;
32988 }
32989@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
32990 }
32991
32992 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
32993- atomic_inc(&vcc->stats->tx_err);
32994+ atomic_inc_unchecked(&vcc->stats->tx_err);
32995 dev_kfree_skb_any(skb);
32996 return -EIO;
32997 }
32998- atomic_inc(&vcc->stats->tx);
32999+ atomic_inc_unchecked(&vcc->stats->tx);
33000
33001 return 0;
33002 }
33003@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33004 printk
33005 ("nicstar%d: Can't allocate buffers for aal0.\n",
33006 card->index);
33007- atomic_add(i, &vcc->stats->rx_drop);
33008+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
33009 break;
33010 }
33011 if (!atm_charge(vcc, sb->truesize)) {
33012 RXPRINTK
33013 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
33014 card->index);
33015- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
33016+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
33017 dev_kfree_skb_any(sb);
33018 break;
33019 }
33020@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33021 ATM_SKB(sb)->vcc = vcc;
33022 __net_timestamp(sb);
33023 vcc->push(vcc, sb);
33024- atomic_inc(&vcc->stats->rx);
33025+ atomic_inc_unchecked(&vcc->stats->rx);
33026 cell += ATM_CELL_PAYLOAD;
33027 }
33028
33029@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33030 if (iovb == NULL) {
33031 printk("nicstar%d: Out of iovec buffers.\n",
33032 card->index);
33033- atomic_inc(&vcc->stats->rx_drop);
33034+ atomic_inc_unchecked(&vcc->stats->rx_drop);
33035 recycle_rx_buf(card, skb);
33036 return;
33037 }
33038@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33039 small or large buffer itself. */
33040 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
33041 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
33042- atomic_inc(&vcc->stats->rx_err);
33043+ atomic_inc_unchecked(&vcc->stats->rx_err);
33044 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
33045 NS_MAX_IOVECS);
33046 NS_PRV_IOVCNT(iovb) = 0;
33047@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33048 ("nicstar%d: Expected a small buffer, and this is not one.\n",
33049 card->index);
33050 which_list(card, skb);
33051- atomic_inc(&vcc->stats->rx_err);
33052+ atomic_inc_unchecked(&vcc->stats->rx_err);
33053 recycle_rx_buf(card, skb);
33054 vc->rx_iov = NULL;
33055 recycle_iov_buf(card, iovb);
33056@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33057 ("nicstar%d: Expected a large buffer, and this is not one.\n",
33058 card->index);
33059 which_list(card, skb);
33060- atomic_inc(&vcc->stats->rx_err);
33061+ atomic_inc_unchecked(&vcc->stats->rx_err);
33062 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
33063 NS_PRV_IOVCNT(iovb));
33064 vc->rx_iov = NULL;
33065@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33066 printk(" - PDU size mismatch.\n");
33067 else
33068 printk(".\n");
33069- atomic_inc(&vcc->stats->rx_err);
33070+ atomic_inc_unchecked(&vcc->stats->rx_err);
33071 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
33072 NS_PRV_IOVCNT(iovb));
33073 vc->rx_iov = NULL;
33074@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33075 /* skb points to a small buffer */
33076 if (!atm_charge(vcc, skb->truesize)) {
33077 push_rxbufs(card, skb);
33078- atomic_inc(&vcc->stats->rx_drop);
33079+ atomic_inc_unchecked(&vcc->stats->rx_drop);
33080 } else {
33081 skb_put(skb, len);
33082 dequeue_sm_buf(card, skb);
33083@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33084 ATM_SKB(skb)->vcc = vcc;
33085 __net_timestamp(skb);
33086 vcc->push(vcc, skb);
33087- atomic_inc(&vcc->stats->rx);
33088+ atomic_inc_unchecked(&vcc->stats->rx);
33089 }
33090 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
33091 struct sk_buff *sb;
33092@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33093 if (len <= NS_SMBUFSIZE) {
33094 if (!atm_charge(vcc, sb->truesize)) {
33095 push_rxbufs(card, sb);
33096- atomic_inc(&vcc->stats->rx_drop);
33097+ atomic_inc_unchecked(&vcc->stats->rx_drop);
33098 } else {
33099 skb_put(sb, len);
33100 dequeue_sm_buf(card, sb);
33101@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33102 ATM_SKB(sb)->vcc = vcc;
33103 __net_timestamp(sb);
33104 vcc->push(vcc, sb);
33105- atomic_inc(&vcc->stats->rx);
33106+ atomic_inc_unchecked(&vcc->stats->rx);
33107 }
33108
33109 push_rxbufs(card, skb);
33110@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33111
33112 if (!atm_charge(vcc, skb->truesize)) {
33113 push_rxbufs(card, skb);
33114- atomic_inc(&vcc->stats->rx_drop);
33115+ atomic_inc_unchecked(&vcc->stats->rx_drop);
33116 } else {
33117 dequeue_lg_buf(card, skb);
33118 #ifdef NS_USE_DESTRUCTORS
33119@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33120 ATM_SKB(skb)->vcc = vcc;
33121 __net_timestamp(skb);
33122 vcc->push(vcc, skb);
33123- atomic_inc(&vcc->stats->rx);
33124+ atomic_inc_unchecked(&vcc->stats->rx);
33125 }
33126
33127 push_rxbufs(card, sb);
33128@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33129 printk
33130 ("nicstar%d: Out of huge buffers.\n",
33131 card->index);
33132- atomic_inc(&vcc->stats->rx_drop);
33133+ atomic_inc_unchecked(&vcc->stats->rx_drop);
33134 recycle_iovec_rx_bufs(card,
33135 (struct iovec *)
33136 iovb->data,
33137@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33138 card->hbpool.count++;
33139 } else
33140 dev_kfree_skb_any(hb);
33141- atomic_inc(&vcc->stats->rx_drop);
33142+ atomic_inc_unchecked(&vcc->stats->rx_drop);
33143 } else {
33144 /* Copy the small buffer to the huge buffer */
33145 sb = (struct sk_buff *)iov->iov_base;
33146@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33147 #endif /* NS_USE_DESTRUCTORS */
33148 __net_timestamp(hb);
33149 vcc->push(vcc, hb);
33150- atomic_inc(&vcc->stats->rx);
33151+ atomic_inc_unchecked(&vcc->stats->rx);
33152 }
33153 }
33154
33155diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
33156index 0474a89..06ea4a1 100644
33157--- a/drivers/atm/solos-pci.c
33158+++ b/drivers/atm/solos-pci.c
33159@@ -838,7 +838,7 @@ void solos_bh(unsigned long card_arg)
33160 }
33161 atm_charge(vcc, skb->truesize);
33162 vcc->push(vcc, skb);
33163- atomic_inc(&vcc->stats->rx);
33164+ atomic_inc_unchecked(&vcc->stats->rx);
33165 break;
33166
33167 case PKT_STATUS:
33168@@ -1117,7 +1117,7 @@ static uint32_t fpga_tx(struct solos_card *card)
33169 vcc = SKB_CB(oldskb)->vcc;
33170
33171 if (vcc) {
33172- atomic_inc(&vcc->stats->tx);
33173+ atomic_inc_unchecked(&vcc->stats->tx);
33174 solos_pop(vcc, oldskb);
33175 } else {
33176 dev_kfree_skb_irq(oldskb);
33177diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
33178index 0215934..ce9f5b1 100644
33179--- a/drivers/atm/suni.c
33180+++ b/drivers/atm/suni.c
33181@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
33182
33183
33184 #define ADD_LIMITED(s,v) \
33185- atomic_add((v),&stats->s); \
33186- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
33187+ atomic_add_unchecked((v),&stats->s); \
33188+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
33189
33190
33191 static void suni_hz(unsigned long from_timer)
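
suni.c's ADD_LIMITED already implements its own saturation, clamping the SONET counter to INT_MAX once it goes negative, so it must use the wrap-permitting unchecked ops: under REFCOUNT the checked atomic_add would trip before the macro's own clamp ever ran. The macro unrolled into a function, reusing model types like those in the adummy.c sketch above:

#include <limits.h>

typedef struct { volatile int counter; } atomic_unchecked_t;

static void atomic_add_unchecked(int v, atomic_unchecked_t *a)
{
        a->counter = (int)((unsigned int)a->counter + (unsigned int)v);
}
static int  atomic_read_unchecked(atomic_unchecked_t *a) { return a->counter; }
static void atomic_set_unchecked(atomic_unchecked_t *a, int v) { a->counter = v; }

/* ADD_LIMITED(s, v) unrolled: wrap-tolerant add, then clamp at INT_MAX */
static void add_limited(atomic_unchecked_t *s, int v)
{
        atomic_add_unchecked(v, s);
        if (atomic_read_unchecked(s) < 0)
                atomic_set_unchecked(s, INT_MAX);
}

int main(void)
{
        atomic_unchecked_t s = { INT_MAX - 1 };
        add_limited(&s, 10);   /* wraps negative, then clamps */
        return atomic_read_unchecked(&s) == INT_MAX ? 0 : 1;
}

The same macro appears again in uPD98402.c below with the identical conversion.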
33192diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
33193index 5120a96..e2572bd 100644
33194--- a/drivers/atm/uPD98402.c
33195+++ b/drivers/atm/uPD98402.c
33196@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
33197 struct sonet_stats tmp;
33198 int error = 0;
33199
33200- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
33201+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
33202 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
33203 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
33204 if (zero && !error) {
33205@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
33206
33207
33208 #define ADD_LIMITED(s,v) \
33209- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
33210- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
33211- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
33212+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
33213+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
33214+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
33215
33216
33217 static void stat_event(struct atm_dev *dev)
33218@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
33219 if (reason & uPD98402_INT_PFM) stat_event(dev);
33220 if (reason & uPD98402_INT_PCO) {
33221 (void) GET(PCOCR); /* clear interrupt cause */
33222- atomic_add(GET(HECCT),
33223+ atomic_add_unchecked(GET(HECCT),
33224 &PRIV(dev)->sonet_stats.uncorr_hcs);
33225 }
33226 if ((reason & uPD98402_INT_RFO) &&
33227@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
33228 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
33229 uPD98402_INT_LOS),PIMR); /* enable them */
33230 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
33231- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
33232- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
33233- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
33234+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
33235+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
33236+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
33237 return 0;
33238 }
33239
33240diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
33241index 969c3c2..9b72956 100644
33242--- a/drivers/atm/zatm.c
33243+++ b/drivers/atm/zatm.c
33244@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
33245 }
33246 if (!size) {
33247 dev_kfree_skb_irq(skb);
33248- if (vcc) atomic_inc(&vcc->stats->rx_err);
33249+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
33250 continue;
33251 }
33252 if (!atm_charge(vcc,skb->truesize)) {
33253@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
33254 skb->len = size;
33255 ATM_SKB(skb)->vcc = vcc;
33256 vcc->push(vcc,skb);
33257- atomic_inc(&vcc->stats->rx);
33258+ atomic_inc_unchecked(&vcc->stats->rx);
33259 }
33260 zout(pos & 0xffff,MTA(mbx));
33261 #if 0 /* probably a stupid idea */
33262@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
33263 skb_queue_head(&zatm_vcc->backlog,skb);
33264 break;
33265 }
33266- atomic_inc(&vcc->stats->tx);
33267+ atomic_inc_unchecked(&vcc->stats->tx);
33268 wake_up(&zatm_vcc->tx_wait);
33269 }
33270
33271diff --git a/drivers/base/bus.c b/drivers/base/bus.c
33272index 6856303..0602d70 100644
33273--- a/drivers/base/bus.c
33274+++ b/drivers/base/bus.c
33275@@ -1163,7 +1163,7 @@ int subsys_interface_register(struct subsys_interface *sif)
33276 return -EINVAL;
33277
33278 mutex_lock(&subsys->p->mutex);
33279- list_add_tail(&sif->node, &subsys->p->interfaces);
33280+ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
33281 if (sif->add_dev) {
33282 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
33283 while ((dev = subsys_dev_iter_next(&iter)))
33284@@ -1188,7 +1188,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
33285 subsys = sif->subsys;
33286
33287 mutex_lock(&subsys->p->mutex);
33288- list_del_init(&sif->node);
33289+ pax_list_del_init((struct list_head *)&sif->node);
33290 if (sif->remove_dev) {
33291 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
33292 while ((dev = subsys_dev_iter_next(&iter)))
33293diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
33294index 17cf7ca..7e553e1 100644
33295--- a/drivers/base/devtmpfs.c
33296+++ b/drivers/base/devtmpfs.c
33297@@ -347,7 +347,7 @@ int devtmpfs_mount(const char *mntdir)
33298 if (!thread)
33299 return 0;
33300
33301- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
33302+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
33303 if (err)
33304 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
33305 else
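
sys_mount() is a syscall entry point whose parameters are __user pointers, but devtmpfs calls it with kernel strings; mainline silences sparse with bare __force casts, and grsecurity's stricter user/kernel address-space separation spells the crossing as __force_user so every such site is explicit and greppable. A sketch of how the annotations read to sparse (address space 1 for userspace follows the usual kernel convention; without sparse the attributes compile away):

/* plain-C fallbacks for the sparse annotations; under sparse (__CHECKER__)
 * __user marks pointers in address space 1 that must not be dereferenced */
#ifdef __CHECKER__
# define __user   __attribute__((noderef, address_space(1)))
# define __force  __attribute__((force))
#else
# define __user
# define __force
#endif
#define __force_user __force __user   /* grsecurity's shorthand */

static long sys_mount_stub(char __user *dev_name, char __user *dir_name,
                           char __user *type)
{
        (void)dev_name; (void)dir_name; (void)type;
        return 0;
}

static long mount_devtmpfs(const char *mntdir)
{
        /* kernel strings handed to user-pointer parameters: each
         * crossing is annotated instead of silently cast */
        return sys_mount_stub((char __force_user *)"devtmpfs",
                              (char __force_user *)mntdir,
                              (char __force_user *)"devtmpfs");
}

int main(void) { return (int)mount_devtmpfs("/dev"); }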
33306diff --git a/drivers/base/node.c b/drivers/base/node.c
33307index fac124a..66bd4ab 100644
33308--- a/drivers/base/node.c
33309+++ b/drivers/base/node.c
33310@@ -625,7 +625,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
33311 struct node_attr {
33312 struct device_attribute attr;
33313 enum node_states state;
33314-};
33315+} __do_const;
33316
33317 static ssize_t show_node_state(struct device *dev,
33318 struct device_attribute *attr, char *buf)
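
__do_const is the opt-in counterpart to the *_no_const typedefs seen earlier: it tells the constify plugin to treat every instance of the struct as const even though the type mixes plain data (the node state) with an embedded device_attribute, forcing the attribute table into read-only memory. Approximated by hand, the effect is simply:

#include <stdio.h>

/* hand approximation of __do_const: every instance is declared const,
 * which is what the plugin enforces for the attributed type */
struct node_attr {
        int attr;    /* stands in for the embedded struct device_attribute */
        int state;
} /* __do_const */;

static const struct node_attr node_state_attr = { .attr = 0, .state = 3 };

int main(void)
{
        printf("state=%d\n", node_state_attr.state);
        /* node_state_attr.state = 4; would be rejected at compile time */
        return 0;
}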
33319diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
33320index acc3a8d..981c236 100644
33321--- a/drivers/base/power/domain.c
33322+++ b/drivers/base/power/domain.c
33323@@ -1851,7 +1851,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
33324 {
33325 struct cpuidle_driver *cpuidle_drv;
33326 struct gpd_cpu_data *cpu_data;
33327- struct cpuidle_state *idle_state;
33328+ cpuidle_state_no_const *idle_state;
33329 int ret = 0;
33330
33331 if (IS_ERR_OR_NULL(genpd) || state < 0)
33332@@ -1919,7 +1919,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
33333 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
33334 {
33335 struct gpd_cpu_data *cpu_data;
33336- struct cpuidle_state *idle_state;
33337+ cpuidle_state_no_const *idle_state;
33338 int ret = 0;
33339
33340 if (IS_ERR_OR_NULL(genpd))
33341diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
33342index e6ee5e8..98ad7fc 100644
33343--- a/drivers/base/power/wakeup.c
33344+++ b/drivers/base/power/wakeup.c
33345@@ -29,14 +29,14 @@ bool events_check_enabled __read_mostly;
33346 * They need to be modified together atomically, so it's better to use one
33347 * atomic variable to hold them both.
33348 */
33349-static atomic_t combined_event_count = ATOMIC_INIT(0);
33350+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
33351
33352 #define IN_PROGRESS_BITS (sizeof(int) * 4)
33353 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
33354
33355 static void split_counters(unsigned int *cnt, unsigned int *inpr)
33356 {
33357- unsigned int comb = atomic_read(&combined_event_count);
33358+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
33359
33360 *cnt = (comb >> IN_PROGRESS_BITS);
33361 *inpr = comb & MAX_IN_PROGRESS;
33362@@ -389,7 +389,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
33363 ws->start_prevent_time = ws->last_time;
33364
33365 /* Increment the counter of events in progress. */
33366- cec = atomic_inc_return(&combined_event_count);
33367+ cec = atomic_inc_return_unchecked(&combined_event_count);
33368
33369 trace_wakeup_source_activate(ws->name, cec);
33370 }
33371@@ -515,7 +515,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
33372 * Increment the counter of registered wakeup events and decrement the
33373 * couter of wakeup events in progress simultaneously.
33374 */
33375- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
33376+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
33377 trace_wakeup_source_deactivate(ws->name, cec);
33378
33379 split_counters(&cnt, &inpr);
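
combined_event_count packs two counters into one word precisely so both can be updated in a single atomic operation, which is also why it can safely become atomic_unchecked_t: the low IN_PROGRESS_BITS hold wakeup events in flight and the high bits hold registered (completed) events. Adding 1 bumps the in-progress half; adding MAX_IN_PROGRESS, that is 2^IN_PROGRESS_BITS - 1, carries out of the low half, incrementing the registered count and decrementing the in-progress count at once. A runnable walk-through of that arithmetic, with plain unsigned math standing in for the atomics:

#include <assert.h>
#include <stdio.h>

#define IN_PROGRESS_BITS (sizeof(int) * 4)            /* 16 for 32-bit int */
#define MAX_IN_PROGRESS  ((1u << IN_PROGRESS_BITS) - 1)

static unsigned int comb;   /* stands in for combined_event_count */

static void split_counters(unsigned int *cnt, unsigned int *inpr)
{
        *cnt  = comb >> IN_PROGRESS_BITS;
        *inpr = comb & MAX_IN_PROGRESS;
}

int main(void)
{
        unsigned int cnt, inpr;

        comb += 1;                 /* activate: one more event in progress */
        comb += MAX_IN_PROGRESS;   /* deactivate: adding 2^16 - 1 carries,
                                      so registered + 1, in-progress - 1 */
        split_counters(&cnt, &inpr);
        printf("registered=%u in_progress=%u\n", cnt, inpr);
        assert(cnt == 1 && inpr == 0);
        return 0;
}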
33380diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
33381index e8d11b6..7b1b36f 100644
33382--- a/drivers/base/syscore.c
33383+++ b/drivers/base/syscore.c
33384@@ -21,7 +21,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
33385 void register_syscore_ops(struct syscore_ops *ops)
33386 {
33387 mutex_lock(&syscore_ops_lock);
33388- list_add_tail(&ops->node, &syscore_ops_list);
33389+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
33390 mutex_unlock(&syscore_ops_lock);
33391 }
33392 EXPORT_SYMBOL_GPL(register_syscore_ops);
33393@@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
33394 void unregister_syscore_ops(struct syscore_ops *ops)
33395 {
33396 mutex_lock(&syscore_ops_lock);
33397- list_del(&ops->node);
33398+ pax_list_del((struct list_head *)&ops->node);
33399 mutex_unlock(&syscore_ops_lock);
33400 }
33401 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
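
syscore_ops and subsys_interface (bus.c above) embed their list_head in structures the constify plugin has made read-only, so ordinary list_add_tail/list_del on them would fault; the pax_list_* helpers are presumably the stock list primitives bracketed by the same pax_open_kernel()/pax_close_kernel() window shown earlier, with the explicit cast discarding the plugin-added const. A sketch of that presumed shape, with no-op stand-ins for the window primitives:

struct list_head { struct list_head *next, *prev; };

static void list_add_tail(struct list_head *new, struct list_head *head)
{
        new->prev = head->prev;
        new->next = head;
        head->prev->next = new;
        head->prev = new;
}

/* no-op stand-ins for the kernel's write-window primitives */
static void pax_open_kernel(void)  {}
static void pax_close_kernel(void) {}

/* presumed shape of pax_list_add_tail: the stock list op, bracketed by
 * the window that makes constified nodes temporarily writable */
static void pax_list_add_tail(struct list_head *new, struct list_head *head)
{
        pax_open_kernel();
        list_add_tail(new, head);
        pax_close_kernel();
}

int main(void)
{
        struct list_head head = { &head, &head };
        struct list_head node;
        pax_list_add_tail(&node, &head);
        return head.next == &node ? 0 : 1;
}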
33402diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
33403index ade58bc..867143d 100644
33404--- a/drivers/block/cciss.c
33405+++ b/drivers/block/cciss.c
33406@@ -1196,6 +1196,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
33407 int err;
33408 u32 cp;
33409
33410+ memset(&arg64, 0, sizeof(arg64));
33411+
33412 err = 0;
33413 err |=
33414 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
33415@@ -3005,7 +3007,7 @@ static void start_io(ctlr_info_t *h)
33416 while (!list_empty(&h->reqQ)) {
33417 c = list_entry(h->reqQ.next, CommandList_struct, list);
33418 /* can't do anything if fifo is full */
33419- if ((h->access.fifo_full(h))) {
33420+ if ((h->access->fifo_full(h))) {
33421 dev_warn(&h->pdev->dev, "fifo full\n");
33422 break;
33423 }
33424@@ -3015,7 +3017,7 @@ static void start_io(ctlr_info_t *h)
33425 h->Qdepth--;
33426
33427 /* Tell the controller execute command */
33428- h->access.submit_command(h, c);
33429+ h->access->submit_command(h, c);
33430
33431 /* Put job onto the completed Q */
33432 addQ(&h->cmpQ, c);
33433@@ -3441,17 +3443,17 @@ startio:
33434
33435 static inline unsigned long get_next_completion(ctlr_info_t *h)
33436 {
33437- return h->access.command_completed(h);
33438+ return h->access->command_completed(h);
33439 }
33440
33441 static inline int interrupt_pending(ctlr_info_t *h)
33442 {
33443- return h->access.intr_pending(h);
33444+ return h->access->intr_pending(h);
33445 }
33446
33447 static inline long interrupt_not_for_us(ctlr_info_t *h)
33448 {
33449- return ((h->access.intr_pending(h) == 0) ||
33450+ return ((h->access->intr_pending(h) == 0) ||
33451 (h->interrupts_enabled == 0));
33452 }
33453
33454@@ -3484,7 +3486,7 @@ static inline u32 next_command(ctlr_info_t *h)
33455 u32 a;
33456
33457 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
33458- return h->access.command_completed(h);
33459+ return h->access->command_completed(h);
33460
33461 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
33462 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
33463@@ -4041,7 +4043,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
33464 trans_support & CFGTBL_Trans_use_short_tags);
33465
33466 /* Change the access methods to the performant access methods */
33467- h->access = SA5_performant_access;
33468+ h->access = &SA5_performant_access;
33469 h->transMethod = CFGTBL_Trans_Performant;
33470
33471 return;
33472@@ -4310,7 +4312,7 @@ static int cciss_pci_init(ctlr_info_t *h)
33473 if (prod_index < 0)
33474 return -ENODEV;
33475 h->product_name = products[prod_index].product_name;
33476- h->access = *(products[prod_index].access);
33477+ h->access = products[prod_index].access;
33478
33479 if (cciss_board_disabled(h)) {
33480 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
33481@@ -5032,7 +5034,7 @@ reinit_after_soft_reset:
33482 }
33483
33484 /* make sure the board interrupts are off */
33485- h->access.set_intr_mask(h, CCISS_INTR_OFF);
33486+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
33487 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
33488 if (rc)
33489 goto clean2;
33490@@ -5082,7 +5084,7 @@ reinit_after_soft_reset:
33491 * fake ones to scoop up any residual completions.
33492 */
33493 spin_lock_irqsave(&h->lock, flags);
33494- h->access.set_intr_mask(h, CCISS_INTR_OFF);
33495+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
33496 spin_unlock_irqrestore(&h->lock, flags);
33497 free_irq(h->intr[h->intr_mode], h);
33498 rc = cciss_request_irq(h, cciss_msix_discard_completions,
33499@@ -5102,9 +5104,9 @@ reinit_after_soft_reset:
33500 dev_info(&h->pdev->dev, "Board READY.\n");
33501 dev_info(&h->pdev->dev,
33502 "Waiting for stale completions to drain.\n");
33503- h->access.set_intr_mask(h, CCISS_INTR_ON);
33504+ h->access->set_intr_mask(h, CCISS_INTR_ON);
33505 msleep(10000);
33506- h->access.set_intr_mask(h, CCISS_INTR_OFF);
33507+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
33508
33509 rc = controller_reset_failed(h->cfgtable);
33510 if (rc)
33511@@ -5127,7 +5129,7 @@ reinit_after_soft_reset:
33512 cciss_scsi_setup(h);
33513
33514 /* Turn the interrupts on so we can service requests */
33515- h->access.set_intr_mask(h, CCISS_INTR_ON);
33516+ h->access->set_intr_mask(h, CCISS_INTR_ON);
33517
33518 /* Get the firmware version */
33519 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
33520@@ -5199,7 +5201,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
33521 kfree(flush_buf);
33522 if (return_code != IO_OK)
33523 dev_warn(&h->pdev->dev, "Error flushing cache\n");
33524- h->access.set_intr_mask(h, CCISS_INTR_OFF);
33525+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
33526 free_irq(h->intr[h->intr_mode], h);
33527 }
33528
33529diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
33530index 7fda30e..eb5dfe0 100644
33531--- a/drivers/block/cciss.h
33532+++ b/drivers/block/cciss.h
33533@@ -101,7 +101,7 @@ struct ctlr_info
33534 /* information about each logical volume */
33535 drive_info_struct *drv[CISS_MAX_LUN];
33536
33537- struct access_method access;
33538+ struct access_method *access;
33539
33540 /* queue and queue Info */
33541 struct list_head reqQ;
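
Turning struct access_method access into a pointer follows from constifying the board method tables: once SA5_performant_access and friends are const and live in read-only memory, they can no longer be copied by value into the writable ctlr_info, so the driver keeps a pointer to the table and every h->access.fn(...) becomes h->access->fn(...), which accounts for the bulk of the cciss.c and cpqarray.c hunks. A before/after sketch with an illustrative one-method table:

#include <stdio.h>

struct access_method {
        int (*intr_pending)(void *h);
};

static int sa5_intr_pending(void *h) { (void)h; return 0; }

/* constified table: lives in .rodata, cannot be copied by value into
 * a writable controller structure */
static const struct access_method SA5_access = {
        .intr_pending = sa5_intr_pending,
};

struct ctlr_info {
        const struct access_method *access; /* was: struct access_method access */
};

int main(void)
{
        struct ctlr_info h = { .access = &SA5_access }; /* was: h.access = *(...) */
        printf("%d\n", h.access->intr_pending(&h));     /* was: h.access.intr_pending(&h) */
        return 0;
}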
33542diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
33543index 3f08713..56a586a 100644
33544--- a/drivers/block/cpqarray.c
33545+++ b/drivers/block/cpqarray.c
33546@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
33547 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
33548 goto Enomem4;
33549 }
33550- hba[i]->access.set_intr_mask(hba[i], 0);
33551+ hba[i]->access->set_intr_mask(hba[i], 0);
33552 if (request_irq(hba[i]->intr, do_ida_intr,
33553 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
33554 {
33555@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
33556 add_timer(&hba[i]->timer);
33557
33558 /* Enable IRQ now that spinlock and rate limit timer are set up */
33559- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
33560+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
33561
33562 for(j=0; j<NWD; j++) {
33563 struct gendisk *disk = ida_gendisk[i][j];
33564@@ -694,7 +694,7 @@ DBGINFO(
33565 for(i=0; i<NR_PRODUCTS; i++) {
33566 if (board_id == products[i].board_id) {
33567 c->product_name = products[i].product_name;
33568- c->access = *(products[i].access);
33569+ c->access = products[i].access;
33570 break;
33571 }
33572 }
33573@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
33574 hba[ctlr]->intr = intr;
33575 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
33576 hba[ctlr]->product_name = products[j].product_name;
33577- hba[ctlr]->access = *(products[j].access);
33578+ hba[ctlr]->access = products[j].access;
33579 hba[ctlr]->ctlr = ctlr;
33580 hba[ctlr]->board_id = board_id;
33581 hba[ctlr]->pci_dev = NULL; /* not PCI */
33582@@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
33583
33584 while((c = h->reqQ) != NULL) {
33585 /* Can't do anything if we're busy */
33586- if (h->access.fifo_full(h) == 0)
33587+ if (h->access->fifo_full(h) == 0)
33588 return;
33589
33590 /* Get the first entry from the request Q */
33591@@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
33592 h->Qdepth--;
33593
33594 /* Tell the controller to do our bidding */
33595- h->access.submit_command(h, c);
33596+ h->access->submit_command(h, c);
33597
33598 /* Get onto the completion Q */
33599 addQ(&h->cmpQ, c);
33600@@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
33601 unsigned long flags;
33602 __u32 a,a1;
33603
33604- istat = h->access.intr_pending(h);
33605+ istat = h->access->intr_pending(h);
33606 /* Is this interrupt for us? */
33607 if (istat == 0)
33608 return IRQ_NONE;
33609@@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
33610 */
33611 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
33612 if (istat & FIFO_NOT_EMPTY) {
33613- while((a = h->access.command_completed(h))) {
33614+ while((a = h->access->command_completed(h))) {
33615 a1 = a; a &= ~3;
33616 if ((c = h->cmpQ) == NULL)
33617 {
33618@@ -1449,11 +1449,11 @@ static int sendcmd(
33619 /*
33620 * Disable interrupt
33621 */
33622- info_p->access.set_intr_mask(info_p, 0);
33623+ info_p->access->set_intr_mask(info_p, 0);
33624 /* Make sure there is room in the command FIFO */
33625 /* Actually it should be completely empty at this time. */
33626 for (i = 200000; i > 0; i--) {
33627- temp = info_p->access.fifo_full(info_p);
33628+ temp = info_p->access->fifo_full(info_p);
33629 if (temp != 0) {
33630 break;
33631 }
33632@@ -1466,7 +1466,7 @@ DBG(
33633 /*
33634 * Send the cmd
33635 */
33636- info_p->access.submit_command(info_p, c);
33637+ info_p->access->submit_command(info_p, c);
33638 complete = pollcomplete(ctlr);
33639
33640 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
33641@@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
33642 * we check the new geometry. Then turn interrupts back on when
33643 * we're done.
33644 */
33645- host->access.set_intr_mask(host, 0);
33646+ host->access->set_intr_mask(host, 0);
33647 getgeometry(ctlr);
33648- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
33649+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
33650
33651 for(i=0; i<NWD; i++) {
33652 struct gendisk *disk = ida_gendisk[ctlr][i];
33653@@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
33654 /* Wait (up to 2 seconds) for a command to complete */
33655
33656 for (i = 200000; i > 0; i--) {
33657- done = hba[ctlr]->access.command_completed(hba[ctlr]);
33658+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
33659 if (done == 0) {
33660 udelay(10); /* a short fixed delay */
33661 } else
33662diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
33663index be73e9d..7fbf140 100644
33664--- a/drivers/block/cpqarray.h
33665+++ b/drivers/block/cpqarray.h
33666@@ -99,7 +99,7 @@ struct ctlr_info {
33667 drv_info_t drv[NWD];
33668 struct proc_dir_entry *proc;
33669
33670- struct access_method access;
33671+ struct access_method *access;
33672
33673 cmdlist_t *reqQ;
33674 cmdlist_t *cmpQ;
33675diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
33676index 6b51afa..17e1191 100644
33677--- a/drivers/block/drbd/drbd_int.h
33678+++ b/drivers/block/drbd/drbd_int.h
33679@@ -582,7 +582,7 @@ struct drbd_epoch {
33680 struct drbd_tconn *tconn;
33681 struct list_head list;
33682 unsigned int barrier_nr;
33683- atomic_t epoch_size; /* increased on every request added. */
33684+ atomic_unchecked_t epoch_size; /* increased on every request added. */
33685 atomic_t active; /* increased on every req. added, and dec on every finished. */
33686 unsigned long flags;
33687 };
33688@@ -1011,7 +1011,7 @@ struct drbd_conf {
33689 int al_tr_cycle;
33690 int al_tr_pos; /* position of the next transaction in the journal */
33691 wait_queue_head_t seq_wait;
33692- atomic_t packet_seq;
33693+ atomic_unchecked_t packet_seq;
33694 unsigned int peer_seq;
33695 spinlock_t peer_seq_lock;
33696 unsigned int minor;
33697@@ -1527,7 +1527,7 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
33698 char __user *uoptval;
33699 int err;
33700
33701- uoptval = (char __user __force *)optval;
33702+ uoptval = (char __force_user *)optval;
33703
33704 set_fs(KERNEL_DS);
33705 if (level == SOL_SOCKET)
33706diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
33707index 8c13eeb..217adee 100644
33708--- a/drivers/block/drbd/drbd_main.c
33709+++ b/drivers/block/drbd/drbd_main.c
33710@@ -1317,7 +1317,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
33711 p->sector = sector;
33712 p->block_id = block_id;
33713 p->blksize = blksize;
33714- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
33715+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
33716 return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
33717 }
33718
33719@@ -1619,7 +1619,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
33720 return -EIO;
33721 p->sector = cpu_to_be64(req->i.sector);
33722 p->block_id = (unsigned long)req;
33723- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
33724+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
33725 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
33726 if (mdev->state.conn >= C_SYNC_SOURCE &&
33727 mdev->state.conn <= C_PAUSED_SYNC_T)
33728@@ -2574,8 +2574,8 @@ void conn_destroy(struct kref *kref)
33729 {
33730 struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);
33731
33732- if (atomic_read(&tconn->current_epoch->epoch_size) != 0)
33733- conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
33734+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size) != 0)
33735+ conn_err(tconn, "epoch_size:%d\n", atomic_read_unchecked(&tconn->current_epoch->epoch_size));
33736 kfree(tconn->current_epoch);
33737
33738 idr_destroy(&tconn->volumes);
33739diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
33740index a9eccfc..f5efe87 100644
33741--- a/drivers/block/drbd/drbd_receiver.c
33742+++ b/drivers/block/drbd/drbd_receiver.c
33743@@ -833,7 +833,7 @@ int drbd_connected(struct drbd_conf *mdev)
33744 {
33745 int err;
33746
33747- atomic_set(&mdev->packet_seq, 0);
33748+ atomic_set_unchecked(&mdev->packet_seq, 0);
33749 mdev->peer_seq = 0;
33750
33751 mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
33752@@ -1191,7 +1191,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
33753 do {
33754 next_epoch = NULL;
33755
33756- epoch_size = atomic_read(&epoch->epoch_size);
33757+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
33758
33759 switch (ev & ~EV_CLEANUP) {
33760 case EV_PUT:
33761@@ -1231,7 +1231,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
33762 rv = FE_DESTROYED;
33763 } else {
33764 epoch->flags = 0;
33765- atomic_set(&epoch->epoch_size, 0);
33766+ atomic_set_unchecked(&epoch->epoch_size, 0);
33767 /* atomic_set(&epoch->active, 0); is already zero */
33768 if (rv == FE_STILL_LIVE)
33769 rv = FE_RECYCLED;
33770@@ -1449,7 +1449,7 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
33771 conn_wait_active_ee_empty(tconn);
33772 drbd_flush(tconn);
33773
33774- if (atomic_read(&tconn->current_epoch->epoch_size)) {
33775+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
33776 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
33777 if (epoch)
33778 break;
33779@@ -1462,11 +1462,11 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
33780 }
33781
33782 epoch->flags = 0;
33783- atomic_set(&epoch->epoch_size, 0);
33784+ atomic_set_unchecked(&epoch->epoch_size, 0);
33785 atomic_set(&epoch->active, 0);
33786
33787 spin_lock(&tconn->epoch_lock);
33788- if (atomic_read(&tconn->current_epoch->epoch_size)) {
33789+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
33790 list_add(&epoch->list, &tconn->current_epoch->list);
33791 tconn->current_epoch = epoch;
33792 tconn->epochs++;
33793@@ -2170,7 +2170,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
33794
33795 err = wait_for_and_update_peer_seq(mdev, peer_seq);
33796 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
33797- atomic_inc(&tconn->current_epoch->epoch_size);
33798+ atomic_inc_unchecked(&tconn->current_epoch->epoch_size);
33799 err2 = drbd_drain_block(mdev, pi->size);
33800 if (!err)
33801 err = err2;
33802@@ -2204,7 +2204,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
33803
33804 spin_lock(&tconn->epoch_lock);
33805 peer_req->epoch = tconn->current_epoch;
33806- atomic_inc(&peer_req->epoch->epoch_size);
33807+ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
33808 atomic_inc(&peer_req->epoch->active);
33809 spin_unlock(&tconn->epoch_lock);
33810
33811@@ -4346,7 +4346,7 @@ struct data_cmd {
33812 int expect_payload;
33813 size_t pkt_size;
33814 int (*fn)(struct drbd_tconn *, struct packet_info *);
33815-};
33816+} __do_const;
33817
33818 static struct data_cmd drbd_cmd_handler[] = {
33819 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
33820@@ -4466,7 +4466,7 @@ static void conn_disconnect(struct drbd_tconn *tconn)
33821 if (!list_empty(&tconn->current_epoch->list))
33822 conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
33823 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
33824- atomic_set(&tconn->current_epoch->epoch_size, 0);
33825+ atomic_set_unchecked(&tconn->current_epoch->epoch_size, 0);
33826 tconn->send.seen_any_write_yet = false;
33827
33828 conn_info(tconn, "Connection closed\n");
33829@@ -5222,7 +5222,7 @@ static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
33830 struct asender_cmd {
33831 size_t pkt_size;
33832 int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
33833-};
33834+} __do_const;
33835
33836 static struct asender_cmd asender_tbl[] = {
33837 [P_PING] = { 0, got_Ping },
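
The drbd hunks above swap atomic_t for atomic_unchecked_t on epoch_size and packet_seq. Under PaX's REFCOUNT hardening every atomic_t increment is instrumented to detect overflow; counters that are statistics or wire sequence numbers may legitimately wrap, so they move to the unchecked variant, which keeps the same API minus the trap. An illustrative plain-C analogue (the real types use arch-specific asm, not this):

#include <limits.h>
#include <stdio.h>

typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile int counter; } atomic_unchecked_t;

static int atomic_inc_return(atomic_t *v)
{
        /* REFCOUNT-style check: refuse to wrap a reference count. */
        if (v->counter == INT_MAX)
                return v->counter;        /* saturate instead of overflowing */
        return ++v->counter;
}

static int atomic_inc_return_unchecked(atomic_unchecked_t *v)
{
        return ++v->counter;              /* sequence numbers may wrap freely */
}

int main(void)
{
        atomic_t ref = { 0 };
        atomic_unchecked_t seq = { 0 };
        printf("ref=%d seq=%d\n",
               atomic_inc_return(&ref), atomic_inc_return_unchecked(&seq));
        return 0;
}

The __do_const annotations on data_cmd and asender_cmd serve the companion goal: they let the constify plugin move those handler tables into read-only memory.
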
33838diff --git a/drivers/block/loop.c b/drivers/block/loop.c
33839index f74f2c0..bb668af 100644
33840--- a/drivers/block/loop.c
33841+++ b/drivers/block/loop.c
33842@@ -226,7 +226,7 @@ static int __do_lo_send_write(struct file *file,
33843 mm_segment_t old_fs = get_fs();
33844
33845 set_fs(get_ds());
33846- bw = file->f_op->write(file, buf, len, &pos);
33847+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
33848 set_fs(old_fs);
33849 if (likely(bw == len))
33850 return 0;
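
The loop.c change is a sparse-annotation fix: __do_lo_send_write() calls ->write() under set_fs(KERNEL_DS) with a kernel buffer, so the pointer has to be cast into the user address space explicitly. A sketch of how such annotations are typically wired up; __force_user is assumed to expand to __force __user, and only sparse (__CHECKER__) sees the attributes:

#include <stdio.h>

#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif

#define __force_user __force __user     /* assumed expansion */

static long fake_write(const char __user *buf, unsigned long len)
{
        (void)buf;                      /* a real driver would copy here */
        return (long)len;
}

static long send_kernel_buf(const char *kbuf, unsigned long len)
{
        /* set_fs(KERNEL_DS) is assumed in effect; the cast tells sparse
         * that crossing address spaces here is deliberate, silencing the
         * warning without weakening checking elsewhere. */
        return fake_write((const char __force_user *)kbuf, len);
}

int main(void)
{
        printf("%ld\n", send_kernel_buf("abc", 3));
        return 0;
}
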
33851diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
33852index d620b44..587561e 100644
33853--- a/drivers/cdrom/cdrom.c
33854+++ b/drivers/cdrom/cdrom.c
33855@@ -416,7 +416,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
33856 ENSURE(reset, CDC_RESET);
33857 ENSURE(generic_packet, CDC_GENERIC_PACKET);
33858 cdi->mc_flags = 0;
33859- cdo->n_minors = 0;
33860 cdi->options = CDO_USE_FFLAGS;
33861
33862 if (autoclose==1 && CDROM_CAN(CDC_CLOSE_TRAY))
33863@@ -436,8 +435,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
33864 else
33865 cdi->cdda_method = CDDA_OLD;
33866
33867- if (!cdo->generic_packet)
33868- cdo->generic_packet = cdrom_dummy_generic_packet;
33869+ if (!cdo->generic_packet) {
33870+ pax_open_kernel();
33871+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
33872+ pax_close_kernel();
33873+ }
33874
33875 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
33876 mutex_lock(&cdrom_mutex);
33877@@ -458,7 +460,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
33878 if (cdi->exit)
33879 cdi->exit(cdi);
33880
33881- cdi->ops->n_minors--;
33882 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
33883 }
33884
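
register_cdrom() above must install the dummy generic_packet handler into a cdrom_device_ops structure that the constify plugin has made read-only, hence the pax_open_kernel()/pax_close_kernel() bracket. A userspace analogue using mprotect(), purely to show the shape; the in-kernel primitive works differently (e.g. toggling the CR0.WP bit on x86):

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static void dummy_packet(void) { puts("dummy generic_packet"); }

struct cdrom_device_ops {
        void (*generic_packet)(void);
};

int main(void)
{
        size_t pg = (size_t)sysconf(_SC_PAGESIZE);
        struct cdrom_device_ops *cdo = mmap(NULL, pg, PROT_READ | PROT_WRITE,
                                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (cdo == MAP_FAILED)
                return 1;
        mprotect(cdo, pg, PROT_READ);               /* normally immutable   */

        mprotect(cdo, pg, PROT_READ | PROT_WRITE);  /* ~ pax_open_kernel()  */
        *(void **)&cdo->generic_packet = (void *)dummy_packet;
        mprotect(cdo, pg, PROT_READ);               /* ~ pax_close_kernel() */

        cdo->generic_packet();
        munmap(cdo, pg);
        return 0;
}

The *(void **)& idiom is the same one the patch uses to write through a pointer-to-const member.
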
33885diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
33886index d59cdcb..11afddf 100644
33887--- a/drivers/cdrom/gdrom.c
33888+++ b/drivers/cdrom/gdrom.c
33889@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
33890 .audio_ioctl = gdrom_audio_ioctl,
33891 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
33892 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
33893- .n_minors = 1,
33894 };
33895
33896 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
33897diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
33898index 72bedad..8181ce1 100644
33899--- a/drivers/char/Kconfig
33900+++ b/drivers/char/Kconfig
33901@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
33902
33903 config DEVKMEM
33904 bool "/dev/kmem virtual device support"
33905- default y
33906+ default n
33907+ depends on !GRKERNSEC_KMEM
33908 help
33909 Say Y here if you want to support the /dev/kmem device. The
33910 /dev/kmem device is rarely used, but can be used for certain
33911@@ -581,6 +582,7 @@ config DEVPORT
33912 bool
33913 depends on !M68K
33914 depends on ISA || PCI
33915+ depends on !GRKERNSEC_KMEM
33916 default y
33917
33918 source "drivers/s390/char/Kconfig"
33919diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
33920index 2e04433..22afc64 100644
33921--- a/drivers/char/agp/frontend.c
33922+++ b/drivers/char/agp/frontend.c
33923@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
33924 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
33925 return -EFAULT;
33926
33927- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
33928+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
33929 return -EFAULT;
33930
33931 client = agp_find_client_by_pid(reserve.pid);
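
The agpgart fix tightens an allocation-overflow guard: seg_count is later multiplied by sizeof(struct agp_segment_priv), so the bound must be computed against that type, not the smaller user-visible struct agp_segment. The pattern, as a small runnable check:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the type actually allocated per element; the original
 * bug validated against a smaller struct instead. */
struct agp_segment_priv { unsigned pg_start, pg_count; void *prot; };

static void *alloc_segments(unsigned seg_count)
{
        /* Reject counts whose multiplication would wrap. */
        if (seg_count >= ~0U / sizeof(struct agp_segment_priv))
                return NULL;
        return calloc(seg_count, sizeof(struct agp_segment_priv));
}

int main(void)
{
        void *ok  = alloc_segments(16);
        void *bad = alloc_segments(0x40000000u);
        printf("ok=%p bad=%p\n", ok, bad);
        free(ok);
        return 0;
}
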
33932diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
33933index 21cb980..f15107c 100644
33934--- a/drivers/char/genrtc.c
33935+++ b/drivers/char/genrtc.c
33936@@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct file *file,
33937 switch (cmd) {
33938
33939 case RTC_PLL_GET:
33940+ memset(&pll, 0, sizeof(pll));
33941 if (get_rtc_pll(&pll))
33942 return -EINVAL;
33943 else
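
The genrtc fix zeroes the rtc_pll_info before filling it, the standard defence against leaking stale kernel stack bytes through struct padding or through fields the fill function skips. A compact demonstration of why the memset matters:

#include <stdio.h>
#include <string.h>

struct pll_info {            /* stand-in for struct rtc_pll_info  */
        char ctrl;           /* compilers pad 3 bytes after this  */
        int  value;
};

static void get_pll(struct pll_info *p)
{
        p->ctrl  = 1;        /* padding and skipped fields keep   */
        p->value = 42;       /* whatever was on the stack before  */
}

int main(void)
{
        struct pll_info p;
        unsigned char raw[sizeof(p)];

        memset(&p, 0, sizeof(p));   /* the fix: nothing stale survives */
        get_pll(&p);
        memcpy(raw, &p, sizeof(p));
        for (size_t i = 0; i < sizeof(raw); i++)
                printf("%02x ", raw[i]);
        putchar('\n');
        return 0;
}
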
33944diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
33945index fe6d4be..89f32100 100644
33946--- a/drivers/char/hpet.c
33947+++ b/drivers/char/hpet.c
33948@@ -571,7 +571,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
33949 }
33950
33951 static int
33952-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
33953+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
33954 struct hpet_info *info)
33955 {
33956 struct hpet_timer __iomem *timer;
33957diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
33958index 053201b0..8335cce 100644
33959--- a/drivers/char/ipmi/ipmi_msghandler.c
33960+++ b/drivers/char/ipmi/ipmi_msghandler.c
33961@@ -420,7 +420,7 @@ struct ipmi_smi {
33962 struct proc_dir_entry *proc_dir;
33963 char proc_dir_name[10];
33964
33965- atomic_t stats[IPMI_NUM_STATS];
33966+ atomic_unchecked_t stats[IPMI_NUM_STATS];
33967
33968 /*
33969 * run_to_completion duplicate of smb_info, smi_info
33970@@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
33971
33972
33973 #define ipmi_inc_stat(intf, stat) \
33974- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
33975+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
33976 #define ipmi_get_stat(intf, stat) \
33977- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
33978+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
33979
33980 static int is_lan_addr(struct ipmi_addr *addr)
33981 {
33982@@ -2884,7 +2884,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
33983 INIT_LIST_HEAD(&intf->cmd_rcvrs);
33984 init_waitqueue_head(&intf->waitq);
33985 for (i = 0; i < IPMI_NUM_STATS; i++)
33986- atomic_set(&intf->stats[i], 0);
33987+ atomic_set_unchecked(&intf->stats[i], 0);
33988
33989 intf->proc_dir = NULL;
33990
33991diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
33992index 1c7fdcd..4899100 100644
33993--- a/drivers/char/ipmi/ipmi_si_intf.c
33994+++ b/drivers/char/ipmi/ipmi_si_intf.c
33995@@ -275,7 +275,7 @@ struct smi_info {
33996 unsigned char slave_addr;
33997
33998 /* Counters and things for the proc filesystem. */
33999- atomic_t stats[SI_NUM_STATS];
34000+ atomic_unchecked_t stats[SI_NUM_STATS];
34001
34002 struct task_struct *thread;
34003
34004@@ -284,9 +284,9 @@ struct smi_info {
34005 };
34006
34007 #define smi_inc_stat(smi, stat) \
34008- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
34009+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
34010 #define smi_get_stat(smi, stat) \
34011- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
34012+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
34013
34014 #define SI_MAX_PARMS 4
34015
34016@@ -3225,7 +3225,7 @@ static int try_smi_init(struct smi_info *new_smi)
34017 atomic_set(&new_smi->req_events, 0);
34018 new_smi->run_to_completion = 0;
34019 for (i = 0; i < SI_NUM_STATS; i++)
34020- atomic_set(&new_smi->stats[i], 0);
34021+ atomic_set_unchecked(&new_smi->stats[i], 0);
34022
34023 new_smi->interrupt_disabled = 1;
34024 atomic_set(&new_smi->stop_operation, 0);
34025diff --git a/drivers/char/mem.c b/drivers/char/mem.c
34026index c6fa3bc..4ca3e42 100644
34027--- a/drivers/char/mem.c
34028+++ b/drivers/char/mem.c
34029@@ -18,6 +18,7 @@
34030 #include <linux/raw.h>
34031 #include <linux/tty.h>
34032 #include <linux/capability.h>
34033+#include <linux/security.h>
34034 #include <linux/ptrace.h>
34035 #include <linux/device.h>
34036 #include <linux/highmem.h>
34037@@ -37,6 +38,10 @@
34038
34039 #define DEVPORT_MINOR 4
34040
34041+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
34042+extern const struct file_operations grsec_fops;
34043+#endif
34044+
34045 static inline unsigned long size_inside_page(unsigned long start,
34046 unsigned long size)
34047 {
34048@@ -68,9 +73,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
34049
34050 while (cursor < to) {
34051 if (!devmem_is_allowed(pfn)) {
34052+#ifdef CONFIG_GRKERNSEC_KMEM
34053+ gr_handle_mem_readwrite(from, to);
34054+#else
34055 printk(KERN_INFO
34056 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
34057 current->comm, from, to);
34058+#endif
34059 return 0;
34060 }
34061 cursor += PAGE_SIZE;
34062@@ -78,6 +87,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
34063 }
34064 return 1;
34065 }
34066+#elif defined(CONFIG_GRKERNSEC_KMEM)
34067+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
34068+{
34069+ return 0;
34070+}
34071 #else
34072 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
34073 {
34074@@ -120,6 +134,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
34075
34076 while (count > 0) {
34077 unsigned long remaining;
34078+ char *temp;
34079
34080 sz = size_inside_page(p, count);
34081
34082@@ -135,7 +150,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
34083 if (!ptr)
34084 return -EFAULT;
34085
34086- remaining = copy_to_user(buf, ptr, sz);
34087+#ifdef CONFIG_PAX_USERCOPY
34088+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
34089+ if (!temp) {
34090+ unxlate_dev_mem_ptr(p, ptr);
34091+ return -ENOMEM;
34092+ }
34093+ memcpy(temp, ptr, sz);
34094+#else
34095+ temp = ptr;
34096+#endif
34097+
34098+ remaining = copy_to_user(buf, temp, sz);
34099+
34100+#ifdef CONFIG_PAX_USERCOPY
34101+ kfree(temp);
34102+#endif
34103+
34104 unxlate_dev_mem_ptr(p, ptr);
34105 if (remaining)
34106 return -EFAULT;
34107@@ -398,9 +429,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
34108 size_t count, loff_t *ppos)
34109 {
34110 unsigned long p = *ppos;
34111- ssize_t low_count, read, sz;
34112+ ssize_t low_count, read, sz, err = 0;
34113 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
34114- int err = 0;
34115
34116 read = 0;
34117 if (p < (unsigned long) high_memory) {
34118@@ -422,6 +452,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
34119 }
34120 #endif
34121 while (low_count > 0) {
34122+ char *temp;
34123+
34124 sz = size_inside_page(p, low_count);
34125
34126 /*
34127@@ -431,7 +463,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
34128 */
34129 kbuf = xlate_dev_kmem_ptr((char *)p);
34130
34131- if (copy_to_user(buf, kbuf, sz))
34132+#ifdef CONFIG_PAX_USERCOPY
34133+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
34134+ if (!temp)
34135+ return -ENOMEM;
34136+ memcpy(temp, kbuf, sz);
34137+#else
34138+ temp = kbuf;
34139+#endif
34140+
34141+ err = copy_to_user(buf, temp, sz);
34142+
34143+#ifdef CONFIG_PAX_USERCOPY
34144+ kfree(temp);
34145+#endif
34146+
34147+ if (err)
34148 return -EFAULT;
34149 buf += sz;
34150 p += sz;
34151@@ -833,6 +880,9 @@ static const struct memdev {
34152 #ifdef CONFIG_CRASH_DUMP
34153 [12] = { "oldmem", 0, &oldmem_fops, NULL },
34154 #endif
34155+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
34156+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
34157+#endif
34158 };
34159
34160 static int memory_open(struct inode *inode, struct file *filp)
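
The /dev/mem and /dev/kmem reads above gain a bounce buffer under CONFIG_PAX_USERCOPY: the hardened copy_to_user() validates that its kernel source is a well-formed slab or stack object, which an arbitrary translated device pointer is not, so the data is first staged through a fresh kmalloc allocation. The shape of the pattern, with copy_to_user() stubbed out:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for copy_to_user(); returns the number of bytes NOT copied. */
static unsigned long copy_to_user(void *dst, const void *src, size_t n)
{
        memcpy(dst, src, n);
        return 0;
}

static int read_mem_chunk(void *ubuf, const void *devptr, size_t sz)
{
        char *temp = malloc(sz);   /* ~ kmalloc(sz, GFP_KERNEL|GFP_USERCOPY) */
        unsigned long remaining;

        if (!temp)
                return -12;                        /* -ENOMEM */
        memcpy(temp, devptr, sz);                  /* stage the raw mapping  */
        remaining = copy_to_user(ubuf, temp, sz);  /* checked copy now sees  */
        free(temp);                                /* a proper heap object   */
        return remaining ? -14 : 0;                /* -EFAULT */
}

int main(void)
{
        char dev[8] = "mmio!!!", out[8];
        printf("%d %s\n", read_mem_chunk(out, dev, sizeof(dev)), out);
        return 0;
}
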
34161diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
34162index 9df78e2..01ba9ae 100644
34163--- a/drivers/char/nvram.c
34164+++ b/drivers/char/nvram.c
34165@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
34166
34167 spin_unlock_irq(&rtc_lock);
34168
34169- if (copy_to_user(buf, contents, tmp - contents))
34170+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
34171 return -EFAULT;
34172
34173 *ppos = i;
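
The nvram_read() change re-validates the computed length against the real capacity of the on-stack buffer right at the copy site, so even a bug in the fill loop cannot turn into a kernel stack disclosure; the same clamp reappears in the random.c hunk below. As a standalone check:

#include <stdio.h>
#include <string.h>

static int copy_out(char *dst, const char *buf, const char *end, size_t cap)
{
        size_t len = (size_t)(end - buf);

        /* Re-validate here instead of trusting the loop that produced 'end'. */
        if (len > cap)
                return -14;            /* -EFAULT */
        memcpy(dst, buf, len);
        return 0;
}

int main(void)
{
        char contents[128] = "nvram bytes";
        char user[128];

        printf("%d\n", copy_out(user, contents, contents + 11, sizeof(contents)));
        printf("%d\n", copy_out(user, contents, contents + 999, sizeof(contents)));
        return 0;
}
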
34174diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
34175index b66eaa0..2619d1b 100644
34176--- a/drivers/char/pcmcia/synclink_cs.c
34177+++ b/drivers/char/pcmcia/synclink_cs.c
34178@@ -2348,9 +2348,9 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
34179
34180 if (debug_level >= DEBUG_LEVEL_INFO)
34181 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
34182- __FILE__,__LINE__, info->device_name, port->count);
34183+ __FILE__,__LINE__, info->device_name, atomic_read(&port->count));
34184
34185- WARN_ON(!port->count);
34186+ WARN_ON(!atomic_read(&port->count));
34187
34188 if (tty_port_close_start(port, tty, filp) == 0)
34189 goto cleanup;
34190@@ -2368,7 +2368,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
34191 cleanup:
34192 if (debug_level >= DEBUG_LEVEL_INFO)
34193 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__,__LINE__,
34194- tty->driver->name, port->count);
34195+ tty->driver->name, atomic_read(&port->count));
34196 }
34197
34198 /* Wait until the transmitter is empty.
34199@@ -2510,7 +2510,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
34200
34201 if (debug_level >= DEBUG_LEVEL_INFO)
34202 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
34203- __FILE__,__LINE__,tty->driver->name, port->count);
34204+ __FILE__,__LINE__,tty->driver->name, atomic_read(&port->count));
34205
34206 /* If port is closing, signal caller to try again */
34207 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
34208@@ -2530,11 +2530,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
34209 goto cleanup;
34210 }
34211 spin_lock(&port->lock);
34212- port->count++;
34213+ atomic_inc(&port->count);
34214 spin_unlock(&port->lock);
34215 spin_unlock_irqrestore(&info->netlock, flags);
34216
34217- if (port->count == 1) {
34218+ if (atomic_read(&port->count) == 1) {
34219 /* 1st open on this device, init hardware */
34220 retval = startup(info, tty);
34221 if (retval < 0)
34222@@ -3889,7 +3889,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
34223 unsigned short new_crctype;
34224
34225 /* return error if TTY interface open */
34226- if (info->port.count)
34227+ if (atomic_read(&info->port.count))
34228 return -EBUSY;
34229
34230 switch (encoding)
34231@@ -3992,7 +3992,7 @@ static int hdlcdev_open(struct net_device *dev)
34232
34233 /* arbitrate between network and tty opens */
34234 spin_lock_irqsave(&info->netlock, flags);
34235- if (info->port.count != 0 || info->netcount != 0) {
34236+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
34237 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
34238 spin_unlock_irqrestore(&info->netlock, flags);
34239 return -EBUSY;
34240@@ -4081,7 +4081,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
34241 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
34242
34243 /* return error if TTY interface open */
34244- if (info->port.count)
34245+ if (atomic_read(&info->port.count))
34246 return -EBUSY;
34247
34248 if (cmd != SIOCWANDEV)
34249diff --git a/drivers/char/random.c b/drivers/char/random.c
34250index 57d4b15..253207b 100644
34251--- a/drivers/char/random.c
34252+++ b/drivers/char/random.c
34253@@ -272,8 +272,13 @@
34254 /*
34255 * Configuration information
34256 */
34257+#ifdef CONFIG_GRKERNSEC_RANDNET
34258+#define INPUT_POOL_WORDS 512
34259+#define OUTPUT_POOL_WORDS 128
34260+#else
34261 #define INPUT_POOL_WORDS 128
34262 #define OUTPUT_POOL_WORDS 32
34263+#endif
34264 #define SEC_XFER_SIZE 512
34265 #define EXTRACT_SIZE 10
34266
34267@@ -313,10 +318,17 @@ static struct poolinfo {
34268 int poolwords;
34269 int tap1, tap2, tap3, tap4, tap5;
34270 } poolinfo_table[] = {
34271+#ifdef CONFIG_GRKERNSEC_RANDNET
34272+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
34273+ { 512, 411, 308, 208, 104, 1 },
34274+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
34275+ { 128, 103, 76, 51, 25, 1 },
34276+#else
34277 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
34278 { 128, 103, 76, 51, 25, 1 },
34279 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
34280 { 32, 26, 20, 14, 7, 1 },
34281+#endif
34282 #if 0
34283 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
34284 { 2048, 1638, 1231, 819, 411, 1 },
34285@@ -524,8 +536,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
34286 input_rotate += i ? 7 : 14;
34287 }
34288
34289- ACCESS_ONCE(r->input_rotate) = input_rotate;
34290- ACCESS_ONCE(r->add_ptr) = i;
34291+ ACCESS_ONCE_RW(r->input_rotate) = input_rotate;
34292+ ACCESS_ONCE_RW(r->add_ptr) = i;
34293 smp_wmb();
34294
34295 if (out)
34296@@ -1024,7 +1036,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
34297
34298 extract_buf(r, tmp);
34299 i = min_t(int, nbytes, EXTRACT_SIZE);
34300- if (copy_to_user(buf, tmp, i)) {
34301+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
34302 ret = -EFAULT;
34303 break;
34304 }
34305@@ -1360,7 +1372,7 @@ EXPORT_SYMBOL(generate_random_uuid);
34306 #include <linux/sysctl.h>
34307
34308 static int min_read_thresh = 8, min_write_thresh;
34309-static int max_read_thresh = INPUT_POOL_WORDS * 32;
34310+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
34311 static int max_write_thresh = INPUT_POOL_WORDS * 32;
34312 static char sysctl_bootid[16];
34313
34314@@ -1376,7 +1388,7 @@ static char sysctl_bootid[16];
34315 static int proc_do_uuid(ctl_table *table, int write,
34316 void __user *buffer, size_t *lenp, loff_t *ppos)
34317 {
34318- ctl_table fake_table;
34319+ ctl_table_no_const fake_table;
34320 unsigned char buf[64], tmp_uuid[16], *uuid;
34321
34322 uuid = table->data;
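
With GRKERNSEC_RANDNET the input and output entropy pools grow fourfold (512 and 128 32-bit words) and matching feedback polynomials are supplied: each { poolwords, tap1..tap5 } row encodes x^poolwords + x^tap1 + ... + x^tap5 + x + 1, and mixing XORs the pool words at those offsets. A much-simplified one-word mixing step over the 128-word pool (the real _mix_pool_bytes also rotates the input and applies a twist table):

#include <stdint.h>
#include <stdio.h>

#define POOLWORDS 128                   /* must be a power of two */
static const int taps[] = { 103, 76, 51, 25, 1 };  /* x^128+x^103+...+x+1 */

static uint32_t pool[POOLWORDS];
static unsigned add_ptr;

static void mix_word(uint32_t w)
{
        unsigned i;

        add_ptr = (add_ptr - 1) & (POOLWORDS - 1);
        i = add_ptr;
        for (size_t t = 0; t < sizeof(taps) / sizeof(taps[0]); t++)
                w ^= pool[(i + taps[t]) & (POOLWORDS - 1)];
        w ^= pool[i];                   /* the trailing +1 term */
        pool[i] = w;
}

int main(void)
{
        mix_word(0xdeadbeef);
        mix_word(0x12345678);
        printf("%08x %08x\n", pool[126], pool[127]);
        return 0;
}
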
34323diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
34324index d780295..b29f3a8 100644
34325--- a/drivers/char/sonypi.c
34326+++ b/drivers/char/sonypi.c
34327@@ -54,6 +54,7 @@
34328
34329 #include <asm/uaccess.h>
34330 #include <asm/io.h>
34331+#include <asm/local.h>
34332
34333 #include <linux/sonypi.h>
34334
34335@@ -490,7 +491,7 @@ static struct sonypi_device {
34336 spinlock_t fifo_lock;
34337 wait_queue_head_t fifo_proc_list;
34338 struct fasync_struct *fifo_async;
34339- int open_count;
34340+ local_t open_count;
34341 int model;
34342 struct input_dev *input_jog_dev;
34343 struct input_dev *input_key_dev;
34344@@ -897,7 +898,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
34345 static int sonypi_misc_release(struct inode *inode, struct file *file)
34346 {
34347 mutex_lock(&sonypi_device.lock);
34348- sonypi_device.open_count--;
34349+ local_dec(&sonypi_device.open_count);
34350 mutex_unlock(&sonypi_device.lock);
34351 return 0;
34352 }
34353@@ -906,9 +907,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
34354 {
34355 mutex_lock(&sonypi_device.lock);
34356 /* Flush input queue on first open */
34357- if (!sonypi_device.open_count)
34358+ if (!local_read(&sonypi_device.open_count))
34359 kfifo_reset(&sonypi_device.fifo);
34360- sonypi_device.open_count++;
34361+ local_inc(&sonypi_device.open_count);
34362 mutex_unlock(&sonypi_device.lock);
34363
34364 return 0;
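
sonypi's open_count moves from a plain int to a local_t updated with local_inc()/local_dec(), which compile to single uninterruptible read-modify-write instructions on most architectures, so the count stays coherent even on paths not serialized by sonypi_device.lock. A userspace stand-in built on C11 atomics (the kernel's local_t is a cheaper per-CPU atomic):

#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_long counter; } local_t;

static void local_inc(local_t *l)  { atomic_fetch_add(&l->counter, 1); }
static void local_dec(local_t *l)  { atomic_fetch_sub(&l->counter, 1); }
static long local_read(local_t *l) { return atomic_load(&l->counter); }

static local_t open_count;

int main(void)
{
        if (!local_read(&open_count))
                puts("first open: reset fifo");
        local_inc(&open_count);
        local_dec(&open_count);
        printf("count=%ld\n", local_read(&open_count));
        return 0;
}
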
34365diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
34366index 93211df..c7805f7 100644
34367--- a/drivers/char/tpm/tpm.c
34368+++ b/drivers/char/tpm/tpm.c
34369@@ -410,7 +410,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
34370 chip->vendor.req_complete_val)
34371 goto out_recv;
34372
34373- if ((status == chip->vendor.req_canceled)) {
34374+ if (status == chip->vendor.req_canceled) {
34375 dev_err(chip->dev, "Operation Canceled\n");
34376 rc = -ECANCELED;
34377 goto out;
34378diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
34379index 56051d0..11cf3b7 100644
34380--- a/drivers/char/tpm/tpm_acpi.c
34381+++ b/drivers/char/tpm/tpm_acpi.c
34382@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
34383 virt = acpi_os_map_memory(start, len);
34384 if (!virt) {
34385 kfree(log->bios_event_log);
34386+ log->bios_event_log = NULL;
34387 printk("%s: ERROR - Unable to map memory\n", __func__);
34388 return -EIO;
34389 }
34390
34391- memcpy_fromio(log->bios_event_log, virt, len);
34392+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
34393
34394 acpi_os_unmap_memory(virt, len);
34395 return 0;
34396diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
34397index 84ddc55..1d32f1e 100644
34398--- a/drivers/char/tpm/tpm_eventlog.c
34399+++ b/drivers/char/tpm/tpm_eventlog.c
34400@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
34401 event = addr;
34402
34403 if ((event->event_type == 0 && event->event_size == 0) ||
34404- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
34405+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
34406 return NULL;
34407
34408 return addr;
34409@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
34410 return NULL;
34411
34412 if ((event->event_type == 0 && event->event_size == 0) ||
34413- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
34414+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
34415 return NULL;
34416
34417 (*pos)++;
34418@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
34419 int i;
34420
34421 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
34422- seq_putc(m, data[i]);
34423+ if (!seq_putc(m, data[i]))
34424+ return -EFAULT;
34425
34426 return 0;
34427 }
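
The tpm_eventlog bounds checks are rewritten to avoid pointer-arithmetic wraparound: with an attacker-influenced event_size, addr + sizeof(struct tcpa_event) + event_size can wrap past the end of the address space and compare as in-bounds, whereas moving the terms to the other side of the comparison keeps every intermediate value meaningful. The safe shape:

#include <stdint.h>
#include <stdio.h>

struct tcpa_event { uint32_t event_type, event_size; };

/* Caller guarantees addr < limit. */
static int event_in_bounds(const char *addr, const char *limit,
                           uint32_t event_size)
{
        size_t room = (size_t)(limit - addr);

        if (room < sizeof(struct tcpa_event))     /* header must fit first */
                return 0;
        /* Compare sizes, never summed pointers that can wrap: */
        return event_size < room - sizeof(struct tcpa_event);
}

int main(void)
{
        char log[64];

        printf("%d\n", event_in_bounds(log, log + sizeof(log), 16));
        printf("%d\n", event_in_bounds(log, log + sizeof(log), 0xffffffffu));
        return 0;
}
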
34428diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
34429index a4b7aa0..2faa0bc 100644
34430--- a/drivers/char/virtio_console.c
34431+++ b/drivers/char/virtio_console.c
34432@@ -685,7 +685,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
34433 if (to_user) {
34434 ssize_t ret;
34435
34436- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
34437+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
34438 if (ret)
34439 return -EFAULT;
34440 } else {
34441@@ -784,7 +784,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
34442 if (!port_has_data(port) && !port->host_connected)
34443 return 0;
34444
34445- return fill_readbuf(port, ubuf, count, true);
34446+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
34447 }
34448
34449 static int wait_port_writable(struct port *port, bool nonblock)
34450diff --git a/drivers/clocksource/arm_generic.c b/drivers/clocksource/arm_generic.c
34451index 8ae1a61..9c00613 100644
34452--- a/drivers/clocksource/arm_generic.c
34453+++ b/drivers/clocksource/arm_generic.c
34454@@ -181,7 +181,7 @@ static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
34455 return NOTIFY_OK;
34456 }
34457
34458-static struct notifier_block __cpuinitdata arch_timer_cpu_nb = {
34459+static struct notifier_block arch_timer_cpu_nb = {
34460 .notifier_call = arch_timer_cpu_notify,
34461 };
34462
34463diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
34464index 7b0d49d..134fac9 100644
34465--- a/drivers/cpufreq/acpi-cpufreq.c
34466+++ b/drivers/cpufreq/acpi-cpufreq.c
34467@@ -172,7 +172,7 @@ static ssize_t show_global_boost(struct kobject *kobj,
34468 return sprintf(buf, "%u\n", boost_enabled);
34469 }
34470
34471-static struct global_attr global_boost = __ATTR(boost, 0644,
34472+static global_attr_no_const global_boost = __ATTR(boost, 0644,
34473 show_global_boost,
34474 store_global_boost);
34475
34476@@ -712,8 +712,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
34477 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
34478 per_cpu(acfreq_data, cpu) = data;
34479
34480- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
34481- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
34482+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
34483+ pax_open_kernel();
34484+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
34485+ pax_close_kernel();
34486+ }
34487
34488 result = acpi_processor_register_performance(data->acpi_data, cpu);
34489 if (result)
34490@@ -835,7 +838,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
34491 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
34492 break;
34493 case ACPI_ADR_SPACE_FIXED_HARDWARE:
34494- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
34495+ pax_open_kernel();
34496+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
34497+ pax_close_kernel();
34498 policy->cur = get_cur_freq_on_cpu(cpu);
34499 break;
34500 default:
34501@@ -846,8 +851,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
34502 acpi_processor_notify_smm(THIS_MODULE);
34503
34504 /* Check for APERF/MPERF support in hardware */
34505- if (boot_cpu_has(X86_FEATURE_APERFMPERF))
34506- acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
34507+ if (boot_cpu_has(X86_FEATURE_APERFMPERF)) {
34508+ pax_open_kernel();
34509+ *(void **)&acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
34510+ pax_close_kernel();
34511+ }
34512
34513 pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
34514 for (i = 0; i < perf->state_count; i++)
34515diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
34516index 1f93dbd..305cef1 100644
34517--- a/drivers/cpufreq/cpufreq.c
34518+++ b/drivers/cpufreq/cpufreq.c
34519@@ -1843,7 +1843,7 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
34520 return NOTIFY_OK;
34521 }
34522
34523-static struct notifier_block __refdata cpufreq_cpu_notifier = {
34524+static struct notifier_block cpufreq_cpu_notifier = {
34525 .notifier_call = cpufreq_cpu_callback,
34526 };
34527
34528@@ -1875,8 +1875,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
34529
34530 pr_debug("trying to register driver %s\n", driver_data->name);
34531
34532- if (driver_data->setpolicy)
34533- driver_data->flags |= CPUFREQ_CONST_LOOPS;
34534+ if (driver_data->setpolicy) {
34535+ pax_open_kernel();
34536+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
34537+ pax_close_kernel();
34538+ }
34539
34540 spin_lock_irqsave(&cpufreq_driver_lock, flags);
34541 if (cpufreq_driver) {
34542diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
34543index 6c5f1d3..c7e2f35e 100644
34544--- a/drivers/cpufreq/cpufreq_governor.c
34545+++ b/drivers/cpufreq/cpufreq_governor.c
34546@@ -243,7 +243,7 @@ int cpufreq_governor_dbs(struct dbs_data *dbs_data,
34547 * governor, thus we are bound to jiffes/HZ
34548 */
34549 if (dbs_data->governor == GOV_CONSERVATIVE) {
34550- struct cs_ops *ops = dbs_data->gov_ops;
34551+ const struct cs_ops *ops = dbs_data->gov_ops;
34552
34553 cpufreq_register_notifier(ops->notifier_block,
34554 CPUFREQ_TRANSITION_NOTIFIER);
34555@@ -251,7 +251,7 @@ int cpufreq_governor_dbs(struct dbs_data *dbs_data,
34556 dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
34557 jiffies_to_usecs(10);
34558 } else {
34559- struct od_ops *ops = dbs_data->gov_ops;
34560+ const struct od_ops *ops = dbs_data->gov_ops;
34561
34562 od_tuners->io_is_busy = ops->io_busy();
34563 }
34564@@ -268,7 +268,7 @@ second_time:
34565 cs_dbs_info->enable = 1;
34566 cs_dbs_info->requested_freq = policy->cur;
34567 } else {
34568- struct od_ops *ops = dbs_data->gov_ops;
34569+ const struct od_ops *ops = dbs_data->gov_ops;
34570 od_dbs_info->rate_mult = 1;
34571 od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
34572 ops->powersave_bias_init_cpu(cpu);
34573@@ -289,7 +289,7 @@ second_time:
34574 mutex_destroy(&cpu_cdbs->timer_mutex);
34575 dbs_data->enable--;
34576 if (!dbs_data->enable) {
34577- struct cs_ops *ops = dbs_data->gov_ops;
34578+ const struct cs_ops *ops = dbs_data->gov_ops;
34579
34580 sysfs_remove_group(cpufreq_global_kobject,
34581 dbs_data->attr_group);
34582diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
34583index f661654..6c8e638 100644
34584--- a/drivers/cpufreq/cpufreq_governor.h
34585+++ b/drivers/cpufreq/cpufreq_governor.h
34586@@ -142,7 +142,7 @@ struct dbs_data {
34587 void (*gov_check_cpu)(int cpu, unsigned int load);
34588
34589 /* Governor specific ops, see below */
34590- void *gov_ops;
34591+ const void *gov_ops;
34592 };
34593
34594 /* Governor specific ops, will be passed to dbs_data->gov_ops */
34595diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
34596index 9d7732b..0b1a793 100644
34597--- a/drivers/cpufreq/cpufreq_stats.c
34598+++ b/drivers/cpufreq/cpufreq_stats.c
34599@@ -340,7 +340,7 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
34600 }
34601
34602 /* priority=1 so this will get called before cpufreq_remove_dev */
34603-static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
34604+static struct notifier_block cpufreq_stat_cpu_notifier = {
34605 .notifier_call = cpufreq_stat_cpu_callback,
34606 .priority = 1,
34607 };
34608diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
34609index 827629c9..0bc6a03 100644
34610--- a/drivers/cpufreq/p4-clockmod.c
34611+++ b/drivers/cpufreq/p4-clockmod.c
34612@@ -167,10 +167,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
34613 case 0x0F: /* Core Duo */
34614 case 0x16: /* Celeron Core */
34615 case 0x1C: /* Atom */
34616- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34617+ pax_open_kernel();
34618+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34619+ pax_close_kernel();
34620 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
34621 case 0x0D: /* Pentium M (Dothan) */
34622- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34623+ pax_open_kernel();
34624+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34625+ pax_close_kernel();
34626 /* fall through */
34627 case 0x09: /* Pentium M (Banias) */
34628 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
34629@@ -182,7 +186,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
34630
34631 /* on P-4s, the TSC runs with constant frequency independent whether
34632 * throttling is active or not. */
34633- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34634+ pax_open_kernel();
34635+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34636+ pax_close_kernel();
34637
34638 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
34639 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
34640diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
34641index 3a953d5..f5993f6 100644
34642--- a/drivers/cpufreq/speedstep-centrino.c
34643+++ b/drivers/cpufreq/speedstep-centrino.c
34644@@ -353,8 +353,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
34645 !cpu_has(cpu, X86_FEATURE_EST))
34646 return -ENODEV;
34647
34648- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
34649- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
34650+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
34651+ pax_open_kernel();
34652+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
34653+ pax_close_kernel();
34654+ }
34655
34656 if (policy->cpu != 0)
34657 return -ENODEV;
34658diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
34659index e1f6860..f8de20b 100644
34660--- a/drivers/cpuidle/cpuidle.c
34661+++ b/drivers/cpuidle/cpuidle.c
34662@@ -279,7 +279,7 @@ static int poll_idle(struct cpuidle_device *dev,
34663
34664 static void poll_idle_init(struct cpuidle_driver *drv)
34665 {
34666- struct cpuidle_state *state = &drv->states[0];
34667+ cpuidle_state_no_const *state = &drv->states[0];
34668
34669 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
34670 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
34671diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
34672index ea2f8e7..70ac501 100644
34673--- a/drivers/cpuidle/governor.c
34674+++ b/drivers/cpuidle/governor.c
34675@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
34676 mutex_lock(&cpuidle_lock);
34677 if (__cpuidle_find_governor(gov->name) == NULL) {
34678 ret = 0;
34679- list_add_tail(&gov->governor_list, &cpuidle_governors);
34680+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
34681 if (!cpuidle_curr_governor ||
34682 cpuidle_curr_governor->rating < gov->rating)
34683 cpuidle_switch_governor(gov);
34684@@ -135,7 +135,7 @@ void cpuidle_unregister_governor(struct cpuidle_governor *gov)
34685 new_gov = cpuidle_replace_governor(gov->rating);
34686 cpuidle_switch_governor(new_gov);
34687 }
34688- list_del(&gov->governor_list);
34689+ pax_list_del((struct list_head *)&gov->governor_list);
34690 mutex_unlock(&cpuidle_lock);
34691 }
34692
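
cpuidle governors end up in read-only memory once constified, so their embedded list_head cannot be linked with a plain list_add_tail(); pax_list_add_tail() is assumed to perform the same four pointer stores inside a pax_open_kernel() window, and the (struct list_head *) cast strips the const the plugin added. In plain C, the cast half of the trick looks like this:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct list_head { struct list_head *next, *prev; };

struct governor {
        const char *name;
        struct list_head governor_list;
};

static struct list_head governors = { &governors, &governors };

/* Kernel version additionally lifts write protection around the stores. */
static void pax_list_add_tail(struct list_head *n, struct list_head *head)
{
        n->prev = head->prev;
        n->next = head;
        head->prev->next = n;
        head->prev = n;
}

static struct governor menu_gov = { "menu", { NULL, NULL } };
static const struct governor *menu = &menu_gov;  /* const view, as enforced */

int main(void)
{
        /* The cast strips the const the constify plugin added. */
        pax_list_add_tail((struct list_head *)&menu->governor_list, &governors);
        printf("linked: %s\n",
               container_of(governors.next, struct governor, governor_list)->name);
        return 0;
}
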
34693diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
34694index 428754a..8bdf9cc 100644
34695--- a/drivers/cpuidle/sysfs.c
34696+++ b/drivers/cpuidle/sysfs.c
34697@@ -131,7 +131,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
34698 NULL
34699 };
34700
34701-static struct attribute_group cpuidle_attr_group = {
34702+static attribute_group_no_const cpuidle_attr_group = {
34703 .attrs = cpuidle_default_attrs,
34704 .name = "cpuidle",
34705 };
34706diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
34707index 3b36797..289c16a 100644
34708--- a/drivers/devfreq/devfreq.c
34709+++ b/drivers/devfreq/devfreq.c
34710@@ -588,7 +588,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
34711 goto err_out;
34712 }
34713
34714- list_add(&governor->node, &devfreq_governor_list);
34715+ pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
34716
34717 list_for_each_entry(devfreq, &devfreq_list, node) {
34718 int ret = 0;
34719@@ -676,7 +676,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
34720 }
34721 }
34722
34723- list_del(&governor->node);
34724+ pax_list_del((struct list_head *)&governor->node);
34725 err_out:
34726 mutex_unlock(&devfreq_list_lock);
34727
34728diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c
34729index b70709b..1d8d02a 100644
34730--- a/drivers/dma/sh/shdma.c
34731+++ b/drivers/dma/sh/shdma.c
34732@@ -476,7 +476,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
34733 return ret;
34734 }
34735
34736-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
34737+static struct notifier_block sh_dmae_nmi_notifier = {
34738 .notifier_call = sh_dmae_nmi_handler,
34739
34740 /* Run before NMI debug handler and KGDB */
34741diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
34742index 0ca1ca7..6e6f454 100644
34743--- a/drivers/edac/edac_mc_sysfs.c
34744+++ b/drivers/edac/edac_mc_sysfs.c
34745@@ -148,7 +148,7 @@ static const char *edac_caps[] = {
34746 struct dev_ch_attribute {
34747 struct device_attribute attr;
34748 int channel;
34749-};
34750+} __do_const;
34751
34752 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
34753 struct dev_ch_attribute dev_attr_legacy_##_name = \
34754diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
34755index 0056c4d..23b54d9 100644
34756--- a/drivers/edac/edac_pci_sysfs.c
34757+++ b/drivers/edac/edac_pci_sysfs.c
34758@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
34759 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
34760 static int edac_pci_poll_msec = 1000; /* one second workq period */
34761
34762-static atomic_t pci_parity_count = ATOMIC_INIT(0);
34763-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
34764+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
34765+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
34766
34767 static struct kobject *edac_pci_top_main_kobj;
34768 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
34769@@ -235,7 +235,7 @@ struct edac_pci_dev_attribute {
34770 void *value;
34771 ssize_t(*show) (void *, char *);
34772 ssize_t(*store) (void *, const char *, size_t);
34773-};
34774+} __do_const;
34775
34776 /* Set of show/store abstract level functions for PCI Parity object */
34777 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
34778@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34779 edac_printk(KERN_CRIT, EDAC_PCI,
34780 "Signaled System Error on %s\n",
34781 pci_name(dev));
34782- atomic_inc(&pci_nonparity_count);
34783+ atomic_inc_unchecked(&pci_nonparity_count);
34784 }
34785
34786 if (status & (PCI_STATUS_PARITY)) {
34787@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34788 "Master Data Parity Error on %s\n",
34789 pci_name(dev));
34790
34791- atomic_inc(&pci_parity_count);
34792+ atomic_inc_unchecked(&pci_parity_count);
34793 }
34794
34795 if (status & (PCI_STATUS_DETECTED_PARITY)) {
34796@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34797 "Detected Parity Error on %s\n",
34798 pci_name(dev));
34799
34800- atomic_inc(&pci_parity_count);
34801+ atomic_inc_unchecked(&pci_parity_count);
34802 }
34803 }
34804
34805@@ -618,7 +618,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34806 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
34807 "Signaled System Error on %s\n",
34808 pci_name(dev));
34809- atomic_inc(&pci_nonparity_count);
34810+ atomic_inc_unchecked(&pci_nonparity_count);
34811 }
34812
34813 if (status & (PCI_STATUS_PARITY)) {
34814@@ -626,7 +626,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34815 "Master Data Parity Error on "
34816 "%s\n", pci_name(dev));
34817
34818- atomic_inc(&pci_parity_count);
34819+ atomic_inc_unchecked(&pci_parity_count);
34820 }
34821
34822 if (status & (PCI_STATUS_DETECTED_PARITY)) {
34823@@ -634,7 +634,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34824 "Detected Parity Error on %s\n",
34825 pci_name(dev));
34826
34827- atomic_inc(&pci_parity_count);
34828+ atomic_inc_unchecked(&pci_parity_count);
34829 }
34830 }
34831 }
34832@@ -672,7 +672,7 @@ void edac_pci_do_parity_check(void)
34833 if (!check_pci_errors)
34834 return;
34835
34836- before_count = atomic_read(&pci_parity_count);
34837+ before_count = atomic_read_unchecked(&pci_parity_count);
34838
34839 /* scan all PCI devices looking for a Parity Error on devices and
34840 * bridges.
34841@@ -684,7 +684,7 @@ void edac_pci_do_parity_check(void)
34842 /* Only if operator has selected panic on PCI Error */
34843 if (edac_pci_get_panic_on_pe()) {
34844 /* If the count is different 'after' from 'before' */
34845- if (before_count != atomic_read(&pci_parity_count))
34846+ if (before_count != atomic_read_unchecked(&pci_parity_count))
34847 panic("EDAC: PCI Parity Error");
34848 }
34849 }
34850diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
34851index 6796799..99e8377 100644
34852--- a/drivers/edac/mce_amd.h
34853+++ b/drivers/edac/mce_amd.h
34854@@ -78,7 +78,7 @@ extern const char * const ii_msgs[];
34855 struct amd_decoder_ops {
34856 bool (*mc0_mce)(u16, u8);
34857 bool (*mc1_mce)(u16, u8);
34858-};
34859+} __no_const;
34860
34861 void amd_report_gart_errors(bool);
34862 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
34863diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
34864index 57ea7f4..789e3c3 100644
34865--- a/drivers/firewire/core-card.c
34866+++ b/drivers/firewire/core-card.c
34867@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
34868
34869 void fw_core_remove_card(struct fw_card *card)
34870 {
34871- struct fw_card_driver dummy_driver = dummy_driver_template;
34872+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
34873
34874 card->driver->update_phy_reg(card, 4,
34875 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
34876diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
34877index f8d2287..5aaf4db 100644
34878--- a/drivers/firewire/core-cdev.c
34879+++ b/drivers/firewire/core-cdev.c
34880@@ -1365,8 +1365,7 @@ static int init_iso_resource(struct client *client,
34881 int ret;
34882
34883 if ((request->channels == 0 && request->bandwidth == 0) ||
34884- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
34885- request->bandwidth < 0)
34886+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
34887 return -EINVAL;
34888
34889 r = kmalloc(sizeof(*r), GFP_KERNEL);
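
The firewire change drops the request->bandwidth < 0 test: the field is a __u32 in the cdev ABI, so the comparison is tautologically false, and compilers will warn about it and delete the branch. Trivially demonstrated:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t bandwidth = 0xffffffffu;

        /* Always false for an unsigned type; gcc flags it with
         * -Wtype-limits and dead-codes the branch away. */
        if (bandwidth < 0)
                puts("never reached");
        printf("bandwidth=%u\n", bandwidth);
        return 0;
}
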
34890diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
34891index af3e8aa..eb2f227 100644
34892--- a/drivers/firewire/core-device.c
34893+++ b/drivers/firewire/core-device.c
34894@@ -232,7 +232,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
34895 struct config_rom_attribute {
34896 struct device_attribute attr;
34897 u32 key;
34898-};
34899+} __do_const;
34900
34901 static ssize_t show_immediate(struct device *dev,
34902 struct device_attribute *dattr, char *buf)
34903diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
34904index 28a94c7..58da63a 100644
34905--- a/drivers/firewire/core-transaction.c
34906+++ b/drivers/firewire/core-transaction.c
34907@@ -38,6 +38,7 @@
34908 #include <linux/timer.h>
34909 #include <linux/types.h>
34910 #include <linux/workqueue.h>
34911+#include <linux/sched.h>
34912
34913 #include <asm/byteorder.h>
34914
34915diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
34916index 515a42c..5ecf3ba 100644
34917--- a/drivers/firewire/core.h
34918+++ b/drivers/firewire/core.h
34919@@ -111,6 +111,7 @@ struct fw_card_driver {
34920
34921 int (*stop_iso)(struct fw_iso_context *ctx);
34922 };
34923+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
34924
34925 void fw_card_initialize(struct fw_card *card,
34926 const struct fw_card_driver *driver, struct device *device);
34927diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
34928index 94a58a0..f5eba42 100644
34929--- a/drivers/firmware/dmi-id.c
34930+++ b/drivers/firmware/dmi-id.c
34931@@ -16,7 +16,7 @@
34932 struct dmi_device_attribute{
34933 struct device_attribute dev_attr;
34934 int field;
34935-};
34936+} __do_const;
34937 #define to_dmi_dev_attr(_dev_attr) \
34938 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
34939
34940diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
34941index 4cd392d..4b629e1 100644
34942--- a/drivers/firmware/dmi_scan.c
34943+++ b/drivers/firmware/dmi_scan.c
34944@@ -490,11 +490,6 @@ void __init dmi_scan_machine(void)
34945 }
34946 }
34947 else {
34948- /*
34949- * no iounmap() for that ioremap(); it would be a no-op, but
34950- * it's so early in setup that sucker gets confused into doing
34951- * what it shouldn't if we actually call it.
34952- */
34953 p = dmi_ioremap(0xF0000, 0x10000);
34954 if (p == NULL)
34955 goto error;
34956@@ -769,7 +764,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
34957 if (buf == NULL)
34958 return -1;
34959
34960- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
34961+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
34962
34963 iounmap(buf);
34964 return 0;
34965diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
34966index b07cb37..2a51037 100644
34967--- a/drivers/firmware/efivars.c
34968+++ b/drivers/firmware/efivars.c
34969@@ -138,7 +138,7 @@ struct efivar_attribute {
34970 };
34971
34972 static struct efivars __efivars;
34973-static struct efivar_operations ops;
34974+static efivar_operations_no_const ops __read_only;
34975
34976 #define PSTORE_EFI_ATTRIBUTES \
34977 (EFI_VARIABLE_NON_VOLATILE | \
34978@@ -1834,7 +1834,7 @@ efivar_create_sysfs_entry(struct efivars *efivars,
34979 static int
34980 create_efivars_bin_attributes(struct efivars *efivars)
34981 {
34982- struct bin_attribute *attr;
34983+ bin_attribute_no_const *attr;
34984 int error;
34985
34986 /* new_var */
34987diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
34988index 2a90ba6..07f3733 100644
34989--- a/drivers/firmware/google/memconsole.c
34990+++ b/drivers/firmware/google/memconsole.c
34991@@ -147,7 +147,9 @@ static int __init memconsole_init(void)
34992 if (!found_memconsole())
34993 return -ENODEV;
34994
34995- memconsole_bin_attr.size = memconsole_length;
34996+ pax_open_kernel();
34997+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
34998+ pax_close_kernel();
34999
35000 ret = sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
35001
35002diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
35003index 6f2306d..af9476a 100644
35004--- a/drivers/gpio/gpio-ich.c
35005+++ b/drivers/gpio/gpio-ich.c
35006@@ -69,7 +69,7 @@ struct ichx_desc {
35007 /* Some chipsets have quirks, let these use their own request/get */
35008 int (*request)(struct gpio_chip *chip, unsigned offset);
35009 int (*get)(struct gpio_chip *chip, unsigned offset);
35010-};
35011+} __do_const;
35012
35013 static struct {
35014 spinlock_t lock;
35015diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
35016index 9902732..64b62dd 100644
35017--- a/drivers/gpio/gpio-vr41xx.c
35018+++ b/drivers/gpio/gpio-vr41xx.c
35019@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
35020 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
35021 maskl, pendl, maskh, pendh);
35022
35023- atomic_inc(&irq_err_count);
35024+ atomic_inc_unchecked(&irq_err_count);
35025
35026 return -EINVAL;
35027 }
35028diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
35029index 7b2d378..cc947ea 100644
35030--- a/drivers/gpu/drm/drm_crtc_helper.c
35031+++ b/drivers/gpu/drm/drm_crtc_helper.c
35032@@ -319,7 +319,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
35033 struct drm_crtc *tmp;
35034 int crtc_mask = 1;
35035
35036- WARN(!crtc, "checking null crtc?\n");
35037+ BUG_ON(!crtc);
35038
35039 dev = crtc->dev;
35040
35041diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
35042index be174ca..7f38143 100644
35043--- a/drivers/gpu/drm/drm_drv.c
35044+++ b/drivers/gpu/drm/drm_drv.c
35045@@ -307,7 +307,7 @@ module_exit(drm_core_exit);
35046 /**
35047 * Copy and IOCTL return string to user space
35048 */
35049-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
35050+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
35051 {
35052 int len;
35053
35054@@ -377,7 +377,7 @@ long drm_ioctl(struct file *filp,
35055 struct drm_file *file_priv = filp->private_data;
35056 struct drm_device *dev;
35057 struct drm_ioctl_desc *ioctl;
35058- drm_ioctl_t *func;
35059+ drm_ioctl_no_const_t func;
35060 unsigned int nr = DRM_IOCTL_NR(cmd);
35061 int retcode = -EINVAL;
35062 char stack_kdata[128];
35063@@ -390,7 +390,7 @@ long drm_ioctl(struct file *filp,
35064 return -ENODEV;
35065
35066 atomic_inc(&dev->ioctl_count);
35067- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
35068+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
35069 ++file_priv->ioctl_count;
35070
35071 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
35072diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
35073index 133b413..fd68225 100644
35074--- a/drivers/gpu/drm/drm_fops.c
35075+++ b/drivers/gpu/drm/drm_fops.c
35076@@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
35077 }
35078
35079 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
35080- atomic_set(&dev->counts[i], 0);
35081+ atomic_set_unchecked(&dev->counts[i], 0);
35082
35083 dev->sigdata.lock = NULL;
35084
35085@@ -134,7 +134,7 @@ int drm_open(struct inode *inode, struct file *filp)
35086 if (drm_device_is_unplugged(dev))
35087 return -ENODEV;
35088
35089- if (!dev->open_count++)
35090+ if (local_inc_return(&dev->open_count) == 1)
35091 need_setup = 1;
35092 mutex_lock(&dev->struct_mutex);
35093 old_mapping = dev->dev_mapping;
35094@@ -149,7 +149,7 @@ int drm_open(struct inode *inode, struct file *filp)
35095 retcode = drm_open_helper(inode, filp, dev);
35096 if (retcode)
35097 goto err_undo;
35098- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
35099+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
35100 if (need_setup) {
35101 retcode = drm_setup(dev);
35102 if (retcode)
35103@@ -164,7 +164,7 @@ err_undo:
35104 iput(container_of(dev->dev_mapping, struct inode, i_data));
35105 dev->dev_mapping = old_mapping;
35106 mutex_unlock(&dev->struct_mutex);
35107- dev->open_count--;
35108+ local_dec(&dev->open_count);
35109 return retcode;
35110 }
35111 EXPORT_SYMBOL(drm_open);
35112@@ -438,7 +438,7 @@ int drm_release(struct inode *inode, struct file *filp)
35113
35114 mutex_lock(&drm_global_mutex);
35115
35116- DRM_DEBUG("open_count = %d\n", dev->open_count);
35117+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
35118
35119 if (dev->driver->preclose)
35120 dev->driver->preclose(dev, file_priv);
35121@@ -447,10 +447,10 @@ int drm_release(struct inode *inode, struct file *filp)
35122 * Begin inline drm_release
35123 */
35124
35125- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
35126+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
35127 task_pid_nr(current),
35128 (long)old_encode_dev(file_priv->minor->device),
35129- dev->open_count);
35130+ local_read(&dev->open_count));
35131
35132 /* Release any auth tokens that might point to this file_priv,
35133 (do that under the drm_global_mutex) */
35134@@ -547,8 +547,8 @@ int drm_release(struct inode *inode, struct file *filp)
35135 * End inline drm_release
35136 */
35137
35138- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
35139- if (!--dev->open_count) {
35140+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
35141+ if (local_dec_and_test(&dev->open_count)) {
35142 if (atomic_read(&dev->ioctl_count)) {
35143 DRM_ERROR("Device busy: %d\n",
35144 atomic_read(&dev->ioctl_count));
35145diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
35146index f731116..629842c 100644
35147--- a/drivers/gpu/drm/drm_global.c
35148+++ b/drivers/gpu/drm/drm_global.c
35149@@ -36,7 +36,7 @@
35150 struct drm_global_item {
35151 struct mutex mutex;
35152 void *object;
35153- int refcount;
35154+ atomic_t refcount;
35155 };
35156
35157 static struct drm_global_item glob[DRM_GLOBAL_NUM];
35158@@ -49,7 +49,7 @@ void drm_global_init(void)
35159 struct drm_global_item *item = &glob[i];
35160 mutex_init(&item->mutex);
35161 item->object = NULL;
35162- item->refcount = 0;
35163+ atomic_set(&item->refcount, 0);
35164 }
35165 }
35166
35167@@ -59,7 +59,7 @@ void drm_global_release(void)
35168 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
35169 struct drm_global_item *item = &glob[i];
35170 BUG_ON(item->object != NULL);
35171- BUG_ON(item->refcount != 0);
35172+ BUG_ON(atomic_read(&item->refcount) != 0);
35173 }
35174 }
35175
35176@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
35177 void *object;
35178
35179 mutex_lock(&item->mutex);
35180- if (item->refcount == 0) {
35181+ if (atomic_read(&item->refcount) == 0) {
35182 item->object = kzalloc(ref->size, GFP_KERNEL);
35183 if (unlikely(item->object == NULL)) {
35184 ret = -ENOMEM;
35185@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
35186 goto out_err;
35187
35188 }
35189- ++item->refcount;
35190+ atomic_inc(&item->refcount);
35191 ref->object = item->object;
35192 object = item->object;
35193 mutex_unlock(&item->mutex);
35194@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
35195 struct drm_global_item *item = &glob[ref->global_type];
35196
35197 mutex_lock(&item->mutex);
35198- BUG_ON(item->refcount == 0);
35199+ BUG_ON(atomic_read(&item->refcount) == 0);
35200 BUG_ON(ref->object != item->object);
35201- if (--item->refcount == 0) {
35202+ if (atomic_dec_and_test(&item->refcount)) {
35203 ref->release(ref);
35204 item->object = NULL;
35205 }
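
drm_global's reference count becomes a real atomic_t; atomic_dec_and_test() fuses the decrement and the zero test into one indivisible step, so the "last reference frees the object" decision cannot race even if a caller ever touches the count outside item->mutex. The C11 equivalent:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int refcount;

static void ref_get(void) { atomic_fetch_add(&refcount, 1); }

/* Returns true exactly once, when the final reference is dropped. */
static bool ref_put(void)
{
        return atomic_fetch_sub(&refcount, 1) == 1;
}

int main(void)
{
        ref_get();
        ref_get();
        printf("%d\n", ref_put());   /* 0: others still hold it */
        printf("%d\n", ref_put());   /* 1: free the object now  */
        return 0;
}
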
35206diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
35207index d4b20ce..77a8d41 100644
35208--- a/drivers/gpu/drm/drm_info.c
35209+++ b/drivers/gpu/drm/drm_info.c
35210@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
35211 struct drm_local_map *map;
35212 struct drm_map_list *r_list;
35213
35214- /* Hardcoded from _DRM_FRAME_BUFFER,
35215- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
35216- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
35217- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
35218+ static const char * const types[] = {
35219+ [_DRM_FRAME_BUFFER] = "FB",
35220+ [_DRM_REGISTERS] = "REG",
35221+ [_DRM_SHM] = "SHM",
35222+ [_DRM_AGP] = "AGP",
35223+ [_DRM_SCATTER_GATHER] = "SG",
35224+ [_DRM_CONSISTENT] = "PCI",
35225+ [_DRM_GEM] = "GEM" };
35226 const char *type;
35227 int i;
35228
35229@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
35230 map = r_list->map;
35231 if (!map)
35232 continue;
35233- if (map->type < 0 || map->type > 5)
35234+ if (map->type >= ARRAY_SIZE(types))
35235 type = "??";
35236 else
35237 type = types[map->type];
35238@@ -253,7 +257,11 @@ int drm_vma_info(struct seq_file *m, void *data)
35239 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
35240 vma->vm_flags & VM_LOCKED ? 'l' : '-',
35241 vma->vm_flags & VM_IO ? 'i' : '-',
35242+#ifdef CONFIG_GRKERNSEC_HIDESYM
35243+ 0);
35244+#else
35245 vma->vm_pgoff);
35246+#endif
35247
35248 #if defined(__i386__)
35249 pgprot = pgprot_val(vma->vm_page_prot);
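The drm_info.c changes are two separate hardenings. First, the positional string table with its hand-maintained "type > 5" check becomes a designated-initializer array bounded by ARRAY_SIZE(), so a new map type such as _DRM_GEM (previously missing) cannot silently misindex; the "< 0" half of the old test is dropped because the index is compared as unsigned. Second, under CONFIG_GRKERNSEC_HIDESYM the vma dump prints 0 in place of vm_pgoff, since mapping offsets can reveal kernel addresses to unprivileged readers. A compilable miniature of the table idiom, with invented enum names:

#include <stdio.h>

enum map_type { FRAME_BUFFER, REGISTERS, SHM, AGP, SCATTER_GATHER, CONSISTENT, GEM };

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char * const types[] = {
        [FRAME_BUFFER]   = "FB",
        [REGISTERS]      = "REG",
        [SHM]            = "SHM",
        [AGP]            = "AGP",
        [SCATTER_GATHER] = "SG",
        [CONSISTENT]     = "PCI",
        [GEM]            = "GEM",
};

static const char *type_name(unsigned int t)
{
        /* NULL check covers any gaps a future enum value might leave */
        return t < ARRAY_SIZE(types) && types[t] ? types[t] : "??";
}

int main(void)
{
        printf("%s %s\n", type_name(GEM), type_name(42));       /* GEM ?? */
        return 0;
}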
35250diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
35251index 2f4c434..dd12cd2 100644
35252--- a/drivers/gpu/drm/drm_ioc32.c
35253+++ b/drivers/gpu/drm/drm_ioc32.c
35254@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
35255 request = compat_alloc_user_space(nbytes);
35256 if (!access_ok(VERIFY_WRITE, request, nbytes))
35257 return -EFAULT;
35258- list = (struct drm_buf_desc *) (request + 1);
35259+ list = (struct drm_buf_desc __user *) (request + 1);
35260
35261 if (__put_user(count, &request->count)
35262 || __put_user(list, &request->list))
35263@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
35264 request = compat_alloc_user_space(nbytes);
35265 if (!access_ok(VERIFY_WRITE, request, nbytes))
35266 return -EFAULT;
35267- list = (struct drm_buf_pub *) (request + 1);
35268+ list = (struct drm_buf_pub __user *) (request + 1);
35269
35270 if (__put_user(count, &request->count)
35271 || __put_user(list, &request->list))
35272@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
35273 return 0;
35274 }
35275
35276-drm_ioctl_compat_t *drm_compat_ioctls[] = {
35277+drm_ioctl_compat_t drm_compat_ioctls[] = {
35278 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
35279 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
35280 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
35281@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
35282 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35283 {
35284 unsigned int nr = DRM_IOCTL_NR(cmd);
35285- drm_ioctl_compat_t *fn;
35286 int ret;
35287
35288 /* Assume that ioctls without an explicit compat routine will just
35289@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35290 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
35291 return drm_ioctl(filp, cmd, arg);
35292
35293- fn = drm_compat_ioctls[nr];
35294-
35295- if (fn != NULL)
35296- ret = (*fn) (filp, cmd, arg);
35297+ if (drm_compat_ioctls[nr] != NULL)
35298+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
35299 else
35300 ret = drm_ioctl(filp, cmd, arg);
35301
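Two distinct fixes in drm_ioc32.c. The casts gain __user so sparse tracks the address space of the pointers handed to __put_user(). Separately, the compat table's element type goes from drm_ioctl_compat_t * to drm_ioctl_compat_t; since the upstream typedef names a function type, the typedef must itself be redefined as a pointer type in a hunk outside this excerpt for the new array to compile, the payoff being a table the constify plugin can place in read-only memory. The i915, mga, r128 and radeon compat dispatchers below receive the identical rework. The dispatch shape, modelled as a standalone program with made-up handlers:

#include <stdio.h>

typedef int (*ioctl_fn)(unsigned int cmd, unsigned long arg);

static int do_version(unsigned int cmd, unsigned long arg) { (void)cmd; (void)arg; return 0; }
static int do_default(unsigned int cmd, unsigned long arg) { (void)cmd; (void)arg; return -1; }

static ioctl_fn const compat_ioctls[] = {
        [0] = do_version,
        /* gaps stay NULL and fall through to the default path */
};

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static int dispatch(unsigned int nr, unsigned int cmd, unsigned long arg)
{
        if (nr >= ARRAY_SIZE(compat_ioctls) || !compat_ioctls[nr])
                return do_default(cmd, arg);    /* no compat wrapper */
        return compat_ioctls[nr](cmd, arg);
}

int main(void)
{
        printf("%d %d\n", dispatch(0, 0, 0), dispatch(7, 0, 0)); /* 0 -1 */
        return 0;
}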
35302diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
35303index e77bd8b..1571b85 100644
35304--- a/drivers/gpu/drm/drm_ioctl.c
35305+++ b/drivers/gpu/drm/drm_ioctl.c
35306@@ -252,7 +252,7 @@ int drm_getstats(struct drm_device *dev, void *data,
35307 stats->data[i].value =
35308 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
35309 else
35310- stats->data[i].value = atomic_read(&dev->counts[i]);
35311+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
35312 stats->data[i].type = dev->types[i];
35313 }
35314
35315diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
35316index d752c96..fe08455 100644
35317--- a/drivers/gpu/drm/drm_lock.c
35318+++ b/drivers/gpu/drm/drm_lock.c
35319@@ -86,7 +86,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
35320 if (drm_lock_take(&master->lock, lock->context)) {
35321 master->lock.file_priv = file_priv;
35322 master->lock.lock_time = jiffies;
35323- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
35324+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
35325 break; /* Got lock */
35326 }
35327
35328@@ -157,7 +157,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
35329 return -EINVAL;
35330 }
35331
35332- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
35333+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
35334
35335 if (drm_lock_free(&master->lock, lock->context)) {
35336 /* FIXME: Should really bail out here. */
35337diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
35338index 200e104..59facda 100644
35339--- a/drivers/gpu/drm/drm_stub.c
35340+++ b/drivers/gpu/drm/drm_stub.c
35341@@ -516,7 +516,7 @@ void drm_unplug_dev(struct drm_device *dev)
35342
35343 drm_device_set_unplugged(dev);
35344
35345- if (dev->open_count == 0) {
35346+ if (local_read(&dev->open_count) == 0) {
35347 drm_put_dev(dev);
35348 }
35349 mutex_unlock(&drm_global_mutex);
35350diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
35351index 004ecdf..db1f6e0 100644
35352--- a/drivers/gpu/drm/i810/i810_dma.c
35353+++ b/drivers/gpu/drm/i810/i810_dma.c
35354@@ -945,8 +945,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
35355 dma->buflist[vertex->idx],
35356 vertex->discard, vertex->used);
35357
35358- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
35359- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
35360+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
35361+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
35362 sarea_priv->last_enqueue = dev_priv->counter - 1;
35363 sarea_priv->last_dispatch = (int)hw_status[5];
35364
35365@@ -1106,8 +1106,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
35366 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
35367 mc->last_render);
35368
35369- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
35370- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
35371+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
35372+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
35373 sarea_priv->last_enqueue = dev_priv->counter - 1;
35374 sarea_priv->last_dispatch = (int)hw_status[5];
35375
35376diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
35377index 6e0acad..93c8289 100644
35378--- a/drivers/gpu/drm/i810/i810_drv.h
35379+++ b/drivers/gpu/drm/i810/i810_drv.h
35380@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
35381 int page_flipping;
35382
35383 wait_queue_head_t irq_queue;
35384- atomic_t irq_received;
35385- atomic_t irq_emitted;
35386+ atomic_unchecked_t irq_received;
35387+ atomic_unchecked_t irq_emitted;
35388
35389 int front_offset;
35390 } drm_i810_private_t;
35391diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
35392index 261efc8e..27af8a5 100644
35393--- a/drivers/gpu/drm/i915/i915_debugfs.c
35394+++ b/drivers/gpu/drm/i915/i915_debugfs.c
35395@@ -496,7 +496,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
35396 I915_READ(GTIMR));
35397 }
35398 seq_printf(m, "Interrupts received: %d\n",
35399- atomic_read(&dev_priv->irq_received));
35400+ atomic_read_unchecked(&dev_priv->irq_received));
35401 for_each_ring(ring, dev_priv, i) {
35402 if (IS_GEN6(dev) || IS_GEN7(dev)) {
35403 seq_printf(m,
35404diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
35405index 99daa89..84ebd44 100644
35406--- a/drivers/gpu/drm/i915/i915_dma.c
35407+++ b/drivers/gpu/drm/i915/i915_dma.c
35408@@ -1253,7 +1253,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
35409 bool can_switch;
35410
35411 spin_lock(&dev->count_lock);
35412- can_switch = (dev->open_count == 0);
35413+ can_switch = (local_read(&dev->open_count) == 0);
35414 spin_unlock(&dev->count_lock);
35415 return can_switch;
35416 }
35417diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
35418index 7339a4b..445aaba 100644
35419--- a/drivers/gpu/drm/i915/i915_drv.h
35420+++ b/drivers/gpu/drm/i915/i915_drv.h
35421@@ -656,7 +656,7 @@ typedef struct drm_i915_private {
35422 drm_dma_handle_t *status_page_dmah;
35423 struct resource mch_res;
35424
35425- atomic_t irq_received;
35426+ atomic_unchecked_t irq_received;
35427
35428 /* protects the irq masks */
35429 spinlock_t irq_lock;
35430@@ -1102,7 +1102,7 @@ struct drm_i915_gem_object {
35431 * will be page flipped away on the next vblank. When it
35432 * reaches 0, dev_priv->pending_flip_queue will be woken up.
35433 */
35434- atomic_t pending_flip;
35435+ atomic_unchecked_t pending_flip;
35436 };
35437 #define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
35438
35439@@ -1633,7 +1633,7 @@ extern struct i2c_adapter *intel_gmbus_get_adapter(
35440 struct drm_i915_private *dev_priv, unsigned port);
35441 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
35442 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
35443-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
35444+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
35445 {
35446 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
35447 }
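Buried among the i915 counter conversions is a plain C fix: intel_gmbus_is_forced_bit() was declared "extern inline" in a header. Under the gnu89 inline semantics the kernel builds with, "extern inline" never emits an out-of-line definition, so any call site the compiler declines to inline becomes an unresolved symbol at link time; "static inline" gives every includer its own fallback copy. A minimal illustration of the corrected form:

#include <stdio.h>

/* "extern inline" here, with -fgnu89-inline and no out-of-line
 * definition elsewhere, can fail to link at -O0; "static inline"
 * always provides a body in this translation unit. */
static inline int is_forced_bit(int flags)
{
        return flags & 1;
}

int main(void)
{
        printf("%d\n", is_forced_bit(3));
        return 0;
}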
35448diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
35449index 7adf5a7..e24fb51 100644
35450--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
35451+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
35452@@ -672,7 +672,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
35453 i915_gem_clflush_object(obj);
35454
35455 if (obj->base.pending_write_domain)
35456- flips |= atomic_read(&obj->pending_flip);
35457+ flips |= atomic_read_unchecked(&obj->pending_flip);
35458
35459 flush_domains |= obj->base.write_domain;
35460 }
35461@@ -703,9 +703,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
35462
35463 static int
35464 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
35465- int count)
35466+ unsigned int count)
35467 {
35468- int i;
35469+ unsigned int i;
35470 int relocs_total = 0;
35471 int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
35472
35473@@ -1202,7 +1202,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
35474 return -ENOMEM;
35475 }
35476 ret = copy_from_user(exec2_list,
35477- (struct drm_i915_relocation_entry __user *)
35478+ (struct drm_i915_gem_exec_object2 __user *)
35479 (uintptr_t) args->buffers_ptr,
35480 sizeof(*exec2_list) * args->buffer_count);
35481 if (ret != 0) {
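validate_exec_list() receives a count that originates in a user-controlled 32-bit field. Read as a signed int, a negative value steps over every "i < count" loop while still flowing into later size arithmetic; as unsigned, the same bit pattern is a huge value that the existing relocs_max comparison rejects. The second hunk corrects the copy_from_user() cast to the struct actually being copied, drm_i915_gem_exec_object2, a documentation fix since the cast target does not change the byte count. A demo of the signedness trap:

#include <stdio.h>

static int validate_signed(int count)
{
        for (int i = 0; i < count; i++)
                ;                               /* never runs for count < 0 */
        return 0;                               /* "validated" */
}

static int validate_unsigned(unsigned int count, unsigned int max)
{
        if (count > max)
                return -1;                      /* huge (ex-negative) rejected */
        return 0;
}

int main(void)
{
        int evil = -1;                          /* from a 32-bit user field */
        printf("signed: %d\n", validate_signed(evil));                  /* 0: passed */
        printf("unsigned: %d\n", validate_unsigned((unsigned)evil, 4096)); /* -1 */
        return 0;
}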
35482diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
35483index 3c59584..500f2e9 100644
35484--- a/drivers/gpu/drm/i915/i915_ioc32.c
35485+++ b/drivers/gpu/drm/i915/i915_ioc32.c
35486@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
35487 (unsigned long)request);
35488 }
35489
35490-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
35491+static drm_ioctl_compat_t i915_compat_ioctls[] = {
35492 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
35493 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
35494 [DRM_I915_GETPARAM] = compat_i915_getparam,
35495@@ -202,18 +202,15 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
35496 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35497 {
35498 unsigned int nr = DRM_IOCTL_NR(cmd);
35499- drm_ioctl_compat_t *fn = NULL;
35500 int ret;
35501
35502 if (nr < DRM_COMMAND_BASE)
35503 return drm_compat_ioctl(filp, cmd, arg);
35504
35505- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
35506- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
35507-
35508- if (fn != NULL)
35509+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls)) {
35510+ drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
35511 ret = (*fn) (filp, cmd, arg);
35512- else
35513+ } else
35514 ret = drm_ioctl(filp, cmd, arg);
35515
35516 return ret;
35517diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
35518index fe84338..a863190 100644
35519--- a/drivers/gpu/drm/i915/i915_irq.c
35520+++ b/drivers/gpu/drm/i915/i915_irq.c
35521@@ -535,7 +535,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
35522 u32 pipe_stats[I915_MAX_PIPES];
35523 bool blc_event;
35524
35525- atomic_inc(&dev_priv->irq_received);
35526+ atomic_inc_unchecked(&dev_priv->irq_received);
35527
35528 while (true) {
35529 iir = I915_READ(VLV_IIR);
35530@@ -688,7 +688,7 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
35531 irqreturn_t ret = IRQ_NONE;
35532 int i;
35533
35534- atomic_inc(&dev_priv->irq_received);
35535+ atomic_inc_unchecked(&dev_priv->irq_received);
35536
35537 /* disable master interrupt before clearing iir */
35538 de_ier = I915_READ(DEIER);
35539@@ -760,7 +760,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
35540 int ret = IRQ_NONE;
35541 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
35542
35543- atomic_inc(&dev_priv->irq_received);
35544+ atomic_inc_unchecked(&dev_priv->irq_received);
35545
35546 /* disable master interrupt before clearing iir */
35547 de_ier = I915_READ(DEIER);
35548@@ -1787,7 +1787,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
35549 {
35550 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
35551
35552- atomic_set(&dev_priv->irq_received, 0);
35553+ atomic_set_unchecked(&dev_priv->irq_received, 0);
35554
35555 I915_WRITE(HWSTAM, 0xeffe);
35556
35557@@ -1813,7 +1813,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
35558 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
35559 int pipe;
35560
35561- atomic_set(&dev_priv->irq_received, 0);
35562+ atomic_set_unchecked(&dev_priv->irq_received, 0);
35563
35564 /* VLV magic */
35565 I915_WRITE(VLV_IMR, 0);
35566@@ -2108,7 +2108,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
35567 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
35568 int pipe;
35569
35570- atomic_set(&dev_priv->irq_received, 0);
35571+ atomic_set_unchecked(&dev_priv->irq_received, 0);
35572
35573 for_each_pipe(pipe)
35574 I915_WRITE(PIPESTAT(pipe), 0);
35575@@ -2159,7 +2159,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
35576 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
35577 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
35578
35579- atomic_inc(&dev_priv->irq_received);
35580+ atomic_inc_unchecked(&dev_priv->irq_received);
35581
35582 iir = I915_READ16(IIR);
35583 if (iir == 0)
35584@@ -2244,7 +2244,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
35585 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
35586 int pipe;
35587
35588- atomic_set(&dev_priv->irq_received, 0);
35589+ atomic_set_unchecked(&dev_priv->irq_received, 0);
35590
35591 if (I915_HAS_HOTPLUG(dev)) {
35592 I915_WRITE(PORT_HOTPLUG_EN, 0);
35593@@ -2339,7 +2339,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
35594 };
35595 int pipe, ret = IRQ_NONE;
35596
35597- atomic_inc(&dev_priv->irq_received);
35598+ atomic_inc_unchecked(&dev_priv->irq_received);
35599
35600 iir = I915_READ(IIR);
35601 do {
35602@@ -2465,7 +2465,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
35603 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
35604 int pipe;
35605
35606- atomic_set(&dev_priv->irq_received, 0);
35607+ atomic_set_unchecked(&dev_priv->irq_received, 0);
35608
35609 I915_WRITE(PORT_HOTPLUG_EN, 0);
35610 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
35611@@ -2572,7 +2572,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
35612 int irq_received;
35613 int ret = IRQ_NONE, pipe;
35614
35615- atomic_inc(&dev_priv->irq_received);
35616+ atomic_inc_unchecked(&dev_priv->irq_received);
35617
35618 iir = I915_READ(IIR);
35619
35620diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
35621index e6e4df7..6a9a1bd 100644
35622--- a/drivers/gpu/drm/i915/intel_display.c
35623+++ b/drivers/gpu/drm/i915/intel_display.c
35624@@ -2255,7 +2255,7 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
35625
35626 wait_event(dev_priv->pending_flip_queue,
35627 atomic_read(&dev_priv->mm.wedged) ||
35628- atomic_read(&obj->pending_flip) == 0);
35629+ atomic_read_unchecked(&obj->pending_flip) == 0);
35630
35631 /* Big Hammer, we also need to ensure that any pending
35632 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
35633@@ -7122,8 +7122,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
35634
35635 obj = work->old_fb_obj;
35636
35637- atomic_clear_mask(1 << intel_crtc->plane,
35638- &obj->pending_flip.counter);
35639+ atomic_clear_mask_unchecked(1 << intel_crtc->plane, &obj->pending_flip);
35640 wake_up(&dev_priv->pending_flip_queue);
35641
35642 queue_work(dev_priv->wq, &work->work);
35643@@ -7486,7 +7485,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
35644 /* Block clients from rendering to the new back buffer until
35645 * the flip occurs and the object is no longer visible.
35646 */
35647- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
35648+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
35649 atomic_inc(&intel_crtc->unpin_work_count);
35650
35651 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
35652@@ -7504,7 +7503,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
35653 cleanup_pending:
35654 atomic_dec(&intel_crtc->unpin_work_count);
35655 crtc->fb = old_fb;
35656- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
35657+ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
35658 drm_gem_object_unreference(&work->old_fb_obj->base);
35659 drm_gem_object_unreference(&obj->base);
35660 mutex_unlock(&dev->struct_mutex);
35661@@ -8846,13 +8845,13 @@ struct intel_quirk {
35662 int subsystem_vendor;
35663 int subsystem_device;
35664 void (*hook)(struct drm_device *dev);
35665-};
35666+} __do_const;
35667
35668 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
35669 struct intel_dmi_quirk {
35670 void (*hook)(struct drm_device *dev);
35671 const struct dmi_system_id (*dmi_id_list)[];
35672-};
35673+} __do_const;
35674
35675 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
35676 {
35677@@ -8860,18 +8859,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
35678 return 1;
35679 }
35680
35681+static const struct dmi_system_id intel_dmi_quirks_table[] = {
35682+ {
35683+ .callback = intel_dmi_reverse_brightness,
35684+ .ident = "NCR Corporation",
35685+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
35686+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
35687+ },
35688+ },
35689+ { } /* terminating entry */
35690+};
35691+
35692 static const struct intel_dmi_quirk intel_dmi_quirks[] = {
35693 {
35694- .dmi_id_list = &(const struct dmi_system_id[]) {
35695- {
35696- .callback = intel_dmi_reverse_brightness,
35697- .ident = "NCR Corporation",
35698- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
35699- DMI_MATCH(DMI_PRODUCT_NAME, ""),
35700- },
35701- },
35702- { } /* terminating entry */
35703- },
35704+ .dmi_id_list = &intel_dmi_quirks_table,
35705 .hook = quirk_invert_brightness,
35706 },
35707 };
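The intel_display.c tail is constification. intel_quirk and intel_dmi_quirk carry only data and function pointers, so they are tagged __do_const, which grsecurity's GCC plugin reads as "force every instance into read-only memory". That in turn motivates hoisting the DMI match list out of its anonymous compound literal into a named static const array, presumably so the constified aggregate points at an object with a predictable read-only placement; the literal was already const-qualified, but unnamed. The underlying idea, with plain const standing in for the plugin attribute:

/* Tables of function pointers belong in .rodata so they cannot be
 * redirected at runtime; names here are invented. */
struct quirk {
        int vendor;
        void (*hook)(void);
};

static void fix_brightness(void) { }

static const struct quirk quirks[] = {          /* lands in .rodata */
        { 0x1234, fix_brightness },
};

int main(void)
{
        for (unsigned int i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
                quirks[i].hook();
        return 0;
}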
35708diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
35709index 54558a0..2d97005 100644
35710--- a/drivers/gpu/drm/mga/mga_drv.h
35711+++ b/drivers/gpu/drm/mga/mga_drv.h
35712@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
35713 u32 clear_cmd;
35714 u32 maccess;
35715
35716- atomic_t vbl_received; /**< Number of vblanks received. */
35717+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
35718 wait_queue_head_t fence_queue;
35719- atomic_t last_fence_retired;
35720+ atomic_unchecked_t last_fence_retired;
35721 u32 next_fence_to_post;
35722
35723 unsigned int fb_cpp;
35724diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
35725index 709e90d..89a1c0d 100644
35726--- a/drivers/gpu/drm/mga/mga_ioc32.c
35727+++ b/drivers/gpu/drm/mga/mga_ioc32.c
35728@@ -189,7 +189,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
35729 return 0;
35730 }
35731
35732-drm_ioctl_compat_t *mga_compat_ioctls[] = {
35733+drm_ioctl_compat_t mga_compat_ioctls[] = {
35734 [DRM_MGA_INIT] = compat_mga_init,
35735 [DRM_MGA_GETPARAM] = compat_mga_getparam,
35736 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
35737@@ -207,18 +207,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
35738 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35739 {
35740 unsigned int nr = DRM_IOCTL_NR(cmd);
35741- drm_ioctl_compat_t *fn = NULL;
35742 int ret;
35743
35744 if (nr < DRM_COMMAND_BASE)
35745 return drm_compat_ioctl(filp, cmd, arg);
35746
35747- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
35748- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
35749-
35750- if (fn != NULL)
35751+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) {
35752+ drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
35753 ret = (*fn) (filp, cmd, arg);
35754- else
35755+ } else
35756 ret = drm_ioctl(filp, cmd, arg);
35757
35758 return ret;
35759diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
35760index 598c281..60d590e 100644
35761--- a/drivers/gpu/drm/mga/mga_irq.c
35762+++ b/drivers/gpu/drm/mga/mga_irq.c
35763@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
35764 if (crtc != 0)
35765 return 0;
35766
35767- return atomic_read(&dev_priv->vbl_received);
35768+ return atomic_read_unchecked(&dev_priv->vbl_received);
35769 }
35770
35771
35772@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
35773 /* VBLANK interrupt */
35774 if (status & MGA_VLINEPEN) {
35775 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
35776- atomic_inc(&dev_priv->vbl_received);
35777+ atomic_inc_unchecked(&dev_priv->vbl_received);
35778 drm_handle_vblank(dev, 0);
35779 handled = 1;
35780 }
35781@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
35782 if ((prim_start & ~0x03) != (prim_end & ~0x03))
35783 MGA_WRITE(MGA_PRIMEND, prim_end);
35784
35785- atomic_inc(&dev_priv->last_fence_retired);
35786+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
35787 DRM_WAKEUP(&dev_priv->fence_queue);
35788 handled = 1;
35789 }
35790@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
35791 * using fences.
35792 */
35793 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
35794- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
35795+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
35796 - *sequence) <= (1 << 23)));
35797
35798 *sequence = cur_fence;
35799diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
35800index 4f50c40..2e7c949 100644
35801--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
35802+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
35803@@ -751,8 +751,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
35804 int i;
35805 unsigned char misc = 0;
35806 unsigned char ext_vga[6];
35807- unsigned char ext_vga_index24;
35808- unsigned char dac_index90 = 0;
35809 u8 bppshift;
35810
35811 static unsigned char dacvalue[] = {
35812@@ -803,7 +801,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
35813 option2 = 0x0000b000;
35814 break;
35815 case G200_ER:
35816- dac_index90 = 0;
35817 break;
35818 }
35819
35820@@ -852,10 +849,8 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
35821 WREG_DAC(i, dacvalue[i]);
35822 }
35823
35824- if (mdev->type == G200_ER) {
35825- WREG_DAC(0x90, dac_index90);
35826- }
35827-
35828+ if (mdev->type == G200_ER)
35829+ WREG_DAC(0x90, 0);
35830
35831 if (option)
35832 pci_write_config_dword(dev->pdev, PCI_MGA_OPTION, option);
35833@@ -952,8 +947,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
35834 if (mdev->type == G200_WB)
35835 ext_vga[1] |= 0x88;
35836
35837- ext_vga_index24 = 0x05;
35838-
35839 /* Set pixel clocks */
35840 misc = 0x2d;
35841 WREG8(MGA_MISC_OUT, misc);
35842@@ -965,7 +958,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
35843 }
35844
35845 if (mdev->type == G200_ER)
35846- WREG_ECRT(24, ext_vga_index24);
35847+ WREG_ECRT(0x24, 0x5);
35848
35849 if (mdev->type == G200_EV) {
35850 WREG_ECRT(6, 0);
35851diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
35852index 865eddf..62c4cc3 100644
35853--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
35854+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
35855@@ -1015,7 +1015,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
35856 struct bit_table {
35857 const char id;
35858 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
35859-};
35860+} __no_const;
35861
35862 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
35863
35864diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
35865index aa89eb9..d45d38b 100644
35866--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
35867+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
35868@@ -80,7 +80,7 @@ struct nouveau_drm {
35869 struct drm_global_reference mem_global_ref;
35870 struct ttm_bo_global_ref bo_global_ref;
35871 struct ttm_bo_device bdev;
35872- atomic_t validate_sequence;
35873+ atomic_unchecked_t validate_sequence;
35874 int (*move)(struct nouveau_channel *,
35875 struct ttm_buffer_object *,
35876 struct ttm_mem_reg *, struct ttm_mem_reg *);
35877diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
35878index cdb83ac..27f0a16 100644
35879--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
35880+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
35881@@ -43,7 +43,7 @@ struct nouveau_fence_priv {
35882 int (*sync)(struct nouveau_fence *, struct nouveau_channel *,
35883 struct nouveau_channel *);
35884 u32 (*read)(struct nouveau_channel *);
35885-};
35886+} __no_const;
35887
35888 #define nouveau_fence(drm) ((struct nouveau_fence_priv *)(drm)->fence)
35889
35890diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
35891index 8bf695c..9fbc90a 100644
35892--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
35893+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
35894@@ -321,7 +321,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
35895 int trycnt = 0;
35896 int ret, i;
35897
35898- sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
35899+ sequence = atomic_add_return_unchecked(1, &drm->ttm.validate_sequence);
35900 retry:
35901 if (++trycnt > 100000) {
35902 NV_ERROR(drm, "%s failed and gave up.\n", __func__);
35903diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
35904index 08214bc..9208577 100644
35905--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
35906+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
35907@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
35908 unsigned long arg)
35909 {
35910 unsigned int nr = DRM_IOCTL_NR(cmd);
35911- drm_ioctl_compat_t *fn = NULL;
35912+ drm_ioctl_compat_t fn = NULL;
35913 int ret;
35914
35915 if (nr < DRM_COMMAND_BASE)
35916diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
35917index 25d3495..d81aaf6 100644
35918--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
35919+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
35920@@ -62,7 +62,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
35921 bool can_switch;
35922
35923 spin_lock(&dev->count_lock);
35924- can_switch = (dev->open_count == 0);
35925+ can_switch = (local_read(&dev->open_count) == 0);
35926 spin_unlock(&dev->count_lock);
35927 return can_switch;
35928 }
35929diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
35930index d4660cf..70dbe65 100644
35931--- a/drivers/gpu/drm/r128/r128_cce.c
35932+++ b/drivers/gpu/drm/r128/r128_cce.c
35933@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
35934
35935 /* GH: Simple idle check.
35936 */
35937- atomic_set(&dev_priv->idle_count, 0);
35938+ atomic_set_unchecked(&dev_priv->idle_count, 0);
35939
35940 /* We don't support anything other than bus-mastering ring mode,
35941 * but the ring can be in either AGP or PCI space for the ring
35942diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
35943index 930c71b..499aded 100644
35944--- a/drivers/gpu/drm/r128/r128_drv.h
35945+++ b/drivers/gpu/drm/r128/r128_drv.h
35946@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
35947 int is_pci;
35948 unsigned long cce_buffers_offset;
35949
35950- atomic_t idle_count;
35951+ atomic_unchecked_t idle_count;
35952
35953 int page_flipping;
35954 int current_page;
35955 u32 crtc_offset;
35956 u32 crtc_offset_cntl;
35957
35958- atomic_t vbl_received;
35959+ atomic_unchecked_t vbl_received;
35960
35961 u32 color_fmt;
35962 unsigned int front_offset;
35963diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
35964index a954c54..9cc595c 100644
35965--- a/drivers/gpu/drm/r128/r128_ioc32.c
35966+++ b/drivers/gpu/drm/r128/r128_ioc32.c
35967@@ -177,7 +177,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
35968 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
35969 }
35970
35971-drm_ioctl_compat_t *r128_compat_ioctls[] = {
35972+drm_ioctl_compat_t r128_compat_ioctls[] = {
35973 [DRM_R128_INIT] = compat_r128_init,
35974 [DRM_R128_DEPTH] = compat_r128_depth,
35975 [DRM_R128_STIPPLE] = compat_r128_stipple,
35976@@ -196,18 +196,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
35977 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35978 {
35979 unsigned int nr = DRM_IOCTL_NR(cmd);
35980- drm_ioctl_compat_t *fn = NULL;
35981 int ret;
35982
35983 if (nr < DRM_COMMAND_BASE)
35984 return drm_compat_ioctl(filp, cmd, arg);
35985
35986- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls))
35987- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
35988-
35989- if (fn != NULL)
35990+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls)) {
35991+ drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
35992 ret = (*fn) (filp, cmd, arg);
35993- else
35994+ } else
35995 ret = drm_ioctl(filp, cmd, arg);
35996
35997 return ret;
35998diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
35999index 2ea4f09..d391371 100644
36000--- a/drivers/gpu/drm/r128/r128_irq.c
36001+++ b/drivers/gpu/drm/r128/r128_irq.c
36002@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
36003 if (crtc != 0)
36004 return 0;
36005
36006- return atomic_read(&dev_priv->vbl_received);
36007+ return atomic_read_unchecked(&dev_priv->vbl_received);
36008 }
36009
36010 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
36011@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
36012 /* VBLANK interrupt */
36013 if (status & R128_CRTC_VBLANK_INT) {
36014 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
36015- atomic_inc(&dev_priv->vbl_received);
36016+ atomic_inc_unchecked(&dev_priv->vbl_received);
36017 drm_handle_vblank(dev, 0);
36018 return IRQ_HANDLED;
36019 }
36020diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
36021index 19bb7e6..de7e2a2 100644
36022--- a/drivers/gpu/drm/r128/r128_state.c
36023+++ b/drivers/gpu/drm/r128/r128_state.c
36024@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
36025
36026 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
36027 {
36028- if (atomic_read(&dev_priv->idle_count) == 0)
36029+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
36030 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
36031 else
36032- atomic_set(&dev_priv->idle_count, 0);
36033+ atomic_set_unchecked(&dev_priv->idle_count, 0);
36034 }
36035
36036 #endif
36037diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
36038index 5a82b6b..9e69c73 100644
36039--- a/drivers/gpu/drm/radeon/mkregtable.c
36040+++ b/drivers/gpu/drm/radeon/mkregtable.c
36041@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
36042 regex_t mask_rex;
36043 regmatch_t match[4];
36044 char buf[1024];
36045- size_t end;
36046+ long end;
36047 int len;
36048 int done = 0;
36049 int r;
36050 unsigned o;
36051 struct offset *offset;
36052 char last_reg_s[10];
36053- int last_reg;
36054+ unsigned long last_reg;
36055
36056 if (regcomp
36057 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
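mkregtable is a build-time host tool, so these are ordinary C type corrections rather than hardening: register offsets are parsed out of hex text, and long/unsigned long match the width that strtol-family parsing actually yields, whereas funnelling the result through int or size_t can truncate or mis-sign large offsets on an LP64 host. The exact trigger is not visible in this excerpt; the general hazard looks like this:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        const char *s = "0x1fffffffc";          /* wider than 32 bits */
        int narrow = (int)strtol(s, NULL, 16);  /* truncates on LP64 */
        unsigned long wide = strtoul(s, NULL, 16);
        printf("int: %d  unsigned long: %#lx\n", narrow, wide);
        return 0;
}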
36058diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
36059index 0d6562b..a154330 100644
36060--- a/drivers/gpu/drm/radeon/radeon_device.c
36061+++ b/drivers/gpu/drm/radeon/radeon_device.c
36062@@ -969,7 +969,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
36063 bool can_switch;
36064
36065 spin_lock(&dev->count_lock);
36066- can_switch = (dev->open_count == 0);
36067+ can_switch = (local_read(&dev->open_count) == 0);
36068 spin_unlock(&dev->count_lock);
36069 return can_switch;
36070 }
36071diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
36072index e7fdf16..f4f6490 100644
36073--- a/drivers/gpu/drm/radeon/radeon_drv.h
36074+++ b/drivers/gpu/drm/radeon/radeon_drv.h
36075@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
36076
36077 /* SW interrupt */
36078 wait_queue_head_t swi_queue;
36079- atomic_t swi_emitted;
36080+ atomic_unchecked_t swi_emitted;
36081 int vblank_crtc;
36082 uint32_t irq_enable_reg;
36083 uint32_t r500_disp_irq_reg;
36084diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
36085index c180df8..5fd8186 100644
36086--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
36087+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
36088@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
36089 request = compat_alloc_user_space(sizeof(*request));
36090 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
36091 || __put_user(req32.param, &request->param)
36092- || __put_user((void __user *)(unsigned long)req32.value,
36093+ || __put_user((unsigned long)req32.value,
36094 &request->value))
36095 return -EFAULT;
36096
36097@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
36098 #define compat_radeon_cp_setparam NULL
36099 #endif /* X86_64 || IA64 */
36100
36101-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
36102+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
36103 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
36104 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
36105 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
36106@@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
36107 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
36108 {
36109 unsigned int nr = DRM_IOCTL_NR(cmd);
36110- drm_ioctl_compat_t *fn = NULL;
36111 int ret;
36112
36113 if (nr < DRM_COMMAND_BASE)
36114 return drm_compat_ioctl(filp, cmd, arg);
36115
36116- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
36117- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
36118-
36119- if (fn != NULL)
36120+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls)) {
36121+ drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
36122 ret = (*fn) (filp, cmd, arg);
36123- else
36124+ } else
36125 ret = drm_ioctl(filp, cmd, arg);
36126
36127 return ret;
36128diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
36129index e771033..a0bc6b3 100644
36130--- a/drivers/gpu/drm/radeon/radeon_irq.c
36131+++ b/drivers/gpu/drm/radeon/radeon_irq.c
36132@@ -224,8 +224,8 @@ static int radeon_emit_irq(struct drm_device * dev)
36133 unsigned int ret;
36134 RING_LOCALS;
36135
36136- atomic_inc(&dev_priv->swi_emitted);
36137- ret = atomic_read(&dev_priv->swi_emitted);
36138+ atomic_inc_unchecked(&dev_priv->swi_emitted);
36139+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
36140
36141 BEGIN_RING(4);
36142 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
36143@@ -351,7 +351,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
36144 drm_radeon_private_t *dev_priv =
36145 (drm_radeon_private_t *) dev->dev_private;
36146
36147- atomic_set(&dev_priv->swi_emitted, 0);
36148+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
36149 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
36150
36151 dev->max_vblank_count = 0x001fffff;
36152diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
36153index 8e9057b..af6dacb 100644
36154--- a/drivers/gpu/drm/radeon/radeon_state.c
36155+++ b/drivers/gpu/drm/radeon/radeon_state.c
36156@@ -2166,7 +2166,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
36157 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
36158 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
36159
36160- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
36161+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
36162 sarea_priv->nbox * sizeof(depth_boxes[0])))
36163 return -EFAULT;
36164
36165@@ -3029,7 +3029,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
36166 {
36167 drm_radeon_private_t *dev_priv = dev->dev_private;
36168 drm_radeon_getparam_t *param = data;
36169- int value;
36170+ int value = 0;
36171
36172 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
36173
36174diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
36175index 93f760e..8088227 100644
36176--- a/drivers/gpu/drm/radeon/radeon_ttm.c
36177+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
36178@@ -782,7 +782,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
36179 man->size = size >> PAGE_SHIFT;
36180 }
36181
36182-static struct vm_operations_struct radeon_ttm_vm_ops;
36183+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
36184 static const struct vm_operations_struct *ttm_vm_ops = NULL;
36185
36186 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
36187@@ -823,8 +823,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
36188 }
36189 if (unlikely(ttm_vm_ops == NULL)) {
36190 ttm_vm_ops = vma->vm_ops;
36191+ pax_open_kernel();
36192 radeon_ttm_vm_ops = *ttm_vm_ops;
36193 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
36194+ pax_close_kernel();
36195 }
36196 vma->vm_ops = &radeon_ttm_vm_ops;
36197 return 0;
36198@@ -862,28 +864,33 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
36199 sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
36200 else
36201 sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
36202- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
36203- radeon_mem_types_list[i].show = &radeon_mm_dump_table;
36204- radeon_mem_types_list[i].driver_features = 0;
36205+ pax_open_kernel();
36206+ *(const char **)&radeon_mem_types_list[i].name = radeon_mem_types_names[i];
36207+ *(void **)&radeon_mem_types_list[i].show = &radeon_mm_dump_table;
36208+ *(u32 *)&radeon_mem_types_list[i].driver_features = 0;
36209 if (i == 0)
36210- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
36211+ *(void **)&radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
36212 else
36213- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
36214-
36215+ *(void **)&radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
36216+ pax_close_kernel();
36217 }
36218 /* Add ttm page pool to debugfs */
36219 sprintf(radeon_mem_types_names[i], "ttm_page_pool");
36220- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
36221- radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
36222- radeon_mem_types_list[i].driver_features = 0;
36223- radeon_mem_types_list[i++].data = NULL;
36224+ pax_open_kernel();
36225+ *(const char **)&radeon_mem_types_list[i].name = radeon_mem_types_names[i];
36226+ *(void **)&radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
36227+ *(u32 *)&radeon_mem_types_list[i].driver_features = 0;
36228+ *(void **)&radeon_mem_types_list[i++].data = NULL;
36229+ pax_close_kernel();
36230 #ifdef CONFIG_SWIOTLB
36231 if (swiotlb_nr_tbl()) {
36232 sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
36233- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
36234- radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
36235- radeon_mem_types_list[i].driver_features = 0;
36236- radeon_mem_types_list[i++].data = NULL;
36237+ pax_open_kernel();
36238+ *(const char **)&radeon_mem_types_list[i].name = radeon_mem_types_names[i];
36239+ *(void **)&radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
36240+ *(u32 *)&radeon_mem_types_list[i].driver_features = 0;
36241+ *(void **)&radeon_mem_types_list[i++].data = NULL;
36242+ pax_close_kernel();
36243 }
36244 #endif
36245 return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);
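radeon_ttm.c shows the other half of constification: radeon_ttm_vm_ops and radeon_mem_types_list end up in memory that is read-only at runtime (the _no_const type spelling keeps the compiler happy, __read_only places the object), so the one-time setup writes must be bracketed with pax_open_kernel()/pax_close_kernel(), which briefly lift kernel write protection, while the *(void **)& casts strip the enforced const. A userspace analogue of the discipline using mprotect(); the kernel mechanism (CR0.WP toggling on x86) is different, this only models the bracketing, and it assumes 4 KiB pages:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static char *page;

static void open_rw(void)  { mprotect(page, 4096, PROT_READ | PROT_WRITE); }
static void close_ro(void) { mprotect(page, 4096, PROT_READ); }

int main(void)
{
        page = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (page == MAP_FAILED)
                return 1;

        open_rw();                              /* pax_open_kernel() */
        strcpy(page, "one-time setup");
        close_ro();                             /* pax_close_kernel() */

        puts(page);                             /* reading is always fine */
        return 0;
}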
36246diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
36247index 5706d2a..17aedaa 100644
36248--- a/drivers/gpu/drm/radeon/rs690.c
36249+++ b/drivers/gpu/drm/radeon/rs690.c
36250@@ -304,9 +304,11 @@ static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
36251 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
36252 rdev->pm.sideport_bandwidth.full)
36253 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
36254- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
36255+ read_delay_latency.full = dfixed_const(800 * 1000);
36256 read_delay_latency.full = dfixed_div(read_delay_latency,
36257 rdev->pm.igp_sideport_mclk);
36258+ a.full = dfixed_const(370);
36259+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
36260 } else {
36261 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
36262 rdev->pm.k8_bandwidth.full)
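The rs690.c change is an overflow fix in 20.12 fixed-point math. dfixed_const(v) stores v << 12 in a 32-bit word, so the largest representable integer part is 2^20 - 1 = 1048575. The old code asked for dfixed_const(370 * 800 * 1000): 296,000,000 << 12 is about 1.21e12, far past 2^32, so the constant silently wrapped before the divide. The fix computes (800,000 << 12) / mclk first (3,276,800,000 still fits in 32 bits) and then multiplies by 370 with dfixed_mul(), keeping every intermediate in range. Checking the arithmetic:

#include <stdint.h>
#include <stdio.h>

static uint32_t dfixed_const(uint32_t v) { return v << 12; }

int main(void)
{
        printf("%u\n", dfixed_const(370u * 800 * 1000)); /* wrapped garbage */
        printf("%u\n", dfixed_const(800 * 1000));        /* 3276800000: fits */
        /* patched order: (800000 << 12) / mclk first, then * 370 in
         * fixed point, as the hunk does with dfixed_mul() */
        return 0;
}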
36263diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
36264index bd2a3b4..122d9ad 100644
36265--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
36266+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
36267@@ -394,9 +394,9 @@ static int ttm_pool_get_num_unused_pages(void)
36268 static int ttm_pool_mm_shrink(struct shrinker *shrink,
36269 struct shrink_control *sc)
36270 {
36271- static atomic_t start_pool = ATOMIC_INIT(0);
36272+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
36273 unsigned i;
36274- unsigned pool_offset = atomic_add_return(1, &start_pool);
36275+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
36276 struct ttm_page_pool *pool;
36277 int shrink_pages = sc->nr_to_scan;
36278
36279diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
36280index 1eb060c..188b1fc 100644
36281--- a/drivers/gpu/drm/udl/udl_fb.c
36282+++ b/drivers/gpu/drm/udl/udl_fb.c
36283@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
36284 fb_deferred_io_cleanup(info);
36285 kfree(info->fbdefio);
36286 info->fbdefio = NULL;
36287- info->fbops->fb_mmap = udl_fb_mmap;
36288 }
36289
36290 pr_warn("released /dev/fb%d user=%d count=%d\n",
36291diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
36292index 893a650..6190d3b 100644
36293--- a/drivers/gpu/drm/via/via_drv.h
36294+++ b/drivers/gpu/drm/via/via_drv.h
36295@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
36296 typedef uint32_t maskarray_t[5];
36297
36298 typedef struct drm_via_irq {
36299- atomic_t irq_received;
36300+ atomic_unchecked_t irq_received;
36301 uint32_t pending_mask;
36302 uint32_t enable_mask;
36303 wait_queue_head_t irq_queue;
36304@@ -75,7 +75,7 @@ typedef struct drm_via_private {
36305 struct timeval last_vblank;
36306 int last_vblank_valid;
36307 unsigned usec_per_vblank;
36308- atomic_t vbl_received;
36309+ atomic_unchecked_t vbl_received;
36310 drm_via_state_t hc_state;
36311 char pci_buf[VIA_PCI_BUF_SIZE];
36312 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
36313diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
36314index ac98964..5dbf512 100644
36315--- a/drivers/gpu/drm/via/via_irq.c
36316+++ b/drivers/gpu/drm/via/via_irq.c
36317@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
36318 if (crtc != 0)
36319 return 0;
36320
36321- return atomic_read(&dev_priv->vbl_received);
36322+ return atomic_read_unchecked(&dev_priv->vbl_received);
36323 }
36324
36325 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
36326@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
36327
36328 status = VIA_READ(VIA_REG_INTERRUPT);
36329 if (status & VIA_IRQ_VBLANK_PENDING) {
36330- atomic_inc(&dev_priv->vbl_received);
36331- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
36332+ atomic_inc_unchecked(&dev_priv->vbl_received);
36333+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
36334 do_gettimeofday(&cur_vblank);
36335 if (dev_priv->last_vblank_valid) {
36336 dev_priv->usec_per_vblank =
36337@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
36338 dev_priv->last_vblank = cur_vblank;
36339 dev_priv->last_vblank_valid = 1;
36340 }
36341- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
36342+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
36343 DRM_DEBUG("US per vblank is: %u\n",
36344 dev_priv->usec_per_vblank);
36345 }
36346@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
36347
36348 for (i = 0; i < dev_priv->num_irqs; ++i) {
36349 if (status & cur_irq->pending_mask) {
36350- atomic_inc(&cur_irq->irq_received);
36351+ atomic_inc_unchecked(&cur_irq->irq_received);
36352 DRM_WAKEUP(&cur_irq->irq_queue);
36353 handled = 1;
36354 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
36355@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
36356 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
36357 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
36358 masks[irq][4]));
36359- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
36360+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
36361 } else {
36362 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
36363 (((cur_irq_sequence =
36364- atomic_read(&cur_irq->irq_received)) -
36365+ atomic_read_unchecked(&cur_irq->irq_received)) -
36366 *sequence) <= (1 << 23)));
36367 }
36368 *sequence = cur_irq_sequence;
36369@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
36370 }
36371
36372 for (i = 0; i < dev_priv->num_irqs; ++i) {
36373- atomic_set(&cur_irq->irq_received, 0);
36374+ atomic_set_unchecked(&cur_irq->irq_received, 0);
36375 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
36376 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
36377 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
36378@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
36379 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
36380 case VIA_IRQ_RELATIVE:
36381 irqwait->request.sequence +=
36382- atomic_read(&cur_irq->irq_received);
36383+ atomic_read_unchecked(&cur_irq->irq_received);
36384 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
36385 case VIA_IRQ_ABSOLUTE:
36386 break;
36387diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
36388index 13aeda7..4a952d1 100644
36389--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
36390+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
36391@@ -290,7 +290,7 @@ struct vmw_private {
36392 * Fencing and IRQs.
36393 */
36394
36395- atomic_t marker_seq;
36396+ atomic_unchecked_t marker_seq;
36397 wait_queue_head_t fence_queue;
36398 wait_queue_head_t fifo_queue;
36399 int fence_queue_waiters; /* Protected by hw_mutex */
36400diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
36401index 3eb1486..0a47ee9 100644
36402--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
36403+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
36404@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
36405 (unsigned int) min,
36406 (unsigned int) fifo->capabilities);
36407
36408- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
36409+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
36410 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
36411 vmw_marker_queue_init(&fifo->marker_queue);
36412 return vmw_fifo_send_fence(dev_priv, &dummy);
36413@@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
36414 if (reserveable)
36415 iowrite32(bytes, fifo_mem +
36416 SVGA_FIFO_RESERVED);
36417- return fifo_mem + (next_cmd >> 2);
36418+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
36419 } else {
36420 need_bounce = true;
36421 }
36422@@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
36423
36424 fm = vmw_fifo_reserve(dev_priv, bytes);
36425 if (unlikely(fm == NULL)) {
36426- *seqno = atomic_read(&dev_priv->marker_seq);
36427+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
36428 ret = -ENOMEM;
36429 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
36430 false, 3*HZ);
36431@@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
36432 }
36433
36434 do {
36435- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
36436+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
36437 } while (*seqno == 0);
36438
36439 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
36440diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
36441index 4640adb..e1384ed 100644
36442--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
36443+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
36444@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
36445 * emitted. Then the fence is stale and signaled.
36446 */
36447
36448- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
36449+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
36450 > VMW_FENCE_WRAP);
36451
36452 return ret;
36453@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
36454
36455 if (fifo_idle)
36456 down_read(&fifo_state->rwsem);
36457- signal_seq = atomic_read(&dev_priv->marker_seq);
36458+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
36459 ret = 0;
36460
36461 for (;;) {
36462diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
36463index 8a8725c..afed796 100644
36464--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
36465+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
36466@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
36467 while (!vmw_lag_lt(queue, us)) {
36468 spin_lock(&queue->lock);
36469 if (list_empty(&queue->head))
36470- seqno = atomic_read(&dev_priv->marker_seq);
36471+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
36472 else {
36473 marker = list_first_entry(&queue->head,
36474 struct vmw_marker, head);
36475diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
36476index ceb3040..6160c5c 100644
36477--- a/drivers/hid/hid-core.c
36478+++ b/drivers/hid/hid-core.c
36479@@ -2242,7 +2242,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
36480
36481 int hid_add_device(struct hid_device *hdev)
36482 {
36483- static atomic_t id = ATOMIC_INIT(0);
36484+ static atomic_unchecked_t id = ATOMIC_INIT(0);
36485 int ret;
36486
36487 if (WARN_ON(hdev->status & HID_STAT_ADDED))
36488@@ -2276,7 +2276,7 @@ int hid_add_device(struct hid_device *hdev)
36489 /* XXX hack, any other cleaner solution after the driver core
36490 * is converted to allow more than 20 bytes as the device name? */
36491 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
36492- hdev->vendor, hdev->product, atomic_inc_return(&id));
36493+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
36494
36495 hid_debug_register(hdev, dev_name(&hdev->dev));
36496 ret = device_add(&hdev->dev);
36497diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
36498index eec3291..8ed706b 100644
36499--- a/drivers/hid/hid-wiimote-debug.c
36500+++ b/drivers/hid/hid-wiimote-debug.c
36501@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
36502 else if (size == 0)
36503 return -EIO;
36504
36505- if (copy_to_user(u, buf, size))
36506+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
36507 return -EFAULT;
36508
36509 *off += size;
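wiidebug_eeprom_read() fills a fixed on-stack buf and then copies size bytes of it to userspace, where size derives from the device's reported read length; without the added bound, a size larger than the buffer discloses adjacent kernel stack memory. The shape of the fix, modelled with memcpy standing in for copy_to_user():

#include <stdio.h>
#include <string.h>

static long read_eeprom(char *user, size_t size)
{
        char buf[16] = "wiimote";
        if (size > sizeof(buf))                 /* the added check */
                return -1;                      /* -EFAULT in the kernel */
        memcpy(user, buf, size);                /* stands in for copy_to_user */
        return (long)size;
}

int main(void)
{
        char out[64];
        printf("%ld\n", read_eeprom(out, 8));   /* 8 */
        printf("%ld\n", read_eeprom(out, 64));  /* -1: would have leaked stack */
        return 0;
}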
36510diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
36511index 773a2f2..7ce08bc 100644
36512--- a/drivers/hv/channel.c
36513+++ b/drivers/hv/channel.c
36514@@ -394,8 +394,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
36515 int ret = 0;
36516 int t;
36517
36518- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
36519- atomic_inc(&vmbus_connection.next_gpadl_handle);
36520+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
36521+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
36522
36523 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
36524 if (ret)
36525diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
36526index 3648f8f..30ef30d 100644
36527--- a/drivers/hv/hv.c
36528+++ b/drivers/hv/hv.c
36529@@ -111,7 +111,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
36530 u64 output_address = (output) ? virt_to_phys(output) : 0;
36531 u32 output_address_hi = output_address >> 32;
36532 u32 output_address_lo = output_address & 0xFFFFFFFF;
36533- void *hypercall_page = hv_context.hypercall_page;
36534+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
36535
36536 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
36537 "=a"(hv_status_lo) : "d" (control_hi),
36538diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
36539index d8d1fad..b91caf7 100644
36540--- a/drivers/hv/hyperv_vmbus.h
36541+++ b/drivers/hv/hyperv_vmbus.h
36542@@ -594,7 +594,7 @@ enum vmbus_connect_state {
36543 struct vmbus_connection {
36544 enum vmbus_connect_state conn_state;
36545
36546- atomic_t next_gpadl_handle;
36547+ atomic_unchecked_t next_gpadl_handle;
36548
36549 /*
36550 * Represents channel interrupts. Each bit position represents a
36551diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
36552index 8e1a9ec..4687821 100644
36553--- a/drivers/hv/vmbus_drv.c
36554+++ b/drivers/hv/vmbus_drv.c
36555@@ -629,10 +629,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
36556 {
36557 int ret = 0;
36558
36559- static atomic_t device_num = ATOMIC_INIT(0);
36560+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
36561
36562 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
36563- atomic_inc_return(&device_num));
36564+ atomic_inc_return_unchecked(&device_num));
36565
36566 child_device_obj->device.bus = &hv_bus;
36567 child_device_obj->device.parent = &hv_acpi_dev->dev;
36568diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
36569index 1672e2a..4a6297c 100644
36570--- a/drivers/hwmon/acpi_power_meter.c
36571+++ b/drivers/hwmon/acpi_power_meter.c
36572@@ -117,7 +117,7 @@ struct sensor_template {
36573 struct device_attribute *devattr,
36574 const char *buf, size_t count);
36575 int index;
36576-};
36577+} __do_const;
36578
36579 /* Averaging interval */
36580 static int update_avg_interval(struct acpi_power_meter_resource *resource)
36581@@ -629,7 +629,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
36582 struct sensor_template *attrs)
36583 {
36584 struct device *dev = &resource->acpi_dev->dev;
36585- struct sensor_device_attribute *sensors =
36586+ sensor_device_attribute_no_const *sensors =
36587 &resource->sensors[resource->num_sensors];
36588 int res = 0;
36589
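The hwmon hunks here and below are the complement of __do_const: once the constify plugin forces every struct device_attribute and sensor_device_attribute instance read-only, the few drivers that assemble attributes dynamically at probe time need an explicitly writable twin, spelled device_attribute_no_const and friends (typedefs presumably introduced elsewhere in this patch). The split, with plain C const modelling the plugin-enforced side and invented field contents:

struct device_attribute {
        const char *name;
        int (*show)(char *buf);
};
typedef struct device_attribute device_attribute_no_const; /* writable twin */

static int show_temp(char *buf) { (void)buf; return 0; }

/* static tables can be fully const ... */
static const struct device_attribute fixed_attr = { "temp1_input", show_temp };

/* ... but attributes assembled at probe time need the mutable spelling */
static device_attribute_no_const dynamic_attr;

int main(void)
{
        dynamic_attr.name = "temp2_input";      /* legal: not const-qualified */
        dynamic_attr.show = show_temp;
        return fixed_attr.show((char *)0) + dynamic_attr.show((char *)0);
}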
36590diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
36591index b41baff..4953e4d 100644
36592--- a/drivers/hwmon/applesmc.c
36593+++ b/drivers/hwmon/applesmc.c
36594@@ -1084,7 +1084,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
36595 {
36596 struct applesmc_node_group *grp;
36597 struct applesmc_dev_attr *node;
36598- struct attribute *attr;
36599+ attribute_no_const *attr;
36600 int ret, i;
36601
36602 for (grp = groups; grp->format; grp++) {
36603diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
36604index 56dbcfb..9874bf1 100644
36605--- a/drivers/hwmon/asus_atk0110.c
36606+++ b/drivers/hwmon/asus_atk0110.c
36607@@ -152,10 +152,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
36608 struct atk_sensor_data {
36609 struct list_head list;
36610 struct atk_data *data;
36611- struct device_attribute label_attr;
36612- struct device_attribute input_attr;
36613- struct device_attribute limit1_attr;
36614- struct device_attribute limit2_attr;
36615+ device_attribute_no_const label_attr;
36616+ device_attribute_no_const input_attr;
36617+ device_attribute_no_const limit1_attr;
36618+ device_attribute_no_const limit2_attr;
36619 char label_attr_name[ATTR_NAME_SIZE];
36620 char input_attr_name[ATTR_NAME_SIZE];
36621 char limit1_attr_name[ATTR_NAME_SIZE];
36622@@ -275,7 +275,7 @@ static ssize_t atk_name_show(struct device *dev,
36623 static struct device_attribute atk_name_attr =
36624 __ATTR(name, 0444, atk_name_show, NULL);
36625
36626-static void atk_init_attribute(struct device_attribute *attr, char *name,
36627+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
36628 sysfs_show_func show)
36629 {
36630 sysfs_attr_init(&attr->attr);
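The *_no_const typedefs used throughout these hwmon hunks pair with grsecurity's constify gcc plugin: structures containing function pointers (device_attribute, sensor_device_attribute, and the __do_const-marked sensor_template above) are forced into read-only memory, and the no_const variant opts an instance back out when it must be filled in at runtime, as atk_init_attribute does. A hedged sketch of the idea using plain const, without the plugin; the typedef name is illustrative:

    #include <stdio.h>

    struct ops {
        void (*show)(void);
    };

    /* With the constify plugin, every 'struct ops' would be made const;
     * a typedef the plugin is told to skip stays writable for instances
     * that are built at runtime. */
    typedef struct ops ops_no_const;

    static void hello(void) { puts("hello"); }

    static const struct ops fixed_ops = { .show = hello }; /* .rodata */

    int main(void)
    {
        ops_no_const runtime_ops;    /* filled in late, must be writable */
        runtime_ops.show = hello;
        fixed_ops.show();
        runtime_ops.show();
        return 0;
    }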
36631diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
36632index d64923d..72591e8 100644
36633--- a/drivers/hwmon/coretemp.c
36634+++ b/drivers/hwmon/coretemp.c
36635@@ -790,7 +790,7 @@ static int __cpuinit coretemp_cpu_callback(struct notifier_block *nfb,
36636 return NOTIFY_OK;
36637 }
36638
36639-static struct notifier_block coretemp_cpu_notifier __refdata = {
36640+static struct notifier_block coretemp_cpu_notifier = {
36641 .notifier_call = coretemp_cpu_callback,
36642 };
36643
36644diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
36645index a14f634..2916ee2 100644
36646--- a/drivers/hwmon/ibmaem.c
36647+++ b/drivers/hwmon/ibmaem.c
36648@@ -925,7 +925,7 @@ static int aem_register_sensors(struct aem_data *data,
36649 struct aem_rw_sensor_template *rw)
36650 {
36651 struct device *dev = &data->pdev->dev;
36652- struct sensor_device_attribute *sensors = data->sensors;
36653+ sensor_device_attribute_no_const *sensors = data->sensors;
36654 int err;
36655
36656 /* Set up read-only sensors */
36657diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
36658index 7d19b1b..8fdaaac 100644
36659--- a/drivers/hwmon/pmbus/pmbus_core.c
36660+++ b/drivers/hwmon/pmbus/pmbus_core.c
36661@@ -811,7 +811,7 @@ static ssize_t pmbus_show_label(struct device *dev,
36662
36663 #define PMBUS_ADD_ATTR(data, _name, _idx, _mode, _type, _show, _set) \
36664 do { \
36665- struct sensor_device_attribute *a \
36666+ sensor_device_attribute_no_const *a \
36667 = &data->_type##s[data->num_##_type##s].attribute; \
36668 BUG_ON(data->num_attributes >= data->max_attributes); \
36669 sysfs_attr_init(&a->dev_attr.attr); \
36670diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
36671index 8047fed..1e956f0 100644
36672--- a/drivers/hwmon/sht15.c
36673+++ b/drivers/hwmon/sht15.c
36674@@ -169,7 +169,7 @@ struct sht15_data {
36675 int supply_uV;
36676 bool supply_uV_valid;
36677 struct work_struct update_supply_work;
36678- atomic_t interrupt_handled;
36679+ atomic_unchecked_t interrupt_handled;
36680 };
36681
36682 /**
36683@@ -512,13 +512,13 @@ static int sht15_measurement(struct sht15_data *data,
36684 return ret;
36685
36686 gpio_direction_input(data->pdata->gpio_data);
36687- atomic_set(&data->interrupt_handled, 0);
36688+ atomic_set_unchecked(&data->interrupt_handled, 0);
36689
36690 enable_irq(gpio_to_irq(data->pdata->gpio_data));
36691 if (gpio_get_value(data->pdata->gpio_data) == 0) {
36692 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
36693 /* Only relevant if the interrupt hasn't occurred. */
36694- if (!atomic_read(&data->interrupt_handled))
36695+ if (!atomic_read_unchecked(&data->interrupt_handled))
36696 schedule_work(&data->read_work);
36697 }
36698 ret = wait_event_timeout(data->wait_queue,
36699@@ -785,7 +785,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
36700
36701 /* First disable the interrupt */
36702 disable_irq_nosync(irq);
36703- atomic_inc(&data->interrupt_handled);
36704+ atomic_inc_unchecked(&data->interrupt_handled);
36705 /* Then schedule a reading work struct */
36706 if (data->state != SHT15_READING_NOTHING)
36707 schedule_work(&data->read_work);
36708@@ -807,11 +807,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
36709 * If not, then start the interrupt again - care here as could
36710 * have gone low in meantime so verify it hasn't!
36711 */
36712- atomic_set(&data->interrupt_handled, 0);
36713+ atomic_set_unchecked(&data->interrupt_handled, 0);
36714 enable_irq(gpio_to_irq(data->pdata->gpio_data));
36715 /* If still not occurred or another handler was scheduled */
36716 if (gpio_get_value(data->pdata->gpio_data)
36717- || atomic_read(&data->interrupt_handled))
36718+ || atomic_read_unchecked(&data->interrupt_handled))
36719 return;
36720 }
36721
36722diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
36723index 76f157b..9c0db1b 100644
36724--- a/drivers/hwmon/via-cputemp.c
36725+++ b/drivers/hwmon/via-cputemp.c
36726@@ -296,7 +296,7 @@ static int __cpuinit via_cputemp_cpu_callback(struct notifier_block *nfb,
36727 return NOTIFY_OK;
36728 }
36729
36730-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
36731+static struct notifier_block via_cputemp_cpu_notifier = {
36732 .notifier_call = via_cputemp_cpu_callback,
36733 };
36734
36735diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
36736index 378fcb5..5e91fa8 100644
36737--- a/drivers/i2c/busses/i2c-amd756-s4882.c
36738+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
36739@@ -43,7 +43,7 @@
36740 extern struct i2c_adapter amd756_smbus;
36741
36742 static struct i2c_adapter *s4882_adapter;
36743-static struct i2c_algorithm *s4882_algo;
36744+static i2c_algorithm_no_const *s4882_algo;
36745
36746 /* Wrapper access functions for multiplexed SMBus */
36747 static DEFINE_MUTEX(amd756_lock);
36748diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
36749index 29015eb..af2d8e9 100644
36750--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
36751+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
36752@@ -41,7 +41,7 @@
36753 extern struct i2c_adapter *nforce2_smbus;
36754
36755 static struct i2c_adapter *s4985_adapter;
36756-static struct i2c_algorithm *s4985_algo;
36757+static i2c_algorithm_no_const *s4985_algo;
36758
36759 /* Wrapper access functions for multiplexed SMBus */
36760 static DEFINE_MUTEX(nforce2_lock);
36761diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
36762index 8126824..55a2798 100644
36763--- a/drivers/ide/ide-cd.c
36764+++ b/drivers/ide/ide-cd.c
36765@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
36766 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
36767 if ((unsigned long)buf & alignment
36768 || blk_rq_bytes(rq) & q->dma_pad_mask
36769- || object_is_on_stack(buf))
36770+ || object_starts_on_stack(buf))
36771 drive->dma = 0;
36772 }
36773 }
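The ide-cd change swaps object_is_on_stack() for object_starts_on_stack(); the renamed helper makes explicit that only the buffer's starting address is tested against the task stack, which is all this DMA-alignment check needs. A rough userspace sketch of such a start-address range test, with a faked stack window since only the kernel knows the real bounds:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustration only: the kernel derives the bounds from the task's
     * thread_info; here the window is handed in explicitly. */
    static int starts_on_stack_demo(const void *obj,
                                    const void *lo, const void *hi)
    {
        uintptr_t o = (uintptr_t)obj;
        /* Only the first byte is tested; an object can still *end*
         * beyond the window, hence the "starts" in the name. */
        return o >= (uintptr_t)lo && o < (uintptr_t)hi;
    }

    int main(void)
    {
        char frame[256];             /* stand-in for a stack region */
        static char elsewhere[16];   /* clearly outside it */

        printf("%d %d\n",
               starts_on_stack_demo(frame + 16, frame, frame + sizeof(frame)),
               starts_on_stack_demo(elsewhere, frame, frame + sizeof(frame)));
        return 0;
    }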
36774diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
36775index 8848f16..f8e6dd8 100644
36776--- a/drivers/iio/industrialio-core.c
36777+++ b/drivers/iio/industrialio-core.c
36778@@ -506,7 +506,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
36779 }
36780
36781 static
36782-int __iio_device_attr_init(struct device_attribute *dev_attr,
36783+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
36784 const char *postfix,
36785 struct iio_chan_spec const *chan,
36786 ssize_t (*readfunc)(struct device *dev,
36787diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
36788index 394fea2..c833880 100644
36789--- a/drivers/infiniband/core/cm.c
36790+++ b/drivers/infiniband/core/cm.c
36791@@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
36792
36793 struct cm_counter_group {
36794 struct kobject obj;
36795- atomic_long_t counter[CM_ATTR_COUNT];
36796+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
36797 };
36798
36799 struct cm_counter_attribute {
36800@@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
36801 struct ib_mad_send_buf *msg = NULL;
36802 int ret;
36803
36804- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36805+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36806 counter[CM_REQ_COUNTER]);
36807
36808 /* Quick state check to discard duplicate REQs. */
36809@@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
36810 if (!cm_id_priv)
36811 return;
36812
36813- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36814+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36815 counter[CM_REP_COUNTER]);
36816 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
36817 if (ret)
36818@@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
36819 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
36820 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
36821 spin_unlock_irq(&cm_id_priv->lock);
36822- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36823+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36824 counter[CM_RTU_COUNTER]);
36825 goto out;
36826 }
36827@@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
36828 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
36829 dreq_msg->local_comm_id);
36830 if (!cm_id_priv) {
36831- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36832+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36833 counter[CM_DREQ_COUNTER]);
36834 cm_issue_drep(work->port, work->mad_recv_wc);
36835 return -EINVAL;
36836@@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
36837 case IB_CM_MRA_REP_RCVD:
36838 break;
36839 case IB_CM_TIMEWAIT:
36840- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36841+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36842 counter[CM_DREQ_COUNTER]);
36843 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
36844 goto unlock;
36845@@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
36846 cm_free_msg(msg);
36847 goto deref;
36848 case IB_CM_DREQ_RCVD:
36849- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36850+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36851 counter[CM_DREQ_COUNTER]);
36852 goto unlock;
36853 default:
36854@@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
36855 ib_modify_mad(cm_id_priv->av.port->mad_agent,
36856 cm_id_priv->msg, timeout)) {
36857 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
36858- atomic_long_inc(&work->port->
36859+ atomic_long_inc_unchecked(&work->port->
36860 counter_group[CM_RECV_DUPLICATES].
36861 counter[CM_MRA_COUNTER]);
36862 goto out;
36863@@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
36864 break;
36865 case IB_CM_MRA_REQ_RCVD:
36866 case IB_CM_MRA_REP_RCVD:
36867- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36868+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36869 counter[CM_MRA_COUNTER]);
36870 /* fall through */
36871 default:
36872@@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
36873 case IB_CM_LAP_IDLE:
36874 break;
36875 case IB_CM_MRA_LAP_SENT:
36876- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36877+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36878 counter[CM_LAP_COUNTER]);
36879 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
36880 goto unlock;
36881@@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
36882 cm_free_msg(msg);
36883 goto deref;
36884 case IB_CM_LAP_RCVD:
36885- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36886+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36887 counter[CM_LAP_COUNTER]);
36888 goto unlock;
36889 default:
36890@@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
36891 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
36892 if (cur_cm_id_priv) {
36893 spin_unlock_irq(&cm.lock);
36894- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36895+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36896 counter[CM_SIDR_REQ_COUNTER]);
36897 goto out; /* Duplicate message. */
36898 }
36899@@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
36900 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
36901 msg->retries = 1;
36902
36903- atomic_long_add(1 + msg->retries,
36904+ atomic_long_add_unchecked(1 + msg->retries,
36905 &port->counter_group[CM_XMIT].counter[attr_index]);
36906 if (msg->retries)
36907- atomic_long_add(msg->retries,
36908+ atomic_long_add_unchecked(msg->retries,
36909 &port->counter_group[CM_XMIT_RETRIES].
36910 counter[attr_index]);
36911
36912@@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
36913 }
36914
36915 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
36916- atomic_long_inc(&port->counter_group[CM_RECV].
36917+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
36918 counter[attr_id - CM_ATTR_ID_OFFSET]);
36919
36920 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
36921@@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
36922 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
36923
36924 return sprintf(buf, "%ld\n",
36925- atomic_long_read(&group->counter[cm_attr->index]));
36926+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
36927 }
36928
36929 static const struct sysfs_ops cm_counter_ops = {
36930diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
36931index 176c8f9..2627b62 100644
36932--- a/drivers/infiniband/core/fmr_pool.c
36933+++ b/drivers/infiniband/core/fmr_pool.c
36934@@ -98,8 +98,8 @@ struct ib_fmr_pool {
36935
36936 struct task_struct *thread;
36937
36938- atomic_t req_ser;
36939- atomic_t flush_ser;
36940+ atomic_unchecked_t req_ser;
36941+ atomic_unchecked_t flush_ser;
36942
36943 wait_queue_head_t force_wait;
36944 };
36945@@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
36946 struct ib_fmr_pool *pool = pool_ptr;
36947
36948 do {
36949- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
36950+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
36951 ib_fmr_batch_release(pool);
36952
36953- atomic_inc(&pool->flush_ser);
36954+ atomic_inc_unchecked(&pool->flush_ser);
36955 wake_up_interruptible(&pool->force_wait);
36956
36957 if (pool->flush_function)
36958@@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
36959 }
36960
36961 set_current_state(TASK_INTERRUPTIBLE);
36962- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
36963+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
36964 !kthread_should_stop())
36965 schedule();
36966 __set_current_state(TASK_RUNNING);
36967@@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
36968 pool->dirty_watermark = params->dirty_watermark;
36969 pool->dirty_len = 0;
36970 spin_lock_init(&pool->pool_lock);
36971- atomic_set(&pool->req_ser, 0);
36972- atomic_set(&pool->flush_ser, 0);
36973+ atomic_set_unchecked(&pool->req_ser, 0);
36974+ atomic_set_unchecked(&pool->flush_ser, 0);
36975 init_waitqueue_head(&pool->force_wait);
36976
36977 pool->thread = kthread_run(ib_fmr_cleanup_thread,
36978@@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
36979 }
36980 spin_unlock_irq(&pool->pool_lock);
36981
36982- serial = atomic_inc_return(&pool->req_ser);
36983+ serial = atomic_inc_return_unchecked(&pool->req_ser);
36984 wake_up_process(pool->thread);
36985
36986 if (wait_event_interruptible(pool->force_wait,
36987- atomic_read(&pool->flush_ser) - serial >= 0))
36988+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
36989 return -EINTR;
36990
36991 return 0;
36992@@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
36993 } else {
36994 list_add_tail(&fmr->list, &pool->dirty_list);
36995 if (++pool->dirty_len >= pool->dirty_watermark) {
36996- atomic_inc(&pool->req_ser);
36997+ atomic_inc_unchecked(&pool->req_ser);
36998 wake_up_process(pool->thread);
36999 }
37000 }
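req_ser and flush_ser in the fmr_pool hunk are free-running serials ordered by signed difference (flush_ser - req_ser < 0), the classic wraparound-safe comparison, so these counters must be allowed to wrap and are moved to the unchecked atomics. A small sketch of why the idiom survives the wrap, assuming two's-complement conversion as the kernel does:

    #include <stdio.h>

    /* Wraparound-safe ordering of two free-running serial counters, as
     * in ib_fmr_pool's req_ser/flush_ser.  The signed difference stays
     * meaningful while the counters are less than half the range apart. */
    static int serial_before(unsigned int a, unsigned int b)
    {
        return (int)(a - b) < 0;
    }

    int main(void)
    {
        /* Correct across the wrap: 0xFFFFFFFE is "before" 0x00000001. */
        printf("%d\n", serial_before(0xFFFFFFFEu, 0x00000001u)); /* 1 */
        printf("%d\n", serial_before(5u, 3u));                   /* 0 */
        return 0;
    }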
37001diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
37002index afd8179..598063f 100644
37003--- a/drivers/infiniband/hw/cxgb4/mem.c
37004+++ b/drivers/infiniband/hw/cxgb4/mem.c
37005@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
37006 int err;
37007 struct fw_ri_tpte tpt;
37008 u32 stag_idx;
37009- static atomic_t key;
37010+ static atomic_unchecked_t key;
37011
37012 if (c4iw_fatal_error(rdev))
37013 return -EIO;
37014@@ -139,7 +139,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
37015 if (rdev->stats.stag.cur > rdev->stats.stag.max)
37016 rdev->stats.stag.max = rdev->stats.stag.cur;
37017 mutex_unlock(&rdev->stats.lock);
37018- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
37019+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
37020 }
37021 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
37022 __func__, stag_state, type, pdid, stag_idx);
37023diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
37024index 79b3dbc..96e5fcc 100644
37025--- a/drivers/infiniband/hw/ipath/ipath_rc.c
37026+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
37027@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
37028 struct ib_atomic_eth *ateth;
37029 struct ipath_ack_entry *e;
37030 u64 vaddr;
37031- atomic64_t *maddr;
37032+ atomic64_unchecked_t *maddr;
37033 u64 sdata;
37034 u32 rkey;
37035 u8 next;
37036@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
37037 IB_ACCESS_REMOTE_ATOMIC)))
37038 goto nack_acc_unlck;
37039 /* Perform atomic OP and save result. */
37040- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
37041+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
37042 sdata = be64_to_cpu(ateth->swap_data);
37043 e = &qp->s_ack_queue[qp->r_head_ack_queue];
37044 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
37045- (u64) atomic64_add_return(sdata, maddr) - sdata :
37046+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
37047 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
37048 be64_to_cpu(ateth->compare_data),
37049 sdata);
37050diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
37051index 1f95bba..9530f87 100644
37052--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
37053+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
37054@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
37055 unsigned long flags;
37056 struct ib_wc wc;
37057 u64 sdata;
37058- atomic64_t *maddr;
37059+ atomic64_unchecked_t *maddr;
37060 enum ib_wc_status send_status;
37061
37062 /*
37063@@ -382,11 +382,11 @@ again:
37064 IB_ACCESS_REMOTE_ATOMIC)))
37065 goto acc_err;
37066 /* Perform atomic OP and save result. */
37067- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
37068+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
37069 sdata = wqe->wr.wr.atomic.compare_add;
37070 *(u64 *) sqp->s_sge.sge.vaddr =
37071 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
37072- (u64) atomic64_add_return(sdata, maddr) - sdata :
37073+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
37074 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
37075 sdata, wqe->wr.wr.atomic.swap);
37076 goto send_comp;
37077diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
37078index 9d3e5c1..d9afe4a 100644
37079--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
37080+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
37081@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
37082 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
37083 }
37084
37085-int mthca_QUERY_FW(struct mthca_dev *dev)
37086+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
37087 {
37088 struct mthca_mailbox *mailbox;
37089 u32 *outbox;
37090diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
37091index ed9a989..e0c5871 100644
37092--- a/drivers/infiniband/hw/mthca/mthca_mr.c
37093+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
37094@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
37095 return key;
37096 }
37097
37098-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
37099+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
37100 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
37101 {
37102 struct mthca_mailbox *mailbox;
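__intentional_overflow(-1) on mthca_QUERY_FW and mthca_mr_alloc is an annotation consumed by grsecurity's size_overflow gcc plugin; by convention the -1 index refers to the return value, telling the plugin not to instrument its arithmetic. A sketch of how such an annotation can degrade to a no-op so annotated code still builds without the plugin; the guard shown is an assumption, not grsecurity's actual header:

    #include <stdio.h>

    /* When the size_overflow plugin is absent the attribute must vanish,
     * so plugin-annotated code still compiles with a plain gcc.
     * (Sketch: grsecurity's real guard may differ.) */
    #ifndef __intentional_overflow
    #define __intentional_overflow(...)
    #endif

    /* Return-value arithmetic may wrap by design, so it is exempted. */
    static unsigned int __intentional_overflow(-1) next_key(unsigned int key)
    {
        return key * 2654435761u;   /* hash step that wraps on purpose */
    }

    int main(void)
    {
        printf("%u\n", next_key(0xdeadbeefu));
        return 0;
    }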
37103diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
37104index 5b152a3..c1f3e83 100644
37105--- a/drivers/infiniband/hw/nes/nes.c
37106+++ b/drivers/infiniband/hw/nes/nes.c
37107@@ -98,7 +98,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
37108 LIST_HEAD(nes_adapter_list);
37109 static LIST_HEAD(nes_dev_list);
37110
37111-atomic_t qps_destroyed;
37112+atomic_unchecked_t qps_destroyed;
37113
37114 static unsigned int ee_flsh_adapter;
37115 static unsigned int sysfs_nonidx_addr;
37116@@ -267,7 +267,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
37117 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
37118 struct nes_adapter *nesadapter = nesdev->nesadapter;
37119
37120- atomic_inc(&qps_destroyed);
37121+ atomic_inc_unchecked(&qps_destroyed);
37122
37123 /* Free the control structures */
37124
37125diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
37126index 33cc589..3bd6538 100644
37127--- a/drivers/infiniband/hw/nes/nes.h
37128+++ b/drivers/infiniband/hw/nes/nes.h
37129@@ -177,17 +177,17 @@ extern unsigned int nes_debug_level;
37130 extern unsigned int wqm_quanta;
37131 extern struct list_head nes_adapter_list;
37132
37133-extern atomic_t cm_connects;
37134-extern atomic_t cm_accepts;
37135-extern atomic_t cm_disconnects;
37136-extern atomic_t cm_closes;
37137-extern atomic_t cm_connecteds;
37138-extern atomic_t cm_connect_reqs;
37139-extern atomic_t cm_rejects;
37140-extern atomic_t mod_qp_timouts;
37141-extern atomic_t qps_created;
37142-extern atomic_t qps_destroyed;
37143-extern atomic_t sw_qps_destroyed;
37144+extern atomic_unchecked_t cm_connects;
37145+extern atomic_unchecked_t cm_accepts;
37146+extern atomic_unchecked_t cm_disconnects;
37147+extern atomic_unchecked_t cm_closes;
37148+extern atomic_unchecked_t cm_connecteds;
37149+extern atomic_unchecked_t cm_connect_reqs;
37150+extern atomic_unchecked_t cm_rejects;
37151+extern atomic_unchecked_t mod_qp_timouts;
37152+extern atomic_unchecked_t qps_created;
37153+extern atomic_unchecked_t qps_destroyed;
37154+extern atomic_unchecked_t sw_qps_destroyed;
37155 extern u32 mh_detected;
37156 extern u32 mh_pauses_sent;
37157 extern u32 cm_packets_sent;
37158@@ -196,16 +196,16 @@ extern u32 cm_packets_created;
37159 extern u32 cm_packets_received;
37160 extern u32 cm_packets_dropped;
37161 extern u32 cm_packets_retrans;
37162-extern atomic_t cm_listens_created;
37163-extern atomic_t cm_listens_destroyed;
37164+extern atomic_unchecked_t cm_listens_created;
37165+extern atomic_unchecked_t cm_listens_destroyed;
37166 extern u32 cm_backlog_drops;
37167-extern atomic_t cm_loopbacks;
37168-extern atomic_t cm_nodes_created;
37169-extern atomic_t cm_nodes_destroyed;
37170-extern atomic_t cm_accel_dropped_pkts;
37171-extern atomic_t cm_resets_recvd;
37172-extern atomic_t pau_qps_created;
37173-extern atomic_t pau_qps_destroyed;
37174+extern atomic_unchecked_t cm_loopbacks;
37175+extern atomic_unchecked_t cm_nodes_created;
37176+extern atomic_unchecked_t cm_nodes_destroyed;
37177+extern atomic_unchecked_t cm_accel_dropped_pkts;
37178+extern atomic_unchecked_t cm_resets_recvd;
37179+extern atomic_unchecked_t pau_qps_created;
37180+extern atomic_unchecked_t pau_qps_destroyed;
37181
37182 extern u32 int_mod_timer_init;
37183 extern u32 int_mod_cq_depth_256;
37184diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
37185index 22ea67e..dcbe3bc 100644
37186--- a/drivers/infiniband/hw/nes/nes_cm.c
37187+++ b/drivers/infiniband/hw/nes/nes_cm.c
37188@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
37189 u32 cm_packets_retrans;
37190 u32 cm_packets_created;
37191 u32 cm_packets_received;
37192-atomic_t cm_listens_created;
37193-atomic_t cm_listens_destroyed;
37194+atomic_unchecked_t cm_listens_created;
37195+atomic_unchecked_t cm_listens_destroyed;
37196 u32 cm_backlog_drops;
37197-atomic_t cm_loopbacks;
37198-atomic_t cm_nodes_created;
37199-atomic_t cm_nodes_destroyed;
37200-atomic_t cm_accel_dropped_pkts;
37201-atomic_t cm_resets_recvd;
37202+atomic_unchecked_t cm_loopbacks;
37203+atomic_unchecked_t cm_nodes_created;
37204+atomic_unchecked_t cm_nodes_destroyed;
37205+atomic_unchecked_t cm_accel_dropped_pkts;
37206+atomic_unchecked_t cm_resets_recvd;
37207
37208 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
37209 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
37210@@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
37211
37212 static struct nes_cm_core *g_cm_core;
37213
37214-atomic_t cm_connects;
37215-atomic_t cm_accepts;
37216-atomic_t cm_disconnects;
37217-atomic_t cm_closes;
37218-atomic_t cm_connecteds;
37219-atomic_t cm_connect_reqs;
37220-atomic_t cm_rejects;
37221+atomic_unchecked_t cm_connects;
37222+atomic_unchecked_t cm_accepts;
37223+atomic_unchecked_t cm_disconnects;
37224+atomic_unchecked_t cm_closes;
37225+atomic_unchecked_t cm_connecteds;
37226+atomic_unchecked_t cm_connect_reqs;
37227+atomic_unchecked_t cm_rejects;
37228
37229 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
37230 {
37231@@ -1272,7 +1272,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
37232 kfree(listener);
37233 listener = NULL;
37234 ret = 0;
37235- atomic_inc(&cm_listens_destroyed);
37236+ atomic_inc_unchecked(&cm_listens_destroyed);
37237 } else {
37238 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
37239 }
37240@@ -1466,7 +1466,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
37241 cm_node->rem_mac);
37242
37243 add_hte_node(cm_core, cm_node);
37244- atomic_inc(&cm_nodes_created);
37245+ atomic_inc_unchecked(&cm_nodes_created);
37246
37247 return cm_node;
37248 }
37249@@ -1524,7 +1524,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
37250 }
37251
37252 atomic_dec(&cm_core->node_cnt);
37253- atomic_inc(&cm_nodes_destroyed);
37254+ atomic_inc_unchecked(&cm_nodes_destroyed);
37255 nesqp = cm_node->nesqp;
37256 if (nesqp) {
37257 nesqp->cm_node = NULL;
37258@@ -1588,7 +1588,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
37259
37260 static void drop_packet(struct sk_buff *skb)
37261 {
37262- atomic_inc(&cm_accel_dropped_pkts);
37263+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
37264 dev_kfree_skb_any(skb);
37265 }
37266
37267@@ -1651,7 +1651,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
37268 {
37269
37270 int reset = 0; /* whether to send reset in case of err.. */
37271- atomic_inc(&cm_resets_recvd);
37272+ atomic_inc_unchecked(&cm_resets_recvd);
37273 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
37274 " refcnt=%d\n", cm_node, cm_node->state,
37275 atomic_read(&cm_node->ref_count));
37276@@ -2292,7 +2292,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
37277 rem_ref_cm_node(cm_node->cm_core, cm_node);
37278 return NULL;
37279 }
37280- atomic_inc(&cm_loopbacks);
37281+ atomic_inc_unchecked(&cm_loopbacks);
37282 loopbackremotenode->loopbackpartner = cm_node;
37283 loopbackremotenode->tcp_cntxt.rcv_wscale =
37284 NES_CM_DEFAULT_RCV_WND_SCALE;
37285@@ -2567,7 +2567,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
37286 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
37287 else {
37288 rem_ref_cm_node(cm_core, cm_node);
37289- atomic_inc(&cm_accel_dropped_pkts);
37290+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
37291 dev_kfree_skb_any(skb);
37292 }
37293 break;
37294@@ -2875,7 +2875,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
37295
37296 if ((cm_id) && (cm_id->event_handler)) {
37297 if (issue_disconn) {
37298- atomic_inc(&cm_disconnects);
37299+ atomic_inc_unchecked(&cm_disconnects);
37300 cm_event.event = IW_CM_EVENT_DISCONNECT;
37301 cm_event.status = disconn_status;
37302 cm_event.local_addr = cm_id->local_addr;
37303@@ -2897,7 +2897,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
37304 }
37305
37306 if (issue_close) {
37307- atomic_inc(&cm_closes);
37308+ atomic_inc_unchecked(&cm_closes);
37309 nes_disconnect(nesqp, 1);
37310
37311 cm_id->provider_data = nesqp;
37312@@ -3033,7 +3033,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
37313
37314 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
37315 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
37316- atomic_inc(&cm_accepts);
37317+ atomic_inc_unchecked(&cm_accepts);
37318
37319 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
37320 netdev_refcnt_read(nesvnic->netdev));
37321@@ -3228,7 +3228,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
37322 struct nes_cm_core *cm_core;
37323 u8 *start_buff;
37324
37325- atomic_inc(&cm_rejects);
37326+ atomic_inc_unchecked(&cm_rejects);
37327 cm_node = (struct nes_cm_node *)cm_id->provider_data;
37328 loopback = cm_node->loopbackpartner;
37329 cm_core = cm_node->cm_core;
37330@@ -3288,7 +3288,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
37331 ntohl(cm_id->local_addr.sin_addr.s_addr),
37332 ntohs(cm_id->local_addr.sin_port));
37333
37334- atomic_inc(&cm_connects);
37335+ atomic_inc_unchecked(&cm_connects);
37336 nesqp->active_conn = 1;
37337
37338 /* cache the cm_id in the qp */
37339@@ -3398,7 +3398,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
37340 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
37341 return err;
37342 }
37343- atomic_inc(&cm_listens_created);
37344+ atomic_inc_unchecked(&cm_listens_created);
37345 }
37346
37347 cm_id->add_ref(cm_id);
37348@@ -3499,7 +3499,7 @@ static void cm_event_connected(struct nes_cm_event *event)
37349
37350 if (nesqp->destroyed)
37351 return;
37352- atomic_inc(&cm_connecteds);
37353+ atomic_inc_unchecked(&cm_connecteds);
37354 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
37355 " local port 0x%04X. jiffies = %lu.\n",
37356 nesqp->hwqp.qp_id,
37357@@ -3679,7 +3679,7 @@ static void cm_event_reset(struct nes_cm_event *event)
37358
37359 cm_id->add_ref(cm_id);
37360 ret = cm_id->event_handler(cm_id, &cm_event);
37361- atomic_inc(&cm_closes);
37362+ atomic_inc_unchecked(&cm_closes);
37363 cm_event.event = IW_CM_EVENT_CLOSE;
37364 cm_event.status = 0;
37365 cm_event.provider_data = cm_id->provider_data;
37366@@ -3715,7 +3715,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
37367 return;
37368 cm_id = cm_node->cm_id;
37369
37370- atomic_inc(&cm_connect_reqs);
37371+ atomic_inc_unchecked(&cm_connect_reqs);
37372 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
37373 cm_node, cm_id, jiffies);
37374
37375@@ -3755,7 +3755,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
37376 return;
37377 cm_id = cm_node->cm_id;
37378
37379- atomic_inc(&cm_connect_reqs);
37380+ atomic_inc_unchecked(&cm_connect_reqs);
37381 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
37382 cm_node, cm_id, jiffies);
37383
37384diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
37385index 4166452..fc952c3 100644
37386--- a/drivers/infiniband/hw/nes/nes_mgt.c
37387+++ b/drivers/infiniband/hw/nes/nes_mgt.c
37388@@ -40,8 +40,8 @@
37389 #include "nes.h"
37390 #include "nes_mgt.h"
37391
37392-atomic_t pau_qps_created;
37393-atomic_t pau_qps_destroyed;
37394+atomic_unchecked_t pau_qps_created;
37395+atomic_unchecked_t pau_qps_destroyed;
37396
37397 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
37398 {
37399@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
37400 {
37401 struct sk_buff *skb;
37402 unsigned long flags;
37403- atomic_inc(&pau_qps_destroyed);
37404+ atomic_inc_unchecked(&pau_qps_destroyed);
37405
37406 /* Free packets that have not yet been forwarded */
37407 /* Lock is acquired by skb_dequeue when removing the skb */
37408@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
37409 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
37410 skb_queue_head_init(&nesqp->pau_list);
37411 spin_lock_init(&nesqp->pau_lock);
37412- atomic_inc(&pau_qps_created);
37413+ atomic_inc_unchecked(&pau_qps_created);
37414 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
37415 }
37416
37417diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
37418index 9542e16..a008c40 100644
37419--- a/drivers/infiniband/hw/nes/nes_nic.c
37420+++ b/drivers/infiniband/hw/nes/nes_nic.c
37421@@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
37422 target_stat_values[++index] = mh_detected;
37423 target_stat_values[++index] = mh_pauses_sent;
37424 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
37425- target_stat_values[++index] = atomic_read(&cm_connects);
37426- target_stat_values[++index] = atomic_read(&cm_accepts);
37427- target_stat_values[++index] = atomic_read(&cm_disconnects);
37428- target_stat_values[++index] = atomic_read(&cm_connecteds);
37429- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
37430- target_stat_values[++index] = atomic_read(&cm_rejects);
37431- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
37432- target_stat_values[++index] = atomic_read(&qps_created);
37433- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
37434- target_stat_values[++index] = atomic_read(&qps_destroyed);
37435- target_stat_values[++index] = atomic_read(&cm_closes);
37436+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
37437+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
37438+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
37439+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
37440+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
37441+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
37442+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
37443+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
37444+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
37445+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
37446+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
37447 target_stat_values[++index] = cm_packets_sent;
37448 target_stat_values[++index] = cm_packets_bounced;
37449 target_stat_values[++index] = cm_packets_created;
37450 target_stat_values[++index] = cm_packets_received;
37451 target_stat_values[++index] = cm_packets_dropped;
37452 target_stat_values[++index] = cm_packets_retrans;
37453- target_stat_values[++index] = atomic_read(&cm_listens_created);
37454- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
37455+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
37456+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
37457 target_stat_values[++index] = cm_backlog_drops;
37458- target_stat_values[++index] = atomic_read(&cm_loopbacks);
37459- target_stat_values[++index] = atomic_read(&cm_nodes_created);
37460- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
37461- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
37462- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
37463+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
37464+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
37465+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
37466+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
37467+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
37468 target_stat_values[++index] = nesadapter->free_4kpbl;
37469 target_stat_values[++index] = nesadapter->free_256pbl;
37470 target_stat_values[++index] = int_mod_timer_init;
37471 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
37472 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
37473 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
37474- target_stat_values[++index] = atomic_read(&pau_qps_created);
37475- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
37476+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
37477+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
37478 }
37479
37480 /**
37481diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
37482index 07e4fba..685f041 100644
37483--- a/drivers/infiniband/hw/nes/nes_verbs.c
37484+++ b/drivers/infiniband/hw/nes/nes_verbs.c
37485@@ -46,9 +46,9 @@
37486
37487 #include <rdma/ib_umem.h>
37488
37489-atomic_t mod_qp_timouts;
37490-atomic_t qps_created;
37491-atomic_t sw_qps_destroyed;
37492+atomic_unchecked_t mod_qp_timouts;
37493+atomic_unchecked_t qps_created;
37494+atomic_unchecked_t sw_qps_destroyed;
37495
37496 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
37497
37498@@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
37499 if (init_attr->create_flags)
37500 return ERR_PTR(-EINVAL);
37501
37502- atomic_inc(&qps_created);
37503+ atomic_inc_unchecked(&qps_created);
37504 switch (init_attr->qp_type) {
37505 case IB_QPT_RC:
37506 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
37507@@ -1462,7 +1462,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
37508 struct iw_cm_event cm_event;
37509 int ret = 0;
37510
37511- atomic_inc(&sw_qps_destroyed);
37512+ atomic_inc_unchecked(&sw_qps_destroyed);
37513 nesqp->destroyed = 1;
37514
37515 /* Blow away the connection if it exists. */
37516diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
37517index 4d11575..3e890e5 100644
37518--- a/drivers/infiniband/hw/qib/qib.h
37519+++ b/drivers/infiniband/hw/qib/qib.h
37520@@ -51,6 +51,7 @@
37521 #include <linux/completion.h>
37522 #include <linux/kref.h>
37523 #include <linux/sched.h>
37524+#include <linux/slab.h>
37525
37526 #include "qib_common.h"
37527 #include "qib_verbs.h"
37528diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
37529index da739d9..da1c7f4 100644
37530--- a/drivers/input/gameport/gameport.c
37531+++ b/drivers/input/gameport/gameport.c
37532@@ -487,14 +487,14 @@ EXPORT_SYMBOL(gameport_set_phys);
37533 */
37534 static void gameport_init_port(struct gameport *gameport)
37535 {
37536- static atomic_t gameport_no = ATOMIC_INIT(0);
37537+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
37538
37539 __module_get(THIS_MODULE);
37540
37541 mutex_init(&gameport->drv_mutex);
37542 device_initialize(&gameport->dev);
37543 dev_set_name(&gameport->dev, "gameport%lu",
37544- (unsigned long)atomic_inc_return(&gameport_no) - 1);
37545+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
37546 gameport->dev.bus = &gameport_bus;
37547 gameport->dev.release = gameport_release_port;
37548 if (gameport->parent)
37549diff --git a/drivers/input/input.c b/drivers/input/input.c
37550index c044699..174d71a 100644
37551--- a/drivers/input/input.c
37552+++ b/drivers/input/input.c
37553@@ -2019,7 +2019,7 @@ static void devm_input_device_unregister(struct device *dev, void *res)
37554 */
37555 int input_register_device(struct input_dev *dev)
37556 {
37557- static atomic_t input_no = ATOMIC_INIT(0);
37558+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
37559 struct input_devres *devres = NULL;
37560 struct input_handler *handler;
37561 unsigned int packet_size;
37562@@ -2074,7 +2074,7 @@ int input_register_device(struct input_dev *dev)
37563 dev->setkeycode = input_default_setkeycode;
37564
37565 dev_set_name(&dev->dev, "input%ld",
37566- (unsigned long) atomic_inc_return(&input_no) - 1);
37567+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
37568
37569 error = device_add(&dev->dev);
37570 if (error)
37571diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
37572index 04c69af..5f92d00 100644
37573--- a/drivers/input/joystick/sidewinder.c
37574+++ b/drivers/input/joystick/sidewinder.c
37575@@ -30,6 +30,7 @@
37576 #include <linux/kernel.h>
37577 #include <linux/module.h>
37578 #include <linux/slab.h>
37579+#include <linux/sched.h>
37580 #include <linux/init.h>
37581 #include <linux/input.h>
37582 #include <linux/gameport.h>
37583diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
37584index d6cbfe9..6225402 100644
37585--- a/drivers/input/joystick/xpad.c
37586+++ b/drivers/input/joystick/xpad.c
37587@@ -735,7 +735,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
37588
37589 static int xpad_led_probe(struct usb_xpad *xpad)
37590 {
37591- static atomic_t led_seq = ATOMIC_INIT(0);
37592+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
37593 long led_no;
37594 struct xpad_led *led;
37595 struct led_classdev *led_cdev;
37596@@ -748,7 +748,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
37597 if (!led)
37598 return -ENOMEM;
37599
37600- led_no = (long)atomic_inc_return(&led_seq) - 1;
37601+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
37602
37603 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
37604 led->xpad = xpad;
37605diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
37606index fe1df23..5b710f3 100644
37607--- a/drivers/input/mouse/psmouse.h
37608+++ b/drivers/input/mouse/psmouse.h
37609@@ -115,7 +115,7 @@ struct psmouse_attribute {
37610 ssize_t (*set)(struct psmouse *psmouse, void *data,
37611 const char *buf, size_t count);
37612 bool protect;
37613-};
37614+} __do_const;
37615 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
37616
37617 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
37618diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
37619index 4c842c3..590b0bf 100644
37620--- a/drivers/input/mousedev.c
37621+++ b/drivers/input/mousedev.c
37622@@ -738,7 +738,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
37623
37624 spin_unlock_irq(&client->packet_lock);
37625
37626- if (copy_to_user(buffer, data, count))
37627+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
37628 return -EFAULT;
37629
37630 return count;
37631diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
37632index 25fc597..558bf3b 100644
37633--- a/drivers/input/serio/serio.c
37634+++ b/drivers/input/serio/serio.c
37635@@ -496,7 +496,7 @@ static void serio_release_port(struct device *dev)
37636 */
37637 static void serio_init_port(struct serio *serio)
37638 {
37639- static atomic_t serio_no = ATOMIC_INIT(0);
37640+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
37641
37642 __module_get(THIS_MODULE);
37643
37644@@ -507,7 +507,7 @@ static void serio_init_port(struct serio *serio)
37645 mutex_init(&serio->drv_mutex);
37646 device_initialize(&serio->dev);
37647 dev_set_name(&serio->dev, "serio%ld",
37648- (long)atomic_inc_return(&serio_no) - 1);
37649+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
37650 serio->dev.bus = &serio_bus;
37651 serio->dev.release = serio_release_port;
37652 serio->dev.groups = serio_device_attr_groups;
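gameport, input, and serio all name devices with the same idiom: a static counter bumped with atomic_inc_return(&n) - 1 so concurrent registrations still receive unique, zero-based numbers; since a wrapped device number is harmless, the counter becomes unchecked under PAX_REFCOUNT. A C11 sketch of the numbering idiom (fetch_add returns the prior value, which equals inc_return minus one):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_ulong serio_no;   /* static, so starts at 0 */

    /* Race-free zero-based ID: every caller gets a distinct number
     * even when registrations run concurrently. */
    static unsigned long next_serio_id(void)
    {
        return atomic_fetch_add(&serio_no, 1);
    }

    int main(void)
    {
        char name[16];
        snprintf(name, sizeof(name), "serio%lu", next_serio_id());
        puts(name);                 /* "serio0" on first call */
        return 0;
    }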
37653diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
37654index ddbdaca..be18a78 100644
37655--- a/drivers/iommu/iommu.c
37656+++ b/drivers/iommu/iommu.c
37657@@ -554,7 +554,7 @@ static struct notifier_block iommu_bus_nb = {
37658 static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
37659 {
37660 bus_register_notifier(bus, &iommu_bus_nb);
37661- bus_for_each_dev(bus, NULL, ops, add_iommu_group);
37662+ bus_for_each_dev(bus, NULL, (void *)ops, add_iommu_group);
37663 }
37664
37665 /**
37666diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
37667index 89562a8..218999b 100644
37668--- a/drivers/isdn/capi/capi.c
37669+++ b/drivers/isdn/capi/capi.c
37670@@ -81,8 +81,8 @@ struct capiminor {
37671
37672 struct capi20_appl *ap;
37673 u32 ncci;
37674- atomic_t datahandle;
37675- atomic_t msgid;
37676+ atomic_unchecked_t datahandle;
37677+ atomic_unchecked_t msgid;
37678
37679 struct tty_port port;
37680 int ttyinstop;
37681@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
37682 capimsg_setu16(s, 2, mp->ap->applid);
37683 capimsg_setu8 (s, 4, CAPI_DATA_B3);
37684 capimsg_setu8 (s, 5, CAPI_RESP);
37685- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
37686+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
37687 capimsg_setu32(s, 8, mp->ncci);
37688 capimsg_setu16(s, 12, datahandle);
37689 }
37690@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
37691 mp->outbytes -= len;
37692 spin_unlock_bh(&mp->outlock);
37693
37694- datahandle = atomic_inc_return(&mp->datahandle);
37695+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
37696 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
37697 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
37698 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
37699 capimsg_setu16(skb->data, 2, mp->ap->applid);
37700 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
37701 capimsg_setu8 (skb->data, 5, CAPI_REQ);
37702- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
37703+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
37704 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
37705 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
37706 capimsg_setu16(skb->data, 16, len); /* Data length */
37707diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
37708index 67abf3f..076b3a6 100644
37709--- a/drivers/isdn/gigaset/interface.c
37710+++ b/drivers/isdn/gigaset/interface.c
37711@@ -160,9 +160,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
37712 }
37713 tty->driver_data = cs;
37714
37715- ++cs->port.count;
37716+ atomic_inc(&cs->port.count);
37717
37718- if (cs->port.count == 1) {
37719+ if (atomic_read(&cs->port.count) == 1) {
37720 tty_port_tty_set(&cs->port, tty);
37721 tty->low_latency = 1;
37722 }
37723@@ -186,9 +186,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
37724
37725 if (!cs->connected)
37726 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
37727- else if (!cs->port.count)
37728+ else if (!atomic_read(&cs->port.count))
37729 dev_warn(cs->dev, "%s: device not opened\n", __func__);
37730- else if (!--cs->port.count)
37731+ else if (!atomic_dec_return(&cs->port.count))
37732 tty_port_tty_set(&cs->port, NULL);
37733
37734 mutex_unlock(&cs->mutex);
37735diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
37736index 821f7ac..28d4030 100644
37737--- a/drivers/isdn/hardware/avm/b1.c
37738+++ b/drivers/isdn/hardware/avm/b1.c
37739@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
37740 }
37741 if (left) {
37742 if (t4file->user) {
37743- if (copy_from_user(buf, dp, left))
37744+ if (left > sizeof buf || copy_from_user(buf, dp, left))
37745 return -EFAULT;
37746 } else {
37747 memcpy(buf, dp, left);
37748@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
37749 }
37750 if (left) {
37751 if (config->user) {
37752- if (copy_from_user(buf, dp, left))
37753+ if (left > sizeof buf || copy_from_user(buf, dp, left))
37754 return -EFAULT;
37755 } else {
37756 memcpy(buf, dp, left);
37757diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
37758index e09dc8a..15e2efb 100644
37759--- a/drivers/isdn/i4l/isdn_tty.c
37760+++ b/drivers/isdn/i4l/isdn_tty.c
37761@@ -1513,9 +1513,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
37762
37763 #ifdef ISDN_DEBUG_MODEM_OPEN
37764 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
37765- port->count);
37766+ atomic_read(&port->count));
37767 #endif
37768- port->count++;
37769+ atomic_inc(&port->count);
37770 port->tty = tty;
37771 /*
37772 * Start up serial port
37773@@ -1559,7 +1559,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
37774 #endif
37775 return;
37776 }
37777- if ((tty->count == 1) && (port->count != 1)) {
37778+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
37779 /*
37780 * Uh, oh. tty->count is 1, which means that the tty
37781 * structure will be freed. Info->count should always
37782@@ -1568,15 +1568,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
37783 * serial port won't be shutdown.
37784 */
37785 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
37786- "info->count is %d\n", port->count);
37787- port->count = 1;
37788+ "info->count is %d\n", atomic_read(&port->count));
37789+ atomic_set(&port->count, 1);
37790 }
37791- if (--port->count < 0) {
37792+ if (atomic_dec_return(&port->count) < 0) {
37793 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
37794- info->line, port->count);
37795- port->count = 0;
37796+ info->line, atomic_read(&port->count));
37797+ atomic_set(&port->count, 0);
37798 }
37799- if (port->count) {
37800+ if (atomic_read(&port->count)) {
37801 #ifdef ISDN_DEBUG_MODEM_OPEN
37802 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
37803 #endif
37804@@ -1630,7 +1630,7 @@ isdn_tty_hangup(struct tty_struct *tty)
37805 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
37806 return;
37807 isdn_tty_shutdown(info);
37808- port->count = 0;
37809+ atomic_set(&port->count, 0);
37810 port->flags &= ~ASYNC_NORMAL_ACTIVE;
37811 port->tty = NULL;
37812 wake_up_interruptible(&port->open_wait);
37813@@ -1975,7 +1975,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
37814 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
37815 modem_info *info = &dev->mdm.info[i];
37816
37817- if (info->port.count == 0)
37818+ if (atomic_read(&info->port.count) == 0)
37819 continue;
37820 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
37821 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
37822diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
37823index e74df7c..03a03ba 100644
37824--- a/drivers/isdn/icn/icn.c
37825+++ b/drivers/isdn/icn/icn.c
37826@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
37827 if (count > len)
37828 count = len;
37829 if (user) {
37830- if (copy_from_user(msg, buf, count))
37831+ if (count > sizeof msg || copy_from_user(msg, buf, count))
37832 return -EFAULT;
37833 } else
37834 memcpy(msg, buf, count);
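The b1.c, icn.c, and mousedev.c hunks share one hardening pattern: a caller-influenced length is checked against the fixed on-stack buffer before copy_from_user()/copy_to_user(), so an oversized request fails with -EFAULT instead of overrunning the buffer. A userspace analogue of the pattern, with memcpy standing in for the user-copy primitives:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    /* Validate the caller-supplied length against the destination
     * *before* copying, as the patched paths now do. */
    static int bounded_copy(char *dst, size_t dst_size,
                            const char *src, size_t count)
    {
        if (count > dst_size)       /* reject, don't truncate silently */
            return -EFAULT;
        memcpy(dst, src, count);
        return 0;
    }

    int main(void)
    {
        char msg[64];
        const char payload[128] = "hello";

        printf("%d\n", bounded_copy(msg, sizeof(msg), payload, 32));  /* 0 */
        printf("%d\n", bounded_copy(msg, sizeof(msg), payload, 128)); /* -EFAULT */
        return 0;
    }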
37835diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
37836index 6a8405d..0bd1c7e 100644
37837--- a/drivers/leds/leds-clevo-mail.c
37838+++ b/drivers/leds/leds-clevo-mail.c
37839@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
37840 * detected as working, but in reality it is not) as low as
37841 * possible.
37842 */
37843-static struct dmi_system_id __initdata clevo_mail_led_dmi_table[] = {
37844+static const struct dmi_system_id __initconst clevo_mail_led_dmi_table[] = {
37845 {
37846 .callback = clevo_mail_led_dmi_callback,
37847 .ident = "Clevo D410J",
37848diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
37849index ec9b287..65c9bf4 100644
37850--- a/drivers/leds/leds-ss4200.c
37851+++ b/drivers/leds/leds-ss4200.c
37852@@ -92,7 +92,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
37853 * detected as working, but in reality it is not) as low as
37854 * possible.
37855 */
37856-static struct dmi_system_id __initdata nas_led_whitelist[] = {
37857+static const struct dmi_system_id __initconst nas_led_whitelist[] = {
37858 {
37859 .callback = ss4200_led_dmi_callback,
37860 .ident = "Intel SS4200-E",
37861diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
37862index a5ebc00..982886f 100644
37863--- a/drivers/lguest/core.c
37864+++ b/drivers/lguest/core.c
37865@@ -92,9 +92,17 @@ static __init int map_switcher(void)
37866 * it's worked so far. The end address needs +1 because __get_vm_area
37867 * allocates an extra guard page, so we need space for that.
37868 */
37869+
37870+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
37871+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
37872+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
37873+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
37874+#else
37875 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
37876 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
37877 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
37878+#endif
37879+
37880 if (!switcher_vma) {
37881 err = -ENOMEM;
37882 printk("lguest: could not map switcher pages high\n");
37883@@ -119,7 +127,7 @@ static __init int map_switcher(void)
37884 * Now the Switcher is mapped at the right address, we can't fail!
37885 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
37886 */
37887- memcpy(switcher_vma->addr, start_switcher_text,
37888+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
37889 end_switcher_text - start_switcher_text);
37890
37891 printk(KERN_INFO "lguest: mapped switcher at %p\n",
37892diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
37893index 3b62be16..e33134a 100644
37894--- a/drivers/lguest/page_tables.c
37895+++ b/drivers/lguest/page_tables.c
37896@@ -532,7 +532,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
37897 /*:*/
37898
37899 #ifdef CONFIG_X86_PAE
37900-static void release_pmd(pmd_t *spmd)
37901+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
37902 {
37903 /* If the entry's not present, there's nothing to release. */
37904 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
37905diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
37906index 4af12e1..0e89afe 100644
37907--- a/drivers/lguest/x86/core.c
37908+++ b/drivers/lguest/x86/core.c
37909@@ -59,7 +59,7 @@ static struct {
37910 /* Offset from where switcher.S was compiled to where we've copied it */
37911 static unsigned long switcher_offset(void)
37912 {
37913- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
37914+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
37915 }
37916
37917 /* This cpu's struct lguest_pages. */
37918@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
37919 * These copies are pretty cheap, so we do them unconditionally: */
37920 /* Save the current Host top-level page directory.
37921 */
37922+
37923+#ifdef CONFIG_PAX_PER_CPU_PGD
37924+ pages->state.host_cr3 = read_cr3();
37925+#else
37926 pages->state.host_cr3 = __pa(current->mm->pgd);
37927+#endif
37928+
37929 /*
37930 * Set up the Guest's page tables to see this CPU's pages (and no
37931 * other CPU's pages).
37932@@ -476,7 +482,7 @@ void __init lguest_arch_host_init(void)
37933 * compiled-in switcher code and the high-mapped copy we just made.
37934 */
37935 for (i = 0; i < IDT_ENTRIES; i++)
37936- default_idt_entries[i] += switcher_offset();
37937+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
37938
37939 /*
37940 * Set up the Switcher's per-cpu areas.
37941@@ -559,7 +565,7 @@ void __init lguest_arch_host_init(void)
37942 * it will be undisturbed when we switch. To change %cs and jump we
37943 * need this structure to feed to Intel's "lcall" instruction.
37944 */
37945- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
37946+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
37947 lguest_entry.segment = LGUEST_CS;
37948
37949 /*
37950diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
37951index 40634b0..4f5855e 100644
37952--- a/drivers/lguest/x86/switcher_32.S
37953+++ b/drivers/lguest/x86/switcher_32.S
37954@@ -87,6 +87,7 @@
37955 #include <asm/page.h>
37956 #include <asm/segment.h>
37957 #include <asm/lguest.h>
37958+#include <asm/processor-flags.h>
37959
37960 // We mark the start of the code to copy
37961 // It's placed in .text tho it's never run here
37962@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
37963 // Changes type when we load it: damn Intel!
37964 // For after we switch over our page tables
37965 // That entry will be read-only: we'd crash.
37966+
37967+#ifdef CONFIG_PAX_KERNEXEC
37968+ mov %cr0, %edx
37969+ xor $X86_CR0_WP, %edx
37970+ mov %edx, %cr0
37971+#endif
37972+
37973 movl $(GDT_ENTRY_TSS*8), %edx
37974 ltr %dx
37975
37976@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
37977 // Let's clear it again for our return.
37978 // The GDT descriptor of the Host
37979 // Points to the table after two "size" bytes
37980- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
37981+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
37982 // Clear "used" from type field (byte 5, bit 2)
37983- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
37984+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
37985+
37986+#ifdef CONFIG_PAX_KERNEXEC
37987+ mov %cr0, %eax
37988+ xor $X86_CR0_WP, %eax
37989+ mov %eax, %cr0
37990+#endif
37991
37992 // Once our page table's switched, the Guest is live!
37993 // The Host fades as we run this final step.
37994@@ -295,13 +309,12 @@ deliver_to_host:
37995 // I consulted gcc, and it gave
37996 // These instructions, which I gladly credit:
37997 leal (%edx,%ebx,8), %eax
37998- movzwl (%eax),%edx
37999- movl 4(%eax), %eax
38000- xorw %ax, %ax
38001- orl %eax, %edx
38002+ movl 4(%eax), %edx
38003+ movw (%eax), %dx
38004 // Now the address of the handler's in %edx
38005 // We call it now: its "iret" drops us home.
38006- jmp *%edx
38007+ ljmp $__KERNEL_CS, $1f
38008+1: jmp *%edx
38009
38010 // Every interrupt can come to us here
38011 // But we must truly tell each apart.
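
The paired mov/xor/mov sequences wrapped around the TSS load and the GDT byte write are the KERNEXEC idiom for briefly lifting write protection: with CR0.WP set, even ring 0 cannot write through read-only PTEs, and under KERNEXEC the GDT page is read-only. A C rendering of the same toggle, as a sketch only (the helper names are hypothetical, and interrupts are assumed off, as they are in the switcher):

#include <asm/processor-flags.h>	/* X86_CR0_WP */
#include <asm/special_insns.h>		/* native_read_cr0()/native_write_cr0() */

static inline unsigned long wp_disable(void)	/* hypothetical helper */
{
	unsigned long cr0 = native_read_cr0();

	native_write_cr0(cr0 & ~X86_CR0_WP);	/* ring 0 may now write RO pages */
	return cr0;
}

static inline void wp_restore(unsigned long cr0)
{
	native_write_cr0(cr0);			/* re-arm write protection */
}
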
38012diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
38013index 7155945..4bcc562 100644
38014--- a/drivers/md/bitmap.c
38015+++ b/drivers/md/bitmap.c
38016@@ -1779,7 +1779,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
38017 chunk_kb ? "KB" : "B");
38018 if (bitmap->storage.file) {
38019 seq_printf(seq, ", file: ");
38020- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
38021+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
38022 }
38023
38024 seq_printf(seq, "\n");
38025diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
38026index eee353d..74504c4 100644
38027--- a/drivers/md/dm-ioctl.c
38028+++ b/drivers/md/dm-ioctl.c
38029@@ -1632,7 +1632,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
38030 cmd == DM_LIST_VERSIONS_CMD)
38031 return 0;
38032
38033- if ((cmd == DM_DEV_CREATE_CMD)) {
38034+ if (cmd == DM_DEV_CREATE_CMD) {
38035 if (!*param->name) {
38036 DMWARN("name not supplied when creating device");
38037 return -EINVAL;
38038diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
38039index 7f24190..0e18099 100644
38040--- a/drivers/md/dm-raid1.c
38041+++ b/drivers/md/dm-raid1.c
38042@@ -40,7 +40,7 @@ enum dm_raid1_error {
38043
38044 struct mirror {
38045 struct mirror_set *ms;
38046- atomic_t error_count;
38047+ atomic_unchecked_t error_count;
38048 unsigned long error_type;
38049 struct dm_dev *dev;
38050 sector_t offset;
38051@@ -183,7 +183,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
38052 struct mirror *m;
38053
38054 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
38055- if (!atomic_read(&m->error_count))
38056+ if (!atomic_read_unchecked(&m->error_count))
38057 return m;
38058
38059 return NULL;
38060@@ -215,7 +215,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
38061 * simple way to tell if a device has encountered
38062 * errors.
38063 */
38064- atomic_inc(&m->error_count);
38065+ atomic_inc_unchecked(&m->error_count);
38066
38067 if (test_and_set_bit(error_type, &m->error_type))
38068 return;
38069@@ -406,7 +406,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
38070 struct mirror *m = get_default_mirror(ms);
38071
38072 do {
38073- if (likely(!atomic_read(&m->error_count)))
38074+ if (likely(!atomic_read_unchecked(&m->error_count)))
38075 return m;
38076
38077 if (m-- == ms->mirror)
38078@@ -420,7 +420,7 @@ static int default_ok(struct mirror *m)
38079 {
38080 struct mirror *default_mirror = get_default_mirror(m->ms);
38081
38082- return !atomic_read(&default_mirror->error_count);
38083+ return !atomic_read_unchecked(&default_mirror->error_count);
38084 }
38085
38086 static int mirror_available(struct mirror_set *ms, struct bio *bio)
38087@@ -557,7 +557,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
38088 */
38089 if (likely(region_in_sync(ms, region, 1)))
38090 m = choose_mirror(ms, bio->bi_sector);
38091- else if (m && atomic_read(&m->error_count))
38092+ else if (m && atomic_read_unchecked(&m->error_count))
38093 m = NULL;
38094
38095 if (likely(m))
38096@@ -924,7 +924,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
38097 }
38098
38099 ms->mirror[mirror].ms = ms;
38100- atomic_set(&(ms->mirror[mirror].error_count), 0);
38101+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
38102 ms->mirror[mirror].error_type = 0;
38103 ms->mirror[mirror].offset = offset;
38104
38105@@ -1337,7 +1337,7 @@ static void mirror_resume(struct dm_target *ti)
38106 */
38107 static char device_status_char(struct mirror *m)
38108 {
38109- if (!atomic_read(&(m->error_count)))
38110+ if (!atomic_read_unchecked(&(m->error_count)))
38111 return 'A';
38112
38113 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
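
All of the error_count conversions in this file (and in dm-stripe.c, dm.c, and the md files below) follow one rule: under PAX_REFCOUNT, atomic_t operations trap on overflow to catch reference-count wraps, so counters that are pure statistics and may legitimately saturate are moved to the unchecked variant to avoid false positives. A sketch of the split, using the accessor names from the patch (the bodies are an assumed x86 flavour):

typedef struct {
	int counter;
} atomic_unchecked_t;	/* same layout as atomic_t, no overflow trap */

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return (*(volatile const int *)&v->counter);
}

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	/* plain locked inc: no overflow check sequence is emitted here */
	asm volatile("lock; incl %0" : "+m" (v->counter));
}
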
38114diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
38115index aaecefa..23b3026 100644
38116--- a/drivers/md/dm-stripe.c
38117+++ b/drivers/md/dm-stripe.c
38118@@ -20,7 +20,7 @@ struct stripe {
38119 struct dm_dev *dev;
38120 sector_t physical_start;
38121
38122- atomic_t error_count;
38123+ atomic_unchecked_t error_count;
38124 };
38125
38126 struct stripe_c {
38127@@ -184,7 +184,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
38128 kfree(sc);
38129 return r;
38130 }
38131- atomic_set(&(sc->stripe[i].error_count), 0);
38132+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
38133 }
38134
38135 ti->private = sc;
38136@@ -325,7 +325,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
38137 DMEMIT("%d ", sc->stripes);
38138 for (i = 0; i < sc->stripes; i++) {
38139 DMEMIT("%s ", sc->stripe[i].dev->name);
38140- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
38141+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
38142 'D' : 'A';
38143 }
38144 buffer[i] = '\0';
38145@@ -370,8 +370,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
38146 */
38147 for (i = 0; i < sc->stripes; i++)
38148 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
38149- atomic_inc(&(sc->stripe[i].error_count));
38150- if (atomic_read(&(sc->stripe[i].error_count)) <
38151+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
38152+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
38153 DM_IO_ERROR_THRESHOLD)
38154 schedule_work(&sc->trigger_event);
38155 }
38156diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
38157index daf25d0..d74f49f 100644
38158--- a/drivers/md/dm-table.c
38159+++ b/drivers/md/dm-table.c
38160@@ -390,7 +390,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
38161 if (!dev_size)
38162 return 0;
38163
38164- if ((start >= dev_size) || (start + len > dev_size)) {
38165+ if ((start >= dev_size) || (len > dev_size - start)) {
38166 DMWARN("%s: %s too small for target: "
38167 "start=%llu, len=%llu, dev_size=%llu",
38168 dm_device_name(ti->table->md), bdevname(bdev, b),
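
The rewritten bounds test is the classic overflow-safe form: start + len can wrap for a huge len and slip past the old comparison, whereas len > dev_size - start cannot overflow because start < dev_size has just been established. A self-contained demonstration:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

/* returns 1 when [start, start + len) fits inside dev_size */
static int range_ok(sector_t start, sector_t len, sector_t dev_size)
{
	if (start >= dev_size)
		return 0;
	return len <= dev_size - start;	/* subtraction cannot underflow here */
}

int main(void)
{
	/* start + len wraps to 0, so the old test would have accepted this */
	printf("%d\n", range_ok(1, UINT64_MAX, 1000));	/* prints 0 */
	return 0;
}
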
38169diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
38170index 4d6e853..a234157 100644
38171--- a/drivers/md/dm-thin-metadata.c
38172+++ b/drivers/md/dm-thin-metadata.c
38173@@ -397,7 +397,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
38174 {
38175 pmd->info.tm = pmd->tm;
38176 pmd->info.levels = 2;
38177- pmd->info.value_type.context = pmd->data_sm;
38178+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
38179 pmd->info.value_type.size = sizeof(__le64);
38180 pmd->info.value_type.inc = data_block_inc;
38181 pmd->info.value_type.dec = data_block_dec;
38182@@ -416,7 +416,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
38183
38184 pmd->bl_info.tm = pmd->tm;
38185 pmd->bl_info.levels = 1;
38186- pmd->bl_info.value_type.context = pmd->data_sm;
38187+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
38188 pmd->bl_info.value_type.size = sizeof(__le64);
38189 pmd->bl_info.value_type.inc = data_block_inc;
38190 pmd->bl_info.value_type.dec = data_block_dec;
38191diff --git a/drivers/md/dm.c b/drivers/md/dm.c
38192index 0d8f086..f5a91d5 100644
38193--- a/drivers/md/dm.c
38194+++ b/drivers/md/dm.c
38195@@ -170,9 +170,9 @@ struct mapped_device {
38196 /*
38197 * Event handling.
38198 */
38199- atomic_t event_nr;
38200+ atomic_unchecked_t event_nr;
38201 wait_queue_head_t eventq;
38202- atomic_t uevent_seq;
38203+ atomic_unchecked_t uevent_seq;
38204 struct list_head uevent_list;
38205 spinlock_t uevent_lock; /* Protect access to uevent_list */
38206
38207@@ -1872,8 +1872,8 @@ static struct mapped_device *alloc_dev(int minor)
38208 rwlock_init(&md->map_lock);
38209 atomic_set(&md->holders, 1);
38210 atomic_set(&md->open_count, 0);
38211- atomic_set(&md->event_nr, 0);
38212- atomic_set(&md->uevent_seq, 0);
38213+ atomic_set_unchecked(&md->event_nr, 0);
38214+ atomic_set_unchecked(&md->uevent_seq, 0);
38215 INIT_LIST_HEAD(&md->uevent_list);
38216 spin_lock_init(&md->uevent_lock);
38217
38218@@ -2026,7 +2026,7 @@ static void event_callback(void *context)
38219
38220 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
38221
38222- atomic_inc(&md->event_nr);
38223+ atomic_inc_unchecked(&md->event_nr);
38224 wake_up(&md->eventq);
38225 }
38226
38227@@ -2683,18 +2683,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
38228
38229 uint32_t dm_next_uevent_seq(struct mapped_device *md)
38230 {
38231- return atomic_add_return(1, &md->uevent_seq);
38232+ return atomic_add_return_unchecked(1, &md->uevent_seq);
38233 }
38234
38235 uint32_t dm_get_event_nr(struct mapped_device *md)
38236 {
38237- return atomic_read(&md->event_nr);
38238+ return atomic_read_unchecked(&md->event_nr);
38239 }
38240
38241 int dm_wait_event(struct mapped_device *md, int event_nr)
38242 {
38243 return wait_event_interruptible(md->eventq,
38244- (event_nr != atomic_read(&md->event_nr)));
38245+ (event_nr != atomic_read_unchecked(&md->event_nr)));
38246 }
38247
38248 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
38249diff --git a/drivers/md/md.c b/drivers/md/md.c
38250index f363135..9b38815 100644
38251--- a/drivers/md/md.c
38252+++ b/drivers/md/md.c
38253@@ -240,10 +240,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
38254 * start build, activate spare
38255 */
38256 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
38257-static atomic_t md_event_count;
38258+static atomic_unchecked_t md_event_count;
38259 void md_new_event(struct mddev *mddev)
38260 {
38261- atomic_inc(&md_event_count);
38262+ atomic_inc_unchecked(&md_event_count);
38263 wake_up(&md_event_waiters);
38264 }
38265 EXPORT_SYMBOL_GPL(md_new_event);
38266@@ -253,7 +253,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
38267 */
38268 static void md_new_event_inintr(struct mddev *mddev)
38269 {
38270- atomic_inc(&md_event_count);
38271+ atomic_inc_unchecked(&md_event_count);
38272 wake_up(&md_event_waiters);
38273 }
38274
38275@@ -1507,7 +1507,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
38276 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
38277 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
38278 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
38279- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
38280+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
38281
38282 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
38283 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
38284@@ -1751,7 +1751,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
38285 else
38286 sb->resync_offset = cpu_to_le64(0);
38287
38288- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
38289+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
38290
38291 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
38292 sb->size = cpu_to_le64(mddev->dev_sectors);
38293@@ -2751,7 +2751,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
38294 static ssize_t
38295 errors_show(struct md_rdev *rdev, char *page)
38296 {
38297- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
38298+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
38299 }
38300
38301 static ssize_t
38302@@ -2760,7 +2760,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
38303 char *e;
38304 unsigned long n = simple_strtoul(buf, &e, 10);
38305 if (*buf && (*e == 0 || *e == '\n')) {
38306- atomic_set(&rdev->corrected_errors, n);
38307+ atomic_set_unchecked(&rdev->corrected_errors, n);
38308 return len;
38309 }
38310 return -EINVAL;
38311@@ -3210,8 +3210,8 @@ int md_rdev_init(struct md_rdev *rdev)
38312 rdev->sb_loaded = 0;
38313 rdev->bb_page = NULL;
38314 atomic_set(&rdev->nr_pending, 0);
38315- atomic_set(&rdev->read_errors, 0);
38316- atomic_set(&rdev->corrected_errors, 0);
38317+ atomic_set_unchecked(&rdev->read_errors, 0);
38318+ atomic_set_unchecked(&rdev->corrected_errors, 0);
38319
38320 INIT_LIST_HEAD(&rdev->same_set);
38321 init_waitqueue_head(&rdev->blocked_wait);
38322@@ -6987,7 +6987,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
38323
38324 spin_unlock(&pers_lock);
38325 seq_printf(seq, "\n");
38326- seq->poll_event = atomic_read(&md_event_count);
38327+ seq->poll_event = atomic_read_unchecked(&md_event_count);
38328 return 0;
38329 }
38330 if (v == (void*)2) {
38331@@ -7090,7 +7090,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
38332 return error;
38333
38334 seq = file->private_data;
38335- seq->poll_event = atomic_read(&md_event_count);
38336+ seq->poll_event = atomic_read_unchecked(&md_event_count);
38337 return error;
38338 }
38339
38340@@ -7104,7 +7104,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
38341 /* always allow read */
38342 mask = POLLIN | POLLRDNORM;
38343
38344- if (seq->poll_event != atomic_read(&md_event_count))
38345+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
38346 mask |= POLLERR | POLLPRI;
38347 return mask;
38348 }
38349@@ -7148,7 +7148,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
38350 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
38351 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
38352 (int)part_stat_read(&disk->part0, sectors[1]) -
38353- atomic_read(&disk->sync_io);
38354+ atomic_read_unchecked(&disk->sync_io);
38355 /* sync IO will cause sync_io to increase before the disk_stats
38356 * as sync_io is counted when a request starts, and
38357 * disk_stats is counted when it completes.
38358diff --git a/drivers/md/md.h b/drivers/md/md.h
38359index eca59c3..7c42285 100644
38360--- a/drivers/md/md.h
38361+++ b/drivers/md/md.h
38362@@ -94,13 +94,13 @@ struct md_rdev {
38363 * only maintained for arrays that
38364 * support hot removal
38365 */
38366- atomic_t read_errors; /* number of consecutive read errors that
38367+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
38368 * we have tried to ignore.
38369 */
38370 struct timespec last_read_error; /* monotonic time since our
38371 * last read error
38372 */
38373- atomic_t corrected_errors; /* number of corrected read errors,
38374+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
38375 * for reporting to userspace and storing
38376 * in superblock.
38377 */
38378@@ -434,7 +434,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
38379
38380 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
38381 {
38382- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
38383+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
38384 }
38385
38386 struct md_personality
38387diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
38388index 1cbfc6b..56e1dbb 100644
38389--- a/drivers/md/persistent-data/dm-space-map.h
38390+++ b/drivers/md/persistent-data/dm-space-map.h
38391@@ -60,6 +60,7 @@ struct dm_space_map {
38392 int (*root_size)(struct dm_space_map *sm, size_t *result);
38393 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
38394 };
38395+typedef struct dm_space_map __no_const dm_space_map_no_const;
38396
38397 /*----------------------------------------------------------------*/
38398
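
The typedef is the escape hatch for grsecurity's constify gcc plugin, which makes ops-style structures (ones consisting of function pointers) read-only at compile time; dm_space_map is stored into a writable context slot in dm-thin-metadata.c above, so those sites cast through the no_const alias instead. A minimal sketch of how the attribute is assumed to be wired up:

#ifdef CONSTIFY_PLUGIN
#define __no_const __attribute__((no_const))	/* plugin-defined attribute */
#else
#define __no_const
#endif

/* an ops struct the plugin would otherwise force const everywhere */
typedef struct dm_space_map __no_const dm_space_map_no_const;
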
38399diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
38400index 75b1f89..00ba344 100644
38401--- a/drivers/md/raid1.c
38402+++ b/drivers/md/raid1.c
38403@@ -1819,7 +1819,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
38404 if (r1_sync_page_io(rdev, sect, s,
38405 bio->bi_io_vec[idx].bv_page,
38406 READ) != 0)
38407- atomic_add(s, &rdev->corrected_errors);
38408+ atomic_add_unchecked(s, &rdev->corrected_errors);
38409 }
38410 sectors -= s;
38411 sect += s;
38412@@ -2041,7 +2041,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
38413 test_bit(In_sync, &rdev->flags)) {
38414 if (r1_sync_page_io(rdev, sect, s,
38415 conf->tmppage, READ)) {
38416- atomic_add(s, &rdev->corrected_errors);
38417+ atomic_add_unchecked(s, &rdev->corrected_errors);
38418 printk(KERN_INFO
38419 "md/raid1:%s: read error corrected "
38420 "(%d sectors at %llu on %s)\n",
38421diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
38422index 8d925dc..11d674f 100644
38423--- a/drivers/md/raid10.c
38424+++ b/drivers/md/raid10.c
38425@@ -1878,7 +1878,7 @@ static void end_sync_read(struct bio *bio, int error)
38426 /* The write handler will notice the lack of
38427 * R10BIO_Uptodate and record any errors etc
38428 */
38429- atomic_add(r10_bio->sectors,
38430+ atomic_add_unchecked(r10_bio->sectors,
38431 &conf->mirrors[d].rdev->corrected_errors);
38432
38433 /* for reconstruct, we always reschedule after a read.
38434@@ -2227,7 +2227,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
38435 {
38436 struct timespec cur_time_mon;
38437 unsigned long hours_since_last;
38438- unsigned int read_errors = atomic_read(&rdev->read_errors);
38439+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
38440
38441 ktime_get_ts(&cur_time_mon);
38442
38443@@ -2249,9 +2249,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
38444 * overflowing the shift of read_errors by hours_since_last.
38445 */
38446 if (hours_since_last >= 8 * sizeof(read_errors))
38447- atomic_set(&rdev->read_errors, 0);
38448+ atomic_set_unchecked(&rdev->read_errors, 0);
38449 else
38450- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
38451+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
38452 }
38453
38454 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
38455@@ -2305,8 +2305,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
38456 return;
38457
38458 check_decay_read_errors(mddev, rdev);
38459- atomic_inc(&rdev->read_errors);
38460- if (atomic_read(&rdev->read_errors) > max_read_errors) {
38461+ atomic_inc_unchecked(&rdev->read_errors);
38462+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
38463 char b[BDEVNAME_SIZE];
38464 bdevname(rdev->bdev, b);
38465
38466@@ -2314,7 +2314,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
38467 "md/raid10:%s: %s: Raid device exceeded "
38468 "read_error threshold [cur %d:max %d]\n",
38469 mdname(mddev), b,
38470- atomic_read(&rdev->read_errors), max_read_errors);
38471+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
38472 printk(KERN_NOTICE
38473 "md/raid10:%s: %s: Failing raid device\n",
38474 mdname(mddev), b);
38475@@ -2469,7 +2469,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
38476 sect +
38477 choose_data_offset(r10_bio, rdev)),
38478 bdevname(rdev->bdev, b));
38479- atomic_add(s, &rdev->corrected_errors);
38480+ atomic_add_unchecked(s, &rdev->corrected_errors);
38481 }
38482
38483 rdev_dec_pending(rdev, mddev);
38484diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
38485index 94ce78e..df99e24 100644
38486--- a/drivers/md/raid5.c
38487+++ b/drivers/md/raid5.c
38488@@ -1800,21 +1800,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
38489 mdname(conf->mddev), STRIPE_SECTORS,
38490 (unsigned long long)s,
38491 bdevname(rdev->bdev, b));
38492- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
38493+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
38494 clear_bit(R5_ReadError, &sh->dev[i].flags);
38495 clear_bit(R5_ReWrite, &sh->dev[i].flags);
38496 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
38497 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
38498
38499- if (atomic_read(&rdev->read_errors))
38500- atomic_set(&rdev->read_errors, 0);
38501+ if (atomic_read_unchecked(&rdev->read_errors))
38502+ atomic_set_unchecked(&rdev->read_errors, 0);
38503 } else {
38504 const char *bdn = bdevname(rdev->bdev, b);
38505 int retry = 0;
38506 int set_bad = 0;
38507
38508 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
38509- atomic_inc(&rdev->read_errors);
38510+ atomic_inc_unchecked(&rdev->read_errors);
38511 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
38512 printk_ratelimited(
38513 KERN_WARNING
38514@@ -1842,7 +1842,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
38515 mdname(conf->mddev),
38516 (unsigned long long)s,
38517 bdn);
38518- } else if (atomic_read(&rdev->read_errors)
38519+ } else if (atomic_read_unchecked(&rdev->read_errors)
38520 > conf->max_nr_stripes)
38521 printk(KERN_WARNING
38522 "md/raid:%s: Too many read errors, failing device %s.\n",
38523diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
38524index d33101a..6b13069 100644
38525--- a/drivers/media/dvb-core/dvbdev.c
38526+++ b/drivers/media/dvb-core/dvbdev.c
38527@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
38528 const struct dvb_device *template, void *priv, int type)
38529 {
38530 struct dvb_device *dvbdev;
38531- struct file_operations *dvbdevfops;
38532+ file_operations_no_const *dvbdevfops;
38533 struct device *clsdev;
38534 int minor;
38535 int id;
38536diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
38537index 404f63a..4796533 100644
38538--- a/drivers/media/dvb-frontends/dib3000.h
38539+++ b/drivers/media/dvb-frontends/dib3000.h
38540@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
38541 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
38542 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
38543 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
38544-};
38545+} __no_const;
38546
38547 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
38548 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
38549diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
38550index bc78354..42c9459 100644
38551--- a/drivers/media/pci/cx88/cx88-video.c
38552+++ b/drivers/media/pci/cx88/cx88-video.c
38553@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
38554
38555 /* ------------------------------------------------------------------ */
38556
38557-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
38558-static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
38559-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
38560+static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
38561+static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
38562+static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
38563
38564 module_param_array(video_nr, int, NULL, 0444);
38565 module_param_array(vbi_nr, int, NULL, 0444);
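
The cx88 change is a plain type fix: module_param_array(name, int, ...) reads and writes the backing storage through int accessors, so declaring the arrays unsigned int mismatches the declared parameter type even though the sizes agree. The corrected shape, as a fragment (UNSET and the board count stand in for the driver's values):

#include <linux/moduleparam.h>

#define CX88_MAXBOARDS 8
#define UNSET (-1)

static int video_nr[CX88_MAXBOARDS] = { [0 ... (CX88_MAXBOARDS - 1)] = UNSET };
module_param_array(video_nr, int, NULL, 0444);
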
38566diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
38567index 8e9a668..78d6310 100644
38568--- a/drivers/media/platform/omap/omap_vout.c
38569+++ b/drivers/media/platform/omap/omap_vout.c
38570@@ -63,7 +63,6 @@ enum omap_vout_channels {
38571 OMAP_VIDEO2,
38572 };
38573
38574-static struct videobuf_queue_ops video_vbq_ops;
38575 /* Variables configurable through module params*/
38576 static u32 video1_numbuffers = 3;
38577 static u32 video2_numbuffers = 3;
38578@@ -1012,6 +1011,12 @@ static int omap_vout_open(struct file *file)
38579 {
38580 struct videobuf_queue *q;
38581 struct omap_vout_device *vout = NULL;
38582+ static struct videobuf_queue_ops video_vbq_ops = {
38583+ .buf_setup = omap_vout_buffer_setup,
38584+ .buf_prepare = omap_vout_buffer_prepare,
38585+ .buf_release = omap_vout_buffer_release,
38586+ .buf_queue = omap_vout_buffer_queue,
38587+ };
38588
38589 vout = video_drvdata(file);
38590 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
38591@@ -1029,10 +1034,6 @@ static int omap_vout_open(struct file *file)
38592 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
38593
38594 q = &vout->vbq;
38595- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
38596- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
38597- video_vbq_ops.buf_release = omap_vout_buffer_release;
38598- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
38599 spin_lock_init(&vout->vbq_lock);
38600
38601 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
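
Moving video_vbq_ops into the function as a static with designated initializers removes the last runtime writes to the ops table, which is what lets the constify plugin (see the __no_const notes above) treat such tables as read-only data. The pattern in miniature, with placeholder names:

struct vbq_ops {
	void (*buf_setup)(void);
};

static void my_buf_setup(void)
{
}

static void my_open(void)
{
	/* filled in once at compile time instead of patched at each open() */
	static struct vbq_ops ops = {
		.buf_setup = my_buf_setup,
	};

	ops.buf_setup();	/* the real code passes &ops to the queue init */
}
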
38602diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
38603index b671e20..34088b7 100644
38604--- a/drivers/media/platform/s5p-tv/mixer.h
38605+++ b/drivers/media/platform/s5p-tv/mixer.h
38606@@ -155,7 +155,7 @@ struct mxr_layer {
38607 /** layer index (unique identifier) */
38608 int idx;
38609 /** callbacks for layer methods */
38610- struct mxr_layer_ops ops;
38611+ struct mxr_layer_ops *ops;
38612 /** format array */
38613 const struct mxr_format **fmt_array;
38614 /** size of format array */
38615diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
38616index b93a21f..2535195 100644
38617--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
38618+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
38619@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
38620 {
38621 struct mxr_layer *layer;
38622 int ret;
38623- struct mxr_layer_ops ops = {
38624+ static struct mxr_layer_ops ops = {
38625 .release = mxr_graph_layer_release,
38626 .buffer_set = mxr_graph_buffer_set,
38627 .stream_set = mxr_graph_stream_set,
38628diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
38629index 3b1670a..595c939 100644
38630--- a/drivers/media/platform/s5p-tv/mixer_reg.c
38631+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
38632@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
38633 layer->update_buf = next;
38634 }
38635
38636- layer->ops.buffer_set(layer, layer->update_buf);
38637+ layer->ops->buffer_set(layer, layer->update_buf);
38638
38639 if (done && done != layer->shadow_buf)
38640 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
38641diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
38642index 1f3b743..e839271 100644
38643--- a/drivers/media/platform/s5p-tv/mixer_video.c
38644+++ b/drivers/media/platform/s5p-tv/mixer_video.c
38645@@ -208,7 +208,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
38646 layer->geo.src.height = layer->geo.src.full_height;
38647
38648 mxr_geometry_dump(mdev, &layer->geo);
38649- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
38650+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
38651 mxr_geometry_dump(mdev, &layer->geo);
38652 }
38653
38654@@ -226,7 +226,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
38655 layer->geo.dst.full_width = mbus_fmt.width;
38656 layer->geo.dst.full_height = mbus_fmt.height;
38657 layer->geo.dst.field = mbus_fmt.field;
38658- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
38659+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
38660
38661 mxr_geometry_dump(mdev, &layer->geo);
38662 }
38663@@ -332,7 +332,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
38664 /* set source size to highest accepted value */
38665 geo->src.full_width = max(geo->dst.full_width, pix->width);
38666 geo->src.full_height = max(geo->dst.full_height, pix->height);
38667- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
38668+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
38669 mxr_geometry_dump(mdev, &layer->geo);
38670 /* set cropping to total visible screen */
38671 geo->src.width = pix->width;
38672@@ -340,12 +340,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
38673 geo->src.x_offset = 0;
38674 geo->src.y_offset = 0;
38675 /* assure consistency of geometry */
38676- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
38677+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
38678 mxr_geometry_dump(mdev, &layer->geo);
38679 /* set full size to lowest possible value */
38680 geo->src.full_width = 0;
38681 geo->src.full_height = 0;
38682- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
38683+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
38684 mxr_geometry_dump(mdev, &layer->geo);
38685
38686 /* returning results */
38687@@ -472,7 +472,7 @@ static int mxr_s_selection(struct file *file, void *fh,
38688 target->width = s->r.width;
38689 target->height = s->r.height;
38690
38691- layer->ops.fix_geometry(layer, stage, s->flags);
38692+ layer->ops->fix_geometry(layer, stage, s->flags);
38693
38694 /* retrieve update selection rectangle */
38695 res.left = target->x_offset;
38696@@ -937,13 +937,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
38697 mxr_output_get(mdev);
38698
38699 mxr_layer_update_output(layer);
38700- layer->ops.format_set(layer);
38701+ layer->ops->format_set(layer);
38702 /* enabling layer in hardware */
38703 spin_lock_irqsave(&layer->enq_slock, flags);
38704 layer->state = MXR_LAYER_STREAMING;
38705 spin_unlock_irqrestore(&layer->enq_slock, flags);
38706
38707- layer->ops.stream_set(layer, MXR_ENABLE);
38708+ layer->ops->stream_set(layer, MXR_ENABLE);
38709 mxr_streamer_get(mdev);
38710
38711 return 0;
38712@@ -1013,7 +1013,7 @@ static int stop_streaming(struct vb2_queue *vq)
38713 spin_unlock_irqrestore(&layer->enq_slock, flags);
38714
38715 /* disabling layer in hardware */
38716- layer->ops.stream_set(layer, MXR_DISABLE);
38717+ layer->ops->stream_set(layer, MXR_DISABLE);
38718 /* remove one streamer */
38719 mxr_streamer_put(mdev);
38720 /* allow changes in output configuration */
38721@@ -1052,8 +1052,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
38722
38723 void mxr_layer_release(struct mxr_layer *layer)
38724 {
38725- if (layer->ops.release)
38726- layer->ops.release(layer);
38727+ if (layer->ops->release)
38728+ layer->ops->release(layer);
38729 }
38730
38731 void mxr_base_layer_release(struct mxr_layer *layer)
38732@@ -1079,7 +1079,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
38733
38734 layer->mdev = mdev;
38735 layer->idx = idx;
38736- layer->ops = *ops;
38737+ layer->ops = ops;
38738
38739 spin_lock_init(&layer->enq_slock);
38740 INIT_LIST_HEAD(&layer->enq_list);
38741diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
38742index 3d13a63..da31bf1 100644
38743--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
38744+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
38745@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
38746 {
38747 struct mxr_layer *layer;
38748 int ret;
38749- struct mxr_layer_ops ops = {
38750+ static struct mxr_layer_ops ops = {
38751 .release = mxr_vp_layer_release,
38752 .buffer_set = mxr_vp_buffer_set,
38753 .stream_set = mxr_vp_stream_set,
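
The whole s5p-tv series above is one refactor: each mxr_layer used to embed a memberwise copy of mxr_layer_ops (layer->ops = *ops), forcing a writable per-instance copy; storing a pointer to a single static table instead lets the table itself be protected, at the cost of rewriting every call site from layer->ops.fn() to layer->ops->fn(). A sketch with illustrative names:

struct layer_ops {
	void (*stream_set)(int enable);
};

struct layer {
	const struct layer_ops *ops;	/* was: struct layer_ops ops; */
};

static void vp_stream_set(int enable)
{
	(void)enable;
}

static struct layer_ops vp_ops = {	/* one shared, constifiable table */
	.stream_set = vp_stream_set,
};

static void layer_create(struct layer *l)
{
	l->ops = &vp_ops;	/* was: l->ops = *ops; (a struct copy) */
}
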
38754diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
38755index 643d80a..56bb96b 100644
38756--- a/drivers/media/radio/radio-cadet.c
38757+++ b/drivers/media/radio/radio-cadet.c
38758@@ -302,6 +302,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
38759 unsigned char readbuf[RDS_BUFFER];
38760 int i = 0;
38761
38762+ if (count > RDS_BUFFER)
38763+ return -EFAULT;
38764 mutex_lock(&dev->lock);
38765 if (dev->rdsstat == 0)
38766 cadet_start_rds(dev);
38767@@ -317,7 +319,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
38768 while (i < count && dev->rdsin != dev->rdsout)
38769 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
38770
38771- if (i && copy_to_user(data, readbuf, i))
38772+ if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
38773 i = -EFAULT;
38774 unlock:
38775 mutex_unlock(&dev->lock);
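
In cadet_read, count comes straight from userspace; without the new cap, a count larger than RDS_BUFFER lets the fill loop index i run past the 256-byte stack buffer. The added checks reject oversized requests up front and re-verify i before copy_to_user(). A userspace model of the fixed flow:

#include <stdio.h>
#include <string.h>

#define RDS_BUFFER 256

/* cap the caller-supplied count against the fixed buffer before the
 * fill loop can overrun it (the patch returns -EFAULT for this case;
 * -EINVAL would also be defensible) */
static long model_read(char *dst, size_t count,
		       const unsigned char *ring, size_t avail)
{
	unsigned char readbuf[RDS_BUFFER];
	size_t i = 0;

	if (count > sizeof(readbuf))
		return -1;

	while (i < count && i < avail) {
		readbuf[i] = ring[i];
		i++;
	}

	memcpy(dst, readbuf, i);
	return (long)i;
}

int main(void)
{
	unsigned char ring[4] = "abc";
	char out[4];

	printf("%ld\n", model_read(out, sizeof(out), ring, 3));	/* 3 */
	printf("%ld\n", model_read(out, 100000, ring, 3));	/* -1 */
	return 0;
}
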
38776diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
38777index 3940bb0..fb3952a 100644
38778--- a/drivers/media/usb/dvb-usb/cxusb.c
38779+++ b/drivers/media/usb/dvb-usb/cxusb.c
38780@@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
38781
38782 struct dib0700_adapter_state {
38783 int (*set_param_save) (struct dvb_frontend *);
38784-};
38785+} __no_const;
38786
38787 static int dib7070_set_param_override(struct dvb_frontend *fe)
38788 {
38789diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
38790index 9382895..ac8093c 100644
38791--- a/drivers/media/usb/dvb-usb/dw2102.c
38792+++ b/drivers/media/usb/dvb-usb/dw2102.c
38793@@ -95,7 +95,7 @@ struct su3000_state {
38794
38795 struct s6x0_state {
38796 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
38797-};
38798+} __no_const;
38799
38800 /* debug */
38801 static int dvb_usb_dw2102_debug;
38802diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
38803index aa6e7c7..4cd8061 100644
38804--- a/drivers/media/v4l2-core/v4l2-ioctl.c
38805+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
38806@@ -1923,7 +1923,8 @@ struct v4l2_ioctl_info {
38807 struct file *file, void *fh, void *p);
38808 } u;
38809 void (*debug)(const void *arg, bool write_only);
38810-};
38811+} __do_const;
38812+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
38813
38814 /* This control needs a priority check */
38815 #define INFO_FL_PRIO (1 << 0)
38816@@ -2108,7 +2109,7 @@ static long __video_do_ioctl(struct file *file,
38817 struct video_device *vfd = video_devdata(file);
38818 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
38819 bool write_only = false;
38820- struct v4l2_ioctl_info default_info;
38821+ v4l2_ioctl_info_no_const default_info;
38822 const struct v4l2_ioctl_info *info;
38823 void *fh = file->private_data;
38824 struct v4l2_fh *vfh = NULL;
38825diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c
38826index 29b2172..a7c5b31 100644
38827--- a/drivers/memstick/host/r592.c
38828+++ b/drivers/memstick/host/r592.c
38829@@ -454,7 +454,7 @@ static int r592_transfer_fifo_pio(struct r592_device *dev)
38830 /* Executes one TPC (data is read/written from small or large fifo) */
38831 static void r592_execute_tpc(struct r592_device *dev)
38832 {
38833- bool is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS;
38834+ bool is_write;
38835 int len, error;
38836 u32 status, reg;
38837
38838@@ -463,6 +463,7 @@ static void r592_execute_tpc(struct r592_device *dev)
38839 return;
38840 }
38841
38842+ is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS;
38843 len = dev->req->long_data ?
38844 dev->req->sg.length : dev->req->data_len;
38845
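
The r592 change is ordering, not logic: is_write was initialized at its declaration by dereferencing dev->req, before the function's validity check on the request had run. Deferring the assignment until after the check removes the use-before-validate. The hazard in miniature:

struct mem_request {
	int tpc;
};

static void execute_tpc(struct mem_request *req)
{
	int is_write;

	if (!req)		/* validate first ... */
		return;

	is_write = req->tpc >= 4;	/* ... dereference after */
	(void)is_write;
}
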
38846diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
38847index fb69baa..3aeea2e 100644
38848--- a/drivers/message/fusion/mptbase.c
38849+++ b/drivers/message/fusion/mptbase.c
38850@@ -6755,8 +6755,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
38851 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
38852 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
38853
38854+#ifdef CONFIG_GRKERNSEC_HIDESYM
38855+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
38856+#else
38857 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
38858 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
38859+#endif
38860+
38861 /*
38862 * Rounding UP to nearest 4-kB boundary here...
38863 */
38864@@ -6769,7 +6774,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
38865 ioc->facts.GlobalCredits);
38866
38867 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
38868+#ifdef CONFIG_GRKERNSEC_HIDESYM
38869+ NULL, NULL);
38870+#else
38871 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
38872+#endif
38873 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
38874 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
38875 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
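
GRKERNSEC_HIDESYM censors kernel addresses in world-readable /proc output because leaked pointers defeat randomization-based defenses; these hunks simply print NULL under that configuration. Mainline's analogue is the %pK format specifier gated by the kptr_restrict sysctl, roughly:

#include <linux/seq_file.h>

/* sketch only: %pK prints zeros for readers lacking CAP_SYSLOG when
 * kptr_restrict is enabled, giving a similar per-reader effect */
static void show_request_frames(struct seq_file *m, const void *req_frames)
{
	seq_printf(m, "  RequestFrames @ %pK\n", req_frames);
}
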
38876diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
38877index fa43c39..daeb158 100644
38878--- a/drivers/message/fusion/mptsas.c
38879+++ b/drivers/message/fusion/mptsas.c
38880@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
38881 return 0;
38882 }
38883
38884+static inline void
38885+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
38886+{
38887+ if (phy_info->port_details) {
38888+ phy_info->port_details->rphy = rphy;
38889+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
38890+ ioc->name, rphy));
38891+ }
38892+
38893+ if (rphy) {
38894+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
38895+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
38896+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
38897+ ioc->name, rphy, rphy->dev.release));
38898+ }
38899+}
38900+
38901 /* no mutex */
38902 static void
38903 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
38904@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
38905 return NULL;
38906 }
38907
38908-static inline void
38909-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
38910-{
38911- if (phy_info->port_details) {
38912- phy_info->port_details->rphy = rphy;
38913- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
38914- ioc->name, rphy));
38915- }
38916-
38917- if (rphy) {
38918- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
38919- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
38920- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
38921- ioc->name, rphy, rphy->dev.release));
38922- }
38923-}
38924-
38925 static inline struct sas_port *
38926 mptsas_get_port(struct mptsas_phyinfo *phy_info)
38927 {
38928diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
38929index 164afa7..b6b2e74 100644
38930--- a/drivers/message/fusion/mptscsih.c
38931+++ b/drivers/message/fusion/mptscsih.c
38932@@ -1271,15 +1271,16 @@ mptscsih_info(struct Scsi_Host *SChost)
38933
38934 h = shost_priv(SChost);
38935
38936- if (h) {
38937- if (h->info_kbuf == NULL)
38938- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
38939- return h->info_kbuf;
38940- h->info_kbuf[0] = '\0';
38941+ if (!h)
38942+ return NULL;
38943
38944- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
38945- h->info_kbuf[size-1] = '\0';
38946- }
38947+ if (h->info_kbuf == NULL)
38948+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
38949+ return h->info_kbuf;
38950+ h->info_kbuf[0] = '\0';
38951+
38952+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
38953+ h->info_kbuf[size-1] = '\0';
38954
38955 return h->info_kbuf;
38956 }
38957diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
38958index 8001aa6..b137580 100644
38959--- a/drivers/message/i2o/i2o_proc.c
38960+++ b/drivers/message/i2o/i2o_proc.c
38961@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
38962 "Array Controller Device"
38963 };
38964
38965-static char *chtostr(char *tmp, u8 *chars, int n)
38966-{
38967- tmp[0] = 0;
38968- return strncat(tmp, (char *)chars, n);
38969-}
38970-
38971 static int i2o_report_query_status(struct seq_file *seq, int block_status,
38972 char *group)
38973 {
38974@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
38975 } *result;
38976
38977 i2o_exec_execute_ddm_table ddm_table;
38978- char tmp[28 + 1];
38979
38980 result = kmalloc(sizeof(*result), GFP_KERNEL);
38981 if (!result)
38982@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
38983
38984 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
38985 seq_printf(seq, "%-#8x", ddm_table.module_id);
38986- seq_printf(seq, "%-29s",
38987- chtostr(tmp, ddm_table.module_name_version, 28));
38988+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
38989 seq_printf(seq, "%9d ", ddm_table.data_size);
38990 seq_printf(seq, "%8d", ddm_table.code_size);
38991
38992@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
38993
38994 i2o_driver_result_table *result;
38995 i2o_driver_store_table *dst;
38996- char tmp[28 + 1];
38997
38998 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
38999 if (result == NULL)
39000@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
39001
39002 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
39003 seq_printf(seq, "%-#8x", dst->module_id);
39004- seq_printf(seq, "%-29s",
39005- chtostr(tmp, dst->module_name_version, 28));
39006- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
39007+ seq_printf(seq, "%-.28s", dst->module_name_version);
39008+ seq_printf(seq, "%-.8s", dst->date);
39009 seq_printf(seq, "%8d ", dst->module_size);
39010 seq_printf(seq, "%8d ", dst->mpb_size);
39011 seq_printf(seq, "0x%04x", dst->module_flags);
39012@@ -1250,7 +1240,6 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
39013 // == (allow) 512d bytes (max)
39014 static u16 *work16 = (u16 *) work32;
39015 int token;
39016- char tmp[16 + 1];
39017
39018 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
39019
39020@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
39021 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
39022 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
39023 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
39024- seq_printf(seq, "Vendor info : %s\n",
39025- chtostr(tmp, (u8 *) (work32 + 2), 16));
39026- seq_printf(seq, "Product info : %s\n",
39027- chtostr(tmp, (u8 *) (work32 + 6), 16));
39028- seq_printf(seq, "Description : %s\n",
39029- chtostr(tmp, (u8 *) (work32 + 10), 16));
39030- seq_printf(seq, "Product rev. : %s\n",
39031- chtostr(tmp, (u8 *) (work32 + 14), 8));
39032+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
39033+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
39034+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
39035+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
39036
39037 seq_printf(seq, "Serial number : ");
39038 print_serial_number(seq, (u8 *) (work32 + 16),
39039@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
39040 u8 pad[256]; // allow up to 256 byte (max) serial number
39041 } result;
39042
39043- char tmp[24 + 1];
39044-
39045 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
39046
39047 if (token < 0) {
39048@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
39049 }
39050
39051 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
39052- seq_printf(seq, "Module name : %s\n",
39053- chtostr(tmp, result.module_name, 24));
39054- seq_printf(seq, "Module revision : %s\n",
39055- chtostr(tmp, result.module_rev, 8));
39056+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
39057+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
39058
39059 seq_printf(seq, "Serial number : ");
39060 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
39061@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
39062 u8 instance_number[4];
39063 } result;
39064
39065- char tmp[64 + 1];
39066-
39067 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
39068
39069 if (token < 0) {
39070@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
39071 return 0;
39072 }
39073
39074- seq_printf(seq, "Device name : %s\n",
39075- chtostr(tmp, result.device_name, 64));
39076- seq_printf(seq, "Service name : %s\n",
39077- chtostr(tmp, result.service_name, 64));
39078- seq_printf(seq, "Physical name : %s\n",
39079- chtostr(tmp, result.physical_location, 64));
39080- seq_printf(seq, "Instance number : %s\n",
39081- chtostr(tmp, result.instance_number, 4));
39082+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
39083+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
39084+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
39085+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
39086
39087 return 0;
39088 }
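
Dropping chtostr() works because these i2o fields are fixed-width and not guaranteed NUL-terminated; a printf precision ("%.28s") bounds how many bytes %s may read, so the fields can be printed in place without a bounce buffer. A self-contained demonstration:

#include <stdio.h>

int main(void)
{
	/* exactly 28 characters: legal in C, and NOT NUL-terminated */
	char module_name_version[28] = "v1.2.3-no-terminator-padding";

	/* the precision caps the read at 28 bytes even with no terminator */
	printf("%-.28s\n", module_name_version);
	return 0;
}
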
39089diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
39090index a8c08f3..155fe3d 100644
39091--- a/drivers/message/i2o/iop.c
39092+++ b/drivers/message/i2o/iop.c
39093@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
39094
39095 spin_lock_irqsave(&c->context_list_lock, flags);
39096
39097- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
39098- atomic_inc(&c->context_list_counter);
39099+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
39100+ atomic_inc_unchecked(&c->context_list_counter);
39101
39102- entry->context = atomic_read(&c->context_list_counter);
39103+ entry->context = atomic_read_unchecked(&c->context_list_counter);
39104
39105 list_add(&entry->list, &c->context_list);
39106
39107@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
39108
39109 #if BITS_PER_LONG == 64
39110 spin_lock_init(&c->context_list_lock);
39111- atomic_set(&c->context_list_counter, 0);
39112+ atomic_set_unchecked(&c->context_list_counter, 0);
39113 INIT_LIST_HEAD(&c->context_list);
39114 #endif
39115
39116diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
39117index 45ece11..8efa218 100644
39118--- a/drivers/mfd/janz-cmodio.c
39119+++ b/drivers/mfd/janz-cmodio.c
39120@@ -13,6 +13,7 @@
39121
39122 #include <linux/kernel.h>
39123 #include <linux/module.h>
39124+#include <linux/slab.h>
39125 #include <linux/init.h>
39126 #include <linux/pci.h>
39127 #include <linux/interrupt.h>
39128diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
39129index a5f9888..1c0ed56 100644
39130--- a/drivers/mfd/twl4030-irq.c
39131+++ b/drivers/mfd/twl4030-irq.c
39132@@ -35,6 +35,7 @@
39133 #include <linux/of.h>
39134 #include <linux/irqdomain.h>
39135 #include <linux/i2c/twl.h>
39136+#include <asm/pgtable.h>
39137
39138 #include "twl-core.h"
39139
39140@@ -728,10 +729,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
39141 * Install an irq handler for each of the SIH modules;
39142 * clone dummy irq_chip since PIH can't *do* anything
39143 */
39144- twl4030_irq_chip = dummy_irq_chip;
39145- twl4030_irq_chip.name = "twl4030";
39146+ pax_open_kernel();
39147+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
39148+ *(const char **)&twl4030_irq_chip.name = "twl4030";
39149
39150- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
39151+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
39152+ pax_close_kernel();
39153
39154 for (i = irq_base; i < irq_end; i++) {
39155 irq_set_chip_and_handler(i, &twl4030_irq_chip,
39156diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
39157index 277a8db..0e0b754 100644
39158--- a/drivers/mfd/twl6030-irq.c
39159+++ b/drivers/mfd/twl6030-irq.c
39160@@ -387,10 +387,12 @@ int twl6030_init_irq(struct device *dev, int irq_num)
39161 * install an irq handler for each of the modules;
39162 * clone dummy irq_chip since PIH can't *do* anything
39163 */
39164- twl6030_irq_chip = dummy_irq_chip;
39165- twl6030_irq_chip.name = "twl6030";
39166- twl6030_irq_chip.irq_set_type = NULL;
39167- twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;
39168+ pax_open_kernel();
39169+ memcpy((void *)&twl6030_irq_chip, &dummy_irq_chip, sizeof twl6030_irq_chip);
39170+ *(const char **)&twl6030_irq_chip.name = "twl6030";
39171+ *(void **)&twl6030_irq_chip.irq_set_type = NULL;
39172+ *(void **)&twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;
39173+ pax_close_kernel();
39174
39175 for (i = irq_base; i < irq_end; i++) {
39176 irq_set_chip_and_handler(i, &twl6030_irq_chip,
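
Both twl4030 and twl6030 clone dummy_irq_chip, which the constify plugin has made read-only; the writes therefore have to be bracketed by pax_open_kernel()/pax_close_kernel() (on x86 these briefly clear CR0.WP, as in the lguest switcher hunk earlier) and must cast the const away field by field. The idiom in isolation, with a stand-in chip type:

#include <linux/string.h>

/* pax_open_kernel()/pax_close_kernel() are provided by the patch; the
 * struct and field names below are illustrative */
struct chip_like {
	const char *name;
	void (*irq_ack)(void);
};

static struct chip_like my_chip;	/* constified in-tree by the plugin */

static void clone_and_rename(const struct chip_like *tmpl)
{
	pax_open_kernel();	/* make RO data writable */
	memcpy((void *)&my_chip, tmpl, sizeof(my_chip));
	*(const char **)&my_chip.name = "twl4030";
	pax_close_kernel();	/* restore protection */
}
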
39177diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
39178index f428d86..274c368 100644
39179--- a/drivers/misc/c2port/core.c
39180+++ b/drivers/misc/c2port/core.c
39181@@ -924,7 +924,9 @@ struct c2port_device *c2port_device_register(char *name,
39182 mutex_init(&c2dev->mutex);
39183
39184 /* Create binary file */
39185- c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
39186+ pax_open_kernel();
39187+ *(size_t *)&c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
39188+ pax_close_kernel();
39189 ret = device_create_bin_file(c2dev->dev, &c2port_bin_attrs);
39190 if (unlikely(ret))
39191 goto error_device_create_bin_file;
39192diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
39193index 3aa9a96..59cf685 100644
39194--- a/drivers/misc/kgdbts.c
39195+++ b/drivers/misc/kgdbts.c
39196@@ -832,7 +832,7 @@ static void run_plant_and_detach_test(int is_early)
39197 char before[BREAK_INSTR_SIZE];
39198 char after[BREAK_INSTR_SIZE];
39199
39200- probe_kernel_read(before, (char *)kgdbts_break_test,
39201+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
39202 BREAK_INSTR_SIZE);
39203 init_simple_test();
39204 ts.tst = plant_and_detach_test;
39205@@ -840,7 +840,7 @@ static void run_plant_and_detach_test(int is_early)
39206 /* Activate test with initial breakpoint */
39207 if (!is_early)
39208 kgdb_breakpoint();
39209- probe_kernel_read(after, (char *)kgdbts_break_test,
39210+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
39211 BREAK_INSTR_SIZE);
39212 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
39213 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
39214diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
39215index 4a87e5c..76bdf5c 100644
39216--- a/drivers/misc/lis3lv02d/lis3lv02d.c
39217+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
39218@@ -498,7 +498,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
39219 * the lid is closed. This leads to interrupts as soon as a little move
39220 * is done.
39221 */
39222- atomic_inc(&lis3->count);
39223+ atomic_inc_unchecked(&lis3->count);
39224
39225 wake_up_interruptible(&lis3->misc_wait);
39226 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
39227@@ -584,7 +584,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
39228 if (lis3->pm_dev)
39229 pm_runtime_get_sync(lis3->pm_dev);
39230
39231- atomic_set(&lis3->count, 0);
39232+ atomic_set_unchecked(&lis3->count, 0);
39233 return 0;
39234 }
39235
39236@@ -617,7 +617,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
39237 add_wait_queue(&lis3->misc_wait, &wait);
39238 while (true) {
39239 set_current_state(TASK_INTERRUPTIBLE);
39240- data = atomic_xchg(&lis3->count, 0);
39241+ data = atomic_xchg_unchecked(&lis3->count, 0);
39242 if (data)
39243 break;
39244
39245@@ -658,7 +658,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
39246 struct lis3lv02d, miscdev);
39247
39248 poll_wait(file, &lis3->misc_wait, wait);
39249- if (atomic_read(&lis3->count))
39250+ if (atomic_read_unchecked(&lis3->count))
39251 return POLLIN | POLLRDNORM;
39252 return 0;
39253 }
39254diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
39255index c439c82..1f20f57 100644
39256--- a/drivers/misc/lis3lv02d/lis3lv02d.h
39257+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
39258@@ -297,7 +297,7 @@ struct lis3lv02d {
39259 struct input_polled_dev *idev; /* input device */
39260 struct platform_device *pdev; /* platform device */
39261 struct regulator_bulk_data regulators[2];
39262- atomic_t count; /* interrupt count after last read */
39263+ atomic_unchecked_t count; /* interrupt count after last read */
39264 union axis_conversion ac; /* hw -> logical axis */
39265 int mapped_btns[3];
39266
39267diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
39268index 2f30bad..c4c13d0 100644
39269--- a/drivers/misc/sgi-gru/gruhandles.c
39270+++ b/drivers/misc/sgi-gru/gruhandles.c
39271@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
39272 unsigned long nsec;
39273
39274 nsec = CLKS2NSEC(clks);
39275- atomic_long_inc(&mcs_op_statistics[op].count);
39276- atomic_long_add(nsec, &mcs_op_statistics[op].total);
39277+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
39278+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
39279 if (mcs_op_statistics[op].max < nsec)
39280 mcs_op_statistics[op].max = nsec;
39281 }
39282diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
39283index 950dbe9..eeef0f8 100644
39284--- a/drivers/misc/sgi-gru/gruprocfs.c
39285+++ b/drivers/misc/sgi-gru/gruprocfs.c
39286@@ -32,9 +32,9 @@
39287
39288 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
39289
39290-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
39291+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
39292 {
39293- unsigned long val = atomic_long_read(v);
39294+ unsigned long val = atomic_long_read_unchecked(v);
39295
39296 seq_printf(s, "%16lu %s\n", val, id);
39297 }
39298@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
39299
39300 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
39301 for (op = 0; op < mcsop_last; op++) {
39302- count = atomic_long_read(&mcs_op_statistics[op].count);
39303- total = atomic_long_read(&mcs_op_statistics[op].total);
39304+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
39305+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
39306 max = mcs_op_statistics[op].max;
39307 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
39308 count ? total / count : 0, max);
39309diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
39310index 5c3ce24..4915ccb 100644
39311--- a/drivers/misc/sgi-gru/grutables.h
39312+++ b/drivers/misc/sgi-gru/grutables.h
39313@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
39314 * GRU statistics.
39315 */
39316 struct gru_stats_s {
39317- atomic_long_t vdata_alloc;
39318- atomic_long_t vdata_free;
39319- atomic_long_t gts_alloc;
39320- atomic_long_t gts_free;
39321- atomic_long_t gms_alloc;
39322- atomic_long_t gms_free;
39323- atomic_long_t gts_double_allocate;
39324- atomic_long_t assign_context;
39325- atomic_long_t assign_context_failed;
39326- atomic_long_t free_context;
39327- atomic_long_t load_user_context;
39328- atomic_long_t load_kernel_context;
39329- atomic_long_t lock_kernel_context;
39330- atomic_long_t unlock_kernel_context;
39331- atomic_long_t steal_user_context;
39332- atomic_long_t steal_kernel_context;
39333- atomic_long_t steal_context_failed;
39334- atomic_long_t nopfn;
39335- atomic_long_t asid_new;
39336- atomic_long_t asid_next;
39337- atomic_long_t asid_wrap;
39338- atomic_long_t asid_reuse;
39339- atomic_long_t intr;
39340- atomic_long_t intr_cbr;
39341- atomic_long_t intr_tfh;
39342- atomic_long_t intr_spurious;
39343- atomic_long_t intr_mm_lock_failed;
39344- atomic_long_t call_os;
39345- atomic_long_t call_os_wait_queue;
39346- atomic_long_t user_flush_tlb;
39347- atomic_long_t user_unload_context;
39348- atomic_long_t user_exception;
39349- atomic_long_t set_context_option;
39350- atomic_long_t check_context_retarget_intr;
39351- atomic_long_t check_context_unload;
39352- atomic_long_t tlb_dropin;
39353- atomic_long_t tlb_preload_page;
39354- atomic_long_t tlb_dropin_fail_no_asid;
39355- atomic_long_t tlb_dropin_fail_upm;
39356- atomic_long_t tlb_dropin_fail_invalid;
39357- atomic_long_t tlb_dropin_fail_range_active;
39358- atomic_long_t tlb_dropin_fail_idle;
39359- atomic_long_t tlb_dropin_fail_fmm;
39360- atomic_long_t tlb_dropin_fail_no_exception;
39361- atomic_long_t tfh_stale_on_fault;
39362- atomic_long_t mmu_invalidate_range;
39363- atomic_long_t mmu_invalidate_page;
39364- atomic_long_t flush_tlb;
39365- atomic_long_t flush_tlb_gru;
39366- atomic_long_t flush_tlb_gru_tgh;
39367- atomic_long_t flush_tlb_gru_zero_asid;
39368+ atomic_long_unchecked_t vdata_alloc;
39369+ atomic_long_unchecked_t vdata_free;
39370+ atomic_long_unchecked_t gts_alloc;
39371+ atomic_long_unchecked_t gts_free;
39372+ atomic_long_unchecked_t gms_alloc;
39373+ atomic_long_unchecked_t gms_free;
39374+ atomic_long_unchecked_t gts_double_allocate;
39375+ atomic_long_unchecked_t assign_context;
39376+ atomic_long_unchecked_t assign_context_failed;
39377+ atomic_long_unchecked_t free_context;
39378+ atomic_long_unchecked_t load_user_context;
39379+ atomic_long_unchecked_t load_kernel_context;
39380+ atomic_long_unchecked_t lock_kernel_context;
39381+ atomic_long_unchecked_t unlock_kernel_context;
39382+ atomic_long_unchecked_t steal_user_context;
39383+ atomic_long_unchecked_t steal_kernel_context;
39384+ atomic_long_unchecked_t steal_context_failed;
39385+ atomic_long_unchecked_t nopfn;
39386+ atomic_long_unchecked_t asid_new;
39387+ atomic_long_unchecked_t asid_next;
39388+ atomic_long_unchecked_t asid_wrap;
39389+ atomic_long_unchecked_t asid_reuse;
39390+ atomic_long_unchecked_t intr;
39391+ atomic_long_unchecked_t intr_cbr;
39392+ atomic_long_unchecked_t intr_tfh;
39393+ atomic_long_unchecked_t intr_spurious;
39394+ atomic_long_unchecked_t intr_mm_lock_failed;
39395+ atomic_long_unchecked_t call_os;
39396+ atomic_long_unchecked_t call_os_wait_queue;
39397+ atomic_long_unchecked_t user_flush_tlb;
39398+ atomic_long_unchecked_t user_unload_context;
39399+ atomic_long_unchecked_t user_exception;
39400+ atomic_long_unchecked_t set_context_option;
39401+ atomic_long_unchecked_t check_context_retarget_intr;
39402+ atomic_long_unchecked_t check_context_unload;
39403+ atomic_long_unchecked_t tlb_dropin;
39404+ atomic_long_unchecked_t tlb_preload_page;
39405+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
39406+ atomic_long_unchecked_t tlb_dropin_fail_upm;
39407+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
39408+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
39409+ atomic_long_unchecked_t tlb_dropin_fail_idle;
39410+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
39411+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
39412+ atomic_long_unchecked_t tfh_stale_on_fault;
39413+ atomic_long_unchecked_t mmu_invalidate_range;
39414+ atomic_long_unchecked_t mmu_invalidate_page;
39415+ atomic_long_unchecked_t flush_tlb;
39416+ atomic_long_unchecked_t flush_tlb_gru;
39417+ atomic_long_unchecked_t flush_tlb_gru_tgh;
39418+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
39419
39420- atomic_long_t copy_gpa;
39421- atomic_long_t read_gpa;
39422+ atomic_long_unchecked_t copy_gpa;
39423+ atomic_long_unchecked_t read_gpa;
39424
39425- atomic_long_t mesq_receive;
39426- atomic_long_t mesq_receive_none;
39427- atomic_long_t mesq_send;
39428- atomic_long_t mesq_send_failed;
39429- atomic_long_t mesq_noop;
39430- atomic_long_t mesq_send_unexpected_error;
39431- atomic_long_t mesq_send_lb_overflow;
39432- atomic_long_t mesq_send_qlimit_reached;
39433- atomic_long_t mesq_send_amo_nacked;
39434- atomic_long_t mesq_send_put_nacked;
39435- atomic_long_t mesq_page_overflow;
39436- atomic_long_t mesq_qf_locked;
39437- atomic_long_t mesq_qf_noop_not_full;
39438- atomic_long_t mesq_qf_switch_head_failed;
39439- atomic_long_t mesq_qf_unexpected_error;
39440- atomic_long_t mesq_noop_unexpected_error;
39441- atomic_long_t mesq_noop_lb_overflow;
39442- atomic_long_t mesq_noop_qlimit_reached;
39443- atomic_long_t mesq_noop_amo_nacked;
39444- atomic_long_t mesq_noop_put_nacked;
39445- atomic_long_t mesq_noop_page_overflow;
39446+ atomic_long_unchecked_t mesq_receive;
39447+ atomic_long_unchecked_t mesq_receive_none;
39448+ atomic_long_unchecked_t mesq_send;
39449+ atomic_long_unchecked_t mesq_send_failed;
39450+ atomic_long_unchecked_t mesq_noop;
39451+ atomic_long_unchecked_t mesq_send_unexpected_error;
39452+ atomic_long_unchecked_t mesq_send_lb_overflow;
39453+ atomic_long_unchecked_t mesq_send_qlimit_reached;
39454+ atomic_long_unchecked_t mesq_send_amo_nacked;
39455+ atomic_long_unchecked_t mesq_send_put_nacked;
39456+ atomic_long_unchecked_t mesq_page_overflow;
39457+ atomic_long_unchecked_t mesq_qf_locked;
39458+ atomic_long_unchecked_t mesq_qf_noop_not_full;
39459+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
39460+ atomic_long_unchecked_t mesq_qf_unexpected_error;
39461+ atomic_long_unchecked_t mesq_noop_unexpected_error;
39462+ atomic_long_unchecked_t mesq_noop_lb_overflow;
39463+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
39464+ atomic_long_unchecked_t mesq_noop_amo_nacked;
39465+ atomic_long_unchecked_t mesq_noop_put_nacked;
39466+ atomic_long_unchecked_t mesq_noop_page_overflow;
39467
39468 };
39469
39470@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
39471 tghop_invalidate, mcsop_last};
39472
39473 struct mcs_op_statistic {
39474- atomic_long_t count;
39475- atomic_long_t total;
39476+ atomic_long_unchecked_t count;
39477+ atomic_long_unchecked_t total;
39478 unsigned long max;
39479 };
39480
39481@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
39482
39483 #define STAT(id) do { \
39484 if (gru_options & OPT_STATS) \
39485- atomic_long_inc(&gru_stats.id); \
39486+ atomic_long_inc_unchecked(&gru_stats.id); \
39487 } while (0)
39488
39489 #ifdef CONFIG_SGI_GRU_DEBUG
39490diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
39491index c862cd4..0d176fe 100644
39492--- a/drivers/misc/sgi-xp/xp.h
39493+++ b/drivers/misc/sgi-xp/xp.h
39494@@ -288,7 +288,7 @@ struct xpc_interface {
39495 xpc_notify_func, void *);
39496 void (*received) (short, int, void *);
39497 enum xp_retval (*partid_to_nasids) (short, void *);
39498-};
39499+} __no_const;
39500
39501 extern struct xpc_interface xpc_interface;
39502
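
The __no_const marker ties into PaX's constify gcc plugin: structures consisting only of function pointers (ops tables) are implicitly forced const so they land in read-only memory, and xpc_interface must opt out because its members are reassigned at run time. The complementary __do_const (applied to dw_mci_drv_data a few hunks below) forces constification where the plugin's heuristic would not apply. A hedged sketch of the plumbing, modelled on grsecurity's include/linux/compiler.h — the exact attribute spellings may differ between versions:

#ifdef CONFIG_PAX_CONSTIFY_PLUGIN
# define __no_const __attribute__((no_const))   /* keep this ops struct writable */
# define __do_const __attribute__((do_const))   /* force this struct into rodata */
#else
# define __no_const
# define __do_const
#endif
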
39503diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
39504index b94d5f7..7f494c5 100644
39505--- a/drivers/misc/sgi-xp/xpc.h
39506+++ b/drivers/misc/sgi-xp/xpc.h
39507@@ -835,6 +835,7 @@ struct xpc_arch_operations {
39508 void (*received_payload) (struct xpc_channel *, void *);
39509 void (*notify_senders_of_disconnect) (struct xpc_channel *);
39510 };
39511+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
39512
39513 /* struct xpc_partition act_state values (for XPC HB) */
39514
39515@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
39516 /* found in xpc_main.c */
39517 extern struct device *xpc_part;
39518 extern struct device *xpc_chan;
39519-extern struct xpc_arch_operations xpc_arch_ops;
39520+extern xpc_arch_operations_no_const xpc_arch_ops;
39521 extern int xpc_disengage_timelimit;
39522 extern int xpc_disengage_timedout;
39523 extern int xpc_activate_IRQ_rcvd;
39524diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
39525index d971817..33bdca5 100644
39526--- a/drivers/misc/sgi-xp/xpc_main.c
39527+++ b/drivers/misc/sgi-xp/xpc_main.c
39528@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
39529 .notifier_call = xpc_system_die,
39530 };
39531
39532-struct xpc_arch_operations xpc_arch_ops;
39533+xpc_arch_operations_no_const xpc_arch_ops;
39534
39535 /*
39536 * Timer function to enforce the timelimit on the partition disengage.
39537@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
39538
39539 if (((die_args->trapnr == X86_TRAP_MF) ||
39540 (die_args->trapnr == X86_TRAP_XF)) &&
39541- !user_mode_vm(die_args->regs))
39542+ !user_mode(die_args->regs))
39543 xpc_die_deactivate();
39544
39545 break;
39546diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
39547index 6d8f701..35b6369 100644
39548--- a/drivers/mmc/core/mmc_ops.c
39549+++ b/drivers/mmc/core/mmc_ops.c
39550@@ -247,7 +247,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
39551 void *data_buf;
39552 int is_on_stack;
39553
39554- is_on_stack = object_is_on_stack(buf);
39555+ is_on_stack = object_starts_on_stack(buf);
39556 if (is_on_stack) {
39557 /*
39558 * dma onto stack is unsafe/nonportable, but callers to this
39559diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
39560index 53b8fd9..615b462 100644
39561--- a/drivers/mmc/host/dw_mmc.h
39562+++ b/drivers/mmc/host/dw_mmc.h
39563@@ -205,5 +205,5 @@ struct dw_mci_drv_data {
39564 int (*parse_dt)(struct dw_mci *host);
39565 int (*setup_bus)(struct dw_mci *host,
39566 struct device_node *slot_np, u8 bus_width);
39567-};
39568+} __do_const;
39569 #endif /* _DW_MMC_H_ */
39570diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
39571index 82a8de1..3c56ccb 100644
39572--- a/drivers/mmc/host/sdhci-s3c.c
39573+++ b/drivers/mmc/host/sdhci-s3c.c
39574@@ -721,9 +721,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
39575 * we can use overriding functions instead of default.
39576 */
39577 if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
39578- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
39579- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
39580- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
39581+ pax_open_kernel();
39582+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
39583+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
39584+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
39585+ pax_close_kernel();
39586 }
39587
39588 /* It supports additional host capabilities if needed */
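
Because constified ops structures live in read-only memory, code that genuinely must patch a method at run time — like this quirk handler — brackets the write with pax_open_kernel()/pax_close_kernel() (on x86 these toggle CR0.WP) and assigns through an explicit cast so the compiler accepts the store. A sketch of the idiom with a hypothetical ops structure; imagine the constify plugin has placed it in read-only memory:

struct example_ops {
	unsigned int (*get_max_clock)(void);
};

static struct example_ops example_ops;   /* treated as read-only once constified */

static unsigned int cmu_get_max_clock(void) { return 100000000u; }

/* stand-ins for the arch-specific helpers (the real ones flip CR0.WP on x86) */
static void pax_open_kernel(void)  { /* allow kernel writes to rodata */ }
static void pax_close_kernel(void) { /* restore write protection */ }

static void apply_quirk(void)
{
	pax_open_kernel();
	/* the cast defeats the implicit const the plugin added to the member */
	*(void **)&example_ops.get_max_clock = cmu_get_max_clock;
	pax_close_kernel();
}
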
39589diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
39590index a4eb8b5..8c0628f 100644
39591--- a/drivers/mtd/devices/doc2000.c
39592+++ b/drivers/mtd/devices/doc2000.c
39593@@ -753,7 +753,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
39594
39595 /* The ECC will not be calculated correctly if less than 512 is written */
39596 /* DBB-
39597- if (len != 0x200 && eccbuf)
39598+ if (len != 0x200)
39599 printk(KERN_WARNING
39600 "ECC needs a full sector write (adr: %lx size %lx)\n",
39601 (long) to, (long) len);
39602diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
39603index 0c8bb6b..6f35deb 100644
39604--- a/drivers/mtd/nand/denali.c
39605+++ b/drivers/mtd/nand/denali.c
39606@@ -24,6 +24,7 @@
39607 #include <linux/slab.h>
39608 #include <linux/mtd/mtd.h>
39609 #include <linux/module.h>
39610+#include <linux/slab.h>
39611
39612 #include "denali.h"
39613
39614diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
39615index 51b9d6a..52af9a7 100644
39616--- a/drivers/mtd/nftlmount.c
39617+++ b/drivers/mtd/nftlmount.c
39618@@ -24,6 +24,7 @@
39619 #include <asm/errno.h>
39620 #include <linux/delay.h>
39621 #include <linux/slab.h>
39622+#include <linux/sched.h>
39623 #include <linux/mtd/mtd.h>
39624 #include <linux/mtd/nand.h>
39625 #include <linux/mtd/nftl.h>
39626diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
39627index 8dd6ba5..419cc1d 100644
39628--- a/drivers/mtd/sm_ftl.c
39629+++ b/drivers/mtd/sm_ftl.c
39630@@ -56,7 +56,7 @@ ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
39631 #define SM_CIS_VENDOR_OFFSET 0x59
39632 struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
39633 {
39634- struct attribute_group *attr_group;
39635+ attribute_group_no_const *attr_group;
39636 struct attribute **attributes;
39637 struct sm_sysfs_attribute *vendor_attribute;
39638
39639diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
39640index 27cdf1f..8c37357 100644
39641--- a/drivers/net/bonding/bond_main.c
39642+++ b/drivers/net/bonding/bond_main.c
39643@@ -4859,7 +4859,7 @@ static unsigned int bond_get_num_tx_queues(void)
39644 return tx_queues;
39645 }
39646
39647-static struct rtnl_link_ops bond_link_ops __read_mostly = {
39648+static struct rtnl_link_ops bond_link_ops = {
39649 .kind = "bond",
39650 .priv_size = sizeof(struct bonding),
39651 .setup = bond_setup,
39652@@ -4975,8 +4975,8 @@ static void __exit bonding_exit(void)
39653
39654 bond_destroy_debugfs();
39655
39656- rtnl_link_unregister(&bond_link_ops);
39657 unregister_pernet_subsys(&bond_net_ops);
39658+ rtnl_link_unregister(&bond_link_ops);
39659
39660 #ifdef CONFIG_NET_POLL_CONTROLLER
39661 /*
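
Dropping __read_mostly from bond_link_ops (and from the notifier blocks and rtnl_link_ops in macvlan, macvtap, team and vxlan below) is a prerequisite for constification: the annotation is a section attribute pinning the object into the writable .data..read_mostly section, which would contradict moving it into read-only memory. The bonding_exit() hunk additionally reorders module teardown so the pernet subsystem is unregistered before the rtnl link ops. The mainline definition, for reference:

/* include/linux/cache.h (mainline): an explicitly writable data section,
 * incompatible with the constify plugin relocating the object to rodata */
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
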
39662diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
39663index 70dba5d..11a0919 100644
39664--- a/drivers/net/ethernet/8390/ax88796.c
39665+++ b/drivers/net/ethernet/8390/ax88796.c
39666@@ -872,9 +872,11 @@ static int ax_probe(struct platform_device *pdev)
39667 if (ax->plat->reg_offsets)
39668 ei_local->reg_offset = ax->plat->reg_offsets;
39669 else {
39670+ resource_size_t _mem_size = mem_size;
39671+ do_div(_mem_size, 0x18);
39672 ei_local->reg_offset = ax->reg_offsets;
39673 for (ret = 0; ret < 0x18; ret++)
39674- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
39675+ ax->reg_offsets[ret] = _mem_size * ret;
39676 }
39677
39678 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
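
The ax88796 change sidesteps a 64-by-32 division: mem_size is resource_size_t, which can be 64-bit even on 32-bit platforms, and a plain '/' there makes gcc emit a libgcc helper (__udivdi3, __aeabi_uldivmod) that the kernel does not link. do_div() divides in place and hands back the remainder. A userspace model of its contract, with a simplified stand-in macro:

#include <stdint.h>
#include <stdio.h>

/* simplified stand-in for the kernel's do_div(): n becomes the quotient,
 * the macro's value is the remainder */
#define do_div(n, base) ({				\
	uint32_t __rem = (uint32_t)((n) % (base));	\
	(n) = (n) / (base);				\
	__rem;						\
})

int main(void)
{
	uint64_t mem_size = 0x1800;   /* hypothetical resource size */
	do_div(mem_size, 0x18);       /* mem_size becomes the quotient */
	printf("%llu\n", (unsigned long long)mem_size);   /* prints 256 */
	return 0;
}
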
39679diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
39680index 0991534..8098e92 100644
39681--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
39682+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
39683@@ -1094,7 +1094,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
39684 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
39685 {
39686 /* RX_MODE controlling object */
39687- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
39688+ bnx2x_init_rx_mode_obj(bp);
39689
39690 /* multicast configuration controlling object */
39691 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
39692diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
39693index 10bc093..a2fb42a 100644
39694--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
39695+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
39696@@ -2136,12 +2136,12 @@ static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
39697 break;
39698 default:
39699 BNX2X_ERR("Non valid capability ID\n");
39700- rval = -EINVAL;
39701+ rval = 1;
39702 break;
39703 }
39704 } else {
39705 DP(BNX2X_MSG_DCB, "DCB disabled\n");
39706- rval = -EINVAL;
39707+ rval = 1;
39708 }
39709
39710 DP(BNX2X_MSG_DCB, "capid %d:%x\n", capid, *cap);
39711@@ -2167,12 +2167,12 @@ static int bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num)
39712 break;
39713 default:
39714 BNX2X_ERR("Non valid TC-ID\n");
39715- rval = -EINVAL;
39716+ rval = 1;
39717 break;
39718 }
39719 } else {
39720 DP(BNX2X_MSG_DCB, "DCB disabled\n");
39721- rval = -EINVAL;
39722+ rval = 1;
39723 }
39724
39725 return rval;
39726@@ -2185,7 +2185,7 @@ static int bnx2x_dcbnl_set_numtcs(struct net_device *netdev, int tcid, u8 num)
39727 return -EINVAL;
39728 }
39729
39730-static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev)
39731+static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev)
39732 {
39733 struct bnx2x *bp = netdev_priv(netdev);
39734 DP(BNX2X_MSG_DCB, "state = %d\n", bp->dcbx_local_feat.pfc.enabled);
39735@@ -2387,12 +2387,12 @@ static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid,
39736 break;
39737 default:
39738 BNX2X_ERR("Non valid featrue-ID\n");
39739- rval = -EINVAL;
39740+ rval = 1;
39741 break;
39742 }
39743 } else {
39744 DP(BNX2X_MSG_DCB, "DCB disabled\n");
39745- rval = -EINVAL;
39746+ rval = 1;
39747 }
39748
39749 return rval;
39750@@ -2428,12 +2428,12 @@ static u8 bnx2x_dcbnl_set_featcfg(struct net_device *netdev, int featid,
39751 break;
39752 default:
39753 BNX2X_ERR("Non valid featrue-ID\n");
39754- rval = -EINVAL;
39755+ rval = 1;
39756 break;
39757 }
39758 } else {
39759 DP(BNX2X_MSG_DCB, "dcbnl call not valid\n");
39760- rval = -EINVAL;
39761+ rval = 1;
39762 }
39763
39764 return rval;
39765diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
39766index 09b625e..15b16fe 100644
39767--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
39768+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
39769@@ -2375,15 +2375,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
39770 return rc;
39771 }
39772
39773-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
39774- struct bnx2x_rx_mode_obj *o)
39775+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
39776 {
39777 if (CHIP_IS_E1x(bp)) {
39778- o->wait_comp = bnx2x_empty_rx_mode_wait;
39779- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
39780+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
39781+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
39782 } else {
39783- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
39784- o->config_rx_mode = bnx2x_set_rx_mode_e2;
39785+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
39786+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
39787 }
39788 }
39789
39790diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
39791index adbd91b..58ec94a 100644
39792--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
39793+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
39794@@ -1293,8 +1293,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
39795
39796 /********************* RX MODE ****************/
39797
39798-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
39799- struct bnx2x_rx_mode_obj *o);
39800+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
39801
39802 /**
39803 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
39804diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
39805index d330e81..ce1fb9a 100644
39806--- a/drivers/net/ethernet/broadcom/tg3.h
39807+++ b/drivers/net/ethernet/broadcom/tg3.h
39808@@ -146,6 +146,7 @@
39809 #define CHIPREV_ID_5750_A0 0x4000
39810 #define CHIPREV_ID_5750_A1 0x4001
39811 #define CHIPREV_ID_5750_A3 0x4003
39812+#define CHIPREV_ID_5750_C1 0x4201
39813 #define CHIPREV_ID_5750_C2 0x4202
39814 #define CHIPREV_ID_5752_A0_HW 0x5000
39815 #define CHIPREV_ID_5752_A0 0x6000
39816diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
39817index 8cffcdf..aadf043 100644
39818--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
39819+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
39820@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
39821 */
39822 struct l2t_skb_cb {
39823 arp_failure_handler_func arp_failure_handler;
39824-};
39825+} __no_const;
39826
39827 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
39828
39829diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
39830index 4c83003..2a2a5b9 100644
39831--- a/drivers/net/ethernet/dec/tulip/de4x5.c
39832+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
39833@@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39834 for (i=0; i<ETH_ALEN; i++) {
39835 tmp.addr[i] = dev->dev_addr[i];
39836 }
39837- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
39838+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
39839 break;
39840
39841 case DE4X5_SET_HWADDR: /* Set the hardware address */
39842@@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39843 spin_lock_irqsave(&lp->lock, flags);
39844 memcpy(&statbuf, &lp->pktStats, ioc->len);
39845 spin_unlock_irqrestore(&lp->lock, flags);
39846- if (copy_to_user(ioc->data, &statbuf, ioc->len))
39847+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
39848 return -EFAULT;
39849 break;
39850 }
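
Both de4x5 hunks close the same hole: ioc->len arrives from userspace and was passed straight to copy_to_user(), so a caller could read kernel stack beyond tmp.addr or statbuf. The hardened form rejects oversized lengths before copying. A compact sketch of the pattern, with memcpy standing in for copy_to_user and hypothetical types:

#include <errno.h>
#include <string.h>

struct de4x5_req { unsigned char *data; int len; };   /* len is user-controlled */

static long get_hw_addr(struct de4x5_req *ioc, const unsigned char addr[6])
{
	if ((size_t)ioc->len > 6)   /* clamp BEFORE the copy; negative len is caught too */
		return -EFAULT;
	memcpy(ioc->data, addr, (size_t)ioc->len);   /* copy_to_user() in the driver */
	return 0;
}
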
39851diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
39852index 4d6f3c5..6169e60 100644
39853--- a/drivers/net/ethernet/emulex/benet/be_main.c
39854+++ b/drivers/net/ethernet/emulex/benet/be_main.c
39855@@ -455,7 +455,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
39856
39857 if (wrapped)
39858 newacc += 65536;
39859- ACCESS_ONCE(*acc) = newacc;
39860+ ACCESS_ONCE_RW(*acc) = newacc;
39861 }
39862
39863 void be_parse_stats(struct be_adapter *adapter)
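
ACCESS_ONCE_RW recurs through the rest of this section (benet, ixgbe, sfc, ath9k): grsecurity redefines ACCESS_ONCE() with a const qualifier so that stray writes through it become compile errors, and adds an explicitly writable variant for the handful of legitimate writers. The two definitions, per grsecurity's include/linux/compiler.h:

#define ACCESS_ONCE(x)    (*(volatile const typeof(x) *)&(x))  /* read-only view */
#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))        /* declared writer */
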
39864diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
39865index 74d749e..eefb1bd 100644
39866--- a/drivers/net/ethernet/faraday/ftgmac100.c
39867+++ b/drivers/net/ethernet/faraday/ftgmac100.c
39868@@ -31,6 +31,8 @@
39869 #include <linux/netdevice.h>
39870 #include <linux/phy.h>
39871 #include <linux/platform_device.h>
39872+#include <linux/interrupt.h>
39873+#include <linux/irqreturn.h>
39874 #include <net/ip.h>
39875
39876 #include "ftgmac100.h"
39877diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
39878index b901a01..1ff32ee 100644
39879--- a/drivers/net/ethernet/faraday/ftmac100.c
39880+++ b/drivers/net/ethernet/faraday/ftmac100.c
39881@@ -31,6 +31,8 @@
39882 #include <linux/module.h>
39883 #include <linux/netdevice.h>
39884 #include <linux/platform_device.h>
39885+#include <linux/interrupt.h>
39886+#include <linux/irqreturn.h>
39887
39888 #include "ftmac100.h"
39889
39890diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
39891index bb9256a..56d8752 100644
39892--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
39893+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
39894@@ -806,7 +806,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
39895 }
39896
39897 /* update the base incval used to calculate frequency adjustment */
39898- ACCESS_ONCE(adapter->base_incval) = incval;
39899+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
39900 smp_mb();
39901
39902 /* need lock to prevent incorrect read while modifying cyclecounter */
39903diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
39904index c124e67..db9b897 100644
39905--- a/drivers/net/ethernet/lantiq_etop.c
39906+++ b/drivers/net/ethernet/lantiq_etop.c
39907@@ -769,7 +769,7 @@ ltq_etop_probe(struct platform_device *pdev)
39908 return 0;
39909
39910 err_free:
39911- kfree(dev);
39912+ free_netdev(dev);
39913 err_out:
39914 return err;
39915 }
39916diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
39917index fbe5363..266b4e3 100644
39918--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
39919+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
39920@@ -3461,7 +3461,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
39921 struct __vxge_hw_fifo *fifo;
39922 struct vxge_hw_fifo_config *config;
39923 u32 txdl_size, txdl_per_memblock;
39924- struct vxge_hw_mempool_cbs fifo_mp_callback;
39925+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
39926+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
39927+ };
39928+
39929 struct __vxge_hw_virtualpath *vpath;
39930
39931 if ((vp == NULL) || (attr == NULL)) {
39932@@ -3544,8 +3547,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
39933 goto exit;
39934 }
39935
39936- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
39937-
39938 fifo->mempool =
39939 __vxge_hw_mempool_create(vpath->hldev,
39940 fifo->config->memblock_size,
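
The vxge change converts a callback table that was stack-allocated and filled in at run time into a static definition with a designated initializer. That removes the run-time function-pointer store (which constification would forbid) and avoids handing the mempool code a pointer to an on-stack object. The shape of the change, schematically:

/* before: writable automatic object, patched just before use */
struct vxge_hw_mempool_cbs fifo_mp_callback;
fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;

/* after: initialized once at compile time, eligible for read-only placement */
static struct vxge_hw_mempool_cbs fifo_mp_callback = {
	.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
};
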
39941diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
39942index 998974f..ecd26db 100644
39943--- a/drivers/net/ethernet/realtek/r8169.c
39944+++ b/drivers/net/ethernet/realtek/r8169.c
39945@@ -741,22 +741,22 @@ struct rtl8169_private {
39946 struct mdio_ops {
39947 void (*write)(struct rtl8169_private *, int, int);
39948 int (*read)(struct rtl8169_private *, int);
39949- } mdio_ops;
39950+ } __no_const mdio_ops;
39951
39952 struct pll_power_ops {
39953 void (*down)(struct rtl8169_private *);
39954 void (*up)(struct rtl8169_private *);
39955- } pll_power_ops;
39956+ } __no_const pll_power_ops;
39957
39958 struct jumbo_ops {
39959 void (*enable)(struct rtl8169_private *);
39960 void (*disable)(struct rtl8169_private *);
39961- } jumbo_ops;
39962+ } __no_const jumbo_ops;
39963
39964 struct csi_ops {
39965 void (*write)(struct rtl8169_private *, int, int);
39966 u32 (*read)(struct rtl8169_private *, int);
39967- } csi_ops;
39968+ } __no_const csi_ops;
39969
39970 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
39971 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
39972diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
39973index 3f93624..cf01144 100644
39974--- a/drivers/net/ethernet/sfc/ptp.c
39975+++ b/drivers/net/ethernet/sfc/ptp.c
39976@@ -553,7 +553,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
39977 (u32)((u64)ptp->start.dma_addr >> 32));
39978
39979 /* Clear flag that signals MC ready */
39980- ACCESS_ONCE(*start) = 0;
39981+ ACCESS_ONCE_RW(*start) = 0;
39982 efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
39983 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
39984
39985diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
39986index 0c74a70..3bc6f68 100644
39987--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
39988+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
39989@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
39990
39991 writel(value, ioaddr + MMC_CNTRL);
39992
39993- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
39994- MMC_CNTRL, value);
39995+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
39996+// MMC_CNTRL, value);
39997 }
39998
39999 /* To mask all all interrupts.*/
40000diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
40001index e6fe0d8..2b7d752 100644
40002--- a/drivers/net/hyperv/hyperv_net.h
40003+++ b/drivers/net/hyperv/hyperv_net.h
40004@@ -101,7 +101,7 @@ struct rndis_device {
40005
40006 enum rndis_device_state state;
40007 bool link_state;
40008- atomic_t new_req_id;
40009+ atomic_unchecked_t new_req_id;
40010
40011 spinlock_t request_lock;
40012 struct list_head req_list;
40013diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
40014index 2b657d4..9903bc0 100644
40015--- a/drivers/net/hyperv/rndis_filter.c
40016+++ b/drivers/net/hyperv/rndis_filter.c
40017@@ -107,7 +107,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
40018 * template
40019 */
40020 set = &rndis_msg->msg.set_req;
40021- set->req_id = atomic_inc_return(&dev->new_req_id);
40022+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
40023
40024 /* Add to the request list */
40025 spin_lock_irqsave(&dev->request_lock, flags);
40026@@ -758,7 +758,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
40027
40028 /* Setup the rndis set */
40029 halt = &request->request_msg.msg.halt_req;
40030- halt->req_id = atomic_inc_return(&dev->new_req_id);
40031+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
40032
40033 /* Ignore return since this msg is optional. */
40034 rndis_filter_send_request(dev, request);
40035diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
40036index 1e9cb0b..7839125 100644
40037--- a/drivers/net/ieee802154/fakehard.c
40038+++ b/drivers/net/ieee802154/fakehard.c
40039@@ -386,7 +386,7 @@ static int ieee802154fake_probe(struct platform_device *pdev)
40040 phy->transmit_power = 0xbf;
40041
40042 dev->netdev_ops = &fake_ops;
40043- dev->ml_priv = &fake_mlme;
40044+ dev->ml_priv = (void *)&fake_mlme;
40045
40046 priv = netdev_priv(dev);
40047 priv->phy = phy;
40048diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
40049index e5cb723..1fc0461 100644
40050--- a/drivers/net/macvlan.c
40051+++ b/drivers/net/macvlan.c
40052@@ -852,13 +852,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
40053 int macvlan_link_register(struct rtnl_link_ops *ops)
40054 {
40055 /* common fields */
40056- ops->priv_size = sizeof(struct macvlan_dev);
40057- ops->validate = macvlan_validate;
40058- ops->maxtype = IFLA_MACVLAN_MAX;
40059- ops->policy = macvlan_policy;
40060- ops->changelink = macvlan_changelink;
40061- ops->get_size = macvlan_get_size;
40062- ops->fill_info = macvlan_fill_info;
40063+ pax_open_kernel();
40064+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
40065+ *(void **)&ops->validate = macvlan_validate;
40066+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
40067+ *(const void **)&ops->policy = macvlan_policy;
40068+ *(void **)&ops->changelink = macvlan_changelink;
40069+ *(void **)&ops->get_size = macvlan_get_size;
40070+ *(void **)&ops->fill_info = macvlan_fill_info;
40071+ pax_close_kernel();
40072
40073 return rtnl_link_register(ops);
40074 };
40075@@ -914,7 +916,7 @@ static int macvlan_device_event(struct notifier_block *unused,
40076 return NOTIFY_DONE;
40077 }
40078
40079-static struct notifier_block macvlan_notifier_block __read_mostly = {
40080+static struct notifier_block macvlan_notifier_block = {
40081 .notifier_call = macvlan_device_event,
40082 };
40083
40084diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
40085index 0f0f9ce..0ca5819 100644
40086--- a/drivers/net/macvtap.c
40087+++ b/drivers/net/macvtap.c
40088@@ -1100,7 +1100,7 @@ static int macvtap_device_event(struct notifier_block *unused,
40089 return NOTIFY_DONE;
40090 }
40091
40092-static struct notifier_block macvtap_notifier_block __read_mostly = {
40093+static struct notifier_block macvtap_notifier_block = {
40094 .notifier_call = macvtap_device_event,
40095 };
40096
40097diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
40098index daec9b0..6428fcb 100644
40099--- a/drivers/net/phy/mdio-bitbang.c
40100+++ b/drivers/net/phy/mdio-bitbang.c
40101@@ -234,6 +234,7 @@ void free_mdio_bitbang(struct mii_bus *bus)
40102 struct mdiobb_ctrl *ctrl = bus->priv;
40103
40104 module_put(ctrl->ops->owner);
40105+ mdiobus_unregister(bus);
40106 mdiobus_free(bus);
40107 }
40108 EXPORT_SYMBOL(free_mdio_bitbang);
40109diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
40110index 508570e..f706dc7 100644
40111--- a/drivers/net/ppp/ppp_generic.c
40112+++ b/drivers/net/ppp/ppp_generic.c
40113@@ -999,7 +999,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
40114 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
40115 struct ppp_stats stats;
40116 struct ppp_comp_stats cstats;
40117- char *vers;
40118
40119 switch (cmd) {
40120 case SIOCGPPPSTATS:
40121@@ -1021,8 +1020,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
40122 break;
40123
40124 case SIOCGPPPVER:
40125- vers = PPP_VERSION;
40126- if (copy_to_user(addr, vers, strlen(vers) + 1))
40127+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
40128 break;
40129 err = 0;
40130 break;
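
In the SIOCGPPPVER path the version string is a literal (the kernel defines PPP_VERSION as "2.4.2"), so sizeof yields its full length including the terminating NUL as a compile-time constant; the removed code computed the same number through a pointer and a run-time strlen(). Illustration:

#include <stdio.h>

#define PPP_VERSION "2.4.2"   /* matching the kernel's definition */

int main(void)
{
	/* sizeof on a string literal counts the NUL: 5 chars + 1 */
	printf("sizeof gives %zu at compile time\n", sizeof(PPP_VERSION));   /* 6 */
	return 0;
}
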
40131diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
40132index 8efe47a..a8075c5 100644
40133--- a/drivers/net/team/team.c
40134+++ b/drivers/net/team/team.c
40135@@ -2603,7 +2603,7 @@ static int team_device_event(struct notifier_block *unused,
40136 return NOTIFY_DONE;
40137 }
40138
40139-static struct notifier_block team_notifier_block __read_mostly = {
40140+static struct notifier_block team_notifier_block = {
40141 .notifier_call = team_device_event,
40142 };
40143
40144diff --git a/drivers/net/tun.c b/drivers/net/tun.c
40145index cb95fe5..a5bdab5 100644
40146--- a/drivers/net/tun.c
40147+++ b/drivers/net/tun.c
40148@@ -1838,7 +1838,7 @@ unlock:
40149 }
40150
40151 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
40152- unsigned long arg, int ifreq_len)
40153+ unsigned long arg, size_t ifreq_len)
40154 {
40155 struct tun_file *tfile = file->private_data;
40156 struct tun_struct *tun;
40157@@ -1850,6 +1850,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
40158 int vnet_hdr_sz;
40159 int ret;
40160
40161+ if (ifreq_len > sizeof ifr)
40162+ return -EFAULT;
40163+
40164 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
40165 if (copy_from_user(&ifr, argp, ifreq_len))
40166 return -EFAULT;
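
This is the write-side counterpart of the de4x5 fix above: ifr is a struct ifreq on __tun_chr_ioctl()'s stack, and ifreq_len is supplied by the various callers (including the compat path), so an oversized value would let copy_from_user() run past the buffer. Widening the parameter to size_t keeps the comparison unsigned, and the explicit guard makes the copy provably bounded:

/* sketch of the guard, in the fixed function's own terms */
if (ifreq_len > sizeof(ifr))   /* sizeof(struct ifreq) */
	return -EFAULT;
if (copy_from_user(&ifr, argp, ifreq_len))
	return -EFAULT;
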
40167diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
40168index cd8ccb2..cff5144 100644
40169--- a/drivers/net/usb/hso.c
40170+++ b/drivers/net/usb/hso.c
40171@@ -71,7 +71,7 @@
40172 #include <asm/byteorder.h>
40173 #include <linux/serial_core.h>
40174 #include <linux/serial.h>
40175-
40176+#include <asm/local.h>
40177
40178 #define MOD_AUTHOR "Option Wireless"
40179 #define MOD_DESCRIPTION "USB High Speed Option driver"
40180@@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
40181 struct urb *urb;
40182
40183 urb = serial->rx_urb[0];
40184- if (serial->port.count > 0) {
40185+ if (atomic_read(&serial->port.count) > 0) {
40186 count = put_rxbuf_data(urb, serial);
40187 if (count == -1)
40188 return;
40189@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
40190 DUMP1(urb->transfer_buffer, urb->actual_length);
40191
40192 /* Anyone listening? */
40193- if (serial->port.count == 0)
40194+ if (atomic_read(&serial->port.count) == 0)
40195 return;
40196
40197 if (status == 0) {
40198@@ -1298,8 +1298,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
40199 tty_port_tty_set(&serial->port, tty);
40200
40201 /* check for port already opened, if not set the termios */
40202- serial->port.count++;
40203- if (serial->port.count == 1) {
40204+ if (atomic_inc_return(&serial->port.count) == 1) {
40205 serial->rx_state = RX_IDLE;
40206 /* Force default termio settings */
40207 _hso_serial_set_termios(tty, NULL);
40208@@ -1311,7 +1310,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
40209 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
40210 if (result) {
40211 hso_stop_serial_device(serial->parent);
40212- serial->port.count--;
40213+ atomic_dec(&serial->port.count);
40214 kref_put(&serial->parent->ref, hso_serial_ref_free);
40215 }
40216 } else {
40217@@ -1348,10 +1347,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
40218
40219 /* reset the rts and dtr */
40220 /* do the actual close */
40221- serial->port.count--;
40222+ atomic_dec(&serial->port.count);
40223
40224- if (serial->port.count <= 0) {
40225- serial->port.count = 0;
40226+ if (atomic_read(&serial->port.count) <= 0) {
40227+ atomic_set(&serial->port.count, 0);
40228 tty_port_tty_set(&serial->port, NULL);
40229 if (!usb_gone)
40230 hso_stop_serial_device(serial->parent);
40231@@ -1427,7 +1426,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
40232
40233 /* the actual setup */
40234 spin_lock_irqsave(&serial->serial_lock, flags);
40235- if (serial->port.count)
40236+ if (atomic_read(&serial->port.count))
40237 _hso_serial_set_termios(tty, old);
40238 else
40239 tty->termios = *old;
40240@@ -1886,7 +1885,7 @@ static void intr_callback(struct urb *urb)
40241 D1("Pending read interrupt on port %d\n", i);
40242 spin_lock(&serial->serial_lock);
40243 if (serial->rx_state == RX_IDLE &&
40244- serial->port.count > 0) {
40245+ atomic_read(&serial->port.count) > 0) {
40246 /* Setup and send a ctrl req read on
40247 * port i */
40248 if (!serial->rx_urb_filled[0]) {
40249@@ -3079,7 +3078,7 @@ static int hso_resume(struct usb_interface *iface)
40250 /* Start all serial ports */
40251 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
40252 if (serial_table[i] && (serial_table[i]->interface == iface)) {
40253- if (dev2ser(serial_table[i])->port.count) {
40254+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
40255 result =
40256 hso_start_serial_device(serial_table[i], GFP_NOIO);
40257 hso_kick_transmit(dev2ser(serial_table[i]));
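
The hso conversion assumes the earlier grsecurity change that turns struct tty_port's count into an atomic_t. The payoff shows in hso_serial_open(): the old "count++; if (count == 1)" pair could race two openers into both (or neither) running the first-open initialization, whereas atomic_inc_return() folds the bump and the test into one step. The idiom, schematically:

/* first-opener / last-closer pattern with an atomic use count */
if (atomic_inc_return(&serial->port.count) == 1) {
	/* exactly one opener observes 1: do the one-time rx/termios setup */
}

/* and on close: */
atomic_dec(&serial->port.count);
if (atomic_read(&serial->port.count) <= 0) {
	atomic_set(&serial->port.count, 0);   /* the driver clamps back to 0 */
	/* tear down the serial device */
}
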
40258diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
40259index 6993bfa..9053a34 100644
40260--- a/drivers/net/vxlan.c
40261+++ b/drivers/net/vxlan.c
40262@@ -1428,7 +1428,7 @@ nla_put_failure:
40263 return -EMSGSIZE;
40264 }
40265
40266-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
40267+static struct rtnl_link_ops vxlan_link_ops = {
40268 .kind = "vxlan",
40269 .maxtype = IFLA_VXLAN_MAX,
40270 .policy = vxlan_policy,
40271diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
40272index 77fa428..996b355 100644
40273--- a/drivers/net/wireless/at76c50x-usb.c
40274+++ b/drivers/net/wireless/at76c50x-usb.c
40275@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
40276 }
40277
40278 /* Convert timeout from the DFU status to jiffies */
40279-static inline unsigned long at76_get_timeout(struct dfu_status *s)
40280+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
40281 {
40282 return msecs_to_jiffies((s->poll_timeout[2] << 16)
40283 | (s->poll_timeout[1] << 8)
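
__intentional_overflow(-1) is an annotation consumed by PaX's size_overflow gcc plugin, which otherwise instruments integer expressions feeding into sizes and traps on overflow; the marker tells it that overflow in this function's result is deliberate (here, assembling a 24-bit poll timeout) and must not be flagged. A hedged sketch of how the marker degrades to a no-op without the plugin — spelling per grsecurity's compiler.h, which may vary by version:

#ifdef CONFIG_PAX_SIZE_OVERFLOW
# define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
#else
# define __intentional_overflow(...)
#endif
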
40284diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
40285index 8d78253..bebbb68 100644
40286--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
40287+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
40288@@ -184,8 +184,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40289 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
40290 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
40291
40292- ACCESS_ONCE(ads->ds_link) = i->link;
40293- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
40294+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
40295+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
40296
40297 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
40298 ctl6 = SM(i->keytype, AR_EncrType);
40299@@ -199,26 +199,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40300
40301 if ((i->is_first || i->is_last) &&
40302 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
40303- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
40304+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
40305 | set11nTries(i->rates, 1)
40306 | set11nTries(i->rates, 2)
40307 | set11nTries(i->rates, 3)
40308 | (i->dur_update ? AR_DurUpdateEna : 0)
40309 | SM(0, AR_BurstDur);
40310
40311- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
40312+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
40313 | set11nRate(i->rates, 1)
40314 | set11nRate(i->rates, 2)
40315 | set11nRate(i->rates, 3);
40316 } else {
40317- ACCESS_ONCE(ads->ds_ctl2) = 0;
40318- ACCESS_ONCE(ads->ds_ctl3) = 0;
40319+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
40320+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
40321 }
40322
40323 if (!i->is_first) {
40324- ACCESS_ONCE(ads->ds_ctl0) = 0;
40325- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
40326- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
40327+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
40328+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
40329+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
40330 return;
40331 }
40332
40333@@ -243,7 +243,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40334 break;
40335 }
40336
40337- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
40338+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
40339 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
40340 | SM(i->txpower, AR_XmitPower)
40341 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
40342@@ -253,19 +253,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40343 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
40344 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
40345
40346- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
40347- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
40348+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
40349+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
40350
40351 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
40352 return;
40353
40354- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
40355+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
40356 | set11nPktDurRTSCTS(i->rates, 1);
40357
40358- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
40359+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
40360 | set11nPktDurRTSCTS(i->rates, 3);
40361
40362- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
40363+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
40364 | set11nRateFlags(i->rates, 1)
40365 | set11nRateFlags(i->rates, 2)
40366 | set11nRateFlags(i->rates, 3)
40367diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
40368index 301bf72..3f5654f 100644
40369--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
40370+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
40371@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40372 (i->qcu << AR_TxQcuNum_S) | desc_len;
40373
40374 checksum += val;
40375- ACCESS_ONCE(ads->info) = val;
40376+ ACCESS_ONCE_RW(ads->info) = val;
40377
40378 checksum += i->link;
40379- ACCESS_ONCE(ads->link) = i->link;
40380+ ACCESS_ONCE_RW(ads->link) = i->link;
40381
40382 checksum += i->buf_addr[0];
40383- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
40384+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
40385 checksum += i->buf_addr[1];
40386- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
40387+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
40388 checksum += i->buf_addr[2];
40389- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
40390+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
40391 checksum += i->buf_addr[3];
40392- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
40393+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
40394
40395 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
40396- ACCESS_ONCE(ads->ctl3) = val;
40397+ ACCESS_ONCE_RW(ads->ctl3) = val;
40398 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
40399- ACCESS_ONCE(ads->ctl5) = val;
40400+ ACCESS_ONCE_RW(ads->ctl5) = val;
40401 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
40402- ACCESS_ONCE(ads->ctl7) = val;
40403+ ACCESS_ONCE_RW(ads->ctl7) = val;
40404 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
40405- ACCESS_ONCE(ads->ctl9) = val;
40406+ ACCESS_ONCE_RW(ads->ctl9) = val;
40407
40408 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
40409- ACCESS_ONCE(ads->ctl10) = checksum;
40410+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
40411
40412 if (i->is_first || i->is_last) {
40413- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
40414+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
40415 | set11nTries(i->rates, 1)
40416 | set11nTries(i->rates, 2)
40417 | set11nTries(i->rates, 3)
40418 | (i->dur_update ? AR_DurUpdateEna : 0)
40419 | SM(0, AR_BurstDur);
40420
40421- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
40422+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
40423 | set11nRate(i->rates, 1)
40424 | set11nRate(i->rates, 2)
40425 | set11nRate(i->rates, 3);
40426 } else {
40427- ACCESS_ONCE(ads->ctl13) = 0;
40428- ACCESS_ONCE(ads->ctl14) = 0;
40429+ ACCESS_ONCE_RW(ads->ctl13) = 0;
40430+ ACCESS_ONCE_RW(ads->ctl14) = 0;
40431 }
40432
40433 ads->ctl20 = 0;
40434@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40435
40436 ctl17 = SM(i->keytype, AR_EncrType);
40437 if (!i->is_first) {
40438- ACCESS_ONCE(ads->ctl11) = 0;
40439- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
40440- ACCESS_ONCE(ads->ctl15) = 0;
40441- ACCESS_ONCE(ads->ctl16) = 0;
40442- ACCESS_ONCE(ads->ctl17) = ctl17;
40443- ACCESS_ONCE(ads->ctl18) = 0;
40444- ACCESS_ONCE(ads->ctl19) = 0;
40445+ ACCESS_ONCE_RW(ads->ctl11) = 0;
40446+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
40447+ ACCESS_ONCE_RW(ads->ctl15) = 0;
40448+ ACCESS_ONCE_RW(ads->ctl16) = 0;
40449+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
40450+ ACCESS_ONCE_RW(ads->ctl18) = 0;
40451+ ACCESS_ONCE_RW(ads->ctl19) = 0;
40452 return;
40453 }
40454
40455- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
40456+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
40457 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
40458 | SM(i->txpower, AR_XmitPower)
40459 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
40460@@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40461 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
40462 ctl12 |= SM(val, AR_PAPRDChainMask);
40463
40464- ACCESS_ONCE(ads->ctl12) = ctl12;
40465- ACCESS_ONCE(ads->ctl17) = ctl17;
40466+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
40467+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
40468
40469- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
40470+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
40471 | set11nPktDurRTSCTS(i->rates, 1);
40472
40473- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
40474+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
40475 | set11nPktDurRTSCTS(i->rates, 3);
40476
40477- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
40478+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
40479 | set11nRateFlags(i->rates, 1)
40480 | set11nRateFlags(i->rates, 2)
40481 | set11nRateFlags(i->rates, 3)
40482 | SM(i->rtscts_rate, AR_RTSCTSRate);
40483
40484- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
40485+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
40486 }
40487
40488 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
40489diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
40490index 9d26fc5..60d9f14 100644
40491--- a/drivers/net/wireless/ath/ath9k/hw.h
40492+++ b/drivers/net/wireless/ath/ath9k/hw.h
40493@@ -658,7 +658,7 @@ struct ath_hw_private_ops {
40494
40495 /* ANI */
40496 void (*ani_cache_ini_regs)(struct ath_hw *ah);
40497-};
40498+} __no_const;
40499
40500 /**
40501 * struct ath_hw_ops - callbacks used by hardware code and driver code
40502@@ -688,7 +688,7 @@ struct ath_hw_ops {
40503 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
40504 struct ath_hw_antcomb_conf *antconf);
40505 void (*antctrl_shared_chain_lnadiv)(struct ath_hw *hw, bool enable);
40506-};
40507+} __no_const;
40508
40509 struct ath_nf_limits {
40510 s16 max;
40511diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
40512index 3726cd6..b655808 100644
40513--- a/drivers/net/wireless/iwlegacy/3945-mac.c
40514+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
40515@@ -3615,7 +3615,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
40516 */
40517 if (il3945_mod_params.disable_hw_scan) {
40518 D_INFO("Disabling hw_scan\n");
40519- il3945_mac_ops.hw_scan = NULL;
40520+ pax_open_kernel();
40521+ *(void **)&il3945_mac_ops.hw_scan = NULL;
40522+ pax_close_kernel();
40523 }
40524
40525 D_INFO("*** LOAD DRIVER ***\n");
40526diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
40527index 5b9533e..7733880 100644
40528--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
40529+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
40530@@ -203,7 +203,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
40531 {
40532 struct iwl_priv *priv = file->private_data;
40533 char buf[64];
40534- int buf_size;
40535+ size_t buf_size;
40536 u32 offset, len;
40537
40538 memset(buf, 0, sizeof(buf));
40539@@ -473,7 +473,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
40540 struct iwl_priv *priv = file->private_data;
40541
40542 char buf[8];
40543- int buf_size;
40544+ size_t buf_size;
40545 u32 reset_flag;
40546
40547 memset(buf, 0, sizeof(buf));
40548@@ -554,7 +554,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
40549 {
40550 struct iwl_priv *priv = file->private_data;
40551 char buf[8];
40552- int buf_size;
40553+ size_t buf_size;
40554 int ht40;
40555
40556 memset(buf, 0, sizeof(buf));
40557@@ -606,7 +606,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
40558 {
40559 struct iwl_priv *priv = file->private_data;
40560 char buf[8];
40561- int buf_size;
40562+ size_t buf_size;
40563 int value;
40564
40565 memset(buf, 0, sizeof(buf));
40566@@ -1871,7 +1871,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
40567 {
40568 struct iwl_priv *priv = file->private_data;
40569 char buf[8];
40570- int buf_size;
40571+ size_t buf_size;
40572 int clear;
40573
40574 memset(buf, 0, sizeof(buf));
40575@@ -1916,7 +1916,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
40576 {
40577 struct iwl_priv *priv = file->private_data;
40578 char buf[8];
40579- int buf_size;
40580+ size_t buf_size;
40581 int trace;
40582
40583 memset(buf, 0, sizeof(buf));
40584@@ -1987,7 +1987,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
40585 {
40586 struct iwl_priv *priv = file->private_data;
40587 char buf[8];
40588- int buf_size;
40589+ size_t buf_size;
40590 int missed;
40591
40592 memset(buf, 0, sizeof(buf));
40593@@ -2028,7 +2028,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
40594
40595 struct iwl_priv *priv = file->private_data;
40596 char buf[8];
40597- int buf_size;
40598+ size_t buf_size;
40599 int plcp;
40600
40601 memset(buf, 0, sizeof(buf));
40602@@ -2088,7 +2088,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
40603
40604 struct iwl_priv *priv = file->private_data;
40605 char buf[8];
40606- int buf_size;
40607+ size_t buf_size;
40608 int flush;
40609
40610 memset(buf, 0, sizeof(buf));
40611@@ -2178,7 +2178,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
40612
40613 struct iwl_priv *priv = file->private_data;
40614 char buf[8];
40615- int buf_size;
40616+ size_t buf_size;
40617 int rts;
40618
40619 if (!priv->cfg->ht_params)
40620@@ -2220,7 +2220,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
40621 {
40622 struct iwl_priv *priv = file->private_data;
40623 char buf[8];
40624- int buf_size;
40625+ size_t buf_size;
40626
40627 memset(buf, 0, sizeof(buf));
40628 buf_size = min(count, sizeof(buf) - 1);
40629@@ -2256,7 +2256,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
40630 struct iwl_priv *priv = file->private_data;
40631 u32 event_log_flag;
40632 char buf[8];
40633- int buf_size;
40634+ size_t buf_size;
40635
40636 /* check that the interface is up */
40637 if (!iwl_is_ready(priv))
40638@@ -2310,7 +2310,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
40639 struct iwl_priv *priv = file->private_data;
40640 char buf[8];
40641 u32 calib_disabled;
40642- int buf_size;
40643+ size_t buf_size;
40644
40645 memset(buf, 0, sizeof(buf));
40646 buf_size = min(count, sizeof(buf) - 1);
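
Every one of these debugfs write handlers repeats the same prologue, so the type fix repeats with it: count arrives from the VFS as size_t, and holding min(count, sizeof(buf) - 1) in an int mixes signed and unsigned arithmetic that the size_overflow plugin (and careful review) objects to. Using size_t keeps the clamp, the copy_from_user() length, and all comparisons in one unsigned domain:

/* the recurring handler prologue, with the corrected type */
char buf[8];
size_t buf_size;

memset(buf, 0, sizeof(buf));
buf_size = min(count, sizeof(buf) - 1);   /* both operands are size_t now */
if (copy_from_user(buf, user_buf, buf_size))
	return -EFAULT;
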
40647diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
40648index 35708b9..31f7754 100644
40649--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
40650+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
40651@@ -1100,7 +1100,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
40652 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
40653
40654 char buf[8];
40655- int buf_size;
40656+ size_t buf_size;
40657 u32 reset_flag;
40658
40659 memset(buf, 0, sizeof(buf));
40660@@ -1121,7 +1121,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
40661 {
40662 struct iwl_trans *trans = file->private_data;
40663 char buf[8];
40664- int buf_size;
40665+ size_t buf_size;
40666 int csr;
40667
40668 memset(buf, 0, sizeof(buf));
40669diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
40670index ff90855..e46d223 100644
40671--- a/drivers/net/wireless/mac80211_hwsim.c
40672+++ b/drivers/net/wireless/mac80211_hwsim.c
40673@@ -2062,25 +2062,19 @@ static int __init init_mac80211_hwsim(void)
40674
40675 if (channels > 1) {
40676 hwsim_if_comb.num_different_channels = channels;
40677- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
40678- mac80211_hwsim_ops.cancel_hw_scan =
40679- mac80211_hwsim_cancel_hw_scan;
40680- mac80211_hwsim_ops.sw_scan_start = NULL;
40681- mac80211_hwsim_ops.sw_scan_complete = NULL;
40682- mac80211_hwsim_ops.remain_on_channel =
40683- mac80211_hwsim_roc;
40684- mac80211_hwsim_ops.cancel_remain_on_channel =
40685- mac80211_hwsim_croc;
40686- mac80211_hwsim_ops.add_chanctx =
40687- mac80211_hwsim_add_chanctx;
40688- mac80211_hwsim_ops.remove_chanctx =
40689- mac80211_hwsim_remove_chanctx;
40690- mac80211_hwsim_ops.change_chanctx =
40691- mac80211_hwsim_change_chanctx;
40692- mac80211_hwsim_ops.assign_vif_chanctx =
40693- mac80211_hwsim_assign_vif_chanctx;
40694- mac80211_hwsim_ops.unassign_vif_chanctx =
40695- mac80211_hwsim_unassign_vif_chanctx;
40696+ pax_open_kernel();
40697+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
40698+ *(void **)&mac80211_hwsim_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
40699+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
40700+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
40701+ *(void **)&mac80211_hwsim_ops.remain_on_channel = mac80211_hwsim_roc;
40702+ *(void **)&mac80211_hwsim_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
40703+ *(void **)&mac80211_hwsim_ops.add_chanctx = mac80211_hwsim_add_chanctx;
40704+ *(void **)&mac80211_hwsim_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
40705+ *(void **)&mac80211_hwsim_ops.change_chanctx = mac80211_hwsim_change_chanctx;
40706+ *(void **)&mac80211_hwsim_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
40707+ *(void **)&mac80211_hwsim_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
40708+ pax_close_kernel();
40709 }
40710
40711 spin_lock_init(&hwsim_radio_lock);
40712diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
40713index cdb11b3..3eca710 100644
40714--- a/drivers/net/wireless/mwifiex/cfg80211.c
40715+++ b/drivers/net/wireless/mwifiex/cfg80211.c
40716@@ -1846,7 +1846,8 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
40717 }
40718 }
40719
40720- for (i = 0; i < request->n_channels; i++) {
40721+ for (i = 0; i < min_t(u32, request->n_channels,
40722+ MWIFIEX_USER_SCAN_CHAN_MAX); i++) {
40723 chan = request->channels[i];
40724 priv->user_scan_cfg->chan_list[i].chan_number = chan->hw_value;
40725 priv->user_scan_cfg->chan_list[i].radio_type = chan->band;
40726diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
40727index abe1d03..fb02c22 100644
40728--- a/drivers/net/wireless/rndis_wlan.c
40729+++ b/drivers/net/wireless/rndis_wlan.c
40730@@ -1238,7 +1238,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
40731
40732 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
40733
40734- if (rts_threshold < 0 || rts_threshold > 2347)
40735+ if (rts_threshold > 2347)
40736 rts_threshold = 2347;
40737
40738 tmp = cpu_to_le32(rts_threshold);
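
The rndis_wlan change deletes a test that can never fire: rts_threshold is a u32, so "rts_threshold < 0" is always false for an unsigned type and compilers flag it (-Wtype-limits); only the upper bound does any work. A runnable demonstration:

#include <stdio.h>

int main(void)
{
	unsigned int rts_threshold = 5000u;
	/* an unsigned value is never < 0, so that half of the old
	 * condition was dead code; the upper clamp is all that remains */
	if (rts_threshold > 2347u)
		rts_threshold = 2347u;
	printf("%u\n", rts_threshold);
	return 0;
}
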
40739diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
40740index 0751b35..246ba3e 100644
40741--- a/drivers/net/wireless/rt2x00/rt2x00.h
40742+++ b/drivers/net/wireless/rt2x00/rt2x00.h
40743@@ -398,7 +398,7 @@ struct rt2x00_intf {
40744 * for hardware which doesn't support hardware
40745 * sequence counting.
40746 */
40747- atomic_t seqno;
40748+ atomic_unchecked_t seqno;
40749 };
40750
40751 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
40752diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
40753index e488b94..14b6a0c 100644
40754--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
40755+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
40756@@ -240,9 +240,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
40757 * sequence counter given by mac80211.
40758 */
40759 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
40760- seqno = atomic_add_return(0x10, &intf->seqno);
40761+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
40762 else
40763- seqno = atomic_read(&intf->seqno);
40764+ seqno = atomic_read_unchecked(&intf->seqno);
40765
40766 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
40767 hdr->seq_ctrl |= cpu_to_le16(seqno);
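Editor's note: the rt2x00 seqno is a free-running counter that is expected to wrap, so under the patch's REFCOUNT hardening - which traps when a plain atomic_t overflows - it moves to atomic_unchecked_t with the matching _unchecked accessors; the oprofile statistics below get the same treatment for the same reason. A sketch of the split, assuming the patch's atomic_unchecked_t API:

    #include <linux/atomic.h>

    /* True reference counts stay atomic_t: REFCOUNT traps if one ever
     * overflows, turning a use-after-free setup into an oops. */
    static atomic_t my_refcount = ATOMIC_INIT(1);

    /* Counters that may legitimately wrap (sequence numbers, stats)
     * use the unchecked variant so the overflow trap never fires. */
    static atomic_unchecked_t my_seqno = ATOMIC_INIT(0);

    static u16 my_next_seqno(void)
    {
            return atomic_add_return_unchecked(0x10, &my_seqno) & 0xfff0;
    }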
40768diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
40769index e57ee48..541cf6c 100644
40770--- a/drivers/net/wireless/ti/wl1251/sdio.c
40771+++ b/drivers/net/wireless/ti/wl1251/sdio.c
40772@@ -269,13 +269,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
40773
40774 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
40775
40776- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
40777- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
40778+ pax_open_kernel();
40779+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
40780+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
40781+ pax_close_kernel();
40782
40783 wl1251_info("using dedicated interrupt line");
40784 } else {
40785- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
40786- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
40787+ pax_open_kernel();
40788+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
40789+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
40790+ pax_close_kernel();
40791
40792 wl1251_info("using SDIO interrupt");
40793 }
40794diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
40795index e5f5f8f..fdf15b7 100644
40796--- a/drivers/net/wireless/ti/wl12xx/main.c
40797+++ b/drivers/net/wireless/ti/wl12xx/main.c
40798@@ -644,7 +644,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
40799 sizeof(wl->conf.mem));
40800
40801 /* read data preparation is only needed by wl127x */
40802- wl->ops->prepare_read = wl127x_prepare_read;
40803+ pax_open_kernel();
40804+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
40805+ pax_close_kernel();
40806
40807 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER, WL127X_IFTYPE_VER,
40808 WL127X_MAJOR_VER, WL127X_SUBTYPE_VER,
40809@@ -665,7 +667,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
40810 sizeof(wl->conf.mem));
40811
40812 /* read data preparation is only needed by wl127x */
40813- wl->ops->prepare_read = wl127x_prepare_read;
40814+ pax_open_kernel();
40815+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
40816+ pax_close_kernel();
40817
40818 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER, WL127X_IFTYPE_VER,
40819 WL127X_MAJOR_VER, WL127X_SUBTYPE_VER,
40820diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
40821index 8d8c1f8..e754844 100644
40822--- a/drivers/net/wireless/ti/wl18xx/main.c
40823+++ b/drivers/net/wireless/ti/wl18xx/main.c
40824@@ -1489,8 +1489,10 @@ static int wl18xx_setup(struct wl1271 *wl)
40825 }
40826
40827 if (!checksum_param) {
40828- wl18xx_ops.set_rx_csum = NULL;
40829- wl18xx_ops.init_vif = NULL;
40830+ pax_open_kernel();
40831+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
40832+ *(void **)&wl18xx_ops.init_vif = NULL;
40833+ pax_close_kernel();
40834 }
40835
40836 /* Enable 11a Band only if we have 5G antennas */
40837diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
40838index ef2b171..bb513a6 100644
40839--- a/drivers/net/wireless/zd1211rw/zd_usb.c
40840+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
40841@@ -387,7 +387,7 @@ static inline void handle_regs_int(struct urb *urb)
40842 {
40843 struct zd_usb *usb = urb->context;
40844 struct zd_usb_interrupt *intr = &usb->intr;
40845- int len;
40846+ unsigned int len;
40847 u16 int_num;
40848
40849 ZD_ASSERT(in_interrupt());
40850diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
40851index d93b2b6..ae50401 100644
40852--- a/drivers/oprofile/buffer_sync.c
40853+++ b/drivers/oprofile/buffer_sync.c
40854@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
40855 if (cookie == NO_COOKIE)
40856 offset = pc;
40857 if (cookie == INVALID_COOKIE) {
40858- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
40859+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
40860 offset = pc;
40861 }
40862 if (cookie != last_cookie) {
40863@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
40864 /* add userspace sample */
40865
40866 if (!mm) {
40867- atomic_inc(&oprofile_stats.sample_lost_no_mm);
40868+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
40869 return 0;
40870 }
40871
40872 cookie = lookup_dcookie(mm, s->eip, &offset);
40873
40874 if (cookie == INVALID_COOKIE) {
40875- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
40876+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
40877 return 0;
40878 }
40879
40880@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
40881 /* ignore backtraces if failed to add a sample */
40882 if (state == sb_bt_start) {
40883 state = sb_bt_ignore;
40884- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
40885+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
40886 }
40887 }
40888 release_mm(mm);
40889diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
40890index c0cc4e7..44d4e54 100644
40891--- a/drivers/oprofile/event_buffer.c
40892+++ b/drivers/oprofile/event_buffer.c
40893@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
40894 }
40895
40896 if (buffer_pos == buffer_size) {
40897- atomic_inc(&oprofile_stats.event_lost_overflow);
40898+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
40899 return;
40900 }
40901
40902diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
40903index ed2c3ec..deda85a 100644
40904--- a/drivers/oprofile/oprof.c
40905+++ b/drivers/oprofile/oprof.c
40906@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
40907 if (oprofile_ops.switch_events())
40908 return;
40909
40910- atomic_inc(&oprofile_stats.multiplex_counter);
40911+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
40912 start_switch_worker();
40913 }
40914
40915diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
40916index 84a208d..d61b0a1 100644
40917--- a/drivers/oprofile/oprofile_files.c
40918+++ b/drivers/oprofile/oprofile_files.c
40919@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
40920
40921 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
40922
40923-static ssize_t timeout_read(struct file *file, char __user *buf,
40924+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
40925 size_t count, loff_t *offset)
40926 {
40927 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
40928diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
40929index 917d28e..d62d981 100644
40930--- a/drivers/oprofile/oprofile_stats.c
40931+++ b/drivers/oprofile/oprofile_stats.c
40932@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
40933 cpu_buf->sample_invalid_eip = 0;
40934 }
40935
40936- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
40937- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
40938- atomic_set(&oprofile_stats.event_lost_overflow, 0);
40939- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
40940- atomic_set(&oprofile_stats.multiplex_counter, 0);
40941+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
40942+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
40943+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
40944+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
40945+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
40946 }
40947
40948
40949diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
40950index 38b6fc0..b5cbfce 100644
40951--- a/drivers/oprofile/oprofile_stats.h
40952+++ b/drivers/oprofile/oprofile_stats.h
40953@@ -13,11 +13,11 @@
40954 #include <linux/atomic.h>
40955
40956 struct oprofile_stat_struct {
40957- atomic_t sample_lost_no_mm;
40958- atomic_t sample_lost_no_mapping;
40959- atomic_t bt_lost_no_mapping;
40960- atomic_t event_lost_overflow;
40961- atomic_t multiplex_counter;
40962+ atomic_unchecked_t sample_lost_no_mm;
40963+ atomic_unchecked_t sample_lost_no_mapping;
40964+ atomic_unchecked_t bt_lost_no_mapping;
40965+ atomic_unchecked_t event_lost_overflow;
40966+ atomic_unchecked_t multiplex_counter;
40967 };
40968
40969 extern struct oprofile_stat_struct oprofile_stats;
40970diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
40971index 849357c..b83c1e0 100644
40972--- a/drivers/oprofile/oprofilefs.c
40973+++ b/drivers/oprofile/oprofilefs.c
40974@@ -185,7 +185,7 @@ static const struct file_operations atomic_ro_fops = {
40975
40976
40977 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
40978- char const *name, atomic_t *val)
40979+ char const *name, atomic_unchecked_t *val)
40980 {
40981 return __oprofilefs_create_file(sb, root, name,
40982 &atomic_ro_fops, 0444, val);
40983diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
40984index 93404f7..4a313d8 100644
40985--- a/drivers/oprofile/timer_int.c
40986+++ b/drivers/oprofile/timer_int.c
40987@@ -93,7 +93,7 @@ static int __cpuinit oprofile_cpu_notify(struct notifier_block *self,
40988 return NOTIFY_OK;
40989 }
40990
40991-static struct notifier_block __refdata oprofile_cpu_notifier = {
40992+static struct notifier_block oprofile_cpu_notifier = {
40993 .notifier_call = oprofile_cpu_notify,
40994 };
40995
40996diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
40997index 3f56bc0..707d642 100644
40998--- a/drivers/parport/procfs.c
40999+++ b/drivers/parport/procfs.c
41000@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
41001
41002 *ppos += len;
41003
41004- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
41005+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
41006 }
41007
41008 #ifdef CONFIG_PARPORT_1284
41009@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
41010
41011 *ppos += len;
41012
41013- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
41014+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
41015 }
41016 #endif /* IEEE1284.3 support. */
41017
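Editor's note: both parport handlers format into a small on-stack buffer and then copy len bytes to userspace; the patch refuses the copy outright if len somehow exceeds the buffer, converting a potential stack over-read into -EFAULT. The guard in isolation (a sketch; the buffer size and helper name are illustrative):

    #include <linux/uaccess.h>

    static int my_copy_out(char __user *result, const char *buffer,
                           size_t buf_size, size_t len)
    {
            /* Never pass copy_to_user() a length larger than the
             * source buffer actually holds. */
            if (len > buf_size || copy_to_user(result, buffer, len))
                    return -EFAULT;
            return 0;
    }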
41018diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
41019index c35e8ad..fc33beb 100644
41020--- a/drivers/pci/hotplug/acpiphp_ibm.c
41021+++ b/drivers/pci/hotplug/acpiphp_ibm.c
41022@@ -464,7 +464,9 @@ static int __init ibm_acpiphp_init(void)
41023 goto init_cleanup;
41024 }
41025
41026- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
41027+ pax_open_kernel();
41028+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
41029+ pax_close_kernel();
41030 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
41031
41032 return retval;
41033diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
41034index a6a71c4..c91097b 100644
41035--- a/drivers/pci/hotplug/cpcihp_generic.c
41036+++ b/drivers/pci/hotplug/cpcihp_generic.c
41037@@ -73,7 +73,6 @@ static u16 port;
41038 static unsigned int enum_bit;
41039 static u8 enum_mask;
41040
41041-static struct cpci_hp_controller_ops generic_hpc_ops;
41042 static struct cpci_hp_controller generic_hpc;
41043
41044 static int __init validate_parameters(void)
41045@@ -139,6 +138,10 @@ static int query_enum(void)
41046 return ((value & enum_mask) == enum_mask);
41047 }
41048
41049+static struct cpci_hp_controller_ops generic_hpc_ops = {
41050+ .query_enum = query_enum,
41051+};
41052+
41053 static int __init cpcihp_generic_init(void)
41054 {
41055 int status;
41056@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
41057 pci_dev_put(dev);
41058
41059 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
41060- generic_hpc_ops.query_enum = query_enum;
41061 generic_hpc.ops = &generic_hpc_ops;
41062
41063 status = cpci_hp_register_controller(&generic_hpc);
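Editor's note: cpcihp_generic (and cpcihp_zt5550 below) show the complementary move: when a hook is known at compile time, the runtime assignment is replaced by a designated initializer, and the ops definition simply migrates below the handler it names, so the table never needs a writable window at all. The shape, with hypothetical names:

    struct my_hpc_ops {
            int (*query_enum)(void);
    };

    static int my_query_enum(void)
    {
            return 0;       /* placeholder handler */
    }

    /* Fully initialized at build time; with no runtime write needed,
     * the table can stay in a read-only section. */
    static struct my_hpc_ops my_hpc_ops = {
            .query_enum = my_query_enum,
    };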
41064diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
41065index 449b4bb..257e2e8 100644
41066--- a/drivers/pci/hotplug/cpcihp_zt5550.c
41067+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
41068@@ -59,7 +59,6 @@
41069 /* local variables */
41070 static bool debug;
41071 static bool poll;
41072-static struct cpci_hp_controller_ops zt5550_hpc_ops;
41073 static struct cpci_hp_controller zt5550_hpc;
41074
41075 /* Primary cPCI bus bridge device */
41076@@ -205,6 +204,10 @@ static int zt5550_hc_disable_irq(void)
41077 return 0;
41078 }
41079
41080+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
41081+ .query_enum = zt5550_hc_query_enum,
41082+};
41083+
41084 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
41085 {
41086 int status;
41087@@ -216,16 +219,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
41088 dbg("returned from zt5550_hc_config");
41089
41090 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
41091- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
41092 zt5550_hpc.ops = &zt5550_hpc_ops;
41093 if(!poll) {
41094 zt5550_hpc.irq = hc_dev->irq;
41095 zt5550_hpc.irq_flags = IRQF_SHARED;
41096 zt5550_hpc.dev_id = hc_dev;
41097
41098- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
41099- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
41100- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
41101+ pax_open_kernel();
41102+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
41103+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
41104+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
41105+ pax_close_kernel();
41106 } else {
41107 info("using ENUM# polling mode");
41108 }
41109diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
41110index 76ba8a1..20ca857 100644
41111--- a/drivers/pci/hotplug/cpqphp_nvram.c
41112+++ b/drivers/pci/hotplug/cpqphp_nvram.c
41113@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
41114
41115 void compaq_nvram_init (void __iomem *rom_start)
41116 {
41117+
41118+#ifndef CONFIG_PAX_KERNEXEC
41119 if (rom_start) {
41120 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
41121 }
41122+#endif
41123+
41124 dbg("int15 entry = %p\n", compaq_int15_entry_point);
41125
41126 /* initialize our int15 lock */
41127diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
41128index 202f4a9..8ee47d0 100644
41129--- a/drivers/pci/hotplug/pci_hotplug_core.c
41130+++ b/drivers/pci/hotplug/pci_hotplug_core.c
41131@@ -448,8 +448,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
41132 return -EINVAL;
41133 }
41134
41135- slot->ops->owner = owner;
41136- slot->ops->mod_name = mod_name;
41137+ pax_open_kernel();
41138+ *(struct module **)&slot->ops->owner = owner;
41139+ *(const char **)&slot->ops->mod_name = mod_name;
41140+ pax_close_kernel();
41141
41142 mutex_lock(&pci_hp_mutex);
41143 /*
41144diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
41145index 939bd1d..a1459c9 100644
41146--- a/drivers/pci/hotplug/pciehp_core.c
41147+++ b/drivers/pci/hotplug/pciehp_core.c
41148@@ -91,7 +91,7 @@ static int init_slot(struct controller *ctrl)
41149 struct slot *slot = ctrl->slot;
41150 struct hotplug_slot *hotplug = NULL;
41151 struct hotplug_slot_info *info = NULL;
41152- struct hotplug_slot_ops *ops = NULL;
41153+ hotplug_slot_ops_no_const *ops = NULL;
41154 char name[SLOT_NAME_SIZE];
41155 int retval = -ENOMEM;
41156
41157diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
41158index 9c6e9bb..2916736 100644
41159--- a/drivers/pci/pci-sysfs.c
41160+++ b/drivers/pci/pci-sysfs.c
41161@@ -1071,7 +1071,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
41162 {
41163 /* allocate attribute structure, piggyback attribute name */
41164 int name_len = write_combine ? 13 : 10;
41165- struct bin_attribute *res_attr;
41166+ bin_attribute_no_const *res_attr;
41167 int retval;
41168
41169 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
41170@@ -1256,7 +1256,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
41171 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
41172 {
41173 int retval;
41174- struct bin_attribute *attr;
41175+ bin_attribute_no_const *attr;
41176
41177 /* If the device has VPD, try to expose it in sysfs. */
41178 if (dev->vpd) {
41179@@ -1303,7 +1303,7 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
41180 {
41181 int retval;
41182 int rom_size = 0;
41183- struct bin_attribute *attr;
41184+ bin_attribute_no_const *attr;
41185
41186 if (!sysfs_initialized)
41187 return -EACCES;
41188diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
41189index e851829..a1a7196 100644
41190--- a/drivers/pci/pci.h
41191+++ b/drivers/pci/pci.h
41192@@ -98,7 +98,7 @@ struct pci_vpd_ops {
41193 struct pci_vpd {
41194 unsigned int len;
41195 const struct pci_vpd_ops *ops;
41196- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
41197+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
41198 };
41199
41200 extern int pci_vpd_pci22_init(struct pci_dev *dev);
41201diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
41202index 8474b6a..ee81993 100644
41203--- a/drivers/pci/pcie/aspm.c
41204+++ b/drivers/pci/pcie/aspm.c
41205@@ -27,9 +27,9 @@
41206 #define MODULE_PARAM_PREFIX "pcie_aspm."
41207
41208 /* Note: those are not register definitions */
41209-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
41210-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
41211-#define ASPM_STATE_L1 (4) /* L1 state */
41212+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
41213+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
41214+#define ASPM_STATE_L1 (4U) /* L1 state */
41215 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
41216 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
41217
41218diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
41219index 6186f03..1a78714 100644
41220--- a/drivers/pci/probe.c
41221+++ b/drivers/pci/probe.c
41222@@ -173,7 +173,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
41223 struct pci_bus_region region;
41224 bool bar_too_big = false, bar_disabled = false;
41225
41226- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
41227+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
41228
41229 /* No printks while decoding is disabled! */
41230 if (!dev->mmio_always_on) {
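Editor's note: PCI_ROM_ADDRESS_MASK expands to an unsigned long (~0x7ffUL), so storing it in the u32 mask narrows implicitly on 64-bit builds; the cast spells the narrowing out instead of leaving it implicit (the kind of silent truncation the patch's size_overflow instrumentation treats as suspect). In miniature:

    #include <linux/types.h>

    #define MY_ROM_MASK (~0x7ffUL)  /* stand-in for PCI_ROM_ADDRESS_MASK */

    static u32 my_bar_mask(int is_rom)
    {
            /* The macro is an unsigned long; the cast makes the
             * 64-to-32 bit narrowing explicit. */
            return is_rom ? (u32)MY_ROM_MASK : ~0;
    }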
41231diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
41232index 9b8505c..f00870a 100644
41233--- a/drivers/pci/proc.c
41234+++ b/drivers/pci/proc.c
41235@@ -465,7 +465,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
41236 static int __init pci_proc_init(void)
41237 {
41238 struct pci_dev *dev = NULL;
41239+
41240+#ifdef CONFIG_GRKERNSEC_PROC_ADD
41241+#ifdef CONFIG_GRKERNSEC_PROC_USER
41242+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
41243+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
41244+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
41245+#endif
41246+#else
41247 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
41248+#endif
41249 proc_create("devices", 0, proc_bus_pci_dir,
41250 &proc_bus_pci_dev_operations);
41251 proc_initialized = 1;
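Editor's note: with GRKERNSEC_PROC enabled, /proc/bus/pci is created via proc_mkdir_mode() so that only root - or root plus the configured proc group - can list and traverse it; the stock world-accessible proc_mkdir() remains the fallback. The mode selection in isolation, simplified (the patch additionally gates this on CONFIG_GRKERNSEC_PROC_ADD):

    #include <linux/proc_fs.h>
    #include <linux/stat.h>

    static struct proc_dir_entry *my_make_pci_dir(void)
    {
    #ifdef CONFIG_GRKERNSEC_PROC_USER
            /* root only: r-x------ */
            return proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
    #elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
            /* root plus the grsec proc group: r-xr-x--- */
            return proc_mkdir_mode("bus/pci",
                                   S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP,
                                   NULL);
    #else
            return proc_mkdir("bus/pci", NULL);     /* stock behaviour */
    #endif
    }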
41252diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
41253index 2111dbb..79e434b 100644
41254--- a/drivers/platform/x86/msi-laptop.c
41255+++ b/drivers/platform/x86/msi-laptop.c
41256@@ -820,12 +820,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
41257 int result;
41258
41259 /* allow userland write sysfs file */
41260- dev_attr_bluetooth.store = store_bluetooth;
41261- dev_attr_wlan.store = store_wlan;
41262- dev_attr_threeg.store = store_threeg;
41263- dev_attr_bluetooth.attr.mode |= S_IWUSR;
41264- dev_attr_wlan.attr.mode |= S_IWUSR;
41265- dev_attr_threeg.attr.mode |= S_IWUSR;
41266+ pax_open_kernel();
41267+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
41268+ *(void **)&dev_attr_wlan.store = store_wlan;
41269+ *(void **)&dev_attr_threeg.store = store_threeg;
41270+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
41271+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
41272+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
41273+ pax_close_kernel();
41274
41275 /* disable hardware control by fn key */
41276 result = ec_read(MSI_STANDARD_EC_SCM_LOAD_ADDRESS, &data);
41277diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
41278index 0fe987f..6f3d5c3 100644
41279--- a/drivers/platform/x86/sony-laptop.c
41280+++ b/drivers/platform/x86/sony-laptop.c
41281@@ -2356,7 +2356,7 @@ static void sony_nc_lid_resume_cleanup(struct platform_device *pd)
41282 }
41283
41284 /* High speed charging function */
41285-static struct device_attribute *hsc_handle;
41286+static device_attribute_no_const *hsc_handle;
41287
41288 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
41289 struct device_attribute *attr,
41290diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
41291index f946ca7..f25c833 100644
41292--- a/drivers/platform/x86/thinkpad_acpi.c
41293+++ b/drivers/platform/x86/thinkpad_acpi.c
41294@@ -2097,7 +2097,7 @@ static int hotkey_mask_get(void)
41295 return 0;
41296 }
41297
41298-void static hotkey_mask_warn_incomplete_mask(void)
41299+static void hotkey_mask_warn_incomplete_mask(void)
41300 {
41301 /* log only what the user can fix... */
41302 const u32 wantedmask = hotkey_driver_mask &
41303@@ -2328,11 +2328,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
41304 }
41305 }
41306
41307-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
41308- struct tp_nvram_state *newn,
41309- const u32 event_mask)
41310-{
41311-
41312 #define TPACPI_COMPARE_KEY(__scancode, __member) \
41313 do { \
41314 if ((event_mask & (1 << __scancode)) && \
41315@@ -2346,36 +2341,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
41316 tpacpi_hotkey_send_key(__scancode); \
41317 } while (0)
41318
41319- void issue_volchange(const unsigned int oldvol,
41320- const unsigned int newvol)
41321- {
41322- unsigned int i = oldvol;
41323+static void issue_volchange(const unsigned int oldvol,
41324+ const unsigned int newvol,
41325+ const u32 event_mask)
41326+{
41327+ unsigned int i = oldvol;
41328
41329- while (i > newvol) {
41330- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
41331- i--;
41332- }
41333- while (i < newvol) {
41334- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
41335- i++;
41336- }
41337+ while (i > newvol) {
41338+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
41339+ i--;
41340 }
41341+ while (i < newvol) {
41342+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
41343+ i++;
41344+ }
41345+}
41346
41347- void issue_brightnesschange(const unsigned int oldbrt,
41348- const unsigned int newbrt)
41349- {
41350- unsigned int i = oldbrt;
41351+static void issue_brightnesschange(const unsigned int oldbrt,
41352+ const unsigned int newbrt,
41353+ const u32 event_mask)
41354+{
41355+ unsigned int i = oldbrt;
41356
41357- while (i > newbrt) {
41358- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
41359- i--;
41360- }
41361- while (i < newbrt) {
41362- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
41363- i++;
41364- }
41365+ while (i > newbrt) {
41366+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
41367+ i--;
41368+ }
41369+ while (i < newbrt) {
41370+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
41371+ i++;
41372 }
41373+}
41374
41375+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
41376+ struct tp_nvram_state *newn,
41377+ const u32 event_mask)
41378+{
41379 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
41380 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
41381 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
41382@@ -2409,7 +2410,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
41383 oldn->volume_level != newn->volume_level) {
41384 /* recently muted, or repeated mute keypress, or
41385 * multiple presses ending in mute */
41386- issue_volchange(oldn->volume_level, newn->volume_level);
41387+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
41388 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
41389 }
41390 } else {
41391@@ -2419,7 +2420,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
41392 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
41393 }
41394 if (oldn->volume_level != newn->volume_level) {
41395- issue_volchange(oldn->volume_level, newn->volume_level);
41396+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
41397 } else if (oldn->volume_toggle != newn->volume_toggle) {
41398 /* repeated vol up/down keypress at end of scale ? */
41399 if (newn->volume_level == 0)
41400@@ -2432,7 +2433,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
41401 /* handle brightness */
41402 if (oldn->brightness_level != newn->brightness_level) {
41403 issue_brightnesschange(oldn->brightness_level,
41404- newn->brightness_level);
41405+ newn->brightness_level,
41406+ event_mask);
41407 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
41408 /* repeated key presses that didn't change state */
41409 if (newn->brightness_level == 0)
41410@@ -2441,10 +2443,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
41411 && !tp_features.bright_unkfw)
41412 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
41413 }
41414+}
41415
41416 #undef TPACPI_COMPARE_KEY
41417 #undef TPACPI_MAY_SEND_KEY
41418-}
41419
41420 /*
41421 * Polling driver
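Editor's note: the thinkpad_acpi rework hoists two GCC nested functions out of hotkey_compare_and_issue_event() into ordinary file-scope statics; the value they used to capture from the enclosing scope (event_mask) becomes an explicit parameter. The transformation in miniature, with hypothetical names:

    #include <linux/types.h>

    static void my_send_key(unsigned int scancode, u32 event_mask)
    {
            /* placeholder for the masked tpacpi_hotkey_send_key() logic */
    }

    /* Formerly a nested helper; the old capture (event_mask) is now
     * passed explicitly. */
    static void my_issue_change(unsigned int oldval, unsigned int newval,
                                u32 event_mask)
    {
            unsigned int i;

            for (i = oldval; i < newval; i++)
                    my_send_key(i, event_mask);
    }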
41422diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
41423index 769d265..a3a05ca 100644
41424--- a/drivers/pnp/pnpbios/bioscalls.c
41425+++ b/drivers/pnp/pnpbios/bioscalls.c
41426@@ -58,7 +58,7 @@ do { \
41427 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
41428 } while(0)
41429
41430-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
41431+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
41432 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
41433
41434 /*
41435@@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
41436
41437 cpu = get_cpu();
41438 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
41439+
41440+ pax_open_kernel();
41441 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
41442+ pax_close_kernel();
41443
41444 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
41445 spin_lock_irqsave(&pnp_bios_lock, flags);
41446@@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
41447 :"memory");
41448 spin_unlock_irqrestore(&pnp_bios_lock, flags);
41449
41450+ pax_open_kernel();
41451 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
41452+ pax_close_kernel();
41453+
41454 put_cpu();
41455
41456 /* If we get here and this is set then the PnP BIOS faulted on us. */
41457@@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
41458 return status;
41459 }
41460
41461-void pnpbios_calls_init(union pnp_bios_install_struct *header)
41462+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
41463 {
41464 int i;
41465
41466@@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
41467 pnp_bios_callpoint.offset = header->fields.pm16offset;
41468 pnp_bios_callpoint.segment = PNP_CS16;
41469
41470+ pax_open_kernel();
41471+
41472 for_each_possible_cpu(i) {
41473 struct desc_struct *gdt = get_cpu_gdt_table(i);
41474 if (!gdt)
41475@@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
41476 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
41477 (unsigned long)__va(header->fields.pm16dseg));
41478 }
41479+
41480+ pax_close_kernel();
41481 }
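Editor's note: two things change for the PnP BIOS descriptor: bad_bios_desc becomes const, and its type byte goes from 0x92 to 0x93, pre-setting the segment's accessed bit. With KERNEXEC keeping the GDT read-only, that matters: the CPU sets the accessed bit itself on first load of a descriptor that lacks it, and that hardware write would fault on a write-protected table. For reference (x86 data-segment type bytes):

    /* x86 data segment descriptor type bytes:
     *   0x92 - present, DPL 0, read/write data, accessed bit clear
     *   0x93 - the same, with the accessed bit already set
     * Pre-setting the bit avoids a hardware write into a read-only
     * GDT on first segment load. */
    #define MY_DESC_DATA_RW          0x92
    #define MY_DESC_DATA_RW_ACCESSED 0x93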
41482diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
41483index 3e6db1c..1fbbdae 100644
41484--- a/drivers/pnp/resource.c
41485+++ b/drivers/pnp/resource.c
41486@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
41487 return 1;
41488
41489 /* check if the resource is valid */
41490- if (*irq < 0 || *irq > 15)
41491+ if (*irq > 15)
41492 return 0;
41493
41494 /* check if the resource is reserved */
41495@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
41496 return 1;
41497
41498 /* check if the resource is valid */
41499- if (*dma < 0 || *dma == 4 || *dma > 7)
41500+ if (*dma == 4 || *dma > 7)
41501 return 0;
41502
41503 /* check if the resource is reserved */
41504diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
41505index 7df7c5f..bd48c47 100644
41506--- a/drivers/power/pda_power.c
41507+++ b/drivers/power/pda_power.c
41508@@ -37,7 +37,11 @@ static int polling;
41509
41510 #ifdef CONFIG_USB_OTG_UTILS
41511 static struct usb_phy *transceiver;
41512-static struct notifier_block otg_nb;
41513+static int otg_handle_notification(struct notifier_block *nb,
41514+ unsigned long event, void *unused);
41515+static struct notifier_block otg_nb = {
41516+ .notifier_call = otg_handle_notification
41517+};
41518 #endif
41519
41520 static struct regulator *ac_draw;
41521@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
41522
41523 #ifdef CONFIG_USB_OTG_UTILS
41524 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
41525- otg_nb.notifier_call = otg_handle_notification;
41526 ret = usb_register_notifier(transceiver, &otg_nb);
41527 if (ret) {
41528 dev_err(dev, "failure to register otg notifier\n");
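Editor's note: pda_power gets the same static-initialization treatment as the hotplug drivers, with one wrinkle: the callback is defined later in the file, so a forward declaration lets the initializer name it. The idiom:

    #include <linux/notifier.h>

    /* Forward declaration so the static initializer below can name
     * the handler even though its body comes later in the file. */
    static int my_handle_event(struct notifier_block *nb,
                               unsigned long event, void *unused);

    static struct notifier_block my_nb = {
            .notifier_call = my_handle_event,
    };

    static int my_handle_event(struct notifier_block *nb,
                               unsigned long event, void *unused)
    {
            return NOTIFY_OK;
    }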
41529diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
41530index cc439fd..8fa30df 100644
41531--- a/drivers/power/power_supply.h
41532+++ b/drivers/power/power_supply.h
41533@@ -16,12 +16,12 @@ struct power_supply;
41534
41535 #ifdef CONFIG_SYSFS
41536
41537-extern void power_supply_init_attrs(struct device_type *dev_type);
41538+extern void power_supply_init_attrs(void);
41539 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
41540
41541 #else
41542
41543-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
41544+static inline void power_supply_init_attrs(void) {}
41545 #define power_supply_uevent NULL
41546
41547 #endif /* CONFIG_SYSFS */
41548diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
41549index 8a7cfb3..72e6e9b 100644
41550--- a/drivers/power/power_supply_core.c
41551+++ b/drivers/power/power_supply_core.c
41552@@ -24,7 +24,10 @@
41553 struct class *power_supply_class;
41554 EXPORT_SYMBOL_GPL(power_supply_class);
41555
41556-static struct device_type power_supply_dev_type;
41557+extern const struct attribute_group *power_supply_attr_groups[];
41558+static struct device_type power_supply_dev_type = {
41559+ .groups = power_supply_attr_groups,
41560+};
41561
41562 static int __power_supply_changed_work(struct device *dev, void *data)
41563 {
41564@@ -393,7 +396,7 @@ static int __init power_supply_class_init(void)
41565 return PTR_ERR(power_supply_class);
41566
41567 power_supply_class->dev_uevent = power_supply_uevent;
41568- power_supply_init_attrs(&power_supply_dev_type);
41569+ power_supply_init_attrs();
41570
41571 return 0;
41572 }
41573diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
41574index 40fa3b7..d9c2e0e 100644
41575--- a/drivers/power/power_supply_sysfs.c
41576+++ b/drivers/power/power_supply_sysfs.c
41577@@ -229,17 +229,15 @@ static struct attribute_group power_supply_attr_group = {
41578 .is_visible = power_supply_attr_is_visible,
41579 };
41580
41581-static const struct attribute_group *power_supply_attr_groups[] = {
41582+const struct attribute_group *power_supply_attr_groups[] = {
41583 &power_supply_attr_group,
41584 NULL,
41585 };
41586
41587-void power_supply_init_attrs(struct device_type *dev_type)
41588+void power_supply_init_attrs(void)
41589 {
41590 int i;
41591
41592- dev_type->groups = power_supply_attr_groups;
41593-
41594 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
41595 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
41596 }
41597diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
41598index 4d7c635..9860196 100644
41599--- a/drivers/regulator/max8660.c
41600+++ b/drivers/regulator/max8660.c
41601@@ -333,8 +333,10 @@ static int max8660_probe(struct i2c_client *client,
41602 max8660->shadow_regs[MAX8660_OVER1] = 5;
41603 } else {
41604 /* Otherwise devices can be toggled via software */
41605- max8660_dcdc_ops.enable = max8660_dcdc_enable;
41606- max8660_dcdc_ops.disable = max8660_dcdc_disable;
41607+ pax_open_kernel();
41608+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
41609+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
41610+ pax_close_kernel();
41611 }
41612
41613 /*
41614diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
41615index 9a8ea91..c483dd9 100644
41616--- a/drivers/regulator/max8973-regulator.c
41617+++ b/drivers/regulator/max8973-regulator.c
41618@@ -401,9 +401,11 @@ static int max8973_probe(struct i2c_client *client,
41619 if (!pdata->enable_ext_control) {
41620 max->desc.enable_reg = MAX8973_VOUT;
41621 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
41622- max8973_dcdc_ops.enable = regulator_enable_regmap;
41623- max8973_dcdc_ops.disable = regulator_disable_regmap;
41624- max8973_dcdc_ops.is_enabled = regulator_is_enabled_regmap;
41625+ pax_open_kernel();
41626+ *(void **)&max8973_dcdc_ops.enable = regulator_enable_regmap;
41627+ *(void **)&max8973_dcdc_ops.disable = regulator_disable_regmap;
41628+ *(void **)&max8973_dcdc_ops.is_enabled = regulator_is_enabled_regmap;
41629+ pax_close_kernel();
41630 }
41631
41632 max->enable_external_control = pdata->enable_ext_control;
41633diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
41634index 0d84b1f..c2da6ac 100644
41635--- a/drivers/regulator/mc13892-regulator.c
41636+++ b/drivers/regulator/mc13892-regulator.c
41637@@ -540,10 +540,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
41638 }
41639 mc13xxx_unlock(mc13892);
41640
41641- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
41642+ pax_open_kernel();
41643+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
41644 = mc13892_vcam_set_mode;
41645- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
41646+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
41647 = mc13892_vcam_get_mode;
41648+ pax_close_kernel();
41649
41650 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
41651 ARRAY_SIZE(mc13892_regulators));
41652diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
41653index 16630aa..6afc992 100644
41654--- a/drivers/rtc/rtc-cmos.c
41655+++ b/drivers/rtc/rtc-cmos.c
41656@@ -724,7 +724,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
41657 hpet_rtc_timer_init();
41658
41659 /* export at least the first block of NVRAM */
41660- nvram.size = address_space - NVRAM_OFFSET;
41661+ pax_open_kernel();
41662+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
41663+ pax_close_kernel();
41664 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
41665 if (retval < 0) {
41666 dev_dbg(dev, "can't create nvram file? %d\n", retval);
41667diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
41668index 9a86b4b..3a383dc 100644
41669--- a/drivers/rtc/rtc-dev.c
41670+++ b/drivers/rtc/rtc-dev.c
41671@@ -14,6 +14,7 @@
41672 #include <linux/module.h>
41673 #include <linux/rtc.h>
41674 #include <linux/sched.h>
41675+#include <linux/grsecurity.h>
41676 #include "rtc-core.h"
41677
41678 static dev_t rtc_devt;
41679@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
41680 if (copy_from_user(&tm, uarg, sizeof(tm)))
41681 return -EFAULT;
41682
41683+ gr_log_timechange();
41684+
41685 return rtc_set_time(rtc, &tm);
41686
41687 case RTC_PIE_ON:
41688diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
41689index e0d0ba4..3c65868 100644
41690--- a/drivers/rtc/rtc-ds1307.c
41691+++ b/drivers/rtc/rtc-ds1307.c
41692@@ -106,7 +106,7 @@ struct ds1307 {
41693 u8 offset; /* register's offset */
41694 u8 regs[11];
41695 u16 nvram_offset;
41696- struct bin_attribute *nvram;
41697+ bin_attribute_no_const *nvram;
41698 enum ds_type type;
41699 unsigned long flags;
41700 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
41701diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
41702index 130f29a..6179d03 100644
41703--- a/drivers/rtc/rtc-m48t59.c
41704+++ b/drivers/rtc/rtc-m48t59.c
41705@@ -482,7 +482,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
41706 goto out;
41707 }
41708
41709- m48t59_nvram_attr.size = pdata->offset;
41710+ pax_open_kernel();
41711+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
41712+ pax_close_kernel();
41713
41714 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
41715 if (ret) {
41716diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
41717index e693af6..2e525b6 100644
41718--- a/drivers/scsi/bfa/bfa_fcpim.h
41719+++ b/drivers/scsi/bfa/bfa_fcpim.h
41720@@ -36,7 +36,7 @@ struct bfa_iotag_s {
41721
41722 struct bfa_itn_s {
41723 bfa_isr_func_t isr;
41724-};
41725+} __no_const;
41726
41727 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
41728 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
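Editor's note: not every function-pointer struct can be constified. bfa fills these callback slots in at attach time, per instance, so the patch tags them __no_const to exempt them from the constify plugin rather than sprinkle pax_open_kernel() around every assignment. A sketch, assuming the patch's __no_const attribute:

    /* Constify would normally push a struct holding only function
     * pointers into a read-only section; __no_const opts this one
     * out because .isr is legitimately assigned at runtime. */
    struct my_itn {
            void (*isr)(void *ctx);
    } __no_const;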
41729diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
41730index 23a90e7..9cf04ee 100644
41731--- a/drivers/scsi/bfa/bfa_ioc.h
41732+++ b/drivers/scsi/bfa/bfa_ioc.h
41733@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
41734 bfa_ioc_disable_cbfn_t disable_cbfn;
41735 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
41736 bfa_ioc_reset_cbfn_t reset_cbfn;
41737-};
41738+} __no_const;
41739
41740 /*
41741 * IOC event notification mechanism.
41742@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
41743 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
41744 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
41745 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
41746-};
41747+} __no_const;
41748
41749 /*
41750 * Queue element to wait for room in request queue. FIFO order is
41751diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
41752index 593085a..47aa999 100644
41753--- a/drivers/scsi/hosts.c
41754+++ b/drivers/scsi/hosts.c
41755@@ -42,7 +42,7 @@
41756 #include "scsi_logging.h"
41757
41758
41759-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
41760+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
41761
41762
41763 static void scsi_host_cls_release(struct device *dev)
41764@@ -361,7 +361,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
41765 * subtract one because we increment first then return, but we need to
41766 * know what the next host number was before increment
41767 */
41768- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
41769+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
41770 shost->dma_channel = 0xff;
41771
41772 /* These three are default values which can be overridden */
41773diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
41774index 4f33806..afd6f60 100644
41775--- a/drivers/scsi/hpsa.c
41776+++ b/drivers/scsi/hpsa.c
41777@@ -554,7 +554,7 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
41778 unsigned long flags;
41779
41780 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
41781- return h->access.command_completed(h, q);
41782+ return h->access->command_completed(h, q);
41783
41784 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
41785 a = rq->head[rq->current_entry];
41786@@ -3374,7 +3374,7 @@ static void start_io(struct ctlr_info *h)
41787 while (!list_empty(&h->reqQ)) {
41788 c = list_entry(h->reqQ.next, struct CommandList, list);
41789 /* can't do anything if fifo is full */
41790- if ((h->access.fifo_full(h))) {
41791+ if ((h->access->fifo_full(h))) {
41792 dev_warn(&h->pdev->dev, "fifo full\n");
41793 break;
41794 }
41795@@ -3396,7 +3396,7 @@ static void start_io(struct ctlr_info *h)
41796
41797 /* Tell the controller execute command */
41798 spin_unlock_irqrestore(&h->lock, flags);
41799- h->access.submit_command(h, c);
41800+ h->access->submit_command(h, c);
41801 spin_lock_irqsave(&h->lock, flags);
41802 }
41803 spin_unlock_irqrestore(&h->lock, flags);
41804@@ -3404,17 +3404,17 @@ static void start_io(struct ctlr_info *h)
41805
41806 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
41807 {
41808- return h->access.command_completed(h, q);
41809+ return h->access->command_completed(h, q);
41810 }
41811
41812 static inline bool interrupt_pending(struct ctlr_info *h)
41813 {
41814- return h->access.intr_pending(h);
41815+ return h->access->intr_pending(h);
41816 }
41817
41818 static inline long interrupt_not_for_us(struct ctlr_info *h)
41819 {
41820- return (h->access.intr_pending(h) == 0) ||
41821+ return (h->access->intr_pending(h) == 0) ||
41822 (h->interrupts_enabled == 0);
41823 }
41824
41825@@ -4316,7 +4316,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
41826 if (prod_index < 0)
41827 return -ENODEV;
41828 h->product_name = products[prod_index].product_name;
41829- h->access = *(products[prod_index].access);
41830+ h->access = products[prod_index].access;
41831
41832 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
41833 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
41834@@ -4598,7 +4598,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
41835
41836 assert_spin_locked(&lockup_detector_lock);
41837 remove_ctlr_from_lockup_detector_list(h);
41838- h->access.set_intr_mask(h, HPSA_INTR_OFF);
41839+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
41840 spin_lock_irqsave(&h->lock, flags);
41841 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
41842 spin_unlock_irqrestore(&h->lock, flags);
41843@@ -4775,7 +4775,7 @@ reinit_after_soft_reset:
41844 }
41845
41846 /* make sure the board interrupts are off */
41847- h->access.set_intr_mask(h, HPSA_INTR_OFF);
41848+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
41849
41850 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
41851 goto clean2;
41852@@ -4809,7 +4809,7 @@ reinit_after_soft_reset:
41853 * fake ones to scoop up any residual completions.
41854 */
41855 spin_lock_irqsave(&h->lock, flags);
41856- h->access.set_intr_mask(h, HPSA_INTR_OFF);
41857+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
41858 spin_unlock_irqrestore(&h->lock, flags);
41859 free_irqs(h);
41860 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
41861@@ -4828,9 +4828,9 @@ reinit_after_soft_reset:
41862 dev_info(&h->pdev->dev, "Board READY.\n");
41863 dev_info(&h->pdev->dev,
41864 "Waiting for stale completions to drain.\n");
41865- h->access.set_intr_mask(h, HPSA_INTR_ON);
41866+ h->access->set_intr_mask(h, HPSA_INTR_ON);
41867 msleep(10000);
41868- h->access.set_intr_mask(h, HPSA_INTR_OFF);
41869+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
41870
41871 rc = controller_reset_failed(h->cfgtable);
41872 if (rc)
41873@@ -4851,7 +4851,7 @@ reinit_after_soft_reset:
41874 }
41875
41876 /* Turn the interrupts on so we can service requests */
41877- h->access.set_intr_mask(h, HPSA_INTR_ON);
41878+ h->access->set_intr_mask(h, HPSA_INTR_ON);
41879
41880 hpsa_hba_inquiry(h);
41881 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
41882@@ -4903,7 +4903,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
41883 * To write all data in the battery backed cache to disks
41884 */
41885 hpsa_flush_cache(h);
41886- h->access.set_intr_mask(h, HPSA_INTR_OFF);
41887+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
41888 hpsa_free_irqs_and_disable_msix(h);
41889 }
41890
41891@@ -5071,7 +5071,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
41892 return;
41893 }
41894 /* Change the access methods to the performant access methods */
41895- h->access = SA5_performant_access;
41896+ h->access = &SA5_performant_access;
41897 h->transMethod = CFGTBL_Trans_Performant;
41898 }
41899
41900diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
41901index 9816479..c5d4e97 100644
41902--- a/drivers/scsi/hpsa.h
41903+++ b/drivers/scsi/hpsa.h
41904@@ -79,7 +79,7 @@ struct ctlr_info {
41905 unsigned int msix_vector;
41906 unsigned int msi_vector;
41907 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
41908- struct access_method access;
41909+ struct access_method *access;
41910
41911 /* queue and queue Info */
41912 struct list_head reqQ;
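Editor's note: hpsa previously copied the product's access_method table by value into each ctlr_info, leaving a writable duplicate of function pointers in heap memory; the patch stores a pointer to the shared read-only table instead, and every call site flips from '.' to '->'. The difference in miniature, with hypothetical names:

    struct my_access {
            void (*submit)(int cmd);
    };

    static void my_submit_impl(int cmd)
    {
    }

    /* One read-only table shared by every controller instance. */
    static const struct my_access my_access_table = {
            .submit = my_submit_impl,
    };

    struct my_ctlr {
            const struct my_access *access; /* pointer, not an embedded copy */
    };

    static void my_init(struct my_ctlr *h)
    {
            h->access = &my_access_table;   /* no writable duplicate */
            h->access->submit(0);
    }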
41913diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
41914index c772d8d..35c362c 100644
41915--- a/drivers/scsi/libfc/fc_exch.c
41916+++ b/drivers/scsi/libfc/fc_exch.c
41917@@ -100,12 +100,12 @@ struct fc_exch_mgr {
41918 u16 pool_max_index;
41919
41920 struct {
41921- atomic_t no_free_exch;
41922- atomic_t no_free_exch_xid;
41923- atomic_t xid_not_found;
41924- atomic_t xid_busy;
41925- atomic_t seq_not_found;
41926- atomic_t non_bls_resp;
41927+ atomic_unchecked_t no_free_exch;
41928+ atomic_unchecked_t no_free_exch_xid;
41929+ atomic_unchecked_t xid_not_found;
41930+ atomic_unchecked_t xid_busy;
41931+ atomic_unchecked_t seq_not_found;
41932+ atomic_unchecked_t non_bls_resp;
41933 } stats;
41934 };
41935
41936@@ -725,7 +725,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
41937 /* allocate memory for exchange */
41938 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
41939 if (!ep) {
41940- atomic_inc(&mp->stats.no_free_exch);
41941+ atomic_inc_unchecked(&mp->stats.no_free_exch);
41942 goto out;
41943 }
41944 memset(ep, 0, sizeof(*ep));
41945@@ -786,7 +786,7 @@ out:
41946 return ep;
41947 err:
41948 spin_unlock_bh(&pool->lock);
41949- atomic_inc(&mp->stats.no_free_exch_xid);
41950+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
41951 mempool_free(ep, mp->ep_pool);
41952 return NULL;
41953 }
41954@@ -929,7 +929,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41955 xid = ntohs(fh->fh_ox_id); /* we originated exch */
41956 ep = fc_exch_find(mp, xid);
41957 if (!ep) {
41958- atomic_inc(&mp->stats.xid_not_found);
41959+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41960 reject = FC_RJT_OX_ID;
41961 goto out;
41962 }
41963@@ -959,7 +959,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41964 ep = fc_exch_find(mp, xid);
41965 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
41966 if (ep) {
41967- atomic_inc(&mp->stats.xid_busy);
41968+ atomic_inc_unchecked(&mp->stats.xid_busy);
41969 reject = FC_RJT_RX_ID;
41970 goto rel;
41971 }
41972@@ -970,7 +970,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41973 }
41974 xid = ep->xid; /* get our XID */
41975 } else if (!ep) {
41976- atomic_inc(&mp->stats.xid_not_found);
41977+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41978 reject = FC_RJT_RX_ID; /* XID not found */
41979 goto out;
41980 }
41981@@ -987,7 +987,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41982 } else {
41983 sp = &ep->seq;
41984 if (sp->id != fh->fh_seq_id) {
41985- atomic_inc(&mp->stats.seq_not_found);
41986+ atomic_inc_unchecked(&mp->stats.seq_not_found);
41987 if (f_ctl & FC_FC_END_SEQ) {
41988 /*
41989 * Update sequence_id based on incoming last
41990@@ -1437,22 +1437,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41991
41992 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
41993 if (!ep) {
41994- atomic_inc(&mp->stats.xid_not_found);
41995+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41996 goto out;
41997 }
41998 if (ep->esb_stat & ESB_ST_COMPLETE) {
41999- atomic_inc(&mp->stats.xid_not_found);
42000+ atomic_inc_unchecked(&mp->stats.xid_not_found);
42001 goto rel;
42002 }
42003 if (ep->rxid == FC_XID_UNKNOWN)
42004 ep->rxid = ntohs(fh->fh_rx_id);
42005 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
42006- atomic_inc(&mp->stats.xid_not_found);
42007+ atomic_inc_unchecked(&mp->stats.xid_not_found);
42008 goto rel;
42009 }
42010 if (ep->did != ntoh24(fh->fh_s_id) &&
42011 ep->did != FC_FID_FLOGI) {
42012- atomic_inc(&mp->stats.xid_not_found);
42013+ atomic_inc_unchecked(&mp->stats.xid_not_found);
42014 goto rel;
42015 }
42016 sof = fr_sof(fp);
42017@@ -1461,7 +1461,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
42018 sp->ssb_stat |= SSB_ST_RESP;
42019 sp->id = fh->fh_seq_id;
42020 } else if (sp->id != fh->fh_seq_id) {
42021- atomic_inc(&mp->stats.seq_not_found);
42022+ atomic_inc_unchecked(&mp->stats.seq_not_found);
42023 goto rel;
42024 }
42025
42026@@ -1525,9 +1525,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
42027 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
42028
42029 if (!sp)
42030- atomic_inc(&mp->stats.xid_not_found);
42031+ atomic_inc_unchecked(&mp->stats.xid_not_found);
42032 else
42033- atomic_inc(&mp->stats.non_bls_resp);
42034+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
42035
42036 fc_frame_free(fp);
42037 }
42038@@ -2174,13 +2174,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
42039
42040 list_for_each_entry(ema, &lport->ema_list, ema_list) {
42041 mp = ema->mp;
42042- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
42043+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
42044 st->fc_no_free_exch_xid +=
42045- atomic_read(&mp->stats.no_free_exch_xid);
42046- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
42047- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
42048- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
42049- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
42050+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
42051+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
42052+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
42053+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
42054+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
42055 }
42056 }
42057 EXPORT_SYMBOL(fc_exch_update_stats);
42058diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
42059index bdb81cd..d3c7c2c 100644
42060--- a/drivers/scsi/libsas/sas_ata.c
42061+++ b/drivers/scsi/libsas/sas_ata.c
42062@@ -554,7 +554,7 @@ static struct ata_port_operations sas_sata_ops = {
42063 .postreset = ata_std_postreset,
42064 .error_handler = ata_std_error_handler,
42065 .post_internal_cmd = sas_ata_post_internal,
42066- .qc_defer = ata_std_qc_defer,
42067+ .qc_defer = ata_std_qc_defer,
42068 .qc_prep = ata_noop_qc_prep,
42069 .qc_issue = sas_ata_qc_issue,
42070 .qc_fill_rtf = sas_ata_qc_fill_rtf,
42071diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
42072index df4c13a..a51e90c 100644
42073--- a/drivers/scsi/lpfc/lpfc.h
42074+++ b/drivers/scsi/lpfc/lpfc.h
42075@@ -424,7 +424,7 @@ struct lpfc_vport {
42076 struct dentry *debug_nodelist;
42077 struct dentry *vport_debugfs_root;
42078 struct lpfc_debugfs_trc *disc_trc;
42079- atomic_t disc_trc_cnt;
42080+ atomic_unchecked_t disc_trc_cnt;
42081 #endif
42082 uint8_t stat_data_enabled;
42083 uint8_t stat_data_blocked;
42084@@ -842,8 +842,8 @@ struct lpfc_hba {
42085 struct timer_list fabric_block_timer;
42086 unsigned long bit_flags;
42087 #define FABRIC_COMANDS_BLOCKED 0
42088- atomic_t num_rsrc_err;
42089- atomic_t num_cmd_success;
42090+ atomic_unchecked_t num_rsrc_err;
42091+ atomic_unchecked_t num_cmd_success;
42092 unsigned long last_rsrc_error_time;
42093 unsigned long last_ramp_down_time;
42094 unsigned long last_ramp_up_time;
42095@@ -879,7 +879,7 @@ struct lpfc_hba {
42096
42097 struct dentry *debug_slow_ring_trc;
42098 struct lpfc_debugfs_trc *slow_ring_trc;
42099- atomic_t slow_ring_trc_cnt;
42100+ atomic_unchecked_t slow_ring_trc_cnt;
42101 /* iDiag debugfs sub-directory */
42102 struct dentry *idiag_root;
42103 struct dentry *idiag_pci_cfg;
42104diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
42105index f63f5ff..de29189 100644
42106--- a/drivers/scsi/lpfc/lpfc_debugfs.c
42107+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
42108@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
42109
42110 #include <linux/debugfs.h>
42111
42112-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
42113+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
42114 static unsigned long lpfc_debugfs_start_time = 0L;
42115
42116 /* iDiag */
42117@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
42118 lpfc_debugfs_enable = 0;
42119
42120 len = 0;
42121- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
42122+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
42123 (lpfc_debugfs_max_disc_trc - 1);
42124 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
42125 dtp = vport->disc_trc + i;
42126@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
42127 lpfc_debugfs_enable = 0;
42128
42129 len = 0;
42130- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
42131+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
42132 (lpfc_debugfs_max_slow_ring_trc - 1);
42133 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
42134 dtp = phba->slow_ring_trc + i;
42135@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
42136 !vport || !vport->disc_trc)
42137 return;
42138
42139- index = atomic_inc_return(&vport->disc_trc_cnt) &
42140+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
42141 (lpfc_debugfs_max_disc_trc - 1);
42142 dtp = vport->disc_trc + index;
42143 dtp->fmt = fmt;
42144 dtp->data1 = data1;
42145 dtp->data2 = data2;
42146 dtp->data3 = data3;
42147- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
42148+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
42149 dtp->jif = jiffies;
42150 #endif
42151 return;
42152@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
42153 !phba || !phba->slow_ring_trc)
42154 return;
42155
42156- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
42157+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
42158 (lpfc_debugfs_max_slow_ring_trc - 1);
42159 dtp = phba->slow_ring_trc + index;
42160 dtp->fmt = fmt;
42161 dtp->data1 = data1;
42162 dtp->data2 = data2;
42163 dtp->data3 = data3;
42164- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
42165+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
42166 dtp->jif = jiffies;
42167 #endif
42168 return;
42169@@ -4182,7 +4182,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
42170 "slow_ring buffer\n");
42171 goto debug_failed;
42172 }
42173- atomic_set(&phba->slow_ring_trc_cnt, 0);
42174+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
42175 memset(phba->slow_ring_trc, 0,
42176 (sizeof(struct lpfc_debugfs_trc) *
42177 lpfc_debugfs_max_slow_ring_trc));
42178@@ -4228,7 +4228,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
42179 "buffer\n");
42180 goto debug_failed;
42181 }
42182- atomic_set(&vport->disc_trc_cnt, 0);
42183+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
42184
42185 snprintf(name, sizeof(name), "discovery_trace");
42186 vport->debug_disc_trc =
42187diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
42188index 89ad558..76956c4 100644
42189--- a/drivers/scsi/lpfc/lpfc_init.c
42190+++ b/drivers/scsi/lpfc/lpfc_init.c
42191@@ -10618,8 +10618,10 @@ lpfc_init(void)
42192 "misc_register returned with status %d", error);
42193
42194 if (lpfc_enable_npiv) {
42195- lpfc_transport_functions.vport_create = lpfc_vport_create;
42196- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
42197+ pax_open_kernel();
42198+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
42199+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
42200+ pax_close_kernel();
42201 }
42202 lpfc_transport_template =
42203 fc_attach_transport(&lpfc_transport_functions);
42204diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
42205index 60e5a17..ff7a793 100644
42206--- a/drivers/scsi/lpfc/lpfc_scsi.c
42207+++ b/drivers/scsi/lpfc/lpfc_scsi.c
42208@@ -305,7 +305,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
42209 uint32_t evt_posted;
42210
42211 spin_lock_irqsave(&phba->hbalock, flags);
42212- atomic_inc(&phba->num_rsrc_err);
42213+ atomic_inc_unchecked(&phba->num_rsrc_err);
42214 phba->last_rsrc_error_time = jiffies;
42215
42216 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
42217@@ -346,7 +346,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
42218 unsigned long flags;
42219 struct lpfc_hba *phba = vport->phba;
42220 uint32_t evt_posted;
42221- atomic_inc(&phba->num_cmd_success);
42222+ atomic_inc_unchecked(&phba->num_cmd_success);
42223
42224 if (vport->cfg_lun_queue_depth <= queue_depth)
42225 return;
42226@@ -390,8 +390,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
42227 unsigned long num_rsrc_err, num_cmd_success;
42228 int i;
42229
42230- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
42231- num_cmd_success = atomic_read(&phba->num_cmd_success);
42232+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
42233+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
42234
42235 /*
42236 * The error and success command counters are global per
42237@@ -419,8 +419,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
42238 }
42239 }
42240 lpfc_destroy_vport_work_array(phba, vports);
42241- atomic_set(&phba->num_rsrc_err, 0);
42242- atomic_set(&phba->num_cmd_success, 0);
42243+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
42244+ atomic_set_unchecked(&phba->num_cmd_success, 0);
42245 }
42246
42247 /**
42248@@ -454,8 +454,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
42249 }
42250 }
42251 lpfc_destroy_vport_work_array(phba, vports);
42252- atomic_set(&phba->num_rsrc_err, 0);
42253- atomic_set(&phba->num_cmd_success, 0);
42254+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
42255+ atomic_set_unchecked(&phba->num_cmd_success, 0);
42256 }
42257
42258 /**
42259diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
42260index b46f5e9..c4c4ccb 100644
42261--- a/drivers/scsi/pmcraid.c
42262+++ b/drivers/scsi/pmcraid.c
42263@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
42264 res->scsi_dev = scsi_dev;
42265 scsi_dev->hostdata = res;
42266 res->change_detected = 0;
42267- atomic_set(&res->read_failures, 0);
42268- atomic_set(&res->write_failures, 0);
42269+ atomic_set_unchecked(&res->read_failures, 0);
42270+ atomic_set_unchecked(&res->write_failures, 0);
42271 rc = 0;
42272 }
42273 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
42274@@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
42275
42276 /* If this was a SCSI read/write command keep count of errors */
42277 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
42278- atomic_inc(&res->read_failures);
42279+ atomic_inc_unchecked(&res->read_failures);
42280 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
42281- atomic_inc(&res->write_failures);
42282+ atomic_inc_unchecked(&res->write_failures);
42283
42284 if (!RES_IS_GSCSI(res->cfg_entry) &&
42285 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
42286@@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
42287 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
42288 * hrrq_id assigned here in queuecommand
42289 */
42290- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
42291+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
42292 pinstance->num_hrrq;
42293 cmd->cmd_done = pmcraid_io_done;
42294
42295@@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
42296 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
42297 * hrrq_id assigned here in queuecommand
42298 */
42299- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
42300+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
42301 pinstance->num_hrrq;
42302
42303 if (request_size) {
42304@@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
42305
42306 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
42307 /* add resources only after host is added into system */
42308- if (!atomic_read(&pinstance->expose_resources))
42309+ if (!atomic_read_unchecked(&pinstance->expose_resources))
42310 return;
42311
42312 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
42313@@ -5324,8 +5324,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
42314 init_waitqueue_head(&pinstance->reset_wait_q);
42315
42316 atomic_set(&pinstance->outstanding_cmds, 0);
42317- atomic_set(&pinstance->last_message_id, 0);
42318- atomic_set(&pinstance->expose_resources, 0);
42319+ atomic_set_unchecked(&pinstance->last_message_id, 0);
42320+ atomic_set_unchecked(&pinstance->expose_resources, 0);
42321
42322 INIT_LIST_HEAD(&pinstance->free_res_q);
42323 INIT_LIST_HEAD(&pinstance->used_res_q);
42324@@ -6038,7 +6038,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
42325 /* Schedule worker thread to handle CCN and take care of adding and
42326 * removing devices to OS
42327 */
42328- atomic_set(&pinstance->expose_resources, 1);
42329+ atomic_set_unchecked(&pinstance->expose_resources, 1);
42330 schedule_work(&pinstance->worker_q);
42331 return rc;
42332
42333diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
42334index e1d150f..6c6df44 100644
42335--- a/drivers/scsi/pmcraid.h
42336+++ b/drivers/scsi/pmcraid.h
42337@@ -748,7 +748,7 @@ struct pmcraid_instance {
42338 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
42339
42340 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
42341- atomic_t last_message_id;
42342+ atomic_unchecked_t last_message_id;
42343
42344 /* configuration table */
42345 struct pmcraid_config_table *cfg_table;
42346@@ -777,7 +777,7 @@ struct pmcraid_instance {
42347 atomic_t outstanding_cmds;
42348
42349 /* should add/delete resources to mid-layer now ?*/
42350- atomic_t expose_resources;
42351+ atomic_unchecked_t expose_resources;
42352
42353
42354
42355@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
42356 struct pmcraid_config_table_entry_ext cfg_entry_ext;
42357 };
42358 struct scsi_device *scsi_dev; /* Link scsi_device structure */
42359- atomic_t read_failures; /* count of failed READ commands */
42360- atomic_t write_failures; /* count of failed WRITE commands */
42361+ atomic_unchecked_t read_failures; /* count of failed READ commands */
42362+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
42363
42364 /* To indicate add/delete/modify during CCN */
42365 u8 change_detected;
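
[Editor's note] pmcraid.h is the declaration side of the conversion: atomic_t and atomic_unchecked_t are distinct types, so every field updated through the _unchecked accessors must be redeclared, and mixing the two APIs on one field will not compile. A sketch of a driver struct that keeps a real reference count checked while letting statistics wrap, hypothetical names:

/* Sketch: checked vs. unchecked counters in one driver struct. */
struct demo_instance {
	atomic_t outstanding_cmds;           /* true refcount: must trap */
	atomic_unchecked_t last_message_id;  /* tag generator: wrap is fine */
	atomic_unchecked_t read_failures;    /* statistic: wrap is fine */
};

static void demo_init(struct demo_instance *pi)
{
	atomic_set(&pi->outstanding_cmds, 0);
	atomic_set_unchecked(&pi->last_message_id, 0);
	atomic_set_unchecked(&pi->read_failures, 0);
}
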
42366diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
42367index 83d7984..a27d947 100644
42368--- a/drivers/scsi/qla2xxx/qla_attr.c
42369+++ b/drivers/scsi/qla2xxx/qla_attr.c
42370@@ -1969,7 +1969,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
42371 return 0;
42372 }
42373
42374-struct fc_function_template qla2xxx_transport_functions = {
42375+fc_function_template_no_const qla2xxx_transport_functions = {
42376
42377 .show_host_node_name = 1,
42378 .show_host_port_name = 1,
42379@@ -2016,7 +2016,7 @@ struct fc_function_template qla2xxx_transport_functions = {
42380 .bsg_timeout = qla24xx_bsg_timeout,
42381 };
42382
42383-struct fc_function_template qla2xxx_transport_vport_functions = {
42384+fc_function_template_no_const qla2xxx_transport_vport_functions = {
42385
42386 .show_host_node_name = 1,
42387 .show_host_port_name = 1,
42388diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
42389index 2411d1a..4673766 100644
42390--- a/drivers/scsi/qla2xxx/qla_gbl.h
42391+++ b/drivers/scsi/qla2xxx/qla_gbl.h
42392@@ -515,8 +515,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
42393 struct device_attribute;
42394 extern struct device_attribute *qla2x00_host_attrs[];
42395 struct fc_function_template;
42396-extern struct fc_function_template qla2xxx_transport_functions;
42397-extern struct fc_function_template qla2xxx_transport_vport_functions;
42398+extern fc_function_template_no_const qla2xxx_transport_functions;
42399+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
42400 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
42401 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
42402 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
42403diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
42404index 10d23f8..a7d5d4c 100644
42405--- a/drivers/scsi/qla2xxx/qla_os.c
42406+++ b/drivers/scsi/qla2xxx/qla_os.c
42407@@ -1472,8 +1472,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
42408 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
42409 /* Ok, a 64bit DMA mask is applicable. */
42410 ha->flags.enable_64bit_addressing = 1;
42411- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
42412- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
42413+ pax_open_kernel();
42414+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
42415+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
42416+ pax_close_kernel();
42417 return;
42418 }
42419 }
42420diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
42421index 329d553..f20d31d 100644
42422--- a/drivers/scsi/qla4xxx/ql4_def.h
42423+++ b/drivers/scsi/qla4xxx/ql4_def.h
42424@@ -273,7 +273,7 @@ struct ddb_entry {
42425 * (4000 only) */
42426 atomic_t relogin_timer; /* Max Time to wait for
42427 * relogin to complete */
42428- atomic_t relogin_retry_count; /* Num of times relogin has been
42429+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
42430 * retried */
42431 uint32_t default_time2wait; /* Default Min time between
42432 * relogins (+aens) */
42433diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
42434index 4cec123..7c1329f 100644
42435--- a/drivers/scsi/qla4xxx/ql4_os.c
42436+++ b/drivers/scsi/qla4xxx/ql4_os.c
42437@@ -2621,12 +2621,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
42438 */
42439 if (!iscsi_is_session_online(cls_sess)) {
42440 /* Reset retry relogin timer */
42441- atomic_inc(&ddb_entry->relogin_retry_count);
42442+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
42443 DEBUG2(ql4_printk(KERN_INFO, ha,
42444 "%s: index[%d] relogin timed out-retrying"
42445 " relogin (%d), retry (%d)\n", __func__,
42446 ddb_entry->fw_ddb_index,
42447- atomic_read(&ddb_entry->relogin_retry_count),
42448+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
42449 ddb_entry->default_time2wait + 4));
42450 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
42451 atomic_set(&ddb_entry->retry_relogin_timer,
42452@@ -4738,7 +4738,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
42453
42454 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
42455 atomic_set(&ddb_entry->relogin_timer, 0);
42456- atomic_set(&ddb_entry->relogin_retry_count, 0);
42457+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
42458 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
42459 ddb_entry->default_relogin_timeout =
42460 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
42461diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
42462index 2c0d0ec..4e8681a 100644
42463--- a/drivers/scsi/scsi.c
42464+++ b/drivers/scsi/scsi.c
42465@@ -661,7 +661,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
42466 unsigned long timeout;
42467 int rtn = 0;
42468
42469- atomic_inc(&cmd->device->iorequest_cnt);
42470+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
42471
42472 /* check if the device is still usable */
42473 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
42474diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
42475index f1bf5af..f67e943 100644
42476--- a/drivers/scsi/scsi_lib.c
42477+++ b/drivers/scsi/scsi_lib.c
42478@@ -1454,7 +1454,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
42479 shost = sdev->host;
42480 scsi_init_cmd_errh(cmd);
42481 cmd->result = DID_NO_CONNECT << 16;
42482- atomic_inc(&cmd->device->iorequest_cnt);
42483+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
42484
42485 /*
42486 * SCSI request completion path will do scsi_device_unbusy(),
42487@@ -1480,9 +1480,9 @@ static void scsi_softirq_done(struct request *rq)
42488
42489 INIT_LIST_HEAD(&cmd->eh_entry);
42490
42491- atomic_inc(&cmd->device->iodone_cnt);
42492+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
42493 if (cmd->result)
42494- atomic_inc(&cmd->device->ioerr_cnt);
42495+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
42496
42497 disposition = scsi_decide_disposition(cmd);
42498 if (disposition != SUCCESS &&
42499diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
42500index 931a7d9..0c2a754 100644
42501--- a/drivers/scsi/scsi_sysfs.c
42502+++ b/drivers/scsi/scsi_sysfs.c
42503@@ -658,7 +658,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
42504 char *buf) \
42505 { \
42506 struct scsi_device *sdev = to_scsi_device(dev); \
42507- unsigned long long count = atomic_read(&sdev->field); \
42508+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
42509 return snprintf(buf, 20, "0x%llx\n", count); \
42510 } \
42511 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
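
[Editor's note] scsi_sysfs.c generates its iostat show functions with a token-pasting macro, so once the scsi_device counters become atomic_unchecked_t the accessor change has to land exactly once, inside the macro body. A reduced sketch of the same generator:

/* Sketch: one macro stamps out a sysfs show function per counter. */
#define demo_show_counter(field) \
static ssize_t show_##field(struct device *dev, \
			    struct device_attribute *attr, char *buf) \
{ \
	struct scsi_device *sdev = to_scsi_device(dev); \
	unsigned long long count = atomic_read_unchecked(&sdev->field); \
	return snprintf(buf, 20, "0x%llx\n", count); \
} \
static DEVICE_ATTR(field, S_IRUGO, show_##field, NULL)

demo_show_counter(iorequest_cnt);
demo_show_counter(iodone_cnt);
demo_show_counter(ioerr_cnt);
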
42512diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
42513index 84a1fdf..693b0d6 100644
42514--- a/drivers/scsi/scsi_tgt_lib.c
42515+++ b/drivers/scsi/scsi_tgt_lib.c
42516@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
42517 int err;
42518
42519 dprintk("%lx %u\n", uaddr, len);
42520- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
42521+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
42522 if (err) {
42523 /*
42524 * TODO: need to fixup sg_tablesize, max_segment_size,
42525diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
42526index e894ca7..de9d7660 100644
42527--- a/drivers/scsi/scsi_transport_fc.c
42528+++ b/drivers/scsi/scsi_transport_fc.c
42529@@ -498,7 +498,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
42530 * Netlink Infrastructure
42531 */
42532
42533-static atomic_t fc_event_seq;
42534+static atomic_unchecked_t fc_event_seq;
42535
42536 /**
42537 * fc_get_event_number - Obtain the next sequential FC event number
42538@@ -511,7 +511,7 @@ static atomic_t fc_event_seq;
42539 u32
42540 fc_get_event_number(void)
42541 {
42542- return atomic_add_return(1, &fc_event_seq);
42543+ return atomic_add_return_unchecked(1, &fc_event_seq);
42544 }
42545 EXPORT_SYMBOL(fc_get_event_number);
42546
42547@@ -659,7 +659,7 @@ static __init int fc_transport_init(void)
42548 {
42549 int error;
42550
42551- atomic_set(&fc_event_seq, 0);
42552+ atomic_set_unchecked(&fc_event_seq, 0);
42553
42554 error = transport_class_register(&fc_host_class);
42555 if (error)
42556@@ -849,7 +849,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
42557 char *cp;
42558
42559 *val = simple_strtoul(buf, &cp, 0);
42560- if ((*cp && (*cp != '\n')) || (*val < 0))
42561+ if (*cp && (*cp != '\n'))
42562 return -EINVAL;
42563 /*
42564 * Check for overflow; dev_loss_tmo is u32
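
[Editor's note] fc_event_seq is a pure sequence-number source: callers only need a fresh u32, and wrapping past U32_MAX back to 0 is by design, which a checked atomic_add_return would eventually punish with a REFCOUNT trap. The allocator, reduced to a sketch with hypothetical names:

/* Sketch: monotonically advancing event number, wrap permitted. */
static atomic_unchecked_t demo_event_seq;

static u32 demo_get_event_number(void)
{
	/* returns 1, 2, 3, ... and silently wraps past U32_MAX */
	return atomic_add_return_unchecked(1, &demo_event_seq);
}
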
42565diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
42566index 31969f2..2b348f0 100644
42567--- a/drivers/scsi/scsi_transport_iscsi.c
42568+++ b/drivers/scsi/scsi_transport_iscsi.c
42569@@ -79,7 +79,7 @@ struct iscsi_internal {
42570 struct transport_container session_cont;
42571 };
42572
42573-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
42574+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
42575 static struct workqueue_struct *iscsi_eh_timer_workq;
42576
42577 static DEFINE_IDA(iscsi_sess_ida);
42578@@ -1064,7 +1064,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
42579 int err;
42580
42581 ihost = shost->shost_data;
42582- session->sid = atomic_add_return(1, &iscsi_session_nr);
42583+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
42584
42585 if (target_id == ISCSI_MAX_TARGET) {
42586 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
42587@@ -2943,7 +2943,7 @@ static __init int iscsi_transport_init(void)
42588 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
42589 ISCSI_TRANSPORT_VERSION);
42590
42591- atomic_set(&iscsi_session_nr, 0);
42592+ atomic_set_unchecked(&iscsi_session_nr, 0);
42593
42594 err = class_register(&iscsi_transport_class);
42595 if (err)
42596diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
42597index f379c7f..e8fc69c 100644
42598--- a/drivers/scsi/scsi_transport_srp.c
42599+++ b/drivers/scsi/scsi_transport_srp.c
42600@@ -33,7 +33,7 @@
42601 #include "scsi_transport_srp_internal.h"
42602
42603 struct srp_host_attrs {
42604- atomic_t next_port_id;
42605+ atomic_unchecked_t next_port_id;
42606 };
42607 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
42608
42609@@ -61,7 +61,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
42610 struct Scsi_Host *shost = dev_to_shost(dev);
42611 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
42612
42613- atomic_set(&srp_host->next_port_id, 0);
42614+ atomic_set_unchecked(&srp_host->next_port_id, 0);
42615 return 0;
42616 }
42617
42618@@ -210,7 +210,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
42619 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
42620 rport->roles = ids->roles;
42621
42622- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
42623+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
42624 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
42625
42626 transport_setup_device(&rport->dev);
42627diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
42628index 7992635..609faf8 100644
42629--- a/drivers/scsi/sd.c
42630+++ b/drivers/scsi/sd.c
42631@@ -2909,7 +2909,7 @@ static int sd_probe(struct device *dev)
42632 sdkp->disk = gd;
42633 sdkp->index = index;
42634 atomic_set(&sdkp->openers, 0);
42635- atomic_set(&sdkp->device->ioerr_cnt, 0);
42636+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
42637
42638 if (!sdp->request_queue->rq_timeout) {
42639 if (sdp->type != TYPE_MOD)
42640diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
42641index be2c9a6..275525c 100644
42642--- a/drivers/scsi/sg.c
42643+++ b/drivers/scsi/sg.c
42644@@ -1101,7 +1101,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
42645 sdp->disk->disk_name,
42646 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
42647 NULL,
42648- (char *)arg);
42649+ (char __user *)arg);
42650 case BLKTRACESTART:
42651 return blk_trace_startstop(sdp->device->request_queue, 1);
42652 case BLKTRACESTOP:
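
[Editor's note] The sg.c hunk changes no behavior; it is a sparse annotation fix. arg holds a userspace pointer, and casting it to a plain char * hides that from static analysis, while __user keeps the pointer in the user address space where sparse can flag any direct dereference. A sketch of the convention, hypothetical helper:

/* Sketch: keep user pointers in the __user address space end to end. */
static long demo_reply_to_user(unsigned long arg, const void *src, size_t len)
{
	void __user *uptr = (void __user *)arg;  /* annotate, don't launder */

	/* copy_to_user() is the only legal way across the boundary */
	if (copy_to_user(uptr, src, len))
		return -EFAULT;
	return 0;
}
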
42653diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
42654index 19ee901..6e8c2ef 100644
42655--- a/drivers/spi/spi.c
42656+++ b/drivers/spi/spi.c
42657@@ -1616,7 +1616,7 @@ int spi_bus_unlock(struct spi_master *master)
42658 EXPORT_SYMBOL_GPL(spi_bus_unlock);
42659
42660 /* portable code must never pass more than 32 bytes */
42661-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
42662+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
42663
42664 static u8 *buf;
42665
42666diff --git a/drivers/staging/iio/iio_hwmon.c b/drivers/staging/iio/iio_hwmon.c
42667index c7a5f97..71ecd35 100644
42668--- a/drivers/staging/iio/iio_hwmon.c
42669+++ b/drivers/staging/iio/iio_hwmon.c
42670@@ -72,7 +72,7 @@ static void iio_hwmon_free_attrs(struct iio_hwmon_state *st)
42671 static int iio_hwmon_probe(struct platform_device *pdev)
42672 {
42673 struct iio_hwmon_state *st;
42674- struct sensor_device_attribute *a;
42675+ sensor_device_attribute_no_const *a;
42676 int ret, i;
42677 int in_i = 1, temp_i = 1, curr_i = 1;
42678 enum iio_chan_type type;
42679diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
42680index 34afc16..ffe44dd 100644
42681--- a/drivers/staging/octeon/ethernet-rx.c
42682+++ b/drivers/staging/octeon/ethernet-rx.c
42683@@ -421,11 +421,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
42684 /* Increment RX stats for virtual ports */
42685 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
42686 #ifdef CONFIG_64BIT
42687- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
42688- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
42689+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
42690+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
42691 #else
42692- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
42693- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
42694+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
42695+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
42696 #endif
42697 }
42698 netif_receive_skb(skb);
42699@@ -437,9 +437,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
42700 dev->name);
42701 */
42702 #ifdef CONFIG_64BIT
42703- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
42704+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
42705 #else
42706- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
42707+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
42708 #endif
42709 dev_kfree_skb_irq(skb);
42710 }
42711diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
42712index ef32dc1..a159d68 100644
42713--- a/drivers/staging/octeon/ethernet.c
42714+++ b/drivers/staging/octeon/ethernet.c
42715@@ -252,11 +252,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
42716 * since the RX tasklet also increments it.
42717 */
42718 #ifdef CONFIG_64BIT
42719- atomic64_add(rx_status.dropped_packets,
42720- (atomic64_t *)&priv->stats.rx_dropped);
42721+ atomic64_add_unchecked(rx_status.dropped_packets,
42722+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
42723 #else
42724- atomic_add(rx_status.dropped_packets,
42725- (atomic_t *)&priv->stats.rx_dropped);
42726+ atomic_add_unchecked(rx_status.dropped_packets,
42727+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
42728 #endif
42729 }
42730
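
[Editor's note] The octeon driver bumps plain fields of net_device_stats by casting them to atomic types, trading strict type safety for a lock-free RX hot path; the patch keeps the trick but routes it through the unchecked ops so a fast-wrapping byte counter cannot trip REFCOUNT. A sketch of the cast, which assumes, as the driver does, that the atomic type has the same layout as the plain 64-bit field:

/* Sketch: lock-free increment of a plain stats field via an atomic cast.
 * Layout compatibility between the plain field and the atomic type is
 * assumed here exactly as the octeon driver assumes it. */
static void demo_count_rx(struct net_device_stats *stats, unsigned int len)
{
#ifdef CONFIG_64BIT
	atomic64_add_unchecked(1, (atomic64_unchecked_t *)&stats->rx_packets);
	atomic64_add_unchecked(len, (atomic64_unchecked_t *)&stats->rx_bytes);
#else
	atomic_add_unchecked(1, (atomic_unchecked_t *)&stats->rx_packets);
	atomic_add_unchecked(len, (atomic_unchecked_t *)&stats->rx_bytes);
#endif
}
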
42731diff --git a/drivers/staging/ramster/tmem.c b/drivers/staging/ramster/tmem.c
42732index a2b7e03..aaf3630 100644
42733--- a/drivers/staging/ramster/tmem.c
42734+++ b/drivers/staging/ramster/tmem.c
42735@@ -50,25 +50,25 @@
42736 * A tmem host implementation must use this function to register callbacks
42737 * for memory allocation.
42738 */
42739-static struct tmem_hostops tmem_hostops;
42740+static struct tmem_hostops *tmem_hostops;
42741
42742 static void tmem_objnode_tree_init(void);
42743
42744 void tmem_register_hostops(struct tmem_hostops *m)
42745 {
42746 tmem_objnode_tree_init();
42747- tmem_hostops = *m;
42748+ tmem_hostops = m;
42749 }
42750
42751 /*
42752 * A tmem host implementation must use this function to register
42753 * callbacks for a page-accessible memory (PAM) implementation.
42754 */
42755-static struct tmem_pamops tmem_pamops;
42756+static struct tmem_pamops *tmem_pamops;
42757
42758 void tmem_register_pamops(struct tmem_pamops *m)
42759 {
42760- tmem_pamops = *m;
42761+ tmem_pamops = m;
42762 }
42763
42764 /*
42765@@ -174,7 +174,7 @@ static void tmem_obj_init(struct tmem_obj *obj, struct tmem_hashbucket *hb,
42766 obj->pampd_count = 0;
42767 #ifdef CONFIG_RAMSTER
42768- if (tmem_pamops.new_obj != NULL)
42768+ if (tmem_pamops->new_obj != NULL)
42769- (*tmem_pamops.new_obj)(obj);
42770+ (tmem_pamops->new_obj)(obj);
42771 #endif
42772 SET_SENTINEL(obj, OBJ);
42773
42774@@ -210,7 +210,7 @@ static void tmem_pool_flush(struct tmem_pool *pool, bool destroy)
42775 rbnode = rb_next(rbnode);
42776 tmem_pampd_destroy_all_in_obj(obj, true);
42777 tmem_obj_free(obj, hb);
42778- (*tmem_hostops.obj_free)(obj, pool);
42779+ (tmem_hostops->obj_free)(obj, pool);
42780 }
42781 spin_unlock(&hb->lock);
42782 }
42783@@ -261,7 +261,7 @@ static struct tmem_objnode *tmem_objnode_alloc(struct tmem_obj *obj)
42784 ASSERT_SENTINEL(obj, OBJ);
42785 BUG_ON(obj->pool == NULL);
42786 ASSERT_SENTINEL(obj->pool, POOL);
42787- objnode = (*tmem_hostops.objnode_alloc)(obj->pool);
42788+ objnode = (tmem_hostops->objnode_alloc)(obj->pool);
42789 if (unlikely(objnode == NULL))
42790 goto out;
42791 objnode->obj = obj;
42792@@ -290,7 +290,7 @@ static void tmem_objnode_free(struct tmem_objnode *objnode)
42793 ASSERT_SENTINEL(pool, POOL);
42794 objnode->obj->objnode_count--;
42795 objnode->obj = NULL;
42796- (*tmem_hostops.objnode_free)(objnode, pool);
42797+ (tmem_hostops->objnode_free)(objnode, pool);
42798 }
42799
42800 /*
42801@@ -348,7 +348,7 @@ static void *tmem_pampd_replace_in_obj(struct tmem_obj *obj, uint32_t index,
42802 void *old_pampd = *(void **)slot;
42803 *(void **)slot = new_pampd;
42804 if (!no_free)
42805- (*tmem_pamops.free)(old_pampd, obj->pool,
42806+ (tmem_pamops->free)(old_pampd, obj->pool,
42807 NULL, 0, false);
42808 ret = new_pampd;
42809 }
42810@@ -505,7 +505,7 @@ static void tmem_objnode_node_destroy(struct tmem_obj *obj,
42811 if (objnode->slots[i]) {
42812 if (ht == 1) {
42813 obj->pampd_count--;
42814- (*tmem_pamops.free)(objnode->slots[i],
42815+ (tmem_pamops->free)(objnode->slots[i],
42816 obj->pool, NULL, 0, true);
42817 objnode->slots[i] = NULL;
42818 continue;
42819@@ -524,7 +524,7 @@ static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj,
42820 return;
42821 if (obj->objnode_tree_height == 0) {
42822 obj->pampd_count--;
42823- (*tmem_pamops.free)(obj->objnode_tree_root,
42824+ (tmem_pamops->free)(obj->objnode_tree_root,
42825 obj->pool, NULL, 0, true);
42826 } else {
42827 tmem_objnode_node_destroy(obj, obj->objnode_tree_root,
42828@@ -535,7 +535,7 @@ static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj,
42829 obj->objnode_tree_root = NULL;
42830 #ifdef CONFIG_RAMSTER
42831- if (tmem_pamops.free_obj != NULL)
42831+ if (tmem_pamops->free_obj != NULL)
42832- (*tmem_pamops.free_obj)(obj->pool, obj, pool_destroy);
42833+ (tmem_pamops->free_obj)(obj->pool, obj, pool_destroy);
42834 #endif
42835 }
42836
42837@@ -574,7 +574,7 @@ int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
42838 /* if found, is a dup put, flush the old one */
42839 pampd_del = tmem_pampd_delete_from_obj(obj, index);
42840 BUG_ON(pampd_del != pampd);
42841- (*tmem_pamops.free)(pampd, pool, oidp, index, true);
42842+ (tmem_pamops->free)(pampd, pool, oidp, index, true);
42843 if (obj->pampd_count == 0) {
42844 objnew = obj;
42845 objfound = NULL;
42846@@ -582,7 +582,7 @@ int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
42847 pampd = NULL;
42848 }
42849 } else {
42850- obj = objnew = (*tmem_hostops.obj_alloc)(pool);
42851+ obj = objnew = (tmem_hostops->obj_alloc)(pool);
42852 if (unlikely(obj == NULL)) {
42853 ret = -ENOMEM;
42854 goto out;
42855@@ -597,16 +597,16 @@ int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
42856 if (unlikely(ret == -ENOMEM))
42857 /* may have partially built objnode tree ("stump") */
42858 goto delete_and_free;
42859- (*tmem_pamops.create_finish)(pampd, is_ephemeral(pool));
42860+ (tmem_pamops->create_finish)(pampd, is_ephemeral(pool));
42861 goto out;
42862
42863 delete_and_free:
42864 (void)tmem_pampd_delete_from_obj(obj, index);
42865 if (pampd)
42866- (*tmem_pamops.free)(pampd, pool, NULL, 0, true);
42867+ (tmem_pamops->free)(pampd, pool, NULL, 0, true);
42868 if (objnew) {
42869 tmem_obj_free(objnew, hb);
42870- (*tmem_hostops.obj_free)(objnew, pool);
42871+ (tmem_hostops->obj_free)(objnew, pool);
42872 }
42873 out:
42874 spin_unlock(&hb->lock);
42875@@ -651,7 +651,7 @@ void tmem_localify_finish(struct tmem_obj *obj, uint32_t index,
42876 if (pampd != NULL) {
42877 BUG_ON(obj == NULL);
42878 (void)tmem_pampd_replace_in_obj(obj, index, pampd, 1);
42879- (*tmem_pamops.create_finish)(pampd, is_ephemeral(obj->pool));
42880+ (tmem_pamops->create_finish)(pampd, is_ephemeral(obj->pool));
42881 } else if (delete) {
42882 BUG_ON(obj == NULL);
42883 (void)tmem_pampd_delete_from_obj(obj, index);
42884@@ -671,7 +671,7 @@ static int tmem_repatriate(void **ppampd, struct tmem_hashbucket *hb,
42885 int ret = 0;
42886
42887 if (!is_ephemeral(pool))
42888- new_pampd = (*tmem_pamops.repatriate_preload)(
42889+ new_pampd = (tmem_pamops->repatriate_preload)(
42890 old_pampd, pool, oidp, index, &intransit);
42891 if (intransit)
42892 ret = -EAGAIN;
42893@@ -680,7 +680,7 @@ static int tmem_repatriate(void **ppampd, struct tmem_hashbucket *hb,
42894 /* must release the hb->lock else repatriate can't sleep */
42895 spin_unlock(&hb->lock);
42896 if (!intransit)
42897- ret = (*tmem_pamops.repatriate)(old_pampd, new_pampd, pool,
42898+ ret = (tmem_pamops->repatriate)(old_pampd, new_pampd, pool,
42899 oidp, index, free, data);
42900 if (ret == -EAGAIN) {
42901 /* rare I think, but should cond_resched()??? */
42902@@ -714,7 +714,7 @@ int tmem_replace(struct tmem_pool *pool, struct tmem_oid *oidp,
42903 new_pampd = tmem_pampd_replace_in_obj(obj, index, new_pampd, 0);
42904 /* if we bug here, pamops wasn't properly set up for ramster */
42905- BUG_ON(tmem_pamops.replace_in_obj == NULL);
42905+ BUG_ON(tmem_pamops->replace_in_obj == NULL);
42906- ret = (*tmem_pamops.replace_in_obj)(new_pampd, obj);
42907+ ret = (tmem_pamops->replace_in_obj)(new_pampd, obj);
42908 out:
42909 spin_unlock(&hb->lock);
42910 return ret;
42911@@ -776,15 +776,15 @@ int tmem_get(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
42912 if (free) {
42913 if (obj->pampd_count == 0) {
42914 tmem_obj_free(obj, hb);
42915- (*tmem_hostops.obj_free)(obj, pool);
42916+ (tmem_hostops->obj_free)(obj, pool);
42917 obj = NULL;
42918 }
42919 }
42920 if (free)
42921- ret = (*tmem_pamops.get_data_and_free)(
42922+ ret = (tmem_pamops->get_data_and_free)(
42923 data, sizep, raw, pampd, pool, oidp, index);
42924 else
42925- ret = (*tmem_pamops.get_data)(
42926+ ret = (tmem_pamops->get_data)(
42927 data, sizep, raw, pampd, pool, oidp, index);
42928 if (ret < 0)
42929 goto out;
42930@@ -816,10 +816,10 @@ int tmem_flush_page(struct tmem_pool *pool,
42931 pampd = tmem_pampd_delete_from_obj(obj, index);
42932 if (pampd == NULL)
42933 goto out;
42934- (*tmem_pamops.free)(pampd, pool, oidp, index, true);
42935+ (tmem_pamops->free)(pampd, pool, oidp, index, true);
42936 if (obj->pampd_count == 0) {
42937 tmem_obj_free(obj, hb);
42938- (*tmem_hostops.obj_free)(obj, pool);
42939+ (tmem_hostops->obj_free)(obj, pool);
42940 }
42941 ret = 0;
42942
42943@@ -844,7 +844,7 @@ int tmem_flush_object(struct tmem_pool *pool, struct tmem_oid *oidp)
42944 goto out;
42945 tmem_pampd_destroy_all_in_obj(obj, false);
42946 tmem_obj_free(obj, hb);
42947- (*tmem_hostops.obj_free)(obj, pool);
42948+ (tmem_hostops->obj_free)(obj, pool);
42949 ret = 0;
42950
42951 out:
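
[Editor's note] ramster's tmem.c originally copied the caller's ops structs into writable static storage, exactly the kind of function-pointer table constification is meant to protect. Storing only the pointer lets the caller's table stay read-only, at the cost of -> indirection at every call site. The before/after shape, hypothetical names:

/* Sketch: register ops by pointer so the master table can stay read-only. */
struct demo_hostops {
	void *(*obj_alloc)(void *pool);
	void (*obj_free)(void *obj, void *pool);
};

static struct demo_hostops *demo_hostops;   /* pointer, not a copy */

void demo_register_hostops(struct demo_hostops *m)
{
	demo_hostops = m;   /* caller's const-qualified table is the master */
}

static void demo_drop(void *obj, void *pool)
{
	(demo_hostops->obj_free)(obj, pool);  /* indirect call via pointer */
}
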
42952diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
42953index dc23395..cf7e9b1 100644
42954--- a/drivers/staging/rtl8712/rtl871x_io.h
42955+++ b/drivers/staging/rtl8712/rtl871x_io.h
42956@@ -108,7 +108,7 @@ struct _io_ops {
42957 u8 *pmem);
42958 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
42959 u8 *pmem);
42960-};
42961+} __no_const;
42962
42963 struct io_req {
42964 struct list_head list;
42965diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
42966index 1f5088b..0e59820 100644
42967--- a/drivers/staging/sbe-2t3e3/netdev.c
42968+++ b/drivers/staging/sbe-2t3e3/netdev.c
42969@@ -51,7 +51,7 @@ static int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
42970 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
42971
42972 if (rlen)
42973- if (copy_to_user(data, &resp, rlen))
42974+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
42975 return -EFAULT;
42976
42977 return 0;
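
[Editor's note] The sbe-2t3e3 hunk is a standard overread guard: rlen comes back from t3e3_if_config() and was previously trusted as the copy length, so a confused or malicious length could leak kernel stack beyond resp. The shape of the check, as a hypothetical helper:

/* Sketch: never let a device-reported length exceed the kernel buffer. */
static int demo_reply(void __user *data, const void *resp,
		      size_t resp_size, size_t rlen)
{
	if (rlen > resp_size)       /* reject bogus lengths before copying */
		return -EFAULT;
	if (rlen && copy_to_user(data, resp, rlen))
		return -EFAULT;
	return 0;
}
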
42978diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
42979index 5dddc4d..34fcb2f 100644
42980--- a/drivers/staging/usbip/vhci.h
42981+++ b/drivers/staging/usbip/vhci.h
42982@@ -83,7 +83,7 @@ struct vhci_hcd {
42983 unsigned resuming:1;
42984 unsigned long re_timeout;
42985
42986- atomic_t seqnum;
42987+ atomic_unchecked_t seqnum;
42988
42989 /*
42990 * NOTE:
42991diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
42992index c3aa219..bf8b3de 100644
42993--- a/drivers/staging/usbip/vhci_hcd.c
42994+++ b/drivers/staging/usbip/vhci_hcd.c
42995@@ -451,7 +451,7 @@ static void vhci_tx_urb(struct urb *urb)
42996 return;
42997 }
42998
42999- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
43000+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
43001 if (priv->seqnum == 0xffff)
43002 dev_info(&urb->dev->dev, "seqnum max\n");
43003
43004@@ -703,7 +703,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
43005 return -ENOMEM;
43006 }
43007
43008- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
43009+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
43010 if (unlink->seqnum == 0xffff)
43011 pr_info("seqnum max\n");
43012
43013@@ -907,7 +907,7 @@ static int vhci_start(struct usb_hcd *hcd)
43014 vdev->rhport = rhport;
43015 }
43016
43017- atomic_set(&vhci->seqnum, 0);
43018+ atomic_set_unchecked(&vhci->seqnum, 0);
43019 spin_lock_init(&vhci->lock);
43020
43021 hcd->power_budget = 0; /* no limit */
43022diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
43023index ba5f1c0..11d8122 100644
43024--- a/drivers/staging/usbip/vhci_rx.c
43025+++ b/drivers/staging/usbip/vhci_rx.c
43026@@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
43027 if (!urb) {
43028 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
43029 pr_info("max seqnum %d\n",
43030- atomic_read(&the_controller->seqnum));
43031+ atomic_read_unchecked(&the_controller->seqnum));
43032 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
43033 return;
43034 }
43035diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
43036index 5f13890..36a044b 100644
43037--- a/drivers/staging/vt6655/hostap.c
43038+++ b/drivers/staging/vt6655/hostap.c
43039@@ -73,14 +73,13 @@ static int msglevel =MSG_LEVEL_INFO;
43040 *
43041 */
43042
43043+static net_device_ops_no_const apdev_netdev_ops;
43044+
43045 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
43046 {
43047 PSDevice apdev_priv;
43048 struct net_device *dev = pDevice->dev;
43049 int ret;
43050- const struct net_device_ops apdev_netdev_ops = {
43051- .ndo_start_xmit = pDevice->tx_80211,
43052- };
43053
43054 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
43055
43056@@ -92,6 +91,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
43057 *apdev_priv = *pDevice;
43058 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
43059
43060+ /* only half broken now */
43061+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
43062 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
43063
43064 pDevice->apdev->type = ARPHRD_IEEE80211;
43065diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
43066index 26a7d0e..897b083 100644
43067--- a/drivers/staging/vt6656/hostap.c
43068+++ b/drivers/staging/vt6656/hostap.c
43069@@ -60,14 +60,13 @@ static int msglevel =MSG_LEVEL_INFO;
43070 *
43071 */
43072
43073+static net_device_ops_no_const apdev_netdev_ops;
43074+
43075 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
43076 {
43077 PSDevice apdev_priv;
43078 struct net_device *dev = pDevice->dev;
43079 int ret;
43080- const struct net_device_ops apdev_netdev_ops = {
43081- .ndo_start_xmit = pDevice->tx_80211,
43082- };
43083
43084 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
43085
43086@@ -79,6 +78,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
43087 *apdev_priv = *pDevice;
43088 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
43089
43090+ /* only half broken now */
43091+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
43092 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
43093
43094 pDevice->apdev->type = ARPHRD_IEEE80211;
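
[Editor's note] Both vt665x hostap hunks replace a const net_device_ops built on the stack with a file-scope net_device_ops_no_const whose single hook is bound at enable time; the "only half broken now" comment is the author's own caveat that a shared writable table is a compromise, not a cure. The pattern in isolation, using the net_device_ops_no_const typedef this patch introduces:

/* Sketch: file-scope writable ops table, one hook bound at runtime. */
static net_device_ops_no_const demo_apdev_ops;

static int demo_enable_ap(struct net_device *apdev,
			  netdev_tx_t (*tx)(struct sk_buff *,
					    struct net_device *))
{
	demo_apdev_ops.ndo_start_xmit = tx;  /* late binding, table writable */
	apdev->netdev_ops = &demo_apdev_ops;
	return 0;
}
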
43095diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
43096index 56c8e60..1920c63 100644
43097--- a/drivers/staging/zcache/tmem.c
43098+++ b/drivers/staging/zcache/tmem.c
43099@@ -39,7 +39,7 @@
43100 * A tmem host implementation must use this function to register callbacks
43101 * for memory allocation.
43102 */
43103-static struct tmem_hostops tmem_hostops;
43104+static tmem_hostops_no_const tmem_hostops;
43105
43106 static void tmem_objnode_tree_init(void);
43107
43108@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
43109 * A tmem host implementation must use this function to register
43110 * callbacks for a page-accessible memory (PAM) implementation
43111 */
43112-static struct tmem_pamops tmem_pamops;
43113+static tmem_pamops_no_const tmem_pamops;
43114
43115 void tmem_register_pamops(struct tmem_pamops *m)
43116 {
43117diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
43118index 0d4aa82..f7832d4 100644
43119--- a/drivers/staging/zcache/tmem.h
43120+++ b/drivers/staging/zcache/tmem.h
43121@@ -180,6 +180,7 @@ struct tmem_pamops {
43122 void (*new_obj)(struct tmem_obj *);
43123 int (*replace_in_obj)(void *, struct tmem_obj *);
43124 };
43125+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
43126 extern void tmem_register_pamops(struct tmem_pamops *m);
43127
43128 /* memory allocation methods provided by the host implementation */
43129@@ -189,6 +190,7 @@ struct tmem_hostops {
43130 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
43131 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
43132 };
43133+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
43134 extern void tmem_register_hostops(struct tmem_hostops *m);
43135
43136 /* core tmem accessor functions */
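
[Editor's note] tmem.h carries the zcache variant of the fix: instead of switching to pointer registration, a __no_const typedef exempts just the two static registration copies from the constify plugin while every other instance of the struct stays read-only. The general shape, hypothetical struct, assuming grsecurity's __no_const attribute as used in this hunk:

/* Sketch: opt one variable out of constification via a typedef.
 * __no_const is honored by the constify gcc plugin; on a vanilla
 * kernel it would expand to nothing. */
struct demo_pamops {
	int (*get_data)(void *data, size_t *sizep);
	void (*free)(void *pampd);
};
typedef struct demo_pamops __no_const demo_pamops_no_const;

static demo_pamops_no_const demo_pamops;  /* writable registration copy */

void demo_register_pamops(struct demo_pamops *m)
{
	demo_pamops = *m;   /* struct copy is legal: this instance is mutable */
}
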
43137diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
43138index 96f4981..4daaa7e 100644
43139--- a/drivers/target/target_core_device.c
43140+++ b/drivers/target/target_core_device.c
43141@@ -1370,7 +1370,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
43142 spin_lock_init(&dev->se_port_lock);
43143 spin_lock_init(&dev->se_tmr_lock);
43144 spin_lock_init(&dev->qf_cmd_lock);
43145- atomic_set(&dev->dev_ordered_id, 0);
43146+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
43147 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
43148 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
43149 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
43150diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
43151index fcf880f..a4d1e8f 100644
43152--- a/drivers/target/target_core_transport.c
43153+++ b/drivers/target/target_core_transport.c
43154@@ -1077,7 +1077,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
43155 * Used to determine when ORDERED commands should go from
43156 * Dormant to Active status.
43157 */
43158- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
43159+ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
43160 smp_mb__after_atomic_inc();
43161 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
43162 cmd->se_ordered_id, cmd->sam_task_attr,
43163diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
43164index b09c8d1f..c4225c0 100644
43165--- a/drivers/tty/cyclades.c
43166+++ b/drivers/tty/cyclades.c
43167@@ -1589,10 +1589,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
43168 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
43169 info->port.count);
43170 #endif
43171- info->port.count++;
43172+ atomic_inc(&info->port.count);
43173 #ifdef CY_DEBUG_COUNT
43174 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
43175- current->pid, info->port.count);
43176+ current->pid, atomic_read(&info->port.count));
43177 #endif
43178
43179 /*
43180@@ -3991,7 +3991,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
43181 for (j = 0; j < cy_card[i].nports; j++) {
43182 info = &cy_card[i].ports[j];
43183
43184- if (info->port.count) {
43185+ if (atomic_read(&info->port.count)) {
43186 /* XXX is the ldisc num worth this? */
43187 struct tty_struct *tty;
43188 struct tty_ldisc *ld;
43189diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
43190index 13ee53b..418d164 100644
43191--- a/drivers/tty/hvc/hvc_console.c
43192+++ b/drivers/tty/hvc/hvc_console.c
43193@@ -338,7 +338,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
43194
43195 spin_lock_irqsave(&hp->port.lock, flags);
43196 /* Check and then increment for fast path open. */
43197- if (hp->port.count++ > 0) {
43198+ if (atomic_inc_return(&hp->port.count) > 1) {
43199 spin_unlock_irqrestore(&hp->port.lock, flags);
43200 hvc_kick();
43201 return 0;
43202@@ -388,7 +388,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
43203
43204 spin_lock_irqsave(&hp->port.lock, flags);
43205
43206- if (--hp->port.count == 0) {
43207+ if (atomic_dec_return(&hp->port.count) == 0) {
43208 spin_unlock_irqrestore(&hp->port.lock, flags);
43209 /* We are done with the tty pointer now. */
43210 tty_port_tty_set(&hp->port, NULL);
43211@@ -406,9 +406,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
43212 */
43213 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
43214 } else {
43215- if (hp->port.count < 0)
43216+ if (atomic_read(&hp->port.count) < 0)
43217 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
43218- hp->vtermno, hp->port.count);
43219+ hp->vtermno, atomic_read(&hp->port.count));
43220 spin_unlock_irqrestore(&hp->port.lock, flags);
43221 }
43222 }
43223@@ -438,12 +438,12 @@ static void hvc_hangup(struct tty_struct *tty)
43224 * open->hangup case this can be called after the final close so prevent
43225 * that from happening for now.
43226 */
43227- if (hp->port.count <= 0) {
43228+ if (atomic_read(&hp->port.count) <= 0) {
43229 spin_unlock_irqrestore(&hp->port.lock, flags);
43230 return;
43231 }
43232
43233- hp->port.count = 0;
43234+ atomic_set(&hp->port.count, 0);
43235 spin_unlock_irqrestore(&hp->port.lock, flags);
43236 tty_port_tty_set(&hp->port, NULL);
43237
43238@@ -491,7 +491,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
43239 return -EPIPE;
43240
43241 /* FIXME what's this (unprotected) check for? */
43242- if (hp->port.count <= 0)
43243+ if (atomic_read(&hp->port.count) <= 0)
43244 return -EIO;
43245
43246 spin_lock_irqsave(&hp->lock, flags);
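
[Editor's note] The hvc hunks are representative of the tty-wide conversion of tty_port.count from int to atomic_t. The mechanical mapping is the interesting part: pre/post-increment tests must be restated in terms of the value returned by the atomic op, since the read-modify-write can no longer be split. The correspondences used throughout these tty hunks, as a sketch:

/* Sketch: int refcount idioms and their atomic_t equivalents. */
static int demo_not_first_open(atomic_t *count)
{
	/* was: if (count++ > 0)  -- "someone already had it open" */
	return atomic_inc_return(count) > 1;
}

static int demo_last_close(atomic_t *count)
{
	/* was: if (--count == 0) -- "last closer tears down" */
	return atomic_dec_return(count) == 0;
}

static void demo_hangup_reset(atomic_t *count)
{
	/* was: count = 0; */
	atomic_set(count, 0);
}
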
43247diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
43248index 8776357..b2d4afd 100644
43249--- a/drivers/tty/hvc/hvcs.c
43250+++ b/drivers/tty/hvc/hvcs.c
43251@@ -83,6 +83,7 @@
43252 #include <asm/hvcserver.h>
43253 #include <asm/uaccess.h>
43254 #include <asm/vio.h>
43255+#include <asm/local.h>
43256
43257 /*
43258 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
43259@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
43260
43261 spin_lock_irqsave(&hvcsd->lock, flags);
43262
43263- if (hvcsd->port.count > 0) {
43264+ if (atomic_read(&hvcsd->port.count) > 0) {
43265 spin_unlock_irqrestore(&hvcsd->lock, flags);
43266 printk(KERN_INFO "HVCS: vterm state unchanged. "
43267 "The hvcs device node is still in use.\n");
43268@@ -1132,7 +1133,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
43269 }
43270 }
43271
43272- hvcsd->port.count = 0;
43273+ atomic_set(&hvcsd->port.count, 0);
43274 hvcsd->port.tty = tty;
43275 tty->driver_data = hvcsd;
43276
43277@@ -1185,7 +1186,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
43278 unsigned long flags;
43279
43280 spin_lock_irqsave(&hvcsd->lock, flags);
43281- hvcsd->port.count++;
43282+ atomic_inc(&hvcsd->port.count);
43283 hvcsd->todo_mask |= HVCS_SCHED_READ;
43284 spin_unlock_irqrestore(&hvcsd->lock, flags);
43285
43286@@ -1221,7 +1222,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
43287 hvcsd = tty->driver_data;
43288
43289 spin_lock_irqsave(&hvcsd->lock, flags);
43290- if (--hvcsd->port.count == 0) {
43291+ if (atomic_dec_and_test(&hvcsd->port.count)) {
43292
43293 vio_disable_interrupts(hvcsd->vdev);
43294
43295@@ -1246,10 +1247,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
43296
43297 free_irq(irq, hvcsd);
43298 return;
43299- } else if (hvcsd->port.count < 0) {
43300+ } else if (atomic_read(&hvcsd->port.count) < 0) {
43301 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
43302 " is missmanaged.\n",
43303- hvcsd->vdev->unit_address, hvcsd->port.count);
43304+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
43305 }
43306
43307 spin_unlock_irqrestore(&hvcsd->lock, flags);
43308@@ -1271,7 +1272,7 @@ static void hvcs_hangup(struct tty_struct * tty)
43309
43310 spin_lock_irqsave(&hvcsd->lock, flags);
43311 /* Preserve this so that we know how many kref refs to put */
43312- temp_open_count = hvcsd->port.count;
43313+ temp_open_count = atomic_read(&hvcsd->port.count);
43314
43315 /*
43316 * Don't kref put inside the spinlock because the destruction
43317@@ -1286,7 +1287,7 @@ static void hvcs_hangup(struct tty_struct * tty)
43318 tty->driver_data = NULL;
43319 hvcsd->port.tty = NULL;
43320
43321- hvcsd->port.count = 0;
43322+ atomic_set(&hvcsd->port.count, 0);
43323
43324 /* This will drop any buffered data on the floor which is OK in a hangup
43325 * scenario. */
43326@@ -1357,7 +1358,7 @@ static int hvcs_write(struct tty_struct *tty,
43327 * the middle of a write operation? This is a crummy place to do this
43328 * but we want to keep it all in the spinlock.
43329 */
43330- if (hvcsd->port.count <= 0) {
43331+ if (atomic_read(&hvcsd->port.count) <= 0) {
43332 spin_unlock_irqrestore(&hvcsd->lock, flags);
43333 return -ENODEV;
43334 }
43335@@ -1431,7 +1432,7 @@ static int hvcs_write_room(struct tty_struct *tty)
43336 {
43337 struct hvcs_struct *hvcsd = tty->driver_data;
43338
43339- if (!hvcsd || hvcsd->port.count <= 0)
43340+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
43341 return 0;
43342
43343 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
43344diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
43345index 2cde13d..645d78f 100644
43346--- a/drivers/tty/ipwireless/tty.c
43347+++ b/drivers/tty/ipwireless/tty.c
43348@@ -29,6 +29,7 @@
43349 #include <linux/tty_driver.h>
43350 #include <linux/tty_flip.h>
43351 #include <linux/uaccess.h>
43352+#include <asm/local.h>
43353
43354 #include "tty.h"
43355 #include "network.h"
43356@@ -99,10 +100,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
43357 mutex_unlock(&tty->ipw_tty_mutex);
43358 return -ENODEV;
43359 }
43360- if (tty->port.count == 0)
43361+ if (atomic_read(&tty->port.count) == 0)
43362 tty->tx_bytes_queued = 0;
43363
43364- tty->port.count++;
43365+ atomic_inc(&tty->port.count);
43366
43367 tty->port.tty = linux_tty;
43368 linux_tty->driver_data = tty;
43369@@ -118,9 +119,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
43370
43371 static void do_ipw_close(struct ipw_tty *tty)
43372 {
43373- tty->port.count--;
43374-
43375- if (tty->port.count == 0) {
43376+ if (atomic_dec_return(&tty->port.count) == 0) {
43377 struct tty_struct *linux_tty = tty->port.tty;
43378
43379 if (linux_tty != NULL) {
43380@@ -141,7 +140,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
43381 return;
43382
43383 mutex_lock(&tty->ipw_tty_mutex);
43384- if (tty->port.count == 0) {
43385+ if (atomic_read(&tty->port.count) == 0) {
43386 mutex_unlock(&tty->ipw_tty_mutex);
43387 return;
43388 }
43389@@ -170,7 +169,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
43390 return;
43391 }
43392
43393- if (!tty->port.count) {
43394+ if (!atomic_read(&tty->port.count)) {
43395 mutex_unlock(&tty->ipw_tty_mutex);
43396 return;
43397 }
43398@@ -212,7 +211,7 @@ static int ipw_write(struct tty_struct *linux_tty,
43399 return -ENODEV;
43400
43401 mutex_lock(&tty->ipw_tty_mutex);
43402- if (!tty->port.count) {
43403+ if (!atomic_read(&tty->port.count)) {
43404 mutex_unlock(&tty->ipw_tty_mutex);
43405 return -EINVAL;
43406 }
43407@@ -252,7 +251,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
43408 if (!tty)
43409 return -ENODEV;
43410
43411- if (!tty->port.count)
43412+ if (!atomic_read(&tty->port.count))
43413 return -EINVAL;
43414
43415 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
43416@@ -294,7 +293,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
43417 if (!tty)
43418 return 0;
43419
43420- if (!tty->port.count)
43421+ if (!atomic_read(&tty->port.count))
43422 return 0;
43423
43424 return tty->tx_bytes_queued;
43425@@ -375,7 +374,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
43426 if (!tty)
43427 return -ENODEV;
43428
43429- if (!tty->port.count)
43430+ if (!atomic_read(&tty->port.count))
43431 return -EINVAL;
43432
43433 return get_control_lines(tty);
43434@@ -391,7 +390,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
43435 if (!tty)
43436 return -ENODEV;
43437
43438- if (!tty->port.count)
43439+ if (!atomic_read(&tty->port.count))
43440 return -EINVAL;
43441
43442 return set_control_lines(tty, set, clear);
43443@@ -405,7 +404,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
43444 if (!tty)
43445 return -ENODEV;
43446
43447- if (!tty->port.count)
43448+ if (!atomic_read(&tty->port.count))
43449 return -EINVAL;
43450
43451 /* FIXME: Exactly how is the tty object locked here .. */
43452@@ -561,7 +560,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
43453 * are gone */
43454 mutex_lock(&ttyj->ipw_tty_mutex);
43455 }
43456- while (ttyj->port.count)
43457+ while (atomic_read(&ttyj->port.count))
43458 do_ipw_close(ttyj);
43459 ipwireless_disassociate_network_ttys(network,
43460 ttyj->channel_idx);
43461diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
43462index f9d2850..b006f04 100644
43463--- a/drivers/tty/moxa.c
43464+++ b/drivers/tty/moxa.c
43465@@ -1193,7 +1193,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
43466 }
43467
43468 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
43469- ch->port.count++;
43470+ atomic_inc(&ch->port.count);
43471 tty->driver_data = ch;
43472 tty_port_tty_set(&ch->port, tty);
43473 mutex_lock(&ch->port.mutex);
43474diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
43475index bfd6771..e0d93c4 100644
43476--- a/drivers/tty/n_gsm.c
43477+++ b/drivers/tty/n_gsm.c
43478@@ -1636,7 +1636,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
43479 spin_lock_init(&dlci->lock);
43480 mutex_init(&dlci->mutex);
43481 dlci->fifo = &dlci->_fifo;
43482- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
43483+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
43484 kfree(dlci);
43485 return NULL;
43486 }
43487@@ -2936,7 +2936,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
43488 struct gsm_dlci *dlci = tty->driver_data;
43489 struct tty_port *port = &dlci->port;
43490
43491- port->count++;
43492+ atomic_inc(&port->count);
43493 dlci_get(dlci);
43494 dlci_get(dlci->gsm->dlci[0]);
43495 mux_get(dlci->gsm);
43496diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
43497index 19083ef..6e34e97 100644
43498--- a/drivers/tty/n_tty.c
43499+++ b/drivers/tty/n_tty.c
43500@@ -2196,6 +2196,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
43501 {
43502 *ops = tty_ldisc_N_TTY;
43503 ops->owner = NULL;
43504- ops->refcount = ops->flags = 0;
43505+ atomic_set(&ops->refcount, 0);
43506+ ops->flags = 0;
43507 }
43508 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
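
[Editor's note] The n_tty change looks cosmetic but is forced by C's type rules: once refcount is an atomic_t, the chained ops->refcount = ops->flags = 0; no longer type-checks, so the stores are split and the atomic one goes through atomic_set(). Reduced to its essence:

/* Sketch: a chained assignment must be split once one lvalue turns atomic. */
struct demo_ldisc_ops {
	atomic_t refcount;
	int flags;
};

static void demo_reset(struct demo_ldisc_ops *ops)
{
	atomic_set(&ops->refcount, 0);  /* atomic_t has no plain '=' */
	ops->flags = 0;
}
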
43509diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
43510index ac35c90..c47deac 100644
43511--- a/drivers/tty/pty.c
43512+++ b/drivers/tty/pty.c
43513@@ -790,8 +790,10 @@ static void __init unix98_pty_init(void)
43514 panic("Couldn't register Unix98 pts driver");
43515
43516 /* Now create the /dev/ptmx special device */
43517+ pax_open_kernel();
43518 tty_default_fops(&ptmx_fops);
43519- ptmx_fops.open = ptmx_open;
43520+ *(void **)&ptmx_fops.open = ptmx_open;
43521+ pax_close_kernel();
43522
43523 cdev_init(&ptmx_cdev, &ptmx_fops);
43524 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
43525diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
43526index e42009a..566a036 100644
43527--- a/drivers/tty/rocket.c
43528+++ b/drivers/tty/rocket.c
43529@@ -925,7 +925,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
43530 tty->driver_data = info;
43531 tty_port_tty_set(port, tty);
43532
43533- if (port->count++ == 0) {
43534+ if (atomic_inc_return(&port->count) == 1) {
43535 atomic_inc(&rp_num_ports_open);
43536
43537 #ifdef ROCKET_DEBUG_OPEN
43538@@ -934,7 +934,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
43539 #endif
43540 }
43541 #ifdef ROCKET_DEBUG_OPEN
43542- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
42543+ printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
43544 #endif
43545
43546 /*
43547@@ -1529,7 +1529,7 @@ static void rp_hangup(struct tty_struct *tty)
43548 spin_unlock_irqrestore(&info->port.lock, flags);
43549 return;
43550 }
43551- if (info->port.count)
43552+ if (atomic_read(&info->port.count))
43553 atomic_dec(&rp_num_ports_open);
43554 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
43555 spin_unlock_irqrestore(&info->port.lock, flags);
43556diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
43557index 1002054..dd644a8 100644
43558--- a/drivers/tty/serial/kgdboc.c
43559+++ b/drivers/tty/serial/kgdboc.c
43560@@ -24,8 +24,9 @@
43561 #define MAX_CONFIG_LEN 40
43562
43563 static struct kgdb_io kgdboc_io_ops;
43564+static struct kgdb_io kgdboc_io_ops_console;
43565
43566-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
43567+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
43568 static int configured = -1;
43569
43570 static char config[MAX_CONFIG_LEN];
43571@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
43572 kgdboc_unregister_kbd();
43573 if (configured == 1)
43574 kgdb_unregister_io_module(&kgdboc_io_ops);
43575+ else if (configured == 2)
43576+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
43577 }
43578
43579 static int configure_kgdboc(void)
43580@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
43581 int err;
43582 char *cptr = config;
43583 struct console *cons;
43584+ int is_console = 0;
43585
43586 err = kgdboc_option_setup(config);
43587 if (err || !strlen(config) || isspace(config[0]))
43588 goto noconfig;
43589
43590 err = -ENODEV;
43591- kgdboc_io_ops.is_console = 0;
43592 kgdb_tty_driver = NULL;
43593
43594 kgdboc_use_kms = 0;
43595@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
43596 int idx;
43597 if (cons->device && cons->device(cons, &idx) == p &&
43598 idx == tty_line) {
43599- kgdboc_io_ops.is_console = 1;
43600+ is_console = 1;
43601 break;
43602 }
43603 cons = cons->next;
43604@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
43605 kgdb_tty_line = tty_line;
43606
43607 do_register:
43608- err = kgdb_register_io_module(&kgdboc_io_ops);
43609+ if (is_console) {
43610+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
43611+ configured = 2;
43612+ } else {
43613+ err = kgdb_register_io_module(&kgdboc_io_ops);
43614+ configured = 1;
43615+ }
43616 if (err)
43617 goto noconfig;
43618
43619@@ -205,8 +214,6 @@ do_register:
43620 if (err)
43621 goto nmi_con_failed;
43622
43623- configured = 1;
43624-
43625 return 0;
43626
43627 nmi_con_failed:
43628@@ -223,7 +230,7 @@ noconfig:
43629 static int __init init_kgdboc(void)
43630 {
43631 /* Already configured? */
43632- if (configured == 1)
43633+ if (configured >= 1)
43634 return 0;
43635
43636 return configure_kgdboc();
43637@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
43638 if (config[len - 1] == '\n')
43639 config[len - 1] = '\0';
43640
43641- if (configured == 1)
43642+ if (configured >= 1)
43643 cleanup_kgdboc();
43644
43645 /* Go and configure with the new params. */
43646@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
43647 .post_exception = kgdboc_post_exp_handler,
43648 };
43649
43650+static struct kgdb_io kgdboc_io_ops_console = {
43651+ .name = "kgdboc",
43652+ .read_char = kgdboc_get_char,
43653+ .write_char = kgdboc_put_char,
43654+ .pre_exception = kgdboc_pre_exp_handler,
43655+ .post_exception = kgdboc_post_exp_handler,
43656+ .is_console = 1
43657+};
43658+
43659 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
43660 /* This is only available if kgdboc is a built in for early debugging */
43661 static int __init kgdboc_early_init(char *opt)
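
The kgdboc change above illustrates a recurring pattern in this patch (repeated later for ehci-dbgp): instead of flipping the `is_console` member of a single kgdb_io struct at runtime, two complete ops structs are defined and the right one is chosen at registration time, with `configured` widened to 1/2 to remember which was registered. That lets both structs live in read-only data. A minimal sketch, with struct demo_io and demo_register() as illustrative stand-ins:

#include <stdio.h>

struct demo_io {
    const char *name;
    int is_console;
};

static const struct demo_io io_ops         = { "demo", 0 };
static const struct demo_io io_ops_console = { "demo", 1 };

static int configured;  /* 0 = none, 1 = plain, 2 = console, as in the patch */

static void demo_register(int is_console)
{
    const struct demo_io *ops = is_console ? &io_ops_console : &io_ops;
    configured = is_console ? 2 : 1;
    printf("registered %s (is_console=%d, configured=%d)\n",
           ops->name, ops->is_console, configured);
}

int main(void)
{
    demo_register(1);   /* console path -> io_ops_console, configured = 2 */
    demo_register(0);   /* plain path   -> io_ops,         configured = 1 */
    return 0;
}
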
43662diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
43663index e514b3a..c73d614 100644
43664--- a/drivers/tty/serial/samsung.c
43665+++ b/drivers/tty/serial/samsung.c
43666@@ -453,11 +453,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
43667 }
43668 }
43669
43670+static int s3c64xx_serial_startup(struct uart_port *port);
43671 static int s3c24xx_serial_startup(struct uart_port *port)
43672 {
43673 struct s3c24xx_uart_port *ourport = to_ourport(port);
43674 int ret;
43675
43676+ /* Startup sequence is different for s3c64xx and higher SoC's */
43677+ if (s3c24xx_serial_has_interrupt_mask(port))
43678+ return s3c64xx_serial_startup(port);
43679+
43680 dbg("s3c24xx_serial_startup: port=%p (%08lx,%p)\n",
43681 port->mapbase, port->membase);
43682
43683@@ -1122,10 +1127,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
43684 /* setup info for port */
43685 port->dev = &platdev->dev;
43686
43687- /* Startup sequence is different for s3c64xx and higher SoC's */
43688- if (s3c24xx_serial_has_interrupt_mask(port))
43689- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
43690-
43691 port->uartclk = 1;
43692
43693 if (cfg->uart_flags & UPF_CONS_FLOW) {
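
The samsung.c hunk removes the last runtime write to s3c24xx_serial_ops (patching .startup in init_port) and instead dispatches to the s3c64xx variant inside the startup function itself, so the ops table never needs to be writable. A sketch of the idea under toy assumptions (the port-id test stands in for s3c24xx_serial_has_interrupt_mask()):

#include <stdbool.h>
#include <stdio.h>

/* Toy predicate standing in for s3c24xx_serial_has_interrupt_mask(). */
static bool has_interrupt_mask(int port_id) { return port_id >= 64; }

static int s3c64xx_startup(int port_id)
{
    printf("s3c64xx startup, port %d\n", port_id);
    return 0;
}

static int s3c24xx_startup(int port_id)
{
    /* Startup sequence differs on s3c64xx and later SoCs: delegate early,
     * exactly where the patch adds its early return. */
    if (has_interrupt_mask(port_id))
        return s3c64xx_startup(port_id);
    printf("s3c24xx startup, port %d\n", port_id);
    return 0;
}

/* The ops table can now be const: it is never rewritten at runtime. */
static const struct { int (*startup)(int); } serial_ops = { s3c24xx_startup };

int main(void)
{
    serial_ops.startup(24);   /* legacy path */
    serial_ops.startup(64);   /* delegates to the s3c64xx variant */
    return 0;
}
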
43694diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
43695index 2c7230a..2104f16 100644
43696--- a/drivers/tty/serial/serial_core.c
43697+++ b/drivers/tty/serial/serial_core.c
43698@@ -1455,7 +1455,7 @@ static void uart_hangup(struct tty_struct *tty)
43699 uart_flush_buffer(tty);
43700 uart_shutdown(tty, state);
43701 spin_lock_irqsave(&port->lock, flags);
43702- port->count = 0;
43703+ atomic_set(&port->count, 0);
43704 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
43705 spin_unlock_irqrestore(&port->lock, flags);
43706 tty_port_tty_set(port, NULL);
43707@@ -1551,7 +1551,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
43708 goto end;
43709 }
43710
43711- port->count++;
43712+ atomic_inc(&port->count);
43713 if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
43714 retval = -ENXIO;
43715 goto err_dec_count;
43716@@ -1578,7 +1578,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
43717 /*
43718 * Make sure the device is in D0 state.
43719 */
43720- if (port->count == 1)
43721+ if (atomic_read(&port->count) == 1)
43722 uart_change_pm(state, 0);
43723
43724 /*
43725@@ -1596,7 +1596,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
43726 end:
43727 return retval;
43728 err_dec_count:
43729- port->count--;
43730+ atomic_dec(&port->count);
43731 mutex_unlock(&port->mutex);
43732 goto end;
43733 }
43734diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
43735index 9e071f6..f30ae69 100644
43736--- a/drivers/tty/synclink.c
43737+++ b/drivers/tty/synclink.c
43738@@ -3095,7 +3095,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
43739
43740 if (debug_level >= DEBUG_LEVEL_INFO)
43741 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
43742- __FILE__,__LINE__, info->device_name, info->port.count);
43743+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
43744
43745 if (tty_port_close_start(&info->port, tty, filp) == 0)
43746 goto cleanup;
43747@@ -3113,7 +3113,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
43748 cleanup:
43749 if (debug_level >= DEBUG_LEVEL_INFO)
43750 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
43751- tty->driver->name, info->port.count);
43752+ tty->driver->name, atomic_read(&info->port.count));
43753
43754 } /* end of mgsl_close() */
43755
43756@@ -3212,8 +3212,8 @@ static void mgsl_hangup(struct tty_struct *tty)
43757
43758 mgsl_flush_buffer(tty);
43759 shutdown(info);
43760-
43761- info->port.count = 0;
43762+
43763+ atomic_set(&info->port.count, 0);
43764 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
43765 info->port.tty = NULL;
43766
43767@@ -3302,12 +3302,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
43768
43769 if (debug_level >= DEBUG_LEVEL_INFO)
43770 printk("%s(%d):block_til_ready before block on %s count=%d\n",
43771- __FILE__,__LINE__, tty->driver->name, port->count );
43772+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
43773
43774 spin_lock_irqsave(&info->irq_spinlock, flags);
43775 if (!tty_hung_up_p(filp)) {
43776 extra_count = true;
43777- port->count--;
43778+ atomic_dec(&port->count);
43779 }
43780 spin_unlock_irqrestore(&info->irq_spinlock, flags);
43781 port->blocked_open++;
43782@@ -3336,7 +3336,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
43783
43784 if (debug_level >= DEBUG_LEVEL_INFO)
43785 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
43786- __FILE__,__LINE__, tty->driver->name, port->count );
43787+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
43788
43789 tty_unlock(tty);
43790 schedule();
43791@@ -3348,12 +3348,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
43792
43793 /* FIXME: Racy on hangup during close wait */
43794 if (extra_count)
43795- port->count++;
43796+ atomic_inc(&port->count);
43797 port->blocked_open--;
43798
43799 if (debug_level >= DEBUG_LEVEL_INFO)
43800 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
43801- __FILE__,__LINE__, tty->driver->name, port->count );
43802+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
43803
43804 if (!retval)
43805 port->flags |= ASYNC_NORMAL_ACTIVE;
43806@@ -3405,7 +3405,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
43807
43808 if (debug_level >= DEBUG_LEVEL_INFO)
43809 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
43810- __FILE__,__LINE__,tty->driver->name, info->port.count);
43811+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
43812
43813 /* If port is closing, signal caller to try again */
43814 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
43815@@ -3424,10 +3424,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
43816 spin_unlock_irqrestore(&info->netlock, flags);
43817 goto cleanup;
43818 }
43819- info->port.count++;
43820+ atomic_inc(&info->port.count);
43821 spin_unlock_irqrestore(&info->netlock, flags);
43822
43823- if (info->port.count == 1) {
43824+ if (atomic_read(&info->port.count) == 1) {
43825 /* 1st open on this device, init hardware */
43826 retval = startup(info);
43827 if (retval < 0)
43828@@ -3451,8 +3451,8 @@ cleanup:
43829 if (retval) {
43830 if (tty->count == 1)
43831 info->port.tty = NULL; /* tty layer will release tty struct */
43832- if(info->port.count)
43833- info->port.count--;
43834+ if (atomic_read(&info->port.count))
43835+ atomic_dec(&info->port.count);
43836 }
43837
43838 return retval;
43839@@ -7662,7 +7662,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
43840 unsigned short new_crctype;
43841
43842 /* return error if TTY interface open */
43843- if (info->port.count)
43844+ if (atomic_read(&info->port.count))
43845 return -EBUSY;
43846
43847 switch (encoding)
43848@@ -7757,7 +7757,7 @@ static int hdlcdev_open(struct net_device *dev)
43849
43850 /* arbitrate between network and tty opens */
43851 spin_lock_irqsave(&info->netlock, flags);
43852- if (info->port.count != 0 || info->netcount != 0) {
43853+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
43854 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
43855 spin_unlock_irqrestore(&info->netlock, flags);
43856 return -EBUSY;
43857@@ -7843,7 +7843,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
43858 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
43859
43860 /* return error if TTY interface open */
43861- if (info->port.count)
43862+ if (atomic_read(&info->port.count))
43863 return -EBUSY;
43864
43865 if (cmd != SIOCWANDEV)
43866diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
43867index aba1e59..877ac33 100644
43868--- a/drivers/tty/synclink_gt.c
43869+++ b/drivers/tty/synclink_gt.c
43870@@ -671,7 +671,7 @@ static int open(struct tty_struct *tty, struct file *filp)
43871 tty->driver_data = info;
43872 info->port.tty = tty;
43873
43874- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
43875+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
43876
43877 /* If port is closing, signal caller to try again */
43878 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
43879@@ -692,10 +692,10 @@ static int open(struct tty_struct *tty, struct file *filp)
43880 mutex_unlock(&info->port.mutex);
43881 goto cleanup;
43882 }
43883- info->port.count++;
43884+ atomic_inc(&info->port.count);
43885 spin_unlock_irqrestore(&info->netlock, flags);
43886
43887- if (info->port.count == 1) {
43888+ if (atomic_read(&info->port.count) == 1) {
43889 /* 1st open on this device, init hardware */
43890 retval = startup(info);
43891 if (retval < 0) {
43892@@ -716,8 +716,8 @@ cleanup:
43893 if (retval) {
43894 if (tty->count == 1)
43895 info->port.tty = NULL; /* tty layer will release tty struct */
43896- if(info->port.count)
43897- info->port.count--;
43898+ if(atomic_read(&info->port.count))
43899+ atomic_dec(&info->port.count);
43900 }
43901
43902 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
43903@@ -730,7 +730,7 @@ static void close(struct tty_struct *tty, struct file *filp)
43904
43905 if (sanity_check(info, tty->name, "close"))
43906 return;
43907- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
43908+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
43909
43910 if (tty_port_close_start(&info->port, tty, filp) == 0)
43911 goto cleanup;
43912@@ -747,7 +747,7 @@ static void close(struct tty_struct *tty, struct file *filp)
43913 tty_port_close_end(&info->port, tty);
43914 info->port.tty = NULL;
43915 cleanup:
43916- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
43917+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
43918 }
43919
43920 static void hangup(struct tty_struct *tty)
43921@@ -765,7 +765,7 @@ static void hangup(struct tty_struct *tty)
43922 shutdown(info);
43923
43924 spin_lock_irqsave(&info->port.lock, flags);
43925- info->port.count = 0;
43926+ atomic_set(&info->port.count, 0);
43927 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
43928 info->port.tty = NULL;
43929 spin_unlock_irqrestore(&info->port.lock, flags);
43930@@ -1450,7 +1450,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
43931 unsigned short new_crctype;
43932
43933 /* return error if TTY interface open */
43934- if (info->port.count)
43935+ if (atomic_read(&info->port.count))
43936 return -EBUSY;
43937
43938 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
43939@@ -1545,7 +1545,7 @@ static int hdlcdev_open(struct net_device *dev)
43940
43941 /* arbitrate between network and tty opens */
43942 spin_lock_irqsave(&info->netlock, flags);
43943- if (info->port.count != 0 || info->netcount != 0) {
43944+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
43945 DBGINFO(("%s hdlc_open busy\n", dev->name));
43946 spin_unlock_irqrestore(&info->netlock, flags);
43947 return -EBUSY;
43948@@ -1630,7 +1630,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
43949 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
43950
43951 /* return error if TTY interface open */
43952- if (info->port.count)
43953+ if (atomic_read(&info->port.count))
43954 return -EBUSY;
43955
43956 if (cmd != SIOCWANDEV)
43957@@ -2419,7 +2419,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
43958 if (port == NULL)
43959 continue;
43960 spin_lock(&port->lock);
43961- if ((port->port.count || port->netcount) &&
43962+ if ((atomic_read(&port->port.count) || port->netcount) &&
43963 port->pending_bh && !port->bh_running &&
43964 !port->bh_requested) {
43965 DBGISR(("%s bh queued\n", port->device_name));
43966@@ -3308,7 +3308,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
43967 spin_lock_irqsave(&info->lock, flags);
43968 if (!tty_hung_up_p(filp)) {
43969 extra_count = true;
43970- port->count--;
43971+ atomic_dec(&port->count);
43972 }
43973 spin_unlock_irqrestore(&info->lock, flags);
43974 port->blocked_open++;
43975@@ -3345,7 +3345,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
43976 remove_wait_queue(&port->open_wait, &wait);
43977
43978 if (extra_count)
43979- port->count++;
43980+ atomic_inc(&port->count);
43981 port->blocked_open--;
43982
43983 if (!retval)
43984diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
43985index fd43fb6..34704ad 100644
43986--- a/drivers/tty/synclinkmp.c
43987+++ b/drivers/tty/synclinkmp.c
43988@@ -751,7 +751,7 @@ static int open(struct tty_struct *tty, struct file *filp)
43989
43990 if (debug_level >= DEBUG_LEVEL_INFO)
43991 printk("%s(%d):%s open(), old ref count = %d\n",
43992- __FILE__,__LINE__,tty->driver->name, info->port.count);
43993+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
43994
43995 /* If port is closing, signal caller to try again */
43996 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
43997@@ -770,10 +770,10 @@ static int open(struct tty_struct *tty, struct file *filp)
43998 spin_unlock_irqrestore(&info->netlock, flags);
43999 goto cleanup;
44000 }
44001- info->port.count++;
44002+ atomic_inc(&info->port.count);
44003 spin_unlock_irqrestore(&info->netlock, flags);
44004
44005- if (info->port.count == 1) {
44006+ if (atomic_read(&info->port.count) == 1) {
44007 /* 1st open on this device, init hardware */
44008 retval = startup(info);
44009 if (retval < 0)
44010@@ -797,8 +797,8 @@ cleanup:
44011 if (retval) {
44012 if (tty->count == 1)
44013 info->port.tty = NULL; /* tty layer will release tty struct */
44014- if(info->port.count)
44015- info->port.count--;
44016+ if(atomic_read(&info->port.count))
44017+ atomic_dec(&info->port.count);
44018 }
44019
44020 return retval;
44021@@ -816,7 +816,7 @@ static void close(struct tty_struct *tty, struct file *filp)
44022
44023 if (debug_level >= DEBUG_LEVEL_INFO)
44024 printk("%s(%d):%s close() entry, count=%d\n",
44025- __FILE__,__LINE__, info->device_name, info->port.count);
44026+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
44027
44028 if (tty_port_close_start(&info->port, tty, filp) == 0)
44029 goto cleanup;
44030@@ -835,7 +835,7 @@ static void close(struct tty_struct *tty, struct file *filp)
44031 cleanup:
44032 if (debug_level >= DEBUG_LEVEL_INFO)
44033 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
44034- tty->driver->name, info->port.count);
44035+ tty->driver->name, atomic_read(&info->port.count));
44036 }
44037
44038 /* Called by tty_hangup() when a hangup is signaled.
44039@@ -858,7 +858,7 @@ static void hangup(struct tty_struct *tty)
44040 shutdown(info);
44041
44042 spin_lock_irqsave(&info->port.lock, flags);
44043- info->port.count = 0;
44044+ atomic_set(&info->port.count, 0);
44045 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
44046 info->port.tty = NULL;
44047 spin_unlock_irqrestore(&info->port.lock, flags);
44048@@ -1566,7 +1566,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
44049 unsigned short new_crctype;
44050
44051 /* return error if TTY interface open */
44052- if (info->port.count)
44053+ if (atomic_read(&info->port.count))
44054 return -EBUSY;
44055
44056 switch (encoding)
44057@@ -1661,7 +1661,7 @@ static int hdlcdev_open(struct net_device *dev)
44058
44059 /* arbitrate between network and tty opens */
44060 spin_lock_irqsave(&info->netlock, flags);
44061- if (info->port.count != 0 || info->netcount != 0) {
44062+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
44063 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
44064 spin_unlock_irqrestore(&info->netlock, flags);
44065 return -EBUSY;
44066@@ -1747,7 +1747,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
44067 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
44068
44069 /* return error if TTY interface open */
44070- if (info->port.count)
44071+ if (atomic_read(&info->port.count))
44072 return -EBUSY;
44073
44074 if (cmd != SIOCWANDEV)
44075@@ -2632,7 +2632,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
44076 * do not request bottom half processing if the
44077 * device is not open in a normal mode.
44078 */
44079- if ( port && (port->port.count || port->netcount) &&
44080+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
44081 port->pending_bh && !port->bh_running &&
44082 !port->bh_requested ) {
44083 if ( debug_level >= DEBUG_LEVEL_ISR )
44084@@ -3330,12 +3330,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
44085
44086 if (debug_level >= DEBUG_LEVEL_INFO)
44087 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
44088- __FILE__,__LINE__, tty->driver->name, port->count );
44089+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
44090
44091 spin_lock_irqsave(&info->lock, flags);
44092 if (!tty_hung_up_p(filp)) {
44093 extra_count = true;
44094- port->count--;
44095+ atomic_dec(&port->count);
44096 }
44097 spin_unlock_irqrestore(&info->lock, flags);
44098 port->blocked_open++;
44099@@ -3364,7 +3364,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
44100
44101 if (debug_level >= DEBUG_LEVEL_INFO)
44102 printk("%s(%d):%s block_til_ready() count=%d\n",
44103- __FILE__,__LINE__, tty->driver->name, port->count );
44104+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
44105
44106 tty_unlock(tty);
44107 schedule();
44108@@ -3375,12 +3375,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
44109 remove_wait_queue(&port->open_wait, &wait);
44110
44111 if (extra_count)
44112- port->count++;
44113+ atomic_inc(&port->count);
44114 port->blocked_open--;
44115
44116 if (debug_level >= DEBUG_LEVEL_INFO)
44117 printk("%s(%d):%s block_til_ready() after, count=%d\n",
44118- __FILE__,__LINE__, tty->driver->name, port->count );
44119+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
44120
44121 if (!retval)
44122 port->flags |= ASYNC_NORMAL_ACTIVE;
44123diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
44124index b3c4a25..723916f 100644
44125--- a/drivers/tty/sysrq.c
44126+++ b/drivers/tty/sysrq.c
44127@@ -867,7 +867,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
44128 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
44129 size_t count, loff_t *ppos)
44130 {
44131- if (count) {
44132+ if (count && capable(CAP_SYS_ADMIN)) {
44133 char c;
44134
44135 if (get_user(c, buf))
44136diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
44137index da9fde8..c07975f 100644
44138--- a/drivers/tty/tty_io.c
44139+++ b/drivers/tty/tty_io.c
44140@@ -3391,7 +3391,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
44141
44142 void tty_default_fops(struct file_operations *fops)
44143 {
44144- *fops = tty_fops;
44145+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
44146 }
44147
44148 /*
44149diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
44150index c578229..45aa9ee 100644
44151--- a/drivers/tty/tty_ldisc.c
44152+++ b/drivers/tty/tty_ldisc.c
44153@@ -56,7 +56,7 @@ static void put_ldisc(struct tty_ldisc *ld)
44154 if (atomic_dec_and_test(&ld->users)) {
44155 struct tty_ldisc_ops *ldo = ld->ops;
44156
44157- ldo->refcount--;
44158+ atomic_dec(&ldo->refcount);
44159 module_put(ldo->owner);
44160 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
44161
44162@@ -91,7 +91,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
44163 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
44164 tty_ldiscs[disc] = new_ldisc;
44165 new_ldisc->num = disc;
44166- new_ldisc->refcount = 0;
44167+ atomic_set(&new_ldisc->refcount, 0);
44168 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
44169
44170 return ret;
44171@@ -119,7 +119,7 @@ int tty_unregister_ldisc(int disc)
44172 return -EINVAL;
44173
44174 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
44175- if (tty_ldiscs[disc]->refcount)
44176+ if (atomic_read(&tty_ldiscs[disc]->refcount))
44177 ret = -EBUSY;
44178 else
44179 tty_ldiscs[disc] = NULL;
44180@@ -140,7 +140,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
44181 if (ldops) {
44182 ret = ERR_PTR(-EAGAIN);
44183 if (try_module_get(ldops->owner)) {
44184- ldops->refcount++;
44185+ atomic_inc(&ldops->refcount);
44186 ret = ldops;
44187 }
44188 }
44189@@ -153,7 +153,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
44190 unsigned long flags;
44191
44192 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
44193- ldops->refcount--;
44194+ atomic_dec(&ldops->refcount);
44195 module_put(ldops->owner);
44196 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
44197 }
44198diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
44199index b7ff59d..7c6105e 100644
44200--- a/drivers/tty/tty_port.c
44201+++ b/drivers/tty/tty_port.c
44202@@ -218,7 +218,7 @@ void tty_port_hangup(struct tty_port *port)
44203 unsigned long flags;
44204
44205 spin_lock_irqsave(&port->lock, flags);
44206- port->count = 0;
44207+ atomic_set(&port->count, 0);
44208 port->flags &= ~ASYNC_NORMAL_ACTIVE;
44209 if (port->tty) {
44210 set_bit(TTY_IO_ERROR, &port->tty->flags);
44211@@ -344,7 +344,7 @@ int tty_port_block_til_ready(struct tty_port *port,
44212 /* The port lock protects the port counts */
44213 spin_lock_irqsave(&port->lock, flags);
44214 if (!tty_hung_up_p(filp))
44215- port->count--;
44216+ atomic_dec(&port->count);
44217 port->blocked_open++;
44218 spin_unlock_irqrestore(&port->lock, flags);
44219
44220@@ -386,7 +386,7 @@ int tty_port_block_til_ready(struct tty_port *port,
44221 we must not mess that up further */
44222 spin_lock_irqsave(&port->lock, flags);
44223 if (!tty_hung_up_p(filp))
44224- port->count++;
44225+ atomic_inc(&port->count);
44226 port->blocked_open--;
44227 if (retval == 0)
44228 port->flags |= ASYNC_NORMAL_ACTIVE;
44229@@ -406,19 +406,19 @@ int tty_port_close_start(struct tty_port *port,
44230 return 0;
44231 }
44232
44233- if (tty->count == 1 && port->count != 1) {
44234+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
44235 printk(KERN_WARNING
44236 "tty_port_close_start: tty->count = 1 port count = %d.\n",
44237- port->count);
44238- port->count = 1;
44239+ atomic_read(&port->count));
44240+ atomic_set(&port->count, 1);
44241 }
44242- if (--port->count < 0) {
44243+ if (atomic_dec_return(&port->count) < 0) {
44244 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
44245- port->count);
44246- port->count = 0;
44247+ atomic_read(&port->count));
44248+ atomic_set(&port->count, 0);
44249 }
44250
44251- if (port->count) {
44252+ if (atomic_read(&port->count)) {
44253 spin_unlock_irqrestore(&port->lock, flags);
44254 if (port->ops->drop)
44255 port->ops->drop(port);
44256@@ -516,7 +516,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
44257 {
44258 spin_lock_irq(&port->lock);
44259 if (!tty_hung_up_p(filp))
44260- ++port->count;
44261+ atomic_inc(&port->count);
44262 spin_unlock_irq(&port->lock);
44263 tty_port_tty_set(port, tty);
44264
44265diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
44266index 681765b..d3ccdf2 100644
44267--- a/drivers/tty/vt/keyboard.c
44268+++ b/drivers/tty/vt/keyboard.c
44269@@ -660,6 +660,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
44270 kbd->kbdmode == VC_OFF) &&
44271 value != KVAL(K_SAK))
44272 return; /* SAK is allowed even in raw mode */
44273+
44274+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
44275+ {
44276+ void *func = fn_handler[value];
44277+ if (func == fn_show_state || func == fn_show_ptregs ||
44278+ func == fn_show_mem)
44279+ return;
44280+ }
44281+#endif
44282+
44283 fn_handler[value](vc);
44284 }
44285
44286@@ -1808,9 +1818,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
44287 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
44288 return -EFAULT;
44289
44290- if (!capable(CAP_SYS_TTY_CONFIG))
44291- perm = 0;
44292-
44293 switch (cmd) {
44294 case KDGKBENT:
44295 /* Ensure another thread doesn't free it under us */
44296@@ -1825,6 +1832,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
44297 spin_unlock_irqrestore(&kbd_event_lock, flags);
44298 return put_user(val, &user_kbe->kb_value);
44299 case KDSKBENT:
44300+ if (!capable(CAP_SYS_TTY_CONFIG))
44301+ perm = 0;
44302+
44303 if (!perm)
44304 return -EPERM;
44305 if (!i && v == K_NOSUCHMAP) {
44306@@ -1915,9 +1925,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
44307 int i, j, k;
44308 int ret;
44309
44310- if (!capable(CAP_SYS_TTY_CONFIG))
44311- perm = 0;
44312-
44313 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
44314 if (!kbs) {
44315 ret = -ENOMEM;
44316@@ -1951,6 +1958,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
44317 kfree(kbs);
44318 return ((p && *p) ? -EOVERFLOW : 0);
44319 case KDSKBSENT:
44320+ if (!capable(CAP_SYS_TTY_CONFIG))
44321+ perm = 0;
44322+
44323 if (!perm) {
44324 ret = -EPERM;
44325 goto reterr;
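
The keyboard.c hunks move the CAP_SYS_TTY_CONFIG demotion of `perm` out of the common entry path and into the KDSKBENT/KDSKBSENT set cases only, so read-only queries never trigger a capability check they do not need. A compressed illustrative sketch (names and return values are stand-ins; -1 models -EPERM):

#include <stdbool.h>
#include <stdio.h>

enum cmd { GET_ENTRY, SET_ENTRY };

static bool capable_tty_config(void) { return false; }  /* unprivileged caller */

static int do_kbd_ioctl(enum cmd cmd, bool perm)
{
    switch (cmd) {
    case GET_ENTRY:
        return 0;                      /* reading needs no capability check */
    case SET_ENTRY:
        if (!capable_tty_config())     /* checked only where it matters */
            perm = false;
        if (!perm)
            return -1;                 /* models -EPERM */
        return 0;
    }
    return -1;
}

int main(void)
{
    printf("get -> %d\n", do_kbd_ioctl(GET_ENTRY, true));  /* 0  */
    printf("set -> %d\n", do_kbd_ioctl(SET_ENTRY, true));  /* -1 */
    return 0;
}
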
44326diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
44327index 5110f36..8dc0a74 100644
44328--- a/drivers/uio/uio.c
44329+++ b/drivers/uio/uio.c
44330@@ -25,6 +25,7 @@
44331 #include <linux/kobject.h>
44332 #include <linux/cdev.h>
44333 #include <linux/uio_driver.h>
44334+#include <asm/local.h>
44335
44336 #define UIO_MAX_DEVICES (1U << MINORBITS)
44337
44338@@ -32,10 +33,10 @@ struct uio_device {
44339 struct module *owner;
44340 struct device *dev;
44341 int minor;
44342- atomic_t event;
44343+ atomic_unchecked_t event;
44344 struct fasync_struct *async_queue;
44345 wait_queue_head_t wait;
44346- int vma_count;
44347+ local_t vma_count;
44348 struct uio_info *info;
44349 struct kobject *map_dir;
44350 struct kobject *portio_dir;
44351@@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
44352 struct device_attribute *attr, char *buf)
44353 {
44354 struct uio_device *idev = dev_get_drvdata(dev);
44355- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
44356+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
44357 }
44358
44359 static struct device_attribute uio_class_attributes[] = {
44360@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
44361 {
44362 struct uio_device *idev = info->uio_dev;
44363
44364- atomic_inc(&idev->event);
44365+ atomic_inc_unchecked(&idev->event);
44366 wake_up_interruptible(&idev->wait);
44367 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
44368 }
44369@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
44370 }
44371
44372 listener->dev = idev;
44373- listener->event_count = atomic_read(&idev->event);
44374+ listener->event_count = atomic_read_unchecked(&idev->event);
44375 filep->private_data = listener;
44376
44377 if (idev->info->open) {
44378@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
44379 return -EIO;
44380
44381 poll_wait(filep, &idev->wait, wait);
44382- if (listener->event_count != atomic_read(&idev->event))
44383+ if (listener->event_count != atomic_read_unchecked(&idev->event))
44384 return POLLIN | POLLRDNORM;
44385 return 0;
44386 }
44387@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
44388 do {
44389 set_current_state(TASK_INTERRUPTIBLE);
44390
44391- event_count = atomic_read(&idev->event);
44392+ event_count = atomic_read_unchecked(&idev->event);
44393 if (event_count != listener->event_count) {
44394 if (copy_to_user(buf, &event_count, count))
44395 retval = -EFAULT;
44396@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
44397 static void uio_vma_open(struct vm_area_struct *vma)
44398 {
44399 struct uio_device *idev = vma->vm_private_data;
44400- idev->vma_count++;
44401+ local_inc(&idev->vma_count);
44402 }
44403
44404 static void uio_vma_close(struct vm_area_struct *vma)
44405 {
44406 struct uio_device *idev = vma->vm_private_data;
44407- idev->vma_count--;
44408+ local_dec(&idev->vma_count);
44409 }
44410
44411 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
44412@@ -819,7 +820,7 @@ int __uio_register_device(struct module *owner,
44413 idev->owner = owner;
44414 idev->info = info;
44415 init_waitqueue_head(&idev->wait);
44416- atomic_set(&idev->event, 0);
44417+ atomic_set_unchecked(&idev->event, 0);
44418
44419 ret = uio_get_minor(idev);
44420 if (ret)
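
uio.c shows the counter split used throughout this patch: under PAX_REFCOUNT, atomic_t operations trap on overflow to block refcount-overflow exploits, so counters that may legitimately wrap, such as the pure event counter idev->event, become atomic_unchecked_t, while the vma_count bump becomes a cheaper local_t. A userspace sketch of the checked/unchecked distinction; checked_inc() models the instrumented helper and is not a real kernel function:

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Models a REFCOUNT-instrumented atomic_inc(): trap instead of wrapping. */
static void checked_inc(atomic_int *v)
{
    if (atomic_fetch_add(v, 1) == INT_MAX) {
        atomic_store(v, INT_MAX);          /* saturate, then trap */
        fprintf(stderr, "refcount overflow detected\n");
        abort();
    }
}

/* Models atomic_inc_unchecked(): plain wrapping arithmetic is acceptable. */
static void unchecked_inc(atomic_uint *v)
{
    atomic_fetch_add(v, 1);
}

int main(void)
{
    atomic_uint events = UINT_MAX;         /* statistics counter: may wrap */
    unchecked_inc(&events);
    printf("event counter wrapped to %u\n", atomic_load(&events)); /* 0 */

    atomic_int refs = 1;                   /* reference count: must not wrap */
    checked_inc(&refs);
    printf("refs=%d\n", atomic_load(&refs));                       /* 2 */
    return 0;
}
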
44421diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
44422index b7eb86a..36d28af 100644
44423--- a/drivers/usb/atm/cxacru.c
44424+++ b/drivers/usb/atm/cxacru.c
44425@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
44426 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
44427 if (ret < 2)
44428 return -EINVAL;
44429- if (index < 0 || index > 0x7f)
44430+ if (index > 0x7f)
44431 return -EINVAL;
44432 pos += tmp;
44433
44434diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
44435index 35f10bf..6a38a0b 100644
44436--- a/drivers/usb/atm/usbatm.c
44437+++ b/drivers/usb/atm/usbatm.c
44438@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
44439 if (printk_ratelimit())
44440 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
44441 __func__, vpi, vci);
44442- atomic_inc(&vcc->stats->rx_err);
44443+ atomic_inc_unchecked(&vcc->stats->rx_err);
44444 return;
44445 }
44446
44447@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
44448 if (length > ATM_MAX_AAL5_PDU) {
44449 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
44450 __func__, length, vcc);
44451- atomic_inc(&vcc->stats->rx_err);
44452+ atomic_inc_unchecked(&vcc->stats->rx_err);
44453 goto out;
44454 }
44455
44456@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
44457 if (sarb->len < pdu_length) {
44458 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
44459 __func__, pdu_length, sarb->len, vcc);
44460- atomic_inc(&vcc->stats->rx_err);
44461+ atomic_inc_unchecked(&vcc->stats->rx_err);
44462 goto out;
44463 }
44464
44465 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
44466 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
44467 __func__, vcc);
44468- atomic_inc(&vcc->stats->rx_err);
44469+ atomic_inc_unchecked(&vcc->stats->rx_err);
44470 goto out;
44471 }
44472
44473@@ -389,7 +389,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
44474 if (printk_ratelimit())
44475 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
44476 __func__, length);
44477- atomic_inc(&vcc->stats->rx_drop);
44478+ atomic_inc_unchecked(&vcc->stats->rx_drop);
44479 goto out;
44480 }
44481
44482@@ -417,7 +417,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
44483
44484 vcc->push(vcc, skb);
44485
44486- atomic_inc(&vcc->stats->rx);
44487+ atomic_inc_unchecked(&vcc->stats->rx);
44488 out:
44489 skb_trim(sarb, 0);
44490 }
44491@@ -623,7 +623,7 @@ static void usbatm_tx_process(unsigned long data)
44492 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
44493
44494 usbatm_pop(vcc, skb);
44495- atomic_inc(&vcc->stats->tx);
44496+ atomic_inc_unchecked(&vcc->stats->tx);
44497
44498 skb = skb_dequeue(&instance->sndqueue);
44499 }
44500@@ -779,11 +779,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
44501 if (!left--)
44502 return sprintf(page,
44503 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
44504- atomic_read(&atm_dev->stats.aal5.tx),
44505- atomic_read(&atm_dev->stats.aal5.tx_err),
44506- atomic_read(&atm_dev->stats.aal5.rx),
44507- atomic_read(&atm_dev->stats.aal5.rx_err),
44508- atomic_read(&atm_dev->stats.aal5.rx_drop));
44509+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
44510+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
44511+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
44512+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
44513+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
44514
44515 if (!left--) {
44516 if (instance->disconnected)
44517diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
44518index cbacea9..246cccd 100644
44519--- a/drivers/usb/core/devices.c
44520+++ b/drivers/usb/core/devices.c
44521@@ -126,7 +126,7 @@ static const char format_endpt[] =
44522 * time it gets called.
44523 */
44524 static struct device_connect_event {
44525- atomic_t count;
44526+ atomic_unchecked_t count;
44527 wait_queue_head_t wait;
44528 } device_event = {
44529 .count = ATOMIC_INIT(1),
44530@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
44531
44532 void usbfs_conn_disc_event(void)
44533 {
44534- atomic_add(2, &device_event.count);
44535+ atomic_add_unchecked(2, &device_event.count);
44536 wake_up(&device_event.wait);
44537 }
44538
44539@@ -645,7 +645,7 @@ static unsigned int usb_device_poll(struct file *file,
44540
44541 poll_wait(file, &device_event.wait, wait);
44542
44543- event_count = atomic_read(&device_event.count);
44544+ event_count = atomic_read_unchecked(&device_event.count);
44545 if (file->f_version != event_count) {
44546 file->f_version = event_count;
44547 return POLLIN | POLLRDNORM;
44548diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
44549index 8e64adf..9a33a3c 100644
44550--- a/drivers/usb/core/hcd.c
44551+++ b/drivers/usb/core/hcd.c
44552@@ -1522,7 +1522,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
44553 */
44554 usb_get_urb(urb);
44555 atomic_inc(&urb->use_count);
44556- atomic_inc(&urb->dev->urbnum);
44557+ atomic_inc_unchecked(&urb->dev->urbnum);
44558 usbmon_urb_submit(&hcd->self, urb);
44559
44560 /* NOTE requirements on root-hub callers (usbfs and the hub
44561@@ -1549,7 +1549,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
44562 urb->hcpriv = NULL;
44563 INIT_LIST_HEAD(&urb->urb_list);
44564 atomic_dec(&urb->use_count);
44565- atomic_dec(&urb->dev->urbnum);
44566+ atomic_dec_unchecked(&urb->dev->urbnum);
44567 if (atomic_read(&urb->reject))
44568 wake_up(&usb_kill_urb_queue);
44569 usb_put_urb(urb);
44570diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
44571index 131f736..99004c3 100644
44572--- a/drivers/usb/core/message.c
44573+++ b/drivers/usb/core/message.c
44574@@ -129,7 +129,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
44575 * method can wait for it to complete. Since you don't have a handle on the
44576 * URB used, you can't cancel the request.
44577 */
44578-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
44579+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
44580 __u8 requesttype, __u16 value, __u16 index, void *data,
44581 __u16 size, int timeout)
44582 {
44583diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
44584index 818e4a0..0fc9589 100644
44585--- a/drivers/usb/core/sysfs.c
44586+++ b/drivers/usb/core/sysfs.c
44587@@ -226,7 +226,7 @@ show_urbnum(struct device *dev, struct device_attribute *attr, char *buf)
44588 struct usb_device *udev;
44589
44590 udev = to_usb_device(dev);
44591- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
44592+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
44593 }
44594 static DEVICE_ATTR(urbnum, S_IRUGO, show_urbnum, NULL);
44595
44596diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
44597index f81b925..78d22ec 100644
44598--- a/drivers/usb/core/usb.c
44599+++ b/drivers/usb/core/usb.c
44600@@ -388,7 +388,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
44601 set_dev_node(&dev->dev, dev_to_node(bus->controller));
44602 dev->state = USB_STATE_ATTACHED;
44603 dev->lpm_disable_count = 1;
44604- atomic_set(&dev->urbnum, 0);
44605+ atomic_set_unchecked(&dev->urbnum, 0);
44606
44607 INIT_LIST_HEAD(&dev->ep0.urb_list);
44608 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
44609diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
44610index 5e29dde..eca992f 100644
44611--- a/drivers/usb/early/ehci-dbgp.c
44612+++ b/drivers/usb/early/ehci-dbgp.c
44613@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
44614
44615 #ifdef CONFIG_KGDB
44616 static struct kgdb_io kgdbdbgp_io_ops;
44617-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
44618+static struct kgdb_io kgdbdbgp_io_ops_console;
44619+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
44620 #else
44621 #define dbgp_kgdb_mode (0)
44622 #endif
44623@@ -1047,6 +1048,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
44624 .write_char = kgdbdbgp_write_char,
44625 };
44626
44627+static struct kgdb_io kgdbdbgp_io_ops_console = {
44628+ .name = "kgdbdbgp",
44629+ .read_char = kgdbdbgp_read_char,
44630+ .write_char = kgdbdbgp_write_char,
44631+ .is_console = 1
44632+};
44633+
44634 static int kgdbdbgp_wait_time;
44635
44636 static int __init kgdbdbgp_parse_config(char *str)
44637@@ -1062,8 +1070,10 @@ static int __init kgdbdbgp_parse_config(char *str)
44638 ptr++;
44639 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
44640 }
44641- kgdb_register_io_module(&kgdbdbgp_io_ops);
44642- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
44643+ if (early_dbgp_console.index != -1)
44644+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
44645+ else
44646+ kgdb_register_io_module(&kgdbdbgp_io_ops);
44647
44648 return 0;
44649 }
44650diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
44651index 598dcc1..032dd4f 100644
44652--- a/drivers/usb/gadget/u_serial.c
44653+++ b/drivers/usb/gadget/u_serial.c
44654@@ -735,9 +735,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
44655 spin_lock_irq(&port->port_lock);
44656
44657 /* already open? Great. */
44658- if (port->port.count) {
44659+ if (atomic_read(&port->port.count)) {
44660 status = 0;
44661- port->port.count++;
44662+ atomic_inc(&port->port.count);
44663
44664 /* currently opening/closing? wait ... */
44665 } else if (port->openclose) {
44666@@ -796,7 +796,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
44667 tty->driver_data = port;
44668 port->port.tty = tty;
44669
44670- port->port.count = 1;
44671+ atomic_set(&port->port.count, 1);
44672 port->openclose = false;
44673
44674 /* if connected, start the I/O stream */
44675@@ -838,11 +838,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
44676
44677 spin_lock_irq(&port->port_lock);
44678
44679- if (port->port.count != 1) {
44680- if (port->port.count == 0)
44681+ if (atomic_read(&port->port.count) != 1) {
44682+ if (atomic_read(&port->port.count) == 0)
44683 WARN_ON(1);
44684 else
44685- --port->port.count;
44686+ atomic_dec(&port->port.count);
44687 goto exit;
44688 }
44689
44690@@ -852,7 +852,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
44691 * and sleep if necessary
44692 */
44693 port->openclose = true;
44694- port->port.count = 0;
44695+ atomic_set(&port->port.count, 0);
44696
44697 gser = port->port_usb;
44698 if (gser && gser->disconnect)
44699@@ -1159,7 +1159,7 @@ static int gs_closed(struct gs_port *port)
44700 int cond;
44701
44702 spin_lock_irq(&port->port_lock);
44703- cond = (port->port.count == 0) && !port->openclose;
44704+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
44705 spin_unlock_irq(&port->port_lock);
44706 return cond;
44707 }
44708@@ -1273,7 +1273,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
44709 /* if it's already open, start I/O ... and notify the serial
44710 * protocol about open/close status (connect/disconnect).
44711 */
44712- if (port->port.count) {
44713+ if (atomic_read(&port->port.count)) {
44714 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
44715 gs_start_io(port);
44716 if (gser->connect)
44717@@ -1320,7 +1320,7 @@ void gserial_disconnect(struct gserial *gser)
44718
44719 port->port_usb = NULL;
44720 gser->ioport = NULL;
44721- if (port->port.count > 0 || port->openclose) {
44722+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
44723 wake_up_interruptible(&port->drain_wait);
44724 if (port->port.tty)
44725 tty_hangup(port->port.tty);
44726@@ -1336,7 +1336,7 @@ void gserial_disconnect(struct gserial *gser)
44727
44728 /* finally, free any unused/unusable I/O buffers */
44729 spin_lock_irqsave(&port->port_lock, flags);
44730- if (port->port.count == 0 && !port->openclose)
44731+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
44732 gs_buf_free(&port->port_write_buf);
44733 gs_free_requests(gser->out, &port->read_pool, NULL);
44734 gs_free_requests(gser->out, &port->read_queue, NULL);
44735diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
44736index 5f3bcd3..bfca43f 100644
44737--- a/drivers/usb/serial/console.c
44738+++ b/drivers/usb/serial/console.c
44739@@ -124,7 +124,7 @@ static int usb_console_setup(struct console *co, char *options)
44740
44741 info->port = port;
44742
44743- ++port->port.count;
44744+ atomic_inc(&port->port.count);
44745 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
44746 if (serial->type->set_termios) {
44747 /*
44748@@ -174,7 +174,7 @@ static int usb_console_setup(struct console *co, char *options)
44749 }
44750 /* Now that any required fake tty operations are completed restore
44751 * the tty port count */
44752- --port->port.count;
44753+ atomic_dec(&port->port.count);
44754 /* The console is special in terms of closing the device so
44755 * indicate this port is now acting as a system console. */
44756 port->port.console = 1;
44757@@ -187,7 +187,7 @@ static int usb_console_setup(struct console *co, char *options)
44758 free_tty:
44759 kfree(tty);
44760 reset_open_count:
44761- port->port.count = 0;
44762+ atomic_set(&port->port.count, 0);
44763 usb_autopm_put_interface(serial->interface);
44764 error_get_interface:
44765 usb_serial_put(serial);
44766diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
44767index 6c3586a..a94e621 100644
44768--- a/drivers/usb/storage/realtek_cr.c
44769+++ b/drivers/usb/storage/realtek_cr.c
44770@@ -429,7 +429,7 @@ static int rts51x_read_status(struct us_data *us,
44771
44772 buf = kmalloc(len, GFP_NOIO);
44773 if (buf == NULL)
44774- return USB_STOR_TRANSPORT_ERROR;
44775+ return -ENOMEM;
44776
44777 US_DEBUGP("%s, lun = %d\n", __func__, lun);
44778
44779diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
44780index 75f70f0..d467e1a 100644
44781--- a/drivers/usb/storage/usb.h
44782+++ b/drivers/usb/storage/usb.h
44783@@ -63,7 +63,7 @@ struct us_unusual_dev {
44784 __u8 useProtocol;
44785 __u8 useTransport;
44786 int (*initFunction)(struct us_data *);
44787-};
44788+} __do_const;
44789
44790
44791 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
44792diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
44793index d6bea3e..60b250e 100644
44794--- a/drivers/usb/wusbcore/wa-hc.h
44795+++ b/drivers/usb/wusbcore/wa-hc.h
44796@@ -192,7 +192,7 @@ struct wahc {
44797 struct list_head xfer_delayed_list;
44798 spinlock_t xfer_list_lock;
44799 struct work_struct xfer_work;
44800- atomic_t xfer_id_count;
44801+ atomic_unchecked_t xfer_id_count;
44802 };
44803
44804
44805@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
44806 INIT_LIST_HEAD(&wa->xfer_delayed_list);
44807 spin_lock_init(&wa->xfer_list_lock);
44808 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
44809- atomic_set(&wa->xfer_id_count, 1);
44810+ atomic_set_unchecked(&wa->xfer_id_count, 1);
44811 }
44812
44813 /**
44814diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
44815index 57c01ab..8a05959 100644
44816--- a/drivers/usb/wusbcore/wa-xfer.c
44817+++ b/drivers/usb/wusbcore/wa-xfer.c
44818@@ -296,7 +296,7 @@ out:
44819 */
44820 static void wa_xfer_id_init(struct wa_xfer *xfer)
44821 {
44822- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
44823+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
44824 }
44825
44826 /*
44827diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
44828index b28e66c..4a62e12 100644
44829--- a/drivers/vfio/pci/vfio_pci.c
44830+++ b/drivers/vfio/pci/vfio_pci.c
44831@@ -331,6 +331,7 @@ static long vfio_pci_ioctl(void *device_data,
44832
44833 if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
44834 size_t size;
44835+ int max = vfio_pci_get_irq_count(vdev, hdr.index);
44836
44837 if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)
44838 size = sizeof(uint8_t);
44839@@ -340,7 +341,7 @@ static long vfio_pci_ioctl(void *device_data,
44840 return -EINVAL;
44841
44842 if (hdr.argsz - minsz < hdr.count * size ||
44843- hdr.count > vfio_pci_get_irq_count(vdev, hdr.index))
44844+ hdr.start >= max || hdr.start + hdr.count > max)
44845 return -EINVAL;
44846
44847 data = memdup_user((void __user *)(arg + minsz),
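
The vfio_pci.c fix above tightens the IRQ-set validation: checking only `hdr.count` against the table size lets a caller smuggle a large `hdr.start` past the bound, so the patch validates the whole [start, start+count) window. A stand-alone sketch of the check, rearranged into an overflow-safe form (`count <= max - start` cannot wrap, unlike `start + count`):

#include <stdbool.h>
#include <stdio.h>

static bool irq_set_in_range(unsigned int start, unsigned int count,
                             unsigned int max)
{
    /* was: count > max  -- misses out-of-bounds windows driven by start */
    return start < max && count <= max - start;
}

int main(void)
{
    printf("%d\n", irq_set_in_range(0, 4, 8));   /* 1: [0,4) fits in 8    */
    printf("%d\n", irq_set_in_range(6, 4, 8));   /* 0: runs past the end  */
    printf("%d\n", irq_set_in_range(9, 1, 8));   /* 0: start beyond table */
    return 0;
}
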
44848diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
44849index 8c55011..eed4ae1a 100644
44850--- a/drivers/video/aty/aty128fb.c
44851+++ b/drivers/video/aty/aty128fb.c
44852@@ -149,7 +149,7 @@ enum {
44853 };
44854
44855 /* Must match above enum */
44856-static char * const r128_family[] = {
44857+static const char * const r128_family[] = {
44858 "AGP",
44859 "PCI",
44860 "PRO AGP",
44861diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
44862index 4f27fdc..d3537e6 100644
44863--- a/drivers/video/aty/atyfb_base.c
44864+++ b/drivers/video/aty/atyfb_base.c
44865@@ -1325,10 +1325,14 @@ static int atyfb_set_par(struct fb_info *info)
44866 par->accel_flags = var->accel_flags; /* hack */
44867
44868 if (var->accel_flags) {
44869- info->fbops->fb_sync = atyfb_sync;
44870+ pax_open_kernel();
44871+ *(void **)&info->fbops->fb_sync = atyfb_sync;
44872+ pax_close_kernel();
44873 info->flags &= ~FBINFO_HWACCEL_DISABLED;
44874 } else {
44875- info->fbops->fb_sync = NULL;
44876+ pax_open_kernel();
44877+ *(void **)&info->fbops->fb_sync = NULL;
44878+ pax_close_kernel();
44879 info->flags |= FBINFO_HWACCEL_DISABLED;
44880 }
44881
44882diff --git a/drivers/video/aty/mach64_cursor.c b/drivers/video/aty/mach64_cursor.c
44883index 95ec042..e6affdd 100644
44884--- a/drivers/video/aty/mach64_cursor.c
44885+++ b/drivers/video/aty/mach64_cursor.c
44886@@ -7,6 +7,7 @@
44887 #include <linux/string.h>
44888
44889 #include <asm/io.h>
44890+#include <asm/pgtable.h>
44891
44892 #ifdef __sparc__
44893 #include <asm/fbio.h>
44894@@ -208,7 +209,9 @@ int aty_init_cursor(struct fb_info *info)
44895 info->sprite.buf_align = 16; /* and 64 lines tall. */
44896 info->sprite.flags = FB_PIXMAP_IO;
44897
44898- info->fbops->fb_cursor = atyfb_cursor;
44899+ pax_open_kernel();
44900+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
44901+ pax_close_kernel();
44902
44903 return 0;
44904 }
44905diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
44906index 6c5ed6b..b727c88 100644
44907--- a/drivers/video/backlight/kb3886_bl.c
44908+++ b/drivers/video/backlight/kb3886_bl.c
44909@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
44910 static unsigned long kb3886bl_flags;
44911 #define KB3886BL_SUSPENDED 0x01
44912
44913-static struct dmi_system_id __initdata kb3886bl_device_table[] = {
44914+static const struct dmi_system_id __initconst kb3886bl_device_table[] = {
44915 {
44916 .ident = "Sahara Touch-iT",
44917 .matches = {
44918diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
44919index 88cad6b..dd746c7 100644
44920--- a/drivers/video/fb_defio.c
44921+++ b/drivers/video/fb_defio.c
44922@@ -206,7 +206,9 @@ void fb_deferred_io_init(struct fb_info *info)
44923
44924 BUG_ON(!fbdefio);
44925 mutex_init(&fbdefio->lock);
44926- info->fbops->fb_mmap = fb_deferred_io_mmap;
44927+ pax_open_kernel();
44928+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
44929+ pax_close_kernel();
44930 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
44931 INIT_LIST_HEAD(&fbdefio->pagelist);
44932 if (fbdefio->delay == 0) /* set a default of 1 s */
44933@@ -237,7 +239,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
44934 page->mapping = NULL;
44935 }
44936
44937- info->fbops->fb_mmap = NULL;
44938+ *(void **)&info->fbops->fb_mmap = NULL;
44939 mutex_destroy(&fbdefio->lock);
44940 }
44941 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
44942diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
44943index 5c3960d..15cf8fc 100644
44944--- a/drivers/video/fbcmap.c
44945+++ b/drivers/video/fbcmap.c
44946@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
44947 rc = -ENODEV;
44948 goto out;
44949 }
44950- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
44951- !info->fbops->fb_setcmap)) {
44952+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
44953 rc = -EINVAL;
44954 goto out1;
44955 }
44956diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
44957index dc61c12..e29796e 100644
44958--- a/drivers/video/fbmem.c
44959+++ b/drivers/video/fbmem.c
44960@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
44961 image->dx += image->width + 8;
44962 }
44963 } else if (rotate == FB_ROTATE_UD) {
44964- for (x = 0; x < num && image->dx >= 0; x++) {
44965+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
44966 info->fbops->fb_imageblit(info, image);
44967 image->dx -= image->width + 8;
44968 }
44969@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
44970 image->dy += image->height + 8;
44971 }
44972 } else if (rotate == FB_ROTATE_CCW) {
44973- for (x = 0; x < num && image->dy >= 0; x++) {
44974+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
44975 info->fbops->fb_imageblit(info, image);
44976 image->dy -= image->height + 8;
44977 }
44978@@ -1166,7 +1166,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
44979 return -EFAULT;
44980 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
44981 return -EINVAL;
44982- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
44983+ if (con2fb.framebuffer >= FB_MAX)
44984 return -EINVAL;
44985 if (!registered_fb[con2fb.framebuffer])
44986 request_module("fb%d", con2fb.framebuffer);
44987diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
44988index 7672d2e..b56437f 100644
44989--- a/drivers/video/i810/i810_accel.c
44990+++ b/drivers/video/i810/i810_accel.c
44991@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
44992 }
44993 }
44994 printk("ringbuffer lockup!!!\n");
44995+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
44996 i810_report_error(mmio);
44997 par->dev_flags |= LOCKUP;
44998 info->pixmap.scan_align = 1;
44999diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
45000index 3c14e43..eafa544 100644
45001--- a/drivers/video/logo/logo_linux_clut224.ppm
45002+++ b/drivers/video/logo/logo_linux_clut224.ppm
45003@@ -1,1604 +1,1123 @@
45004 P3
45005-# Standard 224-color Linux logo
45006 80 80
45007 255
- [... pixel rows elided: the remainder of this hunk strips the stock 80x80, 224-color logo's RGB triplets and substitutes the replacement image data ...]
45265- 0 0 0 0 0 0 0 0 0 0 0 0
45266- 0 0 0 0 0 0 0 0 0 0 0 0
45267- 0 0 0 0 0 0 0 0 0 0 0 0
45268- 0 0 0 0 0 0 0 0 0 0 0 0
45269- 0 0 1 0 0 0 0 0 0 0 0 0
45270- 0 0 0 0 0 0 0 0 0 0 0 0
45271- 0 0 0 0 0 0 0 0 0 0 0 0
45272- 0 0 0 0 0 0 0 0 0 0 0 0
45273- 0 0 0 0 0 0 0 0 0 0 0 0
45274- 0 0 0 0 0 0 0 0 0 14 14 14
45275- 46 46 46 86 86 86 2 2 6 2 2 6
45276- 6 6 6 6 6 6 22 22 22 34 34 34
45277- 6 6 6 2 2 6 2 2 6 2 2 6
45278- 2 2 6 2 2 6 18 18 18 34 34 34
45279- 10 10 10 50 50 50 22 22 22 2 2 6
45280- 2 2 6 2 2 6 2 2 6 10 10 10
45281- 86 86 86 42 42 42 14 14 14 0 0 0
45282- 0 0 0 0 0 0 0 0 0 0 0 0
45283- 0 0 0 0 0 0 0 0 0 0 0 0
45284- 0 0 0 0 0 0 0 0 0 0 0 0
45285- 0 0 0 0 0 0 0 0 0 0 0 0
45286- 0 0 0 0 0 0 0 0 0 0 0 0
45287- 0 0 0 0 0 0 0 0 0 0 0 0
45288- 0 0 0 0 0 0 0 0 0 0 0 0
45289- 0 0 1 0 0 1 0 0 1 0 0 0
45290- 0 0 0 0 0 0 0 0 0 0 0 0
45291- 0 0 0 0 0 0 0 0 0 0 0 0
45292- 0 0 0 0 0 0 0 0 0 0 0 0
45293- 0 0 0 0 0 0 0 0 0 0 0 0
45294- 0 0 0 0 0 0 0 0 0 14 14 14
45295- 46 46 46 86 86 86 2 2 6 2 2 6
45296- 38 38 38 116 116 116 94 94 94 22 22 22
45297- 22 22 22 2 2 6 2 2 6 2 2 6
45298- 14 14 14 86 86 86 138 138 138 162 162 162
45299-154 154 154 38 38 38 26 26 26 6 6 6
45300- 2 2 6 2 2 6 2 2 6 2 2 6
45301- 86 86 86 46 46 46 14 14 14 0 0 0
45302- 0 0 0 0 0 0 0 0 0 0 0 0
45303- 0 0 0 0 0 0 0 0 0 0 0 0
45304- 0 0 0 0 0 0 0 0 0 0 0 0
45305- 0 0 0 0 0 0 0 0 0 0 0 0
45306- 0 0 0 0 0 0 0 0 0 0 0 0
45307- 0 0 0 0 0 0 0 0 0 0 0 0
45308- 0 0 0 0 0 0 0 0 0 0 0 0
45309- 0 0 0 0 0 0 0 0 0 0 0 0
45310- 0 0 0 0 0 0 0 0 0 0 0 0
45311- 0 0 0 0 0 0 0 0 0 0 0 0
45312- 0 0 0 0 0 0 0 0 0 0 0 0
45313- 0 0 0 0 0 0 0 0 0 0 0 0
45314- 0 0 0 0 0 0 0 0 0 14 14 14
45315- 46 46 46 86 86 86 2 2 6 14 14 14
45316-134 134 134 198 198 198 195 195 195 116 116 116
45317- 10 10 10 2 2 6 2 2 6 6 6 6
45318-101 98 89 187 187 187 210 210 210 218 218 218
45319-214 214 214 134 134 134 14 14 14 6 6 6
45320- 2 2 6 2 2 6 2 2 6 2 2 6
45321- 86 86 86 50 50 50 18 18 18 6 6 6
45322- 0 0 0 0 0 0 0 0 0 0 0 0
45323- 0 0 0 0 0 0 0 0 0 0 0 0
45324- 0 0 0 0 0 0 0 0 0 0 0 0
45325- 0 0 0 0 0 0 0 0 0 0 0 0
45326- 0 0 0 0 0 0 0 0 0 0 0 0
45327- 0 0 0 0 0 0 0 0 0 0 0 0
45328- 0 0 0 0 0 0 0 0 1 0 0 0
45329- 0 0 1 0 0 1 0 0 1 0 0 0
45330- 0 0 0 0 0 0 0 0 0 0 0 0
45331- 0 0 0 0 0 0 0 0 0 0 0 0
45332- 0 0 0 0 0 0 0 0 0 0 0 0
45333- 0 0 0 0 0 0 0 0 0 0 0 0
45334- 0 0 0 0 0 0 0 0 0 14 14 14
45335- 46 46 46 86 86 86 2 2 6 54 54 54
45336-218 218 218 195 195 195 226 226 226 246 246 246
45337- 58 58 58 2 2 6 2 2 6 30 30 30
45338-210 210 210 253 253 253 174 174 174 123 123 123
45339-221 221 221 234 234 234 74 74 74 2 2 6
45340- 2 2 6 2 2 6 2 2 6 2 2 6
45341- 70 70 70 58 58 58 22 22 22 6 6 6
45342- 0 0 0 0 0 0 0 0 0 0 0 0
45343- 0 0 0 0 0 0 0 0 0 0 0 0
45344- 0 0 0 0 0 0 0 0 0 0 0 0
45345- 0 0 0 0 0 0 0 0 0 0 0 0
45346- 0 0 0 0 0 0 0 0 0 0 0 0
45347- 0 0 0 0 0 0 0 0 0 0 0 0
45348- 0 0 0 0 0 0 0 0 0 0 0 0
45349- 0 0 0 0 0 0 0 0 0 0 0 0
45350- 0 0 0 0 0 0 0 0 0 0 0 0
45351- 0 0 0 0 0 0 0 0 0 0 0 0
45352- 0 0 0 0 0 0 0 0 0 0 0 0
45353- 0 0 0 0 0 0 0 0 0 0 0 0
45354- 0 0 0 0 0 0 0 0 0 14 14 14
45355- 46 46 46 82 82 82 2 2 6 106 106 106
45356-170 170 170 26 26 26 86 86 86 226 226 226
45357-123 123 123 10 10 10 14 14 14 46 46 46
45358-231 231 231 190 190 190 6 6 6 70 70 70
45359- 90 90 90 238 238 238 158 158 158 2 2 6
45360- 2 2 6 2 2 6 2 2 6 2 2 6
45361- 70 70 70 58 58 58 22 22 22 6 6 6
45362- 0 0 0 0 0 0 0 0 0 0 0 0
45363- 0 0 0 0 0 0 0 0 0 0 0 0
45364- 0 0 0 0 0 0 0 0 0 0 0 0
45365- 0 0 0 0 0 0 0 0 0 0 0 0
45366- 0 0 0 0 0 0 0 0 0 0 0 0
45367- 0 0 0 0 0 0 0 0 0 0 0 0
45368- 0 0 0 0 0 0 0 0 1 0 0 0
45369- 0 0 1 0 0 1 0 0 1 0 0 0
45370- 0 0 0 0 0 0 0 0 0 0 0 0
45371- 0 0 0 0 0 0 0 0 0 0 0 0
45372- 0 0 0 0 0 0 0 0 0 0 0 0
45373- 0 0 0 0 0 0 0 0 0 0 0 0
45374- 0 0 0 0 0 0 0 0 0 14 14 14
45375- 42 42 42 86 86 86 6 6 6 116 116 116
45376-106 106 106 6 6 6 70 70 70 149 149 149
45377-128 128 128 18 18 18 38 38 38 54 54 54
45378-221 221 221 106 106 106 2 2 6 14 14 14
45379- 46 46 46 190 190 190 198 198 198 2 2 6
45380- 2 2 6 2 2 6 2 2 6 2 2 6
45381- 74 74 74 62 62 62 22 22 22 6 6 6
45382- 0 0 0 0 0 0 0 0 0 0 0 0
45383- 0 0 0 0 0 0 0 0 0 0 0 0
45384- 0 0 0 0 0 0 0 0 0 0 0 0
45385- 0 0 0 0 0 0 0 0 0 0 0 0
45386- 0 0 0 0 0 0 0 0 0 0 0 0
45387- 0 0 0 0 0 0 0 0 0 0 0 0
45388- 0 0 0 0 0 0 0 0 1 0 0 0
45389- 0 0 1 0 0 0 0 0 1 0 0 0
45390- 0 0 0 0 0 0 0 0 0 0 0 0
45391- 0 0 0 0 0 0 0 0 0 0 0 0
45392- 0 0 0 0 0 0 0 0 0 0 0 0
45393- 0 0 0 0 0 0 0 0 0 0 0 0
45394- 0 0 0 0 0 0 0 0 0 14 14 14
45395- 42 42 42 94 94 94 14 14 14 101 101 101
45396-128 128 128 2 2 6 18 18 18 116 116 116
45397-118 98 46 121 92 8 121 92 8 98 78 10
45398-162 162 162 106 106 106 2 2 6 2 2 6
45399- 2 2 6 195 195 195 195 195 195 6 6 6
45400- 2 2 6 2 2 6 2 2 6 2 2 6
45401- 74 74 74 62 62 62 22 22 22 6 6 6
45402- 0 0 0 0 0 0 0 0 0 0 0 0
45403- 0 0 0 0 0 0 0 0 0 0 0 0
45404- 0 0 0 0 0 0 0 0 0 0 0 0
45405- 0 0 0 0 0 0 0 0 0 0 0 0
45406- 0 0 0 0 0 0 0 0 0 0 0 0
45407- 0 0 0 0 0 0 0 0 0 0 0 0
45408- 0 0 0 0 0 0 0 0 1 0 0 1
45409- 0 0 1 0 0 0 0 0 1 0 0 0
45410- 0 0 0 0 0 0 0 0 0 0 0 0
45411- 0 0 0 0 0 0 0 0 0 0 0 0
45412- 0 0 0 0 0 0 0 0 0 0 0 0
45413- 0 0 0 0 0 0 0 0 0 0 0 0
45414- 0 0 0 0 0 0 0 0 0 10 10 10
45415- 38 38 38 90 90 90 14 14 14 58 58 58
45416-210 210 210 26 26 26 54 38 6 154 114 10
45417-226 170 11 236 186 11 225 175 15 184 144 12
45418-215 174 15 175 146 61 37 26 9 2 2 6
45419- 70 70 70 246 246 246 138 138 138 2 2 6
45420- 2 2 6 2 2 6 2 2 6 2 2 6
45421- 70 70 70 66 66 66 26 26 26 6 6 6
45422- 0 0 0 0 0 0 0 0 0 0 0 0
45423- 0 0 0 0 0 0 0 0 0 0 0 0
45424- 0 0 0 0 0 0 0 0 0 0 0 0
45425- 0 0 0 0 0 0 0 0 0 0 0 0
45426- 0 0 0 0 0 0 0 0 0 0 0 0
45427- 0 0 0 0 0 0 0 0 0 0 0 0
45428- 0 0 0 0 0 0 0 0 0 0 0 0
45429- 0 0 0 0 0 0 0 0 0 0 0 0
45430- 0 0 0 0 0 0 0 0 0 0 0 0
45431- 0 0 0 0 0 0 0 0 0 0 0 0
45432- 0 0 0 0 0 0 0 0 0 0 0 0
45433- 0 0 0 0 0 0 0 0 0 0 0 0
45434- 0 0 0 0 0 0 0 0 0 10 10 10
45435- 38 38 38 86 86 86 14 14 14 10 10 10
45436-195 195 195 188 164 115 192 133 9 225 175 15
45437-239 182 13 234 190 10 232 195 16 232 200 30
45438-245 207 45 241 208 19 232 195 16 184 144 12
45439-218 194 134 211 206 186 42 42 42 2 2 6
45440- 2 2 6 2 2 6 2 2 6 2 2 6
45441- 50 50 50 74 74 74 30 30 30 6 6 6
45442- 0 0 0 0 0 0 0 0 0 0 0 0
45443- 0 0 0 0 0 0 0 0 0 0 0 0
45444- 0 0 0 0 0 0 0 0 0 0 0 0
45445- 0 0 0 0 0 0 0 0 0 0 0 0
45446- 0 0 0 0 0 0 0 0 0 0 0 0
45447- 0 0 0 0 0 0 0 0 0 0 0 0
45448- 0 0 0 0 0 0 0 0 0 0 0 0
45449- 0 0 0 0 0 0 0 0 0 0 0 0
45450- 0 0 0 0 0 0 0 0 0 0 0 0
45451- 0 0 0 0 0 0 0 0 0 0 0 0
45452- 0 0 0 0 0 0 0 0 0 0 0 0
45453- 0 0 0 0 0 0 0 0 0 0 0 0
45454- 0 0 0 0 0 0 0 0 0 10 10 10
45455- 34 34 34 86 86 86 14 14 14 2 2 6
45456-121 87 25 192 133 9 219 162 10 239 182 13
45457-236 186 11 232 195 16 241 208 19 244 214 54
45458-246 218 60 246 218 38 246 215 20 241 208 19
45459-241 208 19 226 184 13 121 87 25 2 2 6
45460- 2 2 6 2 2 6 2 2 6 2 2 6
45461- 50 50 50 82 82 82 34 34 34 10 10 10
45462- 0 0 0 0 0 0 0 0 0 0 0 0
45463- 0 0 0 0 0 0 0 0 0 0 0 0
45464- 0 0 0 0 0 0 0 0 0 0 0 0
45465- 0 0 0 0 0 0 0 0 0 0 0 0
45466- 0 0 0 0 0 0 0 0 0 0 0 0
45467- 0 0 0 0 0 0 0 0 0 0 0 0
45468- 0 0 0 0 0 0 0 0 0 0 0 0
45469- 0 0 0 0 0 0 0 0 0 0 0 0
45470- 0 0 0 0 0 0 0 0 0 0 0 0
45471- 0 0 0 0 0 0 0 0 0 0 0 0
45472- 0 0 0 0 0 0 0 0 0 0 0 0
45473- 0 0 0 0 0 0 0 0 0 0 0 0
45474- 0 0 0 0 0 0 0 0 0 10 10 10
45475- 34 34 34 82 82 82 30 30 30 61 42 6
45476-180 123 7 206 145 10 230 174 11 239 182 13
45477-234 190 10 238 202 15 241 208 19 246 218 74
45478-246 218 38 246 215 20 246 215 20 246 215 20
45479-226 184 13 215 174 15 184 144 12 6 6 6
45480- 2 2 6 2 2 6 2 2 6 2 2 6
45481- 26 26 26 94 94 94 42 42 42 14 14 14
45482- 0 0 0 0 0 0 0 0 0 0 0 0
45483- 0 0 0 0 0 0 0 0 0 0 0 0
45484- 0 0 0 0 0 0 0 0 0 0 0 0
45485- 0 0 0 0 0 0 0 0 0 0 0 0
45486- 0 0 0 0 0 0 0 0 0 0 0 0
45487- 0 0 0 0 0 0 0 0 0 0 0 0
45488- 0 0 0 0 0 0 0 0 0 0 0 0
45489- 0 0 0 0 0 0 0 0 0 0 0 0
45490- 0 0 0 0 0 0 0 0 0 0 0 0
45491- 0 0 0 0 0 0 0 0 0 0 0 0
45492- 0 0 0 0 0 0 0 0 0 0 0 0
45493- 0 0 0 0 0 0 0 0 0 0 0 0
45494- 0 0 0 0 0 0 0 0 0 10 10 10
45495- 30 30 30 78 78 78 50 50 50 104 69 6
45496-192 133 9 216 158 10 236 178 12 236 186 11
45497-232 195 16 241 208 19 244 214 54 245 215 43
45498-246 215 20 246 215 20 241 208 19 198 155 10
45499-200 144 11 216 158 10 156 118 10 2 2 6
45500- 2 2 6 2 2 6 2 2 6 2 2 6
45501- 6 6 6 90 90 90 54 54 54 18 18 18
45502- 6 6 6 0 0 0 0 0 0 0 0 0
45503- 0 0 0 0 0 0 0 0 0 0 0 0
45504- 0 0 0 0 0 0 0 0 0 0 0 0
45505- 0 0 0 0 0 0 0 0 0 0 0 0
45506- 0 0 0 0 0 0 0 0 0 0 0 0
45507- 0 0 0 0 0 0 0 0 0 0 0 0
45508- 0 0 0 0 0 0 0 0 0 0 0 0
45509- 0 0 0 0 0 0 0 0 0 0 0 0
45510- 0 0 0 0 0 0 0 0 0 0 0 0
45511- 0 0 0 0 0 0 0 0 0 0 0 0
45512- 0 0 0 0 0 0 0 0 0 0 0 0
45513- 0 0 0 0 0 0 0 0 0 0 0 0
45514- 0 0 0 0 0 0 0 0 0 10 10 10
45515- 30 30 30 78 78 78 46 46 46 22 22 22
45516-137 92 6 210 162 10 239 182 13 238 190 10
45517-238 202 15 241 208 19 246 215 20 246 215 20
45518-241 208 19 203 166 17 185 133 11 210 150 10
45519-216 158 10 210 150 10 102 78 10 2 2 6
45520- 6 6 6 54 54 54 14 14 14 2 2 6
45521- 2 2 6 62 62 62 74 74 74 30 30 30
45522- 10 10 10 0 0 0 0 0 0 0 0 0
45523- 0 0 0 0 0 0 0 0 0 0 0 0
45524- 0 0 0 0 0 0 0 0 0 0 0 0
45525- 0 0 0 0 0 0 0 0 0 0 0 0
45526- 0 0 0 0 0 0 0 0 0 0 0 0
45527- 0 0 0 0 0 0 0 0 0 0 0 0
45528- 0 0 0 0 0 0 0 0 0 0 0 0
45529- 0 0 0 0 0 0 0 0 0 0 0 0
45530- 0 0 0 0 0 0 0 0 0 0 0 0
45531- 0 0 0 0 0 0 0 0 0 0 0 0
45532- 0 0 0 0 0 0 0 0 0 0 0 0
45533- 0 0 0 0 0 0 0 0 0 0 0 0
45534- 0 0 0 0 0 0 0 0 0 10 10 10
45535- 34 34 34 78 78 78 50 50 50 6 6 6
45536- 94 70 30 139 102 15 190 146 13 226 184 13
45537-232 200 30 232 195 16 215 174 15 190 146 13
45538-168 122 10 192 133 9 210 150 10 213 154 11
45539-202 150 34 182 157 106 101 98 89 2 2 6
45540- 2 2 6 78 78 78 116 116 116 58 58 58
45541- 2 2 6 22 22 22 90 90 90 46 46 46
45542- 18 18 18 6 6 6 0 0 0 0 0 0
45543- 0 0 0 0 0 0 0 0 0 0 0 0
45544- 0 0 0 0 0 0 0 0 0 0 0 0
45545- 0 0 0 0 0 0 0 0 0 0 0 0
45546- 0 0 0 0 0 0 0 0 0 0 0 0
45547- 0 0 0 0 0 0 0 0 0 0 0 0
45548- 0 0 0 0 0 0 0 0 0 0 0 0
45549- 0 0 0 0 0 0 0 0 0 0 0 0
45550- 0 0 0 0 0 0 0 0 0 0 0 0
45551- 0 0 0 0 0 0 0 0 0 0 0 0
45552- 0 0 0 0 0 0 0 0 0 0 0 0
45553- 0 0 0 0 0 0 0 0 0 0 0 0
45554- 0 0 0 0 0 0 0 0 0 10 10 10
45555- 38 38 38 86 86 86 50 50 50 6 6 6
45556-128 128 128 174 154 114 156 107 11 168 122 10
45557-198 155 10 184 144 12 197 138 11 200 144 11
45558-206 145 10 206 145 10 197 138 11 188 164 115
45559-195 195 195 198 198 198 174 174 174 14 14 14
45560- 2 2 6 22 22 22 116 116 116 116 116 116
45561- 22 22 22 2 2 6 74 74 74 70 70 70
45562- 30 30 30 10 10 10 0 0 0 0 0 0
45563- 0 0 0 0 0 0 0 0 0 0 0 0
45564- 0 0 0 0 0 0 0 0 0 0 0 0
45565- 0 0 0 0 0 0 0 0 0 0 0 0
45566- 0 0 0 0 0 0 0 0 0 0 0 0
45567- 0 0 0 0 0 0 0 0 0 0 0 0
45568- 0 0 0 0 0 0 0 0 0 0 0 0
45569- 0 0 0 0 0 0 0 0 0 0 0 0
45570- 0 0 0 0 0 0 0 0 0 0 0 0
45571- 0 0 0 0 0 0 0 0 0 0 0 0
45572- 0 0 0 0 0 0 0 0 0 0 0 0
45573- 0 0 0 0 0 0 0 0 0 0 0 0
45574- 0 0 0 0 0 0 6 6 6 18 18 18
45575- 50 50 50 101 101 101 26 26 26 10 10 10
45576-138 138 138 190 190 190 174 154 114 156 107 11
45577-197 138 11 200 144 11 197 138 11 192 133 9
45578-180 123 7 190 142 34 190 178 144 187 187 187
45579-202 202 202 221 221 221 214 214 214 66 66 66
45580- 2 2 6 2 2 6 50 50 50 62 62 62
45581- 6 6 6 2 2 6 10 10 10 90 90 90
45582- 50 50 50 18 18 18 6 6 6 0 0 0
45583- 0 0 0 0 0 0 0 0 0 0 0 0
45584- 0 0 0 0 0 0 0 0 0 0 0 0
45585- 0 0 0 0 0 0 0 0 0 0 0 0
45586- 0 0 0 0 0 0 0 0 0 0 0 0
45587- 0 0 0 0 0 0 0 0 0 0 0 0
45588- 0 0 0 0 0 0 0 0 0 0 0 0
45589- 0 0 0 0 0 0 0 0 0 0 0 0
45590- 0 0 0 0 0 0 0 0 0 0 0 0
45591- 0 0 0 0 0 0 0 0 0 0 0 0
45592- 0 0 0 0 0 0 0 0 0 0 0 0
45593- 0 0 0 0 0 0 0 0 0 0 0 0
45594- 0 0 0 0 0 0 10 10 10 34 34 34
45595- 74 74 74 74 74 74 2 2 6 6 6 6
45596-144 144 144 198 198 198 190 190 190 178 166 146
45597-154 121 60 156 107 11 156 107 11 168 124 44
45598-174 154 114 187 187 187 190 190 190 210 210 210
45599-246 246 246 253 253 253 253 253 253 182 182 182
45600- 6 6 6 2 2 6 2 2 6 2 2 6
45601- 2 2 6 2 2 6 2 2 6 62 62 62
45602- 74 74 74 34 34 34 14 14 14 0 0 0
45603- 0 0 0 0 0 0 0 0 0 0 0 0
45604- 0 0 0 0 0 0 0 0 0 0 0 0
45605- 0 0 0 0 0 0 0 0 0 0 0 0
45606- 0 0 0 0 0 0 0 0 0 0 0 0
45607- 0 0 0 0 0 0 0 0 0 0 0 0
45608- 0 0 0 0 0 0 0 0 0 0 0 0
45609- 0 0 0 0 0 0 0 0 0 0 0 0
45610- 0 0 0 0 0 0 0 0 0 0 0 0
45611- 0 0 0 0 0 0 0 0 0 0 0 0
45612- 0 0 0 0 0 0 0 0 0 0 0 0
45613- 0 0 0 0 0 0 0 0 0 0 0 0
45614- 0 0 0 10 10 10 22 22 22 54 54 54
45615- 94 94 94 18 18 18 2 2 6 46 46 46
45616-234 234 234 221 221 221 190 190 190 190 190 190
45617-190 190 190 187 187 187 187 187 187 190 190 190
45618-190 190 190 195 195 195 214 214 214 242 242 242
45619-253 253 253 253 253 253 253 253 253 253 253 253
45620- 82 82 82 2 2 6 2 2 6 2 2 6
45621- 2 2 6 2 2 6 2 2 6 14 14 14
45622- 86 86 86 54 54 54 22 22 22 6 6 6
45623- 0 0 0 0 0 0 0 0 0 0 0 0
45624- 0 0 0 0 0 0 0 0 0 0 0 0
45625- 0 0 0 0 0 0 0 0 0 0 0 0
45626- 0 0 0 0 0 0 0 0 0 0 0 0
45627- 0 0 0 0 0 0 0 0 0 0 0 0
45628- 0 0 0 0 0 0 0 0 0 0 0 0
45629- 0 0 0 0 0 0 0 0 0 0 0 0
45630- 0 0 0 0 0 0 0 0 0 0 0 0
45631- 0 0 0 0 0 0 0 0 0 0 0 0
45632- 0 0 0 0 0 0 0 0 0 0 0 0
45633- 0 0 0 0 0 0 0 0 0 0 0 0
45634- 6 6 6 18 18 18 46 46 46 90 90 90
45635- 46 46 46 18 18 18 6 6 6 182 182 182
45636-253 253 253 246 246 246 206 206 206 190 190 190
45637-190 190 190 190 190 190 190 190 190 190 190 190
45638-206 206 206 231 231 231 250 250 250 253 253 253
45639-253 253 253 253 253 253 253 253 253 253 253 253
45640-202 202 202 14 14 14 2 2 6 2 2 6
45641- 2 2 6 2 2 6 2 2 6 2 2 6
45642- 42 42 42 86 86 86 42 42 42 18 18 18
45643- 6 6 6 0 0 0 0 0 0 0 0 0
45644- 0 0 0 0 0 0 0 0 0 0 0 0
45645- 0 0 0 0 0 0 0 0 0 0 0 0
45646- 0 0 0 0 0 0 0 0 0 0 0 0
45647- 0 0 0 0 0 0 0 0 0 0 0 0
45648- 0 0 0 0 0 0 0 0 0 0 0 0
45649- 0 0 0 0 0 0 0 0 0 0 0 0
45650- 0 0 0 0 0 0 0 0 0 0 0 0
45651- 0 0 0 0 0 0 0 0 0 0 0 0
45652- 0 0 0 0 0 0 0 0 0 0 0 0
45653- 0 0 0 0 0 0 0 0 0 6 6 6
45654- 14 14 14 38 38 38 74 74 74 66 66 66
45655- 2 2 6 6 6 6 90 90 90 250 250 250
45656-253 253 253 253 253 253 238 238 238 198 198 198
45657-190 190 190 190 190 190 195 195 195 221 221 221
45658-246 246 246 253 253 253 253 253 253 253 253 253
45659-253 253 253 253 253 253 253 253 253 253 253 253
45660-253 253 253 82 82 82 2 2 6 2 2 6
45661- 2 2 6 2 2 6 2 2 6 2 2 6
45662- 2 2 6 78 78 78 70 70 70 34 34 34
45663- 14 14 14 6 6 6 0 0 0 0 0 0
45664- 0 0 0 0 0 0 0 0 0 0 0 0
45665- 0 0 0 0 0 0 0 0 0 0 0 0
45666- 0 0 0 0 0 0 0 0 0 0 0 0
45667- 0 0 0 0 0 0 0 0 0 0 0 0
45668- 0 0 0 0 0 0 0 0 0 0 0 0
45669- 0 0 0 0 0 0 0 0 0 0 0 0
45670- 0 0 0 0 0 0 0 0 0 0 0 0
45671- 0 0 0 0 0 0 0 0 0 0 0 0
45672- 0 0 0 0 0 0 0 0 0 0 0 0
45673- 0 0 0 0 0 0 0 0 0 14 14 14
45674- 34 34 34 66 66 66 78 78 78 6 6 6
45675- 2 2 6 18 18 18 218 218 218 253 253 253
45676-253 253 253 253 253 253 253 253 253 246 246 246
45677-226 226 226 231 231 231 246 246 246 253 253 253
45678-253 253 253 253 253 253 253 253 253 253 253 253
45679-253 253 253 253 253 253 253 253 253 253 253 253
45680-253 253 253 178 178 178 2 2 6 2 2 6
45681- 2 2 6 2 2 6 2 2 6 2 2 6
45682- 2 2 6 18 18 18 90 90 90 62 62 62
45683- 30 30 30 10 10 10 0 0 0 0 0 0
45684- 0 0 0 0 0 0 0 0 0 0 0 0
45685- 0 0 0 0 0 0 0 0 0 0 0 0
45686- 0 0 0 0 0 0 0 0 0 0 0 0
45687- 0 0 0 0 0 0 0 0 0 0 0 0
45688- 0 0 0 0 0 0 0 0 0 0 0 0
45689- 0 0 0 0 0 0 0 0 0 0 0 0
45690- 0 0 0 0 0 0 0 0 0 0 0 0
45691- 0 0 0 0 0 0 0 0 0 0 0 0
45692- 0 0 0 0 0 0 0 0 0 0 0 0
45693- 0 0 0 0 0 0 10 10 10 26 26 26
45694- 58 58 58 90 90 90 18 18 18 2 2 6
45695- 2 2 6 110 110 110 253 253 253 253 253 253
45696-253 253 253 253 253 253 253 253 253 253 253 253
45697-250 250 250 253 253 253 253 253 253 253 253 253
45698-253 253 253 253 253 253 253 253 253 253 253 253
45699-253 253 253 253 253 253 253 253 253 253 253 253
45700-253 253 253 231 231 231 18 18 18 2 2 6
45701- 2 2 6 2 2 6 2 2 6 2 2 6
45702- 2 2 6 2 2 6 18 18 18 94 94 94
45703- 54 54 54 26 26 26 10 10 10 0 0 0
45704- 0 0 0 0 0 0 0 0 0 0 0 0
45705- 0 0 0 0 0 0 0 0 0 0 0 0
45706- 0 0 0 0 0 0 0 0 0 0 0 0
45707- 0 0 0 0 0 0 0 0 0 0 0 0
45708- 0 0 0 0 0 0 0 0 0 0 0 0
45709- 0 0 0 0 0 0 0 0 0 0 0 0
45710- 0 0 0 0 0 0 0 0 0 0 0 0
45711- 0 0 0 0 0 0 0 0 0 0 0 0
45712- 0 0 0 0 0 0 0 0 0 0 0 0
45713- 0 0 0 6 6 6 22 22 22 50 50 50
45714- 90 90 90 26 26 26 2 2 6 2 2 6
45715- 14 14 14 195 195 195 250 250 250 253 253 253
45716-253 253 253 253 253 253 253 253 253 253 253 253
45717-253 253 253 253 253 253 253 253 253 253 253 253
45718-253 253 253 253 253 253 253 253 253 253 253 253
45719-253 253 253 253 253 253 253 253 253 253 253 253
45720-250 250 250 242 242 242 54 54 54 2 2 6
45721- 2 2 6 2 2 6 2 2 6 2 2 6
45722- 2 2 6 2 2 6 2 2 6 38 38 38
45723- 86 86 86 50 50 50 22 22 22 6 6 6
45724- 0 0 0 0 0 0 0 0 0 0 0 0
45725- 0 0 0 0 0 0 0 0 0 0 0 0
45726- 0 0 0 0 0 0 0 0 0 0 0 0
45727- 0 0 0 0 0 0 0 0 0 0 0 0
45728- 0 0 0 0 0 0 0 0 0 0 0 0
45729- 0 0 0 0 0 0 0 0 0 0 0 0
45730- 0 0 0 0 0 0 0 0 0 0 0 0
45731- 0 0 0 0 0 0 0 0 0 0 0 0
45732- 0 0 0 0 0 0 0 0 0 0 0 0
45733- 6 6 6 14 14 14 38 38 38 82 82 82
45734- 34 34 34 2 2 6 2 2 6 2 2 6
45735- 42 42 42 195 195 195 246 246 246 253 253 253
45736-253 253 253 253 253 253 253 253 253 250 250 250
45737-242 242 242 242 242 242 250 250 250 253 253 253
45738-253 253 253 253 253 253 253 253 253 253 253 253
45739-253 253 253 250 250 250 246 246 246 238 238 238
45740-226 226 226 231 231 231 101 101 101 6 6 6
45741- 2 2 6 2 2 6 2 2 6 2 2 6
45742- 2 2 6 2 2 6 2 2 6 2 2 6
45743- 38 38 38 82 82 82 42 42 42 14 14 14
45744- 6 6 6 0 0 0 0 0 0 0 0 0
45745- 0 0 0 0 0 0 0 0 0 0 0 0
45746- 0 0 0 0 0 0 0 0 0 0 0 0
45747- 0 0 0 0 0 0 0 0 0 0 0 0
45748- 0 0 0 0 0 0 0 0 0 0 0 0
45749- 0 0 0 0 0 0 0 0 0 0 0 0
45750- 0 0 0 0 0 0 0 0 0 0 0 0
45751- 0 0 0 0 0 0 0 0 0 0 0 0
45752- 0 0 0 0 0 0 0 0 0 0 0 0
45753- 10 10 10 26 26 26 62 62 62 66 66 66
45754- 2 2 6 2 2 6 2 2 6 6 6 6
45755- 70 70 70 170 170 170 206 206 206 234 234 234
45756-246 246 246 250 250 250 250 250 250 238 238 238
45757-226 226 226 231 231 231 238 238 238 250 250 250
45758-250 250 250 250 250 250 246 246 246 231 231 231
45759-214 214 214 206 206 206 202 202 202 202 202 202
45760-198 198 198 202 202 202 182 182 182 18 18 18
45761- 2 2 6 2 2 6 2 2 6 2 2 6
45762- 2 2 6 2 2 6 2 2 6 2 2 6
45763- 2 2 6 62 62 62 66 66 66 30 30 30
45764- 10 10 10 0 0 0 0 0 0 0 0 0
45765- 0 0 0 0 0 0 0 0 0 0 0 0
45766- 0 0 0 0 0 0 0 0 0 0 0 0
45767- 0 0 0 0 0 0 0 0 0 0 0 0
45768- 0 0 0 0 0 0 0 0 0 0 0 0
45769- 0 0 0 0 0 0 0 0 0 0 0 0
45770- 0 0 0 0 0 0 0 0 0 0 0 0
45771- 0 0 0 0 0 0 0 0 0 0 0 0
45772- 0 0 0 0 0 0 0 0 0 0 0 0
45773- 14 14 14 42 42 42 82 82 82 18 18 18
45774- 2 2 6 2 2 6 2 2 6 10 10 10
45775- 94 94 94 182 182 182 218 218 218 242 242 242
45776-250 250 250 253 253 253 253 253 253 250 250 250
45777-234 234 234 253 253 253 253 253 253 253 253 253
45778-253 253 253 253 253 253 253 253 253 246 246 246
45779-238 238 238 226 226 226 210 210 210 202 202 202
45780-195 195 195 195 195 195 210 210 210 158 158 158
45781- 6 6 6 14 14 14 50 50 50 14 14 14
45782- 2 2 6 2 2 6 2 2 6 2 2 6
45783- 2 2 6 6 6 6 86 86 86 46 46 46
45784- 18 18 18 6 6 6 0 0 0 0 0 0
45785- 0 0 0 0 0 0 0 0 0 0 0 0
45786- 0 0 0 0 0 0 0 0 0 0 0 0
45787- 0 0 0 0 0 0 0 0 0 0 0 0
45788- 0 0 0 0 0 0 0 0 0 0 0 0
45789- 0 0 0 0 0 0 0 0 0 0 0 0
45790- 0 0 0 0 0 0 0 0 0 0 0 0
45791- 0 0 0 0 0 0 0 0 0 0 0 0
45792- 0 0 0 0 0 0 0 0 0 6 6 6
45793- 22 22 22 54 54 54 70 70 70 2 2 6
45794- 2 2 6 10 10 10 2 2 6 22 22 22
45795-166 166 166 231 231 231 250 250 250 253 253 253
45796-253 253 253 253 253 253 253 253 253 250 250 250
45797-242 242 242 253 253 253 253 253 253 253 253 253
45798-253 253 253 253 253 253 253 253 253 253 253 253
45799-253 253 253 253 253 253 253 253 253 246 246 246
45800-231 231 231 206 206 206 198 198 198 226 226 226
45801- 94 94 94 2 2 6 6 6 6 38 38 38
45802- 30 30 30 2 2 6 2 2 6 2 2 6
45803- 2 2 6 2 2 6 62 62 62 66 66 66
45804- 26 26 26 10 10 10 0 0 0 0 0 0
45805- 0 0 0 0 0 0 0 0 0 0 0 0
45806- 0 0 0 0 0 0 0 0 0 0 0 0
45807- 0 0 0 0 0 0 0 0 0 0 0 0
45808- 0 0 0 0 0 0 0 0 0 0 0 0
45809- 0 0 0 0 0 0 0 0 0 0 0 0
45810- 0 0 0 0 0 0 0 0 0 0 0 0
45811- 0 0 0 0 0 0 0 0 0 0 0 0
45812- 0 0 0 0 0 0 0 0 0 10 10 10
45813- 30 30 30 74 74 74 50 50 50 2 2 6
45814- 26 26 26 26 26 26 2 2 6 106 106 106
45815-238 238 238 253 253 253 253 253 253 253 253 253
45816-253 253 253 253 253 253 253 253 253 253 253 253
45817-253 253 253 253 253 253 253 253 253 253 253 253
45818-253 253 253 253 253 253 253 253 253 253 253 253
45819-253 253 253 253 253 253 253 253 253 253 253 253
45820-253 253 253 246 246 246 218 218 218 202 202 202
45821-210 210 210 14 14 14 2 2 6 2 2 6
45822- 30 30 30 22 22 22 2 2 6 2 2 6
45823- 2 2 6 2 2 6 18 18 18 86 86 86
45824- 42 42 42 14 14 14 0 0 0 0 0 0
45825- 0 0 0 0 0 0 0 0 0 0 0 0
45826- 0 0 0 0 0 0 0 0 0 0 0 0
45827- 0 0 0 0 0 0 0 0 0 0 0 0
45828- 0 0 0 0 0 0 0 0 0 0 0 0
45829- 0 0 0 0 0 0 0 0 0 0 0 0
45830- 0 0 0 0 0 0 0 0 0 0 0 0
45831- 0 0 0 0 0 0 0 0 0 0 0 0
45832- 0 0 0 0 0 0 0 0 0 14 14 14
45833- 42 42 42 90 90 90 22 22 22 2 2 6
45834- 42 42 42 2 2 6 18 18 18 218 218 218
45835-253 253 253 253 253 253 253 253 253 253 253 253
45836-253 253 253 253 253 253 253 253 253 253 253 253
45837-253 253 253 253 253 253 253 253 253 253 253 253
45838-253 253 253 253 253 253 253 253 253 253 253 253
45839-253 253 253 253 253 253 253 253 253 253 253 253
45840-253 253 253 253 253 253 250 250 250 221 221 221
45841-218 218 218 101 101 101 2 2 6 14 14 14
45842- 18 18 18 38 38 38 10 10 10 2 2 6
45843- 2 2 6 2 2 6 2 2 6 78 78 78
45844- 58 58 58 22 22 22 6 6 6 0 0 0
45845- 0 0 0 0 0 0 0 0 0 0 0 0
45846- 0 0 0 0 0 0 0 0 0 0 0 0
45847- 0 0 0 0 0 0 0 0 0 0 0 0
45848- 0 0 0 0 0 0 0 0 0 0 0 0
45849- 0 0 0 0 0 0 0 0 0 0 0 0
45850- 0 0 0 0 0 0 0 0 0 0 0 0
45851- 0 0 0 0 0 0 0 0 0 0 0 0
45852- 0 0 0 0 0 0 6 6 6 18 18 18
45853- 54 54 54 82 82 82 2 2 6 26 26 26
45854- 22 22 22 2 2 6 123 123 123 253 253 253
45855-253 253 253 253 253 253 253 253 253 253 253 253
45856-253 253 253 253 253 253 253 253 253 253 253 253
45857-253 253 253 253 253 253 253 253 253 253 253 253
45858-253 253 253 253 253 253 253 253 253 253 253 253
45859-253 253 253 253 253 253 253 253 253 253 253 253
45860-253 253 253 253 253 253 253 253 253 250 250 250
45861-238 238 238 198 198 198 6 6 6 38 38 38
45862- 58 58 58 26 26 26 38 38 38 2 2 6
45863- 2 2 6 2 2 6 2 2 6 46 46 46
45864- 78 78 78 30 30 30 10 10 10 0 0 0
45865- 0 0 0 0 0 0 0 0 0 0 0 0
45866- 0 0 0 0 0 0 0 0 0 0 0 0
45867- 0 0 0 0 0 0 0 0 0 0 0 0
45868- 0 0 0 0 0 0 0 0 0 0 0 0
45869- 0 0 0 0 0 0 0 0 0 0 0 0
45870- 0 0 0 0 0 0 0 0 0 0 0 0
45871- 0 0 0 0 0 0 0 0 0 0 0 0
45872- 0 0 0 0 0 0 10 10 10 30 30 30
45873- 74 74 74 58 58 58 2 2 6 42 42 42
45874- 2 2 6 22 22 22 231 231 231 253 253 253
45875-253 253 253 253 253 253 253 253 253 253 253 253
45876-253 253 253 253 253 253 253 253 253 250 250 250
45877-253 253 253 253 253 253 253 253 253 253 253 253
45878-253 253 253 253 253 253 253 253 253 253 253 253
45879-253 253 253 253 253 253 253 253 253 253 253 253
45880-253 253 253 253 253 253 253 253 253 253 253 253
45881-253 253 253 246 246 246 46 46 46 38 38 38
45882- 42 42 42 14 14 14 38 38 38 14 14 14
45883- 2 2 6 2 2 6 2 2 6 6 6 6
45884- 86 86 86 46 46 46 14 14 14 0 0 0
45885- 0 0 0 0 0 0 0 0 0 0 0 0
45886- 0 0 0 0 0 0 0 0 0 0 0 0
45887- 0 0 0 0 0 0 0 0 0 0 0 0
45888- 0 0 0 0 0 0 0 0 0 0 0 0
45889- 0 0 0 0 0 0 0 0 0 0 0 0
45890- 0 0 0 0 0 0 0 0 0 0 0 0
45891- 0 0 0 0 0 0 0 0 0 0 0 0
45892- 0 0 0 6 6 6 14 14 14 42 42 42
45893- 90 90 90 18 18 18 18 18 18 26 26 26
45894- 2 2 6 116 116 116 253 253 253 253 253 253
45895-253 253 253 253 253 253 253 253 253 253 253 253
45896-253 253 253 253 253 253 250 250 250 238 238 238
45897-253 253 253 253 253 253 253 253 253 253 253 253
45898-253 253 253 253 253 253 253 253 253 253 253 253
45899-253 253 253 253 253 253 253 253 253 253 253 253
45900-253 253 253 253 253 253 253 253 253 253 253 253
45901-253 253 253 253 253 253 94 94 94 6 6 6
45902- 2 2 6 2 2 6 10 10 10 34 34 34
45903- 2 2 6 2 2 6 2 2 6 2 2 6
45904- 74 74 74 58 58 58 22 22 22 6 6 6
45905- 0 0 0 0 0 0 0 0 0 0 0 0
45906- 0 0 0 0 0 0 0 0 0 0 0 0
45907- 0 0 0 0 0 0 0 0 0 0 0 0
45908- 0 0 0 0 0 0 0 0 0 0 0 0
45909- 0 0 0 0 0 0 0 0 0 0 0 0
45910- 0 0 0 0 0 0 0 0 0 0 0 0
45911- 0 0 0 0 0 0 0 0 0 0 0 0
45912- 0 0 0 10 10 10 26 26 26 66 66 66
45913- 82 82 82 2 2 6 38 38 38 6 6 6
45914- 14 14 14 210 210 210 253 253 253 253 253 253
45915-253 253 253 253 253 253 253 253 253 253 253 253
45916-253 253 253 253 253 253 246 246 246 242 242 242
45917-253 253 253 253 253 253 253 253 253 253 253 253
45918-253 253 253 253 253 253 253 253 253 253 253 253
45919-253 253 253 253 253 253 253 253 253 253 253 253
45920-253 253 253 253 253 253 253 253 253 253 253 253
45921-253 253 253 253 253 253 144 144 144 2 2 6
45922- 2 2 6 2 2 6 2 2 6 46 46 46
45923- 2 2 6 2 2 6 2 2 6 2 2 6
45924- 42 42 42 74 74 74 30 30 30 10 10 10
45925- 0 0 0 0 0 0 0 0 0 0 0 0
45926- 0 0 0 0 0 0 0 0 0 0 0 0
45927- 0 0 0 0 0 0 0 0 0 0 0 0
45928- 0 0 0 0 0 0 0 0 0 0 0 0
45929- 0 0 0 0 0 0 0 0 0 0 0 0
45930- 0 0 0 0 0 0 0 0 0 0 0 0
45931- 0 0 0 0 0 0 0 0 0 0 0 0
45932- 6 6 6 14 14 14 42 42 42 90 90 90
45933- 26 26 26 6 6 6 42 42 42 2 2 6
45934- 74 74 74 250 250 250 253 253 253 253 253 253
45935-253 253 253 253 253 253 253 253 253 253 253 253
45936-253 253 253 253 253 253 242 242 242 242 242 242
45937-253 253 253 253 253 253 253 253 253 253 253 253
45938-253 253 253 253 253 253 253 253 253 253 253 253
45939-253 253 253 253 253 253 253 253 253 253 253 253
45940-253 253 253 253 253 253 253 253 253 253 253 253
45941-253 253 253 253 253 253 182 182 182 2 2 6
45942- 2 2 6 2 2 6 2 2 6 46 46 46
45943- 2 2 6 2 2 6 2 2 6 2 2 6
45944- 10 10 10 86 86 86 38 38 38 10 10 10
45945- 0 0 0 0 0 0 0 0 0 0 0 0
45946- 0 0 0 0 0 0 0 0 0 0 0 0
45947- 0 0 0 0 0 0 0 0 0 0 0 0
45948- 0 0 0 0 0 0 0 0 0 0 0 0
45949- 0 0 0 0 0 0 0 0 0 0 0 0
45950- 0 0 0 0 0 0 0 0 0 0 0 0
45951- 0 0 0 0 0 0 0 0 0 0 0 0
45952- 10 10 10 26 26 26 66 66 66 82 82 82
45953- 2 2 6 22 22 22 18 18 18 2 2 6
45954-149 149 149 253 253 253 253 253 253 253 253 253
45955-253 253 253 253 253 253 253 253 253 253 253 253
45956-253 253 253 253 253 253 234 234 234 242 242 242
45957-253 253 253 253 253 253 253 253 253 253 253 253
45958-253 253 253 253 253 253 253 253 253 253 253 253
45959-253 253 253 253 253 253 253 253 253 253 253 253
45960-253 253 253 253 253 253 253 253 253 253 253 253
45961-253 253 253 253 253 253 206 206 206 2 2 6
45962- 2 2 6 2 2 6 2 2 6 38 38 38
45963- 2 2 6 2 2 6 2 2 6 2 2 6
45964- 6 6 6 86 86 86 46 46 46 14 14 14
45965- 0 0 0 0 0 0 0 0 0 0 0 0
45966- 0 0 0 0 0 0 0 0 0 0 0 0
45967- 0 0 0 0 0 0 0 0 0 0 0 0
45968- 0 0 0 0 0 0 0 0 0 0 0 0
45969- 0 0 0 0 0 0 0 0 0 0 0 0
45970- 0 0 0 0 0 0 0 0 0 0 0 0
45971- 0 0 0 0 0 0 0 0 0 6 6 6
45972- 18 18 18 46 46 46 86 86 86 18 18 18
45973- 2 2 6 34 34 34 10 10 10 6 6 6
45974-210 210 210 253 253 253 253 253 253 253 253 253
45975-253 253 253 253 253 253 253 253 253 253 253 253
45976-253 253 253 253 253 253 234 234 234 242 242 242
45977-253 253 253 253 253 253 253 253 253 253 253 253
45978-253 253 253 253 253 253 253 253 253 253 253 253
45979-253 253 253 253 253 253 253 253 253 253 253 253
45980-253 253 253 253 253 253 253 253 253 253 253 253
45981-253 253 253 253 253 253 221 221 221 6 6 6
45982- 2 2 6 2 2 6 6 6 6 30 30 30
45983- 2 2 6 2 2 6 2 2 6 2 2 6
45984- 2 2 6 82 82 82 54 54 54 18 18 18
45985- 6 6 6 0 0 0 0 0 0 0 0 0
45986- 0 0 0 0 0 0 0 0 0 0 0 0
45987- 0 0 0 0 0 0 0 0 0 0 0 0
45988- 0 0 0 0 0 0 0 0 0 0 0 0
45989- 0 0 0 0 0 0 0 0 0 0 0 0
45990- 0 0 0 0 0 0 0 0 0 0 0 0
45991- 0 0 0 0 0 0 0 0 0 10 10 10
45992- 26 26 26 66 66 66 62 62 62 2 2 6
45993- 2 2 6 38 38 38 10 10 10 26 26 26
45994-238 238 238 253 253 253 253 253 253 253 253 253
45995-253 253 253 253 253 253 253 253 253 253 253 253
45996-253 253 253 253 253 253 231 231 231 238 238 238
45997-253 253 253 253 253 253 253 253 253 253 253 253
45998-253 253 253 253 253 253 253 253 253 253 253 253
45999-253 253 253 253 253 253 253 253 253 253 253 253
46000-253 253 253 253 253 253 253 253 253 253 253 253
46001-253 253 253 253 253 253 231 231 231 6 6 6
46002- 2 2 6 2 2 6 10 10 10 30 30 30
46003- 2 2 6 2 2 6 2 2 6 2 2 6
46004- 2 2 6 66 66 66 58 58 58 22 22 22
46005- 6 6 6 0 0 0 0 0 0 0 0 0
46006- 0 0 0 0 0 0 0 0 0 0 0 0
46007- 0 0 0 0 0 0 0 0 0 0 0 0
46008- 0 0 0 0 0 0 0 0 0 0 0 0
46009- 0 0 0 0 0 0 0 0 0 0 0 0
46010- 0 0 0 0 0 0 0 0 0 0 0 0
46011- 0 0 0 0 0 0 0 0 0 10 10 10
46012- 38 38 38 78 78 78 6 6 6 2 2 6
46013- 2 2 6 46 46 46 14 14 14 42 42 42
46014-246 246 246 253 253 253 253 253 253 253 253 253
46015-253 253 253 253 253 253 253 253 253 253 253 253
46016-253 253 253 253 253 253 231 231 231 242 242 242
46017-253 253 253 253 253 253 253 253 253 253 253 253
46018-253 253 253 253 253 253 253 253 253 253 253 253
46019-253 253 253 253 253 253 253 253 253 253 253 253
46020-253 253 253 253 253 253 253 253 253 253 253 253
46021-253 253 253 253 253 253 234 234 234 10 10 10
46022- 2 2 6 2 2 6 22 22 22 14 14 14
46023- 2 2 6 2 2 6 2 2 6 2 2 6
46024- 2 2 6 66 66 66 62 62 62 22 22 22
46025- 6 6 6 0 0 0 0 0 0 0 0 0
46026- 0 0 0 0 0 0 0 0 0 0 0 0
46027- 0 0 0 0 0 0 0 0 0 0 0 0
46028- 0 0 0 0 0 0 0 0 0 0 0 0
46029- 0 0 0 0 0 0 0 0 0 0 0 0
46030- 0 0 0 0 0 0 0 0 0 0 0 0
46031- 0 0 0 0 0 0 6 6 6 18 18 18
46032- 50 50 50 74 74 74 2 2 6 2 2 6
46033- 14 14 14 70 70 70 34 34 34 62 62 62
46034-250 250 250 253 253 253 253 253 253 253 253 253
46035-253 253 253 253 253 253 253 253 253 253 253 253
46036-253 253 253 253 253 253 231 231 231 246 246 246
46037-253 253 253 253 253 253 253 253 253 253 253 253
46038-253 253 253 253 253 253 253 253 253 253 253 253
46039-253 253 253 253 253 253 253 253 253 253 253 253
46040-253 253 253 253 253 253 253 253 253 253 253 253
46041-253 253 253 253 253 253 234 234 234 14 14 14
46042- 2 2 6 2 2 6 30 30 30 2 2 6
46043- 2 2 6 2 2 6 2 2 6 2 2 6
46044- 2 2 6 66 66 66 62 62 62 22 22 22
46045- 6 6 6 0 0 0 0 0 0 0 0 0
46046- 0 0 0 0 0 0 0 0 0 0 0 0
46047- 0 0 0 0 0 0 0 0 0 0 0 0
46048- 0 0 0 0 0 0 0 0 0 0 0 0
46049- 0 0 0 0 0 0 0 0 0 0 0 0
46050- 0 0 0 0 0 0 0 0 0 0 0 0
46051- 0 0 0 0 0 0 6 6 6 18 18 18
46052- 54 54 54 62 62 62 2 2 6 2 2 6
46053- 2 2 6 30 30 30 46 46 46 70 70 70
46054-250 250 250 253 253 253 253 253 253 253 253 253
46055-253 253 253 253 253 253 253 253 253 253 253 253
46056-253 253 253 253 253 253 231 231 231 246 246 246
46057-253 253 253 253 253 253 253 253 253 253 253 253
46058-253 253 253 253 253 253 253 253 253 253 253 253
46059-253 253 253 253 253 253 253 253 253 253 253 253
46060-253 253 253 253 253 253 253 253 253 253 253 253
46061-253 253 253 253 253 253 226 226 226 10 10 10
46062- 2 2 6 6 6 6 30 30 30 2 2 6
46063- 2 2 6 2 2 6 2 2 6 2 2 6
46064- 2 2 6 66 66 66 58 58 58 22 22 22
46065- 6 6 6 0 0 0 0 0 0 0 0 0
46066- 0 0 0 0 0 0 0 0 0 0 0 0
46067- 0 0 0 0 0 0 0 0 0 0 0 0
46068- 0 0 0 0 0 0 0 0 0 0 0 0
46069- 0 0 0 0 0 0 0 0 0 0 0 0
46070- 0 0 0 0 0 0 0 0 0 0 0 0
46071- 0 0 0 0 0 0 6 6 6 22 22 22
46072- 58 58 58 62 62 62 2 2 6 2 2 6
46073- 2 2 6 2 2 6 30 30 30 78 78 78
46074-250 250 250 253 253 253 253 253 253 253 253 253
46075-253 253 253 253 253 253 253 253 253 253 253 253
46076-253 253 253 253 253 253 231 231 231 246 246 246
46077-253 253 253 253 253 253 253 253 253 253 253 253
46078-253 253 253 253 253 253 253 253 253 253 253 253
46079-253 253 253 253 253 253 253 253 253 253 253 253
46080-253 253 253 253 253 253 253 253 253 253 253 253
46081-253 253 253 253 253 253 206 206 206 2 2 6
46082- 22 22 22 34 34 34 18 14 6 22 22 22
46083- 26 26 26 18 18 18 6 6 6 2 2 6
46084- 2 2 6 82 82 82 54 54 54 18 18 18
46085- 6 6 6 0 0 0 0 0 0 0 0 0
46086- 0 0 0 0 0 0 0 0 0 0 0 0
46087- 0 0 0 0 0 0 0 0 0 0 0 0
46088- 0 0 0 0 0 0 0 0 0 0 0 0
46089- 0 0 0 0 0 0 0 0 0 0 0 0
46090- 0 0 0 0 0 0 0 0 0 0 0 0
46091- 0 0 0 0 0 0 6 6 6 26 26 26
46092- 62 62 62 106 106 106 74 54 14 185 133 11
46093-210 162 10 121 92 8 6 6 6 62 62 62
46094-238 238 238 253 253 253 253 253 253 253 253 253
46095-253 253 253 253 253 253 253 253 253 253 253 253
46096-253 253 253 253 253 253 231 231 231 246 246 246
46097-253 253 253 253 253 253 253 253 253 253 253 253
46098-253 253 253 253 253 253 253 253 253 253 253 253
46099-253 253 253 253 253 253 253 253 253 253 253 253
46100-253 253 253 253 253 253 253 253 253 253 253 253
46101-253 253 253 253 253 253 158 158 158 18 18 18
46102- 14 14 14 2 2 6 2 2 6 2 2 6
46103- 6 6 6 18 18 18 66 66 66 38 38 38
46104- 6 6 6 94 94 94 50 50 50 18 18 18
46105- 6 6 6 0 0 0 0 0 0 0 0 0
46106- 0 0 0 0 0 0 0 0 0 0 0 0
46107- 0 0 0 0 0 0 0 0 0 0 0 0
46108- 0 0 0 0 0 0 0 0 0 0 0 0
46109- 0 0 0 0 0 0 0 0 0 0 0 0
46110- 0 0 0 0 0 0 0 0 0 6 6 6
46111- 10 10 10 10 10 10 18 18 18 38 38 38
46112- 78 78 78 142 134 106 216 158 10 242 186 14
46113-246 190 14 246 190 14 156 118 10 10 10 10
46114- 90 90 90 238 238 238 253 253 253 253 253 253
46115-253 253 253 253 253 253 253 253 253 253 253 253
46116-253 253 253 253 253 253 231 231 231 250 250 250
46117-253 253 253 253 253 253 253 253 253 253 253 253
46118-253 253 253 253 253 253 253 253 253 253 253 253
46119-253 253 253 253 253 253 253 253 253 253 253 253
46120-253 253 253 253 253 253 253 253 253 246 230 190
46121-238 204 91 238 204 91 181 142 44 37 26 9
46122- 2 2 6 2 2 6 2 2 6 2 2 6
46123- 2 2 6 2 2 6 38 38 38 46 46 46
46124- 26 26 26 106 106 106 54 54 54 18 18 18
46125- 6 6 6 0 0 0 0 0 0 0 0 0
46126- 0 0 0 0 0 0 0 0 0 0 0 0
46127- 0 0 0 0 0 0 0 0 0 0 0 0
46128- 0 0 0 0 0 0 0 0 0 0 0 0
46129- 0 0 0 0 0 0 0 0 0 0 0 0
46130- 0 0 0 6 6 6 14 14 14 22 22 22
46131- 30 30 30 38 38 38 50 50 50 70 70 70
46132-106 106 106 190 142 34 226 170 11 242 186 14
46133-246 190 14 246 190 14 246 190 14 154 114 10
46134- 6 6 6 74 74 74 226 226 226 253 253 253
46135-253 253 253 253 253 253 253 253 253 253 253 253
46136-253 253 253 253 253 253 231 231 231 250 250 250
46137-253 253 253 253 253 253 253 253 253 253 253 253
46138-253 253 253 253 253 253 253 253 253 253 253 253
46139-253 253 253 253 253 253 253 253 253 253 253 253
46140-253 253 253 253 253 253 253 253 253 228 184 62
46141-241 196 14 241 208 19 232 195 16 38 30 10
46142- 2 2 6 2 2 6 2 2 6 2 2 6
46143- 2 2 6 6 6 6 30 30 30 26 26 26
46144-203 166 17 154 142 90 66 66 66 26 26 26
46145- 6 6 6 0 0 0 0 0 0 0 0 0
46146- 0 0 0 0 0 0 0 0 0 0 0 0
46147- 0 0 0 0 0 0 0 0 0 0 0 0
46148- 0 0 0 0 0 0 0 0 0 0 0 0
46149- 0 0 0 0 0 0 0 0 0 0 0 0
46150- 6 6 6 18 18 18 38 38 38 58 58 58
46151- 78 78 78 86 86 86 101 101 101 123 123 123
46152-175 146 61 210 150 10 234 174 13 246 186 14
46153-246 190 14 246 190 14 246 190 14 238 190 10
46154-102 78 10 2 2 6 46 46 46 198 198 198
46155-253 253 253 253 253 253 253 253 253 253 253 253
46156-253 253 253 253 253 253 234 234 234 242 242 242
46157-253 253 253 253 253 253 253 253 253 253 253 253
46158-253 253 253 253 253 253 253 253 253 253 253 253
46159-253 253 253 253 253 253 253 253 253 253 253 253
46160-253 253 253 253 253 253 253 253 253 224 178 62
46161-242 186 14 241 196 14 210 166 10 22 18 6
46162- 2 2 6 2 2 6 2 2 6 2 2 6
46163- 2 2 6 2 2 6 6 6 6 121 92 8
46164-238 202 15 232 195 16 82 82 82 34 34 34
46165- 10 10 10 0 0 0 0 0 0 0 0 0
46166- 0 0 0 0 0 0 0 0 0 0 0 0
46167- 0 0 0 0 0 0 0 0 0 0 0 0
46168- 0 0 0 0 0 0 0 0 0 0 0 0
46169- 0 0 0 0 0 0 0 0 0 0 0 0
46170- 14 14 14 38 38 38 70 70 70 154 122 46
46171-190 142 34 200 144 11 197 138 11 197 138 11
46172-213 154 11 226 170 11 242 186 14 246 190 14
46173-246 190 14 246 190 14 246 190 14 246 190 14
46174-225 175 15 46 32 6 2 2 6 22 22 22
46175-158 158 158 250 250 250 253 253 253 253 253 253
46176-253 253 253 253 253 253 253 253 253 253 253 253
46177-253 253 253 253 253 253 253 253 253 253 253 253
46178-253 253 253 253 253 253 253 253 253 253 253 253
46179-253 253 253 253 253 253 253 253 253 253 253 253
46180-253 253 253 250 250 250 242 242 242 224 178 62
46181-239 182 13 236 186 11 213 154 11 46 32 6
46182- 2 2 6 2 2 6 2 2 6 2 2 6
46183- 2 2 6 2 2 6 61 42 6 225 175 15
46184-238 190 10 236 186 11 112 100 78 42 42 42
46185- 14 14 14 0 0 0 0 0 0 0 0 0
46186- 0 0 0 0 0 0 0 0 0 0 0 0
46187- 0 0 0 0 0 0 0 0 0 0 0 0
46188- 0 0 0 0 0 0 0 0 0 0 0 0
46189- 0 0 0 0 0 0 0 0 0 6 6 6
46190- 22 22 22 54 54 54 154 122 46 213 154 11
46191-226 170 11 230 174 11 226 170 11 226 170 11
46192-236 178 12 242 186 14 246 190 14 246 190 14
46193-246 190 14 246 190 14 246 190 14 246 190 14
46194-241 196 14 184 144 12 10 10 10 2 2 6
46195- 6 6 6 116 116 116 242 242 242 253 253 253
46196-253 253 253 253 253 253 253 253 253 253 253 253
46197-253 253 253 253 253 253 253 253 253 253 253 253
46198-253 253 253 253 253 253 253 253 253 253 253 253
46199-253 253 253 253 253 253 253 253 253 253 253 253
46200-253 253 253 231 231 231 198 198 198 214 170 54
46201-236 178 12 236 178 12 210 150 10 137 92 6
46202- 18 14 6 2 2 6 2 2 6 2 2 6
46203- 6 6 6 70 47 6 200 144 11 236 178 12
46204-239 182 13 239 182 13 124 112 88 58 58 58
46205- 22 22 22 6 6 6 0 0 0 0 0 0
46206- 0 0 0 0 0 0 0 0 0 0 0 0
46207- 0 0 0 0 0 0 0 0 0 0 0 0
46208- 0 0 0 0 0 0 0 0 0 0 0 0
46209- 0 0 0 0 0 0 0 0 0 10 10 10
46210- 30 30 30 70 70 70 180 133 36 226 170 11
46211-239 182 13 242 186 14 242 186 14 246 186 14
46212-246 190 14 246 190 14 246 190 14 246 190 14
46213-246 190 14 246 190 14 246 190 14 246 190 14
46214-246 190 14 232 195 16 98 70 6 2 2 6
46215- 2 2 6 2 2 6 66 66 66 221 221 221
46216-253 253 253 253 253 253 253 253 253 253 253 253
46217-253 253 253 253 253 253 253 253 253 253 253 253
46218-253 253 253 253 253 253 253 253 253 253 253 253
46219-253 253 253 253 253 253 253 253 253 253 253 253
46220-253 253 253 206 206 206 198 198 198 214 166 58
46221-230 174 11 230 174 11 216 158 10 192 133 9
46222-163 110 8 116 81 8 102 78 10 116 81 8
46223-167 114 7 197 138 11 226 170 11 239 182 13
46224-242 186 14 242 186 14 162 146 94 78 78 78
46225- 34 34 34 14 14 14 6 6 6 0 0 0
46226- 0 0 0 0 0 0 0 0 0 0 0 0
46227- 0 0 0 0 0 0 0 0 0 0 0 0
46228- 0 0 0 0 0 0 0 0 0 0 0 0
46229- 0 0 0 0 0 0 0 0 0 6 6 6
46230- 30 30 30 78 78 78 190 142 34 226 170 11
46231-239 182 13 246 190 14 246 190 14 246 190 14
46232-246 190 14 246 190 14 246 190 14 246 190 14
46233-246 190 14 246 190 14 246 190 14 246 190 14
46234-246 190 14 241 196 14 203 166 17 22 18 6
46235- 2 2 6 2 2 6 2 2 6 38 38 38
46236-218 218 218 253 253 253 253 253 253 253 253 253
46237-253 253 253 253 253 253 253 253 253 253 253 253
46238-253 253 253 253 253 253 253 253 253 253 253 253
46239-253 253 253 253 253 253 253 253 253 253 253 253
46240-250 250 250 206 206 206 198 198 198 202 162 69
46241-226 170 11 236 178 12 224 166 10 210 150 10
46242-200 144 11 197 138 11 192 133 9 197 138 11
46243-210 150 10 226 170 11 242 186 14 246 190 14
46244-246 190 14 246 186 14 225 175 15 124 112 88
46245- 62 62 62 30 30 30 14 14 14 6 6 6
46246- 0 0 0 0 0 0 0 0 0 0 0 0
46247- 0 0 0 0 0 0 0 0 0 0 0 0
46248- 0 0 0 0 0 0 0 0 0 0 0 0
46249- 0 0 0 0 0 0 0 0 0 10 10 10
46250- 30 30 30 78 78 78 174 135 50 224 166 10
46251-239 182 13 246 190 14 246 190 14 246 190 14
46252-246 190 14 246 190 14 246 190 14 246 190 14
46253-246 190 14 246 190 14 246 190 14 246 190 14
46254-246 190 14 246 190 14 241 196 14 139 102 15
46255- 2 2 6 2 2 6 2 2 6 2 2 6
46256- 78 78 78 250 250 250 253 253 253 253 253 253
46257-253 253 253 253 253 253 253 253 253 253 253 253
46258-253 253 253 253 253 253 253 253 253 253 253 253
46259-253 253 253 253 253 253 253 253 253 253 253 253
46260-250 250 250 214 214 214 198 198 198 190 150 46
46261-219 162 10 236 178 12 234 174 13 224 166 10
46262-216 158 10 213 154 11 213 154 11 216 158 10
46263-226 170 11 239 182 13 246 190 14 246 190 14
46264-246 190 14 246 190 14 242 186 14 206 162 42
46265-101 101 101 58 58 58 30 30 30 14 14 14
46266- 6 6 6 0 0 0 0 0 0 0 0 0
46267- 0 0 0 0 0 0 0 0 0 0 0 0
46268- 0 0 0 0 0 0 0 0 0 0 0 0
46269- 0 0 0 0 0 0 0 0 0 10 10 10
46270- 30 30 30 74 74 74 174 135 50 216 158 10
46271-236 178 12 246 190 14 246 190 14 246 190 14
46272-246 190 14 246 190 14 246 190 14 246 190 14
46273-246 190 14 246 190 14 246 190 14 246 190 14
46274-246 190 14 246 190 14 241 196 14 226 184 13
46275- 61 42 6 2 2 6 2 2 6 2 2 6
46276- 22 22 22 238 238 238 253 253 253 253 253 253
46277-253 253 253 253 253 253 253 253 253 253 253 253
46278-253 253 253 253 253 253 253 253 253 253 253 253
46279-253 253 253 253 253 253 253 253 253 253 253 253
46280-253 253 253 226 226 226 187 187 187 180 133 36
46281-216 158 10 236 178 12 239 182 13 236 178 12
46282-230 174 11 226 170 11 226 170 11 230 174 11
46283-236 178 12 242 186 14 246 190 14 246 190 14
46284-246 190 14 246 190 14 246 186 14 239 182 13
46285-206 162 42 106 106 106 66 66 66 34 34 34
46286- 14 14 14 6 6 6 0 0 0 0 0 0
46287- 0 0 0 0 0 0 0 0 0 0 0 0
46288- 0 0 0 0 0 0 0 0 0 0 0 0
46289- 0 0 0 0 0 0 0 0 0 6 6 6
46290- 26 26 26 70 70 70 163 133 67 213 154 11
46291-236 178 12 246 190 14 246 190 14 246 190 14
46292-246 190 14 246 190 14 246 190 14 246 190 14
46293-246 190 14 246 190 14 246 190 14 246 190 14
46294-246 190 14 246 190 14 246 190 14 241 196 14
46295-190 146 13 18 14 6 2 2 6 2 2 6
46296- 46 46 46 246 246 246 253 253 253 253 253 253
46297-253 253 253 253 253 253 253 253 253 253 253 253
46298-253 253 253 253 253 253 253 253 253 253 253 253
46299-253 253 253 253 253 253 253 253 253 253 253 253
46300-253 253 253 221 221 221 86 86 86 156 107 11
46301-216 158 10 236 178 12 242 186 14 246 186 14
46302-242 186 14 239 182 13 239 182 13 242 186 14
46303-242 186 14 246 186 14 246 190 14 246 190 14
46304-246 190 14 246 190 14 246 190 14 246 190 14
46305-242 186 14 225 175 15 142 122 72 66 66 66
46306- 30 30 30 10 10 10 0 0 0 0 0 0
46307- 0 0 0 0 0 0 0 0 0 0 0 0
46308- 0 0 0 0 0 0 0 0 0 0 0 0
46309- 0 0 0 0 0 0 0 0 0 6 6 6
46310- 26 26 26 70 70 70 163 133 67 210 150 10
46311-236 178 12 246 190 14 246 190 14 246 190 14
46312-246 190 14 246 190 14 246 190 14 246 190 14
46313-246 190 14 246 190 14 246 190 14 246 190 14
46314-246 190 14 246 190 14 246 190 14 246 190 14
46315-232 195 16 121 92 8 34 34 34 106 106 106
46316-221 221 221 253 253 253 253 253 253 253 253 253
46317-253 253 253 253 253 253 253 253 253 253 253 253
46318-253 253 253 253 253 253 253 253 253 253 253 253
46319-253 253 253 253 253 253 253 253 253 253 253 253
46320-242 242 242 82 82 82 18 14 6 163 110 8
46321-216 158 10 236 178 12 242 186 14 246 190 14
46322-246 190 14 246 190 14 246 190 14 246 190 14
46323-246 190 14 246 190 14 246 190 14 246 190 14
46324-246 190 14 246 190 14 246 190 14 246 190 14
46325-246 190 14 246 190 14 242 186 14 163 133 67
46326- 46 46 46 18 18 18 6 6 6 0 0 0
46327- 0 0 0 0 0 0 0 0 0 0 0 0
46328- 0 0 0 0 0 0 0 0 0 0 0 0
46329- 0 0 0 0 0 0 0 0 0 10 10 10
46330- 30 30 30 78 78 78 163 133 67 210 150 10
46331-236 178 12 246 186 14 246 190 14 246 190 14
46332-246 190 14 246 190 14 246 190 14 246 190 14
46333-246 190 14 246 190 14 246 190 14 246 190 14
46334-246 190 14 246 190 14 246 190 14 246 190 14
46335-241 196 14 215 174 15 190 178 144 253 253 253
46336-253 253 253 253 253 253 253 253 253 253 253 253
46337-253 253 253 253 253 253 253 253 253 253 253 253
46338-253 253 253 253 253 253 253 253 253 253 253 253
46339-253 253 253 253 253 253 253 253 253 218 218 218
46340- 58 58 58 2 2 6 22 18 6 167 114 7
46341-216 158 10 236 178 12 246 186 14 246 190 14
46342-246 190 14 246 190 14 246 190 14 246 190 14
46343-246 190 14 246 190 14 246 190 14 246 190 14
46344-246 190 14 246 190 14 246 190 14 246 190 14
46345-246 190 14 246 186 14 242 186 14 190 150 46
46346- 54 54 54 22 22 22 6 6 6 0 0 0
46347- 0 0 0 0 0 0 0 0 0 0 0 0
46348- 0 0 0 0 0 0 0 0 0 0 0 0
46349- 0 0 0 0 0 0 0 0 0 14 14 14
46350- 38 38 38 86 86 86 180 133 36 213 154 11
46351-236 178 12 246 186 14 246 190 14 246 190 14
46352-246 190 14 246 190 14 246 190 14 246 190 14
46353-246 190 14 246 190 14 246 190 14 246 190 14
46354-246 190 14 246 190 14 246 190 14 246 190 14
46355-246 190 14 232 195 16 190 146 13 214 214 214
46356-253 253 253 253 253 253 253 253 253 253 253 253
46357-253 253 253 253 253 253 253 253 253 253 253 253
46358-253 253 253 253 253 253 253 253 253 253 253 253
46359-253 253 253 250 250 250 170 170 170 26 26 26
46360- 2 2 6 2 2 6 37 26 9 163 110 8
46361-219 162 10 239 182 13 246 186 14 246 190 14
46362-246 190 14 246 190 14 246 190 14 246 190 14
46363-246 190 14 246 190 14 246 190 14 246 190 14
46364-246 190 14 246 190 14 246 190 14 246 190 14
46365-246 186 14 236 178 12 224 166 10 142 122 72
46366- 46 46 46 18 18 18 6 6 6 0 0 0
46367- 0 0 0 0 0 0 0 0 0 0 0 0
46368- 0 0 0 0 0 0 0 0 0 0 0 0
46369- 0 0 0 0 0 0 6 6 6 18 18 18
46370- 50 50 50 109 106 95 192 133 9 224 166 10
46371-242 186 14 246 190 14 246 190 14 246 190 14
46372-246 190 14 246 190 14 246 190 14 246 190 14
46373-246 190 14 246 190 14 246 190 14 246 190 14
46374-246 190 14 246 190 14 246 190 14 246 190 14
46375-242 186 14 226 184 13 210 162 10 142 110 46
46376-226 226 226 253 253 253 253 253 253 253 253 253
46377-253 253 253 253 253 253 253 253 253 253 253 253
46378-253 253 253 253 253 253 253 253 253 253 253 253
46379-198 198 198 66 66 66 2 2 6 2 2 6
46380- 2 2 6 2 2 6 50 34 6 156 107 11
46381-219 162 10 239 182 13 246 186 14 246 190 14
46382-246 190 14 246 190 14 246 190 14 246 190 14
46383-246 190 14 246 190 14 246 190 14 246 190 14
46384-246 190 14 246 190 14 246 190 14 242 186 14
46385-234 174 13 213 154 11 154 122 46 66 66 66
46386- 30 30 30 10 10 10 0 0 0 0 0 0
46387- 0 0 0 0 0 0 0 0 0 0 0 0
46388- 0 0 0 0 0 0 0 0 0 0 0 0
46389- 0 0 0 0 0 0 6 6 6 22 22 22
46390- 58 58 58 154 121 60 206 145 10 234 174 13
46391-242 186 14 246 186 14 246 190 14 246 190 14
46392-246 190 14 246 190 14 246 190 14 246 190 14
46393-246 190 14 246 190 14 246 190 14 246 190 14
46394-246 190 14 246 190 14 246 190 14 246 190 14
46395-246 186 14 236 178 12 210 162 10 163 110 8
46396- 61 42 6 138 138 138 218 218 218 250 250 250
46397-253 253 253 253 253 253 253 253 253 250 250 250
46398-242 242 242 210 210 210 144 144 144 66 66 66
46399- 6 6 6 2 2 6 2 2 6 2 2 6
46400- 2 2 6 2 2 6 61 42 6 163 110 8
46401-216 158 10 236 178 12 246 190 14 246 190 14
46402-246 190 14 246 190 14 246 190 14 246 190 14
46403-246 190 14 246 190 14 246 190 14 246 190 14
46404-246 190 14 239 182 13 230 174 11 216 158 10
46405-190 142 34 124 112 88 70 70 70 38 38 38
46406- 18 18 18 6 6 6 0 0 0 0 0 0
46407- 0 0 0 0 0 0 0 0 0 0 0 0
46408- 0 0 0 0 0 0 0 0 0 0 0 0
46409- 0 0 0 0 0 0 6 6 6 22 22 22
46410- 62 62 62 168 124 44 206 145 10 224 166 10
46411-236 178 12 239 182 13 242 186 14 242 186 14
46412-246 186 14 246 190 14 246 190 14 246 190 14
46413-246 190 14 246 190 14 246 190 14 246 190 14
46414-246 190 14 246 190 14 246 190 14 246 190 14
46415-246 190 14 236 178 12 216 158 10 175 118 6
46416- 80 54 7 2 2 6 6 6 6 30 30 30
46417- 54 54 54 62 62 62 50 50 50 38 38 38
46418- 14 14 14 2 2 6 2 2 6 2 2 6
46419- 2 2 6 2 2 6 2 2 6 2 2 6
46420- 2 2 6 6 6 6 80 54 7 167 114 7
46421-213 154 11 236 178 12 246 190 14 246 190 14
46422-246 190 14 246 190 14 246 190 14 246 190 14
46423-246 190 14 242 186 14 239 182 13 239 182 13
46424-230 174 11 210 150 10 174 135 50 124 112 88
46425- 82 82 82 54 54 54 34 34 34 18 18 18
46426- 6 6 6 0 0 0 0 0 0 0 0 0
46427- 0 0 0 0 0 0 0 0 0 0 0 0
46428- 0 0 0 0 0 0 0 0 0 0 0 0
46429- 0 0 0 0 0 0 6 6 6 18 18 18
46430- 50 50 50 158 118 36 192 133 9 200 144 11
46431-216 158 10 219 162 10 224 166 10 226 170 11
46432-230 174 11 236 178 12 239 182 13 239 182 13
46433-242 186 14 246 186 14 246 190 14 246 190 14
46434-246 190 14 246 190 14 246 190 14 246 190 14
46435-246 186 14 230 174 11 210 150 10 163 110 8
46436-104 69 6 10 10 10 2 2 6 2 2 6
46437- 2 2 6 2 2 6 2 2 6 2 2 6
46438- 2 2 6 2 2 6 2 2 6 2 2 6
46439- 2 2 6 2 2 6 2 2 6 2 2 6
46440- 2 2 6 6 6 6 91 60 6 167 114 7
46441-206 145 10 230 174 11 242 186 14 246 190 14
46442-246 190 14 246 190 14 246 186 14 242 186 14
46443-239 182 13 230 174 11 224 166 10 213 154 11
46444-180 133 36 124 112 88 86 86 86 58 58 58
46445- 38 38 38 22 22 22 10 10 10 6 6 6
46446- 0 0 0 0 0 0 0 0 0 0 0 0
46447- 0 0 0 0 0 0 0 0 0 0 0 0
46448- 0 0 0 0 0 0 0 0 0 0 0 0
46449- 0 0 0 0 0 0 0 0 0 14 14 14
46450- 34 34 34 70 70 70 138 110 50 158 118 36
46451-167 114 7 180 123 7 192 133 9 197 138 11
46452-200 144 11 206 145 10 213 154 11 219 162 10
46453-224 166 10 230 174 11 239 182 13 242 186 14
46454-246 186 14 246 186 14 246 186 14 246 186 14
46455-239 182 13 216 158 10 185 133 11 152 99 6
46456-104 69 6 18 14 6 2 2 6 2 2 6
46457- 2 2 6 2 2 6 2 2 6 2 2 6
46458- 2 2 6 2 2 6 2 2 6 2 2 6
46459- 2 2 6 2 2 6 2 2 6 2 2 6
46460- 2 2 6 6 6 6 80 54 7 152 99 6
46461-192 133 9 219 162 10 236 178 12 239 182 13
46462-246 186 14 242 186 14 239 182 13 236 178 12
46463-224 166 10 206 145 10 192 133 9 154 121 60
46464- 94 94 94 62 62 62 42 42 42 22 22 22
46465- 14 14 14 6 6 6 0 0 0 0 0 0
46466- 0 0 0 0 0 0 0 0 0 0 0 0
46467- 0 0 0 0 0 0 0 0 0 0 0 0
46468- 0 0 0 0 0 0 0 0 0 0 0 0
46469- 0 0 0 0 0 0 0 0 0 6 6 6
46470- 18 18 18 34 34 34 58 58 58 78 78 78
46471-101 98 89 124 112 88 142 110 46 156 107 11
46472-163 110 8 167 114 7 175 118 6 180 123 7
46473-185 133 11 197 138 11 210 150 10 219 162 10
46474-226 170 11 236 178 12 236 178 12 234 174 13
46475-219 162 10 197 138 11 163 110 8 130 83 6
46476- 91 60 6 10 10 10 2 2 6 2 2 6
46477- 18 18 18 38 38 38 38 38 38 38 38 38
46478- 38 38 38 38 38 38 38 38 38 38 38 38
46479- 38 38 38 38 38 38 26 26 26 2 2 6
46480- 2 2 6 6 6 6 70 47 6 137 92 6
46481-175 118 6 200 144 11 219 162 10 230 174 11
46482-234 174 13 230 174 11 219 162 10 210 150 10
46483-192 133 9 163 110 8 124 112 88 82 82 82
46484- 50 50 50 30 30 30 14 14 14 6 6 6
46485- 0 0 0 0 0 0 0 0 0 0 0 0
46486- 0 0 0 0 0 0 0 0 0 0 0 0
46487- 0 0 0 0 0 0 0 0 0 0 0 0
46488- 0 0 0 0 0 0 0 0 0 0 0 0
46489- 0 0 0 0 0 0 0 0 0 0 0 0
46490- 6 6 6 14 14 14 22 22 22 34 34 34
46491- 42 42 42 58 58 58 74 74 74 86 86 86
46492-101 98 89 122 102 70 130 98 46 121 87 25
46493-137 92 6 152 99 6 163 110 8 180 123 7
46494-185 133 11 197 138 11 206 145 10 200 144 11
46495-180 123 7 156 107 11 130 83 6 104 69 6
46496- 50 34 6 54 54 54 110 110 110 101 98 89
46497- 86 86 86 82 82 82 78 78 78 78 78 78
46498- 78 78 78 78 78 78 78 78 78 78 78 78
46499- 78 78 78 82 82 82 86 86 86 94 94 94
46500-106 106 106 101 101 101 86 66 34 124 80 6
46501-156 107 11 180 123 7 192 133 9 200 144 11
[ ~1,200 lines of raw image pixel data omitted: this hunk removes roughly 100 rows of "R G B" triplets from an embedded bitmap (apparently a PPM-style boot-logo image) and adds roughly 1,100 rows for the replacement image. Only the numeric pixel values differ; no code is affected. ]
47728diff --git a/drivers/video/mb862xx/mb862xxfb_accel.c b/drivers/video/mb862xx/mb862xxfb_accel.c
47729index fe92eed..106e085 100644
47730--- a/drivers/video/mb862xx/mb862xxfb_accel.c
47731+++ b/drivers/video/mb862xx/mb862xxfb_accel.c
47732@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
47733 struct mb862xxfb_par *par = info->par;
47734
47735 if (info->var.bits_per_pixel == 32) {
47736- info->fbops->fb_fillrect = cfb_fillrect;
47737- info->fbops->fb_copyarea = cfb_copyarea;
47738- info->fbops->fb_imageblit = cfb_imageblit;
47739+ pax_open_kernel();
47740+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
47741+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
47742+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
47743+ pax_close_kernel();
47744 } else {
47745 outreg(disp, GC_L0EM, 3);
47746- info->fbops->fb_fillrect = mb86290fb_fillrect;
47747- info->fbops->fb_copyarea = mb86290fb_copyarea;
47748- info->fbops->fb_imageblit = mb86290fb_imageblit;
47749+ pax_open_kernel();
47750+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
47751+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
47752+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
47753+ pax_close_kernel();
47754 }
47755 outreg(draw, GDC_REG_DRAW_BASE, 0);
47756 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
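Editor's note: the mb862xx hunk above is the first of several framebuffer hunks that all apply one pattern, so a minimal sketch may help (assuming PaX KERNEXEC/constify semantics; this code is illustrative, not part of the patch). Structures made up of function pointers, such as struct fb_ops, are placed in read-only memory by the constify plugin, so a plain store to info->fbops->fb_fillrect would fault. The store has to be bracketed by pax_open_kernel()/pax_close_kernel(), and the *(void **)& cast strips the const qualifier the plugin adds:

	#include <linux/fb.h>

	/* Hypothetical helper showing the write-to-read-only-ops pattern. */
	static void mbfb_set_accel_ops(struct fb_info *info)
	{
		pax_open_kernel();	/* temporarily lift kernel write protection */
		*(void **)&info->fbops->fb_fillrect = cfb_fillrect;
		*(void **)&info->fbops->fb_copyarea = cfb_copyarea;
		pax_close_kernel();	/* restore write protection */
	}

The same open/write/close bracket recurs in the nvidia, s1d13xxx, smscufx, udlfb, uvesafb and vesafb hunks below.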
47757diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
47758index ff22871..b129bed 100644
47759--- a/drivers/video/nvidia/nvidia.c
47760+++ b/drivers/video/nvidia/nvidia.c
47761@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
47762 info->fix.line_length = (info->var.xres_virtual *
47763 info->var.bits_per_pixel) >> 3;
47764 if (info->var.accel_flags) {
47765- info->fbops->fb_imageblit = nvidiafb_imageblit;
47766- info->fbops->fb_fillrect = nvidiafb_fillrect;
47767- info->fbops->fb_copyarea = nvidiafb_copyarea;
47768- info->fbops->fb_sync = nvidiafb_sync;
47769+ pax_open_kernel();
47770+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
47771+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
47772+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
47773+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
47774+ pax_close_kernel();
47775 info->pixmap.scan_align = 4;
47776 info->flags &= ~FBINFO_HWACCEL_DISABLED;
47777 info->flags |= FBINFO_READS_FAST;
47778 NVResetGraphics(info);
47779 } else {
47780- info->fbops->fb_imageblit = cfb_imageblit;
47781- info->fbops->fb_fillrect = cfb_fillrect;
47782- info->fbops->fb_copyarea = cfb_copyarea;
47783- info->fbops->fb_sync = NULL;
47784+ pax_open_kernel();
47785+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
47786+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
47787+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
47788+ *(void **)&info->fbops->fb_sync = NULL;
47789+ pax_close_kernel();
47790 info->pixmap.scan_align = 1;
47791 info->flags |= FBINFO_HWACCEL_DISABLED;
47792 info->flags &= ~FBINFO_READS_FAST;
47793@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
47794 info->pixmap.size = 8 * 1024;
47795 info->pixmap.flags = FB_PIXMAP_SYSTEM;
47796
47797- if (!hwcur)
47798- info->fbops->fb_cursor = NULL;
47799+ if (!hwcur) {
47800+ pax_open_kernel();
47801+ *(void **)&info->fbops->fb_cursor = NULL;
47802+ pax_close_kernel();
47803+ }
47804
47805 info->var.accel_flags = (!noaccel);
47806
47807diff --git a/drivers/video/s1d13xxxfb.c b/drivers/video/s1d13xxxfb.c
47808index 76d9053..dec2bfd 100644
47809--- a/drivers/video/s1d13xxxfb.c
47810+++ b/drivers/video/s1d13xxxfb.c
47811@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
47812
47813 switch(prod_id) {
47814 case S1D13506_PROD_ID: /* activate acceleration */
47815- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
47816- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
47817+ pax_open_kernel();
47818+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
47819+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
47820+ pax_close_kernel();
47821 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
47822 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
47823 break;
47824diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
47825index 97bd662..39fab85 100644
47826--- a/drivers/video/smscufx.c
47827+++ b/drivers/video/smscufx.c
47828@@ -1171,7 +1171,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
47829 fb_deferred_io_cleanup(info);
47830 kfree(info->fbdefio);
47831 info->fbdefio = NULL;
47832- info->fbops->fb_mmap = ufx_ops_mmap;
47833+ pax_open_kernel();
47834+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
47835+ pax_close_kernel();
47836 }
47837
47838 pr_debug("released /dev/fb%d user=%d count=%d",
47839diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
47840index 86d449e..8e04dc5 100644
47841--- a/drivers/video/udlfb.c
47842+++ b/drivers/video/udlfb.c
47843@@ -619,11 +619,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
47844 dlfb_urb_completion(urb);
47845
47846 error:
47847- atomic_add(bytes_sent, &dev->bytes_sent);
47848- atomic_add(bytes_identical, &dev->bytes_identical);
47849- atomic_add(width*height*2, &dev->bytes_rendered);
47850+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
47851+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
47852+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
47853 end_cycles = get_cycles();
47854- atomic_add(((unsigned int) ((end_cycles - start_cycles)
47855+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
47856 >> 10)), /* Kcycles */
47857 &dev->cpu_kcycles_used);
47858
47859@@ -744,11 +744,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
47860 dlfb_urb_completion(urb);
47861
47862 error:
47863- atomic_add(bytes_sent, &dev->bytes_sent);
47864- atomic_add(bytes_identical, &dev->bytes_identical);
47865- atomic_add(bytes_rendered, &dev->bytes_rendered);
47866+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
47867+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
47868+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
47869 end_cycles = get_cycles();
47870- atomic_add(((unsigned int) ((end_cycles - start_cycles)
47871+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
47872 >> 10)), /* Kcycles */
47873 &dev->cpu_kcycles_used);
47874 }
47875@@ -989,7 +989,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
47876 fb_deferred_io_cleanup(info);
47877 kfree(info->fbdefio);
47878 info->fbdefio = NULL;
47879- info->fbops->fb_mmap = dlfb_ops_mmap;
47880+ pax_open_kernel();
47881+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
47882+ pax_close_kernel();
47883 }
47884
47885 pr_warn("released /dev/fb%d user=%d count=%d\n",
47886@@ -1372,7 +1374,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
47887 struct fb_info *fb_info = dev_get_drvdata(fbdev);
47888 struct dlfb_data *dev = fb_info->par;
47889 return snprintf(buf, PAGE_SIZE, "%u\n",
47890- atomic_read(&dev->bytes_rendered));
47891+ atomic_read_unchecked(&dev->bytes_rendered));
47892 }
47893
47894 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
47895@@ -1380,7 +1382,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
47896 struct fb_info *fb_info = dev_get_drvdata(fbdev);
47897 struct dlfb_data *dev = fb_info->par;
47898 return snprintf(buf, PAGE_SIZE, "%u\n",
47899- atomic_read(&dev->bytes_identical));
47900+ atomic_read_unchecked(&dev->bytes_identical));
47901 }
47902
47903 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
47904@@ -1388,7 +1390,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
47905 struct fb_info *fb_info = dev_get_drvdata(fbdev);
47906 struct dlfb_data *dev = fb_info->par;
47907 return snprintf(buf, PAGE_SIZE, "%u\n",
47908- atomic_read(&dev->bytes_sent));
47909+ atomic_read_unchecked(&dev->bytes_sent));
47910 }
47911
47912 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
47913@@ -1396,7 +1398,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
47914 struct fb_info *fb_info = dev_get_drvdata(fbdev);
47915 struct dlfb_data *dev = fb_info->par;
47916 return snprintf(buf, PAGE_SIZE, "%u\n",
47917- atomic_read(&dev->cpu_kcycles_used));
47918+ atomic_read_unchecked(&dev->cpu_kcycles_used));
47919 }
47920
47921 static ssize_t edid_show(
47922@@ -1456,10 +1458,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
47923 struct fb_info *fb_info = dev_get_drvdata(fbdev);
47924 struct dlfb_data *dev = fb_info->par;
47925
47926- atomic_set(&dev->bytes_rendered, 0);
47927- atomic_set(&dev->bytes_identical, 0);
47928- atomic_set(&dev->bytes_sent, 0);
47929- atomic_set(&dev->cpu_kcycles_used, 0);
47930+ atomic_set_unchecked(&dev->bytes_rendered, 0);
47931+ atomic_set_unchecked(&dev->bytes_identical, 0);
47932+ atomic_set_unchecked(&dev->bytes_sent, 0);
47933+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
47934
47935 return count;
47936 }
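Editor's note, a sketch of the counter conversion above (assuming PAX_REFCOUNT semantics; not part of the patch): under PAX_REFCOUNT the regular atomic_t operations detect signed overflow and kill the offending task, which protects reference counts but would also fire on statistics that are allowed to wrap. Pure counters such as dev->bytes_sent are therefore retyped to atomic_unchecked_t and use the *_unchecked operations, which keep the traditional wrapping behaviour:

	#include <linux/atomic.h>

	static atomic_unchecked_t bytes_sent = ATOMIC_INIT(0);	/* statistics only */

	static void account_tx(unsigned int len)
	{
		/* May wrap modulo 2^32 without triggering the refcount check. */
		atomic_add_unchecked(len, &bytes_sent);
	}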
47937diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
47938index b75db01..ad2f34a 100644
47939--- a/drivers/video/uvesafb.c
47940+++ b/drivers/video/uvesafb.c
47941@@ -19,6 +19,7 @@
47942 #include <linux/io.h>
47943 #include <linux/mutex.h>
47944 #include <linux/slab.h>
47945+#include <linux/moduleloader.h>
47946 #include <video/edid.h>
47947 #include <video/uvesafb.h>
47948 #ifdef CONFIG_X86
47949@@ -569,10 +570,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
47950 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
47951 par->pmi_setpal = par->ypan = 0;
47952 } else {
47953+
47954+#ifdef CONFIG_PAX_KERNEXEC
47955+#ifdef CONFIG_MODULES
47956+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
47957+#endif
47958+ if (!par->pmi_code) {
47959+ par->pmi_setpal = par->ypan = 0;
47960+ return 0;
47961+ }
47962+#endif
47963+
47964 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
47965 + task->t.regs.edi);
47966+
47967+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
47968+ pax_open_kernel();
47969+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
47970+ pax_close_kernel();
47971+
47972+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
47973+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
47974+#else
47975 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
47976 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
47977+#endif
47978+
47979 printk(KERN_INFO "uvesafb: protected mode interface info at "
47980 "%04x:%04x\n",
47981 (u16)task->t.regs.es, (u16)task->t.regs.edi);
47982@@ -817,13 +840,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
47983 par->ypan = ypan;
47984
47985 if (par->pmi_setpal || par->ypan) {
47986+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
47987 if (__supported_pte_mask & _PAGE_NX) {
47988 par->pmi_setpal = par->ypan = 0;
47989 printk(KERN_WARNING "uvesafb: NX protection is active; "
47990 "better not to use the PMI.\n");
47991- } else {
47992+ } else
47993+#endif
47994 uvesafb_vbe_getpmi(task, par);
47995- }
47996 }
47997 #else
47998 /* The protected mode interface is not available on non-x86. */
47999@@ -1457,8 +1481,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
48000 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
48001
48002 /* Disable blanking if the user requested so. */
48003- if (!blank)
48004- info->fbops->fb_blank = NULL;
48005+ if (!blank) {
48006+ pax_open_kernel();
48007+ *(void **)&info->fbops->fb_blank = NULL;
48008+ pax_close_kernel();
48009+ }
48010
48011 /*
48012 * Find out how much IO memory is required for the mode with
48013@@ -1534,8 +1561,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
48014 info->flags = FBINFO_FLAG_DEFAULT |
48015 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
48016
48017- if (!par->ypan)
48018- info->fbops->fb_pan_display = NULL;
48019+ if (!par->ypan) {
48020+ pax_open_kernel();
48021+ *(void **)&info->fbops->fb_pan_display = NULL;
48022+ pax_close_kernel();
48023+ }
48024 }
48025
48026 static void uvesafb_init_mtrr(struct fb_info *info)
48027@@ -1836,6 +1866,11 @@ out:
48028 if (par->vbe_modes)
48029 kfree(par->vbe_modes);
48030
48031+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
48032+ if (par->pmi_code)
48033+ module_free_exec(NULL, par->pmi_code);
48034+#endif
48035+
48036 framebuffer_release(info);
48037 return err;
48038 }
48039@@ -1862,6 +1897,12 @@ static int uvesafb_remove(struct platform_device *dev)
48040 kfree(par->vbe_state_orig);
48041 if (par->vbe_state_saved)
48042 kfree(par->vbe_state_saved);
48043+
48044+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
48045+ if (par->pmi_code)
48046+ module_free_exec(NULL, par->pmi_code);
48047+#endif
48048+
48049 }
48050
48051 framebuffer_release(info);
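Editor's note on the uvesafb changes above (a sketch under the assumption that KERNEXEC makes the lowmem mapping non-executable; not part of the patch): the BIOS protected-mode interface code that phys_to_virt() exposes sits in data pages, so with KERNEXEC it can no longer be called in place. The patch instead copies it into an executable module allocation and rebases the two entry points into the copy; ktva_ktla() appears to translate between the writable and executable aliases KERNEXEC keeps of kernel text (an assumption drawn from its use here):

	/* pmi_len stands in for (u16)task->t.regs.ecx; illustrative only. */
	par->pmi_code = module_alloc_exec(pmi_len);	/* executable allocation */
	if (par->pmi_code) {
		pax_open_kernel();
		memcpy(par->pmi_code, par->pmi_base, pmi_len);	/* copy BIOS code */
		pax_close_kernel();
		par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
		par->pmi_pal   = ktva_ktla(par->pmi_code + par->pmi_base[2]);
	}

The vesafb hunks below apply the same copy-and-rebase on i386.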
48052diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
48053index 501b340..d80aa17 100644
48054--- a/drivers/video/vesafb.c
48055+++ b/drivers/video/vesafb.c
48056@@ -9,6 +9,7 @@
48057 */
48058
48059 #include <linux/module.h>
48060+#include <linux/moduleloader.h>
48061 #include <linux/kernel.h>
48062 #include <linux/errno.h>
48063 #include <linux/string.h>
48064@@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
48065 static int vram_total __initdata; /* Set total amount of memory */
48066 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
48067 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
48068-static void (*pmi_start)(void) __read_mostly;
48069-static void (*pmi_pal) (void) __read_mostly;
48070+static void (*pmi_start)(void) __read_only;
48071+static void (*pmi_pal) (void) __read_only;
48072 static int depth __read_mostly;
48073 static int vga_compat __read_mostly;
48074 /* --------------------------------------------------------------------- */
48075@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
48076 unsigned int size_vmode;
48077 unsigned int size_remap;
48078 unsigned int size_total;
48079+ void *pmi_code = NULL;
48080
48081 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
48082 return -ENODEV;
48083@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
48084 size_remap = size_total;
48085 vesafb_fix.smem_len = size_remap;
48086
48087-#ifndef __i386__
48088- screen_info.vesapm_seg = 0;
48089-#endif
48090-
48091 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
48092 printk(KERN_WARNING
48093 "vesafb: cannot reserve video memory at 0x%lx\n",
48094@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
48095 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
48096 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
48097
48098+#ifdef __i386__
48099+
48100+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
48101+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
48102+ if (!pmi_code)
48103+#elif !defined(CONFIG_PAX_KERNEXEC)
48104+ if (0)
48105+#endif
48106+
48107+#endif
48108+ screen_info.vesapm_seg = 0;
48109+
48110 if (screen_info.vesapm_seg) {
48111- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
48112- screen_info.vesapm_seg,screen_info.vesapm_off);
48113+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
48114+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
48115 }
48116
48117 if (screen_info.vesapm_seg < 0xc000)
48118@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
48119
48120 if (ypan || pmi_setpal) {
48121 unsigned short *pmi_base;
48122+
48123 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
48124- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
48125- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
48126+
48127+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
48128+ pax_open_kernel();
48129+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
48130+#else
48131+ pmi_code = pmi_base;
48132+#endif
48133+
48134+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
48135+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
48136+
48137+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
48138+ pmi_start = ktva_ktla(pmi_start);
48139+ pmi_pal = ktva_ktla(pmi_pal);
48140+ pax_close_kernel();
48141+#endif
48142+
48143 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
48144 if (pmi_base[3]) {
48145 printk(KERN_INFO "vesafb: pmi: ports = ");
48146@@ -472,8 +498,11 @@ static int __init vesafb_probe(struct platform_device *dev)
48147 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
48148 (ypan ? FBINFO_HWACCEL_YPAN : 0);
48149
48150- if (!ypan)
48151- info->fbops->fb_pan_display = NULL;
48152+ if (!ypan) {
48153+ pax_open_kernel();
48154+ *(void **)&info->fbops->fb_pan_display = NULL;
48155+ pax_close_kernel();
48156+ }
48157
48158 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
48159 err = -ENOMEM;
48160@@ -488,6 +517,11 @@ static int __init vesafb_probe(struct platform_device *dev)
48161 info->node, info->fix.id);
48162 return 0;
48163 err:
48164+
48165+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
48166+ module_free_exec(NULL, pmi_code);
48167+#endif
48168+
48169 if (info->screen_base)
48170 iounmap(info->screen_base);
48171 framebuffer_release(info);
48172diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
48173index 88714ae..16c2e11 100644
48174--- a/drivers/video/via/via_clock.h
48175+++ b/drivers/video/via/via_clock.h
48176@@ -56,7 +56,7 @@ struct via_clock {
48177
48178 void (*set_engine_pll_state)(u8 state);
48179 void (*set_engine_pll)(struct via_pll_config config);
48180-};
48181+} __no_const;
48182
48183
48184 static inline u32 get_pll_internal_frequency(u32 ref_freq,
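Editor's note (sketch; assumes constify-plugin behaviour not spelled out in the patch): the constify plugin turns any struct composed purely of function pointers into a read-only object. struct via_clock is populated at probe time, so it opts out with __no_const and stays writable without the pax_open_kernel() bracket used in the framebuffer hunks:

	/* Illustrative declaration mirroring the hunk above. */
	struct via_clock {
		void (*set_engine_pll_state)(u8 state);
		void (*set_engine_pll)(struct via_pll_config config);
		/* ... */
	} __no_const;	/* opt out of constification: filled in at runtime */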
48185diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
48186index fef20db..d28b1ab 100644
48187--- a/drivers/xen/xenfs/xenstored.c
48188+++ b/drivers/xen/xenfs/xenstored.c
48189@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
48190 static int xsd_kva_open(struct inode *inode, struct file *file)
48191 {
48192 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
48193+#ifdef CONFIG_GRKERNSEC_HIDESYM
48194+ NULL);
48195+#else
48196 xen_store_interface);
48197+#endif
48198+
48199 if (!file->private_data)
48200 return -ENOMEM;
48201 return 0;
48202diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
48203index 890bed5..17ae73e 100644
48204--- a/fs/9p/vfs_inode.c
48205+++ b/fs/9p/vfs_inode.c
48206@@ -1329,7 +1329,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
48207 void
48208 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
48209 {
48210- char *s = nd_get_link(nd);
48211+ const char *s = nd_get_link(nd);
48212
48213 p9_debug(P9_DEBUG_VFS, " %s %s\n",
48214 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
48215diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
48216index 0efd152..b5802ad 100644
48217--- a/fs/Kconfig.binfmt
48218+++ b/fs/Kconfig.binfmt
48219@@ -89,7 +89,7 @@ config HAVE_AOUT
48220
48221 config BINFMT_AOUT
48222 tristate "Kernel support for a.out and ECOFF binaries"
48223- depends on HAVE_AOUT
48224+ depends on HAVE_AOUT && BROKEN
48225 ---help---
48226 A.out (Assembler.OUTput) is a set of formats for libraries and
48227 executables used in the earliest versions of UNIX. Linux used
48228diff --git a/fs/aio.c b/fs/aio.c
48229index 71f613c..9d01f1f 100644
48230--- a/fs/aio.c
48231+++ b/fs/aio.c
48232@@ -111,7 +111,7 @@ static int aio_setup_ring(struct kioctx *ctx)
48233 size += sizeof(struct io_event) * nr_events;
48234 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
48235
48236- if (nr_pages < 0)
48237+ if (nr_pages <= 0)
48238 return -EINVAL;
48239
48240 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
48241@@ -1373,18 +1373,19 @@ static ssize_t aio_fsync(struct kiocb *iocb)
48242 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
48243 {
48244 ssize_t ret;
48245+ struct iovec iovstack;
48246
48247 #ifdef CONFIG_COMPAT
48248 if (compat)
48249 ret = compat_rw_copy_check_uvector(type,
48250 (struct compat_iovec __user *)kiocb->ki_buf,
48251- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
48252+ kiocb->ki_nbytes, 1, &iovstack,
48253 &kiocb->ki_iovec);
48254 else
48255 #endif
48256 ret = rw_copy_check_uvector(type,
48257 (struct iovec __user *)kiocb->ki_buf,
48258- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
48259+ kiocb->ki_nbytes, 1, &iovstack,
48260 &kiocb->ki_iovec);
48261 if (ret < 0)
48262 goto out;
48263@@ -1393,6 +1394,10 @@ static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
48264 if (ret < 0)
48265 goto out;
48266
48267+ if (kiocb->ki_iovec == &iovstack) {
48268+ kiocb->ki_inline_vec = iovstack;
48269+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
48270+ }
48271 kiocb->ki_nr_segs = kiocb->ki_nbytes;
48272 kiocb->ki_cur_seg = 0;
48273 /* ki_nbytes/left now reflect bytes instead of segs */
48274diff --git a/fs/attr.c b/fs/attr.c
48275index 1449adb..a2038c2 100644
48276--- a/fs/attr.c
48277+++ b/fs/attr.c
48278@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
48279 unsigned long limit;
48280
48281 limit = rlimit(RLIMIT_FSIZE);
48282+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
48283 if (limit != RLIM_INFINITY && offset > limit)
48284 goto out_sig;
48285 if (offset > inode->i_sb->s_maxbytes)
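Editor's note, a sketch of the learning hook above (assuming grsecurity RBAC learning-mode semantics; not part of the patch): gr_learn_resource() records how close the current task comes to a resource limit, and gradm's learning mode later folds the recorded high-water marks into generated policy. The hook is placed immediately before each limit check so that both denials and near-misses are observed:

	/* Pattern used throughout the patch for rlimit checks. */
	limit = rlimit(RLIMIT_FSIZE);
	gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
	if (limit != RLIM_INFINITY && offset > limit)
		goto out_sig;	/* over the limit: SIGXFSZ and -EFBIG */

The binfmt_aout and binfmt_elf hunks below add the same hook in front of their RLIMIT_DATA and RLIMIT_CORE checks.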
48286diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
48287index 03bc1d3..6205356 100644
48288--- a/fs/autofs4/waitq.c
48289+++ b/fs/autofs4/waitq.c
48290@@ -61,7 +61,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
48291 {
48292 unsigned long sigpipe, flags;
48293 mm_segment_t fs;
48294- const char *data = (const char *)addr;
48295+ const char __user *data = (const char __force_user *)addr;
48296 ssize_t wr = 0;
48297
48298 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
48299@@ -348,6 +348,10 @@ static int validate_request(struct autofs_wait_queue **wait,
48300 return 1;
48301 }
48302
48303+#ifdef CONFIG_GRKERNSEC_HIDESYM
48304+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
48305+#endif
48306+
48307 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
48308 enum autofs_notify notify)
48309 {
48310@@ -381,7 +385,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
48311
48312 /* If this is a direct mount request create a dummy name */
48313 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
48314+#ifdef CONFIG_GRKERNSEC_HIDESYM
48315+ /* this name does get written to userland via autofs4_write() */
48316+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
48317+#else
48318 qstr.len = sprintf(name, "%p", dentry);
48319+#endif
48320 else {
48321 qstr.len = autofs4_getpath(sbi, dentry, &name);
48322 if (!qstr.len) {
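Editor's note (sketch; not part of the patch): CONFIG_GRKERNSEC_HIDESYM requires that nothing reaching userspace encode a kernel address, and the "%p" dummy name above is written to the automount daemon via autofs4_write(). A per-boot counter gives each direct-mount request a unique name without leaking the dentry pointer; the xenstored hunk earlier applies the same rule by printing NULL instead of xen_store_interface:

	static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);

	/* Unique, address-free dummy name for a direct mount request. */
	static size_t make_dummy_name(char *name)
	{
		return sprintf(name, "%08x",
			       atomic_inc_return_unchecked(&autofs_dummy_name_id));
	}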
48323diff --git a/fs/befs/endian.h b/fs/befs/endian.h
48324index 2722387..c8dd2a7 100644
48325--- a/fs/befs/endian.h
48326+++ b/fs/befs/endian.h
48327@@ -11,7 +11,7 @@
48328
48329 #include <asm/byteorder.h>
48330
48331-static inline u64
48332+static inline u64 __intentional_overflow(-1)
48333 fs64_to_cpu(const struct super_block *sb, fs64 n)
48334 {
48335 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
48336@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
48337 return (__force fs64)cpu_to_be64(n);
48338 }
48339
48340-static inline u32
48341+static inline u32 __intentional_overflow(-1)
48342 fs32_to_cpu(const struct super_block *sb, fs32 n)
48343 {
48344 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
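Editor's note (sketch; assumes size_overflow-plugin semantics): the size_overflow plugin instruments integer arithmetic and reports unexpected wraparound. Endianness helpers legitimately produce every possible bit pattern, so they are whitelisted with __intentional_overflow(-1), which the plugin reads as "do not instrument this function's arithmetic":

	/* Whitelisted conversion helper, mirroring the hunk above. */
	static inline u64 __intentional_overflow(-1)
	fs64_to_cpu(const struct super_block *sb, fs64 n)
	{
		if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
			return le64_to_cpu((__force __le64)n);
		else
			return be64_to_cpu((__force __be64)n);
	}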
48345diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
48346index 2b3bda8..6a2d4be 100644
48347--- a/fs/befs/linuxvfs.c
48348+++ b/fs/befs/linuxvfs.c
48349@@ -510,7 +510,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
48350 {
48351 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
48352 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
48353- char *link = nd_get_link(nd);
48354+ const char *link = nd_get_link(nd);
48355 if (!IS_ERR(link))
48356 kfree(link);
48357 }
48358diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
48359index 6043567..16a9239 100644
48360--- a/fs/binfmt_aout.c
48361+++ b/fs/binfmt_aout.c
48362@@ -16,6 +16,7 @@
48363 #include <linux/string.h>
48364 #include <linux/fs.h>
48365 #include <linux/file.h>
48366+#include <linux/security.h>
48367 #include <linux/stat.h>
48368 #include <linux/fcntl.h>
48369 #include <linux/ptrace.h>
48370@@ -59,6 +60,8 @@ static int aout_core_dump(struct coredump_params *cprm)
48371 #endif
48372 # define START_STACK(u) ((void __user *)u.start_stack)
48373
48374+ memset(&dump, 0, sizeof(dump));
48375+
48376 fs = get_fs();
48377 set_fs(KERNEL_DS);
48378 has_dumped = 1;
48379@@ -70,10 +73,12 @@ static int aout_core_dump(struct coredump_params *cprm)
48380
48381 /* If the size of the dump file exceeds the rlimit, then see what would happen
48382 if we wrote the stack, but not the data area. */
48383+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
48384 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
48385 dump.u_dsize = 0;
48386
48387 /* Make sure we have enough room to write the stack and data areas. */
48388+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
48389 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
48390 dump.u_ssize = 0;
48391
48392@@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
48393 rlim = rlimit(RLIMIT_DATA);
48394 if (rlim >= RLIM_INFINITY)
48395 rlim = ~0;
48396+
48397+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
48398 if (ex.a_data + ex.a_bss > rlim)
48399 return -ENOMEM;
48400
48401@@ -268,6 +275,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
48402
48403 install_exec_creds(bprm);
48404
48405+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
48406+ current->mm->pax_flags = 0UL;
48407+#endif
48408+
48409+#ifdef CONFIG_PAX_PAGEEXEC
48410+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
48411+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
48412+
48413+#ifdef CONFIG_PAX_EMUTRAMP
48414+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
48415+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
48416+#endif
48417+
48418+#ifdef CONFIG_PAX_MPROTECT
48419+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
48420+ current->mm->pax_flags |= MF_PAX_MPROTECT;
48421+#endif
48422+
48423+ }
48424+#endif
48425+
48426 if (N_MAGIC(ex) == OMAGIC) {
48427 unsigned long text_addr, map_size;
48428 loff_t pos;
48429@@ -333,7 +361,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
48430 }
48431
48432 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
48433- PROT_READ | PROT_WRITE | PROT_EXEC,
48434+ PROT_READ | PROT_WRITE,
48435 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
48436 fd_offset + ex.a_text);
48437 if (error != N_DATADDR(ex)) {
48438diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
48439index 0c42cdb..b62581e9 100644
48440--- a/fs/binfmt_elf.c
48441+++ b/fs/binfmt_elf.c
48442@@ -33,6 +33,7 @@
48443 #include <linux/elf.h>
48444 #include <linux/utsname.h>
48445 #include <linux/coredump.h>
48446+#include <linux/xattr.h>
48447 #include <asm/uaccess.h>
48448 #include <asm/param.h>
48449 #include <asm/page.h>
48450@@ -59,6 +60,10 @@ static int elf_core_dump(struct coredump_params *cprm);
48451 #define elf_core_dump NULL
48452 #endif
48453
48454+#ifdef CONFIG_PAX_MPROTECT
48455+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
48456+#endif
48457+
48458 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
48459 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
48460 #else
48461@@ -78,6 +83,11 @@ static struct linux_binfmt elf_format = {
48462 .load_binary = load_elf_binary,
48463 .load_shlib = load_elf_library,
48464 .core_dump = elf_core_dump,
48465+
48466+#ifdef CONFIG_PAX_MPROTECT
48467+ .handle_mprotect= elf_handle_mprotect,
48468+#endif
48469+
48470 .min_coredump = ELF_EXEC_PAGESIZE,
48471 };
48472
48473@@ -85,6 +95,8 @@ static struct linux_binfmt elf_format = {
48474
48475 static int set_brk(unsigned long start, unsigned long end)
48476 {
48477+ unsigned long e = end;
48478+
48479 start = ELF_PAGEALIGN(start);
48480 end = ELF_PAGEALIGN(end);
48481 if (end > start) {
48482@@ -93,7 +105,7 @@ static int set_brk(unsigned long start, unsigned long end)
48483 if (BAD_ADDR(addr))
48484 return addr;
48485 }
48486- current->mm->start_brk = current->mm->brk = end;
48487+ current->mm->start_brk = current->mm->brk = e;
48488 return 0;
48489 }
48490
48491@@ -154,12 +166,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
48492 elf_addr_t __user *u_rand_bytes;
48493 const char *k_platform = ELF_PLATFORM;
48494 const char *k_base_platform = ELF_BASE_PLATFORM;
48495- unsigned char k_rand_bytes[16];
48496+ u32 k_rand_bytes[4];
48497 int items;
48498 elf_addr_t *elf_info;
48499 int ei_index = 0;
48500 const struct cred *cred = current_cred();
48501 struct vm_area_struct *vma;
48502+ unsigned long saved_auxv[AT_VECTOR_SIZE];
48503
48504 /*
48505 * In some cases (e.g. Hyper-Threading), we want to avoid L1
48506@@ -201,8 +214,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
48507 * Generate 16 random bytes for userspace PRNG seeding.
48508 */
48509 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
48510- u_rand_bytes = (elf_addr_t __user *)
48511- STACK_ALLOC(p, sizeof(k_rand_bytes));
48512+ srandom32(k_rand_bytes[0] ^ random32());
48513+ srandom32(k_rand_bytes[1] ^ random32());
48514+ srandom32(k_rand_bytes[2] ^ random32());
48515+ srandom32(k_rand_bytes[3] ^ random32());
48516+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
48517+ u_rand_bytes = (elf_addr_t __user *) p;
48518 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
48519 return -EFAULT;
48520
48521@@ -314,9 +331,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
48522 return -EFAULT;
48523 current->mm->env_end = p;
48524
48525+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
48526+
48527 /* Put the elf_info on the stack in the right place. */
48528 sp = (elf_addr_t __user *)envp + 1;
48529- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
48530+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
48531 return -EFAULT;
48532 return 0;
48533 }
48534@@ -380,15 +399,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
48535 an ELF header */
48536
48537 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
48538- struct file *interpreter, unsigned long *interp_map_addr,
48539- unsigned long no_base)
48540+ struct file *interpreter, unsigned long no_base)
48541 {
48542 struct elf_phdr *elf_phdata;
48543 struct elf_phdr *eppnt;
48544- unsigned long load_addr = 0;
48545+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
48546 int load_addr_set = 0;
48547 unsigned long last_bss = 0, elf_bss = 0;
48548- unsigned long error = ~0UL;
48549+ unsigned long error = -EINVAL;
48550 unsigned long total_size;
48551 int retval, i, size;
48552
48553@@ -434,6 +452,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
48554 goto out_close;
48555 }
48556
48557+#ifdef CONFIG_PAX_SEGMEXEC
48558+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
48559+ pax_task_size = SEGMEXEC_TASK_SIZE;
48560+#endif
48561+
48562 eppnt = elf_phdata;
48563 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
48564 if (eppnt->p_type == PT_LOAD) {
48565@@ -457,8 +480,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
48566 map_addr = elf_map(interpreter, load_addr + vaddr,
48567 eppnt, elf_prot, elf_type, total_size);
48568 total_size = 0;
48569- if (!*interp_map_addr)
48570- *interp_map_addr = map_addr;
48571 error = map_addr;
48572 if (BAD_ADDR(map_addr))
48573 goto out_close;
48574@@ -477,8 +498,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
48575 k = load_addr + eppnt->p_vaddr;
48576 if (BAD_ADDR(k) ||
48577 eppnt->p_filesz > eppnt->p_memsz ||
48578- eppnt->p_memsz > TASK_SIZE ||
48579- TASK_SIZE - eppnt->p_memsz < k) {
48580+ eppnt->p_memsz > pax_task_size ||
48581+ pax_task_size - eppnt->p_memsz < k) {
48582 error = -ENOMEM;
48583 goto out_close;
48584 }
48585@@ -530,6 +551,315 @@ out:
48586 return error;
48587 }
48588
48589+#ifdef CONFIG_PAX_PT_PAX_FLAGS
48590+#ifdef CONFIG_PAX_SOFTMODE
48591+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
48592+{
48593+ unsigned long pax_flags = 0UL;
48594+
48595+#ifdef CONFIG_PAX_PAGEEXEC
48596+ if (elf_phdata->p_flags & PF_PAGEEXEC)
48597+ pax_flags |= MF_PAX_PAGEEXEC;
48598+#endif
48599+
48600+#ifdef CONFIG_PAX_SEGMEXEC
48601+ if (elf_phdata->p_flags & PF_SEGMEXEC)
48602+ pax_flags |= MF_PAX_SEGMEXEC;
48603+#endif
48604+
48605+#ifdef CONFIG_PAX_EMUTRAMP
48606+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
48607+ pax_flags |= MF_PAX_EMUTRAMP;
48608+#endif
48609+
48610+#ifdef CONFIG_PAX_MPROTECT
48611+ if (elf_phdata->p_flags & PF_MPROTECT)
48612+ pax_flags |= MF_PAX_MPROTECT;
48613+#endif
48614+
48615+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
48616+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
48617+ pax_flags |= MF_PAX_RANDMMAP;
48618+#endif
48619+
48620+ return pax_flags;
48621+}
48622+#endif
48623+
48624+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
48625+{
48626+ unsigned long pax_flags = 0UL;
48627+
48628+#ifdef CONFIG_PAX_PAGEEXEC
48629+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
48630+ pax_flags |= MF_PAX_PAGEEXEC;
48631+#endif
48632+
48633+#ifdef CONFIG_PAX_SEGMEXEC
48634+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
48635+ pax_flags |= MF_PAX_SEGMEXEC;
48636+#endif
48637+
48638+#ifdef CONFIG_PAX_EMUTRAMP
48639+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
48640+ pax_flags |= MF_PAX_EMUTRAMP;
48641+#endif
48642+
48643+#ifdef CONFIG_PAX_MPROTECT
48644+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
48645+ pax_flags |= MF_PAX_MPROTECT;
48646+#endif
48647+
48648+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
48649+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
48650+ pax_flags |= MF_PAX_RANDMMAP;
48651+#endif
48652+
48653+ return pax_flags;
48654+}
48655+#endif
48656+
48657+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
48658+#ifdef CONFIG_PAX_SOFTMODE
48659+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
48660+{
48661+ unsigned long pax_flags = 0UL;
48662+
48663+#ifdef CONFIG_PAX_PAGEEXEC
48664+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
48665+ pax_flags |= MF_PAX_PAGEEXEC;
48666+#endif
48667+
48668+#ifdef CONFIG_PAX_SEGMEXEC
48669+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
48670+ pax_flags |= MF_PAX_SEGMEXEC;
48671+#endif
48672+
48673+#ifdef CONFIG_PAX_EMUTRAMP
48674+ if (pax_flags_softmode & MF_PAX_EMUTRAMP)
48675+ pax_flags |= MF_PAX_EMUTRAMP;
48676+#endif
48677+
48678+#ifdef CONFIG_PAX_MPROTECT
48679+ if (pax_flags_softmode & MF_PAX_MPROTECT)
48680+ pax_flags |= MF_PAX_MPROTECT;
48681+#endif
48682+
48683+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
48684+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
48685+ pax_flags |= MF_PAX_RANDMMAP;
48686+#endif
48687+
48688+ return pax_flags;
48689+}
48690+#endif
48691+
48692+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
48693+{
48694+ unsigned long pax_flags = 0UL;
48695+
48696+#ifdef CONFIG_PAX_PAGEEXEC
48697+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
48698+ pax_flags |= MF_PAX_PAGEEXEC;
48699+#endif
48700+
48701+#ifdef CONFIG_PAX_SEGMEXEC
48702+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
48703+ pax_flags |= MF_PAX_SEGMEXEC;
48704+#endif
48705+
48706+#ifdef CONFIG_PAX_EMUTRAMP
48707+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
48708+ pax_flags |= MF_PAX_EMUTRAMP;
48709+#endif
48710+
48711+#ifdef CONFIG_PAX_MPROTECT
48712+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
48713+ pax_flags |= MF_PAX_MPROTECT;
48714+#endif
48715+
48716+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
48717+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
48718+ pax_flags |= MF_PAX_RANDMMAP;
48719+#endif
48720+
48721+ return pax_flags;
48722+}
48723+#endif
48724+
48725+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
48726+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
48727+{
48728+ unsigned long pax_flags = 0UL;
48729+
48730+#ifdef CONFIG_PAX_EI_PAX
48731+
48732+#ifdef CONFIG_PAX_PAGEEXEC
48733+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
48734+ pax_flags |= MF_PAX_PAGEEXEC;
48735+#endif
48736+
48737+#ifdef CONFIG_PAX_SEGMEXEC
48738+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
48739+ pax_flags |= MF_PAX_SEGMEXEC;
48740+#endif
48741+
48742+#ifdef CONFIG_PAX_EMUTRAMP
48743+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
48744+ pax_flags |= MF_PAX_EMUTRAMP;
48745+#endif
48746+
48747+#ifdef CONFIG_PAX_MPROTECT
48748+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
48749+ pax_flags |= MF_PAX_MPROTECT;
48750+#endif
48751+
48752+#ifdef CONFIG_PAX_ASLR
48753+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
48754+ pax_flags |= MF_PAX_RANDMMAP;
48755+#endif
48756+
48757+#else
48758+
48759+#ifdef CONFIG_PAX_PAGEEXEC
48760+ pax_flags |= MF_PAX_PAGEEXEC;
48761+#endif
48762+
48763+#ifdef CONFIG_PAX_SEGMEXEC
48764+ pax_flags |= MF_PAX_SEGMEXEC;
48765+#endif
48766+
48767+#ifdef CONFIG_PAX_MPROTECT
48768+ pax_flags |= MF_PAX_MPROTECT;
48769+#endif
48770+
48771+#ifdef CONFIG_PAX_RANDMMAP
48772+ if (randomize_va_space)
48773+ pax_flags |= MF_PAX_RANDMMAP;
48774+#endif
48775+
48776+#endif
48777+
48778+ return pax_flags;
48779+}
48780+
48781+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
48782+{
48783+
48784+#ifdef CONFIG_PAX_PT_PAX_FLAGS
48785+ unsigned long i;
48786+
48787+ for (i = 0UL; i < elf_ex->e_phnum; i++)
48788+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
48789+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
48790+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
48791+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
48792+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
48793+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
48794+ return ~0UL;
48795+
48796+#ifdef CONFIG_PAX_SOFTMODE
48797+ if (pax_softmode)
48798+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
48799+ else
48800+#endif
48801+
48802+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
48803+ break;
48804+ }
48805+#endif
48806+
48807+ return ~0UL;
48808+}
48809+
48810+static unsigned long pax_parse_xattr_pax(struct file * const file)
48811+{
48812+
48813+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
48814+ ssize_t xattr_size, i;
48815+ unsigned char xattr_value[5];
48816+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
48817+
48818+ xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
48819+ if (xattr_size <= 0 || xattr_size > 5)
48820+ return ~0UL;
48821+
48822+ for (i = 0; i < xattr_size; i++)
48823+ switch (xattr_value[i]) {
48824+ default:
48825+ return ~0UL;
48826+
48827+#define parse_flag(option1, option2, flag) \
48828+ case option1: \
48829+ if (pax_flags_hardmode & MF_PAX_##flag) \
48830+ return ~0UL; \
48831+ pax_flags_hardmode |= MF_PAX_##flag; \
48832+ break; \
48833+ case option2: \
48834+ if (pax_flags_softmode & MF_PAX_##flag) \
48835+ return ~0UL; \
48836+ pax_flags_softmode |= MF_PAX_##flag; \
48837+ break;
48838+
48839+ parse_flag('p', 'P', PAGEEXEC);
48840+ parse_flag('e', 'E', EMUTRAMP);
48841+ parse_flag('m', 'M', MPROTECT);
48842+ parse_flag('r', 'R', RANDMMAP);
48843+ parse_flag('s', 'S', SEGMEXEC);
48844+
48845+#undef parse_flag
48846+ }
48847+
48848+ if (pax_flags_hardmode & pax_flags_softmode)
48849+ return ~0UL;
48850+
48851+#ifdef CONFIG_PAX_SOFTMODE
48852+ if (pax_softmode)
48853+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
48854+ else
48855+#endif
48856+
48857+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
48858+#else
48859+ return ~0UL;
48860+#endif
48861+
48862+}
48863+
48864+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
48865+{
48866+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
48867+
48868+ pax_flags = pax_parse_ei_pax(elf_ex);
48869+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
48870+ xattr_pax_flags = pax_parse_xattr_pax(file);
48871+
48872+ if (pt_pax_flags == ~0UL)
48873+ pt_pax_flags = xattr_pax_flags;
48874+ else if (xattr_pax_flags == ~0UL)
48875+ xattr_pax_flags = pt_pax_flags;
48876+ if (pt_pax_flags != xattr_pax_flags)
48877+ return -EINVAL;
48878+ if (pt_pax_flags != ~0UL)
48879+ pax_flags = pt_pax_flags;
48880+
48881+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
48882+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
48883+ if ((__supported_pte_mask & _PAGE_NX))
48884+ pax_flags &= ~MF_PAX_SEGMEXEC;
48885+ else
48886+ pax_flags &= ~MF_PAX_PAGEEXEC;
48887+ }
48888+#endif
48889+
48890+ if (0 > pax_check_flags(&pax_flags))
48891+ return -EINVAL;
48892+
48893+ current->mm->pax_flags = pax_flags;
48894+ return 0;
48895+}
48896+#endif
48897+
48898 /*
48899 * These are the functions used to load ELF style executables and shared
48900 * libraries. There is no binary dependent code anywhere else.
48901@@ -546,6 +876,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
48902 {
48903 unsigned int random_variable = 0;
48904
48905+#ifdef CONFIG_PAX_RANDUSTACK
48906+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
48907+ return stack_top - current->mm->delta_stack;
48908+#endif
48909+
48910 if ((current->flags & PF_RANDOMIZE) &&
48911 !(current->personality & ADDR_NO_RANDOMIZE)) {
48912 random_variable = get_random_int() & STACK_RND_MASK;
48913@@ -564,7 +899,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
48914 unsigned long load_addr = 0, load_bias = 0;
48915 int load_addr_set = 0;
48916 char * elf_interpreter = NULL;
48917- unsigned long error;
48918+ unsigned long error = 0;
48919 struct elf_phdr *elf_ppnt, *elf_phdata;
48920 unsigned long elf_bss, elf_brk;
48921 int retval, i;
48922@@ -574,12 +909,12 @@ static int load_elf_binary(struct linux_binprm *bprm)
48923 unsigned long start_code, end_code, start_data, end_data;
48924 unsigned long reloc_func_desc __maybe_unused = 0;
48925 int executable_stack = EXSTACK_DEFAULT;
48926- unsigned long def_flags = 0;
48927 struct pt_regs *regs = current_pt_regs();
48928 struct {
48929 struct elfhdr elf_ex;
48930 struct elfhdr interp_elf_ex;
48931 } *loc;
48932+ unsigned long pax_task_size = TASK_SIZE;
48933
48934 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
48935 if (!loc) {
48936@@ -715,11 +1050,81 @@ static int load_elf_binary(struct linux_binprm *bprm)
48937 goto out_free_dentry;
48938
48939 /* OK, This is the point of no return */
48940- current->mm->def_flags = def_flags;
48941+
48942+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
48943+ current->mm->pax_flags = 0UL;
48944+#endif
48945+
48946+#ifdef CONFIG_PAX_DLRESOLVE
48947+ current->mm->call_dl_resolve = 0UL;
48948+#endif
48949+
48950+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
48951+ current->mm->call_syscall = 0UL;
48952+#endif
48953+
48954+#ifdef CONFIG_PAX_ASLR
48955+ current->mm->delta_mmap = 0UL;
48956+ current->mm->delta_stack = 0UL;
48957+#endif
48958+
48959+ current->mm->def_flags = 0;
48960+
48961+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
48962+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
48963+ send_sig(SIGKILL, current, 0);
48964+ goto out_free_dentry;
48965+ }
48966+#endif
48967+
48968+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
48969+ pax_set_initial_flags(bprm);
48970+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
48971+ if (pax_set_initial_flags_func)
48972+ (pax_set_initial_flags_func)(bprm);
48973+#endif
48974+
48975+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
48976+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
48977+ current->mm->context.user_cs_limit = PAGE_SIZE;
48978+ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
48979+ }
48980+#endif
48981+
48982+#ifdef CONFIG_PAX_SEGMEXEC
48983+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
48984+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
48985+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
48986+ pax_task_size = SEGMEXEC_TASK_SIZE;
48987+ current->mm->def_flags |= VM_NOHUGEPAGE;
48988+ }
48989+#endif
48990+
48991+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
48992+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
48993+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
48994+ put_cpu();
48995+ }
48996+#endif
48997
48998 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
48999 may depend on the personality. */
49000 SET_PERSONALITY(loc->elf_ex);
49001+
49002+#ifdef CONFIG_PAX_ASLR
49003+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
49004+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
49005+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
49006+ }
49007+#endif
49008+
49009+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
49010+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
49011+ executable_stack = EXSTACK_DISABLE_X;
49012+ current->personality &= ~READ_IMPLIES_EXEC;
49013+ } else
49014+#endif
49015+
49016 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
49017 current->personality |= READ_IMPLIES_EXEC;
49018
49019@@ -810,6 +1215,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
49020 #else
49021 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
49022 #endif
49023+
49024+#ifdef CONFIG_PAX_RANDMMAP
49025+ /* PaX: randomize base address at the default exe base if requested */
49026+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
49027+#ifdef CONFIG_SPARC64
49028+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
49029+#else
49030+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
49031+#endif
49032+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
49033+ elf_flags |= MAP_FIXED;
49034+ }
49035+#endif
49036+
49037 }
49038
49039 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
49040@@ -842,9 +1261,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
49041 * allowed task size. Note that p_filesz must always be
49042 * <= p_memsz so it is only necessary to check p_memsz.
49043 */
49044- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
49045- elf_ppnt->p_memsz > TASK_SIZE ||
49046- TASK_SIZE - elf_ppnt->p_memsz < k) {
49047+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
49048+ elf_ppnt->p_memsz > pax_task_size ||
49049+ pax_task_size - elf_ppnt->p_memsz < k) {
49050 /* set_brk can never work. Avoid overflows. */
49051 send_sig(SIGKILL, current, 0);
49052 retval = -EINVAL;
49053@@ -883,17 +1302,44 @@ static int load_elf_binary(struct linux_binprm *bprm)
49054 goto out_free_dentry;
49055 }
49056 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
49057- send_sig(SIGSEGV, current, 0);
49058- retval = -EFAULT; /* Nobody gets to see this, but.. */
49059- goto out_free_dentry;
49060+ /*
49061+ * This bss-zeroing can fail if the ELF
49062+ * file specifies odd protections. So
49063+ * we don't check the return value
49064+ */
49065 }
49066
49067+#ifdef CONFIG_PAX_RANDMMAP
49068+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
49069+ unsigned long start, size, flags, vm_flags;
49070+
49071+ start = ELF_PAGEALIGN(elf_brk);
49072+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
49073+ flags = MAP_FIXED | MAP_PRIVATE;
49074+ vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
49075+
49076+ down_write(&current->mm->mmap_sem);
49077+ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
49078+ retval = -ENOMEM;
49079+ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
49080+// if (current->personality & ADDR_NO_RANDOMIZE)
49081+// vm_flags |= VM_READ | VM_MAYREAD;
49082+ start = mmap_region(NULL, start, PAGE_ALIGN(size), flags, vm_flags, 0);
49083+ retval = IS_ERR_VALUE(start) ? start : 0;
49084+ }
49085+ up_write(&current->mm->mmap_sem);
49086+ if (retval == 0)
49087+ retval = set_brk(start + size, start + size + PAGE_SIZE);
49088+ if (retval < 0) {
49089+ send_sig(SIGKILL, current, 0);
49090+ goto out_free_dentry;
49091+ }
49092+ }
49093+#endif
49094+
49095 if (elf_interpreter) {
49096- unsigned long interp_map_addr = 0;
49097-
49098 elf_entry = load_elf_interp(&loc->interp_elf_ex,
49099 interpreter,
49100- &interp_map_addr,
49101 load_bias);
49102 if (!IS_ERR((void *)elf_entry)) {
49103 /*
49104@@ -1115,7 +1561,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
49105 * Decide what to dump of a segment, part, all or none.
49106 */
49107 static unsigned long vma_dump_size(struct vm_area_struct *vma,
49108- unsigned long mm_flags)
49109+ unsigned long mm_flags, long signr)
49110 {
49111 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
49112
49113@@ -1152,7 +1598,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
49114 if (vma->vm_file == NULL)
49115 return 0;
49116
49117- if (FILTER(MAPPED_PRIVATE))
49118+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
49119 goto whole;
49120
49121 /*
49122@@ -1374,9 +1820,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
49123 {
49124 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
49125 int i = 0;
49126- do
49127+ do {
49128 i += 2;
49129- while (auxv[i - 2] != AT_NULL);
49130+ } while (auxv[i - 2] != AT_NULL);
49131 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
49132 }
49133
49134@@ -2006,14 +2452,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
49135 }
49136
49137 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
49138- unsigned long mm_flags)
49139+ struct coredump_params *cprm)
49140 {
49141 struct vm_area_struct *vma;
49142 size_t size = 0;
49143
49144 for (vma = first_vma(current, gate_vma); vma != NULL;
49145 vma = next_vma(vma, gate_vma))
49146- size += vma_dump_size(vma, mm_flags);
49147+ size += vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
49148 return size;
49149 }
49150
49151@@ -2107,7 +2553,7 @@ static int elf_core_dump(struct coredump_params *cprm)
49152
49153 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
49154
49155- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
49156+ offset += elf_core_vma_data_size(gate_vma, cprm);
49157 offset += elf_core_extra_data_size();
49158 e_shoff = offset;
49159
49160@@ -2121,10 +2567,12 @@ static int elf_core_dump(struct coredump_params *cprm)
49161 offset = dataoff;
49162
49163 size += sizeof(*elf);
49164+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
49165 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
49166 goto end_coredump;
49167
49168 size += sizeof(*phdr4note);
49169+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
49170 if (size > cprm->limit
49171 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
49172 goto end_coredump;
49173@@ -2138,7 +2586,7 @@ static int elf_core_dump(struct coredump_params *cprm)
49174 phdr.p_offset = offset;
49175 phdr.p_vaddr = vma->vm_start;
49176 phdr.p_paddr = 0;
49177- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
49178+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
49179 phdr.p_memsz = vma->vm_end - vma->vm_start;
49180 offset += phdr.p_filesz;
49181 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
49182@@ -2149,6 +2597,7 @@ static int elf_core_dump(struct coredump_params *cprm)
49183 phdr.p_align = ELF_EXEC_PAGESIZE;
49184
49185 size += sizeof(phdr);
49186+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
49187 if (size > cprm->limit
49188 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
49189 goto end_coredump;
49190@@ -2173,7 +2622,7 @@ static int elf_core_dump(struct coredump_params *cprm)
49191 unsigned long addr;
49192 unsigned long end;
49193
49194- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
49195+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
49196
49197 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
49198 struct page *page;
49199@@ -2182,6 +2631,7 @@ static int elf_core_dump(struct coredump_params *cprm)
49200 page = get_dump_page(addr);
49201 if (page) {
49202 void *kaddr = kmap(page);
49203+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
49204 stop = ((size += PAGE_SIZE) > cprm->limit) ||
49205 !dump_write(cprm->file, kaddr,
49206 PAGE_SIZE);
49207@@ -2199,6 +2649,7 @@ static int elf_core_dump(struct coredump_params *cprm)
49208
49209 if (e_phnum == PN_XNUM) {
49210 size += sizeof(*shdr4extnum);
49211+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
49212 if (size > cprm->limit
49213 || !dump_write(cprm->file, shdr4extnum,
49214 sizeof(*shdr4extnum)))
49215@@ -2219,6 +2670,97 @@ out:
49216
49217 #endif /* CONFIG_ELF_CORE */
49218
49219+#ifdef CONFIG_PAX_MPROTECT
49220+/* PaX: non-PIC ELF libraries need relocations on their executable segments
49221+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
49222+ * we'll remove VM_MAYWRITE for good on RELRO segments.
49223+ *
49224+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
49225+ * basis because we want to allow the common case and not the special ones.
49226+ */
49227+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
49228+{
49229+ struct elfhdr elf_h;
49230+ struct elf_phdr elf_p;
49231+ unsigned long i;
49232+ unsigned long oldflags;
49233+ bool is_textrel_rw, is_textrel_rx, is_relro;
49234+
49235+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
49236+ return;
49237+
49238+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
49239+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
49240+
49241+#ifdef CONFIG_PAX_ELFRELOCS
49242+ /* possible TEXTREL */
49243+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
49244+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
49245+#else
49246+ is_textrel_rw = false;
49247+ is_textrel_rx = false;
49248+#endif
49249+
49250+ /* possible RELRO */
49251+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
49252+
49253+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
49254+ return;
49255+
49256+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
49257+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
49258+
49259+#ifdef CONFIG_PAX_ETEXECRELOCS
49260+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
49261+#else
49262+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
49263+#endif
49264+
49265+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
49266+ !elf_check_arch(&elf_h) ||
49267+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
49268+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
49269+ return;
49270+
49271+ for (i = 0UL; i < elf_h.e_phnum; i++) {
49272+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
49273+ return;
49274+ switch (elf_p.p_type) {
49275+ case PT_DYNAMIC:
49276+ if (!is_textrel_rw && !is_textrel_rx)
49277+ continue;
49278+ i = 0UL;
49279+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
49280+ elf_dyn dyn;
49281+
49282+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
49283+ return;
49284+ if (dyn.d_tag == DT_NULL)
49285+ return;
49286+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
49287+ gr_log_textrel(vma);
49288+ if (is_textrel_rw)
49289+ vma->vm_flags |= VM_MAYWRITE;
49290+ else
49291+ /* PaX: disallow write access after relocs are done; hopefully no one else needs it... */
49292+ vma->vm_flags &= ~VM_MAYWRITE;
49293+ return;
49294+ }
49295+ i++;
49296+ }
49297+ return;
49298+
49299+ case PT_GNU_RELRO:
49300+ if (!is_relro)
49301+ continue;
49302+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
49303+ vma->vm_flags &= ~VM_MAYWRITE;
49304+ return;
49305+ }
49306+ }
49307+}
49308+#endif
49309+
49310 static int __init init_elf_binfmt(void)
49311 {
49312 register_binfmt(&elf_format);
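
The handler above never trusts the mapping alone: it re-reads the on-disk ELF and re-validates it (magic, arch, phentsize, a bounded e_phnum) before touching vm_flags. For orientation, the same structures can be walked from userspace; the sketch below is a hypothetical analogue of elf_handle_mprotect()'s scan (not part of the patch) that reports PT_GNU_RELRO segments and DT_TEXTREL/DF_TEXTREL markers for a 64-bit ELF, with error handling pared to the minimum.

/* Hypothetical userspace analogue of elf_handle_mprotect()'s scan:
 * find PT_GNU_RELRO segments and text-relocation markers. 64-bit only. */
#include <elf.h>
#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
	Elf64_Ehdr eh;
	Elf64_Phdr ph;
	FILE *f;

	if (argc < 2 || !(f = fopen(argv[1], "rb")))
		return 1;
	if (fread(&eh, sizeof(eh), 1, f) != 1 ||
	    memcmp(eh.e_ident, ELFMAG, SELFMAG))	/* same magic check as above */
		return 1;
	for (int i = 0; i < eh.e_phnum; i++) {
		fseek(f, (long)(eh.e_phoff + i * sizeof(ph)), SEEK_SET);
		if (fread(&ph, sizeof(ph), 1, f) != 1)
			return 1;
		if (ph.p_type == PT_GNU_RELRO)
			printf("RELRO segment at file offset %#lx\n",
			       (unsigned long)ph.p_offset);
		if (ph.p_type != PT_DYNAMIC)
			continue;
		/* same bounded walk as the (i+1) * sizeof(elf_dyn) loop above */
		for (size_t j = 0; (j + 1) * sizeof(Elf64_Dyn) <= ph.p_filesz; j++) {
			Elf64_Dyn dyn;

			fseek(f, (long)(ph.p_offset + j * sizeof(dyn)), SEEK_SET);
			if (fread(&dyn, sizeof(dyn), 1, f) != 1 || dyn.d_tag == DT_NULL)
				break;
			if (dyn.d_tag == DT_TEXTREL ||
			    (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
				puts("text relocations present");
				break;
			}
		}
	}
	fclose(f);
	return 0;
}
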
49313diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
49314index b563719..3868998 100644
49315--- a/fs/binfmt_flat.c
49316+++ b/fs/binfmt_flat.c
49317@@ -562,7 +562,9 @@ static int load_flat_file(struct linux_binprm * bprm,
49318 realdatastart = (unsigned long) -ENOMEM;
49319 printk("Unable to allocate RAM for process data, errno %d\n",
49320 (int)-realdatastart);
49321+ down_write(&current->mm->mmap_sem);
49322 vm_munmap(textpos, text_len);
49323+ up_write(&current->mm->mmap_sem);
49324 ret = realdatastart;
49325 goto err;
49326 }
49327@@ -586,8 +588,10 @@ static int load_flat_file(struct linux_binprm * bprm,
49328 }
49329 if (IS_ERR_VALUE(result)) {
49330 printk("Unable to read data+bss, errno %d\n", (int)-result);
49331+ down_write(&current->mm->mmap_sem);
49332 vm_munmap(textpos, text_len);
49333 vm_munmap(realdatastart, len);
49334+ up_write(&current->mm->mmap_sem);
49335 ret = result;
49336 goto err;
49337 }
49338@@ -654,8 +658,10 @@ static int load_flat_file(struct linux_binprm * bprm,
49339 }
49340 if (IS_ERR_VALUE(result)) {
49341 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
49342+ down_write(&current->mm->mmap_sem);
49343 vm_munmap(textpos, text_len + data_len + extra +
49344 MAX_SHARED_LIBS * sizeof(unsigned long));
49345+ up_write(&current->mm->mmap_sem);
49346 ret = result;
49347 goto err;
49348 }
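
All three binfmt_flat error paths now take mmap_sem for writing around vm_munmap(); in this hardened tree the unmap path evidently expects the caller to hold the lock (mainline's vm_munmap() acquires it internally, so this is read off the diff as tree-specific behaviour, not confirmed from the sources here). The general shape, one write lock held across a multi-step teardown instead of per call, looks like this pthread sketch with made-up names:

/* Illustrative only: one write lock around a compound teardown, mirroring
 * the down_write()/vm_munmap()/vm_munmap()/up_write() sequence above. */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;

static void unmap_region_locked(unsigned long start, unsigned long len)
{
	printf("unmap %#lx..%#lx\n", start, start + len);	/* stub */
}

static void teardown_on_error(unsigned long text, unsigned long tlen,
			      unsigned long data, unsigned long dlen)
{
	pthread_rwlock_wrlock(&map_lock);	/* down_write(&mm->mmap_sem) */
	unmap_region_locked(text, tlen);	/* vm_munmap(textpos, ...) */
	unmap_region_locked(data, dlen);	/* vm_munmap(realdatastart, ...) */
	pthread_rwlock_unlock(&map_lock);	/* up_write(&mm->mmap_sem) */
}

int main(void)
{
	teardown_on_error(0x10000, 0x2000, 0x20000, 0x1000);
	return 0;
}
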
49349diff --git a/fs/bio.c b/fs/bio.c
49350index b96fc6c..431d628 100644
49351--- a/fs/bio.c
49352+++ b/fs/bio.c
49353@@ -818,7 +818,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
49354 /*
49355 * Overflow, abort
49356 */
49357- if (end < start)
49358+ if (end < start || end - start > INT_MAX - nr_pages)
49359 return ERR_PTR(-EINVAL);
49360
49361 nr_pages += end - start;
49362@@ -952,7 +952,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
49363 /*
49364 * Overflow, abort
49365 */
49366- if (end < start)
49367+ if (end < start || end - start > INT_MAX - nr_pages)
49368 return ERR_PTR(-EINVAL);
49369
49370 nr_pages += end - start;
49371@@ -1214,7 +1214,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
49372 const int read = bio_data_dir(bio) == READ;
49373 struct bio_map_data *bmd = bio->bi_private;
49374 int i;
49375- char *p = bmd->sgvecs[0].iov_base;
49376+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
49377
49378 __bio_for_each_segment(bvec, bio, i, 0) {
49379 char *addr = page_address(bvec->bv_page);
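
Both bio.c hunks harden the same idiom: the later nr_pages += end - start can overflow a signed int even when end >= start, so the patch bounds the increment first. The rewritten test is itself overflow-free, since INT_MAX - nr_pages cannot wrap while nr_pages is non-negative. A standalone demonstration of the check:

#include <limits.h>
#include <stdio.h>

/* Returns 0 if adding (end - start) pages to nr_pages would exceed INT_MAX. */
static int pages_fit(unsigned long start, unsigned long end, int nr_pages)
{
	if (end < start)				/* wrapped range */
		return 0;
	if (end - start > (unsigned long)(INT_MAX - nr_pages))
		return 0;				/* would overflow the int counter */
	return 1;
}

int main(void)
{
	printf("%d\n", pages_fit(0, 0x80000000UL, 10));	/* too many pages: 0 */
	printf("%d\n", pages_fit(0, 16, 10));		/* fits: 1 */
	return 0;
}
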
49380diff --git a/fs/block_dev.c b/fs/block_dev.c
49381index 883dc49..f27794a 100644
49382--- a/fs/block_dev.c
49383+++ b/fs/block_dev.c
49384@@ -652,7 +652,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
49385 else if (bdev->bd_contains == bdev)
49386 return true; /* is a whole device which isn't held */
49387
49388- else if (whole->bd_holder == bd_may_claim)
49389+ else if (whole->bd_holder == (void *)bd_may_claim)
49390 return true; /* is a partition of a device that is being partitioned */
49391 else if (whole->bd_holder != NULL)
49392 return false; /* is a partition of a held device */
49393diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
49394index ce1c169..1ef484f 100644
49395--- a/fs/btrfs/ctree.c
49396+++ b/fs/btrfs/ctree.c
49397@@ -1036,9 +1036,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
49398 free_extent_buffer(buf);
49399 add_root_to_dirty_list(root);
49400 } else {
49401- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
49402- parent_start = parent->start;
49403- else
49404+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
49405+ if (parent)
49406+ parent_start = parent->start;
49407+ else
49408+ parent_start = 0;
49409+ } else
49410 parent_start = 0;
49411
49412 WARN_ON(trans->transid != btrfs_header_generation(parent));
49413diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
49414index 7c4e6cc..27bd5c2 100644
49415--- a/fs/btrfs/inode.c
49416+++ b/fs/btrfs/inode.c
49417@@ -7314,7 +7314,7 @@ fail:
49418 return -ENOMEM;
49419 }
49420
49421-static int btrfs_getattr(struct vfsmount *mnt,
49422+int btrfs_getattr(struct vfsmount *mnt,
49423 struct dentry *dentry, struct kstat *stat)
49424 {
49425 struct inode *inode = dentry->d_inode;
49426@@ -7328,6 +7328,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
49427 return 0;
49428 }
49429
49430+EXPORT_SYMBOL(btrfs_getattr);
49431+
49432+dev_t get_btrfs_dev_from_inode(struct inode *inode)
49433+{
49434+ return BTRFS_I(inode)->root->anon_dev;
49435+}
49436+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
49437+
49438 /*
49439 * If a file is moved, it will inherit the cow and compression flags of the new
49440 * directory.
49441diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
49442index 338f259..b657640 100644
49443--- a/fs/btrfs/ioctl.c
49444+++ b/fs/btrfs/ioctl.c
49445@@ -3033,9 +3033,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
49446 for (i = 0; i < num_types; i++) {
49447 struct btrfs_space_info *tmp;
49448
49449+ /* Don't copy in more than we allocated */
49450 if (!slot_count)
49451 break;
49452
49453+ slot_count--;
49454+
49455 info = NULL;
49456 rcu_read_lock();
49457 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
49458@@ -3057,10 +3060,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
49459 memcpy(dest, &space, sizeof(space));
49460 dest++;
49461 space_args.total_spaces++;
49462- slot_count--;
49463 }
49464- if (!slot_count)
49465- break;
49466 }
49467 up_read(&info->groups_sem);
49468 }
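
The ioctl fix above moves the slot_count decrement ahead of the copy, so the "don't copy in more than we allocated" budget is charged even on iterations that end up copying nothing, and no write can land past the allocation. The same shape in miniature, with a hypothetical fixed-size destination:

#include <stdio.h>

#define SLOTS 4

int main(void)
{
	int dest[SLOTS], n = 0;
	int slot_count = SLOTS;

	for (int i = 0; i < 100; i++) {
		if (!slot_count)	/* budget exhausted: stop */
			break;
		slot_count--;		/* charge the slot up front */
		if (i % 3)		/* some iterations produce no entry */
			continue;
		dest[n++] = i;		/* can never run past SLOTS entries */
	}
	printf("copied %d entries\n", n);
	return 0;
}
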
49469diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
49470index 300e09a..9fe4539 100644
49471--- a/fs/btrfs/relocation.c
49472+++ b/fs/btrfs/relocation.c
49473@@ -1269,7 +1269,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
49474 }
49475 spin_unlock(&rc->reloc_root_tree.lock);
49476
49477- BUG_ON((struct btrfs_root *)node->data != root);
49478+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
49479
49480 if (!del) {
49481 spin_lock(&rc->reloc_root_tree.lock);
49482diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
49483index d8982e9..29a85fa 100644
49484--- a/fs/btrfs/super.c
49485+++ b/fs/btrfs/super.c
49486@@ -267,7 +267,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
49487 function, line, errstr);
49488 return;
49489 }
49490- ACCESS_ONCE(trans->transaction->aborted) = errno;
49491+ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
49492 __btrfs_std_error(root->fs_info, function, line, errno, NULL);
49493 }
49494 /*
49495diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
49496index 622f469..e8d2d55 100644
49497--- a/fs/cachefiles/bind.c
49498+++ b/fs/cachefiles/bind.c
49499@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
49500 args);
49501
49502 /* start by checking things over */
49503- ASSERT(cache->fstop_percent >= 0 &&
49504- cache->fstop_percent < cache->fcull_percent &&
49505+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
49506 cache->fcull_percent < cache->frun_percent &&
49507 cache->frun_percent < 100);
49508
49509- ASSERT(cache->bstop_percent >= 0 &&
49510- cache->bstop_percent < cache->bcull_percent &&
49511+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
49512 cache->bcull_percent < cache->brun_percent &&
49513 cache->brun_percent < 100);
49514
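
The dropped ">= 0" clauses in bind.c (and the matching ones in daemon.c below) are not a loosening: the *_percent fields are unsigned, so x >= 0 is a tautology, one that gcc flags under -Wtype-limits. A two-line demonstration:

#include <stdio.h>

int main(void)
{
	unsigned fstop = 0;
	/* Always true for any unsigned value; gcc -Wtype-limits warns here. */
	if (fstop >= 0)
		puts("comparison is always true");
	return 0;
}
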
49515diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
49516index 0a1467b..6a53245 100644
49517--- a/fs/cachefiles/daemon.c
49518+++ b/fs/cachefiles/daemon.c
49519@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
49520 if (n > buflen)
49521 return -EMSGSIZE;
49522
49523- if (copy_to_user(_buffer, buffer, n) != 0)
49524+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
49525 return -EFAULT;
49526
49527 return n;
49528@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
49529 if (test_bit(CACHEFILES_DEAD, &cache->flags))
49530 return -EIO;
49531
49532- if (datalen < 0 || datalen > PAGE_SIZE - 1)
49533+ if (datalen > PAGE_SIZE - 1)
49534 return -EOPNOTSUPP;
49535
49536 /* drag the command string into the kernel so we can parse it */
49537@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
49538 if (args[0] != '%' || args[1] != '\0')
49539 return -EINVAL;
49540
49541- if (fstop < 0 || fstop >= cache->fcull_percent)
49542+ if (fstop >= cache->fcull_percent)
49543 return cachefiles_daemon_range_error(cache, args);
49544
49545 cache->fstop_percent = fstop;
49546@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
49547 if (args[0] != '%' || args[1] != '\0')
49548 return -EINVAL;
49549
49550- if (bstop < 0 || bstop >= cache->bcull_percent)
49551+ if (bstop >= cache->bcull_percent)
49552 return cachefiles_daemon_range_error(cache, args);
49553
49554 cache->bstop_percent = bstop;
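
The daemon_read change above adds a belt-and-braces bound: even though n is produced by formatting into a stack buffer, it is re-checked against sizeof(buffer) before copy_to_user(), so a miscomputed length can never leak adjacent stack memory. The userspace shape of the check, with copy_to_user() stood in for by memcpy():

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

/* Copy at most sizeof(buffer) bytes out, whatever 'n' claims. */
static ssize_t emit(char *dst, size_t buflen)
{
	char buffer[64];
	int n = snprintf(buffer, sizeof(buffer), "cull=%d frun=%d\n", 7, 93);

	if (n < 0 || (size_t)n > sizeof(buffer))
		return -EFAULT;		/* mirrors: if (n > sizeof(buffer) || copy_to_user(...)) */
	if ((size_t)n > buflen)
		return -EMSGSIZE;
	memcpy(dst, buffer, (size_t)n);	/* stands in for copy_to_user() */
	return n;
}

int main(void)
{
	char out[128];
	ssize_t n = emit(out, sizeof(out));

	if (n > 0)
		fwrite(out, 1, (size_t)n, stdout);
	return 0;
}
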
49555diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
49556index 4938251..7e01445 100644
49557--- a/fs/cachefiles/internal.h
49558+++ b/fs/cachefiles/internal.h
49559@@ -59,7 +59,7 @@ struct cachefiles_cache {
49560 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
49561 struct rb_root active_nodes; /* active nodes (can't be culled) */
49562 rwlock_t active_lock; /* lock for active_nodes */
49563- atomic_t gravecounter; /* graveyard uniquifier */
49564+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
49565 unsigned frun_percent; /* when to stop culling (% files) */
49566 unsigned fcull_percent; /* when to start culling (% files) */
49567 unsigned fstop_percent; /* when to stop allocating (% files) */
49568@@ -171,19 +171,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
49569 * proc.c
49570 */
49571 #ifdef CONFIG_CACHEFILES_HISTOGRAM
49572-extern atomic_t cachefiles_lookup_histogram[HZ];
49573-extern atomic_t cachefiles_mkdir_histogram[HZ];
49574-extern atomic_t cachefiles_create_histogram[HZ];
49575+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
49576+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
49577+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
49578
49579 extern int __init cachefiles_proc_init(void);
49580 extern void cachefiles_proc_cleanup(void);
49581 static inline
49582-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
49583+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
49584 {
49585 unsigned long jif = jiffies - start_jif;
49586 if (jif >= HZ)
49587 jif = HZ - 1;
49588- atomic_inc(&histogram[jif]);
49589+ atomic_inc_unchecked(&histogram[jif]);
49590 }
49591
49592 #else
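
The atomic_t to atomic_unchecked_t conversions here (and throughout the cifs and coda hunks below) are the flip side of PaX's REFCOUNT hardening: refcount-like atomics get overflow detection, while counters whose wraparound is harmless, such as histograms, uniquifiers and statistics, are explicitly opted out with the *_unchecked variants so the overflow check never fires on them. In portable C11 terms the unchecked case is just an ordinary relaxed counter, as in this sketch; atomic_unchecked_t itself is PaX-specific, not a mainline type:

#include <stdatomic.h>
#include <stdio.h>

/* A statistics counter where wraparound is acceptable: no overflow trap
 * wanted, relaxed ordering is enough (cf. atomic_inc_unchecked()). */
static atomic_uint lookups;

static void count_lookup(void)
{
	atomic_fetch_add_explicit(&lookups, 1, memory_order_relaxed);
}

int main(void)
{
	for (int i = 0; i < 1000; i++)
		count_lookup();
	printf("lookups: %u\n",
	       atomic_load_explicit(&lookups, memory_order_relaxed));
	return 0;
}
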
49593diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
49594index 8c01c5fc..15f982e 100644
49595--- a/fs/cachefiles/namei.c
49596+++ b/fs/cachefiles/namei.c
49597@@ -317,7 +317,7 @@ try_again:
49598 /* first step is to make up a grave dentry in the graveyard */
49599 sprintf(nbuffer, "%08x%08x",
49600 (uint32_t) get_seconds(),
49601- (uint32_t) atomic_inc_return(&cache->gravecounter));
49602+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
49603
49604 /* do the multiway lock magic */
49605 trap = lock_rename(cache->graveyard, dir);
49606diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
49607index eccd339..4c1d995 100644
49608--- a/fs/cachefiles/proc.c
49609+++ b/fs/cachefiles/proc.c
49610@@ -14,9 +14,9 @@
49611 #include <linux/seq_file.h>
49612 #include "internal.h"
49613
49614-atomic_t cachefiles_lookup_histogram[HZ];
49615-atomic_t cachefiles_mkdir_histogram[HZ];
49616-atomic_t cachefiles_create_histogram[HZ];
49617+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
49618+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
49619+atomic_unchecked_t cachefiles_create_histogram[HZ];
49620
49621 /*
49622 * display the latency histogram
49623@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
49624 return 0;
49625 default:
49626 index = (unsigned long) v - 3;
49627- x = atomic_read(&cachefiles_lookup_histogram[index]);
49628- y = atomic_read(&cachefiles_mkdir_histogram[index]);
49629- z = atomic_read(&cachefiles_create_histogram[index]);
49630+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
49631+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
49632+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
49633 if (x == 0 && y == 0 && z == 0)
49634 return 0;
49635
49636diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
49637index 4809922..aab2c39 100644
49638--- a/fs/cachefiles/rdwr.c
49639+++ b/fs/cachefiles/rdwr.c
49640@@ -965,7 +965,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
49641 old_fs = get_fs();
49642 set_fs(KERNEL_DS);
49643 ret = file->f_op->write(
49644- file, (const void __user *) data, len, &pos);
49645+ file, (const void __force_user *) data, len, &pos);
49646 set_fs(old_fs);
49647 kunmap(page);
49648 if (ret != len)
49649diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
49650index 8c1aabe..bbf856a 100644
49651--- a/fs/ceph/dir.c
49652+++ b/fs/ceph/dir.c
49653@@ -243,7 +243,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
49654 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
49655 struct ceph_mds_client *mdsc = fsc->mdsc;
49656 unsigned frag = fpos_frag(filp->f_pos);
49657- int off = fpos_off(filp->f_pos);
49658+ unsigned int off = fpos_off(filp->f_pos);
49659 int err;
49660 u32 ftype;
49661 struct ceph_mds_reply_info_parsed *rinfo;
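
The one-liner in ceph/dir.c widens off from int to unsigned int: fpos_off() extracts the low bits of a 64-bit file position, and storing them in a signed int lets offsets with the top bit set go negative and poison later comparisons. In miniature (fpos_off here is a stand-in that keeps the low 32 bits; the signed result of the conversion is implementation-defined but is -1 on typical two's-complement targets):

#include <stdint.h>
#include <stdio.h>

#define fpos_off(pos) ((uint32_t)((pos) & 0xffffffffu))	/* stand-in */

int main(void)
{
	int64_t f_pos = 0x1ffffffffLL;		/* low 32 bits have the MSB set */
	int off_signed = fpos_off(f_pos);	/* typically -1: sign flip */
	unsigned int off_unsigned = fpos_off(f_pos);	/* 4294967295 */

	printf("signed: %d  unsigned: %u\n", off_signed, off_unsigned);
	return 0;
}
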
49662diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
49663index d9ea6ed..1e6c8ac 100644
49664--- a/fs/cifs/cifs_debug.c
49665+++ b/fs/cifs/cifs_debug.c
49666@@ -267,8 +267,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
49667
49668 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
49669 #ifdef CONFIG_CIFS_STATS2
49670- atomic_set(&totBufAllocCount, 0);
49671- atomic_set(&totSmBufAllocCount, 0);
49672+ atomic_set_unchecked(&totBufAllocCount, 0);
49673+ atomic_set_unchecked(&totSmBufAllocCount, 0);
49674 #endif /* CONFIG_CIFS_STATS2 */
49675 spin_lock(&cifs_tcp_ses_lock);
49676 list_for_each(tmp1, &cifs_tcp_ses_list) {
49677@@ -281,7 +281,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
49678 tcon = list_entry(tmp3,
49679 struct cifs_tcon,
49680 tcon_list);
49681- atomic_set(&tcon->num_smbs_sent, 0);
49682+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
49683 if (server->ops->clear_stats)
49684 server->ops->clear_stats(tcon);
49685 }
49686@@ -313,8 +313,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
49687 smBufAllocCount.counter, cifs_min_small);
49688 #ifdef CONFIG_CIFS_STATS2
49689 seq_printf(m, "Total Large %d Small %d Allocations\n",
49690- atomic_read(&totBufAllocCount),
49691- atomic_read(&totSmBufAllocCount));
49692+ atomic_read_unchecked(&totBufAllocCount),
49693+ atomic_read_unchecked(&totSmBufAllocCount));
49694 #endif /* CONFIG_CIFS_STATS2 */
49695
49696 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
49697@@ -343,7 +343,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
49698 if (tcon->need_reconnect)
49699 seq_puts(m, "\tDISCONNECTED ");
49700 seq_printf(m, "\nSMBs: %d",
49701- atomic_read(&tcon->num_smbs_sent));
49702+ atomic_read_unchecked(&tcon->num_smbs_sent));
49703 if (server->ops->print_stats)
49704 server->ops->print_stats(m, tcon);
49705 }
49706diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
49707index b9db388..9a73d6d 100644
49708--- a/fs/cifs/cifsfs.c
49709+++ b/fs/cifs/cifsfs.c
49710@@ -1026,7 +1026,7 @@ cifs_init_request_bufs(void)
49711 /* cERROR(1, "CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize); */
49712 cifs_req_cachep = kmem_cache_create("cifs_request",
49713 CIFSMaxBufSize + max_hdr_size, 0,
49714- SLAB_HWCACHE_ALIGN, NULL);
49715+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
49716 if (cifs_req_cachep == NULL)
49717 return -ENOMEM;
49718
49719@@ -1053,7 +1053,7 @@ cifs_init_request_bufs(void)
49720 efficient to alloc 1 per page off the slab compared to 17K (5page)
49721 alloc of large cifs buffers even when page debugging is on */
49722 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
49723- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
49724+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
49725 NULL);
49726 if (cifs_sm_req_cachep == NULL) {
49727 mempool_destroy(cifs_req_poolp);
49728@@ -1138,8 +1138,8 @@ init_cifs(void)
49729 atomic_set(&bufAllocCount, 0);
49730 atomic_set(&smBufAllocCount, 0);
49731 #ifdef CONFIG_CIFS_STATS2
49732- atomic_set(&totBufAllocCount, 0);
49733- atomic_set(&totSmBufAllocCount, 0);
49734+ atomic_set_unchecked(&totBufAllocCount, 0);
49735+ atomic_set_unchecked(&totSmBufAllocCount, 0);
49736 #endif /* CONFIG_CIFS_STATS2 */
49737
49738 atomic_set(&midCount, 0);
49739diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
49740index e6899ce..d6b2920 100644
49741--- a/fs/cifs/cifsglob.h
49742+++ b/fs/cifs/cifsglob.h
49743@@ -751,35 +751,35 @@ struct cifs_tcon {
49744 __u16 Flags; /* optional support bits */
49745 enum statusEnum tidStatus;
49746 #ifdef CONFIG_CIFS_STATS
49747- atomic_t num_smbs_sent;
49748+ atomic_unchecked_t num_smbs_sent;
49749 union {
49750 struct {
49751- atomic_t num_writes;
49752- atomic_t num_reads;
49753- atomic_t num_flushes;
49754- atomic_t num_oplock_brks;
49755- atomic_t num_opens;
49756- atomic_t num_closes;
49757- atomic_t num_deletes;
49758- atomic_t num_mkdirs;
49759- atomic_t num_posixopens;
49760- atomic_t num_posixmkdirs;
49761- atomic_t num_rmdirs;
49762- atomic_t num_renames;
49763- atomic_t num_t2renames;
49764- atomic_t num_ffirst;
49765- atomic_t num_fnext;
49766- atomic_t num_fclose;
49767- atomic_t num_hardlinks;
49768- atomic_t num_symlinks;
49769- atomic_t num_locks;
49770- atomic_t num_acl_get;
49771- atomic_t num_acl_set;
49772+ atomic_unchecked_t num_writes;
49773+ atomic_unchecked_t num_reads;
49774+ atomic_unchecked_t num_flushes;
49775+ atomic_unchecked_t num_oplock_brks;
49776+ atomic_unchecked_t num_opens;
49777+ atomic_unchecked_t num_closes;
49778+ atomic_unchecked_t num_deletes;
49779+ atomic_unchecked_t num_mkdirs;
49780+ atomic_unchecked_t num_posixopens;
49781+ atomic_unchecked_t num_posixmkdirs;
49782+ atomic_unchecked_t num_rmdirs;
49783+ atomic_unchecked_t num_renames;
49784+ atomic_unchecked_t num_t2renames;
49785+ atomic_unchecked_t num_ffirst;
49786+ atomic_unchecked_t num_fnext;
49787+ atomic_unchecked_t num_fclose;
49788+ atomic_unchecked_t num_hardlinks;
49789+ atomic_unchecked_t num_symlinks;
49790+ atomic_unchecked_t num_locks;
49791+ atomic_unchecked_t num_acl_get;
49792+ atomic_unchecked_t num_acl_set;
49793 } cifs_stats;
49794 #ifdef CONFIG_CIFS_SMB2
49795 struct {
49796- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
49797- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
49798+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
49799+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
49800 } smb2_stats;
49801 #endif /* CONFIG_CIFS_SMB2 */
49802 } stats;
49803@@ -1080,7 +1080,7 @@ convert_delimiter(char *path, char delim)
49804 }
49805
49806 #ifdef CONFIG_CIFS_STATS
49807-#define cifs_stats_inc atomic_inc
49808+#define cifs_stats_inc atomic_inc_unchecked
49809
49810 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
49811 unsigned int bytes)
49812@@ -1445,8 +1445,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
49813 /* Various Debug counters */
49814 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
49815 #ifdef CONFIG_CIFS_STATS2
49816-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
49817-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
49818+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
49819+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
49820 #endif
49821 GLOBAL_EXTERN atomic_t smBufAllocCount;
49822 GLOBAL_EXTERN atomic_t midCount;
49823diff --git a/fs/cifs/link.c b/fs/cifs/link.c
49824index 51dc2fb..1e12a33 100644
49825--- a/fs/cifs/link.c
49826+++ b/fs/cifs/link.c
49827@@ -616,7 +616,7 @@ symlink_exit:
49828
49829 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
49830 {
49831- char *p = nd_get_link(nd);
49832+ const char *p = nd_get_link(nd);
49833 if (!IS_ERR(p))
49834 kfree(p);
49835 }
49836diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
49837index 3a00c0d..42d901c 100644
49838--- a/fs/cifs/misc.c
49839+++ b/fs/cifs/misc.c
49840@@ -169,7 +169,7 @@ cifs_buf_get(void)
49841 memset(ret_buf, 0, buf_size + 3);
49842 atomic_inc(&bufAllocCount);
49843 #ifdef CONFIG_CIFS_STATS2
49844- atomic_inc(&totBufAllocCount);
49845+ atomic_inc_unchecked(&totBufAllocCount);
49846 #endif /* CONFIG_CIFS_STATS2 */
49847 }
49848
49849@@ -204,7 +204,7 @@ cifs_small_buf_get(void)
49850 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
49851 atomic_inc(&smBufAllocCount);
49852 #ifdef CONFIG_CIFS_STATS2
49853- atomic_inc(&totSmBufAllocCount);
49854+ atomic_inc_unchecked(&totSmBufAllocCount);
49855 #endif /* CONFIG_CIFS_STATS2 */
49856
49857 }
49858diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
49859index 47bc5a8..10decbe 100644
49860--- a/fs/cifs/smb1ops.c
49861+++ b/fs/cifs/smb1ops.c
49862@@ -586,27 +586,27 @@ static void
49863 cifs_clear_stats(struct cifs_tcon *tcon)
49864 {
49865 #ifdef CONFIG_CIFS_STATS
49866- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
49867- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
49868- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
49869- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
49870- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
49871- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
49872- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
49873- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
49874- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
49875- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
49876- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
49877- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
49878- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
49879- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
49880- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
49881- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
49882- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
49883- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
49884- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
49885- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
49886- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
49887+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
49888+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
49889+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
49890+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
49891+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
49892+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
49893+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
49894+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
49895+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
49896+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
49897+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
49898+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
49899+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
49900+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
49901+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
49902+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
49903+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
49904+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
49905+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
49906+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
49907+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
49908 #endif
49909 }
49910
49911@@ -615,36 +615,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
49912 {
49913 #ifdef CONFIG_CIFS_STATS
49914 seq_printf(m, " Oplocks breaks: %d",
49915- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
49916+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
49917 seq_printf(m, "\nReads: %d Bytes: %llu",
49918- atomic_read(&tcon->stats.cifs_stats.num_reads),
49919+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
49920 (long long)(tcon->bytes_read));
49921 seq_printf(m, "\nWrites: %d Bytes: %llu",
49922- atomic_read(&tcon->stats.cifs_stats.num_writes),
49923+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
49924 (long long)(tcon->bytes_written));
49925 seq_printf(m, "\nFlushes: %d",
49926- atomic_read(&tcon->stats.cifs_stats.num_flushes));
49927+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
49928 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
49929- atomic_read(&tcon->stats.cifs_stats.num_locks),
49930- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
49931- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
49932+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
49933+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
49934+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
49935 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
49936- atomic_read(&tcon->stats.cifs_stats.num_opens),
49937- atomic_read(&tcon->stats.cifs_stats.num_closes),
49938- atomic_read(&tcon->stats.cifs_stats.num_deletes));
49939+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
49940+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
49941+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
49942 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
49943- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
49944- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
49945+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
49946+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
49947 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
49948- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
49949- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
49950+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
49951+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
49952 seq_printf(m, "\nRenames: %d T2 Renames %d",
49953- atomic_read(&tcon->stats.cifs_stats.num_renames),
49954- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
49955+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
49956+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
49957 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
49958- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
49959- atomic_read(&tcon->stats.cifs_stats.num_fnext),
49960- atomic_read(&tcon->stats.cifs_stats.num_fclose));
49961+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
49962+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
49963+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
49964 #endif
49965 }
49966
49967diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
49968index bceffe7..cd1ae59 100644
49969--- a/fs/cifs/smb2ops.c
49970+++ b/fs/cifs/smb2ops.c
49971@@ -274,8 +274,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
49972 #ifdef CONFIG_CIFS_STATS
49973 int i;
49974 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
49975- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
49976- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
49977+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
49978+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
49979 }
49980 #endif
49981 }
49982@@ -284,66 +284,66 @@ static void
49983 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
49984 {
49985 #ifdef CONFIG_CIFS_STATS
49986- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
49987- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
49988+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
49989+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
49990 seq_printf(m, "\nNegotiates: %d sent %d failed",
49991- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
49992- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
49993+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
49994+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
49995 seq_printf(m, "\nSessionSetups: %d sent %d failed",
49996- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
49997- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
49998+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
49999+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
50000 #define SMB2LOGOFF 0x0002 /* trivial request/resp */
50001 seq_printf(m, "\nLogoffs: %d sent %d failed",
50002- atomic_read(&sent[SMB2_LOGOFF_HE]),
50003- atomic_read(&failed[SMB2_LOGOFF_HE]));
50004+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
50005+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
50006 seq_printf(m, "\nTreeConnects: %d sent %d failed",
50007- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
50008- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
50009+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
50010+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
50011 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
50012- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
50013- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
50014+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
50015+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
50016 seq_printf(m, "\nCreates: %d sent %d failed",
50017- atomic_read(&sent[SMB2_CREATE_HE]),
50018- atomic_read(&failed[SMB2_CREATE_HE]));
50019+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
50020+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
50021 seq_printf(m, "\nCloses: %d sent %d failed",
50022- atomic_read(&sent[SMB2_CLOSE_HE]),
50023- atomic_read(&failed[SMB2_CLOSE_HE]));
50024+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
50025+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
50026 seq_printf(m, "\nFlushes: %d sent %d failed",
50027- atomic_read(&sent[SMB2_FLUSH_HE]),
50028- atomic_read(&failed[SMB2_FLUSH_HE]));
50029+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
50030+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
50031 seq_printf(m, "\nReads: %d sent %d failed",
50032- atomic_read(&sent[SMB2_READ_HE]),
50033- atomic_read(&failed[SMB2_READ_HE]));
50034+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
50035+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
50036 seq_printf(m, "\nWrites: %d sent %d failed",
50037- atomic_read(&sent[SMB2_WRITE_HE]),
50038- atomic_read(&failed[SMB2_WRITE_HE]));
50039+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
50040+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
50041 seq_printf(m, "\nLocks: %d sent %d failed",
50042- atomic_read(&sent[SMB2_LOCK_HE]),
50043- atomic_read(&failed[SMB2_LOCK_HE]));
50044+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
50045+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
50046 seq_printf(m, "\nIOCTLs: %d sent %d failed",
50047- atomic_read(&sent[SMB2_IOCTL_HE]),
50048- atomic_read(&failed[SMB2_IOCTL_HE]));
50049+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
50050+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
50051 seq_printf(m, "\nCancels: %d sent %d failed",
50052- atomic_read(&sent[SMB2_CANCEL_HE]),
50053- atomic_read(&failed[SMB2_CANCEL_HE]));
50054+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
50055+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
50056 seq_printf(m, "\nEchos: %d sent %d failed",
50057- atomic_read(&sent[SMB2_ECHO_HE]),
50058- atomic_read(&failed[SMB2_ECHO_HE]));
50059+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
50060+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
50061 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
50062- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
50063- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
50064+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
50065+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
50066 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
50067- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
50068- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
50069+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
50070+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
50071 seq_printf(m, "\nQueryInfos: %d sent %d failed",
50072- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
50073- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
50074+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
50075+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
50076 seq_printf(m, "\nSetInfos: %d sent %d failed",
50077- atomic_read(&sent[SMB2_SET_INFO_HE]),
50078- atomic_read(&failed[SMB2_SET_INFO_HE]));
50079+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
50080+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
50081 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
50082- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
50083- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
50084+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
50085+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
50086 #endif
50087 }
50088
50089diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
50090index 41d9d07..dbb4772 100644
50091--- a/fs/cifs/smb2pdu.c
50092+++ b/fs/cifs/smb2pdu.c
50093@@ -1761,8 +1761,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
50094 default:
50095 cERROR(1, "info level %u isn't supported",
50096 srch_inf->info_level);
50097- rc = -EINVAL;
50098- goto qdir_exit;
50099+ return -EINVAL;
50100 }
50101
50102 req->FileIndex = cpu_to_le32(index);
50103diff --git a/fs/coda/cache.c b/fs/coda/cache.c
50104index 958ae0e..505c9d0 100644
50105--- a/fs/coda/cache.c
50106+++ b/fs/coda/cache.c
50107@@ -24,7 +24,7 @@
50108 #include "coda_linux.h"
50109 #include "coda_cache.h"
50110
50111-static atomic_t permission_epoch = ATOMIC_INIT(0);
50112+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
50113
50114 /* replace or extend an acl cache hit */
50115 void coda_cache_enter(struct inode *inode, int mask)
50116@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
50117 struct coda_inode_info *cii = ITOC(inode);
50118
50119 spin_lock(&cii->c_lock);
50120- cii->c_cached_epoch = atomic_read(&permission_epoch);
50121+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
50122 if (cii->c_uid != current_fsuid()) {
50123 cii->c_uid = current_fsuid();
50124 cii->c_cached_perm = mask;
50125@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
50126 {
50127 struct coda_inode_info *cii = ITOC(inode);
50128 spin_lock(&cii->c_lock);
50129- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
50130+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
50131 spin_unlock(&cii->c_lock);
50132 }
50133
50134 /* remove all acl caches */
50135 void coda_cache_clear_all(struct super_block *sb)
50136 {
50137- atomic_inc(&permission_epoch);
50138+ atomic_inc_unchecked(&permission_epoch);
50139 }
50140
50141
50142@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
50143 spin_lock(&cii->c_lock);
50144 hit = (mask & cii->c_cached_perm) == mask &&
50145 cii->c_uid == current_fsuid() &&
50146- cii->c_cached_epoch == atomic_read(&permission_epoch);
50147+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
50148 spin_unlock(&cii->c_lock);
50149
50150 return hit;
50151diff --git a/fs/compat.c b/fs/compat.c
50152index a06dcbc..dacb6d3 100644
50153--- a/fs/compat.c
50154+++ b/fs/compat.c
50155@@ -54,7 +54,7 @@
50156 #include <asm/ioctls.h>
50157 #include "internal.h"
50158
50159-int compat_log = 1;
50160+int compat_log = 0;
50161
50162 int compat_printk(const char *fmt, ...)
50163 {
50164@@ -490,7 +490,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
50165
50166 set_fs(KERNEL_DS);
50167 /* The __user pointer cast is valid because of the set_fs() */
50168- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
50169+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
50170 set_fs(oldfs);
50171 /* truncating is ok because it's a user address */
50172 if (!ret)
50173@@ -548,7 +548,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
50174 goto out;
50175
50176 ret = -EINVAL;
50177- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
50178+ if (nr_segs > UIO_MAXIOV)
50179 goto out;
50180 if (nr_segs > fast_segs) {
50181 ret = -ENOMEM;
50182@@ -835,6 +835,7 @@ struct compat_old_linux_dirent {
50183
50184 struct compat_readdir_callback {
50185 struct compat_old_linux_dirent __user *dirent;
50186+ struct file * file;
50187 int result;
50188 };
50189
50190@@ -852,6 +853,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
50191 buf->result = -EOVERFLOW;
50192 return -EOVERFLOW;
50193 }
50194+
50195+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
50196+ return 0;
50197+
50198 buf->result++;
50199 dirent = buf->dirent;
50200 if (!access_ok(VERIFY_WRITE, dirent,
50201@@ -882,6 +887,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
50202
50203 buf.result = 0;
50204 buf.dirent = dirent;
50205+ buf.file = f.file;
50206
50207 error = vfs_readdir(f.file, compat_fillonedir, &buf);
50208 if (buf.result)
50209@@ -901,6 +907,7 @@ struct compat_linux_dirent {
50210 struct compat_getdents_callback {
50211 struct compat_linux_dirent __user *current_dir;
50212 struct compat_linux_dirent __user *previous;
50213+ struct file * file;
50214 int count;
50215 int error;
50216 };
50217@@ -922,6 +929,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
50218 buf->error = -EOVERFLOW;
50219 return -EOVERFLOW;
50220 }
50221+
50222+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
50223+ return 0;
50224+
50225 dirent = buf->previous;
50226 if (dirent) {
50227 if (__put_user(offset, &dirent->d_off))
50228@@ -967,6 +978,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
50229 buf.previous = NULL;
50230 buf.count = count;
50231 buf.error = 0;
50232+ buf.file = f.file;
50233
50234 error = vfs_readdir(f.file, compat_filldir, &buf);
50235 if (error >= 0)
50236@@ -987,6 +999,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
50237 struct compat_getdents_callback64 {
50238 struct linux_dirent64 __user *current_dir;
50239 struct linux_dirent64 __user *previous;
50240+ struct file * file;
50241 int count;
50242 int error;
50243 };
50244@@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
50245 buf->error = -EINVAL; /* only used if we fail.. */
50246 if (reclen > buf->count)
50247 return -EINVAL;
50248+
50249+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
50250+ return 0;
50251+
50252 dirent = buf->previous;
50253
50254 if (dirent) {
50255@@ -1052,13 +1069,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
50256 buf.previous = NULL;
50257 buf.count = count;
50258 buf.error = 0;
50259+ buf.file = f.file;
50260
50261 error = vfs_readdir(f.file, compat_filldir64, &buf);
50262 if (error >= 0)
50263 error = buf.error;
50264 lastdirent = buf.previous;
50265 if (lastdirent) {
50266- typeof(lastdirent->d_off) d_off = f.file->f_pos;
50267+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
50268 if (__put_user_unaligned(d_off, &lastdirent->d_off))
50269 error = -EFAULT;
50270 else
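
All three compat readdir paths above gain the same two pieces: the callback struct carries the struct file *, and each filldir callback asks gr_acl_handle_filldir() whether the entry may be shown, returning 0 (skip this entry, keep iterating) when it may not. Userspace scandir() has the same filter-in-the-callback shape; the predicate below is a made-up example, not the grsecurity policy check:

#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical visibility predicate standing in for gr_acl_handle_filldir(). */
static int visible(const struct dirent *d)
{
	return d->d_name[0] != '.';	/* e.g. hide dotfiles */
}

int main(void)
{
	struct dirent **list;
	int n = scandir(".", &list, visible, alphasort);

	if (n < 0)
		return 1;
	for (int i = 0; i < n; i++) {
		puts(list[i]->d_name);
		free(list[i]);
	}
	free(list);
	return 0;
}
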
50271diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
50272index a81147e..20bf2b5 100644
50273--- a/fs/compat_binfmt_elf.c
50274+++ b/fs/compat_binfmt_elf.c
50275@@ -30,11 +30,13 @@
50276 #undef elf_phdr
50277 #undef elf_shdr
50278 #undef elf_note
50279+#undef elf_dyn
50280 #undef elf_addr_t
50281 #define elfhdr elf32_hdr
50282 #define elf_phdr elf32_phdr
50283 #define elf_shdr elf32_shdr
50284 #define elf_note elf32_note
50285+#define elf_dyn Elf32_Dyn
50286 #define elf_addr_t Elf32_Addr
50287
50288 /*
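
The compat_binfmt_elf.c hunk exists because the new elf_handle_mprotect() walks dynamic-section entries through an elf_dyn type alias; binfmt_elf.c is re-included by compat_binfmt_elf.c with 32-bit types substituted, so elf_dyn must be remapped to Elf32_Dyn alongside the existing phdr/shdr/note remaps. A 32-bit process's dynamic entries are half the size:

#include <elf.h>
#include <stdio.h>

int main(void)
{
	/* Why the #define matters: the on-disk records differ in size, so
	 * parsing a 32-bit PT_DYNAMIC with Elf64_Dyn strides misreads it. */
	printf("sizeof(Elf32_Dyn) = %zu\n", sizeof(Elf32_Dyn));	/* 8  */
	printf("sizeof(Elf64_Dyn) = %zu\n", sizeof(Elf64_Dyn));	/* 16 */
	return 0;
}
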
50289diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
50290index e2f57a0..3c78771 100644
50291--- a/fs/compat_ioctl.c
50292+++ b/fs/compat_ioctl.c
50293@@ -623,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
50294 return -EFAULT;
50295 if (__get_user(udata, &ss32->iomem_base))
50296 return -EFAULT;
50297- ss.iomem_base = compat_ptr(udata);
50298+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
50299 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
50300 __get_user(ss.port_high, &ss32->port_high))
50301 return -EFAULT;
50302@@ -798,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
50303 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
50304 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
50305 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
50306- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
50307+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
50308 return -EFAULT;
50309
50310 return ioctl_preallocate(file, p);
50311@@ -1620,8 +1620,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
50312 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
50313 {
50314 unsigned int a, b;
50315- a = *(unsigned int *)p;
50316- b = *(unsigned int *)q;
50317+ a = *(const unsigned int *)p;
50318+ b = *(const unsigned int *)q;
50319 if (a > b)
50320 return 1;
50321 if (a < b)
50322diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
50323index 712b10f..c33c4ca 100644
50324--- a/fs/configfs/dir.c
50325+++ b/fs/configfs/dir.c
50326@@ -1037,10 +1037,11 @@ static int configfs_dump(struct configfs_dirent *sd, int level)
50327 static int configfs_depend_prep(struct dentry *origin,
50328 struct config_item *target)
50329 {
50330- struct configfs_dirent *child_sd, *sd = origin->d_fsdata;
50331+ struct configfs_dirent *child_sd, *sd;
50332 int ret = 0;
50333
50334- BUG_ON(!origin || !sd);
50335+ BUG_ON(!origin || !origin->d_fsdata);
50336+ sd = origin->d_fsdata;
50337
50338 if (sd->s_element == target) /* Boo-yah */
50339 goto out;
50340@@ -1564,7 +1565,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
50341 }
50342 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
50343 struct configfs_dirent *next;
50344- const char * name;
50345+ const unsigned char * name;
50346+ char d_name[sizeof(next->s_dentry->d_iname)];
50347 int len;
50348 struct inode *inode = NULL;
50349
50350@@ -1574,7 +1576,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
50351 continue;
50352
50353 name = configfs_get_name(next);
50354- len = strlen(name);
50355+ if (next->s_dentry && name == next->s_dentry->d_iname) {
50356+ len = next->s_dentry->d_name.len;
50357+ memcpy(d_name, name, len);
50358+ name = d_name;
50359+ } else
50360+ len = strlen(name);
50361
50362 /*
50363 * We'll have a dentry and an inode for
50364diff --git a/fs/coredump.c b/fs/coredump.c
50365index 1774932..5812106 100644
50366--- a/fs/coredump.c
50367+++ b/fs/coredump.c
50368@@ -52,7 +52,7 @@ struct core_name {
50369 char *corename;
50370 int used, size;
50371 };
50372-static atomic_t call_count = ATOMIC_INIT(1);
50373+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
50374
50375 /* The maximal length of core_pattern is also specified in sysctl.c */
50376
50377@@ -60,7 +60,7 @@ static int expand_corename(struct core_name *cn)
50378 {
50379 char *old_corename = cn->corename;
50380
50381- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
50382+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
50383 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
50384
50385 if (!cn->corename) {
50386@@ -157,7 +157,7 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm)
50387 int pid_in_pattern = 0;
50388 int err = 0;
50389
50390- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
50391+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
50392 cn->corename = kmalloc(cn->size, GFP_KERNEL);
50393 cn->used = 0;
50394
50395@@ -414,17 +414,17 @@ static void wait_for_dump_helpers(struct file *file)
50396 pipe = file->f_path.dentry->d_inode->i_pipe;
50397
50398 pipe_lock(pipe);
50399- pipe->readers++;
50400- pipe->writers--;
50401+ atomic_inc(&pipe->readers);
50402+ atomic_dec(&pipe->writers);
50403
50404- while ((pipe->readers > 1) && (!signal_pending(current))) {
50405+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
50406 wake_up_interruptible_sync(&pipe->wait);
50407 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
50408 pipe_wait(pipe);
50409 }
50410
50411- pipe->readers--;
50412- pipe->writers++;
50413+ atomic_dec(&pipe->readers);
50414+ atomic_inc(&pipe->writers);
50415 pipe_unlock(pipe);
50416
50417 }
50418@@ -471,7 +471,8 @@ void do_coredump(siginfo_t *siginfo)
50419 int ispipe;
50420 struct files_struct *displaced;
50421 bool need_nonrelative = false;
50422- static atomic_t core_dump_count = ATOMIC_INIT(0);
50423+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
50424+ long signr = siginfo->si_signo;
50425 struct coredump_params cprm = {
50426 .siginfo = siginfo,
50427 .regs = signal_pt_regs(),
50428@@ -484,7 +485,10 @@ void do_coredump(siginfo_t *siginfo)
50429 .mm_flags = mm->flags,
50430 };
50431
50432- audit_core_dumps(siginfo->si_signo);
50433+ audit_core_dumps(signr);
50434+
50435+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
50436+ gr_handle_brute_attach(cprm.mm_flags);
50437
50438 binfmt = mm->binfmt;
50439 if (!binfmt || !binfmt->core_dump)
50440@@ -508,7 +512,7 @@ void do_coredump(siginfo_t *siginfo)
50441 need_nonrelative = true;
50442 }
50443
50444- retval = coredump_wait(siginfo->si_signo, &core_state);
50445+ retval = coredump_wait(signr, &core_state);
50446 if (retval < 0)
50447 goto fail_creds;
50448
50449@@ -556,7 +560,7 @@ void do_coredump(siginfo_t *siginfo)
50450 }
50451 cprm.limit = RLIM_INFINITY;
50452
50453- dump_count = atomic_inc_return(&core_dump_count);
50454+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
50455 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
50456 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
50457 task_tgid_vnr(current), current->comm);
50458@@ -583,6 +587,8 @@ void do_coredump(siginfo_t *siginfo)
50459 } else {
50460 struct inode *inode;
50461
50462+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
50463+
50464 if (cprm.limit < binfmt->min_coredump)
50465 goto fail_unlock;
50466
50467@@ -640,7 +646,7 @@ close_fail:
50468 filp_close(cprm.file, NULL);
50469 fail_dropcount:
50470 if (ispipe)
50471- atomic_dec(&core_dump_count);
50472+ atomic_dec_unchecked(&core_dump_count);
50473 fail_unlock:
50474 kfree(cn.corename);
50475 fail_corename:
50476@@ -659,7 +665,7 @@ fail:
50477 */
50478 int dump_write(struct file *file, const void *addr, int nr)
50479 {
50480- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
50481+ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
50482 }
50483 EXPORT_SYMBOL(dump_write);
50484
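
Two distinct hardenings share the coredump.c diff: the dump-helper pipe's reader/writer counts become real atomics (they are mutated from more than one task), and gr_handle_brute_attach() runs only for the crash-type signals SIGSEGV, SIGBUS, SIGKILL and SIGILL, so repeated crashes of a forking service can be slowed down. A toy userspace analogue of that rate-limiting idea follows; the real gr_handle_brute_attach() logic is more involved and is not reproduced here:

#include <signal.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define CRASH_WINDOW 2	/* seconds; a real damper would use a longer window */

/* Hypothetical brute-force damper: crash-type signals arriving too close
 * together trigger a delay, roughly the idea behind gr_handle_brute_attach(). */
static time_t last_crash;

static void on_crash_signal(int signr)
{
	time_t now = time(NULL);

	if (signr != SIGSEGV && signr != SIGBUS &&
	    signr != SIGKILL && signr != SIGILL)
		return;
	if (last_crash && now - last_crash < CRASH_WINDOW) {
		fprintf(stderr, "crash storm detected: delaying\n");
		sleep(CRASH_WINDOW);
	}
	last_crash = now;
}

int main(void)
{
	on_crash_signal(SIGSEGV);
	on_crash_signal(SIGSEGV);	/* second crash inside the window: delayed */
	return 0;
}
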
50485diff --git a/fs/dcache.c b/fs/dcache.c
50486index c3bbf85..5b71101 100644
50487--- a/fs/dcache.c
50488+++ b/fs/dcache.c
50489@@ -3139,7 +3139,7 @@ void __init vfs_caches_init(unsigned long mempages)
50490 mempages -= reserve;
50491
50492 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
50493- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
50494+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
50495
50496 dcache_init();
50497 inode_init();
50498diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
50499index a5f12b7..4ee8a6f 100644
50500--- a/fs/debugfs/inode.c
50501+++ b/fs/debugfs/inode.c
50502@@ -415,7 +415,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
50503 */
50504 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
50505 {
50506+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
50507+ return __create_file(name, S_IFDIR | S_IRWXU,
50508+#else
50509 return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
50510+#endif
50511 parent, NULL, NULL);
50512 }
50513 EXPORT_SYMBOL_GPL(debugfs_create_dir);
50514diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
50515index cc7709e..7e7211f 100644
50516--- a/fs/ecryptfs/inode.c
50517+++ b/fs/ecryptfs/inode.c
50518@@ -674,7 +674,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
50519 old_fs = get_fs();
50520 set_fs(get_ds());
50521 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
50522- (char __user *)lower_buf,
50523+ (char __force_user *)lower_buf,
50524 PATH_MAX);
50525 set_fs(old_fs);
50526 if (rc < 0)
50527@@ -706,7 +706,7 @@ out:
50528 static void
50529 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
50530 {
50531- char *buf = nd_get_link(nd);
50532+ const char *buf = nd_get_link(nd);
50533 if (!IS_ERR(buf)) {
50534 /* Free the char* */
50535 kfree(buf);
50536diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
50537index 412e6ed..d8263e8 100644
50538--- a/fs/ecryptfs/miscdev.c
50539+++ b/fs/ecryptfs/miscdev.c
50540@@ -80,13 +80,6 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file)
50541 int rc;
50542
50543 mutex_lock(&ecryptfs_daemon_hash_mux);
50544- rc = try_module_get(THIS_MODULE);
50545- if (rc == 0) {
50546- rc = -EIO;
50547- printk(KERN_ERR "%s: Error attempting to increment module use "
50548- "count; rc = [%d]\n", __func__, rc);
50549- goto out_unlock_daemon_list;
50550- }
50551 rc = ecryptfs_find_daemon_by_euid(&daemon);
50552 if (!rc) {
50553 rc = -EINVAL;
50554@@ -96,7 +89,7 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file)
50555 if (rc) {
50556 printk(KERN_ERR "%s: Error attempting to spawn daemon; "
50557 "rc = [%d]\n", __func__, rc);
50558- goto out_module_put_unlock_daemon_list;
50559+ goto out_unlock_daemon_list;
50560 }
50561 mutex_lock(&daemon->mux);
50562 if (daemon->flags & ECRYPTFS_DAEMON_MISCDEV_OPEN) {
50563@@ -108,9 +101,6 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file)
50564 atomic_inc(&ecryptfs_num_miscdev_opens);
50565 out_unlock_daemon:
50566 mutex_unlock(&daemon->mux);
50567-out_module_put_unlock_daemon_list:
50568- if (rc)
50569- module_put(THIS_MODULE);
50570 out_unlock_daemon_list:
50571 mutex_unlock(&ecryptfs_daemon_hash_mux);
50572 return rc;
50573@@ -147,7 +137,6 @@ ecryptfs_miscdev_release(struct inode *inode, struct file *file)
50574 "bug.\n", __func__, rc);
50575 BUG();
50576 }
50577- module_put(THIS_MODULE);
50578 return rc;
50579 }
50580
50581@@ -315,7 +304,7 @@ check_list:
50582 goto out_unlock_msg_ctx;
50583 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
50584 if (msg_ctx->msg) {
50585- if (copy_to_user(&buf[i], packet_length, packet_length_size))
50586+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
50587 goto out_unlock_msg_ctx;
50588 i += packet_length_size;
50589 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
50590@@ -471,6 +460,7 @@ out_free:
50591
50592
50593 static const struct file_operations ecryptfs_miscdev_fops = {
50594+ .owner = THIS_MODULE,
50595 .open = ecryptfs_miscdev_open,
50596 .poll = ecryptfs_miscdev_poll,
50597 .read = ecryptfs_miscdev_read,
50598diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
50599index b2a34a1..162fa69 100644
50600--- a/fs/ecryptfs/read_write.c
50601+++ b/fs/ecryptfs/read_write.c
50602@@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
50603 return -EIO;
50604 fs_save = get_fs();
50605 set_fs(get_ds());
50606- rc = vfs_write(lower_file, data, size, &offset);
50607+ rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
50608 set_fs(fs_save);
50609 mark_inode_dirty_sync(ecryptfs_inode);
50610 return rc;
50611@@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
50612 return -EIO;
50613 fs_save = get_fs();
50614 set_fs(get_ds());
50615- rc = vfs_read(lower_file, data, size, &offset);
50616+ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
50617 set_fs(fs_save);
50618 return rc;
50619 }
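
The ecryptfs, cachefiles and compat hunks that add __force_user / __force_kernel casts change nothing at runtime; they are sparse address-space annotations. Under make C=1, __user marks pointers into the userspace address space, and with this tree's stricter checking a kernel buffer passed through a user-pointer API after set_fs(KERNEL_DS) needs an explicit __force cast to document the intent. A stripped-down version of how the annotations expand, patterned on include/linux/compiler.h (they compile away entirely outside sparse):

#include <stdio.h>

/* Simplified from the kernel's compiler.h: only sparse sees the attributes. */
#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __force	__attribute__((force))
#else
# define __user
# define __force
#endif

static long fake_vfs_write(const char __user *buf, unsigned long len)
{
	(void)buf;
	return (long)len;
}

int main(void)
{
	char kbuf[8] = "data";
	/* Without __force, sparse would warn about the address-space mismatch. */
	long n = fake_vfs_write((const char __force __user *)kbuf, 4);

	printf("wrote %ld\n", n);
	return 0;
}
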
50620diff --git a/fs/exec.c b/fs/exec.c
50621index 20df02c..1b1d946 100644
50622--- a/fs/exec.c
50623+++ b/fs/exec.c
50624@@ -55,6 +55,17 @@
50625 #include <linux/pipe_fs_i.h>
50626 #include <linux/oom.h>
50627 #include <linux/compat.h>
50628+#include <linux/random.h>
50629+#include <linux/seq_file.h>
50630+#include <linux/coredump.h>
50631+#include <linux/mman.h>
50632+
50633+#ifdef CONFIG_PAX_REFCOUNT
50634+#include <linux/kallsyms.h>
50635+#include <linux/kdebug.h>
50636+#endif
50637+
50638+#include <trace/events/fs.h>
50639
50640 #include <asm/uaccess.h>
50641 #include <asm/mmu_context.h>
50642@@ -66,6 +77,18 @@
50643
50644 #include <trace/events/sched.h>
50645
50646+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
50647+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
50648+{
50649+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
50650+}
50651+#endif
50652+
50653+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
50654+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
50655+EXPORT_SYMBOL(pax_set_initial_flags_func);
50656+#endif
50657+
50658 int suid_dumpable = 0;
50659
50660 static LIST_HEAD(formats);
50661@@ -75,8 +98,8 @@ void __register_binfmt(struct linux_binfmt * fmt, int insert)
50662 {
50663 BUG_ON(!fmt);
50664 write_lock(&binfmt_lock);
50665- insert ? list_add(&fmt->lh, &formats) :
50666- list_add_tail(&fmt->lh, &formats);
50667+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
50668+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
50669 write_unlock(&binfmt_lock);
50670 }
50671
50672@@ -85,7 +108,7 @@ EXPORT_SYMBOL(__register_binfmt);
50673 void unregister_binfmt(struct linux_binfmt * fmt)
50674 {
50675 write_lock(&binfmt_lock);
50676- list_del(&fmt->lh);
50677+ pax_list_del((struct list_head *)&fmt->lh);
50678 write_unlock(&binfmt_lock);
50679 }
50680
50681@@ -180,18 +203,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
50682 int write)
50683 {
50684 struct page *page;
50685- int ret;
50686
50687-#ifdef CONFIG_STACK_GROWSUP
50688- if (write) {
50689- ret = expand_downwards(bprm->vma, pos);
50690- if (ret < 0)
50691- return NULL;
50692- }
50693-#endif
50694- ret = get_user_pages(current, bprm->mm, pos,
50695- 1, write, 1, &page, NULL);
50696- if (ret <= 0)
50697+ if (0 > expand_downwards(bprm->vma, pos))
50698+ return NULL;
50699+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
50700 return NULL;
50701
50702 if (write) {
50703@@ -207,6 +222,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
50704 if (size <= ARG_MAX)
50705 return page;
50706
50707+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50708+ // only allow 512KB for argv+env on suid/sgid binaries
50709+ // to prevent easy ASLR exhaustion
50710+ if (((!uid_eq(bprm->cred->euid, current_euid())) ||
50711+ (!gid_eq(bprm->cred->egid, current_egid()))) &&
50712+ (size > (512 * 1024))) {
50713+ put_page(page);
50714+ return NULL;
50715+ }
50716+#endif
50717+
50718 /*
50719 * Limit to 1/4-th the stack size for the argv+env strings.
50720 * This ensures that:
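The GRKERNSEC_PROC_MEMMAP block just added caps argv+env at 512KB whenever the exec changes euid or egid, so a caller cannot pre-fill most of a setuid target's address space and erode its ASLR. The test reduces to a credential comparison plus a size threshold; isolated as a sketch (hypothetical helper, reusing the same uid_eq/gid_eq predicates as the hunk):

static bool suid_args_over_budget(const struct cred *new_cred, size_t size)
{
	bool cred_change = !uid_eq(new_cred->euid, current_euid()) ||
			   !gid_eq(new_cred->egid, current_egid());

	/* only privilege-changing execs get the tighter 512KB budget */
	return cred_change && size > 512 * 1024;
}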
50721@@ -266,6 +292,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
50722 vma->vm_end = STACK_TOP_MAX;
50723 vma->vm_start = vma->vm_end - PAGE_SIZE;
50724 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
50725+
50726+#ifdef CONFIG_PAX_SEGMEXEC
50727+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
50728+#endif
50729+
50730 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
50731 INIT_LIST_HEAD(&vma->anon_vma_chain);
50732
50733@@ -276,6 +307,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
50734 mm->stack_vm = mm->total_vm = 1;
50735 up_write(&mm->mmap_sem);
50736 bprm->p = vma->vm_end - sizeof(void *);
50737+
50738+#ifdef CONFIG_PAX_RANDUSTACK
50739+ if (randomize_va_space)
50740+ bprm->p ^= random32() & ~PAGE_MASK;
50741+#endif
50742+
50743 return 0;
50744 err:
50745 up_write(&mm->mmap_sem);
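PAX_RANDUSTACK perturbs the initial stack pointer by XORing random sub-page bits into bprm->p: random32() & ~PAGE_MASK keeps only the offset-within-page bits, so the containing page is unchanged while the start offset inside it becomes unpredictable. The masking arithmetic as a stand-alone C program (4KB pages assumed for illustration):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long p = 0x7fffffffe000UL - sizeof(void *); /* sample bprm->p */

	p ^= (unsigned long)rand() & ~PAGE_MASK; /* only bits 0..11 change */
	printf("in-page offset now: %#lx\n", p & ~PAGE_MASK);
	return 0;
}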
50746@@ -396,7 +433,7 @@ struct user_arg_ptr {
50747 } ptr;
50748 };
50749
50750-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
50751+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
50752 {
50753 const char __user *native;
50754
50755@@ -405,14 +442,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
50756 compat_uptr_t compat;
50757
50758 if (get_user(compat, argv.ptr.compat + nr))
50759- return ERR_PTR(-EFAULT);
50760+ return (const char __force_user *)ERR_PTR(-EFAULT);
50761
50762 return compat_ptr(compat);
50763 }
50764 #endif
50765
50766 if (get_user(native, argv.ptr.native + nr))
50767- return ERR_PTR(-EFAULT);
50768+ return (const char __force_user *)ERR_PTR(-EFAULT);
50769
50770 return native;
50771 }
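The casts in this hunk exist for sparse alone: ERR_PTR() produces a kernel pointer, and returning it from a function typed const char __user * mixes address spaces, as does feeding a __user pointer back into IS_ERR() in the callers below. __force_user and __force_kernel are the patch's shorthands for __force __user and __force __kernel, marking each mix as intentional. The idiom in miniature (a sketch; get_user/ERR_PTR/IS_ERR are standard kernel APIs):

static const char __user *fetch_arg(const char __user *const __user *uargv,
				    int nr)
{
	const char __user *p;

	if (get_user(p, uargv + nr))	/* fault while reading the slot */
		return (const char __force_user *)ERR_PTR(-EFAULT);
	return p;
}

static bool arg_failed(const char __user *p)
{
	/* strip the address space again so IS_ERR() type-checks */
	return IS_ERR((const void __force_kernel *)p);
}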
50772@@ -431,7 +468,7 @@ static int count(struct user_arg_ptr argv, int max)
50773 if (!p)
50774 break;
50775
50776- if (IS_ERR(p))
50777+ if (IS_ERR((const char __force_kernel *)p))
50778 return -EFAULT;
50779
50780 if (i >= max)
50781@@ -466,7 +503,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
50782
50783 ret = -EFAULT;
50784 str = get_user_arg_ptr(argv, argc);
50785- if (IS_ERR(str))
50786+ if (IS_ERR((const char __force_kernel *)str))
50787 goto out;
50788
50789 len = strnlen_user(str, MAX_ARG_STRLEN);
50790@@ -548,7 +585,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
50791 int r;
50792 mm_segment_t oldfs = get_fs();
50793 struct user_arg_ptr argv = {
50794- .ptr.native = (const char __user *const __user *)__argv,
50795+ .ptr.native = (const char __force_user *const __force_user *)__argv,
50796 };
50797
50798 set_fs(KERNEL_DS);
50799@@ -583,7 +620,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
50800 unsigned long new_end = old_end - shift;
50801 struct mmu_gather tlb;
50802
50803- BUG_ON(new_start > new_end);
50804+ if (new_start >= new_end || new_start < mmap_min_addr)
50805+ return -ENOMEM;
50806
50807 /*
50808 * ensure there are no vmas between where we want to go
50809@@ -592,6 +630,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
50810 if (vma != find_vma(mm, new_start))
50811 return -EFAULT;
50812
50813+#ifdef CONFIG_PAX_SEGMEXEC
50814+ BUG_ON(pax_find_mirror_vma(vma));
50815+#endif
50816+
50817 /*
50818 * cover the whole range: [new_start, old_end)
50819 */
50820@@ -672,10 +714,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
50821 stack_top = arch_align_stack(stack_top);
50822 stack_top = PAGE_ALIGN(stack_top);
50823
50824- if (unlikely(stack_top < mmap_min_addr) ||
50825- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
50826- return -ENOMEM;
50827-
50828 stack_shift = vma->vm_end - stack_top;
50829
50830 bprm->p -= stack_shift;
50831@@ -687,8 +725,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
50832 bprm->exec -= stack_shift;
50833
50834 down_write(&mm->mmap_sem);
50835+
50836+ /* Move stack pages down in memory. */
50837+ if (stack_shift) {
50838+ ret = shift_arg_pages(vma, stack_shift);
50839+ if (ret)
50840+ goto out_unlock;
50841+ }
50842+
50843 vm_flags = VM_STACK_FLAGS;
50844
50845+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
50846+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
50847+ vm_flags &= ~VM_EXEC;
50848+
50849+#ifdef CONFIG_PAX_MPROTECT
50850+ if (mm->pax_flags & MF_PAX_MPROTECT)
50851+ vm_flags &= ~VM_MAYEXEC;
50852+#endif
50853+
50854+ }
50855+#endif
50856+
50857 /*
50858 * Adjust stack execute permissions; explicitly enable for
50859 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
50860@@ -707,13 +765,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
50861 goto out_unlock;
50862 BUG_ON(prev != vma);
50863
50864- /* Move stack pages down in memory. */
50865- if (stack_shift) {
50866- ret = shift_arg_pages(vma, stack_shift);
50867- if (ret)
50868- goto out_unlock;
50869- }
50870-
50871 /* mprotect_fixup is overkill to remove the temporary stack flags */
50872 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
50873
50874@@ -737,6 +788,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
50875 #endif
50876 current->mm->start_stack = bprm->p;
50877 ret = expand_stack(vma, stack_base);
50878+
50879+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
50880+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
50881+ unsigned long size, flags, vm_flags;
50882+
50883+ size = STACK_TOP - vma->vm_end;
50884+ flags = MAP_FIXED | MAP_PRIVATE;
50885+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
50886+
50887+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, flags, vm_flags, 0);
50888+
50889+#ifdef CONFIG_X86
50890+ if (!ret) {
50891+ size = mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
50892+ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), flags, vm_flags, 0);
50893+ }
50894+#endif
50895+
50896+ }
50897+#endif
50898+
50899 if (ret)
50900 ret = -EFAULT;
50901
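The RANDMMAP block just added walls off the gap between the stack VMA and STACK_TOP with an inaccessible mapping (VM_NONE | VM_DONTEXPAND | VM_DONTDUMP), and on x86 also reserves a low-address strip whose size mixes the per-mm randomization deltas: (delta_mmap ^ delta_stack) & (0xFFUL << PAGE_SHIFT) yields at most 255 extra pages above mmap_min_addr. The strip arithmetic in isolation (the delta values are made up):

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long mmap_min_addr = 0x10000;
	unsigned long delta_mmap = 0x3a000, delta_stack = 0x91000; /* samples */
	unsigned long size = mmap_min_addr +
		((delta_mmap ^ delta_stack) & (0xFFUL << PAGE_SHIFT));

	printf("reserved low strip: %#lx bytes\n", size); /* 0xbb000 here */
	return 0;
}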
50902@@ -772,6 +844,8 @@ struct file *open_exec(const char *name)
50903
50904 fsnotify_open(file);
50905
50906+ trace_open_exec(name);
50907+
50908 err = deny_write_access(file);
50909 if (err)
50910 goto exit;
50911@@ -795,7 +869,7 @@ int kernel_read(struct file *file, loff_t offset,
50912 old_fs = get_fs();
50913 set_fs(get_ds());
50914 /* The cast to a user pointer is valid due to the set_fs() */
50915- result = vfs_read(file, (void __user *)addr, count, &pos);
50916+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
50917 set_fs(old_fs);
50918 return result;
50919 }
50920@@ -1247,7 +1321,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
50921 }
50922 rcu_read_unlock();
50923
50924- if (p->fs->users > n_fs) {
50925+ if (atomic_read(&p->fs->users) > n_fs) {
50926 bprm->unsafe |= LSM_UNSAFE_SHARE;
50927 } else {
50928 res = -EAGAIN;
50929@@ -1447,6 +1521,31 @@ int search_binary_handler(struct linux_binprm *bprm)
50930
50931 EXPORT_SYMBOL(search_binary_handler);
50932
50933+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50934+static DEFINE_PER_CPU(u64, exec_counter);
50935+static int __init init_exec_counters(void)
50936+{
50937+ unsigned int cpu;
50938+
50939+ for_each_possible_cpu(cpu) {
50940+ per_cpu(exec_counter, cpu) = (u64)cpu;
50941+ }
50942+
50943+ return 0;
50944+}
50945+early_initcall(init_exec_counters);
50946+static inline void increment_exec_counter(void)
50947+{
50948+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
50949+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
50950+}
50951+#else
50952+static inline void increment_exec_counter(void) {}
50953+#endif
50954+
50955+extern void gr_handle_exec_args(struct linux_binprm *bprm,
50956+ struct user_arg_ptr argv);
50957+
50958 /*
50959 * sys_execve() executes a new program.
50960 */
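increment_exec_counter() above packs two fields into current->exec_id: the low 16 bits identify the CPU (seeded by init_exec_counters), and each exec adds 1 << 16, so counter values never collide across CPUs without any locking; BUILD_BUG_ON(NR_CPUS > (1 << 16)) guards the packing. A user-space model of the scheme (per-CPU storage emulated with a plain array):

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4	/* must stay at or below 1 << 16 */

static uint64_t exec_counter[NR_CPUS];

static void init_exec_counters(void)
{
	for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
		exec_counter[cpu] = cpu;	/* CPU id in the low 16 bits */
}

static uint64_t next_exec_id(unsigned int cpu)
{
	return exec_counter[cpu] += 1ULL << 16; /* bump counter bits only */
}

int main(void)
{
	init_exec_counters();
	printf("cpu1, first exec: %#llx\n",	/* prints 0x10001 */
	       (unsigned long long)next_exec_id(1));
	return 0;
}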
50961@@ -1454,6 +1553,11 @@ static int do_execve_common(const char *filename,
50962 struct user_arg_ptr argv,
50963 struct user_arg_ptr envp)
50964 {
50965+#ifdef CONFIG_GRKERNSEC
50966+ struct file *old_exec_file;
50967+ struct acl_subject_label *old_acl;
50968+ struct rlimit old_rlim[RLIM_NLIMITS];
50969+#endif
50970 struct linux_binprm *bprm;
50971 struct file *file;
50972 struct files_struct *displaced;
50973@@ -1461,6 +1565,8 @@ static int do_execve_common(const char *filename,
50974 int retval;
50975 const struct cred *cred = current_cred();
50976
50977+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
50978+
50979 /*
50980 * We move the actual failure in case of RLIMIT_NPROC excess from
50981 * set*uid() to execve() because too many poorly written programs
50982@@ -1501,12 +1607,27 @@ static int do_execve_common(const char *filename,
50983 if (IS_ERR(file))
50984 goto out_unmark;
50985
50986+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
50987+ retval = -EPERM;
50988+ goto out_file;
50989+ }
50990+
50991 sched_exec();
50992
50993 bprm->file = file;
50994 bprm->filename = filename;
50995 bprm->interp = filename;
50996
50997+ if (gr_process_user_ban()) {
50998+ retval = -EPERM;
50999+ goto out_file;
51000+ }
51001+
51002+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
51003+ retval = -EACCES;
51004+ goto out_file;
51005+ }
51006+
51007 retval = bprm_mm_init(bprm);
51008 if (retval)
51009 goto out_file;
51010@@ -1523,24 +1644,65 @@ static int do_execve_common(const char *filename,
51011 if (retval < 0)
51012 goto out;
51013
51014+#ifdef CONFIG_GRKERNSEC
51015+ old_acl = current->acl;
51016+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
51017+ old_exec_file = current->exec_file;
51018+ get_file(file);
51019+ current->exec_file = file;
51020+#endif
51021+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51022+ /* limit suid stack to 8MB
51023+ * we saved the old limits above and will restore them if this exec fails
51024+ */
51025+ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
51026+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
51027+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
51028+#endif
51029+
51030+ if (!gr_tpe_allow(file)) {
51031+ retval = -EACCES;
51032+ goto out_fail;
51033+ }
51034+
51035+ if (gr_check_crash_exec(file)) {
51036+ retval = -EACCES;
51037+ goto out_fail;
51038+ }
51039+
51040+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
51041+ bprm->unsafe);
51042+ if (retval < 0)
51043+ goto out_fail;
51044+
51045 retval = copy_strings_kernel(1, &bprm->filename, bprm);
51046 if (retval < 0)
51047- goto out;
51048+ goto out_fail;
51049
51050 bprm->exec = bprm->p;
51051 retval = copy_strings(bprm->envc, envp, bprm);
51052 if (retval < 0)
51053- goto out;
51054+ goto out_fail;
51055
51056 retval = copy_strings(bprm->argc, argv, bprm);
51057 if (retval < 0)
51058- goto out;
51059+ goto out_fail;
51060+
51061+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
51062+
51063+ gr_handle_exec_args(bprm, argv);
51064
51065 retval = search_binary_handler(bprm);
51066 if (retval < 0)
51067- goto out;
51068+ goto out_fail;
51069+#ifdef CONFIG_GRKERNSEC
51070+ if (old_exec_file)
51071+ fput(old_exec_file);
51072+#endif
51073
51074 /* execve succeeded */
51075+
51076+ increment_exec_counter();
51077 current->fs->in_exec = 0;
51078 current->in_execve = 0;
51079 acct_update_integrals(current);
51080@@ -1549,6 +1711,14 @@ static int do_execve_common(const char *filename,
51081 put_files_struct(displaced);
51082 return retval;
51083
51084+out_fail:
51085+#ifdef CONFIG_GRKERNSEC
51086+ current->acl = old_acl;
51087+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
51088+ fput(current->exec_file);
51089+ current->exec_file = old_exec_file;
51090+#endif
51091+
51092 out:
51093 if (bprm->mm) {
51094 acct_arg_size(bprm, 0);
51095@@ -1697,3 +1867,253 @@ asmlinkage long compat_sys_execve(const char __user * filename,
51096 return error;
51097 }
51098 #endif
51099+
51100+int pax_check_flags(unsigned long *flags)
51101+{
51102+ int retval = 0;
51103+
51104+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
51105+ if (*flags & MF_PAX_SEGMEXEC)
51106+ {
51107+ *flags &= ~MF_PAX_SEGMEXEC;
51108+ retval = -EINVAL;
51109+ }
51110+#endif
51111+
51112+ if ((*flags & MF_PAX_PAGEEXEC)
51113+
51114+#ifdef CONFIG_PAX_PAGEEXEC
51115+ && (*flags & MF_PAX_SEGMEXEC)
51116+#endif
51117+
51118+ )
51119+ {
51120+ *flags &= ~MF_PAX_PAGEEXEC;
51121+ retval = -EINVAL;
51122+ }
51123+
51124+ if ((*flags & MF_PAX_MPROTECT)
51125+
51126+#ifdef CONFIG_PAX_MPROTECT
51127+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
51128+#endif
51129+
51130+ )
51131+ {
51132+ *flags &= ~MF_PAX_MPROTECT;
51133+ retval = -EINVAL;
51134+ }
51135+
51136+ if ((*flags & MF_PAX_EMUTRAMP)
51137+
51138+#ifdef CONFIG_PAX_EMUTRAMP
51139+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
51140+#endif
51141+
51142+ )
51143+ {
51144+ *flags &= ~MF_PAX_EMUTRAMP;
51145+ retval = -EINVAL;
51146+ }
51147+
51148+ return retval;
51149+}
51150+
51151+EXPORT_SYMBOL(pax_check_flags);
51152+
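pax_check_flags() sanitizes a PaX flag word against the compiled-in feature set: each flag whose prerequisite option is absent, or that conflicts with another (PAGEEXEC and SEGMEXEC are mutually exclusive), is cleared, and -EINVAL reports that the input was trimmed. A hypothetical caller would apply it like this (a sketch; apply_pax_flags is not part of the patch):

static unsigned long apply_pax_flags(unsigned long requested)
{
	unsigned long flags = requested;

	if (pax_check_flags(&flags))
		pr_warn("dropped unsupported PaX flags %#lx\n",
			requested & ~flags);
	return flags;	/* now self-consistent */
}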
51153+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
51154+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
51155+{
51156+ struct task_struct *tsk = current;
51157+ struct mm_struct *mm = current->mm;
51158+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
51159+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
51160+ char *path_exec = NULL;
51161+ char *path_fault = NULL;
51162+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
51163+ siginfo_t info = { };
51164+
51165+ if (buffer_exec && buffer_fault) {
51166+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
51167+
51168+ down_read(&mm->mmap_sem);
51169+ vma = mm->mmap;
51170+ while (vma && (!vma_exec || !vma_fault)) {
51171+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
51172+ vma_exec = vma;
51173+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
51174+ vma_fault = vma;
51175+ vma = vma->vm_next;
51176+ }
51177+ if (vma_exec) {
51178+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
51179+ if (IS_ERR(path_exec))
51180+ path_exec = "<path too long>";
51181+ else {
51182+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
51183+ if (path_exec) {
51184+ *path_exec = 0;
51185+ path_exec = buffer_exec;
51186+ } else
51187+ path_exec = "<path too long>";
51188+ }
51189+ }
51190+ if (vma_fault) {
51191+ start = vma_fault->vm_start;
51192+ end = vma_fault->vm_end;
51193+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
51194+ if (vma_fault->vm_file) {
51195+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
51196+ if (IS_ERR(path_fault))
51197+ path_fault = "<path too long>";
51198+ else {
51199+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
51200+ if (path_fault) {
51201+ *path_fault = 0;
51202+ path_fault = buffer_fault;
51203+ } else
51204+ path_fault = "<path too long>";
51205+ }
51206+ } else
51207+ path_fault = "<anonymous mapping>";
51208+ }
51209+ up_read(&mm->mmap_sem);
51210+ }
51211+ if (tsk->signal->curr_ip)
51212+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
51213+ else
51214+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
51215+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
51216+ from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
51217+ free_page((unsigned long)buffer_exec);
51218+ free_page((unsigned long)buffer_fault);
51219+ pax_report_insns(regs, pc, sp);
51220+ info.si_signo = SIGKILL;
51221+ info.si_errno = 0;
51222+ info.si_code = SI_KERNEL;
51223+ info.si_pid = 0;
51224+ info.si_uid = 0;
51225+ do_coredump(&info);
51226+}
51227+#endif
51228+
51229+#ifdef CONFIG_PAX_REFCOUNT
51230+void pax_report_refcount_overflow(struct pt_regs *regs)
51231+{
51232+ if (current->signal->curr_ip)
51233+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
51234+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
51235+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
51236+ else
51237+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
51238+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
51239+ print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
51240+ show_regs(regs);
51241+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
51242+}
51243+#endif
51244+
51245+#ifdef CONFIG_PAX_USERCOPY
51246+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
51247+static noinline int check_stack_object(const void *obj, unsigned long len)
51248+{
51249+ const void * const stack = task_stack_page(current);
51250+ const void * const stackend = stack + THREAD_SIZE;
51251+
51252+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
51253+ const void *frame = NULL;
51254+ const void *oldframe;
51255+#endif
51256+
51257+ if (obj + len < obj)
51258+ return -1;
51259+
51260+ if (obj + len <= stack || stackend <= obj)
51261+ return 0;
51262+
51263+ if (obj < stack || stackend < obj + len)
51264+ return -1;
51265+
51266+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
51267+ oldframe = __builtin_frame_address(1);
51268+ if (oldframe)
51269+ frame = __builtin_frame_address(2);
51270+ /*
51271+ low ----------------------------------------------> high
51272+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
51273+ ^----------------^
51274+ allow copies only within here
51275+ */
51276+ while (stack <= frame && frame < stackend) {
51277+ /* if obj + len extends past the last frame, this
51278+ check won't pass and the next frame will be 0,
51279+ causing us to bail out and correctly report
51280+ the copy as invalid
51281+ */
51282+ if (obj + len <= frame)
51283+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
51284+ oldframe = frame;
51285+ frame = *(const void * const *)frame;
51286+ }
51287+ return -1;
51288+#else
51289+ return 1;
51290+#endif
51291+}
51292+
51293+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
51294+{
51295+ if (current->signal->curr_ip)
51296+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
51297+ &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
51298+ else
51299+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
51300+ to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
51301+ dump_stack();
51302+ gr_handle_kernel_exploit();
51303+ do_group_exit(SIGKILL);
51304+}
51305+#endif
51306+
51307+void __check_object_size(const void *ptr, unsigned long n, bool to_user)
51308+{
51309+
51310+#ifdef CONFIG_PAX_USERCOPY
51311+ const char *type;
51312+
51313+ if (!n)
51314+ return;
51315+
51316+ type = check_heap_object(ptr, n);
51317+ if (!type) {
51318+ if (check_stack_object(ptr, n) != -1)
51319+ return;
51320+ type = "<process stack>";
51321+ }
51322+
51323+ pax_report_usercopy(ptr, n, to_user, type);
51324+#endif
51325+
51326+}
51327+EXPORT_SYMBOL(__check_object_size);
51328+
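__check_object_size() is the PAX_USERCOPY gate: check_heap_object() (defined elsewhere in the patch) vets slab objects; failing that, check_stack_object() accepts the buffer only when it lies fully inside the current stack, and with frame pointers, fully inside the live frame window sketched in the comment above; anything else terminates the task group. A uaccess wrapper would invoke it ahead of the real copy, roughly like this (a sketch, not the patch's actual copy_to_user wiring):

static inline unsigned long
checked_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	__check_object_size(from, n, true);	/* true: kernel-to-user leak check */
	return copy_to_user(to, from, n);
}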
51329+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
51330+void pax_track_stack(void)
51331+{
51332+ unsigned long sp = (unsigned long)&sp;
51333+ if (sp < current_thread_info()->lowest_stack &&
51334+ sp > (unsigned long)task_stack_page(current))
51335+ current_thread_info()->lowest_stack = sp;
51336+}
51337+EXPORT_SYMBOL(pax_track_stack);
51338+#endif
51339+
51340+#ifdef CONFIG_PAX_SIZE_OVERFLOW
51341+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
51342+{
51343+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s\n", func, file, line, ssa_name);
51344+ dump_stack();
51345+ do_group_exit(SIGKILL);
51346+}
51347+EXPORT_SYMBOL(report_size_overflow);
51348+#endif
51349diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
51350index 2616d0e..2ffdec9 100644
51351--- a/fs/ext2/balloc.c
51352+++ b/fs/ext2/balloc.c
51353@@ -1190,10 +1190,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
51354
51355 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
51356 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
51357- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
51358+ if (free_blocks < root_blocks + 1 &&
51359 !uid_eq(sbi->s_resuid, current_fsuid()) &&
51360 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
51361- !in_group_p (sbi->s_resgid))) {
51362+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
51363 return 0;
51364 }
51365 return 1;
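This ext2 change (repeated for ext3 and ext4 below) only reorders the && chain and swaps capable() for capable_nolog(), grsecurity's non-logging variant: because && short-circuits, the capability test now runs last and only when every cheaper identity check has already failed, so ordinary allocations stop generating spurious capability-denial log entries. The ordering idea in miniature (the predicate names are illustrative):

static bool may_use_reserved_blocks(bool is_resuid, bool is_resgid)
{
	/* cheap identity checks first; the potentially logging
	 * capability check fires only if both fail */
	return is_resuid || is_resgid || capable_nolog(CAP_SYS_RESOURCE);
}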
51366diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
51367index 22548f5..41521d8 100644
51368--- a/fs/ext3/balloc.c
51369+++ b/fs/ext3/balloc.c
51370@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
51371
51372 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
51373 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
51374- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
51375+ if (free_blocks < root_blocks + 1 &&
51376 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
51377 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
51378- !in_group_p (sbi->s_resgid))) {
51379+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
51380 return 0;
51381 }
51382 return 1;
51383diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
51384index 92e68b3..115d987 100644
51385--- a/fs/ext4/balloc.c
51386+++ b/fs/ext4/balloc.c
51387@@ -505,8 +505,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
51388 /* Hm, nope. Are (enough) root reserved clusters available? */
51389 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
51390 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
51391- capable(CAP_SYS_RESOURCE) ||
51392- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
51393+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
51394+ capable_nolog(CAP_SYS_RESOURCE)) {
51395
51396 if (free_clusters >= (nclusters + dirty_clusters))
51397 return 1;
51398diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
51399index bbcd6a0..2824592 100644
51400--- a/fs/ext4/ext4.h
51401+++ b/fs/ext4/ext4.h
51402@@ -1265,19 +1265,19 @@ struct ext4_sb_info {
51403 unsigned long s_mb_last_start;
51404
51405 /* stats for buddy allocator */
51406- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
51407- atomic_t s_bal_success; /* we found long enough chunks */
51408- atomic_t s_bal_allocated; /* in blocks */
51409- atomic_t s_bal_ex_scanned; /* total extents scanned */
51410- atomic_t s_bal_goals; /* goal hits */
51411- atomic_t s_bal_breaks; /* too long searches */
51412- atomic_t s_bal_2orders; /* 2^order hits */
51413+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
51414+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
51415+ atomic_unchecked_t s_bal_allocated; /* in blocks */
51416+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
51417+ atomic_unchecked_t s_bal_goals; /* goal hits */
51418+ atomic_unchecked_t s_bal_breaks; /* too long searches */
51419+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
51420 spinlock_t s_bal_lock;
51421 unsigned long s_mb_buddies_generated;
51422 unsigned long long s_mb_generation_time;
51423- atomic_t s_mb_lost_chunks;
51424- atomic_t s_mb_preallocated;
51425- atomic_t s_mb_discarded;
51426+ atomic_unchecked_t s_mb_lost_chunks;
51427+ atomic_unchecked_t s_mb_preallocated;
51428+ atomic_unchecked_t s_mb_discarded;
51429 atomic_t s_lock_busy;
51430
51431 /* locality groups */
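Every mballoc statistic here moves from atomic_t to atomic_unchecked_t. Under PAX_REFCOUNT, ordinary atomic_t arithmetic traps on overflow to catch reference-count bugs; pure statistics may legitimately wrap, so the patch gives them an _unchecked twin whose operations skip the overflow trap (note that s_lock_busy, which is not a statistic of that kind, stays atomic_t). The twin's shape, sketched generically rather than per-architecture:

typedef struct {
	int counter;		/* same layout as atomic_t */
} atomic_unchecked_t;

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	/* emitted without the REFCOUNT overflow check */
	__sync_fetch_and_add(&v->counter, 1);
}

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return *(volatile const int *)&v->counter;
}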
51432diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
51433index 82f8c2d..ce7c889 100644
51434--- a/fs/ext4/mballoc.c
51435+++ b/fs/ext4/mballoc.c
51436@@ -1747,7 +1747,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
51437 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
51438
51439 if (EXT4_SB(sb)->s_mb_stats)
51440- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
51441+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
51442
51443 break;
51444 }
51445@@ -2044,7 +2044,7 @@ repeat:
51446 ac->ac_status = AC_STATUS_CONTINUE;
51447 ac->ac_flags |= EXT4_MB_HINT_FIRST;
51448 cr = 3;
51449- atomic_inc(&sbi->s_mb_lost_chunks);
51450+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
51451 goto repeat;
51452 }
51453 }
51454@@ -2552,25 +2552,25 @@ int ext4_mb_release(struct super_block *sb)
51455 if (sbi->s_mb_stats) {
51456 ext4_msg(sb, KERN_INFO,
51457 "mballoc: %u blocks %u reqs (%u success)",
51458- atomic_read(&sbi->s_bal_allocated),
51459- atomic_read(&sbi->s_bal_reqs),
51460- atomic_read(&sbi->s_bal_success));
51461+ atomic_read_unchecked(&sbi->s_bal_allocated),
51462+ atomic_read_unchecked(&sbi->s_bal_reqs),
51463+ atomic_read_unchecked(&sbi->s_bal_success));
51464 ext4_msg(sb, KERN_INFO,
51465 "mballoc: %u extents scanned, %u goal hits, "
51466 "%u 2^N hits, %u breaks, %u lost",
51467- atomic_read(&sbi->s_bal_ex_scanned),
51468- atomic_read(&sbi->s_bal_goals),
51469- atomic_read(&sbi->s_bal_2orders),
51470- atomic_read(&sbi->s_bal_breaks),
51471- atomic_read(&sbi->s_mb_lost_chunks));
51472+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
51473+ atomic_read_unchecked(&sbi->s_bal_goals),
51474+ atomic_read_unchecked(&sbi->s_bal_2orders),
51475+ atomic_read_unchecked(&sbi->s_bal_breaks),
51476+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
51477 ext4_msg(sb, KERN_INFO,
51478 "mballoc: %lu generated and it took %Lu",
51479 sbi->s_mb_buddies_generated,
51480 sbi->s_mb_generation_time);
51481 ext4_msg(sb, KERN_INFO,
51482 "mballoc: %u preallocated, %u discarded",
51483- atomic_read(&sbi->s_mb_preallocated),
51484- atomic_read(&sbi->s_mb_discarded));
51485+ atomic_read_unchecked(&sbi->s_mb_preallocated),
51486+ atomic_read_unchecked(&sbi->s_mb_discarded));
51487 }
51488
51489 free_percpu(sbi->s_locality_groups);
51490@@ -3060,16 +3060,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
51491 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
51492
51493 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
51494- atomic_inc(&sbi->s_bal_reqs);
51495- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
51496+ atomic_inc_unchecked(&sbi->s_bal_reqs);
51497+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
51498 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
51499- atomic_inc(&sbi->s_bal_success);
51500- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
51501+ atomic_inc_unchecked(&sbi->s_bal_success);
51502+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
51503 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
51504 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
51505- atomic_inc(&sbi->s_bal_goals);
51506+ atomic_inc_unchecked(&sbi->s_bal_goals);
51507 if (ac->ac_found > sbi->s_mb_max_to_scan)
51508- atomic_inc(&sbi->s_bal_breaks);
51509+ atomic_inc_unchecked(&sbi->s_bal_breaks);
51510 }
51511
51512 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
51513@@ -3469,7 +3469,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
51514 trace_ext4_mb_new_inode_pa(ac, pa);
51515
51516 ext4_mb_use_inode_pa(ac, pa);
51517- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
51518+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
51519
51520 ei = EXT4_I(ac->ac_inode);
51521 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
51522@@ -3529,7 +3529,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
51523 trace_ext4_mb_new_group_pa(ac, pa);
51524
51525 ext4_mb_use_group_pa(ac, pa);
51526- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
51527+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
51528
51529 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
51530 lg = ac->ac_lg;
51531@@ -3618,7 +3618,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
51532 * from the bitmap and continue.
51533 */
51534 }
51535- atomic_add(free, &sbi->s_mb_discarded);
51536+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
51537
51538 return err;
51539 }
51540@@ -3636,7 +3636,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
51541 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
51542 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
51543 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
51544- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
51545+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
51546 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
51547
51548 return 0;
51549diff --git a/fs/ext4/super.c b/fs/ext4/super.c
51550index 24c767d..893aa55 100644
51551--- a/fs/ext4/super.c
51552+++ b/fs/ext4/super.c
51553@@ -2429,7 +2429,7 @@ struct ext4_attr {
51554 ssize_t (*store)(struct ext4_attr *, struct ext4_sb_info *,
51555 const char *, size_t);
51556 int offset;
51557-};
51558+} __do_const;
51559
51560 static int parse_strtoul(const char *buf,
51561 unsigned long max, unsigned long *value)
51562diff --git a/fs/fcntl.c b/fs/fcntl.c
51563index 71a600a..20d87b1 100644
51564--- a/fs/fcntl.c
51565+++ b/fs/fcntl.c
51566@@ -107,6 +107,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
51567 if (err)
51568 return err;
51569
51570+ if (gr_handle_chroot_fowner(pid, type))
51571+ return -ENOENT;
51572+ if (gr_check_protected_task_fowner(pid, type))
51573+ return -EACCES;
51574+
51575 f_modown(filp, pid, type, force);
51576 return 0;
51577 }
51578diff --git a/fs/fhandle.c b/fs/fhandle.c
51579index 999ff5c..41f4109 100644
51580--- a/fs/fhandle.c
51581+++ b/fs/fhandle.c
51582@@ -67,8 +67,7 @@ static long do_sys_name_to_handle(struct path *path,
51583 } else
51584 retval = 0;
51585 /* copy the mount id */
51586- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
51587- sizeof(*mnt_id)) ||
51588+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
51589 copy_to_user(ufh, handle,
51590 sizeof(struct file_handle) + handle_bytes))
51591 retval = -EFAULT;
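The fhandle change replaces a copy_to_user() of one int with put_user(): for a single scalar, put_user() takes its size from the destination pointer's type, removing the hand-written sizeof that could go stale, and it uses the cheaper fixed-size path. The resulting shape (a sketch):

static int export_mnt_id(int mnt_id, int __user *uptr)
{
	/* old: copy_to_user(uptr, &mnt_id, sizeof(*uptr))
	 * new: size inferred from *uptr, nothing to get wrong */
	return put_user(mnt_id, uptr) ? -EFAULT : 0;
}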
51592diff --git a/fs/fifo.c b/fs/fifo.c
51593index cf6f434..3d7942c 100644
51594--- a/fs/fifo.c
51595+++ b/fs/fifo.c
51596@@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
51597 */
51598 filp->f_op = &read_pipefifo_fops;
51599 pipe->r_counter++;
51600- if (pipe->readers++ == 0)
51601+ if (atomic_inc_return(&pipe->readers) == 1)
51602 wake_up_partner(inode);
51603
51604- if (!pipe->writers) {
51605+ if (!atomic_read(&pipe->writers)) {
51606 if ((filp->f_flags & O_NONBLOCK)) {
51607 /* suppress POLLHUP until we have
51608 * seen a writer */
51609@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
51610 * errno=ENXIO when there is no process reading the FIFO.
51611 */
51612 ret = -ENXIO;
51613- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
51614+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
51615 goto err;
51616
51617 filp->f_op = &write_pipefifo_fops;
51618 pipe->w_counter++;
51619- if (!pipe->writers++)
51620+ if (atomic_inc_return(&pipe->writers) == 1)
51621 wake_up_partner(inode);
51622
51623- if (!pipe->readers) {
51624+ if (!atomic_read(&pipe->readers)) {
51625 if (wait_for_partner(inode, &pipe->r_counter))
51626 goto err_wr;
51627 }
51628@@ -104,11 +104,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
51629 */
51630 filp->f_op = &rdwr_pipefifo_fops;
51631
51632- pipe->readers++;
51633- pipe->writers++;
51634+ atomic_inc(&pipe->readers);
51635+ atomic_inc(&pipe->writers);
51636 pipe->r_counter++;
51637 pipe->w_counter++;
51638- if (pipe->readers == 1 || pipe->writers == 1)
51639+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
51640 wake_up_partner(inode);
51641 break;
51642
51643@@ -122,19 +122,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
51644 return 0;
51645
51646 err_rd:
51647- if (!--pipe->readers)
51648+ if (atomic_dec_and_test(&pipe->readers))
51649 wake_up_interruptible(&pipe->wait);
51650 ret = -ERESTARTSYS;
51651 goto err;
51652
51653 err_wr:
51654- if (!--pipe->writers)
51655+ if (atomic_dec_and_test(&pipe->writers))
51656 wake_up_interruptible(&pipe->wait);
51657 ret = -ERESTARTSYS;
51658 goto err;
51659
51660 err:
51661- if (!pipe->readers && !pipe->writers)
51662+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
51663 free_pipe_info(inode);
51664
51665 err_nocleanup:
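fifo_open() keeps its reader/writer bookkeeping but makes every update-and-test indivisible, and each rewrite preserves the old predicate exactly: pipe->readers++ == 0 becomes atomic_inc_return(&pipe->readers) == 1 ("I am the first"), and !--pipe->writers becomes atomic_dec_and_test(&pipe->writers) ("I was the last"). The same correspondence in portable C11 atomics (a model of the semantics, not the kernel API):

#include <stdatomic.h>
#include <stdbool.h>

static bool first_reader(atomic_int *readers)
{
	/* old: readers++ == 0; compare the post-increment value with 1 */
	return atomic_fetch_add(readers, 1) + 1 == 1;
}

static bool last_writer(atomic_int *writers)
{
	/* old: !--writers; kernel spelling: atomic_dec_and_test() */
	return atomic_fetch_sub(writers, 1) - 1 == 0;
}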
51666diff --git a/fs/file.c b/fs/file.c
51667index 2b3570b..c57924b 100644
51668--- a/fs/file.c
51669+++ b/fs/file.c
51670@@ -16,6 +16,7 @@
51671 #include <linux/slab.h>
51672 #include <linux/vmalloc.h>
51673 #include <linux/file.h>
51674+#include <linux/security.h>
51675 #include <linux/fdtable.h>
51676 #include <linux/bitops.h>
51677 #include <linux/interrupt.h>
51678@@ -892,6 +893,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
51679 if (!file)
51680 return __close_fd(files, fd);
51681
51682+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
51683 if (fd >= rlimit(RLIMIT_NOFILE))
51684 return -EBADF;
51685
51686@@ -918,6 +920,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
51687 if (unlikely(oldfd == newfd))
51688 return -EINVAL;
51689
51690+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
51691 if (newfd >= rlimit(RLIMIT_NOFILE))
51692 return -EBADF;
51693
51694@@ -973,6 +976,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
51695 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
51696 {
51697 int err;
51698+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
51699 if (from >= rlimit(RLIMIT_NOFILE))
51700 return -EINVAL;
51701 err = alloc_fd(from, flags);
51702diff --git a/fs/filesystems.c b/fs/filesystems.c
51703index da165f6..3671bdb 100644
51704--- a/fs/filesystems.c
51705+++ b/fs/filesystems.c
51706@@ -273,7 +273,12 @@ struct file_system_type *get_fs_type(const char *name)
51707 int len = dot ? dot - name : strlen(name);
51708
51709 fs = __get_fs_type(name, len);
51710+
51711+#ifdef CONFIG_GRKERNSEC_MODHARDEN
51712+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
51713+#else
51714 if (!fs && (request_module("%.*s", len, name) == 0))
51715+#endif
51716 fs = __get_fs_type(name, len);
51717
51718 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
51719diff --git a/fs/fs_struct.c b/fs/fs_struct.c
51720index fe6ca58..65318cf 100644
51721--- a/fs/fs_struct.c
51722+++ b/fs/fs_struct.c
51723@@ -4,6 +4,7 @@
51724 #include <linux/path.h>
51725 #include <linux/slab.h>
51726 #include <linux/fs_struct.h>
51727+#include <linux/grsecurity.h>
51728 #include "internal.h"
51729
51730 /*
51731@@ -19,6 +20,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
51732 write_seqcount_begin(&fs->seq);
51733 old_root = fs->root;
51734 fs->root = *path;
51735+ gr_set_chroot_entries(current, path);
51736 write_seqcount_end(&fs->seq);
51737 spin_unlock(&fs->lock);
51738 if (old_root.dentry)
51739@@ -53,6 +55,21 @@ static inline int replace_path(struct path *p, const struct path *old, const str
51740 return 1;
51741 }
51742
51743+static inline int replace_root_path(struct task_struct *task, struct path *p, const struct path *old, struct path *new)
51744+{
51745+ if (likely(p->dentry != old->dentry || p->mnt != old->mnt))
51746+ return 0;
51747+ *p = *new;
51748+
51749+ /* This function is only called from pivot_root(). Leave our
51750+ gr_chroot_dentry and is_chrooted flags as-is, so that a
51751+ pivoted root isn't treated as a chroot
51752+ */
51753+ //gr_set_chroot_entries(task, new);
51754+
51755+ return 1;
51756+}
51757+
51758 void chroot_fs_refs(struct path *old_root, struct path *new_root)
51759 {
51760 struct task_struct *g, *p;
51761@@ -67,7 +84,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
51762 int hits = 0;
51763 spin_lock(&fs->lock);
51764 write_seqcount_begin(&fs->seq);
51765- hits += replace_path(&fs->root, old_root, new_root);
51766+ hits += replace_root_path(p, &fs->root, old_root, new_root);
51767 hits += replace_path(&fs->pwd, old_root, new_root);
51768 write_seqcount_end(&fs->seq);
51769 while (hits--) {
51770@@ -99,7 +116,8 @@ void exit_fs(struct task_struct *tsk)
51771 task_lock(tsk);
51772 spin_lock(&fs->lock);
51773 tsk->fs = NULL;
51774- kill = !--fs->users;
51775+ gr_clear_chroot_entries(tsk);
51776+ kill = !atomic_dec_return(&fs->users);
51777 spin_unlock(&fs->lock);
51778 task_unlock(tsk);
51779 if (kill)
51780@@ -112,7 +130,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
51781 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
51782 /* We don't need to lock fs - think why ;-) */
51783 if (fs) {
51784- fs->users = 1;
51785+ atomic_set(&fs->users, 1);
51786 fs->in_exec = 0;
51787 spin_lock_init(&fs->lock);
51788 seqcount_init(&fs->seq);
51789@@ -121,6 +139,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
51790 spin_lock(&old->lock);
51791 fs->root = old->root;
51792 path_get(&fs->root);
51793+ /* instead of calling gr_set_chroot_entries here,
51794+ we call it from every caller of this function
51795+ */
51796 fs->pwd = old->pwd;
51797 path_get(&fs->pwd);
51798 spin_unlock(&old->lock);
51799@@ -139,8 +160,9 @@ int unshare_fs_struct(void)
51800
51801 task_lock(current);
51802 spin_lock(&fs->lock);
51803- kill = !--fs->users;
51804+ kill = !atomic_dec_return(&fs->users);
51805 current->fs = new_fs;
51806+ gr_set_chroot_entries(current, &new_fs->root);
51807 spin_unlock(&fs->lock);
51808 task_unlock(current);
51809
51810@@ -153,13 +175,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
51811
51812 int current_umask(void)
51813 {
51814- return current->fs->umask;
51815+ return current->fs->umask | gr_acl_umask();
51816 }
51817 EXPORT_SYMBOL(current_umask);
51818
51819 /* to be mentioned only in INIT_TASK */
51820 struct fs_struct init_fs = {
51821- .users = 1,
51822+ .users = ATOMIC_INIT(1),
51823 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
51824 .seq = SEQCNT_ZERO,
51825 .umask = 0022,
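current_umask() now ORs gr_acl_umask() into the process umask. Because umask bits subtract permissions, OR-ing a policy umask on top can only tighten the final mode, never loosen it. Worked example (the 0007 policy value is made up):

#include <stdio.h>

int main(void)
{
	unsigned proc_umask = 0022;	/* process's own umask */
	unsigned acl_umask = 0007;	/* policy umask, hypothetical */
	unsigned mode = 0666 & ~(proc_umask | acl_umask);

	printf("resulting mode: %o\n", mode);	/* 640: strictly tighter */
	return 0;
}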
51826diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
51827index 8dcb114..b1072e2 100644
51828--- a/fs/fscache/cookie.c
51829+++ b/fs/fscache/cookie.c
51830@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
51831 parent ? (char *) parent->def->name : "<no-parent>",
51832 def->name, netfs_data);
51833
51834- fscache_stat(&fscache_n_acquires);
51835+ fscache_stat_unchecked(&fscache_n_acquires);
51836
51837 /* if there's no parent cookie, then we don't create one here either */
51838 if (!parent) {
51839- fscache_stat(&fscache_n_acquires_null);
51840+ fscache_stat_unchecked(&fscache_n_acquires_null);
51841 _leave(" [no parent]");
51842 return NULL;
51843 }
51844@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
51845 /* allocate and initialise a cookie */
51846 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
51847 if (!cookie) {
51848- fscache_stat(&fscache_n_acquires_oom);
51849+ fscache_stat_unchecked(&fscache_n_acquires_oom);
51850 _leave(" [ENOMEM]");
51851 return NULL;
51852 }
51853@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
51854
51855 switch (cookie->def->type) {
51856 case FSCACHE_COOKIE_TYPE_INDEX:
51857- fscache_stat(&fscache_n_cookie_index);
51858+ fscache_stat_unchecked(&fscache_n_cookie_index);
51859 break;
51860 case FSCACHE_COOKIE_TYPE_DATAFILE:
51861- fscache_stat(&fscache_n_cookie_data);
51862+ fscache_stat_unchecked(&fscache_n_cookie_data);
51863 break;
51864 default:
51865- fscache_stat(&fscache_n_cookie_special);
51866+ fscache_stat_unchecked(&fscache_n_cookie_special);
51867 break;
51868 }
51869
51870@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
51871 if (fscache_acquire_non_index_cookie(cookie) < 0) {
51872 atomic_dec(&parent->n_children);
51873 __fscache_cookie_put(cookie);
51874- fscache_stat(&fscache_n_acquires_nobufs);
51875+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
51876 _leave(" = NULL");
51877 return NULL;
51878 }
51879 }
51880
51881- fscache_stat(&fscache_n_acquires_ok);
51882+ fscache_stat_unchecked(&fscache_n_acquires_ok);
51883 _leave(" = %p", cookie);
51884 return cookie;
51885 }
51886@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
51887 cache = fscache_select_cache_for_object(cookie->parent);
51888 if (!cache) {
51889 up_read(&fscache_addremove_sem);
51890- fscache_stat(&fscache_n_acquires_no_cache);
51891+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
51892 _leave(" = -ENOMEDIUM [no cache]");
51893 return -ENOMEDIUM;
51894 }
51895@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
51896 object = cache->ops->alloc_object(cache, cookie);
51897 fscache_stat_d(&fscache_n_cop_alloc_object);
51898 if (IS_ERR(object)) {
51899- fscache_stat(&fscache_n_object_no_alloc);
51900+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
51901 ret = PTR_ERR(object);
51902 goto error;
51903 }
51904
51905- fscache_stat(&fscache_n_object_alloc);
51906+ fscache_stat_unchecked(&fscache_n_object_alloc);
51907
51908 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
51909
51910@@ -378,7 +378,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
51911
51912 _enter("{%s}", cookie->def->name);
51913
51914- fscache_stat(&fscache_n_invalidates);
51915+ fscache_stat_unchecked(&fscache_n_invalidates);
51916
51917 /* Only permit invalidation of data files. Invalidating an index will
51918 * require the caller to release all its attachments to the tree rooted
51919@@ -437,10 +437,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
51920 struct fscache_object *object;
51921 struct hlist_node *_p;
51922
51923- fscache_stat(&fscache_n_updates);
51924+ fscache_stat_unchecked(&fscache_n_updates);
51925
51926 if (!cookie) {
51927- fscache_stat(&fscache_n_updates_null);
51928+ fscache_stat_unchecked(&fscache_n_updates_null);
51929 _leave(" [no cookie]");
51930 return;
51931 }
51932@@ -474,12 +474,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
51933 struct fscache_object *object;
51934 unsigned long event;
51935
51936- fscache_stat(&fscache_n_relinquishes);
51937+ fscache_stat_unchecked(&fscache_n_relinquishes);
51938 if (retire)
51939- fscache_stat(&fscache_n_relinquishes_retire);
51940+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
51941
51942 if (!cookie) {
51943- fscache_stat(&fscache_n_relinquishes_null);
51944+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
51945 _leave(" [no cookie]");
51946 return;
51947 }
51948@@ -495,7 +495,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
51949
51950 /* wait for the cookie to finish being instantiated (or to fail) */
51951 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
51952- fscache_stat(&fscache_n_relinquishes_waitcrt);
51953+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
51954 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
51955 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
51956 }
51957diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
51958index ee38fef..0a326d4 100644
51959--- a/fs/fscache/internal.h
51960+++ b/fs/fscache/internal.h
51961@@ -148,101 +148,101 @@ extern void fscache_proc_cleanup(void);
51962 * stats.c
51963 */
51964 #ifdef CONFIG_FSCACHE_STATS
51965-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
51966-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
51967+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
51968+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
51969
51970-extern atomic_t fscache_n_op_pend;
51971-extern atomic_t fscache_n_op_run;
51972-extern atomic_t fscache_n_op_enqueue;
51973-extern atomic_t fscache_n_op_deferred_release;
51974-extern atomic_t fscache_n_op_release;
51975-extern atomic_t fscache_n_op_gc;
51976-extern atomic_t fscache_n_op_cancelled;
51977-extern atomic_t fscache_n_op_rejected;
51978+extern atomic_unchecked_t fscache_n_op_pend;
51979+extern atomic_unchecked_t fscache_n_op_run;
51980+extern atomic_unchecked_t fscache_n_op_enqueue;
51981+extern atomic_unchecked_t fscache_n_op_deferred_release;
51982+extern atomic_unchecked_t fscache_n_op_release;
51983+extern atomic_unchecked_t fscache_n_op_gc;
51984+extern atomic_unchecked_t fscache_n_op_cancelled;
51985+extern atomic_unchecked_t fscache_n_op_rejected;
51986
51987-extern atomic_t fscache_n_attr_changed;
51988-extern atomic_t fscache_n_attr_changed_ok;
51989-extern atomic_t fscache_n_attr_changed_nobufs;
51990-extern atomic_t fscache_n_attr_changed_nomem;
51991-extern atomic_t fscache_n_attr_changed_calls;
51992+extern atomic_unchecked_t fscache_n_attr_changed;
51993+extern atomic_unchecked_t fscache_n_attr_changed_ok;
51994+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
51995+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
51996+extern atomic_unchecked_t fscache_n_attr_changed_calls;
51997
51998-extern atomic_t fscache_n_allocs;
51999-extern atomic_t fscache_n_allocs_ok;
52000-extern atomic_t fscache_n_allocs_wait;
52001-extern atomic_t fscache_n_allocs_nobufs;
52002-extern atomic_t fscache_n_allocs_intr;
52003-extern atomic_t fscache_n_allocs_object_dead;
52004-extern atomic_t fscache_n_alloc_ops;
52005-extern atomic_t fscache_n_alloc_op_waits;
52006+extern atomic_unchecked_t fscache_n_allocs;
52007+extern atomic_unchecked_t fscache_n_allocs_ok;
52008+extern atomic_unchecked_t fscache_n_allocs_wait;
52009+extern atomic_unchecked_t fscache_n_allocs_nobufs;
52010+extern atomic_unchecked_t fscache_n_allocs_intr;
52011+extern atomic_unchecked_t fscache_n_allocs_object_dead;
52012+extern atomic_unchecked_t fscache_n_alloc_ops;
52013+extern atomic_unchecked_t fscache_n_alloc_op_waits;
52014
52015-extern atomic_t fscache_n_retrievals;
52016-extern atomic_t fscache_n_retrievals_ok;
52017-extern atomic_t fscache_n_retrievals_wait;
52018-extern atomic_t fscache_n_retrievals_nodata;
52019-extern atomic_t fscache_n_retrievals_nobufs;
52020-extern atomic_t fscache_n_retrievals_intr;
52021-extern atomic_t fscache_n_retrievals_nomem;
52022-extern atomic_t fscache_n_retrievals_object_dead;
52023-extern atomic_t fscache_n_retrieval_ops;
52024-extern atomic_t fscache_n_retrieval_op_waits;
52025+extern atomic_unchecked_t fscache_n_retrievals;
52026+extern atomic_unchecked_t fscache_n_retrievals_ok;
52027+extern atomic_unchecked_t fscache_n_retrievals_wait;
52028+extern atomic_unchecked_t fscache_n_retrievals_nodata;
52029+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
52030+extern atomic_unchecked_t fscache_n_retrievals_intr;
52031+extern atomic_unchecked_t fscache_n_retrievals_nomem;
52032+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
52033+extern atomic_unchecked_t fscache_n_retrieval_ops;
52034+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
52035
52036-extern atomic_t fscache_n_stores;
52037-extern atomic_t fscache_n_stores_ok;
52038-extern atomic_t fscache_n_stores_again;
52039-extern atomic_t fscache_n_stores_nobufs;
52040-extern atomic_t fscache_n_stores_oom;
52041-extern atomic_t fscache_n_store_ops;
52042-extern atomic_t fscache_n_store_calls;
52043-extern atomic_t fscache_n_store_pages;
52044-extern atomic_t fscache_n_store_radix_deletes;
52045-extern atomic_t fscache_n_store_pages_over_limit;
52046+extern atomic_unchecked_t fscache_n_stores;
52047+extern atomic_unchecked_t fscache_n_stores_ok;
52048+extern atomic_unchecked_t fscache_n_stores_again;
52049+extern atomic_unchecked_t fscache_n_stores_nobufs;
52050+extern atomic_unchecked_t fscache_n_stores_oom;
52051+extern atomic_unchecked_t fscache_n_store_ops;
52052+extern atomic_unchecked_t fscache_n_store_calls;
52053+extern atomic_unchecked_t fscache_n_store_pages;
52054+extern atomic_unchecked_t fscache_n_store_radix_deletes;
52055+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
52056
52057-extern atomic_t fscache_n_store_vmscan_not_storing;
52058-extern atomic_t fscache_n_store_vmscan_gone;
52059-extern atomic_t fscache_n_store_vmscan_busy;
52060-extern atomic_t fscache_n_store_vmscan_cancelled;
52061-extern atomic_t fscache_n_store_vmscan_wait;
52062+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
52063+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
52064+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
52065+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
52066+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
52067
52068-extern atomic_t fscache_n_marks;
52069-extern atomic_t fscache_n_uncaches;
52070+extern atomic_unchecked_t fscache_n_marks;
52071+extern atomic_unchecked_t fscache_n_uncaches;
52072
52073-extern atomic_t fscache_n_acquires;
52074-extern atomic_t fscache_n_acquires_null;
52075-extern atomic_t fscache_n_acquires_no_cache;
52076-extern atomic_t fscache_n_acquires_ok;
52077-extern atomic_t fscache_n_acquires_nobufs;
52078-extern atomic_t fscache_n_acquires_oom;
52079+extern atomic_unchecked_t fscache_n_acquires;
52080+extern atomic_unchecked_t fscache_n_acquires_null;
52081+extern atomic_unchecked_t fscache_n_acquires_no_cache;
52082+extern atomic_unchecked_t fscache_n_acquires_ok;
52083+extern atomic_unchecked_t fscache_n_acquires_nobufs;
52084+extern atomic_unchecked_t fscache_n_acquires_oom;
52085
52086-extern atomic_t fscache_n_invalidates;
52087-extern atomic_t fscache_n_invalidates_run;
52088+extern atomic_unchecked_t fscache_n_invalidates;
52089+extern atomic_unchecked_t fscache_n_invalidates_run;
52090
52091-extern atomic_t fscache_n_updates;
52092-extern atomic_t fscache_n_updates_null;
52093-extern atomic_t fscache_n_updates_run;
52094+extern atomic_unchecked_t fscache_n_updates;
52095+extern atomic_unchecked_t fscache_n_updates_null;
52096+extern atomic_unchecked_t fscache_n_updates_run;
52097
52098-extern atomic_t fscache_n_relinquishes;
52099-extern atomic_t fscache_n_relinquishes_null;
52100-extern atomic_t fscache_n_relinquishes_waitcrt;
52101-extern atomic_t fscache_n_relinquishes_retire;
52102+extern atomic_unchecked_t fscache_n_relinquishes;
52103+extern atomic_unchecked_t fscache_n_relinquishes_null;
52104+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
52105+extern atomic_unchecked_t fscache_n_relinquishes_retire;
52106
52107-extern atomic_t fscache_n_cookie_index;
52108-extern atomic_t fscache_n_cookie_data;
52109-extern atomic_t fscache_n_cookie_special;
52110+extern atomic_unchecked_t fscache_n_cookie_index;
52111+extern atomic_unchecked_t fscache_n_cookie_data;
52112+extern atomic_unchecked_t fscache_n_cookie_special;
52113
52114-extern atomic_t fscache_n_object_alloc;
52115-extern atomic_t fscache_n_object_no_alloc;
52116-extern atomic_t fscache_n_object_lookups;
52117-extern atomic_t fscache_n_object_lookups_negative;
52118-extern atomic_t fscache_n_object_lookups_positive;
52119-extern atomic_t fscache_n_object_lookups_timed_out;
52120-extern atomic_t fscache_n_object_created;
52121-extern atomic_t fscache_n_object_avail;
52122-extern atomic_t fscache_n_object_dead;
52123+extern atomic_unchecked_t fscache_n_object_alloc;
52124+extern atomic_unchecked_t fscache_n_object_no_alloc;
52125+extern atomic_unchecked_t fscache_n_object_lookups;
52126+extern atomic_unchecked_t fscache_n_object_lookups_negative;
52127+extern atomic_unchecked_t fscache_n_object_lookups_positive;
52128+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
52129+extern atomic_unchecked_t fscache_n_object_created;
52130+extern atomic_unchecked_t fscache_n_object_avail;
52131+extern atomic_unchecked_t fscache_n_object_dead;
52132
52133-extern atomic_t fscache_n_checkaux_none;
52134-extern atomic_t fscache_n_checkaux_okay;
52135-extern atomic_t fscache_n_checkaux_update;
52136-extern atomic_t fscache_n_checkaux_obsolete;
52137+extern atomic_unchecked_t fscache_n_checkaux_none;
52138+extern atomic_unchecked_t fscache_n_checkaux_okay;
52139+extern atomic_unchecked_t fscache_n_checkaux_update;
52140+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
52141
52142 extern atomic_t fscache_n_cop_alloc_object;
52143 extern atomic_t fscache_n_cop_lookup_object;
52144@@ -267,6 +267,11 @@ static inline void fscache_stat(atomic_t *stat)
52145 atomic_inc(stat);
52146 }
52147
52148+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
52149+{
52150+ atomic_inc_unchecked(stat);
52151+}
52152+
52153 static inline void fscache_stat_d(atomic_t *stat)
52154 {
52155 atomic_dec(stat);
52156@@ -279,6 +284,7 @@ extern const struct file_operations fscache_stats_fops;
52157
52158 #define __fscache_stat(stat) (NULL)
52159 #define fscache_stat(stat) do {} while (0)
52160+#define fscache_stat_unchecked(stat) do {} while (0)
52161 #define fscache_stat_d(stat) do {} while (0)
52162 #endif
52163
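The internal.h hunk extends fscache's existing compile-time switch: with CONFIG_FSCACHE_STATS the stat helpers are real inline increments, and without it they collapse to do {} while (0) so call sites compile unchanged either way; fscache_stat_unchecked() is simply added to both halves of that pattern. The pattern in isolation (the names here are illustrative):

#ifdef CONFIG_MY_STATS
static inline void my_stat(atomic_t *stat)
{
	atomic_inc(stat);		/* real counter bump */
}
#else
/* same statement shape, swallows the trailing ';' safely */
#define my_stat(stat) do {} while (0)
#endif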
52164diff --git a/fs/fscache/object.c b/fs/fscache/object.c
52165index 50d41c1..10ee117 100644
52166--- a/fs/fscache/object.c
52167+++ b/fs/fscache/object.c
52168@@ -143,7 +143,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
52169 /* Invalidate an object on disk */
52170 case FSCACHE_OBJECT_INVALIDATING:
52171 clear_bit(FSCACHE_OBJECT_EV_INVALIDATE, &object->events);
52172- fscache_stat(&fscache_n_invalidates_run);
52173+ fscache_stat_unchecked(&fscache_n_invalidates_run);
52174 fscache_stat(&fscache_n_cop_invalidate_object);
52175 fscache_invalidate_object(object);
52176 fscache_stat_d(&fscache_n_cop_invalidate_object);
52177@@ -153,7 +153,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
52178 /* update the object metadata on disk */
52179 case FSCACHE_OBJECT_UPDATING:
52180 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
52181- fscache_stat(&fscache_n_updates_run);
52182+ fscache_stat_unchecked(&fscache_n_updates_run);
52183 fscache_stat(&fscache_n_cop_update_object);
52184 object->cache->ops->update_object(object);
52185 fscache_stat_d(&fscache_n_cop_update_object);
52186@@ -242,7 +242,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
52187 spin_lock(&object->lock);
52188 object->state = FSCACHE_OBJECT_DEAD;
52189 spin_unlock(&object->lock);
52190- fscache_stat(&fscache_n_object_dead);
52191+ fscache_stat_unchecked(&fscache_n_object_dead);
52192 goto terminal_transit;
52193
52194 /* handle the parent cache of this object being withdrawn from
52195@@ -257,7 +257,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
52196 spin_lock(&object->lock);
52197 object->state = FSCACHE_OBJECT_DEAD;
52198 spin_unlock(&object->lock);
52199- fscache_stat(&fscache_n_object_dead);
52200+ fscache_stat_unchecked(&fscache_n_object_dead);
52201 goto terminal_transit;
52202
52203 /* complain about the object being woken up once it is
52204@@ -495,7 +495,7 @@ static void fscache_lookup_object(struct fscache_object *object)
52205 parent->cookie->def->name, cookie->def->name,
52206 object->cache->tag->name);
52207
52208- fscache_stat(&fscache_n_object_lookups);
52209+ fscache_stat_unchecked(&fscache_n_object_lookups);
52210 fscache_stat(&fscache_n_cop_lookup_object);
52211 ret = object->cache->ops->lookup_object(object);
52212 fscache_stat_d(&fscache_n_cop_lookup_object);
52213@@ -506,7 +506,7 @@ static void fscache_lookup_object(struct fscache_object *object)
52214 if (ret == -ETIMEDOUT) {
52215 /* probably stuck behind another object, so move this one to
52216 * the back of the queue */
52217- fscache_stat(&fscache_n_object_lookups_timed_out);
52218+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
52219 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
52220 }
52221
52222@@ -529,7 +529,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
52223
52224 spin_lock(&object->lock);
52225 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
52226- fscache_stat(&fscache_n_object_lookups_negative);
52227+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
52228
52229 /* transit here to allow write requests to begin stacking up
52230 * and read requests to begin returning ENODATA */
52231@@ -575,7 +575,7 @@ void fscache_obtained_object(struct fscache_object *object)
52232 * result, in which case there may be data available */
52233 spin_lock(&object->lock);
52234 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
52235- fscache_stat(&fscache_n_object_lookups_positive);
52236+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
52237
52238 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
52239
52240@@ -589,7 +589,7 @@ void fscache_obtained_object(struct fscache_object *object)
52241 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
52242 } else {
52243 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
52244- fscache_stat(&fscache_n_object_created);
52245+ fscache_stat_unchecked(&fscache_n_object_created);
52246
52247 object->state = FSCACHE_OBJECT_AVAILABLE;
52248 spin_unlock(&object->lock);
52249@@ -634,7 +634,7 @@ static void fscache_object_available(struct fscache_object *object)
52250 fscache_enqueue_dependents(object);
52251
52252 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
52253- fscache_stat(&fscache_n_object_avail);
52254+ fscache_stat_unchecked(&fscache_n_object_avail);
52255
52256 _leave("");
52257 }
52258@@ -894,7 +894,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
52259 enum fscache_checkaux result;
52260
52261 if (!object->cookie->def->check_aux) {
52262- fscache_stat(&fscache_n_checkaux_none);
52263+ fscache_stat_unchecked(&fscache_n_checkaux_none);
52264 return FSCACHE_CHECKAUX_OKAY;
52265 }
52266
52267@@ -903,17 +903,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
52268 switch (result) {
52269 /* entry okay as is */
52270 case FSCACHE_CHECKAUX_OKAY:
52271- fscache_stat(&fscache_n_checkaux_okay);
52272+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
52273 break;
52274
52275 /* entry requires update */
52276 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
52277- fscache_stat(&fscache_n_checkaux_update);
52278+ fscache_stat_unchecked(&fscache_n_checkaux_update);
52279 break;
52280
52281 /* entry requires deletion */
52282 case FSCACHE_CHECKAUX_OBSOLETE:
52283- fscache_stat(&fscache_n_checkaux_obsolete);
52284+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
52285 break;
52286
52287 default:
52288diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
52289index 762a9ec..2023284 100644
52290--- a/fs/fscache/operation.c
52291+++ b/fs/fscache/operation.c
52292@@ -17,7 +17,7 @@
52293 #include <linux/slab.h>
52294 #include "internal.h"
52295
52296-atomic_t fscache_op_debug_id;
52297+atomic_unchecked_t fscache_op_debug_id;
52298 EXPORT_SYMBOL(fscache_op_debug_id);
52299
52300 /**
52301@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
52302 ASSERTCMP(atomic_read(&op->usage), >, 0);
52303 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
52304
52305- fscache_stat(&fscache_n_op_enqueue);
52306+ fscache_stat_unchecked(&fscache_n_op_enqueue);
52307 switch (op->flags & FSCACHE_OP_TYPE) {
52308 case FSCACHE_OP_ASYNC:
52309 _debug("queue async");
52310@@ -73,7 +73,7 @@ static void fscache_run_op(struct fscache_object *object,
52311 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
52312 if (op->processor)
52313 fscache_enqueue_operation(op);
52314- fscache_stat(&fscache_n_op_run);
52315+ fscache_stat_unchecked(&fscache_n_op_run);
52316 }
52317
52318 /*
52319@@ -105,11 +105,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
52320 if (object->n_in_progress > 0) {
52321 atomic_inc(&op->usage);
52322 list_add_tail(&op->pend_link, &object->pending_ops);
52323- fscache_stat(&fscache_n_op_pend);
52324+ fscache_stat_unchecked(&fscache_n_op_pend);
52325 } else if (!list_empty(&object->pending_ops)) {
52326 atomic_inc(&op->usage);
52327 list_add_tail(&op->pend_link, &object->pending_ops);
52328- fscache_stat(&fscache_n_op_pend);
52329+ fscache_stat_unchecked(&fscache_n_op_pend);
52330 fscache_start_operations(object);
52331 } else {
52332 ASSERTCMP(object->n_in_progress, ==, 0);
52333@@ -125,7 +125,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
52334 object->n_exclusive++; /* reads and writes must wait */
52335 atomic_inc(&op->usage);
52336 list_add_tail(&op->pend_link, &object->pending_ops);
52337- fscache_stat(&fscache_n_op_pend);
52338+ fscache_stat_unchecked(&fscache_n_op_pend);
52339 ret = 0;
52340 } else {
52341 /* If we're in any other state, there must have been an I/O
52342@@ -215,11 +215,11 @@ int fscache_submit_op(struct fscache_object *object,
52343 if (object->n_exclusive > 0) {
52344 atomic_inc(&op->usage);
52345 list_add_tail(&op->pend_link, &object->pending_ops);
52346- fscache_stat(&fscache_n_op_pend);
52347+ fscache_stat_unchecked(&fscache_n_op_pend);
52348 } else if (!list_empty(&object->pending_ops)) {
52349 atomic_inc(&op->usage);
52350 list_add_tail(&op->pend_link, &object->pending_ops);
52351- fscache_stat(&fscache_n_op_pend);
52352+ fscache_stat_unchecked(&fscache_n_op_pend);
52353 fscache_start_operations(object);
52354 } else {
52355 ASSERTCMP(object->n_exclusive, ==, 0);
52356@@ -231,12 +231,12 @@ int fscache_submit_op(struct fscache_object *object,
52357 object->n_ops++;
52358 atomic_inc(&op->usage);
52359 list_add_tail(&op->pend_link, &object->pending_ops);
52360- fscache_stat(&fscache_n_op_pend);
52361+ fscache_stat_unchecked(&fscache_n_op_pend);
52362 ret = 0;
52363 } else if (object->state == FSCACHE_OBJECT_DYING ||
52364 object->state == FSCACHE_OBJECT_LC_DYING ||
52365 object->state == FSCACHE_OBJECT_WITHDRAWING) {
52366- fscache_stat(&fscache_n_op_rejected);
52367+ fscache_stat_unchecked(&fscache_n_op_rejected);
52368 op->state = FSCACHE_OP_ST_CANCELLED;
52369 ret = -ENOBUFS;
52370 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
52371@@ -315,7 +315,7 @@ int fscache_cancel_op(struct fscache_operation *op,
52372 ret = -EBUSY;
52373 if (op->state == FSCACHE_OP_ST_PENDING) {
52374 ASSERT(!list_empty(&op->pend_link));
52375- fscache_stat(&fscache_n_op_cancelled);
52376+ fscache_stat_unchecked(&fscache_n_op_cancelled);
52377 list_del_init(&op->pend_link);
52378 if (do_cancel)
52379 do_cancel(op);
52380@@ -347,7 +347,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
52381 while (!list_empty(&object->pending_ops)) {
52382 op = list_entry(object->pending_ops.next,
52383 struct fscache_operation, pend_link);
52384- fscache_stat(&fscache_n_op_cancelled);
52385+ fscache_stat_unchecked(&fscache_n_op_cancelled);
52386 list_del_init(&op->pend_link);
52387
52388 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
52389@@ -419,7 +419,7 @@ void fscache_put_operation(struct fscache_operation *op)
52390 op->state, ==, FSCACHE_OP_ST_CANCELLED);
52391 op->state = FSCACHE_OP_ST_DEAD;
52392
52393- fscache_stat(&fscache_n_op_release);
52394+ fscache_stat_unchecked(&fscache_n_op_release);
52395
52396 if (op->release) {
52397 op->release(op);
52398@@ -442,7 +442,7 @@ void fscache_put_operation(struct fscache_operation *op)
52399 * lock, and defer it otherwise */
52400 if (!spin_trylock(&object->lock)) {
52401 _debug("defer put");
52402- fscache_stat(&fscache_n_op_deferred_release);
52403+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
52404
52405 cache = object->cache;
52406 spin_lock(&cache->op_gc_list_lock);
52407@@ -495,7 +495,7 @@ void fscache_operation_gc(struct work_struct *work)
52408
52409 _debug("GC DEFERRED REL OBJ%x OP%x",
52410 object->debug_id, op->debug_id);
52411- fscache_stat(&fscache_n_op_gc);
52412+ fscache_stat_unchecked(&fscache_n_op_gc);
52413
52414 ASSERTCMP(atomic_read(&op->usage), ==, 0);
52415 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
52416diff --git a/fs/fscache/page.c b/fs/fscache/page.c
52417index ff000e5..c44ec6d 100644
52418--- a/fs/fscache/page.c
52419+++ b/fs/fscache/page.c
52420@@ -61,7 +61,7 @@ try_again:
52421 val = radix_tree_lookup(&cookie->stores, page->index);
52422 if (!val) {
52423 rcu_read_unlock();
52424- fscache_stat(&fscache_n_store_vmscan_not_storing);
52425+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
52426 __fscache_uncache_page(cookie, page);
52427 return true;
52428 }
52429@@ -91,11 +91,11 @@ try_again:
52430 spin_unlock(&cookie->stores_lock);
52431
52432 if (xpage) {
52433- fscache_stat(&fscache_n_store_vmscan_cancelled);
52434- fscache_stat(&fscache_n_store_radix_deletes);
52435+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
52436+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
52437 ASSERTCMP(xpage, ==, page);
52438 } else {
52439- fscache_stat(&fscache_n_store_vmscan_gone);
52440+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
52441 }
52442
52443 wake_up_bit(&cookie->flags, 0);
52444@@ -110,11 +110,11 @@ page_busy:
52445 * sleeping on memory allocation, so we may need to impose a timeout
52446 * too. */
52447 if (!(gfp & __GFP_WAIT)) {
52448- fscache_stat(&fscache_n_store_vmscan_busy);
52449+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
52450 return false;
52451 }
52452
52453- fscache_stat(&fscache_n_store_vmscan_wait);
52454+ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
52455 __fscache_wait_on_page_write(cookie, page);
52456 gfp &= ~__GFP_WAIT;
52457 goto try_again;
52458@@ -140,7 +140,7 @@ static void fscache_end_page_write(struct fscache_object *object,
52459 FSCACHE_COOKIE_STORING_TAG);
52460 if (!radix_tree_tag_get(&cookie->stores, page->index,
52461 FSCACHE_COOKIE_PENDING_TAG)) {
52462- fscache_stat(&fscache_n_store_radix_deletes);
52463+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
52464 xpage = radix_tree_delete(&cookie->stores, page->index);
52465 }
52466 spin_unlock(&cookie->stores_lock);
52467@@ -161,7 +161,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
52468
52469 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
52470
52471- fscache_stat(&fscache_n_attr_changed_calls);
52472+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
52473
52474 if (fscache_object_is_active(object)) {
52475 fscache_stat(&fscache_n_cop_attr_changed);
52476@@ -187,11 +187,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
52477
52478 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
52479
52480- fscache_stat(&fscache_n_attr_changed);
52481+ fscache_stat_unchecked(&fscache_n_attr_changed);
52482
52483 op = kzalloc(sizeof(*op), GFP_KERNEL);
52484 if (!op) {
52485- fscache_stat(&fscache_n_attr_changed_nomem);
52486+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
52487 _leave(" = -ENOMEM");
52488 return -ENOMEM;
52489 }
52490@@ -209,7 +209,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
52491 if (fscache_submit_exclusive_op(object, op) < 0)
52492 goto nobufs;
52493 spin_unlock(&cookie->lock);
52494- fscache_stat(&fscache_n_attr_changed_ok);
52495+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
52496 fscache_put_operation(op);
52497 _leave(" = 0");
52498 return 0;
52499@@ -217,7 +217,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
52500 nobufs:
52501 spin_unlock(&cookie->lock);
52502 kfree(op);
52503- fscache_stat(&fscache_n_attr_changed_nobufs);
52504+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
52505 _leave(" = %d", -ENOBUFS);
52506 return -ENOBUFS;
52507 }
52508@@ -255,7 +255,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
52509 /* allocate a retrieval operation and attempt to submit it */
52510 op = kzalloc(sizeof(*op), GFP_NOIO);
52511 if (!op) {
52512- fscache_stat(&fscache_n_retrievals_nomem);
52513+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
52514 return NULL;
52515 }
52516
52517@@ -283,13 +283,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
52518 return 0;
52519 }
52520
52521- fscache_stat(&fscache_n_retrievals_wait);
52522+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
52523
52524 jif = jiffies;
52525 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
52526 fscache_wait_bit_interruptible,
52527 TASK_INTERRUPTIBLE) != 0) {
52528- fscache_stat(&fscache_n_retrievals_intr);
52529+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
52530 _leave(" = -ERESTARTSYS");
52531 return -ERESTARTSYS;
52532 }
52533@@ -318,8 +318,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
52534 */
52535 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
52536 struct fscache_retrieval *op,
52537- atomic_t *stat_op_waits,
52538- atomic_t *stat_object_dead)
52539+ atomic_unchecked_t *stat_op_waits,
52540+ atomic_unchecked_t *stat_object_dead)
52541 {
52542 int ret;
52543
52544@@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
52545 goto check_if_dead;
52546
52547 _debug(">>> WT");
52548- fscache_stat(stat_op_waits);
52549+ fscache_stat_unchecked(stat_op_waits);
52550 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
52551 fscache_wait_bit_interruptible,
52552 TASK_INTERRUPTIBLE) != 0) {
52553@@ -344,14 +344,14 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
52554
52555 check_if_dead:
52556 if (op->op.state == FSCACHE_OP_ST_CANCELLED) {
52557- fscache_stat(stat_object_dead);
52558+ fscache_stat_unchecked(stat_object_dead);
52559 _leave(" = -ENOBUFS [cancelled]");
52560 return -ENOBUFS;
52561 }
52562 if (unlikely(fscache_object_is_dead(object))) {
52563 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->op.state);
52564 fscache_cancel_op(&op->op, fscache_do_cancel_retrieval);
52565- fscache_stat(stat_object_dead);
52566+ fscache_stat_unchecked(stat_object_dead);
52567 return -ENOBUFS;
52568 }
52569 return 0;
52570@@ -378,7 +378,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
52571
52572 _enter("%p,%p,,,", cookie, page);
52573
52574- fscache_stat(&fscache_n_retrievals);
52575+ fscache_stat_unchecked(&fscache_n_retrievals);
52576
52577 if (hlist_empty(&cookie->backing_objects))
52578 goto nobufs;
52579@@ -417,7 +417,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
52580 goto nobufs_unlock_dec;
52581 spin_unlock(&cookie->lock);
52582
52583- fscache_stat(&fscache_n_retrieval_ops);
52584+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
52585
52586 /* pin the netfs read context in case we need to do the actual netfs
52587 * read because we've encountered a cache read failure */
52588@@ -447,15 +447,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
52589
52590 error:
52591 if (ret == -ENOMEM)
52592- fscache_stat(&fscache_n_retrievals_nomem);
52593+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
52594 else if (ret == -ERESTARTSYS)
52595- fscache_stat(&fscache_n_retrievals_intr);
52596+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
52597 else if (ret == -ENODATA)
52598- fscache_stat(&fscache_n_retrievals_nodata);
52599+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
52600 else if (ret < 0)
52601- fscache_stat(&fscache_n_retrievals_nobufs);
52602+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
52603 else
52604- fscache_stat(&fscache_n_retrievals_ok);
52605+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
52606
52607 fscache_put_retrieval(op);
52608 _leave(" = %d", ret);
52609@@ -467,7 +467,7 @@ nobufs_unlock:
52610 spin_unlock(&cookie->lock);
52611 kfree(op);
52612 nobufs:
52613- fscache_stat(&fscache_n_retrievals_nobufs);
52614+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
52615 _leave(" = -ENOBUFS");
52616 return -ENOBUFS;
52617 }
52618@@ -505,7 +505,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
52619
52620 _enter("%p,,%d,,,", cookie, *nr_pages);
52621
52622- fscache_stat(&fscache_n_retrievals);
52623+ fscache_stat_unchecked(&fscache_n_retrievals);
52624
52625 if (hlist_empty(&cookie->backing_objects))
52626 goto nobufs;
52627@@ -541,7 +541,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
52628 goto nobufs_unlock_dec;
52629 spin_unlock(&cookie->lock);
52630
52631- fscache_stat(&fscache_n_retrieval_ops);
52632+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
52633
52634 /* pin the netfs read context in case we need to do the actual netfs
52635 * read because we've encountered a cache read failure */
52636@@ -571,15 +571,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
52637
52638 error:
52639 if (ret == -ENOMEM)
52640- fscache_stat(&fscache_n_retrievals_nomem);
52641+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
52642 else if (ret == -ERESTARTSYS)
52643- fscache_stat(&fscache_n_retrievals_intr);
52644+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
52645 else if (ret == -ENODATA)
52646- fscache_stat(&fscache_n_retrievals_nodata);
52647+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
52648 else if (ret < 0)
52649- fscache_stat(&fscache_n_retrievals_nobufs);
52650+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
52651 else
52652- fscache_stat(&fscache_n_retrievals_ok);
52653+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
52654
52655 fscache_put_retrieval(op);
52656 _leave(" = %d", ret);
52657@@ -591,7 +591,7 @@ nobufs_unlock:
52658 spin_unlock(&cookie->lock);
52659 kfree(op);
52660 nobufs:
52661- fscache_stat(&fscache_n_retrievals_nobufs);
52662+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
52663 _leave(" = -ENOBUFS");
52664 return -ENOBUFS;
52665 }
52666@@ -615,7 +615,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
52667
52668 _enter("%p,%p,,,", cookie, page);
52669
52670- fscache_stat(&fscache_n_allocs);
52671+ fscache_stat_unchecked(&fscache_n_allocs);
52672
52673 if (hlist_empty(&cookie->backing_objects))
52674 goto nobufs;
52675@@ -647,7 +647,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
52676 goto nobufs_unlock;
52677 spin_unlock(&cookie->lock);
52678
52679- fscache_stat(&fscache_n_alloc_ops);
52680+ fscache_stat_unchecked(&fscache_n_alloc_ops);
52681
52682 ret = fscache_wait_for_retrieval_activation(
52683 object, op,
52684@@ -663,11 +663,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
52685
52686 error:
52687 if (ret == -ERESTARTSYS)
52688- fscache_stat(&fscache_n_allocs_intr);
52689+ fscache_stat_unchecked(&fscache_n_allocs_intr);
52690 else if (ret < 0)
52691- fscache_stat(&fscache_n_allocs_nobufs);
52692+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
52693 else
52694- fscache_stat(&fscache_n_allocs_ok);
52695+ fscache_stat_unchecked(&fscache_n_allocs_ok);
52696
52697 fscache_put_retrieval(op);
52698 _leave(" = %d", ret);
52699@@ -677,7 +677,7 @@ nobufs_unlock:
52700 spin_unlock(&cookie->lock);
52701 kfree(op);
52702 nobufs:
52703- fscache_stat(&fscache_n_allocs_nobufs);
52704+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
52705 _leave(" = -ENOBUFS");
52706 return -ENOBUFS;
52707 }
52708@@ -736,7 +736,7 @@ static void fscache_write_op(struct fscache_operation *_op)
52709
52710 spin_lock(&cookie->stores_lock);
52711
52712- fscache_stat(&fscache_n_store_calls);
52713+ fscache_stat_unchecked(&fscache_n_store_calls);
52714
52715 /* find a page to store */
52716 page = NULL;
52717@@ -747,7 +747,7 @@ static void fscache_write_op(struct fscache_operation *_op)
52718 page = results[0];
52719 _debug("gang %d [%lx]", n, page->index);
52720 if (page->index > op->store_limit) {
52721- fscache_stat(&fscache_n_store_pages_over_limit);
52722+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
52723 goto superseded;
52724 }
52725
52726@@ -759,7 +759,7 @@ static void fscache_write_op(struct fscache_operation *_op)
52727 spin_unlock(&cookie->stores_lock);
52728 spin_unlock(&object->lock);
52729
52730- fscache_stat(&fscache_n_store_pages);
52731+ fscache_stat_unchecked(&fscache_n_store_pages);
52732 fscache_stat(&fscache_n_cop_write_page);
52733 ret = object->cache->ops->write_page(op, page);
52734 fscache_stat_d(&fscache_n_cop_write_page);
52735@@ -860,7 +860,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
52736 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
52737 ASSERT(PageFsCache(page));
52738
52739- fscache_stat(&fscache_n_stores);
52740+ fscache_stat_unchecked(&fscache_n_stores);
52741
52742 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
52743 _leave(" = -ENOBUFS [invalidating]");
52744@@ -916,7 +916,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
52745 spin_unlock(&cookie->stores_lock);
52746 spin_unlock(&object->lock);
52747
52748- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
52749+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
52750 op->store_limit = object->store_limit;
52751
52752 if (fscache_submit_op(object, &op->op) < 0)
52753@@ -924,8 +924,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
52754
52755 spin_unlock(&cookie->lock);
52756 radix_tree_preload_end();
52757- fscache_stat(&fscache_n_store_ops);
52758- fscache_stat(&fscache_n_stores_ok);
52759+ fscache_stat_unchecked(&fscache_n_store_ops);
52760+ fscache_stat_unchecked(&fscache_n_stores_ok);
52761
52762 /* the work queue now carries its own ref on the object */
52763 fscache_put_operation(&op->op);
52764@@ -933,14 +933,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
52765 return 0;
52766
52767 already_queued:
52768- fscache_stat(&fscache_n_stores_again);
52769+ fscache_stat_unchecked(&fscache_n_stores_again);
52770 already_pending:
52771 spin_unlock(&cookie->stores_lock);
52772 spin_unlock(&object->lock);
52773 spin_unlock(&cookie->lock);
52774 radix_tree_preload_end();
52775 kfree(op);
52776- fscache_stat(&fscache_n_stores_ok);
52777+ fscache_stat_unchecked(&fscache_n_stores_ok);
52778 _leave(" = 0");
52779 return 0;
52780
52781@@ -959,14 +959,14 @@ nobufs:
52782 spin_unlock(&cookie->lock);
52783 radix_tree_preload_end();
52784 kfree(op);
52785- fscache_stat(&fscache_n_stores_nobufs);
52786+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
52787 _leave(" = -ENOBUFS");
52788 return -ENOBUFS;
52789
52790 nomem_free:
52791 kfree(op);
52792 nomem:
52793- fscache_stat(&fscache_n_stores_oom);
52794+ fscache_stat_unchecked(&fscache_n_stores_oom);
52795 _leave(" = -ENOMEM");
52796 return -ENOMEM;
52797 }
52798@@ -984,7 +984,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
52799 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
52800 ASSERTCMP(page, !=, NULL);
52801
52802- fscache_stat(&fscache_n_uncaches);
52803+ fscache_stat_unchecked(&fscache_n_uncaches);
52804
52805 /* cache withdrawal may beat us to it */
52806 if (!PageFsCache(page))
52807@@ -1035,7 +1035,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
52808 struct fscache_cookie *cookie = op->op.object->cookie;
52809
52810 #ifdef CONFIG_FSCACHE_STATS
52811- atomic_inc(&fscache_n_marks);
52812+ atomic_inc_unchecked(&fscache_n_marks);
52813 #endif
52814
52815 _debug("- mark %p{%lx}", page, page->index);
52816diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
52817index 8179e8b..5072cc7 100644
52818--- a/fs/fscache/stats.c
52819+++ b/fs/fscache/stats.c
52820@@ -18,99 +18,99 @@
52821 /*
52822 * operation counters
52823 */
52824-atomic_t fscache_n_op_pend;
52825-atomic_t fscache_n_op_run;
52826-atomic_t fscache_n_op_enqueue;
52827-atomic_t fscache_n_op_requeue;
52828-atomic_t fscache_n_op_deferred_release;
52829-atomic_t fscache_n_op_release;
52830-atomic_t fscache_n_op_gc;
52831-atomic_t fscache_n_op_cancelled;
52832-atomic_t fscache_n_op_rejected;
52833+atomic_unchecked_t fscache_n_op_pend;
52834+atomic_unchecked_t fscache_n_op_run;
52835+atomic_unchecked_t fscache_n_op_enqueue;
52836+atomic_unchecked_t fscache_n_op_requeue;
52837+atomic_unchecked_t fscache_n_op_deferred_release;
52838+atomic_unchecked_t fscache_n_op_release;
52839+atomic_unchecked_t fscache_n_op_gc;
52840+atomic_unchecked_t fscache_n_op_cancelled;
52841+atomic_unchecked_t fscache_n_op_rejected;
52842
52843-atomic_t fscache_n_attr_changed;
52844-atomic_t fscache_n_attr_changed_ok;
52845-atomic_t fscache_n_attr_changed_nobufs;
52846-atomic_t fscache_n_attr_changed_nomem;
52847-atomic_t fscache_n_attr_changed_calls;
52848+atomic_unchecked_t fscache_n_attr_changed;
52849+atomic_unchecked_t fscache_n_attr_changed_ok;
52850+atomic_unchecked_t fscache_n_attr_changed_nobufs;
52851+atomic_unchecked_t fscache_n_attr_changed_nomem;
52852+atomic_unchecked_t fscache_n_attr_changed_calls;
52853
52854-atomic_t fscache_n_allocs;
52855-atomic_t fscache_n_allocs_ok;
52856-atomic_t fscache_n_allocs_wait;
52857-atomic_t fscache_n_allocs_nobufs;
52858-atomic_t fscache_n_allocs_intr;
52859-atomic_t fscache_n_allocs_object_dead;
52860-atomic_t fscache_n_alloc_ops;
52861-atomic_t fscache_n_alloc_op_waits;
52862+atomic_unchecked_t fscache_n_allocs;
52863+atomic_unchecked_t fscache_n_allocs_ok;
52864+atomic_unchecked_t fscache_n_allocs_wait;
52865+atomic_unchecked_t fscache_n_allocs_nobufs;
52866+atomic_unchecked_t fscache_n_allocs_intr;
52867+atomic_unchecked_t fscache_n_allocs_object_dead;
52868+atomic_unchecked_t fscache_n_alloc_ops;
52869+atomic_unchecked_t fscache_n_alloc_op_waits;
52870
52871-atomic_t fscache_n_retrievals;
52872-atomic_t fscache_n_retrievals_ok;
52873-atomic_t fscache_n_retrievals_wait;
52874-atomic_t fscache_n_retrievals_nodata;
52875-atomic_t fscache_n_retrievals_nobufs;
52876-atomic_t fscache_n_retrievals_intr;
52877-atomic_t fscache_n_retrievals_nomem;
52878-atomic_t fscache_n_retrievals_object_dead;
52879-atomic_t fscache_n_retrieval_ops;
52880-atomic_t fscache_n_retrieval_op_waits;
52881+atomic_unchecked_t fscache_n_retrievals;
52882+atomic_unchecked_t fscache_n_retrievals_ok;
52883+atomic_unchecked_t fscache_n_retrievals_wait;
52884+atomic_unchecked_t fscache_n_retrievals_nodata;
52885+atomic_unchecked_t fscache_n_retrievals_nobufs;
52886+atomic_unchecked_t fscache_n_retrievals_intr;
52887+atomic_unchecked_t fscache_n_retrievals_nomem;
52888+atomic_unchecked_t fscache_n_retrievals_object_dead;
52889+atomic_unchecked_t fscache_n_retrieval_ops;
52890+atomic_unchecked_t fscache_n_retrieval_op_waits;
52891
52892-atomic_t fscache_n_stores;
52893-atomic_t fscache_n_stores_ok;
52894-atomic_t fscache_n_stores_again;
52895-atomic_t fscache_n_stores_nobufs;
52896-atomic_t fscache_n_stores_oom;
52897-atomic_t fscache_n_store_ops;
52898-atomic_t fscache_n_store_calls;
52899-atomic_t fscache_n_store_pages;
52900-atomic_t fscache_n_store_radix_deletes;
52901-atomic_t fscache_n_store_pages_over_limit;
52902+atomic_unchecked_t fscache_n_stores;
52903+atomic_unchecked_t fscache_n_stores_ok;
52904+atomic_unchecked_t fscache_n_stores_again;
52905+atomic_unchecked_t fscache_n_stores_nobufs;
52906+atomic_unchecked_t fscache_n_stores_oom;
52907+atomic_unchecked_t fscache_n_store_ops;
52908+atomic_unchecked_t fscache_n_store_calls;
52909+atomic_unchecked_t fscache_n_store_pages;
52910+atomic_unchecked_t fscache_n_store_radix_deletes;
52911+atomic_unchecked_t fscache_n_store_pages_over_limit;
52912
52913-atomic_t fscache_n_store_vmscan_not_storing;
52914-atomic_t fscache_n_store_vmscan_gone;
52915-atomic_t fscache_n_store_vmscan_busy;
52916-atomic_t fscache_n_store_vmscan_cancelled;
52917-atomic_t fscache_n_store_vmscan_wait;
52918+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
52919+atomic_unchecked_t fscache_n_store_vmscan_gone;
52920+atomic_unchecked_t fscache_n_store_vmscan_busy;
52921+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
52922+atomic_unchecked_t fscache_n_store_vmscan_wait;
52923
52924-atomic_t fscache_n_marks;
52925-atomic_t fscache_n_uncaches;
52926+atomic_unchecked_t fscache_n_marks;
52927+atomic_unchecked_t fscache_n_uncaches;
52928
52929-atomic_t fscache_n_acquires;
52930-atomic_t fscache_n_acquires_null;
52931-atomic_t fscache_n_acquires_no_cache;
52932-atomic_t fscache_n_acquires_ok;
52933-atomic_t fscache_n_acquires_nobufs;
52934-atomic_t fscache_n_acquires_oom;
52935+atomic_unchecked_t fscache_n_acquires;
52936+atomic_unchecked_t fscache_n_acquires_null;
52937+atomic_unchecked_t fscache_n_acquires_no_cache;
52938+atomic_unchecked_t fscache_n_acquires_ok;
52939+atomic_unchecked_t fscache_n_acquires_nobufs;
52940+atomic_unchecked_t fscache_n_acquires_oom;
52941
52942-atomic_t fscache_n_invalidates;
52943-atomic_t fscache_n_invalidates_run;
52944+atomic_unchecked_t fscache_n_invalidates;
52945+atomic_unchecked_t fscache_n_invalidates_run;
52946
52947-atomic_t fscache_n_updates;
52948-atomic_t fscache_n_updates_null;
52949-atomic_t fscache_n_updates_run;
52950+atomic_unchecked_t fscache_n_updates;
52951+atomic_unchecked_t fscache_n_updates_null;
52952+atomic_unchecked_t fscache_n_updates_run;
52953
52954-atomic_t fscache_n_relinquishes;
52955-atomic_t fscache_n_relinquishes_null;
52956-atomic_t fscache_n_relinquishes_waitcrt;
52957-atomic_t fscache_n_relinquishes_retire;
52958+atomic_unchecked_t fscache_n_relinquishes;
52959+atomic_unchecked_t fscache_n_relinquishes_null;
52960+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
52961+atomic_unchecked_t fscache_n_relinquishes_retire;
52962
52963-atomic_t fscache_n_cookie_index;
52964-atomic_t fscache_n_cookie_data;
52965-atomic_t fscache_n_cookie_special;
52966+atomic_unchecked_t fscache_n_cookie_index;
52967+atomic_unchecked_t fscache_n_cookie_data;
52968+atomic_unchecked_t fscache_n_cookie_special;
52969
52970-atomic_t fscache_n_object_alloc;
52971-atomic_t fscache_n_object_no_alloc;
52972-atomic_t fscache_n_object_lookups;
52973-atomic_t fscache_n_object_lookups_negative;
52974-atomic_t fscache_n_object_lookups_positive;
52975-atomic_t fscache_n_object_lookups_timed_out;
52976-atomic_t fscache_n_object_created;
52977-atomic_t fscache_n_object_avail;
52978-atomic_t fscache_n_object_dead;
52979+atomic_unchecked_t fscache_n_object_alloc;
52980+atomic_unchecked_t fscache_n_object_no_alloc;
52981+atomic_unchecked_t fscache_n_object_lookups;
52982+atomic_unchecked_t fscache_n_object_lookups_negative;
52983+atomic_unchecked_t fscache_n_object_lookups_positive;
52984+atomic_unchecked_t fscache_n_object_lookups_timed_out;
52985+atomic_unchecked_t fscache_n_object_created;
52986+atomic_unchecked_t fscache_n_object_avail;
52987+atomic_unchecked_t fscache_n_object_dead;
52988
52989-atomic_t fscache_n_checkaux_none;
52990-atomic_t fscache_n_checkaux_okay;
52991-atomic_t fscache_n_checkaux_update;
52992-atomic_t fscache_n_checkaux_obsolete;
52993+atomic_unchecked_t fscache_n_checkaux_none;
52994+atomic_unchecked_t fscache_n_checkaux_okay;
52995+atomic_unchecked_t fscache_n_checkaux_update;
52996+atomic_unchecked_t fscache_n_checkaux_obsolete;
52997
52998 atomic_t fscache_n_cop_alloc_object;
52999 atomic_t fscache_n_cop_lookup_object;
53000@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
53001 seq_puts(m, "FS-Cache statistics\n");
53002
53003 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
53004- atomic_read(&fscache_n_cookie_index),
53005- atomic_read(&fscache_n_cookie_data),
53006- atomic_read(&fscache_n_cookie_special));
53007+ atomic_read_unchecked(&fscache_n_cookie_index),
53008+ atomic_read_unchecked(&fscache_n_cookie_data),
53009+ atomic_read_unchecked(&fscache_n_cookie_special));
53010
53011 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
53012- atomic_read(&fscache_n_object_alloc),
53013- atomic_read(&fscache_n_object_no_alloc),
53014- atomic_read(&fscache_n_object_avail),
53015- atomic_read(&fscache_n_object_dead));
53016+ atomic_read_unchecked(&fscache_n_object_alloc),
53017+ atomic_read_unchecked(&fscache_n_object_no_alloc),
53018+ atomic_read_unchecked(&fscache_n_object_avail),
53019+ atomic_read_unchecked(&fscache_n_object_dead));
53020 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
53021- atomic_read(&fscache_n_checkaux_none),
53022- atomic_read(&fscache_n_checkaux_okay),
53023- atomic_read(&fscache_n_checkaux_update),
53024- atomic_read(&fscache_n_checkaux_obsolete));
53025+ atomic_read_unchecked(&fscache_n_checkaux_none),
53026+ atomic_read_unchecked(&fscache_n_checkaux_okay),
53027+ atomic_read_unchecked(&fscache_n_checkaux_update),
53028+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
53029
53030 seq_printf(m, "Pages : mrk=%u unc=%u\n",
53031- atomic_read(&fscache_n_marks),
53032- atomic_read(&fscache_n_uncaches));
53033+ atomic_read_unchecked(&fscache_n_marks),
53034+ atomic_read_unchecked(&fscache_n_uncaches));
53035
53036 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
53037 " oom=%u\n",
53038- atomic_read(&fscache_n_acquires),
53039- atomic_read(&fscache_n_acquires_null),
53040- atomic_read(&fscache_n_acquires_no_cache),
53041- atomic_read(&fscache_n_acquires_ok),
53042- atomic_read(&fscache_n_acquires_nobufs),
53043- atomic_read(&fscache_n_acquires_oom));
53044+ atomic_read_unchecked(&fscache_n_acquires),
53045+ atomic_read_unchecked(&fscache_n_acquires_null),
53046+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
53047+ atomic_read_unchecked(&fscache_n_acquires_ok),
53048+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
53049+ atomic_read_unchecked(&fscache_n_acquires_oom));
53050
53051 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
53052- atomic_read(&fscache_n_object_lookups),
53053- atomic_read(&fscache_n_object_lookups_negative),
53054- atomic_read(&fscache_n_object_lookups_positive),
53055- atomic_read(&fscache_n_object_created),
53056- atomic_read(&fscache_n_object_lookups_timed_out));
53057+ atomic_read_unchecked(&fscache_n_object_lookups),
53058+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
53059+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
53060+ atomic_read_unchecked(&fscache_n_object_created),
53061+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
53062
53063 seq_printf(m, "Invals : n=%u run=%u\n",
53064- atomic_read(&fscache_n_invalidates),
53065- atomic_read(&fscache_n_invalidates_run));
53066+ atomic_read_unchecked(&fscache_n_invalidates),
53067+ atomic_read_unchecked(&fscache_n_invalidates_run));
53068
53069 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
53070- atomic_read(&fscache_n_updates),
53071- atomic_read(&fscache_n_updates_null),
53072- atomic_read(&fscache_n_updates_run));
53073+ atomic_read_unchecked(&fscache_n_updates),
53074+ atomic_read_unchecked(&fscache_n_updates_null),
53075+ atomic_read_unchecked(&fscache_n_updates_run));
53076
53077 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
53078- atomic_read(&fscache_n_relinquishes),
53079- atomic_read(&fscache_n_relinquishes_null),
53080- atomic_read(&fscache_n_relinquishes_waitcrt),
53081- atomic_read(&fscache_n_relinquishes_retire));
53082+ atomic_read_unchecked(&fscache_n_relinquishes),
53083+ atomic_read_unchecked(&fscache_n_relinquishes_null),
53084+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
53085+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
53086
53087 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
53088- atomic_read(&fscache_n_attr_changed),
53089- atomic_read(&fscache_n_attr_changed_ok),
53090- atomic_read(&fscache_n_attr_changed_nobufs),
53091- atomic_read(&fscache_n_attr_changed_nomem),
53092- atomic_read(&fscache_n_attr_changed_calls));
53093+ atomic_read_unchecked(&fscache_n_attr_changed),
53094+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
53095+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
53096+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
53097+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
53098
53099 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
53100- atomic_read(&fscache_n_allocs),
53101- atomic_read(&fscache_n_allocs_ok),
53102- atomic_read(&fscache_n_allocs_wait),
53103- atomic_read(&fscache_n_allocs_nobufs),
53104- atomic_read(&fscache_n_allocs_intr));
53105+ atomic_read_unchecked(&fscache_n_allocs),
53106+ atomic_read_unchecked(&fscache_n_allocs_ok),
53107+ atomic_read_unchecked(&fscache_n_allocs_wait),
53108+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
53109+ atomic_read_unchecked(&fscache_n_allocs_intr));
53110 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
53111- atomic_read(&fscache_n_alloc_ops),
53112- atomic_read(&fscache_n_alloc_op_waits),
53113- atomic_read(&fscache_n_allocs_object_dead));
53114+ atomic_read_unchecked(&fscache_n_alloc_ops),
53115+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
53116+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
53117
53118 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
53119 " int=%u oom=%u\n",
53120- atomic_read(&fscache_n_retrievals),
53121- atomic_read(&fscache_n_retrievals_ok),
53122- atomic_read(&fscache_n_retrievals_wait),
53123- atomic_read(&fscache_n_retrievals_nodata),
53124- atomic_read(&fscache_n_retrievals_nobufs),
53125- atomic_read(&fscache_n_retrievals_intr),
53126- atomic_read(&fscache_n_retrievals_nomem));
53127+ atomic_read_unchecked(&fscache_n_retrievals),
53128+ atomic_read_unchecked(&fscache_n_retrievals_ok),
53129+ atomic_read_unchecked(&fscache_n_retrievals_wait),
53130+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
53131+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
53132+ atomic_read_unchecked(&fscache_n_retrievals_intr),
53133+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
53134 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
53135- atomic_read(&fscache_n_retrieval_ops),
53136- atomic_read(&fscache_n_retrieval_op_waits),
53137- atomic_read(&fscache_n_retrievals_object_dead));
53138+ atomic_read_unchecked(&fscache_n_retrieval_ops),
53139+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
53140+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
53141
53142 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
53143- atomic_read(&fscache_n_stores),
53144- atomic_read(&fscache_n_stores_ok),
53145- atomic_read(&fscache_n_stores_again),
53146- atomic_read(&fscache_n_stores_nobufs),
53147- atomic_read(&fscache_n_stores_oom));
53148+ atomic_read_unchecked(&fscache_n_stores),
53149+ atomic_read_unchecked(&fscache_n_stores_ok),
53150+ atomic_read_unchecked(&fscache_n_stores_again),
53151+ atomic_read_unchecked(&fscache_n_stores_nobufs),
53152+ atomic_read_unchecked(&fscache_n_stores_oom));
53153 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
53154- atomic_read(&fscache_n_store_ops),
53155- atomic_read(&fscache_n_store_calls),
53156- atomic_read(&fscache_n_store_pages),
53157- atomic_read(&fscache_n_store_radix_deletes),
53158- atomic_read(&fscache_n_store_pages_over_limit));
53159+ atomic_read_unchecked(&fscache_n_store_ops),
53160+ atomic_read_unchecked(&fscache_n_store_calls),
53161+ atomic_read_unchecked(&fscache_n_store_pages),
53162+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
53163+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
53164
53165 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
53166- atomic_read(&fscache_n_store_vmscan_not_storing),
53167- atomic_read(&fscache_n_store_vmscan_gone),
53168- atomic_read(&fscache_n_store_vmscan_busy),
53169- atomic_read(&fscache_n_store_vmscan_cancelled),
53170- atomic_read(&fscache_n_store_vmscan_wait));
53171+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
53172+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
53173+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
53174+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
53175+ atomic_read_unchecked(&fscache_n_store_vmscan_wait));
53176
53177 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
53178- atomic_read(&fscache_n_op_pend),
53179- atomic_read(&fscache_n_op_run),
53180- atomic_read(&fscache_n_op_enqueue),
53181- atomic_read(&fscache_n_op_cancelled),
53182- atomic_read(&fscache_n_op_rejected));
53183+ atomic_read_unchecked(&fscache_n_op_pend),
53184+ atomic_read_unchecked(&fscache_n_op_run),
53185+ atomic_read_unchecked(&fscache_n_op_enqueue),
53186+ atomic_read_unchecked(&fscache_n_op_cancelled),
53187+ atomic_read_unchecked(&fscache_n_op_rejected));
53188 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
53189- atomic_read(&fscache_n_op_deferred_release),
53190- atomic_read(&fscache_n_op_release),
53191- atomic_read(&fscache_n_op_gc));
53192+ atomic_read_unchecked(&fscache_n_op_deferred_release),
53193+ atomic_read_unchecked(&fscache_n_op_release),
53194+ atomic_read_unchecked(&fscache_n_op_gc));
53195
53196 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
53197 atomic_read(&fscache_n_cop_alloc_object),
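
Taken together, the fscache hunks move every pure statistic from atomic_t to atomic_unchecked_t. The point is PaX REFCOUNT: it instruments the ordinary atomic_t operations to trap on signed overflow, which is exactly right for reference counts but wrong for monotonically growing event counters that may legitimately wrap. A simplified sketch of the distinction on x86, assuming the PaX implementation (the real code also carries exception-table plumbing):

    /* checked: an overflow is rolled back and reported */
    static inline void atomic_inc(atomic_t *v)
    {
        asm volatile("lock incl %0\n"
                     "jno 0f\n"
                     "lock decl %0\n"    /* undo the overflowing increment */
                     "int $4\n"          /* raise the overflow trap */
                     "0:"
                     : "+m" (v->counter));
    }

    /* unchecked: free to wrap, for statistics only */
    static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
        asm volatile("lock incl %0" : "+m" (v->counter));
    }

Note that the fscache_n_cop_* counters above stay atomic_t: they are incremented and decremented in matched pairs (fscache_stat()/fscache_stat_d()), so they behave like reference counts and keep the protection.
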
53198diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
53199index e397b67..b0d8709 100644
53200--- a/fs/fuse/cuse.c
53201+++ b/fs/fuse/cuse.c
53202@@ -593,10 +593,12 @@ static int __init cuse_init(void)
53203 INIT_LIST_HEAD(&cuse_conntbl[i]);
53204
53205 /* inherit and extend fuse_dev_operations */
53206- cuse_channel_fops = fuse_dev_operations;
53207- cuse_channel_fops.owner = THIS_MODULE;
53208- cuse_channel_fops.open = cuse_channel_open;
53209- cuse_channel_fops.release = cuse_channel_release;
53210+ pax_open_kernel();
53211+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
53212+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
53213+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
53214+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
53215+ pax_close_kernel();
53216
53217 cuse_class = class_create(THIS_MODULE, "cuse");
53218 if (IS_ERR(cuse_class))
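
The cuse hunk works around structure constification: under this patch, file_operations instances are placed in read-only memory, so cuse can no longer "inherit and extend" fuse_dev_operations by plain assignment at init time. pax_open_kernel()/pax_close_kernel() temporarily lift kernel write protection, and the casts through (void *) defeat the const qualifier on the individual members. A sketch of the generic pattern, with base_fops and my_open as hypothetical stand-ins:

    extern const struct file_operations base_fops;
    static struct file_operations my_fops;    /* moved to rodata by the constify plugin */
    static int my_open(struct inode *inode, struct file *file);

    static void __init my_fops_init(void)
    {
        pax_open_kernel();                      /* allow writes to rodata */
        memcpy((void *)&my_fops, &base_fops, sizeof(my_fops));
        *(void **)&my_fops.open = my_open;      /* member-wise override */
        pax_close_kernel();                     /* re-protect */
    }
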
53219diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
53220index e83351a..41e3c9c 100644
53221--- a/fs/fuse/dev.c
53222+++ b/fs/fuse/dev.c
53223@@ -1236,7 +1236,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
53224 ret = 0;
53225 pipe_lock(pipe);
53226
53227- if (!pipe->readers) {
53228+ if (!atomic_read(&pipe->readers)) {
53229 send_sig(SIGPIPE, current, 0);
53230 if (!ret)
53231 ret = -EPIPE;
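
The lone-line fuse/dev.c change is a knock-on effect: elsewhere in this patch, the readers and writers counts of struct pipe_inode_info become atomic_t (so REFCOUNT can trap an overflow of the counts rather than letting them wrap), and every open-coded read of them has to become atomic_read(). A sketch of the pattern, with pipe_counts as a hypothetical stand-in for the real structure:

    /* sketch (assumed): the structure-side change this call site tracks */
    struct pipe_counts {
        atomic_t readers;    /* was: unsigned int readers; */
        atomic_t writers;    /* was: unsigned int writers; */
    };

    static bool pipe_has_readers(struct pipe_counts *p)
    {
        return atomic_read(&p->readers) != 0;
    }
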
53232diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
53233index 315e1f8..91f890c 100644
53234--- a/fs/fuse/dir.c
53235+++ b/fs/fuse/dir.c
53236@@ -1233,7 +1233,7 @@ static char *read_link(struct dentry *dentry)
53237 return link;
53238 }
53239
53240-static void free_link(char *link)
53241+static void free_link(const char *link)
53242 {
53243 if (!IS_ERR(link))
53244 free_page((unsigned long) link);
53245diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
53246index 2b6f569..fcb4d1f 100644
53247--- a/fs/gfs2/inode.c
53248+++ b/fs/gfs2/inode.c
53249@@ -1499,7 +1499,7 @@ out:
53250
53251 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
53252 {
53253- char *s = nd_get_link(nd);
53254+ const char *s = nd_get_link(nd);
53255 if (!IS_ERR(s))
53256 kfree(s);
53257 }
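
The fuse and gfs2 ->put_link changes are const-propagation: this patch turns the symlink body handed around via nd_get_link()/nd_set_link() into a const char *, so every consumer that merely frees it must take the pointer as const too. That is painless because the freeing primitives already accept const: kfree() is declared as taking a const void *, and free_page() takes an unsigned long. The resulting shape, as a sketch:

    /* sketch: const-correct put_link (shape taken from the two hunks above) */
    static void my_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
    {
        const char *s = nd_get_link(nd);

        if (!IS_ERR(s))
            kfree(s);    /* fine: kfree(const void *) */
    }
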
53258diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
53259index 78bde32..767e906 100644
53260--- a/fs/hugetlbfs/inode.c
53261+++ b/fs/hugetlbfs/inode.c
53262@@ -152,6 +152,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
53263 struct mm_struct *mm = current->mm;
53264 struct vm_area_struct *vma;
53265 struct hstate *h = hstate_file(file);
53266+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
53267 struct vm_unmapped_area_info info;
53268
53269 if (len & ~huge_page_mask(h))
53270@@ -165,17 +166,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
53271 return addr;
53272 }
53273
53274+#ifdef CONFIG_PAX_RANDMMAP
53275+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
53276+#endif
53277+
53278 if (addr) {
53279 addr = ALIGN(addr, huge_page_size(h));
53280 vma = find_vma(mm, addr);
53281- if (TASK_SIZE - len >= addr &&
53282- (!vma || addr + len <= vma->vm_start))
53283+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
53284 return addr;
53285 }
53286
53287 info.flags = 0;
53288 info.length = len;
53289 info.low_limit = TASK_UNMAPPED_BASE;
53290+
53291+#ifdef CONFIG_PAX_RANDMMAP
53292+ if (mm->pax_flags & MF_PAX_RANDMMAP)
53293+ info.low_limit += mm->delta_mmap;
53294+#endif
53295+
53296 info.high_limit = TASK_SIZE;
53297 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
53298 info.align_offset = 0;
53299@@ -897,7 +907,7 @@ static struct file_system_type hugetlbfs_fs_type = {
53300 .kill_sb = kill_litter_super,
53301 };
53302
53303-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
53304+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
53305
53306 static int can_do_hugetlb_shm(void)
53307 {
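
The hugetlbfs hunk applies the same two mmap hardenings this patch gives the generic and per-arch get_unmapped_area implementations. First, under PAX_RANDMMAP a caller-supplied hint address is ignored and the search floor is shifted by the per-mm random delta (mm->delta_mmap), so hugepage mappings are randomised like everything else. Second, the bare "does it fit below the next vma" test is replaced by check_heap_stack_gap(), which also reserves a gap, plus an optional per-thread random slack from gr_rand_threadstack_offset(), between the new mapping and an adjacent stack. A simplified sketch of the gap test's job, with semantics assumed from the call sites:

    /* sketch (assumed): accept addr only if [addr, addr+len+offset)
     * stays clear of the next vma above it */
    static bool check_gap(const struct vm_area_struct *vma, unsigned long addr,
                          unsigned long len, unsigned long offset)
    {
        if (!vma)
            return true;    /* nothing mapped above */
        return addr + len + offset <= vma->vm_start;
    }
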
53308diff --git a/fs/inode.c b/fs/inode.c
53309index 14084b7..29af1d9 100644
53310--- a/fs/inode.c
53311+++ b/fs/inode.c
53312@@ -880,8 +880,8 @@ unsigned int get_next_ino(void)
53313
53314 #ifdef CONFIG_SMP
53315 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
53316- static atomic_t shared_last_ino;
53317- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
53318+ static atomic_unchecked_t shared_last_ino;
53319+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
53320
53321 res = next - LAST_INO_BATCH;
53322 }
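
get_next_ino() is a good example of why the unchecked variant exists: the shared counter is a wrapping batch allocator, not a reference count. Each CPU claims LAST_INO_BATCH numbers at a time from the global counter and then hands them out locally, so the global atomic is touched once per 1024 inodes; letting it wrap is harmless, whereas under REFCOUNT it would eventually trap, hence the conversion. The surrounding function, reassembled as a self-contained sketch (constants as in fs/inode.c):

    #define LAST_INO_BATCH 1024

    static DEFINE_PER_CPU(unsigned int, last_ino);
    static atomic_unchecked_t shared_last_ino;

    unsigned int get_next_ino_sketch(void)
    {
        unsigned int *p = &get_cpu_var(last_ino);
        unsigned int res = *p;

        if ((res & (LAST_INO_BATCH - 1)) == 0)    /* local batch used up */
            res = atomic_add_return_unchecked(LAST_INO_BATCH,
                                              &shared_last_ino) - LAST_INO_BATCH;

        *p = ++res;
        put_cpu_var(last_ino);
        return res;
    }
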
53323diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
53324index 4a6cf28..d3a29d3 100644
53325--- a/fs/jffs2/erase.c
53326+++ b/fs/jffs2/erase.c
53327@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
53328 struct jffs2_unknown_node marker = {
53329 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
53330 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
53331- .totlen = cpu_to_je32(c->cleanmarker_size)
53332+ .totlen = cpu_to_je32(c->cleanmarker_size),
53333+ .hdr_crc = cpu_to_je32(0)
53334 };
53335
53336 jffs2_prealloc_raw_node_refs(c, jeb, 1);
53337diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
53338index a6597d6..41b30ec 100644
53339--- a/fs/jffs2/wbuf.c
53340+++ b/fs/jffs2/wbuf.c
53341@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
53342 {
53343 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
53344 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
53345- .totlen = constant_cpu_to_je32(8)
53346+ .totlen = constant_cpu_to_je32(8),
53347+ .hdr_crc = constant_cpu_to_je32(0)
53348 };
53349
53350 /*
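
Both jffs2 hunks only make an existing zero explicit: with designated initializers, C guarantees that members you do not name are zero-initialised, so .hdr_crc was already 0 before the change. Spelling it out documents that a zero header CRC is intentional for cleanmarker nodes (presumably the motivation; the patch itself does not say). A standalone, userspace demonstration of the language rule:

    /* C99 6.7.8: members omitted from an initializer list are zeroed */
    #include <assert.h>

    struct node { unsigned magic, nodetype, totlen, hdr_crc; };

    int main(void)
    {
        struct node n = { .magic = 1, .nodetype = 2, .totlen = 3 };
        assert(n.hdr_crc == 0);    /* holds without naming .hdr_crc */
        return 0;
    }
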
53351diff --git a/fs/jfs/super.c b/fs/jfs/super.c
53352index 1a543be..a4e1363 100644
53353--- a/fs/jfs/super.c
53354+++ b/fs/jfs/super.c
53355@@ -225,7 +225,7 @@ static const match_table_t tokens = {
53356 static int parse_options(char *options, struct super_block *sb, s64 *newLVSize,
53357 int *flag)
53358 {
53359- void *nls_map = (void *)-1; /* -1: no change; NULL: none */
53360+ const void *nls_map = (const void *)-1; /* -1: no change; NULL: none */
53361 char *p;
53362 struct jfs_sb_info *sbi = JFS_SBI(sb);
53363
53364@@ -253,7 +253,7 @@ static int parse_options(char *options, struct super_block *sb, s64 *newLVSize,
53365 /* Don't do anything ;-) */
53366 break;
53367 case Opt_iocharset:
53368- if (nls_map && nls_map != (void *) -1)
53369+ if (nls_map && nls_map != (const void *) -1)
53370 unload_nls(nls_map);
53371 if (!strcmp(args[0].from, "none"))
53372 nls_map = NULL;
53373@@ -855,7 +855,7 @@ static int __init init_jfs_fs(void)
53374
53375 jfs_inode_cachep =
53376 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
53377- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
53378+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
53379 init_once);
53380 if (jfs_inode_cachep == NULL)
53381 return -ENOMEM;
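
The SLAB_USERCOPY addition belongs to PaX's USERCOPY feature: copy_to_user()/copy_from_user() refuse to span slab objects unless the object's cache is explicitly whitelisted with this flag. jfs_inode_info evidently contains data that is copied to or from userland directly (inline symlink data would be the obvious candidate), so its cache gets whitelisted. A greatly simplified sketch of the enforcement side, with names assumed from the PaX patch:

    /* sketch (assumed): the check USERCOPY performs on every user copy */
    static void check_slab_usercopy(const void *ptr, unsigned long n, bool to_user)
    {
        struct kmem_cache *s = virt_to_cache(ptr);    /* NULL if not a slab object */

        if (s && !(s->flags & SLAB_USERCOPY))
            pax_report_usercopy(ptr, n, to_user, s->name);    /* abort the copy */
    }
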
53382diff --git a/fs/libfs.c b/fs/libfs.c
53383index 916da8c..1588998 100644
53384--- a/fs/libfs.c
53385+++ b/fs/libfs.c
53386@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
53387
53388 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
53389 struct dentry *next;
53390+ char d_name[sizeof(next->d_iname)];
53391+ const unsigned char *name;
53392+
53393 next = list_entry(p, struct dentry, d_u.d_child);
53394 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
53395 if (!simple_positive(next)) {
53396@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
53397
53398 spin_unlock(&next->d_lock);
53399 spin_unlock(&dentry->d_lock);
53400- if (filldir(dirent, next->d_name.name,
53401+ name = next->d_name.name;
53402+ if (name == next->d_iname) {
53403+ memcpy(d_name, name, next->d_name.len);
53404+ name = d_name;
53405+ }
53406+ if (filldir(dirent, name,
53407 next->d_name.len, filp->f_pos,
53408 next->d_inode->i_ino,
53409 dt_type(next->d_inode)) < 0)
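
The libfs change defends dcache_readdir() against concurrent renames. A dentry's name lives either inline in the dentry itself (d_iname) or in a separate allocation; filldir() can block, and while it does, a rename may rewrite an inline name in place, so the callback could read a name that mutates under it. Copying inline names into a local buffer first pins a stable snapshot (external names are swapped rather than rewritten, so the hazard is specific to the inline case). The pattern in isolation, as a sketch:

    /* sketch: snapshot a possibly-inline dentry name before blocking */
    static int emit_name(struct dentry *dentry, void *dirent, filldir_t filldir,
                         loff_t pos)
    {
        char buf[DNAME_INLINE_LEN];    /* same size as d_iname */
        const unsigned char *name = dentry->d_name.name;

        if (name == dentry->d_iname) {    /* inline: rename rewrites in place */
            memcpy(buf, name, dentry->d_name.len);
            name = (unsigned char *)buf;  /* stable local copy */
        }
        return filldir(dirent, (const char *)name, dentry->d_name.len, pos,
                       dentry->d_inode->i_ino, DT_UNKNOWN);
    }
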
53410diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
53411index 52e5120..808936e 100644
53412--- a/fs/lockd/clntproc.c
53413+++ b/fs/lockd/clntproc.c
53414@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
53415 /*
53416 * Cookie counter for NLM requests
53417 */
53418-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
53419+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
53420
53421 void nlmclnt_next_cookie(struct nlm_cookie *c)
53422 {
53423- u32 cookie = atomic_inc_return(&nlm_cookie);
53424+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
53425
53426 memcpy(c->data, &cookie, 4);
53427 c->len=4;
53428diff --git a/fs/locks.c b/fs/locks.c
53429index a94e331..060bce3 100644
53430--- a/fs/locks.c
53431+++ b/fs/locks.c
53432@@ -2064,16 +2064,16 @@ void locks_remove_flock(struct file *filp)
53433 return;
53434
53435 if (filp->f_op && filp->f_op->flock) {
53436- struct file_lock fl = {
53437+ struct file_lock flock = {
53438 .fl_pid = current->tgid,
53439 .fl_file = filp,
53440 .fl_flags = FL_FLOCK,
53441 .fl_type = F_UNLCK,
53442 .fl_end = OFFSET_MAX,
53443 };
53444- filp->f_op->flock(filp, F_SETLKW, &fl);
53445- if (fl.fl_ops && fl.fl_ops->fl_release_private)
53446- fl.fl_ops->fl_release_private(&fl);
53447+ filp->f_op->flock(filp, F_SETLKW, &flock);
53448+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
53449+ flock.fl_ops->fl_release_private(&flock);
53450 }
53451
53452 lock_flocks();
53453diff --git a/fs/namei.c b/fs/namei.c
53454index ec97aef..e67718d 100644
53455--- a/fs/namei.c
53456+++ b/fs/namei.c
53457@@ -319,16 +319,32 @@ int generic_permission(struct inode *inode, int mask)
53458 if (ret != -EACCES)
53459 return ret;
53460
53461+#ifdef CONFIG_GRKERNSEC
53462+ /* we'll block if we have to log due to a denied capability use */
53463+ if (mask & MAY_NOT_BLOCK)
53464+ return -ECHILD;
53465+#endif
53466+
53467 if (S_ISDIR(inode->i_mode)) {
53468 /* DACs are overridable for directories */
53469- if (inode_capable(inode, CAP_DAC_OVERRIDE))
53470- return 0;
53471 if (!(mask & MAY_WRITE))
53472- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
53473+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
53474+ inode_capable(inode, CAP_DAC_READ_SEARCH))
53475 return 0;
53476+ if (inode_capable(inode, CAP_DAC_OVERRIDE))
53477+ return 0;
53478 return -EACCES;
53479 }
53480 /*
53481+ * Searching includes executable on directories, else just read.
53482+ */
53483+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
53484+ if (mask == MAY_READ)
53485+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
53486+ inode_capable(inode, CAP_DAC_READ_SEARCH))
53487+ return 0;
53488+
53489+ /*
53490 * Read/write DACs are always overridable.
53491 * Executable DACs are overridable when there is
53492 * at least one exec bit set.
53493@@ -337,14 +353,6 @@ int generic_permission(struct inode *inode, int mask)
53494 if (inode_capable(inode, CAP_DAC_OVERRIDE))
53495 return 0;
53496
53497- /*
53498- * Searching includes executable on directories, else just read.
53499- */
53500- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
53501- if (mask == MAY_READ)
53502- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
53503- return 0;
53504-
53505 return -EACCES;
53506 }
53507
53508@@ -824,7 +832,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
53509 {
53510 struct dentry *dentry = link->dentry;
53511 int error;
53512- char *s;
53513+ const char *s;
53514
53515 BUG_ON(nd->flags & LOOKUP_RCU);
53516
53517@@ -845,6 +853,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
53518 if (error)
53519 goto out_put_nd_path;
53520
53521+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
53522+ dentry->d_inode, dentry, nd->path.mnt)) {
53523+ error = -EACCES;
53524+ goto out_put_nd_path;
53525+ }
53526+
53527 nd->last_type = LAST_BIND;
53528 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
53529 error = PTR_ERR(*p);
53530@@ -1594,6 +1608,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
53531 break;
53532 res = walk_component(nd, path, &nd->last,
53533 nd->last_type, LOOKUP_FOLLOW);
53534+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
53535+ res = -EACCES;
53536 put_link(nd, &link, cookie);
53537 } while (res > 0);
53538
53539@@ -1692,7 +1708,7 @@ EXPORT_SYMBOL(full_name_hash);
53540 static inline unsigned long hash_name(const char *name, unsigned int *hashp)
53541 {
53542 unsigned long a, b, adata, bdata, mask, hash, len;
53543- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
53544+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
53545
53546 hash = a = 0;
53547 len = -sizeof(unsigned long);
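
Making the word-at-a-time constants static const is a small but real win: a plain const local is a function-scope object that is conceptually initialised on every hash_name() call (the compiler may or may not optimise that away), and name hashing runs for every path component; a static const object is emitted once into .rodata, which KERNEXEC additionally write-protects. The difference in miniature:

    /* a local const array is (re)built on the stack each call ... */
    int f_stack(void)
    {
        const int t[4] = { 1, 2, 3, 4 };
        return t[3];
    }

    /* ... a static const one is emitted once, into .rodata */
    int f_rodata(void)
    {
        static const int t[4] = { 1, 2, 3, 4 };
        return t[3];
    }
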
53548@@ -1977,6 +1993,8 @@ static int path_lookupat(int dfd, const char *name,
53549 if (err)
53550 break;
53551 err = lookup_last(nd, &path);
53552+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
53553+ err = -EACCES;
53554 put_link(nd, &link, cookie);
53555 }
53556 }
53557@@ -1984,6 +2002,13 @@ static int path_lookupat(int dfd, const char *name,
53558 if (!err)
53559 err = complete_walk(nd);
53560
53561+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
53562+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
53563+ path_put(&nd->path);
53564+ err = -ENOENT;
53565+ }
53566+ }
53567+
53568 if (!err && nd->flags & LOOKUP_DIRECTORY) {
53569 if (!nd->inode->i_op->lookup) {
53570 path_put(&nd->path);
53571@@ -2011,8 +2036,15 @@ static int filename_lookup(int dfd, struct filename *name,
53572 retval = path_lookupat(dfd, name->name,
53573 flags | LOOKUP_REVAL, nd);
53574
53575- if (likely(!retval))
53576+ if (likely(!retval)) {
53577 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
53578+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
53579+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
53580+ path_put(&nd->path);
53581+ return -ENOENT;
53582+ }
53583+ }
53584+ }
53585 return retval;
53586 }
53587
53588@@ -2390,6 +2422,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
53589 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
53590 return -EPERM;
53591
53592+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
53593+ return -EPERM;
53594+ if (gr_handle_rawio(inode))
53595+ return -EPERM;
53596+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
53597+ return -EACCES;
53598+
53599 return 0;
53600 }
53601
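
may_open() picks up three grsecurity gates, and since they sit after the DAC checks earlier in the function, a denial here means plain Unix permissions would have allowed the open: gr_handle_rofs_blockwrite() refuses writes that would bypass a runtime-engaged read-only filesystem through its block device, gr_handle_rawio() denies raw block-device I/O where the configuration forbids it, and gr_acl_handle_open() is the full RBAC object check. Restated as a guard chain (helper names from the hunk; their bodies live in grsecurity proper, summarised here by assumption):

    /* sketch: the added open-time policy gates, in evaluation order */
    static int gr_open_gates(struct dentry *dentry, struct vfsmount *mnt,
                             struct inode *inode, int acc_mode)
    {
        if (gr_handle_rofs_blockwrite(dentry, mnt, acc_mode))
            return -EPERM;     /* ROFS engaged: no writes via block devs */
        if (gr_handle_rawio(inode))
            return -EPERM;     /* raw disk I/O denied by policy */
        if (!gr_acl_handle_open(dentry, mnt, acc_mode))
            return -EACCES;    /* RBAC denies this object/mode */
        return 0;
    }
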
53602@@ -2611,7 +2650,7 @@ looked_up:
53603 * cleared otherwise prior to returning.
53604 */
53605 static int lookup_open(struct nameidata *nd, struct path *path,
53606- struct file *file,
53607+ struct path *link, struct file *file,
53608 const struct open_flags *op,
53609 bool got_write, int *opened)
53610 {
53611@@ -2646,6 +2685,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
53612 /* Negative dentry, just create the file */
53613 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
53614 umode_t mode = op->mode;
53615+
53616+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
53617+ error = -EACCES;
53618+ goto out_dput;
53619+ }
53620+
53621+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
53622+ error = -EACCES;
53623+ goto out_dput;
53624+ }
53625+
53626 if (!IS_POSIXACL(dir->d_inode))
53627 mode &= ~current_umask();
53628 /*
53629@@ -2667,6 +2717,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
53630 nd->flags & LOOKUP_EXCL);
53631 if (error)
53632 goto out_dput;
53633+ else
53634+ gr_handle_create(dentry, nd->path.mnt);
53635 }
53636 out_no_open:
53637 path->dentry = dentry;
53638@@ -2681,7 +2733,7 @@ out_dput:
53639 /*
53640 * Handle the last step of open()
53641 */
53642-static int do_last(struct nameidata *nd, struct path *path,
53643+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
53644 struct file *file, const struct open_flags *op,
53645 int *opened, struct filename *name)
53646 {
53647@@ -2710,16 +2762,32 @@ static int do_last(struct nameidata *nd, struct path *path,
53648 error = complete_walk(nd);
53649 if (error)
53650 return error;
53651+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
53652+ error = -ENOENT;
53653+ goto out;
53654+ }
53655 audit_inode(name, nd->path.dentry, 0);
53656 if (open_flag & O_CREAT) {
53657 error = -EISDIR;
53658 goto out;
53659 }
53660+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
53661+ error = -EACCES;
53662+ goto out;
53663+ }
53664 goto finish_open;
53665 case LAST_BIND:
53666 error = complete_walk(nd);
53667 if (error)
53668 return error;
53669+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
53670+ error = -ENOENT;
53671+ goto out;
53672+ }
53673+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
53674+ error = -EACCES;
53675+ goto out;
53676+ }
53677 audit_inode(name, dir, 0);
53678 goto finish_open;
53679 }
53680@@ -2768,7 +2836,7 @@ retry_lookup:
53681 */
53682 }
53683 mutex_lock(&dir->d_inode->i_mutex);
53684- error = lookup_open(nd, path, file, op, got_write, opened);
53685+ error = lookup_open(nd, path, link, file, op, got_write, opened);
53686 mutex_unlock(&dir->d_inode->i_mutex);
53687
53688 if (error <= 0) {
53689@@ -2792,11 +2860,28 @@ retry_lookup:
53690 goto finish_open_created;
53691 }
53692
53693+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
53694+ error = -ENOENT;
53695+ goto exit_dput;
53696+ }
53697+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
53698+ error = -EACCES;
53699+ goto exit_dput;
53700+ }
53701+
53702 /*
53703 * create/update audit record if it already exists.
53704 */
53705- if (path->dentry->d_inode)
53706+ if (path->dentry->d_inode) {
53707+ /* only check if O_CREAT is specified; all other checks need to go
53708+ into may_open */
53709+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
53710+ error = -EACCES;
53711+ goto exit_dput;
53712+ }
53713+
53714 audit_inode(name, path->dentry, 0);
53715+ }
53716
53717 /*
53718 * If atomic_open() acquired write access it is dropped now due to
53719@@ -2837,6 +2922,11 @@ finish_lookup:
53720 }
53721 }
53722 BUG_ON(inode != path->dentry->d_inode);
53723+ /* if we're resolving a symlink to another symlink */
53724+ if (link && gr_handle_symlink_owner(link, inode)) {
53725+ error = -EACCES;
53726+ goto out;
53727+ }
53728 return 1;
53729 }
53730
53731@@ -2846,7 +2936,6 @@ finish_lookup:
53732 save_parent.dentry = nd->path.dentry;
53733 save_parent.mnt = mntget(path->mnt);
53734 nd->path.dentry = path->dentry;
53735-
53736 }
53737 nd->inode = inode;
53738 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
53739@@ -2855,6 +2944,16 @@ finish_lookup:
53740 path_put(&save_parent);
53741 return error;
53742 }
53743+
53744+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
53745+ error = -ENOENT;
53746+ goto out;
53747+ }
53748+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
53749+ error = -EACCES;
53750+ goto out;
53751+ }
53752+
53753 error = -EISDIR;
53754 if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode))
53755 goto out;
53756@@ -2953,7 +3052,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
53757 if (unlikely(error))
53758 goto out;
53759
53760- error = do_last(nd, &path, file, op, &opened, pathname);
53761+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
53762 while (unlikely(error > 0)) { /* trailing symlink */
53763 struct path link = path;
53764 void *cookie;
53765@@ -2971,7 +3070,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
53766 error = follow_link(&link, nd, &cookie);
53767 if (unlikely(error))
53768 break;
53769- error = do_last(nd, &path, file, op, &opened, pathname);
53770+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
53771 put_link(nd, &link, cookie);
53772 }
53773 out:
53774@@ -3071,8 +3170,12 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
53775 goto unlock;
53776
53777 error = -EEXIST;
53778- if (dentry->d_inode)
53779+ if (dentry->d_inode) {
53780+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
53781+ error = -ENOENT;
53782+ }
53783 goto fail;
53784+ }
53785 /*
53786 * Special case - lookup gave negative, but... we had foo/bar/
53787 * From the vfs_mknod() POV we just have a negative dentry -
53788@@ -3124,6 +3227,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
53789 }
53790 EXPORT_SYMBOL(user_path_create);
53791
53792+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
53793+{
53794+ struct filename *tmp = getname(pathname);
53795+ struct dentry *res;
53796+ if (IS_ERR(tmp))
53797+ return ERR_CAST(tmp);
53798+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
53799+ if (IS_ERR(res))
53800+ putname(tmp);
53801+ else
53802+ *to = tmp;
53803+ return res;
53804+}
53805+
53806 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
53807 {
53808 int error = may_create(dir, dentry);
53809@@ -3186,6 +3303,17 @@ retry:
53810
53811 if (!IS_POSIXACL(path.dentry->d_inode))
53812 mode &= ~current_umask();
53813+
53814+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
53815+ error = -EPERM;
53816+ goto out;
53817+ }
53818+
53819+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
53820+ error = -EACCES;
53821+ goto out;
53822+ }
53823+
53824 error = security_path_mknod(&path, dentry, mode, dev);
53825 if (error)
53826 goto out;
53827@@ -3202,6 +3330,8 @@ retry:
53828 break;
53829 }
53830 out:
53831+ if (!error)
53832+ gr_handle_create(dentry, path.mnt);
53833 done_path_create(&path, dentry);
53834 if (retry_estale(error, lookup_flags)) {
53835 lookup_flags |= LOOKUP_REVAL;
53836@@ -3254,9 +3384,16 @@ retry:
53837
53838 if (!IS_POSIXACL(path.dentry->d_inode))
53839 mode &= ~current_umask();
53840+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
53841+ error = -EACCES;
53842+ goto out;
53843+ }
53844 error = security_path_mkdir(&path, dentry, mode);
53845 if (!error)
53846 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
53847+ if (!error)
53848+ gr_handle_create(dentry, path.mnt);
53849+out:
53850 done_path_create(&path, dentry);
53851 if (retry_estale(error, lookup_flags)) {
53852 lookup_flags |= LOOKUP_REVAL;
53853@@ -3337,6 +3474,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
53854 struct filename *name;
53855 struct dentry *dentry;
53856 struct nameidata nd;
53857+ ino_t saved_ino = 0;
53858+ dev_t saved_dev = 0;
53859 unsigned int lookup_flags = 0;
53860 retry:
53861 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
53862@@ -3369,10 +3508,21 @@ retry:
53863 error = -ENOENT;
53864 goto exit3;
53865 }
53866+
53867+ saved_ino = dentry->d_inode->i_ino;
53868+ saved_dev = gr_get_dev_from_dentry(dentry);
53869+
53870+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
53871+ error = -EACCES;
53872+ goto exit3;
53873+ }
53874+
53875 error = security_path_rmdir(&nd.path, dentry);
53876 if (error)
53877 goto exit3;
53878 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
53879+ if (!error && (saved_dev || saved_ino))
53880+ gr_handle_delete(saved_ino, saved_dev);
53881 exit3:
53882 dput(dentry);
53883 exit2:
53884@@ -3438,6 +3588,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
53885 struct dentry *dentry;
53886 struct nameidata nd;
53887 struct inode *inode = NULL;
53888+ ino_t saved_ino = 0;
53889+ dev_t saved_dev = 0;
53890 unsigned int lookup_flags = 0;
53891 retry:
53892 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
53893@@ -3464,10 +3616,22 @@ retry:
53894 if (!inode)
53895 goto slashes;
53896 ihold(inode);
53897+
53898+ if (inode->i_nlink <= 1) {
53899+ saved_ino = inode->i_ino;
53900+ saved_dev = gr_get_dev_from_dentry(dentry);
53901+ }
53902+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
53903+ error = -EACCES;
53904+ goto exit2;
53905+ }
53906+
53907 error = security_path_unlink(&nd.path, dentry);
53908 if (error)
53909 goto exit2;
53910 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
53911+ if (!error && (saved_ino || saved_dev))
53912+ gr_handle_delete(saved_ino, saved_dev);
53913 exit2:
53914 dput(dentry);
53915 }
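
In both do_rmdir() and do_unlinkat() the inode number and device are copied out before the actual delete, since once vfs_rmdir()/vfs_unlink() succeed the inode may no longer be safe to touch; the saved pair then feeds the audit call. A minimal userspace sketch of that ordering, with log_delete() as a hypothetical stand-in for gr_handle_delete():

#include <stdio.h>

struct inode { unsigned long i_ino; unsigned long i_nlink; };

static void log_delete(unsigned long ino, unsigned long dev)
{
    fprintf(stderr, "deleted ino=%lu dev=%lu\n", ino, dev);
}

static int vfs_unlink_stub(struct inode *inode)
{
    inode->i_nlink = 0;               /* last link gone; real code may free it */
    return 0;
}

static int do_unlink(struct inode *inode, unsigned long dev)
{
    unsigned long saved_ino = 0, saved_dev = 0;

    if (inode->i_nlink <= 1) {        /* this unlink removes the file */
        saved_ino = inode->i_ino;     /* copy before the inode can die */
        saved_dev = dev;
    }
    int error = vfs_unlink_stub(inode);
    if (!error && (saved_ino || saved_dev))
        log_delete(saved_ino, saved_dev);
    return error;
}

int main(void)
{
    struct inode i = { .i_ino = 42, .i_nlink = 1 };
    return do_unlink(&i, 8);
}
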
53916@@ -3545,9 +3709,17 @@ retry:
53917 if (IS_ERR(dentry))
53918 goto out_putname;
53919
53920+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
53921+ error = -EACCES;
53922+ goto out;
53923+ }
53924+
53925 error = security_path_symlink(&path, dentry, from->name);
53926 if (!error)
53927 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
53928+ if (!error)
53929+ gr_handle_create(dentry, path.mnt);
53930+out:
53931 done_path_create(&path, dentry);
53932 if (retry_estale(error, lookup_flags)) {
53933 lookup_flags |= LOOKUP_REVAL;
53934@@ -3621,6 +3793,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
53935 {
53936 struct dentry *new_dentry;
53937 struct path old_path, new_path;
53938+ struct filename *to = NULL;
53939 int how = 0;
53940 int error;
53941
53942@@ -3644,7 +3817,7 @@ retry:
53943 if (error)
53944 return error;
53945
53946- new_dentry = user_path_create(newdfd, newname, &new_path,
53947+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
53948 (how & LOOKUP_REVAL));
53949 error = PTR_ERR(new_dentry);
53950 if (IS_ERR(new_dentry))
53951@@ -3656,11 +3829,28 @@ retry:
53952 error = may_linkat(&old_path);
53953 if (unlikely(error))
53954 goto out_dput;
53955+
53956+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
53957+ old_path.dentry->d_inode,
53958+ old_path.dentry->d_inode->i_mode, to)) {
53959+ error = -EACCES;
53960+ goto out_dput;
53961+ }
53962+
53963+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
53964+ old_path.dentry, old_path.mnt, to)) {
53965+ error = -EACCES;
53966+ goto out_dput;
53967+ }
53968+
53969 error = security_path_link(old_path.dentry, &new_path, new_dentry);
53970 if (error)
53971 goto out_dput;
53972 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
53973+ if (!error)
53974+ gr_handle_create(new_dentry, new_path.mnt);
53975 out_dput:
53976+ putname(to);
53977 done_path_create(&new_path, new_dentry);
53978 if (retry_estale(error, how)) {
53979 how |= LOOKUP_REVAL;
53980@@ -3906,12 +4096,21 @@ retry:
53981 if (new_dentry == trap)
53982 goto exit5;
53983
53984+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
53985+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
53986+ to);
53987+ if (error)
53988+ goto exit5;
53989+
53990 error = security_path_rename(&oldnd.path, old_dentry,
53991 &newnd.path, new_dentry);
53992 if (error)
53993 goto exit5;
53994 error = vfs_rename(old_dir->d_inode, old_dentry,
53995 new_dir->d_inode, new_dentry);
53996+ if (!error)
53997+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
53998+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
53999 exit5:
54000 dput(new_dentry);
54001 exit4:
54002@@ -3943,6 +4142,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
54003
54004 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
54005 {
54006+ char tmpbuf[64];
54007+ const char *newlink;
54008 int len;
54009
54010 len = PTR_ERR(link);
54011@@ -3952,7 +4153,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
54012 len = strlen(link);
54013 if (len > (unsigned) buflen)
54014 len = buflen;
54015- if (copy_to_user(buffer, link, len))
54016+
54017+ if (len < sizeof(tmpbuf)) {
54018+ memcpy(tmpbuf, link, len);
54019+ newlink = tmpbuf;
54020+ } else
54021+ newlink = link;
54022+
54023+ if (copy_to_user(buffer, newlink, len))
54024 len = -EFAULT;
54025 out:
54026 return len;
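
The fs/namei.c hunks above all follow one placement pattern: a grsecurity policy hook runs before the corresponding security_path_*() LSM hook and can veto the operation with -EACCES or -ENOENT, while the gr_handle_create() style notification fires only after the VFS operation succeeds. A minimal userspace model of that shape; check_policy() and notify_created() are hypothetical stand-ins for the gr_* internals:

#include <errno.h>
#include <stdio.h>

static int check_policy(const char *path)      /* pre-op: may veto */
{
    return path[0] == '.' ? -EACCES : 0;       /* toy rule */
}

static void notify_created(const char *path)   /* post-op: audit only */
{
    fprintf(stderr, "created %s\n", path);
}

static int do_mkdir(const char *path)
{
    int err = check_policy(path);              /* deny before any VFS work */
    if (err)
        return err;
    /* ... the real mkdir work would go here ... */
    notify_created(path);                      /* log only on success */
    return 0;
}

int main(void)
{
    printf("%d %d\n", do_mkdir("tmp"), do_mkdir(".hidden"));
    return 0;
}
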
54027diff --git a/fs/namespace.c b/fs/namespace.c
54028index 5dd7709..6f64e9c 100644
54029--- a/fs/namespace.c
54030+++ b/fs/namespace.c
54031@@ -1219,6 +1219,9 @@ static int do_umount(struct mount *mnt, int flags)
54032 if (!(sb->s_flags & MS_RDONLY))
54033 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
54034 up_write(&sb->s_umount);
54035+
54036+ gr_log_remount(mnt->mnt_devname, retval);
54037+
54038 return retval;
54039 }
54040
54041@@ -1238,6 +1241,9 @@ static int do_umount(struct mount *mnt, int flags)
54042 br_write_unlock(&vfsmount_lock);
54043 up_write(&namespace_sem);
54044 release_mounts(&umount_list);
54045+
54046+ gr_log_unmount(mnt->mnt_devname, retval);
54047+
54048 return retval;
54049 }
54050
54051@@ -1713,7 +1719,7 @@ static int do_loopback(struct path *path, const char *old_name,
54052
54053 if (IS_ERR(mnt)) {
54054 err = PTR_ERR(mnt);
54055- goto out;
54056+ goto out2;
54057 }
54058
54059 err = graft_tree(mnt, path);
54060@@ -2294,6 +2300,16 @@ long do_mount(const char *dev_name, const char *dir_name,
54061 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
54062 MS_STRICTATIME);
54063
54064+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
54065+ retval = -EPERM;
54066+ goto dput_out;
54067+ }
54068+
54069+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
54070+ retval = -EPERM;
54071+ goto dput_out;
54072+ }
54073+
54074 if (flags & MS_REMOUNT)
54075 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
54076 data_page);
54077@@ -2308,6 +2324,9 @@ long do_mount(const char *dev_name, const char *dir_name,
54078 dev_name, data_page);
54079 dput_out:
54080 path_put(&path);
54081+
54082+ gr_log_mount(dev_name, dir_name, retval);
54083+
54084 return retval;
54085 }
54086
54087@@ -2594,6 +2613,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
54088 if (error)
54089 goto out2;
54090
54091+ if (gr_handle_chroot_pivot()) {
54092+ error = -EPERM;
54093+ goto out2;
54094+ }
54095+
54096 get_fs_root(current->fs, &root);
54097 error = lock_mount(&old);
54098 if (error)
54099@@ -2842,7 +2866,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
54100 !nsown_capable(CAP_SYS_ADMIN))
54101 return -EPERM;
54102
54103- if (fs->users != 1)
54104+ if (atomic_read(&fs->users) != 1)
54105 return -EINVAL;
54106
54107 get_mnt_ns(mnt_ns);
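
Two independent things happen in the fs/namespace.c hunks: mount, umount, and remount results are logged after the fact (gr_log_mount() and friends, plus the pre-checks for chroot and read-only mounts), and the fs_struct user count moves to an atomic type so the lockless test in mntns_install() is well-defined. A sketch of the second change, assuming the point of atomic_read(&fs->users) is exactly that unlocked read; names are illustrative:

#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

struct fs_struct {
    atomic_int users;                 /* was: int users; */
};

static int mntns_install_check(struct fs_struct *fs)
{
    /* the unlocked read in mntns_install() becomes one atomic load */
    return atomic_load(&fs->users) != 1 ? -EINVAL : 0;
}

int main(void)
{
    struct fs_struct fs;
    atomic_init(&fs.users, 1);
    printf("%d\n", mntns_install_check(&fs));   /* 0 */
    atomic_fetch_add(&fs.users, 1);             /* a second user appears */
    printf("%d\n", mntns_install_check(&fs));   /* -22 (EINVAL) */
    return 0;
}
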
54108diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
54109index 59461c9..b17c57e 100644
54110--- a/fs/nfs/callback_xdr.c
54111+++ b/fs/nfs/callback_xdr.c
54112@@ -51,7 +51,7 @@ struct callback_op {
54113 callback_decode_arg_t decode_args;
54114 callback_encode_res_t encode_res;
54115 long res_maxsize;
54116-};
54117+} __do_const;
54118
54119 static struct callback_op callback_ops[];
54120
54121diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
54122index ebeb94c..ff35337 100644
54123--- a/fs/nfs/inode.c
54124+++ b/fs/nfs/inode.c
54125@@ -1042,16 +1042,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
54126 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
54127 }
54128
54129-static atomic_long_t nfs_attr_generation_counter;
54130+static atomic_long_unchecked_t nfs_attr_generation_counter;
54131
54132 static unsigned long nfs_read_attr_generation_counter(void)
54133 {
54134- return atomic_long_read(&nfs_attr_generation_counter);
54135+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
54136 }
54137
54138 unsigned long nfs_inc_attr_generation_counter(void)
54139 {
54140- return atomic_long_inc_return(&nfs_attr_generation_counter);
54141+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
54142 }
54143
54144 void nfs_fattr_init(struct nfs_fattr *fattr)
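
The *_unchecked atomics come from PaX's reference-count hardening (an assumption based on where the patch uses them): ordinary atomics trap on overflow to catch refcount bugs, while counters like this attribute generation number are expected to wrap and are opted out. Modeled with plain C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

typedef atomic_long atomic_long_unchecked_t;   /* wraparound allowed by design */

static atomic_long_unchecked_t nfs_attr_generation;

static long nfs_inc_attr_generation(void)
{
    /* callers only compare generations for (in)equality, so an
     * eventual wrap is harmless and needs no overflow trap */
    return atomic_fetch_add(&nfs_attr_generation, 1) + 1;
}

int main(void)
{
    printf("%ld %ld\n", nfs_inc_attr_generation(),
                        nfs_inc_attr_generation());
    return 0;
}
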
54145diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
54146index 2e9779b..5a43da0 100644
54147--- a/fs/nfs/nfs4client.c
54148+++ b/fs/nfs/nfs4client.c
54149@@ -300,7 +300,7 @@ int nfs40_walk_client_list(struct nfs_client *new,
54150 struct rpc_cred *cred)
54151 {
54152 struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id);
54153- struct nfs_client *pos, *n, *prev = NULL;
54154+ struct nfs_client *pos, *prev = NULL;
54155 struct nfs4_setclientid_res clid = {
54156 .clientid = new->cl_clientid,
54157 .confirm = new->cl_confirm,
54158@@ -308,10 +308,23 @@ int nfs40_walk_client_list(struct nfs_client *new,
54159 int status = -NFS4ERR_STALE_CLIENTID;
54160
54161 spin_lock(&nn->nfs_client_lock);
54162- list_for_each_entry_safe(pos, n, &nn->nfs_client_list, cl_share_link) {
54163+ list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) {
54164 /* If "pos" isn't marked ready, we can't trust the
54165 * remaining fields in "pos" */
54166- if (pos->cl_cons_state < NFS_CS_READY)
54167+ if (pos->cl_cons_state > NFS_CS_READY) {
54168+ atomic_inc(&pos->cl_count);
54169+ spin_unlock(&nn->nfs_client_lock);
54170+
54171+ if (prev)
54172+ nfs_put_client(prev);
54173+ prev = pos;
54174+
54175+ status = nfs_wait_client_init_complete(pos);
54176+ spin_lock(&nn->nfs_client_lock);
54177+ if (status < 0)
54178+ continue;
54179+ }
54180+ if (pos->cl_cons_state != NFS_CS_READY)
54181 continue;
54182
54183 if (pos->rpc_ops != new->rpc_ops)
54184@@ -423,16 +436,16 @@ int nfs41_walk_client_list(struct nfs_client *new,
54185 struct rpc_cred *cred)
54186 {
54187 struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id);
54188- struct nfs_client *pos, *n, *prev = NULL;
54189+ struct nfs_client *pos, *prev = NULL;
54190 int status = -NFS4ERR_STALE_CLIENTID;
54191
54192 spin_lock(&nn->nfs_client_lock);
54193- list_for_each_entry_safe(pos, n, &nn->nfs_client_list, cl_share_link) {
54194+ list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) {
54195 /* If "pos" isn't marked ready, we can't trust the
54196 * remaining fields in "pos", especially the client
54197 * ID and serverowner fields. Wait for CREATE_SESSION
54198 * to finish. */
54199- if (pos->cl_cons_state < NFS_CS_READY) {
54200+ if (pos->cl_cons_state > NFS_CS_READY) {
54201 atomic_inc(&pos->cl_count);
54202 spin_unlock(&nn->nfs_client_lock);
54203
54204@@ -440,18 +453,17 @@ int nfs41_walk_client_list(struct nfs_client *new,
54205 nfs_put_client(prev);
54206 prev = pos;
54207
54208- nfs4_schedule_lease_recovery(pos);
54209 status = nfs_wait_client_init_complete(pos);
54210- if (status < 0) {
54211- nfs_put_client(pos);
54212- spin_lock(&nn->nfs_client_lock);
54213- continue;
54214+ if (status == 0) {
54215+ nfs4_schedule_lease_recovery(pos);
54216+ status = nfs4_wait_clnt_recover(pos);
54217 }
54218- status = pos->cl_cons_state;
54219 spin_lock(&nn->nfs_client_lock);
54220 if (status < 0)
54221 continue;
54222 }
54223+ if (pos->cl_cons_state != NFS_CS_READY)
54224+ continue;
54225
54226 if (pos->rpc_ops != new->rpc_ops)
54227 continue;
54228@@ -469,17 +481,17 @@ int nfs41_walk_client_list(struct nfs_client *new,
54229 continue;
54230
54231 atomic_inc(&pos->cl_count);
54232- spin_unlock(&nn->nfs_client_lock);
54233+ *result = pos;
54234 dprintk("NFS: <-- %s using nfs_client = %p ({%d})\n",
54235 __func__, pos, atomic_read(&pos->cl_count));
54236-
54237- *result = pos;
54238- return 0;
54239+ break;
54240 }
54241
54242 /* No matching nfs_client found. */
54243 spin_unlock(&nn->nfs_client_lock);
54244 dprintk("NFS: <-- %s status = %d\n", __func__, status);
54245+ if (prev)
54246+ nfs_put_client(prev);
54247 return status;
54248 }
54249 #endif /* CONFIG_NFS_V4_1 */
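
The nfs4client.c rework changes how the walk handles clients that are still initializing: instead of skipping them, it pins the entry with a reference, drops nfs_client_lock to wait for initialization (and, in the v4.1 path, lease recovery) to finish, then retakes the lock and re-tests the state before trusting the entry. The locking shape, reduced to a userspace sketch with simplified stand-ins for nfs_client and nn->nfs_client_lock:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

enum cons_state { CS_READY = 0, CS_INITING = 1 };

struct client {
    atomic_int refcount;
    enum cons_state cons_state;
};

static pthread_mutex_t client_lock = PTHREAD_MUTEX_INITIALIZER;

static void wait_init_complete(struct client *c)
{
    (void)c;                    /* would sleep until the owner wakes us */
}

static int pick_if_ready(struct client *c)
{
    int ok;

    pthread_mutex_lock(&client_lock);
    if (c->cons_state != CS_READY) {
        atomic_fetch_add(&c->refcount, 1);    /* pin before unlocking */
        pthread_mutex_unlock(&client_lock);
        wait_init_complete(c);                /* may sleep */
        pthread_mutex_lock(&client_lock);     /* re-validate under lock */
    }
    ok = (c->cons_state == CS_READY);
    pthread_mutex_unlock(&client_lock);
    return ok;
}

int main(void)
{
    struct client c = { .cons_state = CS_READY };
    atomic_init(&c.refcount, 1);
    printf("%d\n", pick_if_ready(&c));        /* 1 */
    return 0;
}
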
54250diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
54251index 9d1c5db..1e13db8 100644
54252--- a/fs/nfsd/nfs4proc.c
54253+++ b/fs/nfsd/nfs4proc.c
54254@@ -1097,7 +1097,7 @@ struct nfsd4_operation {
54255 nfsd4op_rsize op_rsize_bop;
54256 stateid_getter op_get_currentstateid;
54257 stateid_setter op_set_currentstateid;
54258-};
54259+} __do_const;
54260
54261 static struct nfsd4_operation nfsd4_ops[];
54262
54263diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
54264index d1dd710..32ac0e8 100644
54265--- a/fs/nfsd/nfs4xdr.c
54266+++ b/fs/nfsd/nfs4xdr.c
54267@@ -1456,7 +1456,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
54268
54269 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
54270
54271-static nfsd4_dec nfsd4_dec_ops[] = {
54272+static const nfsd4_dec nfsd4_dec_ops[] = {
54273 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
54274 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
54275 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
54276@@ -1496,7 +1496,7 @@ static nfsd4_dec nfsd4_dec_ops[] = {
54277 [OP_RELEASE_LOCKOWNER] = (nfsd4_dec)nfsd4_decode_release_lockowner,
54278 };
54279
54280-static nfsd4_dec nfsd41_dec_ops[] = {
54281+static const nfsd4_dec nfsd41_dec_ops[] = {
54282 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
54283 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
54284 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
54285@@ -1558,7 +1558,7 @@ static nfsd4_dec nfsd41_dec_ops[] = {
54286 };
54287
54288 struct nfsd4_minorversion_ops {
54289- nfsd4_dec *decoders;
54290+ const nfsd4_dec *decoders;
54291 int nops;
54292 };
54293
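
The nfsd changes are pure read-only hardening: __do_const (a PaX plugin attribute, as far as this patch shows) makes every instance of the ops structs const, and the decoder dispatch tables gain an explicit const so the linker places them in .rodata. The idea in plain C, with illustrative names:

#include <stdio.h>

typedef int (*nfsd4_dec)(const char *arg);

static int dec_access(const char *arg) { return printf("ACCESS %s\n", arg); }
static int dec_close(const char *arg)  { return printf("CLOSE %s\n", arg); }

/* const lands the table in .rodata, so a kernel-write primitive cannot
 * swap a decoder pointer and redirect control flow through it */
static const nfsd4_dec dec_ops[] = { dec_access, dec_close };

int main(void)
{
    return dec_ops[0]("fh") < 0;
}
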
54294diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
54295index 69c6413..c0408d2 100644
54296--- a/fs/nfsd/vfs.c
54297+++ b/fs/nfsd/vfs.c
54298@@ -939,7 +939,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
54299 } else {
54300 oldfs = get_fs();
54301 set_fs(KERNEL_DS);
54302- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
54303+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
54304 set_fs(oldfs);
54305 }
54306
54307@@ -1026,7 +1026,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
54308
54309 /* Write the data. */
54310 oldfs = get_fs(); set_fs(KERNEL_DS);
54311- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
54312+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
54313 set_fs(oldfs);
54314 if (host_err < 0)
54315 goto out_nfserr;
54316@@ -1572,7 +1572,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
54317 */
54318
54319 oldfs = get_fs(); set_fs(KERNEL_DS);
54320- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
54321+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
54322 set_fs(oldfs);
54323
54324 if (host_err < 0)
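
The nfsd/vfs.c hunks only change annotations: these calls already run under set_fs(KERNEL_DS), so a kernel buffer is deliberately passed to APIs typed for __user pointers, and __force_user (the PaX counterpart of sparse's __force, as best this patch shows) records that the type pun is intentional rather than a bug. Stripped of address spaces, the shape is roughly:

#include <stdio.h>
#include <string.h>

#define __user                              /* address-space tag; empty here  */
#define force_user(p) ((char __user *)(p))  /* deliberate cross-space cast    */

static size_t write_from_user(const char __user *buf, size_t len)
{
    /* stand-in for an API typed to take user pointers, like vfs_writev() */
    return fwrite(buf, 1, len, stdout);
}

int main(void)
{
    char kbuf[] = "kernel-side buffer\n";   /* kernel memory analogue */

    /* under set_fs(KERNEL_DS) the kernel pointer is legal to pass;
     * the cast only marks the pun as intentional */
    return write_from_user(force_user(kbuf), strlen(kbuf)) == 0;
}
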
54325diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
54326index fea6bd5..8ee9d81 100644
54327--- a/fs/nls/nls_base.c
54328+++ b/fs/nls/nls_base.c
54329@@ -234,20 +234,22 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
54330
54331 int register_nls(struct nls_table * nls)
54332 {
54333- struct nls_table ** tmp = &tables;
54334+ struct nls_table *tmp = tables;
54335
54336 if (nls->next)
54337 return -EBUSY;
54338
54339 spin_lock(&nls_lock);
54340- while (*tmp) {
54341- if (nls == *tmp) {
54342+ while (tmp) {
54343+ if (nls == tmp) {
54344 spin_unlock(&nls_lock);
54345 return -EBUSY;
54346 }
54347- tmp = &(*tmp)->next;
54348+ tmp = tmp->next;
54349 }
54350- nls->next = tables;
54351+ pax_open_kernel();
54352+ *(struct nls_table **)&nls->next = tables;
54353+ pax_close_kernel();
54354 tables = nls;
54355 spin_unlock(&nls_lock);
54356 return 0;
54357@@ -255,12 +257,14 @@ int register_nls(struct nls_table * nls)
54358
54359 int unregister_nls(struct nls_table * nls)
54360 {
54361- struct nls_table ** tmp = &tables;
54362+ struct nls_table * const * tmp = &tables;
54363
54364 spin_lock(&nls_lock);
54365 while (*tmp) {
54366 if (nls == *tmp) {
54367- *tmp = nls->next;
54368+ pax_open_kernel();
54369+ *(struct nls_table **)tmp = nls->next;
54370+ pax_close_kernel();
54371 spin_unlock(&nls_lock);
54372 return 0;
54373 }
54374diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
54375index 7424929..35f6be5 100644
54376--- a/fs/nls/nls_euc-jp.c
54377+++ b/fs/nls/nls_euc-jp.c
54378@@ -561,8 +561,10 @@ static int __init init_nls_euc_jp(void)
54379 p_nls = load_nls("cp932");
54380
54381 if (p_nls) {
54382- table.charset2upper = p_nls->charset2upper;
54383- table.charset2lower = p_nls->charset2lower;
54384+ pax_open_kernel();
54385+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
54386+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
54387+ pax_close_kernel();
54388 return register_nls(&table);
54389 }
54390
54391diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
54392index e7bc1d7..06bd4bb 100644
54393--- a/fs/nls/nls_koi8-ru.c
54394+++ b/fs/nls/nls_koi8-ru.c
54395@@ -63,8 +63,10 @@ static int __init init_nls_koi8_ru(void)
54396 p_nls = load_nls("koi8-u");
54397
54398 if (p_nls) {
54399- table.charset2upper = p_nls->charset2upper;
54400- table.charset2lower = p_nls->charset2lower;
54401+ pax_open_kernel();
54402+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
54403+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
54404+ pax_close_kernel();
54405 return register_nls(&table);
54406 }
54407
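
Across nls_base.c and the two charset modules the pattern is the same: the nls_table next/charset pointers are treated as read-only data, and the few legitimate writes are bracketed by pax_open_kernel()/pax_close_kernel(), which by all appearances here briefly lift kernel write protection. A userspace analogue, with mprotect() as the stand-in toggle:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long page = sysconf(_SC_PAGESIZE);
    char *tbl = mmap(NULL, page, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (tbl == MAP_FAILED)
        return 1;

    strcpy(tbl, "cp932");
    mprotect(tbl, page, PROT_READ);               /* table becomes r/o    */

    mprotect(tbl, page, PROT_READ | PROT_WRITE);  /* pax_open_kernel()    */
    strcpy(tbl, "koi8-u");                        /* the sanctioned write */
    mprotect(tbl, page, PROT_READ);               /* pax_close_kernel()   */

    puts(tbl);
    return 0;
}
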
54408diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
54409index 9ff4a5e..deb1f0f 100644
54410--- a/fs/notify/fanotify/fanotify_user.c
54411+++ b/fs/notify/fanotify/fanotify_user.c
54412@@ -251,8 +251,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
54413
54414 fd = fanotify_event_metadata.fd;
54415 ret = -EFAULT;
54416- if (copy_to_user(buf, &fanotify_event_metadata,
54417- fanotify_event_metadata.event_len))
54418+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
54419+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
54420 goto out_close_fd;
54421
54422 ret = prepare_for_access_response(group, event, fd);
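
The fanotify fix guards a length that comes from the data being copied: event_len is a field of fanotify_event_metadata itself, so without the new bound an oversized value would make copy_to_user() read past the on-stack struct. The check, modeled in userspace with memcpy() standing in for copy_to_user():

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct metadata { unsigned int event_len; char payload[28]; };

static int copy_event(char *dst, size_t dstlen, const struct metadata *m)
{
    if (m->event_len > sizeof(*m))      /* the added bound check */
        return -EFAULT;
    if (m->event_len > dstlen)
        return -EFAULT;
    memcpy(dst, m, m->event_len);       /* stand-in for copy_to_user() */
    return 0;
}

int main(void)
{
    struct metadata m = { .event_len = sizeof(m) };
    char buf[64];
    printf("%d\n", copy_event(buf, sizeof(buf), &m));   /* 0   */
    m.event_len = 4096;                                 /* corrupted length */
    printf("%d\n", copy_event(buf, sizeof(buf), &m));   /* -14 */
    return 0;
}
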
54423diff --git a/fs/notify/notification.c b/fs/notify/notification.c
54424index 7b51b05..5ea5ef6 100644
54425--- a/fs/notify/notification.c
54426+++ b/fs/notify/notification.c
54427@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
54428 * get set to 0 so it will never get 'freed'
54429 */
54430 static struct fsnotify_event *q_overflow_event;
54431-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
54432+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
54433
54434 /**
54435 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
54436@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
54437 */
54438 u32 fsnotify_get_cookie(void)
54439 {
54440- return atomic_inc_return(&fsnotify_sync_cookie);
54441+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
54442 }
54443 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
54444
54445diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
54446index 99e3610..02c1068 100644
54447--- a/fs/ntfs/dir.c
54448+++ b/fs/ntfs/dir.c
54449@@ -1329,7 +1329,7 @@ find_next_index_buffer:
54450 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
54451 ~(s64)(ndir->itype.index.block_size - 1)));
54452 /* Bounds checks. */
54453- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
54454+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
54455 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
54456 "inode 0x%lx or driver bug.", vdir->i_ino);
54457 goto err_out;
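
The ntfs/dir.c change adds exactly one test: kaddr must be non-NULL before the existing range comparison, because pointer arithmetic on a NULL base can make both bounds checks pass for a small offset. The guard in isolation, with illustrative names:

#include <stddef.h>

static int in_page(const unsigned char *base, const unsigned char *p,
                   size_t page_size)
{
    /* with base == NULL, p is a small absolute address and both range
     * tests can come out false, so the NULL case is rejected first */
    if (!base || p < base || p > base + page_size)
        return 0;
    return 1;
}

int main(void)
{
    unsigned char page[4096];
    return !in_page(page, page + 100, sizeof(page));   /* exits 0 */
}
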
54458diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
54459index 5b2d4f0..c6de396 100644
54460--- a/fs/ntfs/file.c
54461+++ b/fs/ntfs/file.c
54462@@ -2242,6 +2242,6 @@ const struct inode_operations ntfs_file_inode_ops = {
54463 #endif /* NTFS_RW */
54464 };
54465
54466-const struct file_operations ntfs_empty_file_ops = {};
54467+const struct file_operations ntfs_empty_file_ops __read_only;
54468
54469-const struct inode_operations ntfs_empty_inode_ops = {};
54470+const struct inode_operations ntfs_empty_inode_ops __read_only;
54471diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
54472index a9f78c7..ed8a381 100644
54473--- a/fs/ocfs2/localalloc.c
54474+++ b/fs/ocfs2/localalloc.c
54475@@ -1279,7 +1279,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
54476 goto bail;
54477 }
54478
54479- atomic_inc(&osb->alloc_stats.moves);
54480+ atomic_inc_unchecked(&osb->alloc_stats.moves);
54481
54482 bail:
54483 if (handle)
54484diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
54485index d355e6e..578d905 100644
54486--- a/fs/ocfs2/ocfs2.h
54487+++ b/fs/ocfs2/ocfs2.h
54488@@ -235,11 +235,11 @@ enum ocfs2_vol_state
54489
54490 struct ocfs2_alloc_stats
54491 {
54492- atomic_t moves;
54493- atomic_t local_data;
54494- atomic_t bitmap_data;
54495- atomic_t bg_allocs;
54496- atomic_t bg_extends;
54497+ atomic_unchecked_t moves;
54498+ atomic_unchecked_t local_data;
54499+ atomic_unchecked_t bitmap_data;
54500+ atomic_unchecked_t bg_allocs;
54501+ atomic_unchecked_t bg_extends;
54502 };
54503
54504 enum ocfs2_local_alloc_state
54505diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
54506index b7e74b5..19c6536 100644
54507--- a/fs/ocfs2/suballoc.c
54508+++ b/fs/ocfs2/suballoc.c
54509@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
54510 mlog_errno(status);
54511 goto bail;
54512 }
54513- atomic_inc(&osb->alloc_stats.bg_extends);
54514+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
54515
54516 /* You should never ask for this much metadata */
54517 BUG_ON(bits_wanted >
54518@@ -2007,7 +2007,7 @@ int ocfs2_claim_metadata(handle_t *handle,
54519 mlog_errno(status);
54520 goto bail;
54521 }
54522- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
54523+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
54524
54525 *suballoc_loc = res.sr_bg_blkno;
54526 *suballoc_bit_start = res.sr_bit_offset;
54527@@ -2171,7 +2171,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
54528 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
54529 res->sr_bits);
54530
54531- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
54532+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
54533
54534 BUG_ON(res->sr_bits != 1);
54535
54536@@ -2213,7 +2213,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
54537 mlog_errno(status);
54538 goto bail;
54539 }
54540- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
54541+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
54542
54543 BUG_ON(res.sr_bits != 1);
54544
54545@@ -2317,7 +2317,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
54546 cluster_start,
54547 num_clusters);
54548 if (!status)
54549- atomic_inc(&osb->alloc_stats.local_data);
54550+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
54551 } else {
54552 if (min_clusters > (osb->bitmap_cpg - 1)) {
54553 /* The only paths asking for contiguousness
54554@@ -2343,7 +2343,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
54555 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
54556 res.sr_bg_blkno,
54557 res.sr_bit_offset);
54558- atomic_inc(&osb->alloc_stats.bitmap_data);
54559+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
54560 *num_clusters = res.sr_bits;
54561 }
54562 }
54563diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
54564index 0e91ec2..f4b3fc6 100644
54565--- a/fs/ocfs2/super.c
54566+++ b/fs/ocfs2/super.c
54567@@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
54568 "%10s => GlobalAllocs: %d LocalAllocs: %d "
54569 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
54570 "Stats",
54571- atomic_read(&osb->alloc_stats.bitmap_data),
54572- atomic_read(&osb->alloc_stats.local_data),
54573- atomic_read(&osb->alloc_stats.bg_allocs),
54574- atomic_read(&osb->alloc_stats.moves),
54575- atomic_read(&osb->alloc_stats.bg_extends));
54576+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
54577+ atomic_read_unchecked(&osb->alloc_stats.local_data),
54578+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
54579+ atomic_read_unchecked(&osb->alloc_stats.moves),
54580+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
54581
54582 out += snprintf(buf + out, len - out,
54583 "%10s => State: %u Descriptor: %llu Size: %u bits "
54584@@ -2121,11 +2121,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
54585 spin_lock_init(&osb->osb_xattr_lock);
54586 ocfs2_init_steal_slots(osb);
54587
54588- atomic_set(&osb->alloc_stats.moves, 0);
54589- atomic_set(&osb->alloc_stats.local_data, 0);
54590- atomic_set(&osb->alloc_stats.bitmap_data, 0);
54591- atomic_set(&osb->alloc_stats.bg_allocs, 0);
54592- atomic_set(&osb->alloc_stats.bg_extends, 0);
54593+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
54594+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
54595+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
54596+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
54597+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
54598
54599 /* Copy the blockcheck stats from the superblock probe */
54600 osb->osb_ecc_stats = *stats;
54601diff --git a/fs/open.c b/fs/open.c
54602index 9b33c0c..2ffcca2 100644
54603--- a/fs/open.c
54604+++ b/fs/open.c
54605@@ -31,6 +31,8 @@
54606 #include <linux/ima.h>
54607 #include <linux/dnotify.h>
54608
54609+#define CREATE_TRACE_POINTS
54610+#include <trace/events/fs.h>
54611 #include "internal.h"
54612
54613 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
54614@@ -101,6 +103,8 @@ long vfs_truncate(struct path *path, loff_t length)
54615 error = locks_verify_truncate(inode, NULL, length);
54616 if (!error)
54617 error = security_path_truncate(path);
54618+ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
54619+ error = -EACCES;
54620 if (!error)
54621 error = do_truncate(path->dentry, length, 0, NULL);
54622
54623@@ -178,6 +182,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
54624 error = locks_verify_truncate(inode, f.file, length);
54625 if (!error)
54626 error = security_path_truncate(&f.file->f_path);
54627+ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
54628+ error = -EACCES;
54629 if (!error)
54630 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
54631 sb_end_write(inode->i_sb);
54632@@ -373,6 +379,9 @@ retry:
54633 if (__mnt_is_readonly(path.mnt))
54634 res = -EROFS;
54635
54636+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
54637+ res = -EACCES;
54638+
54639 out_path_release:
54640 path_put(&path);
54641 if (retry_estale(res, lookup_flags)) {
54642@@ -404,6 +413,8 @@ retry:
54643 if (error)
54644 goto dput_and_out;
54645
54646+ gr_log_chdir(path.dentry, path.mnt);
54647+
54648 set_fs_pwd(current->fs, &path);
54649
54650 dput_and_out:
54651@@ -433,6 +444,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
54652 goto out_putf;
54653
54654 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
54655+
54656+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
54657+ error = -EPERM;
54658+
54659+ if (!error)
54660+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
54661+
54662 if (!error)
54663 set_fs_pwd(current->fs, &f.file->f_path);
54664 out_putf:
54665@@ -462,7 +480,13 @@ retry:
54666 if (error)
54667 goto dput_and_out;
54668
54669+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
54670+ goto dput_and_out;
54671+
54672 set_fs_root(current->fs, &path);
54673+
54674+ gr_handle_chroot_chdir(&path);
54675+
54676 error = 0;
54677 dput_and_out:
54678 path_put(&path);
54679@@ -484,6 +508,16 @@ static int chmod_common(struct path *path, umode_t mode)
54680 if (error)
54681 return error;
54682 mutex_lock(&inode->i_mutex);
54683+
54684+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
54685+ error = -EACCES;
54686+ goto out_unlock;
54687+ }
54688+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
54689+ error = -EACCES;
54690+ goto out_unlock;
54691+ }
54692+
54693 error = security_path_chmod(path, mode);
54694 if (error)
54695 goto out_unlock;
54696@@ -544,6 +578,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
54697 uid = make_kuid(current_user_ns(), user);
54698 gid = make_kgid(current_user_ns(), group);
54699
54700+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
54701+ return -EACCES;
54702+
54703 newattrs.ia_valid = ATTR_CTIME;
54704 if (user != (uid_t) -1) {
54705 if (!uid_valid(uid))
54706@@ -960,6 +997,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
54707 } else {
54708 fsnotify_open(f);
54709 fd_install(fd, f);
54710+ trace_do_sys_open(tmp->name, flags, mode);
54711 }
54712 }
54713 putname(tmp);
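
fs/open.c gains two unrelated kinds of change: the same pre-operation gr_acl_* checks seen in namei.c (truncate, access, chdir, chmod, chown paths) and, separately, the do_sys_open tracepoint, defined via CREATE_TRACE_POINTS and fired only once the fd is installed. A toy rendering of the tracepoint's define-once, call-on-success shape:

#include <stdio.h>

static int trace_enabled = 1;     /* a tracepoint is a cheap no-op when off */

static void trace_do_sys_open(const char *name, int flags, int mode)
{
    if (trace_enabled)
        fprintf(stderr, "do_sys_open: %s flags=%#x mode=%#o\n",
                name, flags, mode);
}

static int do_sys_open(const char *name, int flags, int mode)
{
    int fd = 3;                           /* pretend the open succeeded */
    trace_do_sys_open(name, flags, mode); /* fires on the success path only */
    return fd;
}

int main(void)
{
    return do_sys_open("/etc/hostname", 0, 0644) < 0;
}
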
54714diff --git a/fs/pipe.c b/fs/pipe.c
54715index 8e2e73f..1ef1048 100644
54716--- a/fs/pipe.c
54717+++ b/fs/pipe.c
54718@@ -438,9 +438,9 @@ redo:
54719 }
54720 if (bufs) /* More to do? */
54721 continue;
54722- if (!pipe->writers)
54723+ if (!atomic_read(&pipe->writers))
54724 break;
54725- if (!pipe->waiting_writers) {
54726+ if (!atomic_read(&pipe->waiting_writers)) {
54727 /* syscall merging: Usually we must not sleep
54728 * if O_NONBLOCK is set, or if we got some data.
54729 * But if a writer sleeps in kernel space, then
54730@@ -504,7 +504,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
54731 mutex_lock(&inode->i_mutex);
54732 pipe = inode->i_pipe;
54733
54734- if (!pipe->readers) {
54735+ if (!atomic_read(&pipe->readers)) {
54736 send_sig(SIGPIPE, current, 0);
54737 ret = -EPIPE;
54738 goto out;
54739@@ -553,7 +553,7 @@ redo1:
54740 for (;;) {
54741 int bufs;
54742
54743- if (!pipe->readers) {
54744+ if (!atomic_read(&pipe->readers)) {
54745 send_sig(SIGPIPE, current, 0);
54746 if (!ret)
54747 ret = -EPIPE;
54748@@ -644,9 +644,9 @@ redo2:
54749 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
54750 do_wakeup = 0;
54751 }
54752- pipe->waiting_writers++;
54753+ atomic_inc(&pipe->waiting_writers);
54754 pipe_wait(pipe);
54755- pipe->waiting_writers--;
54756+ atomic_dec(&pipe->waiting_writers);
54757 }
54758 out:
54759 mutex_unlock(&inode->i_mutex);
54760@@ -716,7 +716,7 @@ pipe_poll(struct file *filp, poll_table *wait)
54761 mask = 0;
54762 if (filp->f_mode & FMODE_READ) {
54763 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
54764- if (!pipe->writers && filp->f_version != pipe->w_counter)
54765+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
54766 mask |= POLLHUP;
54767 }
54768
54769@@ -726,7 +726,7 @@ pipe_poll(struct file *filp, poll_table *wait)
54770 * Most Unices do not set POLLERR for FIFOs but on Linux they
54771 * behave exactly like pipes for poll().
54772 */
54773- if (!pipe->readers)
54774+ if (!atomic_read(&pipe->readers))
54775 mask |= POLLERR;
54776 }
54777
54778@@ -740,10 +740,10 @@ pipe_release(struct inode *inode, int decr, int decw)
54779
54780 mutex_lock(&inode->i_mutex);
54781 pipe = inode->i_pipe;
54782- pipe->readers -= decr;
54783- pipe->writers -= decw;
54784+ atomic_sub(decr, &pipe->readers);
54785+ atomic_sub(decw, &pipe->writers);
54786
54787- if (!pipe->readers && !pipe->writers) {
54788+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
54789 free_pipe_info(inode);
54790 } else {
54791 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
54792@@ -833,7 +833,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
54793
54794 if (inode->i_pipe) {
54795 ret = 0;
54796- inode->i_pipe->readers++;
54797+ atomic_inc(&inode->i_pipe->readers);
54798 }
54799
54800 mutex_unlock(&inode->i_mutex);
54801@@ -850,7 +850,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
54802
54803 if (inode->i_pipe) {
54804 ret = 0;
54805- inode->i_pipe->writers++;
54806+ atomic_inc(&inode->i_pipe->writers);
54807 }
54808
54809 mutex_unlock(&inode->i_mutex);
54810@@ -871,9 +871,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
54811 if (inode->i_pipe) {
54812 ret = 0;
54813 if (filp->f_mode & FMODE_READ)
54814- inode->i_pipe->readers++;
54815+ atomic_inc(&inode->i_pipe->readers);
54816 if (filp->f_mode & FMODE_WRITE)
54817- inode->i_pipe->writers++;
54818+ atomic_inc(&inode->i_pipe->writers);
54819 }
54820
54821 mutex_unlock(&inode->i_mutex);
54822@@ -965,7 +965,7 @@ void free_pipe_info(struct inode *inode)
54823 inode->i_pipe = NULL;
54824 }
54825
54826-static struct vfsmount *pipe_mnt __read_mostly;
54827+struct vfsmount *pipe_mnt __read_mostly;
54828
54829 /*
54830 * pipefs_dname() is called from d_path().
54831@@ -995,7 +995,8 @@ static struct inode * get_pipe_inode(void)
54832 goto fail_iput;
54833 inode->i_pipe = pipe;
54834
54835- pipe->readers = pipe->writers = 1;
54836+ atomic_set(&pipe->readers, 1);
54837+ atomic_set(&pipe->writers, 1);
54838 inode->i_fop = &rdwr_pipefifo_fops;
54839
54840 /*
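
The pipe.c conversion turns the readers, writers, and waiting_writers counts into atomics so that reads taken outside i_mutex, as in pipe_poll() and the writer wakeup tests, see coherent values; presumably it also ties these counters into the overflow-checked atomic infrastructure used elsewhere in the patch. The counter shape in C11:

#include <stdatomic.h>
#include <stdio.h>

struct pipe_inode_info {
    atomic_int readers;     /* was: unsigned int, mutated under i_mutex */
    atomic_int writers;
};

static int pipe_poll_hup(struct pipe_inode_info *p)
{
    /* lockless peek, as the poll path does without taking i_mutex */
    return atomic_load(&p->writers) == 0;
}

static void pipe_release(struct pipe_inode_info *p, int decr, int decw)
{
    atomic_fetch_sub(&p->readers, decr);
    atomic_fetch_sub(&p->writers, decw);
}

int main(void)
{
    struct pipe_inode_info p;
    atomic_init(&p.readers, 1);
    atomic_init(&p.writers, 1);
    pipe_release(&p, 0, 1);                 /* writer side closes */
    printf("HUP=%d\n", pipe_poll_hup(&p));  /* HUP=1 */
    return 0;
}
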
54841diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
54842index 15af622..0e9f4467 100644
54843--- a/fs/proc/Kconfig
54844+++ b/fs/proc/Kconfig
54845@@ -30,12 +30,12 @@ config PROC_FS
54846
54847 config PROC_KCORE
54848 bool "/proc/kcore support" if !ARM
54849- depends on PROC_FS && MMU
54850+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
54851
54852 config PROC_VMCORE
54853 bool "/proc/vmcore support"
54854- depends on PROC_FS && CRASH_DUMP
54855- default y
54856+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
54857+ default n
54858 help
54859 Exports the dump image of crashed kernel in ELF format.
54860
54861@@ -59,8 +59,8 @@ config PROC_SYSCTL
54862 limited in memory.
54863
54864 config PROC_PAGE_MONITOR
54865- default y
54866- depends on PROC_FS && MMU
54867+ default n
54868+ depends on PROC_FS && MMU && !GRKERNSEC
54869 bool "Enable /proc page monitoring" if EXPERT
54870 help
54871 Various /proc files exist to monitor process memory utilization:
54872diff --git a/fs/proc/array.c b/fs/proc/array.c
54873index 6a91e6f..e54dbc14 100644
54874--- a/fs/proc/array.c
54875+++ b/fs/proc/array.c
54876@@ -60,6 +60,7 @@
54877 #include <linux/tty.h>
54878 #include <linux/string.h>
54879 #include <linux/mman.h>
54880+#include <linux/grsecurity.h>
54881 #include <linux/proc_fs.h>
54882 #include <linux/ioport.h>
54883 #include <linux/uaccess.h>
54884@@ -362,6 +363,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
54885 seq_putc(m, '\n');
54886 }
54887
54888+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
54889+static inline void task_pax(struct seq_file *m, struct task_struct *p)
54890+{
54891+ if (p->mm)
54892+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
54893+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
54894+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
54895+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
54896+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
54897+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
54898+ else
54899+ seq_printf(m, "PaX:\t-----\n");
54900+}
54901+#endif
54902+
54903 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
54904 struct pid *pid, struct task_struct *task)
54905 {
54906@@ -380,9 +396,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
54907 task_cpus_allowed(m, task);
54908 cpuset_task_status_allowed(m, task);
54909 task_context_switch_counts(m, task);
54910+
54911+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
54912+ task_pax(m, task);
54913+#endif
54914+
54915+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
54916+ task_grsec_rbac(m, task);
54917+#endif
54918+
54919 return 0;
54920 }
54921
54922+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54923+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
54924+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
54925+ _mm->pax_flags & MF_PAX_SEGMEXEC))
54926+#endif
54927+
54928 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
54929 struct pid *pid, struct task_struct *task, int whole)
54930 {
54931@@ -404,6 +435,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
54932 char tcomm[sizeof(task->comm)];
54933 unsigned long flags;
54934
54935+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54936+ if (current->exec_id != m->exec_id) {
54937+ gr_log_badprocpid("stat");
54938+ return 0;
54939+ }
54940+#endif
54941+
54942 state = *get_task_state(task);
54943 vsize = eip = esp = 0;
54944 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
54945@@ -475,6 +513,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
54946 gtime = task->gtime;
54947 }
54948
54949+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54950+ if (PAX_RAND_FLAGS(mm)) {
54951+ eip = 0;
54952+ esp = 0;
54953+ wchan = 0;
54954+ }
54955+#endif
54956+#ifdef CONFIG_GRKERNSEC_HIDESYM
54957+ wchan = 0;
54958+ eip = 0;
54959+ esp = 0;
54960+#endif
54961+
54962 /* scale priority and nice values from timeslices to -20..20 */
54963 /* to make it look like a "normal" Unix priority/nice value */
54964 priority = task_prio(task);
54965@@ -511,9 +562,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
54966 seq_put_decimal_ull(m, ' ', vsize);
54967 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
54968 seq_put_decimal_ull(m, ' ', rsslim);
54969+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54970+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
54971+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
54972+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
54973+#else
54974 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
54975 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
54976 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
54977+#endif
54978 seq_put_decimal_ull(m, ' ', esp);
54979 seq_put_decimal_ull(m, ' ', eip);
54980 /* The signal information here is obsolete.
54981@@ -535,7 +592,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
54982 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
54983 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
54984
54985- if (mm && permitted) {
54986+ if (mm && permitted
54987+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54988+ && !PAX_RAND_FLAGS(mm)
54989+#endif
54990+ ) {
54991 seq_put_decimal_ull(m, ' ', mm->start_data);
54992 seq_put_decimal_ull(m, ' ', mm->end_data);
54993 seq_put_decimal_ull(m, ' ', mm->start_brk);
54994@@ -573,8 +634,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
54995 struct pid *pid, struct task_struct *task)
54996 {
54997 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
54998- struct mm_struct *mm = get_task_mm(task);
54999+ struct mm_struct *mm;
55000
55001+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55002+ if (current->exec_id != m->exec_id) {
55003+ gr_log_badprocpid("statm");
55004+ return 0;
55005+ }
55006+#endif
55007+ mm = get_task_mm(task);
55008 if (mm) {
55009 size = task_statm(mm, &shared, &text, &data, &resident);
55010 mmput(mm);
55011@@ -597,6 +665,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
55012 return 0;
55013 }
55014
55015+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
55016+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
55017+{
55018+ return sprintf(buffer, "%pI4\n", &task->signal->curr_ip);
55019+}
55020+#endif
55021+
55022 #ifdef CONFIG_CHECKPOINT_RESTORE
55023 static struct pid *
55024 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
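
proc/array.c layers two gates onto the per-task files: PAX_RAND_FLAGS() blanks address-space fields (eip, esp, wchan, start/end markers) for randomized or segmented mms, and the exec_id comparison rejects readers whose fd predates the reader's last exec, logging via gr_log_badprocpid() and returning an empty read. A sketch of the exec_id gate; the stated intent is an inference from the code, and the names are simplified:

#include <stdio.h>

struct seq_file { unsigned long exec_id; };      /* stamped at open() time */

static unsigned long current_exec_id = 7;        /* bumped on every exec   */

static int proc_read(struct seq_file *m)
{
    if (current_exec_id != m->exec_id) {
        fprintf(stderr, "badprocpid\n");         /* gr_log_badprocpid() */
        return 0;                                /* empty read, not an error */
    }
    return 1;                                    /* emit the real contents */
}

int main(void)
{
    struct seq_file m = { .exec_id = current_exec_id };  /* open()  */
    printf("%d\n", proc_read(&m));               /* 1 */
    current_exec_id++;                           /* exec() happened */
    printf("%d\n", proc_read(&m));               /* 0 */
    return 0;
}
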
55025diff --git a/fs/proc/base.c b/fs/proc/base.c
55026index 9b43ff77..0fa9564 100644
55027--- a/fs/proc/base.c
55028+++ b/fs/proc/base.c
55029@@ -111,6 +111,14 @@ struct pid_entry {
55030 union proc_op op;
55031 };
55032
55033+struct getdents_callback {
55034+ struct linux_dirent __user * current_dir;
55035+ struct linux_dirent __user * previous;
55036+ struct file * file;
55037+ int count;
55038+ int error;
55039+};
55040+
55041 #define NOD(NAME, MODE, IOP, FOP, OP) { \
55042 .name = (NAME), \
55043 .len = sizeof(NAME) - 1, \
55044@@ -208,6 +216,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
55045 if (!mm->arg_end)
55046 goto out_mm; /* Shh! No looking before we're done */
55047
55048+ if (gr_acl_handle_procpidmem(task))
55049+ goto out_mm;
55050+
55051 len = mm->arg_end - mm->arg_start;
55052
55053 if (len > PAGE_SIZE)
55054@@ -235,12 +246,28 @@ out:
55055 return res;
55056 }
55057
55058+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55059+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
55060+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
55061+ _mm->pax_flags & MF_PAX_SEGMEXEC))
55062+#endif
55063+
55064 static int proc_pid_auxv(struct task_struct *task, char *buffer)
55065 {
55066 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
55067 int res = PTR_ERR(mm);
55068 if (mm && !IS_ERR(mm)) {
55069 unsigned int nwords = 0;
55070+
55071+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55072+ /* allow if we're currently ptracing this task */
55073+ if (PAX_RAND_FLAGS(mm) &&
55074+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
55075+ mmput(mm);
55076+ return 0;
55077+ }
55078+#endif
55079+
55080 do {
55081 nwords += 2;
55082 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
55083@@ -254,7 +281,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
55084 }
55085
55086
55087-#ifdef CONFIG_KALLSYMS
55088+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
55089 /*
55090 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
55091 * Returns the resolved symbol. If that fails, simply return the address.
55092@@ -293,7 +320,7 @@ static void unlock_trace(struct task_struct *task)
55093 mutex_unlock(&task->signal->cred_guard_mutex);
55094 }
55095
55096-#ifdef CONFIG_STACKTRACE
55097+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
55098
55099 #define MAX_STACK_TRACE_DEPTH 64
55100
55101@@ -485,7 +512,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
55102 return count;
55103 }
55104
55105-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
55106+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
55107 static int proc_pid_syscall(struct task_struct *task, char *buffer)
55108 {
55109 long nr;
55110@@ -514,7 +541,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
55111 /************************************************************************/
55112
55113 /* permission checks */
55114-static int proc_fd_access_allowed(struct inode *inode)
55115+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
55116 {
55117 struct task_struct *task;
55118 int allowed = 0;
55119@@ -524,7 +551,10 @@ static int proc_fd_access_allowed(struct inode *inode)
55120 */
55121 task = get_proc_task(inode);
55122 if (task) {
55123- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
55124+ if (log)
55125+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
55126+ else
55127+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
55128 put_task_struct(task);
55129 }
55130 return allowed;
55131@@ -555,10 +585,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
55132 struct task_struct *task,
55133 int hide_pid_min)
55134 {
55135+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
55136+ return false;
55137+
55138+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55139+ rcu_read_lock();
55140+ {
55141+ const struct cred *tmpcred = current_cred();
55142+ const struct cred *cred = __task_cred(task);
55143+
55144+ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
55145+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
55146+ || in_group_p(grsec_proc_gid)
55147+#endif
55148+ ) {
55149+ rcu_read_unlock();
55150+ return true;
55151+ }
55152+ }
55153+ rcu_read_unlock();
55154+
55155+ if (!pid->hide_pid)
55156+ return false;
55157+#endif
55158+
55159 if (pid->hide_pid < hide_pid_min)
55160 return true;
55161 if (in_group_p(pid->pid_gid))
55162 return true;
55163+
55164 return ptrace_may_access(task, PTRACE_MODE_READ);
55165 }
55166
55167@@ -576,7 +631,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
55168 put_task_struct(task);
55169
55170 if (!has_perms) {
55171+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55172+ {
55173+#else
55174 if (pid->hide_pid == 2) {
55175+#endif
55176 /*
55177 * Let's make getdents(), stat(), and open()
55178 * consistent with each other. If a process
55179@@ -674,6 +733,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
55180 if (!task)
55181 return -ESRCH;
55182
55183+ if (gr_acl_handle_procpidmem(task)) {
55184+ put_task_struct(task);
55185+ return -EPERM;
55186+ }
55187+
55188 mm = mm_access(task, mode);
55189 put_task_struct(task);
55190
55191@@ -689,6 +753,10 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
55192
55193 file->private_data = mm;
55194
55195+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55196+ file->f_version = current->exec_id;
55197+#endif
55198+
55199 return 0;
55200 }
55201
55202@@ -710,6 +778,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
55203 ssize_t copied;
55204 char *page;
55205
55206+#ifdef CONFIG_GRKERNSEC
55207+ if (write)
55208+ return -EPERM;
55209+#endif
55210+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55211+ if (file->f_version != current->exec_id) {
55212+ gr_log_badprocpid("mem");
55213+ return 0;
55214+ }
55215+#endif
55216+
55217 if (!mm)
55218 return 0;
55219
55220@@ -722,7 +801,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
55221 goto free;
55222
55223 while (count > 0) {
55224- int this_len = min_t(int, count, PAGE_SIZE);
55225+ ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
55226
55227 if (write && copy_from_user(page, buf, this_len)) {
55228 copied = -EFAULT;
55229@@ -814,6 +893,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
55230 if (!mm)
55231 return 0;
55232
55233+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55234+ if (file->f_version != current->exec_id) {
55235+ gr_log_badprocpid("environ");
55236+ return 0;
55237+ }
55238+#endif
55239+
55240 page = (char *)__get_free_page(GFP_TEMPORARY);
55241 if (!page)
55242 return -ENOMEM;
55243@@ -823,7 +909,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
55244 goto free;
55245 while (count > 0) {
55246 size_t this_len, max_len;
55247- int retval;
55248+ ssize_t retval;
55249
55250 if (src >= (mm->env_end - mm->env_start))
55251 break;
55252@@ -1429,7 +1515,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
55253 int error = -EACCES;
55254
55255 /* Are we allowed to snoop on the tasks file descriptors? */
55256- if (!proc_fd_access_allowed(inode))
55257+ if (!proc_fd_access_allowed(inode, 0))
55258 goto out;
55259
55260 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
55261@@ -1473,8 +1559,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
55262 struct path path;
55263
55264 /* Are we allowed to snoop on the tasks file descriptors? */
55265- if (!proc_fd_access_allowed(inode))
55266- goto out;
55267+ /* logging this is needed for learning on chromium to work properly,
55268+ but we don't want to flood the logs from 'ps', which does a readlink
55269+ on /proc/<pid>/fd/2 of each task in the listing, nor do we want 'ps'
55270+ to learn CAP_SYS_PTRACE, as it's not necessary for its basic
55271+ functionality */
55272+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
55273+ if (!proc_fd_access_allowed(inode, 0))
55274+ goto out;
55275+ } else {
55276+ if (!proc_fd_access_allowed(inode, 1))
55277+ goto out;
55278+ }
55279
55280 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
55281 if (error)
55282@@ -1524,7 +1620,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
55283 rcu_read_lock();
55284 cred = __task_cred(task);
55285 inode->i_uid = cred->euid;
55286+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
55287+ inode->i_gid = grsec_proc_gid;
55288+#else
55289 inode->i_gid = cred->egid;
55290+#endif
55291 rcu_read_unlock();
55292 }
55293 security_task_to_inode(task, inode);
55294@@ -1560,10 +1660,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
55295 return -ENOENT;
55296 }
55297 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
55298+#ifdef CONFIG_GRKERNSEC_PROC_USER
55299+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
55300+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55301+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
55302+#endif
55303 task_dumpable(task)) {
55304 cred = __task_cred(task);
55305 stat->uid = cred->euid;
55306+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
55307+ stat->gid = grsec_proc_gid;
55308+#else
55309 stat->gid = cred->egid;
55310+#endif
55311 }
55312 }
55313 rcu_read_unlock();
55314@@ -1601,11 +1710,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
55315
55316 if (task) {
55317 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
55318+#ifdef CONFIG_GRKERNSEC_PROC_USER
55319+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
55320+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55321+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
55322+#endif
55323 task_dumpable(task)) {
55324 rcu_read_lock();
55325 cred = __task_cred(task);
55326 inode->i_uid = cred->euid;
55327+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
55328+ inode->i_gid = grsec_proc_gid;
55329+#else
55330 inode->i_gid = cred->egid;
55331+#endif
55332 rcu_read_unlock();
55333 } else {
55334 inode->i_uid = GLOBAL_ROOT_UID;
55335@@ -2058,6 +2176,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
55336 if (!task)
55337 goto out_no_task;
55338
55339+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
55340+ goto out;
55341+
55342 /*
55343 * Yes, it does not scale. And it should not. Don't add
55344 * new entries into /proc/<tgid>/ without very good reasons.
55345@@ -2102,6 +2223,9 @@ static int proc_pident_readdir(struct file *filp,
55346 if (!task)
55347 goto out_no_task;
55348
55349+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
55350+ goto out;
55351+
55352 ret = 0;
55353 i = filp->f_pos;
55354 switch (i) {
55355@@ -2515,7 +2639,7 @@ static const struct pid_entry tgid_base_stuff[] = {
55356 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
55357 #endif
55358 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
55359-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
55360+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
55361 INF("syscall", S_IRUGO, proc_pid_syscall),
55362 #endif
55363 INF("cmdline", S_IRUGO, proc_pid_cmdline),
55364@@ -2540,10 +2664,10 @@ static const struct pid_entry tgid_base_stuff[] = {
55365 #ifdef CONFIG_SECURITY
55366 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
55367 #endif
55368-#ifdef CONFIG_KALLSYMS
55369+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
55370 INF("wchan", S_IRUGO, proc_pid_wchan),
55371 #endif
55372-#ifdef CONFIG_STACKTRACE
55373+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
55374 ONE("stack", S_IRUGO, proc_pid_stack),
55375 #endif
55376 #ifdef CONFIG_SCHEDSTATS
55377@@ -2577,6 +2701,9 @@ static const struct pid_entry tgid_base_stuff[] = {
55378 #ifdef CONFIG_HARDWALL
55379 INF("hardwall", S_IRUGO, proc_pid_hardwall),
55380 #endif
55381+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
55382+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
55383+#endif
55384 #ifdef CONFIG_USER_NS
55385 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
55386 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
55387@@ -2705,7 +2832,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
55388 if (!inode)
55389 goto out;
55390
55391+#ifdef CONFIG_GRKERNSEC_PROC_USER
55392+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
55393+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55394+ inode->i_gid = grsec_proc_gid;
55395+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
55396+#else
55397 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
55398+#endif
55399 inode->i_op = &proc_tgid_base_inode_operations;
55400 inode->i_fop = &proc_tgid_base_operations;
55401 inode->i_flags|=S_IMMUTABLE;
55402@@ -2743,7 +2877,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
55403 if (!task)
55404 goto out;
55405
55406+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
55407+ goto out_put_task;
55408+
55409 result = proc_pid_instantiate(dir, dentry, task, NULL);
55410+out_put_task:
55411 put_task_struct(task);
55412 out:
55413 return result;
55414@@ -2806,6 +2944,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
55415 static int fake_filldir(void *buf, const char *name, int namelen,
55416 loff_t offset, u64 ino, unsigned d_type)
55417 {
55418+ struct getdents_callback * __buf = (struct getdents_callback *) buf;
55419+ __buf->error = -EINVAL;
55420 return 0;
55421 }
55422
55423@@ -2857,7 +2997,7 @@ static const struct pid_entry tid_base_stuff[] = {
55424 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
55425 #endif
55426 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
55427-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
55428+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
55429 INF("syscall", S_IRUGO, proc_pid_syscall),
55430 #endif
55431 INF("cmdline", S_IRUGO, proc_pid_cmdline),
55432@@ -2884,10 +3024,10 @@ static const struct pid_entry tid_base_stuff[] = {
55433 #ifdef CONFIG_SECURITY
55434 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
55435 #endif
55436-#ifdef CONFIG_KALLSYMS
55437+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
55438 INF("wchan", S_IRUGO, proc_pid_wchan),
55439 #endif
55440-#ifdef CONFIG_STACKTRACE
55441+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
55442 ONE("stack", S_IRUGO, proc_pid_stack),
55443 #endif
55444 #ifdef CONFIG_SCHEDSTATS
55445diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
55446index 82676e3..5f8518a 100644
55447--- a/fs/proc/cmdline.c
55448+++ b/fs/proc/cmdline.c
55449@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
55450
55451 static int __init proc_cmdline_init(void)
55452 {
55453+#ifdef CONFIG_GRKERNSEC_PROC_ADD
55454+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
55455+#else
55456 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
55457+#endif
55458 return 0;
55459 }
55460 module_init(proc_cmdline_init);
55461diff --git a/fs/proc/devices.c b/fs/proc/devices.c
55462index b143471..bb105e5 100644
55463--- a/fs/proc/devices.c
55464+++ b/fs/proc/devices.c
55465@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
55466
55467 static int __init proc_devices_init(void)
55468 {
55469+#ifdef CONFIG_GRKERNSEC_PROC_ADD
55470+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
55471+#else
55472 proc_create("devices", 0, NULL, &proc_devinfo_operations);
55473+#endif
55474 return 0;
55475 }
55476 module_init(proc_devices_init);
55477diff --git a/fs/proc/fd.c b/fs/proc/fd.c
55478index d7a4a28..0201742 100644
55479--- a/fs/proc/fd.c
55480+++ b/fs/proc/fd.c
55481@@ -25,7 +25,8 @@ static int seq_show(struct seq_file *m, void *v)
55482 if (!task)
55483 return -ENOENT;
55484
55485- files = get_files_struct(task);
55486+ if (!gr_acl_handle_procpidmem(task))
55487+ files = get_files_struct(task);
55488 put_task_struct(task);
55489
55490 if (files) {
55491@@ -302,11 +303,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
55492 */
55493 int proc_fd_permission(struct inode *inode, int mask)
55494 {
55495+ struct task_struct *task;
55496 int rv = generic_permission(inode, mask);
55497- if (rv == 0)
55498- return 0;
55499+
55500 if (task_pid(current) == proc_pid(inode))
55501 rv = 0;
55502+
55503+ task = get_proc_task(inode);
55504+ if (task == NULL)
55505+ return rv;
55506+
55507+ if (gr_acl_handle_procpidmem(task))
55508+ rv = -EACCES;
55509+
55510+ put_task_struct(task);
55511+
55512 return rv;
55513 }
55514
55515diff --git a/fs/proc/inode.c b/fs/proc/inode.c
55516index 0ac1e1b..0497e58 100644
55517--- a/fs/proc/inode.c
55518+++ b/fs/proc/inode.c
55519@@ -21,11 +21,17 @@
55520 #include <linux/seq_file.h>
55521 #include <linux/slab.h>
55522 #include <linux/mount.h>
55523+#include <linux/grsecurity.h>
55524
55525 #include <asm/uaccess.h>
55526
55527 #include "internal.h"
55528
55529+#ifdef CONFIG_PROC_SYSCTL
55530+extern const struct inode_operations proc_sys_inode_operations;
55531+extern const struct inode_operations proc_sys_dir_operations;
55532+#endif
55533+
55534 static void proc_evict_inode(struct inode *inode)
55535 {
55536 struct proc_dir_entry *de;
55537@@ -53,6 +59,13 @@ static void proc_evict_inode(struct inode *inode)
55538 ns = PROC_I(inode)->ns;
55539 if (ns_ops && ns)
55540 ns_ops->put(ns);
55541+
55542+#ifdef CONFIG_PROC_SYSCTL
55543+ if (inode->i_op == &proc_sys_inode_operations ||
55544+ inode->i_op == &proc_sys_dir_operations)
55545+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
55546+#endif
55547+
55548 }
55549
55550 static struct kmem_cache * proc_inode_cachep;
55551@@ -455,7 +468,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
55552 if (de->mode) {
55553 inode->i_mode = de->mode;
55554 inode->i_uid = de->uid;
55555+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
55556+ inode->i_gid = grsec_proc_gid;
55557+#else
55558 inode->i_gid = de->gid;
55559+#endif
55560 }
55561 if (de->size)
55562 inode->i_size = de->size;
55563diff --git a/fs/proc/internal.h b/fs/proc/internal.h
55564index 252544c..04395b9 100644
55565--- a/fs/proc/internal.h
55566+++ b/fs/proc/internal.h
55567@@ -55,6 +55,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
55568 struct pid *pid, struct task_struct *task);
55569 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
55570 struct pid *pid, struct task_struct *task);
55571+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
55572+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
55573+#endif
55574 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
55575
55576 extern const struct file_operations proc_tid_children_operations;
55577diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
55578index e96d4f1..8b116ed 100644
55579--- a/fs/proc/kcore.c
55580+++ b/fs/proc/kcore.c
55581@@ -480,9 +480,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
55582 * the addresses in the elf_phdr on our list.
55583 */
55584 start = kc_offset_to_vaddr(*fpos - elf_buflen);
55585- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
55586+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
55587+ if (tsz > buflen)
55588 tsz = buflen;
55589-
55590+
55591 while (buflen) {
55592 struct kcore_list *m;
55593
55594@@ -511,20 +512,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
55595 kfree(elf_buf);
55596 } else {
55597 if (kern_addr_valid(start)) {
55598- unsigned long n;
55599+ char *elf_buf;
55600+ mm_segment_t oldfs;
55601
55602- n = copy_to_user(buffer, (char *)start, tsz);
55603- /*
55604- * We cannot distinguish between fault on source
55605- * and fault on destination. When this happens
55606- * we clear too and hope it will trigger the
55607- * EFAULT again.
55608- */
55609- if (n) {
55610- if (clear_user(buffer + tsz - n,
55611- n))
55612+ elf_buf = kmalloc(tsz, GFP_KERNEL);
55613+ if (!elf_buf)
55614+ return -ENOMEM;
55615+ oldfs = get_fs();
55616+ set_fs(KERNEL_DS);
55617+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
55618+ set_fs(oldfs);
55619+ if (copy_to_user(buffer, elf_buf, tsz)) {
55620+ kfree(elf_buf);
55621 return -EFAULT;
55622+ }
55623 }
55624+ set_fs(oldfs);
55625+ kfree(elf_buf);
55626 } else {
55627 if (clear_user(buffer, tsz))
55628 return -EFAULT;
55629@@ -544,6 +548,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
55630
55631 static int open_kcore(struct inode *inode, struct file *filp)
55632 {
55633+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
55634+ return -EPERM;
55635+#endif
55636 if (!capable(CAP_SYS_RAWIO))
55637 return -EPERM;
55638 if (kcore_need_update)
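
The rewritten read path above replaces a direct copy_to_user() from a raw kernel virtual address with a two-step copy: the bytes are first staged into a kmalloc'd bounce buffer via __copy_from_user() under set_fs(KERNEL_DS), and only the staged copy is handed to copy_to_user(). That separates a fault on the kernel-side source from a fault on the userspace destination, which the deleted comment admits the old code could not distinguish. The following is a minimal userspace analog of the bounce-buffer idiom, not kernel code; read_via_bounce and the message are illustrative only.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stage the source into a private buffer, then copy the staged bytes
 * out, so each step can fail and be reported independently (analog of
 * the patched read_kcore() flow). */
static int read_via_bounce(void *dst, const void *src, size_t len)
{
    char *bounce = malloc(len);

    if (!bounce)
        return -1;                /* analog of -ENOMEM */
    memcpy(bounce, src, len);     /* analog of __copy_from_user() */
    memcpy(dst, bounce, len);     /* analog of copy_to_user() */
    free(bounce);
    return 0;
}

int main(void)
{
    const char msg[] = "kcore-style staged copy";
    char out[sizeof(msg)];

    if (read_via_bounce(out, msg, sizeof(msg)) == 0)
        printf("%s\n", out);
    return 0;
}
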
55639diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
55640index 80e4645..53e5fcf 100644
55641--- a/fs/proc/meminfo.c
55642+++ b/fs/proc/meminfo.c
55643@@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
55644 vmi.used >> 10,
55645 vmi.largest_chunk >> 10
55646 #ifdef CONFIG_MEMORY_FAILURE
55647- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
55648+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
55649 #endif
55650 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
55651 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
55652diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
55653index b1822dd..df622cb 100644
55654--- a/fs/proc/nommu.c
55655+++ b/fs/proc/nommu.c
55656@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
55657 if (len < 1)
55658 len = 1;
55659 seq_printf(m, "%*c", len, ' ');
55660- seq_path(m, &file->f_path, "");
55661+ seq_path(m, &file->f_path, "\n\\");
55662 }
55663
55664 seq_putc(m, '\n');
55665diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
55666index fe72cd0..21b52ff 100644
55667--- a/fs/proc/proc_net.c
55668+++ b/fs/proc/proc_net.c
55669@@ -23,6 +23,7 @@
55670 #include <linux/nsproxy.h>
55671 #include <net/net_namespace.h>
55672 #include <linux/seq_file.h>
55673+#include <linux/grsecurity.h>
55674
55675 #include "internal.h"
55676
55677@@ -105,6 +106,17 @@ static struct net *get_proc_task_net(struct inode *dir)
55678 struct task_struct *task;
55679 struct nsproxy *ns;
55680 struct net *net = NULL;
55681+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55682+ const struct cred *cred = current_cred();
55683+#endif
55684+
55685+#ifdef CONFIG_GRKERNSEC_PROC_USER
55686+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
55687+ return net;
55688+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55689+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
55690+ return net;
55691+#endif
55692
55693 rcu_read_lock();
55694 task = pid_task(proc_pid(dir), PIDTYPE_PID);
55695diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
55696index 1827d88..43b0279 100644
55697--- a/fs/proc/proc_sysctl.c
55698+++ b/fs/proc/proc_sysctl.c
55699@@ -12,11 +12,15 @@
55700 #include <linux/module.h>
55701 #include "internal.h"
55702
55703+extern int gr_handle_chroot_sysctl(const int op);
55704+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
55705+ const int op);
55706+
55707 static const struct dentry_operations proc_sys_dentry_operations;
55708 static const struct file_operations proc_sys_file_operations;
55709-static const struct inode_operations proc_sys_inode_operations;
55710+const struct inode_operations proc_sys_inode_operations;
55711 static const struct file_operations proc_sys_dir_file_operations;
55712-static const struct inode_operations proc_sys_dir_operations;
55713+const struct inode_operations proc_sys_dir_operations;
55714
55715 void proc_sys_poll_notify(struct ctl_table_poll *poll)
55716 {
55717@@ -466,6 +470,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
55718
55719 err = NULL;
55720 d_set_d_op(dentry, &proc_sys_dentry_operations);
55721+
55722+ gr_handle_proc_create(dentry, inode);
55723+
55724 d_add(dentry, inode);
55725
55726 out:
55727@@ -481,6 +488,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
55728 struct inode *inode = filp->f_path.dentry->d_inode;
55729 struct ctl_table_header *head = grab_header(inode);
55730 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
55731+ int op = write ? MAY_WRITE : MAY_READ;
55732 ssize_t error;
55733 size_t res;
55734
55735@@ -492,7 +500,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
55736 * and won't be until we finish.
55737 */
55738 error = -EPERM;
55739- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
55740+ if (sysctl_perm(head, table, op))
55741 goto out;
55742
55743 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
55744@@ -500,6 +508,22 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
55745 if (!table->proc_handler)
55746 goto out;
55747
55748+#ifdef CONFIG_GRKERNSEC
55749+ error = -EPERM;
55750+ if (gr_handle_chroot_sysctl(op))
55751+ goto out;
55752+ dget(filp->f_path.dentry);
55753+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
55754+ dput(filp->f_path.dentry);
55755+ goto out;
55756+ }
55757+ dput(filp->f_path.dentry);
55758+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
55759+ goto out;
55760+ if (write && !capable(CAP_SYS_ADMIN))
55761+ goto out;
55762+#endif
55763+
55764 /* careful: calling conventions are nasty here */
55765 res = count;
55766 error = table->proc_handler(table, write, buf, &res, ppos);
55767@@ -597,6 +621,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
55768 return -ENOMEM;
55769 } else {
55770 d_set_d_op(child, &proc_sys_dentry_operations);
55771+
55772+ gr_handle_proc_create(child, inode);
55773+
55774 d_add(child, inode);
55775 }
55776 } else {
55777@@ -640,6 +667,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
55778 if ((*pos)++ < file->f_pos)
55779 return 0;
55780
55781+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
55782+ return 0;
55783+
55784 if (unlikely(S_ISLNK(table->mode)))
55785 res = proc_sys_link_fill_cache(file, dirent, filldir, head, table);
55786 else
55787@@ -750,6 +780,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
55788 if (IS_ERR(head))
55789 return PTR_ERR(head);
55790
55791+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
55792+ return -ENOENT;
55793+
55794 generic_fillattr(inode, stat);
55795 if (table)
55796 stat->mode = (stat->mode & S_IFMT) | table->mode;
55797@@ -772,13 +805,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
55798 .llseek = generic_file_llseek,
55799 };
55800
55801-static const struct inode_operations proc_sys_inode_operations = {
55802+const struct inode_operations proc_sys_inode_operations = {
55803 .permission = proc_sys_permission,
55804 .setattr = proc_sys_setattr,
55805 .getattr = proc_sys_getattr,
55806 };
55807
55808-static const struct inode_operations proc_sys_dir_operations = {
55809+const struct inode_operations proc_sys_dir_operations = {
55810 .lookup = proc_sys_lookup,
55811 .permission = proc_sys_permission,
55812 .setattr = proc_sys_setattr,
55813@@ -854,7 +887,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
55814 static struct ctl_dir *new_dir(struct ctl_table_set *set,
55815 const char *name, int namelen)
55816 {
55817- struct ctl_table *table;
55818+ ctl_table_no_const *table;
55819 struct ctl_dir *new;
55820 struct ctl_node *node;
55821 char *new_name;
55822@@ -866,7 +899,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
55823 return NULL;
55824
55825 node = (struct ctl_node *)(new + 1);
55826- table = (struct ctl_table *)(node + 1);
55827+ table = (ctl_table_no_const *)(node + 1);
55828 new_name = (char *)(table + 2);
55829 memcpy(new_name, name, namelen);
55830 new_name[namelen] = '\0';
55831@@ -1035,7 +1068,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
55832 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
55833 struct ctl_table_root *link_root)
55834 {
55835- struct ctl_table *link_table, *entry, *link;
55836+ ctl_table_no_const *link_table, *link;
55837+ struct ctl_table *entry;
55838 struct ctl_table_header *links;
55839 struct ctl_node *node;
55840 char *link_name;
55841@@ -1058,7 +1092,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
55842 return NULL;
55843
55844 node = (struct ctl_node *)(links + 1);
55845- link_table = (struct ctl_table *)(node + nr_entries);
55846+ link_table = (ctl_table_no_const *)(node + nr_entries);
55847 link_name = (char *)&link_table[nr_entries + 1];
55848
55849 for (link = link_table, entry = table; entry->procname; link++, entry++) {
55850@@ -1306,8 +1340,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
55851 struct ctl_table_header ***subheader, struct ctl_table_set *set,
55852 struct ctl_table *table)
55853 {
55854- struct ctl_table *ctl_table_arg = NULL;
55855- struct ctl_table *entry, *files;
55856+ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
55857+ struct ctl_table *entry;
55858 int nr_files = 0;
55859 int nr_dirs = 0;
55860 int err = -ENOMEM;
55861@@ -1319,10 +1353,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
55862 nr_files++;
55863 }
55864
55865- files = table;
55866 /* If there are mixed files and directories we need a new table */
55867 if (nr_dirs && nr_files) {
55868- struct ctl_table *new;
55869+ ctl_table_no_const *new;
55870 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
55871 GFP_KERNEL);
55872 if (!files)
55873@@ -1340,7 +1373,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
55874 /* Register everything except a directory full of subdirectories */
55875 if (nr_files || !nr_dirs) {
55876 struct ctl_table_header *header;
55877- header = __register_sysctl_table(set, path, files);
55878+ header = __register_sysctl_table(set, path, files ? files : table);
55879 if (!header) {
55880 kfree(ctl_table_arg);
55881 goto out;
55882diff --git a/fs/proc/root.c b/fs/proc/root.c
55883index 9c7fab1..ed1c8e0 100644
55884--- a/fs/proc/root.c
55885+++ b/fs/proc/root.c
55886@@ -180,7 +180,15 @@ void __init proc_root_init(void)
55887 #ifdef CONFIG_PROC_DEVICETREE
55888 proc_device_tree_init();
55889 #endif
55890+#ifdef CONFIG_GRKERNSEC_PROC_ADD
55891+#ifdef CONFIG_GRKERNSEC_PROC_USER
55892+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
55893+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55894+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
55895+#endif
55896+#else
55897 proc_mkdir("bus", NULL);
55898+#endif
55899 proc_sys_init();
55900 }
55901
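
The /proc ownership and mode changes in this patch (proc_pid_make_inode, pid_getattr, pid_revalidate, proc_pid_instantiate, proc_get_inode, and the proc_root_init hunk above) all choose among the same three directory modes: owner-only under GRKERNSEC_PROC_USER, owner-plus-group under GRKERNSEC_PROC_USERGROUP, and the stock world-readable mode otherwise. As a quick reference, this small standalone program, with the kernel-only S_IRUGO/S_IXUGO shorthands redefined locally, prints what those symbolic combinations work out to in octal:

#include <stdio.h>
#include <sys/stat.h>

/* Userspace headers lack the kernel's S_IRUGO/S_IXUGO shorthands. */
#define S_IRUGO (S_IRUSR | S_IRGRP | S_IROTH)
#define S_IXUGO (S_IXUSR | S_IXGRP | S_IXOTH)

int main(void)
{
    printf("GRKERNSEC_PROC_USER:      %06o\n",
           S_IFDIR | S_IRUSR | S_IXUSR);                  /* 040500 */
    printf("GRKERNSEC_PROC_USERGROUP: %06o\n",
           S_IFDIR | S_IRUSR | S_IRGRP | S_IXUSR | S_IXGRP); /* 040550 */
    printf("default:                  %06o\n",
           S_IFDIR | S_IRUGO | S_IXUGO);                  /* 040555 */
    return 0;
}
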
55902diff --git a/fs/proc/self.c b/fs/proc/self.c
55903index aa5cc3b..c91a5d0 100644
55904--- a/fs/proc/self.c
55905+++ b/fs/proc/self.c
55906@@ -37,7 +37,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
55907 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
55908 void *cookie)
55909 {
55910- char *s = nd_get_link(nd);
55911+ const char *s = nd_get_link(nd);
55912 if (!IS_ERR(s))
55913 kfree(s);
55914 }
55915diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
55916index ca5ce7f..02c1cf0 100644
55917--- a/fs/proc/task_mmu.c
55918+++ b/fs/proc/task_mmu.c
55919@@ -11,12 +11,19 @@
55920 #include <linux/rmap.h>
55921 #include <linux/swap.h>
55922 #include <linux/swapops.h>
55923+#include <linux/grsecurity.h>
55924
55925 #include <asm/elf.h>
55926 #include <asm/uaccess.h>
55927 #include <asm/tlbflush.h>
55928 #include "internal.h"
55929
55930+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55931+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
55932+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
55933+ _mm->pax_flags & MF_PAX_SEGMEXEC))
55934+#endif
55935+
55936 void task_mem(struct seq_file *m, struct mm_struct *mm)
55937 {
55938 unsigned long data, text, lib, swap;
55939@@ -52,8 +59,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
55940 "VmExe:\t%8lu kB\n"
55941 "VmLib:\t%8lu kB\n"
55942 "VmPTE:\t%8lu kB\n"
55943- "VmSwap:\t%8lu kB\n",
55944- hiwater_vm << (PAGE_SHIFT-10),
55945+ "VmSwap:\t%8lu kB\n"
55946+
55947+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
55948+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
55949+#endif
55950+
55951+ ,hiwater_vm << (PAGE_SHIFT-10),
55952 total_vm << (PAGE_SHIFT-10),
55953 mm->locked_vm << (PAGE_SHIFT-10),
55954 mm->pinned_vm << (PAGE_SHIFT-10),
55955@@ -62,7 +74,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
55956 data << (PAGE_SHIFT-10),
55957 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
55958 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
55959- swap << (PAGE_SHIFT-10));
55960+ swap << (PAGE_SHIFT-10)
55961+
55962+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
55963+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55964+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
55965+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
55966+#else
55967+ , mm->context.user_cs_base
55968+ , mm->context.user_cs_limit
55969+#endif
55970+#endif
55971+
55972+ );
55973 }
55974
55975 unsigned long task_vsize(struct mm_struct *mm)
55976@@ -277,13 +301,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
55977 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
55978 }
55979
55980- /* We don't show the stack guard page in /proc/maps */
55981+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55982+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
55983+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
55984+#else
55985 start = vma->vm_start;
55986- if (stack_guard_page_start(vma, start))
55987- start += PAGE_SIZE;
55988 end = vma->vm_end;
55989- if (stack_guard_page_end(vma, end))
55990- end -= PAGE_SIZE;
55991+#endif
55992
55993 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
55994 start,
55995@@ -292,7 +316,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
55996 flags & VM_WRITE ? 'w' : '-',
55997 flags & VM_EXEC ? 'x' : '-',
55998 flags & VM_MAYSHARE ? 's' : 'p',
55999+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56000+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
56001+#else
56002 pgoff,
56003+#endif
56004 MAJOR(dev), MINOR(dev), ino, &len);
56005
56006 /*
56007@@ -301,7 +329,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
56008 */
56009 if (file) {
56010 pad_len_spaces(m, len);
56011- seq_path(m, &file->f_path, "\n");
56012+ seq_path(m, &file->f_path, "\n\\");
56013 goto done;
56014 }
56015
56016@@ -327,8 +355,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
56017 * Thread stack in /proc/PID/task/TID/maps or
56018 * the main process stack.
56019 */
56020- if (!is_pid || (vma->vm_start <= mm->start_stack &&
56021- vma->vm_end >= mm->start_stack)) {
56022+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
56023+ (vma->vm_start <= mm->start_stack &&
56024+ vma->vm_end >= mm->start_stack)) {
56025 name = "[stack]";
56026 } else {
56027 /* Thread stack in /proc/PID/maps */
56028@@ -352,6 +381,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
56029 struct proc_maps_private *priv = m->private;
56030 struct task_struct *task = priv->task;
56031
56032+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56033+ if (current->exec_id != m->exec_id) {
56034+ gr_log_badprocpid("maps");
56035+ return 0;
56036+ }
56037+#endif
56038+
56039 show_map_vma(m, vma, is_pid);
56040
56041 if (m->count < m->size) /* vma is copied successfully */
56042@@ -589,12 +625,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
56043 .private = &mss,
56044 };
56045
56046+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56047+ if (current->exec_id != m->exec_id) {
56048+ gr_log_badprocpid("smaps");
56049+ return 0;
56050+ }
56051+#endif
56052 memset(&mss, 0, sizeof mss);
56053- mss.vma = vma;
56054- /* mmap_sem is held in m_start */
56055- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
56056- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
56057-
56058+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56059+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
56060+#endif
56061+ mss.vma = vma;
56062+ /* mmap_sem is held in m_start */
56063+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
56064+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
56065+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56066+ }
56067+#endif
56068 show_map_vma(m, vma, is_pid);
56069
56070 seq_printf(m,
56071@@ -612,7 +659,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
56072 "KernelPageSize: %8lu kB\n"
56073 "MMUPageSize: %8lu kB\n"
56074 "Locked: %8lu kB\n",
56075+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56076+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
56077+#else
56078 (vma->vm_end - vma->vm_start) >> 10,
56079+#endif
56080 mss.resident >> 10,
56081 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
56082 mss.shared_clean >> 10,
56083@@ -1264,6 +1315,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
56084 int n;
56085 char buffer[50];
56086
56087+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56088+ if (current->exec_id != m->exec_id) {
56089+ gr_log_badprocpid("numa_maps");
56090+ return 0;
56091+ }
56092+#endif
56093+
56094 if (!mm)
56095 return 0;
56096
56097@@ -1281,11 +1339,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
56098 mpol_to_str(buffer, sizeof(buffer), pol);
56099 mpol_cond_put(pol);
56100
56101+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56102+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
56103+#else
56104 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
56105+#endif
56106
56107 if (file) {
56108 seq_printf(m, " file=");
56109- seq_path(m, &file->f_path, "\n\t= ");
56110+ seq_path(m, &file->f_path, "\n\t\\= ");
56111 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
56112 seq_printf(m, " heap");
56113 } else {
56114diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
56115index 1ccfa53..0848f95 100644
56116--- a/fs/proc/task_nommu.c
56117+++ b/fs/proc/task_nommu.c
56118@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
56119 else
56120 bytes += kobjsize(mm);
56121
56122- if (current->fs && current->fs->users > 1)
56123+ if (current->fs && atomic_read(&current->fs->users) > 1)
56124 sbytes += kobjsize(current->fs);
56125 else
56126 bytes += kobjsize(current->fs);
56127@@ -168,7 +168,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
56128
56129 if (file) {
56130 pad_len_spaces(m, len);
56131- seq_path(m, &file->f_path, "");
56132+ seq_path(m, &file->f_path, "\n\\");
56133 } else if (mm) {
56134 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
56135
56136diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
56137index b00fcc9..e0c6381 100644
56138--- a/fs/qnx6/qnx6.h
56139+++ b/fs/qnx6/qnx6.h
56140@@ -74,7 +74,7 @@ enum {
56141 BYTESEX_BE,
56142 };
56143
56144-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
56145+static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
56146 {
56147 if (sbi->s_bytesex == BYTESEX_LE)
56148 return le64_to_cpu((__force __le64)n);
56149@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
56150 return (__force __fs64)cpu_to_be64(n);
56151 }
56152
56153-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
56154+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
56155 {
56156 if (sbi->s_bytesex == BYTESEX_LE)
56157 return le32_to_cpu((__force __le32)n);
56158diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
56159index 16e8abb..2dcf914 100644
56160--- a/fs/quota/netlink.c
56161+++ b/fs/quota/netlink.c
56162@@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
56163 void quota_send_warning(struct kqid qid, dev_t dev,
56164 const char warntype)
56165 {
56166- static atomic_t seq;
56167+ static atomic_unchecked_t seq;
56168 struct sk_buff *skb;
56169 void *msg_head;
56170 int ret;
56171@@ -49,7 +49,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
56172 "VFS: Not enough memory to send quota warning.\n");
56173 return;
56174 }
56175- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
56176+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
56177 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
56178 if (!msg_head) {
56179 printk(KERN_ERR
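
The atomic_unchecked_t conversion above recurs throughout this patch: counters whose wraparound is harmless, such as this netlink sequence number, are moved to the unchecked type so that PaX's REFCOUNT overflow detection only watches true reference counts. A rough userspace analog, assuming nothing beyond C11 atomics and that the counter may wrap by design (next_seq is an illustrative name):

#include <stdatomic.h>
#include <stdio.h>

/* A wrapping sequence counter: fetch-and-increment with relaxed
 * ordering, letting the unsigned value wrap modulo 2^32 on purpose,
 * much like the quota warning sequence number above. */
static atomic_uint seq;

static unsigned int next_seq(void)
{
    return atomic_fetch_add_explicit(&seq, 1u, memory_order_relaxed) + 1u;
}

int main(void)
{
    for (int i = 0; i < 3; i++)
        printf("seq=%u\n", next_seq());
    return 0;
}
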
56180diff --git a/fs/readdir.c b/fs/readdir.c
56181index 5e69ef5..e5d9099 100644
56182--- a/fs/readdir.c
56183+++ b/fs/readdir.c
56184@@ -17,6 +17,7 @@
56185 #include <linux/security.h>
56186 #include <linux/syscalls.h>
56187 #include <linux/unistd.h>
56188+#include <linux/namei.h>
56189
56190 #include <asm/uaccess.h>
56191
56192@@ -67,6 +68,7 @@ struct old_linux_dirent {
56193
56194 struct readdir_callback {
56195 struct old_linux_dirent __user * dirent;
56196+ struct file * file;
56197 int result;
56198 };
56199
56200@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
56201 buf->result = -EOVERFLOW;
56202 return -EOVERFLOW;
56203 }
56204+
56205+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
56206+ return 0;
56207+
56208 buf->result++;
56209 dirent = buf->dirent;
56210 if (!access_ok(VERIFY_WRITE, dirent,
56211@@ -114,6 +120,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
56212
56213 buf.result = 0;
56214 buf.dirent = dirent;
56215+ buf.file = f.file;
56216
56217 error = vfs_readdir(f.file, fillonedir, &buf);
56218 if (buf.result)
56219@@ -139,6 +146,7 @@ struct linux_dirent {
56220 struct getdents_callback {
56221 struct linux_dirent __user * current_dir;
56222 struct linux_dirent __user * previous;
56223+ struct file * file;
56224 int count;
56225 int error;
56226 };
56227@@ -160,6 +168,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
56228 buf->error = -EOVERFLOW;
56229 return -EOVERFLOW;
56230 }
56231+
56232+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
56233+ return 0;
56234+
56235 dirent = buf->previous;
56236 if (dirent) {
56237 if (__put_user(offset, &dirent->d_off))
56238@@ -205,6 +217,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
56239 buf.previous = NULL;
56240 buf.count = count;
56241 buf.error = 0;
56242+ buf.file = f.file;
56243
56244 error = vfs_readdir(f.file, filldir, &buf);
56245 if (error >= 0)
56246@@ -223,6 +236,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
56247 struct getdents_callback64 {
56248 struct linux_dirent64 __user * current_dir;
56249 struct linux_dirent64 __user * previous;
56250+ struct file *file;
56251 int count;
56252 int error;
56253 };
56254@@ -238,6 +252,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
56255 buf->error = -EINVAL; /* only used if we fail.. */
56256 if (reclen > buf->count)
56257 return -EINVAL;
56258+
56259+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
56260+ return 0;
56261+
56262 dirent = buf->previous;
56263 if (dirent) {
56264 if (__put_user(offset, &dirent->d_off))
56265@@ -283,6 +301,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
56266
56267 buf.current_dir = dirent;
56268 buf.previous = NULL;
56269+ buf.file = f.file;
56270 buf.count = count;
56271 buf.error = 0;
56272
56273@@ -291,7 +310,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
56274 error = buf.error;
56275 lastdirent = buf.previous;
56276 if (lastdirent) {
56277- typeof(lastdirent->d_off) d_off = f.file->f_pos;
56278+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
56279 if (__put_user(d_off, &lastdirent->d_off))
56280 error = -EFAULT;
56281 else
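
All three readdir hunks above follow one scheme: each getdents-style callback structure gains a struct file * back-pointer, and the fill callbacks consult gr_acl_handle_filldir() before emitting an entry, returning 0 to skip it silently rather than aborting the walk. A hypothetical userspace analog of that veto-style filtering, built on POSIX scandir() (the dotfile policy is only an example):

#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>

/* Veto filter: return 0 to hide an entry, nonzero to keep it, the
 * same shape as the per-entry policy check in the patched filldirs. */
static int visible(const struct dirent *d)
{
    return d->d_name[0] != '.';   /* example policy: hide dotfiles */
}

int main(void)
{
    struct dirent **list;
    int n = scandir(".", &list, visible, alphasort);

    if (n < 0)
        return 1;
    for (int i = 0; i < n; i++) {
        printf("%s\n", list[i]->d_name);
        free(list[i]);
    }
    free(list);
    return 0;
}
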
56282diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
56283index 2b7882b..1c5ef48 100644
56284--- a/fs/reiserfs/do_balan.c
56285+++ b/fs/reiserfs/do_balan.c
56286@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
56287 return;
56288 }
56289
56290- atomic_inc(&(fs_generation(tb->tb_sb)));
56291+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
56292 do_balance_starts(tb);
56293
56294 /* balance leaf returns 0 except if combining L R and S into
56295diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
56296index e60e870..f40ac16 100644
56297--- a/fs/reiserfs/procfs.c
56298+++ b/fs/reiserfs/procfs.c
56299@@ -112,7 +112,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
56300 "SMALL_TAILS " : "NO_TAILS ",
56301 replay_only(sb) ? "REPLAY_ONLY " : "",
56302 convert_reiserfs(sb) ? "CONV " : "",
56303- atomic_read(&r->s_generation_counter),
56304+ atomic_read_unchecked(&r->s_generation_counter),
56305 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
56306 SF(s_do_balance), SF(s_unneeded_left_neighbor),
56307 SF(s_good_search_by_key_reada), SF(s_bmaps),
56308diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
56309index 157e474..65a6114 100644
56310--- a/fs/reiserfs/reiserfs.h
56311+++ b/fs/reiserfs/reiserfs.h
56312@@ -453,7 +453,7 @@ struct reiserfs_sb_info {
56313 /* Comment? -Hans */
56314 wait_queue_head_t s_wait;
56315 /* To be obsoleted soon by per buffer seals.. -Hans */
56316- atomic_t s_generation_counter; // increased by one every time the
56317+ atomic_unchecked_t s_generation_counter; // increased by one every time the
56318 // tree gets re-balanced
56319 unsigned long s_properties; /* File system properties. Currently holds
56320 on-disk FS format */
56321@@ -1978,7 +1978,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
56322 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
56323
56324 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
56325-#define get_generation(s) atomic_read (&fs_generation(s))
56326+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
56327 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
56328 #define __fs_changed(gen,s) (gen != get_generation (s))
56329 #define fs_changed(gen,s) \
56330diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
56331index c196369..4cce1d9 100644
56332--- a/fs/reiserfs/xattr.c
56333+++ b/fs/reiserfs/xattr.c
56334@@ -187,8 +187,8 @@ fill_with_dentries(void *buf, const char *name, int namelen, loff_t offset,
56335 if (dbuf->count == ARRAY_SIZE(dbuf->dentries))
56336 return -ENOSPC;
56337
56338- if (name[0] == '.' && (name[1] == '\0' ||
56339- (name[1] == '.' && name[2] == '\0')))
56340+ if (name[0] == '.' && (namelen < 2 ||
56341+ (namelen == 2 && name[1] == '.')))
56342 return 0;
56343
56344 dentry = lookup_one_len(name, dbuf->xadir, namelen);
56345diff --git a/fs/select.c b/fs/select.c
56346index 2ef72d9..f213b17 100644
56347--- a/fs/select.c
56348+++ b/fs/select.c
56349@@ -20,6 +20,7 @@
56350 #include <linux/export.h>
56351 #include <linux/slab.h>
56352 #include <linux/poll.h>
56353+#include <linux/security.h>
56354 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
56355 #include <linux/file.h>
56356 #include <linux/fdtable.h>
56357@@ -826,6 +827,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
56358 struct poll_list *walk = head;
56359 unsigned long todo = nfds;
56360
56361+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
56362 if (nfds > rlimit(RLIMIT_NOFILE))
56363 return -EINVAL;
56364
56365diff --git a/fs/seq_file.c b/fs/seq_file.c
56366index f2bc3df..239d4f6 100644
56367--- a/fs/seq_file.c
56368+++ b/fs/seq_file.c
56369@@ -10,6 +10,7 @@
56370 #include <linux/seq_file.h>
56371 #include <linux/slab.h>
56372 #include <linux/cred.h>
56373+#include <linux/sched.h>
56374
56375 #include <asm/uaccess.h>
56376 #include <asm/page.h>
56377@@ -60,6 +61,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
56378 #ifdef CONFIG_USER_NS
56379 p->user_ns = file->f_cred->user_ns;
56380 #endif
56381+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56382+ p->exec_id = current->exec_id;
56383+#endif
56384
56385 /*
56386 * Wrappers around seq_open(e.g. swaps_open) need to be
56387@@ -96,7 +100,7 @@ static int traverse(struct seq_file *m, loff_t offset)
56388 return 0;
56389 }
56390 if (!m->buf) {
56391- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
56392+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
56393 if (!m->buf)
56394 return -ENOMEM;
56395 }
56396@@ -136,7 +140,7 @@ static int traverse(struct seq_file *m, loff_t offset)
56397 Eoverflow:
56398 m->op->stop(m, p);
56399 kfree(m->buf);
56400- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
56401+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
56402 return !m->buf ? -ENOMEM : -EAGAIN;
56403 }
56404
56405@@ -191,7 +195,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
56406
56407 /* grab buffer if we didn't have one */
56408 if (!m->buf) {
56409- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
56410+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
56411 if (!m->buf)
56412 goto Enomem;
56413 }
56414@@ -232,7 +236,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
56415 goto Fill;
56416 m->op->stop(m, p);
56417 kfree(m->buf);
56418- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
56419+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
56420 if (!m->buf)
56421 goto Enomem;
56422 m->count = 0;
56423@@ -581,7 +585,7 @@ static void single_stop(struct seq_file *p, void *v)
56424 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
56425 void *data)
56426 {
56427- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
56428+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
56429 int res = -ENOMEM;
56430
56431 if (op) {
56432diff --git a/fs/splice.c b/fs/splice.c
56433index 6909d89..5b2e8f9 100644
56434--- a/fs/splice.c
56435+++ b/fs/splice.c
56436@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
56437 pipe_lock(pipe);
56438
56439 for (;;) {
56440- if (!pipe->readers) {
56441+ if (!atomic_read(&pipe->readers)) {
56442 send_sig(SIGPIPE, current, 0);
56443 if (!ret)
56444 ret = -EPIPE;
56445@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
56446 do_wakeup = 0;
56447 }
56448
56449- pipe->waiting_writers++;
56450+ atomic_inc(&pipe->waiting_writers);
56451 pipe_wait(pipe);
56452- pipe->waiting_writers--;
56453+ atomic_dec(&pipe->waiting_writers);
56454 }
56455
56456 pipe_unlock(pipe);
56457@@ -563,7 +563,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
56458 old_fs = get_fs();
56459 set_fs(get_ds());
56460 /* The cast to a user pointer is valid due to the set_fs() */
56461- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
56462+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
56463 set_fs(old_fs);
56464
56465 return res;
56466@@ -578,7 +578,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
56467 old_fs = get_fs();
56468 set_fs(get_ds());
56469 /* The cast to a user pointer is valid due to the set_fs() */
56470- res = vfs_write(file, (const char __user *)buf, count, &pos);
56471+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
56472 set_fs(old_fs);
56473
56474 return res;
56475@@ -630,7 +630,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
56476 goto err;
56477
56478 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
56479- vec[i].iov_base = (void __user *) page_address(page);
56480+ vec[i].iov_base = (void __force_user *) page_address(page);
56481 vec[i].iov_len = this_len;
56482 spd.pages[i] = page;
56483 spd.nr_pages++;
56484@@ -851,10 +851,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
56485 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
56486 {
56487 while (!pipe->nrbufs) {
56488- if (!pipe->writers)
56489+ if (!atomic_read(&pipe->writers))
56490 return 0;
56491
56492- if (!pipe->waiting_writers && sd->num_spliced)
56493+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
56494 return 0;
56495
56496 if (sd->flags & SPLICE_F_NONBLOCK)
56497@@ -1189,7 +1189,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
56498 * out of the pipe right after the splice_to_pipe(). So set
56499 * PIPE_READERS appropriately.
56500 */
56501- pipe->readers = 1;
56502+ atomic_set(&pipe->readers, 1);
56503
56504 current->splice_pipe = pipe;
56505 }
56506@@ -1738,9 +1738,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
56507 ret = -ERESTARTSYS;
56508 break;
56509 }
56510- if (!pipe->writers)
56511+ if (!atomic_read(&pipe->writers))
56512 break;
56513- if (!pipe->waiting_writers) {
56514+ if (!atomic_read(&pipe->waiting_writers)) {
56515 if (flags & SPLICE_F_NONBLOCK) {
56516 ret = -EAGAIN;
56517 break;
56518@@ -1772,7 +1772,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
56519 pipe_lock(pipe);
56520
56521 while (pipe->nrbufs >= pipe->buffers) {
56522- if (!pipe->readers) {
56523+ if (!atomic_read(&pipe->readers)) {
56524 send_sig(SIGPIPE, current, 0);
56525 ret = -EPIPE;
56526 break;
56527@@ -1785,9 +1785,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
56528 ret = -ERESTARTSYS;
56529 break;
56530 }
56531- pipe->waiting_writers++;
56532+ atomic_inc(&pipe->waiting_writers);
56533 pipe_wait(pipe);
56534- pipe->waiting_writers--;
56535+ atomic_dec(&pipe->waiting_writers);
56536 }
56537
56538 pipe_unlock(pipe);
56539@@ -1823,14 +1823,14 @@ retry:
56540 pipe_double_lock(ipipe, opipe);
56541
56542 do {
56543- if (!opipe->readers) {
56544+ if (!atomic_read(&opipe->readers)) {
56545 send_sig(SIGPIPE, current, 0);
56546 if (!ret)
56547 ret = -EPIPE;
56548 break;
56549 }
56550
56551- if (!ipipe->nrbufs && !ipipe->writers)
56552+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
56553 break;
56554
56555 /*
56556@@ -1927,7 +1927,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
56557 pipe_double_lock(ipipe, opipe);
56558
56559 do {
56560- if (!opipe->readers) {
56561+ if (!atomic_read(&opipe->readers)) {
56562 send_sig(SIGPIPE, current, 0);
56563 if (!ret)
56564 ret = -EPIPE;
56565@@ -1972,7 +1972,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
56566 * return EAGAIN if we have the potential of some data in the
56567 * future, otherwise just return 0
56568 */
56569- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
56570+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
56571 ret = -EAGAIN;
56572
56573 pipe_unlock(ipipe);
56574diff --git a/fs/stat.c b/fs/stat.c
56575index 14f4545..9b7f55b 100644
56576--- a/fs/stat.c
56577+++ b/fs/stat.c
56578@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
56579 stat->gid = inode->i_gid;
56580 stat->rdev = inode->i_rdev;
56581 stat->size = i_size_read(inode);
56582- stat->atime = inode->i_atime;
56583- stat->mtime = inode->i_mtime;
56584+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
56585+ stat->atime = inode->i_ctime;
56586+ stat->mtime = inode->i_ctime;
56587+ } else {
56588+ stat->atime = inode->i_atime;
56589+ stat->mtime = inode->i_mtime;
56590+ }
56591 stat->ctime = inode->i_ctime;
56592 stat->blksize = (1 << inode->i_blkbits);
56593 stat->blocks = inode->i_blocks;
56594@@ -46,8 +51,14 @@ int vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
56595 if (retval)
56596 return retval;
56597
56598- if (inode->i_op->getattr)
56599- return inode->i_op->getattr(mnt, dentry, stat);
56600+ if (inode->i_op->getattr) {
56601+ retval = inode->i_op->getattr(mnt, dentry, stat);
56602+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
56603+ stat->atime = stat->ctime;
56604+ stat->mtime = stat->ctime;
56605+ }
56606+ return retval;
56607+ }
56608
56609 generic_fillattr(inode, stat);
56610 return 0;
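
Both stat hunks above apply the same mitigation: for inodes that is_sidechannel_device() flags (terminal devices, for instance), the reported access and modification times are pinned to the change time unless the caller holds CAP_MKNOD, blunting timestamp-based snooping on other users' activity. A minimal sketch of just the clamping step, applied here to an ordinary struct stat from stat(2); hide_access_times is an illustrative name:

#include <stdio.h>
#include <sys/stat.h>

/* Overwrite atime/mtime with ctime so an observer cannot use them to
 * infer when the file was last read or written. */
static void hide_access_times(struct stat *st)
{
    st->st_atime = st->st_ctime;
    st->st_mtime = st->st_ctime;
}

int main(void)
{
    struct stat st;

    if (stat("/dev/null", &st) != 0)
        return 1;
    hide_access_times(&st);
    printf("atime = mtime = ctime = %ld\n", (long)st.st_ctime);
    return 0;
}
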
56611diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
56612index 614b2b5..4d321e6 100644
56613--- a/fs/sysfs/bin.c
56614+++ b/fs/sysfs/bin.c
56615@@ -235,13 +235,13 @@ static int bin_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
56616 return ret;
56617 }
56618
56619-static int bin_access(struct vm_area_struct *vma, unsigned long addr,
56620- void *buf, int len, int write)
56621+static ssize_t bin_access(struct vm_area_struct *vma, unsigned long addr,
56622+ void *buf, size_t len, int write)
56623 {
56624 struct file *file = vma->vm_file;
56625 struct bin_buffer *bb = file->private_data;
56626 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
56627- int ret;
56628+ ssize_t ret;
56629
56630 if (!bb->vm_ops)
56631 return -EINVAL;
56632diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
56633index 1f8c823..ed57cfe 100644
56634--- a/fs/sysfs/dir.c
56635+++ b/fs/sysfs/dir.c
56636@@ -40,7 +40,7 @@ static DEFINE_IDA(sysfs_ino_ida);
56637 *
56638 * Returns 31 bit hash of ns + name (so it fits in an off_t )
56639 */
56640-static unsigned int sysfs_name_hash(const void *ns, const char *name)
56641+static unsigned int sysfs_name_hash(const void *ns, const unsigned char *name)
56642 {
56643 unsigned long hash = init_name_hash();
56644 unsigned int len = strlen(name);
56645@@ -685,6 +685,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
56646 struct sysfs_dirent *sd;
56647 int rc;
56648
56649+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
56650+ const char *parent_name = parent_sd->s_name;
56651+
56652+ mode = S_IFDIR | S_IRWXU;
56653+
56654+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
56655+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
56656+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
56657+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
56658+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
56659+#endif
56660+
56661 /* allocate */
56662 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
56663 if (!sd)
56664diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
56665index 602f56d..6853db8 100644
56666--- a/fs/sysfs/file.c
56667+++ b/fs/sysfs/file.c
56668@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
56669
56670 struct sysfs_open_dirent {
56671 atomic_t refcnt;
56672- atomic_t event;
56673+ atomic_unchecked_t event;
56674 wait_queue_head_t poll;
56675 struct list_head buffers; /* goes through sysfs_buffer.list */
56676 };
56677@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
56678 if (!sysfs_get_active(attr_sd))
56679 return -ENODEV;
56680
56681- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
56682+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
56683 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
56684
56685 sysfs_put_active(attr_sd);
56686@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
56687 return -ENOMEM;
56688
56689 atomic_set(&new_od->refcnt, 0);
56690- atomic_set(&new_od->event, 1);
56691+ atomic_set_unchecked(&new_od->event, 1);
56692 init_waitqueue_head(&new_od->poll);
56693 INIT_LIST_HEAD(&new_od->buffers);
56694 goto retry;
56695@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
56696
56697 sysfs_put_active(attr_sd);
56698
56699- if (buffer->event != atomic_read(&od->event))
56700+ if (buffer->event != atomic_read_unchecked(&od->event))
56701 goto trigger;
56702
56703 return DEFAULT_POLLMASK;
56704@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
56705
56706 od = sd->s_attr.open;
56707 if (od) {
56708- atomic_inc(&od->event);
56709+ atomic_inc_unchecked(&od->event);
56710 wake_up_interruptible(&od->poll);
56711 }
56712
56713diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
56714index 3c9eb56..9dea5be 100644
56715--- a/fs/sysfs/symlink.c
56716+++ b/fs/sysfs/symlink.c
56717@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
56718
56719 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
56720 {
56721- char *page = nd_get_link(nd);
56722+ const char *page = nd_get_link(nd);
56723 if (!IS_ERR(page))
56724 free_page((unsigned long)page);
56725 }
56726diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
56727index 69d4889..a810bd4 100644
56728--- a/fs/sysv/sysv.h
56729+++ b/fs/sysv/sysv.h
56730@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
56731 #endif
56732 }
56733
56734-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
56735+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
56736 {
56737 if (sbi->s_bytesex == BYTESEX_PDP)
56738 return PDP_swab((__force __u32)n);
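
The __intentional_overflow(-1) annotation being added to the byte-order helpers here (and in qnx6 above, and ubifs/ufs below) is consumed by the PaX size_overflow GCC plugin: it marks functions whose arithmetic is allowed to wrap, so the plugin does not flag them as overflow bugs. A standalone sketch of the idea, assuming no plugin is present so the attribute macro degrades to nothing (the real definition lives in the patched kernel's compiler headers):

#include <stdint.h>
#include <stdio.h>

/* With the size_overflow plugin, the attribute exempts the function
 * from overflow instrumentation; without it, it expands to nothing. */
#define __intentional_overflow(...)

static inline uint32_t __intentional_overflow(-1)
wrap_add(uint32_t a, uint32_t b)
{
    return a + b;   /* may wrap modulo 2^32 by design */
}

int main(void)
{
    printf("%u\n", wrap_add(0xffffffffu, 2u));   /* prints 1 */
    return 0;
}
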
56739diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
56740index e18b988..f1d4ad0f 100644
56741--- a/fs/ubifs/io.c
56742+++ b/fs/ubifs/io.c
56743@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
56744 return err;
56745 }
56746
56747-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
56748+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
56749 {
56750 int err;
56751
56752diff --git a/fs/udf/misc.c b/fs/udf/misc.c
56753index c175b4d..8f36a16 100644
56754--- a/fs/udf/misc.c
56755+++ b/fs/udf/misc.c
56756@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
56757
56758 u8 udf_tag_checksum(const struct tag *t)
56759 {
56760- u8 *data = (u8 *)t;
56761+ const u8 *data = (const u8 *)t;
56762 u8 checksum = 0;
56763 int i;
56764 for (i = 0; i < sizeof(struct tag); ++i)
56765diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
56766index 8d974c4..b82f6ec 100644
56767--- a/fs/ufs/swab.h
56768+++ b/fs/ufs/swab.h
56769@@ -22,7 +22,7 @@ enum {
56770 BYTESEX_BE
56771 };
56772
56773-static inline u64
56774+static inline u64 __intentional_overflow(-1)
56775 fs64_to_cpu(struct super_block *sbp, __fs64 n)
56776 {
56777 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
56778@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
56779 return (__force __fs64)cpu_to_be64(n);
56780 }
56781
56782-static inline u32
56783+static inline u32 __intentional_overflow(-1)
56784 fs32_to_cpu(struct super_block *sbp, __fs32 n)
56785 {
56786 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
56787diff --git a/fs/utimes.c b/fs/utimes.c
56788index f4fb7ec..3fe03c0 100644
56789--- a/fs/utimes.c
56790+++ b/fs/utimes.c
56791@@ -1,6 +1,7 @@
56792 #include <linux/compiler.h>
56793 #include <linux/file.h>
56794 #include <linux/fs.h>
56795+#include <linux/security.h>
56796 #include <linux/linkage.h>
56797 #include <linux/mount.h>
56798 #include <linux/namei.h>
56799@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
56800 goto mnt_drop_write_and_out;
56801 }
56802 }
56803+
56804+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
56805+ error = -EACCES;
56806+ goto mnt_drop_write_and_out;
56807+ }
56808+
56809 mutex_lock(&inode->i_mutex);
56810 error = notify_change(path->dentry, &newattrs);
56811 mutex_unlock(&inode->i_mutex);
56812diff --git a/fs/xattr.c b/fs/xattr.c
56813index 3377dff..4feded6 100644
56814--- a/fs/xattr.c
56815+++ b/fs/xattr.c
56816@@ -319,7 +319,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
56817 * Extended attribute SET operations
56818 */
56819 static long
56820-setxattr(struct dentry *d, const char __user *name, const void __user *value,
56821+setxattr(struct path *path, const char __user *name, const void __user *value,
56822 size_t size, int flags)
56823 {
56824 int error;
56825@@ -355,7 +355,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
56826 posix_acl_fix_xattr_from_user(kvalue, size);
56827 }
56828
56829- error = vfs_setxattr(d, kname, kvalue, size, flags);
56830+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
56831+ error = -EACCES;
56832+ goto out;
56833+ }
56834+
56835+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
56836 out:
56837 if (vvalue)
56838 vfree(vvalue);
56839@@ -377,7 +382,7 @@ retry:
56840 return error;
56841 error = mnt_want_write(path.mnt);
56842 if (!error) {
56843- error = setxattr(path.dentry, name, value, size, flags);
56844+ error = setxattr(&path, name, value, size, flags);
56845 mnt_drop_write(path.mnt);
56846 }
56847 path_put(&path);
56848@@ -401,7 +406,7 @@ retry:
56849 return error;
56850 error = mnt_want_write(path.mnt);
56851 if (!error) {
56852- error = setxattr(path.dentry, name, value, size, flags);
56853+ error = setxattr(&path, name, value, size, flags);
56854 mnt_drop_write(path.mnt);
56855 }
56856 path_put(&path);
56857@@ -416,16 +421,14 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
56858 const void __user *,value, size_t, size, int, flags)
56859 {
56860 struct fd f = fdget(fd);
56861- struct dentry *dentry;
56862 int error = -EBADF;
56863
56864 if (!f.file)
56865 return error;
56866- dentry = f.file->f_path.dentry;
56867- audit_inode(NULL, dentry, 0);
56868+ audit_inode(NULL, f.file->f_path.dentry, 0);
56869 error = mnt_want_write_file(f.file);
56870 if (!error) {
56871- error = setxattr(dentry, name, value, size, flags);
56872+ error = setxattr(&f.file->f_path, name, value, size, flags);
56873 mnt_drop_write_file(f.file);
56874 }
56875 fdput(f);
56876diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
56877index 9fbea87..6b19972 100644
56878--- a/fs/xattr_acl.c
56879+++ b/fs/xattr_acl.c
56880@@ -76,8 +76,8 @@ struct posix_acl *
56881 posix_acl_from_xattr(struct user_namespace *user_ns,
56882 const void *value, size_t size)
56883 {
56884- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
56885- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
56886+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
56887+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
56888 int count;
56889 struct posix_acl *acl;
56890 struct posix_acl_entry *acl_e;
56891diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
56892index 572a858..12a9b0d 100644
56893--- a/fs/xfs/xfs_bmap.c
56894+++ b/fs/xfs/xfs_bmap.c
56895@@ -192,7 +192,7 @@ xfs_bmap_validate_ret(
56896 int nmap,
56897 int ret_nmap);
56898 #else
56899-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
56900+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
56901 #endif /* DEBUG */
56902
56903 STATIC int
56904diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
56905index 1b9fc3e..e1bdde0 100644
56906--- a/fs/xfs/xfs_dir2_sf.c
56907+++ b/fs/xfs/xfs_dir2_sf.c
56908@@ -851,7 +851,15 @@ xfs_dir2_sf_getdents(
56909 }
56910
56911 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
56912- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
56913+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
56914+ char name[sfep->namelen];
56915+ memcpy(name, sfep->name, sfep->namelen);
56916+ if (filldir(dirent, name, sfep->namelen,
56917+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
56918+ *offset = off & 0x7fffffff;
56919+ return 0;
56920+ }
56921+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
56922 off & 0x7fffffff, ino, DT_UNKNOWN)) {
56923 *offset = off & 0x7fffffff;
56924 return 0;
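
The xfs_dir2_sf_getdents change above kicks in when the shortform directory lives in the inode's inline data: the entry name is first duplicated into an on-stack buffer, and the copy, rather than a pointer into the inode, is handed to filldir(). The likely motivation is PAX_USERCOPY, whose slab-object checks would reject copying the name to userspace straight out of the inode structure; copying via the stack sidesteps that. A hedged userspace illustration of the defensive-copy idiom (a fixed 256-byte bound instead of the patch's variable-length array; names are illustrative):

#include <stdio.h>
#include <string.h>

/* Hand the callback a private on-stack copy instead of a pointer into
 * storage it must not see directly. */
static void emit(const char *name, size_t len,
                 int (*cb)(const char *, size_t))
{
    char copy[256];

    if (len >= sizeof(copy))
        len = sizeof(copy) - 1;
    memcpy(copy, name, len);
    copy[len] = '\0';
    cb(copy, len);
}

static int print_cb(const char *s, size_t len)
{
    printf("%.*s\n", (int)len, s);
    return 0;
}

int main(void)
{
    emit("example", 7, print_cb);
    return 0;
}
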
56925diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
56926index c1c3ef8..0952438 100644
56927--- a/fs/xfs/xfs_ioctl.c
56928+++ b/fs/xfs/xfs_ioctl.c
56929@@ -127,7 +127,7 @@ xfs_find_handle(
56930 }
56931
56932 error = -EFAULT;
56933- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
56934+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
56935 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
56936 goto out_put;
56937
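The xfs_find_handle() hunk validates the length against the kernel buffer before copy_to_user() runs, so a corrupt hsize can never over-read the on-stack handle. The general pattern, sketched in kernel context with illustrative names:

    static long export_blob(void __user *dst, const void *src,
                            size_t src_size, size_t requested)
    {
            /* reject before any memory is touched */
            if (requested > src_size)
                    return -EFAULT;
            if (copy_to_user(dst, src, requested))
                    return -EFAULT;
            return 0;
    }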
56938diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
56939index d82efaa..0904a8e 100644
56940--- a/fs/xfs/xfs_iops.c
56941+++ b/fs/xfs/xfs_iops.c
56942@@ -395,7 +395,7 @@ xfs_vn_put_link(
56943 struct nameidata *nd,
56944 void *p)
56945 {
56946- char *s = nd_get_link(nd);
56947+ const char *s = nd_get_link(nd);
56948
56949 if (!IS_ERR(s))
56950 kfree(s);
56951diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
56952new file mode 100644
56953index 0000000..92247e4
56954--- /dev/null
56955+++ b/grsecurity/Kconfig
56956@@ -0,0 +1,1021 @@
56957+#
56958+# grsecurity configuration
56959+#
56960+menu "Memory Protections"
56961+depends on GRKERNSEC
56962+
56963+config GRKERNSEC_KMEM
56964+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
56965+ default y if GRKERNSEC_CONFIG_AUTO
56966+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
56967+ help
56968+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
56969+ be written to or read from to modify or leak the contents of the running
56970+ kernel. /dev/port will also not be allowed to be opened and support
56971+ for /dev/cpu/*/msr will be removed. If you have module
56972+ support disabled, enabling this will close up five ways that are
56973+ currently used to insert malicious code into the running kernel.
56974+
56975+ Even with all these features enabled, we still highly recommend that
56976+ you use the RBAC system, as it is still possible for an attacker to
56977+ modify the running kernel through privileged I/O granted by ioperm/iopl.
56978+
56979+ If you are not using XFree86, you may be able to stop this additional
56980+ case by enabling the 'Disable privileged I/O' option. Though nothing
56981+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
56982+ but only to video memory, which is the only writing we allow in this
56983+ case. If /dev/kmem or /dev/mem are mmapped without PROT_WRITE, the
56984+ mappings will not be allowed to be mprotected with PROT_WRITE later.
56985+ Enabling this feature will prevent the "cpupower" and "powertop" tools
56986+ from working.
56987+
56988+ It is highly recommended that you say Y here if you meet all the
56989+ conditions above.
56990+
56991+config GRKERNSEC_VM86
56992+ bool "Restrict VM86 mode"
56993+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
56994+ depends on X86_32
56995+
56996+ help
56997+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
56998+ make use of a special execution mode on 32bit x86 processors called
56999+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
57000+ video cards and will still work with this option enabled. The purpose
57001+ of the option is to prevent exploitation of emulation errors in
57002+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
57003+ Nearly all users should be able to enable this option.
57004+
57005+config GRKERNSEC_IO
57006+ bool "Disable privileged I/O"
57007+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
57008+ depends on X86
57009+ select RTC_CLASS
57010+ select RTC_INTF_DEV
57011+ select RTC_DRV_CMOS
57012+
57013+ help
57014+ If you say Y here, all ioperm and iopl calls will return an error.
57015+ Ioperm and iopl can be used to modify the running kernel.
57016+ Unfortunately, some programs need this access to operate properly,
57017+ the most notable of which are XFree86 and hwclock. hwclock can be
57018+ remedied by having RTC support in the kernel, so real-time
57019+ clock support is enabled if this option is enabled, to ensure
57020+ that hwclock operates correctly. XFree86 still will not
57021+ operate correctly with this option enabled, so DO NOT CHOOSE Y
57022+ IF YOU USE XFree86. If you use XFree86 and you still want to
57023+ protect your kernel against modification, use the RBAC system.
57024+
57025+config GRKERNSEC_JIT_HARDEN
57026+ bool "Harden BPF JIT against spray attacks"
57027+ default y if GRKERNSEC_CONFIG_AUTO
57028+ depends on BPF_JIT
57029+ help
57030+ If you say Y here, the native code generated by the kernel's Berkeley
57031+ Packet Filter (BPF) JIT engine will be hardened against JIT-spraying
57032+ attacks that attempt to fit attacker-beneficial instructions in
57033+ 32bit immediate fields of JIT-generated native instructions. The
57034+ attacker will generally aim to cause an unintended instruction sequence
57035+ of JIT-generated native code to execute by jumping into the middle of
57036+ a generated instruction. This feature effectively randomizes the 32bit
57037+ immediate constants present in the generated code to thwart such attacks.
57038+
57039+ If you're using KERNEXEC, it's recommended that you enable this option
57040+ to supplement the hardening of the kernel.
57041+
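A common way to realize the immediate randomization described above is constant blinding: the JIT emits the attacker-influenced constant XORed with a per-program random key, followed by an instruction that undoes the XOR at runtime, so the chosen 32-bit pattern never lands verbatim in executable memory. A hedged sketch of the idea, not the patch's actual emitter:

    #include <stdint.h>

    /* emit_mov_imm()/emit_xor_imm() stand in for the per-arch
     * instruction encoders. */
    extern void emit_mov_imm(int reg, uint32_t imm);
    extern void emit_xor_imm(int reg, uint32_t imm);

    static void emit_blinded_imm(int reg, uint32_t imm, uint32_t key)
    {
            emit_mov_imm(reg, imm ^ key);  /* looks random in memory  */
            emit_xor_imm(reg, key);        /* recovers imm at runtime */
    }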
57042+config GRKERNSEC_RAND_THREADSTACK
57043+ bool "Insert random gaps between thread stacks"
57044+ default y if GRKERNSEC_CONFIG_AUTO
57045+ depends on PAX_RANDMMAP && !PPC
57046+ help
57047+ If you say Y here, a random-sized gap will be enforced between allocated
57048+ thread stacks. Glibc's NPTL and other threading libraries that
57049+ pass MAP_STACK to the kernel for thread stack allocation are supported.
57050+ The implementation currently provides 8 bits of entropy for the gap.
57051+
57052+ Many distributions do not compile threaded remote services with the
57053+ -fstack-check argument to GCC, causing the variable-sized stack-based
57054+ allocator, alloca(), to not probe the stack on allocation. This
57055+ permits an unbounded alloca() to skip over any guard page and potentially
57056+ modify another thread's stack reliably. An enforced random gap
57057+ reduces the reliability of such an attack and increases the chance
57058+ that such a read/write to another thread's stack instead lands in
57059+ an unmapped area, causing a crash and triggering grsecurity's
57060+ anti-bruteforcing logic.
57061+
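Conceptually the gap is applied when the kernel places a MAP_STACK mapping: the candidate address is backed off by a random number of pages drawn from 8 bits of entropy, leaving an unmapped hole below the new stack. A kernel-context sketch under that assumption (the helper name is invented; get_random_int() is a real kernel primitive):

    static unsigned long place_thread_stack(unsigned long addr,
                                            unsigned long flags)
    {
            /* up to 255 pages of slack: 8 bits of entropy */
            if (flags & MAP_STACK)
                    addr -= (get_random_int() & 0xFFUL) << PAGE_SHIFT;
            return addr;    /* caller re-validates the range as usual */
    }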
57062+config GRKERNSEC_PROC_MEMMAP
57063+ bool "Harden ASLR against information leaks and entropy reduction"
57064+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
57065+ depends on PAX_NOEXEC || PAX_ASLR
57066+ help
57067+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
57068+ give no information about the addresses of its mappings if
57069+ PaX features that rely on random addresses are enabled on the task.
57070+ In addition to sanitizing this information and disabling other
57071+ dangerous sources of information, this option denies reads of sensitive
57072+ /proc/<pid> entries where the file descriptor was opened in a different
57073+ task than the one performing the read. Such attempts are logged.
57074+ This option also limits argv/env strings for suid/sgid binaries
57075+ to 512KB to prevent a complete exhaustion of the stack entropy provided
57076+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
57077+ binaries to prevent alternative mmap layouts from being abused.
57078+
57079+ If you use PaX it is essential that you say Y here as it closes up
57080+ several holes that make full ASLR useless locally.
57081+
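One way to implement the cross-task read denial described above is to record the opening task in the file's private data at open() time and compare on every read. A sketch under that assumption; the field and helper names are illustrative, not the patch's:

    struct proc_priv {
            struct task_struct *opener;     /* set in the open handler */
    };

    static ssize_t guarded_read(struct file *file, char __user *buf,
                                size_t count, loff_t *ppos)
    {
            struct proc_priv *priv = file->private_data;

            /* fd was passed to, or inherited by, a different task */
            if (priv->opener != current)
                    return -EACCES;         /* and log the attempt */
            return do_plain_read(file, buf, count, ppos);
    }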
57082+config GRKERNSEC_BRUTE
57083+ bool "Deter exploit bruteforcing"
57084+ default y if GRKERNSEC_CONFIG_AUTO
57085+ help
57086+ If you say Y here, attempts to bruteforce exploits against forking
57087+ daemons such as apache or sshd, as well as against suid/sgid binaries,
57088+ will be deterred. When a child of a forking daemon is killed by PaX
57089+ or crashes due to an illegal instruction or other suspicious signal,
57090+ the parent process will be delayed 30 seconds upon every subsequent
57091+ fork until the administrator is able to assess the situation and
57092+ restart the daemon.
57093+ In the suid/sgid case, the attempt is logged, the user has all their
57094+ processes terminated, and they are prevented from executing any further
57095+ processes for 15 minutes.
57096+ It is recommended that you also enable signal logging in the auditing
57097+ section so that logs are generated when a process triggers a suspicious
57098+ signal.
57099+ If the sysctl option is enabled, a sysctl option with name
57100+ "deter_bruteforce" is created.
57101+
57102+
57103+config GRKERNSEC_MODHARDEN
57104+ bool "Harden module auto-loading"
57105+ default y if GRKERNSEC_CONFIG_AUTO
57106+ depends on MODULES
57107+ help
57108+ If you say Y here, module auto-loading in response to use of some
57109+ feature implemented by an unloaded module will be restricted to
57110+ root users. Enabling this option helps defend against attacks
57111+ by unprivileged users who abuse the auto-loading behavior to
57112+ cause a vulnerable module to load that is then exploited.
57113+
57114+ If this option prevents a legitimate use of auto-loading for a
57115+ non-root user, the administrator can execute modprobe manually
57116+ with the exact name of the module mentioned in the alert log.
57117+ Alternatively, the administrator can add the module to the list
57118+ of modules loaded at boot by modifying init scripts.
57119+
57120+ Modification of init scripts will most likely be needed on
57121+ Ubuntu servers with encrypted home directory support enabled,
57122+ as the first non-root user logging in will cause the ecb(aes),
57123+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
57124+
57125+config GRKERNSEC_HIDESYM
57126+ bool "Hide kernel symbols"
57127+ default y if GRKERNSEC_CONFIG_AUTO
57128+ select PAX_USERCOPY_SLABS
57129+ help
57130+ If you say Y here, getting information on loaded modules and
57131+ displaying all kernel symbols through a syscall will be restricted
57132+ to users with CAP_SYS_MODULE. For software compatibility reasons,
57133+ /proc/kallsyms will be restricted to the root user. The RBAC
57134+ system can hide that entry even from root.
57135+
57136+ This option also prevents leaking of kernel addresses through
57137+ several /proc entries.
57138+
57139+ Note that this option is only effective provided the following
57140+ conditions are met:
57141+ 1) The kernel using grsecurity is not precompiled by some distribution
57142+ 2) You have also enabled GRKERNSEC_DMESG
57143+ 3) You are using the RBAC system and hiding other files such as your
57144+ kernel image and System.map. Alternatively, enabling this option
57145+ causes the permissions on /boot, /lib/modules, and the kernel
57146+ source directory to change at compile time to prevent
57147+ reading by non-root users.
57148+ If the above conditions are met, this option will aid in providing a
57149+ useful protection against local kernel exploitation of overflows
57150+ and arbitrary read/write vulnerabilities.
57151+
57152+config GRKERNSEC_KERN_LOCKOUT
57153+ bool "Active kernel exploit response"
57154+ default y if GRKERNSEC_CONFIG_AUTO
57155+ depends on X86 || ARM || PPC || SPARC
57156+ help
57157+ If you say Y here, when a PaX alert is triggered due to suspicious
57158+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
57159+ or an OOPS occurs due to bad memory accesses, instead of just
57160+ terminating the offending process (and potentially allowing
57161+ a subsequent exploit from the same user), we will take one of two
57162+ actions:
57163+ If the user was root, we will panic the system
57164+ If the user was non-root, we will log the attempt, terminate
57165+ all processes owned by the user, then prevent them from creating
57166+ any new processes until the system is restarted
57167+ This deters repeated kernel exploitation/bruteforcing attempts
57168+ and is useful for later forensics.
57169+
57170+endmenu
57171+menu "Role Based Access Control Options"
57172+depends on GRKERNSEC
57173+
57174+config GRKERNSEC_RBAC_DEBUG
57175+ bool
57176+
57177+config GRKERNSEC_NO_RBAC
57178+ bool "Disable RBAC system"
57179+ help
57180+ If you say Y here, the /dev/grsec device will be removed from the kernel,
57181+ preventing the RBAC system from being enabled. You should only say Y
57182+ here if you have no intention of using the RBAC system, so as to prevent
57183+ an attacker with root access from misusing the RBAC system to hide files
57184+ and processes when loadable module support and /dev/[k]mem have been
57185+ locked down.
57186+
57187+config GRKERNSEC_ACL_HIDEKERN
57188+ bool "Hide kernel processes"
57189+ help
57190+ If you say Y here, all kernel threads will be hidden from all
57191+ processes but those whose subject has the "view hidden processes"
57192+ flag.
57193+
57194+config GRKERNSEC_ACL_MAXTRIES
57195+ int "Maximum tries before password lockout"
57196+ default 3
57197+ help
57198+ This option enforces the maximum number of times a user can attempt
57199+ to authorize themselves with the grsecurity RBAC system before being
57200+ denied the ability to attempt authorization again for a specified time.
57201+ The lower the number, the harder it will be to brute-force a password.
57202+
57203+config GRKERNSEC_ACL_TIMEOUT
57204+ int "Time to wait after max password tries, in seconds"
57205+ default 30
57206+ help
57207+ This option specifies the time the user must wait after attempting to
57208+ authorize to the RBAC system with the maximum number of invalid
57209+ passwords. The higher the number, the harder it will be to brute-force
57210+ a password.
57211+
57212+endmenu
57213+menu "Filesystem Protections"
57214+depends on GRKERNSEC
57215+
57216+config GRKERNSEC_PROC
57217+ bool "Proc restrictions"
57218+ default y if GRKERNSEC_CONFIG_AUTO
57219+ help
57220+ If you say Y here, the permissions of the /proc filesystem
57221+ will be altered to enhance system security and privacy. You MUST
57222+ choose either a user only restriction or a user and group restriction.
57223+ Depending upon the option you choose, you can either restrict users
57224+ to seeing only the processes they themselves run, or designate a
57225+ group whose members can view all processes and files normally
57226+ restricted to root. NOTE: If you're running identd or
57227+ ntpd as a non-root user, you will have to run it as the group you
57228+ specify here.
57229+
57230+config GRKERNSEC_PROC_USER
57231+ bool "Restrict /proc to user only"
57232+ depends on GRKERNSEC_PROC
57233+ help
57234+ If you say Y here, non-root users will only be able to view their own
57235+ processes, and will be restricted from viewing network-related
57236+ information and kernel symbol and module information.
57237+
57238+config GRKERNSEC_PROC_USERGROUP
57239+ bool "Allow special group"
57240+ default y if GRKERNSEC_CONFIG_AUTO
57241+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
57242+ help
57243+ If you say Y here, you will be able to select a group that will be
57244+ able to view all processes and network-related information. If you've
57245+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
57246+ remain hidden. This option is useful if you want to run identd as
57247+ a non-root user. The group you select may also be chosen at boot time
57248+ via "grsec_proc_gid=" on the kernel commandline.
57249+
57250+config GRKERNSEC_PROC_GID
57251+ int "GID for special group"
57252+ depends on GRKERNSEC_PROC_USERGROUP
57253+ default 1001
57254+
57255+config GRKERNSEC_PROC_ADD
57256+ bool "Additional restrictions"
57257+ default y if GRKERNSEC_CONFIG_AUTO
57258+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
57259+ help
57260+ If you say Y here, additional restrictions will be placed on
57261+ /proc that keep normal users from viewing device information and
57262+ slabinfo information that could be useful for exploits.
57263+
57264+config GRKERNSEC_LINK
57265+ bool "Linking restrictions"
57266+ default y if GRKERNSEC_CONFIG_AUTO
57267+ help
57268+ If you say Y here, /tmp race exploits will be prevented, since users
57269+ will no longer be able to follow symlinks owned by other users in
57270+ world-writable +t directories (e.g. /tmp), unless the owner of the
57271+ symlink is the owner of the directory. Users will also not be
57272+ able to hardlink to files they do not own. If the sysctl option is
57273+ enabled, a sysctl option with name "linking_restrictions" is created.
57274+
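The linking restriction reduces to a single predicate at symlink-follow time, the same rule later adopted upstream as the fs.protected_symlinks sysctl. A standalone sketch with simplified types:

    #include <sys/stat.h>

    /* In a world-writable sticky directory, follow a symlink only if
     * the follower owns it or the directory owner owns it. */
    static int may_follow_link(uid_t dir_uid, mode_t dir_mode,
                               uid_t link_uid, uid_t follower_uid)
    {
            mode_t sticky_ww = S_ISVTX | S_IWOTH;

            if ((dir_mode & sticky_ww) != sticky_ww)
                    return 1;               /* ordinary directory     */
            if (link_uid == follower_uid)
                    return 1;               /* follower's own link    */
            if (link_uid == dir_uid)
                    return 1;               /* directory owner's link */
            return 0;                       /* deny: /tmp race risk   */
    }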
57275+config GRKERNSEC_SYMLINKOWN
57276+ bool "Kernel-enforced SymlinksIfOwnerMatch"
57277+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
57278+ help
57279+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
57280+ that prevents it from being used as a security feature. As Apache
57281+ verifies the symlink by performing a stat() against the target of
57282+ the symlink before it is followed, an attacker can set up a symlink
57283+ to point to a same-owned file, then replace the symlink with one
57284+ that targets another user's file just after Apache "validates" the
57285+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
57286+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
57287+ will be in place for the group you specify. If the sysctl option
57288+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
57289+ created.
57290+
57291+config GRKERNSEC_SYMLINKOWN_GID
57292+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
57293+ depends on GRKERNSEC_SYMLINKOWN
57294+ default 1006
57295+ help
57296+ Setting this GID determines what group kernel-enforced
57297+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
57298+ is enabled, a sysctl option with name "symlinkown_gid" is created.
57299+
57300+config GRKERNSEC_FIFO
57301+ bool "FIFO restrictions"
57302+ default y if GRKERNSEC_CONFIG_AUTO
57303+ help
57304+ If you say Y here, users will not be able to write to FIFOs they don't
57305+ own in world-writable +t directories (e.g. /tmp), unless the owner of
57306+ the FIFO is also the owner of the directory it's held in. If the sysctl
57307+ option is enabled, a sysctl option with name "fifo_restrictions" is
57308+ created.
57309+
57310+config GRKERNSEC_SYSFS_RESTRICT
57311+ bool "Sysfs/debugfs restriction"
57312+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
57313+ depends on SYSFS
57314+ help
57315+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
57316+ any filesystem normally mounted under it (e.g. debugfs) will be
57317+ mostly accessible only by root. These filesystems generally provide access
57318+ to hardware and debug information that isn't appropriate for unprivileged
57319+ users of the system. Sysfs and debugfs have also become a large source
57320+ of new vulnerabilities, ranging from infoleaks to local compromise.
57321+ There has been very little oversight with an eye toward security involved
57322+ in adding new exporters of information to these filesystems, so their
57323+ use is discouraged.
57324+ For reasons of compatibility, a few directories have been whitelisted
57325+ for access by non-root users:
57326+ /sys/fs/selinux
57327+ /sys/fs/fuse
57328+ /sys/devices/system/cpu
57329+
57330+config GRKERNSEC_ROFS
57331+ bool "Runtime read-only mount protection"
57332+ help
57333+ If you say Y here, a sysctl option with name "romount_protect" will
57334+ be created. By setting this option to 1 at runtime, filesystems
57335+ will be protected in the following ways:
57336+ * No new writable mounts will be allowed
57337+ * Existing read-only mounts won't be able to be remounted read/write
57338+ * Write operations will be denied on all block devices
57339+ This option acts independently of grsec_lock: once it is set to 1,
57340+ it cannot be turned off. Therefore, please be mindful of the resulting
57341+ behavior if this option is enabled in an init script on a read-only
57342+ filesystem. This feature is mainly intended for secure embedded systems.
57343+
57344+config GRKERNSEC_DEVICE_SIDECHANNEL
57345+ bool "Eliminate stat/notify-based device sidechannels"
57346+ default y if GRKERNSEC_CONFIG_AUTO
57347+ help
57348+ If you say Y here, timing analyses on block or character
57349+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
57350+ will be thwarted for unprivileged users. If a process without
57351+ CAP_MKNOD stats such a device, the last access and last modify times
57352+ will match the device's create time. No access or modify events
57353+ will be triggered through inotify/dnotify/fanotify for such devices.
57354+ This feature will prevent attacks that may at a minimum
57355+ allow an attacker to determine the administrator's password length.
57356+
57357+config GRKERNSEC_CHROOT
57358+ bool "Chroot jail restrictions"
57359+ default y if GRKERNSEC_CONFIG_AUTO
57360+ help
57361+ If you say Y here, you will be able to choose several options that will
57362+ make breaking out of a chrooted jail much more difficult. If you
57363+ encounter no software incompatibilities with the following options, it
57364+ is recommended that you enable each one.
57365+
57366+config GRKERNSEC_CHROOT_MOUNT
57367+ bool "Deny mounts"
57368+ default y if GRKERNSEC_CONFIG_AUTO
57369+ depends on GRKERNSEC_CHROOT
57370+ help
57371+ If you say Y here, processes inside a chroot will not be able to
57372+ mount or remount filesystems. If the sysctl option is enabled, a
57373+ sysctl option with name "chroot_deny_mount" is created.
57374+
57375+config GRKERNSEC_CHROOT_DOUBLE
57376+ bool "Deny double-chroots"
57377+ default y if GRKERNSEC_CONFIG_AUTO
57378+ depends on GRKERNSEC_CHROOT
57379+ help
57380+ If you say Y here, processes inside a chroot will not be able to chroot
57381+ again outside the chroot. This is a widely used method of breaking
57382+ out of a chroot jail and should not be allowed. If the sysctl
57383+ option is enabled, a sysctl option with name
57384+ "chroot_deny_chroot" is created.
57385+
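Most of the chroot options above compile down to a small guard at the top of the corresponding syscall; for the double-chroot case, roughly (proc_is_chrooted() and the grsec_enable_chroot_double toggle are real grsecurity names, the wrapper is a sketch):

    static int deny_nested_chroot(void)
    {
    #ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
            /* already inside a chroot: refuse to nest another one */
            if (grsec_enable_chroot_double && proc_is_chrooted(current))
                    return -EPERM;          /* logged by the caller */
    #endif
            return 0;
    }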
57386+config GRKERNSEC_CHROOT_PIVOT
57387+ bool "Deny pivot_root in chroot"
57388+ default y if GRKERNSEC_CONFIG_AUTO
57389+ depends on GRKERNSEC_CHROOT
57390+ help
57391+ If you say Y here, processes inside a chroot will not be able to use
57392+ a function called pivot_root() that was introduced in Linux 2.3.41. It
57393+ works similarly to chroot in that it changes the root filesystem. This
57394+ function could be misused in a chrooted process to attempt to break out
57395+ of the chroot, and therefore should not be allowed. If the sysctl
57396+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
57397+ created.
57398+
57399+config GRKERNSEC_CHROOT_CHDIR
57400+ bool "Enforce chdir(\"/\") on all chroots"
57401+ default y if GRKERNSEC_CONFIG_AUTO
57402+ depends on GRKERNSEC_CHROOT
57403+ help
57404+ If you say Y here, the current working directory of all newly-chrooted
57405+ applications will be set to the root directory of the chroot.
57406+ The man page on chroot(2) states:
57407+ Note that this call does not change the current working
57408+ directory, so that `.' can be outside the tree rooted at
57409+ `/'. In particular, the super-user can escape from a
57410+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
57411+
57412+ It is recommended that you say Y here, since it's not known to break
57413+ any software. If the sysctl option is enabled, a sysctl option with
57414+ name "chroot_enforce_chdir" is created.
57415+
57416+config GRKERNSEC_CHROOT_CHMOD
57417+ bool "Deny (f)chmod +s"
57418+ default y if GRKERNSEC_CONFIG_AUTO
57419+ depends on GRKERNSEC_CHROOT
57420+ help
57421+ If you say Y here, processes inside a chroot will not be able to chmod
57422+ or fchmod files to make them have suid or sgid bits. This protects
57423+ against another published method of breaking a chroot. If the sysctl
57424+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
57425+ created.
57426+
57427+config GRKERNSEC_CHROOT_FCHDIR
57428+ bool "Deny fchdir out of chroot"
57429+ default y if GRKERNSEC_CONFIG_AUTO
57430+ depends on GRKERNSEC_CHROOT
57431+ help
57432+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
57433+ to a file descriptor of the chrooting process that points to a directory
57434+ outside the chroot will be stopped. If the sysctl option
57435+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
57436+
57437+config GRKERNSEC_CHROOT_MKNOD
57438+ bool "Deny mknod"
57439+ default y if GRKERNSEC_CONFIG_AUTO
57440+ depends on GRKERNSEC_CHROOT
57441+ help
57442+ If you say Y here, processes inside a chroot will not be allowed to
57443+ mknod. The problem with using mknod inside a chroot is that it
57444+ would allow an attacker to create a device entry that is the same
57445+ as one on the physical root of your system, which could be anything
57446+ from the console device to a device for your hard drive (which
57447+ they could then use to wipe the drive or steal data). It is recommended
57448+ that you say Y here, unless you run into software incompatibilities.
57449+ If the sysctl option is enabled, a sysctl option with name
57450+ "chroot_deny_mknod" is created.
57451+
57452+config GRKERNSEC_CHROOT_SHMAT
57453+ bool "Deny shmat() out of chroot"
57454+ default y if GRKERNSEC_CONFIG_AUTO
57455+ depends on GRKERNSEC_CHROOT
57456+ help
57457+ If you say Y here, processes inside a chroot will not be able to attach
57458+ to shared memory segments that were created outside of the chroot jail.
57459+ It is recommended that you say Y here. If the sysctl option is enabled,
57460+ a sysctl option with name "chroot_deny_shmat" is created.
57461+
57462+config GRKERNSEC_CHROOT_UNIX
57463+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
57464+ default y if GRKERNSEC_CONFIG_AUTO
57465+ depends on GRKERNSEC_CHROOT
57466+ help
57467+ If you say Y here, processes inside a chroot will not be able to
57468+ connect to abstract (meaning not belonging to a filesystem) Unix
57469+ domain sockets that were bound outside of a chroot. It is recommended
57470+ that you say Y here. If the sysctl option is enabled, a sysctl option
57471+ with name "chroot_deny_unix" is created.
57472+
57473+config GRKERNSEC_CHROOT_FINDTASK
57474+ bool "Protect outside processes"
57475+ default y if GRKERNSEC_CONFIG_AUTO
57476+ depends on GRKERNSEC_CHROOT
57477+ help
57478+ If you say Y here, processes inside a chroot will not be able to
57479+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
57480+ getsid, or view any process outside of the chroot. If the sysctl
57481+ option is enabled, a sysctl option with name "chroot_findtask" is
57482+ created.
57483+
57484+config GRKERNSEC_CHROOT_NICE
57485+ bool "Restrict priority changes"
57486+ default y if GRKERNSEC_CONFIG_AUTO
57487+ depends on GRKERNSEC_CHROOT
57488+ help
57489+ If you say Y here, processes inside a chroot will not be able to raise
57490+ the priority of processes in the chroot, or alter the priority of
57491+ processes outside the chroot. This provides more security than simply
57492+ removing CAP_SYS_NICE from the process' capability set. If the
57493+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
57494+ is created.
57495+
57496+config GRKERNSEC_CHROOT_SYSCTL
57497+ bool "Deny sysctl writes"
57498+ default y if GRKERNSEC_CONFIG_AUTO
57499+ depends on GRKERNSEC_CHROOT
57500+ help
57501+ If you say Y here, an attacker in a chroot will not be able to
57502+ write to sysctl entries, either by sysctl(2) or through a /proc
57503+ interface. It is strongly recommended that you say Y here. If the
57504+ sysctl option is enabled, a sysctl option with name
57505+ "chroot_deny_sysctl" is created.
57506+
57507+config GRKERNSEC_CHROOT_CAPS
57508+ bool "Capability restrictions"
57509+ default y if GRKERNSEC_CONFIG_AUTO
57510+ depends on GRKERNSEC_CHROOT
57511+ help
57512+ If you say Y here, the capabilities on all processes within a
57513+ chroot jail will be lowered to stop module insertion, raw i/o,
57514+ system and net admin tasks, rebooting the system, modifying immutable
57515+ files, modifying IPC owned by another, and changing the system time.
57516+ This is left as an option because it can break some apps. Disable this
57517+ if your chrooted apps are having problems performing those kinds of
57518+ tasks. If the sysctl option is enabled, a sysctl option with
57519+ name "chroot_caps" is created.
57520+
57521+endmenu
57522+menu "Kernel Auditing"
57523+depends on GRKERNSEC
57524+
57525+config GRKERNSEC_AUDIT_GROUP
57526+ bool "Single group for auditing"
57527+ help
57528+ If you say Y here, the exec and chdir logging features will only operate
57529+ on a group you specify. This option is recommended if you only want to
57530+ watch certain users instead of having a large amount of logs from the
57531+ entire system. If the sysctl option is enabled, a sysctl option with
57532+ name "audit_group" is created.
57533+
57534+config GRKERNSEC_AUDIT_GID
57535+ int "GID for auditing"
57536+ depends on GRKERNSEC_AUDIT_GROUP
57537+ default 1007
57538+
57539+config GRKERNSEC_EXECLOG
57540+ bool "Exec logging"
57541+ help
57542+ If you say Y here, all execve() calls will be logged (since the
57543+ other exec*() calls are frontends to execve(), all execution
57544+ will be logged). Useful for shell-servers that like to keep track
57545+ of their users. If the sysctl option is enabled, a sysctl option with
57546+ name "exec_logging" is created.
57547+ WARNING: This option when enabled will produce a LOT of logs, especially
57548+ on an active system.
57549+
57550+config GRKERNSEC_RESLOG
57551+ bool "Resource logging"
57552+ default y if GRKERNSEC_CONFIG_AUTO
57553+ help
57554+ If you say Y here, all attempts to overstep resource limits will
57555+ be logged with the resource name, the requested size, and the current
57556+ limit. It is highly recommended that you say Y here. If the sysctl
57557+ option is enabled, a sysctl option with name "resource_logging" is
57558+ created. If the RBAC system is enabled, the sysctl value is ignored.
57559+
57560+config GRKERNSEC_CHROOT_EXECLOG
57561+ bool "Log execs within chroot"
57562+ help
57563+ If you say Y here, all executions inside a chroot jail will be logged
57564+ to syslog. This can cause a large amount of logs if certain
57565+ applications (e.g. djb's daemontools) are installed on the system, and
57566+ is therefore left as an option. If the sysctl option is enabled, a
57567+ sysctl option with name "chroot_execlog" is created.
57568+
57569+config GRKERNSEC_AUDIT_PTRACE
57570+ bool "Ptrace logging"
57571+ help
57572+ If you say Y here, all attempts to attach to a process via ptrace
57573+ will be logged. If the sysctl option is enabled, a sysctl option
57574+ with name "audit_ptrace" is created.
57575+
57576+config GRKERNSEC_AUDIT_CHDIR
57577+ bool "Chdir logging"
57578+ help
57579+ If you say Y here, all chdir() calls will be logged. If the sysctl
57580+ option is enabled, a sysctl option with name "audit_chdir" is created.
57581+
57582+config GRKERNSEC_AUDIT_MOUNT
57583+ bool "(Un)Mount logging"
57584+ help
57585+ If you say Y here, all mounts and unmounts will be logged. If the
57586+ sysctl option is enabled, a sysctl option with name "audit_mount" is
57587+ created.
57588+
57589+config GRKERNSEC_SIGNAL
57590+ bool "Signal logging"
57591+ default y if GRKERNSEC_CONFIG_AUTO
57592+ help
57593+ If you say Y here, certain important signals will be logged, such as
57594+ SIGSEGV, which will as a result inform you when an error in a program
57595+ occurred, which in some cases could mean a possible exploit attempt.
57596+ If the sysctl option is enabled, a sysctl option with name
57597+ "signal_logging" is created.
57598+
57599+config GRKERNSEC_FORKFAIL
57600+ bool "Fork failure logging"
57601+ help
57602+ If you say Y here, all failed fork() attempts will be logged.
57603+ This could suggest a fork bomb, or someone attempting to overstep
57604+ their process limit. If the sysctl option is enabled, a sysctl option
57605+ with name "forkfail_logging" is created.
57606+
57607+config GRKERNSEC_TIME
57608+ bool "Time change logging"
57609+ default y if GRKERNSEC_CONFIG_AUTO
57610+ help
57611+ If you say Y here, any changes of the system clock will be logged.
57612+ If the sysctl option is enabled, a sysctl option with name
57613+ "timechange_logging" is created.
57614+
57615+config GRKERNSEC_PROC_IPADDR
57616+ bool "/proc/<pid>/ipaddr support"
57617+ default y if GRKERNSEC_CONFIG_AUTO
57618+ help
57619+ If you say Y here, a new entry will be added to each /proc/<pid>
57620+ directory that contains the IP address of the person using the task.
57621+ The IP is carried across local TCP and AF_UNIX stream sockets.
57622+ This information can be useful for IDS/IPSes to perform remote response
57623+ to a local attack. The entry is readable by only the owner of the
57624+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
57625+ the RBAC system), and thus does not create privacy concerns.
57626+
57627+config GRKERNSEC_RWXMAP_LOG
57628+ bool 'Denied RWX mmap/mprotect logging'
57629+ default y if GRKERNSEC_CONFIG_AUTO
57630+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
57631+ help
57632+ If you say Y here, calls to mmap() and mprotect() with explicit
57633+ usage of PROT_WRITE and PROT_EXEC together will be logged when
57634+ denied by the PAX_MPROTECT feature. If the sysctl option is
57635+ enabled, a sysctl option with name "rwxmap_logging" is created.
57636+
57637+config GRKERNSEC_AUDIT_TEXTREL
57638+ bool 'ELF text relocations logging (READ HELP)'
57639+ depends on PAX_MPROTECT
57640+ help
57641+ If you say Y here, text relocations will be logged with the filename
57642+ of the offending library or binary. The purpose of the feature is
57643+ to help Linux distribution developers get rid of libraries and
57644+ binaries that need text relocations which hinder the future progress
57645+ of PaX. Only Linux distribution developers should say Y here, and
57646+ never on a production machine, as this option creates an information
57647+ leak that could aid an attacker in defeating the randomization of
57648+ a single memory region. If the sysctl option is enabled, a sysctl
57649+ option with name "audit_textrel" is created.
57650+
57651+endmenu
57652+
57653+menu "Executable Protections"
57654+depends on GRKERNSEC
57655+
57656+config GRKERNSEC_DMESG
57657+ bool "Dmesg(8) restriction"
57658+ default y if GRKERNSEC_CONFIG_AUTO
57659+ help
57660+ If you say Y here, non-root users will not be able to use dmesg(8)
57661+ to view the contents of the kernel's circular log buffer.
57662+ The kernel's log buffer often contains kernel addresses and other
57663+ identifying information useful to an attacker in fingerprinting a
57664+ system for a targeted exploit.
57665+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
57666+ created.
57667+
57668+config GRKERNSEC_HARDEN_PTRACE
57669+ bool "Deter ptrace-based process snooping"
57670+ default y if GRKERNSEC_CONFIG_AUTO
57671+ help
57672+ If you say Y here, TTY sniffers and other malicious monitoring
57673+ programs implemented through ptrace will be defeated. If you
57674+ have been using the RBAC system, this option has already been
57675+ enabled for several years for all users, with the ability to make
57676+ fine-grained exceptions.
57677+
57678+ This option only affects the ability of non-root users to ptrace
57679+ processes that are not a descendant of the ptracing process.
57680+ This means that strace ./binary and gdb ./binary will still work,
57681+ but attaching to arbitrary processes will not. If the sysctl
57682+ option is enabled, a sysctl option with name "harden_ptrace" is
57683+ created.
57684+
57685+config GRKERNSEC_PTRACE_READEXEC
57686+ bool "Require read access to ptrace sensitive binaries"
57687+ default y if GRKERNSEC_CONFIG_AUTO
57688+ help
57689+ If you say Y here, unprivileged users will not be able to ptrace unreadable
57690+ binaries. This option is useful in environments that
57691+ remove the read bits (e.g. file mode 4711) from suid binaries to
57692+ prevent infoleaking of their contents. This option adds
57693+ consistency to the use of that file mode, as the binary could otherwise
57694+ be read out by ptracing it when run without privileges.
57695+
57696+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
57697+ is created.
57698+
57699+config GRKERNSEC_SETXID
57700+ bool "Enforce consistent multithreaded privileges"
57701+ default y if GRKERNSEC_CONFIG_AUTO
57702+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
57703+ help
57704+ If you say Y here, a change from a root uid to a non-root uid
57705+ in a multithreaded application will cause the resulting uids,
57706+ gids, supplementary groups, and capabilities in that thread
57707+ to be propagated to the other threads of the process. In most
57708+ cases this is unnecessary, as glibc will emulate this behavior
57709+ on behalf of the application. Other libcs do not act in the
57710+ same way, allowing the other threads of the process to continue
57711+ running with root privileges. If the sysctl option is enabled,
57712+ a sysctl option with name "consistent_setxid" is created.
57713+
57714+config GRKERNSEC_TPE
57715+ bool "Trusted Path Execution (TPE)"
57716+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
57717+ help
57718+ If you say Y here, you will be able to choose a gid to add to the
57719+ supplementary groups of users you want to mark as "untrusted."
57720+ These users will not be able to execute any files that are not in
57721+ root-owned directories writable only by root. If the sysctl option
57722+ is enabled, a sysctl option with name "tpe" is created.
57723+
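The core TPE test at exec time inspects the containing directory rather than the file itself: an untrusted user may execute only from a directory that is root-owned and writable by no one but root. A standalone sketch of that predicate:

    #include <sys/stat.h>

    static int tpe_allow_exec(uid_t dir_uid, mode_t dir_mode,
                              int user_is_trusted)
    {
            if (user_is_trusted)
                    return 1;
            if (dir_uid != 0)
                    return 0;       /* directory not owned by root    */
            if (dir_mode & (S_IWGRP | S_IWOTH))
                    return 0;       /* someone besides root can write */
            return 1;
    }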
57724+config GRKERNSEC_TPE_ALL
57725+ bool "Partially restrict all non-root users"
57726+ depends on GRKERNSEC_TPE
57727+ help
57728+ If you say Y here, all non-root users will be covered under
57729+ a weaker TPE restriction. This is separate from, and in addition to,
57730+ the main TPE options that you have selected elsewhere. Thus, if a
57731+ "trusted" GID is chosen, this restriction applies to even that GID.
57732+ Under this restriction, all non-root users will only be allowed to
57733+ execute files in directories they own that are not group or
57734+ world-writable, or in directories owned by root and writable only by
57735+ root. If the sysctl option is enabled, a sysctl option with name
57736+ "tpe_restrict_all" is created.
57737+
57738+config GRKERNSEC_TPE_INVERT
57739+ bool "Invert GID option"
57740+ depends on GRKERNSEC_TPE
57741+ help
57742+ If you say Y here, the group you specify in the TPE configuration will
57743+ decide what group TPE restrictions will be *disabled* for. This
57744+ option is useful if you want TPE restrictions to be applied to most
57745+ users on the system. If the sysctl option is enabled, a sysctl option
57746+ with name "tpe_invert" is created. Unlike other sysctl options, this
57747+ entry will default to on for backward-compatibility.
57748+
57749+config GRKERNSEC_TPE_GID
57750+ int
57751+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
57752+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
57753+
57754+config GRKERNSEC_TPE_UNTRUSTED_GID
57755+ int "GID for TPE-untrusted users"
57756+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
57757+ default 1005
57758+ help
57759+ Setting this GID determines what group TPE restrictions will be
57760+ *enabled* for. If the sysctl option is enabled, a sysctl option
57761+ with name "tpe_gid" is created.
57762+
57763+config GRKERNSEC_TPE_TRUSTED_GID
57764+ int "GID for TPE-trusted users"
57765+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
57766+ default 1005
57767+ help
57768+ Setting this GID determines what group TPE restrictions will be
57769+ *disabled* for. If the sysctl option is enabled, a sysctl option
57770+ with name "tpe_gid" is created.
57771+
57772+endmenu
57773+menu "Network Protections"
57774+depends on GRKERNSEC
57775+
57776+config GRKERNSEC_RANDNET
57777+ bool "Larger entropy pools"
57778+ default y if GRKERNSEC_CONFIG_AUTO
57779+ help
57780+ If you say Y here, the entropy pools used for many features of Linux
57781+ and grsecurity will be doubled in size. Since several grsecurity
57782+ features use additional randomness, it is recommended that you say Y
57783+ here. Saying Y here has a similar effect to modifying
57784+ /proc/sys/kernel/random/poolsize.
57785+
57786+config GRKERNSEC_BLACKHOLE
57787+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
57788+ default y if GRKERNSEC_CONFIG_AUTO
57789+ depends on NET
57790+ help
57791+ If you say Y here, neither TCP resets nor ICMP
57792+ destination-unreachable packets will be sent in response to packets
57793+ sent to ports for which no associated listening process exists.
57794+ This feature supports both IPV4 and IPV6 and exempts the
57795+ loopback interface from blackholing. Enabling this feature
57796+ makes a host more resilient to DoS attacks and reduces network
57797+ visibility against scanners.
57798+
57799+ The blackhole feature as-implemented is equivalent to the FreeBSD
57800+ blackhole feature, as it prevents RST responses to all packets, not
57801+ just SYNs. Under most application behavior this causes no
57802+ problems, but applications (like haproxy) may not close certain
57803+ connections in a way that cleanly terminates them on the remote
57804+ end, leaving the remote host in LAST_ACK state. Because of this
57805+ side-effect and to prevent intentional LAST_ACK DoSes, this
57806+ feature also adds automatic mitigation against such attacks.
57807+ The mitigation drastically reduces the amount of time a socket
57808+ can spend in LAST_ACK state. If you're using haproxy and not
57809+ all servers it connects to have this option enabled, consider
57810+ disabling this feature on the haproxy host.
57811+
57812+ If the sysctl option is enabled, two sysctl options with names
57813+ "ip_blackhole" and "lastack_retries" will be created.
57814+ While "ip_blackhole" takes the standard zero/non-zero on/off
57815+ toggle, "lastack_retries" uses the same kinds of values as
57816+ "tcp_retries1" and "tcp_retries2". The default value of 4
57817+ prevents a socket from lasting more than 45 seconds in LAST_ACK
57818+ state.
57819+
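Mechanically the blackhole is a short-circuit in the reply paths: at the points where the stack would emit a TCP RST or an ICMP port-unreachable for a closed port, it returns early when the toggle is set. A kernel-context sketch (grsec_enable_blackhole is the real flag; the surrounding function is illustrative):

    static void maybe_send_reset(struct sk_buff *skb)
    {
            if (grsec_enable_blackhole)
                    return;         /* drop silently, no RST leaves */
            send_tcp_reset(skb);    /* normal behavior otherwise    */
    }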
57820+config GRKERNSEC_NO_SIMULT_CONNECT
57821+ bool "Disable TCP Simultaneous Connect"
57822+ default y if GRKERNSEC_CONFIG_AUTO
57823+ depends on NET
57824+ help
57825+ If you say Y here, a feature by Willy Tarreau will be enabled that
57826+ removes a weakness in Linux's strict implementation of TCP that
57827+ allows two clients to connect to each other without either entering
57828+ a listening state. The weakness allows an attacker to easily prevent
57829+ a client from connecting to a known server provided the source port
57830+ for the connection is guessed correctly.
57831+
57832+ As the weakness could be used to prevent an antivirus or IPS from
57833+ fetching updates, or prevent an SSL gateway from fetching a CRL,
57834+ it should be eliminated by enabling this option. Though Linux is
57835+ one of few operating systems supporting simultaneous connect, it
57836+ has no legitimate use in practice and is rarely supported by firewalls.
57837+
57838+config GRKERNSEC_SOCKET
57839+ bool "Socket restrictions"
57840+ depends on NET
57841+ help
57842+ If you say Y here, you will be able to choose from several options.
57843+ If you assign a GID on your system and add it to the supplementary
57844+ groups of users you want to restrict socket access to, this patch
57845+ will perform up to three things, based on the option(s) you choose.
57846+
57847+config GRKERNSEC_SOCKET_ALL
57848+ bool "Deny any sockets to group"
57849+ depends on GRKERNSEC_SOCKET
57850+ help
57851+ If you say Y here, you will be able to choose a GID whose users will
57852+ be unable to connect to other hosts from your machine or run server
57853+ applications from your machine. If the sysctl option is enabled, a
57854+ sysctl option with name "socket_all" is created.
57855+
57856+config GRKERNSEC_SOCKET_ALL_GID
57857+ int "GID to deny all sockets for"
57858+ depends on GRKERNSEC_SOCKET_ALL
57859+ default 1004
57860+ help
57861+ Here you can choose the GID to disable socket access for. Remember to
57862+ add the users you want socket access disabled for to the GID
57863+ specified here. If the sysctl option is enabled, a sysctl option
57864+ with name "socket_all_gid" is created.
57865+
57866+config GRKERNSEC_SOCKET_CLIENT
57867+ bool "Deny client sockets to group"
57868+ depends on GRKERNSEC_SOCKET
57869+ help
57870+ If you say Y here, you will be able to choose a GID whose users will
57871+ be unable to connect to other hosts from your machine, but will be
57872+ able to run servers. If this option is enabled, all users in the group
57873+ you specify will have to use passive mode when initiating ftp transfers
57874+ from the shell on your machine. If the sysctl option is enabled, a
57875+ sysctl option with name "socket_client" is created.
57876+
57877+config GRKERNSEC_SOCKET_CLIENT_GID
57878+ int "GID to deny client sockets for"
57879+ depends on GRKERNSEC_SOCKET_CLIENT
57880+ default 1003
57881+ help
57882+ Here you can choose the GID to disable client socket access for.
57883+ Remember to add the users you want client socket access disabled for to
57884+ the GID specified here. If the sysctl option is enabled, a sysctl
57885+ option with name "socket_client_gid" is created.
57886+
57887+config GRKERNSEC_SOCKET_SERVER
57888+ bool "Deny server sockets to group"
57889+ depends on GRKERNSEC_SOCKET
57890+ help
57891+ If you say Y here, you will be able to choose a GID whose users will
57892+ be unable to run server applications from your machine. If the sysctl
57893+ option is enabled, a sysctl option with name "socket_server" is created.
57894+
57895+config GRKERNSEC_SOCKET_SERVER_GID
57896+ int "GID to deny server sockets for"
57897+ depends on GRKERNSEC_SOCKET_SERVER
57898+ default 1002
57899+ help
57900+ Here you can choose the GID to disable server socket access for.
57901+ Remember to add the users you want server socket access disabled for to
57902+ the GID specified here. If the sysctl option is enabled, a sysctl
57903+ option with name "socket_server_gid" is created.
57904+
57905+endmenu
57906+menu "Sysctl Support"
57907+depends on GRKERNSEC && SYSCTL
57908+
57909+config GRKERNSEC_SYSCTL
57910+ bool "Sysctl support"
57911+ default y if GRKERNSEC_CONFIG_AUTO
57912+ help
57913+ If you say Y here, you will be able to change the options that
57914+ grsecurity runs with at bootup, without having to recompile your
57915+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
57916+ to enable (1) or disable (0) various features. All the sysctl entries
57917+ are mutable until the "grsec_lock" entry is set to a non-zero value.
57918+ All features enabled in the kernel configuration are disabled at boot
57919+ if you do not say Y to the "Turn on features by default" option.
57920+ All options should be set at startup, and the grsec_lock entry should
57921+ be set to a non-zero value after all the options are set.
57922+ *THIS IS EXTREMELY IMPORTANT*
57923+
57924+config GRKERNSEC_SYSCTL_DISTRO
57925+ bool "Extra sysctl support for distro makers (READ HELP)"
57926+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
57927+ help
57928+ If you say Y here, additional sysctl options will be created
57929+ for features that affect processes running as root. Therefore,
57930+ it is critical when using this option that the grsec_lock entry be
57931+ enabled after boot. Only distros that ship prebuilt kernel packages
57932+ with this option enabled and that can ensure grsec_lock is set
57933+ after boot should use this option.
57934+ *Failure to set grsec_lock after boot makes all grsec features
57935+ this option covers useless*
57936+
57937+ Currently this option creates the following sysctl entries:
57938+ "Disable Privileged I/O": "disable_priv_io"
57939+
57940+config GRKERNSEC_SYSCTL_ON
57941+ bool "Turn on features by default"
57942+ default y if GRKERNSEC_CONFIG_AUTO
57943+ depends on GRKERNSEC_SYSCTL
57944+ help
57945+ If you say Y here, instead of having all features enabled in the
57946+ kernel configuration disabled at boot time, the features will be
57947+ enabled at boot time. It is recommended you say Y here unless
57948+ there is some reason you would want all sysctl-tunable features to
57949+ be disabled by default. As mentioned elsewhere, it is important
57950+ to enable the grsec_lock entry once you have finished modifying
57951+ the sysctl entries.
57952+
57953+endmenu
57954+menu "Logging Options"
57955+depends on GRKERNSEC
57956+
57957+config GRKERNSEC_FLOODTIME
57958+ int "Seconds in between log messages (minimum)"
57959+ default 10
57960+ help
57961+ This option allows you to enforce the minimum number of seconds between
57962+ grsecurity log messages. The default should be suitable for most
57963+ people, however, if you choose to change it, choose a value small enough
57964+ to allow informative logs to be produced, but large enough to
57965+ prevent flooding.
57966+
57967+config GRKERNSEC_FLOODBURST
57968+ int "Number of messages in a burst (maximum)"
57969+ default 6
57970+ help
57971+ This option allows you to choose the maximum number of messages allowed
57972+ within the flood time interval you chose in a separate option. The
57973+ default should be suitable for most people, however if you find that
57974+ many of your logs are being interpreted as flooding, you may want to
57975+ raise this value.
57976+
57977+endmenu
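Taken together, FLOODTIME and FLOODBURST implement a conventional windowed rate limiter, the same shape as the kernel's __ratelimit(): at most "burst" messages per "interval"-second window. A self-contained userspace sketch:

    #include <time.h>

    struct ratelimit {
            time_t window_start;
            int interval;           /* GRKERNSEC_FLOODTIME  */
            int burst;              /* GRKERNSEC_FLOODBURST */
            int count;
    };

    static int may_log(struct ratelimit *rl, time_t now)
    {
            if (now - rl->window_start >= rl->interval) {
                    rl->window_start = now; /* open a new window */
                    rl->count = 0;
            }
            if (rl->count < rl->burst) {
                    rl->count++;
                    return 1;       /* emit the message       */
            }
            return 0;               /* suppressed as flooding */
    }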
57978diff --git a/grsecurity/Makefile b/grsecurity/Makefile
57979new file mode 100644
57980index 0000000..1b9afa9
57981--- /dev/null
57982+++ b/grsecurity/Makefile
57983@@ -0,0 +1,38 @@
57984+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
57985+# during 2001-2009 it was completely redesigned by Brad Spengler
57986+# into an RBAC system
57987+#
57988+# All code in this directory and various hooks inserted throughout the kernel
57989+# are copyright Brad Spengler - Open Source Security, Inc., and released
57990+# under the GPL v2 or higher
57991+
57992+KBUILD_CFLAGS += -Werror
57993+
57994+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
57995+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
57996+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
57997+
57998+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
57999+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
58000+ gracl_learn.o grsec_log.o
58001+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
58002+
58003+ifdef CONFIG_NET
58004+obj-y += grsec_sock.o
58005+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
58006+endif
58007+
58008+ifndef CONFIG_GRKERNSEC
58009+obj-y += grsec_disabled.o
58010+endif
58011+
58012+ifdef CONFIG_GRKERNSEC_HIDESYM
58013+extra-y := grsec_hidesym.o
58014+$(obj)/grsec_hidesym.o:
58015+ @-chmod -f 500 /boot
58016+ @-chmod -f 500 /lib/modules
58017+ @-chmod -f 500 /lib64/modules
58018+ @-chmod -f 500 /lib32/modules
58019+ @-chmod -f 700 .
58020+ @echo ' grsec: protected kernel image paths'
58021+endif
58022diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
58023new file mode 100644
58024index 0000000..b306b36
58025--- /dev/null
58026+++ b/grsecurity/gracl.c
58027@@ -0,0 +1,4071 @@
58028+#include <linux/kernel.h>
58029+#include <linux/module.h>
58030+#include <linux/sched.h>
58031+#include <linux/mm.h>
58032+#include <linux/file.h>
58033+#include <linux/fs.h>
58034+#include <linux/namei.h>
58035+#include <linux/mount.h>
58036+#include <linux/tty.h>
58037+#include <linux/proc_fs.h>
58038+#include <linux/lglock.h>
58039+#include <linux/slab.h>
58040+#include <linux/vmalloc.h>
58041+#include <linux/types.h>
58042+#include <linux/sysctl.h>
58043+#include <linux/netdevice.h>
58044+#include <linux/ptrace.h>
58045+#include <linux/gracl.h>
58046+#include <linux/gralloc.h>
58047+#include <linux/security.h>
58048+#include <linux/grinternal.h>
58049+#include <linux/pid_namespace.h>
58050+#include <linux/stop_machine.h>
58051+#include <linux/fdtable.h>
58052+#include <linux/percpu.h>
58053+#include <linux/lglock.h>
58054+#include <linux/hugetlb.h>
58055+#include <linux/posix-timers.h>
58056+#include "../fs/mount.h"
58057+
58058+#include <asm/uaccess.h>
58059+#include <asm/errno.h>
58060+#include <asm/mman.h>
58061+
58062+extern struct lglock vfsmount_lock;
58063+
58064+static struct acl_role_db acl_role_set;
58065+static struct name_db name_set;
58066+static struct inodev_db inodev_set;
58067+
58068+/* for keeping track of userspace pointers used for subjects, so we
58069+ can share references in the kernel as well
58070+*/
58071+
58072+static struct path real_root;
58073+
58074+static struct acl_subj_map_db subj_map_set;
58075+
58076+static struct acl_role_label *default_role;
58077+
58078+static struct acl_role_label *role_list;
58079+
58080+static u16 acl_sp_role_value;
58081+
58082+extern char *gr_shared_page[4];
58083+static DEFINE_MUTEX(gr_dev_mutex);
58084+DEFINE_RWLOCK(gr_inode_lock);
58085+
58086+struct gr_arg *gr_usermode;
58087+
58088+static unsigned int gr_status __read_only = GR_STATUS_INIT;
58089+
58090+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
58091+extern void gr_clear_learn_entries(void);
58092+
58093+unsigned char *gr_system_salt;
58094+unsigned char *gr_system_sum;
58095+
58096+static struct sprole_pw **acl_special_roles = NULL;
58097+static __u16 num_sprole_pws = 0;
58098+
58099+static struct acl_role_label *kernel_role = NULL;
58100+
58101+static unsigned int gr_auth_attempts = 0;
58102+static unsigned long gr_auth_expires = 0UL;
58103+
58104+#ifdef CONFIG_NET
58105+extern struct vfsmount *sock_mnt;
58106+#endif
58107+
58108+extern struct vfsmount *pipe_mnt;
58109+extern struct vfsmount *shm_mnt;
58110+
58111+#ifdef CONFIG_HUGETLBFS
58112+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
58113+#endif
58114+
58115+static struct acl_object_label *fakefs_obj_rw;
58116+static struct acl_object_label *fakefs_obj_rwx;
58117+
58118+extern int gr_init_uidset(void);
58119+extern void gr_free_uidset(void);
58120+extern void gr_remove_uid(uid_t uid);
58121+extern int gr_find_uid(uid_t uid);
58122+
58123+__inline__ int
58124+gr_acl_is_enabled(void)
58125+{
58126+ return (gr_status & GR_READY);
58127+}
58128+
58129+#ifdef CONFIG_BTRFS_FS
58130+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
58131+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
58132+#endif
58133+
58134+static inline dev_t __get_dev(const struct dentry *dentry)
58135+{
58136+#ifdef CONFIG_BTRFS_FS
58137+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
58138+ return get_btrfs_dev_from_inode(dentry->d_inode);
58139+ else
58140+#endif
58141+ return dentry->d_inode->i_sb->s_dev;
58142+}
58143+
58144+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
58145+{
58146+ return __get_dev(dentry);
58147+}
58148+
58149+static char gr_task_roletype_to_char(struct task_struct *task)
58150+{
58151+ switch (task->role->roletype &
58152+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
58153+ GR_ROLE_SPECIAL)) {
58154+ case GR_ROLE_DEFAULT:
58155+ return 'D';
58156+ case GR_ROLE_USER:
58157+ return 'U';
58158+ case GR_ROLE_GROUP:
58159+ return 'G';
58160+ case GR_ROLE_SPECIAL:
58161+ return 'S';
58162+ }
58163+
58164+ return 'X';
58165+}
58166+
58167+char gr_roletype_to_char(void)
58168+{
58169+ return gr_task_roletype_to_char(current);
58170+}
58171+
58172+__inline__ int
58173+gr_acl_tpe_check(void)
58174+{
58175+ if (unlikely(!(gr_status & GR_READY)))
58176+ return 0;
58177+ if (current->role->roletype & GR_ROLE_TPE)
58178+ return 1;
58179+ else
58180+ return 0;
58181+}
58182+
58183+int
58184+gr_handle_rawio(const struct inode *inode)
58185+{
58186+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
58187+ if (inode && S_ISBLK(inode->i_mode) &&
58188+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
58189+ !capable(CAP_SYS_RAWIO))
58190+ return 1;
58191+#endif
58192+ return 0;
58193+}
58194+
58195+static int
58196+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
58197+{
58198+ if (likely(lena != lenb))
58199+ return 0;
58200+
58201+ return !memcmp(a, b, lena);
58202+}
58203+
58204+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
58205+{
58206+ *buflen -= namelen;
58207+ if (*buflen < 0)
58208+ return -ENAMETOOLONG;
58209+ *buffer -= namelen;
58210+ memcpy(*buffer, str, namelen);
58211+ return 0;
58212+}
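+
+/* the buffer is filled right-to-left: e.g. prepending "bar", then "/", then
+   "foo", then "/" leaves "/foo/bar" packed at the end of the buffer (callers
+   prepend the terminating NUL first) */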
58213+
58214+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
58215+{
58216+ return prepend(buffer, buflen, name->name, name->len);
58217+}
58218+
58219+static int prepend_path(const struct path *path, struct path *root,
58220+ char **buffer, int *buflen)
58221+{
58222+ struct dentry *dentry = path->dentry;
58223+ struct vfsmount *vfsmnt = path->mnt;
58224+ struct mount *mnt = real_mount(vfsmnt);
58225+ bool slash = false;
58226+ int error = 0;
58227+
58228+ while (dentry != root->dentry || vfsmnt != root->mnt) {
58229+ struct dentry * parent;
58230+
58231+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
58232+ /* Global root? */
58233+ if (!mnt_has_parent(mnt)) {
58234+ goto out;
58235+ }
58236+ dentry = mnt->mnt_mountpoint;
58237+ mnt = mnt->mnt_parent;
58238+ vfsmnt = &mnt->mnt;
58239+ continue;
58240+ }
58241+ parent = dentry->d_parent;
58242+ prefetch(parent);
58243+ spin_lock(&dentry->d_lock);
58244+ error = prepend_name(buffer, buflen, &dentry->d_name);
58245+ spin_unlock(&dentry->d_lock);
58246+ if (!error)
58247+ error = prepend(buffer, buflen, "/", 1);
58248+ if (error)
58249+ break;
58250+
58251+ slash = true;
58252+ dentry = parent;
58253+ }
58254+
58255+out:
58256+ if (!error && !slash)
58257+ error = prepend(buffer, buflen, "/", 1);
58258+
58259+ return error;
58260+}
58261+
58262+/* this must be called with vfsmount_lock and rename_lock held */
58263+
58264+static char *__our_d_path(const struct path *path, struct path *root,
58265+ char *buf, int buflen)
58266+{
58267+ char *res = buf + buflen;
58268+ int error;
58269+
58270+ prepend(&res, &buflen, "\0", 1);
58271+ error = prepend_path(path, root, &res, &buflen);
58272+ if (error)
58273+ return ERR_PTR(error);
58274+
58275+ return res;
58276+}
58277+
58278+static char *
58279+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
58280+{
58281+ char *retval;
58282+
58283+ retval = __our_d_path(path, root, buf, buflen);
58284+ if (unlikely(IS_ERR(retval)))
58285+ retval = strcpy(buf, "<path too long>");
58286+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
58287+ retval[1] = '\0';
58288+
58289+ return retval;
58290+}
58291+
58292+static char *
58293+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
58294+ char *buf, int buflen)
58295+{
58296+ struct path path;
58297+ char *res;
58298+
58299+ path.dentry = (struct dentry *)dentry;
58300+ path.mnt = (struct vfsmount *)vfsmnt;
58301+
58302+ /* we can use real_root.dentry, real_root.mnt, because this is only called
58303+ by the RBAC system */
58304+ res = gen_full_path(&path, &real_root, buf, buflen);
58305+
58306+ return res;
58307+}
58308+
58309+static char *
58310+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
58311+ char *buf, int buflen)
58312+{
58313+ char *res;
58314+ struct path path;
58315+ struct path root;
58316+ struct task_struct *reaper = init_pid_ns.child_reaper;
58317+
58318+ path.dentry = (struct dentry *)dentry;
58319+ path.mnt = (struct vfsmount *)vfsmnt;
58320+
58321+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
58322+ get_fs_root(reaper->fs, &root);
58323+
58324+ br_read_lock(&vfsmount_lock);
58325+ write_seqlock(&rename_lock);
58326+ res = gen_full_path(&path, &root, buf, buflen);
58327+ write_sequnlock(&rename_lock);
58328+ br_read_unlock(&vfsmount_lock);
58329+
58330+ path_put(&root);
58331+ return res;
58332+}
58333+
58334+static char *
58335+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
58336+{
58337+ char *ret;
58338+ br_read_lock(&vfsmount_lock);
58339+ write_seqlock(&rename_lock);
58340+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
58341+ PAGE_SIZE);
58342+ write_sequnlock(&rename_lock);
58343+ br_read_unlock(&vfsmount_lock);
58344+ return ret;
58345+}
58346+
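+/* variant used for /proc entries: six bytes of the shared page are held back
+   so the 5-byte "/proc" prefix can be prepended in place below */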
58347+static char *
58348+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
58349+{
58350+ char *ret;
58351+ char *buf;
58352+ int buflen;
58353+
58354+ br_read_lock(&vfsmount_lock);
58355+ write_seqlock(&rename_lock);
58356+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
58357+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
58358+ buflen = (int)(ret - buf);
58359+ if (buflen >= 5)
58360+ prepend(&ret, &buflen, "/proc", 5);
58361+ else
58362+ ret = strcpy(buf, "<path too long>");
58363+ write_sequnlock(&rename_lock);
58364+ br_read_unlock(&vfsmount_lock);
58365+ return ret;
58366+}
58367+
58368+char *
58369+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
58370+{
58371+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
58372+ PAGE_SIZE);
58373+}
58374+
58375+char *
58376+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
58377+{
58378+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
58379+ PAGE_SIZE);
58380+}
58381+
58382+char *
58383+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
58384+{
58385+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
58386+ PAGE_SIZE);
58387+}
58388+
58389+char *
58390+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
58391+{
58392+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
58393+ PAGE_SIZE);
58394+}
58395+
58396+char *
58397+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
58398+{
58399+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
58400+ PAGE_SIZE);
58401+}
58402+
58403+__inline__ __u32
58404+to_gr_audit(const __u32 reqmode)
58405+{
58406+	/* masks off the already-set audit flags, shifts the remaining
58407+	   permission flags up into their audit positions, and adds the special
58408+	   case of append auditing if we're requesting write */
58409+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
58410+}
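+
+/* illustrative, assuming (as the shift implies) each GR_AUDIT_* flag sits 10
+   bits above its GR_* permission: a GR_READ|GR_WRITE request maps to
+   GR_AUDIT_READ|GR_AUDIT_WRITE|GR_AUDIT_APPEND */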
58411+
58412+struct acl_subject_label *
58413+lookup_subject_map(const struct acl_subject_label *userp)
58414+{
58415+ unsigned int index = gr_shash(userp, subj_map_set.s_size);
58416+ struct subject_map *match;
58417+
58418+ match = subj_map_set.s_hash[index];
58419+
58420+ while (match && match->user != userp)
58421+ match = match->next;
58422+
58423+ if (match != NULL)
58424+ return match->kernel;
58425+ else
58426+ return NULL;
58427+}
58428+
58429+static void
58430+insert_subj_map_entry(struct subject_map *subjmap)
58431+{
58432+ unsigned int index = gr_shash(subjmap->user, subj_map_set.s_size);
58433+ struct subject_map **curr;
58434+
58435+ subjmap->prev = NULL;
58436+
58437+ curr = &subj_map_set.s_hash[index];
58438+ if (*curr != NULL)
58439+ (*curr)->prev = subjmap;
58440+
58441+ subjmap->next = *curr;
58442+ *curr = subjmap;
58443+
58444+ return;
58445+}
58446+
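+/* resolution order: a matching user role (or user domain) wins, then a group
+   role (or group domain), then the default role; a role carrying an
+   allowed_ips list only applies when the task's IP matches, otherwise we fall
+   through to the next class */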
58447+static struct acl_role_label *
58448+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
58449+ const gid_t gid)
58450+{
58451+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
58452+ struct acl_role_label *match;
58453+ struct role_allowed_ip *ipp;
58454+ unsigned int x;
58455+ u32 curr_ip = task->signal->curr_ip;
58456+
58457+ task->signal->saved_ip = curr_ip;
58458+
58459+ match = acl_role_set.r_hash[index];
58460+
58461+ while (match) {
58462+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
58463+ for (x = 0; x < match->domain_child_num; x++) {
58464+ if (match->domain_children[x] == uid)
58465+ goto found;
58466+ }
58467+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
58468+ break;
58469+ match = match->next;
58470+ }
58471+found:
58472+ if (match == NULL) {
58473+ try_group:
58474+ index = gr_rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
58475+ match = acl_role_set.r_hash[index];
58476+
58477+ while (match) {
58478+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
58479+ for (x = 0; x < match->domain_child_num; x++) {
58480+ if (match->domain_children[x] == gid)
58481+ goto found2;
58482+ }
58483+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
58484+ break;
58485+ match = match->next;
58486+ }
58487+found2:
58488+ if (match == NULL)
58489+ match = default_role;
58490+ if (match->allowed_ips == NULL)
58491+ return match;
58492+ else {
58493+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
58494+ if (likely
58495+ ((ntohl(curr_ip) & ipp->netmask) ==
58496+ (ntohl(ipp->addr) & ipp->netmask)))
58497+ return match;
58498+ }
58499+ match = default_role;
58500+ }
58501+ } else if (match->allowed_ips == NULL) {
58502+ return match;
58503+ } else {
58504+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
58505+ if (likely
58506+ ((ntohl(curr_ip) & ipp->netmask) ==
58507+ (ntohl(ipp->addr) & ipp->netmask)))
58508+ return match;
58509+ }
58510+ goto try_group;
58511+ }
58512+
58513+ return match;
58514+}
58515+
58516+struct acl_subject_label *
58517+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
58518+ const struct acl_role_label *role)
58519+{
58520+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
58521+ struct acl_subject_label *match;
58522+
58523+ match = role->subj_hash[index];
58524+
58525+ while (match && (match->inode != ino || match->device != dev ||
58526+ (match->mode & GR_DELETED))) {
58527+ match = match->next;
58528+ }
58529+
58530+ if (match && !(match->mode & GR_DELETED))
58531+ return match;
58532+ else
58533+ return NULL;
58534+}
58535+
58536+struct acl_subject_label *
58537+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
58538+ const struct acl_role_label *role)
58539+{
58540+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
58541+ struct acl_subject_label *match;
58542+
58543+ match = role->subj_hash[index];
58544+
58545+ while (match && (match->inode != ino || match->device != dev ||
58546+ !(match->mode & GR_DELETED))) {
58547+ match = match->next;
58548+ }
58549+
58550+ if (match && (match->mode & GR_DELETED))
58551+ return match;
58552+ else
58553+ return NULL;
58554+}
58555+
58556+static struct acl_object_label *
58557+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
58558+ const struct acl_subject_label *subj)
58559+{
58560+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
58561+ struct acl_object_label *match;
58562+
58563+ match = subj->obj_hash[index];
58564+
58565+ while (match && (match->inode != ino || match->device != dev ||
58566+ (match->mode & GR_DELETED))) {
58567+ match = match->next;
58568+ }
58569+
58570+ if (match && !(match->mode & GR_DELETED))
58571+ return match;
58572+ else
58573+ return NULL;
58574+}
58575+
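+/* create-time variant: an object rule flagged GR_DELETED (its file has been
+   removed) takes precedence, since the create is about to replace that name;
+   only then do we fall back to a live, non-deleted match */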
58576+static struct acl_object_label *
58577+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
58578+ const struct acl_subject_label *subj)
58579+{
58580+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
58581+ struct acl_object_label *match;
58582+
58583+ match = subj->obj_hash[index];
58584+
58585+ while (match && (match->inode != ino || match->device != dev ||
58586+ !(match->mode & GR_DELETED))) {
58587+ match = match->next;
58588+ }
58589+
58590+ if (match && (match->mode & GR_DELETED))
58591+ return match;
58592+
58593+ match = subj->obj_hash[index];
58594+
58595+ while (match && (match->inode != ino || match->device != dev ||
58596+ (match->mode & GR_DELETED))) {
58597+ match = match->next;
58598+ }
58599+
58600+ if (match && !(match->mode & GR_DELETED))
58601+ return match;
58602+ else
58603+ return NULL;
58604+}
58605+
58606+static struct name_entry *
58607+lookup_name_entry(const char *name)
58608+{
58609+ unsigned int len = strlen(name);
58610+ unsigned int key = full_name_hash(name, len);
58611+ unsigned int index = key % name_set.n_size;
58612+ struct name_entry *match;
58613+
58614+ match = name_set.n_hash[index];
58615+
58616+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
58617+ match = match->next;
58618+
58619+ return match;
58620+}
58621+
58622+static struct name_entry *
58623+lookup_name_entry_create(const char *name)
58624+{
58625+ unsigned int len = strlen(name);
58626+ unsigned int key = full_name_hash(name, len);
58627+ unsigned int index = key % name_set.n_size;
58628+ struct name_entry *match;
58629+
58630+ match = name_set.n_hash[index];
58631+
58632+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
58633+ !match->deleted))
58634+ match = match->next;
58635+
58636+ if (match && match->deleted)
58637+ return match;
58638+
58639+ match = name_set.n_hash[index];
58640+
58641+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
58642+ match->deleted))
58643+ match = match->next;
58644+
58645+ if (match && !match->deleted)
58646+ return match;
58647+ else
58648+ return NULL;
58649+}
58650+
58651+static struct inodev_entry *
58652+lookup_inodev_entry(const ino_t ino, const dev_t dev)
58653+{
58654+ unsigned int index = gr_fhash(ino, dev, inodev_set.i_size);
58655+ struct inodev_entry *match;
58656+
58657+ match = inodev_set.i_hash[index];
58658+
58659+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
58660+ match = match->next;
58661+
58662+ return match;
58663+}
58664+
58665+static void
58666+insert_inodev_entry(struct inodev_entry *entry)
58667+{
58668+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
58669+ inodev_set.i_size);
58670+ struct inodev_entry **curr;
58671+
58672+ entry->prev = NULL;
58673+
58674+ curr = &inodev_set.i_hash[index];
58675+ if (*curr != NULL)
58676+ (*curr)->prev = entry;
58677+
58678+ entry->next = *curr;
58679+ *curr = entry;
58680+
58681+ return;
58682+}
58683+
58684+static void
58685+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
58686+{
58687+ unsigned int index =
58688+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
58689+ struct acl_role_label **curr;
58690+ struct acl_role_label *tmp, *tmp2;
58691+
58692+ curr = &acl_role_set.r_hash[index];
58693+
58694+ /* simple case, slot is empty, just set it to our role */
58695+ if (*curr == NULL) {
58696+ *curr = role;
58697+ } else {
58698+ /* example:
58699+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
58700+ 2 -> 3
58701+ */
58702+ /* first check to see if we can already be reached via this slot */
58703+ tmp = *curr;
58704+ while (tmp && tmp != role)
58705+ tmp = tmp->next;
58706+ if (tmp == role) {
58707+ /* we don't need to add ourselves to this slot's chain */
58708+ return;
58709+ }
58710+ /* we need to add ourselves to this chain, two cases */
58711+ if (role->next == NULL) {
58712+ /* simple case, append the current chain to our role */
58713+ role->next = *curr;
58714+ *curr = role;
58715+ } else {
58716+ /* 1 -> 2 -> 3 -> 4
58717+ 2 -> 3 -> 4
58718+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
58719+ */
58720+ /* trickier case: walk our role's chain until we find
58721+ the role for the start of the current slot's chain */
58722+ tmp = role;
58723+ tmp2 = *curr;
58724+ while (tmp->next && tmp->next != tmp2)
58725+ tmp = tmp->next;
58726+ if (tmp->next == tmp2) {
58727+ /* from example above, we found 3, so just
58728+ replace this slot's chain with ours */
58729+ *curr = role;
58730+ } else {
58731+ /* we didn't find a subset of our role's chain
58732+ in the current slot's chain, so append their
58733+ chain to ours, and set us as the first role in
58734+ the slot's chain
58735+
58736+ we could fold this case with the case above,
58737+ but making it explicit for clarity
58738+ */
58739+ tmp->next = tmp2;
58740+ *curr = role;
58741+ }
58742+ }
58743+ }
58744+
58745+ return;
58746+}
58747+
58748+static void
58749+insert_acl_role_label(struct acl_role_label *role)
58750+{
58751+ int i;
58752+
58753+ if (role_list == NULL) {
58754+ role_list = role;
58755+ role->prev = NULL;
58756+ } else {
58757+ role->prev = role_list;
58758+ role_list = role;
58759+ }
58760+
58761+ /* used for hash chains */
58762+ role->next = NULL;
58763+
58764+ if (role->roletype & GR_ROLE_DOMAIN) {
58765+ for (i = 0; i < role->domain_child_num; i++)
58766+ __insert_acl_role_label(role, role->domain_children[i]);
58767+ } else
58768+ __insert_acl_role_label(role, role->uidgid);
58769+}
58770+
58771+static int
58772+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
58773+{
58774+ struct name_entry **curr, *nentry;
58775+ struct inodev_entry *ientry;
58776+ unsigned int len = strlen(name);
58777+ unsigned int key = full_name_hash(name, len);
58778+ unsigned int index = key % name_set.n_size;
58779+
58780+ curr = &name_set.n_hash[index];
58781+
58782+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
58783+ curr = &((*curr)->next);
58784+
58785+ if (*curr != NULL)
58786+ return 1;
58787+
58788+ nentry = acl_alloc(sizeof (struct name_entry));
58789+ if (nentry == NULL)
58790+ return 0;
58791+ ientry = acl_alloc(sizeof (struct inodev_entry));
58792+ if (ientry == NULL)
58793+ return 0;
58794+ ientry->nentry = nentry;
58795+
58796+ nentry->key = key;
58797+ nentry->name = name;
58798+ nentry->inode = inode;
58799+ nentry->device = device;
58800+ nentry->len = len;
58801+ nentry->deleted = deleted;
58802+
58803+ nentry->prev = NULL;
58804+ curr = &name_set.n_hash[index];
58805+ if (*curr != NULL)
58806+ (*curr)->prev = nentry;
58807+ nentry->next = *curr;
58808+ *curr = nentry;
58809+
58810+ /* insert us into the table searchable by inode/dev */
58811+ insert_inodev_entry(ientry);
58812+
58813+ return 1;
58814+}
58815+
58816+static void
58817+insert_acl_obj_label(struct acl_object_label *obj,
58818+ struct acl_subject_label *subj)
58819+{
58820+ unsigned int index =
58821+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
58822+ struct acl_object_label **curr;
58823+
58824+
58825+ obj->prev = NULL;
58826+
58827+ curr = &subj->obj_hash[index];
58828+ if (*curr != NULL)
58829+ (*curr)->prev = obj;
58830+
58831+ obj->next = *curr;
58832+ *curr = obj;
58833+
58834+ return;
58835+}
58836+
58837+static void
58838+insert_acl_subj_label(struct acl_subject_label *obj,
58839+ struct acl_role_label *role)
58840+{
58841+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
58842+ struct acl_subject_label **curr;
58843+
58844+ obj->prev = NULL;
58845+
58846+ curr = &role->subj_hash[index];
58847+ if (*curr != NULL)
58848+ (*curr)->prev = obj;
58849+
58850+ obj->next = *curr;
58851+ *curr = obj;
58852+
58853+ return;
58854+}
58855+
58856+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
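+/* e.g. a request for 1000 entries is rounded up to the 1021-slot prime table;
+   requests beyond the largest prime, or whose size in bytes would overflow,
+   yield NULL */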
58857+
58858+static void *
58859+create_table(__u32 * len, int elementsize)
58860+{
58861+ unsigned int table_sizes[] = {
58862+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
58863+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
58864+ 4194301, 8388593, 16777213, 33554393, 67108859
58865+ };
58866+ void *newtable = NULL;
58867+ unsigned int pwr = 0;
58868+
58869+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
58870+ table_sizes[pwr] <= *len)
58871+ pwr++;
58872+
58873+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
58874+ return newtable;
58875+
58876+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
58877+ newtable =
58878+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
58879+ else
58880+ newtable = vmalloc(table_sizes[pwr] * elementsize);
58881+
58882+ *len = table_sizes[pwr];
58883+
58884+ return newtable;
58885+}
58886+
58887+static int
58888+init_variables(const struct gr_arg *arg)
58889+{
58890+ struct task_struct *reaper = init_pid_ns.child_reaper;
58891+ unsigned int stacksize;
58892+
58893+ subj_map_set.s_size = arg->role_db.num_subjects;
58894+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
58895+ name_set.n_size = arg->role_db.num_objects;
58896+ inodev_set.i_size = arg->role_db.num_objects;
58897+
58898+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
58899+ !name_set.n_size || !inodev_set.i_size)
58900+ return 1;
58901+
58902+ if (!gr_init_uidset())
58903+ return 1;
58904+
58905+ /* set up the stack that holds allocation info */
58906+
58907+ stacksize = arg->role_db.num_pointers + 5;
58908+
58909+ if (!acl_alloc_stack_init(stacksize))
58910+ return 1;
58911+
58912+ /* grab reference for the real root dentry and vfsmount */
58913+ get_fs_root(reaper->fs, &real_root);
58914+
58915+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58916+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
58917+#endif
58918+
58919+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
58920+ if (fakefs_obj_rw == NULL)
58921+ return 1;
58922+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
58923+
58924+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
58925+ if (fakefs_obj_rwx == NULL)
58926+ return 1;
58927+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
58928+
58929+ subj_map_set.s_hash =
58930+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
58931+ acl_role_set.r_hash =
58932+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
58933+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
58934+ inodev_set.i_hash =
58935+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
58936+
58937+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
58938+ !name_set.n_hash || !inodev_set.i_hash)
58939+ return 1;
58940+
58941+ memset(subj_map_set.s_hash, 0,
58942+ sizeof(struct subject_map *) * subj_map_set.s_size);
58943+ memset(acl_role_set.r_hash, 0,
58944+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
58945+ memset(name_set.n_hash, 0,
58946+ sizeof (struct name_entry *) * name_set.n_size);
58947+ memset(inodev_set.i_hash, 0,
58948+ sizeof (struct inodev_entry *) * inodev_set.i_size);
58949+
58950+ return 0;
58951+}
58952+
58953+/* free information not needed after startup;
58954+   currently this is the user -> kernel pointer mapping for subjects
58955+*/
58956+
58957+static void
58958+free_init_variables(void)
58959+{
58960+ __u32 i;
58961+
58962+ if (subj_map_set.s_hash) {
58963+ for (i = 0; i < subj_map_set.s_size; i++) {
58964+ if (subj_map_set.s_hash[i]) {
58965+ kfree(subj_map_set.s_hash[i]);
58966+ subj_map_set.s_hash[i] = NULL;
58967+ }
58968+ }
58969+
58970+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
58971+ PAGE_SIZE)
58972+ kfree(subj_map_set.s_hash);
58973+ else
58974+ vfree(subj_map_set.s_hash);
58975+ }
58976+
58977+ return;
58978+}
58979+
58980+static void
58981+free_variables(void)
58982+{
58983+ struct acl_subject_label *s;
58984+ struct acl_role_label *r;
58985+ struct task_struct *task, *task2;
58986+ unsigned int x;
58987+
58988+ gr_clear_learn_entries();
58989+
58990+ read_lock(&tasklist_lock);
58991+ do_each_thread(task2, task) {
58992+ task->acl_sp_role = 0;
58993+ task->acl_role_id = 0;
58994+ task->acl = NULL;
58995+ task->role = NULL;
58996+ } while_each_thread(task2, task);
58997+ read_unlock(&tasklist_lock);
58998+
58999+ /* release the reference to the real root dentry and vfsmount */
59000+ path_put(&real_root);
59001+ memset(&real_root, 0, sizeof(real_root));
59002+
59003+ /* free all object hash tables */
59004+
59005+ FOR_EACH_ROLE_START(r)
59006+ if (r->subj_hash == NULL)
59007+ goto next_role;
59008+ FOR_EACH_SUBJECT_START(r, s, x)
59009+ if (s->obj_hash == NULL)
59010+ break;
59011+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
59012+ kfree(s->obj_hash);
59013+ else
59014+ vfree(s->obj_hash);
59015+ FOR_EACH_SUBJECT_END(s, x)
59016+ FOR_EACH_NESTED_SUBJECT_START(r, s)
59017+ if (s->obj_hash == NULL)
59018+ break;
59019+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
59020+ kfree(s->obj_hash);
59021+ else
59022+ vfree(s->obj_hash);
59023+ FOR_EACH_NESTED_SUBJECT_END(s)
59024+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
59025+ kfree(r->subj_hash);
59026+ else
59027+ vfree(r->subj_hash);
59028+ r->subj_hash = NULL;
59029+next_role:
59030+ FOR_EACH_ROLE_END(r)
59031+
59032+ acl_free_all();
59033+
59034+ if (acl_role_set.r_hash) {
59035+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
59036+ PAGE_SIZE)
59037+ kfree(acl_role_set.r_hash);
59038+ else
59039+ vfree(acl_role_set.r_hash);
59040+ }
59041+ if (name_set.n_hash) {
59042+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
59043+ PAGE_SIZE)
59044+ kfree(name_set.n_hash);
59045+ else
59046+ vfree(name_set.n_hash);
59047+ }
59048+
59049+ if (inodev_set.i_hash) {
59050+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
59051+ PAGE_SIZE)
59052+ kfree(inodev_set.i_hash);
59053+ else
59054+ vfree(inodev_set.i_hash);
59055+ }
59056+
59057+ gr_free_uidset();
59058+
59059+ memset(&name_set, 0, sizeof (struct name_db));
59060+ memset(&inodev_set, 0, sizeof (struct inodev_db));
59061+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
59062+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
59063+
59064+ default_role = NULL;
59065+ kernel_role = NULL;
59066+ role_list = NULL;
59067+
59068+ return;
59069+}
59070+
59071+static __u32
59072+count_user_objs(struct acl_object_label *userp)
59073+{
59074+ struct acl_object_label o_tmp;
59075+ __u32 num = 0;
59076+
59077+ while (userp) {
59078+ if (copy_from_user(&o_tmp, userp,
59079+ sizeof (struct acl_object_label)))
59080+ break;
59081+
59082+ userp = o_tmp.prev;
59083+ num++;
59084+ }
59085+
59086+ return num;
59087+}
59088+
59089+static struct acl_subject_label *
59090+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
59091+
59092+static int
59093+copy_user_glob(struct acl_object_label *obj)
59094+{
59095+ struct acl_object_label *g_tmp, **guser;
59096+ unsigned int len;
59097+ char *tmp;
59098+
59099+ if (obj->globbed == NULL)
59100+ return 0;
59101+
59102+ guser = &obj->globbed;
59103+ while (*guser) {
59104+ g_tmp = (struct acl_object_label *)
59105+ acl_alloc(sizeof (struct acl_object_label));
59106+ if (g_tmp == NULL)
59107+ return -ENOMEM;
59108+
59109+ if (copy_from_user(g_tmp, *guser,
59110+ sizeof (struct acl_object_label)))
59111+ return -EFAULT;
59112+
59113+ len = strnlen_user(g_tmp->filename, PATH_MAX);
59114+
59115+ if (!len || len >= PATH_MAX)
59116+ return -EINVAL;
59117+
59118+ if ((tmp = (char *) acl_alloc(len)) == NULL)
59119+ return -ENOMEM;
59120+
59121+ if (copy_from_user(tmp, g_tmp->filename, len))
59122+ return -EFAULT;
59123+ tmp[len-1] = '\0';
59124+ g_tmp->filename = tmp;
59125+
59126+ *guser = g_tmp;
59127+ guser = &(g_tmp->next);
59128+ }
59129+
59130+ return 0;
59131+}
59132+
59133+static int
59134+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
59135+ struct acl_role_label *role)
59136+{
59137+ struct acl_object_label *o_tmp;
59138+ unsigned int len;
59139+ int ret;
59140+ char *tmp;
59141+
59142+ while (userp) {
59143+ if ((o_tmp = (struct acl_object_label *)
59144+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
59145+ return -ENOMEM;
59146+
59147+ if (copy_from_user(o_tmp, userp,
59148+ sizeof (struct acl_object_label)))
59149+ return -EFAULT;
59150+
59151+ userp = o_tmp->prev;
59152+
59153+ len = strnlen_user(o_tmp->filename, PATH_MAX);
59154+
59155+ if (!len || len >= PATH_MAX)
59156+ return -EINVAL;
59157+
59158+ if ((tmp = (char *) acl_alloc(len)) == NULL)
59159+ return -ENOMEM;
59160+
59161+ if (copy_from_user(tmp, o_tmp->filename, len))
59162+ return -EFAULT;
59163+ tmp[len-1] = '\0';
59164+ o_tmp->filename = tmp;
59165+
59166+ insert_acl_obj_label(o_tmp, subj);
59167+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
59168+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
59169+ return -ENOMEM;
59170+
59171+ ret = copy_user_glob(o_tmp);
59172+ if (ret)
59173+ return ret;
59174+
59175+ if (o_tmp->nested) {
59176+ int already_copied;
59177+
59178+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
59179+ if (IS_ERR(o_tmp->nested))
59180+ return PTR_ERR(o_tmp->nested);
59181+
59182+ /* insert into nested subject list if we haven't copied this one yet
59183+ to prevent duplicate entries */
59184+ if (!already_copied) {
59185+ o_tmp->nested->next = role->hash->first;
59186+ role->hash->first = o_tmp->nested;
59187+ }
59188+ }
59189+ }
59190+
59191+ return 0;
59192+}
59193+
59194+static __u32
59195+count_user_subjs(struct acl_subject_label *userp)
59196+{
59197+ struct acl_subject_label s_tmp;
59198+ __u32 num = 0;
59199+
59200+ while (userp) {
59201+ if (copy_from_user(&s_tmp, userp,
59202+ sizeof (struct acl_subject_label)))
59203+ break;
59204+
59205+		userp = s_tmp.prev;
+		num++;
59206+ }
59207+
59208+ return num;
59209+}
59210+
59211+static int
59212+copy_user_allowedips(struct acl_role_label *rolep)
59213+{
59214+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
59215+
59216+ ruserip = rolep->allowed_ips;
59217+
59218+ while (ruserip) {
59219+ rlast = rtmp;
59220+
59221+ if ((rtmp = (struct role_allowed_ip *)
59222+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
59223+ return -ENOMEM;
59224+
59225+ if (copy_from_user(rtmp, ruserip,
59226+ sizeof (struct role_allowed_ip)))
59227+ return -EFAULT;
59228+
59229+ ruserip = rtmp->prev;
59230+
59231+ if (!rlast) {
59232+ rtmp->prev = NULL;
59233+ rolep->allowed_ips = rtmp;
59234+ } else {
59235+ rlast->next = rtmp;
59236+ rtmp->prev = rlast;
59237+ }
59238+
59239+ if (!ruserip)
59240+ rtmp->next = NULL;
59241+ }
59242+
59243+ return 0;
59244+}
59245+
59246+static int
59247+copy_user_transitions(struct acl_role_label *rolep)
59248+{
59249+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
59250+
59251+ unsigned int len;
59252+ char *tmp;
59253+
59254+ rusertp = rolep->transitions;
59255+
59256+ while (rusertp) {
59257+ rlast = rtmp;
59258+
59259+ if ((rtmp = (struct role_transition *)
59260+ acl_alloc(sizeof (struct role_transition))) == NULL)
59261+ return -ENOMEM;
59262+
59263+ if (copy_from_user(rtmp, rusertp,
59264+ sizeof (struct role_transition)))
59265+ return -EFAULT;
59266+
59267+ rusertp = rtmp->prev;
59268+
59269+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
59270+
59271+ if (!len || len >= GR_SPROLE_LEN)
59272+ return -EINVAL;
59273+
59274+ if ((tmp = (char *) acl_alloc(len)) == NULL)
59275+ return -ENOMEM;
59276+
59277+ if (copy_from_user(tmp, rtmp->rolename, len))
59278+ return -EFAULT;
59279+ tmp[len-1] = '\0';
59280+ rtmp->rolename = tmp;
59281+
59282+ if (!rlast) {
59283+ rtmp->prev = NULL;
59284+ rolep->transitions = rtmp;
59285+ } else {
59286+ rlast->next = rtmp;
59287+ rtmp->prev = rlast;
59288+ }
59289+
59290+ if (!rusertp)
59291+ rtmp->next = NULL;
59292+ }
59293+
59294+ return 0;
59295+}
59296+
59297+static struct acl_subject_label *
59298+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
59299+{
59300+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
59301+ unsigned int len;
59302+ char *tmp;
59303+ __u32 num_objs;
59304+ struct acl_ip_label **i_tmp, *i_utmp2;
59305+ struct gr_hash_struct ghash;
59306+ struct subject_map *subjmap;
59307+ unsigned int i_num;
59308+ int err;
59309+
59310+ if (already_copied != NULL)
59311+ *already_copied = 0;
59312+
59313+ s_tmp = lookup_subject_map(userp);
59314+
59315+ /* we've already copied this subject into the kernel, just return
59316+ the reference to it, and don't copy it over again
59317+ */
59318+ if (s_tmp) {
59319+ if (already_copied != NULL)
59320+ *already_copied = 1;
59321+ return(s_tmp);
59322+ }
59323+
59324+ if ((s_tmp = (struct acl_subject_label *)
59325+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
59326+ return ERR_PTR(-ENOMEM);
59327+
59328+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
59329+ if (subjmap == NULL)
59330+ return ERR_PTR(-ENOMEM);
59331+
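+	/* enter the user -> kernel mapping before filling in the copy, so the
+	   recursive copies below (parent_subject, nested objects) resolve back
+	   to this subject via lookup_subject_map() instead of recursing forever */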
59332+ subjmap->user = userp;
59333+ subjmap->kernel = s_tmp;
59334+ insert_subj_map_entry(subjmap);
59335+
59336+ if (copy_from_user(s_tmp, userp,
59337+ sizeof (struct acl_subject_label)))
59338+ return ERR_PTR(-EFAULT);
59339+
59340+ len = strnlen_user(s_tmp->filename, PATH_MAX);
59341+
59342+ if (!len || len >= PATH_MAX)
59343+ return ERR_PTR(-EINVAL);
59344+
59345+ if ((tmp = (char *) acl_alloc(len)) == NULL)
59346+ return ERR_PTR(-ENOMEM);
59347+
59348+ if (copy_from_user(tmp, s_tmp->filename, len))
59349+ return ERR_PTR(-EFAULT);
59350+ tmp[len-1] = '\0';
59351+ s_tmp->filename = tmp;
59352+
59353+ if (!strcmp(s_tmp->filename, "/"))
59354+ role->root_label = s_tmp;
59355+
59356+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
59357+ return ERR_PTR(-EFAULT);
59358+
59359+ /* copy user and group transition tables */
59360+
59361+ if (s_tmp->user_trans_num) {
59362+ uid_t *uidlist;
59363+
59364+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
59365+ if (uidlist == NULL)
59366+ return ERR_PTR(-ENOMEM);
59367+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
59368+ return ERR_PTR(-EFAULT);
59369+
59370+ s_tmp->user_transitions = uidlist;
59371+ }
59372+
59373+ if (s_tmp->group_trans_num) {
59374+ gid_t *gidlist;
59375+
59376+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
59377+ if (gidlist == NULL)
59378+ return ERR_PTR(-ENOMEM);
59379+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
59380+ return ERR_PTR(-EFAULT);
59381+
59382+ s_tmp->group_transitions = gidlist;
59383+ }
59384+
59385+ /* set up object hash table */
59386+ num_objs = count_user_objs(ghash.first);
59387+
59388+ s_tmp->obj_hash_size = num_objs;
59389+ s_tmp->obj_hash =
59390+ (struct acl_object_label **)
59391+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
59392+
59393+ if (!s_tmp->obj_hash)
59394+ return ERR_PTR(-ENOMEM);
59395+
59396+ memset(s_tmp->obj_hash, 0,
59397+ s_tmp->obj_hash_size *
59398+ sizeof (struct acl_object_label *));
59399+
59400+ /* add in objects */
59401+ err = copy_user_objs(ghash.first, s_tmp, role);
59402+
59403+ if (err)
59404+ return ERR_PTR(err);
59405+
59406+ /* set pointer for parent subject */
59407+ if (s_tmp->parent_subject) {
59408+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
59409+
59410+ if (IS_ERR(s_tmp2))
59411+ return s_tmp2;
59412+
59413+ s_tmp->parent_subject = s_tmp2;
59414+ }
59415+
59416+ /* add in ip acls */
59417+
59418+ if (!s_tmp->ip_num) {
59419+ s_tmp->ips = NULL;
59420+ goto insert;
59421+ }
59422+
59423+ i_tmp =
59424+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
59425+ sizeof (struct acl_ip_label *));
59426+
59427+ if (!i_tmp)
59428+ return ERR_PTR(-ENOMEM);
59429+
59430+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
59431+ *(i_tmp + i_num) =
59432+ (struct acl_ip_label *)
59433+ acl_alloc(sizeof (struct acl_ip_label));
59434+ if (!*(i_tmp + i_num))
59435+ return ERR_PTR(-ENOMEM);
59436+
59437+ if (copy_from_user
59438+ (&i_utmp2, s_tmp->ips + i_num,
59439+ sizeof (struct acl_ip_label *)))
59440+ return ERR_PTR(-EFAULT);
59441+
59442+ if (copy_from_user
59443+ (*(i_tmp + i_num), i_utmp2,
59444+ sizeof (struct acl_ip_label)))
59445+ return ERR_PTR(-EFAULT);
59446+
59447+ if ((*(i_tmp + i_num))->iface == NULL)
59448+ continue;
59449+
59450+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
59451+ if (!len || len >= IFNAMSIZ)
59452+ return ERR_PTR(-EINVAL);
59453+ tmp = acl_alloc(len);
59454+ if (tmp == NULL)
59455+ return ERR_PTR(-ENOMEM);
59456+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
59457+ return ERR_PTR(-EFAULT);
59458+ (*(i_tmp + i_num))->iface = tmp;
59459+ }
59460+
59461+ s_tmp->ips = i_tmp;
59462+
59463+insert:
59464+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
59465+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
59466+ return ERR_PTR(-ENOMEM);
59467+
59468+ return s_tmp;
59469+}
59470+
59471+static int
59472+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
59473+{
59474+ struct acl_subject_label s_pre;
59475+ struct acl_subject_label * ret;
59476+ int err;
59477+
59478+ while (userp) {
59479+ if (copy_from_user(&s_pre, userp,
59480+ sizeof (struct acl_subject_label)))
59481+ return -EFAULT;
59482+
59483+ ret = do_copy_user_subj(userp, role, NULL);
59484+
59485+ err = PTR_ERR(ret);
59486+ if (IS_ERR(ret))
59487+ return err;
59488+
59489+ insert_acl_subj_label(ret, role);
59490+
59491+ userp = s_pre.prev;
59492+ }
59493+
59494+ return 0;
59495+}
59496+
59497+static int
59498+copy_user_acl(struct gr_arg *arg)
59499+{
59500+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
59501+ struct acl_subject_label *subj_list;
59502+ struct sprole_pw *sptmp;
59503+ struct gr_hash_struct *ghash;
59504+ uid_t *domainlist;
59505+ unsigned int r_num;
59506+ unsigned int len;
59507+ char *tmp;
59508+ int err = 0;
59509+ __u16 i;
59510+ __u32 num_subjs;
59511+
59512+ /* we need a default and kernel role */
59513+ if (arg->role_db.num_roles < 2)
59514+ return -EINVAL;
59515+
59516+ /* copy special role authentication info from userspace */
59517+
59518+ num_sprole_pws = arg->num_sprole_pws;
59519+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
59520+
59521+ if (!acl_special_roles && num_sprole_pws)
59522+ return -ENOMEM;
59523+
59524+ for (i = 0; i < num_sprole_pws; i++) {
59525+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
59526+ if (!sptmp)
59527+ return -ENOMEM;
59528+ if (copy_from_user(sptmp, arg->sprole_pws + i,
59529+ sizeof (struct sprole_pw)))
59530+ return -EFAULT;
59531+
59532+ len = strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
59533+
59534+ if (!len || len >= GR_SPROLE_LEN)
59535+ return -EINVAL;
59536+
59537+ if ((tmp = (char *) acl_alloc(len)) == NULL)
59538+ return -ENOMEM;
59539+
59540+ if (copy_from_user(tmp, sptmp->rolename, len))
59541+ return -EFAULT;
59542+
59543+ tmp[len-1] = '\0';
59544+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59545+ printk(KERN_ALERT "Copying special role %s\n", tmp);
59546+#endif
59547+ sptmp->rolename = tmp;
59548+ acl_special_roles[i] = sptmp;
59549+ }
59550+
59551+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
59552+
59553+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
59554+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
59555+
59556+ if (!r_tmp)
59557+ return -ENOMEM;
59558+
59559+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
59560+ sizeof (struct acl_role_label *)))
59561+ return -EFAULT;
59562+
59563+ if (copy_from_user(r_tmp, r_utmp2,
59564+ sizeof (struct acl_role_label)))
59565+ return -EFAULT;
59566+
59567+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
59568+
59569+	if (!len || len >= GR_SPROLE_LEN)
59570+ return -EINVAL;
59571+
59572+ if ((tmp = (char *) acl_alloc(len)) == NULL)
59573+ return -ENOMEM;
59574+
59575+ if (copy_from_user(tmp, r_tmp->rolename, len))
59576+ return -EFAULT;
59577+
59578+ tmp[len-1] = '\0';
59579+ r_tmp->rolename = tmp;
59580+
59581+ if (!strcmp(r_tmp->rolename, "default")
59582+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
59583+ default_role = r_tmp;
59584+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
59585+ kernel_role = r_tmp;
59586+ }
59587+
59588+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
59589+ return -ENOMEM;
59590+
59591+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct)))
59592+ return -EFAULT;
59593+
59594+ r_tmp->hash = ghash;
59595+
59596+ num_subjs = count_user_subjs(r_tmp->hash->first);
59597+
59598+ r_tmp->subj_hash_size = num_subjs;
59599+ r_tmp->subj_hash =
59600+ (struct acl_subject_label **)
59601+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
59602+
59603+ if (!r_tmp->subj_hash)
59604+ return -ENOMEM;
59605+
59606+ err = copy_user_allowedips(r_tmp);
59607+ if (err)
59608+ return err;
59609+
59610+ /* copy domain info */
59611+ if (r_tmp->domain_children != NULL) {
59612+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
59613+ if (domainlist == NULL)
59614+ return -ENOMEM;
59615+
59616+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
59617+ return -EFAULT;
59618+
59619+ r_tmp->domain_children = domainlist;
59620+ }
59621+
59622+ err = copy_user_transitions(r_tmp);
59623+ if (err)
59624+ return err;
59625+
59626+ memset(r_tmp->subj_hash, 0,
59627+ r_tmp->subj_hash_size *
59628+ sizeof (struct acl_subject_label *));
59629+
59630+ /* acquire the list of subjects, then NULL out
59631+ the list prior to parsing the subjects for this role,
59632+ as during this parsing the list is replaced with a list
59633+ of *nested* subjects for the role
59634+ */
59635+ subj_list = r_tmp->hash->first;
59636+
59637+ /* set nested subject list to null */
59638+ r_tmp->hash->first = NULL;
59639+
59640+ err = copy_user_subjs(subj_list, r_tmp);
59641+
59642+ if (err)
59643+ return err;
59644+
59645+ insert_acl_role_label(r_tmp);
59646+ }
59647+
59648+ if (default_role == NULL || kernel_role == NULL)
59649+ return -EINVAL;
59650+
59651+ return err;
59652+}
59653+
59654+static int
59655+gracl_init(struct gr_arg *args)
59656+{
59657+ int error = 0;
59658+
59659+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
59660+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
59661+
59662+ if (init_variables(args)) {
59663+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
59664+ error = -ENOMEM;
59665+ free_variables();
59666+ goto out;
59667+ }
59668+
59669+ error = copy_user_acl(args);
59670+ free_init_variables();
59671+ if (error) {
59672+ free_variables();
59673+ goto out;
59674+ }
59675+
59676+ if ((error = gr_set_acls(0))) {
59677+ free_variables();
59678+ goto out;
59679+ }
59680+
59681+ pax_open_kernel();
59682+ gr_status |= GR_READY;
59683+ pax_close_kernel();
59684+
59685+ out:
59686+ return error;
59687+}
59688+
59689+/* derived from glibc fnmatch(); returns 0 on match, 1 on no match */
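+/* examples: glob_match("*.cfg", "app.cfg") == 0, but
+   glob_match("*.cfg", "etc/app.cfg") == 1, since '*' and '?' refuse to cross
+   a '/' mid-pattern; a pattern ending in '*' short-circuits and matches the
+   remainder, '/' included */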
59690+
59691+static int
59692+glob_match(const char *p, const char *n)
59693+{
59694+ char c;
59695+
59696+ while ((c = *p++) != '\0') {
59697+ switch (c) {
59698+ case '?':
59699+ if (*n == '\0')
59700+ return 1;
59701+ else if (*n == '/')
59702+ return 1;
59703+ break;
59704+ case '\\':
59705+ if (*n != c)
59706+ return 1;
59707+ break;
59708+ case '*':
59709+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
59710+ if (*n == '/')
59711+ return 1;
59712+ else if (c == '?') {
59713+ if (*n == '\0')
59714+ return 1;
59715+ else
59716+ ++n;
59717+ }
59718+ }
59719+ if (c == '\0') {
59720+ return 0;
59721+ } else {
59722+ const char *endp;
59723+
59724+ if ((endp = strchr(n, '/')) == NULL)
59725+ endp = n + strlen(n);
59726+
59727+ if (c == '[') {
59728+ for (--p; n < endp; ++n)
59729+ if (!glob_match(p, n))
59730+ return 0;
59731+ } else if (c == '/') {
59732+ while (*n != '\0' && *n != '/')
59733+ ++n;
59734+ if (*n == '/' && !glob_match(p, n + 1))
59735+ return 0;
59736+ } else {
59737+ for (--p; n < endp; ++n)
59738+ if (*n == c && !glob_match(p, n))
59739+ return 0;
59740+ }
59741+
59742+ return 1;
59743+ }
59744+ case '[':
59745+ {
59746+ int not;
59747+ char cold;
59748+
59749+ if (*n == '\0' || *n == '/')
59750+ return 1;
59751+
59752+ not = (*p == '!' || *p == '^');
59753+ if (not)
59754+ ++p;
59755+
59756+ c = *p++;
59757+ for (;;) {
59758+ unsigned char fn = (unsigned char)*n;
59759+
59760+ if (c == '\0')
59761+ return 1;
59762+ else {
59763+ if (c == fn)
59764+ goto matched;
59765+ cold = c;
59766+ c = *p++;
59767+
59768+ if (c == '-' && *p != ']') {
59769+ unsigned char cend = *p++;
59770+
59771+ if (cend == '\0')
59772+ return 1;
59773+
59774+ if (cold <= fn && fn <= cend)
59775+ goto matched;
59776+
59777+ c = *p++;
59778+ }
59779+ }
59780+
59781+ if (c == ']')
59782+ break;
59783+ }
59784+ if (!not)
59785+ return 1;
59786+ break;
59787+ matched:
59788+ while (c != ']') {
59789+ if (c == '\0')
59790+ return 1;
59791+
59792+ c = *p++;
59793+ }
59794+ if (not)
59795+ return 1;
59796+ }
59797+ break;
59798+ default:
59799+ if (c != *n)
59800+ return 1;
59801+ }
59802+
59803+ ++n;
59804+ }
59805+
59806+ if (*n == '\0')
59807+ return 0;
59808+
59809+ if (*n == '/')
59810+ return 0;
59811+
59812+ return 1;
59813+}
59814+
59815+static struct acl_object_label *
59816+chk_glob_label(struct acl_object_label *globbed,
59817+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
59818+{
59819+ struct acl_object_label *tmp;
59820+
59821+ if (*path == NULL)
59822+ *path = gr_to_filename_nolock(dentry, mnt);
59823+
59824+ tmp = globbed;
59825+
59826+ while (tmp) {
59827+ if (!glob_match(tmp->filename, *path))
59828+ return tmp;
59829+ tmp = tmp->next;
59830+ }
59831+
59832+ return NULL;
59833+}
59834+
59835+static struct acl_object_label *
59836+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
59837+ const ino_t curr_ino, const dev_t curr_dev,
59838+ const struct acl_subject_label *subj, char **path, const int checkglob)
59839+{
59840+ struct acl_subject_label *tmpsubj;
59841+ struct acl_object_label *retval;
59842+ struct acl_object_label *retval2;
59843+
59844+ tmpsubj = (struct acl_subject_label *) subj;
59845+ read_lock(&gr_inode_lock);
59846+ do {
59847+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
59848+ if (retval) {
59849+ if (checkglob && retval->globbed) {
59850+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
59851+ if (retval2)
59852+ retval = retval2;
59853+ }
59854+ break;
59855+ }
59856+ } while ((tmpsubj = tmpsubj->parent_subject));
59857+ read_unlock(&gr_inode_lock);
59858+
59859+ return retval;
59860+}
59861+
59862+static __inline__ struct acl_object_label *
59863+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
59864+ struct dentry *curr_dentry,
59865+ const struct acl_subject_label *subj, char **path, const int checkglob)
59866+{
59867+ int newglob = checkglob;
59868+ ino_t inode;
59869+ dev_t device;
59870+
59871+	/* if we aren't checking a subdirectory of the original path yet, don't do glob
59872+	   checking, as we don't want a "/ *" rule to match instead of the "/" object.
59873+	   don't do this for create lookups that call this function, though, since they
59874+	   look up on the parent and thus need globbing checks on all paths
59875+	*/
59876+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
59877+ newglob = GR_NO_GLOB;
59878+
59879+ spin_lock(&curr_dentry->d_lock);
59880+ inode = curr_dentry->d_inode->i_ino;
59881+ device = __get_dev(curr_dentry);
59882+ spin_unlock(&curr_dentry->d_lock);
59883+
59884+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
59885+}
59886+
59887+#ifdef CONFIG_HUGETLBFS
59888+static inline bool
59889+is_hugetlbfs_mnt(const struct vfsmount *mnt)
59890+{
59891+ int i;
59892+ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
59893+ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
59894+ return true;
59895+ }
59896+
59897+ return false;
59898+}
59899+#endif
59900+
59901+static struct acl_object_label *
59902+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
59903+ const struct acl_subject_label *subj, char *path, const int checkglob)
59904+{
59905+ struct dentry *dentry = (struct dentry *) l_dentry;
59906+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
59907+ struct mount *real_mnt = real_mount(mnt);
59908+ struct acl_object_label *retval;
59909+ struct dentry *parent;
59910+
59911+ br_read_lock(&vfsmount_lock);
59912+ write_seqlock(&rename_lock);
59913+
59914+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
59915+#ifdef CONFIG_NET
59916+ mnt == sock_mnt ||
59917+#endif
59918+#ifdef CONFIG_HUGETLBFS
59919+ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
59920+#endif
59921+ /* ignore Eric Biederman */
59922+ IS_PRIVATE(l_dentry->d_inode))) {
59923+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
59924+ goto out;
59925+ }
59926+
59927+ for (;;) {
59928+ if (dentry == real_root.dentry && mnt == real_root.mnt)
59929+ break;
59930+
59931+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
59932+ if (!mnt_has_parent(real_mnt))
59933+ break;
59934+
59935+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
59936+ if (retval != NULL)
59937+ goto out;
59938+
59939+ dentry = real_mnt->mnt_mountpoint;
59940+ real_mnt = real_mnt->mnt_parent;
59941+ mnt = &real_mnt->mnt;
59942+ continue;
59943+ }
59944+
59945+ parent = dentry->d_parent;
59946+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
59947+ if (retval != NULL)
59948+ goto out;
59949+
59950+ dentry = parent;
59951+ }
59952+
59953+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
59954+
59955+ /* real_root is pinned so we don't have to hold a reference */
59956+ if (retval == NULL)
59957+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
59958+out:
59959+ write_sequnlock(&rename_lock);
59960+ br_read_unlock(&vfsmount_lock);
59961+
59962+ BUG_ON(retval == NULL);
59963+
59964+ return retval;
59965+}
59966+
59967+static __inline__ struct acl_object_label *
59968+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
59969+ const struct acl_subject_label *subj)
59970+{
59971+ char *path = NULL;
59972+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
59973+}
59974+
59975+static __inline__ struct acl_object_label *
59976+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
59977+ const struct acl_subject_label *subj)
59978+{
59979+ char *path = NULL;
59980+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
59981+}
59982+
59983+static __inline__ struct acl_object_label *
59984+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
59985+ const struct acl_subject_label *subj, char *path)
59986+{
59987+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
59988+}
59989+
59990+static struct acl_subject_label *
59991+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
59992+ const struct acl_role_label *role)
59993+{
59994+ struct dentry *dentry = (struct dentry *) l_dentry;
59995+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
59996+ struct mount *real_mnt = real_mount(mnt);
59997+ struct acl_subject_label *retval;
59998+ struct dentry *parent;
59999+
60000+ br_read_lock(&vfsmount_lock);
60001+ write_seqlock(&rename_lock);
60002+
60003+ for (;;) {
60004+ if (dentry == real_root.dentry && mnt == real_root.mnt)
60005+ break;
60006+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
60007+ if (!mnt_has_parent(real_mnt))
60008+ break;
60009+
60010+ spin_lock(&dentry->d_lock);
60011+ read_lock(&gr_inode_lock);
60012+ retval =
60013+ lookup_acl_subj_label(dentry->d_inode->i_ino,
60014+ __get_dev(dentry), role);
60015+ read_unlock(&gr_inode_lock);
60016+ spin_unlock(&dentry->d_lock);
60017+ if (retval != NULL)
60018+ goto out;
60019+
60020+ dentry = real_mnt->mnt_mountpoint;
60021+ real_mnt = real_mnt->mnt_parent;
60022+ mnt = &real_mnt->mnt;
60023+ continue;
60024+ }
60025+
60026+ spin_lock(&dentry->d_lock);
60027+ read_lock(&gr_inode_lock);
60028+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
60029+ __get_dev(dentry), role);
60030+ read_unlock(&gr_inode_lock);
60031+ parent = dentry->d_parent;
60032+ spin_unlock(&dentry->d_lock);
60033+
60034+ if (retval != NULL)
60035+ goto out;
60036+
60037+ dentry = parent;
60038+ }
60039+
60040+ spin_lock(&dentry->d_lock);
60041+ read_lock(&gr_inode_lock);
60042+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
60043+ __get_dev(dentry), role);
60044+ read_unlock(&gr_inode_lock);
60045+ spin_unlock(&dentry->d_lock);
60046+
60047+ if (unlikely(retval == NULL)) {
60048+ /* real_root is pinned, we don't need to hold a reference */
60049+ read_lock(&gr_inode_lock);
60050+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
60051+ __get_dev(real_root.dentry), role);
60052+ read_unlock(&gr_inode_lock);
60053+ }
60054+out:
60055+ write_sequnlock(&rename_lock);
60056+ br_read_unlock(&vfsmount_lock);
60057+
60058+ BUG_ON(retval == NULL);
60059+
60060+ return retval;
60061+}
60062+
60063+static void
60064+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
60065+{
60066+ struct task_struct *task = current;
60067+ const struct cred *cred = current_cred();
60068+
60069+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
60070+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
60071+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
60072+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
60073+
60074+ return;
60075+}
60076+
60077+static void
60078+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
60079+{
60080+ struct task_struct *task = current;
60081+ const struct cred *cred = current_cred();
60082+
60083+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
60084+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
60085+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
60086+ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
60087+
60088+ return;
60089+}
60090+
60091+static void
60092+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
60093+{
60094+ struct task_struct *task = current;
60095+ const struct cred *cred = current_cred();
60096+
60097+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
60098+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
60099+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
60100+ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
60101+
60102+ return;
60103+}
60104+
60105+__u32
60106+gr_search_file(const struct dentry * dentry, const __u32 mode,
60107+ const struct vfsmount * mnt)
60108+{
60109+ __u32 retval = mode;
60110+ struct acl_subject_label *curracl;
60111+ struct acl_object_label *currobj;
60112+
60113+ if (unlikely(!(gr_status & GR_READY)))
60114+ return (mode & ~GR_AUDITS);
60115+
60116+ curracl = current->acl;
60117+
60118+ currobj = chk_obj_label(dentry, mnt, curracl);
60119+ retval = currobj->mode & mode;
60120+
60121+ /* if we're opening a specified transfer file for writing
60122+ (e.g. /dev/initctl), then transfer our role to init
60123+ */
60124+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
60125+ current->role->roletype & GR_ROLE_PERSIST)) {
60126+ struct task_struct *task = init_pid_ns.child_reaper;
60127+
60128+ if (task->role != current->role) {
60129+ task->acl_sp_role = 0;
60130+ task->acl_role_id = current->acl_role_id;
60131+ task->role = current->role;
60132+ rcu_read_lock();
60133+ read_lock(&grsec_exec_file_lock);
60134+ gr_apply_subject_to_task(task);
60135+ read_unlock(&grsec_exec_file_lock);
60136+ rcu_read_unlock();
60137+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
60138+ }
60139+ }
60140+
60141+ if (unlikely
60142+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
60143+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
60144+ __u32 new_mode = mode;
60145+
60146+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
60147+
60148+ retval = new_mode;
60149+
60150+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
60151+ new_mode |= GR_INHERIT;
60152+
60153+ if (!(mode & GR_NOLEARN))
60154+ gr_log_learn(dentry, mnt, new_mode);
60155+ }
60156+
60157+ return retval;
60158+}
60159+
60160+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
60161+ const struct dentry *parent,
60162+ const struct vfsmount *mnt)
60163+{
60164+ struct name_entry *match;
60165+ struct acl_object_label *matchpo;
60166+ struct acl_subject_label *curracl;
60167+ char *path;
60168+
60169+ if (unlikely(!(gr_status & GR_READY)))
60170+ return NULL;
60171+
60172+ preempt_disable();
60173+ path = gr_to_filename_rbac(new_dentry, mnt);
60174+ match = lookup_name_entry_create(path);
60175+
60176+ curracl = current->acl;
60177+
60178+ if (match) {
60179+ read_lock(&gr_inode_lock);
60180+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
60181+ read_unlock(&gr_inode_lock);
60182+
60183+ if (matchpo) {
60184+ preempt_enable();
60185+ return matchpo;
60186+ }
60187+ }
60188+
60189+ // lookup parent
60190+
60191+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
60192+
60193+ preempt_enable();
60194+ return matchpo;
60195+}
60196+
60197+__u32
60198+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
60199+ const struct vfsmount * mnt, const __u32 mode)
60200+{
60201+ struct acl_object_label *matchpo;
60202+ __u32 retval;
60203+
60204+ if (unlikely(!(gr_status & GR_READY)))
60205+ return (mode & ~GR_AUDITS);
60206+
60207+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
60208+
60209+ retval = matchpo->mode & mode;
60210+
60211+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
60212+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
60213+ __u32 new_mode = mode;
60214+
60215+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
60216+
60217+ gr_log_learn(new_dentry, mnt, new_mode);
60218+ return new_mode;
60219+ }
60220+
60221+ return retval;
60222+}
60223+
60224+__u32
60225+gr_check_link(const struct dentry * new_dentry,
60226+ const struct dentry * parent_dentry,
60227+ const struct vfsmount * parent_mnt,
60228+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
60229+{
60230+ struct acl_object_label *obj;
60231+ __u32 oldmode, newmode;
60232+ __u32 needmode;
60233+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
60234+ GR_DELETE | GR_INHERIT;
60235+
60236+ if (unlikely(!(gr_status & GR_READY)))
60237+ return (GR_CREATE | GR_LINK);
60238+
60239+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
60240+ oldmode = obj->mode;
60241+
60242+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
60243+ newmode = obj->mode;
60244+
60245+ needmode = newmode & checkmodes;
60246+
60247+ // old name for hardlink must have at least the permissions of the new name
60248+ if ((oldmode & needmode) != needmode)
60249+ goto bad;
60250+
60251+ // if old name had restrictions/auditing, make sure the new name does as well
60252+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
60253+
60254+ // don't allow hardlinking of suid/sgid/fcapped files without permission
60255+ if (is_privileged_binary(old_dentry))
60256+ needmode |= GR_SETID;
60257+
60258+ if ((newmode & needmode) != needmode)
60259+ goto bad;
60260+
60261+ // enforce minimum permissions
60262+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
60263+ return newmode;
60264+bad:
60265+ needmode = oldmode;
60266+ if (is_privileged_binary(old_dentry))
60267+ needmode |= GR_SETID;
60268+
60269+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
60270+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
60271+ return (GR_CREATE | GR_LINK);
60272+ } else if (newmode & GR_SUPPRESS)
60273+ return GR_SUPPRESS;
60274+ else
60275+ return 0;
60276+}
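
gr_check_link() enforces containment in both directions: a hardlink may not widen access (the old name must already grant everything the new name would), and it may not shed restrictions (audit, ptrace and setid bits present on the old name must reappear on the new one). A hedged sketch of that two-way subset test, using illustrative bit values rather than grsecurity's real GR_* flags:

#include <assert.h>

/* illustrative permission bits, not grsecurity's real values */
#define D_READ  0x1
#define D_WRITE 0x2
#define D_AUDIT 0x4	/* stands in for the restriction/audit class */

/* a link is allowed only if the new name's grants are a subset of the
   old name's grants, and the old name's restrictions are a subset of
   the new name's restrictions */
static int demo_link_allowed(unsigned int oldmode, unsigned int newmode)
{
	unsigned int grants = D_READ | D_WRITE;
	unsigned int restr  = D_AUDIT;
	unsigned int need;

	need = newmode & grants;
	if ((oldmode & need) != need)
		return 0;	/* link would widen access */

	need = oldmode & restr;
	if ((newmode & need) != need)
		return 0;	/* link would escape auditing/restrictions */

	return 1;
}

int main(void)
{
	assert(demo_link_allowed(D_READ | D_WRITE, D_READ));	/* narrowing: ok */
	assert(!demo_link_allowed(D_READ, D_READ | D_WRITE));	/* widening: no */
	assert(!demo_link_allowed(D_READ | D_AUDIT, D_READ));	/* drops audit: no */
	return 0;
}
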
60277+
60278+int
60279+gr_check_hidden_task(const struct task_struct *task)
60280+{
60281+ if (unlikely(!(gr_status & GR_READY)))
60282+ return 0;
60283+
60284+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
60285+ return 1;
60286+
60287+ return 0;
60288+}
60289+
60290+int
60291+gr_check_protected_task(const struct task_struct *task)
60292+{
60293+ if (unlikely(!(gr_status & GR_READY) || !task))
60294+ return 0;
60295+
60296+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
60297+ task->acl != current->acl)
60298+ return 1;
60299+
60300+ return 0;
60301+}
60302+
60303+int
60304+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
60305+{
60306+ struct task_struct *p;
60307+ int ret = 0;
60308+
60309+ if (unlikely(!(gr_status & GR_READY) || !pid))
60310+ return ret;
60311+
60312+ read_lock(&tasklist_lock);
60313+ do_each_pid_task(pid, type, p) {
60314+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
60315+ p->acl != current->acl) {
60316+ ret = 1;
60317+ goto out;
60318+ }
60319+ } while_each_pid_task(pid, type, p);
60320+out:
60321+ read_unlock(&tasklist_lock);
60322+
60323+ return ret;
60324+}
60325+
60326+void
60327+gr_copy_label(struct task_struct *tsk)
60328+{
60329+ tsk->signal->used_accept = 0;
60330+ tsk->acl_sp_role = 0;
60331+ tsk->acl_role_id = current->acl_role_id;
60332+ tsk->acl = current->acl;
60333+ tsk->role = current->role;
60334+ tsk->signal->curr_ip = current->signal->curr_ip;
60335+ tsk->signal->saved_ip = current->signal->saved_ip;
60336+ if (current->exec_file)
60337+ get_file(current->exec_file);
60338+ tsk->exec_file = current->exec_file;
60339+ tsk->is_writable = current->is_writable;
60340+ if (unlikely(current->signal->used_accept)) {
60341+ current->signal->curr_ip = 0;
60342+ current->signal->saved_ip = 0;
60343+ }
60344+
60345+ return;
60346+}
60347+
60348+static void
60349+gr_set_proc_res(struct task_struct *task)
60350+{
60351+ struct acl_subject_label *proc;
60352+ unsigned short i;
60353+
60354+ proc = task->acl;
60355+
60356+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
60357+ return;
60358+
60359+ for (i = 0; i < RLIM_NLIMITS; i++) {
60360+ if (!(proc->resmask & (1U << i)))
60361+ continue;
60362+
60363+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
60364+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
60365+
60366+ if (i == RLIMIT_CPU)
60367+ update_rlimit_cpu(task, proc->res[i].rlim_cur);
60368+ }
60369+
60370+ return;
60371+}
60372+
60373+extern int __gr_process_user_ban(struct user_struct *user);
60374+
60375+int
60376+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
60377+{
60378+ unsigned int i;
60379+ __u16 num;
60380+ uid_t *uidlist;
60381+ uid_t curuid;
60382+ int realok = 0;
60383+ int effectiveok = 0;
60384+ int fsok = 0;
60385+ uid_t globalreal, globaleffective, globalfs;
60386+
60387+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
60388+ struct user_struct *user;
60389+
60390+ if (!uid_valid(real))
60391+ goto skipit;
60392+
60393+ /* find user based on global namespace */
60394+
60395+ globalreal = GR_GLOBAL_UID(real);
60396+
60397+ user = find_user(make_kuid(&init_user_ns, globalreal));
60398+ if (user == NULL)
60399+ goto skipit;
60400+
60401+ if (__gr_process_user_ban(user)) {
60402+ /* for find_user */
60403+ free_uid(user);
60404+ return 1;
60405+ }
60406+
60407+ /* for find_user */
60408+ free_uid(user);
60409+
60410+skipit:
60411+#endif
60412+
60413+ if (unlikely(!(gr_status & GR_READY)))
60414+ return 0;
60415+
60416+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
60417+ gr_log_learn_uid_change(real, effective, fs);
60418+
60419+ num = current->acl->user_trans_num;
60420+ uidlist = current->acl->user_transitions;
60421+
60422+ if (uidlist == NULL)
60423+ return 0;
60424+
60425+ if (!uid_valid(real)) {
60426+ realok = 1;
60427+ globalreal = (uid_t)-1;
60428+ } else {
60429+ globalreal = GR_GLOBAL_UID(real);
60430+ }
60431+ if (!uid_valid(effective)) {
60432+ effectiveok = 1;
60433+ globaleffective = (uid_t)-1;
60434+ } else {
60435+ globaleffective = GR_GLOBAL_UID(effective);
60436+ }
60437+ if (!uid_valid(fs)) {
60438+ fsok = 1;
60439+ globalfs = (uid_t)-1;
60440+ } else {
60441+ globalfs = GR_GLOBAL_UID(fs);
60442+ }
60443+
60444+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
60445+ for (i = 0; i < num; i++) {
60446+ curuid = uidlist[i];
60447+ if (globalreal == curuid)
60448+ realok = 1;
60449+ if (globaleffective == curuid)
60450+ effectiveok = 1;
60451+ if (globalfs == curuid)
60452+ fsok = 1;
60453+ }
60454+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
60455+ for (i = 0; i < num; i++) {
60456+ curuid = uidlist[i];
60457+ if (globalreal == curuid)
60458+ break;
60459+ if (globaleffective == curuid)
60460+ break;
60461+ if (globalfs == curuid)
60462+ break;
60463+ }
60464+ /* not in deny list */
60465+ if (i == num) {
60466+ realok = 1;
60467+ effectiveok = 1;
60468+ fsok = 1;
60469+ }
60470+ }
60471+
60472+ if (realok && effectiveok && fsok)
60473+ return 0;
60474+ else {
60475+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
60476+ return 1;
60477+ }
60478+}
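
The uid transition check above implements two list semantics: under GR_ID_ALLOW every id being switched to must appear in the list, while under GR_ID_DENY a single hit anywhere in the list rejects the whole change. A user-space sketch of the same decision, with hypothetical names (gr_check_group_change below is the identical logic applied to gids):

#include <stdio.h>

enum trans_type { TRANS_ALLOW, TRANS_DENY };

/* hypothetical helper mirroring the loops above: under ALLOW semantics
   the id must appear in the list; under DENY semantics it must not */
static int demo_uid_transition_ok(enum trans_type type,
				  const unsigned int *list, unsigned int n,
				  unsigned int uid)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		if (list[i] == uid)
			return type == TRANS_ALLOW;
	return type == TRANS_DENY;
}

int main(void)
{
	unsigned int list[] = { 1000, 1001 };

	printf("%d\n", demo_uid_transition_ok(TRANS_ALLOW, list, 2, 1000)); /* 1 */
	printf("%d\n", demo_uid_transition_ok(TRANS_DENY,  list, 2, 0));    /* 1 */
	printf("%d\n", demo_uid_transition_ok(TRANS_DENY,  list, 2, 1001)); /* 0 */
	return 0;
}
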
60479+
60480+int
60481+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
60482+{
60483+ unsigned int i;
60484+ __u16 num;
60485+ gid_t *gidlist;
60486+ gid_t curgid;
60487+ int realok = 0;
60488+ int effectiveok = 0;
60489+ int fsok = 0;
60490+ gid_t globalreal, globaleffective, globalfs;
60491+
60492+ if (unlikely(!(gr_status & GR_READY)))
60493+ return 0;
60494+
60495+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
60496+ gr_log_learn_gid_change(real, effective, fs);
60497+
60498+ num = current->acl->group_trans_num;
60499+ gidlist = current->acl->group_transitions;
60500+
60501+ if (gidlist == NULL)
60502+ return 0;
60503+
60504+ if (!gid_valid(real)) {
60505+ realok = 1;
60506+ globalreal = (gid_t)-1;
60507+ } else {
60508+ globalreal = GR_GLOBAL_GID(real);
60509+ }
60510+ if (!gid_valid(effective)) {
60511+ effectiveok = 1;
60512+ globaleffective = (gid_t)-1;
60513+ } else {
60514+ globaleffective = GR_GLOBAL_GID(effective);
60515+ }
60516+ if (!gid_valid(fs)) {
60517+ fsok = 1;
60518+ globalfs = (gid_t)-1;
60519+ } else {
60520+ globalfs = GR_GLOBAL_GID(fs);
60521+ }
60522+
60523+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
60524+ for (i = 0; i < num; i++) {
60525+ curgid = gidlist[i];
60526+ if (globalreal == curgid)
60527+ realok = 1;
60528+ if (globaleffective == curgid)
60529+ effectiveok = 1;
60530+ if (globalfs == curgid)
60531+ fsok = 1;
60532+ }
60533+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
60534+ for (i = 0; i < num; i++) {
60535+ curgid = gidlist[i];
60536+ if (globalreal == curgid)
60537+ break;
60538+ if (globaleffective == curgid)
60539+ break;
60540+ if (globalfs == curgid)
60541+ break;
60542+ }
60543+ /* not in deny list */
60544+ if (i == num) {
60545+ realok = 1;
60546+ effectiveok = 1;
60547+ fsok = 1;
60548+ }
60549+ }
60550+
60551+ if (realok && effectiveok && fsok)
60552+ return 0;
60553+ else {
60554+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
60555+ return 1;
60556+ }
60557+}
60558+
60559+extern int gr_acl_is_capable(const int cap);
60560+
60561+void
60562+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
60563+{
60564+ struct acl_role_label *role = task->role;
60565+ struct acl_subject_label *subj = NULL;
60566+ struct acl_object_label *obj;
60567+ struct file *filp;
60568+ uid_t uid;
60569+ gid_t gid;
60570+
60571+ if (unlikely(!(gr_status & GR_READY)))
60572+ return;
60573+
60574+ uid = GR_GLOBAL_UID(kuid);
60575+ gid = GR_GLOBAL_GID(kgid);
60576+
60577+ filp = task->exec_file;
60578+
60579+ /* kernel process, we'll give them the kernel role */
60580+ if (unlikely(!filp)) {
60581+ task->role = kernel_role;
60582+ task->acl = kernel_role->root_label;
60583+ return;
60584+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
60585+ role = lookup_acl_role_label(task, uid, gid);
60586+
60587+ /* don't change the role if we're not a privileged process */
60588+ if (role && task->role != role &&
60589+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
60590+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
60591+ return;
60592+
60593+ /* perform subject lookup in possibly new role
60594+ we can use this result below in the case where role == task->role
60595+ */
60596+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
60597+
60598+ /* if the uid/gid change results in the same role and we are
60599+ using inheritance, don't lose the inherited subject: if the
60600+ current subject is other than what a normal lookup would
60601+ produce, we arrived at it via inheritance, so keep that
60602+ subject
60603+ */
60604+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
60605+ (subj == task->acl)))
60606+ task->acl = subj;
60607+
60608+ task->role = role;
60609+
60610+ task->is_writable = 0;
60611+
60612+ /* ignore additional mmap checks for processes that are writable
60613+ by the default ACL */
60614+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
60615+ if (unlikely(obj->mode & GR_WRITE))
60616+ task->is_writable = 1;
60617+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
60618+ if (unlikely(obj->mode & GR_WRITE))
60619+ task->is_writable = 1;
60620+
60621+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
60622+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
60623+#endif
60624+
60625+ gr_set_proc_res(task);
60626+
60627+ return;
60628+}
60629+
60630+int
60631+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
60632+ const int unsafe_flags)
60633+{
60634+ struct task_struct *task = current;
60635+ struct acl_subject_label *newacl;
60636+ struct acl_object_label *obj;
60637+ __u32 retmode;
60638+
60639+ if (unlikely(!(gr_status & GR_READY)))
60640+ return 0;
60641+
60642+ newacl = chk_subj_label(dentry, mnt, task->role);
60643+
60644+ /* special handling for the case where an admin role did an
60645+ strace -f -p <pid>, and that pid then did an exec
60646+ */
60647+ rcu_read_lock();
60648+ read_lock(&tasklist_lock);
60649+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
60650+ (task->parent->acl->mode & GR_POVERRIDE))) {
60651+ read_unlock(&tasklist_lock);
60652+ rcu_read_unlock();
60653+ goto skip_check;
60654+ }
60655+ read_unlock(&tasklist_lock);
60656+ rcu_read_unlock();
60657+
60658+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
60659+ !(task->role->roletype & GR_ROLE_GOD) &&
60660+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
60661+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
60662+ if (unsafe_flags & LSM_UNSAFE_SHARE)
60663+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
60664+ else
60665+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
60666+ return -EACCES;
60667+ }
60668+
60669+skip_check:
60670+
60671+ obj = chk_obj_label(dentry, mnt, task->acl);
60672+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
60673+
60674+ if (!(task->acl->mode & GR_INHERITLEARN) &&
60675+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
60676+ if (obj->nested)
60677+ task->acl = obj->nested;
60678+ else
60679+ task->acl = newacl;
60680+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
60681+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
60682+
60683+ task->is_writable = 0;
60684+
60685+ /* ignore additional mmap checks for processes that are writable
60686+ by the default ACL */
60687+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
60688+ if (unlikely(obj->mode & GR_WRITE))
60689+ task->is_writable = 1;
60690+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
60691+ if (unlikely(obj->mode & GR_WRITE))
60692+ task->is_writable = 1;
60693+
60694+ gr_set_proc_res(task);
60695+
60696+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
60697+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
60698+#endif
60699+ return 0;
60700+}
60701+
60702+/* always called with valid inodev ptr */
60703+static void
60704+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
60705+{
60706+ struct acl_object_label *matchpo;
60707+ struct acl_subject_label *matchps;
60708+ struct acl_subject_label *subj;
60709+ struct acl_role_label *role;
60710+ unsigned int x;
60711+
60712+ FOR_EACH_ROLE_START(role)
60713+ FOR_EACH_SUBJECT_START(role, subj, x)
60714+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
60715+ matchpo->mode |= GR_DELETED;
60716+ FOR_EACH_SUBJECT_END(subj,x)
60717+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
60718+ /* nested subjects aren't in the role's subj_hash table */
60719+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
60720+ matchpo->mode |= GR_DELETED;
60721+ FOR_EACH_NESTED_SUBJECT_END(subj)
60722+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
60723+ matchps->mode |= GR_DELETED;
60724+ FOR_EACH_ROLE_END(role)
60725+
60726+ inodev->nentry->deleted = 1;
60727+
60728+ return;
60729+}
60730+
60731+void
60732+gr_handle_delete(const ino_t ino, const dev_t dev)
60733+{
60734+ struct inodev_entry *inodev;
60735+
60736+ if (unlikely(!(gr_status & GR_READY)))
60737+ return;
60738+
60739+ write_lock(&gr_inode_lock);
60740+ inodev = lookup_inodev_entry(ino, dev);
60741+ if (inodev != NULL)
60742+ do_handle_delete(inodev, ino, dev);
60743+ write_unlock(&gr_inode_lock);
60744+
60745+ return;
60746+}
60747+
60748+static void
60749+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
60750+ const ino_t newinode, const dev_t newdevice,
60751+ struct acl_subject_label *subj)
60752+{
60753+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
60754+ struct acl_object_label *match;
60755+
60756+ match = subj->obj_hash[index];
60757+
60758+ while (match && (match->inode != oldinode ||
60759+ match->device != olddevice ||
60760+ !(match->mode & GR_DELETED)))
60761+ match = match->next;
60762+
60763+ if (match && (match->inode == oldinode)
60764+ && (match->device == olddevice)
60765+ && (match->mode & GR_DELETED)) {
60766+ if (match->prev == NULL) {
60767+ subj->obj_hash[index] = match->next;
60768+ if (match->next != NULL)
60769+ match->next->prev = NULL;
60770+ } else {
60771+ match->prev->next = match->next;
60772+ if (match->next != NULL)
60773+ match->next->prev = match->prev;
60774+ }
60775+ match->prev = NULL;
60776+ match->next = NULL;
60777+ match->inode = newinode;
60778+ match->device = newdevice;
60779+ match->mode &= ~GR_DELETED;
60780+
60781+ insert_acl_obj_label(match, subj);
60782+ }
60783+
60784+ return;
60785+}
60786+
60787+static void
60788+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
60789+ const ino_t newinode, const dev_t newdevice,
60790+ struct acl_role_label *role)
60791+{
60792+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
60793+ struct acl_subject_label *match;
60794+
60795+ match = role->subj_hash[index];
60796+
60797+ while (match && (match->inode != oldinode ||
60798+ match->device != olddevice ||
60799+ !(match->mode & GR_DELETED)))
60800+ match = match->next;
60801+
60802+ if (match && (match->inode == oldinode)
60803+ && (match->device == olddevice)
60804+ && (match->mode & GR_DELETED)) {
60805+ if (match->prev == NULL) {
60806+ role->subj_hash[index] = match->next;
60807+ if (match->next != NULL)
60808+ match->next->prev = NULL;
60809+ } else {
60810+ match->prev->next = match->next;
60811+ if (match->next != NULL)
60812+ match->next->prev = match->prev;
60813+ }
60814+ match->prev = NULL;
60815+ match->next = NULL;
60816+ match->inode = newinode;
60817+ match->device = newdevice;
60818+ match->mode &= ~GR_DELETED;
60819+
60820+ insert_acl_subj_label(match, role);
60821+ }
60822+
60823+ return;
60824+}
60825+
60826+static void
60827+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
60828+ const ino_t newinode, const dev_t newdevice)
60829+{
60830+ unsigned int index = gr_fhash(oldinode, olddevice, inodev_set.i_size);
60831+ struct inodev_entry *match;
60832+
60833+ match = inodev_set.i_hash[index];
60834+
60835+ while (match && (match->nentry->inode != oldinode ||
60836+ match->nentry->device != olddevice || !match->nentry->deleted))
60837+ match = match->next;
60838+
60839+ if (match && (match->nentry->inode == oldinode)
60840+ && (match->nentry->device == olddevice) &&
60841+ match->nentry->deleted) {
60842+ if (match->prev == NULL) {
60843+ inodev_set.i_hash[index] = match->next;
60844+ if (match->next != NULL)
60845+ match->next->prev = NULL;
60846+ } else {
60847+ match->prev->next = match->next;
60848+ if (match->next != NULL)
60849+ match->next->prev = match->prev;
60850+ }
60851+ match->prev = NULL;
60852+ match->next = NULL;
60853+ match->nentry->inode = newinode;
60854+ match->nentry->device = newdevice;
60855+ match->nentry->deleted = 0;
60856+
60857+ insert_inodev_entry(match);
60858+ }
60859+
60860+ return;
60861+}
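
update_acl_obj_label(), update_acl_subj_label() and update_inodev_entry() all repeat one pattern: find the deleted entry under its old inode/device key, unlink it from its doubly-linked hash chain, rewrite the key, clear the deleted flag, and reinsert it so it hashes into the correct bucket. A stripped-down sketch of that unlink-and-rekey step; the demo_* types are illustrative, and the real code reinserts through its insert_* helpers:

#include <stddef.h>

struct demo_node {
	unsigned long key;
	int deleted;
	struct demo_node *prev, *next;
};

/* detach a node from its chain, fixing up both neighbour links */
static void demo_unlink(struct demo_node **bucket, struct demo_node *n)
{
	if (n->prev == NULL) {
		*bucket = n->next;
		if (n->next)
			n->next->prev = NULL;
	} else {
		n->prev->next = n->next;
		if (n->next)
			n->next->prev = n->prev;
	}
	n->prev = n->next = NULL;
}

static void demo_rekey(struct demo_node **bucket, struct demo_node *n,
		       unsigned long newkey)
{
	demo_unlink(bucket, n);
	n->key = newkey;
	n->deleted = 0;
	/* the real code now reinserts into the bucket selected by
	   hashing newkey */
}
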
60862+
60863+static void
60864+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
60865+{
60866+ struct acl_subject_label *subj;
60867+ struct acl_role_label *role;
60868+ unsigned int x;
60869+
60870+ FOR_EACH_ROLE_START(role)
60871+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
60872+
60873+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
60874+ if ((subj->inode == matchn->inode) && (subj->device == matchn->device)) {
60875+ subj->inode = ino;
60876+ subj->device = dev;
60877+ }
60878+ /* nested subjects aren't in the role's subj_hash table */
60879+ update_acl_obj_label(matchn->inode, matchn->device,
60880+ ino, dev, subj);
60881+ FOR_EACH_NESTED_SUBJECT_END(subj)
60882+ FOR_EACH_SUBJECT_START(role, subj, x)
60883+ update_acl_obj_label(matchn->inode, matchn->device,
60884+ ino, dev, subj);
60885+ FOR_EACH_SUBJECT_END(subj,x)
60886+ FOR_EACH_ROLE_END(role)
60887+
60888+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
60889+
60890+ return;
60891+}
60892+
60893+static void
60894+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
60895+ const struct vfsmount *mnt)
60896+{
60897+ ino_t ino = dentry->d_inode->i_ino;
60898+ dev_t dev = __get_dev(dentry);
60899+
60900+ __do_handle_create(matchn, ino, dev);
60901+
60902+ return;
60903+}
60904+
60905+void
60906+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
60907+{
60908+ struct name_entry *matchn;
60909+
60910+ if (unlikely(!(gr_status & GR_READY)))
60911+ return;
60912+
60913+ preempt_disable();
60914+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
60915+
60916+ if (unlikely((unsigned long)matchn)) {
60917+ write_lock(&gr_inode_lock);
60918+ do_handle_create(matchn, dentry, mnt);
60919+ write_unlock(&gr_inode_lock);
60920+ }
60921+ preempt_enable();
60922+
60923+ return;
60924+}
60925+
60926+void
60927+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
60928+{
60929+ struct name_entry *matchn;
60930+
60931+ if (unlikely(!(gr_status & GR_READY)))
60932+ return;
60933+
60934+ preempt_disable();
60935+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
60936+
60937+ if (unlikely((unsigned long)matchn)) {
60938+ write_lock(&gr_inode_lock);
60939+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
60940+ write_unlock(&gr_inode_lock);
60941+ }
60942+ preempt_enable();
60943+
60944+ return;
60945+}
60946+
60947+void
60948+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
60949+ struct dentry *old_dentry,
60950+ struct dentry *new_dentry,
60951+ struct vfsmount *mnt, const __u8 replace)
60952+{
60953+ struct name_entry *matchn;
60954+ struct inodev_entry *inodev;
60955+ struct inode *inode = new_dentry->d_inode;
60956+ ino_t old_ino = old_dentry->d_inode->i_ino;
60957+ dev_t old_dev = __get_dev(old_dentry);
60958+
60959+ /* vfs_rename swaps the name and parent link for old_dentry and
60960+ new_dentry.
60961+ At this point, old_dentry has the new name, parent link, and
60962+ inode for the renamed file.
60963+ If a file is being replaced by a rename, new_dentry has the
60964+ inode and name for the replaced file.
60965+ */
60966+
60967+ if (unlikely(!(gr_status & GR_READY)))
60968+ return;
60969+
60970+ preempt_disable();
60971+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
60972+
60973+ /* we wouldn't have to check d_inode if it weren't for
60974+ NFS silly-renaming
60975+ */
60976+
60977+ write_lock(&gr_inode_lock);
60978+ if (unlikely(replace && inode)) {
60979+ ino_t new_ino = inode->i_ino;
60980+ dev_t new_dev = __get_dev(new_dentry);
60981+
60982+ inodev = lookup_inodev_entry(new_ino, new_dev);
60983+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
60984+ do_handle_delete(inodev, new_ino, new_dev);
60985+ }
60986+
60987+ inodev = lookup_inodev_entry(old_ino, old_dev);
60988+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
60989+ do_handle_delete(inodev, old_ino, old_dev);
60990+
60991+ if (unlikely((unsigned long)matchn))
60992+ do_handle_create(matchn, old_dentry, mnt);
60993+
60994+ write_unlock(&gr_inode_lock);
60995+ preempt_enable();
60996+
60997+ return;
60998+}
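
The nlink test in gr_handle_rename() encodes when a rename actually invalidates a policy entry: only when the inode loses its last name (nlink <= 1), or when it is a directory and therefore cannot be hardlinked, is its entry marked deleted. An inode that keeps other hardlinks stays reachable under its recorded identity. As a hypothetical predicate:

/* hypothetical predicate mirroring the nlink checks above */
static int demo_should_mark_deleted(unsigned int nlink, int is_dir)
{
	/* a directory has no other hardlinks; a regular file's entry
	   survives as long as another name still references the inode */
	return nlink <= 1 || is_dir;
}
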
60999+
61000+static int
61001+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
61002+ unsigned char **sum)
61003+{
61004+ struct acl_role_label *r;
61005+ struct role_allowed_ip *ipp;
61006+ struct role_transition *trans;
61007+ unsigned int i;
61008+ int found = 0;
61009+ u32 curr_ip = current->signal->curr_ip;
61010+
61011+ current->signal->saved_ip = curr_ip;
61012+
61013+ /* check transition table */
61014+
61015+ for (trans = current->role->transitions; trans; trans = trans->next) {
61016+ if (!strcmp(rolename, trans->rolename)) {
61017+ found = 1;
61018+ break;
61019+ }
61020+ }
61021+
61022+ if (!found)
61023+ return 0;
61024+
61025+ /* handle special roles that do not require authentication
61026+ and check ip */
61027+
61028+ FOR_EACH_ROLE_START(r)
61029+ if (!strcmp(rolename, r->rolename) &&
61030+ (r->roletype & GR_ROLE_SPECIAL)) {
61031+ found = 0;
61032+ if (r->allowed_ips != NULL) {
61033+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
61034+ if ((ntohl(curr_ip) & ipp->netmask) ==
61035+ (ntohl(ipp->addr) & ipp->netmask))
61036+ found = 1;
61037+ }
61038+ } else
61039+ found = 2;
61040+ if (!found)
61041+ return 0;
61042+
61043+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
61044+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
61045+ *salt = NULL;
61046+ *sum = NULL;
61047+ return 1;
61048+ }
61049+ }
61050+ FOR_EACH_ROLE_END(r)
61051+
61052+ for (i = 0; i < num_sprole_pws; i++) {
61053+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
61054+ *salt = acl_special_roles[i]->salt;
61055+ *sum = acl_special_roles[i]->sum;
61056+ return 1;
61057+ }
61058+ }
61059+
61060+ return 0;
61061+}
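
The allowed-ip walk in lookup_special_role_auth() is plain subnet matching: both the client address and the configured address are reduced by the role's netmask (in host byte order) and compared. A small sketch using the standard inet helpers; demo_ip_allowed is a made-up name:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* same-subnet test on host-order addresses, as in the loop above */
static int demo_ip_allowed(uint32_t addr_h, uint32_t allowed_h, uint32_t mask_h)
{
	return (addr_h & mask_h) == (allowed_h & mask_h);
}

int main(void)
{
	uint32_t net  = ntohl(inet_addr("192.168.1.0"));
	uint32_t mask = ntohl(inet_addr("255.255.255.0"));

	printf("%d\n", demo_ip_allowed(ntohl(inet_addr("192.168.1.42")), net, mask)); /* 1 */
	printf("%d\n", demo_ip_allowed(ntohl(inet_addr("10.0.0.1")), net, mask));     /* 0 */
	return 0;
}
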
61062+
61063+static void
61064+assign_special_role(char *rolename)
61065+{
61066+ struct acl_object_label *obj;
61067+ struct acl_role_label *r;
61068+ struct acl_role_label *assigned = NULL;
61069+ struct task_struct *tsk;
61070+ struct file *filp;
61071+
61072+ FOR_EACH_ROLE_START(r)
61073+ if (!strcmp(rolename, r->rolename) &&
61074+ (r->roletype & GR_ROLE_SPECIAL)) {
61075+ assigned = r;
61076+ break;
61077+ }
61078+ FOR_EACH_ROLE_END(r)
61079+
61080+ if (!assigned)
61081+ return;
61082+
61083+ read_lock(&tasklist_lock);
61084+ read_lock(&grsec_exec_file_lock);
61085+
61086+ tsk = current->real_parent;
61087+ if (tsk == NULL)
61088+ goto out_unlock;
61089+
61090+ filp = tsk->exec_file;
61091+ if (filp == NULL)
61092+ goto out_unlock;
61093+
61094+ tsk->is_writable = 0;
61095+
61096+ tsk->acl_sp_role = 1;
61097+ tsk->acl_role_id = ++acl_sp_role_value;
61098+ tsk->role = assigned;
61099+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
61100+
61101+ /* ignore additional mmap checks for processes that are writable
61102+ by the default ACL */
61103+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
61104+ if (unlikely(obj->mode & GR_WRITE))
61105+ tsk->is_writable = 1;
61106+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
61107+ if (unlikely(obj->mode & GR_WRITE))
61108+ tsk->is_writable = 1;
61109+
61110+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
61111+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
61112+#endif
61113+
61114+out_unlock:
61115+ read_unlock(&grsec_exec_file_lock);
61116+ read_unlock(&tasklist_lock);
61117+ return;
61118+}
61119+
61120+int gr_check_secure_terminal(struct task_struct *task)
61121+{
61122+ struct task_struct *p, *p2, *p3;
61123+ struct files_struct *files;
61124+ struct fdtable *fdt;
61125+ struct file *our_file = NULL, *file;
61126+ int i;
61127+
61128+ if (task->signal->tty == NULL)
61129+ return 1;
61130+
61131+ files = get_files_struct(task);
61132+ if (files != NULL) {
61133+ rcu_read_lock();
61134+ fdt = files_fdtable(files);
61135+ for (i=0; i < fdt->max_fds; i++) {
61136+ file = fcheck_files(files, i);
61137+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
61138+ get_file(file);
61139+ our_file = file;
61140+ }
61141+ }
61142+ rcu_read_unlock();
61143+ put_files_struct(files);
61144+ }
61145+
61146+ if (our_file == NULL)
61147+ return 1;
61148+
61149+ read_lock(&tasklist_lock);
61150+ do_each_thread(p2, p) {
61151+ files = get_files_struct(p);
61152+ if (files == NULL ||
61153+ (p->signal && p->signal->tty == task->signal->tty)) {
61154+ if (files != NULL)
61155+ put_files_struct(files);
61156+ continue;
61157+ }
61158+ rcu_read_lock();
61159+ fdt = files_fdtable(files);
61160+ for (i=0; i < fdt->max_fds; i++) {
61161+ file = fcheck_files(files, i);
61162+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
61163+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
61164+ p3 = task;
61165+ while (task_pid_nr(p3) > 0) {
61166+ if (p3 == p)
61167+ break;
61168+ p3 = p3->real_parent;
61169+ }
61170+ if (p3 == p)
61171+ break;
61172+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
61173+ gr_handle_alertkill(p);
61174+ rcu_read_unlock();
61175+ put_files_struct(files);
61176+ read_unlock(&tasklist_lock);
61177+ fput(our_file);
61178+ return 0;
61179+ }
61180+ }
61181+ rcu_read_unlock();
61182+ put_files_struct(files);
61183+ } while_each_thread(p2, p);
61184+ read_unlock(&tasklist_lock);
61185+
61186+ fput(our_file);
61187+ return 1;
61188+}
61189+
61190+static int gr_rbac_disable(void *unused)
61191+{
61192+ pax_open_kernel();
61193+ gr_status &= ~GR_READY;
61194+ pax_close_kernel();
61195+
61196+ return 0;
61197+}
61198+
61199+ssize_t
61200+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
61201+{
61202+ struct gr_arg_wrapper uwrap;
61203+ unsigned char *sprole_salt = NULL;
61204+ unsigned char *sprole_sum = NULL;
61205+ int error = sizeof (struct gr_arg_wrapper);
61206+ int error2 = 0;
61207+
61208+ mutex_lock(&gr_dev_mutex);
61209+
61210+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
61211+ error = -EPERM;
61212+ goto out;
61213+ }
61214+
61215+ if (count != sizeof (struct gr_arg_wrapper)) {
61216+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
61217+ error = -EINVAL;
61218+ goto out;
61219+ }
61220+
61222+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
61223+ gr_auth_expires = 0;
61224+ gr_auth_attempts = 0;
61225+ }
61226+
61227+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
61228+ error = -EFAULT;
61229+ goto out;
61230+ }
61231+
61232+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
61233+ error = -EINVAL;
61234+ goto out;
61235+ }
61236+
61237+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
61238+ error = -EFAULT;
61239+ goto out;
61240+ }
61241+
61242+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
61243+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
61244+ time_after(gr_auth_expires, get_seconds())) {
61245+ error = -EBUSY;
61246+ goto out;
61247+ }
61248+
61249+ /* if a non-root user is trying to do anything other than use a
61250+ special role, do not attempt authentication and do not count
61251+ it towards authentication locking
61252+ */
61253+
61254+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
61255+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
61256+ gr_is_global_nonroot(current_uid())) {
61257+ error = -EPERM;
61258+ goto out;
61259+ }
61260+
61261+ /* ensure pw and special role name are null terminated */
61262+
61263+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
61264+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
61265+
61266+ /* Okay.
61267+ * We now have enough of the argument structure (we have yet
61268+ * to copy_from_user the tables themselves). Copy the tables
61269+ * only if we need them, i.e. for loading operations. */
61270+
61271+ switch (gr_usermode->mode) {
61272+ case GR_STATUS:
61273+ if (gr_status & GR_READY) {
61274+ error = 1;
61275+ if (!gr_check_secure_terminal(current))
61276+ error = 3;
61277+ } else
61278+ error = 2;
61279+ goto out;
61280+ case GR_SHUTDOWN:
61281+ if ((gr_status & GR_READY)
61282+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
61283+ stop_machine(gr_rbac_disable, NULL, NULL);
61284+ free_variables();
61285+ memset(gr_usermode, 0, sizeof (struct gr_arg));
61286+ memset(gr_system_salt, 0, GR_SALT_LEN);
61287+ memset(gr_system_sum, 0, GR_SHA_LEN);
61288+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
61289+ } else if (gr_status & GR_READY) {
61290+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
61291+ error = -EPERM;
61292+ } else {
61293+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
61294+ error = -EAGAIN;
61295+ }
61296+ break;
61297+ case GR_ENABLE:
61298+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
61299+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
61300+ else {
61301+ if (gr_status & GR_READY)
61302+ error = -EAGAIN;
61303+ else
61304+ error = error2;
61305+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
61306+ }
61307+ break;
61308+ case GR_RELOAD:
61309+ if (!(gr_status & GR_READY)) {
61310+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
61311+ error = -EAGAIN;
61312+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
61313+ stop_machine(gr_rbac_disable, NULL, NULL);
61314+ free_variables();
61315+ error2 = gracl_init(gr_usermode);
61316+ if (!error2)
61317+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
61318+ else {
61319+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
61320+ error = error2;
61321+ }
61322+ } else {
61323+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
61324+ error = -EPERM;
61325+ }
61326+ break;
61327+ case GR_SEGVMOD:
61328+ if (unlikely(!(gr_status & GR_READY))) {
61329+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
61330+ error = -EAGAIN;
61331+ break;
61332+ }
61333+
61334+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
61335+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
61336+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
61337+ struct acl_subject_label *segvacl;
61338+ segvacl =
61339+ lookup_acl_subj_label(gr_usermode->segv_inode,
61340+ gr_usermode->segv_device,
61341+ current->role);
61342+ if (segvacl) {
61343+ segvacl->crashes = 0;
61344+ segvacl->expires = 0;
61345+ }
61346+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
61347+ gr_remove_uid(gr_usermode->segv_uid);
61348+ }
61349+ } else {
61350+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
61351+ error = -EPERM;
61352+ }
61353+ break;
61354+ case GR_SPROLE:
61355+ case GR_SPROLEPAM:
61356+ if (unlikely(!(gr_status & GR_READY))) {
61357+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
61358+ error = -EAGAIN;
61359+ break;
61360+ }
61361+
61362+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
61363+ current->role->expires = 0;
61364+ current->role->auth_attempts = 0;
61365+ }
61366+
61367+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
61368+ time_after(current->role->expires, get_seconds())) {
61369+ error = -EBUSY;
61370+ goto out;
61371+ }
61372+
61373+ if (lookup_special_role_auth
61374+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
61375+ && ((!sprole_salt && !sprole_sum)
61376+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
61377+ char *p = "";
61378+ assign_special_role(gr_usermode->sp_role);
61379+ read_lock(&tasklist_lock);
61380+ if (current->real_parent)
61381+ p = current->real_parent->role->rolename;
61382+ read_unlock(&tasklist_lock);
61383+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
61384+ p, acl_sp_role_value);
61385+ } else {
61386+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
61387+ error = -EPERM;
61388+ if(!(current->role->auth_attempts++))
61389+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
61390+
61391+ goto out;
61392+ }
61393+ break;
61394+ case GR_UNSPROLE:
61395+ if (unlikely(!(gr_status & GR_READY))) {
61396+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
61397+ error = -EAGAIN;
61398+ break;
61399+ }
61400+
61401+ if (current->role->roletype & GR_ROLE_SPECIAL) {
61402+ char *p = "";
61403+ int i = 0;
61404+
61405+ read_lock(&tasklist_lock);
61406+ if (current->real_parent) {
61407+ p = current->real_parent->role->rolename;
61408+ i = current->real_parent->acl_role_id;
61409+ }
61410+ read_unlock(&tasklist_lock);
61411+
61412+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
61413+ gr_set_acls(1);
61414+ } else {
61415+ error = -EPERM;
61416+ goto out;
61417+ }
61418+ break;
61419+ default:
61420+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
61421+ error = -EINVAL;
61422+ break;
61423+ }
61424+
61425+ if (error != -EPERM)
61426+ goto out;
61427+
61428+ if(!(gr_auth_attempts++))
61429+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
61430+
61431+ out:
61432+ mutex_unlock(&gr_dev_mutex);
61433+ return error;
61434+}
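
write_grsec_handler() rate-limits failed authentications with a counter and an expiry window: the first failure arms the timer (the if(!(gr_auth_attempts++)) idiom), attempts past CONFIG_GRKERNSEC_ACL_MAXTRIES are refused with -EBUSY until the window passes, and an elapsed window resets both. A minimal sketch of that lockout state machine, with illustrative constants and names:

#include <stdio.h>

#define DEMO_MAXTRIES 3
#define DEMO_TIMEOUT  30 /* seconds */

static unsigned long demo_expires;
static unsigned int demo_attempts;

/* returns 0 if an attempt may proceed, -1 while locked out */
static int demo_auth_gate(unsigned long now)
{
	if (demo_expires && now >= demo_expires) {	/* window elapsed: reset */
		demo_expires = 0;
		demo_attempts = 0;
	}
	if (demo_attempts >= DEMO_MAXTRIES && now < demo_expires)
		return -1;	/* like the -EBUSY path above */
	return 0;
}

static void demo_auth_failed(unsigned long now)
{
	/* only the first failure arms the timer, matching the
	   if(!(gr_auth_attempts++)) idiom above */
	if (!(demo_attempts++))
		demo_expires = now + DEMO_TIMEOUT;
}

int main(void)
{
	unsigned long t;

	for (t = 0; t < 4; t++) {
		printf("t=%lu gate=%d\n", t, demo_auth_gate(t)); /* -1 at t=3 */
		demo_auth_failed(t);
	}
	printf("t=31 gate=%d\n", demo_auth_gate(31));	/* window over: 0 */
	return 0;
}
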
61435+
61436+/* must be called with
61437+ rcu_read_lock();
61438+ read_lock(&tasklist_lock);
61439+ read_lock(&grsec_exec_file_lock);
61440+*/
61441+int gr_apply_subject_to_task(struct task_struct *task)
61442+{
61443+ struct acl_object_label *obj;
61444+ char *tmpname;
61445+ struct acl_subject_label *tmpsubj;
61446+ struct file *filp;
61447+ struct name_entry *nmatch;
61448+
61449+ filp = task->exec_file;
61450+ if (filp == NULL)
61451+ return 0;
61452+
61453+ /* the following applies the correct subject to binaries
61454+ that were already running when the RBAC system was
61455+ enabled, in case those binaries have been replaced or
61456+ deleted since their execution
61457+ -----
61458+ when the RBAC system starts, the inode/dev from
61459+ exec_file will be one that the RBAC system is unaware
61460+ of; it only knows the inode/dev of the file currently
61461+ present on disk, or the absence of it.
61462+ */
61464+ preempt_disable();
61465+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
61466+
61467+ nmatch = lookup_name_entry(tmpname);
61468+ preempt_enable();
61469+ tmpsubj = NULL;
61470+ if (nmatch) {
61471+ if (nmatch->deleted)
61472+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
61473+ else
61474+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
61475+ if (tmpsubj != NULL)
61476+ task->acl = tmpsubj;
61477+ }
61478+ if (tmpsubj == NULL)
61479+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
61480+ task->role);
61481+ if (task->acl) {
61482+ task->is_writable = 0;
61483+ /* ignore additional mmap checks for processes that are writable
61484+ by the default ACL */
61485+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
61486+ if (unlikely(obj->mode & GR_WRITE))
61487+ task->is_writable = 1;
61488+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
61489+ if (unlikely(obj->mode & GR_WRITE))
61490+ task->is_writable = 1;
61491+
61492+ gr_set_proc_res(task);
61493+
61494+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
61495+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
61496+#endif
61497+ } else {
61498+ return 1;
61499+ }
61500+
61501+ return 0;
61502+}
61503+
61504+int
61505+gr_set_acls(const int type)
61506+{
61507+ struct task_struct *task, *task2;
61508+ struct acl_role_label *role = current->role;
61509+ __u16 acl_role_id = current->acl_role_id;
61510+ const struct cred *cred;
61511+ int ret;
61512+
61513+ rcu_read_lock();
61514+ read_lock(&tasklist_lock);
61515+ read_lock(&grsec_exec_file_lock);
61516+ do_each_thread(task2, task) {
61517+ /* check to see if we're called from the exit handler,
61518+ if so, only replace ACLs that have inherited the admin
61519+ ACL */
61520+
61521+ if (type && (task->role != role ||
61522+ task->acl_role_id != acl_role_id))
61523+ continue;
61524+
61525+ task->acl_role_id = 0;
61526+ task->acl_sp_role = 0;
61527+
61528+ if (task->exec_file) {
61529+ cred = __task_cred(task);
61530+ task->role = lookup_acl_role_label(task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
61531+ ret = gr_apply_subject_to_task(task);
61532+ if (ret) {
61533+ read_unlock(&grsec_exec_file_lock);
61534+ read_unlock(&tasklist_lock);
61535+ rcu_read_unlock();
61536+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
61537+ return ret;
61538+ }
61539+ } else {
61540+ // it's a kernel process
61541+ task->role = kernel_role;
61542+ task->acl = kernel_role->root_label;
61543+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
61544+ task->acl->mode &= ~GR_PROCFIND;
61545+#endif
61546+ }
61547+ } while_each_thread(task2, task);
61548+ read_unlock(&grsec_exec_file_lock);
61549+ read_unlock(&tasklist_lock);
61550+ rcu_read_unlock();
61551+
61552+ return 0;
61553+}
61554+
61555+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
61556+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
61557+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
61558+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
61559+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
61560+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
61561+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
61562+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
61563+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
61564+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
61565+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
61566+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
61567+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
61568+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
61569+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
61570+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
61571+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
61572+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
61573+};
61574+
61575+void
61576+gr_learn_resource(const struct task_struct *task,
61577+ const int res, const unsigned long wanted, const int gt)
61578+{
61579+ struct acl_subject_label *acl;
61580+ const struct cred *cred;
61581+
61582+ if (unlikely((gr_status & GR_READY) &&
61583+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
61584+ goto skip_reslog;
61585+
61586+ gr_log_resource(task, res, wanted, gt);
61587+skip_reslog:
61588+
61589+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
61590+ return;
61591+
61592+ acl = task->acl;
61593+
61594+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
61595+ !(acl->resmask & (1U << (unsigned short) res))))
61596+ return;
61597+
61598+ if (wanted >= acl->res[res].rlim_cur) {
61599+ unsigned long res_add;
61600+
61601+ res_add = wanted + res_learn_bumps[res];
61602+
61603+ acl->res[res].rlim_cur = res_add;
61604+
61605+ if (wanted > acl->res[res].rlim_max)
61606+ acl->res[res].rlim_max = res_add;
61607+
61608+ /* only log the subject filename, since resource logging is supported for
61609+ single-subject learning only */
61610+ rcu_read_lock();
61611+ cred = __task_cred(task);
61612+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
61613+ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
61614+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
61615+ "", (unsigned long) res, &task->signal->saved_ip);
61616+ rcu_read_unlock();
61617+ }
61618+
61619+ return;
61620+}
61621+EXPORT_SYMBOL(gr_learn_resource);
61622+#endif
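
The learning path in gr_learn_resource() never records an exact high-water mark: when a learning subject exceeds its current limit, the limit is raised to the wanted value plus a per-resource bump from res_learn_bumps[], so the generated policy has headroom rather than a brittle exact ceiling. The core update, as a hedged sketch:

/* illustrative restatement of the rlimit update above */
static void demo_learn_bump(unsigned long *cur, unsigned long *max,
			    unsigned long wanted, unsigned long bump)
{
	if (wanted >= *cur) {
		unsigned long raised = wanted + bump;

		*cur = raised;
		if (wanted > *max)
			*max = raised;
	}
}
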
61623+
61624+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
61625+void
61626+pax_set_initial_flags(struct linux_binprm *bprm)
61627+{
61628+ struct task_struct *task = current;
61629+ struct acl_subject_label *proc;
61630+ unsigned long flags;
61631+
61632+ if (unlikely(!(gr_status & GR_READY)))
61633+ return;
61634+
61635+ flags = pax_get_flags(task);
61636+
61637+ proc = task->acl;
61638+
61639+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
61640+ flags &= ~MF_PAX_PAGEEXEC;
61641+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
61642+ flags &= ~MF_PAX_SEGMEXEC;
61643+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
61644+ flags &= ~MF_PAX_RANDMMAP;
61645+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
61646+ flags &= ~MF_PAX_EMUTRAMP;
61647+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
61648+ flags &= ~MF_PAX_MPROTECT;
61649+
61650+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
61651+ flags |= MF_PAX_PAGEEXEC;
61652+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
61653+ flags |= MF_PAX_SEGMEXEC;
61654+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
61655+ flags |= MF_PAX_RANDMMAP;
61656+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
61657+ flags |= MF_PAX_EMUTRAMP;
61658+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
61659+ flags |= MF_PAX_MPROTECT;
61660+
61661+ pax_set_flags(task, flags);
61662+
61663+ return;
61664+}
61665+#endif
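
pax_set_initial_flags() is a straight translation table: each subject-level GR_PAX_DISABLE_* bit clears the corresponding MF_PAX_* task flag, and each GR_PAX_ENABLE_* bit sets it, with the disables applied first. The same shape, table-driven, as an illustrative sketch (the demo_* names and bit pairs are hypothetical):

struct demo_paxmap {
	unsigned long disable_bit, enable_bit, task_flag;
};

/* apply all disables, then all enables, exactly as the if-chains above do */
static unsigned long demo_translate_flags(unsigned long flags,
					  unsigned long subj_flags,
					  const struct demo_paxmap *map, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (subj_flags & map[i].disable_bit)
			flags &= ~map[i].task_flag;
	for (i = 0; i < n; i++)
		if (subj_flags & map[i].enable_bit)
			flags |= map[i].task_flag;
	return flags;
}
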
61666+
61667+int
61668+gr_handle_proc_ptrace(struct task_struct *task)
61669+{
61670+ struct file *filp;
61671+ struct task_struct *tmp = task;
61672+ struct task_struct *curtemp = current;
61673+ __u32 retmode;
61674+
61675+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
61676+ if (unlikely(!(gr_status & GR_READY)))
61677+ return 0;
61678+#endif
61679+
61680+ read_lock(&tasklist_lock);
61681+ read_lock(&grsec_exec_file_lock);
61682+ filp = task->exec_file;
61683+
61684+ while (task_pid_nr(tmp) > 0) {
61685+ if (tmp == curtemp)
61686+ break;
61687+ tmp = tmp->real_parent;
61688+ }
61689+
61690+ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
61691+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
61692+ read_unlock(&grsec_exec_file_lock);
61693+ read_unlock(&tasklist_lock);
61694+ return 1;
61695+ }
61696+
61697+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
61698+ if (!(gr_status & GR_READY)) {
61699+ read_unlock(&grsec_exec_file_lock);
61700+ read_unlock(&tasklist_lock);
61701+ return 0;
61702+ }
61703+#endif
61704+
61705+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
61706+ read_unlock(&grsec_exec_file_lock);
61707+ read_unlock(&tasklist_lock);
61708+
61709+ if (retmode & GR_NOPTRACE)
61710+ return 1;
61711+
61712+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
61713+ && (current->acl != task->acl || (current->acl != current->role->root_label
61714+ && task_pid_nr(current) != task_pid_nr(task))))
61715+ return 1;
61716+
61717+ return 0;
61718+}
61719+
61720+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
61721+{
61722+ if (unlikely(!(gr_status & GR_READY)))
61723+ return;
61724+
61725+ if (!(current->role->roletype & GR_ROLE_GOD))
61726+ return;
61727+
61728+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
61729+ p->role->rolename, gr_task_roletype_to_char(p),
61730+ p->acl->filename);
61731+}
61732+
61733+int
61734+gr_handle_ptrace(struct task_struct *task, const long request)
61735+{
61736+ struct task_struct *tmp = task;
61737+ struct task_struct *curtemp = current;
61738+ __u32 retmode;
61739+
61740+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
61741+ if (unlikely(!(gr_status & GR_READY)))
61742+ return 0;
61743+#endif
61744+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
61745+ read_lock(&tasklist_lock);
61746+ while (task_pid_nr(tmp) > 0) {
61747+ if (tmp == curtemp)
61748+ break;
61749+ tmp = tmp->real_parent;
61750+ }
61751+
61752+ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
61753+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
61754+ read_unlock(&tasklist_lock);
61755+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
61756+ return 1;
61757+ }
61758+ read_unlock(&tasklist_lock);
61759+ }
61760+
61761+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
61762+ if (!(gr_status & GR_READY))
61763+ return 0;
61764+#endif
61765+
61766+ read_lock(&grsec_exec_file_lock);
61767+ if (unlikely(!task->exec_file)) {
61768+ read_unlock(&grsec_exec_file_lock);
61769+ return 0;
61770+ }
61771+
61772+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
61773+ read_unlock(&grsec_exec_file_lock);
61774+
61775+ if (retmode & GR_NOPTRACE) {
61776+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
61777+ return 1;
61778+ }
61779+
61780+ if (retmode & GR_PTRACERD) {
61781+ switch (request) {
61782+ case PTRACE_SEIZE:
61783+ case PTRACE_POKETEXT:
61784+ case PTRACE_POKEDATA:
61785+ case PTRACE_POKEUSR:
61786+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
61787+ case PTRACE_SETREGS:
61788+ case PTRACE_SETFPREGS:
61789+#endif
61790+#ifdef CONFIG_X86
61791+ case PTRACE_SETFPXREGS:
61792+#endif
61793+#ifdef CONFIG_ALTIVEC
61794+ case PTRACE_SETVRREGS:
61795+#endif
61796+ return 1;
61797+ default:
61798+ return 0;
61799+ }
61800+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
61801+ !(current->role->roletype & GR_ROLE_GOD) &&
61802+ (current->acl != task->acl)) {
61803+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
61804+ return 1;
61805+ }
61806+
61807+ return 0;
61808+}
61809+
61810+static int is_writable_mmap(const struct file *filp)
61811+{
61812+ struct task_struct *task = current;
61813+ struct acl_object_label *obj, *obj2;
61814+
61815+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
61816+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
61817+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
61818+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
61819+ task->role->root_label);
61820+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
61821+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
61822+ return 1;
61823+ }
61824+ }
61825+ return 0;
61826+}
61827+
61828+int
61829+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
61830+{
61831+ __u32 mode;
61832+
61833+ if (unlikely(!file || !(prot & PROT_EXEC)))
61834+ return 1;
61835+
61836+ if (is_writable_mmap(file))
61837+ return 0;
61838+
61839+ mode =
61840+ gr_search_file(file->f_path.dentry,
61841+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
61842+ file->f_path.mnt);
61843+
61844+ if (!gr_tpe_allow(file))
61845+ return 0;
61846+
61847+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
61848+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
61849+ return 0;
61850+ } else if (unlikely(!(mode & GR_EXEC))) {
61851+ return 0;
61852+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
61853+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
61854+ return 1;
61855+ }
61856+
61857+ return 1;
61858+}
61859+
61860+int
61861+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
61862+{
61863+ __u32 mode;
61864+
61865+ if (unlikely(!file || !(prot & PROT_EXEC)))
61866+ return 1;
61867+
61868+ if (is_writable_mmap(file))
61869+ return 0;
61870+
61871+ mode =
61872+ gr_search_file(file->f_path.dentry,
61873+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
61874+ file->f_path.mnt);
61875+
61876+ if (!gr_tpe_allow(file))
61877+ return 0;
61878+
61879+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
61880+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
61881+ return 0;
61882+ } else if (unlikely(!(mode & GR_EXEC))) {
61883+ return 0;
61884+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
61885+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
61886+ return 1;
61887+ }
61888+
61889+ return 1;
61890+}
61891+
61892+void
61893+gr_acl_handle_psacct(struct task_struct *task, const long code)
61894+{
61895+ unsigned long runtime;
61896+ unsigned long cputime;
61897+ unsigned int wday, cday;
61898+ __u8 whr, chr;
61899+ __u8 wmin, cmin;
61900+ __u8 wsec, csec;
61901+ struct timespec timeval;
61902+
61903+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
61904+ !(task->acl->mode & GR_PROCACCT)))
61905+ return;
61906+
61907+ do_posix_clock_monotonic_gettime(&timeval);
61908+ runtime = timeval.tv_sec - task->start_time.tv_sec;
61909+ wday = runtime / (3600 * 24);
61910+ runtime -= wday * (3600 * 24);
61911+ whr = runtime / 3600;
61912+ runtime -= whr * 3600;
61913+ wmin = runtime / 60;
61914+ runtime -= wmin * 60;
61915+ wsec = runtime;
61916+
61917+ cputime = (task->utime + task->stime) / HZ;
61918+ cday = cputime / (3600 * 24);
61919+ cputime -= cday * (3600 * 24);
61920+ chr = cputime / 3600;
61921+ cputime -= chr * 3600;
61922+ cmin = cputime / 60;
61923+ cputime -= cmin * 60;
61924+ csec = cputime;
61925+
61926+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
61927+
61928+ return;
61929+}
61930+
61931+void gr_set_kernel_label(struct task_struct *task)
61932+{
61933+ if (gr_status & GR_READY) {
61934+ task->role = kernel_role;
61935+ task->acl = kernel_role->root_label;
61936+ }
61937+ return;
61938+}
61939+
61940+#ifdef CONFIG_TASKSTATS
61941+int gr_is_taskstats_denied(int pid)
61942+{
61943+ struct task_struct *task;
61944+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61945+ const struct cred *cred;
61946+#endif
61947+ int ret = 0;
61948+
61949+ /* restrict taskstats viewing to un-chrooted root users
61950+ who have the 'view' subject flag if the RBAC system is enabled
61951+ */
61952+
61953+ rcu_read_lock();
61954+ read_lock(&tasklist_lock);
61955+ task = find_task_by_vpid(pid);
61956+ if (task) {
61957+#ifdef CONFIG_GRKERNSEC_CHROOT
61958+ if (proc_is_chrooted(task))
61959+ ret = -EACCES;
61960+#endif
61961+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61962+ cred = __task_cred(task);
61963+#ifdef CONFIG_GRKERNSEC_PROC_USER
61964+ if (gr_is_global_nonroot(cred->uid))
61965+ ret = -EACCES;
61966+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61967+ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
61968+ ret = -EACCES;
61969+#endif
61970+#endif
61971+ if (gr_status & GR_READY) {
61972+ if (!(task->acl->mode & GR_VIEW))
61973+ ret = -EACCES;
61974+ }
61975+ } else
61976+ ret = -ENOENT;
61977+
61978+ read_unlock(&tasklist_lock);
61979+ rcu_read_unlock();
61980+
61981+ return ret;
61982+}
61983+#endif
61984+
61985+/* AUXV entries are filled via a descendant of search_binary_handler
61986+ after we've already applied the subject for the target
61987+*/
61988+int gr_acl_enable_at_secure(void)
61989+{
61990+ if (unlikely(!(gr_status & GR_READY)))
61991+ return 0;
61992+
61993+ if (current->acl->mode & GR_ATSECURE)
61994+ return 1;
61995+
61996+ return 0;
61997+}
61998+
61999+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
62000+{
62001+ struct task_struct *task = current;
62002+ struct dentry *dentry = file->f_path.dentry;
62003+ struct vfsmount *mnt = file->f_path.mnt;
62004+ struct acl_object_label *obj, *tmp;
62005+ struct acl_subject_label *subj;
62006+ unsigned int bufsize;
62007+ int is_not_root;
62008+ char *path;
62009+ dev_t dev = __get_dev(dentry);
62010+
62011+ if (unlikely(!(gr_status & GR_READY)))
62012+ return 1;
62013+
62014+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
62015+ return 1;
62016+
62017+ /* ignore Eric Biederman */
62018+ if (IS_PRIVATE(dentry->d_inode))
62019+ return 1;
62020+
62021+ subj = task->acl;
62022+ read_lock(&gr_inode_lock);
62023+ do {
62024+ obj = lookup_acl_obj_label(ino, dev, subj);
62025+ if (obj != NULL) {
62026+ read_unlock(&gr_inode_lock);
62027+ return (obj->mode & GR_FIND) ? 1 : 0;
62028+ }
62029+ } while ((subj = subj->parent_subject));
62030+ read_unlock(&gr_inode_lock);
62031+
62032+	/* this is purely an optimization, since we're looking up an object
62033+	   for the directory we're doing a readdir on.
62034+	   If it's possible for any globbed object to match the entry we're
62035+	   filling into the directory, then the object we find here will be
62036+	   an anchor point with attached globbed objects.
62037+	*/
62038+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
62039+ if (obj->globbed == NULL)
62040+ return (obj->mode & GR_FIND) ? 1 : 0;
62041+
62042+ is_not_root = ((obj->filename[0] == '/') &&
62043+ (obj->filename[1] == '\0')) ? 0 : 1;
62044+ bufsize = PAGE_SIZE - namelen - is_not_root;
62045+
62046+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
62047+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
62048+ return 1;
62049+
62050+ preempt_disable();
62051+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
62052+ bufsize);
62053+
62054+ bufsize = strlen(path);
62055+
62056+ /* if base is "/", don't append an additional slash */
62057+ if (is_not_root)
62058+ *(path + bufsize) = '/';
62059+ memcpy(path + bufsize + is_not_root, name, namelen);
62060+ *(path + bufsize + namelen + is_not_root) = '\0';
62061+
62062+ tmp = obj->globbed;
62063+ while (tmp) {
62064+ if (!glob_match(tmp->filename, path)) {
62065+ preempt_enable();
62066+ return (tmp->mode & GR_FIND) ? 1 : 0;
62067+ }
62068+ tmp = tmp->next;
62069+ }
62070+ preempt_enable();
62071+ return (obj->mode & GR_FIND) ? 1 : 0;
62072+}
62073+
62074+void gr_put_exec_file(struct task_struct *task)
62075+{
62076+ struct file *filp;
62077+
62078+ write_lock(&grsec_exec_file_lock);
62079+ filp = task->exec_file;
62080+ task->exec_file = NULL;
62081+ write_unlock(&grsec_exec_file_lock);
62082+
62083+ if (filp)
62084+ fput(filp);
62085+
62086+ return;
62087+}
62088+
62089+
62090+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
62091+EXPORT_SYMBOL(gr_acl_is_enabled);
62092+#endif
62093+EXPORT_SYMBOL(gr_set_kernel_label);
62094+#ifdef CONFIG_SECURITY
62095+EXPORT_SYMBOL(gr_check_user_change);
62096+EXPORT_SYMBOL(gr_check_group_change);
62097+#endif
62098+
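A note on gr_acl_handle_filldir() above: once the directory's anchor object carries globbed objects, the visibility of each entry is decided by the first glob that matches the full "dir/name" path, falling back to the anchor's own GR_FIND bit. A userspace model of that walk, using fnmatch() as a stand-in for the kernel's glob_match() (both return 0 on a match); the names here are illustrative only:

    #include <fnmatch.h>
    #include <stdio.h>

    struct glob_obj { const char *pattern; int find; struct glob_obj *next; };

    /* walk the globbed objects hanging off the directory's anchor object;
     * the first pattern matching the full "dir/name" path decides
     * visibility, otherwise the anchor's own mode applies */
    static int entry_visible(const struct glob_obj *globs, int anchor_find,
                             const char *dir, const char *name)
    {
        char path[4096];
        const struct glob_obj *o;

        snprintf(path, sizeof(path), "%s/%s", dir, name);
        for (o = globs; o != NULL; o = o->next)
            if (fnmatch(o->pattern, path, 0) == 0)
                return o->find;
        return anchor_find;
    }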
62099diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
62100new file mode 100644
62101index 0000000..34fefda
62102--- /dev/null
62103+++ b/grsecurity/gracl_alloc.c
62104@@ -0,0 +1,105 @@
62105+#include <linux/kernel.h>
62106+#include <linux/mm.h>
62107+#include <linux/slab.h>
62108+#include <linux/vmalloc.h>
62109+#include <linux/gracl.h>
62110+#include <linux/grsecurity.h>
62111+
62112+static unsigned long alloc_stack_next = 1;
62113+static unsigned long alloc_stack_size = 1;
62114+static void **alloc_stack;
62115+
62116+static __inline__ int
62117+alloc_pop(void)
62118+{
62119+ if (alloc_stack_next == 1)
62120+ return 0;
62121+
62122+ kfree(alloc_stack[alloc_stack_next - 2]);
62123+
62124+ alloc_stack_next--;
62125+
62126+ return 1;
62127+}
62128+
62129+static __inline__ int
62130+alloc_push(void *buf)
62131+{
62132+ if (alloc_stack_next >= alloc_stack_size)
62133+ return 1;
62134+
62135+ alloc_stack[alloc_stack_next - 1] = buf;
62136+
62137+ alloc_stack_next++;
62138+
62139+ return 0;
62140+}
62141+
62142+void *
62143+acl_alloc(unsigned long len)
62144+{
62145+ void *ret = NULL;
62146+
62147+ if (!len || len > PAGE_SIZE)
62148+ goto out;
62149+
62150+ ret = kmalloc(len, GFP_KERNEL);
62151+
62152+ if (ret) {
62153+ if (alloc_push(ret)) {
62154+ kfree(ret);
62155+ ret = NULL;
62156+ }
62157+ }
62158+
62159+out:
62160+ return ret;
62161+}
62162+
62163+void *
62164+acl_alloc_num(unsigned long num, unsigned long len)
62165+{
62166+ if (!len || (num > (PAGE_SIZE / len)))
62167+ return NULL;
62168+
62169+ return acl_alloc(num * len);
62170+}
62171+
62172+void
62173+acl_free_all(void)
62174+{
62175+ if (gr_acl_is_enabled() || !alloc_stack)
62176+ return;
62177+
62178+ while (alloc_pop()) ;
62179+
62180+ if (alloc_stack) {
62181+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
62182+ kfree(alloc_stack);
62183+ else
62184+ vfree(alloc_stack);
62185+ }
62186+
62187+ alloc_stack = NULL;
62188+ alloc_stack_size = 1;
62189+ alloc_stack_next = 1;
62190+
62191+ return;
62192+}
62193+
62194+int
62195+acl_alloc_stack_init(unsigned long size)
62196+{
62197+ if ((size * sizeof (void *)) <= PAGE_SIZE)
62198+ alloc_stack =
62199+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
62200+ else
62201+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
62202+
62203+ alloc_stack_size = size;
62204+
62205+ if (!alloc_stack)
62206+ return 0;
62207+ else
62208+ return 1;
62209+}
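gracl_alloc.c implements a load-time arena: every acl_alloc() result is pushed onto a stack so the entire policy can be torn down with a single acl_free_all() call once the RBAC system is disabled. A minimal usage sketch, assuming the caller runs in process context as the policy loader in gracl.c does (example_load() and its error handling are illustrative, not the real loader):

    #include <linux/errno.h>
    #include <linux/gracl.h>

    /* hypothetical load-time sequence; the real caller is the policy
     * loader in gracl.c, whose error handling is more involved */
    static int example_load(unsigned long nr_allocs)
    {
        struct acl_object_label *obj;

        if (!acl_alloc_stack_init(nr_allocs))  /* 1 on success, 0 on failure */
            return -ENOMEM;

        obj = acl_alloc(sizeof(*obj));  /* tracked; released by acl_free_all() */
        if (obj == NULL)
            return -ENOMEM;             /* acl_free_all() reclaims the stack later */

        /* ... populate obj and the rest of the policy ... */
        return 0;
    }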
62210diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
62211new file mode 100644
62212index 0000000..bdd51ea
62213--- /dev/null
62214+++ b/grsecurity/gracl_cap.c
62215@@ -0,0 +1,110 @@
62216+#include <linux/kernel.h>
62217+#include <linux/module.h>
62218+#include <linux/sched.h>
62219+#include <linux/gracl.h>
62220+#include <linux/grsecurity.h>
62221+#include <linux/grinternal.h>
62222+
62223+extern const char *captab_log[];
62224+extern int captab_log_entries;
62225+
62226+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
62227+{
62228+ struct acl_subject_label *curracl;
62229+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
62230+ kernel_cap_t cap_audit = __cap_empty_set;
62231+
62232+ if (!gr_acl_is_enabled())
62233+ return 1;
62234+
62235+ curracl = task->acl;
62236+
62237+ cap_drop = curracl->cap_lower;
62238+ cap_mask = curracl->cap_mask;
62239+ cap_audit = curracl->cap_invert_audit;
62240+
62241+ while ((curracl = curracl->parent_subject)) {
62242+		/* if the cap isn't specified in the current computed mask but is specified
62243+		   in the current level subject, add it to the computed mask; if the current
62244+		   level subject also lowers the cap, add it to the set of dropped
62245+		   capabilities (and likewise for the inverted-audit set)
62246+		*/
62247+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
62248+ cap_raise(cap_mask, cap);
62249+ if (cap_raised(curracl->cap_lower, cap))
62250+ cap_raise(cap_drop, cap);
62251+ if (cap_raised(curracl->cap_invert_audit, cap))
62252+ cap_raise(cap_audit, cap);
62253+ }
62254+ }
62255+
62256+ if (!cap_raised(cap_drop, cap)) {
62257+ if (cap_raised(cap_audit, cap))
62258+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
62259+ return 1;
62260+ }
62261+
62262+ curracl = task->acl;
62263+
62264+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
62265+ && cap_raised(cred->cap_effective, cap)) {
62266+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
62267+ task->role->roletype, GR_GLOBAL_UID(cred->uid),
62268+ GR_GLOBAL_GID(cred->gid), task->exec_file ?
62269+ gr_to_filename(task->exec_file->f_path.dentry,
62270+ task->exec_file->f_path.mnt) : curracl->filename,
62271+ curracl->filename, 0UL,
62272+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
62273+ return 1;
62274+ }
62275+
62276+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
62277+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
62278+
62279+ return 0;
62280+}
62281+
62282+int
62283+gr_acl_is_capable(const int cap)
62284+{
62285+ return gr_task_acl_is_capable(current, current_cred(), cap);
62286+}
62287+
62288+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
62289+{
62290+ struct acl_subject_label *curracl;
62291+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
62292+
62293+ if (!gr_acl_is_enabled())
62294+ return 1;
62295+
62296+ curracl = task->acl;
62297+
62298+ cap_drop = curracl->cap_lower;
62299+ cap_mask = curracl->cap_mask;
62300+
62301+ while ((curracl = curracl->parent_subject)) {
62302+		/* if the cap isn't specified in the current computed mask but is specified
62303+		   in the current level subject, add it to the computed mask; if the current
62304+		   level subject also lowers the cap, add it to the set of dropped
62305+		   capabilities
62306+		*/
62307+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
62308+ cap_raise(cap_mask, cap);
62309+ if (cap_raised(curracl->cap_lower, cap))
62310+ cap_raise(cap_drop, cap);
62311+ }
62312+ }
62313+
62314+ if (!cap_raised(cap_drop, cap))
62315+ return 1;
62316+
62317+ return 0;
62318+}
62319+
62320+int
62321+gr_acl_is_capable_nolog(const int cap)
62322+{
62323+ return gr_task_acl_is_capable_nolog(current, cap);
62324+}
62325+
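A condensed model of the inheritance walk in gr_task_acl_is_capable()/gr_task_acl_is_capable_nolog(), using plain 64-bit masks instead of kernel_cap_t (an assumption for illustration; the audit set is omitted). The nearest subject in the parent chain that specifies a cap in its cap_mask decides whether it is lowered:

    #include <stdint.h>

    struct subj { uint64_t cap_mask, cap_lower; const struct subj *parent; };

    /* resolve whether `cap` is dropped for a subject: walk toward the root,
     * letting the first subject that specifies the cap decide its fate --
     * the same resolution gr_task_acl_is_capable() performs */
    static int cap_dropped(const struct subj *s, int cap)
    {
        uint64_t bit = 1ULL << cap;
        uint64_t mask = s->cap_mask;
        uint64_t drop = s->cap_lower;

        for (s = s->parent; s != NULL; s = s->parent) {
            if (!(mask & bit) && (s->cap_mask & bit)) {
                mask |= bit;
                if (s->cap_lower & bit)
                    drop |= bit;
            }
        }
        return (drop & bit) != 0;
    }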
62326diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
62327new file mode 100644
62328index 0000000..a340c17
62329--- /dev/null
62330+++ b/grsecurity/gracl_fs.c
62331@@ -0,0 +1,431 @@
62332+#include <linux/kernel.h>
62333+#include <linux/sched.h>
62334+#include <linux/types.h>
62335+#include <linux/fs.h>
62336+#include <linux/file.h>
62337+#include <linux/stat.h>
62338+#include <linux/grsecurity.h>
62339+#include <linux/grinternal.h>
62340+#include <linux/gracl.h>
62341+
62342+umode_t
62343+gr_acl_umask(void)
62344+{
62345+ if (unlikely(!gr_acl_is_enabled()))
62346+ return 0;
62347+
62348+ return current->role->umask;
62349+}
62350+
62351+__u32
62352+gr_acl_handle_hidden_file(const struct dentry * dentry,
62353+ const struct vfsmount * mnt)
62354+{
62355+ __u32 mode;
62356+
62357+ if (unlikely(!dentry->d_inode))
62358+ return GR_FIND;
62359+
62360+ mode =
62361+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
62362+
62363+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
62364+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
62365+ return mode;
62366+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
62367+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
62368+ return 0;
62369+ } else if (unlikely(!(mode & GR_FIND)))
62370+ return 0;
62371+
62372+ return GR_FIND;
62373+}
62374+
62375+__u32
62376+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
62377+ int acc_mode)
62378+{
62379+ __u32 reqmode = GR_FIND;
62380+ __u32 mode;
62381+
62382+ if (unlikely(!dentry->d_inode))
62383+ return reqmode;
62384+
62385+ if (acc_mode & MAY_APPEND)
62386+ reqmode |= GR_APPEND;
62387+ else if (acc_mode & MAY_WRITE)
62388+ reqmode |= GR_WRITE;
62389+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
62390+ reqmode |= GR_READ;
62391+
62392+ mode =
62393+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
62394+ mnt);
62395+
62396+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
62397+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
62398+ reqmode & GR_READ ? " reading" : "",
62399+ reqmode & GR_WRITE ? " writing" : reqmode &
62400+ GR_APPEND ? " appending" : "");
62401+ return reqmode;
62402+ } else
62403+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
62404+ {
62405+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
62406+ reqmode & GR_READ ? " reading" : "",
62407+ reqmode & GR_WRITE ? " writing" : reqmode &
62408+ GR_APPEND ? " appending" : "");
62409+ return 0;
62410+ } else if (unlikely((mode & reqmode) != reqmode))
62411+ return 0;
62412+
62413+ return reqmode;
62414+}
62415+
62416+__u32
62417+gr_acl_handle_creat(const struct dentry * dentry,
62418+ const struct dentry * p_dentry,
62419+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
62420+ const int imode)
62421+{
62422+ __u32 reqmode = GR_WRITE | GR_CREATE;
62423+ __u32 mode;
62424+
62425+ if (acc_mode & MAY_APPEND)
62426+ reqmode |= GR_APPEND;
62427+ // if a directory was required or the directory already exists, then
62428+ // don't count this open as a read
62429+ if ((acc_mode & MAY_READ) &&
62430+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
62431+ reqmode |= GR_READ;
62432+ if ((open_flags & O_CREAT) &&
62433+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
62434+ reqmode |= GR_SETID;
62435+
62436+ mode =
62437+ gr_check_create(dentry, p_dentry, p_mnt,
62438+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
62439+
62440+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
62441+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
62442+ reqmode & GR_READ ? " reading" : "",
62443+ reqmode & GR_WRITE ? " writing" : reqmode &
62444+ GR_APPEND ? " appending" : "");
62445+ return reqmode;
62446+ } else
62447+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
62448+ {
62449+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
62450+ reqmode & GR_READ ? " reading" : "",
62451+ reqmode & GR_WRITE ? " writing" : reqmode &
62452+ GR_APPEND ? " appending" : "");
62453+ return 0;
62454+ } else if (unlikely((mode & reqmode) != reqmode))
62455+ return 0;
62456+
62457+ return reqmode;
62458+}
62459+
62460+__u32
62461+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
62462+ const int fmode)
62463+{
62464+ __u32 mode, reqmode = GR_FIND;
62465+
62466+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
62467+ reqmode |= GR_EXEC;
62468+ if (fmode & S_IWOTH)
62469+ reqmode |= GR_WRITE;
62470+ if (fmode & S_IROTH)
62471+ reqmode |= GR_READ;
62472+
62473+ mode =
62474+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
62475+ mnt);
62476+
62477+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
62478+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
62479+ reqmode & GR_READ ? " reading" : "",
62480+ reqmode & GR_WRITE ? " writing" : "",
62481+ reqmode & GR_EXEC ? " executing" : "");
62482+ return reqmode;
62483+ } else
62484+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
62485+ {
62486+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
62487+ reqmode & GR_READ ? " reading" : "",
62488+ reqmode & GR_WRITE ? " writing" : "",
62489+ reqmode & GR_EXEC ? " executing" : "");
62490+ return 0;
62491+ } else if (unlikely((mode & reqmode) != reqmode))
62492+ return 0;
62493+
62494+ return reqmode;
62495+}
62496+
62497+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
62498+{
62499+ __u32 mode;
62500+
62501+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
62502+
62503+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
62504+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
62505+ return mode;
62506+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
62507+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
62508+ return 0;
62509+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
62510+ return 0;
62511+
62512+ return (reqmode);
62513+}
62514+
62515+__u32
62516+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
62517+{
62518+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
62519+}
62520+
62521+__u32
62522+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
62523+{
62524+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
62525+}
62526+
62527+__u32
62528+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
62529+{
62530+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
62531+}
62532+
62533+__u32
62534+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
62535+{
62536+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
62537+}
62538+
62539+__u32
62540+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
62541+ umode_t *modeptr)
62542+{
62543+ umode_t mode;
62544+
62545+ *modeptr &= ~gr_acl_umask();
62546+ mode = *modeptr;
62547+
62548+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
62549+ return 1;
62550+
62551+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
62552+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
62553+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
62554+ GR_CHMOD_ACL_MSG);
62555+ } else {
62556+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
62557+ }
62558+}
62559+
62560+__u32
62561+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
62562+{
62563+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
62564+}
62565+
62566+__u32
62567+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
62568+{
62569+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
62570+}
62571+
62572+__u32
62573+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
62574+{
62575+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
62576+}
62577+
62578+__u32
62579+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
62580+{
62581+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
62582+ GR_UNIXCONNECT_ACL_MSG);
62583+}
62584+
62585+/* hardlinks require at minimum create and link permission;
62586+   any additional privilege required is based on the
62587+   privilege of the file being linked to
62588+*/
62589+__u32
62590+gr_acl_handle_link(const struct dentry * new_dentry,
62591+ const struct dentry * parent_dentry,
62592+ const struct vfsmount * parent_mnt,
62593+ const struct dentry * old_dentry,
62594+ const struct vfsmount * old_mnt, const struct filename *to)
62595+{
62596+ __u32 mode;
62597+ __u32 needmode = GR_CREATE | GR_LINK;
62598+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
62599+
62600+ mode =
62601+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
62602+ old_mnt);
62603+
62604+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
62605+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
62606+ return mode;
62607+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
62608+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
62609+ return 0;
62610+ } else if (unlikely((mode & needmode) != needmode))
62611+ return 0;
62612+
62613+ return 1;
62614+}
62615+
62616+__u32
62617+gr_acl_handle_symlink(const struct dentry * new_dentry,
62618+ const struct dentry * parent_dentry,
62619+ const struct vfsmount * parent_mnt, const struct filename *from)
62620+{
62621+ __u32 needmode = GR_WRITE | GR_CREATE;
62622+ __u32 mode;
62623+
62624+ mode =
62625+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
62626+ GR_CREATE | GR_AUDIT_CREATE |
62627+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
62628+
62629+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
62630+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
62631+ return mode;
62632+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
62633+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
62634+ return 0;
62635+ } else if (unlikely((mode & needmode) != needmode))
62636+ return 0;
62637+
62638+ return (GR_WRITE | GR_CREATE);
62639+}
62640+
62641+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
62642+{
62643+ __u32 mode;
62644+
62645+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
62646+
62647+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
62648+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
62649+ return mode;
62650+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
62651+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
62652+ return 0;
62653+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
62654+ return 0;
62655+
62656+ return (reqmode);
62657+}
62658+
62659+__u32
62660+gr_acl_handle_mknod(const struct dentry * new_dentry,
62661+ const struct dentry * parent_dentry,
62662+ const struct vfsmount * parent_mnt,
62663+ const int mode)
62664+{
62665+ __u32 reqmode = GR_WRITE | GR_CREATE;
62666+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
62667+ reqmode |= GR_SETID;
62668+
62669+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
62670+ reqmode, GR_MKNOD_ACL_MSG);
62671+}
62672+
62673+__u32
62674+gr_acl_handle_mkdir(const struct dentry *new_dentry,
62675+ const struct dentry *parent_dentry,
62676+ const struct vfsmount *parent_mnt)
62677+{
62678+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
62679+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
62680+}
62681+
62682+#define RENAME_CHECK_SUCCESS(old, new) \
62683+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
62684+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
62685+
62686+int
62687+gr_acl_handle_rename(struct dentry *new_dentry,
62688+ struct dentry *parent_dentry,
62689+ const struct vfsmount *parent_mnt,
62690+ struct dentry *old_dentry,
62691+ struct inode *old_parent_inode,
62692+ struct vfsmount *old_mnt, const struct filename *newname)
62693+{
62694+ __u32 comp1, comp2;
62695+ int error = 0;
62696+
62697+ if (unlikely(!gr_acl_is_enabled()))
62698+ return 0;
62699+
62700+ if (!new_dentry->d_inode) {
62701+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
62702+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
62703+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
62704+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
62705+ GR_DELETE | GR_AUDIT_DELETE |
62706+ GR_AUDIT_READ | GR_AUDIT_WRITE |
62707+ GR_SUPPRESS, old_mnt);
62708+ } else {
62709+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
62710+ GR_CREATE | GR_DELETE |
62711+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
62712+ GR_AUDIT_READ | GR_AUDIT_WRITE |
62713+ GR_SUPPRESS, parent_mnt);
62714+ comp2 =
62715+ gr_search_file(old_dentry,
62716+ GR_READ | GR_WRITE | GR_AUDIT_READ |
62717+ GR_DELETE | GR_AUDIT_DELETE |
62718+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
62719+ }
62720+
62721+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
62722+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
62723+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
62724+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
62725+ && !(comp2 & GR_SUPPRESS)) {
62726+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
62727+ error = -EACCES;
62728+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
62729+ error = -EACCES;
62730+
62731+ return error;
62732+}
62733+
62734+void
62735+gr_acl_handle_exit(void)
62736+{
62737+ u16 id;
62738+ char *rolename;
62739+
62740+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
62741+ !(current->role->roletype & GR_ROLE_PERSIST))) {
62742+ id = current->acl_role_id;
62743+ rolename = current->role->rolename;
62744+ gr_set_acls(1);
62745+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
62746+ }
62747+
62748+ gr_put_exec_file(current);
62749+ return;
62750+}
62751+
62752+int
62753+gr_acl_handle_procpidmem(const struct task_struct *task)
62754+{
62755+ if (unlikely(!gr_acl_is_enabled()))
62756+ return 0;
62757+
62758+ if (task != current && task->acl->mode & GR_PROTPROCFD)
62759+ return -EACCES;
62760+
62761+ return 0;
62762+}
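Every handler in gracl_fs.c reduces to one decision shape: compute the granted mode via gr_search_file()/gr_check_create() with the audit and suppress bits requested alongside, then compare against the required mode. A schematic version with hypothetical placeholder flag values (X_SUPPRESS/X_AUDITS stand in for the real GR_SUPPRESS/GR_AUDITS from linux/gracl.h):

    /* placeholder bit values for illustration only */
    #define X_SUPPRESS  0x1000u
    #define X_AUDITS    0x2000u

    static void log_success(void) { /* GR_DO_AUDIT path */ }
    static void log_denial(void)  { /* GR_DONT_AUDIT path */ }

    /* the shared shape of generic_fs_handler()/generic_fs_create_handler():
     * full grant -> optionally audit, return the granted request;
     * partial or no grant -> log unless suppressed, return 0 (deny) */
    static unsigned int decide(unsigned int granted, unsigned int reqmode)
    {
        if ((granted & reqmode) == reqmode) {
            if (granted & X_AUDITS)
                log_success();
            return reqmode;
        }
        if (!(granted & X_SUPPRESS))
            log_denial();
        return 0;
    }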
62763diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
62764new file mode 100644
62765index 0000000..8132048
62766--- /dev/null
62767+++ b/grsecurity/gracl_ip.c
62768@@ -0,0 +1,387 @@
62769+#include <linux/kernel.h>
62770+#include <asm/uaccess.h>
62771+#include <asm/errno.h>
62772+#include <net/sock.h>
62773+#include <linux/file.h>
62774+#include <linux/fs.h>
62775+#include <linux/net.h>
62776+#include <linux/in.h>
62777+#include <linux/skbuff.h>
62778+#include <linux/ip.h>
62779+#include <linux/udp.h>
62780+#include <linux/types.h>
62781+#include <linux/sched.h>
62782+#include <linux/netdevice.h>
62783+#include <linux/inetdevice.h>
62784+#include <linux/gracl.h>
62785+#include <linux/grsecurity.h>
62786+#include <linux/grinternal.h>
62787+
62788+#define GR_BIND 0x01
62789+#define GR_CONNECT 0x02
62790+#define GR_INVERT 0x04
62791+#define GR_BINDOVERRIDE 0x08
62792+#define GR_CONNECTOVERRIDE 0x10
62793+#define GR_SOCK_FAMILY 0x20
62794+
62795+static const char * gr_protocols[IPPROTO_MAX] = {
62796+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
62797+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
62798+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
62799+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
62800+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
62801+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
62802+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
62803+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
62804+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
62805+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
62806+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
62807+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
62808+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
62809+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
62810+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
62811+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
62812+	"sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unknown:134", "unknown:135",
62813+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
62814+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
62815+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
62816+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
62817+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
62818+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
62819+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
62820+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
62821+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
62822+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
62823+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
62824+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
62825+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
62826+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
62827+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
62828+ };
62829+
62830+static const char * gr_socktypes[SOCK_MAX] = {
62831+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
62832+ "unknown:7", "unknown:8", "unknown:9", "packet"
62833+ };
62834+
62835+static const char * gr_sockfamilies[AF_MAX+1] = {
62836+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
62837+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
62838+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
62839+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
62840+ };
62841+
62842+const char *
62843+gr_proto_to_name(unsigned char proto)
62844+{
62845+ return gr_protocols[proto];
62846+}
62847+
62848+const char *
62849+gr_socktype_to_name(unsigned char type)
62850+{
62851+ return gr_socktypes[type];
62852+}
62853+
62854+const char *
62855+gr_sockfamily_to_name(unsigned char family)
62856+{
62857+ return gr_sockfamilies[family];
62858+}
62859+
62860+int
62861+gr_search_socket(const int domain, const int type, const int protocol)
62862+{
62863+ struct acl_subject_label *curr;
62864+ const struct cred *cred = current_cred();
62865+
62866+ if (unlikely(!gr_acl_is_enabled()))
62867+ goto exit;
62868+
62869+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
62870+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
62871+ goto exit; // let the kernel handle it
62872+
62873+ curr = current->acl;
62874+
62875+ if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
62876+		/* the family is allowed; if this is PF_INET, allow it only if
62877+		   the extra sock type/protocol checks pass */
62878+ if (domain == PF_INET)
62879+ goto inet_check;
62880+ goto exit;
62881+ } else {
62882+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
62883+ __u32 fakeip = 0;
62884+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
62885+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
62886+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
62887+ gr_to_filename(current->exec_file->f_path.dentry,
62888+ current->exec_file->f_path.mnt) :
62889+ curr->filename, curr->filename,
62890+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
62891+ &current->signal->saved_ip);
62892+ goto exit;
62893+ }
62894+ goto exit_fail;
62895+ }
62896+
62897+inet_check:
62898+ /* the rest of this checking is for IPv4 only */
62899+ if (!curr->ips)
62900+ goto exit;
62901+
62902+ if ((curr->ip_type & (1U << type)) &&
62903+ (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
62904+ goto exit;
62905+
62906+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
62907+		/* we don't place acls on raw sockets, and sometimes
62908+		   dgram/ip sockets are opened for ioctl and not
62909+		   bind/connect, so we'll fake a bind learn log */
62910+ if (type == SOCK_RAW || type == SOCK_PACKET) {
62911+ __u32 fakeip = 0;
62912+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
62913+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
62914+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
62915+ gr_to_filename(current->exec_file->f_path.dentry,
62916+ current->exec_file->f_path.mnt) :
62917+ curr->filename, curr->filename,
62918+ &fakeip, 0, type,
62919+ protocol, GR_CONNECT, &current->signal->saved_ip);
62920+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
62921+ __u32 fakeip = 0;
62922+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
62923+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
62924+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
62925+ gr_to_filename(current->exec_file->f_path.dentry,
62926+ current->exec_file->f_path.mnt) :
62927+ curr->filename, curr->filename,
62928+ &fakeip, 0, type,
62929+ protocol, GR_BIND, &current->signal->saved_ip);
62930+ }
62931+ /* we'll log when they use connect or bind */
62932+ goto exit;
62933+ }
62934+
62935+exit_fail:
62936+ if (domain == PF_INET)
62937+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
62938+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
62939+ else
62940+#ifndef CONFIG_IPV6
62941+ if (domain != PF_INET6)
62942+#endif
62943+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
62944+ gr_socktype_to_name(type), protocol);
62945+
62946+ return 0;
62947+exit:
62948+ return 1;
62949+}
62950+
62951+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
62952+{
62953+ if ((ip->mode & mode) &&
62954+ (ip_port >= ip->low) &&
62955+ (ip_port <= ip->high) &&
62956+ ((ntohl(ip_addr) & our_netmask) ==
62957+ (ntohl(our_addr) & our_netmask))
62958+ && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
62959+ && (ip->type & (1U << type))) {
62960+ if (ip->mode & GR_INVERT)
62961+ return 2; // specifically denied
62962+ else
62963+ return 1; // allowed
62964+ }
62965+
62966+ return 0; // not specifically allowed, may continue parsing
62967+}
62968+
62969+static int
62970+gr_search_connectbind(const int full_mode, struct sock *sk,
62971+ struct sockaddr_in *addr, const int type)
62972+{
62973+ char iface[IFNAMSIZ] = {0};
62974+ struct acl_subject_label *curr;
62975+ struct acl_ip_label *ip;
62976+ struct inet_sock *isk;
62977+ struct net_device *dev;
62978+ struct in_device *idev;
62979+ unsigned long i;
62980+ int ret;
62981+ int mode = full_mode & (GR_BIND | GR_CONNECT);
62982+ __u32 ip_addr = 0;
62983+ __u32 our_addr;
62984+ __u32 our_netmask;
62985+ char *p;
62986+ __u16 ip_port = 0;
62987+ const struct cred *cred = current_cred();
62988+
62989+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
62990+ return 0;
62991+
62992+ curr = current->acl;
62993+ isk = inet_sk(sk);
62994+
62995+	/* INADDR_ANY overriding for binds; inaddr_any_override is already in network order */
62996+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
62997+ addr->sin_addr.s_addr = curr->inaddr_any_override;
62998+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
62999+ struct sockaddr_in saddr;
63000+ int err;
63001+
63002+ saddr.sin_family = AF_INET;
63003+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
63004+ saddr.sin_port = isk->inet_sport;
63005+
63006+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
63007+ if (err)
63008+ return err;
63009+
63010+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
63011+ if (err)
63012+ return err;
63013+ }
63014+
63015+ if (!curr->ips)
63016+ return 0;
63017+
63018+ ip_addr = addr->sin_addr.s_addr;
63019+ ip_port = ntohs(addr->sin_port);
63020+
63021+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
63022+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
63023+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
63024+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
63025+ gr_to_filename(current->exec_file->f_path.dentry,
63026+ current->exec_file->f_path.mnt) :
63027+ curr->filename, curr->filename,
63028+ &ip_addr, ip_port, type,
63029+ sk->sk_protocol, mode, &current->signal->saved_ip);
63030+ return 0;
63031+ }
63032+
63033+ for (i = 0; i < curr->ip_num; i++) {
63034+ ip = *(curr->ips + i);
63035+ if (ip->iface != NULL) {
63036+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
63037+ p = strchr(iface, ':');
63038+ if (p != NULL)
63039+ *p = '\0';
63040+ dev = dev_get_by_name(sock_net(sk), iface);
63041+ if (dev == NULL)
63042+ continue;
63043+ idev = in_dev_get(dev);
63044+ if (idev == NULL) {
63045+ dev_put(dev);
63046+ continue;
63047+ }
63048+ rcu_read_lock();
63049+ for_ifa(idev) {
63050+ if (!strcmp(ip->iface, ifa->ifa_label)) {
63051+ our_addr = ifa->ifa_address;
63052+ our_netmask = 0xffffffff;
63053+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
63054+ if (ret == 1) {
63055+ rcu_read_unlock();
63056+ in_dev_put(idev);
63057+ dev_put(dev);
63058+ return 0;
63059+ } else if (ret == 2) {
63060+ rcu_read_unlock();
63061+ in_dev_put(idev);
63062+ dev_put(dev);
63063+ goto denied;
63064+ }
63065+ }
63066+ } endfor_ifa(idev);
63067+ rcu_read_unlock();
63068+ in_dev_put(idev);
63069+ dev_put(dev);
63070+ } else {
63071+ our_addr = ip->addr;
63072+ our_netmask = ip->netmask;
63073+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
63074+ if (ret == 1)
63075+ return 0;
63076+ else if (ret == 2)
63077+ goto denied;
63078+ }
63079+ }
63080+
63081+denied:
63082+ if (mode == GR_BIND)
63083+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
63084+ else if (mode == GR_CONNECT)
63085+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
63086+
63087+ return -EACCES;
63088+}
63089+
63090+int
63091+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
63092+{
63093+ /* always allow disconnection of dgram sockets with connect */
63094+ if (addr->sin_family == AF_UNSPEC)
63095+ return 0;
63096+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
63097+}
63098+
63099+int
63100+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
63101+{
63102+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
63103+}
63104+
63105+int gr_search_listen(struct socket *sock)
63106+{
63107+ struct sock *sk = sock->sk;
63108+ struct sockaddr_in addr;
63109+
63110+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
63111+ addr.sin_port = inet_sk(sk)->inet_sport;
63112+
63113+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
63114+}
63115+
63116+int gr_search_accept(struct socket *sock)
63117+{
63118+ struct sock *sk = sock->sk;
63119+ struct sockaddr_in addr;
63120+
63121+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
63122+ addr.sin_port = inet_sk(sk)->inet_sport;
63123+
63124+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
63125+}
63126+
63127+int
63128+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
63129+{
63130+ if (addr)
63131+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
63132+ else {
63133+ struct sockaddr_in sin;
63134+ const struct inet_sock *inet = inet_sk(sk);
63135+
63136+ sin.sin_addr.s_addr = inet->inet_daddr;
63137+ sin.sin_port = inet->inet_dport;
63138+
63139+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
63140+ }
63141+}
63142+
63143+int
63144+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
63145+{
63146+ struct sockaddr_in sin;
63147+
63148+ if (unlikely(skb->len < sizeof (struct udphdr)))
63149+ return 0; // skip this packet
63150+
63151+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
63152+ sin.sin_port = udp_hdr(skb)->source;
63153+
63154+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
63155+}
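check_ip_policy() is a tri-state: 1 means allowed, 2 means explicitly denied (a GR_INVERT rule matched), 0 means no rule matched and the caller keeps scanning curr->ips. The subnet comparison at its core, as a host-side sketch (the netmask is kept in host order, matching the rule structures; the interface case above simply substitutes a /32 mask):

    #include <stdint.h>
    #include <arpa/inet.h>

    /* the address test check_ip_policy() applies:
     * (ntohl(ip_addr) & netmask) == (ntohl(our_addr) & netmask) */
    static int in_subnet(uint32_t addr_be, uint32_t rule_addr_be,
                         uint32_t netmask)
    {
        return (ntohl(addr_be) & netmask) == (ntohl(rule_addr_be) & netmask);
    }

    /* e.g. in_subnet(inet_addr("10.0.0.7"), inet_addr("10.0.0.0"), 0xffffff00u)
     * evaluates to 1 */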
63156diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
63157new file mode 100644
63158index 0000000..25f54ef
63159--- /dev/null
63160+++ b/grsecurity/gracl_learn.c
63161@@ -0,0 +1,207 @@
63162+#include <linux/kernel.h>
63163+#include <linux/mm.h>
63164+#include <linux/sched.h>
63165+#include <linux/poll.h>
63166+#include <linux/string.h>
63167+#include <linux/file.h>
63168+#include <linux/types.h>
63169+#include <linux/vmalloc.h>
63170+#include <linux/grinternal.h>
63171+
63172+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
63173+ size_t count, loff_t *ppos);
63174+extern int gr_acl_is_enabled(void);
63175+
63176+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
63177+static int gr_learn_attached;
63178+
63179+/* use a 512k buffer */
63180+#define LEARN_BUFFER_SIZE (512 * 1024)
63181+
63182+static DEFINE_SPINLOCK(gr_learn_lock);
63183+static DEFINE_MUTEX(gr_learn_user_mutex);
63184+
63185+/* we need to maintain two buffers so that the reading context of grlearn
63186+   can use a mutex around the userspace copying, while the other kernel
63187+   contexts use a spinlock when copying into the buffer, since they cannot sleep
63188+*/
63189+static char *learn_buffer;
63190+static char *learn_buffer_user;
63191+static int learn_buffer_len;
63192+static int learn_buffer_user_len;
63193+
63194+static ssize_t
63195+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
63196+{
63197+ DECLARE_WAITQUEUE(wait, current);
63198+ ssize_t retval = 0;
63199+
63200+ add_wait_queue(&learn_wait, &wait);
63201+ set_current_state(TASK_INTERRUPTIBLE);
63202+ do {
63203+ mutex_lock(&gr_learn_user_mutex);
63204+ spin_lock(&gr_learn_lock);
63205+ if (learn_buffer_len)
63206+ break;
63207+ spin_unlock(&gr_learn_lock);
63208+ mutex_unlock(&gr_learn_user_mutex);
63209+ if (file->f_flags & O_NONBLOCK) {
63210+ retval = -EAGAIN;
63211+ goto out;
63212+ }
63213+ if (signal_pending(current)) {
63214+ retval = -ERESTARTSYS;
63215+ goto out;
63216+ }
63217+
63218+ schedule();
63219+ } while (1);
63220+
63221+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
63222+ learn_buffer_user_len = learn_buffer_len;
63223+ retval = learn_buffer_len;
63224+ learn_buffer_len = 0;
63225+
63226+ spin_unlock(&gr_learn_lock);
63227+
63228+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
63229+ retval = -EFAULT;
63230+
63231+ mutex_unlock(&gr_learn_user_mutex);
63232+out:
63233+ set_current_state(TASK_RUNNING);
63234+ remove_wait_queue(&learn_wait, &wait);
63235+ return retval;
63236+}
63237+
63238+static unsigned int
63239+poll_learn(struct file * file, poll_table * wait)
63240+{
63241+ poll_wait(file, &learn_wait, wait);
63242+
63243+ if (learn_buffer_len)
63244+ return (POLLIN | POLLRDNORM);
63245+
63246+ return 0;
63247+}
63248+
63249+void
63250+gr_clear_learn_entries(void)
63251+{
63252+ char *tmp;
63253+
63254+ mutex_lock(&gr_learn_user_mutex);
63255+ spin_lock(&gr_learn_lock);
63256+ tmp = learn_buffer;
63257+ learn_buffer = NULL;
63258+ spin_unlock(&gr_learn_lock);
63259+ if (tmp)
63260+ vfree(tmp);
63261+ if (learn_buffer_user != NULL) {
63262+ vfree(learn_buffer_user);
63263+ learn_buffer_user = NULL;
63264+ }
63265+ learn_buffer_len = 0;
63266+ mutex_unlock(&gr_learn_user_mutex);
63267+
63268+ return;
63269+}
63270+
63271+void
63272+gr_add_learn_entry(const char *fmt, ...)
63273+{
63274+ va_list args;
63275+ unsigned int len;
63276+
63277+ if (!gr_learn_attached)
63278+ return;
63279+
63280+ spin_lock(&gr_learn_lock);
63281+
63282+ /* leave a gap at the end so we know when it's "full" but don't have to
63283+ compute the exact length of the string we're trying to append
63284+ */
63285+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
63286+ spin_unlock(&gr_learn_lock);
63287+ wake_up_interruptible(&learn_wait);
63288+ return;
63289+ }
63290+ if (learn_buffer == NULL) {
63291+ spin_unlock(&gr_learn_lock);
63292+ return;
63293+ }
63294+
63295+ va_start(args, fmt);
63296+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
63297+ va_end(args);
63298+
63299+ learn_buffer_len += len + 1;
63300+
63301+ spin_unlock(&gr_learn_lock);
63302+ wake_up_interruptible(&learn_wait);
63303+
63304+ return;
63305+}
63306+
63307+static int
63308+open_learn(struct inode *inode, struct file *file)
63309+{
63310+ if (file->f_mode & FMODE_READ && gr_learn_attached)
63311+ return -EBUSY;
63312+ if (file->f_mode & FMODE_READ) {
63313+ int retval = 0;
63314+ mutex_lock(&gr_learn_user_mutex);
63315+ if (learn_buffer == NULL)
63316+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
63317+ if (learn_buffer_user == NULL)
63318+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
63319+ if (learn_buffer == NULL) {
63320+ retval = -ENOMEM;
63321+ goto out_error;
63322+ }
63323+ if (learn_buffer_user == NULL) {
63324+ retval = -ENOMEM;
63325+ goto out_error;
63326+ }
63327+ learn_buffer_len = 0;
63328+ learn_buffer_user_len = 0;
63329+ gr_learn_attached = 1;
63330+out_error:
63331+ mutex_unlock(&gr_learn_user_mutex);
63332+ return retval;
63333+ }
63334+ return 0;
63335+}
63336+
63337+static int
63338+close_learn(struct inode *inode, struct file *file)
63339+{
63340+ if (file->f_mode & FMODE_READ) {
63341+ char *tmp = NULL;
63342+ mutex_lock(&gr_learn_user_mutex);
63343+ spin_lock(&gr_learn_lock);
63344+ tmp = learn_buffer;
63345+ learn_buffer = NULL;
63346+ spin_unlock(&gr_learn_lock);
63347+ if (tmp)
63348+ vfree(tmp);
63349+ if (learn_buffer_user != NULL) {
63350+ vfree(learn_buffer_user);
63351+ learn_buffer_user = NULL;
63352+ }
63353+ learn_buffer_len = 0;
63354+ learn_buffer_user_len = 0;
63355+ gr_learn_attached = 0;
63356+ mutex_unlock(&gr_learn_user_mutex);
63357+ }
63358+
63359+ return 0;
63360+}
63361+
63362+const struct file_operations grsec_fops = {
63363+ .read = read_learn,
63364+ .write = write_grsec_handler,
63365+ .open = open_learn,
63366+ .release = close_learn,
63367+ .poll = poll_learn,
63368+};
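Since read_learn() blocks until learn_buffer fills (or returns -EAGAIN under O_NONBLOCK), the grlearn consumer can be a plain blocking read loop. A minimal sketch of such a reader; the device node path is a placeholder, since the name depends on how the grsec device node is created on a given system:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        static char buf[512 * 1024];            /* matches LEARN_BUFFER_SIZE */
        int fd = open("/dev/grsec", O_RDONLY);  /* placeholder node name */
        if (fd < 0) {
            perror("open");
            return 1;
        }
        for (;;) {
            ssize_t n = read(fd, buf, sizeof(buf)); /* blocks in read_learn() */
            if (n <= 0)
                break;
            fwrite(buf, 1, (size_t)n, stdout);      /* NUL-terminated records */
        }
        close(fd);
        return 0;
    }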
63369diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
63370new file mode 100644
63371index 0000000..39645c9
63372--- /dev/null
63373+++ b/grsecurity/gracl_res.c
63374@@ -0,0 +1,68 @@
63375+#include <linux/kernel.h>
63376+#include <linux/sched.h>
63377+#include <linux/gracl.h>
63378+#include <linux/grinternal.h>
63379+
63380+static const char *restab_log[] = {
63381+ [RLIMIT_CPU] = "RLIMIT_CPU",
63382+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
63383+ [RLIMIT_DATA] = "RLIMIT_DATA",
63384+ [RLIMIT_STACK] = "RLIMIT_STACK",
63385+ [RLIMIT_CORE] = "RLIMIT_CORE",
63386+ [RLIMIT_RSS] = "RLIMIT_RSS",
63387+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
63388+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
63389+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
63390+ [RLIMIT_AS] = "RLIMIT_AS",
63391+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
63392+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
63393+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
63394+ [RLIMIT_NICE] = "RLIMIT_NICE",
63395+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
63396+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
63397+ [GR_CRASH_RES] = "RLIMIT_CRASH"
63398+};
63399+
63400+void
63401+gr_log_resource(const struct task_struct *task,
63402+ const int res, const unsigned long wanted, const int gt)
63403+{
63404+ const struct cred *cred;
63405+ unsigned long rlim;
63406+
63407+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
63408+ return;
63409+
63410+ // not yet supported resource
63411+ if (unlikely(!restab_log[res]))
63412+ return;
63413+
63414+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
63415+ rlim = task_rlimit_max(task, res);
63416+ else
63417+ rlim = task_rlimit(task, res);
63418+
63419+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
63420+ return;
63421+
63422+ rcu_read_lock();
63423+ cred = __task_cred(task);
63424+
63425+ if (res == RLIMIT_NPROC &&
63426+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
63427+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
63428+ goto out_rcu_unlock;
63429+ else if (res == RLIMIT_MEMLOCK &&
63430+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
63431+ goto out_rcu_unlock;
63432+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
63433+ goto out_rcu_unlock;
63434+ rcu_read_unlock();
63435+
63436+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
63437+
63438+ return;
63439+out_rcu_unlock:
63440+ rcu_read_unlock();
63441+ return;
63442+}
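The early return in gr_log_resource() encodes the only subtle part of this file: RLIM_INFINITY never logs, and the gt flag selects whether hitting the limit exactly still counts as inside it. In isolation:

    /* 1 when `wanted` is still within `rlim`, mirroring the early-return
     * test; gt != 0 means the caller logs only on a strictly greater
     * request */
    static int within_limit(unsigned long wanted, unsigned long rlim, int gt,
                            unsigned long infinity /* RLIM_INFINITY */)
    {
        return rlim == infinity || (gt ? wanted <= rlim : wanted < rlim);
    }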
63443diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
63444new file mode 100644
63445index 0000000..cb1e5ab
63446--- /dev/null
63447+++ b/grsecurity/gracl_segv.c
63448@@ -0,0 +1,303 @@
63449+#include <linux/kernel.h>
63450+#include <linux/mm.h>
63451+#include <asm/uaccess.h>
63452+#include <asm/errno.h>
63453+#include <asm/mman.h>
63454+#include <net/sock.h>
63455+#include <linux/file.h>
63456+#include <linux/fs.h>
63457+#include <linux/net.h>
63458+#include <linux/in.h>
63459+#include <linux/slab.h>
63460+#include <linux/types.h>
63461+#include <linux/sched.h>
63462+#include <linux/timer.h>
63463+#include <linux/gracl.h>
63464+#include <linux/grsecurity.h>
63465+#include <linux/grinternal.h>
63466+
63467+static struct crash_uid *uid_set;
63468+static unsigned short uid_used;
63469+static DEFINE_SPINLOCK(gr_uid_lock);
63470+extern rwlock_t gr_inode_lock;
63471+extern struct acl_subject_label *
63472+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
63473+ struct acl_role_label *role);
63474+
63475+#ifdef CONFIG_BTRFS_FS
63476+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
63477+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
63478+#endif
63479+
63480+static inline dev_t __get_dev(const struct dentry *dentry)
63481+{
63482+#ifdef CONFIG_BTRFS_FS
63483+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
63484+ return get_btrfs_dev_from_inode(dentry->d_inode);
63485+ else
63486+#endif
63487+ return dentry->d_inode->i_sb->s_dev;
63488+}
63489+
63490+int
63491+gr_init_uidset(void)
63492+{
63493+ uid_set =
63494+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
63495+ uid_used = 0;
63496+
63497+ return uid_set ? 1 : 0;
63498+}
63499+
63500+void
63501+gr_free_uidset(void)
63502+{
63503+ if (uid_set)
63504+ kfree(uid_set);
63505+
63506+ return;
63507+}
63508+
63509+int
63510+gr_find_uid(const uid_t uid)
63511+{
63512+ struct crash_uid *tmp = uid_set;
63513+ uid_t buid;
63514+ int low = 0, high = uid_used - 1, mid;
63515+
63516+ while (high >= low) {
63517+ mid = (low + high) >> 1;
63518+ buid = tmp[mid].uid;
63519+ if (buid == uid)
63520+ return mid;
63521+ if (buid > uid)
63522+ high = mid - 1;
63523+ if (buid < uid)
63524+ low = mid + 1;
63525+ }
63526+
63527+ return -1;
63528+}
63529+
63530+static __inline__ void
63531+gr_insertsort(void)
63532+{
63533+ unsigned short i, j;
63534+ struct crash_uid index;
63535+
63536+ for (i = 1; i < uid_used; i++) {
63537+ index = uid_set[i];
63538+ j = i;
63539+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
63540+ uid_set[j] = uid_set[j - 1];
63541+ j--;
63542+ }
63543+ uid_set[j] = index;
63544+ }
63545+
63546+ return;
63547+}
63548+
63549+static __inline__ void
63550+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
63551+{
63552+ int loc;
63553+ uid_t uid = GR_GLOBAL_UID(kuid);
63554+
63555+ if (uid_used == GR_UIDTABLE_MAX)
63556+ return;
63557+
63558+ loc = gr_find_uid(uid);
63559+
63560+ if (loc >= 0) {
63561+ uid_set[loc].expires = expires;
63562+ return;
63563+ }
63564+
63565+ uid_set[uid_used].uid = uid;
63566+ uid_set[uid_used].expires = expires;
63567+ uid_used++;
63568+
63569+ gr_insertsort();
63570+
63571+ return;
63572+}
63573+
63574+void
63575+gr_remove_uid(const unsigned short loc)
63576+{
63577+ unsigned short i;
63578+
63579+ for (i = loc + 1; i < uid_used; i++)
63580+ uid_set[i - 1] = uid_set[i];
63581+
63582+ uid_used--;
63583+
63584+ return;
63585+}
63586+
63587+int
63588+gr_check_crash_uid(const kuid_t kuid)
63589+{
63590+ int loc;
63591+ int ret = 0;
63592+ uid_t uid;
63593+
63594+ if (unlikely(!gr_acl_is_enabled()))
63595+ return 0;
63596+
63597+ uid = GR_GLOBAL_UID(kuid);
63598+
63599+ spin_lock(&gr_uid_lock);
63600+ loc = gr_find_uid(uid);
63601+
63602+ if (loc < 0)
63603+ goto out_unlock;
63604+
63605+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
63606+ gr_remove_uid(loc);
63607+ else
63608+ ret = 1;
63609+
63610+out_unlock:
63611+ spin_unlock(&gr_uid_lock);
63612+ return ret;
63613+}
63614+
63615+static __inline__ int
63616+proc_is_setxid(const struct cred *cred)
63617+{
63618+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
63619+ !uid_eq(cred->uid, cred->fsuid))
63620+ return 1;
63621+ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
63622+ !gid_eq(cred->gid, cred->fsgid))
63623+ return 1;
63624+
63625+ return 0;
63626+}
63627+
63628+extern int gr_fake_force_sig(int sig, struct task_struct *t);
63629+
63630+void
63631+gr_handle_crash(struct task_struct *task, const int sig)
63632+{
63633+ struct acl_subject_label *curr;
63634+ struct task_struct *tsk, *tsk2;
63635+ const struct cred *cred;
63636+ const struct cred *cred2;
63637+
63638+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
63639+ return;
63640+
63641+ if (unlikely(!gr_acl_is_enabled()))
63642+ return;
63643+
63644+ curr = task->acl;
63645+
63646+ if (!(curr->resmask & (1U << GR_CRASH_RES)))
63647+ return;
63648+
63649+ if (time_before_eq(curr->expires, get_seconds())) {
63650+ curr->expires = 0;
63651+ curr->crashes = 0;
63652+ }
63653+
63654+ curr->crashes++;
63655+
63656+ if (!curr->expires)
63657+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
63658+
63659+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
63660+ time_after(curr->expires, get_seconds())) {
63661+ rcu_read_lock();
63662+ cred = __task_cred(task);
63663+ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
63664+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
63665+ spin_lock(&gr_uid_lock);
63666+ gr_insert_uid(cred->uid, curr->expires);
63667+ spin_unlock(&gr_uid_lock);
63668+ curr->expires = 0;
63669+ curr->crashes = 0;
63670+ read_lock(&tasklist_lock);
63671+ do_each_thread(tsk2, tsk) {
63672+ cred2 = __task_cred(tsk);
63673+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
63674+ gr_fake_force_sig(SIGKILL, tsk);
63675+ } while_each_thread(tsk2, tsk);
63676+ read_unlock(&tasklist_lock);
63677+ } else {
63678+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
63679+ read_lock(&tasklist_lock);
63680+ read_lock(&grsec_exec_file_lock);
63681+ do_each_thread(tsk2, tsk) {
63682+ if (likely(tsk != task)) {
63683+ // if this thread has the same subject as the one that triggered
63684+ // RES_CRASH and it's the same binary, kill it
63685+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
63686+ gr_fake_force_sig(SIGKILL, tsk);
63687+ }
63688+ } while_each_thread(tsk2, tsk);
63689+ read_unlock(&grsec_exec_file_lock);
63690+ read_unlock(&tasklist_lock);
63691+ }
63692+ rcu_read_unlock();
63693+ }
63694+
63695+ return;
63696+}
63697+
63698+int
63699+gr_check_crash_exec(const struct file *filp)
63700+{
63701+ struct acl_subject_label *curr;
63702+
63703+ if (unlikely(!gr_acl_is_enabled()))
63704+ return 0;
63705+
63706+ read_lock(&gr_inode_lock);
63707+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
63708+ __get_dev(filp->f_path.dentry),
63709+ current->role);
63710+ read_unlock(&gr_inode_lock);
63711+
63712+ if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
63713+ (!curr->crashes && !curr->expires))
63714+ return 0;
63715+
63716+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
63717+ time_after(curr->expires, get_seconds()))
63718+ return 1;
63719+ else if (time_before_eq(curr->expires, get_seconds())) {
63720+ curr->crashes = 0;
63721+ curr->expires = 0;
63722+ }
63723+
63724+ return 0;
63725+}
63726+
63727+void
63728+gr_handle_alertkill(struct task_struct *task)
63729+{
63730+ struct acl_subject_label *curracl;
63731+ __u32 curr_ip;
63732+ struct task_struct *p, *p2;
63733+
63734+ if (unlikely(!gr_acl_is_enabled()))
63735+ return;
63736+
63737+ curracl = task->acl;
63738+ curr_ip = task->signal->curr_ip;
63739+
63740+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
63741+ read_lock(&tasklist_lock);
63742+ do_each_thread(p2, p) {
63743+ if (p->signal->curr_ip == curr_ip)
63744+ gr_fake_force_sig(SIGKILL, p);
63745+ } while_each_thread(p2, p);
63746+ read_unlock(&tasklist_lock);
63747+ } else if (curracl->mode & GR_KILLPROC)
63748+ gr_fake_force_sig(SIGKILL, task);
63749+
63750+ return;
63751+}
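gr_find_uid() relies on uid_set staying sorted, which gr_insert_uid() guarantees by appending and re-running the insertion sort. The search half as a standalone model (a plain unsigned int stands in for uid_t):

    struct crash_uid_model { unsigned int uid; unsigned long expires; };

    /* binary search over a sorted array, same shape as gr_find_uid() */
    static int find_uid_model(const struct crash_uid_model *set, int used,
                              unsigned int uid)
    {
        int low = 0, high = used - 1;

        while (high >= low) {
            int mid = (low + high) / 2;   /* same as (low + high) >> 1 */
            if (set[mid].uid == uid)
                return mid;
            if (set[mid].uid > uid)
                high = mid - 1;
            else
                low = mid + 1;
        }
        return -1;
    }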
63752diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
63753new file mode 100644
63754index 0000000..98011b0
63755--- /dev/null
63756+++ b/grsecurity/gracl_shm.c
63757@@ -0,0 +1,40 @@
63758+#include <linux/kernel.h>
63759+#include <linux/mm.h>
63760+#include <linux/sched.h>
63761+#include <linux/file.h>
63762+#include <linux/ipc.h>
63763+#include <linux/gracl.h>
63764+#include <linux/grsecurity.h>
63765+#include <linux/grinternal.h>
63766+
63767+int
63768+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
63769+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
63770+{
63771+ struct task_struct *task;
63772+
63773+ if (!gr_acl_is_enabled())
63774+ return 1;
63775+
63776+ rcu_read_lock();
63777+ read_lock(&tasklist_lock);
63778+
63779+ task = find_task_by_vpid(shm_cprid);
63780+
63781+ if (unlikely(!task))
63782+ task = find_task_by_vpid(shm_lapid);
63783+
63784+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
63785+ (task_pid_nr(task) == shm_lapid)) &&
63786+ (task->acl->mode & GR_PROTSHM) &&
63787+ (task->acl != current->acl))) {
63788+ read_unlock(&tasklist_lock);
63789+ rcu_read_unlock();
63790+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
63791+ return 0;
63792+ }
63793+ read_unlock(&tasklist_lock);
63794+ rcu_read_unlock();
63795+
63796+ return 1;
63797+}
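
gr_handle_shmat() above vetoes shmat() when the task that created the segment (or, failing that, the last task to attach to it) still exists, plausibly predates the segment (start_time <= shm_createtime, or its pid matches shm_lapid), carries GR_PROTSHM in its subject mode, and the attacher runs under a different subject. The same predicate in miniature (simplified stand-in types, not the patch's structures):

/* Sketch of the gr_handle_shmat() predicate above (field names are
 * simplified stand-ins for the kernel's task/ACL structures). */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct fake_task {
	long pid;
	time_t start_time;
	const void *acl;	/* subject label */
	bool protshm;		/* subject mode has GR_PROTSHM */
};

/* returns false when the attach must be refused */
static bool shmat_allowed(const struct fake_task *owner,
			  const struct fake_task *attacher,
			  time_t shm_createtime, long shm_lapid)
{
	if (!owner)		/* creator and last attacher both gone */
		return true;
	if ((owner->start_time <= shm_createtime || owner->pid == shm_lapid) &&
	    owner->protshm && owner->acl != attacher->acl)
		return false;
	return true;
}

int main(void)
{
	struct fake_task owner = { .pid = 1234, .start_time = 100,
				   .acl = "subjA", .protshm = true };
	struct fake_task attacher = { .pid = 4321, .acl = "subjB" };

	printf("allowed: %d\n", shmat_allowed(&owner, &attacher, 150, 999));
	return 0;
}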
63798diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
63799new file mode 100644
63800index 0000000..bc0be01
63801--- /dev/null
63802+++ b/grsecurity/grsec_chdir.c
63803@@ -0,0 +1,19 @@
63804+#include <linux/kernel.h>
63805+#include <linux/sched.h>
63806+#include <linux/fs.h>
63807+#include <linux/file.h>
63808+#include <linux/grsecurity.h>
63809+#include <linux/grinternal.h>
63810+
63811+void
63812+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
63813+{
63814+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
63815+ if ((grsec_enable_chdir && grsec_enable_group &&
63816+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
63817+ !grsec_enable_group)) {
63818+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
63819+ }
63820+#endif
63821+ return;
63822+}
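
The gating expression in gr_log_chdir() recurs throughout grsecurity's audit paths: log when the feature is enabled and either group-gating is off or the caller belongs to grsec_audit_gid. It reduces to a simpler equivalent, sketched here with a hypothetical helper:

#include <stdbool.h>
#include <stdio.h>

/* (enable && group && in_group) || (enable && !group)
 * is equivalent to: enable && (!group || in_group) */
static bool should_audit(bool enable, bool group_gated, bool in_audit_group)
{
	return enable && (!group_gated || in_audit_group);
}

int main(void)
{
	printf("%d\n", should_audit(true, true, false));	/* 0: gated out */
	return 0;
}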
63823diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
63824new file mode 100644
63825index 0000000..6d2de57
63826--- /dev/null
63827+++ b/grsecurity/grsec_chroot.c
63828@@ -0,0 +1,357 @@
63829+#include <linux/kernel.h>
63830+#include <linux/module.h>
63831+#include <linux/sched.h>
63832+#include <linux/file.h>
63833+#include <linux/fs.h>
63834+#include <linux/mount.h>
63835+#include <linux/types.h>
63836+#include "../fs/mount.h"
63837+#include <linux/grsecurity.h>
63838+#include <linux/grinternal.h>
63839+
63840+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
63841+{
63842+#ifdef CONFIG_GRKERNSEC
63843+ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
63844+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root)
63845+ task->gr_is_chrooted = 1;
63846+ else
63847+ task->gr_is_chrooted = 0;
63848+
63849+ task->gr_chroot_dentry = path->dentry;
63850+#endif
63851+ return;
63852+}
63853+
63854+void gr_clear_chroot_entries(struct task_struct *task)
63855+{
63856+#ifdef CONFIG_GRKERNSEC
63857+ task->gr_is_chrooted = 0;
63858+ task->gr_chroot_dentry = NULL;
63859+#endif
63860+ return;
63861+}
63862+
63863+int
63864+gr_handle_chroot_unix(const pid_t pid)
63865+{
63866+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
63867+ struct task_struct *p;
63868+
63869+ if (unlikely(!grsec_enable_chroot_unix))
63870+ return 1;
63871+
63872+ if (likely(!proc_is_chrooted(current)))
63873+ return 1;
63874+
63875+ rcu_read_lock();
63876+ read_lock(&tasklist_lock);
63877+ p = find_task_by_vpid_unrestricted(pid);
63878+ if (unlikely(p && !have_same_root(current, p))) {
63879+ read_unlock(&tasklist_lock);
63880+ rcu_read_unlock();
63881+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
63882+ return 0;
63883+ }
63884+ read_unlock(&tasklist_lock);
63885+ rcu_read_unlock();
63886+#endif
63887+ return 1;
63888+}
63889+
63890+int
63891+gr_handle_chroot_nice(void)
63892+{
63893+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
63894+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
63895+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
63896+ return -EPERM;
63897+ }
63898+#endif
63899+ return 0;
63900+}
63901+
63902+int
63903+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
63904+{
63905+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
63906+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
63907+ && proc_is_chrooted(current)) {
63908+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
63909+ return -EACCES;
63910+ }
63911+#endif
63912+ return 0;
63913+}
63914+
63915+int
63916+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
63917+{
63918+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
63919+ struct task_struct *p;
63920+ int ret = 0;
63921+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
63922+ return ret;
63923+
63924+ read_lock(&tasklist_lock);
63925+ do_each_pid_task(pid, type, p) {
63926+ if (!have_same_root(current, p)) {
63927+ ret = 1;
63928+ goto out;
63929+ }
63930+ } while_each_pid_task(pid, type, p);
63931+out:
63932+ read_unlock(&tasklist_lock);
63933+ return ret;
63934+#endif
63935+ return 0;
63936+}
63937+
63938+int
63939+gr_pid_is_chrooted(struct task_struct *p)
63940+{
63941+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
63942+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
63943+ return 0;
63944+
63945+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
63946+ !have_same_root(current, p)) {
63947+ return 1;
63948+ }
63949+#endif
63950+ return 0;
63951+}
63952+
63953+EXPORT_SYMBOL(gr_pid_is_chrooted);
63954+
63955+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
63956+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
63957+{
63958+ struct path path, currentroot;
63959+ int ret = 0;
63960+
63961+ path.dentry = (struct dentry *)u_dentry;
63962+ path.mnt = (struct vfsmount *)u_mnt;
63963+ get_fs_root(current->fs, &currentroot);
63964+ if (path_is_under(&path, &currentroot))
63965+ ret = 1;
63966+ path_put(&currentroot);
63967+
63968+ return ret;
63969+}
63970+#endif
63971+
63972+int
63973+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
63974+{
63975+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
63976+ if (!grsec_enable_chroot_fchdir)
63977+ return 1;
63978+
63979+ if (!proc_is_chrooted(current))
63980+ return 1;
63981+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
63982+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
63983+ return 0;
63984+ }
63985+#endif
63986+ return 1;
63987+}
63988+
63989+int
63990+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
63991+ const time_t shm_createtime)
63992+{
63993+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
63994+ struct task_struct *p;
63995+ time_t starttime;
63996+
63997+ if (unlikely(!grsec_enable_chroot_shmat))
63998+ return 1;
63999+
64000+ if (likely(!proc_is_chrooted(current)))
64001+ return 1;
64002+
64003+ rcu_read_lock();
64004+ read_lock(&tasklist_lock);
64005+
64006+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
64007+ starttime = p->start_time.tv_sec;
64008+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
64009+ if (have_same_root(current, p)) {
64010+ goto allow;
64011+ } else {
64012+ read_unlock(&tasklist_lock);
64013+ rcu_read_unlock();
64014+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
64015+ return 0;
64016+ }
64017+ }
64018+		/* the creator exited and its PID may have been reused; fall through to the next check */

64019+ }
64020+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
64021+ if (unlikely(!have_same_root(current, p))) {
64022+ read_unlock(&tasklist_lock);
64023+ rcu_read_unlock();
64024+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
64025+ return 0;
64026+ }
64027+ }
64028+
64029+allow:
64030+ read_unlock(&tasklist_lock);
64031+ rcu_read_unlock();
64032+#endif
64033+ return 1;
64034+}
64035+
64036+void
64037+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
64038+{
64039+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
64040+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
64041+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
64042+#endif
64043+ return;
64044+}
64045+
64046+int
64047+gr_handle_chroot_mknod(const struct dentry *dentry,
64048+ const struct vfsmount *mnt, const int mode)
64049+{
64050+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
64051+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
64052+ proc_is_chrooted(current)) {
64053+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
64054+ return -EPERM;
64055+ }
64056+#endif
64057+ return 0;
64058+}
64059+
64060+int
64061+gr_handle_chroot_mount(const struct dentry *dentry,
64062+ const struct vfsmount *mnt, const char *dev_name)
64063+{
64064+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
64065+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
64066+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
64067+ return -EPERM;
64068+ }
64069+#endif
64070+ return 0;
64071+}
64072+
64073+int
64074+gr_handle_chroot_pivot(void)
64075+{
64076+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
64077+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
64078+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
64079+ return -EPERM;
64080+ }
64081+#endif
64082+ return 0;
64083+}
64084+
64085+int
64086+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
64087+{
64088+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
64089+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
64090+ !gr_is_outside_chroot(dentry, mnt)) {
64091+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
64092+ return -EPERM;
64093+ }
64094+#endif
64095+ return 0;
64096+}
64097+
64098+extern const char *captab_log[];
64099+extern int captab_log_entries;
64100+
64101+int
64102+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
64103+{
64104+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
64105+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
64106+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
64107+ if (cap_raised(chroot_caps, cap)) {
64108+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
64109+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
64110+ }
64111+ return 0;
64112+ }
64113+ }
64114+#endif
64115+ return 1;
64116+}
64117+
64118+int
64119+gr_chroot_is_capable(const int cap)
64120+{
64121+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
64122+ return gr_task_chroot_is_capable(current, current_cred(), cap);
64123+#endif
64124+ return 1;
64125+}
64126+
64127+int
64128+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
64129+{
64130+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
64131+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
64132+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
64133+ if (cap_raised(chroot_caps, cap)) {
64134+ return 0;
64135+ }
64136+ }
64137+#endif
64138+ return 1;
64139+}
64140+
64141+int
64142+gr_chroot_is_capable_nolog(const int cap)
64143+{
64144+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
64145+ return gr_task_chroot_is_capable_nolog(current, cap);
64146+#endif
64147+ return 1;
64148+}
64149+
64150+int
64151+gr_handle_chroot_sysctl(const int op)
64152+{
64153+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
64154+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
64155+ proc_is_chrooted(current))
64156+ return -EACCES;
64157+#endif
64158+ return 0;
64159+}
64160+
64161+void
64162+gr_handle_chroot_chdir(struct path *path)
64163+{
64164+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
64165+ if (grsec_enable_chroot_chdir)
64166+ set_fs_pwd(current->fs, path);
64167+#endif
64168+ return;
64169+}
64170+
64171+int
64172+gr_handle_chroot_chmod(const struct dentry *dentry,
64173+ const struct vfsmount *mnt, const int mode)
64174+{
64175+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
64176+ /* allow chmod +s on directories, but not files */
64177+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
64178+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
64179+ proc_is_chrooted(current)) {
64180+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
64181+ return -EPERM;
64182+ }
64183+#endif
64184+ return 0;
64185+}
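
Of the chroot checks above, the capability filter is the one most worth restating: inside a chroot, any capability raised in the compile-time GR_CHROOT_CAPS mask is denied outright, and the denial is logged only when the task actually holds the capability. A rough user-space analogue using a plain bitmask in place of kernel_cap_t (the two capability numbers are the real ones; the mask itself is illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: the kernel uses kernel_cap_t and cap_raised();
 * here a 64-bit mask stands in for GR_CHROOT_CAPS. */
#define DENIED_IN_CHROOT_MASK ((UINT64_C(1) << 21) /* CAP_SYS_ADMIN  */ | \
			       (UINT64_C(1) << 16) /* CAP_SYS_MODULE */)

static bool chroot_cap_allowed(bool chrooted, int cap)
{
	if (chrooted && (DENIED_IN_CHROOT_MASK & (UINT64_C(1) << cap)))
		return false;
	return true;
}

int main(void)
{
	printf("CAP_SYS_MODULE in chroot: %d\n", chroot_cap_allowed(true, 16));
	return 0;
}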
64186diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
64187new file mode 100644
64188index 0000000..207d409
64189--- /dev/null
64190+++ b/grsecurity/grsec_disabled.c
64191@@ -0,0 +1,434 @@
64192+#include <linux/kernel.h>
64193+#include <linux/module.h>
64194+#include <linux/sched.h>
64195+#include <linux/file.h>
64196+#include <linux/fs.h>
64197+#include <linux/kdev_t.h>
64198+#include <linux/net.h>
64199+#include <linux/in.h>
64200+#include <linux/ip.h>
64201+#include <linux/skbuff.h>
64202+#include <linux/sysctl.h>
64203+
64204+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
64205+void
64206+pax_set_initial_flags(struct linux_binprm *bprm)
64207+{
64208+ return;
64209+}
64210+#endif
64211+
64212+#ifdef CONFIG_SYSCTL
64213+__u32
64214+gr_handle_sysctl(const struct ctl_table * table, const int op)
64215+{
64216+ return 0;
64217+}
64218+#endif
64219+
64220+#ifdef CONFIG_TASKSTATS
64221+int gr_is_taskstats_denied(int pid)
64222+{
64223+ return 0;
64224+}
64225+#endif
64226+
64227+int
64228+gr_acl_is_enabled(void)
64229+{
64230+ return 0;
64231+}
64232+
64233+void
64234+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
64235+{
64236+ return;
64237+}
64238+
64239+int
64240+gr_handle_rawio(const struct inode *inode)
64241+{
64242+ return 0;
64243+}
64244+
64245+void
64246+gr_acl_handle_psacct(struct task_struct *task, const long code)
64247+{
64248+ return;
64249+}
64250+
64251+int
64252+gr_handle_ptrace(struct task_struct *task, const long request)
64253+{
64254+ return 0;
64255+}
64256+
64257+int
64258+gr_handle_proc_ptrace(struct task_struct *task)
64259+{
64260+ return 0;
64261+}
64262+
64263+int
64264+gr_set_acls(const int type)
64265+{
64266+ return 0;
64267+}
64268+
64269+int
64270+gr_check_hidden_task(const struct task_struct *tsk)
64271+{
64272+ return 0;
64273+}
64274+
64275+int
64276+gr_check_protected_task(const struct task_struct *task)
64277+{
64278+ return 0;
64279+}
64280+
64281+int
64282+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
64283+{
64284+ return 0;
64285+}
64286+
64287+void
64288+gr_copy_label(struct task_struct *tsk)
64289+{
64290+ return;
64291+}
64292+
64293+void
64294+gr_set_pax_flags(struct task_struct *task)
64295+{
64296+ return;
64297+}
64298+
64299+int
64300+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
64301+ const int unsafe_share)
64302+{
64303+ return 0;
64304+}
64305+
64306+void
64307+gr_handle_delete(const ino_t ino, const dev_t dev)
64308+{
64309+ return;
64310+}
64311+
64312+void
64313+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
64314+{
64315+ return;
64316+}
64317+
64318+void
64319+gr_handle_crash(struct task_struct *task, const int sig)
64320+{
64321+ return;
64322+}
64323+
64324+int
64325+gr_check_crash_exec(const struct file *filp)
64326+{
64327+ return 0;
64328+}
64329+
64330+int
64331+gr_check_crash_uid(const kuid_t uid)
64332+{
64333+ return 0;
64334+}
64335+
64336+void
64337+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
64338+ struct dentry *old_dentry,
64339+ struct dentry *new_dentry,
64340+ struct vfsmount *mnt, const __u8 replace)
64341+{
64342+ return;
64343+}
64344+
64345+int
64346+gr_search_socket(const int family, const int type, const int protocol)
64347+{
64348+ return 1;
64349+}
64350+
64351+int
64352+gr_search_connectbind(const int mode, const struct socket *sock,
64353+ const struct sockaddr_in *addr)
64354+{
64355+ return 0;
64356+}
64357+
64358+void
64359+gr_handle_alertkill(struct task_struct *task)
64360+{
64361+ return;
64362+}
64363+
64364+__u32
64365+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
64366+{
64367+ return 1;
64368+}
64369+
64370+__u32
64371+gr_acl_handle_hidden_file(const struct dentry * dentry,
64372+ const struct vfsmount * mnt)
64373+{
64374+ return 1;
64375+}
64376+
64377+__u32
64378+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
64379+ int acc_mode)
64380+{
64381+ return 1;
64382+}
64383+
64384+__u32
64385+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
64386+{
64387+ return 1;
64388+}
64389+
64390+__u32
64391+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
64392+{
64393+ return 1;
64394+}
64395+
64396+int
64397+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
64398+ unsigned int *vm_flags)
64399+{
64400+ return 1;
64401+}
64402+
64403+__u32
64404+gr_acl_handle_truncate(const struct dentry * dentry,
64405+ const struct vfsmount * mnt)
64406+{
64407+ return 1;
64408+}
64409+
64410+__u32
64411+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
64412+{
64413+ return 1;
64414+}
64415+
64416+__u32
64417+gr_acl_handle_access(const struct dentry * dentry,
64418+ const struct vfsmount * mnt, const int fmode)
64419+{
64420+ return 1;
64421+}
64422+
64423+__u32
64424+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
64425+ umode_t *mode)
64426+{
64427+ return 1;
64428+}
64429+
64430+__u32
64431+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
64432+{
64433+ return 1;
64434+}
64435+
64436+__u32
64437+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
64438+{
64439+ return 1;
64440+}
64441+
64442+void
64443+grsecurity_init(void)
64444+{
64445+ return;
64446+}
64447+
64448+umode_t gr_acl_umask(void)
64449+{
64450+ return 0;
64451+}
64452+
64453+__u32
64454+gr_acl_handle_mknod(const struct dentry * new_dentry,
64455+ const struct dentry * parent_dentry,
64456+ const struct vfsmount * parent_mnt,
64457+ const int mode)
64458+{
64459+ return 1;
64460+}
64461+
64462+__u32
64463+gr_acl_handle_mkdir(const struct dentry * new_dentry,
64464+ const struct dentry * parent_dentry,
64465+ const struct vfsmount * parent_mnt)
64466+{
64467+ return 1;
64468+}
64469+
64470+__u32
64471+gr_acl_handle_symlink(const struct dentry * new_dentry,
64472+ const struct dentry * parent_dentry,
64473+ const struct vfsmount * parent_mnt, const struct filename *from)
64474+{
64475+ return 1;
64476+}
64477+
64478+__u32
64479+gr_acl_handle_link(const struct dentry * new_dentry,
64480+ const struct dentry * parent_dentry,
64481+ const struct vfsmount * parent_mnt,
64482+ const struct dentry * old_dentry,
64483+ const struct vfsmount * old_mnt, const struct filename *to)
64484+{
64485+ return 1;
64486+}
64487+
64488+int
64489+gr_acl_handle_rename(const struct dentry *new_dentry,
64490+ const struct dentry *parent_dentry,
64491+ const struct vfsmount *parent_mnt,
64492+ const struct dentry *old_dentry,
64493+ const struct inode *old_parent_inode,
64494+ const struct vfsmount *old_mnt, const struct filename *newname)
64495+{
64496+ return 0;
64497+}
64498+
64499+int
64500+gr_acl_handle_filldir(const struct file *file, const char *name,
64501+ const int namelen, const ino_t ino)
64502+{
64503+ return 1;
64504+}
64505+
64506+int
64507+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
64508+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
64509+{
64510+ return 1;
64511+}
64512+
64513+int
64514+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
64515+{
64516+ return 0;
64517+}
64518+
64519+int
64520+gr_search_accept(const struct socket *sock)
64521+{
64522+ return 0;
64523+}
64524+
64525+int
64526+gr_search_listen(const struct socket *sock)
64527+{
64528+ return 0;
64529+}
64530+
64531+int
64532+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
64533+{
64534+ return 0;
64535+}
64536+
64537+__u32
64538+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
64539+{
64540+ return 1;
64541+}
64542+
64543+__u32
64544+gr_acl_handle_creat(const struct dentry * dentry,
64545+ const struct dentry * p_dentry,
64546+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
64547+ const int imode)
64548+{
64549+ return 1;
64550+}
64551+
64552+void
64553+gr_acl_handle_exit(void)
64554+{
64555+ return;
64556+}
64557+
64558+int
64559+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
64560+{
64561+ return 1;
64562+}
64563+
64564+void
64565+gr_set_role_label(const kuid_t uid, const kgid_t gid)
64566+{
64567+ return;
64568+}
64569+
64570+int
64571+gr_acl_handle_procpidmem(const struct task_struct *task)
64572+{
64573+ return 0;
64574+}
64575+
64576+int
64577+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
64578+{
64579+ return 0;
64580+}
64581+
64582+int
64583+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
64584+{
64585+ return 0;
64586+}
64587+
64588+void
64589+gr_set_kernel_label(struct task_struct *task)
64590+{
64591+ return;
64592+}
64593+
64594+int
64595+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
64596+{
64597+ return 0;
64598+}
64599+
64600+int
64601+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
64602+{
64603+ return 0;
64604+}
64605+
64606+int gr_acl_enable_at_secure(void)
64607+{
64608+ return 0;
64609+}
64610+
64611+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
64612+{
64613+ return dentry->d_inode->i_sb->s_dev;
64614+}
64615+
64616+void gr_put_exec_file(struct task_struct *task)
64617+{
64618+ return;
64619+}
64620+
64621+EXPORT_SYMBOL(gr_set_kernel_label);
64622+#ifdef CONFIG_SECURITY
64623+EXPORT_SYMBOL(gr_check_user_change);
64624+EXPORT_SYMBOL(gr_check_group_change);
64625+#endif
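
grsec_disabled.c is the build's fallback when CONFIG_GRKERNSEC is off, and its return values encode two distinct conventions: the gr_acl_handle_*() and gr_search_socket()-style permission hooks return nonzero for "granted", while the gr_handle_*()/gr_check_*() veto hooks return 0 for "no objection". With the stubs in place every call site collapses to a no-op, as this self-contained model of a hypothetical call site shows:

#include <stdio.h>

/* Stand-ins for the disabled stubs above (same return conventions). */
static int gr_acl_handle_open_stub(void) { return 1; } /* 1 == granted */
static int gr_handle_rawio_stub(void)    { return 0; } /* 0 == no veto */

static int example_open(void)
{
	if (!gr_acl_handle_open_stub())
		return -13;	/* -EACCES; unreachable with the stubs */
	if (gr_handle_rawio_stub())
		return -1;	/* -EPERM;  unreachable with the stubs */
	return 0;
}

int main(void)
{
	printf("open -> %d\n", example_open());
	return 0;
}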
64626diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
64627new file mode 100644
64628index 0000000..387032b
64629--- /dev/null
64630+++ b/grsecurity/grsec_exec.c
64631@@ -0,0 +1,187 @@
64632+#include <linux/kernel.h>
64633+#include <linux/sched.h>
64634+#include <linux/file.h>
64635+#include <linux/binfmts.h>
64636+#include <linux/fs.h>
64637+#include <linux/types.h>
64638+#include <linux/grdefs.h>
64639+#include <linux/grsecurity.h>
64640+#include <linux/grinternal.h>
64641+#include <linux/capability.h>
64642+#include <linux/module.h>
64643+#include <linux/compat.h>
64644+
64645+#include <asm/uaccess.h>
64646+
64647+#ifdef CONFIG_GRKERNSEC_EXECLOG
64648+static char gr_exec_arg_buf[132];
64649+static DEFINE_MUTEX(gr_exec_arg_mutex);
64650+#endif
64651+
64652+struct user_arg_ptr {
64653+#ifdef CONFIG_COMPAT
64654+ bool is_compat;
64655+#endif
64656+ union {
64657+ const char __user *const __user *native;
64658+#ifdef CONFIG_COMPAT
64659+ const compat_uptr_t __user *compat;
64660+#endif
64661+ } ptr;
64662+};
64663+
64664+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
64665+
64666+void
64667+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
64668+{
64669+#ifdef CONFIG_GRKERNSEC_EXECLOG
64670+ char *grarg = gr_exec_arg_buf;
64671+ unsigned int i, x, execlen = 0;
64672+ char c;
64673+
64674+ if (!((grsec_enable_execlog && grsec_enable_group &&
64675+ in_group_p(grsec_audit_gid))
64676+ || (grsec_enable_execlog && !grsec_enable_group)))
64677+ return;
64678+
64679+ mutex_lock(&gr_exec_arg_mutex);
64680+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
64681+
64682+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
64683+ const char __user *p;
64684+ unsigned int len;
64685+
64686+ p = get_user_arg_ptr(argv, i);
64687+ if (IS_ERR(p))
64688+ goto log;
64689+
64690+ len = strnlen_user(p, 128 - execlen);
64691+ if (len > 128 - execlen)
64692+ len = 128 - execlen;
64693+ else if (len > 0)
64694+ len--;
64695+ if (copy_from_user(grarg + execlen, p, len))
64696+ goto log;
64697+
64698+ /* rewrite unprintable characters */
64699+ for (x = 0; x < len; x++) {
64700+ c = *(grarg + execlen + x);
64701+ if (c < 32 || c > 126)
64702+ *(grarg + execlen + x) = ' ';
64703+ }
64704+
64705+ execlen += len;
64706+ *(grarg + execlen) = ' ';
64707+ *(grarg + execlen + 1) = '\0';
64708+ execlen++;
64709+ }
64710+
64711+ log:
64712+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
64713+ bprm->file->f_path.mnt, grarg);
64714+ mutex_unlock(&gr_exec_arg_mutex);
64715+#endif
64716+ return;
64717+}
64718+
64719+#ifdef CONFIG_GRKERNSEC
64720+extern int gr_acl_is_capable(const int cap);
64721+extern int gr_acl_is_capable_nolog(const int cap);
64722+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
64723+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
64724+extern int gr_chroot_is_capable(const int cap);
64725+extern int gr_chroot_is_capable_nolog(const int cap);
64726+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
64727+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
64728+#endif
64729+
64730+const char *captab_log[] = {
64731+ "CAP_CHOWN",
64732+ "CAP_DAC_OVERRIDE",
64733+ "CAP_DAC_READ_SEARCH",
64734+ "CAP_FOWNER",
64735+ "CAP_FSETID",
64736+ "CAP_KILL",
64737+ "CAP_SETGID",
64738+ "CAP_SETUID",
64739+ "CAP_SETPCAP",
64740+ "CAP_LINUX_IMMUTABLE",
64741+ "CAP_NET_BIND_SERVICE",
64742+ "CAP_NET_BROADCAST",
64743+ "CAP_NET_ADMIN",
64744+ "CAP_NET_RAW",
64745+ "CAP_IPC_LOCK",
64746+ "CAP_IPC_OWNER",
64747+ "CAP_SYS_MODULE",
64748+ "CAP_SYS_RAWIO",
64749+ "CAP_SYS_CHROOT",
64750+ "CAP_SYS_PTRACE",
64751+ "CAP_SYS_PACCT",
64752+ "CAP_SYS_ADMIN",
64753+ "CAP_SYS_BOOT",
64754+ "CAP_SYS_NICE",
64755+ "CAP_SYS_RESOURCE",
64756+ "CAP_SYS_TIME",
64757+ "CAP_SYS_TTY_CONFIG",
64758+ "CAP_MKNOD",
64759+ "CAP_LEASE",
64760+ "CAP_AUDIT_WRITE",
64761+ "CAP_AUDIT_CONTROL",
64762+ "CAP_SETFCAP",
64763+ "CAP_MAC_OVERRIDE",
64764+ "CAP_MAC_ADMIN",
64765+ "CAP_SYSLOG",
64766+ "CAP_WAKE_ALARM"
64767+};
64768+
64769+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
64770+
64771+int gr_is_capable(const int cap)
64772+{
64773+#ifdef CONFIG_GRKERNSEC
64774+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
64775+ return 1;
64776+ return 0;
64777+#else
64778+ return 1;
64779+#endif
64780+}
64781+
64782+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
64783+{
64784+#ifdef CONFIG_GRKERNSEC
64785+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
64786+ return 1;
64787+ return 0;
64788+#else
64789+ return 1;
64790+#endif
64791+}
64792+
64793+int gr_is_capable_nolog(const int cap)
64794+{
64795+#ifdef CONFIG_GRKERNSEC
64796+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
64797+ return 1;
64798+ return 0;
64799+#else
64800+ return 1;
64801+#endif
64802+}
64803+
64804+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
64805+{
64806+#ifdef CONFIG_GRKERNSEC
64807+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
64808+ return 1;
64809+ return 0;
64810+#else
64811+ return 1;
64812+#endif
64813+}
64814+
64815+EXPORT_SYMBOL(gr_is_capable);
64816+EXPORT_SYMBOL(gr_is_capable_nolog);
64817+EXPORT_SYMBOL(gr_task_is_capable);
64818+EXPORT_SYMBOL(gr_task_is_capable_nolog);
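
gr_handle_exec_args() above serializes exec logging through a single static 132-byte buffer guarded by gr_exec_arg_mutex, copies at most 128 bytes of argv, space-separates the arguments, and rewrites any byte outside the printable ASCII range (32-126) to a space so crafted arguments cannot inject content into the log. The sanitize loop, replicated in user space:

#include <stdio.h>
#include <string.h>

/* Replicates the "rewrite unprintable characters" loop above. */
static void sanitize(char *buf, unsigned int len)
{
	unsigned int x;

	for (x = 0; x < len; x++)
		if (buf[x] < 32 || buf[x] > 126)
			buf[x] = ' ';
}

int main(void)
{
	char arg[] = "ls\t-l\x01/tmp";

	sanitize(arg, strlen(arg));
	printf("logged as: \"%s\"\n", arg);	/* "ls -l /tmp" */
	return 0;
}

Because the buffer is static, the mutex is what keeps concurrent execs from interleaving their argument strings in the log.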
64819diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
64820new file mode 100644
64821index 0000000..06cc6ea
64822--- /dev/null
64823+++ b/grsecurity/grsec_fifo.c
64824@@ -0,0 +1,24 @@
64825+#include <linux/kernel.h>
64826+#include <linux/sched.h>
64827+#include <linux/fs.h>
64828+#include <linux/file.h>
64829+#include <linux/grinternal.h>
64830+
64831+int
64832+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
64833+ const struct dentry *dir, const int flag, const int acc_mode)
64834+{
64835+#ifdef CONFIG_GRKERNSEC_FIFO
64836+ const struct cred *cred = current_cred();
64837+
64838+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
64839+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
64840+ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
64841+ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
64842+ if (!inode_permission(dentry->d_inode, acc_mode))
64843+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
64844+ return -EACCES;
64845+ }
64846+#endif
64847+ return 0;
64848+}
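
gr_handle_fifo() refuses to open a FIFO (unless O_EXCL is given) that sits in a sticky-bit directory when the FIFO's owner is neither the directory's owner nor the opener, a policy later echoed upstream by the fs.protected_fifos sysctl. The check as a pure predicate (simplified types):

#include <stdbool.h>
#include <stdio.h>
#include <sys/types.h>

/* Predicate form of gr_handle_fifo() above (simplified types). */
static bool fifo_open_denied(bool is_fifo, bool o_excl, bool dir_sticky,
			     uid_t fifo_uid, uid_t dir_uid, uid_t fsuid)
{
	return is_fifo && !o_excl && dir_sticky &&
	       fifo_uid != dir_uid && fifo_uid != fsuid;
}

int main(void)
{
	/* FIFO owned by uid 1000 in root's sticky /tmp, opened by uid 1001 */
	printf("denied: %d\n", fifo_open_denied(true, false, true, 1000, 0, 1001));
	return 0;
}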
64849diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
64850new file mode 100644
64851index 0000000..8ca18bf
64852--- /dev/null
64853+++ b/grsecurity/grsec_fork.c
64854@@ -0,0 +1,23 @@
64855+#include <linux/kernel.h>
64856+#include <linux/sched.h>
64857+#include <linux/grsecurity.h>
64858+#include <linux/grinternal.h>
64859+#include <linux/errno.h>
64860+
64861+void
64862+gr_log_forkfail(const int retval)
64863+{
64864+#ifdef CONFIG_GRKERNSEC_FORKFAIL
64865+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
64866+ switch (retval) {
64867+ case -EAGAIN:
64868+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
64869+ break;
64870+ case -ENOMEM:
64871+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
64872+ break;
64873+ }
64874+ }
64875+#endif
64876+ return;
64877+}
64878diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
64879new file mode 100644
64880index 0000000..a862e9f
64881--- /dev/null
64882+++ b/grsecurity/grsec_init.c
64883@@ -0,0 +1,283 @@
64884+#include <linux/kernel.h>
64885+#include <linux/sched.h>
64886+#include <linux/mm.h>
64887+#include <linux/gracl.h>
64888+#include <linux/slab.h>
64889+#include <linux/vmalloc.h>
64890+#include <linux/percpu.h>
64891+#include <linux/module.h>
64892+
64893+int grsec_enable_ptrace_readexec;
64894+int grsec_enable_setxid;
64895+int grsec_enable_symlinkown;
64896+kgid_t grsec_symlinkown_gid;
64897+int grsec_enable_brute;
64898+int grsec_enable_link;
64899+int grsec_enable_dmesg;
64900+int grsec_enable_harden_ptrace;
64901+int grsec_enable_fifo;
64902+int grsec_enable_execlog;
64903+int grsec_enable_signal;
64904+int grsec_enable_forkfail;
64905+int grsec_enable_audit_ptrace;
64906+int grsec_enable_time;
64907+int grsec_enable_audit_textrel;
64908+int grsec_enable_group;
64909+kgid_t grsec_audit_gid;
64910+int grsec_enable_chdir;
64911+int grsec_enable_mount;
64912+int grsec_enable_rofs;
64913+int grsec_enable_chroot_findtask;
64914+int grsec_enable_chroot_mount;
64915+int grsec_enable_chroot_shmat;
64916+int grsec_enable_chroot_fchdir;
64917+int grsec_enable_chroot_double;
64918+int grsec_enable_chroot_pivot;
64919+int grsec_enable_chroot_chdir;
64920+int grsec_enable_chroot_chmod;
64921+int grsec_enable_chroot_mknod;
64922+int grsec_enable_chroot_nice;
64923+int grsec_enable_chroot_execlog;
64924+int grsec_enable_chroot_caps;
64925+int grsec_enable_chroot_sysctl;
64926+int grsec_enable_chroot_unix;
64927+int grsec_enable_tpe;
64928+kgid_t grsec_tpe_gid;
64929+int grsec_enable_blackhole;
64930+#ifdef CONFIG_IPV6_MODULE
64931+EXPORT_SYMBOL(grsec_enable_blackhole);
64932+#endif
64933+int grsec_lastack_retries;
64934+int grsec_enable_tpe_all;
64935+int grsec_enable_tpe_invert;
64936+int grsec_enable_socket_all;
64937+kgid_t grsec_socket_all_gid;
64938+int grsec_enable_socket_client;
64939+kgid_t grsec_socket_client_gid;
64940+int grsec_enable_socket_server;
64941+kgid_t grsec_socket_server_gid;
64942+int grsec_resource_logging;
64943+int grsec_disable_privio;
64944+int grsec_enable_log_rwxmaps;
64945+int grsec_lock;
64946+
64947+DEFINE_SPINLOCK(grsec_alert_lock);
64948+unsigned long grsec_alert_wtime = 0;
64949+unsigned long grsec_alert_fyet = 0;
64950+
64951+DEFINE_SPINLOCK(grsec_audit_lock);
64952+
64953+DEFINE_RWLOCK(grsec_exec_file_lock);
64954+
64955+char *gr_shared_page[4];
64956+
64957+char *gr_alert_log_fmt;
64958+char *gr_audit_log_fmt;
64959+char *gr_alert_log_buf;
64960+char *gr_audit_log_buf;
64961+
64962+extern struct gr_arg *gr_usermode;
64963+extern unsigned char *gr_system_salt;
64964+extern unsigned char *gr_system_sum;
64965+
64966+void __init
64967+grsecurity_init(void)
64968+{
64969+ int j;
64970+ /* create the per-cpu shared pages */
64971+
64972+#ifdef CONFIG_X86
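+	/* scrub the BIOS keyboard buffer (BDA 0x41a-0x43d: head/tail pointers plus the 32-byte ring) so keystrokes typed before boot, e.g. disk passphrases, don't linger in low memory */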
64973+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
64974+#endif
64975+
64976+ for (j = 0; j < 4; j++) {
64977+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
64978+ if (gr_shared_page[j] == NULL) {
64979+ panic("Unable to allocate grsecurity shared page");
64980+ return;
64981+ }
64982+ }
64983+
64984+ /* allocate log buffers */
64985+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
64986+ if (!gr_alert_log_fmt) {
64987+ panic("Unable to allocate grsecurity alert log format buffer");
64988+ return;
64989+ }
64990+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
64991+ if (!gr_audit_log_fmt) {
64992+ panic("Unable to allocate grsecurity audit log format buffer");
64993+ return;
64994+ }
64995+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
64996+ if (!gr_alert_log_buf) {
64997+ panic("Unable to allocate grsecurity alert log buffer");
64998+ return;
64999+ }
65000+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
65001+ if (!gr_audit_log_buf) {
65002+ panic("Unable to allocate grsecurity audit log buffer");
65003+ return;
65004+ }
65005+
65006+ /* allocate memory for authentication structure */
65007+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
65008+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
65009+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
65010+
65011+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
65012+ panic("Unable to allocate grsecurity authentication structure");
65013+ return;
65014+ }
65015+
65016+
65017+#ifdef CONFIG_GRKERNSEC_IO
65018+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
65019+ grsec_disable_privio = 1;
65020+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
65021+ grsec_disable_privio = 1;
65022+#else
65023+ grsec_disable_privio = 0;
65024+#endif
65025+#endif
65026+
65027+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
65028+	/* for backward compatibility, tpe_invert always
65029+	 * defaults to on if enabled in the kernel
65030+	 */
65031+ grsec_enable_tpe_invert = 1;
65032+#endif
65033+
65034+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
65035+#ifndef CONFIG_GRKERNSEC_SYSCTL
65036+ grsec_lock = 1;
65037+#endif
65038+
65039+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
65040+ grsec_enable_audit_textrel = 1;
65041+#endif
65042+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
65043+ grsec_enable_log_rwxmaps = 1;
65044+#endif
65045+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
65046+ grsec_enable_group = 1;
65047+ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
65048+#endif
65049+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
65050+ grsec_enable_ptrace_readexec = 1;
65051+#endif
65052+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
65053+ grsec_enable_chdir = 1;
65054+#endif
65055+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
65056+ grsec_enable_harden_ptrace = 1;
65057+#endif
65058+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
65059+ grsec_enable_mount = 1;
65060+#endif
65061+#ifdef CONFIG_GRKERNSEC_LINK
65062+ grsec_enable_link = 1;
65063+#endif
65064+#ifdef CONFIG_GRKERNSEC_BRUTE
65065+ grsec_enable_brute = 1;
65066+#endif
65067+#ifdef CONFIG_GRKERNSEC_DMESG
65068+ grsec_enable_dmesg = 1;
65069+#endif
65070+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65071+ grsec_enable_blackhole = 1;
65072+ grsec_lastack_retries = 4;
65073+#endif
65074+#ifdef CONFIG_GRKERNSEC_FIFO
65075+ grsec_enable_fifo = 1;
65076+#endif
65077+#ifdef CONFIG_GRKERNSEC_EXECLOG
65078+ grsec_enable_execlog = 1;
65079+#endif
65080+#ifdef CONFIG_GRKERNSEC_SETXID
65081+ grsec_enable_setxid = 1;
65082+#endif
65083+#ifdef CONFIG_GRKERNSEC_SIGNAL
65084+ grsec_enable_signal = 1;
65085+#endif
65086+#ifdef CONFIG_GRKERNSEC_FORKFAIL
65087+ grsec_enable_forkfail = 1;
65088+#endif
65089+#ifdef CONFIG_GRKERNSEC_TIME
65090+ grsec_enable_time = 1;
65091+#endif
65092+#ifdef CONFIG_GRKERNSEC_RESLOG
65093+ grsec_resource_logging = 1;
65094+#endif
65095+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
65096+ grsec_enable_chroot_findtask = 1;
65097+#endif
65098+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
65099+ grsec_enable_chroot_unix = 1;
65100+#endif
65101+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
65102+ grsec_enable_chroot_mount = 1;
65103+#endif
65104+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
65105+ grsec_enable_chroot_fchdir = 1;
65106+#endif
65107+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
65108+ grsec_enable_chroot_shmat = 1;
65109+#endif
65110+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
65111+ grsec_enable_audit_ptrace = 1;
65112+#endif
65113+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
65114+ grsec_enable_chroot_double = 1;
65115+#endif
65116+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
65117+ grsec_enable_chroot_pivot = 1;
65118+#endif
65119+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
65120+ grsec_enable_chroot_chdir = 1;
65121+#endif
65122+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
65123+ grsec_enable_chroot_chmod = 1;
65124+#endif
65125+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
65126+ grsec_enable_chroot_mknod = 1;
65127+#endif
65128+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
65129+ grsec_enable_chroot_nice = 1;
65130+#endif
65131+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
65132+ grsec_enable_chroot_execlog = 1;
65133+#endif
65134+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
65135+ grsec_enable_chroot_caps = 1;
65136+#endif
65137+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
65138+ grsec_enable_chroot_sysctl = 1;
65139+#endif
65140+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
65141+ grsec_enable_symlinkown = 1;
65142+ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
65143+#endif
65144+#ifdef CONFIG_GRKERNSEC_TPE
65145+ grsec_enable_tpe = 1;
65146+ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
65147+#ifdef CONFIG_GRKERNSEC_TPE_ALL
65148+ grsec_enable_tpe_all = 1;
65149+#endif
65150+#endif
65151+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
65152+ grsec_enable_socket_all = 1;
65153+ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
65154+#endif
65155+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
65156+ grsec_enable_socket_client = 1;
65157+ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
65158+#endif
65159+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
65160+ grsec_enable_socket_server = 1;
65161+ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
65162+#endif
65163+#endif
65164+
65165+ return;
65166+}
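
grsecurity_init() wires the compile-time defaults: when the kernel lacks CONFIG_GRKERNSEC_SYSCTL (or was built with SYSCTL_ON), each configured feature flag is forced on here, and without sysctl support grsec_lock is set so nothing can be toggled later; KGIDT_INIT() wraps the raw CONFIG_* GIDs into kgid_t values. The pattern in miniature (the macro is a hypothetical stand-in for the config option):

/* The compile-time default pattern used above, in miniature
 * (BUILD_WITH_SYSCTL stands in for CONFIG_GRKERNSEC_SYSCTL). */
#include <stdio.h>

#define BUILD_WITH_SYSCTL 0	/* flip to 1 to allow runtime toggling */

static int feature_enabled = 0;
static int settings_locked = 0;

static void init_defaults(void)
{
#if !BUILD_WITH_SYSCTL
	settings_locked = 1;	/* mirrors grsec_lock = 1 */
#endif
	feature_enabled = 1;	/* mirrors the #ifdef CONFIG_... blocks */
}

int main(void)
{
	init_defaults();
	printf("enabled=%d locked=%d\n", feature_enabled, settings_locked);
	return 0;
}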
65167diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
65168new file mode 100644
65169index 0000000..5e05e20
65170--- /dev/null
65171+++ b/grsecurity/grsec_link.c
65172@@ -0,0 +1,58 @@
65173+#include <linux/kernel.h>
65174+#include <linux/sched.h>
65175+#include <linux/fs.h>
65176+#include <linux/file.h>
65177+#include <linux/grinternal.h>
65178+
65179+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
65180+{
65181+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
65182+ const struct inode *link_inode = link->dentry->d_inode;
65183+
65184+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
65185+ /* ignore root-owned links, e.g. /proc/self */
65186+ gr_is_global_nonroot(link_inode->i_uid) && target &&
65187+ !uid_eq(link_inode->i_uid, target->i_uid)) {
65188+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
65189+ return 1;
65190+ }
65191+#endif
65192+ return 0;
65193+}
65194+
65195+int
65196+gr_handle_follow_link(const struct inode *parent,
65197+ const struct inode *inode,
65198+ const struct dentry *dentry, const struct vfsmount *mnt)
65199+{
65200+#ifdef CONFIG_GRKERNSEC_LINK
65201+ const struct cred *cred = current_cred();
65202+
65203+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
65204+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
65205+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
65206+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
65207+ return -EACCES;
65208+ }
65209+#endif
65210+ return 0;
65211+}
65212+
65213+int
65214+gr_handle_hardlink(const struct dentry *dentry,
65215+ const struct vfsmount *mnt,
65216+ struct inode *inode, const int mode, const struct filename *to)
65217+{
65218+#ifdef CONFIG_GRKERNSEC_LINK
65219+ const struct cred *cred = current_cred();
65220+
65221+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
65222+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
65223+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
65224+ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
65225+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
65226+ return -EPERM;
65227+ }
65228+#endif
65229+ return 0;
65230+}
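
The two link checks above are direct ancestors of the fs.protected_symlinks and fs.protected_hardlinks sysctls that landed upstream in Linux 3.6: following a symlink in a world-writable sticky directory is refused when the link's owner matches neither the directory owner nor the follower, and hardlinking a file you don't own is refused unless you could already read and write it (it must also not be a privileged binary, and CAP_FOWNER bypasses the check). The symlink rule as a predicate (simplified types):

#include <stdbool.h>
#include <stdio.h>
#include <sys/types.h>

/* Predicate form of gr_handle_follow_link() above. */
static bool follow_denied(bool is_link, bool dir_sticky, bool dir_world_w,
			  uid_t link_uid, uid_t dir_uid, uid_t fsuid)
{
	return is_link && dir_sticky && dir_world_w &&
	       link_uid != dir_uid && link_uid != fsuid;
}

int main(void)
{
	/* link owned by uid 1000 in root's sticky /tmp, followed by uid 1001 */
	printf("denied: %d\n", follow_denied(true, true, true, 1000, 0, 1001));
	return 0;
}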
65231diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
65232new file mode 100644
65233index 0000000..7c06085
65234--- /dev/null
65235+++ b/grsecurity/grsec_log.c
65236@@ -0,0 +1,326 @@
65237+#include <linux/kernel.h>
65238+#include <linux/sched.h>
65239+#include <linux/file.h>
65240+#include <linux/tty.h>
65241+#include <linux/fs.h>
65242+#include <linux/grinternal.h>
65243+
65244+#ifdef CONFIG_TREE_PREEMPT_RCU
65245+#define DISABLE_PREEMPT() preempt_disable()
65246+#define ENABLE_PREEMPT() preempt_enable()
65247+#else
65248+#define DISABLE_PREEMPT()
65249+#define ENABLE_PREEMPT()
65250+#endif
65251+
65252+#define BEGIN_LOCKS(x) \
65253+ DISABLE_PREEMPT(); \
65254+ rcu_read_lock(); \
65255+ read_lock(&tasklist_lock); \
65256+ read_lock(&grsec_exec_file_lock); \
65257+ if (x != GR_DO_AUDIT) \
65258+ spin_lock(&grsec_alert_lock); \
65259+ else \
65260+ spin_lock(&grsec_audit_lock)
65261+
65262+#define END_LOCKS(x) \
65263+ if (x != GR_DO_AUDIT) \
65264+ spin_unlock(&grsec_alert_lock); \
65265+ else \
65266+ spin_unlock(&grsec_audit_lock); \
65267+ read_unlock(&grsec_exec_file_lock); \
65268+ read_unlock(&tasklist_lock); \
65269+ rcu_read_unlock(); \
65270+ ENABLE_PREEMPT(); \
65271+ if (x == GR_DONT_AUDIT) \
65272+ gr_handle_alertkill(current)
65273+
65274+enum {
65275+ FLOODING,
65276+ NO_FLOODING
65277+};
65278+
65279+extern char *gr_alert_log_fmt;
65280+extern char *gr_audit_log_fmt;
65281+extern char *gr_alert_log_buf;
65282+extern char *gr_audit_log_buf;
65283+
65284+static int gr_log_start(int audit)
65285+{
65286+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
65287+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
65288+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
65289+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
65290+ unsigned long curr_secs = get_seconds();
65291+
65292+ if (audit == GR_DO_AUDIT)
65293+ goto set_fmt;
65294+
65295+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
65296+ grsec_alert_wtime = curr_secs;
65297+ grsec_alert_fyet = 0;
65298+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
65299+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
65300+ grsec_alert_fyet++;
65301+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
65302+ grsec_alert_wtime = curr_secs;
65303+ grsec_alert_fyet++;
65304+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
65305+ return FLOODING;
65306+ }
65307+ else return FLOODING;
65308+
65309+set_fmt:
65310+#endif
65311+ memset(buf, 0, PAGE_SIZE);
65312+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
65313+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
65314+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
65315+ } else if (current->signal->curr_ip) {
65316+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
65317+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
65318+ } else if (gr_acl_is_enabled()) {
65319+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
65320+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
65321+ } else {
65322+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
65323+ strcpy(buf, fmt);
65324+ }
65325+
65326+ return NO_FLOODING;
65327+}
65328+
65329+static void gr_log_middle(int audit, const char *msg, va_list ap)
65330+ __attribute__ ((format (printf, 2, 0)));
65331+
65332+static void gr_log_middle(int audit, const char *msg, va_list ap)
65333+{
65334+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
65335+ unsigned int len = strlen(buf);
65336+
65337+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
65338+
65339+ return;
65340+}
65341+
65342+static void gr_log_middle_varargs(int audit, const char *msg, ...)
65343+ __attribute__ ((format (printf, 2, 3)));
65344+
65345+static void gr_log_middle_varargs(int audit, const char *msg, ...)
65346+{
65347+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
65348+ unsigned int len = strlen(buf);
65349+ va_list ap;
65350+
65351+ va_start(ap, msg);
65352+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
65353+ va_end(ap);
65354+
65355+ return;
65356+}
65357+
65358+static void gr_log_end(int audit, int append_default)
65359+{
65360+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
65361+ if (append_default) {
65362+ struct task_struct *task = current;
65363+ struct task_struct *parent = task->real_parent;
65364+ const struct cred *cred = __task_cred(task);
65365+ const struct cred *pcred = __task_cred(parent);
65366+ unsigned int len = strlen(buf);
65367+
65368+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
65369+ }
65370+
65371+ printk("%s\n", buf);
65372+
65373+ return;
65374+}
65375+
65376+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
65377+{
65378+ int logtype;
65379+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
65380+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
65381+ void *voidptr = NULL;
65382+ int num1 = 0, num2 = 0;
65383+ unsigned long ulong1 = 0, ulong2 = 0;
65384+ struct dentry *dentry = NULL;
65385+ struct vfsmount *mnt = NULL;
65386+ struct file *file = NULL;
65387+ struct task_struct *task = NULL;
65388+ const struct cred *cred, *pcred;
65389+ va_list ap;
65390+
65391+ BEGIN_LOCKS(audit);
65392+ logtype = gr_log_start(audit);
65393+ if (logtype == FLOODING) {
65394+ END_LOCKS(audit);
65395+ return;
65396+ }
65397+ va_start(ap, argtypes);
65398+ switch (argtypes) {
65399+ case GR_TTYSNIFF:
65400+ task = va_arg(ap, struct task_struct *);
65401+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
65402+ break;
65403+ case GR_SYSCTL_HIDDEN:
65404+ str1 = va_arg(ap, char *);
65405+ gr_log_middle_varargs(audit, msg, result, str1);
65406+ break;
65407+ case GR_RBAC:
65408+ dentry = va_arg(ap, struct dentry *);
65409+ mnt = va_arg(ap, struct vfsmount *);
65410+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
65411+ break;
65412+ case GR_RBAC_STR:
65413+ dentry = va_arg(ap, struct dentry *);
65414+ mnt = va_arg(ap, struct vfsmount *);
65415+ str1 = va_arg(ap, char *);
65416+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
65417+ break;
65418+ case GR_STR_RBAC:
65419+ str1 = va_arg(ap, char *);
65420+ dentry = va_arg(ap, struct dentry *);
65421+ mnt = va_arg(ap, struct vfsmount *);
65422+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
65423+ break;
65424+ case GR_RBAC_MODE2:
65425+ dentry = va_arg(ap, struct dentry *);
65426+ mnt = va_arg(ap, struct vfsmount *);
65427+ str1 = va_arg(ap, char *);
65428+ str2 = va_arg(ap, char *);
65429+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
65430+ break;
65431+ case GR_RBAC_MODE3:
65432+ dentry = va_arg(ap, struct dentry *);
65433+ mnt = va_arg(ap, struct vfsmount *);
65434+ str1 = va_arg(ap, char *);
65435+ str2 = va_arg(ap, char *);
65436+ str3 = va_arg(ap, char *);
65437+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
65438+ break;
65439+ case GR_FILENAME:
65440+ dentry = va_arg(ap, struct dentry *);
65441+ mnt = va_arg(ap, struct vfsmount *);
65442+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
65443+ break;
65444+ case GR_STR_FILENAME:
65445+ str1 = va_arg(ap, char *);
65446+ dentry = va_arg(ap, struct dentry *);
65447+ mnt = va_arg(ap, struct vfsmount *);
65448+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
65449+ break;
65450+ case GR_FILENAME_STR:
65451+ dentry = va_arg(ap, struct dentry *);
65452+ mnt = va_arg(ap, struct vfsmount *);
65453+ str1 = va_arg(ap, char *);
65454+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
65455+ break;
65456+ case GR_FILENAME_TWO_INT:
65457+ dentry = va_arg(ap, struct dentry *);
65458+ mnt = va_arg(ap, struct vfsmount *);
65459+ num1 = va_arg(ap, int);
65460+ num2 = va_arg(ap, int);
65461+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
65462+ break;
65463+ case GR_FILENAME_TWO_INT_STR:
65464+ dentry = va_arg(ap, struct dentry *);
65465+ mnt = va_arg(ap, struct vfsmount *);
65466+ num1 = va_arg(ap, int);
65467+ num2 = va_arg(ap, int);
65468+ str1 = va_arg(ap, char *);
65469+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
65470+ break;
65471+ case GR_TEXTREL:
65472+ file = va_arg(ap, struct file *);
65473+ ulong1 = va_arg(ap, unsigned long);
65474+ ulong2 = va_arg(ap, unsigned long);
65475+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
65476+ break;
65477+ case GR_PTRACE:
65478+ task = va_arg(ap, struct task_struct *);
65479+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
65480+ break;
65481+ case GR_RESOURCE:
65482+ task = va_arg(ap, struct task_struct *);
65483+ cred = __task_cred(task);
65484+ pcred = __task_cred(task->real_parent);
65485+ ulong1 = va_arg(ap, unsigned long);
65486+ str1 = va_arg(ap, char *);
65487+ ulong2 = va_arg(ap, unsigned long);
65488+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
65489+ break;
65490+ case GR_CAP:
65491+ task = va_arg(ap, struct task_struct *);
65492+ cred = __task_cred(task);
65493+ pcred = __task_cred(task->real_parent);
65494+ str1 = va_arg(ap, char *);
65495+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
65496+ break;
65497+ case GR_SIG:
65498+ str1 = va_arg(ap, char *);
65499+ voidptr = va_arg(ap, void *);
65500+ gr_log_middle_varargs(audit, msg, str1, voidptr);
65501+ break;
65502+ case GR_SIG2:
65503+ task = va_arg(ap, struct task_struct *);
65504+ cred = __task_cred(task);
65505+ pcred = __task_cred(task->real_parent);
65506+ num1 = va_arg(ap, int);
65507+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
65508+ break;
65509+ case GR_CRASH1:
65510+ task = va_arg(ap, struct task_struct *);
65511+ cred = __task_cred(task);
65512+ pcred = __task_cred(task->real_parent);
65513+ ulong1 = va_arg(ap, unsigned long);
65514+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
65515+ break;
65516+ case GR_CRASH2:
65517+ task = va_arg(ap, struct task_struct *);
65518+ cred = __task_cred(task);
65519+ pcred = __task_cred(task->real_parent);
65520+ ulong1 = va_arg(ap, unsigned long);
65521+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
65522+ break;
65523+ case GR_RWXMAP:
65524+ file = va_arg(ap, struct file *);
65525+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
65526+ break;
65527+ case GR_PSACCT:
65528+ {
65529+ unsigned int wday, cday;
65530+ __u8 whr, chr;
65531+ __u8 wmin, cmin;
65532+ __u8 wsec, csec;
65533+ char cur_tty[64] = { 0 };
65534+ char parent_tty[64] = { 0 };
65535+
65536+ task = va_arg(ap, struct task_struct *);
65537+ wday = va_arg(ap, unsigned int);
65538+ cday = va_arg(ap, unsigned int);
65539+ whr = va_arg(ap, int);
65540+ chr = va_arg(ap, int);
65541+ wmin = va_arg(ap, int);
65542+ cmin = va_arg(ap, int);
65543+ wsec = va_arg(ap, int);
65544+ csec = va_arg(ap, int);
65545+ ulong1 = va_arg(ap, unsigned long);
65546+ cred = __task_cred(task);
65547+ pcred = __task_cred(task->real_parent);
65548+
65549+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
65550+ }
65551+ break;
65552+ default:
65553+ gr_log_middle(audit, msg, ap);
65554+ }
65555+ va_end(ap);
65556+ // these don't need DEFAULTSECARGS printed on the end
65557+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
65558+ gr_log_end(audit, 0);
65559+ else
65560+ gr_log_end(audit, 1);
65561+ END_LOCKS(audit);
65562+}
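
gr_log_start() also rate-limits alerts: within a CONFIG_GRKERNSEC_FLOODTIME-second window at most CONFIG_GRKERNSEC_FLOODBURST+1 alerts are emitted; the first alert past the burst prints a single "logging disabled" notice and everything further is dropped until a full quiet window elapses (audit messages bypass the limiter entirely). A user-space model of that state machine:

/* User-space model of the alert flood control in gr_log_start()
 * (T = CONFIG_GRKERNSEC_FLOODTIME, B = CONFIG_GRKERNSEC_FLOODBURST). */
#include <stdio.h>
#include <time.h>

#define T 10
#define B 6

static time_t wtime;
static unsigned long fyet;

static int alert_allowed(time_t now)
{
	if (!wtime || now > wtime + T) {	/* quiet window passed: reset */
		wtime = now;
		fyet = 0;
	} else if (now <= wtime + T && fyet < B) {
		fyet++;
	} else if (fyet == B) {			/* burst exhausted: go quiet */
		wtime = now;
		fyet++;
		printf("more alerts, logging disabled for %d seconds\n", T);
		return 0;
	} else
		return 0;
	return 1;
}

int main(void)
{
	time_t now = time(NULL);
	int i;

	for (i = 0; i < B + 3; i++)
		printf("alert %d -> %s\n", i,
		       alert_allowed(now) ? "logged" : "dropped");
	return 0;
}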
65563diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
65564new file mode 100644
65565index 0000000..f536303
65566--- /dev/null
65567+++ b/grsecurity/grsec_mem.c
65568@@ -0,0 +1,40 @@
65569+#include <linux/kernel.h>
65570+#include <linux/sched.h>
65571+#include <linux/mm.h>
65572+#include <linux/mman.h>
65573+#include <linux/grinternal.h>
65574+
65575+void
65576+gr_handle_ioperm(void)
65577+{
65578+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
65579+ return;
65580+}
65581+
65582+void
65583+gr_handle_iopl(void)
65584+{
65585+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
65586+ return;
65587+}
65588+
65589+void
65590+gr_handle_mem_readwrite(u64 from, u64 to)
65591+{
65592+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
65593+ return;
65594+}
65595+
65596+void
65597+gr_handle_vm86(void)
65598+{
65599+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
65600+ return;
65601+}
65602+
65603+void
65604+gr_log_badprocpid(const char *entry)
65605+{
65606+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
65607+ return;
65608+}
65609diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
65610new file mode 100644
65611index 0000000..2131422
65612--- /dev/null
65613+++ b/grsecurity/grsec_mount.c
65614@@ -0,0 +1,62 @@
65615+#include <linux/kernel.h>
65616+#include <linux/sched.h>
65617+#include <linux/mount.h>
65618+#include <linux/grsecurity.h>
65619+#include <linux/grinternal.h>
65620+
65621+void
65622+gr_log_remount(const char *devname, const int retval)
65623+{
65624+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
65625+ if (grsec_enable_mount && (retval >= 0))
65626+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
65627+#endif
65628+ return;
65629+}
65630+
65631+void
65632+gr_log_unmount(const char *devname, const int retval)
65633+{
65634+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
65635+ if (grsec_enable_mount && (retval >= 0))
65636+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
65637+#endif
65638+ return;
65639+}
65640+
65641+void
65642+gr_log_mount(const char *from, const char *to, const int retval)
65643+{
65644+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
65645+ if (grsec_enable_mount && (retval >= 0))
65646+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
65647+#endif
65648+ return;
65649+}
65650+
65651+int
65652+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
65653+{
65654+#ifdef CONFIG_GRKERNSEC_ROFS
65655+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
65656+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
65657+ return -EPERM;
65658+ } else
65659+ return 0;
65660+#endif
65661+ return 0;
65662+}
65663+
65664+int
65665+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
65666+{
65667+#ifdef CONFIG_GRKERNSEC_ROFS
65668+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
65669+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
65670+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
65671+ return -EPERM;
65672+ } else
65673+ return 0;
65674+#endif
65675+ return 0;
65676+}
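
The two ROFS handlers return -EPERM to their caller rather than logging alone, so the deny decision propagates back up the VFS path. A hypothetical call-site sketch; do_mount_checked() and its placement are assumptions for illustration, the real hook sits in the mount code elsewhere in this patch:

static int do_mount_checked(struct dentry *dentry, struct vfsmount *mnt,
			    int mnt_flags)
{
	int err;

	/* ROFS policy: refuse a read-write (re)mount once protection is on */
	err = gr_handle_rofs_mount(dentry, mnt, mnt_flags);
	if (err)
		return err;	/* -EPERM, already logged by the handler */

	/* ... continue with the normal mount work ... */
	return 0;
}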
65677diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
65678new file mode 100644
65679index 0000000..a3b12a0
65680--- /dev/null
65681+++ b/grsecurity/grsec_pax.c
65682@@ -0,0 +1,36 @@
65683+#include <linux/kernel.h>
65684+#include <linux/sched.h>
65685+#include <linux/mm.h>
65686+#include <linux/file.h>
65687+#include <linux/grinternal.h>
65688+#include <linux/grsecurity.h>
65689+
65690+void
65691+gr_log_textrel(struct vm_area_struct * vma)
65692+{
65693+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
65694+ if (grsec_enable_audit_textrel)
65695+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
65696+#endif
65697+ return;
65698+}
65699+
65700+void
65701+gr_log_rwxmmap(struct file *file)
65702+{
65703+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
65704+ if (grsec_enable_log_rwxmaps)
65705+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
65706+#endif
65707+ return;
65708+}
65709+
65710+void
65711+gr_log_rwxmprotect(struct file *file)
65712+{
65713+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
65714+ if (grsec_enable_log_rwxmaps)
65715+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
65716+#endif
65717+ return;
65718+}
65719diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
65720new file mode 100644
65721index 0000000..f7f29aa
65722--- /dev/null
65723+++ b/grsecurity/grsec_ptrace.c
65724@@ -0,0 +1,30 @@
65725+#include <linux/kernel.h>
65726+#include <linux/sched.h>
65727+#include <linux/grinternal.h>
65728+#include <linux/security.h>
65729+
65730+void
65731+gr_audit_ptrace(struct task_struct *task)
65732+{
65733+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
65734+ if (grsec_enable_audit_ptrace)
65735+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
65736+#endif
65737+ return;
65738+}
65739+
65740+int
65741+gr_ptrace_readexec(struct file *file, int unsafe_flags)
65742+{
65743+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
65744+ const struct dentry *dentry = file->f_path.dentry;
65745+ const struct vfsmount *mnt = file->f_path.mnt;
65746+
65747+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
65748+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
65749+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
65750+ return -EACCES;
65751+ }
65752+#endif
65753+ return 0;
65754+}
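
gr_ptrace_readexec() closes a disclosure hole: a task running under a ptrace attachment (LSM_UNSAFE_PTRACE) may not execve() a file its credentials could not read, since the tracer could otherwise dump the image of an unreadable binary out of the tracee. A hypothetical exec-path sketch; prepare_exec_checked() is invented, and the real wiring lives in the exec code elsewhere in this patch:

static int prepare_exec_checked(struct file *file, int unsafe_flags)
{
	int err = gr_ptrace_readexec(file, unsafe_flags);

	if (err)
		return err;	/* -EACCES: traced task, unreadable binary */

	/* ... normal binprm setup continues ... */
	return 0;
}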
65755diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
65756new file mode 100644
65757index 0000000..e09715a
65758--- /dev/null
65759+++ b/grsecurity/grsec_sig.c
65760@@ -0,0 +1,222 @@
65761+#include <linux/kernel.h>
65762+#include <linux/sched.h>
65763+#include <linux/delay.h>
65764+#include <linux/grsecurity.h>
65765+#include <linux/grinternal.h>
65766+#include <linux/hardirq.h>
65767+
65768+char *signames[] = {
65769+ [SIGSEGV] = "Segmentation fault",
65770+ [SIGILL] = "Illegal instruction",
65771+ [SIGABRT] = "Abort",
65772+ [SIGBUS] = "Invalid alignment/Bus error"
65773+};
65774+
65775+void
65776+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
65777+{
65778+#ifdef CONFIG_GRKERNSEC_SIGNAL
65779+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
65780+ (sig == SIGABRT) || (sig == SIGBUS))) {
65781+ if (task_pid_nr(t) == task_pid_nr(current)) {
65782+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
65783+ } else {
65784+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
65785+ }
65786+ }
65787+#endif
65788+ return;
65789+}
65790+
65791+int
65792+gr_handle_signal(const struct task_struct *p, const int sig)
65793+{
65794+#ifdef CONFIG_GRKERNSEC
65795+ /* ignore the 0 signal for protected task checks */
65796+ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
65797+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
65798+ return -EPERM;
65799+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
65800+ return -EPERM;
65801+ }
65802+#endif
65803+ return 0;
65804+}
65805+
65806+#ifdef CONFIG_GRKERNSEC
65807+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
65808+
65809+int gr_fake_force_sig(int sig, struct task_struct *t)
65810+{
65811+	unsigned long flags;
65812+ int ret, blocked, ignored;
65813+ struct k_sigaction *action;
65814+
65815+ spin_lock_irqsave(&t->sighand->siglock, flags);
65816+ action = &t->sighand->action[sig-1];
65817+ ignored = action->sa.sa_handler == SIG_IGN;
65818+ blocked = sigismember(&t->blocked, sig);
65819+ if (blocked || ignored) {
65820+ action->sa.sa_handler = SIG_DFL;
65821+ if (blocked) {
65822+ sigdelset(&t->blocked, sig);
65823+ recalc_sigpending_and_wake(t);
65824+ }
65825+ }
65826+ if (action->sa.sa_handler == SIG_DFL)
65827+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
65828+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
65829+
65830+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
65831+
65832+ return ret;
65833+}
65834+#endif
65835+
65836+#ifdef CONFIG_GRKERNSEC_BRUTE
65837+#define GR_USER_BAN_TIME (15 * 60)
65838+#define GR_DAEMON_BRUTE_TIME (30 * 60)
65839+
65840+static int __get_dumpable(unsigned long mm_flags)
65841+{
65842+ int ret;
65843+
65844+ ret = mm_flags & MMF_DUMPABLE_MASK;
65845+ return (ret >= 2) ? 2 : ret;
65846+}
65847+#endif
65848+
65849+void gr_handle_brute_attach(unsigned long mm_flags)
65850+{
65851+#ifdef CONFIG_GRKERNSEC_BRUTE
65852+ struct task_struct *p = current;
65853+ kuid_t uid = GLOBAL_ROOT_UID;
65854+ int daemon = 0;
65855+
65856+ if (!grsec_enable_brute)
65857+ return;
65858+
65859+ rcu_read_lock();
65860+ read_lock(&tasklist_lock);
65861+ read_lock(&grsec_exec_file_lock);
65862+ if (p->real_parent && p->real_parent->exec_file == p->exec_file) {
65863+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
65864+ p->real_parent->brute = 1;
65865+ daemon = 1;
65866+ } else {
65867+ const struct cred *cred = __task_cred(p), *cred2;
65868+ struct task_struct *tsk, *tsk2;
65869+
65870+ if (!__get_dumpable(mm_flags) && gr_is_global_nonroot(cred->uid)) {
65871+ struct user_struct *user;
65872+
65873+ uid = cred->uid;
65874+
65875+			/* the reference taken by find_user() is dropped at exec time once the ban expires */
65876+ user = find_user(uid);
65877+ if (user == NULL)
65878+ goto unlock;
65879+ user->banned = 1;
65880+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
65881+ if (user->ban_expires == ~0UL)
65882+ user->ban_expires--;
65883+
65884+ do_each_thread(tsk2, tsk) {
65885+ cred2 = __task_cred(tsk);
65886+ if (tsk != p && uid_eq(cred2->uid, uid))
65887+ gr_fake_force_sig(SIGKILL, tsk);
65888+ } while_each_thread(tsk2, tsk);
65889+ }
65890+ }
65891+unlock:
65892+ read_unlock(&grsec_exec_file_lock);
65893+ read_unlock(&tasklist_lock);
65894+ rcu_read_unlock();
65895+
65896+ if (gr_is_global_nonroot(uid))
65897+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n",
65898+ GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
65899+ else if (daemon)
65900+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
65901+
65902+#endif
65903+ return;
65904+}
65905+
65906+void gr_handle_brute_check(void)
65907+{
65908+#ifdef CONFIG_GRKERNSEC_BRUTE
65909+ struct task_struct *p = current;
65910+
65911+ if (unlikely(p->brute)) {
65912+ if (!grsec_enable_brute)
65913+ p->brute = 0;
65914+ else if (time_before(get_seconds(), p->brute_expires))
65915+ msleep(30 * 1000);
65916+ }
65917+#endif
65918+ return;
65919+}
65920+
65921+void gr_handle_kernel_exploit(void)
65922+{
65923+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
65924+ const struct cred *cred;
65925+ struct task_struct *tsk, *tsk2;
65926+ struct user_struct *user;
65927+ kuid_t uid;
65928+
65929+ if (in_irq() || in_serving_softirq() || in_nmi())
65930+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
65931+
65932+ uid = current_uid();
65933+
65934+ if (gr_is_global_root(uid))
65935+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
65936+ else {
65937+		/* kill all the processes of this user, hold a reference
65938+		 * to their creds struct, and prevent them from creating
65939+		 * another process until system reset
65940+		 */
65941+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
65942+ GR_GLOBAL_UID(uid));
65943+ /* we intentionally leak this ref */
65944+ user = get_uid(current->cred->user);
65945+ if (user) {
65946+ user->banned = 1;
65947+ user->ban_expires = ~0UL;
65948+ }
65949+
65950+ read_lock(&tasklist_lock);
65951+ do_each_thread(tsk2, tsk) {
65952+ cred = __task_cred(tsk);
65953+ if (uid_eq(cred->uid, uid))
65954+ gr_fake_force_sig(SIGKILL, tsk);
65955+ } while_each_thread(tsk2, tsk);
65956+ read_unlock(&tasklist_lock);
65957+ }
65958+#endif
65959+}
65960+
65961+int __gr_process_user_ban(struct user_struct *user)
65962+{
65963+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
65964+ if (unlikely(user->banned)) {
65965+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
65966+ user->banned = 0;
65967+ user->ban_expires = 0;
65968+ free_uid(user);
65969+ } else
65970+ return -EPERM;
65971+ }
65972+#endif
65973+ return 0;
65974+}
65975+
65976+int gr_process_user_ban(void)
65977+{
65978+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
65979+ return __gr_process_user_ban(current->cred->user);
65980+#endif
65981+ return 0;
65982+}
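
Both ban paths share one convention: user->ban_expires holds an absolute expiry in seconds, ~0UL marks a permanent ban (the kernel-exploit lockout; note the brute path decrements an expiry that lands on ~0UL so it is never mistaken for permanent), and a ban lapses lazily the next time __gr_process_user_ban() runs. A userspace model of that bookkeeping, with time(NULL) standing in for get_seconds() and the free_uid() reference drop omitted:

#include <stdio.h>
#include <time.h>

struct user_model {
	int banned;
	unsigned long ban_expires;	/* absolute seconds; ~0UL = permanent */
};

static int process_user_ban(struct user_model *user)
{
	if (user->banned) {
		if (user->ban_expires != ~0UL &&
		    (unsigned long)time(NULL) >= user->ban_expires) {
			/* ban lapsed: clear it (the kernel also free_uid()s here) */
			user->banned = 0;
			user->ban_expires = 0;
		} else
			return -1;	/* still banned; the kernel returns -EPERM */
	}
	return 0;
}

int main(void)
{
	struct user_model u = { 1, (unsigned long)time(NULL) + 15 * 60 };

	printf("banned now: %d\n", process_user_ban(&u) != 0);
	u.ban_expires = (unsigned long)time(NULL) - 1;
	printf("banned after expiry: %d\n", process_user_ban(&u) != 0);
	return 0;
}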
65983diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
65984new file mode 100644
65985index 0000000..4030d57
65986--- /dev/null
65987+++ b/grsecurity/grsec_sock.c
65988@@ -0,0 +1,244 @@
65989+#include <linux/kernel.h>
65990+#include <linux/module.h>
65991+#include <linux/sched.h>
65992+#include <linux/file.h>
65993+#include <linux/net.h>
65994+#include <linux/in.h>
65995+#include <linux/ip.h>
65996+#include <net/sock.h>
65997+#include <net/inet_sock.h>
65998+#include <linux/grsecurity.h>
65999+#include <linux/grinternal.h>
66000+#include <linux/gracl.h>
66001+
66002+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
66003+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
66004+
66005+EXPORT_SYMBOL(gr_search_udp_recvmsg);
66006+EXPORT_SYMBOL(gr_search_udp_sendmsg);
66007+
66008+#ifdef CONFIG_UNIX_MODULE
66009+EXPORT_SYMBOL(gr_acl_handle_unix);
66010+EXPORT_SYMBOL(gr_acl_handle_mknod);
66011+EXPORT_SYMBOL(gr_handle_chroot_unix);
66012+EXPORT_SYMBOL(gr_handle_create);
66013+#endif
66014+
66015+#ifdef CONFIG_GRKERNSEC
66016+#define gr_conn_table_size 32749
66017+struct conn_table_entry {
66018+ struct conn_table_entry *next;
66019+ struct signal_struct *sig;
66020+};
66021+
66022+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
66023+DEFINE_SPINLOCK(gr_conn_table_lock);
66024+
66025+extern const char * gr_socktype_to_name(unsigned char type);
66026+extern const char * gr_proto_to_name(unsigned char proto);
66027+extern const char * gr_sockfamily_to_name(unsigned char family);
66028+
66029+static __inline__ int
66030+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
66031+{
66032+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
66033+}
66034+
66035+static __inline__ int
66036+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
66037+ __u16 sport, __u16 dport)
66038+{
66039+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
66040+ sig->gr_sport == sport && sig->gr_dport == dport))
66041+ return 1;
66042+ else
66043+ return 0;
66044+}
66045+
66046+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
66047+{
66048+ struct conn_table_entry **match;
66049+ unsigned int index;
66050+
66051+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
66052+ sig->gr_sport, sig->gr_dport,
66053+ gr_conn_table_size);
66054+
66055+ newent->sig = sig;
66056+
66057+ match = &gr_conn_table[index];
66058+ newent->next = *match;
66059+ *match = newent;
66060+
66061+ return;
66062+}
66063+
66064+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
66065+{
66066+ struct conn_table_entry *match, *last = NULL;
66067+ unsigned int index;
66068+
66069+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
66070+ sig->gr_sport, sig->gr_dport,
66071+ gr_conn_table_size);
66072+
66073+ match = gr_conn_table[index];
66074+ while (match && !conn_match(match->sig,
66075+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
66076+ sig->gr_dport)) {
66077+ last = match;
66078+ match = match->next;
66079+ }
66080+
66081+ if (match) {
66082+ if (last)
66083+ last->next = match->next;
66084+ else
66085+ gr_conn_table[index] = NULL;
66086+ kfree(match);
66087+ }
66088+
66089+ return;
66090+}
66091+
66092+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
66093+ __u16 sport, __u16 dport)
66094+{
66095+ struct conn_table_entry *match;
66096+ unsigned int index;
66097+
66098+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
66099+
66100+ match = gr_conn_table[index];
66101+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
66102+ match = match->next;
66103+
66104+ if (match)
66105+ return match->sig;
66106+ else
66107+ return NULL;
66108+}
66109+
66110+#endif
66111+
66112+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
66113+{
66114+#ifdef CONFIG_GRKERNSEC
66115+ struct signal_struct *sig = task->signal;
66116+ struct conn_table_entry *newent;
66117+
66118+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
66119+ if (newent == NULL)
66120+ return;
66121+ /* no bh lock needed since we are called with bh disabled */
66122+ spin_lock(&gr_conn_table_lock);
66123+ gr_del_task_from_ip_table_nolock(sig);
66124+ sig->gr_saddr = inet->inet_rcv_saddr;
66125+ sig->gr_daddr = inet->inet_daddr;
66126+ sig->gr_sport = inet->inet_sport;
66127+ sig->gr_dport = inet->inet_dport;
66128+ gr_add_to_task_ip_table_nolock(sig, newent);
66129+ spin_unlock(&gr_conn_table_lock);
66130+#endif
66131+ return;
66132+}
66133+
66134+void gr_del_task_from_ip_table(struct task_struct *task)
66135+{
66136+#ifdef CONFIG_GRKERNSEC
66137+ spin_lock_bh(&gr_conn_table_lock);
66138+ gr_del_task_from_ip_table_nolock(task->signal);
66139+ spin_unlock_bh(&gr_conn_table_lock);
66140+#endif
66141+ return;
66142+}
66143+
66144+void
66145+gr_attach_curr_ip(const struct sock *sk)
66146+{
66147+#ifdef CONFIG_GRKERNSEC
66148+ struct signal_struct *p, *set;
66149+ const struct inet_sock *inet = inet_sk(sk);
66150+
66151+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
66152+ return;
66153+
66154+ set = current->signal;
66155+
66156+ spin_lock_bh(&gr_conn_table_lock);
66157+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
66158+ inet->inet_dport, inet->inet_sport);
66159+ if (unlikely(p != NULL)) {
66160+ set->curr_ip = p->curr_ip;
66161+ set->used_accept = 1;
66162+ gr_del_task_from_ip_table_nolock(p);
66163+ spin_unlock_bh(&gr_conn_table_lock);
66164+ return;
66165+ }
66166+ spin_unlock_bh(&gr_conn_table_lock);
66167+
66168+ set->curr_ip = inet->inet_daddr;
66169+ set->used_accept = 1;
66170+#endif
66171+ return;
66172+}
66173+
66174+int
66175+gr_handle_sock_all(const int family, const int type, const int protocol)
66176+{
66177+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
66178+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
66179+ (family != AF_UNIX)) {
66180+ if (family == AF_INET)
66181+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
66182+ else
66183+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
66184+ return -EACCES;
66185+ }
66186+#endif
66187+ return 0;
66188+}
66189+
66190+int
66191+gr_handle_sock_server(const struct sockaddr *sck)
66192+{
66193+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
66194+ if (grsec_enable_socket_server &&
66195+ in_group_p(grsec_socket_server_gid) &&
66196+ sck && (sck->sa_family != AF_UNIX) &&
66197+ (sck->sa_family != AF_LOCAL)) {
66198+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
66199+ return -EACCES;
66200+ }
66201+#endif
66202+ return 0;
66203+}
66204+
66205+int
66206+gr_handle_sock_server_other(const struct sock *sck)
66207+{
66208+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
66209+ if (grsec_enable_socket_server &&
66210+ in_group_p(grsec_socket_server_gid) &&
66211+ sck && (sck->sk_family != AF_UNIX) &&
66212+ (sck->sk_family != AF_LOCAL)) {
66213+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
66214+ return -EACCES;
66215+ }
66216+#endif
66217+ return 0;
66218+}
66219+
66220+int
66221+gr_handle_sock_client(const struct sockaddr *sck)
66222+{
66223+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
66224+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
66225+ sck && (sck->sa_family != AF_UNIX) &&
66226+ (sck->sa_family != AF_LOCAL)) {
66227+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
66228+ return -EACCES;
66229+ }
66230+#endif
66231+ return 0;
66232+}
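
conn_hash() folds the connection 4-tuple into one of 32749 buckets; the table size is prime, which keeps the simple add-and-shift mix reasonably well spread under the final modulo. A standalone rendition (byte order is ignored here; the kernel feeds it the inet_sock fields as stored):

#include <stdio.h>
#include <stdint.h>

#define CONN_TABLE_SIZE 32749	/* prime, as in gr_conn_table_size */

static unsigned int conn_hash(uint32_t saddr, uint32_t daddr,
			      uint16_t sport, uint16_t dport)
{
	return (daddr + saddr + ((uint32_t)sport << 8) +
		((uint32_t)dport << 16)) % CONN_TABLE_SIZE;
}

int main(void)
{
	/* 10.0.0.1:40000 -> 10.0.0.2:443, addresses written as plain ints */
	printf("bucket=%u\n", conn_hash(0x0a000001, 0x0a000002, 40000, 443));
	return 0;
}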
66233diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
66234new file mode 100644
66235index 0000000..f55ef0f
66236--- /dev/null
66237+++ b/grsecurity/grsec_sysctl.c
66238@@ -0,0 +1,469 @@
66239+#include <linux/kernel.h>
66240+#include <linux/sched.h>
66241+#include <linux/sysctl.h>
66242+#include <linux/grsecurity.h>
66243+#include <linux/grinternal.h>
66244+
66245+int
66246+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
66247+{
66248+#ifdef CONFIG_GRKERNSEC_SYSCTL
66249+ if (dirname == NULL || name == NULL)
66250+ return 0;
66251+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
66252+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
66253+ return -EACCES;
66254+ }
66255+#endif
66256+ return 0;
66257+}
66258+
66259+#ifdef CONFIG_GRKERNSEC_ROFS
66260+static int __maybe_unused one = 1;
66261+#endif
66262+
66263+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
66264+struct ctl_table grsecurity_table[] = {
66265+#ifdef CONFIG_GRKERNSEC_SYSCTL
66266+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
66267+#ifdef CONFIG_GRKERNSEC_IO
66268+ {
66269+ .procname = "disable_priv_io",
66270+ .data = &grsec_disable_privio,
66271+ .maxlen = sizeof(int),
66272+ .mode = 0600,
66273+ .proc_handler = &proc_dointvec,
66274+ },
66275+#endif
66276+#endif
66277+#ifdef CONFIG_GRKERNSEC_LINK
66278+ {
66279+ .procname = "linking_restrictions",
66280+ .data = &grsec_enable_link,
66281+ .maxlen = sizeof(int),
66282+ .mode = 0600,
66283+ .proc_handler = &proc_dointvec,
66284+ },
66285+#endif
66286+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
66287+ {
66288+ .procname = "enforce_symlinksifowner",
66289+ .data = &grsec_enable_symlinkown,
66290+ .maxlen = sizeof(int),
66291+ .mode = 0600,
66292+ .proc_handler = &proc_dointvec,
66293+ },
66294+ {
66295+ .procname = "symlinkown_gid",
66296+ .data = &grsec_symlinkown_gid,
66297+ .maxlen = sizeof(int),
66298+ .mode = 0600,
66299+ .proc_handler = &proc_dointvec,
66300+ },
66301+#endif
66302+#ifdef CONFIG_GRKERNSEC_BRUTE
66303+ {
66304+ .procname = "deter_bruteforce",
66305+ .data = &grsec_enable_brute,
66306+ .maxlen = sizeof(int),
66307+ .mode = 0600,
66308+ .proc_handler = &proc_dointvec,
66309+ },
66310+#endif
66311+#ifdef CONFIG_GRKERNSEC_FIFO
66312+ {
66313+ .procname = "fifo_restrictions",
66314+ .data = &grsec_enable_fifo,
66315+ .maxlen = sizeof(int),
66316+ .mode = 0600,
66317+ .proc_handler = &proc_dointvec,
66318+ },
66319+#endif
66320+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
66321+ {
66322+ .procname = "ptrace_readexec",
66323+ .data = &grsec_enable_ptrace_readexec,
66324+ .maxlen = sizeof(int),
66325+ .mode = 0600,
66326+ .proc_handler = &proc_dointvec,
66327+ },
66328+#endif
66329+#ifdef CONFIG_GRKERNSEC_SETXID
66330+ {
66331+ .procname = "consistent_setxid",
66332+ .data = &grsec_enable_setxid,
66333+ .maxlen = sizeof(int),
66334+ .mode = 0600,
66335+ .proc_handler = &proc_dointvec,
66336+ },
66337+#endif
66338+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66339+ {
66340+ .procname = "ip_blackhole",
66341+ .data = &grsec_enable_blackhole,
66342+ .maxlen = sizeof(int),
66343+ .mode = 0600,
66344+ .proc_handler = &proc_dointvec,
66345+ },
66346+ {
66347+ .procname = "lastack_retries",
66348+ .data = &grsec_lastack_retries,
66349+ .maxlen = sizeof(int),
66350+ .mode = 0600,
66351+ .proc_handler = &proc_dointvec,
66352+ },
66353+#endif
66354+#ifdef CONFIG_GRKERNSEC_EXECLOG
66355+ {
66356+ .procname = "exec_logging",
66357+ .data = &grsec_enable_execlog,
66358+ .maxlen = sizeof(int),
66359+ .mode = 0600,
66360+ .proc_handler = &proc_dointvec,
66361+ },
66362+#endif
66363+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
66364+ {
66365+ .procname = "rwxmap_logging",
66366+ .data = &grsec_enable_log_rwxmaps,
66367+ .maxlen = sizeof(int),
66368+ .mode = 0600,
66369+ .proc_handler = &proc_dointvec,
66370+ },
66371+#endif
66372+#ifdef CONFIG_GRKERNSEC_SIGNAL
66373+ {
66374+ .procname = "signal_logging",
66375+ .data = &grsec_enable_signal,
66376+ .maxlen = sizeof(int),
66377+ .mode = 0600,
66378+ .proc_handler = &proc_dointvec,
66379+ },
66380+#endif
66381+#ifdef CONFIG_GRKERNSEC_FORKFAIL
66382+ {
66383+ .procname = "forkfail_logging",
66384+ .data = &grsec_enable_forkfail,
66385+ .maxlen = sizeof(int),
66386+ .mode = 0600,
66387+ .proc_handler = &proc_dointvec,
66388+ },
66389+#endif
66390+#ifdef CONFIG_GRKERNSEC_TIME
66391+ {
66392+ .procname = "timechange_logging",
66393+ .data = &grsec_enable_time,
66394+ .maxlen = sizeof(int),
66395+ .mode = 0600,
66396+ .proc_handler = &proc_dointvec,
66397+ },
66398+#endif
66399+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
66400+ {
66401+ .procname = "chroot_deny_shmat",
66402+ .data = &grsec_enable_chroot_shmat,
66403+ .maxlen = sizeof(int),
66404+ .mode = 0600,
66405+ .proc_handler = &proc_dointvec,
66406+ },
66407+#endif
66408+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
66409+ {
66410+ .procname = "chroot_deny_unix",
66411+ .data = &grsec_enable_chroot_unix,
66412+ .maxlen = sizeof(int),
66413+ .mode = 0600,
66414+ .proc_handler = &proc_dointvec,
66415+ },
66416+#endif
66417+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
66418+ {
66419+ .procname = "chroot_deny_mount",
66420+ .data = &grsec_enable_chroot_mount,
66421+ .maxlen = sizeof(int),
66422+ .mode = 0600,
66423+ .proc_handler = &proc_dointvec,
66424+ },
66425+#endif
66426+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
66427+ {
66428+ .procname = "chroot_deny_fchdir",
66429+ .data = &grsec_enable_chroot_fchdir,
66430+ .maxlen = sizeof(int),
66431+ .mode = 0600,
66432+ .proc_handler = &proc_dointvec,
66433+ },
66434+#endif
66435+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
66436+ {
66437+ .procname = "chroot_deny_chroot",
66438+ .data = &grsec_enable_chroot_double,
66439+ .maxlen = sizeof(int),
66440+ .mode = 0600,
66441+ .proc_handler = &proc_dointvec,
66442+ },
66443+#endif
66444+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
66445+ {
66446+ .procname = "chroot_deny_pivot",
66447+ .data = &grsec_enable_chroot_pivot,
66448+ .maxlen = sizeof(int),
66449+ .mode = 0600,
66450+ .proc_handler = &proc_dointvec,
66451+ },
66452+#endif
66453+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
66454+ {
66455+ .procname = "chroot_enforce_chdir",
66456+ .data = &grsec_enable_chroot_chdir,
66457+ .maxlen = sizeof(int),
66458+ .mode = 0600,
66459+ .proc_handler = &proc_dointvec,
66460+ },
66461+#endif
66462+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
66463+ {
66464+ .procname = "chroot_deny_chmod",
66465+ .data = &grsec_enable_chroot_chmod,
66466+ .maxlen = sizeof(int),
66467+ .mode = 0600,
66468+ .proc_handler = &proc_dointvec,
66469+ },
66470+#endif
66471+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
66472+ {
66473+ .procname = "chroot_deny_mknod",
66474+ .data = &grsec_enable_chroot_mknod,
66475+ .maxlen = sizeof(int),
66476+ .mode = 0600,
66477+ .proc_handler = &proc_dointvec,
66478+ },
66479+#endif
66480+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
66481+ {
66482+ .procname = "chroot_restrict_nice",
66483+ .data = &grsec_enable_chroot_nice,
66484+ .maxlen = sizeof(int),
66485+ .mode = 0600,
66486+ .proc_handler = &proc_dointvec,
66487+ },
66488+#endif
66489+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
66490+ {
66491+ .procname = "chroot_execlog",
66492+ .data = &grsec_enable_chroot_execlog,
66493+ .maxlen = sizeof(int),
66494+ .mode = 0600,
66495+ .proc_handler = &proc_dointvec,
66496+ },
66497+#endif
66498+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
66499+ {
66500+ .procname = "chroot_caps",
66501+ .data = &grsec_enable_chroot_caps,
66502+ .maxlen = sizeof(int),
66503+ .mode = 0600,
66504+ .proc_handler = &proc_dointvec,
66505+ },
66506+#endif
66507+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
66508+ {
66509+ .procname = "chroot_deny_sysctl",
66510+ .data = &grsec_enable_chroot_sysctl,
66511+ .maxlen = sizeof(int),
66512+ .mode = 0600,
66513+ .proc_handler = &proc_dointvec,
66514+ },
66515+#endif
66516+#ifdef CONFIG_GRKERNSEC_TPE
66517+ {
66518+ .procname = "tpe",
66519+ .data = &grsec_enable_tpe,
66520+ .maxlen = sizeof(int),
66521+ .mode = 0600,
66522+ .proc_handler = &proc_dointvec,
66523+ },
66524+ {
66525+ .procname = "tpe_gid",
66526+ .data = &grsec_tpe_gid,
66527+ .maxlen = sizeof(int),
66528+ .mode = 0600,
66529+ .proc_handler = &proc_dointvec,
66530+ },
66531+#endif
66532+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
66533+ {
66534+ .procname = "tpe_invert",
66535+ .data = &grsec_enable_tpe_invert,
66536+ .maxlen = sizeof(int),
66537+ .mode = 0600,
66538+ .proc_handler = &proc_dointvec,
66539+ },
66540+#endif
66541+#ifdef CONFIG_GRKERNSEC_TPE_ALL
66542+ {
66543+ .procname = "tpe_restrict_all",
66544+ .data = &grsec_enable_tpe_all,
66545+ .maxlen = sizeof(int),
66546+ .mode = 0600,
66547+ .proc_handler = &proc_dointvec,
66548+ },
66549+#endif
66550+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
66551+ {
66552+ .procname = "socket_all",
66553+ .data = &grsec_enable_socket_all,
66554+ .maxlen = sizeof(int),
66555+ .mode = 0600,
66556+ .proc_handler = &proc_dointvec,
66557+ },
66558+ {
66559+ .procname = "socket_all_gid",
66560+ .data = &grsec_socket_all_gid,
66561+ .maxlen = sizeof(int),
66562+ .mode = 0600,
66563+ .proc_handler = &proc_dointvec,
66564+ },
66565+#endif
66566+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
66567+ {
66568+ .procname = "socket_client",
66569+ .data = &grsec_enable_socket_client,
66570+ .maxlen = sizeof(int),
66571+ .mode = 0600,
66572+ .proc_handler = &proc_dointvec,
66573+ },
66574+ {
66575+ .procname = "socket_client_gid",
66576+ .data = &grsec_socket_client_gid,
66577+ .maxlen = sizeof(int),
66578+ .mode = 0600,
66579+ .proc_handler = &proc_dointvec,
66580+ },
66581+#endif
66582+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
66583+ {
66584+ .procname = "socket_server",
66585+ .data = &grsec_enable_socket_server,
66586+ .maxlen = sizeof(int),
66587+ .mode = 0600,
66588+ .proc_handler = &proc_dointvec,
66589+ },
66590+ {
66591+ .procname = "socket_server_gid",
66592+ .data = &grsec_socket_server_gid,
66593+ .maxlen = sizeof(int),
66594+ .mode = 0600,
66595+ .proc_handler = &proc_dointvec,
66596+ },
66597+#endif
66598+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
66599+ {
66600+ .procname = "audit_group",
66601+ .data = &grsec_enable_group,
66602+ .maxlen = sizeof(int),
66603+ .mode = 0600,
66604+ .proc_handler = &proc_dointvec,
66605+ },
66606+ {
66607+ .procname = "audit_gid",
66608+ .data = &grsec_audit_gid,
66609+ .maxlen = sizeof(int),
66610+ .mode = 0600,
66611+ .proc_handler = &proc_dointvec,
66612+ },
66613+#endif
66614+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
66615+ {
66616+ .procname = "audit_chdir",
66617+ .data = &grsec_enable_chdir,
66618+ .maxlen = sizeof(int),
66619+ .mode = 0600,
66620+ .proc_handler = &proc_dointvec,
66621+ },
66622+#endif
66623+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
66624+ {
66625+ .procname = "audit_mount",
66626+ .data = &grsec_enable_mount,
66627+ .maxlen = sizeof(int),
66628+ .mode = 0600,
66629+ .proc_handler = &proc_dointvec,
66630+ },
66631+#endif
66632+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
66633+ {
66634+ .procname = "audit_textrel",
66635+ .data = &grsec_enable_audit_textrel,
66636+ .maxlen = sizeof(int),
66637+ .mode = 0600,
66638+ .proc_handler = &proc_dointvec,
66639+ },
66640+#endif
66641+#ifdef CONFIG_GRKERNSEC_DMESG
66642+ {
66643+ .procname = "dmesg",
66644+ .data = &grsec_enable_dmesg,
66645+ .maxlen = sizeof(int),
66646+ .mode = 0600,
66647+ .proc_handler = &proc_dointvec,
66648+ },
66649+#endif
66650+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
66651+ {
66652+ .procname = "chroot_findtask",
66653+ .data = &grsec_enable_chroot_findtask,
66654+ .maxlen = sizeof(int),
66655+ .mode = 0600,
66656+ .proc_handler = &proc_dointvec,
66657+ },
66658+#endif
66659+#ifdef CONFIG_GRKERNSEC_RESLOG
66660+ {
66661+ .procname = "resource_logging",
66662+ .data = &grsec_resource_logging,
66663+ .maxlen = sizeof(int),
66664+ .mode = 0600,
66665+ .proc_handler = &proc_dointvec,
66666+ },
66667+#endif
66668+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
66669+ {
66670+ .procname = "audit_ptrace",
66671+ .data = &grsec_enable_audit_ptrace,
66672+ .maxlen = sizeof(int),
66673+ .mode = 0600,
66674+ .proc_handler = &proc_dointvec,
66675+ },
66676+#endif
66677+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
66678+ {
66679+ .procname = "harden_ptrace",
66680+ .data = &grsec_enable_harden_ptrace,
66681+ .maxlen = sizeof(int),
66682+ .mode = 0600,
66683+ .proc_handler = &proc_dointvec,
66684+ },
66685+#endif
66686+ {
66687+ .procname = "grsec_lock",
66688+ .data = &grsec_lock,
66689+ .maxlen = sizeof(int),
66690+ .mode = 0600,
66691+ .proc_handler = &proc_dointvec,
66692+ },
66693+#endif
66694+#ifdef CONFIG_GRKERNSEC_ROFS
66695+ {
66696+ .procname = "romount_protect",
66697+ .data = &grsec_enable_rofs,
66698+ .maxlen = sizeof(int),
66699+ .mode = 0600,
66700+ .proc_handler = &proc_dointvec_minmax,
66701+ .extra1 = &one,
66702+ .extra2 = &one,
66703+ },
66704+#endif
66705+ { }
66706+};
66707+#endif
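
Note the romount_protect entry: proc_dointvec_minmax with extra1 and extra2 both pointing at one clamps the accepted range to exactly 1, so ROFS protection can be switched on at runtime but never back off. How the table gets exposed is not part of this hunk; a hypothetical registration sketch, assuming the stock register_sysctl() helper (the patch's actual hookup lives elsewhere):

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sysctl.h>

static struct ctl_table_header *grsec_sysctl_header;

static int __init grsec_sysctl_init(void)
{
	/* expose the entries above under /proc/sys/grsecurity/ */
	grsec_sysctl_header = register_sysctl("grsecurity", grsecurity_table);
	return grsec_sysctl_header ? 0 : -ENOMEM;
}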
66708diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
66709new file mode 100644
66710index 0000000..0dc13c3
66711--- /dev/null
66712+++ b/grsecurity/grsec_time.c
66713@@ -0,0 +1,16 @@
66714+#include <linux/kernel.h>
66715+#include <linux/sched.h>
66716+#include <linux/grinternal.h>
66717+#include <linux/module.h>
66718+
66719+void
66720+gr_log_timechange(void)
66721+{
66722+#ifdef CONFIG_GRKERNSEC_TIME
66723+ if (grsec_enable_time)
66724+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
66725+#endif
66726+ return;
66727+}
66728+
66729+EXPORT_SYMBOL(gr_log_timechange);
66730diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
66731new file mode 100644
66732index 0000000..ee57dcf
66733--- /dev/null
66734+++ b/grsecurity/grsec_tpe.c
66735@@ -0,0 +1,73 @@
66736+#include <linux/kernel.h>
66737+#include <linux/sched.h>
66738+#include <linux/file.h>
66739+#include <linux/fs.h>
66740+#include <linux/grinternal.h>
66741+
66742+extern int gr_acl_tpe_check(void);
66743+
66744+int
66745+gr_tpe_allow(const struct file *file)
66746+{
66747+#ifdef CONFIG_GRKERNSEC
66748+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
66749+ const struct cred *cred = current_cred();
66750+ char *msg = NULL;
66751+ char *msg2 = NULL;
66752+
66753+ // never restrict root
66754+ if (gr_is_global_root(cred->uid))
66755+ return 1;
66756+
66757+ if (grsec_enable_tpe) {
66758+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
66759+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
66760+ msg = "not being in trusted group";
66761+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
66762+ msg = "being in untrusted group";
66763+#else
66764+ if (in_group_p(grsec_tpe_gid))
66765+ msg = "being in untrusted group";
66766+#endif
66767+ }
66768+ if (!msg && gr_acl_tpe_check())
66769+ msg = "being in untrusted role";
66770+
66771+ // not in any affected group/role
66772+ if (!msg)
66773+ goto next_check;
66774+
66775+ if (gr_is_global_nonroot(inode->i_uid))
66776+ msg2 = "file in non-root-owned directory";
66777+ else if (inode->i_mode & S_IWOTH)
66778+ msg2 = "file in world-writable directory";
66779+ else if (inode->i_mode & S_IWGRP)
66780+ msg2 = "file in group-writable directory";
66781+
66782+ if (msg && msg2) {
66783+ char fullmsg[70] = {0};
66784+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
66785+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
66786+ return 0;
66787+ }
66788+ msg = NULL;
66789+next_check:
66790+#ifdef CONFIG_GRKERNSEC_TPE_ALL
66791+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
66792+ return 1;
66793+
66794+ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
66795+ msg = "directory not owned by user";
66796+ else if (inode->i_mode & S_IWOTH)
66797+ msg = "file in world-writable directory";
66798+ else if (inode->i_mode & S_IWGRP)
66799+ msg = "file in group-writable directory";
66800+
66801+ if (msg) {
66802+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
66803+ return 0;
66804+ }
66805+#endif
66806+#endif
66807+ return 1;
66808+}
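
The TPE policy above boils down to filesystem facts about the executable's parent directory. A userspace approximation of those directory checks (path handling via stat()/dirname() is a simplification; the kernel reads the dentry's parent inode directly):

#include <stdio.h>
#include <libgen.h>
#include <sys/types.h>
#include <sys/stat.h>

static const char *tpe_dir_problem(const char *path)
{
	char buf[4096];
	struct stat st;

	snprintf(buf, sizeof(buf), "%s", path);
	if (stat(dirname(buf), &st))
		return "cannot stat directory";
	if (st.st_uid != 0)
		return "file in non-root-owned directory";
	if (st.st_mode & S_IWOTH)
		return "file in world-writable directory";
	if (st.st_mode & S_IWGRP)
		return "file in group-writable directory";
	return NULL;	/* directory looks trusted */
}

int main(void)
{
	const char *msg = tpe_dir_problem("/tmp/evil");

	printf("%s\n", msg ? msg : "directory checks pass");
	return 0;
}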
66809diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
66810new file mode 100644
66811index 0000000..9f7b1ac
66812--- /dev/null
66813+++ b/grsecurity/grsum.c
66814@@ -0,0 +1,61 @@
66815+#include <linux/err.h>
66816+#include <linux/kernel.h>
66817+#include <linux/sched.h>
66818+#include <linux/mm.h>
66819+#include <linux/scatterlist.h>
66820+#include <linux/crypto.h>
66821+#include <linux/gracl.h>
66822+
66823+
66824+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
66825+#error "crypto and sha256 must be built into the kernel"
66826+#endif
66827+
66828+int
66829+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
66830+{
66831+ char *p;
66832+ struct crypto_hash *tfm;
66833+ struct hash_desc desc;
66834+ struct scatterlist sg;
66835+ unsigned char temp_sum[GR_SHA_LEN];
66836+ volatile int retval = 0;
66837+ volatile int dummy = 0;
66838+ unsigned int i;
66839+
66840+ sg_init_table(&sg, 1);
66841+
66842+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
66843+ if (IS_ERR(tfm)) {
66844+ /* should never happen, since sha256 should be built in */
66845+ return 1;
66846+ }
66847+
66848+ desc.tfm = tfm;
66849+ desc.flags = 0;
66850+
66851+ crypto_hash_init(&desc);
66852+
66853+ p = salt;
66854+ sg_set_buf(&sg, p, GR_SALT_LEN);
66855+ crypto_hash_update(&desc, &sg, sg.length);
66856+
66857+ p = entry->pw;
66858+ sg_set_buf(&sg, p, strlen(p));
66859+
66860+ crypto_hash_update(&desc, &sg, sg.length);
66861+
66862+ crypto_hash_final(&desc, temp_sum);
66863+
66864+ memset(entry->pw, 0, GR_PW_LEN);
66865+
66866+ for (i = 0; i < GR_SHA_LEN; i++)
66867+ if (sum[i] != temp_sum[i])
66868+ retval = 1;
66869+ else
66870+			dummy = 1; // waste a cycle so both branches cost the same (constant-time compare)
66871+
66872+ crypto_free_hash(tfm);
66873+
66874+ return retval;
66875+}
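
The compare loop in chkpw() deliberately has no early exit: every byte is inspected, and the dummy assignment gives the matching branch work to do, so the routine's timing does not leak how many leading bytes of the stored hash were correct. The more common branch-free idiom for the same constant-time property, as a standalone sketch:

#include <stdio.h>
#include <stddef.h>

static int const_time_diff(const unsigned char *a, const unsigned char *b,
			   size_t n)
{
	unsigned char acc = 0;
	size_t i;

	/* no data-dependent branch: timing is independent of the contents */
	for (i = 0; i < n; i++)
		acc |= a[i] ^ b[i];
	return acc != 0;	/* 1 if the buffers differ anywhere */
}

int main(void)
{
	unsigned char sum[4] = "abc", probe[4] = "abd";

	printf("differ: %d\n", const_time_diff(sum, probe, sizeof(sum)));
	return 0;
}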
66876diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
66877index 77ff547..181834f 100644
66878--- a/include/asm-generic/4level-fixup.h
66879+++ b/include/asm-generic/4level-fixup.h
66880@@ -13,8 +13,10 @@
66881 #define pmd_alloc(mm, pud, address) \
66882 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
66883 NULL: pmd_offset(pud, address))
66884+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
66885
66886 #define pud_alloc(mm, pgd, address) (pgd)
66887+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
66888 #define pud_offset(pgd, start) (pgd)
66889 #define pud_none(pud) 0
66890 #define pud_bad(pud) 0
66891diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
66892index b7babf0..04ad282 100644
66893--- a/include/asm-generic/atomic-long.h
66894+++ b/include/asm-generic/atomic-long.h
66895@@ -22,6 +22,12 @@
66896
66897 typedef atomic64_t atomic_long_t;
66898
66899+#ifdef CONFIG_PAX_REFCOUNT
66900+typedef atomic64_unchecked_t atomic_long_unchecked_t;
66901+#else
66902+typedef atomic64_t atomic_long_unchecked_t;
66903+#endif
66904+
66905 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
66906
66907 static inline long atomic_long_read(atomic_long_t *l)
66908@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
66909 return (long)atomic64_read(v);
66910 }
66911
66912+#ifdef CONFIG_PAX_REFCOUNT
66913+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
66914+{
66915+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66916+
66917+ return (long)atomic64_read_unchecked(v);
66918+}
66919+#endif
66920+
66921 static inline void atomic_long_set(atomic_long_t *l, long i)
66922 {
66923 atomic64_t *v = (atomic64_t *)l;
66924@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
66925 atomic64_set(v, i);
66926 }
66927
66928+#ifdef CONFIG_PAX_REFCOUNT
66929+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
66930+{
66931+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66932+
66933+ atomic64_set_unchecked(v, i);
66934+}
66935+#endif
66936+
66937 static inline void atomic_long_inc(atomic_long_t *l)
66938 {
66939 atomic64_t *v = (atomic64_t *)l;
66940@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
66941 atomic64_inc(v);
66942 }
66943
66944+#ifdef CONFIG_PAX_REFCOUNT
66945+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
66946+{
66947+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66948+
66949+ atomic64_inc_unchecked(v);
66950+}
66951+#endif
66952+
66953 static inline void atomic_long_dec(atomic_long_t *l)
66954 {
66955 atomic64_t *v = (atomic64_t *)l;
66956@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
66957 atomic64_dec(v);
66958 }
66959
66960+#ifdef CONFIG_PAX_REFCOUNT
66961+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
66962+{
66963+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66964+
66965+ atomic64_dec_unchecked(v);
66966+}
66967+#endif
66968+
66969 static inline void atomic_long_add(long i, atomic_long_t *l)
66970 {
66971 atomic64_t *v = (atomic64_t *)l;
66972@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
66973 atomic64_add(i, v);
66974 }
66975
66976+#ifdef CONFIG_PAX_REFCOUNT
66977+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
66978+{
66979+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66980+
66981+ atomic64_add_unchecked(i, v);
66982+}
66983+#endif
66984+
66985 static inline void atomic_long_sub(long i, atomic_long_t *l)
66986 {
66987 atomic64_t *v = (atomic64_t *)l;
66988@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
66989 atomic64_sub(i, v);
66990 }
66991
66992+#ifdef CONFIG_PAX_REFCOUNT
66993+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
66994+{
66995+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66996+
66997+ atomic64_sub_unchecked(i, v);
66998+}
66999+#endif
67000+
67001 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
67002 {
67003 atomic64_t *v = (atomic64_t *)l;
67004@@ -101,6 +161,15 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
67005 return (long)atomic64_add_return(i, v);
67006 }
67007
67008+#ifdef CONFIG_PAX_REFCOUNT
67009+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
67010+{
67011+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
67012+
67013+ return (long)atomic64_add_return_unchecked(i, v);
67014+}
67015+#endif
67016+
67017 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
67018 {
67019 atomic64_t *v = (atomic64_t *)l;
67020@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
67021 return (long)atomic64_inc_return(v);
67022 }
67023
67024+#ifdef CONFIG_PAX_REFCOUNT
67025+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
67026+{
67027+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
67028+
67029+ return (long)atomic64_inc_return_unchecked(v);
67030+}
67031+#endif
67032+
67033 static inline long atomic_long_dec_return(atomic_long_t *l)
67034 {
67035 atomic64_t *v = (atomic64_t *)l;
67036@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
67037
67038 typedef atomic_t atomic_long_t;
67039
67040+#ifdef CONFIG_PAX_REFCOUNT
67041+typedef atomic_unchecked_t atomic_long_unchecked_t;
67042+#else
67043+typedef atomic_t atomic_long_unchecked_t;
67044+#endif
67045+
67046 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
67047 static inline long atomic_long_read(atomic_long_t *l)
67048 {
67049@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
67050 return (long)atomic_read(v);
67051 }
67052
67053+#ifdef CONFIG_PAX_REFCOUNT
67054+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
67055+{
67056+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
67057+
67058+ return (long)atomic_read_unchecked(v);
67059+}
67060+#endif
67061+
67062 static inline void atomic_long_set(atomic_long_t *l, long i)
67063 {
67064 atomic_t *v = (atomic_t *)l;
67065@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
67066 atomic_set(v, i);
67067 }
67068
67069+#ifdef CONFIG_PAX_REFCOUNT
67070+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
67071+{
67072+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
67073+
67074+ atomic_set_unchecked(v, i);
67075+}
67076+#endif
67077+
67078 static inline void atomic_long_inc(atomic_long_t *l)
67079 {
67080 atomic_t *v = (atomic_t *)l;
67081@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
67082 atomic_inc(v);
67083 }
67084
67085+#ifdef CONFIG_PAX_REFCOUNT
67086+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
67087+{
67088+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
67089+
67090+ atomic_inc_unchecked(v);
67091+}
67092+#endif
67093+
67094 static inline void atomic_long_dec(atomic_long_t *l)
67095 {
67096 atomic_t *v = (atomic_t *)l;
67097@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
67098 atomic_dec(v);
67099 }
67100
67101+#ifdef CONFIG_PAX_REFCOUNT
67102+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
67103+{
67104+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
67105+
67106+ atomic_dec_unchecked(v);
67107+}
67108+#endif
67109+
67110 static inline void atomic_long_add(long i, atomic_long_t *l)
67111 {
67112 atomic_t *v = (atomic_t *)l;
67113@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
67114 atomic_add(i, v);
67115 }
67116
67117+#ifdef CONFIG_PAX_REFCOUNT
67118+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
67119+{
67120+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
67121+
67122+ atomic_add_unchecked(i, v);
67123+}
67124+#endif
67125+
67126 static inline void atomic_long_sub(long i, atomic_long_t *l)
67127 {
67128 atomic_t *v = (atomic_t *)l;
67129@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
67130 atomic_sub(i, v);
67131 }
67132
67133+#ifdef CONFIG_PAX_REFCOUNT
67134+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
67135+{
67136+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
67137+
67138+ atomic_sub_unchecked(i, v);
67139+}
67140+#endif
67141+
67142 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
67143 {
67144 atomic_t *v = (atomic_t *)l;
67145@@ -218,6 +356,16 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
67146 return (long)atomic_add_return(i, v);
67147 }
67148
67149+#ifdef CONFIG_PAX_REFCOUNT
67150+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
67151+{
67152+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
67153+
67154+ return (long)atomic_add_return_unchecked(i, v);
67155+}
67156+
67157+#endif
67158+
67159 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
67160 {
67161 atomic_t *v = (atomic_t *)l;
67162@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
67163 return (long)atomic_inc_return(v);
67164 }
67165
67166+#ifdef CONFIG_PAX_REFCOUNT
67167+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
67168+{
67169+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
67170+
67171+ return (long)atomic_inc_return_unchecked(v);
67172+}
67173+#endif
67174+
67175 static inline long atomic_long_dec_return(atomic_long_t *l)
67176 {
67177 atomic_t *v = (atomic_t *)l;
67178@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
67179
67180 #endif /* BITS_PER_LONG == 64 */
67181
67182+#ifdef CONFIG_PAX_REFCOUNT
67183+static inline void pax_refcount_needs_these_functions(void)
67184+{
67185+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
67186+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
67187+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
67188+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
67189+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
67190+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
67191+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
67192+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
67193+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
67194+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
67195+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
67196+#ifdef CONFIG_X86
67197+ atomic_clear_mask_unchecked(0, NULL);
67198+ atomic_set_mask_unchecked(0, NULL);
67199+#endif
67200+
67201+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
67202+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
67203+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
67204+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
67205+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
67206+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
67207+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
67208+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
67209+}
67210+#else
67211+#define atomic_read_unchecked(v) atomic_read(v)
67212+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
67213+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
67214+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
67215+#define atomic_inc_unchecked(v) atomic_inc(v)
67216+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
67217+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
67218+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
67219+#define atomic_dec_unchecked(v) atomic_dec(v)
67220+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
67221+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
67222+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
67223+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
67224+
67225+#define atomic_long_read_unchecked(v) atomic_long_read(v)
67226+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
67227+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
67228+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
67229+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
67230+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
67231+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
67232+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
67233+#endif
67234+
67235 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
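
The split introduced here mirrors PAX_REFCOUNT's contract: ordinary atomic_long_t operations are hardened to detect reference-count overflow, while the new atomic_long_unchecked_t keeps plain wrapping semantics for values that are counters rather than refcounts. An illustrative declaration (assumed, not from the patch) showing where each belongs:

/* a refcount stays checked: overflow here is a bug worth trapping */
static atomic_long_t obj_refs = ATOMIC_LONG_INIT(1);

/* a statistics counter may wrap harmlessly, so it opts out */
static atomic_long_unchecked_t rx_bytes = ATOMIC_LONG_INIT(0);

static inline void account_rx(long n)
{
	atomic_long_add_unchecked(n, &rx_bytes);
}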
67236diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
67237index 1ced641..c896ee8 100644
67238--- a/include/asm-generic/atomic.h
67239+++ b/include/asm-generic/atomic.h
67240@@ -159,7 +159,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
67241 * Atomically clears the bits set in @mask from @v
67242 */
67243 #ifndef atomic_clear_mask
67244-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
67245+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
67246 {
67247 unsigned long flags;
67248
67249diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
67250index b18ce4f..2ee2843 100644
67251--- a/include/asm-generic/atomic64.h
67252+++ b/include/asm-generic/atomic64.h
67253@@ -16,6 +16,8 @@ typedef struct {
67254 long long counter;
67255 } atomic64_t;
67256
67257+typedef atomic64_t atomic64_unchecked_t;
67258+
67259 #define ATOMIC64_INIT(i) { (i) }
67260
67261 extern long long atomic64_read(const atomic64_t *v);
67262@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
67263 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
67264 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
67265
67266+#define atomic64_read_unchecked(v) atomic64_read(v)
67267+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
67268+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
67269+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
67270+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
67271+#define atomic64_inc_unchecked(v) atomic64_inc(v)
67272+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
67273+#define atomic64_dec_unchecked(v) atomic64_dec(v)
67274+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
67275+
67276 #endif /* _ASM_GENERIC_ATOMIC64_H */
67277diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
67278index 1bfcfe5..e04c5c9 100644
67279--- a/include/asm-generic/cache.h
67280+++ b/include/asm-generic/cache.h
67281@@ -6,7 +6,7 @@
67282 * cache lines need to provide their own cache.h.
67283 */
67284
67285-#define L1_CACHE_SHIFT 5
67286-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
67287+#define L1_CACHE_SHIFT 5UL
67288+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
67289
67290 #endif /* __ASM_GENERIC_CACHE_H */
67291diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
67292index 0d68a1e..b74a761 100644
67293--- a/include/asm-generic/emergency-restart.h
67294+++ b/include/asm-generic/emergency-restart.h
67295@@ -1,7 +1,7 @@
67296 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
67297 #define _ASM_GENERIC_EMERGENCY_RESTART_H
67298
67299-static inline void machine_emergency_restart(void)
67300+static inline __noreturn void machine_emergency_restart(void)
67301 {
67302 machine_restart(NULL);
67303 }
67304diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
67305index 90f99c7..00ce236 100644
67306--- a/include/asm-generic/kmap_types.h
67307+++ b/include/asm-generic/kmap_types.h
67308@@ -2,9 +2,9 @@
67309 #define _ASM_GENERIC_KMAP_TYPES_H
67310
67311 #ifdef __WITH_KM_FENCE
67312-# define KM_TYPE_NR 41
67313+# define KM_TYPE_NR 42
67314 #else
67315-# define KM_TYPE_NR 20
67316+# define KM_TYPE_NR 21
67317 #endif
67318
67319 #endif
67320diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
67321index 9ceb03b..62b0b8f 100644
67322--- a/include/asm-generic/local.h
67323+++ b/include/asm-generic/local.h
67324@@ -23,24 +23,37 @@ typedef struct
67325 atomic_long_t a;
67326 } local_t;
67327
67328+typedef struct {
67329+ atomic_long_unchecked_t a;
67330+} local_unchecked_t;
67331+
67332 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
67333
67334 #define local_read(l) atomic_long_read(&(l)->a)
67335+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
67336 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
67337+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
67338 #define local_inc(l) atomic_long_inc(&(l)->a)
67339+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
67340 #define local_dec(l) atomic_long_dec(&(l)->a)
67341+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
67342 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
67343+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
67344 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
67345+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
67346
67347 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
67348 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
67349 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
67350 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
67351 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
67352+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
67353 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
67354 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
67355+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
67356
67357 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
67358+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
67359 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
67360 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
67361 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
67362diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
67363index 725612b..9cc513a 100644
67364--- a/include/asm-generic/pgtable-nopmd.h
67365+++ b/include/asm-generic/pgtable-nopmd.h
67366@@ -1,14 +1,19 @@
67367 #ifndef _PGTABLE_NOPMD_H
67368 #define _PGTABLE_NOPMD_H
67369
67370-#ifndef __ASSEMBLY__
67371-
67372 #include <asm-generic/pgtable-nopud.h>
67373
67374-struct mm_struct;
67375-
67376 #define __PAGETABLE_PMD_FOLDED
67377
67378+#define PMD_SHIFT PUD_SHIFT
67379+#define PTRS_PER_PMD 1
67380+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
67381+#define PMD_MASK (~(PMD_SIZE-1))
67382+
67383+#ifndef __ASSEMBLY__
67384+
67385+struct mm_struct;
67386+
67387 /*
67388 * Having the pmd type consist of a pud gets the size right, and allows
67389 * us to conceptually access the pud entry that this pmd is folded into
67390@@ -16,11 +21,6 @@ struct mm_struct;
67391 */
67392 typedef struct { pud_t pud; } pmd_t;
67393
67394-#define PMD_SHIFT PUD_SHIFT
67395-#define PTRS_PER_PMD 1
67396-#define PMD_SIZE (1UL << PMD_SHIFT)
67397-#define PMD_MASK (~(PMD_SIZE-1))
67398-
67399 /*
67400 * The "pud_xxx()" functions here are trivial for a folded two-level
67401 * setup: the pmd is never bad, and a pmd always exists (as it's folded
67402diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
67403index 810431d..0ec4804f 100644
67404--- a/include/asm-generic/pgtable-nopud.h
67405+++ b/include/asm-generic/pgtable-nopud.h
67406@@ -1,10 +1,15 @@
67407 #ifndef _PGTABLE_NOPUD_H
67408 #define _PGTABLE_NOPUD_H
67409
67410-#ifndef __ASSEMBLY__
67411-
67412 #define __PAGETABLE_PUD_FOLDED
67413
67414+#define PUD_SHIFT PGDIR_SHIFT
67415+#define PTRS_PER_PUD 1
67416+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
67417+#define PUD_MASK (~(PUD_SIZE-1))
67418+
67419+#ifndef __ASSEMBLY__
67420+
67421 /*
67422 * Having the pud type consist of a pgd gets the size right, and allows
67423 * us to conceptually access the pgd entry that this pud is folded into
67424@@ -12,11 +17,6 @@
67425 */
67426 typedef struct { pgd_t pgd; } pud_t;
67427
67428-#define PUD_SHIFT PGDIR_SHIFT
67429-#define PTRS_PER_PUD 1
67430-#define PUD_SIZE (1UL << PUD_SHIFT)
67431-#define PUD_MASK (~(PUD_SIZE-1))
67432-
67433 /*
67434 * The "pgd_xxx()" functions here are trivial for a folded two-level
67435 * setup: the pud is never bad, and a pud always exists (as it's folded
67436@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
67437 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
67438
67439 #define pgd_populate(mm, pgd, pud) do { } while (0)
67440+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
67441 /*
67442 * (puds are folded into pgds so this doesn't get actually called,
67443 * but the define is needed for a generic inline function.)
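In both the nopmd and nopud headers, the hunks move the PMD_*/PUD_* constants above the #ifndef __ASSEMBLY__ guard and switch 1UL to _AC(1,UL), so assembly sources can consume the same definitions: _AC() appends the UL suffix only when compiled as C. A short sketch of the idiom (EXAMPLE_SHIFT and EXAMPLE_SIZE are hypothetical):

#include <linux/const.h>	/* _AC() */

#define EXAMPLE_SHIFT	21
/* C sees (1UL << 21); the assembler sees (1 << 21), with no C suffix */
#define EXAMPLE_SIZE	(_AC(1,UL) << EXAMPLE_SHIFT)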
67444diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
67445index 5cf680a..4b74d62 100644
67446--- a/include/asm-generic/pgtable.h
67447+++ b/include/asm-generic/pgtable.h
67448@@ -688,6 +688,14 @@ static inline pmd_t pmd_mknuma(pmd_t pmd)
67449 }
67450 #endif /* CONFIG_NUMA_BALANCING */
67451
67452+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
67453+static inline unsigned long pax_open_kernel(void) { return 0; }
67454+#endif
67455+
67456+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
67457+static inline unsigned long pax_close_kernel(void) { return 0; }
67458+#endif
67459+
67460 #endif /* CONFIG_MMU */
67461
67462 #endif /* !__ASSEMBLY__ */
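The two stubs give generic code a portable way to bracket writes to otherwise write-protected kernel data; architectures with KERNEXEC provide real implementations and define __HAVE_ARCH_PAX_OPEN_KERNEL. The usual calling pattern, as a sketch (frobbed and set_frobbed are hypothetical):

static int frobbed __read_only = 0;

static void set_frobbed(int v)
{
	pax_open_kernel();	/* lift write protection (no-op on the stub) */
	frobbed = v;
	pax_close_kernel();	/* restore write protection */
}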
67463diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
67464index d1ea7ce..b1ebf2a 100644
67465--- a/include/asm-generic/vmlinux.lds.h
67466+++ b/include/asm-generic/vmlinux.lds.h
67467@@ -218,6 +218,7 @@
67468 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
67469 VMLINUX_SYMBOL(__start_rodata) = .; \
67470 *(.rodata) *(.rodata.*) \
67471+ *(.data..read_only) \
67472 *(__vermagic) /* Kernel version magic */ \
67473 . = ALIGN(8); \
67474 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
67475@@ -725,17 +726,18 @@
67476 * section in the linker script will go there too. @phdr should have
67477 * a leading colon.
67478 *
67479- * Note that this macros defines __per_cpu_load as an absolute symbol.
67480+ * Note that this macro defines per_cpu_load as an absolute symbol.
67481 * If there is no need to put the percpu section at a predetermined
67482 * address, use PERCPU_SECTION.
67483 */
67484 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
67485- VMLINUX_SYMBOL(__per_cpu_load) = .; \
67486- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
67487+ per_cpu_load = .; \
67488+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
67489 - LOAD_OFFSET) { \
67490+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
67491 PERCPU_INPUT(cacheline) \
67492 } phdr \
67493- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
67494+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
67495
67496 /**
67497 * PERCPU_SECTION - define output section for percpu area, simple version
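Folding .data..read_only into the rodata output section means objects placed there are initialized normally at boot but live inside the write-protected rodata mapping. A sketch of how an architecture might route data there (boot_tunable is hypothetical; the section attribute shown is how PaX-enabled arches typically define __read_only, whose generic fallback appears later in this patch):

static unsigned long boot_tunable
	__attribute__((__section__(".data..read_only"))) = 42;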
67498diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
67499index 418d270..bfd2794 100644
67500--- a/include/crypto/algapi.h
67501+++ b/include/crypto/algapi.h
67502@@ -34,7 +34,7 @@ struct crypto_type {
67503 unsigned int maskclear;
67504 unsigned int maskset;
67505 unsigned int tfmsize;
67506-};
67507+} __do_const;
67508
67509 struct crypto_instance {
67510 struct crypto_alg alg;
67511diff --git a/include/drm/drmP.h b/include/drm/drmP.h
67512index fad21c9..ab858bc 100644
67513--- a/include/drm/drmP.h
67514+++ b/include/drm/drmP.h
67515@@ -72,6 +72,7 @@
67516 #include <linux/workqueue.h>
67517 #include <linux/poll.h>
67518 #include <asm/pgalloc.h>
67519+#include <asm/local.h>
67520 #include <drm/drm.h>
67521 #include <drm/drm_sarea.h>
67522
67523@@ -293,10 +294,12 @@ do { \
67524 * \param cmd command.
67525 * \param arg argument.
67526 */
67527-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
67528+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
67529+ struct drm_file *file_priv);
67530+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
67531 struct drm_file *file_priv);
67532
67533-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
67534+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
67535 unsigned long arg);
67536
67537 #define DRM_IOCTL_NR(n) _IOC_NR(n)
67538@@ -311,9 +314,9 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
67539 struct drm_ioctl_desc {
67540 unsigned int cmd;
67541 int flags;
67542- drm_ioctl_t *func;
67543+ drm_ioctl_t func;
67544 unsigned int cmd_drv;
67545-};
67546+} __do_const;
67547
67548 /**
67549 * Creates a driver or general drm_ioctl_desc array entry for the given
67550@@ -995,7 +998,7 @@ struct drm_info_list {
67551 int (*show)(struct seq_file*, void*); /** show callback */
67552 u32 driver_features; /**< Required driver features for this entry */
67553 void *data;
67554-};
67555+} __do_const;
67556
67557 /**
67558 * debugfs node structure. This structure represents a debugfs file.
67559@@ -1068,7 +1071,7 @@ struct drm_device {
67560
67561 /** \name Usage Counters */
67562 /*@{ */
67563- int open_count; /**< Outstanding files open */
67564+ local_t open_count; /**< Outstanding files open */
67565 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
67566 atomic_t vma_count; /**< Outstanding vma areas open */
67567 int buf_use; /**< Buffers in use -- cannot alloc */
67568@@ -1079,7 +1082,7 @@ struct drm_device {
67569 /*@{ */
67570 unsigned long counters;
67571 enum drm_stat_type types[15];
67572- atomic_t counts[15];
67573+ atomic_unchecked_t counts[15];
67574 /*@} */
67575
67576 struct list_head filelist;
67577diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
67578index f43d556..94d9343 100644
67579--- a/include/drm/drm_crtc_helper.h
67580+++ b/include/drm/drm_crtc_helper.h
67581@@ -109,7 +109,7 @@ struct drm_encoder_helper_funcs {
67582 struct drm_connector *connector);
67583 /* disable encoder when not in use - more explicit than dpms off */
67584 void (*disable)(struct drm_encoder *encoder);
67585-};
67586+} __no_const;
67587
67588 /**
67589 * drm_connector_helper_funcs - helper operations for connectors
67590diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
67591index 72dcbe8..8db58d7 100644
67592--- a/include/drm/ttm/ttm_memory.h
67593+++ b/include/drm/ttm/ttm_memory.h
67594@@ -48,7 +48,7 @@
67595
67596 struct ttm_mem_shrink {
67597 int (*do_shrink) (struct ttm_mem_shrink *);
67598-};
67599+} __no_const;
67600
67601 /**
67602 * struct ttm_mem_global - Global memory accounting structure.
67603diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
67604index 4b840e8..155d235 100644
67605--- a/include/keys/asymmetric-subtype.h
67606+++ b/include/keys/asymmetric-subtype.h
67607@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
67608 /* Verify the signature on a key of this subtype (optional) */
67609 int (*verify_signature)(const struct key *key,
67610 const struct public_key_signature *sig);
67611-};
67612+} __do_const;
67613
67614 /**
67615 * asymmetric_key_subtype - Get the subtype from an asymmetric key
67616diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
67617index c1da539..1dcec55 100644
67618--- a/include/linux/atmdev.h
67619+++ b/include/linux/atmdev.h
67620@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
67621 #endif
67622
67623 struct k_atm_aal_stats {
67624-#define __HANDLE_ITEM(i) atomic_t i
67625+#define __HANDLE_ITEM(i) atomic_unchecked_t i
67626 __AAL_STAT_ITEMS
67627 #undef __HANDLE_ITEM
67628 };
67629@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
67630 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
67631 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
67632 struct module *owner;
67633-};
67634+} __do_const;
67635
67636 struct atmphy_ops {
67637 int (*start)(struct atm_dev *dev);
67638diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
67639index 0530b98..96a8ac0 100644
67640--- a/include/linux/binfmts.h
67641+++ b/include/linux/binfmts.h
67642@@ -73,8 +73,9 @@ struct linux_binfmt {
67643 int (*load_binary)(struct linux_binprm *);
67644 int (*load_shlib)(struct file *);
67645 int (*core_dump)(struct coredump_params *cprm);
67646+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
67647 unsigned long min_coredump; /* minimal dump size */
67648-};
67649+} __do_const;
67650
67651 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
67652
67653diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
67654index f94bc83..62b9cfe 100644
67655--- a/include/linux/blkdev.h
67656+++ b/include/linux/blkdev.h
67657@@ -1498,7 +1498,7 @@ struct block_device_operations {
67658 /* this callback is with swap_lock and sometimes page table lock held */
67659 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
67660 struct module *owner;
67661-};
67662+} __do_const;
67663
67664 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
67665 unsigned long);
67666diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
67667index 7c2e030..b72475d 100644
67668--- a/include/linux/blktrace_api.h
67669+++ b/include/linux/blktrace_api.h
67670@@ -23,7 +23,7 @@ struct blk_trace {
67671 struct dentry *dir;
67672 struct dentry *dropped_file;
67673 struct dentry *msg_file;
67674- atomic_t dropped;
67675+ atomic_unchecked_t dropped;
67676 };
67677
67678 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
67679diff --git a/include/linux/cache.h b/include/linux/cache.h
67680index 4c57065..4307975 100644
67681--- a/include/linux/cache.h
67682+++ b/include/linux/cache.h
67683@@ -16,6 +16,10 @@
67684 #define __read_mostly
67685 #endif
67686
67687+#ifndef __read_only
67688+#define __read_only __read_mostly
67689+#endif
67690+
67691 #ifndef ____cacheline_aligned
67692 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
67693 #endif
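The #ifndef fallback lets generic code annotate write-rarely data unconditionally: where an architecture supplies a stronger __read_only (for example the .data..read_only placement above), the object becomes truly write-protected; elsewhere it degrades gracefully to __read_mostly. Sketch (hardening_enabled is hypothetical):

#include <linux/cache.h>

/* written once during init, read on every fast path afterwards */
static int hardening_enabled __read_only = 1;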
67694diff --git a/include/linux/capability.h b/include/linux/capability.h
67695index 98503b7..cc36d18 100644
67696--- a/include/linux/capability.h
67697+++ b/include/linux/capability.h
67698@@ -211,8 +211,13 @@ extern bool capable(int cap);
67699 extern bool ns_capable(struct user_namespace *ns, int cap);
67700 extern bool nsown_capable(int cap);
67701 extern bool inode_capable(const struct inode *inode, int cap);
67702+extern bool capable_nolog(int cap);
67703+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
67704+extern bool inode_capable_nolog(const struct inode *inode, int cap);
67705
67706 /* audit system wants to get cap info from files as well */
67707 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
67708
67709+extern int is_privileged_binary(const struct dentry *dentry);
67710+
67711 #endif /* !_LINUX_CAPABILITY_H */
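The _nolog variants perform the same privilege check as capable()/ns_capable() but suppress the audit record, which matters for opportunistic probes where a denial is expected and not an attack signal. A sketch (may_use_raw_io is hypothetical):

static bool may_use_raw_io(void)
{
	/* same decision as capable(), but no log entry on failure */
	return capable_nolog(CAP_SYS_RAWIO);
}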
67712diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
67713index 8609d57..86e4d79 100644
67714--- a/include/linux/cdrom.h
67715+++ b/include/linux/cdrom.h
67716@@ -87,7 +87,6 @@ struct cdrom_device_ops {
67717
67718 /* driver specifications */
67719 const int capability; /* capability flags */
67720- int n_minors; /* number of active minor devices */
67721 /* handle uniform packets for scsi type devices (scsi,atapi) */
67722 int (*generic_packet) (struct cdrom_device_info *,
67723 struct packet_command *);
67724diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
67725index 42e55de..1cd0e66 100644
67726--- a/include/linux/cleancache.h
67727+++ b/include/linux/cleancache.h
67728@@ -31,7 +31,7 @@ struct cleancache_ops {
67729 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
67730 void (*invalidate_inode)(int, struct cleancache_filekey);
67731 void (*invalidate_fs)(int);
67732-};
67733+} __no_const;
67734
67735 extern struct cleancache_ops
67736 cleancache_register_ops(struct cleancache_ops *ops);
67737diff --git a/include/linux/compat.h b/include/linux/compat.h
67738index dec7e2d..45db13f 100644
67739--- a/include/linux/compat.h
67740+++ b/include/linux/compat.h
67741@@ -311,14 +311,14 @@ long compat_sys_msgsnd(int first, int second, int third, void __user *uptr);
67742 long compat_sys_msgrcv(int first, int second, int msgtyp, int third,
67743 int version, void __user *uptr);
67744 long compat_sys_shmat(int first, int second, compat_uptr_t third, int version,
67745- void __user *uptr);
67746+ void __user *uptr) __intentional_overflow(0);
67747 #else
67748 long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
67749 long compat_sys_msgsnd(int msqid, struct compat_msgbuf __user *msgp,
67750 compat_ssize_t msgsz, int msgflg);
67751 long compat_sys_msgrcv(int msqid, struct compat_msgbuf __user *msgp,
67752 compat_ssize_t msgsz, long msgtyp, int msgflg);
67753-long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
67754+long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
67755 #endif
67756 long compat_sys_msgctl(int first, int second, void __user *uptr);
67757 long compat_sys_shmctl(int first, int second, void __user *uptr);
67758@@ -414,7 +414,7 @@ extern int compat_ptrace_request(struct task_struct *child,
67759 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
67760 compat_ulong_t addr, compat_ulong_t data);
67761 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
67762- compat_long_t addr, compat_long_t data);
67763+ compat_ulong_t addr, compat_ulong_t data);
67764
67765 /*
67766 * epoll (fs/eventpoll.c) compat bits follow ...
67767diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
67768index 662fd1b..e801992 100644
67769--- a/include/linux/compiler-gcc4.h
67770+++ b/include/linux/compiler-gcc4.h
67771@@ -34,6 +34,21 @@
67772 #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
67773
67774 #if __GNUC_MINOR__ >= 5
67775+
67776+#ifdef CONSTIFY_PLUGIN
67777+#define __no_const __attribute__((no_const))
67778+#define __do_const __attribute__((do_const))
67779+#endif
67780+
67781+#ifdef SIZE_OVERFLOW_PLUGIN
67782+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
67783+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
67784+#endif
67785+
67786+#ifdef LATENT_ENTROPY_PLUGIN
67787+#define __latent_entropy __attribute__((latent_entropy))
67788+#endif
67789+
67790 /*
67791 * Mark a position in code as unreachable. This can be used to
67792 * suppress control flow warnings after asm blocks that transfer
67793@@ -49,6 +64,11 @@
67794 #define __noclone __attribute__((__noclone__))
67795
67796 #endif
67797+
67798+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
67799+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
67800+#define __bos0(ptr) __bos((ptr), 0)
67801+#define __bos1(ptr) __bos((ptr), 1)
67802 #endif
67803
67804 #if __GNUC_MINOR__ >= 6
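The attribute block wires the constify, size_overflow and latent_entropy gcc plugins into the build, while __alloc_size/__bos expose gcc's object-size machinery generically. A sketch of how the annotations compose under optimization (my_alloc and example are hypothetical):

void *my_alloc(unsigned long n) __alloc_size(1);

static void example(void)
{
	char *p = my_alloc(16);
	/* __bos0(p) is __builtin_object_size(p, 0); the __alloc_size
	 * hint lets gcc evaluate it to 16 at compile time */
	unsigned long sz = __bos0(p);
	(void)sz;
}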
67805diff --git a/include/linux/compiler.h b/include/linux/compiler.h
67806index dd852b7..1ad5fba 100644
67807--- a/include/linux/compiler.h
67808+++ b/include/linux/compiler.h
67809@@ -5,11 +5,14 @@
67810
67811 #ifdef __CHECKER__
67812 # define __user __attribute__((noderef, address_space(1)))
67813+# define __force_user __force __user
67814 # define __kernel __attribute__((address_space(0)))
67815+# define __force_kernel __force __kernel
67816 # define __safe __attribute__((safe))
67817 # define __force __attribute__((force))
67818 # define __nocast __attribute__((nocast))
67819 # define __iomem __attribute__((noderef, address_space(2)))
67820+# define __force_iomem __force __iomem
67821 # define __must_hold(x) __attribute__((context(x,1,1)))
67822 # define __acquires(x) __attribute__((context(x,0,1)))
67823 # define __releases(x) __attribute__((context(x,1,0)))
67824@@ -17,20 +20,37 @@
67825 # define __release(x) __context__(x,-1)
67826 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
67827 # define __percpu __attribute__((noderef, address_space(3)))
67828+# define __force_percpu __force __percpu
67829 #ifdef CONFIG_SPARSE_RCU_POINTER
67830 # define __rcu __attribute__((noderef, address_space(4)))
67831+# define __force_rcu __force __rcu
67832 #else
67833 # define __rcu
67834+# define __force_rcu
67835 #endif
67836 extern void __chk_user_ptr(const volatile void __user *);
67837 extern void __chk_io_ptr(const volatile void __iomem *);
67838 #else
67839-# define __user
67840-# define __kernel
67841+# ifdef CHECKER_PLUGIN
67842+//# define __user
67843+//# define __force_user
67844+//# define __kernel
67845+//# define __force_kernel
67846+# else
67847+# ifdef STRUCTLEAK_PLUGIN
67848+# define __user __attribute__((user))
67849+# else
67850+# define __user
67851+# endif
67852+# define __force_user
67853+# define __kernel
67854+# define __force_kernel
67855+# endif
67856 # define __safe
67857 # define __force
67858 # define __nocast
67859 # define __iomem
67860+# define __force_iomem
67861 # define __chk_user_ptr(x) (void)0
67862 # define __chk_io_ptr(x) (void)0
67863 # define __builtin_warning(x, y...) (1)
67864@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
67865 # define __release(x) (void)0
67866 # define __cond_lock(x,c) (c)
67867 # define __percpu
67868+# define __force_percpu
67869 # define __rcu
67870+# define __force_rcu
67871 #endif
67872
67873 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
67874@@ -275,6 +297,26 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
67875 # define __attribute_const__ /* unimplemented */
67876 #endif
67877
67878+#ifndef __no_const
67879+# define __no_const
67880+#endif
67881+
67882+#ifndef __do_const
67883+# define __do_const
67884+#endif
67885+
67886+#ifndef __size_overflow
67887+# define __size_overflow(...)
67888+#endif
67889+
67890+#ifndef __intentional_overflow
67891+# define __intentional_overflow(...)
67892+#endif
67893+
67894+#ifndef __latent_entropy
67895+# define __latent_entropy
67896+#endif
67897+
67898 /*
67899 * Tell gcc if a function is cold. The compiler will assume any path
67900 * directly leading to the call is unlikely.
67901@@ -284,6 +326,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
67902 #define __cold
67903 #endif
67904
67905+#ifndef __alloc_size
67906+#define __alloc_size(...)
67907+#endif
67908+
67909+#ifndef __bos
67910+#define __bos(ptr, arg)
67911+#endif
67912+
67913+#ifndef __bos0
67914+#define __bos0(ptr)
67915+#endif
67916+
67917+#ifndef __bos1
67918+#define __bos1(ptr)
67919+#endif
67920+
67921 /* Simple shorthand for a section definition */
67922 #ifndef __section
67923 # define __section(S) __attribute__ ((__section__(#S)))
67924@@ -323,6 +381,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
67925 * use is to mediate communication between process-level code and irq/NMI
67926 * handlers, all running on the same CPU.
67927 */
67928-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
67929+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
67930+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
67931
67932 #endif /* __LINUX_COMPILER_H */
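Const-qualifying ACCESS_ONCE turns any accidental write through it into a compile error, while ACCESS_ONCE_RW keeps the old writable semantics for call sites that genuinely need them. Sketch (shared_flag and example_rw are hypothetical):

static int shared_flag;

static void example_rw(void)
{
	int v = ACCESS_ONCE(shared_flag);	/* volatile read: fine */
	(void)v;

	/* ACCESS_ONCE(shared_flag) = 1; no longer compiles: the
	 * lvalue is const-qualified. Writes must opt in explicitly: */
	ACCESS_ONCE_RW(shared_flag) = 1;
}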
67933diff --git a/include/linux/completion.h b/include/linux/completion.h
67934index 51494e6..0fd1b61 100644
67935--- a/include/linux/completion.h
67936+++ b/include/linux/completion.h
67937@@ -78,13 +78,13 @@ static inline void init_completion(struct completion *x)
67938
67939 extern void wait_for_completion(struct completion *);
67940 extern int wait_for_completion_interruptible(struct completion *x);
67941-extern int wait_for_completion_killable(struct completion *x);
67942+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
67943 extern unsigned long wait_for_completion_timeout(struct completion *x,
67944 unsigned long timeout);
67945 extern long wait_for_completion_interruptible_timeout(
67946- struct completion *x, unsigned long timeout);
67947+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
67948 extern long wait_for_completion_killable_timeout(
67949- struct completion *x, unsigned long timeout);
67950+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
67951 extern bool try_wait_for_completion(struct completion *x);
67952 extern bool completion_done(struct completion *x);
67953
67954diff --git a/include/linux/configfs.h b/include/linux/configfs.h
67955index 34025df..d94bbbc 100644
67956--- a/include/linux/configfs.h
67957+++ b/include/linux/configfs.h
67958@@ -125,7 +125,7 @@ struct configfs_attribute {
67959 const char *ca_name;
67960 struct module *ca_owner;
67961 umode_t ca_mode;
67962-};
67963+} __do_const;
67964
67965 /*
67966 * Users often need to create attribute structures for their configurable
67967diff --git a/include/linux/cpu.h b/include/linux/cpu.h
67968index ce7a074..01ab8ac 100644
67969--- a/include/linux/cpu.h
67970+++ b/include/linux/cpu.h
67971@@ -115,7 +115,7 @@ enum {
67972 /* Need to know about CPUs going up/down? */
67973 #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
67974 #define cpu_notifier(fn, pri) { \
67975- static struct notifier_block fn##_nb __cpuinitdata = \
67976+ static struct notifier_block fn##_nb = \
67977 { .notifier_call = fn, .priority = pri }; \
67978 register_cpu_notifier(&fn##_nb); \
67979 }
67980diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
67981index a55b88e..fba90c5 100644
67982--- a/include/linux/cpufreq.h
67983+++ b/include/linux/cpufreq.h
67984@@ -240,7 +240,7 @@ struct cpufreq_driver {
67985 int (*suspend) (struct cpufreq_policy *policy);
67986 int (*resume) (struct cpufreq_policy *policy);
67987 struct freq_attr **attr;
67988-};
67989+} __do_const;
67990
67991 /* flags */
67992
67993@@ -299,6 +299,7 @@ struct global_attr {
67994 ssize_t (*store)(struct kobject *a, struct attribute *b,
67995 const char *c, size_t count);
67996 };
67997+typedef struct global_attr __no_const global_attr_no_const;
67998
67999 #define define_one_global_ro(_name) \
68000 static struct global_attr _name = \
68001diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
68002index 24cd1037..20a63aae 100644
68003--- a/include/linux/cpuidle.h
68004+++ b/include/linux/cpuidle.h
68005@@ -54,7 +54,8 @@ struct cpuidle_state {
68006 int index);
68007
68008 int (*enter_dead) (struct cpuidle_device *dev, int index);
68009-};
68010+} __do_const;
68011+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
68012
68013 /* Idle State Flags */
68014 #define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
68015@@ -216,7 +217,7 @@ struct cpuidle_governor {
68016 void (*reflect) (struct cpuidle_device *dev, int index);
68017
68018 struct module *owner;
68019-};
68020+} __do_const;
68021
68022 #ifdef CONFIG_CPU_IDLE
68023
68024diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
68025index 0325602..5e9feff 100644
68026--- a/include/linux/cpumask.h
68027+++ b/include/linux/cpumask.h
68028@@ -118,17 +118,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
68029 }
68030
68031 /* Valid inputs for n are -1 and 0. */
68032-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
68033+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
68034 {
68035 return n+1;
68036 }
68037
68038-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
68039+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
68040 {
68041 return n+1;
68042 }
68043
68044-static inline unsigned int cpumask_next_and(int n,
68045+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
68046 const struct cpumask *srcp,
68047 const struct cpumask *andp)
68048 {
68049@@ -167,7 +167,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
68050 *
68051 * Returns >= nr_cpu_ids if no further cpus set.
68052 */
68053-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
68054+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
68055 {
68056 /* -1 is a legal arg here. */
68057 if (n != -1)
68058@@ -182,7 +182,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
68059 *
68060 * Returns >= nr_cpu_ids if no further cpus unset.
68061 */
68062-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
68063+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
68064 {
68065 /* -1 is a legal arg here. */
68066 if (n != -1)
68067@@ -190,7 +190,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
68068 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
68069 }
68070
68071-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
68072+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
68073 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
68074
68075 /**
68076diff --git a/include/linux/cred.h b/include/linux/cred.h
68077index 04421e8..6bce4ef 100644
68078--- a/include/linux/cred.h
68079+++ b/include/linux/cred.h
68080@@ -194,6 +194,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
68081 static inline void validate_process_creds(void)
68082 {
68083 }
68084+static inline void validate_task_creds(struct task_struct *task)
68085+{
68086+}
68087 #endif
68088
68089 /**
68090diff --git a/include/linux/crypto.h b/include/linux/crypto.h
68091index b92eadf..b4ecdc1 100644
68092--- a/include/linux/crypto.h
68093+++ b/include/linux/crypto.h
68094@@ -373,7 +373,7 @@ struct cipher_tfm {
68095 const u8 *key, unsigned int keylen);
68096 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
68097 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
68098-};
68099+} __no_const;
68100
68101 struct hash_tfm {
68102 int (*init)(struct hash_desc *desc);
68103@@ -394,13 +394,13 @@ struct compress_tfm {
68104 int (*cot_decompress)(struct crypto_tfm *tfm,
68105 const u8 *src, unsigned int slen,
68106 u8 *dst, unsigned int *dlen);
68107-};
68108+} __no_const;
68109
68110 struct rng_tfm {
68111 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
68112 unsigned int dlen);
68113 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
68114-};
68115+} __no_const;
68116
68117 #define crt_ablkcipher crt_u.ablkcipher
68118 #define crt_aead crt_u.aead
68119diff --git a/include/linux/ctype.h b/include/linux/ctype.h
68120index 8acfe31..6ffccd63 100644
68121--- a/include/linux/ctype.h
68122+++ b/include/linux/ctype.h
68123@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
68124 * Fast implementation of tolower() for internal usage. Do not use in your
68125 * code.
68126 */
68127-static inline char _tolower(const char c)
68128+static inline unsigned char _tolower(const unsigned char c)
68129 {
68130 return c | 0x20;
68131 }
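Switching _tolower() to unsigned char closes a signedness hole: on ABIs where plain char is signed, a byte >= 0x80 arrives as a negative value, and callers that index tables with the result can walk off the front of the array. Sketch of the fixed domain (example is hypothetical):

static void example(void)
{
	/* (char)0xC7 is -57 on signed-char targets; the unsigned
	 * prototype pins the input and output to 0..255 */
	unsigned char lo = _tolower(0xC7);	/* 0xE7, well-defined */
	(void)lo;
}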
68132diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
68133index 7925bf0..d5143d2 100644
68134--- a/include/linux/decompress/mm.h
68135+++ b/include/linux/decompress/mm.h
68136@@ -77,7 +77,7 @@ static void free(void *where)
68137 * warnings when not needed (indeed large_malloc / large_free are not
68138 * needed by inflate */
68139
68140-#define malloc(a) kmalloc(a, GFP_KERNEL)
68141+#define malloc(a) kmalloc((a), GFP_KERNEL)
68142 #define free(a) kfree(a)
68143
68144 #define large_malloc(a) vmalloc(a)
68145diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
68146index e83ef39..33e0eb3 100644
68147--- a/include/linux/devfreq.h
68148+++ b/include/linux/devfreq.h
68149@@ -114,7 +114,7 @@ struct devfreq_governor {
68150 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
68151 int (*event_handler)(struct devfreq *devfreq,
68152 unsigned int event, void *data);
68153-};
68154+} __do_const;
68155
68156 /**
68157 * struct devfreq - Device devfreq structure
68158diff --git a/include/linux/device.h b/include/linux/device.h
68159index 43dcda9..7a1fb65 100644
68160--- a/include/linux/device.h
68161+++ b/include/linux/device.h
68162@@ -294,7 +294,7 @@ struct subsys_interface {
68163 struct list_head node;
68164 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
68165 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
68166-};
68167+} __do_const;
68168
68169 int subsys_interface_register(struct subsys_interface *sif);
68170 void subsys_interface_unregister(struct subsys_interface *sif);
68171@@ -474,7 +474,7 @@ struct device_type {
68172 void (*release)(struct device *dev);
68173
68174 const struct dev_pm_ops *pm;
68175-};
68176+} __do_const;
68177
68178 /* interface for exporting device attributes */
68179 struct device_attribute {
68180@@ -484,11 +484,12 @@ struct device_attribute {
68181 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
68182 const char *buf, size_t count);
68183 };
68184+typedef struct device_attribute __no_const device_attribute_no_const;
68185
68186 struct dev_ext_attribute {
68187 struct device_attribute attr;
68188 void *var;
68189-};
68190+} __do_const;
68191
68192 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
68193 char *buf);
68194diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
68195index 94af418..b1ca7a2 100644
68196--- a/include/linux/dma-mapping.h
68197+++ b/include/linux/dma-mapping.h
68198@@ -54,7 +54,7 @@ struct dma_map_ops {
68199 u64 (*get_required_mask)(struct device *dev);
68200 #endif
68201 int is_phys;
68202-};
68203+} __do_const;
68204
68205 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
68206
68207diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
68208index d3201e4..8281e63 100644
68209--- a/include/linux/dmaengine.h
68210+++ b/include/linux/dmaengine.h
68211@@ -1018,9 +1018,9 @@ struct dma_pinned_list {
68212 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
68213 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
68214
68215-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
68216+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
68217 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
68218-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
68219+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
68220 struct dma_pinned_list *pinned_list, struct page *page,
68221 unsigned int offset, size_t len);
68222
68223diff --git a/include/linux/efi.h b/include/linux/efi.h
68224index 7a9498a..155713d 100644
68225--- a/include/linux/efi.h
68226+++ b/include/linux/efi.h
68227@@ -733,6 +733,7 @@ struct efivar_operations {
68228 efi_set_variable_t *set_variable;
68229 efi_query_variable_info_t *query_variable_info;
68230 };
68231+typedef struct efivar_operations __no_const efivar_operations_no_const;
68232
68233 struct efivars {
68234 /*
68235diff --git a/include/linux/elf.h b/include/linux/elf.h
68236index 8c9048e..16a4665 100644
68237--- a/include/linux/elf.h
68238+++ b/include/linux/elf.h
68239@@ -20,6 +20,7 @@ extern Elf32_Dyn _DYNAMIC [];
68240 #define elf_note elf32_note
68241 #define elf_addr_t Elf32_Off
68242 #define Elf_Half Elf32_Half
68243+#define elf_dyn Elf32_Dyn
68244
68245 #else
68246
68247@@ -30,6 +31,7 @@ extern Elf64_Dyn _DYNAMIC [];
68248 #define elf_note elf64_note
68249 #define elf_addr_t Elf64_Off
68250 #define Elf_Half Elf64_Half
68251+#define elf_dyn Elf64_Dyn
68252
68253 #endif
68254
68255diff --git a/include/linux/err.h b/include/linux/err.h
68256index f2edce2..cc2082c 100644
68257--- a/include/linux/err.h
68258+++ b/include/linux/err.h
68259@@ -19,12 +19,12 @@
68260
68261 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
68262
68263-static inline void * __must_check ERR_PTR(long error)
68264+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
68265 {
68266 return (void *) error;
68267 }
68268
68269-static inline long __must_check PTR_ERR(const void *ptr)
68270+static inline long __must_check __intentional_overflow(-1) PTR_ERR(const void *ptr)
68271 {
68272 return (long) ptr;
68273 }
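The __intentional_overflow(-1) markings tell the size_overflow plugin that these pointer<->errno conversions wrap by design, so the standard error-pointer pattern keeps compiling cleanly. That pattern, for context (struct foo and foo_lookup are hypothetical):

#include <linux/err.h>

struct foo;
extern struct foo *do_lookup(int id);

static struct foo *foo_lookup(int id)
{
	if (id < 0)
		return ERR_PTR(-EINVAL);	/* encode errno in the pointer */
	return do_lookup(id);
}

static void caller(void)
{
	struct foo *f = foo_lookup(-1);

	if (IS_ERR(f))
		pr_err("lookup failed: %ld\n", PTR_ERR(f));
}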
68274diff --git a/include/linux/extcon.h b/include/linux/extcon.h
68275index fcb51c8..bdafcf6 100644
68276--- a/include/linux/extcon.h
68277+++ b/include/linux/extcon.h
68278@@ -134,7 +134,7 @@ struct extcon_dev {
68279 /* /sys/class/extcon/.../mutually_exclusive/... */
68280 struct attribute_group attr_g_muex;
68281 struct attribute **attrs_muex;
68282- struct device_attribute *d_attrs_muex;
68283+ device_attribute_no_const *d_attrs_muex;
68284 };
68285
68286 /**
68287diff --git a/include/linux/fb.h b/include/linux/fb.h
68288index c7a9571..02eeffe 100644
68289--- a/include/linux/fb.h
68290+++ b/include/linux/fb.h
68291@@ -302,7 +302,7 @@ struct fb_ops {
68292 /* called at KDB enter and leave time to prepare the console */
68293 int (*fb_debug_enter)(struct fb_info *info);
68294 int (*fb_debug_leave)(struct fb_info *info);
68295-};
68296+} __do_const;
68297
68298 #ifdef CONFIG_FB_TILEBLITTING
68299 #define FB_TILE_CURSOR_NONE 0
68300diff --git a/include/linux/filter.h b/include/linux/filter.h
68301index c45eabc..baa0be5 100644
68302--- a/include/linux/filter.h
68303+++ b/include/linux/filter.h
68304@@ -20,6 +20,7 @@ struct compat_sock_fprog {
68305
68306 struct sk_buff;
68307 struct sock;
68308+struct bpf_jit_work;
68309
68310 struct sk_filter
68311 {
68312@@ -27,6 +28,9 @@ struct sk_filter
68313 unsigned int len; /* Number of filter blocks */
68314 unsigned int (*bpf_func)(const struct sk_buff *skb,
68315 const struct sock_filter *filter);
68316+#ifdef CONFIG_BPF_JIT
68317+ struct bpf_jit_work *work;
68318+#endif
68319 struct rcu_head rcu;
68320 struct sock_filter insns[0];
68321 };
68322diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
68323index 3044254..9767f41 100644
68324--- a/include/linux/frontswap.h
68325+++ b/include/linux/frontswap.h
68326@@ -11,7 +11,7 @@ struct frontswap_ops {
68327 int (*load)(unsigned, pgoff_t, struct page *);
68328 void (*invalidate_page)(unsigned, pgoff_t);
68329 void (*invalidate_area)(unsigned);
68330-};
68331+} __no_const;
68332
68333 extern bool frontswap_enabled;
68334 extern struct frontswap_ops
68335diff --git a/include/linux/fs.h b/include/linux/fs.h
68336index 7617ee0..b575199 100644
68337--- a/include/linux/fs.h
68338+++ b/include/linux/fs.h
68339@@ -1541,7 +1541,8 @@ struct file_operations {
68340 long (*fallocate)(struct file *file, int mode, loff_t offset,
68341 loff_t len);
68342 int (*show_fdinfo)(struct seq_file *m, struct file *f);
68343-};
68344+} __do_const;
68345+typedef struct file_operations __no_const file_operations_no_const;
68346
68347 struct inode_operations {
68348 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
68349@@ -2665,4 +2666,14 @@ static inline void inode_has_no_xattr(struct inode *inode)
68350 inode->i_flags |= S_NOSEC;
68351 }
68352
68353+static inline bool is_sidechannel_device(const struct inode *inode)
68354+{
68355+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
68356+ umode_t mode = inode->i_mode;
68357+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
68358+#else
68359+ return false;
68360+#endif
68361+}
68362+
68363 #endif /* _LINUX_FS_H */
68364diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
68365index 324f931..f292b65 100644
68366--- a/include/linux/fs_struct.h
68367+++ b/include/linux/fs_struct.h
68368@@ -6,7 +6,7 @@
68369 #include <linux/seqlock.h>
68370
68371 struct fs_struct {
68372- int users;
68373+ atomic_t users;
68374 spinlock_t lock;
68375 seqcount_t seq;
68376 int umask;
68377diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
68378index 5dfa0aa..6acf322 100644
68379--- a/include/linux/fscache-cache.h
68380+++ b/include/linux/fscache-cache.h
68381@@ -112,7 +112,7 @@ struct fscache_operation {
68382 fscache_operation_release_t release;
68383 };
68384
68385-extern atomic_t fscache_op_debug_id;
68386+extern atomic_unchecked_t fscache_op_debug_id;
68387 extern void fscache_op_work_func(struct work_struct *work);
68388
68389 extern void fscache_enqueue_operation(struct fscache_operation *);
68390@@ -134,7 +134,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
68391 INIT_WORK(&op->work, fscache_op_work_func);
68392 atomic_set(&op->usage, 1);
68393 op->state = FSCACHE_OP_ST_INITIALISED;
68394- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
68395+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
68396 op->processor = processor;
68397 op->release = release;
68398 INIT_LIST_HEAD(&op->pend_link);
68399diff --git a/include/linux/fscache.h b/include/linux/fscache.h
68400index 7a08623..4c07b0f 100644
68401--- a/include/linux/fscache.h
68402+++ b/include/linux/fscache.h
68403@@ -152,7 +152,7 @@ struct fscache_cookie_def {
68404 * - this is mandatory for any object that may have data
68405 */
68406 void (*now_uncached)(void *cookie_netfs_data);
68407-};
68408+} __do_const;
68409
68410 /*
68411 * fscache cached network filesystem type
68412diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
68413index 0fbfb46..508eb0d 100644
68414--- a/include/linux/fsnotify.h
68415+++ b/include/linux/fsnotify.h
68416@@ -195,6 +195,9 @@ static inline void fsnotify_access(struct file *file)
68417 struct inode *inode = path->dentry->d_inode;
68418 __u32 mask = FS_ACCESS;
68419
68420+ if (is_sidechannel_device(inode))
68421+ return;
68422+
68423 if (S_ISDIR(inode->i_mode))
68424 mask |= FS_ISDIR;
68425
68426@@ -213,6 +216,9 @@ static inline void fsnotify_modify(struct file *file)
68427 struct inode *inode = path->dentry->d_inode;
68428 __u32 mask = FS_MODIFY;
68429
68430+ if (is_sidechannel_device(inode))
68431+ return;
68432+
68433 if (S_ISDIR(inode->i_mode))
68434 mask |= FS_ISDIR;
68435
68436@@ -315,7 +321,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
68437 */
68438 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
68439 {
68440- return kstrdup(name, GFP_KERNEL);
68441+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
68442 }
68443
68444 /*
68445diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
68446index a3d4895..ddd2a50 100644
68447--- a/include/linux/ftrace_event.h
68448+++ b/include/linux/ftrace_event.h
68449@@ -272,7 +272,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
68450 extern int trace_add_event_call(struct ftrace_event_call *call);
68451 extern void trace_remove_event_call(struct ftrace_event_call *call);
68452
68453-#define is_signed_type(type) (((type)(-1)) < 0)
68454+#define is_signed_type(type) (((type)(-1)) < (type)1)
68455
68456 int trace_set_clr_event(const char *system, const char *event, int set);
68457
68458diff --git a/include/linux/genhd.h b/include/linux/genhd.h
68459index 79b8bba..86b539e 100644
68460--- a/include/linux/genhd.h
68461+++ b/include/linux/genhd.h
68462@@ -194,7 +194,7 @@ struct gendisk {
68463 struct kobject *slave_dir;
68464
68465 struct timer_rand_state *random;
68466- atomic_t sync_io; /* RAID */
68467+ atomic_unchecked_t sync_io; /* RAID */
68468 struct disk_events *ev;
68469 #ifdef CONFIG_BLK_DEV_INTEGRITY
68470 struct blk_integrity *integrity;
68471diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
68472index 023bc34..b02b46a 100644
68473--- a/include/linux/genl_magic_func.h
68474+++ b/include/linux/genl_magic_func.h
68475@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
68476 },
68477
68478 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
68479-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
68480+static struct genl_ops ZZZ_genl_ops[] = {
68481 #include GENL_MAGIC_INCLUDE_FILE
68482 };
68483
68484diff --git a/include/linux/gfp.h b/include/linux/gfp.h
68485index 0f615eb..5c3832f 100644
68486--- a/include/linux/gfp.h
68487+++ b/include/linux/gfp.h
68488@@ -35,6 +35,13 @@ struct vm_area_struct;
68489 #define ___GFP_NO_KSWAPD 0x400000u
68490 #define ___GFP_OTHER_NODE 0x800000u
68491 #define ___GFP_WRITE 0x1000000u
68492+
68493+#ifdef CONFIG_PAX_USERCOPY_SLABS
68494+#define ___GFP_USERCOPY 0x2000000u
68495+#else
68496+#define ___GFP_USERCOPY 0
68497+#endif
68498+
68499 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
68500
68501 /*
68502@@ -92,6 +99,7 @@ struct vm_area_struct;
68503 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
68504 #define __GFP_KMEMCG ((__force gfp_t)___GFP_KMEMCG) /* Allocation comes from a memcg-accounted resource */
68505 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
68506+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
68507
68508 /*
68509 * This may seem redundant, but it's a way of annotating false positives vs.
68510@@ -99,7 +107,7 @@ struct vm_area_struct;
68511 */
68512 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
68513
68514-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
68515+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
68516 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
68517
68518 /* This equals 0, but use constants in case they ever change */
68519@@ -153,6 +161,8 @@ struct vm_area_struct;
68520 /* 4GB DMA on some platforms */
68521 #define GFP_DMA32 __GFP_DMA32
68522
68523+#define GFP_USERCOPY __GFP_USERCOPY
68524+
68525 /* Convert GFP flags to their corresponding migrate type */
68526 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
68527 {
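___GFP_USERCOPY takes the newly claimed bit 25 (hence __GFP_BITS_SHIFT growing from 25 to 26) and lets PAX_USERCOPY_SLABS steer allocations whose contents cross the user/kernel boundary into dedicated slabs the usercopy checker can verify. A sketch of flagging such an allocation (give_to_user is hypothetical):

#include <linux/slab.h>
#include <linux/uaccess.h>

static long give_to_user(void __user *ubuf, size_t len)
{
	void *buf = kmalloc(len, GFP_KERNEL | GFP_USERCOPY);
	long ret = 0;

	if (!buf)
		return -ENOMEM;
	if (copy_to_user(ubuf, buf, len))	/* checked against the slab */
		ret = -EFAULT;
	kfree(buf);
	return ret;
}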
68528diff --git a/include/linux/gracl.h b/include/linux/gracl.h
68529new file mode 100644
68530index 0000000..ebe6d72
68531--- /dev/null
68532+++ b/include/linux/gracl.h
68533@@ -0,0 +1,319 @@
68534+#ifndef GR_ACL_H
68535+#define GR_ACL_H
68536+
68537+#include <linux/grdefs.h>
68538+#include <linux/resource.h>
68539+#include <linux/capability.h>
68540+#include <linux/dcache.h>
68541+#include <asm/resource.h>
68542+
68543+/* Major status information */
68544+
68545+#define GR_VERSION "grsecurity 2.9.1"
68546+#define GRSECURITY_VERSION 0x2901
68547+
68548+enum {
68549+ GR_SHUTDOWN = 0,
68550+ GR_ENABLE = 1,
68551+ GR_SPROLE = 2,
68552+ GR_RELOAD = 3,
68553+ GR_SEGVMOD = 4,
68554+ GR_STATUS = 5,
68555+ GR_UNSPROLE = 6,
68556+ GR_PASSSET = 7,
68557+ GR_SPROLEPAM = 8,
68558+};
68559+
68560+/* Password setup definitions
68561+ * kernel/grhash.c */
68562+enum {
68563+ GR_PW_LEN = 128,
68564+ GR_SALT_LEN = 16,
68565+ GR_SHA_LEN = 32,
68566+};
68567+
68568+enum {
68569+ GR_SPROLE_LEN = 64,
68570+};
68571+
68572+enum {
68573+ GR_NO_GLOB = 0,
68574+ GR_REG_GLOB,
68575+ GR_CREATE_GLOB
68576+};
68577+
68578+#define GR_NLIMITS 32
68579+
68580+/* Begin Data Structures */
68581+
68582+struct sprole_pw {
68583+ unsigned char *rolename;
68584+ unsigned char salt[GR_SALT_LEN];
68585+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
68586+};
68587+
68588+struct name_entry {
68589+ __u32 key;
68590+ ino_t inode;
68591+ dev_t device;
68592+ char *name;
68593+ __u16 len;
68594+ __u8 deleted;
68595+ struct name_entry *prev;
68596+ struct name_entry *next;
68597+};
68598+
68599+struct inodev_entry {
68600+ struct name_entry *nentry;
68601+ struct inodev_entry *prev;
68602+ struct inodev_entry *next;
68603+};
68604+
68605+struct acl_role_db {
68606+ struct acl_role_label **r_hash;
68607+ __u32 r_size;
68608+};
68609+
68610+struct inodev_db {
68611+ struct inodev_entry **i_hash;
68612+ __u32 i_size;
68613+};
68614+
68615+struct name_db {
68616+ struct name_entry **n_hash;
68617+ __u32 n_size;
68618+};
68619+
68620+struct crash_uid {
68621+ uid_t uid;
68622+ unsigned long expires;
68623+};
68624+
68625+struct gr_hash_struct {
68626+ void **table;
68627+ void **nametable;
68628+ void *first;
68629+ __u32 table_size;
68630+ __u32 used_size;
68631+ int type;
68632+};
68633+
68634+/* Userspace Grsecurity ACL data structures */
68635+
68636+struct acl_subject_label {
68637+ char *filename;
68638+ ino_t inode;
68639+ dev_t device;
68640+ __u32 mode;
68641+ kernel_cap_t cap_mask;
68642+ kernel_cap_t cap_lower;
68643+ kernel_cap_t cap_invert_audit;
68644+
68645+ struct rlimit res[GR_NLIMITS];
68646+ __u32 resmask;
68647+
68648+ __u8 user_trans_type;
68649+ __u8 group_trans_type;
68650+ uid_t *user_transitions;
68651+ gid_t *group_transitions;
68652+ __u16 user_trans_num;
68653+ __u16 group_trans_num;
68654+
68655+ __u32 sock_families[2];
68656+ __u32 ip_proto[8];
68657+ __u32 ip_type;
68658+ struct acl_ip_label **ips;
68659+ __u32 ip_num;
68660+ __u32 inaddr_any_override;
68661+
68662+ __u32 crashes;
68663+ unsigned long expires;
68664+
68665+ struct acl_subject_label *parent_subject;
68666+ struct gr_hash_struct *hash;
68667+ struct acl_subject_label *prev;
68668+ struct acl_subject_label *next;
68669+
68670+ struct acl_object_label **obj_hash;
68671+ __u32 obj_hash_size;
68672+ __u16 pax_flags;
68673+};
68674+
68675+struct role_allowed_ip {
68676+ __u32 addr;
68677+ __u32 netmask;
68678+
68679+ struct role_allowed_ip *prev;
68680+ struct role_allowed_ip *next;
68681+};
68682+
68683+struct role_transition {
68684+ char *rolename;
68685+
68686+ struct role_transition *prev;
68687+ struct role_transition *next;
68688+};
68689+
68690+struct acl_role_label {
68691+ char *rolename;
68692+ uid_t uidgid;
68693+ __u16 roletype;
68694+
68695+ __u16 auth_attempts;
68696+ unsigned long expires;
68697+
68698+ struct acl_subject_label *root_label;
68699+ struct gr_hash_struct *hash;
68700+
68701+ struct acl_role_label *prev;
68702+ struct acl_role_label *next;
68703+
68704+ struct role_transition *transitions;
68705+ struct role_allowed_ip *allowed_ips;
68706+ uid_t *domain_children;
68707+ __u16 domain_child_num;
68708+
68709+ umode_t umask;
68710+
68711+ struct acl_subject_label **subj_hash;
68712+ __u32 subj_hash_size;
68713+};
68714+
68715+struct user_acl_role_db {
68716+ struct acl_role_label **r_table;
68717+ __u32 num_pointers; /* Number of allocations to track */
68718+ __u32 num_roles; /* Number of roles */
68719+ __u32 num_domain_children; /* Number of domain children */
68720+ __u32 num_subjects; /* Number of subjects */
68721+ __u32 num_objects; /* Number of objects */
68722+};
68723+
68724+struct acl_object_label {
68725+ char *filename;
68726+ ino_t inode;
68727+ dev_t device;
68728+ __u32 mode;
68729+
68730+ struct acl_subject_label *nested;
68731+ struct acl_object_label *globbed;
68732+
68733+ /* next two structures not used */
68734+
68735+ struct acl_object_label *prev;
68736+ struct acl_object_label *next;
68737+};
68738+
68739+struct acl_ip_label {
68740+ char *iface;
68741+ __u32 addr;
68742+ __u32 netmask;
68743+ __u16 low, high;
68744+ __u8 mode;
68745+ __u32 type;
68746+ __u32 proto[8];
68747+
68748+ /* next two structures not used */
68749+
68750+ struct acl_ip_label *prev;
68751+ struct acl_ip_label *next;
68752+};
68753+
68754+struct gr_arg {
68755+ struct user_acl_role_db role_db;
68756+ unsigned char pw[GR_PW_LEN];
68757+ unsigned char salt[GR_SALT_LEN];
68758+ unsigned char sum[GR_SHA_LEN];
68759+ unsigned char sp_role[GR_SPROLE_LEN];
68760+ struct sprole_pw *sprole_pws;
68761+ dev_t segv_device;
68762+ ino_t segv_inode;
68763+ uid_t segv_uid;
68764+ __u16 num_sprole_pws;
68765+ __u16 mode;
68766+};
68767+
68768+struct gr_arg_wrapper {
68769+ struct gr_arg *arg;
68770+ __u32 version;
68771+ __u32 size;
68772+};
68773+
68774+struct subject_map {
68775+ struct acl_subject_label *user;
68776+ struct acl_subject_label *kernel;
68777+ struct subject_map *prev;
68778+ struct subject_map *next;
68779+};
68780+
68781+struct acl_subj_map_db {
68782+ struct subject_map **s_hash;
68783+ __u32 s_size;
68784+};
68785+
68786+/* End Data Structures Section */
68787+
68788+/* Hash functions generated by empirical testing by Brad Spengler.
68789+   Makes good use of the low bits of the inode. Generally 0-1 loop
68790+   iterations for a successful match, 0-3 for an unsuccessful match.
68791+   Shift/add algorithm with modulus of table size and an XOR. */
68792+
68793+static __inline__ unsigned int
68794+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
68795+{
68796+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
68797+}
68798+
68799+static __inline__ unsigned int
68800+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
68801+{
68802+ return ((const unsigned long)userp % sz);
68803+}
68804+
68805+static __inline__ unsigned int
68806+gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
68807+{
68808+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
68809+}
68810+
68811+static __inline__ unsigned int
68812+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
68813+{
68814+ return full_name_hash((const unsigned char *)name, len) % sz;
68815+}
68816+
68817+#define FOR_EACH_ROLE_START(role) \
68818+ role = role_list; \
68819+ while (role) {
68820+
68821+#define FOR_EACH_ROLE_END(role) \
68822+ role = role->prev; \
68823+ }
68824+
68825+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
68826+ subj = NULL; \
68827+ iter = 0; \
68828+ while (iter < role->subj_hash_size) { \
68829+ if (subj == NULL) \
68830+ subj = role->subj_hash[iter]; \
68831+ if (subj == NULL) { \
68832+ iter++; \
68833+ continue; \
68834+ }
68835+
68836+#define FOR_EACH_SUBJECT_END(subj,iter) \
68837+ subj = subj->next; \
68838+ if (subj == NULL) \
68839+ iter++; \
68840+ }
68841+
68842+
68843+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
68844+ subj = role->hash->first; \
68845+ while (subj != NULL) {
68846+
68847+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
68848+ subj = subj->next; \
68849+ }
68850+
68851+#endif
68852+
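A worked example of the inode/device hash above, assuming a 256-bucket table (example_bucket is hypothetical): every shifted term has zero low bits, so the power-of-two modulus sees only the low 8 bits of (4242 + 8).

static unsigned int example_bucket(void)
{
	/* ((4242 + 8) ^ ((4242 << 13) + (4242 << 23) + (8 << 9))) % 256
	 * = 4250 % 256 = 154, since the XOR cannot touch bits 0-7 */
	return gr_fhash(4242, 8, 256);
}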
68853diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
68854new file mode 100644
68855index 0000000..323ecf2
68856--- /dev/null
68857+++ b/include/linux/gralloc.h
68858@@ -0,0 +1,9 @@
68859+#ifndef __GRALLOC_H
68860+#define __GRALLOC_H
68861+
68862+void acl_free_all(void);
68863+int acl_alloc_stack_init(unsigned long size);
68864+void *acl_alloc(unsigned long len);
68865+void *acl_alloc_num(unsigned long num, unsigned long len);
68866+
68867+#endif
68868diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
68869new file mode 100644
68870index 0000000..be66033
68871--- /dev/null
68872+++ b/include/linux/grdefs.h
68873@@ -0,0 +1,140 @@
68874+#ifndef GRDEFS_H
68875+#define GRDEFS_H
68876+
68877+/* Begin grsecurity status declarations */
68878+
68879+enum {
68880+ GR_READY = 0x01,
68881+ GR_STATUS_INIT = 0x00 // disabled state
68882+};
68883+
68884+/* Begin ACL declarations */
68885+
68886+/* Role flags */
68887+
68888+enum {
68889+ GR_ROLE_USER = 0x0001,
68890+ GR_ROLE_GROUP = 0x0002,
68891+ GR_ROLE_DEFAULT = 0x0004,
68892+ GR_ROLE_SPECIAL = 0x0008,
68893+ GR_ROLE_AUTH = 0x0010,
68894+ GR_ROLE_NOPW = 0x0020,
68895+ GR_ROLE_GOD = 0x0040,
68896+ GR_ROLE_LEARN = 0x0080,
68897+ GR_ROLE_TPE = 0x0100,
68898+ GR_ROLE_DOMAIN = 0x0200,
68899+ GR_ROLE_PAM = 0x0400,
68900+ GR_ROLE_PERSIST = 0x0800
68901+};
68902+
68903+/* ACL Subject and Object mode flags */
68904+enum {
68905+ GR_DELETED = 0x80000000
68906+};
68907+
68908+/* ACL Object-only mode flags */
68909+enum {
68910+ GR_READ = 0x00000001,
68911+ GR_APPEND = 0x00000002,
68912+ GR_WRITE = 0x00000004,
68913+ GR_EXEC = 0x00000008,
68914+ GR_FIND = 0x00000010,
68915+ GR_INHERIT = 0x00000020,
68916+ GR_SETID = 0x00000040,
68917+ GR_CREATE = 0x00000080,
68918+ GR_DELETE = 0x00000100,
68919+ GR_LINK = 0x00000200,
68920+ GR_AUDIT_READ = 0x00000400,
68921+ GR_AUDIT_APPEND = 0x00000800,
68922+ GR_AUDIT_WRITE = 0x00001000,
68923+ GR_AUDIT_EXEC = 0x00002000,
68924+ GR_AUDIT_FIND = 0x00004000,
68925+ GR_AUDIT_INHERIT= 0x00008000,
68926+ GR_AUDIT_SETID = 0x00010000,
68927+ GR_AUDIT_CREATE = 0x00020000,
68928+ GR_AUDIT_DELETE = 0x00040000,
68929+ GR_AUDIT_LINK = 0x00080000,
68930+ GR_PTRACERD = 0x00100000,
68931+ GR_NOPTRACE = 0x00200000,
68932+ GR_SUPPRESS = 0x00400000,
68933+ GR_NOLEARN = 0x00800000,
68934+ GR_INIT_TRANSFER= 0x01000000
68935+};
68936+
68937+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
68938+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
68939+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
68940+
68941+/* ACL subject-only mode flags */
68942+enum {
68943+ GR_KILL = 0x00000001,
68944+ GR_VIEW = 0x00000002,
68945+ GR_PROTECTED = 0x00000004,
68946+ GR_LEARN = 0x00000008,
68947+ GR_OVERRIDE = 0x00000010,
68948+ /* just a placeholder, this mode is only used in userspace */
68949+ GR_DUMMY = 0x00000020,
68950+ GR_PROTSHM = 0x00000040,
68951+ GR_KILLPROC = 0x00000080,
68952+ GR_KILLIPPROC = 0x00000100,
68953+ /* just a placeholder, this mode is only used in userspace */
68954+ GR_NOTROJAN = 0x00000200,
68955+ GR_PROTPROCFD = 0x00000400,
68956+ GR_PROCACCT = 0x00000800,
68957+ GR_RELAXPTRACE = 0x00001000,
68958+ //GR_NESTED = 0x00002000,
68959+ GR_INHERITLEARN = 0x00004000,
68960+ GR_PROCFIND = 0x00008000,
68961+ GR_POVERRIDE = 0x00010000,
68962+ GR_KERNELAUTH = 0x00020000,
68963+ GR_ATSECURE = 0x00040000,
68964+ GR_SHMEXEC = 0x00080000
68965+};
68966+
68967+enum {
68968+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
68969+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
68970+ GR_PAX_ENABLE_MPROTECT = 0x0004,
68971+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
68972+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
68973+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
68974+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
68975+ GR_PAX_DISABLE_MPROTECT = 0x0400,
68976+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
68977+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
68978+};
68979+
68980+enum {
68981+ GR_ID_USER = 0x01,
68982+ GR_ID_GROUP = 0x02,
68983+};
68984+
68985+enum {
68986+ GR_ID_ALLOW = 0x01,
68987+ GR_ID_DENY = 0x02,
68988+};
68989+
68990+#define GR_CRASH_RES 31
68991+#define GR_UIDTABLE_MAX 500
68992+
68993+/* begin resource learning section */
68994+enum {
68995+ GR_RLIM_CPU_BUMP = 60,
68996+ GR_RLIM_FSIZE_BUMP = 50000,
68997+ GR_RLIM_DATA_BUMP = 10000,
68998+ GR_RLIM_STACK_BUMP = 1000,
68999+ GR_RLIM_CORE_BUMP = 10000,
69000+ GR_RLIM_RSS_BUMP = 500000,
69001+ GR_RLIM_NPROC_BUMP = 1,
69002+ GR_RLIM_NOFILE_BUMP = 5,
69003+ GR_RLIM_MEMLOCK_BUMP = 50000,
69004+ GR_RLIM_AS_BUMP = 500000,
69005+ GR_RLIM_LOCKS_BUMP = 2,
69006+ GR_RLIM_SIGPENDING_BUMP = 5,
69007+ GR_RLIM_MSGQUEUE_BUMP = 10000,
69008+ GR_RLIM_NICE_BUMP = 1,
69009+ GR_RLIM_RTPRIO_BUMP = 1,
69010+ GR_RLIM_RTTIME_BUMP = 1000000
69011+};
69012+
69013+#endif
69014diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
69015new file mode 100644
69016index 0000000..9bb6662
69017--- /dev/null
69018+++ b/include/linux/grinternal.h
69019@@ -0,0 +1,215 @@
69020+#ifndef __GRINTERNAL_H
69021+#define __GRINTERNAL_H
69022+
69023+#ifdef CONFIG_GRKERNSEC
69024+
69025+#include <linux/fs.h>
69026+#include <linux/mnt_namespace.h>
69027+#include <linux/nsproxy.h>
69028+#include <linux/gracl.h>
69029+#include <linux/grdefs.h>
69030+#include <linux/grmsg.h>
69031+
69032+void gr_add_learn_entry(const char *fmt, ...)
69033+ __attribute__ ((format (printf, 1, 2)));
69034+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
69035+ const struct vfsmount *mnt);
69036+__u32 gr_check_create(const struct dentry *new_dentry,
69037+ const struct dentry *parent,
69038+ const struct vfsmount *mnt, const __u32 mode);
69039+int gr_check_protected_task(const struct task_struct *task);
69040+__u32 to_gr_audit(const __u32 reqmode);
69041+int gr_set_acls(const int type);
69042+int gr_apply_subject_to_task(struct task_struct *task);
69043+int gr_acl_is_enabled(void);
69044+char gr_roletype_to_char(void);
69045+
69046+void gr_handle_alertkill(struct task_struct *task);
69047+char *gr_to_filename(const struct dentry *dentry,
69048+ const struct vfsmount *mnt);
69049+char *gr_to_filename1(const struct dentry *dentry,
69050+ const struct vfsmount *mnt);
69051+char *gr_to_filename2(const struct dentry *dentry,
69052+ const struct vfsmount *mnt);
69053+char *gr_to_filename3(const struct dentry *dentry,
69054+ const struct vfsmount *mnt);
69055+
69056+extern int grsec_enable_ptrace_readexec;
69057+extern int grsec_enable_harden_ptrace;
69058+extern int grsec_enable_link;
69059+extern int grsec_enable_fifo;
69060+extern int grsec_enable_execve;
69061+extern int grsec_enable_shm;
69062+extern int grsec_enable_execlog;
69063+extern int grsec_enable_signal;
69064+extern int grsec_enable_audit_ptrace;
69065+extern int grsec_enable_forkfail;
69066+extern int grsec_enable_time;
69067+extern int grsec_enable_rofs;
69068+extern int grsec_enable_chroot_shmat;
69069+extern int grsec_enable_chroot_mount;
69070+extern int grsec_enable_chroot_double;
69071+extern int grsec_enable_chroot_pivot;
69072+extern int grsec_enable_chroot_chdir;
69073+extern int grsec_enable_chroot_chmod;
69074+extern int grsec_enable_chroot_mknod;
69075+extern int grsec_enable_chroot_fchdir;
69076+extern int grsec_enable_chroot_nice;
69077+extern int grsec_enable_chroot_execlog;
69078+extern int grsec_enable_chroot_caps;
69079+extern int grsec_enable_chroot_sysctl;
69080+extern int grsec_enable_chroot_unix;
69081+extern int grsec_enable_symlinkown;
69082+extern kgid_t grsec_symlinkown_gid;
69083+extern int grsec_enable_tpe;
69084+extern kgid_t grsec_tpe_gid;
69085+extern int grsec_enable_tpe_all;
69086+extern int grsec_enable_tpe_invert;
69087+extern int grsec_enable_socket_all;
69088+extern kgid_t grsec_socket_all_gid;
69089+extern int grsec_enable_socket_client;
69090+extern kgid_t grsec_socket_client_gid;
69091+extern int grsec_enable_socket_server;
69092+extern kgid_t grsec_socket_server_gid;
69093+extern kgid_t grsec_audit_gid;
69094+extern int grsec_enable_group;
69095+extern int grsec_enable_audit_textrel;
69096+extern int grsec_enable_log_rwxmaps;
69097+extern int grsec_enable_mount;
69098+extern int grsec_enable_chdir;
69099+extern int grsec_resource_logging;
69100+extern int grsec_enable_blackhole;
69101+extern int grsec_lastack_retries;
69102+extern int grsec_enable_brute;
69103+extern int grsec_lock;
69104+
69105+extern spinlock_t grsec_alert_lock;
69106+extern unsigned long grsec_alert_wtime;
69107+extern unsigned long grsec_alert_fyet;
69108+
69109+extern spinlock_t grsec_audit_lock;
69110+
69111+extern rwlock_t grsec_exec_file_lock;
69112+
69113+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
69114+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
69115+ (tsk)->exec_file->f_vfsmnt) : "/")
69116+
69117+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
69118+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
69119+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
69120+
69121+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
69122+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
69123+ (tsk)->exec_file->f_vfsmnt) : "/")
69124+
69125+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
69126+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
69127+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
69128+
69129+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
69130+
69131+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
69132+
69133+#define GR_CHROOT_CAPS {{ \
69134+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
69135+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
69136+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
69137+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
69138+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
69139+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
69140+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
69141+
69142+#define security_learn(normal_msg,args...) \
69143+({ \
69144+ read_lock(&grsec_exec_file_lock); \
69145+ gr_add_learn_entry(normal_msg "\n", ## args); \
69146+ read_unlock(&grsec_exec_file_lock); \
69147+})
69148+
69149+enum {
69150+ GR_DO_AUDIT,
69151+ GR_DONT_AUDIT,
69152+ /* used for non-audit messages that we shouldn't kill the task on */
69153+ GR_DONT_AUDIT_GOOD
69154+};
69155+
69156+enum {
69157+ GR_TTYSNIFF,
69158+ GR_RBAC,
69159+ GR_RBAC_STR,
69160+ GR_STR_RBAC,
69161+ GR_RBAC_MODE2,
69162+ GR_RBAC_MODE3,
69163+ GR_FILENAME,
69164+ GR_SYSCTL_HIDDEN,
69165+ GR_NOARGS,
69166+ GR_ONE_INT,
69167+ GR_ONE_INT_TWO_STR,
69168+ GR_ONE_STR,
69169+ GR_STR_INT,
69170+ GR_TWO_STR_INT,
69171+ GR_TWO_INT,
69172+ GR_TWO_U64,
69173+ GR_THREE_INT,
69174+ GR_FIVE_INT_TWO_STR,
69175+ GR_TWO_STR,
69176+ GR_THREE_STR,
69177+ GR_FOUR_STR,
69178+ GR_STR_FILENAME,
69179+ GR_FILENAME_STR,
69180+ GR_FILENAME_TWO_INT,
69181+ GR_FILENAME_TWO_INT_STR,
69182+ GR_TEXTREL,
69183+ GR_PTRACE,
69184+ GR_RESOURCE,
69185+ GR_CAP,
69186+ GR_SIG,
69187+ GR_SIG2,
69188+ GR_CRASH1,
69189+ GR_CRASH2,
69190+ GR_PSACCT,
69191+ GR_RWXMAP
69192+};
69193+
69194+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
69195+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
69196+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
69197+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
69198+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
69199+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
69200+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
69201+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
69202+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
69203+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
69204+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
69205+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
69206+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
69207+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
69208+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
69209+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
69210+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
69211+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
69212+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
69213+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
69214+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
69215+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
69216+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
69217+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
69218+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
69219+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
69220+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
69221+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
69222+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
69223+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
69224+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
69225+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
69226+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
69227+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
69228+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
69229+
69230+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
69231+
69232+#endif
69233+
69234+#endif
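
The gr_log_* macros above all funnel into gr_log_varargs(), whose argtypes selector (GR_ONE_INT, GR_TWO_STR, ...) tells it how to pull typed arguments off the va_list before formatting. A self-contained userspace analogue of that dispatch (names simplified; illustrative only):

#include <stdarg.h>
#include <stdio.h>

enum { LOG_ONE_INT, LOG_TWO_STR };	/* stand-ins for GR_ONE_INT, GR_TWO_STR */

/* Userspace analogue of gr_log_varargs(): argtypes tells the logger
 * how to pull typed arguments off the va_list before formatting. */
static void log_varargs(const char *msg, int argtypes, ...)
{
	va_list ap;

	va_start(ap, argtypes);
	switch (argtypes) {
	case LOG_ONE_INT:
		printf(msg, va_arg(ap, int));
		break;
	case LOG_TWO_STR: {
		const char *s1 = va_arg(ap, const char *);
		const char *s2 = va_arg(ap, const char *);
		printf(msg, s1, s2);
		break;
	}
	}
	va_end(ap);
}

int main(void)
{
	log_varargs("signal %d sent\n", LOG_ONE_INT, 9);
	log_varargs("mount of %s to %s\n", LOG_TWO_STR, "/dev/sda1", "/mnt");
	return 0;
}
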
69235diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
69236new file mode 100644
69237index 0000000..2bd4c8d
69238--- /dev/null
69239+++ b/include/linux/grmsg.h
69240@@ -0,0 +1,111 @@
69241+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
69242+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
69243+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
69244+#define GR_STOPMOD_MSG "denied modification of module state by "
69245+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
69246+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
69247+#define GR_IOPERM_MSG "denied use of ioperm() by "
69248+#define GR_IOPL_MSG "denied use of iopl() by "
69249+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
69250+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
69251+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
69252+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
69253+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
69254+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
69255+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
69256+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
69257+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
69258+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
69259+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
69260+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
69261+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
69262+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
69263+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
69264+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
69265+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
69266+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
69267+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
69268+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
69269+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
69270+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
69271+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
69272+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
69273+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
69274+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
69275+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
69276+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
69277+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
69278+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
69279+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
69280+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
69281+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
69282+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
69283+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
69284+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
69285+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
69286+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
69287+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
69288+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
69289+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
69290+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
69291+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
69292+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
69293+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
69294+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
69295+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
69296+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
69297+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
69298+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
69299+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
69300+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
69301+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
69302+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
69303+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
69304+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
69305+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
69306+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
69307+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
69308+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
69309+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
69310+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
69311+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
69312+#define GR_FAILFORK_MSG "failed fork with errno %s by "
69313+#define GR_NICE_CHROOT_MSG "denied priority change by "
69314+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
69315+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
69316+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
69317+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
69318+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
69319+#define GR_TIME_MSG "time set by "
69320+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
69321+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
69322+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
69323+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
69324+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
69325+#define GR_BIND_MSG "denied bind() by "
69326+#define GR_CONNECT_MSG "denied connect() by "
69327+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
69328+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
69329+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
69330+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
69331+#define GR_CAP_ACL_MSG "use of %s denied for "
69332+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
69333+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
69334+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
69335+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
69336+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
69337+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
69338+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
69339+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
69340+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
69341+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
69342+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
69343+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
69344+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
69345+#define GR_VM86_MSG "denied use of vm86 by "
69346+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
69347+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
69348+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
69349+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
69350+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
69351+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
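
Note that nearly every format string above deliberately ends in "by " (or similar): a single logging routine appends the acting task's identity, built from DEFAULTSECMSG-style fields, to whatever event prefix it is given. A compilable sketch of that composition, with made-up field values:

#include <stdio.h>

int main(void)
{
	const char *msg = "denied use of iopl() by ";	/* GR_IOPL_MSG */

	/* the logger appends the acting task's identity after "by " */
	printf("grsec: %s%.256s[%.16s:%d] uid/euid:%u/%u\n",
	       msg, "/usr/sbin/somedaemon", "somedaemon", 1234, 0u, 0u);
	return 0;
}
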
69352diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
69353new file mode 100644
69354index 0000000..8da63a4
69355--- /dev/null
69356+++ b/include/linux/grsecurity.h
69357@@ -0,0 +1,242 @@
69358+#ifndef GR_SECURITY_H
69359+#define GR_SECURITY_H
69360+#include <linux/fs.h>
69361+#include <linux/fs_struct.h>
69362+#include <linux/binfmts.h>
69363+#include <linux/gracl.h>
69364+
69365+/* notify of brain-dead configs */
69366+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
69367+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
69368+#endif
69369+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
69370+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
69371+#endif
69372+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
69373+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
69374+#endif
69375+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
69376+#error "CONFIG_PAX enabled, but no PaX options are enabled."
69377+#endif
69378+
69379+void gr_handle_brute_attach(unsigned long mm_flags);
69380+void gr_handle_brute_check(void);
69381+void gr_handle_kernel_exploit(void);
69382+int gr_process_user_ban(void);
69383+
69384+char gr_roletype_to_char(void);
69385+
69386+int gr_acl_enable_at_secure(void);
69387+
69388+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
69389+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
69390+
69391+void gr_del_task_from_ip_table(struct task_struct *p);
69392+
69393+int gr_pid_is_chrooted(struct task_struct *p);
69394+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
69395+int gr_handle_chroot_nice(void);
69396+int gr_handle_chroot_sysctl(const int op);
69397+int gr_handle_chroot_setpriority(struct task_struct *p,
69398+ const int niceval);
69399+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
69400+int gr_handle_chroot_chroot(const struct dentry *dentry,
69401+ const struct vfsmount *mnt);
69402+void gr_handle_chroot_chdir(struct path *path);
69403+int gr_handle_chroot_chmod(const struct dentry *dentry,
69404+ const struct vfsmount *mnt, const int mode);
69405+int gr_handle_chroot_mknod(const struct dentry *dentry,
69406+ const struct vfsmount *mnt, const int mode);
69407+int gr_handle_chroot_mount(const struct dentry *dentry,
69408+ const struct vfsmount *mnt,
69409+ const char *dev_name);
69410+int gr_handle_chroot_pivot(void);
69411+int gr_handle_chroot_unix(const pid_t pid);
69412+
69413+int gr_handle_rawio(const struct inode *inode);
69414+
69415+void gr_handle_ioperm(void);
69416+void gr_handle_iopl(void);
69417+
69418+umode_t gr_acl_umask(void);
69419+
69420+int gr_tpe_allow(const struct file *file);
69421+
69422+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
69423+void gr_clear_chroot_entries(struct task_struct *task);
69424+
69425+void gr_log_forkfail(const int retval);
69426+void gr_log_timechange(void);
69427+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
69428+void gr_log_chdir(const struct dentry *dentry,
69429+ const struct vfsmount *mnt);
69430+void gr_log_chroot_exec(const struct dentry *dentry,
69431+ const struct vfsmount *mnt);
69432+void gr_log_remount(const char *devname, const int retval);
69433+void gr_log_unmount(const char *devname, const int retval);
69434+void gr_log_mount(const char *from, const char *to, const int retval);
69435+void gr_log_textrel(struct vm_area_struct *vma);
69436+void gr_log_rwxmmap(struct file *file);
69437+void gr_log_rwxmprotect(struct file *file);
69438+
69439+int gr_handle_follow_link(const struct inode *parent,
69440+ const struct inode *inode,
69441+ const struct dentry *dentry,
69442+ const struct vfsmount *mnt);
69443+int gr_handle_fifo(const struct dentry *dentry,
69444+ const struct vfsmount *mnt,
69445+ const struct dentry *dir, const int flag,
69446+ const int acc_mode);
69447+int gr_handle_hardlink(const struct dentry *dentry,
69448+ const struct vfsmount *mnt,
69449+ struct inode *inode,
69450+ const int mode, const struct filename *to);
69451+
69452+int gr_is_capable(const int cap);
69453+int gr_is_capable_nolog(const int cap);
69454+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
69455+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
69456+
69457+void gr_copy_label(struct task_struct *tsk);
69458+void gr_handle_crash(struct task_struct *task, const int sig);
69459+int gr_handle_signal(const struct task_struct *p, const int sig);
69460+int gr_check_crash_uid(const kuid_t uid);
69461+int gr_check_protected_task(const struct task_struct *task);
69462+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
69463+int gr_acl_handle_mmap(const struct file *file,
69464+ const unsigned long prot);
69465+int gr_acl_handle_mprotect(const struct file *file,
69466+ const unsigned long prot);
69467+int gr_check_hidden_task(const struct task_struct *tsk);
69468+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
69469+ const struct vfsmount *mnt);
69470+__u32 gr_acl_handle_utime(const struct dentry *dentry,
69471+ const struct vfsmount *mnt);
69472+__u32 gr_acl_handle_access(const struct dentry *dentry,
69473+ const struct vfsmount *mnt, const int fmode);
69474+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
69475+ const struct vfsmount *mnt, umode_t *mode);
69476+__u32 gr_acl_handle_chown(const struct dentry *dentry,
69477+ const struct vfsmount *mnt);
69478+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
69479+ const struct vfsmount *mnt);
69480+int gr_handle_ptrace(struct task_struct *task, const long request);
69481+int gr_handle_proc_ptrace(struct task_struct *task);
69482+__u32 gr_acl_handle_execve(const struct dentry *dentry,
69483+ const struct vfsmount *mnt);
69484+int gr_check_crash_exec(const struct file *filp);
69485+int gr_acl_is_enabled(void);
69486+void gr_set_kernel_label(struct task_struct *task);
69487+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
69488+ const kgid_t gid);
69489+int gr_set_proc_label(const struct dentry *dentry,
69490+ const struct vfsmount *mnt,
69491+ const int unsafe_flags);
69492+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
69493+ const struct vfsmount *mnt);
69494+__u32 gr_acl_handle_open(const struct dentry *dentry,
69495+ const struct vfsmount *mnt, int acc_mode);
69496+__u32 gr_acl_handle_creat(const struct dentry *dentry,
69497+ const struct dentry *p_dentry,
69498+ const struct vfsmount *p_mnt,
69499+ int open_flags, int acc_mode, const int imode);
69500+void gr_handle_create(const struct dentry *dentry,
69501+ const struct vfsmount *mnt);
69502+void gr_handle_proc_create(const struct dentry *dentry,
69503+ const struct inode *inode);
69504+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
69505+ const struct dentry *parent_dentry,
69506+ const struct vfsmount *parent_mnt,
69507+ const int mode);
69508+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
69509+ const struct dentry *parent_dentry,
69510+ const struct vfsmount *parent_mnt);
69511+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
69512+ const struct vfsmount *mnt);
69513+void gr_handle_delete(const ino_t ino, const dev_t dev);
69514+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
69515+ const struct vfsmount *mnt);
69516+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
69517+ const struct dentry *parent_dentry,
69518+ const struct vfsmount *parent_mnt,
69519+ const struct filename *from);
69520+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
69521+ const struct dentry *parent_dentry,
69522+ const struct vfsmount *parent_mnt,
69523+ const struct dentry *old_dentry,
69524+ const struct vfsmount *old_mnt, const struct filename *to);
69525+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
69526+int gr_acl_handle_rename(struct dentry *new_dentry,
69527+ struct dentry *parent_dentry,
69528+ const struct vfsmount *parent_mnt,
69529+ struct dentry *old_dentry,
69530+ struct inode *old_parent_inode,
69531+ struct vfsmount *old_mnt, const struct filename *newname);
69532+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
69533+ struct dentry *old_dentry,
69534+ struct dentry *new_dentry,
69535+ struct vfsmount *mnt, const __u8 replace);
69536+__u32 gr_check_link(const struct dentry *new_dentry,
69537+ const struct dentry *parent_dentry,
69538+ const struct vfsmount *parent_mnt,
69539+ const struct dentry *old_dentry,
69540+ const struct vfsmount *old_mnt);
69541+int gr_acl_handle_filldir(const struct file *file, const char *name,
69542+ const unsigned int namelen, const ino_t ino);
69543+
69544+__u32 gr_acl_handle_unix(const struct dentry *dentry,
69545+ const struct vfsmount *mnt);
69546+void gr_acl_handle_exit(void);
69547+void gr_acl_handle_psacct(struct task_struct *task, const long code);
69548+int gr_acl_handle_procpidmem(const struct task_struct *task);
69549+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
69550+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
69551+void gr_audit_ptrace(struct task_struct *task);
69552+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
69553+void gr_put_exec_file(struct task_struct *task);
69554+
69555+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
69556+
69557+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
69558+extern void gr_learn_resource(const struct task_struct *task, const int res,
69559+ const unsigned long wanted, const int gt);
69560+#else
69561+static inline void gr_learn_resource(const struct task_struct *task, const int res,
69562+ const unsigned long wanted, const int gt)
69563+{
69564+}
69565+#endif
69566+
69567+#ifdef CONFIG_GRKERNSEC_RESLOG
69568+extern void gr_log_resource(const struct task_struct *task, const int res,
69569+ const unsigned long wanted, const int gt);
69570+#else
69571+static inline void gr_log_resource(const struct task_struct *task, const int res,
69572+ const unsigned long wanted, const int gt)
69573+{
69574+}
69575+#endif
69576+
69577+#ifdef CONFIG_GRKERNSEC
69578+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
69579+void gr_handle_vm86(void);
69580+void gr_handle_mem_readwrite(u64 from, u64 to);
69581+
69582+void gr_log_badprocpid(const char *entry);
69583+
69584+extern int grsec_enable_dmesg;
69585+extern int grsec_disable_privio;
69586+
69587+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
69588+extern kgid_t grsec_proc_gid;
69589+#endif
69590+
69591+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
69592+extern int grsec_enable_chroot_findtask;
69593+#endif
69594+#ifdef CONFIG_GRKERNSEC_SETXID
69595+extern int grsec_enable_setxid;
69596+#endif
69597+#endif
69598+
69599+#endif
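
The gr_learn_resource()/gr_log_resource() pairs above use the standard kernel compile-out idiom: when the config option is off, the extern declaration is replaced by an empty static inline, so call sites need no #ifdefs and the calls vanish at -O2. A self-contained illustration with a made-up FEATURE_LOGGING switch:

#include <stdio.h>

#ifdef FEATURE_LOGGING
extern void log_event(const char *what);	/* real definition elsewhere */
#else
static inline void log_event(const char *what) { (void)what; }	/* compiled out */
#endif

int main(void)
{
	log_event("resource overstep");	/* a no-op unless built with -DFEATURE_LOGGING */
	puts("done");
	return 0;
}
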
69600diff --git a/include/linux/grsock.h b/include/linux/grsock.h
69601new file mode 100644
69602index 0000000..e7ffaaf
69603--- /dev/null
69604+++ b/include/linux/grsock.h
69605@@ -0,0 +1,19 @@
69606+#ifndef __GRSOCK_H
69607+#define __GRSOCK_H
69608+
69609+extern void gr_attach_curr_ip(const struct sock *sk);
69610+extern int gr_handle_sock_all(const int family, const int type,
69611+ const int protocol);
69612+extern int gr_handle_sock_server(const struct sockaddr *sck);
69613+extern int gr_handle_sock_server_other(const struct sock *sck);
69614+extern int gr_handle_sock_client(const struct sockaddr *sck);
69615+extern int gr_search_connect(struct socket * sock,
69616+ struct sockaddr_in * addr);
69617+extern int gr_search_bind(struct socket * sock,
69618+ struct sockaddr_in * addr);
69619+extern int gr_search_listen(struct socket * sock);
69620+extern int gr_search_accept(struct socket * sock);
69621+extern int gr_search_socket(const int domain, const int type,
69622+ const int protocol);
69623+
69624+#endif
69625diff --git a/include/linux/highmem.h b/include/linux/highmem.h
69626index ef788b5..ac41b7b 100644
69627--- a/include/linux/highmem.h
69628+++ b/include/linux/highmem.h
69629@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
69630 kunmap_atomic(kaddr);
69631 }
69632
69633+static inline void sanitize_highpage(struct page *page)
69634+{
69635+ void *kaddr;
69636+ unsigned long flags;
69637+
69638+ local_irq_save(flags);
69639+ kaddr = kmap_atomic(page);
69640+ clear_page(kaddr);
69641+ kunmap_atomic(kaddr);
69642+ local_irq_restore(flags);
69643+}
69644+
69645 static inline void zero_user_segments(struct page *page,
69646 unsigned start1, unsigned end1,
69647 unsigned start2, unsigned end2)
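
sanitize_highpage() differs from clear_highpage() just above it only in wrapping the clear in local_irq_save()/local_irq_restore(), so the sensitive page is scrubbed inside an uninterruptible window. A rough userspace analogue, substituting signal blocking for interrupt disabling (illustrative only):

#include <signal.h>
#include <stdio.h>
#include <string.h>

/* local_irq_save/restore becomes a signal-mask save/restore, and the
 * highmem page becomes an ordinary buffer; the point is the same:
 * scrub sensitive memory without interruption. */
static void sanitize_buffer(void *buf, size_t len)
{
	sigset_t all, old;

	sigfillset(&all);
	sigprocmask(SIG_BLOCK, &all, &old);	/* ~ local_irq_save(flags)   */
	memset(buf, 0, len);			/* ~ clear_page(kaddr)       */
	sigprocmask(SIG_SETMASK, &old, NULL);	/* ~ local_irq_restore(flags) */
}

int main(void)
{
	char secret[64] = "hunter2";

	sanitize_buffer(secret, sizeof(secret));
	printf("first byte after sanitize: %d\n", secret[0]);
	return 0;
}
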
69648diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
69649index 1c7b89a..7f52502 100644
69650--- a/include/linux/hwmon-sysfs.h
69651+++ b/include/linux/hwmon-sysfs.h
69652@@ -25,7 +25,8 @@
69653 struct sensor_device_attribute{
69654 struct device_attribute dev_attr;
69655 int index;
69656-};
69657+} __do_const;
69658+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
69659 #define to_sensor_dev_attr(_dev_attr) \
69660 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
69661
69662@@ -41,7 +42,7 @@ struct sensor_device_attribute_2 {
69663 struct device_attribute dev_attr;
69664 u8 index;
69665 u8 nr;
69666-};
69667+} __do_const;
69668 #define to_sensor_dev_attr_2(_dev_attr) \
69669 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
69670
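
__do_const (enforced by a PaX gcc plugin, here and in the many similar hunks below) forces instances of these ops/attribute structs to be const so they land in read-only memory and their function pointers cannot be retargeted at runtime; the *_no_const typedef opts out the rare instance that genuinely must stay writable. Plain const shows the intent in a standalone sketch:

#include <stdio.h>

struct sensor_ops {
	int (*show)(void);
};

static int show_temp(void) { return 42; }

/* const places the instance in .rodata, so the embedded function
 * pointer cannot be overwritten at runtime */
static const struct sensor_ops temp_ops = { .show = show_temp };

int main(void)
{
	printf("temp: %d\n", temp_ops.show());
	return 0;
}
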
69671diff --git a/include/linux/i2c.h b/include/linux/i2c.h
69672index d0c4db7..61b3577 100644
69673--- a/include/linux/i2c.h
69674+++ b/include/linux/i2c.h
69675@@ -369,6 +369,7 @@ struct i2c_algorithm {
69676 /* To determine what the adapter supports */
69677 u32 (*functionality) (struct i2c_adapter *);
69678 };
69679+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
69680
69681 /*
69682 * i2c_adapter is the structure used to identify a physical i2c bus along
69683diff --git a/include/linux/i2o.h b/include/linux/i2o.h
69684index d23c3c2..eb63c81 100644
69685--- a/include/linux/i2o.h
69686+++ b/include/linux/i2o.h
69687@@ -565,7 +565,7 @@ struct i2o_controller {
69688 struct i2o_device *exec; /* Executive */
69689 #if BITS_PER_LONG == 64
69690 spinlock_t context_list_lock; /* lock for context_list */
69691- atomic_t context_list_counter; /* needed for unique contexts */
69692+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
69693 struct list_head context_list; /* list of context id's
69694 and pointers */
69695 #endif
69696diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
69697index aff7ad8..3942bbd 100644
69698--- a/include/linux/if_pppox.h
69699+++ b/include/linux/if_pppox.h
69700@@ -76,7 +76,7 @@ struct pppox_proto {
69701 int (*ioctl)(struct socket *sock, unsigned int cmd,
69702 unsigned long arg);
69703 struct module *owner;
69704-};
69705+} __do_const;
69706
69707 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
69708 extern void unregister_pppox_proto(int proto_num);
69709diff --git a/include/linux/init.h b/include/linux/init.h
69710index 10ed4f4..8e8490d 100644
69711--- a/include/linux/init.h
69712+++ b/include/linux/init.h
69713@@ -39,9 +39,36 @@
69714 * Also note, that this data cannot be "const".
69715 */
69716
69717+#ifdef MODULE
69718+#define add_init_latent_entropy
69719+#define add_devinit_latent_entropy
69720+#define add_cpuinit_latent_entropy
69721+#define add_meminit_latent_entropy
69722+#else
69723+#define add_init_latent_entropy __latent_entropy
69724+
69725+#ifdef CONFIG_HOTPLUG
69726+#define add_devinit_latent_entropy
69727+#else
69728+#define add_devinit_latent_entropy __latent_entropy
69729+#endif
69730+
69731+#ifdef CONFIG_HOTPLUG_CPU
69732+#define add_cpuinit_latent_entropy
69733+#else
69734+#define add_cpuinit_latent_entropy __latent_entropy
69735+#endif
69736+
69737+#ifdef CONFIG_MEMORY_HOTPLUG
69738+#define add_meminit_latent_entropy
69739+#else
69740+#define add_meminit_latent_entropy __latent_entropy
69741+#endif
69742+#endif
69743+
69744 /* These are for everybody (although not all archs will actually
69745 discard it in modules) */
69746-#define __init __section(.init.text) __cold notrace
69747+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
69748 #define __initdata __section(.init.data)
69749 #define __initconst __constsection(.init.rodata)
69750 #define __exitdata __section(.exit.data)
69751@@ -94,7 +121,7 @@
69752 #define __exit __section(.exit.text) __exitused __cold notrace
69753
69754 /* Used for HOTPLUG_CPU */
69755-#define __cpuinit __section(.cpuinit.text) __cold notrace
69756+#define __cpuinit __section(.cpuinit.text) __cold notrace add_cpuinit_latent_entropy
69757 #define __cpuinitdata __section(.cpuinit.data)
69758 #define __cpuinitconst __constsection(.cpuinit.rodata)
69759 #define __cpuexit __section(.cpuexit.text) __exitused __cold notrace
69760@@ -102,7 +129,7 @@
69761 #define __cpuexitconst __constsection(.cpuexit.rodata)
69762
69763 /* Used for MEMORY_HOTPLUG */
69764-#define __meminit __section(.meminit.text) __cold notrace
69765+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
69766 #define __meminitdata __section(.meminit.data)
69767 #define __meminitconst __constsection(.meminit.rodata)
69768 #define __memexit __section(.memexit.text) __exitused __cold notrace
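
The redefinitions above chain the latent-entropy plugin attribute onto the existing section placement of __init/__cpuinit/__meminit, except in modules or when the corresponding hotplug support keeps the code resident. The section-placement half of the trick can be shown in plain GCC C (the __latent_entropy attribute itself is supplied by the PaX gcc plugin and is omitted here):

#include <stdio.h>

/* assumed stand-in for __init, minus the plugin attribute */
#define my_init __attribute__((__section__(".init.text"), cold))

static my_init int setup_once(void)
{
	return 42;	/* emitted into the .init.text section, marked cold */
}

int main(void)
{
	printf("%d\n", setup_once());
	return 0;
}
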
69769diff --git a/include/linux/init_task.h b/include/linux/init_task.h
69770index 6d087c5..401cab8 100644
69771--- a/include/linux/init_task.h
69772+++ b/include/linux/init_task.h
69773@@ -143,6 +143,12 @@ extern struct task_group root_task_group;
69774
69775 #define INIT_TASK_COMM "swapper"
69776
69777+#ifdef CONFIG_X86
69778+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
69779+#else
69780+#define INIT_TASK_THREAD_INFO
69781+#endif
69782+
69783 /*
69784 * INIT_TASK is used to set up the first task table, touch at
69785 * your own risk!. Base=0, limit=0x1fffff (=2MB)
69786@@ -182,6 +188,7 @@ extern struct task_group root_task_group;
69787 RCU_POINTER_INITIALIZER(cred, &init_cred), \
69788 .comm = INIT_TASK_COMM, \
69789 .thread = INIT_THREAD, \
69790+ INIT_TASK_THREAD_INFO \
69791 .fs = &init_fs, \
69792 .files = &init_files, \
69793 .signal = &init_signals, \
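
INIT_TASK_THREAD_INFO above expands either to a designated-initializer fragment or to nothing, so the single INIT_TASK definition serves both x86 (where PaX moves thread_info into task_struct) and every other arch. The same trick in miniature, with a made-up HAVE_EXTRA switch:

#include <stdio.h>

#define HAVE_EXTRA 1	/* stands in for CONFIG_X86 */

struct task {
	int pid;
#if HAVE_EXTRA
	int extra;	/* stands in for the x86-only tinfo member */
#endif
	const char *comm;
};

#if HAVE_EXTRA
#define INIT_EXTRA .extra = 7,
#else
#define INIT_EXTRA
#endif

#define INIT_TASK { .pid = 0, INIT_EXTRA .comm = "swapper" }

int main(void)
{
	struct task t = INIT_TASK;

	printf("%s\n", t.comm);
	return 0;
}
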
69794diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
69795index 5fa5afe..ac55b25 100644
69796--- a/include/linux/interrupt.h
69797+++ b/include/linux/interrupt.h
69798@@ -430,7 +430,7 @@ enum
69799 /* map softirq index to softirq name. update 'softirq_to_name' in
69800 * kernel/softirq.c when adding a new softirq.
69801 */
69802-extern char *softirq_to_name[NR_SOFTIRQS];
69803+extern const char * const softirq_to_name[NR_SOFTIRQS];
69804
69805 /* softirq mask and active fields moved to irq_cpustat_t in
69806 * asm/hardirq.h to get better cache usage. KAO
69807@@ -438,12 +438,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
69808
69809 struct softirq_action
69810 {
69811- void (*action)(struct softirq_action *);
69812-};
69813+ void (*action)(void);
69814+} __no_const;
69815
69816 asmlinkage void do_softirq(void);
69817 asmlinkage void __do_softirq(void);
69818-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
69819+extern void open_softirq(int nr, void (*action)(void));
69820 extern void softirq_init(void);
69821 extern void __raise_softirq_irqoff(unsigned int nr);
69822
69823diff --git a/include/linux/iommu.h b/include/linux/iommu.h
69824index f3b99e1..9b73cee 100644
69825--- a/include/linux/iommu.h
69826+++ b/include/linux/iommu.h
69827@@ -101,7 +101,7 @@ struct iommu_ops {
69828 int (*domain_set_attr)(struct iommu_domain *domain,
69829 enum iommu_attr attr, void *data);
69830 unsigned long pgsize_bitmap;
69831-};
69832+} __do_const;
69833
69834 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
69835 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
69836diff --git a/include/linux/irq.h b/include/linux/irq.h
69837index fdf2c4a..5332486 100644
69838--- a/include/linux/irq.h
69839+++ b/include/linux/irq.h
69840@@ -328,7 +328,8 @@ struct irq_chip {
69841 void (*irq_print_chip)(struct irq_data *data, struct seq_file *p);
69842
69843 unsigned long flags;
69844-};
69845+} __do_const;
69846+typedef struct irq_chip __no_const irq_chip_no_const;
69847
69848 /*
69849 * irq_chip specific flags
69850diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
69851index 6883e19..06992b1 100644
69852--- a/include/linux/kallsyms.h
69853+++ b/include/linux/kallsyms.h
69854@@ -15,7 +15,8 @@
69855
69856 struct module;
69857
69858-#ifdef CONFIG_KALLSYMS
69859+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
69860+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
69861 /* Lookup the address for a symbol. Returns 0 if not found. */
69862 unsigned long kallsyms_lookup_name(const char *name);
69863
69864@@ -106,6 +107,17 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
69865 /* Stupid that this does nothing, but I didn't create this mess. */
69866 #define __print_symbol(fmt, addr)
69867 #endif /*CONFIG_KALLSYMS*/
69868+#else /* when included by kallsyms.c, vsnprintf.c, or
69869+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
69870+extern void __print_symbol(const char *fmt, unsigned long address);
69871+extern int sprint_backtrace(char *buffer, unsigned long address);
69872+extern int sprint_symbol(char *buffer, unsigned long address);
69873+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
69874+const char *kallsyms_lookup(unsigned long addr,
69875+ unsigned long *symbolsize,
69876+ unsigned long *offset,
69877+ char **modname, char *namebuf);
69878+#endif
69879
69880 /* This macro allows us to keep printk typechecking */
69881 static __printf(1, 2)
69882diff --git a/include/linux/key-type.h b/include/linux/key-type.h
69883index 518a53a..5e28358 100644
69884--- a/include/linux/key-type.h
69885+++ b/include/linux/key-type.h
69886@@ -125,7 +125,7 @@ struct key_type {
69887 /* internal fields */
69888 struct list_head link; /* link in types list */
69889 struct lock_class_key lock_class; /* key->sem lock class */
69890-};
69891+} __do_const;
69892
69893 extern struct key_type key_type_keyring;
69894
69895diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
69896index 4dff0c6..1ca9b72 100644
69897--- a/include/linux/kgdb.h
69898+++ b/include/linux/kgdb.h
69899@@ -53,7 +53,7 @@ extern int kgdb_connected;
69900 extern int kgdb_io_module_registered;
69901
69902 extern atomic_t kgdb_setting_breakpoint;
69903-extern atomic_t kgdb_cpu_doing_single_step;
69904+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
69905
69906 extern struct task_struct *kgdb_usethread;
69907 extern struct task_struct *kgdb_contthread;
69908@@ -255,7 +255,7 @@ struct kgdb_arch {
69909 void (*correct_hw_break)(void);
69910
69911 void (*enable_nmi)(bool on);
69912-};
69913+} __do_const;
69914
69915 /**
69916 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
69917@@ -280,7 +280,7 @@ struct kgdb_io {
69918 void (*pre_exception) (void);
69919 void (*post_exception) (void);
69920 int is_console;
69921-};
69922+} __do_const;
69923
69924 extern struct kgdb_arch arch_kgdb_ops;
69925
69926diff --git a/include/linux/kmod.h b/include/linux/kmod.h
69927index 5398d58..5883a34 100644
69928--- a/include/linux/kmod.h
69929+++ b/include/linux/kmod.h
69930@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
69931 * usually useless though. */
69932 extern __printf(2, 3)
69933 int __request_module(bool wait, const char *name, ...);
69934+extern __printf(3, 4)
69935+int ___request_module(bool wait, char *param_name, const char *name, ...);
69936 #define request_module(mod...) __request_module(true, mod)
69937 #define request_module_nowait(mod...) __request_module(false, mod)
69938 #define try_then_request_module(x, mod...) \
69939diff --git a/include/linux/kobject.h b/include/linux/kobject.h
69940index 939b112..ed6ed51 100644
69941--- a/include/linux/kobject.h
69942+++ b/include/linux/kobject.h
69943@@ -111,7 +111,7 @@ struct kobj_type {
69944 struct attribute **default_attrs;
69945 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
69946 const void *(*namespace)(struct kobject *kobj);
69947-};
69948+} __do_const;
69949
69950 struct kobj_uevent_env {
69951 char *envp[UEVENT_NUM_ENVP];
69952@@ -134,6 +134,7 @@ struct kobj_attribute {
69953 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
69954 const char *buf, size_t count);
69955 };
69956+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
69957
69958 extern const struct sysfs_ops kobj_sysfs_ops;
69959
69960diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
69961index f66b065..c2c29b4 100644
69962--- a/include/linux/kobject_ns.h
69963+++ b/include/linux/kobject_ns.h
69964@@ -43,7 +43,7 @@ struct kobj_ns_type_operations {
69965 const void *(*netlink_ns)(struct sock *sk);
69966 const void *(*initial_ns)(void);
69967 void (*drop_ns)(void *);
69968-};
69969+} __do_const;
69970
69971 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
69972 int kobj_ns_type_registered(enum kobj_ns_type type);
69973diff --git a/include/linux/kref.h b/include/linux/kref.h
69974index 4972e6e..de4d19b 100644
69975--- a/include/linux/kref.h
69976+++ b/include/linux/kref.h
69977@@ -64,7 +64,7 @@ static inline void kref_get(struct kref *kref)
69978 static inline int kref_sub(struct kref *kref, unsigned int count,
69979 void (*release)(struct kref *kref))
69980 {
69981- WARN_ON(release == NULL);
69982+ BUG_ON(release == NULL);
69983
69984 if (atomic_sub_and_test((int) count, &kref->refcount)) {
69985 release(kref);
69986diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
69987index 2c497ab..afe32f5 100644
69988--- a/include/linux/kvm_host.h
69989+++ b/include/linux/kvm_host.h
69990@@ -418,7 +418,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
69991 int __must_check vcpu_load(struct kvm_vcpu *vcpu);
69992 void vcpu_put(struct kvm_vcpu *vcpu);
69993
69994-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
69995+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
69996 struct module *module);
69997 void kvm_exit(void);
69998
69999@@ -574,7 +574,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
70000 struct kvm_guest_debug *dbg);
70001 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
70002
70003-int kvm_arch_init(void *opaque);
70004+int kvm_arch_init(const void *opaque);
70005 void kvm_arch_exit(void);
70006
70007 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
70008diff --git a/include/linux/libata.h b/include/linux/libata.h
70009index 649e5f8..ead5194 100644
70010--- a/include/linux/libata.h
70011+++ b/include/linux/libata.h
70012@@ -915,7 +915,7 @@ struct ata_port_operations {
70013 * fields must be pointers.
70014 */
70015 const struct ata_port_operations *inherits;
70016-};
70017+} __do_const;
70018
70019 struct ata_port_info {
70020 unsigned long flags;
70021diff --git a/include/linux/list.h b/include/linux/list.h
70022index cc6d2aa..c10ee83 100644
70023--- a/include/linux/list.h
70024+++ b/include/linux/list.h
70025@@ -112,6 +112,19 @@ extern void __list_del_entry(struct list_head *entry);
70026 extern void list_del(struct list_head *entry);
70027 #endif
70028
70029+extern void __pax_list_add(struct list_head *new,
70030+ struct list_head *prev,
70031+ struct list_head *next);
70032+static inline void pax_list_add(struct list_head *new, struct list_head *head)
70033+{
70034+ __pax_list_add(new, head, head->next);
70035+}
70036+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
70037+{
70038+ __pax_list_add(new, head->prev, head);
70039+}
70040+extern void pax_list_del(struct list_head *entry);
70041+
70042 /**
70043 * list_replace - replace old entry by new one
70044 * @old : the element to be replaced
70045@@ -145,6 +158,8 @@ static inline void list_del_init(struct list_head *entry)
70046 INIT_LIST_HEAD(entry);
70047 }
70048
70049+extern void pax_list_del_init(struct list_head *entry);
70050+
70051 /**
70052 * list_move - delete from one list and add as another's head
70053 * @list: the entry to move
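
The pax_list_add()/pax_list_del() family above exists so that list nodes living in read-only (KERNEXEC/__do_const) memory can still be linked and unlinked: the pax_* helpers briefly open a kernel write window around the pointer update. A userspace analogue using mprotect() as the open/close primitive (error checking omitted; illustrative only):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* pax_open_kernel()/pax_close_kernel() become mprotect() calls */
static void write_protected(char *page, size_t pagesz, const char *val)
{
	mprotect(page, pagesz, PROT_READ | PROT_WRITE);	/* open the window  */
	strcpy(page, val);				/* the actual update */
	mprotect(page, pagesz, PROT_READ);		/* close the window */
}

int main(void)
{
	size_t pagesz = (size_t)sysconf(_SC_PAGESIZE);
	char *page = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	strcpy(page, "initial");
	mprotect(page, pagesz, PROT_READ);	/* data is now read-only */
	write_protected(page, pagesz, "updated");
	printf("%s\n", page);
	return 0;
}
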
70054diff --git a/include/linux/math64.h b/include/linux/math64.h
70055index b8ba855..0148090 100644
70056--- a/include/linux/math64.h
70057+++ b/include/linux/math64.h
70058@@ -14,7 +14,7 @@
70059 * This is commonly provided by 32bit archs to provide an optimized 64bit
70060 * divide.
70061 */
70062-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
70063+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
70064 {
70065 *remainder = dividend % divisor;
70066 return dividend / divisor;
70067@@ -50,7 +50,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
70068 #define div64_long(x,y) div_s64((x),(y))
70069
70070 #ifndef div_u64_rem
70071-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
70072+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
70073 {
70074 *remainder = do_div(dividend, divisor);
70075 return dividend;
70076@@ -79,7 +79,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
70077 * divide.
70078 */
70079 #ifndef div_u64
70080-static inline u64 div_u64(u64 dividend, u32 divisor)
70081+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
70082 {
70083 u32 remainder;
70084 return div_u64_rem(dividend, divisor, &remainder);
70085diff --git a/include/linux/mm.h b/include/linux/mm.h
70086index 66e2f7c..b916b9a 100644
70087--- a/include/linux/mm.h
70088+++ b/include/linux/mm.h
70089@@ -101,6 +101,11 @@ extern unsigned int kobjsize(const void *objp);
70090 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
70091 #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
70092 #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
70093+
70094+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
70095+#define VM_PAGEEXEC 0x02000000 /* vma->vm_page_prot needs special handling */
70096+#endif
70097+
70098 #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
70099
70100 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
70101@@ -200,8 +205,8 @@ struct vm_operations_struct {
70102 /* called by access_process_vm when get_user_pages() fails, typically
70103 * for use by special VMAs that can switch between memory and hardware
70104 */
70105- int (*access)(struct vm_area_struct *vma, unsigned long addr,
70106- void *buf, int len, int write);
70107+ ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
70108+ void *buf, size_t len, int write);
70109 #ifdef CONFIG_NUMA
70110 /*
70111 * set_policy() op must add a reference to any non-NULL @new mempolicy
70112@@ -231,6 +236,7 @@ struct vm_operations_struct {
70113 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
70114 unsigned long size, pgoff_t pgoff);
70115 };
70116+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
70117
70118 struct mmu_gather;
70119 struct inode;
70120@@ -995,8 +1001,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
70121 unsigned long *pfn);
70122 int follow_phys(struct vm_area_struct *vma, unsigned long address,
70123 unsigned int flags, unsigned long *prot, resource_size_t *phys);
70124-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
70125- void *buf, int len, int write);
70126+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
70127+ void *buf, size_t len, int write);
70128
70129 static inline void unmap_shared_mapping_range(struct address_space *mapping,
70130 loff_t const holebegin, loff_t const holelen)
70131@@ -1035,10 +1041,10 @@ static inline int fixup_user_fault(struct task_struct *tsk,
70132 }
70133 #endif
70134
70135-extern int make_pages_present(unsigned long addr, unsigned long end);
70136-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
70137-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
70138- void *buf, int len, int write);
70139+extern ssize_t make_pages_present(unsigned long addr, unsigned long end);
70140+extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
70141+extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
70142+ void *buf, size_t len, int write);
70143
70144 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
70145 unsigned long start, int len, unsigned int foll_flags,
70146@@ -1068,34 +1074,6 @@ int set_page_dirty(struct page *page);
70147 int set_page_dirty_lock(struct page *page);
70148 int clear_page_dirty_for_io(struct page *page);
70149
70150-/* Is the vma a continuation of the stack vma above it? */
70151-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
70152-{
70153- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
70154-}
70155-
70156-static inline int stack_guard_page_start(struct vm_area_struct *vma,
70157- unsigned long addr)
70158-{
70159- return (vma->vm_flags & VM_GROWSDOWN) &&
70160- (vma->vm_start == addr) &&
70161- !vma_growsdown(vma->vm_prev, addr);
70162-}
70163-
70164-/* Is the vma a continuation of the stack vma below it? */
70165-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
70166-{
70167- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
70168-}
70169-
70170-static inline int stack_guard_page_end(struct vm_area_struct *vma,
70171- unsigned long addr)
70172-{
70173- return (vma->vm_flags & VM_GROWSUP) &&
70174- (vma->vm_end == addr) &&
70175- !vma_growsup(vma->vm_next, addr);
70176-}
70177-
70178 extern pid_t
70179 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
70180
70181@@ -1198,6 +1176,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
70182 }
70183 #endif
70184
70185+#ifdef CONFIG_MMU
70186+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
70187+#else
70188+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
70189+{
70190+ return __pgprot(0);
70191+}
70192+#endif
70193+
70194 int vma_wants_writenotify(struct vm_area_struct *vma);
70195
70196 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
70197@@ -1216,8 +1203,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
70198 {
70199 return 0;
70200 }
70201+
70202+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
70203+ unsigned long address)
70204+{
70205+ return 0;
70206+}
70207 #else
70208 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
70209+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
70210 #endif
70211
70212 #ifdef __PAGETABLE_PMD_FOLDED
70213@@ -1226,8 +1220,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
70214 {
70215 return 0;
70216 }
70217+
70218+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
70219+ unsigned long address)
70220+{
70221+ return 0;
70222+}
70223 #else
70224 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
70225+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
70226 #endif
70227
70228 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
70229@@ -1245,11 +1246,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
70230 NULL: pud_offset(pgd, address);
70231 }
70232
70233+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
70234+{
70235+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
70236+ NULL: pud_offset(pgd, address);
70237+}
70238+
70239 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
70240 {
70241 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
70242 NULL: pmd_offset(pud, address);
70243 }
70244+
70245+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
70246+{
70247+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
70248+ NULL: pmd_offset(pud, address);
70249+}
70250 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
70251
70252 #if USE_SPLIT_PTLOCKS
70253@@ -1479,6 +1492,7 @@ extern unsigned long do_mmap_pgoff(struct file *, unsigned long,
70254 unsigned long, unsigned long,
70255 unsigned long, unsigned long);
70256 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
70257+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
70258
70259 /* These take the mm semaphore themselves */
70260 extern unsigned long vm_brk(unsigned long, unsigned long);
70261@@ -1573,6 +1587,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
70262 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
70263 struct vm_area_struct **pprev);
70264
70265+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
70266+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
70267+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
70268+
70269 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
70270 NULL if none. Assume start_addr < end_addr. */
70271 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
70272@@ -1601,15 +1619,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
70273 return vma;
70274 }
70275
70276-#ifdef CONFIG_MMU
70277-pgprot_t vm_get_page_prot(unsigned long vm_flags);
70278-#else
70279-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
70280-{
70281- return __pgprot(0);
70282-}
70283-#endif
70284-
70285 #ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
70286 unsigned long change_prot_numa(struct vm_area_struct *vma,
70287 unsigned long start, unsigned long end);
70288@@ -1649,6 +1658,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
70289 static inline void vm_stat_account(struct mm_struct *mm,
70290 unsigned long flags, struct file *file, long pages)
70291 {
70292+
70293+#ifdef CONFIG_PAX_RANDMMAP
70294+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
70295+#endif
70296+
70297 mm->total_vm += pages;
70298 }
70299 #endif /* CONFIG_PROC_FS */
70300@@ -1721,7 +1735,7 @@ extern int unpoison_memory(unsigned long pfn);
70301 extern int sysctl_memory_failure_early_kill;
70302 extern int sysctl_memory_failure_recovery;
70303 extern void shake_page(struct page *p, int access);
70304-extern atomic_long_t mce_bad_pages;
70305+extern atomic_long_unchecked_t mce_bad_pages;
70306 extern int soft_offline_page(struct page *page, int flags);
70307
70308 extern void dump_page(struct page *page);
70309@@ -1752,5 +1766,11 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
70310 static inline bool page_is_guard(struct page *page) { return false; }
70311 #endif /* CONFIG_DEBUG_PAGEALLOC */
70312
70313+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
70314+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
70315+#else
70316+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
70317+#endif
70318+
70319 #endif /* __KERNEL__ */
70320 #endif /* _LINUX_MM_H */
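
[Editorial sketch] The track_exec_limit() declaration above uses the kernel's usual config-gated stub idiom: a real prototype when CONFIG_ARCH_TRACK_EXEC_LIMIT is set, and an empty static inline otherwise, so callers never need their own #ifdefs. A minimal user-space sketch of the same idiom; HAVE_TRACKER and track_limit() are illustrative names, not from the patch:

#include <stdio.h>

/* Build with -DHAVE_TRACKER for the real implementation; without it the
 * empty static inline stub compiles away and callers stay #ifdef-free. */
#ifdef HAVE_TRACKER
static void track_limit(unsigned long start, unsigned long end)
{
	printf("tracking %#lx-%#lx\n", start, end);
}
#else
static inline void track_limit(unsigned long start, unsigned long end) {}
#endif

int main(void)
{
	track_limit(0x1000, 0x2000);	/* no-op unless HAVE_TRACKER is set */
	return 0;
}

Compiled without -DHAVE_TRACKER, the call site costs nothing at run time.
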
70321diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
70322index f8f5162..3aaf20f 100644
70323--- a/include/linux/mm_types.h
70324+++ b/include/linux/mm_types.h
70325@@ -288,6 +288,8 @@ struct vm_area_struct {
70326 #ifdef CONFIG_NUMA
70327 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
70328 #endif
70329+
70330+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
70331 };
70332
70333 struct core_thread {
70334@@ -436,6 +438,24 @@ struct mm_struct {
70335 int first_nid;
70336 #endif
70337 struct uprobes_state uprobes_state;
70338+
70339+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
70340+ unsigned long pax_flags;
70341+#endif
70342+
70343+#ifdef CONFIG_PAX_DLRESOLVE
70344+ unsigned long call_dl_resolve;
70345+#endif
70346+
70347+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
70348+ unsigned long call_syscall;
70349+#endif
70350+
70351+#ifdef CONFIG_PAX_ASLR
70352+ unsigned long delta_mmap; /* randomized offset */
70353+ unsigned long delta_stack; /* randomized offset */
70354+#endif
70355+
70356 };
70357
70358 /* first nid will either be a valid NID or one of these values */
70359diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
70360index c5d5278..f0b68c8 100644
70361--- a/include/linux/mmiotrace.h
70362+++ b/include/linux/mmiotrace.h
70363@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
70364 /* Called from ioremap.c */
70365 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
70366 void __iomem *addr);
70367-extern void mmiotrace_iounmap(volatile void __iomem *addr);
70368+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
70369
70370 /* For anyone to insert markers. Remember trailing newline. */
70371 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
70372@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
70373 {
70374 }
70375
70376-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
70377+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
70378 {
70379 }
70380
70381diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
70382index 73b64a3..6562925 100644
70383--- a/include/linux/mmzone.h
70384+++ b/include/linux/mmzone.h
70385@@ -412,7 +412,7 @@ struct zone {
70386 unsigned long flags; /* zone flags, see below */
70387
70388 /* Zone statistics */
70389- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70390+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70391
70392 /*
70393 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
70394diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
70395index fed3def..c933f99 100644
70396--- a/include/linux/mod_devicetable.h
70397+++ b/include/linux/mod_devicetable.h
70398@@ -12,7 +12,7 @@
70399 typedef unsigned long kernel_ulong_t;
70400 #endif
70401
70402-#define PCI_ANY_ID (~0)
70403+#define PCI_ANY_ID ((__u16)~0)
70404
70405 struct pci_device_id {
70406 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
70407@@ -139,7 +139,7 @@ struct usb_device_id {
70408 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
70409 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
70410
70411-#define HID_ANY_ID (~0)
70412+#define HID_ANY_ID (~0U)
70413 #define HID_BUS_ANY 0xffff
70414 #define HID_GROUP_ANY 0x0000
70415
70416@@ -498,7 +498,7 @@ struct dmi_system_id {
70417 const char *ident;
70418 struct dmi_strmatch matches[4];
70419 void *driver_data;
70420-};
70421+} __do_const;
70422 /*
70423 * struct dmi_device_id appears during expansion of
70424 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
70425diff --git a/include/linux/module.h b/include/linux/module.h
70426index 1375ee3..ced8177 100644
70427--- a/include/linux/module.h
70428+++ b/include/linux/module.h
70429@@ -17,9 +17,11 @@
70430 #include <linux/moduleparam.h>
70431 #include <linux/tracepoint.h>
70432 #include <linux/export.h>
70433+#include <linux/fs.h>
70434
70435 #include <linux/percpu.h>
70436 #include <asm/module.h>
70437+#include <asm/pgtable.h>
70438
70439 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
70440 #define MODULE_SIG_STRING "~Module signature appended~\n"
70441@@ -54,12 +56,13 @@ struct module_attribute {
70442 int (*test)(struct module *);
70443 void (*free)(struct module *);
70444 };
70445+typedef struct module_attribute __no_const module_attribute_no_const;
70446
70447 struct module_version_attribute {
70448 struct module_attribute mattr;
70449 const char *module_name;
70450 const char *version;
70451-} __attribute__ ((__aligned__(sizeof(void *))));
70452+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
70453
70454 extern ssize_t __modver_version_show(struct module_attribute *,
70455 struct module_kobject *, char *);
70456@@ -232,7 +235,7 @@ struct module
70457
70458 /* Sysfs stuff. */
70459 struct module_kobject mkobj;
70460- struct module_attribute *modinfo_attrs;
70461+ module_attribute_no_const *modinfo_attrs;
70462 const char *version;
70463 const char *srcversion;
70464 struct kobject *holders_dir;
70465@@ -281,19 +284,16 @@ struct module
70466 int (*init)(void);
70467
70468 /* If this is non-NULL, vfree after init() returns */
70469- void *module_init;
70470+ void *module_init_rx, *module_init_rw;
70471
70472 /* Here is the actual code + data, vfree'd on unload. */
70473- void *module_core;
70474+ void *module_core_rx, *module_core_rw;
70475
70476 /* Here are the sizes of the init and core sections */
70477- unsigned int init_size, core_size;
70478+ unsigned int init_size_rw, core_size_rw;
70479
70480 /* The size of the executable code in each section. */
70481- unsigned int init_text_size, core_text_size;
70482-
70483- /* Size of RO sections of the module (text+rodata) */
70484- unsigned int init_ro_size, core_ro_size;
70485+ unsigned int init_size_rx, core_size_rx;
70486
70487 /* Arch-specific module values */
70488 struct mod_arch_specific arch;
70489@@ -349,6 +349,10 @@ struct module
70490 #ifdef CONFIG_EVENT_TRACING
70491 struct ftrace_event_call **trace_events;
70492 unsigned int num_trace_events;
70493+ struct file_operations trace_id;
70494+ struct file_operations trace_enable;
70495+ struct file_operations trace_format;
70496+ struct file_operations trace_filter;
70497 #endif
70498 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
70499 unsigned int num_ftrace_callsites;
70500@@ -396,16 +400,46 @@ bool is_module_address(unsigned long addr);
70501 bool is_module_percpu_address(unsigned long addr);
70502 bool is_module_text_address(unsigned long addr);
70503
70504+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
70505+{
70506+
70507+#ifdef CONFIG_PAX_KERNEXEC
70508+ if (ktla_ktva(addr) >= (unsigned long)start &&
70509+ ktla_ktva(addr) < (unsigned long)start + size)
70510+ return 1;
70511+#endif
70512+
70513+ return ((void *)addr >= start && (void *)addr < start + size);
70514+}
70515+
70516+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
70517+{
70518+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
70519+}
70520+
70521+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
70522+{
70523+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
70524+}
70525+
70526+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
70527+{
70528+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
70529+}
70530+
70531+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
70532+{
70533+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
70534+}
70535+
70536 static inline int within_module_core(unsigned long addr, struct module *mod)
70537 {
70538- return (unsigned long)mod->module_core <= addr &&
70539- addr < (unsigned long)mod->module_core + mod->core_size;
70540+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
70541 }
70542
70543 static inline int within_module_init(unsigned long addr, struct module *mod)
70544 {
70545- return (unsigned long)mod->module_init <= addr &&
70546- addr < (unsigned long)mod->module_init + mod->init_size;
70547+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
70548 }
70549
70550 /* Search for module by name: must hold module_mutex. */
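
[Editorial sketch] With the module core and init regions split into RX and RW halves above, an address lookup probes each range separately and within_module_core() simply ORs the two results. A user-space sketch of the half-open [start, start + size) containment test, with the ktla_ktva() KERNEXEC remapping omitted and all names illustrative:

#include <stdio.h>

/* Half-open containment test in the shape of within_module_range(). */
static int within_range(unsigned long addr, const void *start,
			unsigned long size)
{
	const char *p = (const char *)addr;
	const char *s = (const char *)start;

	return p >= s && p < s + size;
}

int main(void)
{
	static char core_rx[64], core_rw[32];	/* stand-in module regions */
	unsigned long probe = (unsigned long)&core_rw[10];

	int in_core = within_range(probe, core_rx, sizeof(core_rx)) ||
		      within_range(probe, core_rw, sizeof(core_rw));

	printf("address is %s the module core\n", in_core ? "inside" : "outside");
	return 0;
}
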
70551diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
70552index 560ca53..ef621ef 100644
70553--- a/include/linux/moduleloader.h
70554+++ b/include/linux/moduleloader.h
70555@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
70556 sections. Returns NULL on failure. */
70557 void *module_alloc(unsigned long size);
70558
70559+#ifdef CONFIG_PAX_KERNEXEC
70560+void *module_alloc_exec(unsigned long size);
70561+#else
70562+#define module_alloc_exec(x) module_alloc(x)
70563+#endif
70564+
70565 /* Free memory returned from module_alloc. */
70566 void module_free(struct module *mod, void *module_region);
70567
70568+#ifdef CONFIG_PAX_KERNEXEC
70569+void module_free_exec(struct module *mod, void *module_region);
70570+#else
70571+#define module_free_exec(x, y) module_free((x), (y))
70572+#endif
70573+
70574 /*
70575 * Apply the given relocation to the (simplified) ELF. Return -error
70576 * or 0.
70577@@ -45,7 +57,9 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
70578 unsigned int relsec,
70579 struct module *me)
70580 {
70581+#ifdef CONFIG_MODULES
70582 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
70583+#endif
70584 return -ENOEXEC;
70585 }
70586 #endif
70587@@ -67,7 +81,9 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
70588 unsigned int relsec,
70589 struct module *me)
70590 {
70591+#ifdef CONFIG_MODULES
70592 printk(KERN_ERR "module %s: RELA relocation unsupported\n", me->name);
70593+#endif
70594 return -ENOEXEC;
70595 }
70596 #endif
70597diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
70598index 137b419..fe663ec 100644
70599--- a/include/linux/moduleparam.h
70600+++ b/include/linux/moduleparam.h
70601@@ -284,7 +284,7 @@ static inline void __kernel_param_unlock(void)
70602 * @len is usually just sizeof(string).
70603 */
70604 #define module_param_string(name, string, len, perm) \
70605- static const struct kparam_string __param_string_##name \
70606+ static const struct kparam_string __param_string_##name __used \
70607 = { len, string }; \
70608 __module_param_call(MODULE_PARAM_PREFIX, name, \
70609 &param_ops_string, \
70610@@ -423,7 +423,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
70611 */
70612 #define module_param_array_named(name, array, type, nump, perm) \
70613 param_check_##type(name, &(array)[0]); \
70614- static const struct kparam_array __param_arr_##name \
70615+ static const struct kparam_array __param_arr_##name __used \
70616 = { .max = ARRAY_SIZE(array), .num = nump, \
70617 .ops = &param_ops_##type, \
70618 .elemsize = sizeof(array[0]), .elem = array }; \
70619diff --git a/include/linux/namei.h b/include/linux/namei.h
70620index 5a5ff57..5ae5070 100644
70621--- a/include/linux/namei.h
70622+++ b/include/linux/namei.h
70623@@ -19,7 +19,7 @@ struct nameidata {
70624 unsigned seq;
70625 int last_type;
70626 unsigned depth;
70627- char *saved_names[MAX_NESTED_LINKS + 1];
70628+ const char *saved_names[MAX_NESTED_LINKS + 1];
70629 };
70630
70631 /*
70632@@ -84,12 +84,12 @@ extern void unlock_rename(struct dentry *, struct dentry *);
70633
70634 extern void nd_jump_link(struct nameidata *nd, struct path *path);
70635
70636-static inline void nd_set_link(struct nameidata *nd, char *path)
70637+static inline void nd_set_link(struct nameidata *nd, const char *path)
70638 {
70639 nd->saved_names[nd->depth] = path;
70640 }
70641
70642-static inline char *nd_get_link(struct nameidata *nd)
70643+static inline const char *nd_get_link(const struct nameidata *nd)
70644 {
70645 return nd->saved_names[nd->depth];
70646 }
70647diff --git a/include/linux/net.h b/include/linux/net.h
70648index aa16731..514b875 100644
70649--- a/include/linux/net.h
70650+++ b/include/linux/net.h
70651@@ -183,7 +183,7 @@ struct net_proto_family {
70652 int (*create)(struct net *net, struct socket *sock,
70653 int protocol, int kern);
70654 struct module *owner;
70655-};
70656+} __do_const;
70657
70658 struct iovec;
70659 struct kvec;
70660diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
70661index 9ef07d0..130a5d9 100644
70662--- a/include/linux/netdevice.h
70663+++ b/include/linux/netdevice.h
70664@@ -1012,6 +1012,7 @@ struct net_device_ops {
70665 u32 pid, u32 seq,
70666 struct net_device *dev);
70667 };
70668+typedef struct net_device_ops __no_const net_device_ops_no_const;
70669
70670 /*
70671 * The DEVICE structure.
70672@@ -1078,7 +1079,7 @@ struct net_device {
70673 int iflink;
70674
70675 struct net_device_stats stats;
70676- atomic_long_t rx_dropped; /* dropped packets by core network
70677+ atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
70678 * Do not use this in drivers.
70679 */
70680
70681diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
70682index ee14284..bc65d63 100644
70683--- a/include/linux/netfilter.h
70684+++ b/include/linux/netfilter.h
70685@@ -82,7 +82,7 @@ struct nf_sockopt_ops {
70686 #endif
70687 /* Use the module struct to lock set/get code in place */
70688 struct module *owner;
70689-};
70690+} __do_const;
70691
70692 /* Function to register/unregister hook points. */
70693 int nf_register_hook(struct nf_hook_ops *reg);
70694diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
70695index 7958e84..ed74d7a 100644
70696--- a/include/linux/netfilter/ipset/ip_set.h
70697+++ b/include/linux/netfilter/ipset/ip_set.h
70698@@ -98,7 +98,7 @@ struct ip_set_type_variant {
70699 /* Return true if "b" set is the same as "a"
70700 * according to the create set parameters */
70701 bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
70702-};
70703+} __do_const;
70704
70705 /* The core set type structure */
70706 struct ip_set_type {
70707diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
70708index 4966dde..7d8ce06 100644
70709--- a/include/linux/netfilter/nfnetlink.h
70710+++ b/include/linux/netfilter/nfnetlink.h
70711@@ -16,7 +16,7 @@ struct nfnl_callback {
70712 const struct nlattr * const cda[]);
70713 const struct nla_policy *policy; /* netlink attribute policy */
70714 const u_int16_t attr_count; /* number of nlattr's */
70715-};
70716+} __do_const;
70717
70718 struct nfnetlink_subsystem {
70719 const char *name;
70720diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
70721new file mode 100644
70722index 0000000..33f4af8
70723--- /dev/null
70724+++ b/include/linux/netfilter/xt_gradm.h
70725@@ -0,0 +1,9 @@
70726+#ifndef _LINUX_NETFILTER_XT_GRADM_H
70727+#define _LINUX_NETFILTER_XT_GRADM_H 1
70728+
70729+struct xt_gradm_mtinfo {
70730+ __u16 flags;
70731+ __u16 invflags;
70732+};
70733+
70734+#endif
70735diff --git a/include/linux/nls.h b/include/linux/nls.h
70736index 5dc635f..35f5e11 100644
70737--- a/include/linux/nls.h
70738+++ b/include/linux/nls.h
70739@@ -31,7 +31,7 @@ struct nls_table {
70740 const unsigned char *charset2upper;
70741 struct module *owner;
70742 struct nls_table *next;
70743-};
70744+} __do_const;
70745
70746 /* this value hold the maximum octet of charset */
70747 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
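
[Editorial sketch] The __do_const annotation recurring throughout this patch (nls_table, nfnl_callback, net_proto_family, ip_set_type_variant and others) constifies ops tables so they can live in read-only memory. Plain C already expresses the underlying idea for tables that are fully initialized at build time; a sketch with illustrative names:

#include <stdio.h>

/* An ops table that is fully initialized at build time can be const and
 * therefore placed in .rodata, out of reach of runtime corruption. */
struct ops {
	int (*open)(void);
	int (*close)(void);
};

static int do_open(void)  { puts("open");  return 0; }
static int do_close(void) { puts("close"); return 0; }

static const struct ops my_ops = {	/* read-only after link time */
	.open  = do_open,
	.close = do_close,
};

int main(void)
{
	my_ops.open();
	my_ops.close();
	return 0;
}

The companion __no_const typedefs (notifier_block_no_const, etc.) carve out the few instances that genuinely must stay writable.
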
70748diff --git a/include/linux/notifier.h b/include/linux/notifier.h
70749index d65746e..62e72c2 100644
70750--- a/include/linux/notifier.h
70751+++ b/include/linux/notifier.h
70752@@ -51,7 +51,8 @@ struct notifier_block {
70753 int (*notifier_call)(struct notifier_block *, unsigned long, void *);
70754 struct notifier_block __rcu *next;
70755 int priority;
70756-};
70757+} __do_const;
70758+typedef struct notifier_block __no_const notifier_block_no_const;
70759
70760 struct atomic_notifier_head {
70761 spinlock_t lock;
70762diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
70763index a4c5624..79d6d88 100644
70764--- a/include/linux/oprofile.h
70765+++ b/include/linux/oprofile.h
70766@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
70767 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
70768 char const * name, ulong * val);
70769
70770-/** Create a file for read-only access to an atomic_t. */
70771+/** Create a file for read-only access to an atomic_unchecked_t. */
70772 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
70773- char const * name, atomic_t * val);
70774+ char const * name, atomic_unchecked_t * val);
70775
70776 /** create a directory */
70777 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
70778diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
70779index 45fc162..01a4068 100644
70780--- a/include/linux/pci_hotplug.h
70781+++ b/include/linux/pci_hotplug.h
70782@@ -80,7 +80,8 @@ struct hotplug_slot_ops {
70783 int (*get_attention_status) (struct hotplug_slot *slot, u8 *value);
70784 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
70785 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
70786-};
70787+} __do_const;
70788+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
70789
70790 /**
70791 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
70792diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
70793index a280650..2b67b91 100644
70794--- a/include/linux/perf_event.h
70795+++ b/include/linux/perf_event.h
70796@@ -328,8 +328,8 @@ struct perf_event {
70797
70798 enum perf_event_active_state state;
70799 unsigned int attach_state;
70800- local64_t count;
70801- atomic64_t child_count;
70802+ local64_t count; /* PaX: fix it one day */
70803+ atomic64_unchecked_t child_count;
70804
70805 /*
70806 * These are the total time in nanoseconds that the event
70807@@ -380,8 +380,8 @@ struct perf_event {
70808 * These accumulate total time (in nanoseconds) that children
70809 * events have been enabled and running, respectively.
70810 */
70811- atomic64_t child_total_time_enabled;
70812- atomic64_t child_total_time_running;
70813+ atomic64_unchecked_t child_total_time_enabled;
70814+ atomic64_unchecked_t child_total_time_running;
70815
70816 /*
70817 * Protect attach/detach and child_list:
70818@@ -807,7 +807,7 @@ static inline void perf_restore_debug_store(void) { }
70819 */
70820 #define perf_cpu_notifier(fn) \
70821 do { \
70822- static struct notifier_block fn##_nb __cpuinitdata = \
70823+ static struct notifier_block fn##_nb = \
70824 { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
70825 unsigned long cpu = smp_processor_id(); \
70826 unsigned long flags; \
70827diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
70828index ad1a427..6419649 100644
70829--- a/include/linux/pipe_fs_i.h
70830+++ b/include/linux/pipe_fs_i.h
70831@@ -45,9 +45,9 @@ struct pipe_buffer {
70832 struct pipe_inode_info {
70833 wait_queue_head_t wait;
70834 unsigned int nrbufs, curbuf, buffers;
70835- unsigned int readers;
70836- unsigned int writers;
70837- unsigned int waiting_writers;
70838+ atomic_t readers;
70839+ atomic_t writers;
70840+ atomic_t waiting_writers;
70841 unsigned int r_counter;
70842 unsigned int w_counter;
70843 struct page *tmp_page;
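
[Editorial sketch] Converting the pipe reader/writer counts to atomic_t above makes each increment and decrement indivisible instead of a racy read-modify-write on a plain unsigned int. A user-space analogue using C11 atomics; the struct and field names are illustrative:

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for the patched pipe counters: atomic updates cannot lose
 * increments the way plain-int read-modify-write sequences can. */
struct pipe_counts {
	atomic_int readers;
	atomic_int writers;
};

int main(void)
{
	struct pipe_counts p = { 0, 0 };

	atomic_fetch_add(&p.readers, 1);	/* a reader attaches */
	atomic_fetch_add(&p.writers, 1);	/* a writer attaches */
	atomic_fetch_sub(&p.writers, 1);	/* the writer side closes */

	printf("readers=%d writers=%d\n",
	       atomic_load(&p.readers), atomic_load(&p.writers));
	return 0;
}
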
70844diff --git a/include/linux/platform_data/usb-ehci-s5p.h b/include/linux/platform_data/usb-ehci-s5p.h
70845index 5f28cae..3d23723 100644
70846--- a/include/linux/platform_data/usb-ehci-s5p.h
70847+++ b/include/linux/platform_data/usb-ehci-s5p.h
70848@@ -14,7 +14,7 @@
70849 struct s5p_ehci_platdata {
70850 int (*phy_init)(struct platform_device *pdev, int type);
70851 int (*phy_exit)(struct platform_device *pdev, int type);
70852-};
70853+} __no_const;
70854
70855 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
70856
70857diff --git a/include/linux/platform_data/usb-exynos.h b/include/linux/platform_data/usb-exynos.h
70858index c256c59..8ea94c7 100644
70859--- a/include/linux/platform_data/usb-exynos.h
70860+++ b/include/linux/platform_data/usb-exynos.h
70861@@ -14,7 +14,7 @@
70862 struct exynos4_ohci_platdata {
70863 int (*phy_init)(struct platform_device *pdev, int type);
70864 int (*phy_exit)(struct platform_device *pdev, int type);
70865-};
70866+} __no_const;
70867
70868 extern void exynos4_ohci_set_platdata(struct exynos4_ohci_platdata *pd);
70869
70870diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
70871index 7c1d252..c5c773e 100644
70872--- a/include/linux/pm_domain.h
70873+++ b/include/linux/pm_domain.h
70874@@ -48,7 +48,7 @@ struct gpd_dev_ops {
70875
70876 struct gpd_cpu_data {
70877 unsigned int saved_exit_latency;
70878- struct cpuidle_state *idle_state;
70879+ cpuidle_state_no_const *idle_state;
70880 };
70881
70882 struct generic_pm_domain {
70883diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
70884index f271860..6b3bec5 100644
70885--- a/include/linux/pm_runtime.h
70886+++ b/include/linux/pm_runtime.h
70887@@ -97,7 +97,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
70888
70889 static inline void pm_runtime_mark_last_busy(struct device *dev)
70890 {
70891- ACCESS_ONCE(dev->power.last_busy) = jiffies;
70892+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
70893 }
70894
70895 #else /* !CONFIG_PM_RUNTIME */
70896diff --git a/include/linux/pnp.h b/include/linux/pnp.h
70897index 195aafc..49a7bc2 100644
70898--- a/include/linux/pnp.h
70899+++ b/include/linux/pnp.h
70900@@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
70901 struct pnp_fixup {
70902 char id[7];
70903 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
70904-};
70905+} __do_const;
70906
70907 /* config parameters */
70908 #define PNP_CONFIG_NORMAL 0x0001
70909diff --git a/include/linux/poison.h b/include/linux/poison.h
70910index 2110a81..13a11bb 100644
70911--- a/include/linux/poison.h
70912+++ b/include/linux/poison.h
70913@@ -19,8 +19,8 @@
70914 * under normal circumstances, used to verify that nobody uses
70915 * non-initialized list entries.
70916 */
70917-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
70918-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
70919+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
70920+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
70921
70922 /********** include/linux/timer.h **********/
70923 /*
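
[Editorial sketch] The LIST_POISON hunk moves the poison values out of the low range 0x00100100/0x00200200, which can be user-mappable, up to high addresses, but the mechanism is unchanged: deleted list entries get recognizable non-NULL pointers so a stray dereference faults loudly. A user-space illustration of the poisoning pattern; the values and names are arbitrary stand-ins:

#include <stdio.h>

/* Arbitrary illustrative poison values; the kernel's are config-dependent. */
#define POISON_NEXT	((void *)(long)0xFFFFFF01)
#define POISON_PREV	((void *)(long)0xFFFFFF02)

struct node { struct node *next, *prev; };

static void list_del_poison(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->next = POISON_NEXT;	/* later use of the stale links is detectable */
	n->prev = POISON_PREV;
}

int main(void)
{
	struct node a, b, c;

	a.next = &b; b.prev = &a;	/* tiny circular list a -> b -> c -> a */
	b.next = &c; c.prev = &b;
	c.next = &a; a.prev = &c;

	list_del_poison(&b);
	printf("b.next poisoned: %d\n", b.next == POISON_NEXT);
	return 0;
}
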
70924diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
70925index c0f44c2..1572583 100644
70926--- a/include/linux/power/smartreflex.h
70927+++ b/include/linux/power/smartreflex.h
70928@@ -238,7 +238,7 @@ struct omap_sr_class_data {
70929 int (*notify)(struct omap_sr *sr, u32 status);
70930 u8 notify_flags;
70931 u8 class_type;
70932-};
70933+} __do_const;
70934
70935 /**
70936 * struct omap_sr_nvalue_table - Smartreflex n-target value info
70937diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
70938index 4ea1d37..80f4b33 100644
70939--- a/include/linux/ppp-comp.h
70940+++ b/include/linux/ppp-comp.h
70941@@ -84,7 +84,7 @@ struct compressor {
70942 struct module *owner;
70943 /* Extra skb space needed by the compressor algorithm */
70944 unsigned int comp_extra;
70945-};
70946+} __do_const;
70947
70948 /*
70949 * The return value from decompress routine is the length of the
70950diff --git a/include/linux/printk.h b/include/linux/printk.h
70951index 9afc01e..92c32e8 100644
70952--- a/include/linux/printk.h
70953+++ b/include/linux/printk.h
70954@@ -101,6 +101,8 @@ void early_printk(const char *fmt, ...);
70955 extern int printk_needs_cpu(int cpu);
70956 extern void printk_tick(void);
70957
70958+extern int kptr_restrict;
70959+
70960 #ifdef CONFIG_PRINTK
70961 asmlinkage __printf(5, 0)
70962 int vprintk_emit(int facility, int level,
70963@@ -135,7 +137,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
70964
70965 extern int printk_delay_msec;
70966 extern int dmesg_restrict;
70967-extern int kptr_restrict;
70968
70969 void log_buf_kexec_setup(void);
70970 void __init setup_log_buf(int early);
70971diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
70972index 32676b3..e46f2c0 100644
70973--- a/include/linux/proc_fs.h
70974+++ b/include/linux/proc_fs.h
70975@@ -159,6 +159,18 @@ static inline struct proc_dir_entry *proc_create(const char *name, umode_t mode,
70976 return proc_create_data(name, mode, parent, proc_fops, NULL);
70977 }
70978
70979+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
70980+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
70981+{
70982+#ifdef CONFIG_GRKERNSEC_PROC_USER
70983+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
70984+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
70985+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
70986+#else
70987+ return proc_create_data(name, mode, parent, proc_fops, NULL);
70988+#endif
70989+}
70990+
70991 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
70992 umode_t mode, struct proc_dir_entry *base,
70993 read_proc_t *read_proc, void * data)
70994@@ -268,7 +280,7 @@ struct proc_ns_operations {
70995 void (*put)(void *ns);
70996 int (*install)(struct nsproxy *nsproxy, void *ns);
70997 unsigned int (*inum)(void *ns);
70998-};
70999+} __do_const;
71000 extern const struct proc_ns_operations netns_operations;
71001 extern const struct proc_ns_operations utsns_operations;
71002 extern const struct proc_ns_operations ipcns_operations;
71003diff --git a/include/linux/random.h b/include/linux/random.h
71004index d984608..d6f0042 100644
71005--- a/include/linux/random.h
71006+++ b/include/linux/random.h
71007@@ -39,6 +39,11 @@ void prandom_seed(u32 seed);
71008 u32 prandom_u32_state(struct rnd_state *);
71009 void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
71010
71011+static inline unsigned long pax_get_random_long(void)
71012+{
71013+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
71014+}
71015+
71016 /*
71017 * Handle minimum values for seeds
71018 */
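
[Editorial sketch] pax_get_random_long() above widens a 32-bit PRNG output to a full long by drawing a second word for the high half whenever sizeof(long) > 4; on 32-bit targets the ternary folds away at compile time. A user-space analogue that substitutes rand() for prandom_u32() purely to mirror the composition (rand() is far weaker and is not what the patch uses; assumes a 64-bit long for the wide case):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Compose two 32-bit draws into one unsigned long, matching the shape of
 * pax_get_random_long(); on ILP32 the shift branch is never taken. */
static unsigned long get_random_long(void)
{
	return (unsigned long)rand() +
	       (sizeof(long) > 4 ? (unsigned long)rand() << 32 : 0);
}

int main(void)
{
	srand((unsigned)time(NULL));
	printf("%#lx\n", get_random_long());
	return 0;
}
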
71019diff --git a/include/linux/rculist.h b/include/linux/rculist.h
71020index c92dd28..08f4eab 100644
71021--- a/include/linux/rculist.h
71022+++ b/include/linux/rculist.h
71023@@ -44,6 +44,9 @@ extern void __list_add_rcu(struct list_head *new,
71024 struct list_head *prev, struct list_head *next);
71025 #endif
71026
71027+extern void __pax_list_add_rcu(struct list_head *new,
71028+ struct list_head *prev, struct list_head *next);
71029+
71030 /**
71031 * list_add_rcu - add a new entry to rcu-protected list
71032 * @new: new entry to be added
71033@@ -65,6 +68,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
71034 __list_add_rcu(new, head, head->next);
71035 }
71036
71037+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
71038+{
71039+ __pax_list_add_rcu(new, head, head->next);
71040+}
71041+
71042 /**
71043 * list_add_tail_rcu - add a new entry to rcu-protected list
71044 * @new: new entry to be added
71045@@ -87,6 +95,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
71046 __list_add_rcu(new, head->prev, head);
71047 }
71048
71049+static inline void pax_list_add_tail_rcu(struct list_head *new,
71050+ struct list_head *head)
71051+{
71052+ __pax_list_add_rcu(new, head->prev, head);
71053+}
71054+
71055 /**
71056 * list_del_rcu - deletes entry from list without re-initialization
71057 * @entry: the element to delete from the list.
71058@@ -117,6 +131,8 @@ static inline void list_del_rcu(struct list_head *entry)
71059 entry->prev = LIST_POISON2;
71060 }
71061
71062+extern void pax_list_del_rcu(struct list_head *entry);
71063+
71064 /**
71065 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
71066 * @n: the element to delete from the hash list.
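
[Editorial sketch] The pax_list_add_rcu()/pax_list_del_rcu() entry points exist because, under KERNEXEC, some list nodes live in read-only data and must be modified through a temporary write window. A loose user-space analogue of such a window using mprotect(); this mirrors only the open-write-close shape, not the kernel mechanism:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	int *slot = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (slot == MAP_FAILED)
		return 1;
	*slot = 1;
	mprotect(slot, pagesz, PROT_READ);		/* seal the page */

	mprotect(slot, pagesz, PROT_READ | PROT_WRITE);	/* open a window */
	*slot = 2;					/* the actual update */
	mprotect(slot, pagesz, PROT_READ);		/* close the window */

	printf("%d\n", *slot);
	return 0;
}
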
71067diff --git a/include/linux/reboot.h b/include/linux/reboot.h
71068index 23b3630..e1bc12b 100644
71069--- a/include/linux/reboot.h
71070+++ b/include/linux/reboot.h
71071@@ -18,9 +18,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
71072 * Architecture-specific implementations of sys_reboot commands.
71073 */
71074
71075-extern void machine_restart(char *cmd);
71076-extern void machine_halt(void);
71077-extern void machine_power_off(void);
71078+extern void machine_restart(char *cmd) __noreturn;
71079+extern void machine_halt(void) __noreturn;
71080+extern void machine_power_off(void) __noreturn;
71081
71082 extern void machine_shutdown(void);
71083 struct pt_regs;
71084@@ -31,9 +31,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
71085 */
71086
71087 extern void kernel_restart_prepare(char *cmd);
71088-extern void kernel_restart(char *cmd);
71089-extern void kernel_halt(void);
71090-extern void kernel_power_off(void);
71091+extern void kernel_restart(char *cmd) __noreturn;
71092+extern void kernel_halt(void) __noreturn;
71093+extern void kernel_power_off(void) __noreturn;
71094
71095 extern int C_A_D; /* for sysctl */
71096 void ctrl_alt_del(void);
71097@@ -47,7 +47,7 @@ extern int orderly_poweroff(bool force);
71098 * Emergency restart, callable from an interrupt handler.
71099 */
71100
71101-extern void emergency_restart(void);
71102+extern void emergency_restart(void) __noreturn;
71103 #include <asm/emergency-restart.h>
71104
71105 #endif /* _LINUX_REBOOT_H */
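
[Editorial sketch] Marking machine_halt() and the kernel_* shutdown entry points __noreturn lets the compiler discard unreachable code after the call and diagnose paths that could fall through. A small sketch with the GCC attribute the kernel macro expands to; die() and checked_div() are illustrative:

#include <stdio.h>
#include <stdlib.h>

/* __noreturn in the kernel expands to this GCC attribute. */
__attribute__((noreturn)) static void die(const char *msg)
{
	fprintf(stderr, "fatal: %s\n", msg);
	exit(1);
}

static int checked_div(int a, int b)
{
	if (b == 0)
		die("division by zero");
	return a / b;	/* no fall-through warning: die() cannot return */
}

int main(void)
{
	printf("%d\n", checked_div(10, 2));
	return 0;
}
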
71106diff --git a/include/linux/regset.h b/include/linux/regset.h
71107index 8e0c9fe..ac4d221 100644
71108--- a/include/linux/regset.h
71109+++ b/include/linux/regset.h
71110@@ -161,7 +161,8 @@ struct user_regset {
71111 unsigned int align;
71112 unsigned int bias;
71113 unsigned int core_note_type;
71114-};
71115+} __do_const;
71116+typedef struct user_regset __no_const user_regset_no_const;
71117
71118 /**
71119 * struct user_regset_view - available regsets
71120diff --git a/include/linux/relay.h b/include/linux/relay.h
71121index 91cacc3..b55ff74 100644
71122--- a/include/linux/relay.h
71123+++ b/include/linux/relay.h
71124@@ -160,7 +160,7 @@ struct rchan_callbacks
71125 * The callback should return 0 if successful, negative if not.
71126 */
71127 int (*remove_buf_file)(struct dentry *dentry);
71128-};
71129+} __no_const;
71130
71131 /*
71132 * CONFIG_RELAY kernel API, kernel/relay.c
71133diff --git a/include/linux/rio.h b/include/linux/rio.h
71134index a3e7842..d973ca6 100644
71135--- a/include/linux/rio.h
71136+++ b/include/linux/rio.h
71137@@ -339,7 +339,7 @@ struct rio_ops {
71138 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
71139 u64 rstart, u32 size, u32 flags);
71140 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
71141-};
71142+} __no_const;
71143
71144 #define RIO_RESOURCE_MEM 0x00000100
71145 #define RIO_RESOURCE_DOORBELL 0x00000200
71146diff --git a/include/linux/rmap.h b/include/linux/rmap.h
71147index c20635c..2f5def4 100644
71148--- a/include/linux/rmap.h
71149+++ b/include/linux/rmap.h
71150@@ -145,8 +145,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
71151 void anon_vma_init(void); /* create anon_vma_cachep */
71152 int anon_vma_prepare(struct vm_area_struct *);
71153 void unlink_anon_vmas(struct vm_area_struct *);
71154-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
71155-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
71156+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
71157+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
71158
71159 static inline void anon_vma_merge(struct vm_area_struct *vma,
71160 struct vm_area_struct *next)
71161diff --git a/include/linux/sched.h b/include/linux/sched.h
71162index d211247..eac6c2c 100644
71163--- a/include/linux/sched.h
71164+++ b/include/linux/sched.h
71165@@ -61,6 +61,7 @@ struct bio_list;
71166 struct fs_struct;
71167 struct perf_event_context;
71168 struct blk_plug;
71169+struct linux_binprm;
71170
71171 /*
71172 * List of flags we want to share for kernel threads,
71173@@ -327,7 +328,7 @@ extern char __sched_text_start[], __sched_text_end[];
71174 extern int in_sched_functions(unsigned long addr);
71175
71176 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
71177-extern signed long schedule_timeout(signed long timeout);
71178+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
71179 extern signed long schedule_timeout_interruptible(signed long timeout);
71180 extern signed long schedule_timeout_killable(signed long timeout);
71181 extern signed long schedule_timeout_uninterruptible(signed long timeout);
71182@@ -354,10 +355,23 @@ struct user_namespace;
71183 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
71184
71185 extern int sysctl_max_map_count;
71186+extern unsigned long sysctl_heap_stack_gap;
71187
71188 #include <linux/aio.h>
71189
71190 #ifdef CONFIG_MMU
71191+
71192+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
71193+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
71194+#else
71195+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
71196+{
71197+ return 0;
71198+}
71199+#endif
71200+
71201+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
71202+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
71203 extern void arch_pick_mmap_layout(struct mm_struct *mm);
71204 extern unsigned long
71205 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
71206@@ -639,6 +653,17 @@ struct signal_struct {
71207 #ifdef CONFIG_TASKSTATS
71208 struct taskstats *stats;
71209 #endif
71210+
71211+#ifdef CONFIG_GRKERNSEC
71212+ u32 curr_ip;
71213+ u32 saved_ip;
71214+ u32 gr_saddr;
71215+ u32 gr_daddr;
71216+ u16 gr_sport;
71217+ u16 gr_dport;
71218+ u8 used_accept:1;
71219+#endif
71220+
71221 #ifdef CONFIG_AUDIT
71222 unsigned audit_tty;
71223 struct tty_audit_buf *tty_audit_buf;
71224@@ -717,6 +742,11 @@ struct user_struct {
71225 struct key *session_keyring; /* UID's default session keyring */
71226 #endif
71227
71228+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
71229+ unsigned int banned;
71230+ unsigned long ban_expires;
71231+#endif
71232+
71233 /* Hash table maintenance information */
71234 struct hlist_node uidhash_node;
71235 kuid_t uid;
71236@@ -1116,7 +1146,7 @@ struct sched_class {
71237 #ifdef CONFIG_FAIR_GROUP_SCHED
71238 void (*task_move_group) (struct task_struct *p, int on_rq);
71239 #endif
71240-};
71241+} __do_const;
71242
71243 struct load_weight {
71244 unsigned long weight, inv_weight;
71245@@ -1360,8 +1390,8 @@ struct task_struct {
71246 struct list_head thread_group;
71247
71248 struct completion *vfork_done; /* for vfork() */
71249- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
71250- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
71251+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
71252+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
71253
71254 cputime_t utime, stime, utimescaled, stimescaled;
71255 cputime_t gtime;
71256@@ -1377,11 +1407,6 @@ struct task_struct {
71257 struct task_cputime cputime_expires;
71258 struct list_head cpu_timers[3];
71259
71260-/* process credentials */
71261- const struct cred __rcu *real_cred; /* objective and real subjective task
71262- * credentials (COW) */
71263- const struct cred __rcu *cred; /* effective (overridable) subjective task
71264- * credentials (COW) */
71265 char comm[TASK_COMM_LEN]; /* executable name excluding path
71266 - access with [gs]et_task_comm (which lock
71267 it with task_lock())
71268@@ -1398,6 +1423,10 @@ struct task_struct {
71269 #endif
71270 /* CPU-specific state of this task */
71271 struct thread_struct thread;
71272+/* thread_info moved to task_struct */
71273+#ifdef CONFIG_X86
71274+ struct thread_info tinfo;
71275+#endif
71276 /* filesystem information */
71277 struct fs_struct *fs;
71278 /* open file information */
71279@@ -1471,6 +1500,10 @@ struct task_struct {
71280 gfp_t lockdep_reclaim_gfp;
71281 #endif
71282
71283+/* process credentials */
71284+ const struct cred __rcu *real_cred; /* objective and real subjective task
71285+ * credentials (COW) */
71286+
71287 /* journalling filesystem info */
71288 void *journal_info;
71289
71290@@ -1509,6 +1542,10 @@ struct task_struct {
71291 /* cg_list protected by css_set_lock and tsk->alloc_lock */
71292 struct list_head cg_list;
71293 #endif
71294+
71295+ const struct cred __rcu *cred; /* effective (overridable) subjective task
71296+ * credentials (COW) */
71297+
71298 #ifdef CONFIG_FUTEX
71299 struct robust_list_head __user *robust_list;
71300 #ifdef CONFIG_COMPAT
71301@@ -1605,8 +1642,74 @@ struct task_struct {
71302 #ifdef CONFIG_UPROBES
71303 struct uprobe_task *utask;
71304 #endif
71305+
71306+#ifdef CONFIG_GRKERNSEC
71307+ /* grsecurity */
71308+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71309+ u64 exec_id;
71310+#endif
71311+#ifdef CONFIG_GRKERNSEC_SETXID
71312+ const struct cred *delayed_cred;
71313+#endif
71314+ struct dentry *gr_chroot_dentry;
71315+ struct acl_subject_label *acl;
71316+ struct acl_role_label *role;
71317+ struct file *exec_file;
71318+ unsigned long brute_expires;
71319+ u16 acl_role_id;
71320+ /* is this the task that authenticated to the special role */
71321+ u8 acl_sp_role;
71322+ u8 is_writable;
71323+ u8 brute;
71324+ u8 gr_is_chrooted;
71325+#endif
71326+
71327 };
71328
71329+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
71330+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
71331+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
71332+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
71333+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
71334+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
71335+
71336+#ifdef CONFIG_PAX_SOFTMODE
71337+extern int pax_softmode;
71338+#endif
71339+
71340+extern int pax_check_flags(unsigned long *);
71341+
71342+/* if tsk != current then task_lock must be held on it */
71343+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
71344+static inline unsigned long pax_get_flags(struct task_struct *tsk)
71345+{
71346+ if (likely(tsk->mm))
71347+ return tsk->mm->pax_flags;
71348+ else
71349+ return 0UL;
71350+}
71351+
71352+/* if tsk != current then task_lock must be held on it */
71353+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
71354+{
71355+ if (likely(tsk->mm)) {
71356+ tsk->mm->pax_flags = flags;
71357+ return 0;
71358+ }
71359+ return -EINVAL;
71360+}
71361+#endif
71362+
71363+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
71364+extern void pax_set_initial_flags(struct linux_binprm *bprm);
71365+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
71366+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
71367+#endif
71368+
71369+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
71370+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
71371+extern void pax_report_refcount_overflow(struct pt_regs *regs);
71372+
71373 /* Future-safe accessor for struct task_struct's cpus_allowed. */
71374 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
71375
71376@@ -1696,7 +1799,7 @@ struct pid_namespace;
71377 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
71378 struct pid_namespace *ns);
71379
71380-static inline pid_t task_pid_nr(struct task_struct *tsk)
71381+static inline pid_t task_pid_nr(const struct task_struct *tsk)
71382 {
71383 return tsk->pid;
71384 }
71385@@ -2155,7 +2258,9 @@ void yield(void);
71386 extern struct exec_domain default_exec_domain;
71387
71388 union thread_union {
71389+#ifndef CONFIG_X86
71390 struct thread_info thread_info;
71391+#endif
71392 unsigned long stack[THREAD_SIZE/sizeof(long)];
71393 };
71394
71395@@ -2188,6 +2293,7 @@ extern struct pid_namespace init_pid_ns;
71396 */
71397
71398 extern struct task_struct *find_task_by_vpid(pid_t nr);
71399+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
71400 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
71401 struct pid_namespace *ns);
71402
71403@@ -2344,7 +2450,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
71404 extern void exit_itimers(struct signal_struct *);
71405 extern void flush_itimer_signals(void);
71406
71407-extern void do_group_exit(int);
71408+extern __noreturn void do_group_exit(int);
71409
71410 extern int allow_signal(int);
71411 extern int disallow_signal(int);
71412@@ -2545,9 +2651,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
71413
71414 #endif
71415
71416-static inline int object_is_on_stack(void *obj)
71417+static inline int object_starts_on_stack(void *obj)
71418 {
71419- void *stack = task_stack_page(current);
71420+ const void *stack = task_stack_page(current);
71421
71422 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
71423 }
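
[Editorial sketch] The MF_PAX_* constants above are single bits in mm->pax_flags, so enabling, disabling, and querying features is ordinary bit arithmetic, which is also why pax_check_flags() can validate a whole mask at once. A trivial standalone sketch of the flag-word handling; only the three constants are reused from the patch:

#include <stdio.h>

#define MF_PAX_PAGEEXEC	0x01000000UL	/* values as defined above */
#define MF_PAX_MPROTECT	0x04000000UL
#define MF_PAX_RANDMMAP	0x08000000UL

int main(void)
{
	unsigned long pax_flags = 0;

	pax_flags |= MF_PAX_PAGEEXEC | MF_PAX_RANDMMAP;	/* enable two bits */
	pax_flags &= ~MF_PAX_MPROTECT;			/* force one off */

	printf("RANDMMAP %s\n", (pax_flags & MF_PAX_RANDMMAP) ? "on" : "off");
	printf("MPROTECT %s\n", (pax_flags & MF_PAX_MPROTECT) ? "on" : "off");
	return 0;
}
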
71424diff --git a/include/linux/security.h b/include/linux/security.h
71425index eee7478..290f7ba 100644
71426--- a/include/linux/security.h
71427+++ b/include/linux/security.h
71428@@ -26,6 +26,7 @@
71429 #include <linux/capability.h>
71430 #include <linux/slab.h>
71431 #include <linux/err.h>
71432+#include <linux/grsecurity.h>
71433
71434 struct linux_binprm;
71435 struct cred;
71436diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
71437index 68a04a3..866e6a1 100644
71438--- a/include/linux/seq_file.h
71439+++ b/include/linux/seq_file.h
71440@@ -26,6 +26,9 @@ struct seq_file {
71441 struct mutex lock;
71442 const struct seq_operations *op;
71443 int poll_event;
71444+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71445+ u64 exec_id;
71446+#endif
71447 #ifdef CONFIG_USER_NS
71448 struct user_namespace *user_ns;
71449 #endif
71450@@ -38,6 +41,7 @@ struct seq_operations {
71451 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
71452 int (*show) (struct seq_file *m, void *v);
71453 };
71454+typedef struct seq_operations __no_const seq_operations_no_const;
71455
71456 #define SEQ_SKIP 1
71457
71458diff --git a/include/linux/shm.h b/include/linux/shm.h
71459index 429c199..4d42e38 100644
71460--- a/include/linux/shm.h
71461+++ b/include/linux/shm.h
71462@@ -21,6 +21,10 @@ struct shmid_kernel /* private to the kernel */
71463
71464 /* The task created the shm object. NULL if the task is dead. */
71465 struct task_struct *shm_creator;
71466+#ifdef CONFIG_GRKERNSEC
71467+ time_t shm_createtime;
71468+ pid_t shm_lapid;
71469+#endif
71470 };
71471
71472 /* shm_mode upper byte flags */
71473diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
71474index 98399e2..7c74c41 100644
71475--- a/include/linux/skbuff.h
71476+++ b/include/linux/skbuff.h
71477@@ -590,7 +590,7 @@ extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
71478 extern struct sk_buff *__alloc_skb(unsigned int size,
71479 gfp_t priority, int flags, int node);
71480 extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
71481-static inline struct sk_buff *alloc_skb(unsigned int size,
71482+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
71483 gfp_t priority)
71484 {
71485 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
71486@@ -700,7 +700,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
71487 */
71488 static inline int skb_queue_empty(const struct sk_buff_head *list)
71489 {
71490- return list->next == (struct sk_buff *)list;
71491+ return list->next == (const struct sk_buff *)list;
71492 }
71493
71494 /**
71495@@ -713,7 +713,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
71496 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
71497 const struct sk_buff *skb)
71498 {
71499- return skb->next == (struct sk_buff *)list;
71500+ return skb->next == (const struct sk_buff *)list;
71501 }
71502
71503 /**
71504@@ -726,7 +726,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
71505 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
71506 const struct sk_buff *skb)
71507 {
71508- return skb->prev == (struct sk_buff *)list;
71509+ return skb->prev == (const struct sk_buff *)list;
71510 }
71511
71512 /**
71513@@ -1727,7 +1727,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
71514 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
71515 */
71516 #ifndef NET_SKB_PAD
71517-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
71518+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
71519 #endif
71520
71521 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
71522@@ -2305,7 +2305,7 @@ extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
71523 int noblock, int *err);
71524 extern unsigned int datagram_poll(struct file *file, struct socket *sock,
71525 struct poll_table_struct *wait);
71526-extern int skb_copy_datagram_iovec(const struct sk_buff *from,
71527+extern int __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from,
71528 int offset, struct iovec *to,
71529 int size);
71530 extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
71531@@ -2595,6 +2595,9 @@ static inline void nf_reset(struct sk_buff *skb)
71532 nf_bridge_put(skb->nf_bridge);
71533 skb->nf_bridge = NULL;
71534 #endif
71535+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
71536+ skb->nf_trace = 0;
71537+#endif
71538 }
71539
71540 /* Note: This doesn't put any conntrack and bridge info in dst. */
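
[Editorial sketch] The NET_SKB_PAD change swaps a bare 32 for _AC(32,UL) so that both operands of max() share the type unsigned long: the kernel's max() is deliberately type-checked and complains about mixed int/unsigned long arguments. A simplified sketch of such a type-checked max using GCC statement expressions; MAX_CHECKED is an illustrative name, not the kernel macro:

#include <stdio.h>

/* Simplified type-checked max in the spirit of the kernel macro: the
 * pointer comparison makes the compiler warn when operand types differ. */
#define MAX_CHECKED(x, y) ({			\
	typeof(x) _x = (x);			\
	typeof(y) _y = (y);			\
	(void)(&_x == &_y);			\
	_x > _y ? _x : _y; })

int main(void)
{
	unsigned long cache_bytes = 64;

	/* 32UL matches unsigned long; a bare 32 (int) would draw a
	 * -Wcompare-distinct-pointer-types warning from the check above. */
	printf("pad = %lu\n", MAX_CHECKED(32UL, cache_bytes));
	return 0;
}
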
71541diff --git a/include/linux/slab.h b/include/linux/slab.h
71542index 5d168d7..720bff3 100644
71543--- a/include/linux/slab.h
71544+++ b/include/linux/slab.h
71545@@ -12,13 +12,20 @@
71546 #include <linux/gfp.h>
71547 #include <linux/types.h>
71548 #include <linux/workqueue.h>
71549-
71550+#include <linux/err.h>
71551
71552 /*
71553 * Flags to pass to kmem_cache_create().
71554 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
71555 */
71556 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
71557+
71558+#ifdef CONFIG_PAX_USERCOPY_SLABS
71559+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
71560+#else
71561+#define SLAB_USERCOPY 0x00000000UL
71562+#endif
71563+
71564 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
71565 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
71566 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
71567@@ -89,10 +96,13 @@
71568 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
71569 * Both make kfree a no-op.
71570 */
71571-#define ZERO_SIZE_PTR ((void *)16)
71572+#define ZERO_SIZE_PTR \
71573+({ \
71574+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
71575+ (void *)(-MAX_ERRNO-1L); \
71576+})
71577
71578-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
71579- (unsigned long)ZERO_SIZE_PTR)
71580+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
71581
71582 /*
71583 * Common fields provided in kmem_cache by all slab allocators
71584@@ -112,7 +122,7 @@ struct kmem_cache {
71585 unsigned int align; /* Alignment as calculated */
71586 unsigned long flags; /* Active flags on the slab */
71587 const char *name; /* Slab name for sysfs */
71588- int refcount; /* Use counter */
71589+ atomic_t refcount; /* Use counter */
71590 void (*ctor)(void *); /* Called on object slot creation */
71591 struct list_head list; /* List of all slab caches on the system */
71592 };
71593@@ -232,6 +242,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
71594 void kfree(const void *);
71595 void kzfree(const void *);
71596 size_t ksize(const void *);
71597+const char *check_heap_object(const void *ptr, unsigned long n);
71598+bool is_usercopy_object(const void *ptr);
71599
71600 /*
71601 * Allocator specific definitions. These are mainly used to establish optimized
71602@@ -311,6 +323,7 @@ size_t ksize(const void *);
71603 * for general use, and so are not documented here. For a full list of
71604 * potential flags, always refer to linux/gfp.h.
71605 */
71606+
71607 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
71608 {
71609 if (size != 0 && n > SIZE_MAX / size)
71610@@ -370,7 +383,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
71611 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
71612 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
71613 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
71614-extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
71615+extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
71616 #define kmalloc_track_caller(size, flags) \
71617 __kmalloc_track_caller(size, flags, _RET_IP_)
71618 #else
71619@@ -390,7 +403,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
71620 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
71621 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
71622 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
71623-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
71624+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
71625 #define kmalloc_node_track_caller(size, flags, node) \
71626 __kmalloc_node_track_caller(size, flags, node, \
71627 _RET_IP_)
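
[Editorial sketch] The reworked ZERO_SIZE_PTR parks the zero-length-allocation cookie at -MAX_ERRNO-1, just below the error-pointer range, and the new ZERO_OR_NULL_PTR relies on unsigned wrap-around: subtracting 1 maps NULL to ULONG_MAX, so a single comparison covers NULL, the cookie, and everything above it. A user-space sketch of the wrap-around test, taking MAX_ERRNO as 4095 as in the kernel:

#include <stdio.h>

#define MAX_ERRNO	4095UL
#define ZERO_SIZE_PTR	((void *)(-MAX_ERRNO - 1L))

/* x - 1 wraps NULL to ULONG_MAX, so one unsigned comparison flags both
 * NULL and everything at or above the ZERO_SIZE_PTR cookie. */
#define ZERO_OR_NULL_PTR(x) \
	((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)

int main(void)
{
	int obj;

	printf("NULL          -> %d\n", ZERO_OR_NULL_PTR((void *)0));	  /* 1 */
	printf("ZERO_SIZE_PTR -> %d\n", ZERO_OR_NULL_PTR(ZERO_SIZE_PTR)); /* 1 */
	printf("real pointer  -> %d\n", ZERO_OR_NULL_PTR(&obj));	  /* 0 */
	return 0;
}
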
71628diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
71629index 8bb6e0e..8eb0dbe 100644
71630--- a/include/linux/slab_def.h
71631+++ b/include/linux/slab_def.h
71632@@ -52,7 +52,7 @@ struct kmem_cache {
71633 /* 4) cache creation/removal */
71634 const char *name;
71635 struct list_head list;
71636- int refcount;
71637+ atomic_t refcount;
71638 int object_size;
71639 int align;
71640
71641@@ -68,10 +68,10 @@ struct kmem_cache {
71642 unsigned long node_allocs;
71643 unsigned long node_frees;
71644 unsigned long node_overflow;
71645- atomic_t allochit;
71646- atomic_t allocmiss;
71647- atomic_t freehit;
71648- atomic_t freemiss;
71649+ atomic_unchecked_t allochit;
71650+ atomic_unchecked_t allocmiss;
71651+ atomic_unchecked_t freehit;
71652+ atomic_unchecked_t freemiss;
71653
71654 /*
71655 * If debugging is enabled, then the allocator can add additional
71656@@ -111,11 +111,16 @@ struct cache_sizes {
71657 #ifdef CONFIG_ZONE_DMA
71658 struct kmem_cache *cs_dmacachep;
71659 #endif
71660+
71661+#ifdef CONFIG_PAX_USERCOPY_SLABS
71662+ struct kmem_cache *cs_usercopycachep;
71663+#endif
71664+
71665 };
71666 extern struct cache_sizes malloc_sizes[];
71667
71668 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
71669-void *__kmalloc(size_t size, gfp_t flags);
71670+void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
71671
71672 #ifdef CONFIG_TRACING
71673 extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
71674@@ -152,6 +157,13 @@ found:
71675 cachep = malloc_sizes[i].cs_dmacachep;
71676 else
71677 #endif
71678+
71679+#ifdef CONFIG_PAX_USERCOPY_SLABS
71680+ if (flags & GFP_USERCOPY)
71681+ cachep = malloc_sizes[i].cs_usercopycachep;
71682+ else
71683+#endif
71684+
71685 cachep = malloc_sizes[i].cs_cachep;
71686
71687 ret = kmem_cache_alloc_trace(cachep, flags, size);
71688@@ -162,7 +174,7 @@ found:
71689 }
71690
71691 #ifdef CONFIG_NUMA
71692-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
71693+extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
71694 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
71695
71696 #ifdef CONFIG_TRACING
71697@@ -205,6 +217,13 @@ found:
71698 cachep = malloc_sizes[i].cs_dmacachep;
71699 else
71700 #endif
71701+
71702+#ifdef CONFIG_PAX_USERCOPY_SLABS
71703+ if (flags & GFP_USERCOPY)
71704+ cachep = malloc_sizes[i].cs_usercopycachep;
71705+ else
71706+#endif
71707+
71708 cachep = malloc_sizes[i].cs_cachep;
71709
71710 return kmem_cache_alloc_node_trace(cachep, flags, node, size);
71711diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
71712index f28e14a..7831211 100644
71713--- a/include/linux/slob_def.h
71714+++ b/include/linux/slob_def.h
71715@@ -11,7 +11,7 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
71716 return kmem_cache_alloc_node(cachep, flags, NUMA_NO_NODE);
71717 }
71718
71719-void *__kmalloc_node(size_t size, gfp_t flags, int node);
71720+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
71721
71722 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
71723 {
71724@@ -31,7 +31,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
71725 return __kmalloc_node(size, flags, NUMA_NO_NODE);
71726 }
71727
71728-static __always_inline void *__kmalloc(size_t size, gfp_t flags)
71729+static __always_inline __size_overflow(1) void *__kmalloc(size_t size, gfp_t flags)
71730 {
71731 return kmalloc(size, flags);
71732 }
71733diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
71734index 9db4825..ed42fb5 100644
71735--- a/include/linux/slub_def.h
71736+++ b/include/linux/slub_def.h
71737@@ -91,7 +91,7 @@ struct kmem_cache {
71738 struct kmem_cache_order_objects max;
71739 struct kmem_cache_order_objects min;
71740 gfp_t allocflags; /* gfp flags to use on each alloc */
71741- int refcount; /* Refcount for slab cache destroy */
71742+ atomic_t refcount; /* Refcount for slab cache destroy */
71743 void (*ctor)(void *);
71744 int inuse; /* Offset to metadata */
71745 int align; /* Alignment */
71746@@ -156,7 +156,7 @@ extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
71747 * Sorry that the following has to be that ugly but some versions of GCC
71748 * have trouble with constant propagation and loops.
71749 */
71750-static __always_inline int kmalloc_index(size_t size)
71751+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
71752 {
71753 if (!size)
71754 return 0;
71755@@ -221,7 +221,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
71756 }
71757
71758 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
71759-void *__kmalloc(size_t size, gfp_t flags);
71760+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
71761
71762 static __always_inline void *
71763 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
71764@@ -265,7 +265,7 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
71765 }
71766 #endif
71767
71768-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
71769+static __always_inline __size_overflow(1) void *kmalloc_large(size_t size, gfp_t flags)
71770 {
71771 unsigned int order = get_order(size);
71772 return kmalloc_order_trace(size, flags, order);
71773@@ -290,7 +290,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
71774 }
71775
71776 #ifdef CONFIG_NUMA
71777-void *__kmalloc_node(size_t size, gfp_t flags, int node);
71778+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
71779 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
71780
71781 #ifdef CONFIG_TRACING
71782diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
71783index e8d702e..0a56eb4 100644
71784--- a/include/linux/sock_diag.h
71785+++ b/include/linux/sock_diag.h
71786@@ -10,7 +10,7 @@ struct sock;
71787 struct sock_diag_handler {
71788 __u8 family;
71789 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
71790-};
71791+} __do_const;
71792
71793 int sock_diag_register(const struct sock_diag_handler *h);
71794 void sock_diag_unregister(const struct sock_diag_handler *h);
71795diff --git a/include/linux/sonet.h b/include/linux/sonet.h
71796index 680f9a3..f13aeb0 100644
71797--- a/include/linux/sonet.h
71798+++ b/include/linux/sonet.h
71799@@ -7,7 +7,7 @@
71800 #include <uapi/linux/sonet.h>
71801
71802 struct k_sonet_stats {
71803-#define __HANDLE_ITEM(i) atomic_t i
71804+#define __HANDLE_ITEM(i) atomic_unchecked_t i
71805 __SONET_ITEMS
71806 #undef __HANDLE_ITEM
71807 };
71808diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
71809index 34206b8..3db7f1c 100644
71810--- a/include/linux/sunrpc/clnt.h
71811+++ b/include/linux/sunrpc/clnt.h
71812@@ -96,7 +96,7 @@ struct rpc_procinfo {
71813 unsigned int p_timer; /* Which RTT timer to use */
71814 u32 p_statidx; /* Which procedure to account */
71815 const char * p_name; /* name of procedure */
71816-};
71817+} __do_const;
71818
71819 #ifdef __KERNEL__
71820
71821@@ -176,9 +176,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
71822 {
71823 switch (sap->sa_family) {
71824 case AF_INET:
71825- return ntohs(((struct sockaddr_in *)sap)->sin_port);
71826+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
71827 case AF_INET6:
71828- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
71829+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
71830 }
71831 return 0;
71832 }
71833@@ -211,7 +211,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
71834 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
71835 const struct sockaddr *src)
71836 {
71837- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
71838+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
71839 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
71840
71841 dsin->sin_family = ssin->sin_family;
71842@@ -314,7 +314,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
71843 if (sa->sa_family != AF_INET6)
71844 return 0;
71845
71846- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
71847+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
71848 }
71849
71850 #endif /* __KERNEL__ */
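/*
 * Sketch, not part of the patch: the effect of tagging an ops structure
 * __do_const.  Under the constify plugin the struct is treated as if
 * every instance were declared const and placed in read-only data, so a
 * kernel write primitive cannot retarget its function pointers.  A
 * conforming instance is then simply written as const (values and
 * example_* names illustrative):
 */
static int example_dump(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	return 0;
}

static const struct sock_diag_handler example_diag_handler = {
	.family	= AF_UNSPEC,
	.dump	= example_dump,
};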
71851diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
71852index 676ddf5..4c519a1 100644
71853--- a/include/linux/sunrpc/svc.h
71854+++ b/include/linux/sunrpc/svc.h
71855@@ -410,7 +410,7 @@ struct svc_procedure {
71856 unsigned int pc_count; /* call count */
71857 unsigned int pc_cachetype; /* cache info (NFS) */
71858 unsigned int pc_xdrressize; /* maximum size of XDR reply */
71859-};
71860+} __do_const;
71861
71862 /*
71863 * Function prototypes.
71864diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
71865index 0b8e3e6..33e0a01 100644
71866--- a/include/linux/sunrpc/svc_rdma.h
71867+++ b/include/linux/sunrpc/svc_rdma.h
71868@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
71869 extern unsigned int svcrdma_max_requests;
71870 extern unsigned int svcrdma_max_req_size;
71871
71872-extern atomic_t rdma_stat_recv;
71873-extern atomic_t rdma_stat_read;
71874-extern atomic_t rdma_stat_write;
71875-extern atomic_t rdma_stat_sq_starve;
71876-extern atomic_t rdma_stat_rq_starve;
71877-extern atomic_t rdma_stat_rq_poll;
71878-extern atomic_t rdma_stat_rq_prod;
71879-extern atomic_t rdma_stat_sq_poll;
71880-extern atomic_t rdma_stat_sq_prod;
71881+extern atomic_unchecked_t rdma_stat_recv;
71882+extern atomic_unchecked_t rdma_stat_read;
71883+extern atomic_unchecked_t rdma_stat_write;
71884+extern atomic_unchecked_t rdma_stat_sq_starve;
71885+extern atomic_unchecked_t rdma_stat_rq_starve;
71886+extern atomic_unchecked_t rdma_stat_rq_poll;
71887+extern atomic_unchecked_t rdma_stat_rq_prod;
71888+extern atomic_unchecked_t rdma_stat_sq_poll;
71889+extern atomic_unchecked_t rdma_stat_sq_prod;
71890
71891 #define RPCRDMA_VERSION 1
71892
71893diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
71894index dd74084a..7f509d5 100644
71895--- a/include/linux/sunrpc/svcauth.h
71896+++ b/include/linux/sunrpc/svcauth.h
71897@@ -109,7 +109,7 @@ struct auth_ops {
71898 int (*release)(struct svc_rqst *rq);
71899 void (*domain_release)(struct auth_domain *);
71900 int (*set_client)(struct svc_rqst *rq);
71901-};
71902+} __do_const;
71903
71904 #define SVC_GARBAGE 1
71905 #define SVC_SYSERR 2
71906diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
71907index 071d62c..4ccc7ac 100644
71908--- a/include/linux/swiotlb.h
71909+++ b/include/linux/swiotlb.h
71910@@ -59,7 +59,8 @@ extern void
71911
71912 extern void
71913 swiotlb_free_coherent(struct device *hwdev, size_t size,
71914- void *vaddr, dma_addr_t dma_handle);
71915+ void *vaddr, dma_addr_t dma_handle,
71916+ struct dma_attrs *attrs);
71917
71918 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
71919 unsigned long offset, size_t size,
71920diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
71921index 45e2db2..1635156a 100644
71922--- a/include/linux/syscalls.h
71923+++ b/include/linux/syscalls.h
71924@@ -615,7 +615,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
71925 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
71926 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
71927 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
71928- struct sockaddr __user *, int);
71929+ struct sockaddr __user *, int) __intentional_overflow(0);
71930 asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
71931 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
71932 unsigned int vlen, unsigned flags);
71933diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
71934index 27b3b0b..e093dd9 100644
71935--- a/include/linux/syscore_ops.h
71936+++ b/include/linux/syscore_ops.h
71937@@ -16,7 +16,7 @@ struct syscore_ops {
71938 int (*suspend)(void);
71939 void (*resume)(void);
71940 void (*shutdown)(void);
71941-};
71942+} __do_const;
71943
71944 extern void register_syscore_ops(struct syscore_ops *ops);
71945 extern void unregister_syscore_ops(struct syscore_ops *ops);
71946diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
71947index 14a8ff2..af52bad 100644
71948--- a/include/linux/sysctl.h
71949+++ b/include/linux/sysctl.h
71950@@ -34,13 +34,13 @@ struct ctl_table_root;
71951 struct ctl_table_header;
71952 struct ctl_dir;
71953
71954-typedef struct ctl_table ctl_table;
71955-
71956 typedef int proc_handler (struct ctl_table *ctl, int write,
71957 void __user *buffer, size_t *lenp, loff_t *ppos);
71958
71959 extern int proc_dostring(struct ctl_table *, int,
71960 void __user *, size_t *, loff_t *);
71961+extern int proc_dostring_modpriv(struct ctl_table *, int,
71962+ void __user *, size_t *, loff_t *);
71963 extern int proc_dointvec(struct ctl_table *, int,
71964 void __user *, size_t *, loff_t *);
71965 extern int proc_dointvec_minmax(struct ctl_table *, int,
71966@@ -115,7 +115,9 @@ struct ctl_table
71967 struct ctl_table_poll *poll;
71968 void *extra1;
71969 void *extra2;
71970-};
71971+} __do_const;
71972+typedef struct ctl_table __no_const ctl_table_no_const;
71973+typedef struct ctl_table ctl_table;
71974
71975 struct ctl_node {
71976 struct rb_node node;
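/*
 * Sketch, not part of the patch: the intended split between the two
 * typedefs above.  Static sysctl tables become read-only via __do_const;
 * the rare tables that are built or patched at runtime use the
 * __no_const typedef instead.  The kmemdup pattern mirrors how the
 * net-namespace sysctl code later in this patch uses ctl_table_no_const;
 * example_* names are illustrative.
 */
static ctl_table_no_const *example_clone_table(const struct ctl_table *tmpl,
					       void *data)
{
	ctl_table_no_const *t = kmemdup(tmpl, sizeof(*t), GFP_KERNEL);

	if (t)
		t->data = data;	/* runtime fixup is why it cannot be const */
	return t;
}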
71977diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
71978index 381f06d..dc16cc7 100644
71979--- a/include/linux/sysfs.h
71980+++ b/include/linux/sysfs.h
71981@@ -31,7 +31,8 @@ struct attribute {
71982 struct lock_class_key *key;
71983 struct lock_class_key skey;
71984 #endif
71985-};
71986+} __do_const;
71987+typedef struct attribute __no_const attribute_no_const;
71988
71989 /**
71990 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
71991@@ -59,8 +60,8 @@ struct attribute_group {
71992 umode_t (*is_visible)(struct kobject *,
71993 struct attribute *, int);
71994 struct attribute **attrs;
71995-};
71996-
71997+} __do_const;
71998+typedef struct attribute_group __no_const attribute_group_no_const;
71999
72000
72001 /**
72002@@ -107,7 +108,8 @@ struct bin_attribute {
72003 char *, loff_t, size_t);
72004 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
72005 struct vm_area_struct *vma);
72006-};
72007+} __do_const;
72008+typedef struct bin_attribute __no_const bin_attribute_no_const;
72009
72010 /**
72011 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
72012diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
72013index 7faf933..9b85a0c 100644
72014--- a/include/linux/sysrq.h
72015+++ b/include/linux/sysrq.h
72016@@ -16,6 +16,7 @@
72017
72018 #include <linux/errno.h>
72019 #include <linux/types.h>
72020+#include <linux/compiler.h>
72021
72022 /* Enable/disable SYSRQ support by default (0==no, 1==yes). */
72023 #define SYSRQ_DEFAULT_ENABLE 1
72024@@ -36,7 +37,7 @@ struct sysrq_key_op {
72025 char *help_msg;
72026 char *action_msg;
72027 int enable_mask;
72028-};
72029+} __do_const;
72030
72031 #ifdef CONFIG_MAGIC_SYSRQ
72032
72033diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
72034index e7e0473..7989295 100644
72035--- a/include/linux/thread_info.h
72036+++ b/include/linux/thread_info.h
72037@@ -148,6 +148,15 @@ static inline bool test_and_clear_restore_sigmask(void)
72038 #error "no set_restore_sigmask() provided and default one won't work"
72039 #endif
72040
72041+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user);
72042+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
72043+{
72044+#ifndef CONFIG_PAX_USERCOPY_DEBUG
72045+ if (!__builtin_constant_p(n))
72046+#endif
72047+ __check_object_size(ptr, n, to_user);
72048+}
72049+
72050 #endif /* __KERNEL__ */
72051
72052 #endif /* _LINUX_THREAD_INFO_H */
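/*
 * Sketch, not part of the patch: where check_object_size() sits in a
 * usercopy path.  The arch copy routines call it before moving bytes;
 * __check_object_size() (PAX_USERCOPY) then verifies the kernel pointer
 * spans exactly one slab object or a live stack region.  Constant sizes
 * skip the runtime check because they are validated statically.
 * example_copy_to_user() is illustrative, not the patched x86 code.
 */
static inline unsigned long
example_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	check_object_size(from, n, true);	/* true: data leaves the kernel */
	return __copy_to_user(to, from, n);
}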
72053diff --git a/include/linux/tty.h b/include/linux/tty.h
72054index 8db1b56..c16a040 100644
72055--- a/include/linux/tty.h
72056+++ b/include/linux/tty.h
72057@@ -194,7 +194,7 @@ struct tty_port {
72058 const struct tty_port_operations *ops; /* Port operations */
72059 spinlock_t lock; /* Lock protecting tty field */
72060 int blocked_open; /* Waiting to open */
72061- int count; /* Usage count */
72062+ atomic_t count; /* Usage count */
72063 wait_queue_head_t open_wait; /* Open waiters */
72064 wait_queue_head_t close_wait; /* Close waiters */
72065 wait_queue_head_t delta_msr_wait; /* Modem status change */
72066@@ -490,7 +490,7 @@ extern int tty_port_open(struct tty_port *port,
72067 struct tty_struct *tty, struct file *filp);
72068 static inline int tty_port_users(struct tty_port *port)
72069 {
72070- return port->count + port->blocked_open;
72071+ return atomic_read(&port->count) + port->blocked_open;
72072 }
72073
72074 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
72075diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
72076index dd976cf..e272742 100644
72077--- a/include/linux/tty_driver.h
72078+++ b/include/linux/tty_driver.h
72079@@ -284,7 +284,7 @@ struct tty_operations {
72080 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
72081 #endif
72082 const struct file_operations *proc_fops;
72083-};
72084+} __do_const;
72085
72086 struct tty_driver {
72087 int magic; /* magic number for this structure */
72088diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
72089index fb79dd8d..07d4773 100644
72090--- a/include/linux/tty_ldisc.h
72091+++ b/include/linux/tty_ldisc.h
72092@@ -149,7 +149,7 @@ struct tty_ldisc_ops {
72093
72094 struct module *owner;
72095
72096- int refcount;
72097+ atomic_t refcount;
72098 };
72099
72100 struct tty_ldisc {
72101diff --git a/include/linux/types.h b/include/linux/types.h
72102index 4d118ba..c3ee9bf 100644
72103--- a/include/linux/types.h
72104+++ b/include/linux/types.h
72105@@ -176,10 +176,26 @@ typedef struct {
72106 int counter;
72107 } atomic_t;
72108
72109+#ifdef CONFIG_PAX_REFCOUNT
72110+typedef struct {
72111+ int counter;
72112+} atomic_unchecked_t;
72113+#else
72114+typedef atomic_t atomic_unchecked_t;
72115+#endif
72116+
72117 #ifdef CONFIG_64BIT
72118 typedef struct {
72119 long counter;
72120 } atomic64_t;
72121+
72122+#ifdef CONFIG_PAX_REFCOUNT
72123+typedef struct {
72124+ long counter;
72125+} atomic64_unchecked_t;
72126+#else
72127+typedef atomic64_t atomic64_unchecked_t;
72128+#endif
72129 #endif
72130
72131 struct list_head {
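/*
 * Sketch, not part of the patch: the division of labour between the two
 * types defined above.  With CONFIG_PAX_REFCOUNT, atomic_t operations
 * trap on overflow to stop refcount-overflow exploits; counters that
 * are allowed to wrap (statistics, generation numbers, IP IDs) use
 * atomic_unchecked_t and the *_unchecked operations instead.  example_*
 * names are illustrative.
 */
static atomic_t example_refcount = ATOMIC_INIT(1);	   /* traps on overflow */
static atomic_unchecked_t example_events = ATOMIC_INIT(0); /* may wrap freely */

static void example_get_and_count(void)
{
	atomic_inc(&example_refcount);		/* protected increment */
	atomic_inc_unchecked(&example_events);	/* plain wrapping increment */
}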
72132diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
72133index 5ca0951..ab496a5 100644
72134--- a/include/linux/uaccess.h
72135+++ b/include/linux/uaccess.h
72136@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
72137 long ret; \
72138 mm_segment_t old_fs = get_fs(); \
72139 \
72140- set_fs(KERNEL_DS); \
72141 pagefault_disable(); \
72142- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
72143- pagefault_enable(); \
72144+ set_fs(KERNEL_DS); \
72145+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
72146 set_fs(old_fs); \
72147+ pagefault_enable(); \
72148 ret; \
72149 })
72150
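/*
 * Note on the reordering above (a reading of the change, not patch
 * text): pagefault_disable() now precedes set_fs(KERNEL_DS) and
 * pagefault_enable() follows the restoring set_fs(old_fs), so the
 * widened address limit is never observable outside the no-fault
 * window.  The __force_user cast is the PaX-annotated counterpart of
 * __force for kernels with separated user/kernel address spaces.
 */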
72151diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
72152index 8e522cbc..aa8572d 100644
72153--- a/include/linux/uidgid.h
72154+++ b/include/linux/uidgid.h
72155@@ -197,4 +197,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
72156
72157 #endif /* CONFIG_USER_NS */
72158
72159+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
72160+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
72161+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
72162+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
72163+
72164 #endif /* _LINUX_UIDGID_H */
72165diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
72166index 99c1b4d..562e6f3 100644
72167--- a/include/linux/unaligned/access_ok.h
72168+++ b/include/linux/unaligned/access_ok.h
72169@@ -4,34 +4,34 @@
72170 #include <linux/kernel.h>
72171 #include <asm/byteorder.h>
72172
72173-static inline u16 get_unaligned_le16(const void *p)
72174+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
72175 {
72176- return le16_to_cpup((__le16 *)p);
72177+ return le16_to_cpup((const __le16 *)p);
72178 }
72179
72180-static inline u32 get_unaligned_le32(const void *p)
72181+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
72182 {
72183- return le32_to_cpup((__le32 *)p);
72184+ return le32_to_cpup((const __le32 *)p);
72185 }
72186
72187-static inline u64 get_unaligned_le64(const void *p)
72188+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
72189 {
72190- return le64_to_cpup((__le64 *)p);
72191+ return le64_to_cpup((const __le64 *)p);
72192 }
72193
72194-static inline u16 get_unaligned_be16(const void *p)
72195+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
72196 {
72197- return be16_to_cpup((__be16 *)p);
72198+ return be16_to_cpup((const __be16 *)p);
72199 }
72200
72201-static inline u32 get_unaligned_be32(const void *p)
72202+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
72203 {
72204- return be32_to_cpup((__be32 *)p);
72205+ return be32_to_cpup((const __be32 *)p);
72206 }
72207
72208-static inline u64 get_unaligned_be64(const void *p)
72209+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
72210 {
72211- return be64_to_cpup((__be64 *)p);
72212+ return be64_to_cpup((const __be64 *)p);
72213 }
72214
72215 static inline void put_unaligned_le16(u16 val, void *p)
72216diff --git a/include/linux/usb.h b/include/linux/usb.h
72217index 4d22d0f..8d0e8f8 100644
72218--- a/include/linux/usb.h
72219+++ b/include/linux/usb.h
72220@@ -554,7 +554,7 @@ struct usb_device {
72221 int maxchild;
72222
72223 u32 quirks;
72224- atomic_t urbnum;
72225+ atomic_unchecked_t urbnum;
72226
72227 unsigned long active_duration;
72228
72229@@ -1604,7 +1604,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
72230
72231 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
72232 __u8 request, __u8 requesttype, __u16 value, __u16 index,
72233- void *data, __u16 size, int timeout);
72234+ void *data, __u16 size, int timeout) __intentional_overflow(-1);
72235 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
72236 void *data, int len, int *actual_length, int timeout);
72237 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
72238diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
72239index c5d36c6..108f4f9 100644
72240--- a/include/linux/usb/renesas_usbhs.h
72241+++ b/include/linux/usb/renesas_usbhs.h
72242@@ -39,7 +39,7 @@ enum {
72243 */
72244 struct renesas_usbhs_driver_callback {
72245 int (*notify_hotplug)(struct platform_device *pdev);
72246-};
72247+} __no_const;
72248
72249 /*
72250 * callback functions for platform
72251diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
72252index 5209cfe..b6b215f 100644
72253--- a/include/linux/user_namespace.h
72254+++ b/include/linux/user_namespace.h
72255@@ -21,7 +21,7 @@ struct user_namespace {
72256 struct uid_gid_map uid_map;
72257 struct uid_gid_map gid_map;
72258 struct uid_gid_map projid_map;
72259- struct kref kref;
72260+ atomic_t count;
72261 struct user_namespace *parent;
72262 kuid_t owner;
72263 kgid_t group;
72264@@ -37,18 +37,18 @@ extern struct user_namespace init_user_ns;
72265 static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
72266 {
72267 if (ns)
72268- kref_get(&ns->kref);
72269+ atomic_inc(&ns->count);
72270 return ns;
72271 }
72272
72273 extern int create_user_ns(struct cred *new);
72274 extern int unshare_userns(unsigned long unshare_flags, struct cred **new_cred);
72275-extern void free_user_ns(struct kref *kref);
72276+extern void free_user_ns(struct user_namespace *ns);
72277
72278 static inline void put_user_ns(struct user_namespace *ns)
72279 {
72280- if (ns)
72281- kref_put(&ns->kref, free_user_ns);
72282+ if (ns && atomic_dec_and_test(&ns->count))
72283+ free_user_ns(ns);
72284 }
72285
72286 struct seq_operations;
72287diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
72288index 6f8fbcf..8259001 100644
72289--- a/include/linux/vermagic.h
72290+++ b/include/linux/vermagic.h
72291@@ -25,9 +25,35 @@
72292 #define MODULE_ARCH_VERMAGIC ""
72293 #endif
72294
72295+#ifdef CONFIG_PAX_REFCOUNT
72296+#define MODULE_PAX_REFCOUNT "REFCOUNT "
72297+#else
72298+#define MODULE_PAX_REFCOUNT ""
72299+#endif
72300+
72301+#ifdef CONSTIFY_PLUGIN
72302+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
72303+#else
72304+#define MODULE_CONSTIFY_PLUGIN ""
72305+#endif
72306+
72307+#ifdef STACKLEAK_PLUGIN
72308+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
72309+#else
72310+#define MODULE_STACKLEAK_PLUGIN ""
72311+#endif
72312+
72313+#ifdef CONFIG_GRKERNSEC
72314+#define MODULE_GRSEC "GRSEC "
72315+#else
72316+#define MODULE_GRSEC ""
72317+#endif
72318+
72319 #define VERMAGIC_STRING \
72320 UTS_RELEASE " " \
72321 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
72322 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
72323- MODULE_ARCH_VERMAGIC
72324+ MODULE_ARCH_VERMAGIC \
72325+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
72326+ MODULE_GRSEC
72327
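/*
 * Note, not patch text: extending VERMAGIC_STRING makes the hardening
 * configuration part of the module ABI check.  A module built without,
 * say, CONFIG_PAX_REFCOUNT carries a different vermagic and is refused
 * at load time rather than mixing checked and unchecked atomics at
 * runtime.  With every option enabled the string ends roughly in
 * "... REFCOUNT CONSTIFY_PLUGIN STACKLEAK_PLUGIN GRSEC " (illustrative).
 */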
72328diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
72329index 6071e91..ca6a489 100644
72330--- a/include/linux/vmalloc.h
72331+++ b/include/linux/vmalloc.h
72332@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
72333 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
72334 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
72335 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
72336+
72337+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
72338+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
72339+#endif
72340+
72341 /* bits [20..32] reserved for arch specific ioremap internals */
72342
72343 /*
72344@@ -62,7 +67,7 @@ extern void *vmalloc_32_user(unsigned long size);
72345 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
72346 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
72347 unsigned long start, unsigned long end, gfp_t gfp_mask,
72348- pgprot_t prot, int node, const void *caller);
72349+ pgprot_t prot, int node, const void *caller) __size_overflow(1);
72350 extern void vfree(const void *addr);
72351
72352 extern void *vmap(struct page **pages, unsigned int count,
72353@@ -124,8 +129,8 @@ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
72354 extern void free_vm_area(struct vm_struct *area);
72355
72356 /* for /dev/kmem */
72357-extern long vread(char *buf, char *addr, unsigned long count);
72358-extern long vwrite(char *buf, char *addr, unsigned long count);
72359+extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
72360+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
72361
72362 /*
72363 * Internals. Dont't use..
72364diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
72365index a13291f..af51fa3 100644
72366--- a/include/linux/vmstat.h
72367+++ b/include/linux/vmstat.h
72368@@ -95,18 +95,18 @@ static inline void vm_events_fold_cpu(int cpu)
72369 /*
72370 * Zone based page accounting with per cpu differentials.
72371 */
72372-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
72373+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
72374
72375 static inline void zone_page_state_add(long x, struct zone *zone,
72376 enum zone_stat_item item)
72377 {
72378- atomic_long_add(x, &zone->vm_stat[item]);
72379- atomic_long_add(x, &vm_stat[item]);
72380+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
72381+ atomic_long_add_unchecked(x, &vm_stat[item]);
72382 }
72383
72384 static inline unsigned long global_page_state(enum zone_stat_item item)
72385 {
72386- long x = atomic_long_read(&vm_stat[item]);
72387+ long x = atomic_long_read_unchecked(&vm_stat[item]);
72388 #ifdef CONFIG_SMP
72389 if (x < 0)
72390 x = 0;
72391@@ -117,7 +117,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
72392 static inline unsigned long zone_page_state(struct zone *zone,
72393 enum zone_stat_item item)
72394 {
72395- long x = atomic_long_read(&zone->vm_stat[item]);
72396+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
72397 #ifdef CONFIG_SMP
72398 if (x < 0)
72399 x = 0;
72400@@ -134,7 +134,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
72401 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
72402 enum zone_stat_item item)
72403 {
72404- long x = atomic_long_read(&zone->vm_stat[item]);
72405+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
72406
72407 #ifdef CONFIG_SMP
72408 int cpu;
72409@@ -226,8 +226,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
72410
72411 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
72412 {
72413- atomic_long_inc(&zone->vm_stat[item]);
72414- atomic_long_inc(&vm_stat[item]);
72415+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
72416+ atomic_long_inc_unchecked(&vm_stat[item]);
72417 }
72418
72419 static inline void __inc_zone_page_state(struct page *page,
72420@@ -238,8 +238,8 @@ static inline void __inc_zone_page_state(struct page *page,
72421
72422 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
72423 {
72424- atomic_long_dec(&zone->vm_stat[item]);
72425- atomic_long_dec(&vm_stat[item]);
72426+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
72427+ atomic_long_dec_unchecked(&vm_stat[item]);
72428 }
72429
72430 static inline void __dec_zone_page_state(struct page *page,
72431diff --git a/include/linux/xattr.h b/include/linux/xattr.h
72432index fdbafc6..b7ffd47 100644
72433--- a/include/linux/xattr.h
72434+++ b/include/linux/xattr.h
72435@@ -28,7 +28,7 @@ struct xattr_handler {
72436 size_t size, int handler_flags);
72437 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
72438 size_t size, int flags, int handler_flags);
72439-};
72440+} __do_const;
72441
72442 struct xattr {
72443 char *name;
72444diff --git a/include/linux/zlib.h b/include/linux/zlib.h
72445index 9c5a6b4..09c9438 100644
72446--- a/include/linux/zlib.h
72447+++ b/include/linux/zlib.h
72448@@ -31,6 +31,7 @@
72449 #define _ZLIB_H
72450
72451 #include <linux/zconf.h>
72452+#include <linux/compiler.h>
72453
72454 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
72455 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
72456@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
72457
72458 /* basic functions */
72459
72460-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
72461+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
72462 /*
72463 Returns the number of bytes that needs to be allocated for a per-
72464 stream workspace with the specified parameters. A pointer to this
72465diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
72466index 95d1c91..6798cca 100644
72467--- a/include/media/v4l2-dev.h
72468+++ b/include/media/v4l2-dev.h
72469@@ -76,7 +76,7 @@ struct v4l2_file_operations {
72470 int (*mmap) (struct file *, struct vm_area_struct *);
72471 int (*open) (struct file *);
72472 int (*release) (struct file *);
72473-};
72474+} __do_const;
72475
72476 /*
72477 * Newer version of video_device, handled by videodev2.c
72478diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
72479index 4118ad1..cb7e25f 100644
72480--- a/include/media/v4l2-ioctl.h
72481+++ b/include/media/v4l2-ioctl.h
72482@@ -284,7 +284,6 @@ struct v4l2_ioctl_ops {
72483 bool valid_prio, int cmd, void *arg);
72484 };
72485
72486-
72487 /* v4l debugging and diagnostics */
72488
72489 /* Debug bitmask flags to be used on V4L2 */
72490diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
72491index adcbb20..62c2559 100644
72492--- a/include/net/9p/transport.h
72493+++ b/include/net/9p/transport.h
72494@@ -57,7 +57,7 @@ struct p9_trans_module {
72495 int (*cancel) (struct p9_client *, struct p9_req_t *req);
72496 int (*zc_request)(struct p9_client *, struct p9_req_t *,
72497 char *, char *, int , int, int, int);
72498-};
72499+} __do_const;
72500
72501 void v9fs_register_trans(struct p9_trans_module *m);
72502 void v9fs_unregister_trans(struct p9_trans_module *m);
72503diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
72504index 7588ef4..e62d35f 100644
72505--- a/include/net/bluetooth/l2cap.h
72506+++ b/include/net/bluetooth/l2cap.h
72507@@ -552,7 +552,7 @@ struct l2cap_ops {
72508 void (*defer) (struct l2cap_chan *chan);
72509 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
72510 unsigned long len, int nb);
72511-};
72512+} __do_const;
72513
72514 struct l2cap_conn {
72515 struct hci_conn *hcon;
72516diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
72517index 9e5425b..8136ffc 100644
72518--- a/include/net/caif/cfctrl.h
72519+++ b/include/net/caif/cfctrl.h
72520@@ -52,7 +52,7 @@ struct cfctrl_rsp {
72521 void (*radioset_rsp)(void);
72522 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
72523 struct cflayer *client_layer);
72524-};
72525+} __no_const;
72526
72527 /* Link Setup Parameters for CAIF-Links. */
72528 struct cfctrl_link_param {
72529@@ -101,8 +101,8 @@ struct cfctrl_request_info {
72530 struct cfctrl {
72531 struct cfsrvl serv;
72532 struct cfctrl_rsp res;
72533- atomic_t req_seq_no;
72534- atomic_t rsp_seq_no;
72535+ atomic_unchecked_t req_seq_no;
72536+ atomic_unchecked_t rsp_seq_no;
72537 struct list_head list;
72538 /* Protects from simultaneous access to first_req list */
72539 spinlock_t info_list_lock;
72540diff --git a/include/net/flow.h b/include/net/flow.h
72541index 628e11b..4c475df 100644
72542--- a/include/net/flow.h
72543+++ b/include/net/flow.h
72544@@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
72545
72546 extern void flow_cache_flush(void);
72547 extern void flow_cache_flush_deferred(void);
72548-extern atomic_t flow_cache_genid;
72549+extern atomic_unchecked_t flow_cache_genid;
72550
72551 #endif
72552diff --git a/include/net/genetlink.h b/include/net/genetlink.h
72553index bdfbe68..4402ebe 100644
72554--- a/include/net/genetlink.h
72555+++ b/include/net/genetlink.h
72556@@ -118,7 +118,7 @@ struct genl_ops {
72557 struct netlink_callback *cb);
72558 int (*done)(struct netlink_callback *cb);
72559 struct list_head ops_list;
72560-};
72561+} __do_const;
72562
72563 extern int genl_register_family(struct genl_family *family);
72564 extern int genl_register_family_with_ops(struct genl_family *family,
72565diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
72566index e5062c9..48a9a4b 100644
72567--- a/include/net/gro_cells.h
72568+++ b/include/net/gro_cells.h
72569@@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
72570 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
72571
72572 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
72573- atomic_long_inc(&dev->rx_dropped);
72574+ atomic_long_inc_unchecked(&dev->rx_dropped);
72575 kfree_skb(skb);
72576 return;
72577 }
72578@@ -73,8 +73,8 @@ static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *de
72579 int i;
72580
72581 gcells->gro_cells_mask = roundup_pow_of_two(netif_get_num_default_rss_queues()) - 1;
72582- gcells->cells = kcalloc(sizeof(struct gro_cell),
72583- gcells->gro_cells_mask + 1,
72584+ gcells->cells = kcalloc(gcells->gro_cells_mask + 1,
72585+ sizeof(struct gro_cell),
72586 GFP_KERNEL);
72587 if (!gcells->cells)
72588 return -ENOMEM;
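/*
 * Sketch, not part of the patch: kcalloc() is declared
 * kcalloc(n, size, flags).  The original call had count and element
 * size transposed; the product is identical, but overflow checking
 * treats argument 1 as the element count, so the hunk restores the
 * documented order.  The idiomatic form (example_* illustrative):
 */
struct example_cell {
	int value;
};

static struct example_cell *example_alloc_cells(unsigned int n)
{
	/* n zeroed elements of sizeof(struct example_cell) bytes each */
	return kcalloc(n, sizeof(struct example_cell), GFP_KERNEL);
}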
72589diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
72590index 1832927..ce39aea 100644
72591--- a/include/net/inet_connection_sock.h
72592+++ b/include/net/inet_connection_sock.h
72593@@ -62,7 +62,7 @@ struct inet_connection_sock_af_ops {
72594 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
72595 int (*bind_conflict)(const struct sock *sk,
72596 const struct inet_bind_bucket *tb, bool relax);
72597-};
72598+} __do_const;
72599
72600 /** inet_connection_sock - INET connection oriented sock
72601 *
72602diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
72603index 53f464d..ba76aaa 100644
72604--- a/include/net/inetpeer.h
72605+++ b/include/net/inetpeer.h
72606@@ -47,8 +47,8 @@ struct inet_peer {
72607 */
72608 union {
72609 struct {
72610- atomic_t rid; /* Frag reception counter */
72611- atomic_t ip_id_count; /* IP ID for the next packet */
72612+ atomic_unchecked_t rid; /* Frag reception counter */
72613+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
72614 };
72615 struct rcu_head rcu;
72616 struct inet_peer *gc_next;
72617@@ -182,11 +182,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
72618 more++;
72619 inet_peer_refcheck(p);
72620 do {
72621- old = atomic_read(&p->ip_id_count);
72622+ old = atomic_read_unchecked(&p->ip_id_count);
72623 new = old + more;
72624 if (!new)
72625 new = 1;
72626- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
72627+ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
72628 return new;
72629 }
72630
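/*
 * Sketch, not part of the patch: why rid and ip_id_count become
 * "unchecked".  IP IDs are modular quantities that wrap by design, so
 * PAX_REFCOUNT must not trap them.  The lock-free allocation loop in
 * inet_getid() above, reduced to its core (example_* illustrative):
 */
static int example_next_id(atomic_unchecked_t *ctr, int step)
{
	int old, new;

	do {
		old = atomic_read_unchecked(ctr);
		new = old + step;
		if (!new)	/* zero is skipped, as in inet_getid() */
			new = 1;
	} while (atomic_cmpxchg_unchecked(ctr, old, new) != old);

	return new;
}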
72631diff --git a/include/net/ip.h b/include/net/ip.h
72632index a68f838..74518ab 100644
72633--- a/include/net/ip.h
72634+++ b/include/net/ip.h
72635@@ -202,7 +202,7 @@ extern struct local_ports {
72636 } sysctl_local_ports;
72637 extern void inet_get_local_port_range(int *low, int *high);
72638
72639-extern unsigned long *sysctl_local_reserved_ports;
72640+extern unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
72641 static inline int inet_is_reserved_local_port(int port)
72642 {
72643 return test_bit(port, sysctl_local_reserved_ports);
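/*
 * Sketch, not part of the patch: the sizing of the fixed bitmap above.
 * 65536 ports / 8 bits per byte / sizeof(unsigned long) bytes per word
 * gives 1024 words on 64-bit (2048 on 32-bit), one bit per port.  The
 * static array needs no allocation and no NULL check.  Equivalent shape
 * (example_* illustrative):
 */
static unsigned long example_reserved_ports[65536 / 8 / sizeof(unsigned long)];

static inline int example_port_is_reserved(u16 port)
{
	return test_bit(port, example_reserved_ports);
}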
72644diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
72645index e49db91..76a81de 100644
72646--- a/include/net/ip_fib.h
72647+++ b/include/net/ip_fib.h
72648@@ -167,7 +167,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
72649
72650 #define FIB_RES_SADDR(net, res) \
72651 ((FIB_RES_NH(res).nh_saddr_genid == \
72652- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
72653+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
72654 FIB_RES_NH(res).nh_saddr : \
72655 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
72656 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
72657diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
72658index 68c69d5..bdab192 100644
72659--- a/include/net/ip_vs.h
72660+++ b/include/net/ip_vs.h
72661@@ -599,7 +599,7 @@ struct ip_vs_conn {
72662 struct ip_vs_conn *control; /* Master control connection */
72663 atomic_t n_control; /* Number of controlled ones */
72664 struct ip_vs_dest *dest; /* real server */
72665- atomic_t in_pkts; /* incoming packet counter */
72666+ atomic_unchecked_t in_pkts; /* incoming packet counter */
72667
72668 /* packet transmitter for different forwarding methods. If it
72669 mangles the packet, it must return NF_DROP or better NF_STOLEN,
72670@@ -737,7 +737,7 @@ struct ip_vs_dest {
72671 __be16 port; /* port number of the server */
72672 union nf_inet_addr addr; /* IP address of the server */
72673 volatile unsigned int flags; /* dest status flags */
72674- atomic_t conn_flags; /* flags to copy to conn */
72675+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
72676 atomic_t weight; /* server weight */
72677
72678 atomic_t refcnt; /* reference counter */
72679@@ -980,11 +980,11 @@ struct netns_ipvs {
72680 /* ip_vs_lblc */
72681 int sysctl_lblc_expiration;
72682 struct ctl_table_header *lblc_ctl_header;
72683- struct ctl_table *lblc_ctl_table;
72684+ ctl_table_no_const *lblc_ctl_table;
72685 /* ip_vs_lblcr */
72686 int sysctl_lblcr_expiration;
72687 struct ctl_table_header *lblcr_ctl_header;
72688- struct ctl_table *lblcr_ctl_table;
72689+ ctl_table_no_const *lblcr_ctl_table;
72690 /* ip_vs_est */
72691 struct list_head est_list; /* estimator list */
72692 spinlock_t est_lock;
72693diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
72694index 80ffde3..968b0f4 100644
72695--- a/include/net/irda/ircomm_tty.h
72696+++ b/include/net/irda/ircomm_tty.h
72697@@ -35,6 +35,7 @@
72698 #include <linux/termios.h>
72699 #include <linux/timer.h>
72700 #include <linux/tty.h> /* struct tty_struct */
72701+#include <asm/local.h>
72702
72703 #include <net/irda/irias_object.h>
72704 #include <net/irda/ircomm_core.h>
72705diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
72706index cc7c197..9f2da2a 100644
72707--- a/include/net/iucv/af_iucv.h
72708+++ b/include/net/iucv/af_iucv.h
72709@@ -141,7 +141,7 @@ struct iucv_sock {
72710 struct iucv_sock_list {
72711 struct hlist_head head;
72712 rwlock_t lock;
72713- atomic_t autobind_name;
72714+ atomic_unchecked_t autobind_name;
72715 };
72716
72717 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
72718diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
72719index df83f69..9b640b8 100644
72720--- a/include/net/llc_c_ac.h
72721+++ b/include/net/llc_c_ac.h
72722@@ -87,7 +87,7 @@
72723 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
72724 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
72725
72726-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
72727+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
72728
72729 extern int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
72730 extern int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
72731diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
72732index 6ca3113..f8026dd 100644
72733--- a/include/net/llc_c_ev.h
72734+++ b/include/net/llc_c_ev.h
72735@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
72736 return (struct llc_conn_state_ev *)skb->cb;
72737 }
72738
72739-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
72740-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
72741+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
72742+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
72743
72744 extern int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
72745 extern int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
72746diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
72747index 0e79cfb..f46db31 100644
72748--- a/include/net/llc_c_st.h
72749+++ b/include/net/llc_c_st.h
72750@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
72751 u8 next_state;
72752 llc_conn_ev_qfyr_t *ev_qualifiers;
72753 llc_conn_action_t *ev_actions;
72754-};
72755+} __do_const;
72756
72757 struct llc_conn_state {
72758 u8 current_state;
72759diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
72760index 37a3bbd..55a4241 100644
72761--- a/include/net/llc_s_ac.h
72762+++ b/include/net/llc_s_ac.h
72763@@ -23,7 +23,7 @@
72764 #define SAP_ACT_TEST_IND 9
72765
72766 /* All action functions must look like this */
72767-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
72768+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
72769
72770 extern int llc_sap_action_unitdata_ind(struct llc_sap *sap,
72771 struct sk_buff *skb);
72772diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
72773index 567c681..cd73ac0 100644
72774--- a/include/net/llc_s_st.h
72775+++ b/include/net/llc_s_st.h
72776@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
72777 llc_sap_ev_t ev;
72778 u8 next_state;
72779 llc_sap_action_t *ev_actions;
72780-};
72781+} __do_const;
72782
72783 struct llc_sap_state {
72784 u8 curr_state;
72785diff --git a/include/net/mac80211.h b/include/net/mac80211.h
72786index ee50c5e..1bc3b1a 100644
72787--- a/include/net/mac80211.h
72788+++ b/include/net/mac80211.h
72789@@ -3996,7 +3996,7 @@ struct rate_control_ops {
72790 void (*add_sta_debugfs)(void *priv, void *priv_sta,
72791 struct dentry *dir);
72792 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
72793-};
72794+} __do_const;
72795
72796 static inline int rate_supported(struct ieee80211_sta *sta,
72797 enum ieee80211_band band,
72798diff --git a/include/net/neighbour.h b/include/net/neighbour.h
72799index 0dab173..1b76af0 100644
72800--- a/include/net/neighbour.h
72801+++ b/include/net/neighbour.h
72802@@ -123,7 +123,7 @@ struct neigh_ops {
72803 void (*error_report)(struct neighbour *, struct sk_buff *);
72804 int (*output)(struct neighbour *, struct sk_buff *);
72805 int (*connected_output)(struct neighbour *, struct sk_buff *);
72806-};
72807+} __do_const;
72808
72809 struct pneigh_entry {
72810 struct pneigh_entry *next;
72811diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
72812index de644bc..dfbcc4c 100644
72813--- a/include/net/net_namespace.h
72814+++ b/include/net/net_namespace.h
72815@@ -115,7 +115,7 @@ struct net {
72816 #endif
72817 struct netns_ipvs *ipvs;
72818 struct sock *diag_nlsk;
72819- atomic_t rt_genid;
72820+ atomic_unchecked_t rt_genid;
72821 };
72822
72823 /*
72824@@ -272,7 +272,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
72825 #define __net_init __init
72826 #define __net_exit __exit_refok
72827 #define __net_initdata __initdata
72828+#ifdef CONSTIFY_PLUGIN
72829 #define __net_initconst __initconst
72830+#else
72831+#define __net_initconst __initdata
72832+#endif
72833 #endif
72834
72835 struct pernet_operations {
72836@@ -282,7 +286,7 @@ struct pernet_operations {
72837 void (*exit_batch)(struct list_head *net_exit_list);
72838 int *id;
72839 size_t size;
72840-};
72841+} __do_const;
72842
72843 /*
72844 * Use these carefully. If you implement a network device and it
72845@@ -330,12 +334,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
72846
72847 static inline int rt_genid(struct net *net)
72848 {
72849- return atomic_read(&net->rt_genid);
72850+ return atomic_read_unchecked(&net->rt_genid);
72851 }
72852
72853 static inline void rt_genid_bump(struct net *net)
72854 {
72855- atomic_inc(&net->rt_genid);
72856+ atomic_inc_unchecked(&net->rt_genid);
72857 }
72858
72859 #endif /* __NET_NET_NAMESPACE_H */
72860diff --git a/include/net/netdma.h b/include/net/netdma.h
72861index 8ba8ce2..99b7fff 100644
72862--- a/include/net/netdma.h
72863+++ b/include/net/netdma.h
72864@@ -24,7 +24,7 @@
72865 #include <linux/dmaengine.h>
72866 #include <linux/skbuff.h>
72867
72868-int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
72869+int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan,
72870 struct sk_buff *skb, int offset, struct iovec *to,
72871 size_t len, struct dma_pinned_list *pinned_list);
72872
72873diff --git a/include/net/netlink.h b/include/net/netlink.h
72874index 9690b0f..87aded7 100644
72875--- a/include/net/netlink.h
72876+++ b/include/net/netlink.h
72877@@ -534,7 +534,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
72878 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
72879 {
72880 if (mark)
72881- skb_trim(skb, (unsigned char *) mark - skb->data);
72882+ skb_trim(skb, (const unsigned char *) mark - skb->data);
72883 }
72884
72885 /**
72886diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
72887index 923cb20..deae816 100644
72888--- a/include/net/netns/conntrack.h
72889+++ b/include/net/netns/conntrack.h
72890@@ -12,10 +12,10 @@ struct nf_conntrack_ecache;
72891 struct nf_proto_net {
72892 #ifdef CONFIG_SYSCTL
72893 struct ctl_table_header *ctl_table_header;
72894- struct ctl_table *ctl_table;
72895+ ctl_table_no_const *ctl_table;
72896 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
72897 struct ctl_table_header *ctl_compat_header;
72898- struct ctl_table *ctl_compat_table;
72899+ ctl_table_no_const *ctl_compat_table;
72900 #endif
72901 #endif
72902 unsigned int users;
72903@@ -58,7 +58,7 @@ struct nf_ip_net {
72904 struct nf_icmp_net icmpv6;
72905 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
72906 struct ctl_table_header *ctl_table_header;
72907- struct ctl_table *ctl_table;
72908+ ctl_table_no_const *ctl_table;
72909 #endif
72910 };
72911
72912diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
72913index 2ae2b83..dbdc85e 100644
72914--- a/include/net/netns/ipv4.h
72915+++ b/include/net/netns/ipv4.h
72916@@ -64,7 +64,7 @@ struct netns_ipv4 {
72917 kgid_t sysctl_ping_group_range[2];
72918 long sysctl_tcp_mem[3];
72919
72920- atomic_t dev_addr_genid;
72921+ atomic_unchecked_t dev_addr_genid;
72922
72923 #ifdef CONFIG_IP_MROUTE
72924 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
72925diff --git a/include/net/protocol.h b/include/net/protocol.h
72926index 047c047..b9dad15 100644
72927--- a/include/net/protocol.h
72928+++ b/include/net/protocol.h
72929@@ -44,7 +44,7 @@ struct net_protocol {
72930 void (*err_handler)(struct sk_buff *skb, u32 info);
72931 unsigned int no_policy:1,
72932 netns_ok:1;
72933-};
72934+} __do_const;
72935
72936 #if IS_ENABLED(CONFIG_IPV6)
72937 struct inet6_protocol {
72938@@ -57,7 +57,7 @@ struct inet6_protocol {
72939 u8 type, u8 code, int offset,
72940 __be32 info);
72941 unsigned int flags; /* INET6_PROTO_xxx */
72942-};
72943+} __do_const;
72944
72945 #define INET6_PROTO_NOPOLICY 0x1
72946 #define INET6_PROTO_FINAL 0x2
72947diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
72948index 5a15fab..d799ea7 100644
72949--- a/include/net/rtnetlink.h
72950+++ b/include/net/rtnetlink.h
72951@@ -81,7 +81,7 @@ struct rtnl_link_ops {
72952 const struct net_device *dev);
72953 unsigned int (*get_num_tx_queues)(void);
72954 unsigned int (*get_num_rx_queues)(void);
72955-};
72956+} __do_const;
72957
72958 extern int __rtnl_link_register(struct rtnl_link_ops *ops);
72959 extern void __rtnl_link_unregister(struct rtnl_link_ops *ops);
72960diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
72961index 7fdf298..197e9f7 100644
72962--- a/include/net/sctp/sctp.h
72963+++ b/include/net/sctp/sctp.h
72964@@ -330,9 +330,9 @@ do { \
72965
72966 #else /* SCTP_DEBUG */
72967
72968-#define SCTP_DEBUG_PRINTK(whatever...)
72969-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
72970-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
72971+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
72972+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
72973+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
72974 #define SCTP_ENABLE_DEBUG
72975 #define SCTP_DISABLE_DEBUG
72976 #define SCTP_ASSERT(expr, str, func)
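/*
 * Note, not patch text: defining the disabled debug macros as
 * "do {} while (0)" instead of expanding to nothing keeps them
 * statement-shaped, so dangling-else constructs parse identically with
 * debugging compiled out.  The idiom in isolation:
 */
#define EXAMPLE_DBG(fmt, ...) do {} while (0)	/* swallows the semicolon */

/* if (cond) EXAMPLE_DBG("x"); else f();  -- compiles the same either way */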
72977diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
72978index 2a82d13..62a31c2 100644
72979--- a/include/net/sctp/sm.h
72980+++ b/include/net/sctp/sm.h
72981@@ -87,7 +87,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
72982 typedef struct {
72983 sctp_state_fn_t *fn;
72984 const char *name;
72985-} sctp_sm_table_entry_t;
72986+} __do_const sctp_sm_table_entry_t;
72987
72988 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
72989 * currently in use.
72990@@ -299,7 +299,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
72991 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
72992
72993 /* Extern declarations for major data structures. */
72994-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
72995+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
72996
72997
72998 /* Get the size of a DATA chunk payload. */
72999diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
73000index fdeb85a..1329d95 100644
73001--- a/include/net/sctp/structs.h
73002+++ b/include/net/sctp/structs.h
73003@@ -517,7 +517,7 @@ struct sctp_pf {
73004 struct sctp_association *asoc);
73005 void (*addr_v4map) (struct sctp_sock *, union sctp_addr *);
73006 struct sctp_af *af;
73007-};
73008+} __do_const;
73009
73010
73011 /* Structure to track chunk fragments that have been acked, but peer
73012diff --git a/include/net/sock.h b/include/net/sock.h
73013index 25afaa0..8bb0070 100644
73014--- a/include/net/sock.h
73015+++ b/include/net/sock.h
73016@@ -322,7 +322,7 @@ struct sock {
73017 #ifdef CONFIG_RPS
73018 __u32 sk_rxhash;
73019 #endif
73020- atomic_t sk_drops;
73021+ atomic_unchecked_t sk_drops;
73022 int sk_rcvbuf;
73023
73024 struct sk_filter __rcu *sk_filter;
73025@@ -1781,7 +1781,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
73026 }
73027
73028 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
73029- char __user *from, char *to,
73030+ char __user *from, unsigned char *to,
73031 int copy, int offset)
73032 {
73033 if (skb->ip_summed == CHECKSUM_NONE) {
73034@@ -2040,7 +2040,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
73035 }
73036 }
73037
73038-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
73039+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
73040
73041 /**
73042 * sk_page_frag - return an appropriate page_frag
73043diff --git a/include/net/tcp.h b/include/net/tcp.h
73044index aed42c7..43890c6 100644
73045--- a/include/net/tcp.h
73046+++ b/include/net/tcp.h
73047@@ -530,7 +530,7 @@ extern void tcp_retransmit_timer(struct sock *sk);
73048 extern void tcp_xmit_retransmit_queue(struct sock *);
73049 extern void tcp_simple_retransmit(struct sock *);
73050 extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
73051-extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
73052+extern int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
73053
73054 extern void tcp_send_probe0(struct sock *);
73055 extern void tcp_send_partial(struct sock *);
73056@@ -701,8 +701,8 @@ struct tcp_skb_cb {
73057 struct inet6_skb_parm h6;
73058 #endif
73059 } header; /* For incoming frames */
73060- __u32 seq; /* Starting sequence number */
73061- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
73062+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
73063+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
73064 __u32 when; /* used to compute rtt's */
73065 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
73066
73067@@ -716,7 +716,7 @@ struct tcp_skb_cb {
73068
73069 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
73070 /* 1 byte hole */
73071- __u32 ack_seq; /* Sequence number ACK'd */
73072+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
73073 };
73074
73075 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
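/*
 * Sketch, not part of the patch: TCP sequence numbers are modular
 * 32-bit values, so arithmetic on seq/end_seq/ack_seq wraps by design;
 * the __intentional_overflow() markers exempt these fields from the
 * overflow plugin's instrumentation.  The standard wrap-safe comparison
 * (this is the upstream before() idiom):
 */
static inline int example_seq_before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1 - seq2) < 0;	/* correct across wraparound */
}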
73076diff --git a/include/net/xfrm.h b/include/net/xfrm.h
73077index 63445ed..d6fc34f 100644
73078--- a/include/net/xfrm.h
73079+++ b/include/net/xfrm.h
73080@@ -304,7 +304,7 @@ struct xfrm_policy_afinfo {
73081 struct net_device *dev,
73082 const struct flowi *fl);
73083 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
73084-};
73085+} __do_const;
73086
73087 extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
73088 extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
73089@@ -340,7 +340,7 @@ struct xfrm_state_afinfo {
73090 struct sk_buff *skb);
73091 int (*transport_finish)(struct sk_buff *skb,
73092 int async);
73093-};
73094+} __do_const;
73095
73096 extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
73097 extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
73098@@ -423,7 +423,7 @@ struct xfrm_mode {
73099 struct module *owner;
73100 unsigned int encap;
73101 int flags;
73102-};
73103+} __do_const;
73104
73105 /* Flags for xfrm_mode. */
73106 enum {
73107@@ -514,7 +514,7 @@ struct xfrm_policy {
73108 struct timer_list timer;
73109
73110 struct flow_cache_object flo;
73111- atomic_t genid;
73112+ atomic_unchecked_t genid;
73113 u32 priority;
73114 u32 index;
73115 struct xfrm_mark mark;
73116diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
73117index 1a046b1..ee0bef0 100644
73118--- a/include/rdma/iw_cm.h
73119+++ b/include/rdma/iw_cm.h
73120@@ -122,7 +122,7 @@ struct iw_cm_verbs {
73121 int backlog);
73122
73123 int (*destroy_listen)(struct iw_cm_id *cm_id);
73124-};
73125+} __no_const;
73126
73127 /**
73128 * iw_create_cm_id - Create an IW CM identifier.
73129diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
73130index 399162b..b337f1a 100644
73131--- a/include/scsi/libfc.h
73132+++ b/include/scsi/libfc.h
73133@@ -762,6 +762,7 @@ struct libfc_function_template {
73134 */
73135 void (*disc_stop_final) (struct fc_lport *);
73136 };
73137+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
73138
73139 /**
73140 * struct fc_disc - Discovery context
73141@@ -866,7 +867,7 @@ struct fc_lport {
73142 struct fc_vport *vport;
73143
73144 /* Operational Information */
73145- struct libfc_function_template tt;
73146+ libfc_function_template_no_const tt;
73147 u8 link_up;
73148 u8 qfull;
73149 enum fc_lport_state state;
73150diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
73151index e65c62e..aa2e5a2 100644
73152--- a/include/scsi/scsi_device.h
73153+++ b/include/scsi/scsi_device.h
73154@@ -170,9 +170,9 @@ struct scsi_device {
73155 unsigned int max_device_blocked; /* what device_blocked counts down from */
73156 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
73157
73158- atomic_t iorequest_cnt;
73159- atomic_t iodone_cnt;
73160- atomic_t ioerr_cnt;
73161+ atomic_unchecked_t iorequest_cnt;
73162+ atomic_unchecked_t iodone_cnt;
73163+ atomic_unchecked_t ioerr_cnt;
73164
73165 struct device sdev_gendev,
73166 sdev_dev;
73167diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
73168index b797e8f..8e2c3aa 100644
73169--- a/include/scsi/scsi_transport_fc.h
73170+++ b/include/scsi/scsi_transport_fc.h
73171@@ -751,7 +751,8 @@ struct fc_function_template {
73172 unsigned long show_host_system_hostname:1;
73173
73174 unsigned long disable_target_scan:1;
73175-};
73176+} __do_const;
73177+typedef struct fc_function_template __no_const fc_function_template_no_const;
73178
73179
73180 /**
73181diff --git a/include/sound/soc.h b/include/sound/soc.h
73182index bc56738..a4be132 100644
73183--- a/include/sound/soc.h
73184+++ b/include/sound/soc.h
73185@@ -771,7 +771,7 @@ struct snd_soc_codec_driver {
73186 /* probe ordering - for components with runtime dependencies */
73187 int probe_order;
73188 int remove_order;
73189-};
73190+} __do_const;
73191
73192 /* SoC platform interface */
73193 struct snd_soc_platform_driver {
73194@@ -817,7 +817,7 @@ struct snd_soc_platform_driver {
73195 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
73196 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
73197 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
73198-};
73199+} __do_const;
73200
73201 struct snd_soc_platform {
73202 const char *name;
73203diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
73204index 663e34a..91b306a 100644
73205--- a/include/target/target_core_base.h
73206+++ b/include/target/target_core_base.h
73207@@ -654,7 +654,7 @@ struct se_device {
73208 spinlock_t stats_lock;
73209 /* Active commands on this virtual SE device */
73210 atomic_t simple_cmds;
73211- atomic_t dev_ordered_id;
73212+ atomic_unchecked_t dev_ordered_id;
73213 atomic_t dev_ordered_sync;
73214 atomic_t dev_qf_count;
73215 int export_count;
73216diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
73217new file mode 100644
73218index 0000000..fb634b7
73219--- /dev/null
73220+++ b/include/trace/events/fs.h
73221@@ -0,0 +1,53 @@
73222+#undef TRACE_SYSTEM
73223+#define TRACE_SYSTEM fs
73224+
73225+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
73226+#define _TRACE_FS_H
73227+
73228+#include <linux/fs.h>
73229+#include <linux/tracepoint.h>
73230+
73231+TRACE_EVENT(do_sys_open,
73232+
73233+ TP_PROTO(const char *filename, int flags, int mode),
73234+
73235+ TP_ARGS(filename, flags, mode),
73236+
73237+ TP_STRUCT__entry(
73238+ __string( filename, filename )
73239+ __field( int, flags )
73240+ __field( int, mode )
73241+ ),
73242+
73243+ TP_fast_assign(
73244+ __assign_str(filename, filename);
73245+ __entry->flags = flags;
73246+ __entry->mode = mode;
73247+ ),
73248+
73249+ TP_printk("\"%s\" %x %o",
73250+ __get_str(filename), __entry->flags, __entry->mode)
73251+);
73252+
73253+TRACE_EVENT(open_exec,
73254+
73255+ TP_PROTO(const char *filename),
73256+
73257+ TP_ARGS(filename),
73258+
73259+ TP_STRUCT__entry(
73260+ __string( filename, filename )
73261+ ),
73262+
73263+ TP_fast_assign(
73264+ __assign_str(filename, filename);
73265+ ),
73266+
73267+ TP_printk("\"%s\"",
73268+ __get_str(filename))
73269+);
73270+
73271+#endif /* _TRACE_FS_H */
73272+
73273+/* This part must be outside protection */
73274+#include <trace/define_trace.h>
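/*
 * Sketch, not part of the patch: TRACE_EVENT() above generates
 * trace_do_sys_open() and trace_open_exec() stubs; the fs/ side of this
 * patch is assumed to fire them from the corresponding open paths.  An
 * illustrative call site (example_* names are not from the patch):
 */
static long example_open_hook(const char *filename, int flags, int mode)
{
	trace_do_sys_open(filename, flags, mode);	/* generated stub */
	return 0;
}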
73275diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
73276index 1c09820..7f5ec79 100644
73277--- a/include/trace/events/irq.h
73278+++ b/include/trace/events/irq.h
73279@@ -36,7 +36,7 @@ struct softirq_action;
73280 */
73281 TRACE_EVENT(irq_handler_entry,
73282
73283- TP_PROTO(int irq, struct irqaction *action),
73284+ TP_PROTO(int irq, const struct irqaction *action),
73285
73286 TP_ARGS(irq, action),
73287
73288@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
73289 */
73290 TRACE_EVENT(irq_handler_exit,
73291
73292- TP_PROTO(int irq, struct irqaction *action, int ret),
73293+ TP_PROTO(int irq, const struct irqaction *action, int ret),
73294
73295 TP_ARGS(irq, action, ret),
73296
73297diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
73298index 7caf44c..23c6f27 100644
73299--- a/include/uapi/linux/a.out.h
73300+++ b/include/uapi/linux/a.out.h
73301@@ -39,6 +39,14 @@ enum machine_type {
73302 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
73303 };
73304
73305+/* Constants for the N_FLAGS field */
73306+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
73307+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
73308+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
73309+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
73310+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
73311+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
73312+
73313 #if !defined (N_MAGIC)
73314 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
73315 #endif
73316diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
73317index d876736..ccce5c0 100644
73318--- a/include/uapi/linux/byteorder/little_endian.h
73319+++ b/include/uapi/linux/byteorder/little_endian.h
73320@@ -42,51 +42,51 @@
73321
73322 static inline __le64 __cpu_to_le64p(const __u64 *p)
73323 {
73324- return (__force __le64)*p;
73325+ return (__force const __le64)*p;
73326 }
73327-static inline __u64 __le64_to_cpup(const __le64 *p)
73328+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
73329 {
73330- return (__force __u64)*p;
73331+ return (__force const __u64)*p;
73332 }
73333 static inline __le32 __cpu_to_le32p(const __u32 *p)
73334 {
73335- return (__force __le32)*p;
73336+ return (__force const __le32)*p;
73337 }
73338 static inline __u32 __le32_to_cpup(const __le32 *p)
73339 {
73340- return (__force __u32)*p;
73341+ return (__force const __u32)*p;
73342 }
73343 static inline __le16 __cpu_to_le16p(const __u16 *p)
73344 {
73345- return (__force __le16)*p;
73346+ return (__force const __le16)*p;
73347 }
73348 static inline __u16 __le16_to_cpup(const __le16 *p)
73349 {
73350- return (__force __u16)*p;
73351+ return (__force const __u16)*p;
73352 }
73353 static inline __be64 __cpu_to_be64p(const __u64 *p)
73354 {
73355- return (__force __be64)__swab64p(p);
73356+ return (__force const __be64)__swab64p(p);
73357 }
73358 static inline __u64 __be64_to_cpup(const __be64 *p)
73359 {
73360- return __swab64p((__u64 *)p);
73361+ return __swab64p((const __u64 *)p);
73362 }
73363 static inline __be32 __cpu_to_be32p(const __u32 *p)
73364 {
73365- return (__force __be32)__swab32p(p);
73366+ return (__force const __be32)__swab32p(p);
73367 }
73368-static inline __u32 __be32_to_cpup(const __be32 *p)
73369+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
73370 {
73371- return __swab32p((__u32 *)p);
73372+ return __swab32p((const __u32 *)p);
73373 }
73374 static inline __be16 __cpu_to_be16p(const __u16 *p)
73375 {
73376- return (__force __be16)__swab16p(p);
73377+ return (__force const __be16)__swab16p(p);
73378 }
73379 static inline __u16 __be16_to_cpup(const __be16 *p)
73380 {
73381- return __swab16p((__u16 *)p);
73382+ return __swab16p((const __u16 *)p);
73383 }
73384 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
73385 #define __le64_to_cpus(x) do { (void)(x); } while (0)
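Aside from the const qualifiers, the hunk above tags the load helpers __intentional_overflow(-1) so the size_overflow gcc plugin leaves their arithmetic alone. For reference, this is all __be32_to_cpup computes; a portable userspace sketch of the same operation:

/* What __be32_to_cpup boils down to on a little-endian host: load a
   big-endian 32-bit value and byte-swap it into CPU order. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t swab32(uint32_t x)
{
        return ((x & 0x000000ffu) << 24) |
               ((x & 0x0000ff00u) <<  8) |
               ((x & 0x00ff0000u) >>  8) |
               ((x & 0xff000000u) >> 24);
}

static uint32_t be32_to_cpup(const uint8_t *p)   /* const, as in the patch */
{
        uint32_t raw;
        memcpy(&raw, p, sizeof(raw));            /* alignment-safe load */
        uint16_t probe = 1;
        int little_endian = *(uint8_t *)&probe;  /* runtime endianness check */
        return little_endian ? swab32(raw) : raw;
}

int main(void)
{
        const uint8_t wire[4] = { 0x12, 0x34, 0x56, 0x78 }; /* BE 0x12345678 */
        printf("0x%08x\n", be32_to_cpup(wire));  /* prints 0x12345678 */
        return 0;
}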
73386diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
73387index 126a817..d522bd1 100644
73388--- a/include/uapi/linux/elf.h
73389+++ b/include/uapi/linux/elf.h
73390@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
73391 #define PT_GNU_EH_FRAME 0x6474e550
73392
73393 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
73394+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
73395+
73396+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
73397+
73398+/* Constants for the e_flags field */
73399+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
73400+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
73401+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
73402+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
73403+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
73404+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
73405
73406 /*
73407 * Extended Numbering
73408@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
73409 #define DT_DEBUG 21
73410 #define DT_TEXTREL 22
73411 #define DT_JMPREL 23
73412+#define DT_FLAGS 30
73413+ #define DF_TEXTREL 0x00000004
73414 #define DT_ENCODING 32
73415 #define OLD_DT_LOOS 0x60000000
73416 #define DT_LOOS 0x6000000d
73417@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
73418 #define PF_W 0x2
73419 #define PF_X 0x1
73420
73421+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
73422+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
73423+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
73424+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
73425+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
73426+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
73427+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
73428+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
73429+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
73430+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
73431+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
73432+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
73433+
73434 typedef struct elf32_phdr{
73435 Elf32_Word p_type;
73436 Elf32_Off p_offset;
73437@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
73438 #define EI_OSABI 7
73439 #define EI_PAD 8
73440
73441+#define EI_PAX 14
73442+
73443 #define ELFMAG0 0x7f /* EI_MAG */
73444 #define ELFMAG1 'E'
73445 #define ELFMAG2 'L'
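The PT_PAX_FLAGS bits above come in enable/disable pairs, with neither bit set meaning "use the system default". A hedged sketch of decoding a p_flags word (constants copied from the hunk; the sample value is invented):

/* Decoding the PT_PAX_FLAGS p_flags bits defined in the hunk above. */
#include <stdio.h>
#include <stdint.h>

#define PF_PAGEEXEC   (1U << 4)
#define PF_NOPAGEEXEC (1U << 5)
#define PF_MPROTECT   (1U << 8)
#define PF_NOMPROTECT (1U << 9)
#define PF_RANDMMAP   (1U << 14)
#define PF_NORANDMMAP (1U << 15)

static const char *state(uint32_t flags, uint32_t on, uint32_t off)
{
        if (flags & on)  return "enabled";
        if (flags & off) return "disabled";
        return "default";
}

int main(void)
{
        uint32_t p_flags = PF_MPROTECT | PF_NORANDMMAP;  /* hypothetical */
        printf("PAGEEXEC: %s\n", state(p_flags, PF_PAGEEXEC, PF_NOPAGEEXEC));
        printf("MPROTECT: %s\n", state(p_flags, PF_MPROTECT, PF_NOMPROTECT));
        printf("RANDMMAP: %s\n", state(p_flags, PF_RANDMMAP, PF_NORANDMMAP));
        return 0;
}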
73446diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
73447index aa169c4..6a2771d 100644
73448--- a/include/uapi/linux/personality.h
73449+++ b/include/uapi/linux/personality.h
73450@@ -30,6 +30,7 @@ enum {
73451 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
73452 ADDR_NO_RANDOMIZE | \
73453 ADDR_COMPAT_LAYOUT | \
73454+ ADDR_LIMIT_3GB | \
73455 MMAP_PAGE_ZERO)
73456
73457 /*
73458diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
73459index 7530e74..e714828 100644
73460--- a/include/uapi/linux/screen_info.h
73461+++ b/include/uapi/linux/screen_info.h
73462@@ -43,7 +43,8 @@ struct screen_info {
73463 __u16 pages; /* 0x32 */
73464 __u16 vesa_attributes; /* 0x34 */
73465 __u32 capabilities; /* 0x36 */
73466- __u8 _reserved[6]; /* 0x3a */
73467+ __u16 vesapm_size; /* 0x3a */
73468+ __u8 _reserved[4]; /* 0x3c */
73469 } __attribute__((packed));
73470
73471 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
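The screen_info change above is only safe because the struct is packed and the new __u16 plus the shrunken padding occupy exactly the six bytes the old _reserved[6] did. A compile-time check of that claim, using the hex offsets from the hunk's own comments (C11 _Static_assert; the struct here models just the tail from 0x32 on):

/* Layout check for the modified struct screen_info tail: the new
   vesapm_size sits at 0x3a and the struct still ends at 0x40. */
#include <stddef.h>
#include <stdint.h>

struct screen_info_tail {
        uint16_t pages;            /* 0x32 */
        uint16_t vesa_attributes;  /* 0x34 */
        uint32_t capabilities;     /* 0x36 */
        uint16_t vesapm_size;      /* 0x3a (was part of _reserved[6]) */
        uint8_t  _reserved[4];     /* 0x3c */
} __attribute__((packed));

_Static_assert(offsetof(struct screen_info_tail, vesapm_size) == 0x3a - 0x32,
               "vesapm_size must land where the old padding began");
_Static_assert(sizeof(struct screen_info_tail) == 0x40 - 0x32,
               "overall size must be unchanged");

int main(void) { return 0; }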
73472diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
73473index 0e011eb..82681b1 100644
73474--- a/include/uapi/linux/swab.h
73475+++ b/include/uapi/linux/swab.h
73476@@ -43,7 +43,7 @@
73477 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
73478 */
73479
73480-static inline __attribute_const__ __u16 __fswab16(__u16 val)
73481+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
73482 {
73483 #ifdef __HAVE_BUILTIN_BSWAP16__
73484 return __builtin_bswap16(val);
73485@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
73486 #endif
73487 }
73488
73489-static inline __attribute_const__ __u32 __fswab32(__u32 val)
73490+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
73491 {
73492 #ifdef __HAVE_BUILTIN_BSWAP32__
73493 return __builtin_bswap32(val);
73494@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
73495 #endif
73496 }
73497
73498-static inline __attribute_const__ __u64 __fswab64(__u64 val)
73499+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
73500 {
73501 #ifdef __HAVE_BUILTIN_BSWAP64__
73502 return __builtin_bswap64(val);
73503diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
73504index 6d67213..8dab561 100644
73505--- a/include/uapi/linux/sysctl.h
73506+++ b/include/uapi/linux/sysctl.h
73507@@ -155,7 +155,11 @@ enum
73508 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
73509 };
73510
73511-
73512+#ifdef CONFIG_PAX_SOFTMODE
73513+enum {
73514+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
73515+};
73516+#endif
73517
73518 /* CTL_VM names: */
73519 enum
73520diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
73521index 26607bd..588b65f 100644
73522--- a/include/uapi/linux/xattr.h
73523+++ b/include/uapi/linux/xattr.h
73524@@ -60,5 +60,9 @@
73525 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
73526 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
73527
73528+/* User namespace */
73529+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
73530+#define XATTR_PAX_FLAGS_SUFFIX "flags"
73531+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
73532
73533 #endif /* _UAPI_LINUX_XATTR_H */
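With XATTR_USER_PREFIX being "user.", XATTR_NAME_PAX_FLAGS expands to "user.pax.flags". A minimal sketch querying it from userspace via getxattr(2); the target path is a placeholder:

/* Read the per-file PaX flags xattr named in the hunk above. */
#include <stdio.h>
#include <sys/xattr.h>

int main(int argc, char **argv)
{
        const char *path = argc > 1 ? argv[1] : "./a.out";  /* placeholder */
        char buf[32];
        ssize_t n = getxattr(path, "user.pax.flags", buf, sizeof(buf) - 1);
        if (n < 0) {
                perror("getxattr");  /* typically ENODATA if flags are unset */
                return 1;
        }
        buf[n] = '\0';
        printf("%s: user.pax.flags = %s\n", path, buf);
        return 0;
}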
73534diff --git a/include/video/udlfb.h b/include/video/udlfb.h
73535index f9466fa..f4e2b81 100644
73536--- a/include/video/udlfb.h
73537+++ b/include/video/udlfb.h
73538@@ -53,10 +53,10 @@ struct dlfb_data {
73539 u32 pseudo_palette[256];
73540 int blank_mode; /*one of FB_BLANK_ */
73541 /* blit-only rendering path metrics, exposed through sysfs */
73542- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
73543- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
73544- atomic_t bytes_sent; /* to usb, after compression including overhead */
73545- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
73546+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
73547+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
73548+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
73549+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
73550 };
73551
73552 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
73553diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
73554index 0993a22..32ba2fe 100644
73555--- a/include/video/uvesafb.h
73556+++ b/include/video/uvesafb.h
73557@@ -177,6 +177,7 @@ struct uvesafb_par {
73558 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
73559 u8 pmi_setpal; /* PMI for palette changes */
73560 u16 *pmi_base; /* protected mode interface location */
73561+ u8 *pmi_code; /* protected mode code location */
73562 void *pmi_start;
73563 void *pmi_pal;
73564 u8 *vbe_state_orig; /*
73565diff --git a/init/Kconfig b/init/Kconfig
73566index be8b7f5..1eeca9b 100644
73567--- a/init/Kconfig
73568+++ b/init/Kconfig
73569@@ -990,6 +990,7 @@ endif # CGROUPS
73570
73571 config CHECKPOINT_RESTORE
73572 bool "Checkpoint/restore support" if EXPERT
73573+ depends on !GRKERNSEC
73574 default n
73575 help
73576 Enables additional kernel features in a sake of checkpoint/restore.
73577@@ -1468,7 +1469,7 @@ config SLUB_DEBUG
73578
73579 config COMPAT_BRK
73580 bool "Disable heap randomization"
73581- default y
73582+ default n
73583 help
73584 Randomizing heap placement makes heap exploits harder, but it
73585 also breaks ancient binaries (including anything libc5 based).
73586@@ -1711,7 +1712,7 @@ config INIT_ALL_POSSIBLE
73587 config STOP_MACHINE
73588 bool
73589 default y
73590- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
73591+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
73592 help
73593 Need stop_machine() primitive.
73594
73595diff --git a/init/Makefile b/init/Makefile
73596index 7bc47ee..6da2dc7 100644
73597--- a/init/Makefile
73598+++ b/init/Makefile
73599@@ -2,6 +2,9 @@
73600 # Makefile for the linux kernel.
73601 #
73602
73603+ccflags-y := $(GCC_PLUGINS_CFLAGS)
73604+asflags-y := $(GCC_PLUGINS_AFLAGS)
73605+
73606 obj-y := main.o version.o mounts.o
73607 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
73608 obj-y += noinitramfs.o
73609diff --git a/init/do_mounts.c b/init/do_mounts.c
73610index 1d1b634..a1c810f 100644
73611--- a/init/do_mounts.c
73612+++ b/init/do_mounts.c
73613@@ -355,11 +355,11 @@ static void __init get_fs_names(char *page)
73614 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
73615 {
73616 struct super_block *s;
73617- int err = sys_mount(name, "/root", fs, flags, data);
73618+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
73619 if (err)
73620 return err;
73621
73622- sys_chdir("/root");
73623+ sys_chdir((const char __force_user *)"/root");
73624 s = current->fs->pwd.dentry->d_sb;
73625 ROOT_DEV = s->s_dev;
73626 printk(KERN_INFO
73627@@ -480,18 +480,18 @@ void __init change_floppy(char *fmt, ...)
73628 va_start(args, fmt);
73629 vsprintf(buf, fmt, args);
73630 va_end(args);
73631- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
73632+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
73633 if (fd >= 0) {
73634 sys_ioctl(fd, FDEJECT, 0);
73635 sys_close(fd);
73636 }
73637 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
73638- fd = sys_open("/dev/console", O_RDWR, 0);
73639+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
73640 if (fd >= 0) {
73641 sys_ioctl(fd, TCGETS, (long)&termios);
73642 termios.c_lflag &= ~ICANON;
73643 sys_ioctl(fd, TCSETSF, (long)&termios);
73644- sys_read(fd, &c, 1);
73645+ sys_read(fd, (char __user *)&c, 1);
73646 termios.c_lflag |= ICANON;
73647 sys_ioctl(fd, TCSETSF, (long)&termios);
73648 sys_close(fd);
73649@@ -585,6 +585,6 @@ void __init prepare_namespace(void)
73650 mount_root();
73651 out:
73652 devtmpfs_mount("dev");
73653- sys_mount(".", "/", NULL, MS_MOVE, NULL);
73654- sys_chroot(".");
73655+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
73656+ sys_chroot((const char __force_user *)".");
73657 }
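The __force_user casts above (and in the following init/ files) exist because, with PaX UDEREF, these sys_* helpers take __user pointers while early boot passes them kernel strings; __force tells sparse the address-space crossing is deliberate. A sketch of the annotation machinery itself, mirroring the definitions in include/linux/compiler.h (inert under plain GCC, active under sparse):

/* Sketch of sparse address-space annotations. Under plain GCC the
   macros expand to nothing; under sparse, mixing address spaces
   without __force produces a warning, which is exactly what the
   casts above annotate away at verified call sites. */
#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif

static int consume_user_path(const char __user *path)
{
        (void)path;   /* would be copied in with copy_from_user() */
        return 0;
}

int main(void)
{
        const char *kernel_str = "/root";
        /* Without the __force cast, sparse flags this conversion. */
        return consume_user_path((const char __force __user *)kernel_str);
}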
73658diff --git a/init/do_mounts.h b/init/do_mounts.h
73659index f5b978a..69dbfe8 100644
73660--- a/init/do_mounts.h
73661+++ b/init/do_mounts.h
73662@@ -15,15 +15,15 @@ extern int root_mountflags;
73663
73664 static inline int create_dev(char *name, dev_t dev)
73665 {
73666- sys_unlink(name);
73667- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
73668+ sys_unlink((char __force_user *)name);
73669+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
73670 }
73671
73672 #if BITS_PER_LONG == 32
73673 static inline u32 bstat(char *name)
73674 {
73675 struct stat64 stat;
73676- if (sys_stat64(name, &stat) != 0)
73677+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
73678 return 0;
73679 if (!S_ISBLK(stat.st_mode))
73680 return 0;
73681@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
73682 static inline u32 bstat(char *name)
73683 {
73684 struct stat stat;
73685- if (sys_newstat(name, &stat) != 0)
73686+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
73687 return 0;
73688 if (!S_ISBLK(stat.st_mode))
73689 return 0;
73690diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
73691index f9acf71..1e19144 100644
73692--- a/init/do_mounts_initrd.c
73693+++ b/init/do_mounts_initrd.c
73694@@ -58,8 +58,8 @@ static void __init handle_initrd(void)
73695 create_dev("/dev/root.old", Root_RAM0);
73696 /* mount initrd on rootfs' /root */
73697 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
73698- sys_mkdir("/old", 0700);
73699- sys_chdir("/old");
73700+ sys_mkdir((const char __force_user *)"/old", 0700);
73701+ sys_chdir((const char __force_user *)"/old");
73702
73703 /*
73704 * In case that a resume from disk is carried out by linuxrc or one of
73705@@ -73,31 +73,31 @@ static void __init handle_initrd(void)
73706 current->flags &= ~PF_FREEZER_SKIP;
73707
73708 /* move initrd to rootfs' /old */
73709- sys_mount("..", ".", NULL, MS_MOVE, NULL);
73710+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
73711 /* switch root and cwd back to / of rootfs */
73712- sys_chroot("..");
73713+ sys_chroot((const char __force_user *)"..");
73714
73715 if (new_decode_dev(real_root_dev) == Root_RAM0) {
73716- sys_chdir("/old");
73717+ sys_chdir((const char __force_user *)"/old");
73718 return;
73719 }
73720
73721- sys_chdir("/");
73722+ sys_chdir((const char __force_user *)"/");
73723 ROOT_DEV = new_decode_dev(real_root_dev);
73724 mount_root();
73725
73726 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
73727- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
73728+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
73729 if (!error)
73730 printk("okay\n");
73731 else {
73732- int fd = sys_open("/dev/root.old", O_RDWR, 0);
73733+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
73734 if (error == -ENOENT)
73735 printk("/initrd does not exist. Ignored.\n");
73736 else
73737 printk("failed\n");
73738 printk(KERN_NOTICE "Unmounting old root\n");
73739- sys_umount("/old", MNT_DETACH);
73740+ sys_umount((char __force_user *)"/old", MNT_DETACH);
73741 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
73742 if (fd < 0) {
73743 error = fd;
73744@@ -120,11 +120,11 @@ int __init initrd_load(void)
73745 * mounted in the normal path.
73746 */
73747 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
73748- sys_unlink("/initrd.image");
73749+ sys_unlink((const char __force_user *)"/initrd.image");
73750 handle_initrd();
73751 return 1;
73752 }
73753 }
73754- sys_unlink("/initrd.image");
73755+ sys_unlink((const char __force_user *)"/initrd.image");
73756 return 0;
73757 }
73758diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
73759index 8cb6db5..d729f50 100644
73760--- a/init/do_mounts_md.c
73761+++ b/init/do_mounts_md.c
73762@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
73763 partitioned ? "_d" : "", minor,
73764 md_setup_args[ent].device_names);
73765
73766- fd = sys_open(name, 0, 0);
73767+ fd = sys_open((char __force_user *)name, 0, 0);
73768 if (fd < 0) {
73769 printk(KERN_ERR "md: open failed - cannot start "
73770 "array %s\n", name);
73771@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
73772 * array without it
73773 */
73774 sys_close(fd);
73775- fd = sys_open(name, 0, 0);
73776+ fd = sys_open((char __force_user *)name, 0, 0);
73777 sys_ioctl(fd, BLKRRPART, 0);
73778 }
73779 sys_close(fd);
73780@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
73781
73782 wait_for_device_probe();
73783
73784- fd = sys_open("/dev/md0", 0, 0);
73785+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
73786 if (fd >= 0) {
73787 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
73788 sys_close(fd);
73789diff --git a/init/init_task.c b/init/init_task.c
73790index 8b2f399..f0797c9 100644
73791--- a/init/init_task.c
73792+++ b/init/init_task.c
73793@@ -20,5 +20,9 @@ EXPORT_SYMBOL(init_task);
73794 * Initial thread structure. Alignment of this is handled by a special
73795 * linker map entry.
73796 */
73797+#ifdef CONFIG_X86
73798+union thread_union init_thread_union __init_task_data;
73799+#else
73800 union thread_union init_thread_union __init_task_data =
73801 { INIT_THREAD_INFO(init_task) };
73802+#endif
73803diff --git a/init/initramfs.c b/init/initramfs.c
73804index 84c6bf1..8899338 100644
73805--- a/init/initramfs.c
73806+++ b/init/initramfs.c
73807@@ -84,7 +84,7 @@ static void __init free_hash(void)
73808 }
73809 }
73810
73811-static long __init do_utime(char *filename, time_t mtime)
73812+static long __init do_utime(char __force_user *filename, time_t mtime)
73813 {
73814 struct timespec t[2];
73815
73816@@ -119,7 +119,7 @@ static void __init dir_utime(void)
73817 struct dir_entry *de, *tmp;
73818 list_for_each_entry_safe(de, tmp, &dir_list, list) {
73819 list_del(&de->list);
73820- do_utime(de->name, de->mtime);
73821+ do_utime((char __force_user *)de->name, de->mtime);
73822 kfree(de->name);
73823 kfree(de);
73824 }
73825@@ -281,7 +281,7 @@ static int __init maybe_link(void)
73826 if (nlink >= 2) {
73827 char *old = find_link(major, minor, ino, mode, collected);
73828 if (old)
73829- return (sys_link(old, collected) < 0) ? -1 : 1;
73830+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
73831 }
73832 return 0;
73833 }
73834@@ -290,11 +290,11 @@ static void __init clean_path(char *path, umode_t mode)
73835 {
73836 struct stat st;
73837
73838- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
73839+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
73840 if (S_ISDIR(st.st_mode))
73841- sys_rmdir(path);
73842+ sys_rmdir((char __force_user *)path);
73843 else
73844- sys_unlink(path);
73845+ sys_unlink((char __force_user *)path);
73846 }
73847 }
73848
73849@@ -315,7 +315,7 @@ static int __init do_name(void)
73850 int openflags = O_WRONLY|O_CREAT;
73851 if (ml != 1)
73852 openflags |= O_TRUNC;
73853- wfd = sys_open(collected, openflags, mode);
73854+ wfd = sys_open((char __force_user *)collected, openflags, mode);
73855
73856 if (wfd >= 0) {
73857 sys_fchown(wfd, uid, gid);
73858@@ -327,17 +327,17 @@ static int __init do_name(void)
73859 }
73860 }
73861 } else if (S_ISDIR(mode)) {
73862- sys_mkdir(collected, mode);
73863- sys_chown(collected, uid, gid);
73864- sys_chmod(collected, mode);
73865+ sys_mkdir((char __force_user *)collected, mode);
73866+ sys_chown((char __force_user *)collected, uid, gid);
73867+ sys_chmod((char __force_user *)collected, mode);
73868 dir_add(collected, mtime);
73869 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
73870 S_ISFIFO(mode) || S_ISSOCK(mode)) {
73871 if (maybe_link() == 0) {
73872- sys_mknod(collected, mode, rdev);
73873- sys_chown(collected, uid, gid);
73874- sys_chmod(collected, mode);
73875- do_utime(collected, mtime);
73876+ sys_mknod((char __force_user *)collected, mode, rdev);
73877+ sys_chown((char __force_user *)collected, uid, gid);
73878+ sys_chmod((char __force_user *)collected, mode);
73879+ do_utime((char __force_user *)collected, mtime);
73880 }
73881 }
73882 return 0;
73883@@ -346,15 +346,15 @@ static int __init do_name(void)
73884 static int __init do_copy(void)
73885 {
73886 if (count >= body_len) {
73887- sys_write(wfd, victim, body_len);
73888+ sys_write(wfd, (char __force_user *)victim, body_len);
73889 sys_close(wfd);
73890- do_utime(vcollected, mtime);
73891+ do_utime((char __force_user *)vcollected, mtime);
73892 kfree(vcollected);
73893 eat(body_len);
73894 state = SkipIt;
73895 return 0;
73896 } else {
73897- sys_write(wfd, victim, count);
73898+ sys_write(wfd, (char __force_user *)victim, count);
73899 body_len -= count;
73900 eat(count);
73901 return 1;
73902@@ -365,9 +365,9 @@ static int __init do_symlink(void)
73903 {
73904 collected[N_ALIGN(name_len) + body_len] = '\0';
73905 clean_path(collected, 0);
73906- sys_symlink(collected + N_ALIGN(name_len), collected);
73907- sys_lchown(collected, uid, gid);
73908- do_utime(collected, mtime);
73909+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
73910+ sys_lchown((char __force_user *)collected, uid, gid);
73911+ do_utime((char __force_user *)collected, mtime);
73912 state = SkipIt;
73913 next_state = Reset;
73914 return 0;
73915diff --git a/init/main.c b/init/main.c
73916index cee4b5c..360e10a 100644
73917--- a/init/main.c
73918+++ b/init/main.c
73919@@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
73920 extern void tc_init(void);
73921 #endif
73922
73923+extern void grsecurity_init(void);
73924+
73925 /*
73926 * Debug helper: via this flag we know that we are in 'early bootup code'
73927 * where only the boot processor is running with IRQ disabled. This means
73928@@ -149,6 +151,61 @@ static int __init set_reset_devices(char *str)
73929
73930 __setup("reset_devices", set_reset_devices);
73931
73932+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
73933+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
73934+static int __init setup_grsec_proc_gid(char *str)
73935+{
73936+ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
73937+ return 1;
73938+}
73939+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
73940+#endif
73941+
73942+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
73943+extern char pax_enter_kernel_user[];
73944+extern char pax_exit_kernel_user[];
73945+extern pgdval_t clone_pgd_mask;
73946+#endif
73947+
73948+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
73949+static int __init setup_pax_nouderef(char *str)
73950+{
73951+#ifdef CONFIG_X86_32
73952+ unsigned int cpu;
73953+ struct desc_struct *gdt;
73954+
73955+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
73956+ gdt = get_cpu_gdt_table(cpu);
73957+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
73958+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
73959+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
73960+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
73961+ }
73962+ loadsegment(ds, __KERNEL_DS);
73963+ loadsegment(es, __KERNEL_DS);
73964+ loadsegment(ss, __KERNEL_DS);
73965+#else
73966+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
73967+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
73968+ clone_pgd_mask = ~(pgdval_t)0UL;
73969+#endif
73970+
73971+ return 0;
73972+}
73973+early_param("pax_nouderef", setup_pax_nouderef);
73974+#endif
73975+
73976+#ifdef CONFIG_PAX_SOFTMODE
73977+int pax_softmode;
73978+
73979+static int __init setup_pax_softmode(char *str)
73980+{
73981+ get_option(&str, &pax_softmode);
73982+ return 1;
73983+}
73984+__setup("pax_softmode=", setup_pax_softmode);
73985+#endif
73986+
73987 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
73988 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
73989 static const char *panic_later, *panic_param;
73990@@ -681,6 +738,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
73991 {
73992 int count = preempt_count();
73993 int ret;
73994+ const char *msg1 = "", *msg2 = "";
73995
73996 if (initcall_debug)
73997 ret = do_one_initcall_debug(fn);
73998@@ -693,15 +751,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
73999 sprintf(msgbuf, "error code %d ", ret);
74000
74001 if (preempt_count() != count) {
74002- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
74003+ msg1 = " preemption imbalance";
74004 preempt_count() = count;
74005 }
74006 if (irqs_disabled()) {
74007- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
74008+ msg2 = " disabled interrupts";
74009 local_irq_enable();
74010 }
74011- if (msgbuf[0]) {
74012- printk("initcall %pF returned with %s\n", fn, msgbuf);
74013+ if (msgbuf[0] || *msg1 || *msg2) {
74014+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
74015 }
74016
74017 return ret;
74018@@ -755,8 +813,14 @@ static void __init do_initcall_level(int level)
74019 level, level,
74020 &repair_env_string);
74021
74022- for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++)
74023+ for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++) {
74024 do_one_initcall(*fn);
74025+
74026+#ifdef LATENT_ENTROPY_PLUGIN
74027+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
74028+#endif
74029+
74030+ }
74031 }
74032
74033 static void __init do_initcalls(void)
74034@@ -790,8 +854,14 @@ static void __init do_pre_smp_initcalls(void)
74035 {
74036 initcall_t *fn;
74037
74038- for (fn = __initcall_start; fn < __initcall0_start; fn++)
74039+ for (fn = __initcall_start; fn < __initcall0_start; fn++) {
74040 do_one_initcall(*fn);
74041+
74042+#ifdef LATENT_ENTROPY_PLUGIN
74043+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
74044+#endif
74045+
74046+ }
74047 }
74048
74049 static int run_init_process(const char *init_filename)
74050@@ -877,7 +947,7 @@ static noinline void __init kernel_init_freeable(void)
74051 do_basic_setup();
74052
74053 /* Open the /dev/console on the rootfs, this should never fail */
74054- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
74055+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
74056 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
74057
74058 (void) sys_dup(0);
74059@@ -890,11 +960,13 @@ static noinline void __init kernel_init_freeable(void)
74060 if (!ramdisk_execute_command)
74061 ramdisk_execute_command = "/init";
74062
74063- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
74064+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
74065 ramdisk_execute_command = NULL;
74066 prepare_namespace();
74067 }
74068
74069+ grsecurity_init();
74070+
74071 /*
74072 * Ok, we have completed the initial bootup, and
74073 * we're essentially up and running. Get rid of the
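All of the new parameters in the init/main.c hunk above hang off __setup()/early_param(): the handler receives the text after '=' and parses it itself. A userspace sketch of the grsec_proc_gid case, with strtol standing in for the kernel's simple_strtol:

/* What setup_grsec_proc_gid() does with its argument: parse the text
   after "grsec_proc_gid=" as a numeric gid. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static long grsec_proc_gid;

static int setup_grsec_proc_gid(const char *str)
{
        grsec_proc_gid = strtol(str, NULL, 0);  /* base 0: accepts 10, 0x.. */
        return 1;
}

int main(void)
{
        const char *cmdline_param = "grsec_proc_gid=48";  /* example value */
        const char *prefix = "grsec_proc_gid=";
        if (strncmp(cmdline_param, prefix, strlen(prefix)) == 0)
                setup_grsec_proc_gid(cmdline_param + strlen(prefix));
        printf("grsec_proc_gid = %ld\n", grsec_proc_gid);
        return 0;
}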
74074diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
74075index 130dfec..cc88451 100644
74076--- a/ipc/ipc_sysctl.c
74077+++ b/ipc/ipc_sysctl.c
74078@@ -30,7 +30,7 @@ static void *get_ipc(ctl_table *table)
74079 static int proc_ipc_dointvec(ctl_table *table, int write,
74080 void __user *buffer, size_t *lenp, loff_t *ppos)
74081 {
74082- struct ctl_table ipc_table;
74083+ ctl_table_no_const ipc_table;
74084
74085 memcpy(&ipc_table, table, sizeof(ipc_table));
74086 ipc_table.data = get_ipc(table);
74087@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(ctl_table *table, int write,
74088 static int proc_ipc_dointvec_minmax(ctl_table *table, int write,
74089 void __user *buffer, size_t *lenp, loff_t *ppos)
74090 {
74091- struct ctl_table ipc_table;
74092+ ctl_table_no_const ipc_table;
74093
74094 memcpy(&ipc_table, table, sizeof(ipc_table));
74095 ipc_table.data = get_ipc(table);
74096@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(ctl_table *table, int write,
74097 static int proc_ipc_callback_dointvec(ctl_table *table, int write,
74098 void __user *buffer, size_t *lenp, loff_t *ppos)
74099 {
74100- struct ctl_table ipc_table;
74101+ ctl_table_no_const ipc_table;
74102 size_t lenp_bef = *lenp;
74103 int rc;
74104
74105@@ -88,7 +88,7 @@ static int proc_ipc_callback_dointvec(ctl_table *table, int write,
74106 static int proc_ipc_doulongvec_minmax(ctl_table *table, int write,
74107 void __user *buffer, size_t *lenp, loff_t *ppos)
74108 {
74109- struct ctl_table ipc_table;
74110+ ctl_table_no_const ipc_table;
74111 memcpy(&ipc_table, table, sizeof(ipc_table));
74112 ipc_table.data = get_ipc(table);
74113
74114@@ -122,7 +122,7 @@ static void ipc_auto_callback(int val)
74115 static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write,
74116 void __user *buffer, size_t *lenp, loff_t *ppos)
74117 {
74118- struct ctl_table ipc_table;
74119+ ctl_table_no_const ipc_table;
74120 size_t lenp_bef = *lenp;
74121 int oldval;
74122 int rc;
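ctl_table_no_const exists because the constify plugin makes struct ctl_table read-only; the handlers above still need a writable local copy whose .data they can repoint. The pattern in plain C (field names are representative, not the real ctl_table):

/* The pattern behind ctl_table_no_const: the registered table is
   const (read-only after constification), so handlers memcpy it into
   a writable local before redirecting .data. Fields are illustrative. */
#include <stdio.h>
#include <string.h>

struct ctl_entry {            /* stand-in for struct ctl_table */
        const char *procname;
        void *data;
};

static int shm_ctlmax = 4096;

static void handler(const struct ctl_entry *table)
{
        struct ctl_entry local;          /* the "no_const" copy */
        memcpy(&local, table, sizeof(local));
        local.data = &shm_ctlmax;        /* per-namespace redirection */
        printf("%s -> %d\n", local.procname, *(int *)local.data);
}

int main(void)
{
        static const struct ctl_entry table = { "shmmax", NULL };
        handler(&table);
        return 0;
}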
74123diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
74124index 383d638..943fdbb 100644
74125--- a/ipc/mq_sysctl.c
74126+++ b/ipc/mq_sysctl.c
74127@@ -25,7 +25,7 @@ static void *get_mq(ctl_table *table)
74128 static int proc_mq_dointvec_minmax(ctl_table *table, int write,
74129 void __user *buffer, size_t *lenp, loff_t *ppos)
74130 {
74131- struct ctl_table mq_table;
74132+ ctl_table_no_const mq_table;
74133 memcpy(&mq_table, table, sizeof(mq_table));
74134 mq_table.data = get_mq(table);
74135
74136diff --git a/ipc/mqueue.c b/ipc/mqueue.c
74137index f3f40dc..ffe5a3a 100644
74138--- a/ipc/mqueue.c
74139+++ b/ipc/mqueue.c
74140@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
74141 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
74142 info->attr.mq_msgsize);
74143
74144+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
74145 spin_lock(&mq_lock);
74146 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
74147 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
74148diff --git a/ipc/msg.c b/ipc/msg.c
74149index 31cd1bf..9778e0f8 100644
74150--- a/ipc/msg.c
74151+++ b/ipc/msg.c
74152@@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
74153 return security_msg_queue_associate(msq, msgflg);
74154 }
74155
74156+static struct ipc_ops msg_ops = {
74157+ .getnew = newque,
74158+ .associate = msg_security,
74159+ .more_checks = NULL
74160+};
74161+
74162 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
74163 {
74164 struct ipc_namespace *ns;
74165- struct ipc_ops msg_ops;
74166 struct ipc_params msg_params;
74167
74168 ns = current->nsproxy->ipc_ns;
74169
74170- msg_ops.getnew = newque;
74171- msg_ops.associate = msg_security;
74172- msg_ops.more_checks = NULL;
74173-
74174 msg_params.key = key;
74175 msg_params.flg = msgflg;
74176
74177@@ -872,6 +873,7 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp,
74178 goto out_unlock;
74179 break;
74180 }
74181+ msg = ERR_PTR(-EAGAIN);
74182 } else
74183 break;
74184 msg_counter++;
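Besides hoisting msg_ops (see the sketch after the sem.c hunk below), the hunk above fixes do_msgrcv: resetting msg to ERR_PTR(-EAGAIN) re-arms the not-found state, so a message matched on an earlier pass of the scan loop cannot leak out as the result. For readers new to the idiom, ERR_PTR/IS_ERR encode small negative errnos inside a pointer; a userspace re-implementation for illustration (the kernel versions live in linux/err.h):

/* The ERR_PTR idiom used by the do_msgrcv fix above: small negative
   errno values are smuggled through a pointer return. */
#include <stdio.h>

#define MAX_ERRNO 4095
#define EAGAIN    11

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int   IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
        void *msg = ERR_PTR(-EAGAIN);    /* "no message yet, retry" */
        if (IS_ERR(msg))
                printf("error %ld, keep scanning\n", -PTR_ERR(msg));
        return 0;
}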
74185diff --git a/ipc/sem.c b/ipc/sem.c
74186index 58d31f1..cce7a55 100644
74187--- a/ipc/sem.c
74188+++ b/ipc/sem.c
74189@@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
74190 return 0;
74191 }
74192
74193+static struct ipc_ops sem_ops = {
74194+ .getnew = newary,
74195+ .associate = sem_security,
74196+ .more_checks = sem_more_checks
74197+};
74198+
74199 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
74200 {
74201 struct ipc_namespace *ns;
74202- struct ipc_ops sem_ops;
74203 struct ipc_params sem_params;
74204
74205 ns = current->nsproxy->ipc_ns;
74206@@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
74207 if (nsems < 0 || nsems > ns->sc_semmsl)
74208 return -EINVAL;
74209
74210- sem_ops.getnew = newary;
74211- sem_ops.associate = sem_security;
74212- sem_ops.more_checks = sem_more_checks;
74213-
74214 sem_params.key = key;
74215 sem_params.flg = semflg;
74216 sem_params.u.nsems = nsems;
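msg.c above, sem.c here and shm.c below all get the same treatment: the ipc_ops block, previously rebuilt on the stack in every syscall, becomes a single file-scope object so the constify plugin can move its function pointers into read-only memory. The shape of the change in isolation (const is written explicitly here, whereas the patch relies on the plugin to add it; the struct layout is a stand-in for the kernel's ipc_ops):

/* Before: an ops struct built on the stack each call. After: one
   static instance whose function pointers can live in .rodata. */
#include <stdio.h>

struct ipc_ops {
        int (*getnew)(int key);
        int (*associate)(int key);
        int (*more_checks)(int key);
};

static int newary(int key)       { return printf("newary(%d)\n", key); }
static int sem_security(int key) { (void)key; return 0; }

static const struct ipc_ops sem_ops = {     /* hoisted out of semget */
        .getnew      = newary,
        .associate   = sem_security,
        .more_checks = NULL,
};

int main(void)
{
        sem_ops.getnew(42);   /* pointers now tamper-resistant */
        return 0;
}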
74217diff --git a/ipc/shm.c b/ipc/shm.c
74218index 4fa6d8f..55cff14 100644
74219--- a/ipc/shm.c
74220+++ b/ipc/shm.c
74221@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
74222 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
74223 #endif
74224
74225+#ifdef CONFIG_GRKERNSEC
74226+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
74227+ const time_t shm_createtime, const kuid_t cuid,
74228+ const int shmid);
74229+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
74230+ const time_t shm_createtime);
74231+#endif
74232+
74233 void shm_init_ns(struct ipc_namespace *ns)
74234 {
74235 ns->shm_ctlmax = SHMMAX;
74236@@ -521,6 +529,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
74237 shp->shm_lprid = 0;
74238 shp->shm_atim = shp->shm_dtim = 0;
74239 shp->shm_ctim = get_seconds();
74240+#ifdef CONFIG_GRKERNSEC
74241+ {
74242+ struct timespec timeval;
74243+ do_posix_clock_monotonic_gettime(&timeval);
74244+
74245+ shp->shm_createtime = timeval.tv_sec;
74246+ }
74247+#endif
74248 shp->shm_segsz = size;
74249 shp->shm_nattch = 0;
74250 shp->shm_file = file;
74251@@ -572,18 +588,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
74252 return 0;
74253 }
74254
74255+static struct ipc_ops shm_ops = {
74256+ .getnew = newseg,
74257+ .associate = shm_security,
74258+ .more_checks = shm_more_checks
74259+};
74260+
74261 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
74262 {
74263 struct ipc_namespace *ns;
74264- struct ipc_ops shm_ops;
74265 struct ipc_params shm_params;
74266
74267 ns = current->nsproxy->ipc_ns;
74268
74269- shm_ops.getnew = newseg;
74270- shm_ops.associate = shm_security;
74271- shm_ops.more_checks = shm_more_checks;
74272-
74273 shm_params.key = key;
74274 shm_params.flg = shmflg;
74275 shm_params.u.size = size;
74276@@ -1004,6 +1021,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
74277 f_mode = FMODE_READ | FMODE_WRITE;
74278 }
74279 if (shmflg & SHM_EXEC) {
74280+
74281+#ifdef CONFIG_PAX_MPROTECT
74282+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
74283+ goto out;
74284+#endif
74285+
74286 prot |= PROT_EXEC;
74287 acc_mode |= S_IXUGO;
74288 }
74289@@ -1027,9 +1050,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
74290 if (err)
74291 goto out_unlock;
74292
74293+#ifdef CONFIG_GRKERNSEC
74294+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
74295+ shp->shm_perm.cuid, shmid) ||
74296+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
74297+ err = -EACCES;
74298+ goto out_unlock;
74299+ }
74300+#endif
74301+
74302 path = shp->shm_file->f_path;
74303 path_get(&path);
74304 shp->shm_nattch++;
74305+#ifdef CONFIG_GRKERNSEC
74306+ shp->shm_lapid = current->pid;
74307+#endif
74308 size = i_size_read(path.dentry->d_inode);
74309 shm_unlock(shp);
74310
74311diff --git a/kernel/acct.c b/kernel/acct.c
74312index 051e071..15e0920 100644
74313--- a/kernel/acct.c
74314+++ b/kernel/acct.c
74315@@ -550,7 +550,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
74316 */
74317 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
74318 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
74319- file->f_op->write(file, (char *)&ac,
74320+ file->f_op->write(file, (char __force_user *)&ac,
74321 sizeof(acct_t), &file->f_pos);
74322 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
74323 set_fs(fs);
74324diff --git a/kernel/audit.c b/kernel/audit.c
74325index d596e53..dbef3c3 100644
74326--- a/kernel/audit.c
74327+++ b/kernel/audit.c
74328@@ -116,7 +116,7 @@ u32 audit_sig_sid = 0;
74329 3) suppressed due to audit_rate_limit
74330 4) suppressed due to audit_backlog_limit
74331 */
74332-static atomic_t audit_lost = ATOMIC_INIT(0);
74333+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
74334
74335 /* The netlink socket. */
74336 static struct sock *audit_sock;
74337@@ -238,7 +238,7 @@ void audit_log_lost(const char *message)
74338 unsigned long now;
74339 int print;
74340
74341- atomic_inc(&audit_lost);
74342+ atomic_inc_unchecked(&audit_lost);
74343
74344 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
74345
74346@@ -257,7 +257,7 @@ void audit_log_lost(const char *message)
74347 printk(KERN_WARNING
74348 "audit: audit_lost=%d audit_rate_limit=%d "
74349 "audit_backlog_limit=%d\n",
74350- atomic_read(&audit_lost),
74351+ atomic_read_unchecked(&audit_lost),
74352 audit_rate_limit,
74353 audit_backlog_limit);
74354 audit_panic(message);
74355@@ -681,7 +681,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
74356 status_set.pid = audit_pid;
74357 status_set.rate_limit = audit_rate_limit;
74358 status_set.backlog_limit = audit_backlog_limit;
74359- status_set.lost = atomic_read(&audit_lost);
74360+ status_set.lost = atomic_read_unchecked(&audit_lost);
74361 status_set.backlog = skb_queue_len(&audit_skb_queue);
74362 audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
74363 &status_set, sizeof(status_set));
74364diff --git a/kernel/auditsc.c b/kernel/auditsc.c
74365index a371f85..da826c1 100644
74366--- a/kernel/auditsc.c
74367+++ b/kernel/auditsc.c
74368@@ -2292,7 +2292,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
74369 }
74370
74371 /* global counter which is incremented every time something logs in */
74372-static atomic_t session_id = ATOMIC_INIT(0);
74373+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
74374
74375 /**
74376 * audit_set_loginuid - set current task's audit_context loginuid
74377@@ -2316,7 +2316,7 @@ int audit_set_loginuid(kuid_t loginuid)
74378 return -EPERM;
74379 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
74380
74381- sessionid = atomic_inc_return(&session_id);
74382+ sessionid = atomic_inc_return_unchecked(&session_id);
74383 if (context && context->in_syscall) {
74384 struct audit_buffer *ab;
74385
74386diff --git a/kernel/capability.c b/kernel/capability.c
74387index 493d972..f87dfbd 100644
74388--- a/kernel/capability.c
74389+++ b/kernel/capability.c
74390@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
74391 * before modification is attempted and the application
74392 * fails.
74393 */
74394+ if (tocopy > ARRAY_SIZE(kdata))
74395+ return -EFAULT;
74396+
74397 if (copy_to_user(dataptr, kdata, tocopy
74398 * sizeof(struct __user_cap_data_struct))) {
74399 return -EFAULT;
74400@@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
74401 int ret;
74402
74403 rcu_read_lock();
74404- ret = security_capable(__task_cred(t), ns, cap);
74405+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
74406+ gr_task_is_capable(t, __task_cred(t), cap);
74407 rcu_read_unlock();
74408
74409- return (ret == 0);
74410+ return ret;
74411 }
74412
74413 /**
74414@@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
74415 int ret;
74416
74417 rcu_read_lock();
74418- ret = security_capable_noaudit(__task_cred(t), ns, cap);
74419+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
74420 rcu_read_unlock();
74421
74422- return (ret == 0);
74423+ return ret;
74424 }
74425
74426 /**
74427@@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
74428 BUG();
74429 }
74430
74431- if (security_capable(current_cred(), ns, cap) == 0) {
74432+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
74433 current->flags |= PF_SUPERPRIV;
74434 return true;
74435 }
74436@@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
74437 }
74438 EXPORT_SYMBOL(ns_capable);
74439
74440+bool ns_capable_nolog(struct user_namespace *ns, int cap)
74441+{
74442+ if (unlikely(!cap_valid(cap))) {
74443+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
74444+ BUG();
74445+ }
74446+
74447+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
74448+ current->flags |= PF_SUPERPRIV;
74449+ return true;
74450+ }
74451+ return false;
74452+}
74453+EXPORT_SYMBOL(ns_capable_nolog);
74454+
74455 /**
74456 * capable - Determine if the current task has a superior capability in effect
74457 * @cap: The capability to be tested for
74458@@ -408,6 +427,12 @@ bool capable(int cap)
74459 }
74460 EXPORT_SYMBOL(capable);
74461
74462+bool capable_nolog(int cap)
74463+{
74464+ return ns_capable_nolog(&init_user_ns, cap);
74465+}
74466+EXPORT_SYMBOL(capable_nolog);
74467+
74468 /**
74469 * nsown_capable - Check superior capability to one's own user_ns
74470 * @cap: The capability in question
74471@@ -440,3 +465,10 @@ bool inode_capable(const struct inode *inode, int cap)
74472
74473 return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
74474 }
74475+
74476+bool inode_capable_nolog(const struct inode *inode, int cap)
74477+{
74478+ struct user_namespace *ns = current_user_ns();
74479+
74480+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
74481+}
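The capget() hunk above adds a bounds check: tocopy is derived from the user-supplied header version, so it is validated against the real size of the on-stack kdata array before anything is copied out. The same defensive shape in userspace C, with invented sizes:

/* The defensive pattern added to capget(): validate a version-derived
   element count against the real size of the stack buffer before
   copying out. Sizes here are illustrative. */
#include <stdio.h>
#include <string.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct cap_data { unsigned effective, permitted, inheritable; };

static int copy_caps_out(struct cap_data *dst, unsigned tocopy)
{
        struct cap_data kdata[2] = { { 1, 1, 1 }, { 0, 0, 0 } };

        if (tocopy > ARRAY_SIZE(kdata))   /* the added check */
                return -1;                /* -EFAULT in the kernel */

        memcpy(dst, kdata, tocopy * sizeof(kdata[0]));
        return 0;
}

int main(void)
{
        struct cap_data out[2];
        printf("tocopy=2: %d\n", copy_caps_out(out, 2));   /* ok */
        printf("tocopy=9: %d\n", copy_caps_out(out, 9));   /* rejected */
        return 0;
}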
74482diff --git a/kernel/cgroup.c b/kernel/cgroup.c
74483index 1e23664..570a83d 100644
74484--- a/kernel/cgroup.c
74485+++ b/kernel/cgroup.c
74486@@ -5543,7 +5543,7 @@ static int cgroup_css_links_read(struct cgroup *cont,
74487 struct css_set *cg = link->cg;
74488 struct task_struct *task;
74489 int count = 0;
74490- seq_printf(seq, "css_set %p\n", cg);
74491+ seq_printf(seq, "css_set %pK\n", cg);
74492 list_for_each_entry(task, &cg->tasks, cg_list) {
74493 if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
74494 seq_puts(seq, " ...\n");
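%pK, used above in place of %p, consults the kptr_restrict sysctl and prints zeroed pointers to readers without CAP_SYSLOG, keeping kernel addresses out of world-readable debug files. A deliberately simplified userspace analogue (the real policy has three levels; a boolean is used here for illustration):

/* Conceptual analogue of %pK: redact a pointer for unprivileged
   readers. The real policy lives in the kernel's kptr_restrict sysctl;
   this boolean version is a simplification. */
#include <stdio.h>

static int kptr_restrict = 1;   /* pretend sysctl */

static void print_kptr(const void *p, int reader_is_privileged)
{
        if (kptr_restrict && !reader_is_privileged)
                printf("%p\n", (void *)0);   /* redacted */
        else
                printf("%p\n", p);
}

int main(void)
{
        int obj;
        print_kptr(&obj, 0);   /* unprivileged: redacted */
        print_kptr(&obj, 1);   /* privileged: real address */
        return 0;
}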
74495diff --git a/kernel/compat.c b/kernel/compat.c
74496index 36700e9..73d770c 100644
74497--- a/kernel/compat.c
74498+++ b/kernel/compat.c
74499@@ -13,6 +13,7 @@
74500
74501 #include <linux/linkage.h>
74502 #include <linux/compat.h>
74503+#include <linux/module.h>
74504 #include <linux/errno.h>
74505 #include <linux/time.h>
74506 #include <linux/signal.h>
74507@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
74508 mm_segment_t oldfs;
74509 long ret;
74510
74511- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
74512+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
74513 oldfs = get_fs();
74514 set_fs(KERNEL_DS);
74515 ret = hrtimer_nanosleep_restart(restart);
74516@@ -252,7 +253,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
74517 oldfs = get_fs();
74518 set_fs(KERNEL_DS);
74519 ret = hrtimer_nanosleep(&tu,
74520- rmtp ? (struct timespec __user *)&rmt : NULL,
74521+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
74522 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
74523 set_fs(oldfs);
74524
74525@@ -361,7 +362,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
74526 mm_segment_t old_fs = get_fs();
74527
74528 set_fs(KERNEL_DS);
74529- ret = sys_sigpending((old_sigset_t __user *) &s);
74530+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
74531 set_fs(old_fs);
74532 if (ret == 0)
74533 ret = put_user(s, set);
74534@@ -451,7 +452,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
74535 mm_segment_t old_fs = get_fs();
74536
74537 set_fs(KERNEL_DS);
74538- ret = sys_old_getrlimit(resource, &r);
74539+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
74540 set_fs(old_fs);
74541
74542 if (!ret) {
74543@@ -523,7 +524,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
74544 mm_segment_t old_fs = get_fs();
74545
74546 set_fs(KERNEL_DS);
74547- ret = sys_getrusage(who, (struct rusage __user *) &r);
74548+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
74549 set_fs(old_fs);
74550
74551 if (ret)
74552@@ -552,8 +553,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
74553 set_fs (KERNEL_DS);
74554 ret = sys_wait4(pid,
74555 (stat_addr ?
74556- (unsigned int __user *) &status : NULL),
74557- options, (struct rusage __user *) &r);
74558+ (unsigned int __force_user *) &status : NULL),
74559+ options, (struct rusage __force_user *) &r);
74560 set_fs (old_fs);
74561
74562 if (ret > 0) {
74563@@ -579,8 +580,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
74564 memset(&info, 0, sizeof(info));
74565
74566 set_fs(KERNEL_DS);
74567- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
74568- uru ? (struct rusage __user *)&ru : NULL);
74569+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
74570+ uru ? (struct rusage __force_user *)&ru : NULL);
74571 set_fs(old_fs);
74572
74573 if ((ret < 0) || (info.si_signo == 0))
74574@@ -714,8 +715,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
74575 oldfs = get_fs();
74576 set_fs(KERNEL_DS);
74577 err = sys_timer_settime(timer_id, flags,
74578- (struct itimerspec __user *) &newts,
74579- (struct itimerspec __user *) &oldts);
74580+ (struct itimerspec __force_user *) &newts,
74581+ (struct itimerspec __force_user *) &oldts);
74582 set_fs(oldfs);
74583 if (!err && old && put_compat_itimerspec(old, &oldts))
74584 return -EFAULT;
74585@@ -732,7 +733,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
74586 oldfs = get_fs();
74587 set_fs(KERNEL_DS);
74588 err = sys_timer_gettime(timer_id,
74589- (struct itimerspec __user *) &ts);
74590+ (struct itimerspec __force_user *) &ts);
74591 set_fs(oldfs);
74592 if (!err && put_compat_itimerspec(setting, &ts))
74593 return -EFAULT;
74594@@ -751,7 +752,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
74595 oldfs = get_fs();
74596 set_fs(KERNEL_DS);
74597 err = sys_clock_settime(which_clock,
74598- (struct timespec __user *) &ts);
74599+ (struct timespec __force_user *) &ts);
74600 set_fs(oldfs);
74601 return err;
74602 }
74603@@ -766,7 +767,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
74604 oldfs = get_fs();
74605 set_fs(KERNEL_DS);
74606 err = sys_clock_gettime(which_clock,
74607- (struct timespec __user *) &ts);
74608+ (struct timespec __force_user *) &ts);
74609 set_fs(oldfs);
74610 if (!err && put_compat_timespec(&ts, tp))
74611 return -EFAULT;
74612@@ -786,7 +787,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
74613
74614 oldfs = get_fs();
74615 set_fs(KERNEL_DS);
74616- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
74617+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
74618 set_fs(oldfs);
74619
74620 err = compat_put_timex(utp, &txc);
74621@@ -806,7 +807,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
74622 oldfs = get_fs();
74623 set_fs(KERNEL_DS);
74624 err = sys_clock_getres(which_clock,
74625- (struct timespec __user *) &ts);
74626+ (struct timespec __force_user *) &ts);
74627 set_fs(oldfs);
74628 if (!err && tp && put_compat_timespec(&ts, tp))
74629 return -EFAULT;
74630@@ -818,9 +819,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
74631 long err;
74632 mm_segment_t oldfs;
74633 struct timespec tu;
74634- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
74635+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
74636
74637- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
74638+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
74639 oldfs = get_fs();
74640 set_fs(KERNEL_DS);
74641 err = clock_nanosleep_restart(restart);
74642@@ -852,8 +853,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
74643 oldfs = get_fs();
74644 set_fs(KERNEL_DS);
74645 err = sys_clock_nanosleep(which_clock, flags,
74646- (struct timespec __user *) &in,
74647- (struct timespec __user *) &out);
74648+ (struct timespec __force_user *) &in,
74649+ (struct timespec __force_user *) &out);
74650 set_fs(oldfs);
74651
74652 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
74653diff --git a/kernel/configs.c b/kernel/configs.c
74654index 42e8fa0..9e7406b 100644
74655--- a/kernel/configs.c
74656+++ b/kernel/configs.c
74657@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
74658 struct proc_dir_entry *entry;
74659
74660 /* create the current config file */
74661+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
74662+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
74663+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
74664+ &ikconfig_file_ops);
74665+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
74666+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
74667+ &ikconfig_file_ops);
74668+#endif
74669+#else
74670 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
74671 &ikconfig_file_ops);
74672+#endif
74673+
74674 if (!entry)
74675 return -ENOMEM;
74676
74677diff --git a/kernel/cred.c b/kernel/cred.c
74678index e0573a4..3874e41 100644
74679--- a/kernel/cred.c
74680+++ b/kernel/cred.c
74681@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
74682 validate_creds(cred);
74683 alter_cred_subscribers(cred, -1);
74684 put_cred(cred);
74685+
74686+#ifdef CONFIG_GRKERNSEC_SETXID
74687+ cred = (struct cred *) tsk->delayed_cred;
74688+ if (cred != NULL) {
74689+ tsk->delayed_cred = NULL;
74690+ validate_creds(cred);
74691+ alter_cred_subscribers(cred, -1);
74692+ put_cred(cred);
74693+ }
74694+#endif
74695 }
74696
74697 /**
74698@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
74699 * Always returns 0 thus allowing this function to be tail-called at the end
74700 * of, say, sys_setgid().
74701 */
74702-int commit_creds(struct cred *new)
74703+static int __commit_creds(struct cred *new)
74704 {
74705 struct task_struct *task = current;
74706 const struct cred *old = task->real_cred;
74707@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
74708
74709 get_cred(new); /* we will require a ref for the subj creds too */
74710
74711+ gr_set_role_label(task, new->uid, new->gid);
74712+
74713 /* dumpability changes */
74714 if (!uid_eq(old->euid, new->euid) ||
74715 !gid_eq(old->egid, new->egid) ||
74716@@ -479,6 +491,102 @@ int commit_creds(struct cred *new)
74717 put_cred(old);
74718 return 0;
74719 }
74720+#ifdef CONFIG_GRKERNSEC_SETXID
74721+extern int set_user(struct cred *new);
74722+
74723+void gr_delayed_cred_worker(void)
74724+{
74725+ const struct cred *new = current->delayed_cred;
74726+ struct cred *ncred;
74727+
74728+ current->delayed_cred = NULL;
74729+
74730+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
74731+ // from doing get_cred on it when queueing this
74732+ put_cred(new);
74733+ return;
74734+ } else if (new == NULL)
74735+ return;
74736+
74737+ ncred = prepare_creds();
74738+ if (!ncred)
74739+ goto die;
74740+ // uids
74741+ ncred->uid = new->uid;
74742+ ncred->euid = new->euid;
74743+ ncred->suid = new->suid;
74744+ ncred->fsuid = new->fsuid;
74745+ // gids
74746+ ncred->gid = new->gid;
74747+ ncred->egid = new->egid;
74748+ ncred->sgid = new->sgid;
74749+ ncred->fsgid = new->fsgid;
74750+ // groups
74751+ if (set_groups(ncred, new->group_info) < 0) {
74752+ abort_creds(ncred);
74753+ goto die;
74754+ }
74755+ // caps
74756+ ncred->securebits = new->securebits;
74757+ ncred->cap_inheritable = new->cap_inheritable;
74758+ ncred->cap_permitted = new->cap_permitted;
74759+ ncred->cap_effective = new->cap_effective;
74760+ ncred->cap_bset = new->cap_bset;
74761+
74762+ if (set_user(ncred)) {
74763+ abort_creds(ncred);
74764+ goto die;
74765+ }
74766+
74767+ // from doing get_cred on it when queueing this
74768+ put_cred(new);
74769+
74770+ __commit_creds(ncred);
74771+ return;
74772+die:
74773+ // from doing get_cred on it when queueing this
74774+ put_cred(new);
74775+ do_group_exit(SIGKILL);
74776+}
74777+#endif
74778+
74779+int commit_creds(struct cred *new)
74780+{
74781+#ifdef CONFIG_GRKERNSEC_SETXID
74782+ int ret;
74783+ int schedule_it = 0;
74784+ struct task_struct *t;
74785+
74786+ /* we won't get called with tasklist_lock held for writing
74787+ and interrupts disabled as the cred struct in that case is
74788+ init_cred
74789+ */
74790+ if (grsec_enable_setxid && !current_is_single_threaded() &&
74791+ uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
74792+ !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
74793+ schedule_it = 1;
74794+ }
74795+ ret = __commit_creds(new);
74796+ if (schedule_it) {
74797+ rcu_read_lock();
74798+ read_lock(&tasklist_lock);
74799+ for (t = next_thread(current); t != current;
74800+ t = next_thread(t)) {
74801+ if (t->delayed_cred == NULL) {
74802+ t->delayed_cred = get_cred(new);
74803+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
74804+ set_tsk_need_resched(t);
74805+ }
74806+ }
74807+ read_unlock(&tasklist_lock);
74808+ rcu_read_unlock();
74809+ }
74810+ return ret;
74811+#else
74812+ return __commit_creds(new);
74813+#endif
74814+}
74815+
74816 EXPORT_SYMBOL(commit_creds);
74817
74818 /**
74819diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
74820index 9a61738..c5c8f3a 100644
74821--- a/kernel/debug/debug_core.c
74822+++ b/kernel/debug/debug_core.c
74823@@ -122,7 +122,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
74824 */
74825 static atomic_t masters_in_kgdb;
74826 static atomic_t slaves_in_kgdb;
74827-static atomic_t kgdb_break_tasklet_var;
74828+static atomic_unchecked_t kgdb_break_tasklet_var;
74829 atomic_t kgdb_setting_breakpoint;
74830
74831 struct task_struct *kgdb_usethread;
74832@@ -132,7 +132,7 @@ int kgdb_single_step;
74833 static pid_t kgdb_sstep_pid;
74834
74835 /* to keep track of the CPU which is doing the single stepping*/
74836-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
74837+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
74838
74839 /*
74840 * If you are debugging a problem where roundup (the collection of
74841@@ -540,7 +540,7 @@ return_normal:
74842 * kernel will only try for the value of sstep_tries before
74843 * giving up and continuing on.
74844 */
74845- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
74846+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
74847 (kgdb_info[cpu].task &&
74848 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
74849 atomic_set(&kgdb_active, -1);
74850@@ -634,8 +634,8 @@ cpu_master_loop:
74851 }
74852
74853 kgdb_restore:
74854- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
74855- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
74856+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
74857+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
74858 if (kgdb_info[sstep_cpu].task)
74859 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
74860 else
74861@@ -887,18 +887,18 @@ static void kgdb_unregister_callbacks(void)
74862 static void kgdb_tasklet_bpt(unsigned long ing)
74863 {
74864 kgdb_breakpoint();
74865- atomic_set(&kgdb_break_tasklet_var, 0);
74866+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
74867 }
74868
74869 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
74870
74871 void kgdb_schedule_breakpoint(void)
74872 {
74873- if (atomic_read(&kgdb_break_tasklet_var) ||
74874+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
74875 atomic_read(&kgdb_active) != -1 ||
74876 atomic_read(&kgdb_setting_breakpoint))
74877 return;
74878- atomic_inc(&kgdb_break_tasklet_var);
74879+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
74880 tasklet_schedule(&kgdb_tasklet_breakpoint);
74881 }
74882 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
74883diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
74884index 8875254..7cf4928 100644
74885--- a/kernel/debug/kdb/kdb_main.c
74886+++ b/kernel/debug/kdb/kdb_main.c
74887@@ -1974,7 +1974,7 @@ static int kdb_lsmod(int argc, const char **argv)
74888 continue;
74889
74890 kdb_printf("%-20s%8u 0x%p ", mod->name,
74891- mod->core_size, (void *)mod);
74892+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
74893 #ifdef CONFIG_MODULE_UNLOAD
74894 kdb_printf("%4ld ", module_refcount(mod));
74895 #endif
74896@@ -1984,7 +1984,7 @@ static int kdb_lsmod(int argc, const char **argv)
74897 kdb_printf(" (Loading)");
74898 else
74899 kdb_printf(" (Live)");
74900- kdb_printf(" 0x%p", mod->module_core);
74901+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
74902
74903 #ifdef CONFIG_MODULE_UNLOAD
74904 {
74905diff --git a/kernel/events/core.c b/kernel/events/core.c
74906index 7b6646a..3cb1135 100644
74907--- a/kernel/events/core.c
74908+++ b/kernel/events/core.c
74909@@ -182,7 +182,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
74910 return 0;
74911 }
74912
74913-static atomic64_t perf_event_id;
74914+static atomic64_unchecked_t perf_event_id;
74915
74916 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
74917 enum event_type_t event_type);
74918@@ -2677,7 +2677,7 @@ static void __perf_event_read(void *info)
74919
74920 static inline u64 perf_event_count(struct perf_event *event)
74921 {
74922- return local64_read(&event->count) + atomic64_read(&event->child_count);
74923+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
74924 }
74925
74926 static u64 perf_event_read(struct perf_event *event)
74927@@ -3007,9 +3007,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
74928 mutex_lock(&event->child_mutex);
74929 total += perf_event_read(event);
74930 *enabled += event->total_time_enabled +
74931- atomic64_read(&event->child_total_time_enabled);
74932+ atomic64_read_unchecked(&event->child_total_time_enabled);
74933 *running += event->total_time_running +
74934- atomic64_read(&event->child_total_time_running);
74935+ atomic64_read_unchecked(&event->child_total_time_running);
74936
74937 list_for_each_entry(child, &event->child_list, child_list) {
74938 total += perf_event_read(child);
74939@@ -3412,10 +3412,10 @@ void perf_event_update_userpage(struct perf_event *event)
74940 userpg->offset -= local64_read(&event->hw.prev_count);
74941
74942 userpg->time_enabled = enabled +
74943- atomic64_read(&event->child_total_time_enabled);
74944+ atomic64_read_unchecked(&event->child_total_time_enabled);
74945
74946 userpg->time_running = running +
74947- atomic64_read(&event->child_total_time_running);
74948+ atomic64_read_unchecked(&event->child_total_time_running);
74949
74950 arch_perf_update_userpage(userpg, now);
74951
74952@@ -3974,11 +3974,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
74953 values[n++] = perf_event_count(event);
74954 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
74955 values[n++] = enabled +
74956- atomic64_read(&event->child_total_time_enabled);
74957+ atomic64_read_unchecked(&event->child_total_time_enabled);
74958 }
74959 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
74960 values[n++] = running +
74961- atomic64_read(&event->child_total_time_running);
74962+ atomic64_read_unchecked(&event->child_total_time_running);
74963 }
74964 if (read_format & PERF_FORMAT_ID)
74965 values[n++] = primary_event_id(event);
74966@@ -4721,12 +4721,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
74967 * need to add enough zero bytes after the string to handle
74968 * the 64bit alignment we do later.
74969 */
74970- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
74971+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
74972 if (!buf) {
74973 name = strncpy(tmp, "//enomem", sizeof(tmp));
74974 goto got_name;
74975 }
74976- name = d_path(&file->f_path, buf, PATH_MAX);
74977+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
74978 if (IS_ERR(name)) {
74979 name = strncpy(tmp, "//toolong", sizeof(tmp));
74980 goto got_name;
74981@@ -6165,7 +6165,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
74982 event->parent = parent_event;
74983
74984 event->ns = get_pid_ns(task_active_pid_ns(current));
74985- event->id = atomic64_inc_return(&perf_event_id);
74986+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
74987
74988 event->state = PERF_EVENT_STATE_INACTIVE;
74989
74990@@ -6790,10 +6790,10 @@ static void sync_child_event(struct perf_event *child_event,
74991 /*
74992 * Add back the child's count to the parent's count:
74993 */
74994- atomic64_add(child_val, &parent_event->child_count);
74995- atomic64_add(child_event->total_time_enabled,
74996+ atomic64_add_unchecked(child_val, &parent_event->child_count);
74997+ atomic64_add_unchecked(child_event->total_time_enabled,
74998 &parent_event->child_total_time_enabled);
74999- atomic64_add(child_event->total_time_running,
75000+ atomic64_add_unchecked(child_event->total_time_running,
75001 &parent_event->child_total_time_running);
75002
75003 /*
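Annotation: the perf_event_mmap_event() change fixes buffer headroom, not behavior: the allocation stays PATH_MAX bytes, but d_path() is now told it only has PATH_MAX - sizeof(u64), so the zero padding added later for 64-bit alignment can no longer run off the end of the buffer. A self-contained illustration of the reserve-the-tail pattern (BUFSZ stands in for PATH_MAX):

/* Give the formatter less than the true allocation so that later
 * alignment padding is guaranteed to fit. Illustration only. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#define BUFSZ 64   /* stands in for PATH_MAX */

int main(void)
{
    char *buf = calloc(1, BUFSZ);
    if (!buf)
        return 1;

    /* the formatter may use at most BUFSZ - sizeof(uint64_t) bytes ... */
    snprintf(buf, BUFSZ - sizeof(uint64_t), "/some/long/example/path");

    /* ... so rounding the length up to an 8-byte boundary stays in bounds */
    size_t len = strlen(buf) + 1;
    size_t padded = (len + sizeof(uint64_t) - 1) & ~(sizeof(uint64_t) - 1);
    memset(buf + len, 0, padded - len);

    printf("len=%zu padded=%zu (cap %d)\n", len, padded, BUFSZ);
    free(buf);
    return 0;
}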
75004diff --git a/kernel/exit.c b/kernel/exit.c
75005index b4df219..f13c02d 100644
75006--- a/kernel/exit.c
75007+++ b/kernel/exit.c
75008@@ -170,6 +170,10 @@ void release_task(struct task_struct * p)
75009 struct task_struct *leader;
75010 int zap_leader;
75011 repeat:
75012+#ifdef CONFIG_NET
75013+ gr_del_task_from_ip_table(p);
75014+#endif
75015+
75016 /* don't need to get the RCU readlock here - the process is dead and
75017 * can't be modifying its own credentials. But shut RCU-lockdep up */
75018 rcu_read_lock();
75019@@ -338,7 +342,7 @@ int allow_signal(int sig)
75020 * know it'll be handled, so that they don't get converted to
75021 * SIGKILL or just silently dropped.
75022 */
75023- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
75024+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
75025 recalc_sigpending();
75026 spin_unlock_irq(&current->sighand->siglock);
75027 return 0;
75028@@ -708,6 +712,8 @@ void do_exit(long code)
75029 struct task_struct *tsk = current;
75030 int group_dead;
75031
75032+ set_fs(USER_DS);
75033+
75034 profile_task_exit(tsk);
75035
75036 WARN_ON(blk_needs_flush_plug(tsk));
75037@@ -724,7 +730,6 @@ void do_exit(long code)
75038 * mm_release()->clear_child_tid() from writing to a user-controlled
75039 * kernel address.
75040 */
75041- set_fs(USER_DS);
75042
75043 ptrace_event(PTRACE_EVENT_EXIT, code);
75044
75045@@ -783,6 +788,9 @@ void do_exit(long code)
75046 tsk->exit_code = code;
75047 taskstats_exit(tsk, group_dead);
75048
75049+ gr_acl_handle_psacct(tsk, code);
75050+ gr_acl_handle_exit();
75051+
75052 exit_mm(tsk);
75053
75054 if (group_dead)
75055@@ -903,7 +911,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
75056 * Take down every thread in the group. This is called by fatal signals
75057 * as well as by sys_exit_group (below).
75058 */
75059-void
75060+__noreturn void
75061 do_group_exit(int exit_code)
75062 {
75063 struct signal_struct *sig = current->signal;
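Annotation: two hardening tweaks in the kernel/exit.c hunks: set_fs(USER_DS) is hoisted to the very top of do_exit() so that no exit path, including oops-triggered ones, runs with a kernel data segment, and do_group_exit() is annotated __noreturn. The annotation itself is plain GCC C and can be shown standalone (user-space sketch):

/* __noreturn (as on do_group_exit) lets the compiler check and
 * optimize call sites: nothing after the call needs to exist. */
#include <stdio.h>
#include <stdlib.h>

__attribute__((noreturn)) static void die(int code)
{
    fprintf(stderr, "exiting with %d\n", code);
    exit(code);
    /* falling off the end here would be undefined; gcc/clang warn */
}

int main(void)
{
    if (1)
        die(0);
    /* no return statement needed: the compiler knows die() never returns */
}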
75064diff --git a/kernel/fork.c b/kernel/fork.c
75065index 5630e52..0cee608 100644
75066--- a/kernel/fork.c
75067+++ b/kernel/fork.c
75068@@ -318,7 +318,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
75069 *stackend = STACK_END_MAGIC; /* for overflow detection */
75070
75071 #ifdef CONFIG_CC_STACKPROTECTOR
75072- tsk->stack_canary = get_random_int();
75073+ tsk->stack_canary = pax_get_random_long();
75074 #endif
75075
75076 /*
75077@@ -344,13 +344,81 @@ free_tsk:
75078 }
75079
75080 #ifdef CONFIG_MMU
75081+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
75082+{
75083+ struct vm_area_struct *tmp;
75084+ unsigned long charge;
75085+ struct mempolicy *pol;
75086+ struct file *file;
75087+
75088+ charge = 0;
75089+ if (mpnt->vm_flags & VM_ACCOUNT) {
75090+ unsigned long len = vma_pages(mpnt);
75091+
75092+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
75093+ goto fail_nomem;
75094+ charge = len;
75095+ }
75096+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
75097+ if (!tmp)
75098+ goto fail_nomem;
75099+ *tmp = *mpnt;
75100+ tmp->vm_mm = mm;
75101+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
75102+ pol = mpol_dup(vma_policy(mpnt));
75103+ if (IS_ERR(pol))
75104+ goto fail_nomem_policy;
75105+ vma_set_policy(tmp, pol);
75106+ if (anon_vma_fork(tmp, mpnt))
75107+ goto fail_nomem_anon_vma_fork;
75108+ tmp->vm_flags &= ~VM_LOCKED;
75109+ tmp->vm_next = tmp->vm_prev = NULL;
75110+ tmp->vm_mirror = NULL;
75111+ file = tmp->vm_file;
75112+ if (file) {
75113+ struct inode *inode = file->f_path.dentry->d_inode;
75114+ struct address_space *mapping = file->f_mapping;
75115+
75116+ get_file(file);
75117+ if (tmp->vm_flags & VM_DENYWRITE)
75118+ atomic_dec(&inode->i_writecount);
75119+ mutex_lock(&mapping->i_mmap_mutex);
75120+ if (tmp->vm_flags & VM_SHARED)
75121+ mapping->i_mmap_writable++;
75122+ flush_dcache_mmap_lock(mapping);
75123+ /* insert tmp into the share list, just after mpnt */
75124+ if (unlikely(tmp->vm_flags & VM_NONLINEAR))
75125+ vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
75126+ else
75127+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
75128+ flush_dcache_mmap_unlock(mapping);
75129+ mutex_unlock(&mapping->i_mmap_mutex);
75130+ }
75131+
75132+ /*
75133+ * Clear hugetlb-related page reserves for children. This only
75134+ * affects MAP_PRIVATE mappings. Faults generated by the child
75135+ * are not guaranteed to succeed, even if read-only
75136+ */
75137+ if (is_vm_hugetlb_page(tmp))
75138+ reset_vma_resv_huge_pages(tmp);
75139+
75140+ return tmp;
75141+
75142+fail_nomem_anon_vma_fork:
75143+ mpol_put(pol);
75144+fail_nomem_policy:
75145+ kmem_cache_free(vm_area_cachep, tmp);
75146+fail_nomem:
75147+ vm_unacct_memory(charge);
75148+ return NULL;
75149+}
75150+
75151 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
75152 {
75153 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
75154 struct rb_node **rb_link, *rb_parent;
75155 int retval;
75156- unsigned long charge;
75157- struct mempolicy *pol;
75158
75159 uprobe_start_dup_mmap();
75160 down_write(&oldmm->mmap_sem);
75161@@ -364,8 +432,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
75162 mm->locked_vm = 0;
75163 mm->mmap = NULL;
75164 mm->mmap_cache = NULL;
75165- mm->free_area_cache = oldmm->mmap_base;
75166- mm->cached_hole_size = ~0UL;
75167+ mm->free_area_cache = oldmm->free_area_cache;
75168+ mm->cached_hole_size = oldmm->cached_hole_size;
75169 mm->map_count = 0;
75170 cpumask_clear(mm_cpumask(mm));
75171 mm->mm_rb = RB_ROOT;
75172@@ -381,57 +449,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
75173
75174 prev = NULL;
75175 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
75176- struct file *file;
75177-
75178 if (mpnt->vm_flags & VM_DONTCOPY) {
75179 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
75180 -vma_pages(mpnt));
75181 continue;
75182 }
75183- charge = 0;
75184- if (mpnt->vm_flags & VM_ACCOUNT) {
75185- unsigned long len = vma_pages(mpnt);
75186-
75187- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
75188- goto fail_nomem;
75189- charge = len;
75190- }
75191- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
75192- if (!tmp)
75193- goto fail_nomem;
75194- *tmp = *mpnt;
75195- INIT_LIST_HEAD(&tmp->anon_vma_chain);
75196- pol = mpol_dup(vma_policy(mpnt));
75197- retval = PTR_ERR(pol);
75198- if (IS_ERR(pol))
75199- goto fail_nomem_policy;
75200- vma_set_policy(tmp, pol);
75201- tmp->vm_mm = mm;
75202- if (anon_vma_fork(tmp, mpnt))
75203- goto fail_nomem_anon_vma_fork;
75204- tmp->vm_flags &= ~VM_LOCKED;
75205- tmp->vm_next = tmp->vm_prev = NULL;
75206- file = tmp->vm_file;
75207- if (file) {
75208- struct inode *inode = file->f_path.dentry->d_inode;
75209- struct address_space *mapping = file->f_mapping;
75210-
75211- get_file(file);
75212- if (tmp->vm_flags & VM_DENYWRITE)
75213- atomic_dec(&inode->i_writecount);
75214- mutex_lock(&mapping->i_mmap_mutex);
75215- if (tmp->vm_flags & VM_SHARED)
75216- mapping->i_mmap_writable++;
75217- flush_dcache_mmap_lock(mapping);
75218- /* insert tmp into the share list, just after mpnt */
75219- if (unlikely(tmp->vm_flags & VM_NONLINEAR))
75220- vma_nonlinear_insert(tmp,
75221- &mapping->i_mmap_nonlinear);
75222- else
75223- vma_interval_tree_insert_after(tmp, mpnt,
75224- &mapping->i_mmap);
75225- flush_dcache_mmap_unlock(mapping);
75226- mutex_unlock(&mapping->i_mmap_mutex);
75227+ tmp = dup_vma(mm, oldmm, mpnt);
75228+ if (!tmp) {
75229+ retval = -ENOMEM;
75230+ goto out;
75231 }
75232
75233 /*
75234@@ -463,6 +489,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
75235 if (retval)
75236 goto out;
75237 }
75238+
75239+#ifdef CONFIG_PAX_SEGMEXEC
75240+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
75241+ struct vm_area_struct *mpnt_m;
75242+
75243+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
75244+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
75245+
75246+ if (!mpnt->vm_mirror)
75247+ continue;
75248+
75249+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
75250+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
75251+ mpnt->vm_mirror = mpnt_m;
75252+ } else {
75253+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
75254+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
75255+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
75256+ mpnt->vm_mirror->vm_mirror = mpnt;
75257+ }
75258+ }
75259+ BUG_ON(mpnt_m);
75260+ }
75261+#endif
75262+
75263 /* a new mm has just been created */
75264 arch_dup_mmap(oldmm, mm);
75265 retval = 0;
75266@@ -472,14 +523,6 @@ out:
75267 up_write(&oldmm->mmap_sem);
75268 uprobe_end_dup_mmap();
75269 return retval;
75270-fail_nomem_anon_vma_fork:
75271- mpol_put(pol);
75272-fail_nomem_policy:
75273- kmem_cache_free(vm_area_cachep, tmp);
75274-fail_nomem:
75275- retval = -ENOMEM;
75276- vm_unacct_memory(charge);
75277- goto out;
75278 }
75279
75280 static inline int mm_alloc_pgd(struct mm_struct *mm)
75281@@ -694,8 +737,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
75282 return ERR_PTR(err);
75283
75284 mm = get_task_mm(task);
75285- if (mm && mm != current->mm &&
75286- !ptrace_may_access(task, mode)) {
75287+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
75288+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
75289 mmput(mm);
75290 mm = ERR_PTR(-EACCES);
75291 }
75292@@ -917,13 +960,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
75293 spin_unlock(&fs->lock);
75294 return -EAGAIN;
75295 }
75296- fs->users++;
75297+ atomic_inc(&fs->users);
75298 spin_unlock(&fs->lock);
75299 return 0;
75300 }
75301 tsk->fs = copy_fs_struct(fs);
75302 if (!tsk->fs)
75303 return -ENOMEM;
75304+	/* Carry through gr_chroot_dentry and is_chrooted instead
75305+	   of recomputing them here. They were already copied when the
75306+	   task struct was duplicated. This allows pivot_root to not be
75307+	   treated as a chroot.
75308+	*/
75309+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
75310+
75311 return 0;
75312 }
75313
75314@@ -1196,6 +1246,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
75315 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
75316 #endif
75317 retval = -EAGAIN;
75318+
75319+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
75320+
75321 if (atomic_read(&p->real_cred->user->processes) >=
75322 task_rlimit(p, RLIMIT_NPROC)) {
75323 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
75324@@ -1435,6 +1488,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
75325 goto bad_fork_free_pid;
75326 }
75327
75328+	/* Synchronizes with gr_set_acls(); we need to call this
75329+	   past the point of no return for fork().
75330+	*/
75331+ gr_copy_label(p);
75332+
75333 if (clone_flags & CLONE_THREAD) {
75334 current->signal->nr_threads++;
75335 atomic_inc(&current->signal->live);
75336@@ -1518,6 +1576,8 @@ bad_fork_cleanup_count:
75337 bad_fork_free:
75338 free_task(p);
75339 fork_out:
75340+ gr_log_forkfail(retval);
75341+
75342 return ERR_PTR(retval);
75343 }
75344
75345@@ -1568,6 +1628,23 @@ long do_fork(unsigned long clone_flags,
75346 return -EINVAL;
75347 }
75348
75349+#ifdef CONFIG_GRKERNSEC
75350+ if (clone_flags & CLONE_NEWUSER) {
75351+ /*
75352+ * This doesn't really inspire confidence:
75353+ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
75354+ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
75355+ * Increases kernel attack surface in areas developers
75356+ * previously cared little about ("low importance due
75357+		 * to requiring 'root' capability").
75358+		 * To be removed when this code receives *proper* review.
75359+ */
75360+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
75361+ !capable(CAP_SETGID))
75362+ return -EPERM;
75363+ }
75364+#endif
75365+
75366 /*
75367 * Determine whether and which event to report to ptracer. When
75368 * called from kernel_thread or CLONE_UNTRACED is explicitly
75369@@ -1602,6 +1679,8 @@ long do_fork(unsigned long clone_flags,
75370 if (clone_flags & CLONE_PARENT_SETTID)
75371 put_user(nr, parent_tidptr);
75372
75373+ gr_handle_brute_check();
75374+
75375 if (clone_flags & CLONE_VFORK) {
75376 p->vfork_done = &vfork;
75377 init_completion(&vfork);
75378@@ -1755,7 +1834,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
75379 return 0;
75380
75381 /* don't need lock here; in the worst case we'll do useless copy */
75382- if (fs->users == 1)
75383+ if (atomic_read(&fs->users) == 1)
75384 return 0;
75385
75386 *new_fsp = copy_fs_struct(fs);
75387@@ -1869,7 +1948,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
75388 fs = current->fs;
75389 spin_lock(&fs->lock);
75390 current->fs = new_fs;
75391- if (--fs->users)
75392+ gr_set_chroot_entries(current, &current->fs->root);
75393+ if (atomic_dec_return(&fs->users))
75394 new_fs = NULL;
75395 else
75396 new_fs = fs;
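Annotation: the copy_fs()/unshare_fs()/sys_unshare hunks assume fs_struct.users has become an atomic_t (the struct change lives elsewhere in this patch), so the count can be taken and dropped without holding fs->lock for the arithmetic, and gr_set_chroot_entries() can run on the unshare path. The dec-and-test idiom in C11 atomics (user-space sketch, not the kernel API):

/* Refcount drop in the spirit of atomic_dec_return(&fs->users). */
#include <stdatomic.h>
#include <stdio.h>

struct fs_struct { atomic_int users; };

static void put_fs(struct fs_struct *fs)
{
    /* the thread that observes the count hitting zero frees */
    if (atomic_fetch_sub(&fs->users, 1) == 1)
        printf("last user gone, freeing fs_struct\n");
}

int main(void)
{
    struct fs_struct fs = { .users = 2 };
    put_fs(&fs);   /* one user left */
    put_fs(&fs);   /* prints the free message */
    return 0;
}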
75397diff --git a/kernel/futex.c b/kernel/futex.c
75398index 8879430..31696f1 100644
75399--- a/kernel/futex.c
75400+++ b/kernel/futex.c
75401@@ -54,6 +54,7 @@
75402 #include <linux/mount.h>
75403 #include <linux/pagemap.h>
75404 #include <linux/syscalls.h>
75405+#include <linux/ptrace.h>
75406 #include <linux/signal.h>
75407 #include <linux/export.h>
75408 #include <linux/magic.h>
75409@@ -239,6 +240,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
75410 struct page *page, *page_head;
75411 int err, ro = 0;
75412
75413+#ifdef CONFIG_PAX_SEGMEXEC
75414+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
75415+ return -EFAULT;
75416+#endif
75417+
75418 /*
75419 * The futex address must be "naturally" aligned.
75420 */
75421@@ -2731,6 +2737,7 @@ static int __init futex_init(void)
75422 {
75423 u32 curval;
75424 int i;
75425+ mm_segment_t oldfs;
75426
75427 /*
75428 * This will fail and we want it. Some arch implementations do
75429@@ -2742,8 +2749,11 @@ static int __init futex_init(void)
75430 * implementation, the non-functional ones will return
75431 * -ENOSYS.
75432 */
75433+ oldfs = get_fs();
75434+ set_fs(USER_DS);
75435 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
75436 futex_cmpxchg_enabled = 1;
75437+ set_fs(oldfs);
75438
75439 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
75440 plist_head_init(&futex_queues[i].chain);
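Annotation: futex_init() probes whether the architecture implements atomic user-space cmpxchg by deliberately operating on a NULL user address and expecting -EFAULT; the patch brackets the probe with set_fs(USER_DS) so that, with segment hardening active, NULL is genuinely treated as a user pointer and the fault fires as intended. The same probe-by-expected-failure pattern can be observed from user space (Linux-only sketch; the errno interpretation is an assumption about current kernels):

/* EFAULT means the operation exists and faulted on the bad address;
 * ENOSYS would mean the operation itself is unsupported. */
#define _GNU_SOURCE
#include <errno.h>
#include <linux/futex.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
    long ret = syscall(SYS_futex, NULL, FUTEX_WAIT, 0, NULL, NULL, 0);
    if (ret == -1 && errno == EFAULT)
        printf("futex op supported (faulted on NULL as expected)\n");
    else if (ret == -1 && errno == ENOSYS)
        printf("futex op not supported\n");
    else
        printf("unexpected result %ld (errno %d)\n", ret, errno);
    return 0;
}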
75441diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
75442index a9642d5..51eb98c 100644
75443--- a/kernel/futex_compat.c
75444+++ b/kernel/futex_compat.c
75445@@ -31,7 +31,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
75446 return 0;
75447 }
75448
75449-static void __user *futex_uaddr(struct robust_list __user *entry,
75450+static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
75451 compat_long_t futex_offset)
75452 {
75453 compat_uptr_t base = ptr_to_compat(entry);
75454diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
75455index 9b22d03..6295b62 100644
75456--- a/kernel/gcov/base.c
75457+++ b/kernel/gcov/base.c
75458@@ -102,11 +102,6 @@ void gcov_enable_events(void)
75459 }
75460
75461 #ifdef CONFIG_MODULES
75462-static inline int within(void *addr, void *start, unsigned long size)
75463-{
75464- return ((addr >= start) && (addr < start + size));
75465-}
75466-
75467 /* Update list and generate events when modules are unloaded. */
75468 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
75469 void *data)
75470@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
75471 prev = NULL;
75472 /* Remove entries located in module from linked list. */
75473 for (info = gcov_info_head; info; info = info->next) {
75474- if (within(info, mod->module_core, mod->core_size)) {
75475+ if (within_module_core_rw((unsigned long)info, mod)) {
75476 if (prev)
75477 prev->next = info->next;
75478 else
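Annotation: gcov/base.c drops its private within() helper because, under this patch, a module's core is no longer one contiguous range; within_module_core_rw() checks the RW image instead. For reference, the deleted helper was the classic half-open range test, shown here standalone:

/* Half-open interval test: addr in [start, start + size). */
#include <stdio.h>

static int within(const void *addr, const void *start, unsigned long size)
{
    return (const char *)addr >= (const char *)start &&
           (const char *)addr < (const char *)start + size;
}

int main(void)
{
    char region[16];
    printf("%d %d\n", within(region + 4, region, sizeof(region)),
                      within(region + 16, region, sizeof(region)));
    return 0;   /* prints "1 0": one-past-the-end is outside */
}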
75479diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
75480index cdd5607..c3fc919 100644
75481--- a/kernel/hrtimer.c
75482+++ b/kernel/hrtimer.c
75483@@ -1407,7 +1407,7 @@ void hrtimer_peek_ahead_timers(void)
75484 local_irq_restore(flags);
75485 }
75486
75487-static void run_hrtimer_softirq(struct softirq_action *h)
75488+static void run_hrtimer_softirq(void)
75489 {
75490 struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
75491
75492@@ -1751,7 +1751,7 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
75493 return NOTIFY_OK;
75494 }
75495
75496-static struct notifier_block __cpuinitdata hrtimers_nb = {
75497+static struct notifier_block hrtimers_nb = {
75498 .notifier_call = hrtimer_cpu_notify,
75499 };
75500
75501diff --git a/kernel/jump_label.c b/kernel/jump_label.c
75502index 60f48fa..7f3a770 100644
75503--- a/kernel/jump_label.c
75504+++ b/kernel/jump_label.c
75505@@ -13,6 +13,7 @@
75506 #include <linux/sort.h>
75507 #include <linux/err.h>
75508 #include <linux/static_key.h>
75509+#include <linux/mm.h>
75510
75511 #ifdef HAVE_JUMP_LABEL
75512
75513@@ -50,7 +51,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
75514
75515 size = (((unsigned long)stop - (unsigned long)start)
75516 / sizeof(struct jump_entry));
75517+ pax_open_kernel();
75518 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
75519+ pax_close_kernel();
75520 }
75521
75522 static void jump_label_update(struct static_key *key, int enable);
75523@@ -357,10 +360,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
75524 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
75525 struct jump_entry *iter;
75526
75527+ pax_open_kernel();
75528 for (iter = iter_start; iter < iter_stop; iter++) {
75529 if (within_module_init(iter->code, mod))
75530 iter->code = 0;
75531 }
75532+ pax_close_kernel();
75533 }
75534
75535 static int
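Annotation: the jump_label.c hunks wrap writes to the jump tables in pax_open_kernel()/pax_close_kernel(), since under KERNEXEC those tables live in read-only memory and may only be modified inside such a window. A user-space analogue using mprotect() (illustration only; the kernel primitive is implemented differently):

/* Briefly make a read-only page writable, patch it, re-protect. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long pagesz = sysconf(_SC_PAGESIZE);
    char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED)
        return 1;
    strcpy(p, "immutable");
    mprotect(p, pagesz, PROT_READ);               /* normal state: read-only */

    mprotect(p, pagesz, PROT_READ | PROT_WRITE);  /* "pax_open_kernel" */
    strcpy(p, "patched");                         /* the one sanctioned write */
    mprotect(p, pagesz, PROT_READ);               /* "pax_close_kernel" */

    printf("%s\n", p);
    return 0;
}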
75536diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
75537index 2169fee..706ccca 100644
75538--- a/kernel/kallsyms.c
75539+++ b/kernel/kallsyms.c
75540@@ -11,6 +11,9 @@
75541 * Changed the compression method from stem compression to "table lookup"
75542 * compression (see scripts/kallsyms.c for a more complete description)
75543 */
75544+#ifdef CONFIG_GRKERNSEC_HIDESYM
75545+#define __INCLUDED_BY_HIDESYM 1
75546+#endif
75547 #include <linux/kallsyms.h>
75548 #include <linux/module.h>
75549 #include <linux/init.h>
75550@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
75551
75552 static inline int is_kernel_inittext(unsigned long addr)
75553 {
75554+ if (system_state != SYSTEM_BOOTING)
75555+ return 0;
75556+
75557 if (addr >= (unsigned long)_sinittext
75558 && addr <= (unsigned long)_einittext)
75559 return 1;
75560 return 0;
75561 }
75562
75563+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
75564+#ifdef CONFIG_MODULES
75565+static inline int is_module_text(unsigned long addr)
75566+{
75567+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
75568+ return 1;
75569+
75570+ addr = ktla_ktva(addr);
75571+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
75572+}
75573+#else
75574+static inline int is_module_text(unsigned long addr)
75575+{
75576+ return 0;
75577+}
75578+#endif
75579+#endif
75580+
75581 static inline int is_kernel_text(unsigned long addr)
75582 {
75583 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
75584@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
75585
75586 static inline int is_kernel(unsigned long addr)
75587 {
75588+
75589+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
75590+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
75591+ return 1;
75592+
75593+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
75594+#else
75595 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
75596+#endif
75597+
75598 return 1;
75599 return in_gate_area_no_mm(addr);
75600 }
75601
75602 static int is_ksym_addr(unsigned long addr)
75603 {
75604+
75605+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
75606+ if (is_module_text(addr))
75607+ return 0;
75608+#endif
75609+
75610 if (all_var)
75611 return is_kernel(addr);
75612
75613@@ -470,7 +509,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
75614
75615 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
75616 {
75617- iter->name[0] = '\0';
75618 iter->nameoff = get_symbol_offset(new_pos);
75619 iter->pos = new_pos;
75620 }
75621@@ -518,6 +556,11 @@ static int s_show(struct seq_file *m, void *p)
75622 {
75623 struct kallsym_iter *iter = m->private;
75624
75625+#ifdef CONFIG_GRKERNSEC_HIDESYM
75626+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
75627+ return 0;
75628+#endif
75629+
75630 /* Some debugging symbols have no name. Ignore them. */
75631 if (!iter->name[0])
75632 return 0;
75633@@ -531,6 +574,7 @@ static int s_show(struct seq_file *m, void *p)
75634 */
75635 type = iter->exported ? toupper(iter->type) :
75636 tolower(iter->type);
75637+
75638 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
75639 type, iter->name, iter->module_name);
75640 } else
75641@@ -556,7 +600,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
75642 struct kallsym_iter *iter;
75643 int ret;
75644
75645- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
75646+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
75647 if (!iter)
75648 return -ENOMEM;
75649 reset_iter(iter, 0);
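Annotation: besides the HIDESYM gating, the kallsyms.c hunks swap kmalloc for kzalloc on the iterator and delete the manual iter->name[0] = '\0' reset: zeroing once at allocation means no code path can ever observe stale heap bytes, which is exactly the info-leak class HIDESYM worries about. The user-space equivalent of that trade:

/* Zero-at-allocation vs. remember-to-zero-on-every-path. */
#include <stdio.h>
#include <stdlib.h>

struct iter { char name[32]; long pos; };

int main(void)
{
    struct iter *a = malloc(sizeof(*a));    /* contents indeterminate */
    struct iter *b = calloc(1, sizeof(*b)); /* guaranteed all-zero */
    if (!a || !b)
        return 1;
    a->name[0] = '\0';   /* must be repeated on every path that matters... */
    printf("b->name[0]=%d b->pos=%ld\n", b->name[0], b->pos);
    free(a);
    free(b);
    return 0;
}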
75650diff --git a/kernel/kcmp.c b/kernel/kcmp.c
75651index e30ac0f..3528cac 100644
75652--- a/kernel/kcmp.c
75653+++ b/kernel/kcmp.c
75654@@ -99,6 +99,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
75655 struct task_struct *task1, *task2;
75656 int ret;
75657
75658+#ifdef CONFIG_GRKERNSEC
75659+ return -ENOSYS;
75660+#endif
75661+
75662 rcu_read_lock();
75663
75664 /*
75665diff --git a/kernel/kexec.c b/kernel/kexec.c
75666index 5e4bd78..00c5b91 100644
75667--- a/kernel/kexec.c
75668+++ b/kernel/kexec.c
75669@@ -1045,7 +1045,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
75670 unsigned long flags)
75671 {
75672 struct compat_kexec_segment in;
75673- struct kexec_segment out, __user *ksegments;
75674+ struct kexec_segment out;
75675+ struct kexec_segment __user *ksegments;
75676 unsigned long i, result;
75677
75678 /* Don't allow clients that don't understand the native
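Annotation: the kexec.c change looks cosmetic but is about declarator binding: in "struct kexec_segment out, __user *ksegments;" the sparse __user address-space qualifier attaches only to the second declarator, which is easy to misread, so the patch splits the declaration into two. The same pitfall exists with ordinary pointers:

/* Why combined declarators mislead: only b is a pointer here. */
#include <stdio.h>

int main(void)
{
    int *a, b;   /* a has type int *, b has type int -- not two pointers */
    int x = 5;

    a = &x;
    b = 7;
    printf("*a=%d b=%d sizeof(a)=%zu sizeof(b)=%zu\n",
           *a, b, sizeof(a), sizeof(b));
    return 0;
}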
75679diff --git a/kernel/kmod.c b/kernel/kmod.c
75680index 0023a87..9c0c068 100644
75681--- a/kernel/kmod.c
75682+++ b/kernel/kmod.c
75683@@ -74,7 +74,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
75684 kfree(info->argv);
75685 }
75686
75687-static int call_modprobe(char *module_name, int wait)
75688+static int call_modprobe(char *module_name, char *module_param, int wait)
75689 {
75690 static char *envp[] = {
75691 "HOME=/",
75692@@ -83,7 +83,7 @@ static int call_modprobe(char *module_name, int wait)
75693 NULL
75694 };
75695
75696- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
75697+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
75698 if (!argv)
75699 goto out;
75700
75701@@ -95,7 +95,8 @@ static int call_modprobe(char *module_name, int wait)
75702 argv[1] = "-q";
75703 argv[2] = "--";
75704 argv[3] = module_name; /* check free_modprobe_argv() */
75705- argv[4] = NULL;
75706+ argv[4] = module_param;
75707+ argv[5] = NULL;
75708
75709 return call_usermodehelper_fns(modprobe_path, argv, envp,
75710 wait | UMH_KILLABLE, NULL, free_modprobe_argv, NULL);
75711@@ -120,9 +121,8 @@ out:
75712 * If module auto-loading support is disabled then this function
75713 * becomes a no-operation.
75714 */
75715-int __request_module(bool wait, const char *fmt, ...)
75716+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
75717 {
75718- va_list args;
75719 char module_name[MODULE_NAME_LEN];
75720 unsigned int max_modprobes;
75721 int ret;
75722@@ -130,9 +130,7 @@ int __request_module(bool wait, const char *fmt, ...)
75723 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
75724 static int kmod_loop_msg;
75725
75726- va_start(args, fmt);
75727- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
75728- va_end(args);
75729+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
75730 if (ret >= MODULE_NAME_LEN)
75731 return -ENAMETOOLONG;
75732
75733@@ -140,6 +138,20 @@ int __request_module(bool wait, const char *fmt, ...)
75734 if (ret)
75735 return ret;
75736
75737+#ifdef CONFIG_GRKERNSEC_MODHARDEN
75738+ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
75739+	/* hack to work around consolekit/udisks stupidity */
75740+ read_lock(&tasklist_lock);
75741+ if (!strcmp(current->comm, "mount") &&
75742+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
75743+ read_unlock(&tasklist_lock);
75744+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
75745+ return -EPERM;
75746+ }
75747+ read_unlock(&tasklist_lock);
75748+ }
75749+#endif
75750+
75751 /* If modprobe needs a service that is in a module, we get a recursive
75752 * loop. Limit the number of running kmod threads to max_threads/2 or
75753 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
75754@@ -168,11 +180,52 @@ int __request_module(bool wait, const char *fmt, ...)
75755
75756 trace_module_request(module_name, wait, _RET_IP_);
75757
75758- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
75759+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
75760
75761 atomic_dec(&kmod_concurrent);
75762 return ret;
75763 }
75764+
75765+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
75766+{
75767+ va_list args;
75768+ int ret;
75769+
75770+ va_start(args, fmt);
75771+ ret = ____request_module(wait, module_param, fmt, args);
75772+ va_end(args);
75773+
75774+ return ret;
75775+}
75776+
75777+int __request_module(bool wait, const char *fmt, ...)
75778+{
75779+ va_list args;
75780+ int ret;
75781+
75782+#ifdef CONFIG_GRKERNSEC_MODHARDEN
75783+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
75784+ char module_param[MODULE_NAME_LEN];
75785+
75786+ memset(module_param, 0, sizeof(module_param));
75787+
75788+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
75789+
75790+ va_start(args, fmt);
75791+ ret = ____request_module(wait, module_param, fmt, args);
75792+ va_end(args);
75793+
75794+ return ret;
75795+ }
75796+#endif
75797+
75798+ va_start(args, fmt);
75799+ ret = ____request_module(wait, NULL, fmt, args);
75800+ va_end(args);
75801+
75802+ return ret;
75803+}
75804+
75805 EXPORT_SYMBOL(__request_module);
75806 #endif /* CONFIG_MODULES */
75807
75808@@ -283,7 +336,7 @@ static int wait_for_helper(void *data)
75809 *
75810 * Thus the __user pointer cast is valid here.
75811 */
75812- sys_wait4(pid, (int __user *)&ret, 0, NULL);
75813+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
75814
75815 /*
75816 * If ret is 0, either ____call_usermodehelper failed and the
75817@@ -635,7 +688,7 @@ EXPORT_SYMBOL(call_usermodehelper_fns);
75818 static int proc_cap_handler(struct ctl_table *table, int write,
75819 void __user *buffer, size_t *lenp, loff_t *ppos)
75820 {
75821- struct ctl_table t;
75822+ ctl_table_no_const t;
75823 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
75824 kernel_cap_t new_cap;
75825 int err, i;
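Annotation: the kmod.c rework threads an optional module_param string down to modprobe's argv (hence char *[6] instead of char *[5]) so that MODHARDEN can tag non-root auto-load requests with the requesting uid and let loader-side policy identify them. A hedged sketch of building an argv with an optional trailing slot (/bin/echo stands in for modprobe, and the tag string is hypothetical):

/* Optional trailing argv slot, in the spirit of
 * call_modprobe(module_name, module_param, ...). */
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

static int call_helper(const char *name, const char *param)
{
    const char *argv[6];
    int n = 0;

    argv[n++] = "/bin/echo";
    argv[n++] = "-q";
    argv[n++] = "--";
    argv[n++] = name;
    if (param)              /* the extra slot the patch adds */
        argv[n++] = param;
    argv[n] = NULL;

    pid_t pid = fork();
    if (pid < 0)
        return -1;
    if (pid == 0) {
        execv(argv[0], (char * const *)argv);
        _exit(127);
    }
    int status;
    waitpid(pid, &status, 0);
    return status;
}

int main(void)
{
    call_helper("ext4", NULL);
    call_helper("ext4", "grsec_modharden_normal1000_");  /* hypothetical tag */
    return 0;
}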
75826diff --git a/kernel/kprobes.c b/kernel/kprobes.c
75827index 098f396..fe85ff1 100644
75828--- a/kernel/kprobes.c
75829+++ b/kernel/kprobes.c
75830@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
75831 * kernel image and loaded module images reside. This is required
75832 * so x86_64 can correctly handle the %rip-relative fixups.
75833 */
75834- kip->insns = module_alloc(PAGE_SIZE);
75835+ kip->insns = module_alloc_exec(PAGE_SIZE);
75836 if (!kip->insns) {
75837 kfree(kip);
75838 return NULL;
75839@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
75840 */
75841 if (!list_is_singular(&kip->list)) {
75842 list_del(&kip->list);
75843- module_free(NULL, kip->insns);
75844+ module_free_exec(NULL, kip->insns);
75845 kfree(kip);
75846 }
75847 return 1;
75848@@ -2063,7 +2063,7 @@ static int __init init_kprobes(void)
75849 {
75850 int i, err = 0;
75851 unsigned long offset = 0, size = 0;
75852- char *modname, namebuf[128];
75853+ char *modname, namebuf[KSYM_NAME_LEN];
75854 const char *symbol_name;
75855 void *addr;
75856 struct kprobe_blackpoint *kb;
75857@@ -2148,11 +2148,11 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
75858 kprobe_type = "k";
75859
75860 if (sym)
75861- seq_printf(pi, "%p %s %s+0x%x %s ",
75862+ seq_printf(pi, "%pK %s %s+0x%x %s ",
75863 p->addr, kprobe_type, sym, offset,
75864 (modname ? modname : " "));
75865 else
75866- seq_printf(pi, "%p %s %p ",
75867+ seq_printf(pi, "%pK %s %pK ",
75868 p->addr, kprobe_type, p->addr);
75869
75870 if (!pp)
75871@@ -2190,7 +2190,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
75872 const char *sym = NULL;
75873 unsigned int i = *(loff_t *) v;
75874 unsigned long offset = 0;
75875- char *modname, namebuf[128];
75876+ char *modname, namebuf[KSYM_NAME_LEN];
75877
75878 head = &kprobe_table[i];
75879 preempt_disable();
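Annotation: report_probe() moves from %p to %pK so kprobe addresses printed through debugfs are censored for unprivileged readers according to kptr_restrict; the on-stack name buffer is also sized by KSYM_NAME_LEN rather than a magic 128. The sysctl that governs %pK can be inspected like this (Linux-only sketch; the file may be absent on other kernels):

#include <stdio.h>

int main(void)
{
    FILE *f = fopen("/proc/sys/kernel/kptr_restrict", "r");
    int val;

    if (f && fscanf(f, "%d", &val) == 1)
        printf("kptr_restrict=%d (0: show, 1: root only, 2: always hide)\n",
               val);
    else
        printf("kptr_restrict not available\n");
    if (f)
        fclose(f);
    return 0;
}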
75880diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
75881index 6ada93c..dce7d5d 100644
75882--- a/kernel/ksysfs.c
75883+++ b/kernel/ksysfs.c
75884@@ -46,6 +46,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
75885 {
75886 if (count+1 > UEVENT_HELPER_PATH_LEN)
75887 return -ENOENT;
75888+ if (!capable(CAP_SYS_ADMIN))
75889+ return -EPERM;
75890 memcpy(uevent_helper, buf, count);
75891 uevent_helper[count] = '\0';
75892 if (count && uevent_helper[count-1] == '\n')
75893@@ -172,7 +174,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
75894 return count;
75895 }
75896
75897-static struct bin_attribute notes_attr = {
75898+static bin_attribute_no_const notes_attr __read_only = {
75899 .attr = {
75900 .name = "notes",
75901 .mode = S_IRUGO,
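Annotation: uevent_helper_store() gains a capable(CAP_SYS_ADMIN) check because the stored path is later executed by the kernel; writing it must be a privileged operation even if file permissions are misconfigured. A user-space analogue of gating a configuration write on privilege (geteuid() stands in for capable(); returning -ENOENT for an oversized path mirrors the kernel function's existing quirk):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static char helper_path[256];

static int set_helper(const char *path)
{
    if (geteuid() != 0)       /* stand-in for capable(CAP_SYS_ADMIN) */
        return -EPERM;
    if (strlen(path) + 1 > sizeof(helper_path))
        return -ENOENT;
    strcpy(helper_path, path);
    return 0;
}

int main(void)
{
    int ret = set_helper("/sbin/hotplug");
    printf("set_helper: %s\n", ret ? strerror(-ret) : helper_path);
    return 0;
}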
75902diff --git a/kernel/lockdep.c b/kernel/lockdep.c
75903index 7981e5b..7f2105c 100644
75904--- a/kernel/lockdep.c
75905+++ b/kernel/lockdep.c
75906@@ -590,6 +590,10 @@ static int static_obj(void *obj)
75907 end = (unsigned long) &_end,
75908 addr = (unsigned long) obj;
75909
75910+#ifdef CONFIG_PAX_KERNEXEC
75911+ start = ktla_ktva(start);
75912+#endif
75913+
75914 /*
75915 * static variable?
75916 */
75917@@ -730,6 +734,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
75918 if (!static_obj(lock->key)) {
75919 debug_locks_off();
75920 printk("INFO: trying to register non-static key.\n");
75921+ printk("lock:%pS key:%pS.\n", lock, lock->key);
75922 printk("the code is fine but needs lockdep annotation.\n");
75923 printk("turning off the locking correctness validator.\n");
75924 dump_stack();
75925@@ -3078,7 +3083,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
75926 if (!class)
75927 return 0;
75928 }
75929- atomic_inc((atomic_t *)&class->ops);
75930+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
75931 if (very_verbose(class)) {
75932 printk("\nacquire class [%p] %s", class->key, class->name);
75933 if (class->name_version > 1)
75934diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
75935index b2c71c5..7b88d63 100644
75936--- a/kernel/lockdep_proc.c
75937+++ b/kernel/lockdep_proc.c
75938@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
75939 return 0;
75940 }
75941
75942- seq_printf(m, "%p", class->key);
75943+ seq_printf(m, "%pK", class->key);
75944 #ifdef CONFIG_DEBUG_LOCKDEP
75945 seq_printf(m, " OPS:%8ld", class->ops);
75946 #endif
75947@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
75948
75949 list_for_each_entry(entry, &class->locks_after, entry) {
75950 if (entry->distance == 1) {
75951- seq_printf(m, " -> [%p] ", entry->class->key);
75952+ seq_printf(m, " -> [%pK] ", entry->class->key);
75953 print_name(m, entry->class);
75954 seq_puts(m, "\n");
75955 }
75956@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
75957 if (!class->key)
75958 continue;
75959
75960- seq_printf(m, "[%p] ", class->key);
75961+ seq_printf(m, "[%pK] ", class->key);
75962 print_name(m, class);
75963 seq_puts(m, "\n");
75964 }
75965@@ -495,7 +495,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
75966 if (!i)
75967 seq_line(m, '-', 40-namelen, namelen);
75968
75969- snprintf(ip, sizeof(ip), "[<%p>]",
75970+ snprintf(ip, sizeof(ip), "[<%pK>]",
75971 (void *)class->contention_point[i]);
75972 seq_printf(m, "%40s %14lu %29s %pS\n",
75973 name, stats->contention_point[i],
75974@@ -510,7 +510,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
75975 if (!i)
75976 seq_line(m, '-', 40-namelen, namelen);
75977
75978- snprintf(ip, sizeof(ip), "[<%p>]",
75979+ snprintf(ip, sizeof(ip), "[<%pK>]",
75980 (void *)class->contending_point[i]);
75981 seq_printf(m, "%40s %14lu %29s %pS\n",
75982 name, stats->contending_point[i],
75983diff --git a/kernel/module.c b/kernel/module.c
75984index eab0827..f488603 100644
75985--- a/kernel/module.c
75986+++ b/kernel/module.c
75987@@ -61,6 +61,7 @@
75988 #include <linux/pfn.h>
75989 #include <linux/bsearch.h>
75990 #include <linux/fips.h>
75991+#include <linux/grsecurity.h>
75992 #include <uapi/linux/module.h>
75993 #include "module-internal.h"
75994
75995@@ -156,7 +157,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
75996
75997 /* Bounds of module allocation, for speeding __module_address.
75998 * Protected by module_mutex. */
75999-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
76000+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
76001+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
76002
76003 int register_module_notifier(struct notifier_block * nb)
76004 {
76005@@ -322,7 +324,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
76006 return true;
76007
76008 list_for_each_entry_rcu(mod, &modules, list) {
76009- struct symsearch arr[] = {
76010+ struct symsearch modarr[] = {
76011 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
76012 NOT_GPL_ONLY, false },
76013 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
76014@@ -347,7 +349,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
76015 if (mod->state == MODULE_STATE_UNFORMED)
76016 continue;
76017
76018- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
76019+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
76020 return true;
76021 }
76022 return false;
76023@@ -484,7 +486,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
76024 static int percpu_modalloc(struct module *mod,
76025 unsigned long size, unsigned long align)
76026 {
76027- if (align > PAGE_SIZE) {
76028+ if (align-1 >= PAGE_SIZE) {
76029 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
76030 mod->name, align, PAGE_SIZE);
76031 align = PAGE_SIZE;
76032@@ -1088,7 +1090,7 @@ struct module_attribute module_uevent =
76033 static ssize_t show_coresize(struct module_attribute *mattr,
76034 struct module_kobject *mk, char *buffer)
76035 {
76036- return sprintf(buffer, "%u\n", mk->mod->core_size);
76037+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
76038 }
76039
76040 static struct module_attribute modinfo_coresize =
76041@@ -1097,7 +1099,7 @@ static struct module_attribute modinfo_coresize =
76042 static ssize_t show_initsize(struct module_attribute *mattr,
76043 struct module_kobject *mk, char *buffer)
76044 {
76045- return sprintf(buffer, "%u\n", mk->mod->init_size);
76046+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
76047 }
76048
76049 static struct module_attribute modinfo_initsize =
76050@@ -1311,7 +1313,7 @@ resolve_symbol_wait(struct module *mod,
76051 */
76052 #ifdef CONFIG_SYSFS
76053
76054-#ifdef CONFIG_KALLSYMS
76055+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
76056 static inline bool sect_empty(const Elf_Shdr *sect)
76057 {
76058 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
76059@@ -1451,7 +1453,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
76060 {
76061 unsigned int notes, loaded, i;
76062 struct module_notes_attrs *notes_attrs;
76063- struct bin_attribute *nattr;
76064+ bin_attribute_no_const *nattr;
76065
76066 /* failed to create section attributes, so can't create notes */
76067 if (!mod->sect_attrs)
76068@@ -1563,7 +1565,7 @@ static void del_usage_links(struct module *mod)
76069 static int module_add_modinfo_attrs(struct module *mod)
76070 {
76071 struct module_attribute *attr;
76072- struct module_attribute *temp_attr;
76073+ module_attribute_no_const *temp_attr;
76074 int error = 0;
76075 int i;
76076
76077@@ -1777,21 +1779,21 @@ static void set_section_ro_nx(void *base,
76078
76079 static void unset_module_core_ro_nx(struct module *mod)
76080 {
76081- set_page_attributes(mod->module_core + mod->core_text_size,
76082- mod->module_core + mod->core_size,
76083+ set_page_attributes(mod->module_core_rw,
76084+ mod->module_core_rw + mod->core_size_rw,
76085 set_memory_x);
76086- set_page_attributes(mod->module_core,
76087- mod->module_core + mod->core_ro_size,
76088+ set_page_attributes(mod->module_core_rx,
76089+ mod->module_core_rx + mod->core_size_rx,
76090 set_memory_rw);
76091 }
76092
76093 static void unset_module_init_ro_nx(struct module *mod)
76094 {
76095- set_page_attributes(mod->module_init + mod->init_text_size,
76096- mod->module_init + mod->init_size,
76097+ set_page_attributes(mod->module_init_rw,
76098+ mod->module_init_rw + mod->init_size_rw,
76099 set_memory_x);
76100- set_page_attributes(mod->module_init,
76101- mod->module_init + mod->init_ro_size,
76102+ set_page_attributes(mod->module_init_rx,
76103+ mod->module_init_rx + mod->init_size_rx,
76104 set_memory_rw);
76105 }
76106
76107@@ -1804,14 +1806,14 @@ void set_all_modules_text_rw(void)
76108 list_for_each_entry_rcu(mod, &modules, list) {
76109 if (mod->state == MODULE_STATE_UNFORMED)
76110 continue;
76111- if ((mod->module_core) && (mod->core_text_size)) {
76112- set_page_attributes(mod->module_core,
76113- mod->module_core + mod->core_text_size,
76114+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
76115+ set_page_attributes(mod->module_core_rx,
76116+ mod->module_core_rx + mod->core_size_rx,
76117 set_memory_rw);
76118 }
76119- if ((mod->module_init) && (mod->init_text_size)) {
76120- set_page_attributes(mod->module_init,
76121- mod->module_init + mod->init_text_size,
76122+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
76123+ set_page_attributes(mod->module_init_rx,
76124+ mod->module_init_rx + mod->init_size_rx,
76125 set_memory_rw);
76126 }
76127 }
76128@@ -1827,14 +1829,14 @@ void set_all_modules_text_ro(void)
76129 list_for_each_entry_rcu(mod, &modules, list) {
76130 if (mod->state == MODULE_STATE_UNFORMED)
76131 continue;
76132- if ((mod->module_core) && (mod->core_text_size)) {
76133- set_page_attributes(mod->module_core,
76134- mod->module_core + mod->core_text_size,
76135+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
76136+ set_page_attributes(mod->module_core_rx,
76137+ mod->module_core_rx + mod->core_size_rx,
76138 set_memory_ro);
76139 }
76140- if ((mod->module_init) && (mod->init_text_size)) {
76141- set_page_attributes(mod->module_init,
76142- mod->module_init + mod->init_text_size,
76143+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
76144+ set_page_attributes(mod->module_init_rx,
76145+ mod->module_init_rx + mod->init_size_rx,
76146 set_memory_ro);
76147 }
76148 }
76149@@ -1880,16 +1882,19 @@ static void free_module(struct module *mod)
76150
76151 /* This may be NULL, but that's OK */
76152 unset_module_init_ro_nx(mod);
76153- module_free(mod, mod->module_init);
76154+ module_free(mod, mod->module_init_rw);
76155+ module_free_exec(mod, mod->module_init_rx);
76156 kfree(mod->args);
76157 percpu_modfree(mod);
76158
76159 /* Free lock-classes: */
76160- lockdep_free_key_range(mod->module_core, mod->core_size);
76161+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
76162+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
76163
76164 /* Finally, free the core (containing the module structure) */
76165 unset_module_core_ro_nx(mod);
76166- module_free(mod, mod->module_core);
76167+ module_free_exec(mod, mod->module_core_rx);
76168+ module_free(mod, mod->module_core_rw);
76169
76170 #ifdef CONFIG_MPU
76171 update_protections(current->mm);
76172@@ -1959,9 +1964,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
76173 int ret = 0;
76174 const struct kernel_symbol *ksym;
76175
76176+#ifdef CONFIG_GRKERNSEC_MODHARDEN
76177+ int is_fs_load = 0;
76178+ int register_filesystem_found = 0;
76179+ char *p;
76180+
76181+ p = strstr(mod->args, "grsec_modharden_fs");
76182+ if (p) {
76183+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
76184+ /* copy \0 as well */
76185+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
76186+ is_fs_load = 1;
76187+ }
76188+#endif
76189+
76190 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
76191 const char *name = info->strtab + sym[i].st_name;
76192
76193+#ifdef CONFIG_GRKERNSEC_MODHARDEN
76194+ /* it's a real shame this will never get ripped and copied
76195+ upstream! ;(
76196+ */
76197+ if (is_fs_load && !strcmp(name, "register_filesystem"))
76198+ register_filesystem_found = 1;
76199+#endif
76200+
76201 switch (sym[i].st_shndx) {
76202 case SHN_COMMON:
76203 /* We compiled with -fno-common. These are not
76204@@ -1982,7 +2009,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
76205 ksym = resolve_symbol_wait(mod, info, name);
76206 /* Ok if resolved. */
76207 if (ksym && !IS_ERR(ksym)) {
76208+ pax_open_kernel();
76209 sym[i].st_value = ksym->value;
76210+ pax_close_kernel();
76211 break;
76212 }
76213
76214@@ -2001,11 +2030,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
76215 secbase = (unsigned long)mod_percpu(mod);
76216 else
76217 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
76218+ pax_open_kernel();
76219 sym[i].st_value += secbase;
76220+ pax_close_kernel();
76221 break;
76222 }
76223 }
76224
76225+#ifdef CONFIG_GRKERNSEC_MODHARDEN
76226+ if (is_fs_load && !register_filesystem_found) {
76227+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
76228+ ret = -EPERM;
76229+ }
76230+#endif
76231+
76232 return ret;
76233 }
76234
76235@@ -2089,22 +2127,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
76236 || s->sh_entsize != ~0UL
76237 || strstarts(sname, ".init"))
76238 continue;
76239- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
76240+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
76241+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
76242+ else
76243+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
76244 pr_debug("\t%s\n", sname);
76245 }
76246- switch (m) {
76247- case 0: /* executable */
76248- mod->core_size = debug_align(mod->core_size);
76249- mod->core_text_size = mod->core_size;
76250- break;
76251- case 1: /* RO: text and ro-data */
76252- mod->core_size = debug_align(mod->core_size);
76253- mod->core_ro_size = mod->core_size;
76254- break;
76255- case 3: /* whole core */
76256- mod->core_size = debug_align(mod->core_size);
76257- break;
76258- }
76259 }
76260
76261 pr_debug("Init section allocation order:\n");
76262@@ -2118,23 +2146,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
76263 || s->sh_entsize != ~0UL
76264 || !strstarts(sname, ".init"))
76265 continue;
76266- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
76267- | INIT_OFFSET_MASK);
76268+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
76269+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
76270+ else
76271+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
76272+ s->sh_entsize |= INIT_OFFSET_MASK;
76273 pr_debug("\t%s\n", sname);
76274 }
76275- switch (m) {
76276- case 0: /* executable */
76277- mod->init_size = debug_align(mod->init_size);
76278- mod->init_text_size = mod->init_size;
76279- break;
76280- case 1: /* RO: text and ro-data */
76281- mod->init_size = debug_align(mod->init_size);
76282- mod->init_ro_size = mod->init_size;
76283- break;
76284- case 3: /* whole init */
76285- mod->init_size = debug_align(mod->init_size);
76286- break;
76287- }
76288 }
76289 }
76290
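Annotation: the layout_sections() rework replaces the single core_size/init_size with rx/rw pairs: each SHF_ALLOC section is assigned to the writable image if it is SHF_WRITE (or not allocated at all), otherwise to the executable/read-only image, and the old text/ro watermark switch disappears. A standalone classifier in the same spirit (flag values per the ELF spec; not the kernel code):

/* Writable sections accumulate in the RW image, the rest in RX. */
#include <stdio.h>

#define SHF_WRITE 0x1
#define SHF_ALLOC 0x2

struct layout { unsigned long size_rx, size_rw; };

static unsigned long place(struct layout *l, unsigned long flags,
                           unsigned long sh_size)
{
    unsigned long *bucket =
        ((flags & SHF_WRITE) || !(flags & SHF_ALLOC)) ? &l->size_rw
                                                      : &l->size_rx;
    unsigned long off = *bucket;   /* section offset inside its image */
    *bucket += sh_size;
    return off;
}

int main(void)
{
    struct layout l = { 0, 0 };
    printf(".text   -> rx off %lu\n", place(&l, SHF_ALLOC, 4096));
    printf(".rodata -> rx off %lu\n", place(&l, SHF_ALLOC, 512));
    printf(".data   -> rw off %lu\n", place(&l, SHF_ALLOC | SHF_WRITE, 256));
    printf("totals: rx=%lu rw=%lu\n", l.size_rx, l.size_rw);
    return 0;
}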
76291@@ -2306,7 +2324,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
76292
76293 /* Put symbol section at end of init part of module. */
76294 symsect->sh_flags |= SHF_ALLOC;
76295- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
76296+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
76297 info->index.sym) | INIT_OFFSET_MASK;
76298 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
76299
76300@@ -2323,13 +2341,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
76301 }
76302
76303 /* Append room for core symbols at end of core part. */
76304- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
76305- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
76306- mod->core_size += strtab_size;
76307+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
76308+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
76309+ mod->core_size_rx += strtab_size;
76310
76311 /* Put string table section at end of init part of module. */
76312 strsect->sh_flags |= SHF_ALLOC;
76313- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
76314+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
76315 info->index.str) | INIT_OFFSET_MASK;
76316 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
76317 }
76318@@ -2347,12 +2365,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
76319 /* Make sure we get permanent strtab: don't use info->strtab. */
76320 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
76321
76322+ pax_open_kernel();
76323+
76324 /* Set types up while we still have access to sections. */
76325 for (i = 0; i < mod->num_symtab; i++)
76326 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
76327
76328- mod->core_symtab = dst = mod->module_core + info->symoffs;
76329- mod->core_strtab = s = mod->module_core + info->stroffs;
76330+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
76331+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
76332 src = mod->symtab;
76333 for (ndst = i = 0; i < mod->num_symtab; i++) {
76334 if (i == 0 ||
76335@@ -2364,6 +2384,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
76336 }
76337 }
76338 mod->core_num_syms = ndst;
76339+
76340+ pax_close_kernel();
76341 }
76342 #else
76343 static inline void layout_symtab(struct module *mod, struct load_info *info)
76344@@ -2397,17 +2419,33 @@ void * __weak module_alloc(unsigned long size)
76345 return vmalloc_exec(size);
76346 }
76347
76348-static void *module_alloc_update_bounds(unsigned long size)
76349+static void *module_alloc_update_bounds_rw(unsigned long size)
76350 {
76351 void *ret = module_alloc(size);
76352
76353 if (ret) {
76354 mutex_lock(&module_mutex);
76355 /* Update module bounds. */
76356- if ((unsigned long)ret < module_addr_min)
76357- module_addr_min = (unsigned long)ret;
76358- if ((unsigned long)ret + size > module_addr_max)
76359- module_addr_max = (unsigned long)ret + size;
76360+ if ((unsigned long)ret < module_addr_min_rw)
76361+ module_addr_min_rw = (unsigned long)ret;
76362+ if ((unsigned long)ret + size > module_addr_max_rw)
76363+ module_addr_max_rw = (unsigned long)ret + size;
76364+ mutex_unlock(&module_mutex);
76365+ }
76366+ return ret;
76367+}
76368+
76369+static void *module_alloc_update_bounds_rx(unsigned long size)
76370+{
76371+ void *ret = module_alloc_exec(size);
76372+
76373+ if (ret) {
76374+ mutex_lock(&module_mutex);
76375+ /* Update module bounds. */
76376+ if ((unsigned long)ret < module_addr_min_rx)
76377+ module_addr_min_rx = (unsigned long)ret;
76378+ if ((unsigned long)ret + size > module_addr_max_rx)
76379+ module_addr_max_rx = (unsigned long)ret + size;
76380 mutex_unlock(&module_mutex);
76381 }
76382 return ret;
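Annotation: module_alloc_update_bounds() is duplicated above into _rw and _rx variants, each maintaining its own [min, max) envelope so that __module_address()-style lookups can cheaply reject pointers outside either region. The envelope update itself is tiny (sketch; the kernel does this under module_mutex):

#include <stdio.h>

struct bounds { unsigned long min, max; };

static void update_bounds(struct bounds *b, unsigned long addr,
                          unsigned long size)
{
    if (addr < b->min)
        b->min = addr;
    if (addr + size > b->max)
        b->max = addr + size;
}

int main(void)
{
    struct bounds rw = { ~0UL, 0 };   /* empty envelope: min > max */
    update_bounds(&rw, 0x1000, 0x200);
    update_bounds(&rw, 0x3000, 0x100);
    printf("rw envelope: [0x%lx, 0x%lx)\n", rw.min, rw.max);
    return 0;
}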
76383@@ -2683,8 +2721,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
76384 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
76385 {
76386 const char *modmagic = get_modinfo(info, "vermagic");
76387+ const char *license = get_modinfo(info, "license");
76388 int err;
76389
76390+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
76391+ if (!license || !license_is_gpl_compatible(license))
76392+ return -ENOEXEC;
76393+#endif
76394+
76395 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
76396 modmagic = NULL;
76397
76398@@ -2710,7 +2754,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
76399 }
76400
76401 /* Set up license info based on the info section */
76402- set_license(mod, get_modinfo(info, "license"));
76403+ set_license(mod, license);
76404
76405 return 0;
76406 }
76407@@ -2804,7 +2848,7 @@ static int move_module(struct module *mod, struct load_info *info)
76408 void *ptr;
76409
76410 /* Do the allocs. */
76411- ptr = module_alloc_update_bounds(mod->core_size);
76412+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
76413 /*
76414 * The pointer to this block is stored in the module structure
76415 * which is inside the block. Just mark it as not being a
76416@@ -2814,11 +2858,11 @@ static int move_module(struct module *mod, struct load_info *info)
76417 if (!ptr)
76418 return -ENOMEM;
76419
76420- memset(ptr, 0, mod->core_size);
76421- mod->module_core = ptr;
76422+ memset(ptr, 0, mod->core_size_rw);
76423+ mod->module_core_rw = ptr;
76424
76425- if (mod->init_size) {
76426- ptr = module_alloc_update_bounds(mod->init_size);
76427+ if (mod->init_size_rw) {
76428+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
76429 /*
76430 * The pointer to this block is stored in the module structure
76431 * which is inside the block. This block doesn't need to be
76432@@ -2827,13 +2871,45 @@ static int move_module(struct module *mod, struct load_info *info)
76433 */
76434 kmemleak_ignore(ptr);
76435 if (!ptr) {
76436- module_free(mod, mod->module_core);
76437+ module_free(mod, mod->module_core_rw);
76438 return -ENOMEM;
76439 }
76440- memset(ptr, 0, mod->init_size);
76441- mod->module_init = ptr;
76442+ memset(ptr, 0, mod->init_size_rw);
76443+ mod->module_init_rw = ptr;
76444 } else
76445- mod->module_init = NULL;
76446+ mod->module_init_rw = NULL;
76447+
76448+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
76449+ kmemleak_not_leak(ptr);
76450+ if (!ptr) {
76451+ if (mod->module_init_rw)
76452+ module_free(mod, mod->module_init_rw);
76453+ module_free(mod, mod->module_core_rw);
76454+ return -ENOMEM;
76455+ }
76456+
76457+ pax_open_kernel();
76458+ memset(ptr, 0, mod->core_size_rx);
76459+ pax_close_kernel();
76460+ mod->module_core_rx = ptr;
76461+
76462+ if (mod->init_size_rx) {
76463+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
76464+ kmemleak_ignore(ptr);
76465+ if (!ptr && mod->init_size_rx) {
76466+ module_free_exec(mod, mod->module_core_rx);
76467+ if (mod->module_init_rw)
76468+ module_free(mod, mod->module_init_rw);
76469+ module_free(mod, mod->module_core_rw);
76470+ return -ENOMEM;
76471+ }
76472+
76473+ pax_open_kernel();
76474+ memset(ptr, 0, mod->init_size_rx);
76475+ pax_close_kernel();
76476+ mod->module_init_rx = ptr;
76477+ } else
76478+ mod->module_init_rx = NULL;
76479
76480 /* Transfer each section which specifies SHF_ALLOC */
76481 pr_debug("final section addresses:\n");
76482@@ -2844,16 +2920,45 @@ static int move_module(struct module *mod, struct load_info *info)
76483 if (!(shdr->sh_flags & SHF_ALLOC))
76484 continue;
76485
76486- if (shdr->sh_entsize & INIT_OFFSET_MASK)
76487- dest = mod->module_init
76488- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
76489- else
76490- dest = mod->module_core + shdr->sh_entsize;
76491+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
76492+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
76493+ dest = mod->module_init_rw
76494+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
76495+ else
76496+ dest = mod->module_init_rx
76497+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
76498+ } else {
76499+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
76500+ dest = mod->module_core_rw + shdr->sh_entsize;
76501+ else
76502+ dest = mod->module_core_rx + shdr->sh_entsize;
76503+ }
76504+
76505+ if (shdr->sh_type != SHT_NOBITS) {
76506+
76507+#ifdef CONFIG_PAX_KERNEXEC
76508+#ifdef CONFIG_X86_64
76509+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
76510+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
76511+#endif
76512+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
76513+ pax_open_kernel();
76514+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
76515+ pax_close_kernel();
76516+ } else
76517+#endif
76518
76519- if (shdr->sh_type != SHT_NOBITS)
76520 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
76521+ }
76522 /* Update sh_addr to point to copy in image. */
76523- shdr->sh_addr = (unsigned long)dest;
76524+
76525+#ifdef CONFIG_PAX_KERNEXEC
76526+ if (shdr->sh_flags & SHF_EXECINSTR)
76527+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
76528+ else
76529+#endif
76530+
76531+ shdr->sh_addr = (unsigned long)dest;
76532 pr_debug("\t0x%lx %s\n",
76533 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
76534 }
76535@@ -2908,12 +3013,12 @@ static void flush_module_icache(const struct module *mod)
76536 * Do it before processing of module parameters, so the module
76537 * can provide parameter accessor functions of its own.
76538 */
76539- if (mod->module_init)
76540- flush_icache_range((unsigned long)mod->module_init,
76541- (unsigned long)mod->module_init
76542- + mod->init_size);
76543- flush_icache_range((unsigned long)mod->module_core,
76544- (unsigned long)mod->module_core + mod->core_size);
76545+ if (mod->module_init_rx)
76546+ flush_icache_range((unsigned long)mod->module_init_rx,
76547+ (unsigned long)mod->module_init_rx
76548+ + mod->init_size_rx);
76549+ flush_icache_range((unsigned long)mod->module_core_rx,
76550+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
76551
76552 set_fs(old_fs);
76553 }
76554@@ -2983,8 +3088,10 @@ out:
76555 static void module_deallocate(struct module *mod, struct load_info *info)
76556 {
76557 percpu_modfree(mod);
76558- module_free(mod, mod->module_init);
76559- module_free(mod, mod->module_core);
76560+ module_free_exec(mod, mod->module_init_rx);
76561+ module_free_exec(mod, mod->module_core_rx);
76562+ module_free(mod, mod->module_init_rw);
76563+ module_free(mod, mod->module_core_rw);
76564 }
76565
76566 int __weak module_finalize(const Elf_Ehdr *hdr,
76567@@ -2997,7 +3104,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
76568 static int post_relocation(struct module *mod, const struct load_info *info)
76569 {
76570 /* Sort exception table now relocations are done. */
76571+ pax_open_kernel();
76572 sort_extable(mod->extable, mod->extable + mod->num_exentries);
76573+ pax_close_kernel();
76574
76575 /* Copy relocated percpu area over. */
76576 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
76577@@ -3051,16 +3160,16 @@ static int do_init_module(struct module *mod)
76578 MODULE_STATE_COMING, mod);
76579
76580 /* Set RO and NX regions for core */
76581- set_section_ro_nx(mod->module_core,
76582- mod->core_text_size,
76583- mod->core_ro_size,
76584- mod->core_size);
76585+ set_section_ro_nx(mod->module_core_rx,
76586+ mod->core_size_rx,
76587+ mod->core_size_rx,
76588+ mod->core_size_rx);
76589
76590 /* Set RO and NX regions for init */
76591- set_section_ro_nx(mod->module_init,
76592- mod->init_text_size,
76593- mod->init_ro_size,
76594- mod->init_size);
76595+ set_section_ro_nx(mod->module_init_rx,
76596+ mod->init_size_rx,
76597+ mod->init_size_rx,
76598+ mod->init_size_rx);
76599
76600 do_mod_ctors(mod);
76601 /* Start the module */
76602@@ -3122,11 +3231,12 @@ static int do_init_module(struct module *mod)
76603 mod->strtab = mod->core_strtab;
76604 #endif
76605 unset_module_init_ro_nx(mod);
76606- module_free(mod, mod->module_init);
76607- mod->module_init = NULL;
76608- mod->init_size = 0;
76609- mod->init_ro_size = 0;
76610- mod->init_text_size = 0;
76611+ module_free(mod, mod->module_init_rw);
76612+ module_free_exec(mod, mod->module_init_rx);
76613+ mod->module_init_rw = NULL;
76614+ mod->module_init_rx = NULL;
76615+ mod->init_size_rw = 0;
76616+ mod->init_size_rx = 0;
76617 mutex_unlock(&module_mutex);
76618 wake_up_all(&module_wq);
76619
76620@@ -3209,9 +3319,38 @@ again:
76621 if (err)
76622 goto free_unload;
76623
76624+ /* Now copy in args */
76625+ mod->args = strndup_user(uargs, ~0UL >> 1);
76626+ if (IS_ERR(mod->args)) {
76627+ err = PTR_ERR(mod->args);
76628+ goto free_unload;
76629+ }
76630+
76631 /* Set up MODINFO_ATTR fields */
76632 setup_modinfo(mod, info);
76633
76634+#ifdef CONFIG_GRKERNSEC_MODHARDEN
76635+ {
76636+ char *p, *p2;
76637+
76638+ if (strstr(mod->args, "grsec_modharden_netdev")) {
76639+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
76640+ err = -EPERM;
76641+ goto free_modinfo;
76642+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
76643+ p += sizeof("grsec_modharden_normal") - 1;
76644+ p2 = strstr(p, "_");
76645+ if (p2) {
76646+ *p2 = '\0';
76647+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
76648+ *p2 = '_';
76649+ }
76650+ err = -EPERM;
76651+ goto free_modinfo;
76652+ }
76653+ }
76654+#endif
76655+
76656 /* Fix up syms, so that st_value is a pointer to location. */
76657 err = simplify_symbols(mod, info);
76658 if (err < 0)
76659@@ -3227,13 +3366,6 @@ again:
76660
76661 flush_module_icache(mod);
76662
76663- /* Now copy in args */
76664- mod->args = strndup_user(uargs, ~0UL >> 1);
76665- if (IS_ERR(mod->args)) {
76666- err = PTR_ERR(mod->args);
76667- goto free_arch_cleanup;
76668- }
76669-
76670 dynamic_debug_setup(info->debug, info->num_debug);
76671
76672 mutex_lock(&module_mutex);
76673@@ -3278,11 +3410,10 @@ again:
76674 mutex_unlock(&module_mutex);
76675 dynamic_debug_remove(info->debug);
76676 synchronize_sched();
76677- kfree(mod->args);
76678- free_arch_cleanup:
76679 module_arch_cleanup(mod);
76680 free_modinfo:
76681 free_modinfo(mod);
76682+ kfree(mod->args);
76683 free_unload:
76684 module_unload_free(mod);
76685 unlink_mod:
76686@@ -3365,10 +3496,16 @@ static const char *get_ksymbol(struct module *mod,
76687 unsigned long nextval;
76688
76689 /* At worse, next value is at end of module */
76690- if (within_module_init(addr, mod))
76691- nextval = (unsigned long)mod->module_init+mod->init_text_size;
76692+ if (within_module_init_rx(addr, mod))
76693+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
76694+ else if (within_module_init_rw(addr, mod))
76695+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
76696+ else if (within_module_core_rx(addr, mod))
76697+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
76698+ else if (within_module_core_rw(addr, mod))
76699+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
76700 else
76701- nextval = (unsigned long)mod->module_core+mod->core_text_size;
76702+ return NULL;
76703
76704 /* Scan for closest preceding symbol, and next symbol. (ELF
76705 starts real symbols at 1). */
76706@@ -3621,7 +3758,7 @@ static int m_show(struct seq_file *m, void *p)
76707 return 0;
76708
76709 seq_printf(m, "%s %u",
76710- mod->name, mod->init_size + mod->core_size);
76711+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
76712 print_unload_info(m, mod);
76713
76714 /* Informative for users. */
76715@@ -3630,7 +3767,7 @@ static int m_show(struct seq_file *m, void *p)
76716 mod->state == MODULE_STATE_COMING ? "Loading":
76717 "Live");
76718 /* Used by oprofile and other similar tools. */
76719- seq_printf(m, " 0x%pK", mod->module_core);
76720+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
76721
76722 /* Taints info */
76723 if (mod->taints)
76724@@ -3666,7 +3803,17 @@ static const struct file_operations proc_modules_operations = {
76725
76726 static int __init proc_modules_init(void)
76727 {
76728+#ifndef CONFIG_GRKERNSEC_HIDESYM
76729+#ifdef CONFIG_GRKERNSEC_PROC_USER
76730+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
76731+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
76732+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
76733+#else
76734 proc_create("modules", 0, NULL, &proc_modules_operations);
76735+#endif
76736+#else
76737+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
76738+#endif
76739 return 0;
76740 }
76741 module_init(proc_modules_init);
76742@@ -3727,14 +3874,14 @@ struct module *__module_address(unsigned long addr)
76743 {
76744 struct module *mod;
76745
76746- if (addr < module_addr_min || addr > module_addr_max)
76747+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
76748+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
76749 return NULL;
76750
76751 list_for_each_entry_rcu(mod, &modules, list) {
76752 if (mod->state == MODULE_STATE_UNFORMED)
76753 continue;
76754- if (within_module_core(addr, mod)
76755- || within_module_init(addr, mod))
76756+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
76757 return mod;
76758 }
76759 return NULL;
76760@@ -3769,11 +3916,20 @@ bool is_module_text_address(unsigned long addr)
76761 */
76762 struct module *__module_text_address(unsigned long addr)
76763 {
76764- struct module *mod = __module_address(addr);
76765+ struct module *mod;
76766+
76767+#ifdef CONFIG_X86_32
76768+ addr = ktla_ktva(addr);
76769+#endif
76770+
76771+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
76772+ return NULL;
76773+
76774+ mod = __module_address(addr);
76775+
76776 if (mod) {
76777 /* Make sure it's within the text section. */
76778- if (!within(addr, mod->module_init, mod->init_text_size)
76779- && !within(addr, mod->module_core, mod->core_text_size))
76780+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
76781 mod = NULL;
76782 }
76783 return mod;
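
The module.c hunks above split every module mapping in two: writable data lands in a module_core_rw/module_init_rw allocation obtained through module_alloc_update_bounds_rw(), while code and read-only data land in an RX allocation that is only ever written between pax_open_kernel()/pax_close_kernel(). Section placement is keyed off SHF_WRITE. A minimal user-space model of that placement rule follows; the SHF_* values mirror elf.h, but region_for() and the region-name strings are illustrative, not kernel API:

#include <stdio.h>

#define SHF_WRITE     0x1   /* values mirror elf.h */
#define SHF_ALLOC     0x2
#define SHF_EXECINSTR 0x4

/* Model of the placement rule: writable sections land in the RW
 * region, every other allocatable section lands in the RX region. */
static const char *region_for(unsigned long sh_flags, int is_init)
{
	if (!(sh_flags & SHF_ALLOC))
		return "not copied";
	if (sh_flags & SHF_WRITE)
		return is_init ? "module_init_rw" : "module_core_rw";
	return is_init ? "module_init_rx" : "module_core_rx";
}

int main(void)
{
	printf(".text   -> %s\n", region_for(SHF_ALLOC | SHF_EXECINSTR, 0));
	printf(".data   -> %s\n", region_for(SHF_ALLOC | SHF_WRITE, 0));
	printf(".rodata -> %s\n", region_for(SHF_ALLOC, 0));
	return 0;
}

Running it places .text and .rodata in the RX region and .data in the RW region, which is why a failure of either allocation in move_module() has to unwind the other before returning -ENOMEM.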
76784diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
76785index 7e3443f..b2a1e6b 100644
76786--- a/kernel/mutex-debug.c
76787+++ b/kernel/mutex-debug.c
76788@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
76789 }
76790
76791 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
76792- struct thread_info *ti)
76793+ struct task_struct *task)
76794 {
76795 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
76796
76797 /* Mark the current thread as blocked on the lock: */
76798- ti->task->blocked_on = waiter;
76799+ task->blocked_on = waiter;
76800 }
76801
76802 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
76803- struct thread_info *ti)
76804+ struct task_struct *task)
76805 {
76806 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
76807- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
76808- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
76809- ti->task->blocked_on = NULL;
76810+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
76811+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
76812+ task->blocked_on = NULL;
76813
76814 list_del_init(&waiter->list);
76815 waiter->task = NULL;
76816diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
76817index 0799fd3..d06ae3b 100644
76818--- a/kernel/mutex-debug.h
76819+++ b/kernel/mutex-debug.h
76820@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
76821 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
76822 extern void debug_mutex_add_waiter(struct mutex *lock,
76823 struct mutex_waiter *waiter,
76824- struct thread_info *ti);
76825+ struct task_struct *task);
76826 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
76827- struct thread_info *ti);
76828+ struct task_struct *task);
76829 extern void debug_mutex_unlock(struct mutex *lock);
76830 extern void debug_mutex_init(struct mutex *lock, const char *name,
76831 struct lock_class_key *key);
76832diff --git a/kernel/mutex.c b/kernel/mutex.c
76833index a307cc9..27fd2e9 100644
76834--- a/kernel/mutex.c
76835+++ b/kernel/mutex.c
76836@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
76837 spin_lock_mutex(&lock->wait_lock, flags);
76838
76839 debug_mutex_lock_common(lock, &waiter);
76840- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
76841+ debug_mutex_add_waiter(lock, &waiter, task);
76842
76843 /* add waiting tasks to the end of the waitqueue (FIFO): */
76844 list_add_tail(&waiter.list, &lock->wait_list);
76845@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
76846 * TASK_UNINTERRUPTIBLE case.)
76847 */
76848 if (unlikely(signal_pending_state(state, task))) {
76849- mutex_remove_waiter(lock, &waiter,
76850- task_thread_info(task));
76851+ mutex_remove_waiter(lock, &waiter, task);
76852 mutex_release(&lock->dep_map, 1, ip);
76853 spin_unlock_mutex(&lock->wait_lock, flags);
76854
76855@@ -247,7 +246,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
76856 done:
76857 lock_acquired(&lock->dep_map, ip);
76858 /* got the lock - rejoice! */
76859- mutex_remove_waiter(lock, &waiter, current_thread_info());
76860+ mutex_remove_waiter(lock, &waiter, task);
76861 mutex_set_owner(lock);
76862
76863 /* set it to 0 if there are no waiters left: */
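
The mutex-debug changes are an interface cleanup: every caller already holds the task_struct, so the thread_info parameter was a detour each helper immediately resolved back to the task. A toy illustration of the old and new call shapes, with stand-in types rather than the kernel's:

#include <assert.h>
#include <stdio.h>

struct task;                           /* stand-in for task_struct */
struct thread_info { struct task *task; };

struct task {
	struct thread_info info;       /* stand-in for task_thread_info() */
	void *blocked_on;
};

/* Old shape: take thread_info, immediately dereference back to the task. */
static void add_waiter_old(struct thread_info *ti, void *waiter)
{
	ti->task->blocked_on = waiter;
}

/* New shape: take the task directly. */
static void add_waiter_new(struct task *task, void *waiter)
{
	task->blocked_on = waiter;
}

int main(void)
{
	struct task t = { .info = { .task = &t } };
	int w;

	add_waiter_old(&t.info, &w);
	assert(t.blocked_on == &w);
	t.blocked_on = NULL;
	add_waiter_new(&t, &w);
	assert(t.blocked_on == &w);
	printf("both shapes record the waiter\n");
	return 0;
}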
76864diff --git a/kernel/notifier.c b/kernel/notifier.c
76865index 2d5cc4c..d9ea600 100644
76866--- a/kernel/notifier.c
76867+++ b/kernel/notifier.c
76868@@ -5,6 +5,7 @@
76869 #include <linux/rcupdate.h>
76870 #include <linux/vmalloc.h>
76871 #include <linux/reboot.h>
76872+#include <linux/mm.h>
76873
76874 /*
76875 * Notifier list for kernel code which wants to be called
76876@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
76877 while ((*nl) != NULL) {
76878 if (n->priority > (*nl)->priority)
76879 break;
76880- nl = &((*nl)->next);
76881+ nl = (struct notifier_block **)&((*nl)->next);
76882 }
76883- n->next = *nl;
76884+ pax_open_kernel();
76885+ *(const void **)&n->next = *nl;
76886 rcu_assign_pointer(*nl, n);
76887+ pax_close_kernel();
76888 return 0;
76889 }
76890
76891@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
76892 return 0;
76893 if (n->priority > (*nl)->priority)
76894 break;
76895- nl = &((*nl)->next);
76896+ nl = (struct notifier_block **)&((*nl)->next);
76897 }
76898- n->next = *nl;
76899+ pax_open_kernel();
76900+ *(const void **)&n->next = *nl;
76901 rcu_assign_pointer(*nl, n);
76902+ pax_close_kernel();
76903 return 0;
76904 }
76905
76906@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
76907 {
76908 while ((*nl) != NULL) {
76909 if ((*nl) == n) {
76910+ pax_open_kernel();
76911 rcu_assign_pointer(*nl, n->next);
76912+ pax_close_kernel();
76913 return 0;
76914 }
76915- nl = &((*nl)->next);
76916+ nl = (struct notifier_block **)&((*nl)->next);
76917 }
76918 return -ENOENT;
76919 }
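
notifier_chain_register() keeps the chain sorted by descending priority: it walks until it meets the first entry whose priority is lower than the new block's, then links the block in at that point. The grsecurity version only changes how the two stores are performed (inside pax_open_kernel()/pax_close_kernel(), since notifier blocks may now live in read-only memory); the ordering logic is untouched. A user-space model of that insertion:

#include <stdio.h>

struct notifier_block {
	int priority;
	struct notifier_block *next;
};

/* Insert n so the chain stays sorted by descending priority,
 * mirroring notifier_chain_register()'s walk. */
static void chain_register(struct notifier_block **nl, struct notifier_block *n)
{
	while (*nl != NULL) {
		if (n->priority > (*nl)->priority)
			break;
		nl = &(*nl)->next;
	}
	n->next = *nl;
	*nl = n;
}

int main(void)
{
	struct notifier_block a = { .priority = 0 }, b = { .priority = 10 },
			      c = { .priority = 5 };
	struct notifier_block *head = NULL, *p;

	chain_register(&head, &a);
	chain_register(&head, &b);
	chain_register(&head, &c);
	for (p = head; p; p = p->next)
		printf("%d ", p->priority);   /* prints: 10 5 0 */
	printf("\n");
	return 0;
}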
76920diff --git a/kernel/panic.c b/kernel/panic.c
76921index e1b2822..5edc1d9 100644
76922--- a/kernel/panic.c
76923+++ b/kernel/panic.c
76924@@ -410,7 +410,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
76925 const char *board;
76926
76927 printk(KERN_WARNING "------------[ cut here ]------------\n");
76928- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
76929+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
76930 board = dmi_get_system_info(DMI_PRODUCT_NAME);
76931 if (board)
76932 printk(KERN_WARNING "Hardware name: %s\n", board);
76933@@ -465,7 +465,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
76934 */
76935 void __stack_chk_fail(void)
76936 {
76937- panic("stack-protector: Kernel stack is corrupted in: %p\n",
76938+ dump_stack();
76939+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
76940 __builtin_return_address(0));
76941 }
76942 EXPORT_SYMBOL(__stack_chk_fail);
76943diff --git a/kernel/pid.c b/kernel/pid.c
76944index f2c6a68..4922d97 100644
76945--- a/kernel/pid.c
76946+++ b/kernel/pid.c
76947@@ -33,6 +33,7 @@
76948 #include <linux/rculist.h>
76949 #include <linux/bootmem.h>
76950 #include <linux/hash.h>
76951+#include <linux/security.h>
76952 #include <linux/pid_namespace.h>
76953 #include <linux/init_task.h>
76954 #include <linux/syscalls.h>
76955@@ -46,7 +47,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
76956
76957 int pid_max = PID_MAX_DEFAULT;
76958
76959-#define RESERVED_PIDS 300
76960+#define RESERVED_PIDS 500
76961
76962 int pid_max_min = RESERVED_PIDS + 1;
76963 int pid_max_max = PID_MAX_LIMIT;
76964@@ -441,10 +442,18 @@ EXPORT_SYMBOL(pid_task);
76965 */
76966 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
76967 {
76968+ struct task_struct *task;
76969+
76970 rcu_lockdep_assert(rcu_read_lock_held(),
76971 "find_task_by_pid_ns() needs rcu_read_lock()"
76972 " protection");
76973- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
76974+
76975+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
76976+
76977+ if (gr_pid_is_chrooted(task))
76978+ return NULL;
76979+
76980+ return task;
76981 }
76982
76983 struct task_struct *find_task_by_vpid(pid_t vnr)
76984@@ -452,6 +461,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
76985 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
76986 }
76987
76988+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
76989+{
76990+ rcu_lockdep_assert(rcu_read_lock_held(),
76991+ "find_task_by_pid_ns() needs rcu_read_lock()"
76992+ " protection");
76993+ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
76994+}
76995+
76996 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
76997 {
76998 struct pid *pid;
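
find_task_by_pid_ns() now refuses to return tasks that gr_pid_is_chrooted() says live outside the caller's chroot, and find_task_by_vpid_unrestricted() is added for the few callers that must still see every task. The pattern is a filtered lookup with an explicit escape hatch; a sketch with a hypothetical pid_is_chrooted() predicate standing in for the grsecurity check:

#include <stdio.h>
#include <stddef.h>

struct task { int pid; int chrooted; };

static struct task tasks[] = { { 1, 0 }, { 42, 1 } };

static struct task *raw_lookup(int pid)
{
	for (size_t i = 0; i < sizeof(tasks) / sizeof(tasks[0]); i++)
		if (tasks[i].pid == pid)
			return &tasks[i];
	return NULL;
}

/* Hypothetical stand-in for gr_pid_is_chrooted(). */
static int pid_is_chrooted(struct task *t) { return t && t->chrooted; }

/* Filtered lookup: callers cannot see tasks outside their jail. */
static struct task *find_task(int pid)
{
	struct task *t = raw_lookup(pid);
	return pid_is_chrooted(t) ? NULL : t;
}

/* Unrestricted variant for callers that must see everything. */
static struct task *find_task_unrestricted(int pid) { return raw_lookup(pid); }

int main(void)
{
	printf("filtered 42:     %p\n", (void *)find_task(42));
	printf("unrestricted 42: %p\n", (void *)find_task_unrestricted(42));
	return 0;
}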
76999diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
77000index bea15bd..789f3d0 100644
77001--- a/kernel/pid_namespace.c
77002+++ b/kernel/pid_namespace.c
77003@@ -249,7 +249,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
77004 void __user *buffer, size_t *lenp, loff_t *ppos)
77005 {
77006 struct pid_namespace *pid_ns = task_active_pid_ns(current);
77007- struct ctl_table tmp = *table;
77008+ ctl_table_no_const tmp = *table;
77009
77010 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
77011 return -EPERM;
77012diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
77013index 942ca27..111e609 100644
77014--- a/kernel/posix-cpu-timers.c
77015+++ b/kernel/posix-cpu-timers.c
77016@@ -1576,14 +1576,14 @@ struct k_clock clock_posix_cpu = {
77017
77018 static __init int init_posix_cpu_timers(void)
77019 {
77020- struct k_clock process = {
77021+ static struct k_clock process = {
77022 .clock_getres = process_cpu_clock_getres,
77023 .clock_get = process_cpu_clock_get,
77024 .timer_create = process_cpu_timer_create,
77025 .nsleep = process_cpu_nsleep,
77026 .nsleep_restart = process_cpu_nsleep_restart,
77027 };
77028- struct k_clock thread = {
77029+ static struct k_clock thread = {
77030 .clock_getres = thread_cpu_clock_getres,
77031 .clock_get = thread_cpu_clock_get,
77032 .timer_create = thread_cpu_timer_create,
77033diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
77034index e885be1..380fe76 100644
77035--- a/kernel/posix-timers.c
77036+++ b/kernel/posix-timers.c
77037@@ -43,6 +43,7 @@
77038 #include <linux/idr.h>
77039 #include <linux/posix-clock.h>
77040 #include <linux/posix-timers.h>
77041+#include <linux/grsecurity.h>
77042 #include <linux/syscalls.h>
77043 #include <linux/wait.h>
77044 #include <linux/workqueue.h>
77045@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
77046 * which we beg off on and pass to do_sys_settimeofday().
77047 */
77048
77049-static struct k_clock posix_clocks[MAX_CLOCKS];
77050+static struct k_clock *posix_clocks[MAX_CLOCKS];
77051
77052 /*
77053 * These ones are defined below.
77054@@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
77055 */
77056 static __init int init_posix_timers(void)
77057 {
77058- struct k_clock clock_realtime = {
77059+ static struct k_clock clock_realtime = {
77060 .clock_getres = hrtimer_get_res,
77061 .clock_get = posix_clock_realtime_get,
77062 .clock_set = posix_clock_realtime_set,
77063@@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
77064 .timer_get = common_timer_get,
77065 .timer_del = common_timer_del,
77066 };
77067- struct k_clock clock_monotonic = {
77068+ static struct k_clock clock_monotonic = {
77069 .clock_getres = hrtimer_get_res,
77070 .clock_get = posix_ktime_get_ts,
77071 .nsleep = common_nsleep,
77072@@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
77073 .timer_get = common_timer_get,
77074 .timer_del = common_timer_del,
77075 };
77076- struct k_clock clock_monotonic_raw = {
77077+ static struct k_clock clock_monotonic_raw = {
77078 .clock_getres = hrtimer_get_res,
77079 .clock_get = posix_get_monotonic_raw,
77080 };
77081- struct k_clock clock_realtime_coarse = {
77082+ static struct k_clock clock_realtime_coarse = {
77083 .clock_getres = posix_get_coarse_res,
77084 .clock_get = posix_get_realtime_coarse,
77085 };
77086- struct k_clock clock_monotonic_coarse = {
77087+ static struct k_clock clock_monotonic_coarse = {
77088 .clock_getres = posix_get_coarse_res,
77089 .clock_get = posix_get_monotonic_coarse,
77090 };
77091- struct k_clock clock_boottime = {
77092+ static struct k_clock clock_boottime = {
77093 .clock_getres = hrtimer_get_res,
77094 .clock_get = posix_get_boottime,
77095 .nsleep = common_nsleep,
77096@@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
77097 return;
77098 }
77099
77100- posix_clocks[clock_id] = *new_clock;
77101+ posix_clocks[clock_id] = new_clock;
77102 }
77103 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
77104
77105@@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
77106 return (id & CLOCKFD_MASK) == CLOCKFD ?
77107 &clock_posix_dynamic : &clock_posix_cpu;
77108
77109- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
77110+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
77111 return NULL;
77112- return &posix_clocks[id];
77113+ return posix_clocks[id];
77114 }
77115
77116 static int common_timer_create(struct k_itimer *new_timer)
77117@@ -966,6 +967,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
77118 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
77119 return -EFAULT;
77120
77121+ /* Only the CLOCK_REALTIME clock can be set; all other clocks
77122+ have their clock_set fptr set to a nosettime dummy function.
77123+ CLOCK_REALTIME's clock_set fptr is posix_clock_realtime_set,
77124+ which calls do_sys_settimeofday, and do_sys_settimeofday is
77125+ the function we hook.
77126+ */
77127+
77128 return kc->clock_set(which_clock, &new_tp);
77129 }
77130
77131diff --git a/kernel/power/process.c b/kernel/power/process.c
77132index d5a258b..4271191 100644
77133--- a/kernel/power/process.c
77134+++ b/kernel/power/process.c
77135@@ -33,6 +33,7 @@ static int try_to_freeze_tasks(bool user_only)
77136 u64 elapsed_csecs64;
77137 unsigned int elapsed_csecs;
77138 bool wakeup = false;
77139+ bool timedout = false;
77140
77141 do_gettimeofday(&start);
77142
77143@@ -43,13 +44,20 @@ static int try_to_freeze_tasks(bool user_only)
77144
77145 while (true) {
77146 todo = 0;
77147+ if (time_after(jiffies, end_time))
77148+ timedout = true;
77149 read_lock(&tasklist_lock);
77150 do_each_thread(g, p) {
77151 if (p == current || !freeze_task(p))
77152 continue;
77153
77154- if (!freezer_should_skip(p))
77155+ if (!freezer_should_skip(p)) {
77156 todo++;
77157+ if (timedout) {
77158+ printk(KERN_ERR "Task refusing to freeze:\n");
77159+ sched_show_task(p);
77160+ }
77161+ }
77162 } while_each_thread(g, p);
77163 read_unlock(&tasklist_lock);
77164
77165@@ -58,7 +66,7 @@ static int try_to_freeze_tasks(bool user_only)
77166 todo += wq_busy;
77167 }
77168
77169- if (!todo || time_after(jiffies, end_time))
77170+ if (!todo || timedout)
77171 break;
77172
77173 if (pm_wakeup_pending()) {
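
The try_to_freeze_tasks() change samples the deadline once at the top of each scan and latches it in timedout, so the pass that detects the timeout is also the pass that names every task still refusing to freeze; previously the deadline was only checked after the scan, with no per-task diagnostics. The control-flow shape in miniature:

#include <stdio.h>
#include <time.h>

/* Model of the patched loop: the deadline is sampled once per pass,
 * and the timed-out pass doubles as the diagnostic pass. */
int main(void)
{
	time_t end_time = time(NULL);      /* already expired, for demo */
	int timedout = 0, todo, pass = 0;

	while (1) {
		todo = 0;
		if (time(NULL) >= end_time)
			timedout = 1;
		for (int i = 0; i < 3; i++) {   /* scan the "tasks" */
			todo++;
			if (timedout)
				printf("task %d refusing to freeze\n", i);
		}
		pass++;
		if (!todo || timedout)
			break;
	}
	printf("passes: %d, timedout: %d\n", pass, timedout);
	return 0;
}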
77174diff --git a/kernel/printk.c b/kernel/printk.c
77175index 267ce78..2487112 100644
77176--- a/kernel/printk.c
77177+++ b/kernel/printk.c
77178@@ -609,11 +609,17 @@ static unsigned int devkmsg_poll(struct file *file, poll_table *wait)
77179 return ret;
77180 }
77181
77182+static int check_syslog_permissions(int type, bool from_file);
77183+
77184 static int devkmsg_open(struct inode *inode, struct file *file)
77185 {
77186 struct devkmsg_user *user;
77187 int err;
77188
77189+ err = check_syslog_permissions(SYSLOG_ACTION_OPEN, SYSLOG_FROM_FILE);
77190+ if (err)
77191+ return err;
77192+
77193 /* write-only does not need any file context */
77194 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
77195 return 0;
77196@@ -822,7 +828,7 @@ static int syslog_action_restricted(int type)
77197 if (dmesg_restrict)
77198 return 1;
77199 /* Unless restricted, we allow "read all" and "get buffer size" for everybody */
77200- return type != SYSLOG_ACTION_READ_ALL && type != SYSLOG_ACTION_SIZE_BUFFER;
77201+ return type != SYSLOG_ACTION_OPEN && type != SYSLOG_ACTION_READ_ALL && type != SYSLOG_ACTION_SIZE_BUFFER;
77202 }
77203
77204 static int check_syslog_permissions(int type, bool from_file)
77205@@ -834,6 +840,11 @@ static int check_syslog_permissions(int type, bool from_file)
77206 if (from_file && type != SYSLOG_ACTION_OPEN)
77207 return 0;
77208
77209+#ifdef CONFIG_GRKERNSEC_DMESG
77210+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
77211+ return -EPERM;
77212+#endif
77213+
77214 if (syslog_action_restricted(type)) {
77215 if (capable(CAP_SYSLOG))
77216 return 0;
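
Two things happen in the printk.c hunks: devkmsg_open() now runs the same check_syslog_permissions() gate as syslog(2), and SYSLOG_ACTION_OPEN joins the actions that stay world-allowed only while dmesg_restrict is off. The net effect is that opening /dev/kmsg is unrestricted in the default configuration but requires CAP_SYSLOG (and passes the GRKERNSEC_DMESG policy) once restrictions are enabled. A user-space model of the decision:

#include <stdio.h>

enum {
	SYSLOG_ACTION_OPEN = 1, SYSLOG_ACTION_READ_ALL = 3,
	SYSLOG_ACTION_SIZE_BUFFER = 10,
};

static int dmesg_restrict;
static int has_cap_syslog;

/* Patched predicate: OPEN/READ_ALL/SIZE_BUFFER stay world-allowed
 * unless dmesg_restrict flips everything to privileged. */
static int action_restricted(int type)
{
	if (dmesg_restrict)
		return 1;
	return type != SYSLOG_ACTION_OPEN &&
	       type != SYSLOG_ACTION_READ_ALL &&
	       type != SYSLOG_ACTION_SIZE_BUFFER;
}

static int may_open_kmsg(void)
{
	if (action_restricted(SYSLOG_ACTION_OPEN) && !has_cap_syslog)
		return 0;
	return 1;
}

int main(void)
{
	printf("open, unrestricted: %d\n", may_open_kmsg());
	dmesg_restrict = 1;
	printf("open, restricted:   %d\n", may_open_kmsg());
	has_cap_syslog = 1;
	printf("open, with cap:     %d\n", may_open_kmsg());
	return 0;
}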
77217diff --git a/kernel/profile.c b/kernel/profile.c
77218index 1f39181..86093471 100644
77219--- a/kernel/profile.c
77220+++ b/kernel/profile.c
77221@@ -40,7 +40,7 @@ struct profile_hit {
77222 /* Oprofile timer tick hook */
77223 static int (*timer_hook)(struct pt_regs *) __read_mostly;
77224
77225-static atomic_t *prof_buffer;
77226+static atomic_unchecked_t *prof_buffer;
77227 static unsigned long prof_len, prof_shift;
77228
77229 int prof_on __read_mostly;
77230@@ -282,7 +282,7 @@ static void profile_flip_buffers(void)
77231 hits[i].pc = 0;
77232 continue;
77233 }
77234- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
77235+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
77236 hits[i].hits = hits[i].pc = 0;
77237 }
77238 }
77239@@ -343,9 +343,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
77240 * Add the current hit(s) and flush the write-queue out
77241 * to the global buffer:
77242 */
77243- atomic_add(nr_hits, &prof_buffer[pc]);
77244+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
77245 for (i = 0; i < NR_PROFILE_HIT; ++i) {
77246- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
77247+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
77248 hits[i].pc = hits[i].hits = 0;
77249 }
77250 out:
77251@@ -420,7 +420,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
77252 {
77253 unsigned long pc;
77254 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
77255- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
77256+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
77257 }
77258 #endif /* !CONFIG_SMP */
77259
77260@@ -518,7 +518,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
77261 return -EFAULT;
77262 buf++; p++; count--; read++;
77263 }
77264- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
77265+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
77266 if (copy_to_user(buf, (void *)pnt, count))
77267 return -EFAULT;
77268 read += count;
77269@@ -549,7 +549,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
77270 }
77271 #endif
77272 profile_discard_flip_buffers();
77273- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
77274+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
77275 return count;
77276 }
77277
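
The profile.c and rcutorture.c conversions swap atomic_t for atomic_unchecked_t on pure statistics counters. Under PaX's REFCOUNT hardening, ordinary atomic_t operations trap on overflow to catch reference-count bugs; counters that may legitimately wrap have to opt out through the _unchecked variants. A user-space model of the distinction, using the GCC/clang __builtin_add_overflow() builtin for the checked path:

#include <stdio.h>
#include <limits.h>

/* Model: the checked add refuses to overflow (the kernel would trap),
 * while the unchecked variant wraps like plain atomic_add(). */
static int checked_add(int *v, int delta)
{
	int result;
	if (__builtin_add_overflow(*v, delta, &result))
		return -1;         /* REFCOUNT would trap here */
	*v = result;
	return 0;
}

static void unchecked_add(int *v, int delta)
{
	*v = (int)((unsigned int)*v + (unsigned int)delta); /* wraps mod 2^32 */
}

int main(void)
{
	int a = INT_MAX, b = INT_MAX;

	printf("checked:   %s\n", checked_add(&a, 1) ? "overflow caught" : "ok");
	unchecked_add(&b, 1);
	printf("unchecked: wrapped to %d\n", b);
	return 0;
}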
77278diff --git a/kernel/ptrace.c b/kernel/ptrace.c
77279index 6cbeaae..cfe7ff0 100644
77280--- a/kernel/ptrace.c
77281+++ b/kernel/ptrace.c
77282@@ -324,7 +324,7 @@ static int ptrace_attach(struct task_struct *task, long request,
77283 if (seize)
77284 flags |= PT_SEIZED;
77285 rcu_read_lock();
77286- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
77287+ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
77288 flags |= PT_PTRACE_CAP;
77289 rcu_read_unlock();
77290 task->ptrace = flags;
77291@@ -535,7 +535,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
77292 break;
77293 return -EIO;
77294 }
77295- if (copy_to_user(dst, buf, retval))
77296+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
77297 return -EFAULT;
77298 copied += retval;
77299 src += retval;
77300@@ -720,7 +720,7 @@ int ptrace_request(struct task_struct *child, long request,
77301 bool seized = child->ptrace & PT_SEIZED;
77302 int ret = -EIO;
77303 siginfo_t siginfo, *si;
77304- void __user *datavp = (void __user *) data;
77305+ void __user *datavp = (__force void __user *) data;
77306 unsigned long __user *datalp = datavp;
77307 unsigned long flags;
77308
77309@@ -922,14 +922,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
77310 goto out;
77311 }
77312
77313+ if (gr_handle_ptrace(child, request)) {
77314+ ret = -EPERM;
77315+ goto out_put_task_struct;
77316+ }
77317+
77318 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
77319 ret = ptrace_attach(child, request, addr, data);
77320 /*
77321 * Some architectures need to do book-keeping after
77322 * a ptrace attach.
77323 */
77324- if (!ret)
77325+ if (!ret) {
77326 arch_ptrace_attach(child);
77327+ gr_audit_ptrace(child);
77328+ }
77329 goto out_put_task_struct;
77330 }
77331
77332@@ -957,7 +964,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
77333 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
77334 if (copied != sizeof(tmp))
77335 return -EIO;
77336- return put_user(tmp, (unsigned long __user *)data);
77337+ return put_user(tmp, (__force unsigned long __user *)data);
77338 }
77339
77340 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
77341@@ -1051,7 +1058,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
77342 }
77343
77344 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
77345- compat_long_t addr, compat_long_t data)
77346+ compat_ulong_t addr, compat_ulong_t data)
77347 {
77348 struct task_struct *child;
77349 long ret;
77350@@ -1067,14 +1074,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
77351 goto out;
77352 }
77353
77354+ if (gr_handle_ptrace(child, request)) {
77355+ ret = -EPERM;
77356+ goto out_put_task_struct;
77357+ }
77358+
77359 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
77360 ret = ptrace_attach(child, request, addr, data);
77361 /*
77362 * Some architectures need to do book-keeping after
77363 * a ptrace attach.
77364 */
77365- if (!ret)
77366+ if (!ret) {
77367 arch_ptrace_attach(child);
77368+ gr_audit_ptrace(child);
77369+ }
77370 goto out_put_task_struct;
77371 }
77372
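
Besides the gr_handle_ptrace()/gr_audit_ptrace() hooks, ptrace_readdata() gains a defensive clamp: retval comes back from access_process_vm() and is then used as a copy length, so rejecting values larger than the stack buffer keeps the copy_to_user() safe even if the helper ever returned nonsense. The same belt-and-braces shape in a standalone sketch (read_chunk() is a hypothetical producer):

#include <stdio.h>
#include <string.h>

/* Hypothetical producer that reports how many bytes it wrote. */
static int read_chunk(char *buf, size_t len)
{
	memset(buf, 'x', len);
	return (int)len;
}

static int copy_out(char *dst, size_t want)
{
	char buf[128];
	int retval = read_chunk(buf, want < sizeof(buf) ? want : sizeof(buf));

	/* Reject impossible lengths before using retval as a copy size. */
	if (retval < 0 || (size_t)retval > sizeof(buf))
		return -1;
	memcpy(dst, buf, (size_t)retval);
	return retval;
}

int main(void)
{
	char out[128];

	printf("copied %d bytes\n", copy_out(out, 64));
	return 0;
}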
77373diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
77374index e7dce58..ad0d7b7 100644
77375--- a/kernel/rcutiny.c
77376+++ b/kernel/rcutiny.c
77377@@ -46,7 +46,7 @@
77378 struct rcu_ctrlblk;
77379 static void invoke_rcu_callbacks(void);
77380 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
77381-static void rcu_process_callbacks(struct softirq_action *unused);
77382+static void rcu_process_callbacks(void);
77383 static void __call_rcu(struct rcu_head *head,
77384 void (*func)(struct rcu_head *rcu),
77385 struct rcu_ctrlblk *rcp);
77386@@ -310,7 +310,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
77387 rcu_is_callbacks_kthread()));
77388 }
77389
77390-static void rcu_process_callbacks(struct softirq_action *unused)
77391+static void rcu_process_callbacks(void)
77392 {
77393 __rcu_process_callbacks(&rcu_sched_ctrlblk);
77394 __rcu_process_callbacks(&rcu_bh_ctrlblk);
77395diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
77396index f85016a..91cb03b 100644
77397--- a/kernel/rcutiny_plugin.h
77398+++ b/kernel/rcutiny_plugin.h
77399@@ -896,7 +896,7 @@ static int rcu_kthread(void *arg)
77400 have_rcu_kthread_work = morework;
77401 local_irq_restore(flags);
77402 if (work)
77403- rcu_process_callbacks(NULL);
77404+ rcu_process_callbacks();
77405 schedule_timeout_interruptible(1); /* Leave CPU for others. */
77406 }
77407
77408diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
77409index 31dea01..ad91ffb 100644
77410--- a/kernel/rcutorture.c
77411+++ b/kernel/rcutorture.c
77412@@ -163,12 +163,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
77413 { 0 };
77414 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
77415 { 0 };
77416-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
77417-static atomic_t n_rcu_torture_alloc;
77418-static atomic_t n_rcu_torture_alloc_fail;
77419-static atomic_t n_rcu_torture_free;
77420-static atomic_t n_rcu_torture_mberror;
77421-static atomic_t n_rcu_torture_error;
77422+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
77423+static atomic_unchecked_t n_rcu_torture_alloc;
77424+static atomic_unchecked_t n_rcu_torture_alloc_fail;
77425+static atomic_unchecked_t n_rcu_torture_free;
77426+static atomic_unchecked_t n_rcu_torture_mberror;
77427+static atomic_unchecked_t n_rcu_torture_error;
77428 static long n_rcu_torture_barrier_error;
77429 static long n_rcu_torture_boost_ktrerror;
77430 static long n_rcu_torture_boost_rterror;
77431@@ -272,11 +272,11 @@ rcu_torture_alloc(void)
77432
77433 spin_lock_bh(&rcu_torture_lock);
77434 if (list_empty(&rcu_torture_freelist)) {
77435- atomic_inc(&n_rcu_torture_alloc_fail);
77436+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
77437 spin_unlock_bh(&rcu_torture_lock);
77438 return NULL;
77439 }
77440- atomic_inc(&n_rcu_torture_alloc);
77441+ atomic_inc_unchecked(&n_rcu_torture_alloc);
77442 p = rcu_torture_freelist.next;
77443 list_del_init(p);
77444 spin_unlock_bh(&rcu_torture_lock);
77445@@ -289,7 +289,7 @@ rcu_torture_alloc(void)
77446 static void
77447 rcu_torture_free(struct rcu_torture *p)
77448 {
77449- atomic_inc(&n_rcu_torture_free);
77450+ atomic_inc_unchecked(&n_rcu_torture_free);
77451 spin_lock_bh(&rcu_torture_lock);
77452 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
77453 spin_unlock_bh(&rcu_torture_lock);
77454@@ -409,7 +409,7 @@ rcu_torture_cb(struct rcu_head *p)
77455 i = rp->rtort_pipe_count;
77456 if (i > RCU_TORTURE_PIPE_LEN)
77457 i = RCU_TORTURE_PIPE_LEN;
77458- atomic_inc(&rcu_torture_wcount[i]);
77459+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
77460 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
77461 rp->rtort_mbtest = 0;
77462 rcu_torture_free(rp);
77463@@ -457,7 +457,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
77464 i = rp->rtort_pipe_count;
77465 if (i > RCU_TORTURE_PIPE_LEN)
77466 i = RCU_TORTURE_PIPE_LEN;
77467- atomic_inc(&rcu_torture_wcount[i]);
77468+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
77469 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
77470 rp->rtort_mbtest = 0;
77471 list_del(&rp->rtort_free);
77472@@ -975,7 +975,7 @@ rcu_torture_writer(void *arg)
77473 i = old_rp->rtort_pipe_count;
77474 if (i > RCU_TORTURE_PIPE_LEN)
77475 i = RCU_TORTURE_PIPE_LEN;
77476- atomic_inc(&rcu_torture_wcount[i]);
77477+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
77478 old_rp->rtort_pipe_count++;
77479 cur_ops->deferred_free(old_rp);
77480 }
77481@@ -1060,7 +1060,7 @@ static void rcu_torture_timer(unsigned long unused)
77482 }
77483 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
77484 if (p->rtort_mbtest == 0)
77485- atomic_inc(&n_rcu_torture_mberror);
77486+ atomic_inc_unchecked(&n_rcu_torture_mberror);
77487 spin_lock(&rand_lock);
77488 cur_ops->read_delay(&rand);
77489 n_rcu_torture_timers++;
77490@@ -1124,7 +1124,7 @@ rcu_torture_reader(void *arg)
77491 }
77492 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
77493 if (p->rtort_mbtest == 0)
77494- atomic_inc(&n_rcu_torture_mberror);
77495+ atomic_inc_unchecked(&n_rcu_torture_mberror);
77496 cur_ops->read_delay(&rand);
77497 preempt_disable();
77498 pipe_count = p->rtort_pipe_count;
77499@@ -1183,11 +1183,11 @@ rcu_torture_printk(char *page)
77500 rcu_torture_current,
77501 rcu_torture_current_version,
77502 list_empty(&rcu_torture_freelist),
77503- atomic_read(&n_rcu_torture_alloc),
77504- atomic_read(&n_rcu_torture_alloc_fail),
77505- atomic_read(&n_rcu_torture_free));
77506+ atomic_read_unchecked(&n_rcu_torture_alloc),
77507+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
77508+ atomic_read_unchecked(&n_rcu_torture_free));
77509 cnt += sprintf(&page[cnt], "rtmbe: %d rtbke: %ld rtbre: %ld ",
77510- atomic_read(&n_rcu_torture_mberror),
77511+ atomic_read_unchecked(&n_rcu_torture_mberror),
77512 n_rcu_torture_boost_ktrerror,
77513 n_rcu_torture_boost_rterror);
77514 cnt += sprintf(&page[cnt], "rtbf: %ld rtb: %ld nt: %ld ",
77515@@ -1206,14 +1206,14 @@ rcu_torture_printk(char *page)
77516 n_barrier_attempts,
77517 n_rcu_torture_barrier_error);
77518 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
77519- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
77520+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
77521 n_rcu_torture_barrier_error != 0 ||
77522 n_rcu_torture_boost_ktrerror != 0 ||
77523 n_rcu_torture_boost_rterror != 0 ||
77524 n_rcu_torture_boost_failure != 0 ||
77525 i > 1) {
77526 cnt += sprintf(&page[cnt], "!!! ");
77527- atomic_inc(&n_rcu_torture_error);
77528+ atomic_inc_unchecked(&n_rcu_torture_error);
77529 WARN_ON_ONCE(1);
77530 }
77531 cnt += sprintf(&page[cnt], "Reader Pipe: ");
77532@@ -1227,7 +1227,7 @@ rcu_torture_printk(char *page)
77533 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
77534 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
77535 cnt += sprintf(&page[cnt], " %d",
77536- atomic_read(&rcu_torture_wcount[i]));
77537+ atomic_read_unchecked(&rcu_torture_wcount[i]));
77538 }
77539 cnt += sprintf(&page[cnt], "\n");
77540 if (cur_ops->stats)
77541@@ -1920,7 +1920,7 @@ rcu_torture_cleanup(void)
77542
77543 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
77544
77545- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
77546+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
77547 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
77548 else if (n_online_successes != n_online_attempts ||
77549 n_offline_successes != n_offline_attempts)
77550@@ -1989,18 +1989,18 @@ rcu_torture_init(void)
77551
77552 rcu_torture_current = NULL;
77553 rcu_torture_current_version = 0;
77554- atomic_set(&n_rcu_torture_alloc, 0);
77555- atomic_set(&n_rcu_torture_alloc_fail, 0);
77556- atomic_set(&n_rcu_torture_free, 0);
77557- atomic_set(&n_rcu_torture_mberror, 0);
77558- atomic_set(&n_rcu_torture_error, 0);
77559+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
77560+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
77561+ atomic_set_unchecked(&n_rcu_torture_free, 0);
77562+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
77563+ atomic_set_unchecked(&n_rcu_torture_error, 0);
77564 n_rcu_torture_barrier_error = 0;
77565 n_rcu_torture_boost_ktrerror = 0;
77566 n_rcu_torture_boost_rterror = 0;
77567 n_rcu_torture_boost_failure = 0;
77568 n_rcu_torture_boosts = 0;
77569 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
77570- atomic_set(&rcu_torture_wcount[i], 0);
77571+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
77572 for_each_possible_cpu(cpu) {
77573 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
77574 per_cpu(rcu_torture_count, cpu)[i] = 0;
77575diff --git a/kernel/rcutree.c b/kernel/rcutree.c
77576index e441b77..dd54f17 100644
77577--- a/kernel/rcutree.c
77578+++ b/kernel/rcutree.c
77579@@ -349,9 +349,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
77580 rcu_prepare_for_idle(smp_processor_id());
77581 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
77582 smp_mb__before_atomic_inc(); /* See above. */
77583- atomic_inc(&rdtp->dynticks);
77584+ atomic_inc_unchecked(&rdtp->dynticks);
77585 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
77586- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
77587+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
77588
77589 /*
77590 * It is illegal to enter an extended quiescent state while
77591@@ -487,10 +487,10 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
77592 int user)
77593 {
77594 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
77595- atomic_inc(&rdtp->dynticks);
77596+ atomic_inc_unchecked(&rdtp->dynticks);
77597 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
77598 smp_mb__after_atomic_inc(); /* See above. */
77599- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
77600+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
77601 rcu_cleanup_after_idle(smp_processor_id());
77602 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
77603 if (!user && !is_idle_task(current)) {
77604@@ -629,14 +629,14 @@ void rcu_nmi_enter(void)
77605 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
77606
77607 if (rdtp->dynticks_nmi_nesting == 0 &&
77608- (atomic_read(&rdtp->dynticks) & 0x1))
77609+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
77610 return;
77611 rdtp->dynticks_nmi_nesting++;
77612 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
77613- atomic_inc(&rdtp->dynticks);
77614+ atomic_inc_unchecked(&rdtp->dynticks);
77615 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
77616 smp_mb__after_atomic_inc(); /* See above. */
77617- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
77618+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
77619 }
77620
77621 /**
77622@@ -655,9 +655,9 @@ void rcu_nmi_exit(void)
77623 return;
77624 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
77625 smp_mb__before_atomic_inc(); /* See above. */
77626- atomic_inc(&rdtp->dynticks);
77627+ atomic_inc_unchecked(&rdtp->dynticks);
77628 smp_mb__after_atomic_inc(); /* Force delay to next write. */
77629- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
77630+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
77631 }
77632
77633 /**
77634@@ -671,7 +671,7 @@ int rcu_is_cpu_idle(void)
77635 int ret;
77636
77637 preempt_disable();
77638- ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
77639+ ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
77640 preempt_enable();
77641 return ret;
77642 }
77643@@ -739,7 +739,7 @@ int rcu_is_cpu_rrupt_from_idle(void)
77644 */
77645 static int dyntick_save_progress_counter(struct rcu_data *rdp)
77646 {
77647- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
77648+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
77649 return (rdp->dynticks_snap & 0x1) == 0;
77650 }
77651
77652@@ -754,7 +754,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
77653 unsigned int curr;
77654 unsigned int snap;
77655
77656- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
77657+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
77658 snap = (unsigned int)rdp->dynticks_snap;
77659
77660 /*
77661@@ -802,10 +802,10 @@ static int jiffies_till_stall_check(void)
77662 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
77663 */
77664 if (till_stall_check < 3) {
77665- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
77666+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
77667 till_stall_check = 3;
77668 } else if (till_stall_check > 300) {
77669- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
77670+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
77671 till_stall_check = 300;
77672 }
77673 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
77674@@ -1592,7 +1592,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
77675 rsp->qlen += rdp->qlen;
77676 rdp->n_cbs_orphaned += rdp->qlen;
77677 rdp->qlen_lazy = 0;
77678- ACCESS_ONCE(rdp->qlen) = 0;
77679+ ACCESS_ONCE_RW(rdp->qlen) = 0;
77680 }
77681
77682 /*
77683@@ -1838,7 +1838,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
77684 }
77685 smp_mb(); /* List handling before counting for rcu_barrier(). */
77686 rdp->qlen_lazy -= count_lazy;
77687- ACCESS_ONCE(rdp->qlen) -= count;
77688+ ACCESS_ONCE_RW(rdp->qlen) -= count;
77689 rdp->n_cbs_invoked += count;
77690
77691 /* Reinstate batch limit if we have worked down the excess. */
77692@@ -2031,7 +2031,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
77693 /*
77694 * Do RCU core processing for the current CPU.
77695 */
77696-static void rcu_process_callbacks(struct softirq_action *unused)
77697+static void rcu_process_callbacks(void)
77698 {
77699 struct rcu_state *rsp;
77700
77701@@ -2154,7 +2154,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
77702 local_irq_restore(flags);
77703 return;
77704 }
77705- ACCESS_ONCE(rdp->qlen)++;
77706+ ACCESS_ONCE_RW(rdp->qlen)++;
77707 if (lazy)
77708 rdp->qlen_lazy++;
77709 else
77710@@ -2363,11 +2363,11 @@ void synchronize_sched_expedited(void)
77711 * counter wrap on a 32-bit system. Quite a few more CPUs would of
77712 * course be required on a 64-bit system.
77713 */
77714- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
77715+ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
77716 (ulong)atomic_long_read(&rsp->expedited_done) +
77717 ULONG_MAX / 8)) {
77718 synchronize_sched();
77719- atomic_long_inc(&rsp->expedited_wrap);
77720+ atomic_long_inc_unchecked(&rsp->expedited_wrap);
77721 return;
77722 }
77723
77724@@ -2375,7 +2375,7 @@ void synchronize_sched_expedited(void)
77725 * Take a ticket. Note that atomic_inc_return() implies a
77726 * full memory barrier.
77727 */
77728- snap = atomic_long_inc_return(&rsp->expedited_start);
77729+ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
77730 firstsnap = snap;
77731 get_online_cpus();
77732 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
77733@@ -2388,14 +2388,14 @@ void synchronize_sched_expedited(void)
77734 synchronize_sched_expedited_cpu_stop,
77735 NULL) == -EAGAIN) {
77736 put_online_cpus();
77737- atomic_long_inc(&rsp->expedited_tryfail);
77738+ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
77739
77740 /* Check to see if someone else did our work for us. */
77741 s = atomic_long_read(&rsp->expedited_done);
77742 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
77743 /* ensure test happens before caller kfree */
77744 smp_mb__before_atomic_inc(); /* ^^^ */
77745- atomic_long_inc(&rsp->expedited_workdone1);
77746+ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
77747 return;
77748 }
77749
77750@@ -2404,7 +2404,7 @@ void synchronize_sched_expedited(void)
77751 udelay(trycount * num_online_cpus());
77752 } else {
77753 wait_rcu_gp(call_rcu_sched);
77754- atomic_long_inc(&rsp->expedited_normal);
77755+ atomic_long_inc_unchecked(&rsp->expedited_normal);
77756 return;
77757 }
77758
77759@@ -2413,7 +2413,7 @@ void synchronize_sched_expedited(void)
77760 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
77761 /* ensure test happens before caller kfree */
77762 smp_mb__before_atomic_inc(); /* ^^^ */
77763- atomic_long_inc(&rsp->expedited_workdone2);
77764+ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
77765 return;
77766 }
77767
77768@@ -2425,10 +2425,10 @@ void synchronize_sched_expedited(void)
77769 * period works for us.
77770 */
77771 get_online_cpus();
77772- snap = atomic_long_read(&rsp->expedited_start);
77773+ snap = atomic_long_read_unchecked(&rsp->expedited_start);
77774 smp_mb(); /* ensure read is before try_stop_cpus(). */
77775 }
77776- atomic_long_inc(&rsp->expedited_stoppedcpus);
77777+ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
77778
77779 /*
77780 * Everyone up to our most recent fetch is covered by our grace
77781@@ -2437,16 +2437,16 @@ void synchronize_sched_expedited(void)
77782 * than we did already did their update.
77783 */
77784 do {
77785- atomic_long_inc(&rsp->expedited_done_tries);
77786+ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
77787 s = atomic_long_read(&rsp->expedited_done);
77788 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
77789 /* ensure test happens before caller kfree */
77790 smp_mb__before_atomic_inc(); /* ^^^ */
77791- atomic_long_inc(&rsp->expedited_done_lost);
77792+ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
77793 break;
77794 }
77795 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
77796- atomic_long_inc(&rsp->expedited_done_exit);
77797+ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
77798
77799 put_online_cpus();
77800 }
77801@@ -2620,7 +2620,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
77802 * ACCESS_ONCE() to prevent the compiler from speculating
77803 * the increment to precede the early-exit check.
77804 */
77805- ACCESS_ONCE(rsp->n_barrier_done)++;
77806+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
77807 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
77808 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
77809 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
77810@@ -2670,7 +2670,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
77811
77812 /* Increment ->n_barrier_done to prevent duplicate work. */
77813 smp_mb(); /* Keep increment after above mechanism. */
77814- ACCESS_ONCE(rsp->n_barrier_done)++;
77815+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
77816 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
77817 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
77818 smp_mb(); /* Keep increment before caller's subsequent code. */
77819@@ -2715,10 +2715,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
77820 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
77821 init_callback_list(rdp);
77822 rdp->qlen_lazy = 0;
77823- ACCESS_ONCE(rdp->qlen) = 0;
77824+ ACCESS_ONCE_RW(rdp->qlen) = 0;
77825 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
77826 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
77827- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
77828+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
77829 #ifdef CONFIG_RCU_USER_QS
77830 WARN_ON_ONCE(rdp->dynticks->in_user);
77831 #endif
77832@@ -2754,8 +2754,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
77833 rdp->blimit = blimit;
77834 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
77835 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
77836- atomic_set(&rdp->dynticks->dynticks,
77837- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
77838+ atomic_set_unchecked(&rdp->dynticks->dynticks,
77839+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
77840 rcu_prepare_for_idle_init(cpu);
77841 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
77842
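
The rcutree.c conversions keep the dynticks protocol intact while moving the counter to atomic_unchecked_t: the counter is even while the CPU sits in an extended quiescent state and odd otherwise, every transition increments it, and the WARN_ON_ONCE checks assert the expected parity after each step. A user-space model of the even/odd convention:

#include <stdio.h>

/* Model of the rcu_dynticks counter: even while idle, odd while the
 * CPU is inside RCU-visible execution; each transition increments. */
static unsigned long dynticks;

static void eqs_exit(void)  { dynticks++; /* now odd: non-idle */ }
static void eqs_enter(void) { dynticks++; /* now even: idle */ }

static int cpu_is_idle(void) { return (dynticks & 0x1) == 0; }

int main(void)
{
	printf("idle at boot: %d\n", cpu_is_idle());
	eqs_exit();
	printf("after exit:   %d\n", cpu_is_idle());
	eqs_enter();
	printf("after enter:  %d\n", cpu_is_idle());
	return 0;
}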
77843diff --git a/kernel/rcutree.h b/kernel/rcutree.h
77844index 4b69291..704c92e 100644
77845--- a/kernel/rcutree.h
77846+++ b/kernel/rcutree.h
77847@@ -86,7 +86,7 @@ struct rcu_dynticks {
77848 long long dynticks_nesting; /* Track irq/process nesting level. */
77849 /* Process level is worth LLONG_MAX/2. */
77850 int dynticks_nmi_nesting; /* Track NMI nesting level. */
77851- atomic_t dynticks; /* Even value for idle, else odd. */
77852+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
77853 #ifdef CONFIG_RCU_FAST_NO_HZ
77854 int dyntick_drain; /* Prepare-for-idle state variable. */
77855 unsigned long dyntick_holdoff;
77856@@ -423,17 +423,17 @@ struct rcu_state {
77857 /* _rcu_barrier(). */
77858 /* End of fields guarded by barrier_mutex. */
77859
77860- atomic_long_t expedited_start; /* Starting ticket. */
77861- atomic_long_t expedited_done; /* Done ticket. */
77862- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
77863- atomic_long_t expedited_tryfail; /* # acquisition failures. */
77864- atomic_long_t expedited_workdone1; /* # done by others #1. */
77865- atomic_long_t expedited_workdone2; /* # done by others #2. */
77866- atomic_long_t expedited_normal; /* # fallbacks to normal. */
77867- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
77868- atomic_long_t expedited_done_tries; /* # tries to update _done. */
77869- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
77870- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
77871+ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
77872+ atomic_long_t expedited_done; /* Done ticket. */
77873+ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
77874+ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
77875+ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
77876+ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
77877+ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
77878+ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
77879+ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
77880+ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
77881+ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
77882
77883 unsigned long jiffies_force_qs; /* Time at which to invoke */
77884 /* force_quiescent_state(). */
77885diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
77886index c1cc7e1..f62e436 100644
77887--- a/kernel/rcutree_plugin.h
77888+++ b/kernel/rcutree_plugin.h
77889@@ -892,7 +892,7 @@ void synchronize_rcu_expedited(void)
77890
77891 /* Clean up and exit. */
77892 smp_mb(); /* ensure expedited GP seen before counter increment. */
77893- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
77894+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
77895 unlock_mb_ret:
77896 mutex_unlock(&sync_rcu_preempt_exp_mutex);
77897 mb_ret:
77898@@ -1440,7 +1440,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
77899 free_cpumask_var(cm);
77900 }
77901
77902-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
77903+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
77904 .store = &rcu_cpu_kthread_task,
77905 .thread_should_run = rcu_cpu_kthread_should_run,
77906 .thread_fn = rcu_cpu_kthread,
77907@@ -2072,7 +2072,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
77908 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
77909 printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d %s\n",
77910 cpu, ticks_value, ticks_title,
77911- atomic_read(&rdtp->dynticks) & 0xfff,
77912+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
77913 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
77914 fast_no_hz);
77915 }
77916@@ -2192,7 +2192,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
77917
77918 /* Enqueue the callback on the nocb list and update counts. */
77919 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
77920- ACCESS_ONCE(*old_rhpp) = rhp;
77921+ ACCESS_ONCE_RW(*old_rhpp) = rhp;
77922 atomic_long_add(rhcount, &rdp->nocb_q_count);
77923 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
77924
77925@@ -2384,12 +2384,12 @@ static int rcu_nocb_kthread(void *arg)
77926 * Extract queued callbacks, update counts, and wait
77927 * for a grace period to elapse.
77928 */
77929- ACCESS_ONCE(rdp->nocb_head) = NULL;
77930+ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
77931 tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
77932 c = atomic_long_xchg(&rdp->nocb_q_count, 0);
77933 cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
77934- ACCESS_ONCE(rdp->nocb_p_count) += c;
77935- ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
77936+ ACCESS_ONCE_RW(rdp->nocb_p_count) += c;
77937+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) += cl;
77938 wait_rcu_gp(rdp->rsp->call_remote);
77939
77940 /* Each pass through the following loop invokes a callback. */
77941@@ -2411,8 +2411,8 @@ static int rcu_nocb_kthread(void *arg)
77942 list = next;
77943 }
77944 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
77945- ACCESS_ONCE(rdp->nocb_p_count) -= c;
77946- ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
77947+ ACCESS_ONCE_RW(rdp->nocb_p_count) -= c;
77948+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) -= cl;
77949 rdp->n_nocbs_invoked += c;
77950 }
77951 return 0;
77952@@ -2438,7 +2438,7 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
77953 rdp = per_cpu_ptr(rsp->rda, cpu);
77954 t = kthread_run(rcu_nocb_kthread, rdp, "rcuo%d", cpu);
77955 BUG_ON(IS_ERR(t));
77956- ACCESS_ONCE(rdp->nocb_kthread) = t;
77957+ ACCESS_ONCE_RW(rdp->nocb_kthread) = t;
77958 }
77959 }
77960
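
ACCESS_ONCE_RW appears throughout this patch wherever the original code
wrote through ACCESS_ONCE(). The reason is that grsecurity redefines
ACCESS_ONCE() with a const-qualified volatile cast, so a stray write
through it becomes a compile error, and provides ACCESS_ONCE_RW() as the
deliberately writable form. A compilable sketch of the split, simplified
from the patch's actual definitions:

    #define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
    #define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

    static int counter;

    static void demo(void)
    {
            int v = ACCESS_ONCE(counter);       /* reads stay as before */
            ACCESS_ONCE_RW(counter) = v + 1;    /* writes must say so */
            /* ACCESS_ONCE(counter) = 0;    <- now a compile error */
    }
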
77961diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
77962index 0d095dc..1985b19 100644
77963--- a/kernel/rcutree_trace.c
77964+++ b/kernel/rcutree_trace.c
77965@@ -123,7 +123,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
77966 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
77967 rdp->passed_quiesce, rdp->qs_pending);
77968 seq_printf(m, " dt=%d/%llx/%d df=%lu",
77969- atomic_read(&rdp->dynticks->dynticks),
77970+ atomic_read_unchecked(&rdp->dynticks->dynticks),
77971 rdp->dynticks->dynticks_nesting,
77972 rdp->dynticks->dynticks_nmi_nesting,
77973 rdp->dynticks_fqs);
77974@@ -184,17 +184,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
77975 struct rcu_state *rsp = (struct rcu_state *)m->private;
77976
77977 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
77978- atomic_long_read(&rsp->expedited_start),
77979+ atomic_long_read_unchecked(&rsp->expedited_start),
77980 atomic_long_read(&rsp->expedited_done),
77981- atomic_long_read(&rsp->expedited_wrap),
77982- atomic_long_read(&rsp->expedited_tryfail),
77983- atomic_long_read(&rsp->expedited_workdone1),
77984- atomic_long_read(&rsp->expedited_workdone2),
77985- atomic_long_read(&rsp->expedited_normal),
77986- atomic_long_read(&rsp->expedited_stoppedcpus),
77987- atomic_long_read(&rsp->expedited_done_tries),
77988- atomic_long_read(&rsp->expedited_done_lost),
77989- atomic_long_read(&rsp->expedited_done_exit));
77990+ atomic_long_read_unchecked(&rsp->expedited_wrap),
77991+ atomic_long_read_unchecked(&rsp->expedited_tryfail),
77992+ atomic_long_read_unchecked(&rsp->expedited_workdone1),
77993+ atomic_long_read_unchecked(&rsp->expedited_workdone2),
77994+ atomic_long_read_unchecked(&rsp->expedited_normal),
77995+ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
77996+ atomic_long_read_unchecked(&rsp->expedited_done_tries),
77997+ atomic_long_read_unchecked(&rsp->expedited_done_lost),
77998+ atomic_long_read_unchecked(&rsp->expedited_done_exit));
77999 return 0;
78000 }
78001
78002diff --git a/kernel/resource.c b/kernel/resource.c
78003index 73f35d4..4684fc4 100644
78004--- a/kernel/resource.c
78005+++ b/kernel/resource.c
78006@@ -143,8 +143,18 @@ static const struct file_operations proc_iomem_operations = {
78007
78008 static int __init ioresources_init(void)
78009 {
78010+#ifdef CONFIG_GRKERNSEC_PROC_ADD
78011+#ifdef CONFIG_GRKERNSEC_PROC_USER
78012+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
78013+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
78014+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
78015+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
78016+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
78017+#endif
78018+#else
78019 proc_create("ioports", 0, NULL, &proc_ioports_operations);
78020 proc_create("iomem", 0, NULL, &proc_iomem_operations);
78021+#endif
78022 return 0;
78023 }
78024 __initcall(ioresources_init);
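
This ioresources_init() hunk shows the recurring GRKERNSEC_PROC_ADD idiom:
/proc files that leak memory-layout information become mode 0400 under
CONFIG_GRKERNSEC_PROC_USER (root only) or 0440 under
CONFIG_GRKERNSEC_PROC_USERGROUP (root plus a configured group) instead of
the stock world-readable mode. A hypothetical helper expressing the same
policy in one place (gr_proc_mode is not a function in the patch, just an
illustration):

    #include <sys/stat.h>

    static mode_t gr_proc_mode(mode_t stock)
    {
    #if defined(CONFIG_GRKERNSEC_PROC_USER)
            return S_IRUSR;                 /* 0400: owner (root) only */
    #elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
            return S_IRUSR | S_IRGRP;       /* 0440: root + group */
    #else
            return stock;                   /* unmodified kernel default */
    #endif
    }
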
78025diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
78026index 98ec494..4241d6d 100644
78027--- a/kernel/rtmutex-tester.c
78028+++ b/kernel/rtmutex-tester.c
78029@@ -20,7 +20,7 @@
78030 #define MAX_RT_TEST_MUTEXES 8
78031
78032 static spinlock_t rttest_lock;
78033-static atomic_t rttest_event;
78034+static atomic_unchecked_t rttest_event;
78035
78036 struct test_thread_data {
78037 int opcode;
78038@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
78039
78040 case RTTEST_LOCKCONT:
78041 td->mutexes[td->opdata] = 1;
78042- td->event = atomic_add_return(1, &rttest_event);
78043+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78044 return 0;
78045
78046 case RTTEST_RESET:
78047@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
78048 return 0;
78049
78050 case RTTEST_RESETEVENT:
78051- atomic_set(&rttest_event, 0);
78052+ atomic_set_unchecked(&rttest_event, 0);
78053 return 0;
78054
78055 default:
78056@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
78057 return ret;
78058
78059 td->mutexes[id] = 1;
78060- td->event = atomic_add_return(1, &rttest_event);
78061+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78062 rt_mutex_lock(&mutexes[id]);
78063- td->event = atomic_add_return(1, &rttest_event);
78064+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78065 td->mutexes[id] = 4;
78066 return 0;
78067
78068@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
78069 return ret;
78070
78071 td->mutexes[id] = 1;
78072- td->event = atomic_add_return(1, &rttest_event);
78073+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78074 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
78075- td->event = atomic_add_return(1, &rttest_event);
78076+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78077 td->mutexes[id] = ret ? 0 : 4;
78078 return ret ? -EINTR : 0;
78079
78080@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
78081 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
78082 return ret;
78083
78084- td->event = atomic_add_return(1, &rttest_event);
78085+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78086 rt_mutex_unlock(&mutexes[id]);
78087- td->event = atomic_add_return(1, &rttest_event);
78088+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78089 td->mutexes[id] = 0;
78090 return 0;
78091
78092@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
78093 break;
78094
78095 td->mutexes[dat] = 2;
78096- td->event = atomic_add_return(1, &rttest_event);
78097+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78098 break;
78099
78100 default:
78101@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
78102 return;
78103
78104 td->mutexes[dat] = 3;
78105- td->event = atomic_add_return(1, &rttest_event);
78106+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78107 break;
78108
78109 case RTTEST_LOCKNOWAIT:
78110@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
78111 return;
78112
78113 td->mutexes[dat] = 1;
78114- td->event = atomic_add_return(1, &rttest_event);
78115+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78116 return;
78117
78118 default:
78119diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
78120index 0984a21..939f183 100644
78121--- a/kernel/sched/auto_group.c
78122+++ b/kernel/sched/auto_group.c
78123@@ -11,7 +11,7 @@
78124
78125 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
78126 static struct autogroup autogroup_default;
78127-static atomic_t autogroup_seq_nr;
78128+static atomic_unchecked_t autogroup_seq_nr;
78129
78130 void __init autogroup_init(struct task_struct *init_task)
78131 {
78132@@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
78133
78134 kref_init(&ag->kref);
78135 init_rwsem(&ag->lock);
78136- ag->id = atomic_inc_return(&autogroup_seq_nr);
78137+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
78138 ag->tg = tg;
78139 #ifdef CONFIG_RT_GROUP_SCHED
78140 /*
78141diff --git a/kernel/sched/core.c b/kernel/sched/core.c
78142index 26058d0..e315889 100644
78143--- a/kernel/sched/core.c
78144+++ b/kernel/sched/core.c
78145@@ -3367,7 +3367,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
78146 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
78147 * positive (at least 1, or number of jiffies left till timeout) if completed.
78148 */
78149-long __sched
78150+long __sched __intentional_overflow(-1)
78151 wait_for_completion_interruptible_timeout(struct completion *x,
78152 unsigned long timeout)
78153 {
78154@@ -3384,7 +3384,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
78155 *
78156 * The return value is -ERESTARTSYS if interrupted, 0 if completed.
78157 */
78158-int __sched wait_for_completion_killable(struct completion *x)
78159+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
78160 {
78161 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
78162 if (t == -ERESTARTSYS)
78163@@ -3405,7 +3405,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
78164 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
78165 * positive (at least 1, or number of jiffies left till timeout) if completed.
78166 */
78167-long __sched
78168+long __sched __intentional_overflow(-1)
78169 wait_for_completion_killable_timeout(struct completion *x,
78170 unsigned long timeout)
78171 {
78172@@ -3631,6 +3631,8 @@ int can_nice(const struct task_struct *p, const int nice)
78173 /* convert nice value [19,-20] to rlimit style value [1,40] */
78174 int nice_rlim = 20 - nice;
78175
78176+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
78177+
78178 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
78179 capable(CAP_SYS_NICE));
78180 }
78181@@ -3664,7 +3666,8 @@ SYSCALL_DEFINE1(nice, int, increment)
78182 if (nice > 19)
78183 nice = 19;
78184
78185- if (increment < 0 && !can_nice(current, nice))
78186+ if (increment < 0 && (!can_nice(current, nice) ||
78187+ gr_handle_chroot_nice()))
78188 return -EPERM;
78189
78190 retval = security_task_setnice(current, nice);
78191@@ -3818,6 +3821,7 @@ recheck:
78192 unsigned long rlim_rtprio =
78193 task_rlimit(p, RLIMIT_RTPRIO);
78194
78195+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
78196 /* can't set/change the rt policy */
78197 if (policy != p->policy && !rlim_rtprio)
78198 return -EPERM;
78199@@ -4901,7 +4905,7 @@ static void migrate_tasks(unsigned int dead_cpu)
78200
78201 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
78202
78203-static struct ctl_table sd_ctl_dir[] = {
78204+static ctl_table_no_const sd_ctl_dir[] __read_only = {
78205 {
78206 .procname = "sched_domain",
78207 .mode = 0555,
78208@@ -4918,17 +4922,17 @@ static struct ctl_table sd_ctl_root[] = {
78209 {}
78210 };
78211
78212-static struct ctl_table *sd_alloc_ctl_entry(int n)
78213+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
78214 {
78215- struct ctl_table *entry =
78216+ ctl_table_no_const *entry =
78217 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
78218
78219 return entry;
78220 }
78221
78222-static void sd_free_ctl_entry(struct ctl_table **tablep)
78223+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
78224 {
78225- struct ctl_table *entry;
78226+ ctl_table_no_const *entry;
78227
78228 /*
78229 * In the intermediate directories, both the child directory and
78230@@ -4936,22 +4940,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
78231 * will always be set. In the lowest directory the names are
78232 * static strings and all have proc handlers.
78233 */
78234- for (entry = *tablep; entry->mode; entry++) {
78235- if (entry->child)
78236- sd_free_ctl_entry(&entry->child);
78237+ for (entry = tablep; entry->mode; entry++) {
78238+ if (entry->child) {
78239+ sd_free_ctl_entry(entry->child);
78240+ pax_open_kernel();
78241+ entry->child = NULL;
78242+ pax_close_kernel();
78243+ }
78244 if (entry->proc_handler == NULL)
78245 kfree(entry->procname);
78246 }
78247
78248- kfree(*tablep);
78249- *tablep = NULL;
78250+ kfree(tablep);
78251 }
78252
78253 static int min_load_idx = 0;
78254 static int max_load_idx = CPU_LOAD_IDX_MAX;
78255
78256 static void
78257-set_table_entry(struct ctl_table *entry,
78258+set_table_entry(ctl_table_no_const *entry,
78259 const char *procname, void *data, int maxlen,
78260 umode_t mode, proc_handler *proc_handler,
78261 bool load_idx)
78262@@ -4971,7 +4978,7 @@ set_table_entry(struct ctl_table *entry,
78263 static struct ctl_table *
78264 sd_alloc_ctl_domain_table(struct sched_domain *sd)
78265 {
78266- struct ctl_table *table = sd_alloc_ctl_entry(13);
78267+ ctl_table_no_const *table = sd_alloc_ctl_entry(13);
78268
78269 if (table == NULL)
78270 return NULL;
78271@@ -5006,9 +5013,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
78272 return table;
78273 }
78274
78275-static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
78276+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
78277 {
78278- struct ctl_table *entry, *table;
78279+ ctl_table_no_const *entry, *table;
78280 struct sched_domain *sd;
78281 int domain_num = 0, i;
78282 char buf[32];
78283@@ -5035,11 +5042,13 @@ static struct ctl_table_header *sd_sysctl_header;
78284 static void register_sched_domain_sysctl(void)
78285 {
78286 int i, cpu_num = num_possible_cpus();
78287- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
78288+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
78289 char buf[32];
78290
78291 WARN_ON(sd_ctl_dir[0].child);
78292+ pax_open_kernel();
78293 sd_ctl_dir[0].child = entry;
78294+ pax_close_kernel();
78295
78296 if (entry == NULL)
78297 return;
78298@@ -5062,8 +5071,12 @@ static void unregister_sched_domain_sysctl(void)
78299 if (sd_sysctl_header)
78300 unregister_sysctl_table(sd_sysctl_header);
78301 sd_sysctl_header = NULL;
78302- if (sd_ctl_dir[0].child)
78303- sd_free_ctl_entry(&sd_ctl_dir[0].child);
78304+ if (sd_ctl_dir[0].child) {
78305+ sd_free_ctl_entry(sd_ctl_dir[0].child);
78306+ pax_open_kernel();
78307+ sd_ctl_dir[0].child = NULL;
78308+ pax_close_kernel();
78309+ }
78310 }
78311 #else
78312 static void register_sched_domain_sysctl(void)
78313@@ -5162,7 +5175,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
78314 * happens before everything else. This has to be lower priority than
78315 * the notifier in the perf_event subsystem, though.
78316 */
78317-static struct notifier_block __cpuinitdata migration_notifier = {
78318+static struct notifier_block migration_notifier = {
78319 .notifier_call = migration_call,
78320 .priority = CPU_PRI_MIGRATION,
78321 };
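
Two related hardening moves run through this sched/core.c hunk. First,
ctl_table_no_const: the constify plugin makes structures like struct
ctl_table read-only after boot, so dynamically built sysctl tables use the
still-writable _no_const variant. Second, the one remaining write into the
read-only sd_ctl_dir[] (its .child pointer) is bracketed with
pax_open_kernel()/pax_close_kernel(), which temporarily lift kernel write
protection around the store; this is also why sd_free_ctl_entry() now takes
the table pointer by value and the callers clear the parent pointer
themselves inside such a window. A userspace model of the open/close idea,
using mprotect() as a stand-in for the real mechanism (everything here is
illustrative):

    #define _GNU_SOURCE
    #include <sys/mman.h>
    #include <unistd.h>
    #include <stddef.h>

    struct dir_model { void *child; };

    static struct dir_model *dir;   /* sealed read-only after setup */

    static void set_child(void *new_child, long pagesz)
    {
            mprotect(dir, pagesz, PROT_READ | PROT_WRITE);  /* "open" */
            dir->child = new_child;
            mprotect(dir, pagesz, PROT_READ);               /* "close" */
    }

    int main(void)
    {
            long pagesz = sysconf(_SC_PAGESIZE);
            dir = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            dir->child = NULL;
            mprotect(dir, pagesz, PROT_READ);       /* like __read_only */
            set_child(&pagesz, pagesz);             /* controlled write */
            return 0;
    }
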
78322diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
78323index 81fa536..6ccf96a 100644
78324--- a/kernel/sched/fair.c
78325+++ b/kernel/sched/fair.c
78326@@ -830,7 +830,7 @@ void task_numa_fault(int node, int pages, bool migrated)
78327
78328 static void reset_ptenuma_scan(struct task_struct *p)
78329 {
78330- ACCESS_ONCE(p->mm->numa_scan_seq)++;
78331+ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
78332 p->mm->numa_scan_offset = 0;
78333 }
78334
78335@@ -3254,25 +3254,18 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
78336 */
78337 static int select_idle_sibling(struct task_struct *p, int target)
78338 {
78339- int cpu = smp_processor_id();
78340- int prev_cpu = task_cpu(p);
78341 struct sched_domain *sd;
78342 struct sched_group *sg;
78343- int i;
78344+ int i = task_cpu(p);
78345
78346- /*
78347- * If the task is going to be woken-up on this cpu and if it is
78348- * already idle, then it is the right target.
78349- */
78350- if (target == cpu && idle_cpu(cpu))
78351- return cpu;
78352+ if (idle_cpu(target))
78353+ return target;
78354
78355 /*
78356- * If the task is going to be woken-up on the cpu where it previously
78357- * ran and if it is currently idle, then it the right target.
78358+ * If the previous cpu is cache affine and idle, don't be stupid.
78359 */
78360- if (target == prev_cpu && idle_cpu(prev_cpu))
78361- return prev_cpu;
78362+ if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
78363+ return i;
78364
78365 /*
78366 * Otherwise, iterate the domains and find an eligible idle cpu.
78367@@ -3286,7 +3279,7 @@ static int select_idle_sibling(struct task_struct *p, int target)
78368 goto next;
78369
78370 for_each_cpu(i, sched_group_cpus(sg)) {
78371- if (!idle_cpu(i))
78372+ if (i == target || !idle_cpu(i))
78373 goto next;
78374 }
78375
78376@@ -5663,7 +5656,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
78377 * run_rebalance_domains is triggered when needed from the scheduler tick.
78378 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
78379 */
78380-static void run_rebalance_domains(struct softirq_action *h)
78381+static void run_rebalance_domains(void)
78382 {
78383 int this_cpu = smp_processor_id();
78384 struct rq *this_rq = cpu_rq(this_cpu);
78385diff --git a/kernel/signal.c b/kernel/signal.c
78386index dec9c30..d1da15b 100644
78387--- a/kernel/signal.c
78388+++ b/kernel/signal.c
78389@@ -50,12 +50,12 @@ static struct kmem_cache *sigqueue_cachep;
78390
78391 int print_fatal_signals __read_mostly;
78392
78393-static void __user *sig_handler(struct task_struct *t, int sig)
78394+static __sighandler_t sig_handler(struct task_struct *t, int sig)
78395 {
78396 return t->sighand->action[sig - 1].sa.sa_handler;
78397 }
78398
78399-static int sig_handler_ignored(void __user *handler, int sig)
78400+static int sig_handler_ignored(__sighandler_t handler, int sig)
78401 {
78402 /* Is it explicitly or implicitly ignored? */
78403 return handler == SIG_IGN ||
78404@@ -64,7 +64,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
78405
78406 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
78407 {
78408- void __user *handler;
78409+ __sighandler_t handler;
78410
78411 handler = sig_handler(t, sig);
78412
78413@@ -368,6 +368,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
78414 atomic_inc(&user->sigpending);
78415 rcu_read_unlock();
78416
78417+ if (!override_rlimit)
78418+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
78419+
78420 if (override_rlimit ||
78421 atomic_read(&user->sigpending) <=
78422 task_rlimit(t, RLIMIT_SIGPENDING)) {
78423@@ -495,7 +498,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
78424
78425 int unhandled_signal(struct task_struct *tsk, int sig)
78426 {
78427- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
78428+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
78429 if (is_global_init(tsk))
78430 return 1;
78431 if (handler != SIG_IGN && handler != SIG_DFL)
78432@@ -815,6 +818,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
78433 }
78434 }
78435
78436+ /* allow glibc communication via tgkill to other threads in our
78437+ thread group */
78438+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
78439+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
78440+ && gr_handle_signal(t, sig))
78441+ return -EPERM;
78442+
78443 return security_task_kill(t, info, sig, 0);
78444 }
78445
78446@@ -1197,7 +1207,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
78447 return send_signal(sig, info, p, 1);
78448 }
78449
78450-static int
78451+int
78452 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
78453 {
78454 return send_signal(sig, info, t, 0);
78455@@ -1234,6 +1244,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
78456 unsigned long int flags;
78457 int ret, blocked, ignored;
78458 struct k_sigaction *action;
78459+ int is_unhandled = 0;
78460
78461 spin_lock_irqsave(&t->sighand->siglock, flags);
78462 action = &t->sighand->action[sig-1];
78463@@ -1248,9 +1259,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
78464 }
78465 if (action->sa.sa_handler == SIG_DFL)
78466 t->signal->flags &= ~SIGNAL_UNKILLABLE;
78467+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
78468+ is_unhandled = 1;
78469 ret = specific_send_sig_info(sig, info, t);
78470 spin_unlock_irqrestore(&t->sighand->siglock, flags);
78471
78472+ /* only deal with unhandled signals, java etc. trigger SIGSEGV during
78473+ normal operation */
78474+ if (is_unhandled) {
78475+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
78476+ gr_handle_crash(t, sig);
78477+ }
78478+
78479 return ret;
78480 }
78481
78482@@ -1317,8 +1337,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
78483 ret = check_kill_permission(sig, info, p);
78484 rcu_read_unlock();
78485
78486- if (!ret && sig)
78487+ if (!ret && sig) {
78488 ret = do_send_sig_info(sig, info, p, true);
78489+ if (!ret)
78490+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
78491+ }
78492
78493 return ret;
78494 }
78495@@ -2855,7 +2878,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
78496 int error = -ESRCH;
78497
78498 rcu_read_lock();
78499- p = find_task_by_vpid(pid);
78500+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
78501+ /* allow glibc communication via tgkill to other threads in our
78502+ thread group */
78503+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
78504+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
78505+ p = find_task_by_vpid_unrestricted(pid);
78506+ else
78507+#endif
78508+ p = find_task_by_vpid(pid);
78509 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
78510 error = check_kill_permission(sig, info, p);
78511 /*
78512@@ -3138,8 +3169,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
78513 }
78514 seg = get_fs();
78515 set_fs(KERNEL_DS);
78516- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
78517- (stack_t __force __user *) &uoss,
78518+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
78519+ (stack_t __force_user *) &uoss,
78520 compat_user_stack_pointer());
78521 set_fs(seg);
78522 if (ret >= 0 && uoss_ptr) {
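
The sig_handler()/sig_handler_ignored() changes above are a type-correctness
fix rather than a hardening one: a signal disposition is a function pointer
(__sighandler_t), and carrying it around as void __user * forced implicit
casts against SIG_IGN and SIG_DFL. A small userspace sketch of the same
comparison done with the proper type (SIGCHLD stands in for the
default-ignored signals the kernel actually checks for):

    #include <signal.h>

    typedef void (*sighandler_model_t)(int);

    static int handler_ignored(sighandler_model_t h, int sig)
    {
            /* explicitly ignored, or defaulted on a default-ignore signal */
            return h == SIG_IGN || (h == SIG_DFL && sig == SIGCHLD);
    }
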
78523diff --git a/kernel/smp.c b/kernel/smp.c
78524index 69f38bd..77bbf12 100644
78525--- a/kernel/smp.c
78526+++ b/kernel/smp.c
78527@@ -77,7 +77,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
78528 return NOTIFY_OK;
78529 }
78530
78531-static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
78532+static struct notifier_block hotplug_cfd_notifier = {
78533 .notifier_call = hotplug_cfd,
78534 };
78535
78536diff --git a/kernel/smpboot.c b/kernel/smpboot.c
78537index d6c5fc0..530560c 100644
78538--- a/kernel/smpboot.c
78539+++ b/kernel/smpboot.c
78540@@ -275,7 +275,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
78541 }
78542 smpboot_unpark_thread(plug_thread, cpu);
78543 }
78544- list_add(&plug_thread->list, &hotplug_threads);
78545+ pax_list_add(&plug_thread->list, &hotplug_threads);
78546 out:
78547 mutex_unlock(&smpboot_threads_lock);
78548 return ret;
78549@@ -292,7 +292,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
78550 {
78551 get_online_cpus();
78552 mutex_lock(&smpboot_threads_lock);
78553- list_del(&plug_thread->list);
78554+ pax_list_del(&plug_thread->list);
78555 smpboot_destroy_threads(plug_thread);
78556 mutex_unlock(&smpboot_threads_lock);
78557 put_online_cpus();
78558diff --git a/kernel/softirq.c b/kernel/softirq.c
78559index ed567ba..e71dabf 100644
78560--- a/kernel/softirq.c
78561+++ b/kernel/softirq.c
78562@@ -53,11 +53,11 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
78563 EXPORT_SYMBOL(irq_stat);
78564 #endif
78565
78566-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
78567+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
78568
78569 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
78570
78571-char *softirq_to_name[NR_SOFTIRQS] = {
78572+const char * const softirq_to_name[NR_SOFTIRQS] = {
78573 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
78574 "TASKLET", "SCHED", "HRTIMER", "RCU"
78575 };
78576@@ -244,7 +244,7 @@ restart:
78577 kstat_incr_softirqs_this_cpu(vec_nr);
78578
78579 trace_softirq_entry(vec_nr);
78580- h->action(h);
78581+ h->action();
78582 trace_softirq_exit(vec_nr);
78583 if (unlikely(prev_count != preempt_count())) {
78584 printk(KERN_ERR "huh, entered softirq %u %s %p"
78585@@ -391,7 +391,7 @@ void __raise_softirq_irqoff(unsigned int nr)
78586 or_softirq_pending(1UL << nr);
78587 }
78588
78589-void open_softirq(int nr, void (*action)(struct softirq_action *))
78590+void __init open_softirq(int nr, void (*action)(void))
78591 {
78592 softirq_vec[nr].action = action;
78593 }
78594@@ -447,7 +447,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
78595
78596 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
78597
78598-static void tasklet_action(struct softirq_action *a)
78599+static void tasklet_action(void)
78600 {
78601 struct tasklet_struct *list;
78602
78603@@ -482,7 +482,7 @@ static void tasklet_action(struct softirq_action *a)
78604 }
78605 }
78606
78607-static void tasklet_hi_action(struct softirq_action *a)
78608+static void tasklet_hi_action(void)
78609 {
78610 struct tasklet_struct *list;
78611
78612@@ -718,7 +718,7 @@ static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
78613 return NOTIFY_OK;
78614 }
78615
78616-static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
78617+static struct notifier_block remote_softirq_cpu_notifier = {
78618 .notifier_call = remote_softirq_cpu_notify,
78619 };
78620
78621@@ -835,11 +835,11 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
78622 return NOTIFY_OK;
78623 }
78624
78625-static struct notifier_block __cpuinitdata cpu_nfb = {
78626+static struct notifier_block cpu_nfb = {
78627 .notifier_call = cpu_callback
78628 };
78629
78630-static struct smp_hotplug_thread softirq_threads = {
78631+static struct smp_hotplug_thread softirq_threads __read_only = {
78632 .store = &ksoftirqd,
78633 .thread_should_run = ksoftirqd_should_run,
78634 .thread_fn = run_ksoftirqd,
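
The softirq changes above work as a pair: no handler ever used its
struct softirq_action * argument, so the patch drops the parameter
everywhere (run_rebalance_domains, tasklet_action, run_timer_softirq and
friends), and with the vector entries no longer needing runtime updates,
softirq_vec[] can be made __read_only and page-aligned while open_softirq()
becomes __init-only. A minimal model of the narrowed interface (the names
below model the patched shape; they are not kernel symbols):

    typedef void (*softirq_fn)(void);

    #define NR_SOFTIRQS_MODEL 10

    static softirq_fn vec_model[NR_SOFTIRQS_MODEL]; /* read-only after init */

    static void open_softirq_model(int nr, softirq_fn fn)
    {
            vec_model[nr] = fn;     /* only legal during init in the patch */
    }

    static void run_one(int nr)
    {
            vec_model[nr]();        /* was h->action(h); now just action() */
    }
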
78635diff --git a/kernel/srcu.c b/kernel/srcu.c
78636index 2b85982..d52ab26 100644
78637--- a/kernel/srcu.c
78638+++ b/kernel/srcu.c
78639@@ -305,9 +305,9 @@ int __srcu_read_lock(struct srcu_struct *sp)
78640 preempt_disable();
78641 idx = rcu_dereference_index_check(sp->completed,
78642 rcu_read_lock_sched_held()) & 0x1;
78643- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
78644+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
78645 smp_mb(); /* B */ /* Avoid leaking the critical section. */
78646- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
78647+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
78648 preempt_enable();
78649 return idx;
78650 }
78651@@ -323,7 +323,7 @@ void __srcu_read_unlock(struct srcu_struct *sp, int idx)
78652 {
78653 preempt_disable();
78654 smp_mb(); /* C */ /* Avoid leaking the critical section. */
78655- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) -= 1;
78656+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) -= 1;
78657 preempt_enable();
78658 }
78659 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
78660diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
78661index 2f194e9..2c05ea9 100644
78662--- a/kernel/stop_machine.c
78663+++ b/kernel/stop_machine.c
78664@@ -362,7 +362,7 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
78665 * cpu notifiers. It currently shares the same priority as sched
78666 * migration_notifier.
78667 */
78668-static struct notifier_block __cpuinitdata cpu_stop_cpu_notifier = {
78669+static struct notifier_block cpu_stop_cpu_notifier = {
78670 .notifier_call = cpu_stop_cpu_callback,
78671 .priority = 10,
78672 };
78673diff --git a/kernel/sys.c b/kernel/sys.c
78674index 265b376..4e42ef5 100644
78675--- a/kernel/sys.c
78676+++ b/kernel/sys.c
78677@@ -157,6 +157,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
78678 error = -EACCES;
78679 goto out;
78680 }
78681+
78682+ if (gr_handle_chroot_setpriority(p, niceval)) {
78683+ error = -EACCES;
78684+ goto out;
78685+ }
78686+
78687 no_nice = security_task_setnice(p, niceval);
78688 if (no_nice) {
78689 error = no_nice;
78690@@ -595,6 +601,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
78691 goto error;
78692 }
78693
78694+ if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
78695+ goto error;
78696+
78697 if (rgid != (gid_t) -1 ||
78698 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
78699 new->sgid = new->egid;
78700@@ -630,6 +639,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
78701 old = current_cred();
78702
78703 retval = -EPERM;
78704+
78705+ if (gr_check_group_change(kgid, kgid, kgid))
78706+ goto error;
78707+
78708 if (nsown_capable(CAP_SETGID))
78709 new->gid = new->egid = new->sgid = new->fsgid = kgid;
78710 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
78711@@ -647,7 +660,7 @@ error:
78712 /*
78713 * change the user struct in a credentials set to match the new UID
78714 */
78715-static int set_user(struct cred *new)
78716+int set_user(struct cred *new)
78717 {
78718 struct user_struct *new_user;
78719
78720@@ -727,6 +740,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
78721 goto error;
78722 }
78723
78724+ if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
78725+ goto error;
78726+
78727 if (!uid_eq(new->uid, old->uid)) {
78728 retval = set_user(new);
78729 if (retval < 0)
78730@@ -777,6 +793,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
78731 old = current_cred();
78732
78733 retval = -EPERM;
78734+
78735+ if (gr_check_crash_uid(kuid))
78736+ goto error;
78737+ if (gr_check_user_change(kuid, kuid, kuid))
78738+ goto error;
78739+
78740 if (nsown_capable(CAP_SETUID)) {
78741 new->suid = new->uid = kuid;
78742 if (!uid_eq(kuid, old->uid)) {
78743@@ -846,6 +868,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
78744 goto error;
78745 }
78746
78747+ if (gr_check_user_change(kruid, keuid, INVALID_UID))
78748+ goto error;
78749+
78750 if (ruid != (uid_t) -1) {
78751 new->uid = kruid;
78752 if (!uid_eq(kruid, old->uid)) {
78753@@ -928,6 +953,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
78754 goto error;
78755 }
78756
78757+ if (gr_check_group_change(krgid, kegid, INVALID_GID))
78758+ goto error;
78759+
78760 if (rgid != (gid_t) -1)
78761 new->gid = krgid;
78762 if (egid != (gid_t) -1)
78763@@ -981,6 +1009,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
78764 if (!uid_valid(kuid))
78765 return old_fsuid;
78766
78767+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
78768+ goto error;
78769+
78770 new = prepare_creds();
78771 if (!new)
78772 return old_fsuid;
78773@@ -995,6 +1026,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
78774 }
78775 }
78776
78777+error:
78778 abort_creds(new);
78779 return old_fsuid;
78780
78781@@ -1027,12 +1059,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
78782 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
78783 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
78784 nsown_capable(CAP_SETGID)) {
78785+ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
78786+ goto error;
78787+
78788 if (!gid_eq(kgid, old->fsgid)) {
78789 new->fsgid = kgid;
78790 goto change_okay;
78791 }
78792 }
78793
78794+error:
78795 abort_creds(new);
78796 return old_fsgid;
78797
78798@@ -1340,19 +1376,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
78799 return -EFAULT;
78800
78801 down_read(&uts_sem);
78802- error = __copy_to_user(&name->sysname, &utsname()->sysname,
78803+ error = __copy_to_user(name->sysname, &utsname()->sysname,
78804 __OLD_UTS_LEN);
78805 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
78806- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
78807+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
78808 __OLD_UTS_LEN);
78809 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
78810- error |= __copy_to_user(&name->release, &utsname()->release,
78811+ error |= __copy_to_user(name->release, &utsname()->release,
78812 __OLD_UTS_LEN);
78813 error |= __put_user(0, name->release + __OLD_UTS_LEN);
78814- error |= __copy_to_user(&name->version, &utsname()->version,
78815+ error |= __copy_to_user(name->version, &utsname()->version,
78816 __OLD_UTS_LEN);
78817 error |= __put_user(0, name->version + __OLD_UTS_LEN);
78818- error |= __copy_to_user(&name->machine, &utsname()->machine,
78819+ error |= __copy_to_user(name->machine, &utsname()->machine,
78820 __OLD_UTS_LEN);
78821 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
78822 up_read(&uts_sem);
78823@@ -2026,7 +2062,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
78824 error = get_dumpable(me->mm);
78825 break;
78826 case PR_SET_DUMPABLE:
78827- if (arg2 < 0 || arg2 > 1) {
78828+ if (arg2 > 1) {
78829 error = -EINVAL;
78830 break;
78831 }
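
Besides the gr_check_user_change()/gr_check_group_change() hooks, which let
grsecurity veto id transitions before the new credentials are committed,
this kernel/sys.c hunk carries a small correctness fix worth noting: in the
PR_SET_DUMPABLE case, arg2 is an unsigned long, so the old "arg2 < 0" test
could never be true and the range check reduces to "arg2 > 1". A two-line
demonstration of the dead comparison:

    #include <stdio.h>

    int main(void)
    {
            unsigned long arg2 = (unsigned long)-1;
            printf("%d %d\n", arg2 < 0, arg2 > 1);  /* prints "0 1" */
            return 0;
    }
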
78832diff --git a/kernel/sysctl.c b/kernel/sysctl.c
78833index c88878d..e4fa5d1 100644
78834--- a/kernel/sysctl.c
78835+++ b/kernel/sysctl.c
78836@@ -92,7 +92,6 @@
78837
78838
78839 #if defined(CONFIG_SYSCTL)
78840-
78841 /* External variables not in a header file. */
78842 extern int sysctl_overcommit_memory;
78843 extern int sysctl_overcommit_ratio;
78844@@ -172,10 +171,8 @@ static int proc_taint(struct ctl_table *table, int write,
78845 void __user *buffer, size_t *lenp, loff_t *ppos);
78846 #endif
78847
78848-#ifdef CONFIG_PRINTK
78849 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
78850 void __user *buffer, size_t *lenp, loff_t *ppos);
78851-#endif
78852
78853 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
78854 void __user *buffer, size_t *lenp, loff_t *ppos);
78855@@ -206,6 +203,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
78856
78857 #endif
78858
78859+extern struct ctl_table grsecurity_table[];
78860+
78861 static struct ctl_table kern_table[];
78862 static struct ctl_table vm_table[];
78863 static struct ctl_table fs_table[];
78864@@ -220,6 +219,20 @@ extern struct ctl_table epoll_table[];
78865 int sysctl_legacy_va_layout;
78866 #endif
78867
78868+#ifdef CONFIG_PAX_SOFTMODE
78869+static ctl_table pax_table[] = {
78870+ {
78871+ .procname = "softmode",
78872+ .data = &pax_softmode,
78873+ .maxlen = sizeof(unsigned int),
78874+ .mode = 0600,
78875+ .proc_handler = &proc_dointvec,
78876+ },
78877+
78878+ { }
78879+};
78880+#endif
78881+
78882 /* The default sysctl tables: */
78883
78884 static struct ctl_table sysctl_base_table[] = {
78885@@ -268,6 +281,22 @@ static int max_extfrag_threshold = 1000;
78886 #endif
78887
78888 static struct ctl_table kern_table[] = {
78889+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
78890+ {
78891+ .procname = "grsecurity",
78892+ .mode = 0500,
78893+ .child = grsecurity_table,
78894+ },
78895+#endif
78896+
78897+#ifdef CONFIG_PAX_SOFTMODE
78898+ {
78899+ .procname = "pax",
78900+ .mode = 0500,
78901+ .child = pax_table,
78902+ },
78903+#endif
78904+
78905 {
78906 .procname = "sched_child_runs_first",
78907 .data = &sysctl_sched_child_runs_first,
78908@@ -593,7 +622,7 @@ static struct ctl_table kern_table[] = {
78909 .data = &modprobe_path,
78910 .maxlen = KMOD_PATH_LEN,
78911 .mode = 0644,
78912- .proc_handler = proc_dostring,
78913+ .proc_handler = proc_dostring_modpriv,
78914 },
78915 {
78916 .procname = "modules_disabled",
78917@@ -760,16 +789,20 @@ static struct ctl_table kern_table[] = {
78918 .extra1 = &zero,
78919 .extra2 = &one,
78920 },
78921+#endif
78922 {
78923 .procname = "kptr_restrict",
78924 .data = &kptr_restrict,
78925 .maxlen = sizeof(int),
78926 .mode = 0644,
78927 .proc_handler = proc_dointvec_minmax_sysadmin,
78928+#ifdef CONFIG_GRKERNSEC_HIDESYM
78929+ .extra1 = &two,
78930+#else
78931 .extra1 = &zero,
78932+#endif
78933 .extra2 = &two,
78934 },
78935-#endif
78936 {
78937 .procname = "ngroups_max",
78938 .data = &ngroups_max,
78939@@ -1266,6 +1299,13 @@ static struct ctl_table vm_table[] = {
78940 .proc_handler = proc_dointvec_minmax,
78941 .extra1 = &zero,
78942 },
78943+ {
78944+ .procname = "heap_stack_gap",
78945+ .data = &sysctl_heap_stack_gap,
78946+ .maxlen = sizeof(sysctl_heap_stack_gap),
78947+ .mode = 0644,
78948+ .proc_handler = proc_doulongvec_minmax,
78949+ },
78950 #else
78951 {
78952 .procname = "nr_trim_pages",
78953@@ -1716,6 +1756,16 @@ int proc_dostring(struct ctl_table *table, int write,
78954 buffer, lenp, ppos);
78955 }
78956
78957+int proc_dostring_modpriv(struct ctl_table *table, int write,
78958+ void __user *buffer, size_t *lenp, loff_t *ppos)
78959+{
78960+ if (write && !capable(CAP_SYS_MODULE))
78961+ return -EPERM;
78962+
78963+ return _proc_do_string(table->data, table->maxlen, write,
78964+ buffer, lenp, ppos);
78965+}
78966+
78967 static size_t proc_skip_spaces(char **buf)
78968 {
78969 size_t ret;
78970@@ -1821,6 +1871,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
78971 len = strlen(tmp);
78972 if (len > *size)
78973 len = *size;
78974+ if (len > sizeof(tmp))
78975+ len = sizeof(tmp);
78976 if (copy_to_user(*buf, tmp, len))
78977 return -EFAULT;
78978 *size -= len;
78979@@ -1985,7 +2037,7 @@ int proc_dointvec(struct ctl_table *table, int write,
78980 static int proc_taint(struct ctl_table *table, int write,
78981 void __user *buffer, size_t *lenp, loff_t *ppos)
78982 {
78983- struct ctl_table t;
78984+ ctl_table_no_const t;
78985 unsigned long tmptaint = get_taint();
78986 int err;
78987
78988@@ -2013,7 +2065,6 @@ static int proc_taint(struct ctl_table *table, int write,
78989 return err;
78990 }
78991
78992-#ifdef CONFIG_PRINTK
78993 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
78994 void __user *buffer, size_t *lenp, loff_t *ppos)
78995 {
78996@@ -2022,7 +2073,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
78997
78998 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
78999 }
79000-#endif
79001
79002 struct do_proc_dointvec_minmax_conv_param {
79003 int *min;
79004@@ -2169,8 +2219,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
79005 *i = val;
79006 } else {
79007 val = convdiv * (*i) / convmul;
79008- if (!first)
79009+ if (!first) {
79010 err = proc_put_char(&buffer, &left, '\t');
79011+ if (err)
79012+ break;
79013+ }
79014 err = proc_put_long(&buffer, &left, val, false);
79015 if (err)
79016 break;
79017@@ -2562,6 +2615,12 @@ int proc_dostring(struct ctl_table *table, int write,
79018 return -ENOSYS;
79019 }
79020
79021+int proc_dostring_modpriv(struct ctl_table *table, int write,
79022+ void __user *buffer, size_t *lenp, loff_t *ppos)
79023+{
79024+ return -ENOSYS;
79025+}
79026+
79027 int proc_dointvec(struct ctl_table *table, int write,
79028 void __user *buffer, size_t *lenp, loff_t *ppos)
79029 {
79030@@ -2618,5 +2677,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
79031 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
79032 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
79033 EXPORT_SYMBOL(proc_dostring);
79034+EXPORT_SYMBOL(proc_dostring_modpriv);
79035 EXPORT_SYMBOL(proc_doulongvec_minmax);
79036 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
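
proc_dostring_modpriv(), introduced above, is worth pausing on: the
modprobe_path sysctl tells the kernel which userspace binary to run for
module auto-loading, so write access to it is root-equivalent, and gating
writes on CAP_SYS_MODULE closes that path for otherwise-privileged writers.
A runnable userspace model of the read-allowed, write-privileged handler
shape (privileged and the helpers are stand-ins, not kernel API):

    #include <stdio.h>
    #include <string.h>

    static int privileged;      /* models capable(CAP_SYS_MODULE) */

    static int dostring_modpriv(int write, char *buf, size_t len,
                                const char *val)
    {
            if (write && !privileged)
                    return -1;          /* -EPERM in the kernel */
            if (!write)
                    snprintf(buf, len, "%s", val);
            return 0;
    }

    int main(void)
    {
            char out[64];
            if (dostring_modpriv(1, NULL, 0, "/tmp/evil") < 0)
                    puts("write denied without CAP_SYS_MODULE");
            dostring_modpriv(0, out, sizeof(out), "/sbin/modprobe");
            puts(out);
            return 0;
    }
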
79037diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
79038index 0ddf3a0..a199f50 100644
79039--- a/kernel/sysctl_binary.c
79040+++ b/kernel/sysctl_binary.c
79041@@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
79042 int i;
79043
79044 set_fs(KERNEL_DS);
79045- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
79046+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
79047 set_fs(old_fs);
79048 if (result < 0)
79049 goto out_kfree;
79050@@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
79051 }
79052
79053 set_fs(KERNEL_DS);
79054- result = vfs_write(file, buffer, str - buffer, &pos);
79055+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
79056 set_fs(old_fs);
79057 if (result < 0)
79058 goto out_kfree;
79059@@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
79060 int i;
79061
79062 set_fs(KERNEL_DS);
79063- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
79064+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
79065 set_fs(old_fs);
79066 if (result < 0)
79067 goto out_kfree;
79068@@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
79069 }
79070
79071 set_fs(KERNEL_DS);
79072- result = vfs_write(file, buffer, str - buffer, &pos);
79073+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
79074 set_fs(old_fs);
79075 if (result < 0)
79076 goto out_kfree;
79077@@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
79078 int i;
79079
79080 set_fs(KERNEL_DS);
79081- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
79082+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
79083 set_fs(old_fs);
79084 if (result < 0)
79085 goto out;
79086@@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
79087 __le16 dnaddr;
79088
79089 set_fs(KERNEL_DS);
79090- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
79091+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
79092 set_fs(old_fs);
79093 if (result < 0)
79094 goto out;
79095@@ -1234,7 +1234,7 @@ static ssize_t bin_dn_node_address(struct file *file,
79096 le16_to_cpu(dnaddr) & 0x3ff);
79097
79098 set_fs(KERNEL_DS);
79099- result = vfs_write(file, buf, len, &pos);
79100+ result = vfs_write(file, (const char __force_user *)buf, len, &pos);
79101 set_fs(old_fs);
79102 if (result < 0)
79103 goto out;
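
The __force_user casts in sysctl_binary.c all follow a single rule: these
vfs_read()/vfs_write() calls pass kernel buffers, which is legal only
because the surrounding set_fs(KERNEL_DS) widens the user address limit,
and the patch's stricter sparse checking demands that the exception be
spelled out at the call site. A compile-time sketch of the annotation
discipline (the attribute macros mirror sparse conventions and expand to
nothing in a normal build):

    #ifdef __CHECKER__
    # define __user  __attribute__((noderef, address_space(1)))
    # define __force __attribute__((force))
    #else
    # define __user
    # define __force
    #endif
    #define __force_user __force __user

    /* Stub standing in for vfs_read(); takes a user-annotated pointer. */
    static long fake_vfs_read(char __user *buf, unsigned long n)
    {
            (void)buf;
            return (long)n;
    }

    static long read_into_kernel_buf(char *kbuf, unsigned long n)
    {
            /* set_fs(KERNEL_DS) made this legal; the cast records why. */
            return fake_vfs_read((char __force_user *)kbuf, n);
    }
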
79104diff --git a/kernel/taskstats.c b/kernel/taskstats.c
79105index 145bb4d..b2aa969 100644
79106--- a/kernel/taskstats.c
79107+++ b/kernel/taskstats.c
79108@@ -28,9 +28,12 @@
79109 #include <linux/fs.h>
79110 #include <linux/file.h>
79111 #include <linux/pid_namespace.h>
79112+#include <linux/grsecurity.h>
79113 #include <net/genetlink.h>
79114 #include <linux/atomic.h>
79115
79116+extern int gr_is_taskstats_denied(int pid);
79117+
79118 /*
79119 * Maximum length of a cpumask that can be specified in
79120 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
79121@@ -570,6 +573,9 @@ err:
79122
79123 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
79124 {
79125+ if (gr_is_taskstats_denied(current->pid))
79126+ return -EACCES;
79127+
79128 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
79129 return cmd_attr_register_cpumask(info);
79130 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
79131diff --git a/kernel/time.c b/kernel/time.c
79132index d226c6a..2f0d217 100644
79133--- a/kernel/time.c
79134+++ b/kernel/time.c
79135@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
79136 return error;
79137
79138 if (tz) {
79139+ /* we log in do_settimeofday called below, so don't log twice
79140+ */
79141+ if (!tv)
79142+ gr_log_timechange();
79143+
79144 sys_tz = *tz;
79145 update_vsyscall_tz();
79146 if (firsttime) {
79147@@ -493,7 +498,7 @@ EXPORT_SYMBOL(usecs_to_jiffies);
79148 * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
79149 * value to a scaled second value.
79150 */
79151-unsigned long
79152+unsigned long __intentional_overflow(-1)
79153 timespec_to_jiffies(const struct timespec *value)
79154 {
79155 unsigned long sec = value->tv_sec;
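
timespec_to_jiffies() is one of several functions this patch tags with
__intentional_overflow(-1) (see also schedule_timeout() and the
wait_for_completion_*() variants earlier). The marker tells the
size_overflow GCC plugin not to instrument the function's return-value
arithmetic, because wrapping or "negative" results are part of its
contract. When the plugin is not in use the attribute must disappear; a
stub in the spirit of the patch's convention:

    #ifdef SIZE_OVERFLOW_PLUGIN
    # define __intentional_overflow(...) \
            __attribute__((intentional_overflow(__VA_ARGS__)))
    #else
    # define __intentional_overflow(...)
    #endif

    /* Deliberately wrap-tolerant arithmetic, exempt from instrumentation.
     * The conversion factor is illustrative, not the kernel's. */
    static unsigned long __intentional_overflow(-1)
    to_jiffies_model(unsigned long sec)
    {
            return sec * 250;
    }
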
79156diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
79157index f11d83b..d016d91 100644
79158--- a/kernel/time/alarmtimer.c
79159+++ b/kernel/time/alarmtimer.c
79160@@ -750,7 +750,7 @@ static int __init alarmtimer_init(void)
79161 struct platform_device *pdev;
79162 int error = 0;
79163 int i;
79164- struct k_clock alarm_clock = {
79165+ static struct k_clock alarm_clock = {
79166 .clock_getres = alarm_clock_getres,
79167 .clock_get = alarm_clock_get,
79168 .timer_create = alarm_timer_create,
79169diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
79170index a13987a..36cd791 100644
79171--- a/kernel/time/tick-broadcast.c
79172+++ b/kernel/time/tick-broadcast.c
79173@@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
79174 * then clear the broadcast bit.
79175 */
79176 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
79177- int cpu = smp_processor_id();
79178+ cpu = smp_processor_id();
79179
79180 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
79181 tick_broadcast_clear_oneshot(cpu);
79182diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
79183index cbc6acb..3a77191 100644
79184--- a/kernel/time/timekeeping.c
79185+++ b/kernel/time/timekeeping.c
79186@@ -15,6 +15,7 @@
79187 #include <linux/init.h>
79188 #include <linux/mm.h>
79189 #include <linux/sched.h>
79190+#include <linux/grsecurity.h>
79191 #include <linux/syscore_ops.h>
79192 #include <linux/clocksource.h>
79193 #include <linux/jiffies.h>
79194@@ -412,6 +413,8 @@ int do_settimeofday(const struct timespec *tv)
79195 if (!timespec_valid_strict(tv))
79196 return -EINVAL;
79197
79198+ gr_log_timechange();
79199+
79200 write_seqlock_irqsave(&tk->lock, flags);
79201
79202 timekeeping_forward_now(tk);
79203diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
79204index af5a7e9..715611a 100644
79205--- a/kernel/time/timer_list.c
79206+++ b/kernel/time/timer_list.c
79207@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
79208
79209 static void print_name_offset(struct seq_file *m, void *sym)
79210 {
79211+#ifdef CONFIG_GRKERNSEC_HIDESYM
79212+ SEQ_printf(m, "<%p>", NULL);
79213+#else
79214 char symname[KSYM_NAME_LEN];
79215
79216 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
79217 SEQ_printf(m, "<%pK>", sym);
79218 else
79219 SEQ_printf(m, "%s", symname);
79220+#endif
79221 }
79222
79223 static void
79224@@ -112,7 +116,11 @@ next_one:
79225 static void
79226 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
79227 {
79228+#ifdef CONFIG_GRKERNSEC_HIDESYM
79229+ SEQ_printf(m, " .base: %p\n", NULL);
79230+#else
79231 SEQ_printf(m, " .base: %pK\n", base);
79232+#endif
79233 SEQ_printf(m, " .index: %d\n",
79234 base->index);
79235 SEQ_printf(m, " .resolution: %Lu nsecs\n",
79236@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
79237 {
79238 struct proc_dir_entry *pe;
79239
79240+#ifdef CONFIG_GRKERNSEC_PROC_ADD
79241+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
79242+#else
79243 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
79244+#endif
79245 if (!pe)
79246 return -ENOMEM;
79247 return 0;
79248diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
79249index 0b537f2..40d6c20 100644
79250--- a/kernel/time/timer_stats.c
79251+++ b/kernel/time/timer_stats.c
79252@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
79253 static unsigned long nr_entries;
79254 static struct entry entries[MAX_ENTRIES];
79255
79256-static atomic_t overflow_count;
79257+static atomic_unchecked_t overflow_count;
79258
79259 /*
79260 * The entries are in a hash-table, for fast lookup:
79261@@ -140,7 +140,7 @@ static void reset_entries(void)
79262 nr_entries = 0;
79263 memset(entries, 0, sizeof(entries));
79264 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
79265- atomic_set(&overflow_count, 0);
79266+ atomic_set_unchecked(&overflow_count, 0);
79267 }
79268
79269 static struct entry *alloc_entry(void)
79270@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
79271 if (likely(entry))
79272 entry->count++;
79273 else
79274- atomic_inc(&overflow_count);
79275+ atomic_inc_unchecked(&overflow_count);
79276
79277 out_unlock:
79278 raw_spin_unlock_irqrestore(lock, flags);
79279@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
79280
79281 static void print_name_offset(struct seq_file *m, unsigned long addr)
79282 {
79283+#ifdef CONFIG_GRKERNSEC_HIDESYM
79284+ seq_printf(m, "<%p>", NULL);
79285+#else
79286 char symname[KSYM_NAME_LEN];
79287
79288 if (lookup_symbol_name(addr, symname) < 0)
79289- seq_printf(m, "<%p>", (void *)addr);
79290+ seq_printf(m, "<%pK>", (void *)addr);
79291 else
79292 seq_printf(m, "%s", symname);
79293+#endif
79294 }
79295
79296 static int tstats_show(struct seq_file *m, void *v)
79297@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
79298
79299 seq_puts(m, "Timer Stats Version: v0.2\n");
79300 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
79301- if (atomic_read(&overflow_count))
79302+ if (atomic_read_unchecked(&overflow_count))
79303 seq_printf(m, "Overflow: %d entries\n",
79304- atomic_read(&overflow_count));
79305+ atomic_read_unchecked(&overflow_count));
79306
79307 for (i = 0; i < nr_entries; i++) {
79308 entry = entries + i;
79309@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
79310 {
79311 struct proc_dir_entry *pe;
79312
79313+#ifdef CONFIG_GRKERNSEC_PROC_ADD
79314+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
79315+#else
79316 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
79317+#endif
79318 if (!pe)
79319 return -ENOMEM;
79320 return 0;
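
The timer_list.c and timer_stats.c hunks above apply the same
information-hiding rule twice: under CONFIG_GRKERNSEC_HIDESYM the
symbol-printing helpers emit a NULL pointer outright, and even without it
the raw-address fallback is upgraded from %p to %pK, which vsnprintf()
censors according to kptr_restrict. A userspace model of the visible
effect (print_ptr is a stand-in for the kernel's %pK handling):

    #include <stdio.h>

    static void print_ptr(const void *p, int privileged)
    {
            if (privileged)
                    printf("<%p>\n", p);
            else
                    printf("<%p>\n", (const void *)0);  /* what %pK shows */
    }

    int main(void)
    {
            int x;
            print_ptr(&x, 1);   /* real address for privileged readers */
            print_ptr(&x, 0);   /* <(nil)> for everyone else */
            return 0;
    }
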
79321diff --git a/kernel/timer.c b/kernel/timer.c
79322index 367d008..5dee98f 100644
79323--- a/kernel/timer.c
79324+++ b/kernel/timer.c
79325@@ -1363,7 +1363,7 @@ void update_process_times(int user_tick)
79326 /*
79327 * This function runs timers and the timer-tq in bottom half context.
79328 */
79329-static void run_timer_softirq(struct softirq_action *h)
79330+static void run_timer_softirq(void)
79331 {
79332 struct tvec_base *base = __this_cpu_read(tvec_bases);
79333
79334@@ -1481,7 +1481,7 @@ static void process_timeout(unsigned long __data)
79335 *
79336 * In all cases the return value is guaranteed to be non-negative.
79337 */
79338-signed long __sched schedule_timeout(signed long timeout)
79339+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
79340 {
79341 struct timer_list timer;
79342 unsigned long expire;
79343@@ -1772,7 +1772,7 @@ static int __cpuinit timer_cpu_notify(struct notifier_block *self,
79344 return NOTIFY_OK;
79345 }
79346
79347-static struct notifier_block __cpuinitdata timers_nb = {
79348+static struct notifier_block timers_nb = {
79349 .notifier_call = timer_cpu_notify,
79350 };
79351
79352diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
79353index c0bd030..62a1927 100644
79354--- a/kernel/trace/blktrace.c
79355+++ b/kernel/trace/blktrace.c
79356@@ -317,7 +317,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
79357 struct blk_trace *bt = filp->private_data;
79358 char buf[16];
79359
79360- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
79361+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
79362
79363 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
79364 }
79365@@ -375,7 +375,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
79366 return 1;
79367
79368 bt = buf->chan->private_data;
79369- atomic_inc(&bt->dropped);
79370+ atomic_inc_unchecked(&bt->dropped);
79371 return 0;
79372 }
79373
79374@@ -476,7 +476,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
79375
79376 bt->dir = dir;
79377 bt->dev = dev;
79378- atomic_set(&bt->dropped, 0);
79379+ atomic_set_unchecked(&bt->dropped, 0);
79380
79381 ret = -EIO;
79382 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
79383diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
79384index b27052c..0e1af95 100644
79385--- a/kernel/trace/ftrace.c
79386+++ b/kernel/trace/ftrace.c
79387@@ -1874,12 +1874,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
79388 if (unlikely(ftrace_disabled))
79389 return 0;
79390
79391+ ret = ftrace_arch_code_modify_prepare();
79392+ FTRACE_WARN_ON(ret);
79393+ if (ret)
79394+ return 0;
79395+
79396 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
79397+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
79398 if (ret) {
79399 ftrace_bug(ret, ip);
79400- return 0;
79401 }
79402- return 1;
79403+ return ret ? 0 : 1;
79404 }
79405
79406 /*
79407@@ -2965,7 +2970,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
79408
79409 int
79410 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
79411- void *data)
79412+ void *data)
79413 {
79414 struct ftrace_func_probe *entry;
79415 struct ftrace_page *pg;
79416@@ -3832,8 +3837,10 @@ static int ftrace_process_locs(struct module *mod,
79417 if (!count)
79418 return 0;
79419
79420+ pax_open_kernel();
79421 sort(start, count, sizeof(*start),
79422 ftrace_cmp_ips, ftrace_swap_ips);
79423+ pax_close_kernel();
79424
79425 start_pg = ftrace_allocate_pages(count);
79426 if (!start_pg)
79427@@ -4559,8 +4566,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
79428 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
79429
79430 static int ftrace_graph_active;
79431-static struct notifier_block ftrace_suspend_notifier;
79432-
79433 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
79434 {
79435 return 0;
79436@@ -4704,6 +4709,10 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
79437 return NOTIFY_DONE;
79438 }
79439
79440+static struct notifier_block ftrace_suspend_notifier = {
79441+ .notifier_call = ftrace_suspend_notifier_call
79442+};
79443+
79444 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
79445 trace_func_graph_ent_t entryfunc)
79446 {
79447@@ -4717,7 +4726,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
79448 goto out;
79449 }
79450
79451- ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
79452 register_pm_notifier(&ftrace_suspend_notifier);
79453
79454 ftrace_graph_active++;
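
This ftrace_code_disable() change fixes a real bug rather than adding a
hardening feature: when kernel text is not writable (notably under
KERNEXEC), ftrace_make_nop() can only succeed between
ftrace_arch_code_modify_prepare() and ..._post_process(), which typically
open the text mapping for modification, so the hunk adds the bracket and
folds the error handling into a single "ret ? 0 : 1" return. The control
flow in schematic form, with stubs standing in for the functions named in
the hunk:

    static int prepare(void)       { return 0; }  /* ..._modify_prepare */
    static int make_nop(void)      { return 0; }  /* ftrace_make_nop    */
    static void post_process(void) { }            /* ..._post_process   */
    static void report_bug(int r)  { (void)r; }   /* ftrace_bug         */

    static int code_disable_model(void)
    {
            int ret = prepare();
            if (ret)
                    return 0;               /* could not open the text */
            ret = make_nop();
            post_process();                 /* always undo, even on error */
            if (ret)
                    report_bug(ret);
            return ret ? 0 : 1;             /* 1 on success, 0 on failure */
    }
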
79455diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
79456index ce8514f..8233573 100644
79457--- a/kernel/trace/ring_buffer.c
79458+++ b/kernel/trace/ring_buffer.c
79459@@ -346,9 +346,9 @@ struct buffer_data_page {
79460 */
79461 struct buffer_page {
79462 struct list_head list; /* list of buffer pages */
79463- local_t write; /* index for next write */
79464+ local_unchecked_t write; /* index for next write */
79465 unsigned read; /* index for next read */
79466- local_t entries; /* entries on this page */
79467+ local_unchecked_t entries; /* entries on this page */
79468 unsigned long real_end; /* real end of data */
79469 struct buffer_data_page *page; /* Actual data page */
79470 };
79471@@ -461,8 +461,8 @@ struct ring_buffer_per_cpu {
79472 unsigned long last_overrun;
79473 local_t entries_bytes;
79474 local_t entries;
79475- local_t overrun;
79476- local_t commit_overrun;
79477+ local_unchecked_t overrun;
79478+ local_unchecked_t commit_overrun;
79479 local_t dropped_events;
79480 local_t committing;
79481 local_t commits;
79482@@ -861,8 +861,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
79483 *
79484 * We add a counter to the write field to denote this.
79485 */
79486- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
79487- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
79488+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
79489+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
79490
79491 /*
79492 * Just make sure we have seen our old_write and synchronize
79493@@ -890,8 +890,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
79494 * cmpxchg to only update if an interrupt did not already
79495 * do it for us. If the cmpxchg fails, we don't care.
79496 */
79497- (void)local_cmpxchg(&next_page->write, old_write, val);
79498- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
79499+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
79500+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
79501
79502 /*
79503 * No need to worry about races with clearing out the commit.
79504@@ -1250,12 +1250,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
79505
79506 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
79507 {
79508- return local_read(&bpage->entries) & RB_WRITE_MASK;
79509+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
79510 }
79511
79512 static inline unsigned long rb_page_write(struct buffer_page *bpage)
79513 {
79514- return local_read(&bpage->write) & RB_WRITE_MASK;
79515+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
79516 }
79517
79518 static int
79519@@ -1350,7 +1350,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
79520 * bytes consumed in ring buffer from here.
79521 * Increment overrun to account for the lost events.
79522 */
79523- local_add(page_entries, &cpu_buffer->overrun);
79524+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
79525 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
79526 }
79527
79528@@ -1906,7 +1906,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
79529 * it is our responsibility to update
79530 * the counters.
79531 */
79532- local_add(entries, &cpu_buffer->overrun);
79533+ local_add_unchecked(entries, &cpu_buffer->overrun);
79534 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
79535
79536 /*
79537@@ -2056,7 +2056,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
79538 if (tail == BUF_PAGE_SIZE)
79539 tail_page->real_end = 0;
79540
79541- local_sub(length, &tail_page->write);
79542+ local_sub_unchecked(length, &tail_page->write);
79543 return;
79544 }
79545
79546@@ -2091,7 +2091,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
79547 rb_event_set_padding(event);
79548
79549 /* Set the write back to the previous setting */
79550- local_sub(length, &tail_page->write);
79551+ local_sub_unchecked(length, &tail_page->write);
79552 return;
79553 }
79554
79555@@ -2103,7 +2103,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
79556
79557 /* Set write to end of buffer */
79558 length = (tail + length) - BUF_PAGE_SIZE;
79559- local_sub(length, &tail_page->write);
79560+ local_sub_unchecked(length, &tail_page->write);
79561 }
79562
79563 /*
79564@@ -2129,7 +2129,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
79565 * about it.
79566 */
79567 if (unlikely(next_page == commit_page)) {
79568- local_inc(&cpu_buffer->commit_overrun);
79569+ local_inc_unchecked(&cpu_buffer->commit_overrun);
79570 goto out_reset;
79571 }
79572
79573@@ -2185,7 +2185,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
79574 cpu_buffer->tail_page) &&
79575 (cpu_buffer->commit_page ==
79576 cpu_buffer->reader_page))) {
79577- local_inc(&cpu_buffer->commit_overrun);
79578+ local_inc_unchecked(&cpu_buffer->commit_overrun);
79579 goto out_reset;
79580 }
79581 }
79582@@ -2233,7 +2233,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
79583 length += RB_LEN_TIME_EXTEND;
79584
79585 tail_page = cpu_buffer->tail_page;
79586- write = local_add_return(length, &tail_page->write);
79587+ write = local_add_return_unchecked(length, &tail_page->write);
79588
79589 /* set write to only the index of the write */
79590 write &= RB_WRITE_MASK;
79591@@ -2250,7 +2250,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
79592 kmemcheck_annotate_bitfield(event, bitfield);
79593 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
79594
79595- local_inc(&tail_page->entries);
79596+ local_inc_unchecked(&tail_page->entries);
79597
79598 /*
79599 * If this is the first commit on the page, then update
79600@@ -2283,7 +2283,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
79601
79602 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
79603 unsigned long write_mask =
79604- local_read(&bpage->write) & ~RB_WRITE_MASK;
79605+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
79606 unsigned long event_length = rb_event_length(event);
79607 /*
79608 * This is on the tail page. It is possible that
79609@@ -2293,7 +2293,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
79610 */
79611 old_index += write_mask;
79612 new_index += write_mask;
79613- index = local_cmpxchg(&bpage->write, old_index, new_index);
79614+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
79615 if (index == old_index) {
79616 /* update counters */
79617 local_sub(event_length, &cpu_buffer->entries_bytes);
79618@@ -2632,7 +2632,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
79619
79620 /* Do the likely case first */
79621 if (likely(bpage->page == (void *)addr)) {
79622- local_dec(&bpage->entries);
79623+ local_dec_unchecked(&bpage->entries);
79624 return;
79625 }
79626
79627@@ -2644,7 +2644,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
79628 start = bpage;
79629 do {
79630 if (bpage->page == (void *)addr) {
79631- local_dec(&bpage->entries);
79632+ local_dec_unchecked(&bpage->entries);
79633 return;
79634 }
79635 rb_inc_page(cpu_buffer, &bpage);
79636@@ -2926,7 +2926,7 @@ static inline unsigned long
79637 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
79638 {
79639 return local_read(&cpu_buffer->entries) -
79640- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
79641+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
79642 }
79643
79644 /**
79645@@ -3015,7 +3015,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
79646 return 0;
79647
79648 cpu_buffer = buffer->buffers[cpu];
79649- ret = local_read(&cpu_buffer->overrun);
79650+ ret = local_read_unchecked(&cpu_buffer->overrun);
79651
79652 return ret;
79653 }
79654@@ -3038,7 +3038,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
79655 return 0;
79656
79657 cpu_buffer = buffer->buffers[cpu];
79658- ret = local_read(&cpu_buffer->commit_overrun);
79659+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
79660
79661 return ret;
79662 }
79663@@ -3105,7 +3105,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
79664 /* if you care about this being correct, lock the buffer */
79665 for_each_buffer_cpu(buffer, cpu) {
79666 cpu_buffer = buffer->buffers[cpu];
79667- overruns += local_read(&cpu_buffer->overrun);
79668+ overruns += local_read_unchecked(&cpu_buffer->overrun);
79669 }
79670
79671 return overruns;
79672@@ -3281,8 +3281,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
79673 /*
79674 * Reset the reader page to size zero.
79675 */
79676- local_set(&cpu_buffer->reader_page->write, 0);
79677- local_set(&cpu_buffer->reader_page->entries, 0);
79678+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
79679+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
79680 local_set(&cpu_buffer->reader_page->page->commit, 0);
79681 cpu_buffer->reader_page->real_end = 0;
79682
79683@@ -3316,7 +3316,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
79684 * want to compare with the last_overrun.
79685 */
79686 smp_mb();
79687- overwrite = local_read(&(cpu_buffer->overrun));
79688+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
79689
79690 /*
79691 * Here's the tricky part.
79692@@ -3886,8 +3886,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
79693
79694 cpu_buffer->head_page
79695 = list_entry(cpu_buffer->pages, struct buffer_page, list);
79696- local_set(&cpu_buffer->head_page->write, 0);
79697- local_set(&cpu_buffer->head_page->entries, 0);
79698+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
79699+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
79700 local_set(&cpu_buffer->head_page->page->commit, 0);
79701
79702 cpu_buffer->head_page->read = 0;
79703@@ -3897,14 +3897,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
79704
79705 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
79706 INIT_LIST_HEAD(&cpu_buffer->new_pages);
79707- local_set(&cpu_buffer->reader_page->write, 0);
79708- local_set(&cpu_buffer->reader_page->entries, 0);
79709+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
79710+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
79711 local_set(&cpu_buffer->reader_page->page->commit, 0);
79712 cpu_buffer->reader_page->read = 0;
79713
79714 local_set(&cpu_buffer->entries_bytes, 0);
79715- local_set(&cpu_buffer->overrun, 0);
79716- local_set(&cpu_buffer->commit_overrun, 0);
79717+ local_set_unchecked(&cpu_buffer->overrun, 0);
79718+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
79719 local_set(&cpu_buffer->dropped_events, 0);
79720 local_set(&cpu_buffer->entries, 0);
79721 local_set(&cpu_buffer->committing, 0);
79722@@ -4308,8 +4308,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
79723 rb_init_page(bpage);
79724 bpage = reader->page;
79725 reader->page = *data_page;
79726- local_set(&reader->write, 0);
79727- local_set(&reader->entries, 0);
79728+ local_set_unchecked(&reader->write, 0);
79729+ local_set_unchecked(&reader->entries, 0);
79730 reader->read = 0;
79731 *data_page = bpage;
79732
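
The ring_buffer.c hunks above convert pure statistics counters (write, entries, overrun, commit_overrun) from local_t to local_unchecked_t and switch their accessors accordingly. Under the PaX REFCOUNT feature, ordinary atomic/local arithmetic is instrumented to catch overflows; the *_unchecked variants opt a counter out where wraparound is harmless and expected. The same pattern recurs below as atomic_t to atomic_unchecked_t (kernel/trace/trace_mmiotrace.c, mm/memory-failure.c). A compilable model of the split follows, with the checked/unchecked semantics assumed from the PaX pattern rather than taken from this excerpt.

    #include <limits.h>
    #include <stdio.h>

    typedef struct { long v; } local_t;            /* overflow-checked under REFCOUNT */
    typedef struct { long v; } local_unchecked_t;  /* plain wrapping arithmetic */

    static long local_add_return(long i, local_t *l)
    {
            if (l->v > LONG_MAX - i) {             /* model of the REFCOUNT trap */
                    fprintf(stderr, "overflow caught\n");
                    return l->v;
            }
            return l->v += i;
    }

    static long local_add_return_unchecked(long i, local_unchecked_t *l)
    {
            /* wraps via unsigned arithmetic, as a stats counter may */
            l->v = (long)((unsigned long)l->v + (unsigned long)i);
            return l->v;
    }

    int main(void)
    {
            local_t ref = { LONG_MAX };
            local_unchecked_t stat = { LONG_MAX };
            local_add_return(1, &ref);             /* refused */
            local_add_return_unchecked(1, &stat);  /* allowed to wrap */
            printf("%ld %ld\n", ref.v, stat.v);
            return 0;
    }
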
79733diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
79734index fe1d581..43a0f38 100644
79735--- a/kernel/trace/trace.c
79736+++ b/kernel/trace/trace.c
79737@@ -4494,10 +4494,9 @@ static const struct file_operations tracing_dyn_info_fops = {
79738 };
79739 #endif
79740
79741-static struct dentry *d_tracer;
79742-
79743 struct dentry *tracing_init_dentry(void)
79744 {
79745+ static struct dentry *d_tracer;
79746 static int once;
79747
79748 if (d_tracer)
79749@@ -4517,10 +4516,9 @@ struct dentry *tracing_init_dentry(void)
79750 return d_tracer;
79751 }
79752
79753-static struct dentry *d_percpu;
79754-
79755 struct dentry *tracing_dentry_percpu(void)
79756 {
79757+ static struct dentry *d_percpu;
79758 static int once;
79759 struct dentry *d_tracer;
79760
79761diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
79762index 880073d..42db7c3 100644
79763--- a/kernel/trace/trace_events.c
79764+++ b/kernel/trace/trace_events.c
79765@@ -1330,10 +1330,6 @@ static LIST_HEAD(ftrace_module_file_list);
79766 struct ftrace_module_file_ops {
79767 struct list_head list;
79768 struct module *mod;
79769- struct file_operations id;
79770- struct file_operations enable;
79771- struct file_operations format;
79772- struct file_operations filter;
79773 };
79774
79775 static struct ftrace_module_file_ops *
79776@@ -1354,17 +1350,12 @@ trace_create_file_ops(struct module *mod)
79777
79778 file_ops->mod = mod;
79779
79780- file_ops->id = ftrace_event_id_fops;
79781- file_ops->id.owner = mod;
79782-
79783- file_ops->enable = ftrace_enable_fops;
79784- file_ops->enable.owner = mod;
79785-
79786- file_ops->filter = ftrace_event_filter_fops;
79787- file_ops->filter.owner = mod;
79788-
79789- file_ops->format = ftrace_event_format_fops;
79790- file_ops->format.owner = mod;
79791+ pax_open_kernel();
79792+ mod->trace_id.owner = mod;
79793+ mod->trace_enable.owner = mod;
79794+ mod->trace_filter.owner = mod;
79795+ mod->trace_format.owner = mod;
79796+ pax_close_kernel();
79797
79798 list_add(&file_ops->list, &ftrace_module_file_list);
79799
79800@@ -1388,8 +1379,8 @@ static void trace_module_add_events(struct module *mod)
79801
79802 for_each_event(call, start, end) {
79803 __trace_add_event_call(*call, mod,
79804- &file_ops->id, &file_ops->enable,
79805- &file_ops->filter, &file_ops->format);
79806+ &mod->trace_id, &mod->trace_enable,
79807+ &mod->trace_filter, &mod->trace_format);
79808 }
79809 }
79810
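
In the trace_events.c hunks, the four writable struct file_operations copies are removed from the per-module wrapper; the patch instead adds trace_id/trace_enable/trace_filter/trace_format fields to struct module itself (in a hunk outside this excerpt) and patches only their .owner member inside the write window. The effect is that the ops tables no longer live in ordinary writable heap memory. A simplified, compilable model of the resulting layout, with the field names taken from the hunk and everything else assumed:

    #include <stdio.h>
    #include <string.h>

    struct file_operations { void *owner; };

    /* Simplified stand-in for the patched struct module, which gains
     * the four trace_* ops copies in another hunk of this patch. */
    struct module {
            char name[16];
            struct file_operations trace_id, trace_enable,
                                   trace_filter, trace_format;
    };

    int main(void)
    {
            struct module mod;
            memset(&mod, 0, sizeof(mod));
            strcpy(mod.name, "demo");

            /* pax_open_kernel();  -- in-kernel, struct module may be
             *                        write-protected, hence the window */
            mod.trace_id.owner     = &mod;
            mod.trace_enable.owner = &mod;
            mod.trace_filter.owner = &mod;
            mod.trace_format.owner = &mod;
            /* pax_close_kernel(); */

            printf("%s %d\n", mod.name, mod.trace_id.owner == &mod);
            return 0;
    }
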
79811diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
79812index fd3c8aa..5f324a6 100644
79813--- a/kernel/trace/trace_mmiotrace.c
79814+++ b/kernel/trace/trace_mmiotrace.c
79815@@ -24,7 +24,7 @@ struct header_iter {
79816 static struct trace_array *mmio_trace_array;
79817 static bool overrun_detected;
79818 static unsigned long prev_overruns;
79819-static atomic_t dropped_count;
79820+static atomic_unchecked_t dropped_count;
79821
79822 static void mmio_reset_data(struct trace_array *tr)
79823 {
79824@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
79825
79826 static unsigned long count_overruns(struct trace_iterator *iter)
79827 {
79828- unsigned long cnt = atomic_xchg(&dropped_count, 0);
79829+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
79830 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
79831
79832 if (over > prev_overruns)
79833@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
79834 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
79835 sizeof(*entry), 0, pc);
79836 if (!event) {
79837- atomic_inc(&dropped_count);
79838+ atomic_inc_unchecked(&dropped_count);
79839 return;
79840 }
79841 entry = ring_buffer_event_data(event);
79842@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
79843 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
79844 sizeof(*entry), 0, pc);
79845 if (!event) {
79846- atomic_inc(&dropped_count);
79847+ atomic_inc_unchecked(&dropped_count);
79848 return;
79849 }
79850 entry = ring_buffer_event_data(event);
79851diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
79852index 194d796..76edb8f 100644
79853--- a/kernel/trace/trace_output.c
79854+++ b/kernel/trace/trace_output.c
79855@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
79856
79857 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
79858 if (!IS_ERR(p)) {
79859- p = mangle_path(s->buffer + s->len, p, "\n");
79860+ p = mangle_path(s->buffer + s->len, p, "\n\\");
79861 if (p) {
79862 s->len = p - s->buffer;
79863 return 1;
79864@@ -852,14 +852,16 @@ int register_ftrace_event(struct trace_event *event)
79865 goto out;
79866 }
79867
79868+ pax_open_kernel();
79869 if (event->funcs->trace == NULL)
79870- event->funcs->trace = trace_nop_print;
79871+ *(void **)&event->funcs->trace = trace_nop_print;
79872 if (event->funcs->raw == NULL)
79873- event->funcs->raw = trace_nop_print;
79874+ *(void **)&event->funcs->raw = trace_nop_print;
79875 if (event->funcs->hex == NULL)
79876- event->funcs->hex = trace_nop_print;
79877+ *(void **)&event->funcs->hex = trace_nop_print;
79878 if (event->funcs->binary == NULL)
79879- event->funcs->binary = trace_nop_print;
79880+ *(void **)&event->funcs->binary = trace_nop_print;
79881+ pax_close_kernel();
79882
79883 key = event->type & (EVENT_HASHSIZE - 1);
79884
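
register_ftrace_event() above installs its default handlers through *(void **)& casts inside the write window: with the constify plugin the event->funcs members are const-qualified, so a plain assignment would not compile, and the object itself may be write-protected at runtime. (The mangle_path hunk in the same file additionally escapes backslashes, "\n\\", when printing paths.) A minimal sketch of the cast idiom, with the const qualification assumed from the constify behaviour:

    #include <stdio.h>

    struct trace_event_functions {
            int (*trace)(void);  /* const-qualified by the constify plugin */
    };

    static int trace_nop_print(void) { return 0; }

    int main(void)
    {
            struct trace_event_functions funcs = { 0 };

            /* Writing through a cast lvalue sidesteps the const
             * qualifier; in the kernel this sits between
             * pax_open_kernel() and pax_close_kernel(). */
            if (funcs.trace == NULL)
                    *(void **)&funcs.trace = (void *)trace_nop_print;

            return funcs.trace();
    }
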
79885diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
79886index 42ca822..cdcacc6 100644
79887--- a/kernel/trace/trace_stack.c
79888+++ b/kernel/trace/trace_stack.c
79889@@ -52,7 +52,7 @@ static inline void check_stack(void)
79890 return;
79891
79892 /* we do not handle interrupt stacks yet */
79893- if (!object_is_on_stack(&this_size))
79894+ if (!object_starts_on_stack(&this_size))
79895 return;
79896
79897 local_irq_save(flags);
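
check_stack() here, and the DMA-debug and debugobjects hunks further below, switch from object_is_on_stack() to object_starts_on_stack(). The rename reflects what the original helper actually tested, namely only the object's first byte; grsecurity is assumed to introduce a stricter object_is_on_stack() elsewhere in the patch that also bounds the object's end, while callers that only care about the start keep the old behaviour under the more honest name. A compilable model of the two checks, with THREAD_SIZE and the stricter variant's signature assumed:

    #include <stdio.h>
    #include <stdint.h>

    #define THREAD_SIZE 8192  /* stack size assumed for the sketch */

    static int object_starts_on_stack(const void *stack, const void *obj)
    {
            uintptr_t s = (uintptr_t)stack, o = (uintptr_t)obj;
            return o >= s && o < s + THREAD_SIZE;         /* start byte only */
    }

    static int object_is_on_stack(const void *stack, const void *obj, size_t len)
    {
            uintptr_t s = (uintptr_t)stack, o = (uintptr_t)obj;
            return o >= s && o + len <= s + THREAD_SIZE;  /* whole object */
    }

    int main(void)
    {
            char stack[THREAD_SIZE];
            printf("%d %d\n",
                   object_starts_on_stack(stack, stack + THREAD_SIZE - 4),
                   object_is_on_stack(stack, stack + THREAD_SIZE - 4, 16));
            return 0;
    }
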
79898diff --git a/kernel/user.c b/kernel/user.c
79899index 7f6ff2b..1ac8f18 100644
79900--- a/kernel/user.c
79901+++ b/kernel/user.c
79902@@ -47,9 +47,7 @@ struct user_namespace init_user_ns = {
79903 .count = 4294967295U,
79904 },
79905 },
79906- .kref = {
79907- .refcount = ATOMIC_INIT(3),
79908- },
79909+ .count = ATOMIC_INIT(3),
79910 .owner = GLOBAL_ROOT_UID,
79911 .group = GLOBAL_ROOT_GID,
79912 .proc_inum = PROC_USER_INIT_INO,
79913diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
79914index f45e128..a5a5fb6 100644
79915--- a/kernel/user_namespace.c
79916+++ b/kernel/user_namespace.c
79917@@ -88,7 +88,7 @@ int create_user_ns(struct cred *new)
79918 return ret;
79919 }
79920
79921- kref_init(&ns->kref);
79922+ atomic_set(&ns->count, 1);
79923 /* Leave the new->user_ns reference with the new user namespace. */
79924 ns->parent = parent_ns;
79925 ns->owner = owner;
79926@@ -116,15 +116,16 @@ int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
79927 return create_user_ns(cred);
79928 }
79929
79930-void free_user_ns(struct kref *kref)
79931+void free_user_ns(struct user_namespace *ns)
79932 {
79933- struct user_namespace *parent, *ns =
79934- container_of(kref, struct user_namespace, kref);
79935+ struct user_namespace *parent;
79936
79937- parent = ns->parent;
79938- proc_free_inum(ns->proc_inum);
79939- kmem_cache_free(user_ns_cachep, ns);
79940- put_user_ns(parent);
79941+ do {
79942+ parent = ns->parent;
79943+ proc_free_inum(ns->proc_inum);
79944+ kmem_cache_free(user_ns_cachep, ns);
79945+ ns = parent;
79946+ } while (atomic_dec_and_test(&parent->count));
79947 }
79948 EXPORT_SYMBOL(free_user_ns);
79949
79950@@ -815,7 +816,7 @@ static int userns_install(struct nsproxy *nsproxy, void *ns)
79951 if (atomic_read(&current->mm->mm_users) > 1)
79952 return -EINVAL;
79953
79954- if (current->fs->users != 1)
79955+ if (atomic_read(&current->fs->users) != 1)
79956 return -EINVAL;
79957
79958 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
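
The kernel/user.c and kernel/user_namespace.c hunks replace the user namespace's struct kref with a bare atomic count and rewrite free_user_ns() as a loop: dropping the last reference on a namespace also drops one on its parent, and walking the chain iteratively instead of recursing keeps deeply nested namespaces from consuming kernel stack. (The userns_install hunk also reads fs->users via atomic_read, matching the fs_struct change elsewhere in the patch.) A compilable model of the iterative teardown, with plain int standing in for atomic_t:

    #include <stdio.h>
    #include <stdlib.h>

    struct user_namespace {
            int count;                        /* atomic_t in the kernel */
            struct user_namespace *parent;
    };

    /* Iterative teardown: freeing a child drops one reference on its
     * parent; if that was the last one, keep walking rather than
     * recursing through the parent chain. */
    static void free_user_ns(struct user_namespace *ns)
    {
            struct user_namespace *parent;

            do {
                    parent = ns->parent;
                    free(ns);
                    ns = parent;
            } while (ns && --ns->count == 0);
    }

    int main(void)
    {
            struct user_namespace *root  = calloc(1, sizeof(*root));
            struct user_namespace *child = calloc(1, sizeof(*child));
            root->count   = 1;                /* held only by the child */
            child->parent = root;
            free_user_ns(child);              /* frees child, then root */
            puts("chain freed");
            return 0;
    }
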
79959diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
79960index 63da38c..639904e 100644
79961--- a/kernel/utsname_sysctl.c
79962+++ b/kernel/utsname_sysctl.c
79963@@ -46,7 +46,7 @@ static void put_uts(ctl_table *table, int write, void *which)
79964 static int proc_do_uts_string(ctl_table *table, int write,
79965 void __user *buffer, size_t *lenp, loff_t *ppos)
79966 {
79967- struct ctl_table uts_table;
79968+ ctl_table_no_const uts_table;
79969 int r;
79970 memcpy(&uts_table, table, sizeof(uts_table));
79971 uts_table.data = get_uts(table, write);
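
The utsname_sysctl.c hunk is the first of many below (see also mm/hugetlb.c) that declare an on-stack ctl_table copy as ctl_table_no_const. With the constify plugin, struct ctl_table is treated as const everywhere, so code that memcpy()s an entry and then patches its .data pointer needs a parallel, unqualified type for the temporary. A compilable model of the idiom, with the typedef's definition assumed:

    #include <stdio.h>
    #include <string.h>

    struct ctl_table { const char *procname; void *data; };

    /* Assumed shape of the grsecurity typedef: the same layout,
     * minus the const qualification the plugin imposes. */
    typedef struct ctl_table ctl_table_no_const;

    int main(void)
    {
            static const struct ctl_table uts_entry = { "hostname", NULL };
            char buf[64] = "demo";

            ctl_table_no_const tmp;
            memcpy(&tmp, &uts_entry, sizeof(tmp));
            tmp.data = buf;                  /* write goes to the copy only */

            printf("%s -> %s\n", tmp.procname, (char *)tmp.data);
            return 0;
    }
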
79972diff --git a/kernel/watchdog.c b/kernel/watchdog.c
79973index 75a2ab3..5961da7 100644
79974--- a/kernel/watchdog.c
79975+++ b/kernel/watchdog.c
79976@@ -527,7 +527,7 @@ int proc_dowatchdog(struct ctl_table *table, int write,
79977 }
79978 #endif /* CONFIG_SYSCTL */
79979
79980-static struct smp_hotplug_thread watchdog_threads = {
79981+static struct smp_hotplug_thread watchdog_threads __read_only = {
79982 .store = &softlockup_watchdog,
79983 .thread_should_run = watchdog_should_run,
79984 .thread_fn = watchdog,
79985diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
79986index 67604e5..fe94fb1 100644
79987--- a/lib/Kconfig.debug
79988+++ b/lib/Kconfig.debug
79989@@ -550,7 +550,7 @@ config DEBUG_MUTEXES
79990
79991 config DEBUG_LOCK_ALLOC
79992 bool "Lock debugging: detect incorrect freeing of live locks"
79993- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
79994+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
79995 select DEBUG_SPINLOCK
79996 select DEBUG_MUTEXES
79997 select LOCKDEP
79998@@ -564,7 +564,7 @@ config DEBUG_LOCK_ALLOC
79999
80000 config PROVE_LOCKING
80001 bool "Lock debugging: prove locking correctness"
80002- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
80003+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
80004 select LOCKDEP
80005 select DEBUG_SPINLOCK
80006 select DEBUG_MUTEXES
80007@@ -670,7 +670,7 @@ config LOCKDEP
80008
80009 config LOCK_STAT
80010 bool "Lock usage statistics"
80011- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
80012+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
80013 select LOCKDEP
80014 select DEBUG_SPINLOCK
80015 select DEBUG_MUTEXES
80016@@ -1278,6 +1278,7 @@ config LATENCYTOP
80017 depends on DEBUG_KERNEL
80018 depends on STACKTRACE_SUPPORT
80019 depends on PROC_FS
80020+ depends on !GRKERNSEC_HIDESYM
80021 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
80022 select KALLSYMS
80023 select KALLSYMS_ALL
80024@@ -1306,7 +1307,7 @@ config INTERVAL_TREE_TEST
80025
80026 config PROVIDE_OHCI1394_DMA_INIT
80027 bool "Remote debugging over FireWire early on boot"
80028- depends on PCI && X86
80029+ depends on PCI && X86 && !GRKERNSEC
80030 help
80031 If you want to debug problems which hang or crash the kernel early
80032 on boot and the crashing machine has a FireWire port, you can use
80033@@ -1335,7 +1336,7 @@ config PROVIDE_OHCI1394_DMA_INIT
80034
80035 config FIREWIRE_OHCI_REMOTE_DMA
80036 bool "Remote debugging over FireWire with firewire-ohci"
80037- depends on FIREWIRE_OHCI
80038+ depends on FIREWIRE_OHCI && !GRKERNSEC
80039 help
80040 This option lets you use the FireWire bus for remote debugging
80041 with help of the firewire-ohci driver. It enables unfiltered
80042diff --git a/lib/Makefile b/lib/Makefile
80043index 02ed6c0..bd243da 100644
80044--- a/lib/Makefile
80045+++ b/lib/Makefile
80046@@ -47,7 +47,7 @@ obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
80047
80048 obj-$(CONFIG_BTREE) += btree.o
80049 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
80050-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
80051+obj-y += list_debug.o
80052 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
80053
80054 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
80055diff --git a/lib/bitmap.c b/lib/bitmap.c
80056index 06f7e4f..f3cf2b0 100644
80057--- a/lib/bitmap.c
80058+++ b/lib/bitmap.c
80059@@ -422,7 +422,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
80060 {
80061 int c, old_c, totaldigits, ndigits, nchunks, nbits;
80062 u32 chunk;
80063- const char __user __force *ubuf = (const char __user __force *)buf;
80064+ const char __user *ubuf = (const char __force_user *)buf;
80065
80066 bitmap_zero(maskp, nmaskbits);
80067
80068@@ -507,7 +507,7 @@ int bitmap_parse_user(const char __user *ubuf,
80069 {
80070 if (!access_ok(VERIFY_READ, ubuf, ulen))
80071 return -EFAULT;
80072- return __bitmap_parse((const char __force *)ubuf,
80073+ return __bitmap_parse((const char __force_kernel *)ubuf,
80074 ulen, 1, maskp, nmaskbits);
80075
80076 }
80077@@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
80078 {
80079 unsigned a, b;
80080 int c, old_c, totaldigits;
80081- const char __user __force *ubuf = (const char __user __force *)buf;
80082+ const char __user *ubuf = (const char __force_user *)buf;
80083 int exp_digit, in_range;
80084
80085 totaldigits = c = 0;
80086@@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
80087 {
80088 if (!access_ok(VERIFY_READ, ubuf, ulen))
80089 return -EFAULT;
80090- return __bitmap_parselist((const char __force *)ubuf,
80091+ return __bitmap_parselist((const char __force_kernel *)ubuf,
80092 ulen, 1, maskp, nmaskbits);
80093 }
80094 EXPORT_SYMBOL(bitmap_parselist_user);
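
The lib/bitmap.c hunks (and mm/maccess.c below) replace ad-hoc "__user __force" casts with single __force_user/__force_kernel annotations. These are sparse address-space markers: crossing from a kernel pointer to a __user pointer (or back) must be spelled out explicitly, which makes the deliberate crossings greppable and lets the checker flag accidental ones. A compilable sketch of how such annotations are typically defined, with the grsecurity macro bodies assumed to combine __force with the target address space:

    #include <stdio.h>

    /* Sparse-only annotations; they expand to nothing for the compiler. */
    #ifdef __CHECKER__
    # define __user          __attribute__((noderef, address_space(1)))
    # define __force_user    __attribute__((force, address_space(1)))
    #else
    # define __user
    # define __force_user
    #endif

    static long fake_parse(const char __user *ubuf) { (void)ubuf; return 0; }

    int main(void)
    {
            const char *kbuf = "0xff";
            /* Crossing address spaces is explicit and easy to audit: */
            long r = fake_parse((const char __force_user *)kbuf);
            printf("%ld\n", r);
            return 0;
    }
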
80095diff --git a/lib/bug.c b/lib/bug.c
80096index d0cdf14..4d07bd2 100644
80097--- a/lib/bug.c
80098+++ b/lib/bug.c
80099@@ -134,6 +134,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
80100 return BUG_TRAP_TYPE_NONE;
80101
80102 bug = find_bug(bugaddr);
80103+ if (!bug)
80104+ return BUG_TRAP_TYPE_NONE;
80105
80106 file = NULL;
80107 line = 0;
80108diff --git a/lib/debugobjects.c b/lib/debugobjects.c
80109index d11808c..dc2d6f8 100644
80110--- a/lib/debugobjects.c
80111+++ b/lib/debugobjects.c
80112@@ -287,7 +287,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
80113 if (limit > 4)
80114 return;
80115
80116- is_on_stack = object_is_on_stack(addr);
80117+ is_on_stack = object_starts_on_stack(addr);
80118 if (is_on_stack == onstack)
80119 return;
80120
80121diff --git a/lib/devres.c b/lib/devres.c
80122index 80b9c76..9e32279 100644
80123--- a/lib/devres.c
80124+++ b/lib/devres.c
80125@@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
80126 void devm_iounmap(struct device *dev, void __iomem *addr)
80127 {
80128 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
80129- (void *)addr));
80130+ (void __force *)addr));
80131 iounmap(addr);
80132 }
80133 EXPORT_SYMBOL(devm_iounmap);
80134@@ -192,7 +192,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
80135 {
80136 ioport_unmap(addr);
80137 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
80138- devm_ioport_map_match, (void *)addr));
80139+ devm_ioport_map_match, (void __force *)addr));
80140 }
80141 EXPORT_SYMBOL(devm_ioport_unmap);
80142
80143diff --git a/lib/div64.c b/lib/div64.c
80144index a163b6c..9618fa5 100644
80145--- a/lib/div64.c
80146+++ b/lib/div64.c
80147@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
80148 EXPORT_SYMBOL(__div64_32);
80149
80150 #ifndef div_s64_rem
80151-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
80152+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
80153 {
80154 u64 quotient;
80155
80156@@ -90,7 +90,7 @@ EXPORT_SYMBOL(div_s64_rem);
80157 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
80158 */
80159 #ifndef div64_u64
80160-u64 div64_u64(u64 dividend, u64 divisor)
80161+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
80162 {
80163 u32 high = divisor >> 32;
80164 u64 quot;
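
The __intentional_overflow(-1) marker added to div_s64_rem() and div64_u64() above is an annotation for the size_overflow gcc plugin: it whitelists functions whose arithmetic may legitimately wrap, so the plugin does not instrument them. The argument is assumed to name which value may overflow, with -1 denoting the return value. A minimal sketch of the shape, with the macro defined away as it would be in a build without the plugin:

    #include <stdio.h>

    /* Consumed by the size_overflow plugin; a no-op otherwise. */
    #define __intentional_overflow(...)

    static unsigned long long __intentional_overflow(-1)
    mul_u32(unsigned int a, unsigned int b)
    {
            return (unsigned long long)a * b;  /* wrap here is acceptable */
    }

    int main(void)
    {
            printf("%llu\n", mul_u32(0xffffffffu, 2u));
            return 0;
    }
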
80165diff --git a/lib/dma-debug.c b/lib/dma-debug.c
80166index 5e396ac..58d5de1 100644
80167--- a/lib/dma-debug.c
80168+++ b/lib/dma-debug.c
80169@@ -768,7 +768,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
80170
80171 void dma_debug_add_bus(struct bus_type *bus)
80172 {
80173- struct notifier_block *nb;
80174+ notifier_block_no_const *nb;
80175
80176 if (global_disable)
80177 return;
80178@@ -942,7 +942,7 @@ out:
80179
80180 static void check_for_stack(struct device *dev, void *addr)
80181 {
80182- if (object_is_on_stack(addr))
80183+ if (object_starts_on_stack(addr))
80184 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
80185 "stack [addr=%p]\n", addr);
80186 }
80187diff --git a/lib/inflate.c b/lib/inflate.c
80188index 013a761..c28f3fc 100644
80189--- a/lib/inflate.c
80190+++ b/lib/inflate.c
80191@@ -269,7 +269,7 @@ static void free(void *where)
80192 malloc_ptr = free_mem_ptr;
80193 }
80194 #else
80195-#define malloc(a) kmalloc(a, GFP_KERNEL)
80196+#define malloc(a) kmalloc((a), GFP_KERNEL)
80197 #define free(a) kfree(a)
80198 #endif
80199
80200diff --git a/lib/ioremap.c b/lib/ioremap.c
80201index 0c9216c..863bd89 100644
80202--- a/lib/ioremap.c
80203+++ b/lib/ioremap.c
80204@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
80205 unsigned long next;
80206
80207 phys_addr -= addr;
80208- pmd = pmd_alloc(&init_mm, pud, addr);
80209+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
80210 if (!pmd)
80211 return -ENOMEM;
80212 do {
80213@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
80214 unsigned long next;
80215
80216 phys_addr -= addr;
80217- pud = pud_alloc(&init_mm, pgd, addr);
80218+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
80219 if (!pud)
80220 return -ENOMEM;
80221 do {
80222diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
80223index bd2bea9..6b3c95e 100644
80224--- a/lib/is_single_threaded.c
80225+++ b/lib/is_single_threaded.c
80226@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
80227 struct task_struct *p, *t;
80228 bool ret;
80229
80230+ if (!mm)
80231+ return true;
80232+
80233 if (atomic_read(&task->signal->live) != 1)
80234 return false;
80235
80236diff --git a/lib/kobject.c b/lib/kobject.c
80237index e07ee1f..998489d 100644
80238--- a/lib/kobject.c
80239+++ b/lib/kobject.c
80240@@ -852,9 +852,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
80241
80242
80243 static DEFINE_SPINLOCK(kobj_ns_type_lock);
80244-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
80245+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
80246
80247-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
80248+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
80249 {
80250 enum kobj_ns_type type = ops->type;
80251 int error;
80252diff --git a/lib/list_debug.c b/lib/list_debug.c
80253index c24c2f7..06e070b 100644
80254--- a/lib/list_debug.c
80255+++ b/lib/list_debug.c
80256@@ -11,7 +11,9 @@
80257 #include <linux/bug.h>
80258 #include <linux/kernel.h>
80259 #include <linux/rculist.h>
80260+#include <linux/mm.h>
80261
80262+#ifdef CONFIG_DEBUG_LIST
80263 /*
80264 * Insert a new entry between two known consecutive entries.
80265 *
80266@@ -19,21 +21,32 @@
80267 * the prev/next entries already!
80268 */
80269
80270-void __list_add(struct list_head *new,
80271- struct list_head *prev,
80272- struct list_head *next)
80273+static bool __list_add_debug(struct list_head *new,
80274+ struct list_head *prev,
80275+ struct list_head *next)
80276 {
80277- WARN(next->prev != prev,
80278+ if (WARN(next->prev != prev,
80279 "list_add corruption. next->prev should be "
80280 "prev (%p), but was %p. (next=%p).\n",
80281- prev, next->prev, next);
80282- WARN(prev->next != next,
80283+ prev, next->prev, next) ||
80284+ WARN(prev->next != next,
80285 "list_add corruption. prev->next should be "
80286 "next (%p), but was %p. (prev=%p).\n",
80287- next, prev->next, prev);
80288- WARN(new == prev || new == next,
80289- "list_add double add: new=%p, prev=%p, next=%p.\n",
80290- new, prev, next);
80291+ next, prev->next, prev) ||
80292+ WARN(new == prev || new == next,
80293+ "list_add double add: new=%p, prev=%p, next=%p.\n",
80294+ new, prev, next))
80295+ return false;
80296+ return true;
80297+}
80298+
80299+void __list_add(struct list_head *new,
80300+ struct list_head *prev,
80301+ struct list_head *next)
80302+{
80303+ if (!__list_add_debug(new, prev, next))
80304+ return;
80305+
80306 next->prev = new;
80307 new->next = next;
80308 new->prev = prev;
80309@@ -41,7 +54,7 @@ void __list_add(struct list_head *new,
80310 }
80311 EXPORT_SYMBOL(__list_add);
80312
80313-void __list_del_entry(struct list_head *entry)
80314+static bool __list_del_entry_debug(struct list_head *entry)
80315 {
80316 struct list_head *prev, *next;
80317
80318@@ -60,9 +73,16 @@ void __list_del_entry(struct list_head *entry)
80319 WARN(next->prev != entry,
80320 "list_del corruption. next->prev should be %p, "
80321 "but was %p\n", entry, next->prev))
80322+ return false;
80323+ return true;
80324+}
80325+
80326+void __list_del_entry(struct list_head *entry)
80327+{
80328+ if (!__list_del_entry_debug(entry))
80329 return;
80330
80331- __list_del(prev, next);
80332+ __list_del(entry->prev, entry->next);
80333 }
80334 EXPORT_SYMBOL(__list_del_entry);
80335
80336@@ -86,15 +106,85 @@ EXPORT_SYMBOL(list_del);
80337 void __list_add_rcu(struct list_head *new,
80338 struct list_head *prev, struct list_head *next)
80339 {
80340- WARN(next->prev != prev,
80341- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
80342- prev, next->prev, next);
80343- WARN(prev->next != next,
80344- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
80345- next, prev->next, prev);
80346+ if (!__list_add_debug(new, prev, next))
80347+ return;
80348+
80349 new->next = next;
80350 new->prev = prev;
80351 rcu_assign_pointer(list_next_rcu(prev), new);
80352 next->prev = new;
80353 }
80354 EXPORT_SYMBOL(__list_add_rcu);
80355+#endif
80356+
80357+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
80358+{
80359+#ifdef CONFIG_DEBUG_LIST
80360+ if (!__list_add_debug(new, prev, next))
80361+ return;
80362+#endif
80363+
80364+ pax_open_kernel();
80365+ next->prev = new;
80366+ new->next = next;
80367+ new->prev = prev;
80368+ prev->next = new;
80369+ pax_close_kernel();
80370+}
80371+EXPORT_SYMBOL(__pax_list_add);
80372+
80373+void pax_list_del(struct list_head *entry)
80374+{
80375+#ifdef CONFIG_DEBUG_LIST
80376+ if (!__list_del_entry_debug(entry))
80377+ return;
80378+#endif
80379+
80380+ pax_open_kernel();
80381+ __list_del(entry->prev, entry->next);
80382+ entry->next = LIST_POISON1;
80383+ entry->prev = LIST_POISON2;
80384+ pax_close_kernel();
80385+}
80386+EXPORT_SYMBOL(pax_list_del);
80387+
80388+void pax_list_del_init(struct list_head *entry)
80389+{
80390+ pax_open_kernel();
80391+ __list_del(entry->prev, entry->next);
80392+ INIT_LIST_HEAD(entry);
80393+ pax_close_kernel();
80394+}
80395+EXPORT_SYMBOL(pax_list_del_init);
80396+
80397+void __pax_list_add_rcu(struct list_head *new,
80398+ struct list_head *prev, struct list_head *next)
80399+{
80400+#ifdef CONFIG_DEBUG_LIST
80401+ if (!__list_add_debug(new, prev, next))
80402+ return;
80403+#endif
80404+
80405+ pax_open_kernel();
80406+ new->next = next;
80407+ new->prev = prev;
80408+ rcu_assign_pointer(list_next_rcu(prev), new);
80409+ next->prev = new;
80410+ pax_close_kernel();
80411+}
80412+EXPORT_SYMBOL(__pax_list_add_rcu);
80413+
80414+void pax_list_del_rcu(struct list_head *entry)
80415+{
80416+#ifdef CONFIG_DEBUG_LIST
80417+ if (!__list_del_entry_debug(entry))
80418+ return;
80419+#endif
80420+
80421+ pax_open_kernel();
80422+ __list_del(entry->prev, entry->next);
80423+ entry->next = LIST_POISON1;
80424+ entry->prev = LIST_POISON2;
80425+ pax_close_kernel();
80426+}
80427+EXPORT_SYMBOL(pax_list_del_rcu);
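
The list_debug.c rewrite above explains the earlier lib/Makefile change (list_debug.o built unconditionally): the file now hosts the pax_list_* helpers, which must exist even without CONFIG_DEBUG_LIST, while the corruption checks themselves stay behind the #ifdef. The pax variants perform the usual list pointer surgery, but inside the kernel-write window, so list nodes living in read-only (constified) memory can still be linked and unlinked. A compilable user-space model of __pax_list_add, with the window calls reduced to no-ops:

    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    #define LIST_HEAD_INIT(name) { &(name), &(name) }

    static void pax_open_kernel(void)  {}  /* write window modelled as no-ops */
    static void pax_close_kernel(void) {}

    /* Same pointer updates as __list_add, bracketed by the window. */
    static void __pax_list_add(struct list_head *new,
                               struct list_head *prev, struct list_head *next)
    {
            pax_open_kernel();
            next->prev = new;
            new->next  = next;
            new->prev  = prev;
            prev->next = new;
            pax_close_kernel();
    }

    int main(void)
    {
            struct list_head head = LIST_HEAD_INIT(head), node;
            __pax_list_add(&node, &head, head.next);
            printf("%d\n", head.next == &node && node.next == &head);
            return 0;
    }
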
80428diff --git a/lib/radix-tree.c b/lib/radix-tree.c
80429index e796429..6e38f9f 100644
80430--- a/lib/radix-tree.c
80431+++ b/lib/radix-tree.c
80432@@ -92,7 +92,7 @@ struct radix_tree_preload {
80433 int nr;
80434 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
80435 };
80436-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
80437+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
80438
80439 static inline void *ptr_to_indirect(void *ptr)
80440 {
80441diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
80442index bb2b201..46abaf9 100644
80443--- a/lib/strncpy_from_user.c
80444+++ b/lib/strncpy_from_user.c
80445@@ -21,7 +21,7 @@
80446 */
80447 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
80448 {
80449- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
80450+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
80451 long res = 0;
80452
80453 /*
80454diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
80455index a28df52..3d55877 100644
80456--- a/lib/strnlen_user.c
80457+++ b/lib/strnlen_user.c
80458@@ -26,7 +26,7 @@
80459 */
80460 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
80461 {
80462- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
80463+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
80464 long align, res = 0;
80465 unsigned long c;
80466
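
In strncpy_from_user.c and strnlen_user.c above, the word_at_a_time constants table gains a static qualifier. A plain const local is rebuilt on the stack at every call; static const moves the one copy into .rodata, built at link time and out of reach of stack corruption. A small compilable illustration, with the constant values assumed from the usual repeated-byte pattern:

    #include <stdio.h>

    struct word_at_a_time { unsigned long long one_bits, high_bits; };
    #define WORD_AT_A_TIME_CONSTANTS \
            { 0x0101010101010101ull, 0x8080808080808080ull }

    static long do_strnlen(const char *s, long max)
    {
            /* 'static const': one .rodata copy, not a per-call
             * stack reconstruction. */
            static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
            long n = 0;

            (void)constants;  /* the real code scans word-by-word with it */
            while (n < max && s[n])
                    n++;
            return n;
    }

    int main(void)
    {
            printf("%ld\n", do_strnlen("grsecurity", 64));
            return 0;
    }
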
80467diff --git a/lib/swiotlb.c b/lib/swiotlb.c
80468index 196b069..358f342 100644
80469--- a/lib/swiotlb.c
80470+++ b/lib/swiotlb.c
80471@@ -642,7 +642,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
80472
80473 void
80474 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
80475- dma_addr_t dev_addr)
80476+ dma_addr_t dev_addr, struct dma_attrs *attrs)
80477 {
80478 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
80479
80480diff --git a/lib/vsprintf.c b/lib/vsprintf.c
80481index fab33a9..3b5fe68 100644
80482--- a/lib/vsprintf.c
80483+++ b/lib/vsprintf.c
80484@@ -16,6 +16,9 @@
80485 * - scnprintf and vscnprintf
80486 */
80487
80488+#ifdef CONFIG_GRKERNSEC_HIDESYM
80489+#define __INCLUDED_BY_HIDESYM 1
80490+#endif
80491 #include <stdarg.h>
80492 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
80493 #include <linux/types.h>
80494@@ -541,7 +544,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
80495 char sym[KSYM_SYMBOL_LEN];
80496 if (ext == 'B')
80497 sprint_backtrace(sym, value);
80498- else if (ext != 'f' && ext != 's')
80499+ else if (ext != 'f' && ext != 's' && ext != 'a')
80500 sprint_symbol(sym, value);
80501 else
80502 sprint_symbol_no_offset(sym, value);
80503@@ -974,7 +977,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
80504 return number(buf, end, *(const netdev_features_t *)addr, spec);
80505 }
80506
80507+#ifdef CONFIG_GRKERNSEC_HIDESYM
80508+int kptr_restrict __read_mostly = 2;
80509+#else
80510 int kptr_restrict __read_mostly;
80511+#endif
80512
80513 /*
80514 * Show a '%p' thing. A kernel extension is that the '%p' is followed
80515@@ -988,6 +995,8 @@ int kptr_restrict __read_mostly;
80516 * - 'S' For symbolic direct pointers with offset
80517 * - 's' For symbolic direct pointers without offset
80518 * - 'B' For backtraced symbolic direct pointers with offset
80519+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
80520+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
80521 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
80522 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
80523 * - 'M' For a 6-byte MAC address, it prints the address in the
80524@@ -1043,12 +1052,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
80525
80526 if (!ptr && *fmt != 'K') {
80527 /*
80528- * Print (null) with the same width as a pointer so it makes
80529+ * Print (nil) with the same width as a pointer so it makes
80530 * tabular output look nice.
80531 */
80532 if (spec.field_width == -1)
80533 spec.field_width = default_width;
80534- return string(buf, end, "(null)", spec);
80535+ return string(buf, end, "(nil)", spec);
80536 }
80537
80538 switch (*fmt) {
80539@@ -1058,6 +1067,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
80540 /* Fallthrough */
80541 case 'S':
80542 case 's':
80543+#ifdef CONFIG_GRKERNSEC_HIDESYM
80544+ break;
80545+#else
80546+ return symbol_string(buf, end, ptr, spec, *fmt);
80547+#endif
80548+ case 'A':
80549+ case 'a':
80550 case 'B':
80551 return symbol_string(buf, end, ptr, spec, *fmt);
80552 case 'R':
80553@@ -1098,6 +1114,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
80554 va_end(va);
80555 return buf;
80556 }
80557+ case 'P':
80558+ break;
80559 case 'K':
80560 /*
80561 * %pK cannot be used in IRQ context because its test
80562@@ -1121,6 +1139,21 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
80563 }
80564 break;
80565 }
80566+
80567+#ifdef CONFIG_GRKERNSEC_HIDESYM
80568+ /* 'P' = approved pointers to copy to userland,
80569+ as in the /proc/kallsyms case, as we make it display nothing
80570+ for non-root users, and the real contents for root users
80571+ Also ignore 'K' pointers, since we force their NULLing for non-root users
80572+ above
80573+ */
80574+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'K' && is_usercopy_object(buf)) {
80575+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
80576+ dump_stack();
80577+ ptr = NULL;
80578+ }
80579+#endif
80580+
80581 spec.flags |= SMALL;
80582 if (spec.field_width == -1) {
80583 spec.field_width = default_width;
80584@@ -1842,11 +1875,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
80585 typeof(type) value; \
80586 if (sizeof(type) == 8) { \
80587 args = PTR_ALIGN(args, sizeof(u32)); \
80588- *(u32 *)&value = *(u32 *)args; \
80589- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
80590+ *(u32 *)&value = *(const u32 *)args; \
80591+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
80592 } else { \
80593 args = PTR_ALIGN(args, sizeof(type)); \
80594- value = *(typeof(type) *)args; \
80595+ value = *(const typeof(type) *)args; \
80596 } \
80597 args += sizeof(type); \
80598 value; \
80599@@ -1909,7 +1942,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
80600 case FORMAT_TYPE_STR: {
80601 const char *str_arg = args;
80602 args += strlen(str_arg) + 1;
80603- str = string(str, end, (char *)str_arg, spec);
80604+ str = string(str, end, str_arg, spec);
80605 break;
80606 }
80607
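
The vsprintf.c hunks bundle several %p hardening measures: NULL now prints "(nil)" instead of "(null)", kptr_restrict defaults to 2 under GRKERNSEC_HIDESYM, new 'A'/'a' specifiers print symbols approved for exposure, 'P' passes a pointer through the HIDESYM infoleak detector, and that detector NULLs any unapproved kernel pointer headed for a user-visible buffer and logs the event. The sketch below models only the kptr_restrict gate, with the capability check reduced to a flag; it is an illustration of the policy, not the kernel's implementation.

    #include <stdio.h>

    static int kptr_restrict = 2;  /* the HIDESYM default in this patch */

    /* Model of the %pK policy: hide raw kernel addresses from
     * unprivileged readers; 'capable' stands in for CAP_SYSLOG. */
    static void print_kptr(const void *p, int capable)
    {
            if (!p)
                    printf("(nil)\n");                 /* patched null text */
            else if (kptr_restrict == 0 || (kptr_restrict == 1 && capable))
                    printf("%p\n", p);
            else
                    printf("%p\n", (void *)0);         /* masked */
    }

    int main(void)
    {
            int x;
            print_kptr(NULL, 0);
            print_kptr(&x, 0);   /* masked at restrict level 2 */
            return 0;
    }
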
80608diff --git a/localversion-grsec b/localversion-grsec
80609new file mode 100644
80610index 0000000..7cd6065
80611--- /dev/null
80612+++ b/localversion-grsec
80613@@ -0,0 +1 @@
80614+-grsec
80615diff --git a/mm/Kconfig b/mm/Kconfig
80616index 278e3ab..87c384d 100644
80617--- a/mm/Kconfig
80618+++ b/mm/Kconfig
80619@@ -286,10 +286,10 @@ config KSM
80620 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
80621
80622 config DEFAULT_MMAP_MIN_ADDR
80623- int "Low address space to protect from user allocation"
80624+ int "Low address space to protect from user allocation"
80625 depends on MMU
80626- default 4096
80627- help
80628+ default 65536
80629+ help
80630 This is the portion of low virtual memory which should be protected
80631 from userspace allocation. Keeping a user from writing to low pages
80632 can help reduce the impact of kernel NULL pointer bugs.
80633@@ -320,7 +320,7 @@ config MEMORY_FAILURE
80634
80635 config HWPOISON_INJECT
80636 tristate "HWPoison pages injector"
80637- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
80638+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
80639 select PROC_PAGE_MONITOR
80640
80641 config NOMMU_INITIAL_TRIM_EXCESS
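
The mm/Kconfig hunk raises DEFAULT_MMAP_MIN_ADDR from 4096 to 65536, widening the unmappable window at the bottom of the address space so that kernel NULL-pointer dereference bugs (including small-offset ones) cannot be turned into exploits by mapping page zero and its neighbours; HWPOISON_INJECT is also fenced off under GRKERNSEC. A sketch of the enforcement point, modelled loosely on the LSM mmap address check, with names and the capability escape hatch assumed:

    #include <stdio.h>

    #define DEFAULT_MMAP_MIN_ADDR 65536  /* patched default, was 4096 */

    /* Mappings below the floor are rejected for ordinary users. */
    static int check_mmap_addr(unsigned long addr, int has_cap_sys_rawio)
    {
            if (addr < DEFAULT_MMAP_MIN_ADDR && !has_cap_sys_rawio)
                    return -1;  /* -EPERM in the kernel */
            return 0;
    }

    int main(void)
    {
            printf("%d %d\n", check_mmap_addr(0x1000, 0),
                              check_mmap_addr(0x20000, 0));
            return 0;
    }
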
80642diff --git a/mm/filemap.c b/mm/filemap.c
80643index 83efee7..3f99381 100644
80644--- a/mm/filemap.c
80645+++ b/mm/filemap.c
80646@@ -1747,7 +1747,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
80647 struct address_space *mapping = file->f_mapping;
80648
80649 if (!mapping->a_ops->readpage)
80650- return -ENOEXEC;
80651+ return -ENODEV;
80652 file_accessed(file);
80653 vma->vm_ops = &generic_file_vm_ops;
80654 return 0;
80655@@ -2087,6 +2087,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
80656 *pos = i_size_read(inode);
80657
80658 if (limit != RLIM_INFINITY) {
80659+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
80660 if (*pos >= limit) {
80661 send_sig(SIGXFSZ, current, 0);
80662 return -EFBIG;
80663diff --git a/mm/fremap.c b/mm/fremap.c
80664index a0aaf0e..20325c3 100644
80665--- a/mm/fremap.c
80666+++ b/mm/fremap.c
80667@@ -157,6 +157,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
80668 retry:
80669 vma = find_vma(mm, start);
80670
80671+#ifdef CONFIG_PAX_SEGMEXEC
80672+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
80673+ goto out;
80674+#endif
80675+
80676 /*
80677 * Make sure the vma is shared, that it supports prefaulting,
80678 * and that the remapped range is valid and fully within
80679diff --git a/mm/highmem.c b/mm/highmem.c
80680index b32b70c..e512eb0 100644
80681--- a/mm/highmem.c
80682+++ b/mm/highmem.c
80683@@ -138,8 +138,9 @@ static void flush_all_zero_pkmaps(void)
80684 * So no dangers, even with speculative execution.
80685 */
80686 page = pte_page(pkmap_page_table[i]);
80687+ pax_open_kernel();
80688 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
80689-
80690+ pax_close_kernel();
80691 set_page_address(page, NULL);
80692 need_flush = 1;
80693 }
80694@@ -198,9 +199,11 @@ start:
80695 }
80696 }
80697 vaddr = PKMAP_ADDR(last_pkmap_nr);
80698+
80699+ pax_open_kernel();
80700 set_pte_at(&init_mm, vaddr,
80701 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
80702-
80703+ pax_close_kernel();
80704 pkmap_count[last_pkmap_nr] = 1;
80705 set_page_address(page, (void *)vaddr);
80706
80707diff --git a/mm/hugetlb.c b/mm/hugetlb.c
80708index d7cec92..b05cc33 100644
80709--- a/mm/hugetlb.c
80710+++ b/mm/hugetlb.c
80711@@ -2008,15 +2008,17 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
80712 struct hstate *h = &default_hstate;
80713 unsigned long tmp;
80714 int ret;
80715+ ctl_table_no_const hugetlb_table;
80716
80717 tmp = h->max_huge_pages;
80718
80719 if (write && h->order >= MAX_ORDER)
80720 return -EINVAL;
80721
80722- table->data = &tmp;
80723- table->maxlen = sizeof(unsigned long);
80724- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
80725+ hugetlb_table = *table;
80726+ hugetlb_table.data = &tmp;
80727+ hugetlb_table.maxlen = sizeof(unsigned long);
80728+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
80729 if (ret)
80730 goto out;
80731
80732@@ -2073,15 +2075,17 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
80733 struct hstate *h = &default_hstate;
80734 unsigned long tmp;
80735 int ret;
80736+ ctl_table_no_const hugetlb_table;
80737
80738 tmp = h->nr_overcommit_huge_pages;
80739
80740 if (write && h->order >= MAX_ORDER)
80741 return -EINVAL;
80742
80743- table->data = &tmp;
80744- table->maxlen = sizeof(unsigned long);
80745- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
80746+ hugetlb_table = *table;
80747+ hugetlb_table.data = &tmp;
80748+ hugetlb_table.maxlen = sizeof(unsigned long);
80749+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
80750 if (ret)
80751 goto out;
80752
80753@@ -2515,6 +2519,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
80754 return 1;
80755 }
80756
80757+#ifdef CONFIG_PAX_SEGMEXEC
80758+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
80759+{
80760+ struct mm_struct *mm = vma->vm_mm;
80761+ struct vm_area_struct *vma_m;
80762+ unsigned long address_m;
80763+ pte_t *ptep_m;
80764+
80765+ vma_m = pax_find_mirror_vma(vma);
80766+ if (!vma_m)
80767+ return;
80768+
80769+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
80770+ address_m = address + SEGMEXEC_TASK_SIZE;
80771+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
80772+ get_page(page_m);
80773+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
80774+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
80775+}
80776+#endif
80777+
80778 /*
80779 * Hugetlb_cow() should be called with page lock of the original hugepage held.
80780 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
80781@@ -2633,6 +2658,11 @@ retry_avoidcopy:
80782 make_huge_pte(vma, new_page, 1));
80783 page_remove_rmap(old_page);
80784 hugepage_add_new_anon_rmap(new_page, vma, address);
80785+
80786+#ifdef CONFIG_PAX_SEGMEXEC
80787+ pax_mirror_huge_pte(vma, address, new_page);
80788+#endif
80789+
80790 /* Make the old page be freed below */
80791 new_page = old_page;
80792 }
80793@@ -2792,6 +2822,10 @@ retry:
80794 && (vma->vm_flags & VM_SHARED)));
80795 set_huge_pte_at(mm, address, ptep, new_pte);
80796
80797+#ifdef CONFIG_PAX_SEGMEXEC
80798+ pax_mirror_huge_pte(vma, address, page);
80799+#endif
80800+
80801 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
80802 /* Optimization, do the COW without a second fault */
80803 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
80804@@ -2821,6 +2855,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
80805 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
80806 struct hstate *h = hstate_vma(vma);
80807
80808+#ifdef CONFIG_PAX_SEGMEXEC
80809+ struct vm_area_struct *vma_m;
80810+#endif
80811+
80812 address &= huge_page_mask(h);
80813
80814 ptep = huge_pte_offset(mm, address);
80815@@ -2834,6 +2872,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
80816 VM_FAULT_SET_HINDEX(hstate_index(h));
80817 }
80818
80819+#ifdef CONFIG_PAX_SEGMEXEC
80820+ vma_m = pax_find_mirror_vma(vma);
80821+ if (vma_m) {
80822+ unsigned long address_m;
80823+
80824+ if (vma->vm_start > vma_m->vm_start) {
80825+ address_m = address;
80826+ address -= SEGMEXEC_TASK_SIZE;
80827+ vma = vma_m;
80828+ h = hstate_vma(vma);
80829+ } else
80830+ address_m = address + SEGMEXEC_TASK_SIZE;
80831+
80832+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
80833+ return VM_FAULT_OOM;
80834+ address_m &= HPAGE_MASK;
80835+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
80836+ }
80837+#endif
80838+
80839 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
80840 if (!ptep)
80841 return VM_FAULT_OOM;
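
The hugetlb.c hunks above (and the mm/madvise.c ones below) extend the PAX_SEGMEXEC mirroring scheme to huge pages: SEGMEXEC splits the 32-bit user address space in half, and every data mapping in the lower half has an executable mirror VMA in the upper half, so a fault or COW at address must also populate the PTE at address + SEGMEXEC_TASK_SIZE, and teardown must zap both halves. A one-line illustration of the address arithmetic, with the split value assumed from the usual PaX 1.5 GB layout:

    #include <stdio.h>

    #define SEGMEXEC_TASK_SIZE 0x60000000ul  /* 1.5 GB split, assumed from PaX */

    /* Every fault below the split has a mirror above it. */
    int main(void)
    {
            unsigned long addr   = 0x08048000ul;  /* classic i386 text base */
            unsigned long mirror = addr + SEGMEXEC_TASK_SIZE;
            printf("fault at %#lx -> mirror PTE at %#lx\n", addr, mirror);
            return 0;
    }
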
80842diff --git a/mm/internal.h b/mm/internal.h
80843index 9ba2110..eaf0674 100644
80844--- a/mm/internal.h
80845+++ b/mm/internal.h
80846@@ -100,6 +100,7 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
80847 * in mm/page_alloc.c
80848 */
80849 extern void __free_pages_bootmem(struct page *page, unsigned int order);
80850+extern void free_compound_page(struct page *page);
80851 extern void prep_compound_page(struct page *page, unsigned long order);
80852 #ifdef CONFIG_MEMORY_FAILURE
80853 extern bool is_free_buddy_page(struct page *page);
80854diff --git a/mm/kmemleak.c b/mm/kmemleak.c
80855index 752a705..6c3102e 100644
80856--- a/mm/kmemleak.c
80857+++ b/mm/kmemleak.c
80858@@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
80859
80860 for (i = 0; i < object->trace_len; i++) {
80861 void *ptr = (void *)object->trace[i];
80862- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
80863+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
80864 }
80865 }
80866
80867@@ -1853,7 +1853,7 @@ static int __init kmemleak_late_init(void)
80868 return -ENOMEM;
80869 }
80870
80871- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
80872+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
80873 &kmemleak_fops);
80874 if (!dentry)
80875 pr_warning("Failed to create the debugfs kmemleak file\n");
80876diff --git a/mm/maccess.c b/mm/maccess.c
80877index d53adf9..03a24bf 100644
80878--- a/mm/maccess.c
80879+++ b/mm/maccess.c
80880@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
80881 set_fs(KERNEL_DS);
80882 pagefault_disable();
80883 ret = __copy_from_user_inatomic(dst,
80884- (__force const void __user *)src, size);
80885+ (const void __force_user *)src, size);
80886 pagefault_enable();
80887 set_fs(old_fs);
80888
80889@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
80890
80891 set_fs(KERNEL_DS);
80892 pagefault_disable();
80893- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
80894+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
80895 pagefault_enable();
80896 set_fs(old_fs);
80897
80898diff --git a/mm/madvise.c b/mm/madvise.c
80899index 03dfa5c..b032917 100644
80900--- a/mm/madvise.c
80901+++ b/mm/madvise.c
80902@@ -48,6 +48,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
80903 pgoff_t pgoff;
80904 unsigned long new_flags = vma->vm_flags;
80905
80906+#ifdef CONFIG_PAX_SEGMEXEC
80907+ struct vm_area_struct *vma_m;
80908+#endif
80909+
80910 switch (behavior) {
80911 case MADV_NORMAL:
80912 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
80913@@ -123,6 +127,13 @@ success:
80914 /*
80915 * vm_flags is protected by the mmap_sem held in write mode.
80916 */
80917+
80918+#ifdef CONFIG_PAX_SEGMEXEC
80919+ vma_m = pax_find_mirror_vma(vma);
80920+ if (vma_m)
80921+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
80922+#endif
80923+
80924 vma->vm_flags = new_flags;
80925
80926 out:
80927@@ -181,6 +192,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
80928 struct vm_area_struct ** prev,
80929 unsigned long start, unsigned long end)
80930 {
80931+
80932+#ifdef CONFIG_PAX_SEGMEXEC
80933+ struct vm_area_struct *vma_m;
80934+#endif
80935+
80936 *prev = vma;
80937 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
80938 return -EINVAL;
80939@@ -193,6 +209,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
80940 zap_page_range(vma, start, end - start, &details);
80941 } else
80942 zap_page_range(vma, start, end - start, NULL);
80943+
80944+#ifdef CONFIG_PAX_SEGMEXEC
80945+ vma_m = pax_find_mirror_vma(vma);
80946+ if (vma_m) {
80947+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
80948+ struct zap_details details = {
80949+ .nonlinear_vma = vma_m,
80950+ .last_index = ULONG_MAX,
80951+ };
80952+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
80953+ } else
80954+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
80955+ }
80956+#endif
80957+
80958 return 0;
80959 }
80960
80961@@ -397,6 +428,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
80962 if (end < start)
80963 goto out;
80964
80965+#ifdef CONFIG_PAX_SEGMEXEC
80966+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
80967+ if (end > SEGMEXEC_TASK_SIZE)
80968+ goto out;
80969+ } else
80970+#endif
80971+
80972+ if (end > TASK_SIZE)
80973+ goto out;
80974+
80975 error = 0;
80976 if (end == start)
80977 goto out;
80978diff --git a/mm/memory-failure.c b/mm/memory-failure.c
80979index c6e4dd3..1f41988 100644
80980--- a/mm/memory-failure.c
80981+++ b/mm/memory-failure.c
80982@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
80983
80984 int sysctl_memory_failure_recovery __read_mostly = 1;
80985
80986-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
80987+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
80988
80989 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
80990
80991@@ -202,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
80992 pfn, t->comm, t->pid);
80993 si.si_signo = SIGBUS;
80994 si.si_errno = 0;
80995- si.si_addr = (void *)addr;
80996+ si.si_addr = (void __user *)addr;
80997 #ifdef __ARCH_SI_TRAPNO
80998 si.si_trapno = trapno;
80999 #endif
81000@@ -760,7 +760,7 @@ static struct page_state {
81001 unsigned long res;
81002 char *msg;
81003 int (*action)(struct page *p, unsigned long pfn);
81004-} error_states[] = {
81005+} __do_const error_states[] = {
81006 { reserved, reserved, "reserved kernel", me_kernel },
81007 /*
81008 * free pages are specially detected outside this table:
81009@@ -1040,7 +1040,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
81010 }
81011
81012 nr_pages = 1 << compound_trans_order(hpage);
81013- atomic_long_add(nr_pages, &mce_bad_pages);
81014+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
81015
81016 /*
81017 * We need/can do nothing about count=0 pages.
81018@@ -1070,7 +1070,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
81019 if (!PageHWPoison(hpage)
81020 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
81021 || (p != hpage && TestSetPageHWPoison(hpage))) {
81022- atomic_long_sub(nr_pages, &mce_bad_pages);
81023+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
81024 return 0;
81025 }
81026 set_page_hwpoison_huge_page(hpage);
81027@@ -1128,7 +1128,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
81028 }
81029 if (hwpoison_filter(p)) {
81030 if (TestClearPageHWPoison(p))
81031- atomic_long_sub(nr_pages, &mce_bad_pages);
81032+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
81033 unlock_page(hpage);
81034 put_page(hpage);
81035 return 0;
81036@@ -1323,7 +1323,7 @@ int unpoison_memory(unsigned long pfn)
81037 return 0;
81038 }
81039 if (TestClearPageHWPoison(p))
81040- atomic_long_sub(nr_pages, &mce_bad_pages);
81041+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
81042 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
81043 return 0;
81044 }
81045@@ -1337,7 +1337,7 @@ int unpoison_memory(unsigned long pfn)
81046 */
81047 if (TestClearPageHWPoison(page)) {
81048 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
81049- atomic_long_sub(nr_pages, &mce_bad_pages);
81050+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
81051 freeit = 1;
81052 if (PageHuge(page))
81053 clear_page_hwpoison_huge_page(page);
81054@@ -1442,7 +1442,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
81055 }
81056 done:
81057 if (!PageHWPoison(hpage))
81058- atomic_long_add(1 << compound_trans_order(hpage),
81059+ atomic_long_add_unchecked(1 << compound_trans_order(hpage),
81060 &mce_bad_pages);
81061 set_page_hwpoison_huge_page(hpage);
81062 dequeue_hwpoisoned_huge_page(hpage);
81063@@ -1583,7 +1583,7 @@ int soft_offline_page(struct page *page, int flags)
81064 return ret;
81065
81066 done:
81067- atomic_long_add(1, &mce_bad_pages);
81068+ atomic_long_add_unchecked(1, &mce_bad_pages);
81069 SetPageHWPoison(page);
81070 /* keep elevated page count for bad page */
81071 return ret;
81072diff --git a/mm/memory.c b/mm/memory.c
81073index bb1369f..b9631d2 100644
81074--- a/mm/memory.c
81075+++ b/mm/memory.c
81076@@ -433,6 +433,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
81077 free_pte_range(tlb, pmd, addr);
81078 } while (pmd++, addr = next, addr != end);
81079
81080+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
81081 start &= PUD_MASK;
81082 if (start < floor)
81083 return;
81084@@ -447,6 +448,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
81085 pmd = pmd_offset(pud, start);
81086 pud_clear(pud);
81087 pmd_free_tlb(tlb, pmd, start);
81088+#endif
81089+
81090 }
81091
81092 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
81093@@ -466,6 +469,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
81094 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
81095 } while (pud++, addr = next, addr != end);
81096
81097+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
81098 start &= PGDIR_MASK;
81099 if (start < floor)
81100 return;
81101@@ -480,6 +484,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
81102 pud = pud_offset(pgd, start);
81103 pgd_clear(pgd);
81104 pud_free_tlb(tlb, pud, start);
81105+#endif
81106+
81107 }
81108
81109 /*
81110@@ -1618,12 +1624,6 @@ no_page_table:
81111 return page;
81112 }
81113
81114-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
81115-{
81116- return stack_guard_page_start(vma, addr) ||
81117- stack_guard_page_end(vma, addr+PAGE_SIZE);
81118-}
81119-
81120 /**
81121 * __get_user_pages() - pin user pages in memory
81122 * @tsk: task_struct of target task
81123@@ -1709,10 +1709,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
81124
81125 i = 0;
81126
81127- do {
81128+ while (nr_pages) {
81129 struct vm_area_struct *vma;
81130
81131- vma = find_extend_vma(mm, start);
81132+ vma = find_vma(mm, start);
81133 if (!vma && in_gate_area(mm, start)) {
81134 unsigned long pg = start & PAGE_MASK;
81135 pgd_t *pgd;
81136@@ -1760,7 +1760,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
81137 goto next_page;
81138 }
81139
81140- if (!vma ||
81141+ if (!vma || start < vma->vm_start ||
81142 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
81143 !(vm_flags & vma->vm_flags))
81144 return i ? : -EFAULT;
81145@@ -1787,11 +1787,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
81146 int ret;
81147 unsigned int fault_flags = 0;
81148
81149- /* For mlock, just skip the stack guard page. */
81150- if (foll_flags & FOLL_MLOCK) {
81151- if (stack_guard_page(vma, start))
81152- goto next_page;
81153- }
81154 if (foll_flags & FOLL_WRITE)
81155 fault_flags |= FAULT_FLAG_WRITE;
81156 if (nonblocking)
81157@@ -1865,7 +1860,7 @@ next_page:
81158 start += PAGE_SIZE;
81159 nr_pages--;
81160 } while (nr_pages && start < vma->vm_end);
81161- } while (nr_pages);
81162+ }
81163 return i;
81164 }
81165 EXPORT_SYMBOL(__get_user_pages);
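
The rewritten __get_user_pages() loop above drops find_extend_vma(), which would transparently grow a stack VMA to cover the faulting address, in favor of find_vma() plus an explicit start < vma->vm_start test; gup therefore no longer expands stacks implicitly, which is also why the stack_guard_page()/FOLL_MLOCK special case can be deleted. Since find_vma() returns the first VMA whose vm_end exceeds the address, a non-NULL result does not mean the address lies inside it. A userspace sketch of that semantic — the struct and list are mine:

#include <stddef.h>
#include <assert.h>

struct vma { unsigned long vm_start, vm_end; struct vma *vm_next; };

/* Model of find_vma(): first vma with vm_end > addr, or NULL. */
static struct vma *find_vma(struct vma *head, unsigned long addr)
{
	for (struct vma *v = head; v; v = v->vm_next)
		if (v->vm_end > addr)
			return v;
	return NULL;
}

int main(void)
{
	struct vma b = { 0x8000, 0x9000, NULL };
	struct vma a = { 0x1000, 0x2000, &b };

	struct vma *v = find_vma(&a, 0x3000);   /* 0x3000 falls in the hole */
	assert(v == &b);
	/* the patched gup check: a hit below vm_start is still a fault */
	assert(0x3000 < v->vm_start);
	return 0;
}
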
81166@@ -2072,6 +2067,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
81167 page_add_file_rmap(page);
81168 set_pte_at(mm, addr, pte, mk_pte(page, prot));
81169
81170+#ifdef CONFIG_PAX_SEGMEXEC
81171+ pax_mirror_file_pte(vma, addr, page, ptl);
81172+#endif
81173+
81174 retval = 0;
81175 pte_unmap_unlock(pte, ptl);
81176 return retval;
81177@@ -2116,9 +2115,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
81178 if (!page_count(page))
81179 return -EINVAL;
81180 if (!(vma->vm_flags & VM_MIXEDMAP)) {
81181+
81182+#ifdef CONFIG_PAX_SEGMEXEC
81183+ struct vm_area_struct *vma_m;
81184+#endif
81185+
81186 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
81187 BUG_ON(vma->vm_flags & VM_PFNMAP);
81188 vma->vm_flags |= VM_MIXEDMAP;
81189+
81190+#ifdef CONFIG_PAX_SEGMEXEC
81191+ vma_m = pax_find_mirror_vma(vma);
81192+ if (vma_m)
81193+ vma_m->vm_flags |= VM_MIXEDMAP;
81194+#endif
81195+
81196 }
81197 return insert_page(vma, addr, page, vma->vm_page_prot);
81198 }
81199@@ -2201,6 +2212,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
81200 unsigned long pfn)
81201 {
81202 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
81203+ BUG_ON(vma->vm_mirror);
81204
81205 if (addr < vma->vm_start || addr >= vma->vm_end)
81206 return -EFAULT;
81207@@ -2401,7 +2413,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
81208
81209 BUG_ON(pud_huge(*pud));
81210
81211- pmd = pmd_alloc(mm, pud, addr);
81212+ pmd = (mm == &init_mm) ?
81213+ pmd_alloc_kernel(mm, pud, addr) :
81214+ pmd_alloc(mm, pud, addr);
81215 if (!pmd)
81216 return -ENOMEM;
81217 do {
81218@@ -2421,7 +2435,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
81219 unsigned long next;
81220 int err;
81221
81222- pud = pud_alloc(mm, pgd, addr);
81223+ pud = (mm == &init_mm) ?
81224+ pud_alloc_kernel(mm, pgd, addr) :
81225+ pud_alloc(mm, pgd, addr);
81226 if (!pud)
81227 return -ENOMEM;
81228 do {
81229@@ -2509,6 +2525,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
81230 copy_user_highpage(dst, src, va, vma);
81231 }
81232
81233+#ifdef CONFIG_PAX_SEGMEXEC
81234+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
81235+{
81236+ struct mm_struct *mm = vma->vm_mm;
81237+ spinlock_t *ptl;
81238+ pte_t *pte, entry;
81239+
81240+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
81241+ entry = *pte;
81242+ if (!pte_present(entry)) {
81243+ if (!pte_none(entry)) {
81244+ BUG_ON(pte_file(entry));
81245+ free_swap_and_cache(pte_to_swp_entry(entry));
81246+ pte_clear_not_present_full(mm, address, pte, 0);
81247+ }
81248+ } else {
81249+ struct page *page;
81250+
81251+ flush_cache_page(vma, address, pte_pfn(entry));
81252+ entry = ptep_clear_flush(vma, address, pte);
81253+ BUG_ON(pte_dirty(entry));
81254+ page = vm_normal_page(vma, address, entry);
81255+ if (page) {
81256+ update_hiwater_rss(mm);
81257+ if (PageAnon(page))
81258+ dec_mm_counter_fast(mm, MM_ANONPAGES);
81259+ else
81260+ dec_mm_counter_fast(mm, MM_FILEPAGES);
81261+ page_remove_rmap(page);
81262+ page_cache_release(page);
81263+ }
81264+ }
81265+ pte_unmap_unlock(pte, ptl);
81266+}
81267+
81268+/* PaX: if vma is mirrored, synchronize the mirror's PTE
81269+ *
81270+ * the ptl of the lower mapped page is held on entry and is not released on exit
81271+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
81272+ */
81273+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
81274+{
81275+ struct mm_struct *mm = vma->vm_mm;
81276+ unsigned long address_m;
81277+ spinlock_t *ptl_m;
81278+ struct vm_area_struct *vma_m;
81279+ pmd_t *pmd_m;
81280+ pte_t *pte_m, entry_m;
81281+
81282+ BUG_ON(!page_m || !PageAnon(page_m));
81283+
81284+ vma_m = pax_find_mirror_vma(vma);
81285+ if (!vma_m)
81286+ return;
81287+
81288+ BUG_ON(!PageLocked(page_m));
81289+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
81290+ address_m = address + SEGMEXEC_TASK_SIZE;
81291+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
81292+ pte_m = pte_offset_map(pmd_m, address_m);
81293+ ptl_m = pte_lockptr(mm, pmd_m);
81294+ if (ptl != ptl_m) {
81295+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
81296+ if (!pte_none(*pte_m))
81297+ goto out;
81298+ }
81299+
81300+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
81301+ page_cache_get(page_m);
81302+ page_add_anon_rmap(page_m, vma_m, address_m);
81303+ inc_mm_counter_fast(mm, MM_ANONPAGES);
81304+ set_pte_at(mm, address_m, pte_m, entry_m);
81305+ update_mmu_cache(vma_m, address_m, entry_m);
81306+out:
81307+ if (ptl != ptl_m)
81308+ spin_unlock(ptl_m);
81309+ pte_unmap(pte_m);
81310+ unlock_page(page_m);
81311+}
81312+
81313+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
81314+{
81315+ struct mm_struct *mm = vma->vm_mm;
81316+ unsigned long address_m;
81317+ spinlock_t *ptl_m;
81318+ struct vm_area_struct *vma_m;
81319+ pmd_t *pmd_m;
81320+ pte_t *pte_m, entry_m;
81321+
81322+ BUG_ON(!page_m || PageAnon(page_m));
81323+
81324+ vma_m = pax_find_mirror_vma(vma);
81325+ if (!vma_m)
81326+ return;
81327+
81328+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
81329+ address_m = address + SEGMEXEC_TASK_SIZE;
81330+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
81331+ pte_m = pte_offset_map(pmd_m, address_m);
81332+ ptl_m = pte_lockptr(mm, pmd_m);
81333+ if (ptl != ptl_m) {
81334+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
81335+ if (!pte_none(*pte_m))
81336+ goto out;
81337+ }
81338+
81339+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
81340+ page_cache_get(page_m);
81341+ page_add_file_rmap(page_m);
81342+ inc_mm_counter_fast(mm, MM_FILEPAGES);
81343+ set_pte_at(mm, address_m, pte_m, entry_m);
81344+ update_mmu_cache(vma_m, address_m, entry_m);
81345+out:
81346+ if (ptl != ptl_m)
81347+ spin_unlock(ptl_m);
81348+ pte_unmap(pte_m);
81349+}
81350+
81351+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
81352+{
81353+ struct mm_struct *mm = vma->vm_mm;
81354+ unsigned long address_m;
81355+ spinlock_t *ptl_m;
81356+ struct vm_area_struct *vma_m;
81357+ pmd_t *pmd_m;
81358+ pte_t *pte_m, entry_m;
81359+
81360+ vma_m = pax_find_mirror_vma(vma);
81361+ if (!vma_m)
81362+ return;
81363+
81364+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
81365+ address_m = address + SEGMEXEC_TASK_SIZE;
81366+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
81367+ pte_m = pte_offset_map(pmd_m, address_m);
81368+ ptl_m = pte_lockptr(mm, pmd_m);
81369+ if (ptl != ptl_m) {
81370+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
81371+ if (!pte_none(*pte_m))
81372+ goto out;
81373+ }
81374+
81375+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
81376+ set_pte_at(mm, address_m, pte_m, entry_m);
81377+out:
81378+ if (ptl != ptl_m)
81379+ spin_unlock(ptl_m);
81380+ pte_unmap(pte_m);
81381+}
81382+
81383+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
81384+{
81385+ struct page *page_m;
81386+ pte_t entry;
81387+
81388+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
81389+ goto out;
81390+
81391+ entry = *pte;
81392+ page_m = vm_normal_page(vma, address, entry);
81393+ if (!page_m)
81394+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
81395+ else if (PageAnon(page_m)) {
81396+ if (pax_find_mirror_vma(vma)) {
81397+ pte_unmap_unlock(pte, ptl);
81398+ lock_page(page_m);
81399+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
81400+ if (pte_same(entry, *pte))
81401+ pax_mirror_anon_pte(vma, address, page_m, ptl);
81402+ else
81403+ unlock_page(page_m);
81404+ }
81405+ } else
81406+ pax_mirror_file_pte(vma, address, page_m, ptl);
81407+
81408+out:
81409+ pte_unmap_unlock(pte, ptl);
81410+}
81411+#endif
81412+
81413 /*
81414 * This routine handles present pages, when users try to write
81415 * to a shared page. It is done by copying the page to a new address
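
Context for the pax_mirror_*_pte() helpers added above: under SEGMEXEC the i386 user address space is split into two halves, and every executable mapping in the lower half has a mirror VMA at the same offset plus SEGMEXEC_TASK_SIZE in the upper half. Whenever a PTE in the lower mapping is populated, the corresponding mirror PTE is synchronized under the nested page-table lock. The address arithmetic itself is a fixed offset; a tiny userspace sketch, with the 1.5 GB constant assumed from the i386 SEGMEXEC layout:

#include <stdio.h>

#define SEGMEXEC_TASK_SIZE 0x60000000UL   /* 1.5 GB, assumed i386 layout */

/* Mirror address of a user address in the lower half. */
static unsigned long mirror_of(unsigned long address)
{
	/* the kernel BUG_ON()s if address >= SEGMEXEC_TASK_SIZE */
	return address + SEGMEXEC_TASK_SIZE;
}

int main(void)
{
	unsigned long text = 0x08048000UL;    /* typical i386 text base */
	printf("data view %#lx -> exec view %#lx\n", text, mirror_of(text));
	return 0;
}
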
81416@@ -2725,6 +2921,12 @@ gotten:
81417 */
81418 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
81419 if (likely(pte_same(*page_table, orig_pte))) {
81420+
81421+#ifdef CONFIG_PAX_SEGMEXEC
81422+ if (pax_find_mirror_vma(vma))
81423+ BUG_ON(!trylock_page(new_page));
81424+#endif
81425+
81426 if (old_page) {
81427 if (!PageAnon(old_page)) {
81428 dec_mm_counter_fast(mm, MM_FILEPAGES);
81429@@ -2776,6 +2978,10 @@ gotten:
81430 page_remove_rmap(old_page);
81431 }
81432
81433+#ifdef CONFIG_PAX_SEGMEXEC
81434+ pax_mirror_anon_pte(vma, address, new_page, ptl);
81435+#endif
81436+
81437 /* Free the old page.. */
81438 new_page = old_page;
81439 ret |= VM_FAULT_WRITE;
81440@@ -3051,6 +3257,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
81441 swap_free(entry);
81442 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
81443 try_to_free_swap(page);
81444+
81445+#ifdef CONFIG_PAX_SEGMEXEC
81446+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
81447+#endif
81448+
81449 unlock_page(page);
81450 if (swapcache) {
81451 /*
81452@@ -3074,6 +3285,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
81453
81454 /* No need to invalidate - it was non-present before */
81455 update_mmu_cache(vma, address, page_table);
81456+
81457+#ifdef CONFIG_PAX_SEGMEXEC
81458+ pax_mirror_anon_pte(vma, address, page, ptl);
81459+#endif
81460+
81461 unlock:
81462 pte_unmap_unlock(page_table, ptl);
81463 out:
81464@@ -3093,40 +3309,6 @@ out_release:
81465 }
81466
81467 /*
81468- * This is like a special single-page "expand_{down|up}wards()",
81469- * except we must first make sure that 'address{-|+}PAGE_SIZE'
81470- * doesn't hit another vma.
81471- */
81472-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
81473-{
81474- address &= PAGE_MASK;
81475- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
81476- struct vm_area_struct *prev = vma->vm_prev;
81477-
81478- /*
81479- * Is there a mapping abutting this one below?
81480- *
81481- * That's only ok if it's the same stack mapping
81482- * that has gotten split..
81483- */
81484- if (prev && prev->vm_end == address)
81485- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
81486-
81487- expand_downwards(vma, address - PAGE_SIZE);
81488- }
81489- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
81490- struct vm_area_struct *next = vma->vm_next;
81491-
81492- /* As VM_GROWSDOWN but s/below/above/ */
81493- if (next && next->vm_start == address + PAGE_SIZE)
81494- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
81495-
81496- expand_upwards(vma, address + PAGE_SIZE);
81497- }
81498- return 0;
81499-}
81500-
81501-/*
81502 * We enter with non-exclusive mmap_sem (to exclude vma changes,
81503 * but allow concurrent faults), and pte mapped but not yet locked.
81504 * We return with mmap_sem still held, but pte unmapped and unlocked.
81505@@ -3135,27 +3317,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
81506 unsigned long address, pte_t *page_table, pmd_t *pmd,
81507 unsigned int flags)
81508 {
81509- struct page *page;
81510+ struct page *page = NULL;
81511 spinlock_t *ptl;
81512 pte_t entry;
81513
81514- pte_unmap(page_table);
81515-
81516- /* Check if we need to add a guard page to the stack */
81517- if (check_stack_guard_page(vma, address) < 0)
81518- return VM_FAULT_SIGBUS;
81519-
81520- /* Use the zero-page for reads */
81521 if (!(flags & FAULT_FLAG_WRITE)) {
81522 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
81523 vma->vm_page_prot));
81524- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
81525+ ptl = pte_lockptr(mm, pmd);
81526+ spin_lock(ptl);
81527 if (!pte_none(*page_table))
81528 goto unlock;
81529 goto setpte;
81530 }
81531
81532 /* Allocate our own private page. */
81533+ pte_unmap(page_table);
81534+
81535 if (unlikely(anon_vma_prepare(vma)))
81536 goto oom;
81537 page = alloc_zeroed_user_highpage_movable(vma, address);
81538@@ -3174,6 +3352,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
81539 if (!pte_none(*page_table))
81540 goto release;
81541
81542+#ifdef CONFIG_PAX_SEGMEXEC
81543+ if (pax_find_mirror_vma(vma))
81544+ BUG_ON(!trylock_page(page));
81545+#endif
81546+
81547 inc_mm_counter_fast(mm, MM_ANONPAGES);
81548 page_add_new_anon_rmap(page, vma, address);
81549 setpte:
81550@@ -3181,6 +3364,12 @@ setpte:
81551
81552 /* No need to invalidate - it was non-present before */
81553 update_mmu_cache(vma, address, page_table);
81554+
81555+#ifdef CONFIG_PAX_SEGMEXEC
81556+ if (page)
81557+ pax_mirror_anon_pte(vma, address, page, ptl);
81558+#endif
81559+
81560 unlock:
81561 pte_unmap_unlock(page_table, ptl);
81562 return 0;
81563@@ -3324,6 +3513,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
81564 */
81565 /* Only go through if we didn't race with anybody else... */
81566 if (likely(pte_same(*page_table, orig_pte))) {
81567+
81568+#ifdef CONFIG_PAX_SEGMEXEC
81569+ if (anon && pax_find_mirror_vma(vma))
81570+ BUG_ON(!trylock_page(page));
81571+#endif
81572+
81573 flush_icache_page(vma, page);
81574 entry = mk_pte(page, vma->vm_page_prot);
81575 if (flags & FAULT_FLAG_WRITE)
81576@@ -3343,6 +3538,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
81577
81578 /* no need to invalidate: a not-present page won't be cached */
81579 update_mmu_cache(vma, address, page_table);
81580+
81581+#ifdef CONFIG_PAX_SEGMEXEC
81582+ if (anon)
81583+ pax_mirror_anon_pte(vma, address, page, ptl);
81584+ else
81585+ pax_mirror_file_pte(vma, address, page, ptl);
81586+#endif
81587+
81588 } else {
81589 if (cow_page)
81590 mem_cgroup_uncharge_page(cow_page);
81591@@ -3664,6 +3867,12 @@ int handle_pte_fault(struct mm_struct *mm,
81592 if (flags & FAULT_FLAG_WRITE)
81593 flush_tlb_fix_spurious_fault(vma, address);
81594 }
81595+
81596+#ifdef CONFIG_PAX_SEGMEXEC
81597+ pax_mirror_pte(vma, address, pte, pmd, ptl);
81598+ return 0;
81599+#endif
81600+
81601 unlock:
81602 pte_unmap_unlock(pte, ptl);
81603 return 0;
81604@@ -3680,6 +3889,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
81605 pmd_t *pmd;
81606 pte_t *pte;
81607
81608+#ifdef CONFIG_PAX_SEGMEXEC
81609+ struct vm_area_struct *vma_m;
81610+#endif
81611+
81612 __set_current_state(TASK_RUNNING);
81613
81614 count_vm_event(PGFAULT);
81615@@ -3691,6 +3904,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
81616 if (unlikely(is_vm_hugetlb_page(vma)))
81617 return hugetlb_fault(mm, vma, address, flags);
81618
81619+#ifdef CONFIG_PAX_SEGMEXEC
81620+ vma_m = pax_find_mirror_vma(vma);
81621+ if (vma_m) {
81622+ unsigned long address_m;
81623+ pgd_t *pgd_m;
81624+ pud_t *pud_m;
81625+ pmd_t *pmd_m;
81626+
81627+ if (vma->vm_start > vma_m->vm_start) {
81628+ address_m = address;
81629+ address -= SEGMEXEC_TASK_SIZE;
81630+ vma = vma_m;
81631+ } else
81632+ address_m = address + SEGMEXEC_TASK_SIZE;
81633+
81634+ pgd_m = pgd_offset(mm, address_m);
81635+ pud_m = pud_alloc(mm, pgd_m, address_m);
81636+ if (!pud_m)
81637+ return VM_FAULT_OOM;
81638+ pmd_m = pmd_alloc(mm, pud_m, address_m);
81639+ if (!pmd_m)
81640+ return VM_FAULT_OOM;
81641+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
81642+ return VM_FAULT_OOM;
81643+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
81644+ }
81645+#endif
81646+
81647 retry:
81648 pgd = pgd_offset(mm, address);
81649 pud = pud_alloc(mm, pgd, address);
81650@@ -3789,6 +4030,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
81651 spin_unlock(&mm->page_table_lock);
81652 return 0;
81653 }
81654+
81655+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
81656+{
81657+ pud_t *new = pud_alloc_one(mm, address);
81658+ if (!new)
81659+ return -ENOMEM;
81660+
81661+ smp_wmb(); /* See comment in __pte_alloc */
81662+
81663+ spin_lock(&mm->page_table_lock);
81664+ if (pgd_present(*pgd)) /* Another has populated it */
81665+ pud_free(mm, new);
81666+ else
81667+ pgd_populate_kernel(mm, pgd, new);
81668+ spin_unlock(&mm->page_table_lock);
81669+ return 0;
81670+}
81671 #endif /* __PAGETABLE_PUD_FOLDED */
81672
81673 #ifndef __PAGETABLE_PMD_FOLDED
81674@@ -3819,11 +4077,35 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
81675 spin_unlock(&mm->page_table_lock);
81676 return 0;
81677 }
81678+
81679+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
81680+{
81681+ pmd_t *new = pmd_alloc_one(mm, address);
81682+ if (!new)
81683+ return -ENOMEM;
81684+
81685+ smp_wmb(); /* See comment in __pte_alloc */
81686+
81687+ spin_lock(&mm->page_table_lock);
81688+#ifndef __ARCH_HAS_4LEVEL_HACK
81689+ if (pud_present(*pud)) /* Another has populated it */
81690+ pmd_free(mm, new);
81691+ else
81692+ pud_populate_kernel(mm, pud, new);
81693+#else
81694+ if (pgd_present(*pud)) /* Another has populated it */
81695+ pmd_free(mm, new);
81696+ else
81697+ pgd_populate_kernel(mm, pud, new);
81698+#endif /* __ARCH_HAS_4LEVEL_HACK */
81699+ spin_unlock(&mm->page_table_lock);
81700+ return 0;
81701+}
81702 #endif /* __PAGETABLE_PMD_FOLDED */
81703
81704-int make_pages_present(unsigned long addr, unsigned long end)
81705+ssize_t make_pages_present(unsigned long addr, unsigned long end)
81706 {
81707- int ret, len, write;
81708+ ssize_t ret, len, write;
81709 struct vm_area_struct * vma;
81710
81711 vma = find_vma(current->mm, addr);
81712@@ -3856,7 +4138,7 @@ static int __init gate_vma_init(void)
81713 gate_vma.vm_start = FIXADDR_USER_START;
81714 gate_vma.vm_end = FIXADDR_USER_END;
81715 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
81716- gate_vma.vm_page_prot = __P101;
81717+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
81718
81719 return 0;
81720 }
81721@@ -3990,8 +4272,8 @@ out:
81722 return ret;
81723 }
81724
81725-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
81726- void *buf, int len, int write)
81727+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
81728+ void *buf, size_t len, int write)
81729 {
81730 resource_size_t phys_addr;
81731 unsigned long prot = 0;
81732@@ -4016,8 +4298,8 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
81733 * Access another process' address space as given in mm. If non-NULL, use the
81734 * given task for page fault accounting.
81735 */
81736-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
81737- unsigned long addr, void *buf, int len, int write)
81738+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
81739+ unsigned long addr, void *buf, size_t len, int write)
81740 {
81741 struct vm_area_struct *vma;
81742 void *old_buf = buf;
81743@@ -4025,7 +4307,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
81744 down_read(&mm->mmap_sem);
81745 /* ignore errors, just check how much was successfully transferred */
81746 while (len) {
81747- int bytes, ret, offset;
81748+ ssize_t bytes, ret, offset;
81749 void *maddr;
81750 struct page *page = NULL;
81751
81752@@ -4084,8 +4366,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
81753 *
81754 * The caller must hold a reference on @mm.
81755 */
81756-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
81757- void *buf, int len, int write)
81758+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
81759+ void *buf, size_t len, int write)
81760 {
81761 return __access_remote_vm(NULL, mm, addr, buf, len, write);
81762 }
81763@@ -4095,11 +4377,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
81764 * Source/target buffer must be kernel space,
81765 * Do not walk the page table directly, use get_user_pages
81766 */
81767-int access_process_vm(struct task_struct *tsk, unsigned long addr,
81768- void *buf, int len, int write)
81769+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
81770+ void *buf, size_t len, int write)
81771 {
81772 struct mm_struct *mm;
81773- int ret;
81774+ ssize_t ret;
81775
81776 mm = get_task_mm(tsk);
81777 if (!mm)
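
The access_process_vm()/access_remote_vm() family above is widened from int to ssize_t/size_t as part of grsecurity's size-overflow work: on LP64, an int length truncates any request above INT_MAX, and the per-iteration int arithmetic can go negative, while size_t keeps the full register width. A two-line demonstration of the truncation, assuming an LP64 ABI:

#include <stdio.h>

int main(void)
{
	unsigned long want = 0x100000000UL;  /* a 4 GiB request on LP64 */
	int  as_int  = (int)want;            /* truncates, commonly to 0 */
	long as_long = (long)want;           /* preserved */
	printf("int len=%d  ssize_t len=%ld\n", as_int, as_long);
	return 0;
}
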
81778diff --git a/mm/mempolicy.c b/mm/mempolicy.c
81779index 3df6d12..a11056a 100644
81780--- a/mm/mempolicy.c
81781+++ b/mm/mempolicy.c
81782@@ -721,6 +721,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
81783 unsigned long vmstart;
81784 unsigned long vmend;
81785
81786+#ifdef CONFIG_PAX_SEGMEXEC
81787+ struct vm_area_struct *vma_m;
81788+#endif
81789+
81790 vma = find_vma(mm, start);
81791 if (!vma || vma->vm_start > start)
81792 return -EFAULT;
81793@@ -757,9 +761,20 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
81794 if (err)
81795 goto out;
81796 }
81797+
81798 err = vma_replace_policy(vma, new_pol);
81799 if (err)
81800 goto out;
81801+
81802+#ifdef CONFIG_PAX_SEGMEXEC
81803+ vma_m = pax_find_mirror_vma(vma);
81804+ if (vma_m) {
81805+ err = vma_replace_policy(vma_m, new_pol);
81806+ if (err)
81807+ goto out;
81808+ }
81809+#endif
81810+
81811 }
81812
81813 out:
81814@@ -1216,6 +1231,17 @@ static long do_mbind(unsigned long start, unsigned long len,
81815
81816 if (end < start)
81817 return -EINVAL;
81818+
81819+#ifdef CONFIG_PAX_SEGMEXEC
81820+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
81821+ if (end > SEGMEXEC_TASK_SIZE)
81822+ return -EINVAL;
81823+ } else
81824+#endif
81825+
81826+ if (end > TASK_SIZE)
81827+ return -EINVAL;
81828+
81829 if (end == start)
81830 return 0;
81831
81832@@ -1445,8 +1471,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
81833 */
81834 tcred = __task_cred(task);
81835 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
81836- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
81837- !capable(CAP_SYS_NICE)) {
81838+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
81839 rcu_read_unlock();
81840 err = -EPERM;
81841 goto out_put;
81842@@ -1477,6 +1502,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
81843 goto out;
81844 }
81845
81846+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
81847+ if (mm != current->mm &&
81848+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
81849+ mmput(mm);
81850+ err = -EPERM;
81851+ goto out;
81852+ }
81853+#endif
81854+
81855 err = do_migrate_pages(mm, old, new,
81856 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
81857
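
Both here and in the identical mm/migrate.c hunk below, grsecurity drops the uid_eq(cred->uid, tcred->uid) escape from the permission check: a caller whose real uid merely matches the target's real uid can no longer rebind or migrate the target's pages, while euid-based matches and CAP_SYS_NICE still pass. A boolean model of the before/after predicate — the field names follow the kernel, but the struct and scenario are mine:

#include <stdbool.h>
#include <stdio.h>

struct cred { unsigned uid, euid, suid; };

static bool may_migrate(const struct cred *c, const struct cred *t,
			bool cap_sys_nice, bool grsec)
{
	if (c->euid == t->suid || c->euid == t->uid || c->uid == t->suid)
		return true;
	if (!grsec && c->uid == t->uid)   /* the clause removed by the patch */
		return true;
	return cap_sys_nice;
}

int main(void)
{
	/* only the real uids match; every euid/suid clause fails */
	struct cred caller = { 1000, 2000, 2000 };
	struct cred target = { 1000, 3000, 3000 };
	printf("vanilla: %d  grsec: %d\n",
	       may_migrate(&caller, &target, false, false),   /* 1 */
	       may_migrate(&caller, &target, false, true));   /* 0 */
	return 0;
}
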
81858diff --git a/mm/migrate.c b/mm/migrate.c
81859index 2fd8b4a..d70358f 100644
81860--- a/mm/migrate.c
81861+++ b/mm/migrate.c
81862@@ -1401,8 +1401,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
81863 */
81864 tcred = __task_cred(task);
81865 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
81866- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
81867- !capable(CAP_SYS_NICE)) {
81868+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
81869 rcu_read_unlock();
81870 err = -EPERM;
81871 goto out;
81872diff --git a/mm/mlock.c b/mm/mlock.c
81873index c9bd528..da8d069 100644
81874--- a/mm/mlock.c
81875+++ b/mm/mlock.c
81876@@ -13,6 +13,7 @@
81877 #include <linux/pagemap.h>
81878 #include <linux/mempolicy.h>
81879 #include <linux/syscalls.h>
81880+#include <linux/security.h>
81881 #include <linux/sched.h>
81882 #include <linux/export.h>
81883 #include <linux/rmap.h>
81884@@ -369,7 +370,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
81885 {
81886 unsigned long nstart, end, tmp;
81887 struct vm_area_struct * vma, * prev;
81888- int error;
81889+ int error = 0;
81890
81891 VM_BUG_ON(start & ~PAGE_MASK);
81892 VM_BUG_ON(len != PAGE_ALIGN(len));
81893@@ -378,6 +379,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
81894 return -EINVAL;
81895 if (end == start)
81896 return 0;
81897+ if (end > TASK_SIZE)
81898+ return -EINVAL;
81899+
81900 vma = find_vma(current->mm, start);
81901 if (!vma || vma->vm_start > start)
81902 return -ENOMEM;
81903@@ -389,6 +393,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
81904 for (nstart = start ; ; ) {
81905 vm_flags_t newflags;
81906
81907+#ifdef CONFIG_PAX_SEGMEXEC
81908+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
81909+ break;
81910+#endif
81911+
81912 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
81913
81914 newflags = vma->vm_flags | VM_LOCKED;
81915@@ -494,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
81916 lock_limit >>= PAGE_SHIFT;
81917
81918 /* check against resource limits */
81919+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
81920 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
81921 error = do_mlock(start, len, 1);
81922 up_write(&current->mm->mmap_sem);
81923@@ -528,6 +538,12 @@ static int do_mlockall(int flags)
81924 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
81925 vm_flags_t newflags;
81926
81927+#ifdef CONFIG_PAX_SEGMEXEC
81928+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
81929+ break;
81930+#endif
81931+
81932+ BUG_ON(vma->vm_end > TASK_SIZE);
81933 newflags = vma->vm_flags | VM_LOCKED;
81934 if (!(flags & MCL_CURRENT))
81935 newflags &= ~VM_LOCKED;
81936@@ -560,6 +576,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
81937 lock_limit >>= PAGE_SHIFT;
81938
81939 ret = -ENOMEM;
81940+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
81941 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
81942 capable(CAP_IPC_LOCK))
81943 ret = do_mlockall(flags);
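
The two gr_learn_resource() calls in the mlock hunks feed grsecurity's ACL learning mode: before the limit test runs, the would-be RLIMIT_MEMLOCK consumption is recorded so the learning logs can later propose per-subject limits. The surrounding arithmetic compares everything in pages, which is why the byte-denominated rlimit is shifted down first; a sketch of that unit handling, with illustrative values of my own:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long rlim_bytes = 64UL << 10;               /* 64 KiB rlimit */
	unsigned long lock_limit = rlim_bytes >> PAGE_SHIFT; /* -> 16 pages */
	unsigned long locked_pages = 20;

	/* the do_mlock()-style test, all in pages */
	printf("locked=%lu limit=%lu -> %s\n", locked_pages, lock_limit,
	       locked_pages <= lock_limit ? "allowed" : "denied");
	return 0;
}
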
81944diff --git a/mm/mmap.c b/mm/mmap.c
81945index 8832b87..04240d1 100644
81946--- a/mm/mmap.c
81947+++ b/mm/mmap.c
81948@@ -32,6 +32,7 @@
81949 #include <linux/khugepaged.h>
81950 #include <linux/uprobes.h>
81951 #include <linux/rbtree_augmented.h>
81952+#include <linux/random.h>
81953
81954 #include <asm/uaccess.h>
81955 #include <asm/cacheflush.h>
81956@@ -48,6 +49,16 @@
81957 #define arch_rebalance_pgtables(addr, len) (addr)
81958 #endif
81959
81960+static inline void verify_mm_writelocked(struct mm_struct *mm)
81961+{
81962+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
81963+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
81964+ up_read(&mm->mmap_sem);
81965+ BUG();
81966+ }
81967+#endif
81968+}
81969+
81970 static void unmap_region(struct mm_struct *mm,
81971 struct vm_area_struct *vma, struct vm_area_struct *prev,
81972 unsigned long start, unsigned long end);
81973@@ -67,22 +78,32 @@ static void unmap_region(struct mm_struct *mm,
81974 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
81975 *
81976 */
81977-pgprot_t protection_map[16] = {
81978+pgprot_t protection_map[16] __read_only = {
81979 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
81980 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
81981 };
81982
81983-pgprot_t vm_get_page_prot(unsigned long vm_flags)
81984+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
81985 {
81986- return __pgprot(pgprot_val(protection_map[vm_flags &
81987+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
81988 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
81989 pgprot_val(arch_vm_get_page_prot(vm_flags)));
81990+
81991+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
81992+ if (!(__supported_pte_mask & _PAGE_NX) &&
81993+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
81994+ (vm_flags & (VM_READ | VM_WRITE)))
81995+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
81996+#endif
81997+
81998+ return prot;
81999 }
82000 EXPORT_SYMBOL(vm_get_page_prot);
82001
82002 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
82003 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
82004 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
82005+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
82006 /*
82007 * Make sure vm_committed_as in one cacheline and not cacheline shared with
82008 * other variables. It can be updated by several CPUs frequently.
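
The vm_get_page_prot() hunk above relies on protection_map being a 16-entry table indexed by the low VM_READ|VM_WRITE|VM_EXEC|VM_SHARED bits; the patch makes the table __read_only and, on PAGEEXEC i386 without hardware NX, strips the executable bit from PAGEEXEC-tracked read/write mappings so non-executability can be emulated. A sketch of the 4-bit lookup — the table contents here are illustrative strings, not real pgprot values:

#include <stdio.h>

enum { VM_READ = 1, VM_WRITE = 2, VM_EXEC = 4, VM_SHARED = 8 };

/* Illustrative stand-ins for the __P000..__S111 pgprot entries. */
static const char *protection_map[16] = {
	"p---", "p r",  "p w(cow)",  "p rw(cow)",
	"p x",  "p rx", "p wx(cow)", "p rwx(cow)",
	"s---", "s r",  "s w",       "s rw",
	"s x",  "s rx", "s wx",      "s rwx",
};

static const char *vm_get_page_prot(unsigned long vm_flags)
{
	return protection_map[vm_flags &
			      (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}

int main(void)
{
	printf("%s\n", vm_get_page_prot(VM_READ | VM_WRITE)); /* "p rw(cow)" */
	return 0;
}
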
82009@@ -238,6 +259,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
82010 struct vm_area_struct *next = vma->vm_next;
82011
82012 might_sleep();
82013+ BUG_ON(vma->vm_mirror);
82014 if (vma->vm_ops && vma->vm_ops->close)
82015 vma->vm_ops->close(vma);
82016 if (vma->vm_file)
82017@@ -281,6 +303,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
82018 * not page aligned -Ram Gupta
82019 */
82020 rlim = rlimit(RLIMIT_DATA);
82021+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
82022 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
82023 (mm->end_data - mm->start_data) > rlim)
82024 goto out;
82025@@ -888,6 +911,12 @@ static int
82026 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
82027 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
82028 {
82029+
82030+#ifdef CONFIG_PAX_SEGMEXEC
82031+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
82032+ return 0;
82033+#endif
82034+
82035 if (is_mergeable_vma(vma, file, vm_flags) &&
82036 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
82037 if (vma->vm_pgoff == vm_pgoff)
82038@@ -907,6 +936,12 @@ static int
82039 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
82040 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
82041 {
82042+
82043+#ifdef CONFIG_PAX_SEGMEXEC
82044+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
82045+ return 0;
82046+#endif
82047+
82048 if (is_mergeable_vma(vma, file, vm_flags) &&
82049 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
82050 pgoff_t vm_pglen;
82051@@ -949,13 +984,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
82052 struct vm_area_struct *vma_merge(struct mm_struct *mm,
82053 struct vm_area_struct *prev, unsigned long addr,
82054 unsigned long end, unsigned long vm_flags,
82055- struct anon_vma *anon_vma, struct file *file,
82056+ struct anon_vma *anon_vma, struct file *file,
82057 pgoff_t pgoff, struct mempolicy *policy)
82058 {
82059 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
82060 struct vm_area_struct *area, *next;
82061 int err;
82062
82063+#ifdef CONFIG_PAX_SEGMEXEC
82064+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
82065+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
82066+
82067+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
82068+#endif
82069+
82070 /*
82071 * We later require that vma->vm_flags == vm_flags,
82072 * so this tests vma->vm_flags & VM_SPECIAL, too.
82073@@ -971,6 +1013,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
82074 if (next && next->vm_end == end) /* cases 6, 7, 8 */
82075 next = next->vm_next;
82076
82077+#ifdef CONFIG_PAX_SEGMEXEC
82078+ if (prev)
82079+ prev_m = pax_find_mirror_vma(prev);
82080+ if (area)
82081+ area_m = pax_find_mirror_vma(area);
82082+ if (next)
82083+ next_m = pax_find_mirror_vma(next);
82084+#endif
82085+
82086 /*
82087 * Can it merge with the predecessor?
82088 */
82089@@ -990,9 +1041,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
82090 /* cases 1, 6 */
82091 err = vma_adjust(prev, prev->vm_start,
82092 next->vm_end, prev->vm_pgoff, NULL);
82093- } else /* cases 2, 5, 7 */
82094+
82095+#ifdef CONFIG_PAX_SEGMEXEC
82096+ if (!err && prev_m)
82097+ err = vma_adjust(prev_m, prev_m->vm_start,
82098+ next_m->vm_end, prev_m->vm_pgoff, NULL);
82099+#endif
82100+
82101+ } else { /* cases 2, 5, 7 */
82102 err = vma_adjust(prev, prev->vm_start,
82103 end, prev->vm_pgoff, NULL);
82104+
82105+#ifdef CONFIG_PAX_SEGMEXEC
82106+ if (!err && prev_m)
82107+ err = vma_adjust(prev_m, prev_m->vm_start,
82108+ end_m, prev_m->vm_pgoff, NULL);
82109+#endif
82110+
82111+ }
82112 if (err)
82113 return NULL;
82114 khugepaged_enter_vma_merge(prev);
82115@@ -1006,12 +1072,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
82116 mpol_equal(policy, vma_policy(next)) &&
82117 can_vma_merge_before(next, vm_flags,
82118 anon_vma, file, pgoff+pglen)) {
82119- if (prev && addr < prev->vm_end) /* case 4 */
82120+ if (prev && addr < prev->vm_end) { /* case 4 */
82121 err = vma_adjust(prev, prev->vm_start,
82122 addr, prev->vm_pgoff, NULL);
82123- else /* cases 3, 8 */
82124+
82125+#ifdef CONFIG_PAX_SEGMEXEC
82126+ if (!err && prev_m)
82127+ err = vma_adjust(prev_m, prev_m->vm_start,
82128+ addr_m, prev_m->vm_pgoff, NULL);
82129+#endif
82130+
82131+ } else { /* cases 3, 8 */
82132 err = vma_adjust(area, addr, next->vm_end,
82133 next->vm_pgoff - pglen, NULL);
82134+
82135+#ifdef CONFIG_PAX_SEGMEXEC
82136+ if (!err && area_m)
82137+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
82138+ next_m->vm_pgoff - pglen, NULL);
82139+#endif
82140+
82141+ }
82142 if (err)
82143 return NULL;
82144 khugepaged_enter_vma_merge(area);
82145@@ -1120,8 +1201,10 @@ none:
82146 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
82147 struct file *file, long pages)
82148 {
82149- const unsigned long stack_flags
82150- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
82151+
82152+#ifdef CONFIG_PAX_RANDMMAP
82153+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
82154+#endif
82155
82156 mm->total_vm += pages;
82157
82158@@ -1129,7 +1212,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
82159 mm->shared_vm += pages;
82160 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
82161 mm->exec_vm += pages;
82162- } else if (flags & stack_flags)
82163+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
82164 mm->stack_vm += pages;
82165 }
82166 #endif /* CONFIG_PROC_FS */
82167@@ -1165,7 +1248,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
82168 * (the exception is when the underlying filesystem is noexec
82169 * mounted, in which case we dont add PROT_EXEC.)
82170 */
82171- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
82172+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
82173 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
82174 prot |= PROT_EXEC;
82175
82176@@ -1191,7 +1274,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
82177 /* Obtain the address to map to. we verify (or select) it and ensure
82178 * that it represents a valid section of the address space.
82179 */
82180- addr = get_unmapped_area(file, addr, len, pgoff, flags);
82181+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
82182 if (addr & ~PAGE_MASK)
82183 return addr;
82184
82185@@ -1202,6 +1285,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
82186 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
82187 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
82188
82189+#ifdef CONFIG_PAX_MPROTECT
82190+ if (mm->pax_flags & MF_PAX_MPROTECT) {
82191+#ifndef CONFIG_PAX_MPROTECT_COMPAT
82192+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
82193+ gr_log_rwxmmap(file);
82194+
82195+#ifdef CONFIG_PAX_EMUPLT
82196+ vm_flags &= ~VM_EXEC;
82197+#else
82198+ return -EPERM;
82199+#endif
82200+
82201+ }
82202+
82203+ if (!(vm_flags & VM_EXEC))
82204+ vm_flags &= ~VM_MAYEXEC;
82205+#else
82206+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
82207+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
82208+#endif
82209+ else
82210+ vm_flags &= ~VM_MAYWRITE;
82211+ }
82212+#endif
82213+
82214+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
82215+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
82216+ vm_flags &= ~VM_PAGEEXEC;
82217+#endif
82218+
82219 if (flags & MAP_LOCKED)
82220 if (!can_do_mlock())
82221 return -EPERM;
82222@@ -1213,6 +1326,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
82223 locked += mm->locked_vm;
82224 lock_limit = rlimit(RLIMIT_MEMLOCK);
82225 lock_limit >>= PAGE_SHIFT;
82226+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
82227 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
82228 return -EAGAIN;
82229 }
82230@@ -1279,6 +1393,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
82231 }
82232 }
82233
82234+ if (!gr_acl_handle_mmap(file, prot))
82235+ return -EACCES;
82236+
82237 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
82238 }
82239
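
The do_mmap_pgoff() hunk above is the mmap()-time half of PAX_MPROTECT: a writable-and-executable request is either refused outright or, with EMUPLT, silently stripped of VM_EXEC, and whichever of exec/write is absent also loses its VM_MAY* counterpart so a later mprotect() cannot reintroduce it. A pure-function model of the non-COMPAT policy — the flag values follow the kernel's spirit, the return convention is mine:

#include <stdio.h>

enum {
	VM_WRITE = 0x2, VM_EXEC = 0x4,
	VM_MAYWRITE = 0x20, VM_MAYEXEC = 0x40,
};

/* Returns adjusted flags, or 0 to signal -EPERM (no EMUPLT fallback). */
static unsigned long mprotect_adjust(unsigned long f)
{
	if ((f & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
		return 0;               /* W|X mapping: refused outright */
	if (!(f & VM_EXEC))
		f &= ~VM_MAYEXEC;       /* can never become executable */
	else
		f &= ~VM_MAYWRITE;      /* can never become writable */
	return f;
}

int main(void)
{
	unsigned long f = VM_EXEC | VM_MAYWRITE | VM_MAYEXEC;
	printf("%#lx -> %#lx\n", f, mprotect_adjust(f)); /* drops VM_MAYWRITE */
	return 0;
}
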
82240@@ -1356,7 +1473,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
82241 vm_flags_t vm_flags = vma->vm_flags;
82242
82243 /* If it was private or non-writable, the write bit is already clear */
82244- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
82245+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
82246 return 0;
82247
82248 /* The backer wishes to know when pages are first written to? */
82249@@ -1405,16 +1522,30 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
82250 unsigned long charged = 0;
82251 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
82252
82253+#ifdef CONFIG_PAX_SEGMEXEC
82254+ struct vm_area_struct *vma_m = NULL;
82255+#endif
82256+
82257+ /*
82258+ * mm->mmap_sem is required to protect against another thread
82259+ * changing the mappings in case we sleep.
82260+ */
82261+ verify_mm_writelocked(mm);
82262+
82263 /* Clear old maps */
82264 error = -ENOMEM;
82265-munmap_back:
82266 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
82267 if (do_munmap(mm, addr, len))
82268 return -ENOMEM;
82269- goto munmap_back;
82270+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
82271 }
82272
82273 /* Check against address space limit. */
82274+
82275+#ifdef CONFIG_PAX_RANDMMAP
82276+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
82277+#endif
82278+
82279 if (!may_expand_vm(mm, len >> PAGE_SHIFT))
82280 return -ENOMEM;
82281
82282@@ -1460,6 +1591,16 @@ munmap_back:
82283 goto unacct_error;
82284 }
82285
82286+#ifdef CONFIG_PAX_SEGMEXEC
82287+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
82288+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
82289+ if (!vma_m) {
82290+ error = -ENOMEM;
82291+ goto free_vma;
82292+ }
82293+ }
82294+#endif
82295+
82296 vma->vm_mm = mm;
82297 vma->vm_start = addr;
82298 vma->vm_end = addr + len;
82299@@ -1484,6 +1625,13 @@ munmap_back:
82300 if (error)
82301 goto unmap_and_free_vma;
82302
82303+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
82304+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
82305+ vma->vm_flags |= VM_PAGEEXEC;
82306+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
82307+ }
82308+#endif
82309+
82310 /* Can addr have changed??
82311 *
82312 * Answer: Yes, several device drivers can do it in their
82313@@ -1522,6 +1670,11 @@ munmap_back:
82314 vma_link(mm, vma, prev, rb_link, rb_parent);
82315 file = vma->vm_file;
82316
82317+#ifdef CONFIG_PAX_SEGMEXEC
82318+ if (vma_m)
82319+ BUG_ON(pax_mirror_vma(vma_m, vma));
82320+#endif
82321+
82322 /* Once vma denies write, undo our temporary denial count */
82323 if (correct_wcount)
82324 atomic_inc(&inode->i_writecount);
82325@@ -1529,6 +1682,7 @@ out:
82326 perf_event_mmap(vma);
82327
82328 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
82329+ track_exec_limit(mm, addr, addr + len, vm_flags);
82330 if (vm_flags & VM_LOCKED) {
82331 if (!mlock_vma_pages_range(vma, addr, addr + len))
82332 mm->locked_vm += (len >> PAGE_SHIFT);
82333@@ -1550,6 +1704,12 @@ unmap_and_free_vma:
82334 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
82335 charged = 0;
82336 free_vma:
82337+
82338+#ifdef CONFIG_PAX_SEGMEXEC
82339+ if (vma_m)
82340+ kmem_cache_free(vm_area_cachep, vma_m);
82341+#endif
82342+
82343 kmem_cache_free(vm_area_cachep, vma);
82344 unacct_error:
82345 if (charged)
82346@@ -1557,6 +1717,62 @@ unacct_error:
82347 return error;
82348 }
82349
82350+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
82351+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
82352+{
82353+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
82354+ return (random32() & 0xFF) << PAGE_SHIFT;
82355+
82356+ return 0;
82357+}
82358+#endif
82359+
82360+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
82361+{
82362+ if (!vma) {
82363+#ifdef CONFIG_STACK_GROWSUP
82364+ if (addr > sysctl_heap_stack_gap)
82365+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
82366+ else
82367+ vma = find_vma(current->mm, 0);
82368+ if (vma && (vma->vm_flags & VM_GROWSUP))
82369+ return false;
82370+#endif
82371+ return true;
82372+ }
82373+
82374+ if (addr + len > vma->vm_start)
82375+ return false;
82376+
82377+ if (vma->vm_flags & VM_GROWSDOWN)
82378+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
82379+#ifdef CONFIG_STACK_GROWSUP
82380+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
82381+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
82382+#endif
82383+ else if (offset)
82384+ return offset <= vma->vm_start - addr - len;
82385+
82386+ return true;
82387+}
82388+
82389+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
82390+{
82391+ if (vma->vm_start < len)
82392+ return -ENOMEM;
82393+
82394+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
82395+ if (offset <= vma->vm_start - len)
82396+ return vma->vm_start - len - offset;
82397+ else
82398+ return -ENOMEM;
82399+ }
82400+
82401+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
82402+ return vma->vm_start - len - sysctl_heap_stack_gap;
82403+ return -ENOMEM;
82404+}
82405+
82406 unsigned long unmapped_area(struct vm_unmapped_area_info *info)
82407 {
82408 /*
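
check_heap_stack_gap() above is the heart of the configurable heap/stack gap: a candidate range [addr, addr+len) is rejected if it overlaps the next VMA, and if that VMA is a GROWSDOWN stack it must additionally leave sysctl_heap_stack_gap bytes of headroom (64 KiB by default, per the sysctl initializer earlier in this file). A userspace model of the GROWSDOWN branch — the struct and constant are mine:

#include <stdbool.h>
#include <assert.h>

#define GAP (64UL * 1024)   /* models the sysctl_heap_stack_gap default */

struct vma { unsigned long vm_start, vm_end; bool growsdown; };

static bool check_heap_stack_gap(const struct vma *next,
				 unsigned long addr, unsigned long len)
{
	if (!next)
		return true;
	if (addr + len > next->vm_start)
		return false;                     /* plain overlap */
	if (next->growsdown)                      /* stack above, growing down */
		return GAP <= next->vm_start - addr - len;
	return true;
}

int main(void)
{
	struct vma stack = { 0x7f0000, 0x800000, true };
	assert(!check_heap_stack_gap(&stack, 0x7e8000, 0x8000)); /* abuts it */
	assert( check_heap_stack_gap(&stack, 0x700000, 0x8000)); /* far below */
	return 0;
}
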
82409@@ -1776,6 +1992,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
82410 struct mm_struct *mm = current->mm;
82411 struct vm_area_struct *vma;
82412 struct vm_unmapped_area_info info;
82413+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
82414
82415 if (len > TASK_SIZE)
82416 return -ENOMEM;
82417@@ -1783,17 +2000,26 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
82418 if (flags & MAP_FIXED)
82419 return addr;
82420
82421+#ifdef CONFIG_PAX_RANDMMAP
82422+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
82423+#endif
82424+
82425 if (addr) {
82426 addr = PAGE_ALIGN(addr);
82427 vma = find_vma(mm, addr);
82428- if (TASK_SIZE - len >= addr &&
82429- (!vma || addr + len <= vma->vm_start))
82430+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
82431 return addr;
82432 }
82433
82434 info.flags = 0;
82435 info.length = len;
82436 info.low_limit = TASK_UNMAPPED_BASE;
82437+
82438+#ifdef CONFIG_PAX_RANDMMAP
82439+ if (mm->pax_flags & MF_PAX_RANDMMAP)
82440+ info.low_limit += mm->delta_mmap;
82441+#endif
82442+
82443 info.high_limit = TASK_SIZE;
82444 info.align_mask = 0;
82445 return vm_unmapped_area(&info);
82446@@ -1802,10 +2028,16 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
82447
82448 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
82449 {
82450+
82451+#ifdef CONFIG_PAX_SEGMEXEC
82452+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
82453+ return;
82454+#endif
82455+
82456 /*
82457 * Is this a new hole at the lowest possible address?
82458 */
82459- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
82460+ if (addr >= mm->mmap_base && addr < mm->free_area_cache)
82461 mm->free_area_cache = addr;
82462 }
82463
82464@@ -1823,6 +2055,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
82465 struct mm_struct *mm = current->mm;
82466 unsigned long addr = addr0;
82467 struct vm_unmapped_area_info info;
82468+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
82469
82470 /* requested length too big for entire address space */
82471 if (len > TASK_SIZE)
82472@@ -1831,12 +2064,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
82473 if (flags & MAP_FIXED)
82474 return addr;
82475
82476+#ifdef CONFIG_PAX_RANDMMAP
82477+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
82478+#endif
82479+
82480 /* requesting a specific address */
82481 if (addr) {
82482 addr = PAGE_ALIGN(addr);
82483 vma = find_vma(mm, addr);
82484- if (TASK_SIZE - len >= addr &&
82485- (!vma || addr + len <= vma->vm_start))
82486+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
82487 return addr;
82488 }
82489
82490@@ -1857,6 +2093,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
82491 VM_BUG_ON(addr != -ENOMEM);
82492 info.flags = 0;
82493 info.low_limit = TASK_UNMAPPED_BASE;
82494+
82495+#ifdef CONFIG_PAX_RANDMMAP
82496+ if (mm->pax_flags & MF_PAX_RANDMMAP)
82497+ info.low_limit += mm->delta_mmap;
82498+#endif
82499+
82500 info.high_limit = TASK_SIZE;
82501 addr = vm_unmapped_area(&info);
82502 }
82503@@ -1867,6 +2109,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
82504
82505 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
82506 {
82507+
82508+#ifdef CONFIG_PAX_SEGMEXEC
82509+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
82510+ return;
82511+#endif
82512+
82513 /*
82514 * Is this a new hole at the highest possible address?
82515 */
82516@@ -1874,8 +2122,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
82517 mm->free_area_cache = addr;
82518
82519 /* dont allow allocations above current base */
82520- if (mm->free_area_cache > mm->mmap_base)
82521+ if (mm->free_area_cache > mm->mmap_base) {
82522 mm->free_area_cache = mm->mmap_base;
82523+ mm->cached_hole_size = ~0UL;
82524+ }
82525 }
82526
82527 unsigned long
82528@@ -1922,7 +2172,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
82529
82530 /* Check the cache first. */
82531 /* (Cache hit rate is typically around 35%.) */
82532- vma = mm->mmap_cache;
82533+ vma = ACCESS_ONCE(mm->mmap_cache);
82534 if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
82535 struct rb_node *rb_node;
82536
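
Reading mm->mmap_cache through ACCESS_ONCE() in the hunk above forces a single load of the cache pointer, which concurrent find_vma() callers update while holding mmap_sem only for read; without it the compiler may reload the pointer between the NULL test and the range tests and observe two different VMAs. A sketch of the one-load pattern, using the kernel's classic volatile-cast definition:

#include <stddef.h>

/* Force exactly one load of a shared location (the old ACCESS_ONCE). */
#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

struct vma { unsigned long vm_start, vm_end; };

struct vma *cache;   /* racily updated by other lookup() callers */

struct vma *lookup(unsigned long addr)
{
	struct vma *v = ACCESS_ONCE(cache);   /* snapshot once, test thrice */
	if (v && v->vm_end > addr && v->vm_start <= addr)
		return v;
	return NULL;    /* the kernel falls back to the rb-tree walk here */
}

int main(void) { return 0; }
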
82537@@ -1974,6 +2224,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
82538 return vma;
82539 }
82540
82541+#ifdef CONFIG_PAX_SEGMEXEC
82542+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
82543+{
82544+ struct vm_area_struct *vma_m;
82545+
82546+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
82547+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
82548+ BUG_ON(vma->vm_mirror);
82549+ return NULL;
82550+ }
82551+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
82552+ vma_m = vma->vm_mirror;
82553+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
82554+ BUG_ON(vma->vm_file != vma_m->vm_file);
82555+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
82556+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
82557+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
82558+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
82559+ return vma_m;
82560+}
82561+#endif
82562+
82563 /*
82564 * Verify that the stack growth is acceptable and
82565 * update accounting. This is shared with both the
82566@@ -1990,6 +2262,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
82567 return -ENOMEM;
82568
82569 /* Stack limit test */
82570+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
82571 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
82572 return -ENOMEM;
82573
82574@@ -2000,6 +2273,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
82575 locked = mm->locked_vm + grow;
82576 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
82577 limit >>= PAGE_SHIFT;
82578+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
82579 if (locked > limit && !capable(CAP_IPC_LOCK))
82580 return -ENOMEM;
82581 }
82582@@ -2029,37 +2303,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
82583 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
82584 * vma is the last one with address > vma->vm_end. Have to extend vma.
82585 */
82586+#ifndef CONFIG_IA64
82587+static
82588+#endif
82589 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
82590 {
82591 int error;
82592+ bool locknext;
82593
82594 if (!(vma->vm_flags & VM_GROWSUP))
82595 return -EFAULT;
82596
82597+ /* Also guard against wrapping around to address 0. */
82598+ if (address < PAGE_ALIGN(address+1))
82599+ address = PAGE_ALIGN(address+1);
82600+ else
82601+ return -ENOMEM;
82602+
82603 /*
82604 * We must make sure the anon_vma is allocated
82605 * so that the anon_vma locking is not a noop.
82606 */
82607 if (unlikely(anon_vma_prepare(vma)))
82608 return -ENOMEM;
82609+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
82610+ if (locknext && anon_vma_prepare(vma->vm_next))
82611+ return -ENOMEM;
82612 vma_lock_anon_vma(vma);
82613+ if (locknext)
82614+ vma_lock_anon_vma(vma->vm_next);
82615
82616 /*
82617 * vma->vm_start/vm_end cannot change under us because the caller
82618 * is required to hold the mmap_sem in read mode. We need the
82619- * anon_vma lock to serialize against concurrent expand_stacks.
82620- * Also guard against wrapping around to address 0.
82621+ * anon_vma locks to serialize against concurrent expand_stacks
82622+ * and expand_upwards.
82623 */
82624- if (address < PAGE_ALIGN(address+4))
82625- address = PAGE_ALIGN(address+4);
82626- else {
82627- vma_unlock_anon_vma(vma);
82628- return -ENOMEM;
82629- }
82630 error = 0;
82631
82632 /* Somebody else might have raced and expanded it already */
82633- if (address > vma->vm_end) {
82634+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
82635+ error = -ENOMEM;
82636+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
82637 unsigned long size, grow;
82638
82639 size = address - vma->vm_start;
82640@@ -2094,6 +2379,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
82641 }
82642 }
82643 }
82644+ if (locknext)
82645+ vma_unlock_anon_vma(vma->vm_next);
82646 vma_unlock_anon_vma(vma);
82647 khugepaged_enter_vma_merge(vma);
82648 validate_mm(vma->vm_mm);
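
The expand_upwards() rework above does three things: it page-aligns and wrap-checks the target address before any locks are taken, it also locks the next VMA's anon_vma when that neighbour is a GROWSDOWN stack so two stacks growing toward each other serialize, and it refuses growth that would come within sysctl_heap_stack_gap of an accessible next mapping. That last bound reduces to simple arithmetic; a sketch, names and constant mine:

#include <stdio.h>

#define GAP (64UL * 1024)   /* models the sysctl_heap_stack_gap default */

/* 0 if a GROWSUP stack may reach `address`, -1 (-ENOMEM-like) otherwise. */
static int may_expand_to(unsigned long address, unsigned long next_start,
			 int next_accessible)
{
	if (next_accessible && next_start - address < GAP)
		return -1;      /* would close the guard gap */
	return 0;
}

int main(void)
{
	printf("tight: %d  roomy: %d\n",
	       may_expand_to(0x3fffc000UL, 0x40000000UL, 1),   /* -1 */
	       may_expand_to(0x30000000UL, 0x40000000UL, 1));  /*  0 */
	return 0;
}
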
82649@@ -2108,6 +2395,8 @@ int expand_downwards(struct vm_area_struct *vma,
82650 unsigned long address)
82651 {
82652 int error;
82653+ bool lockprev = false;
82654+ struct vm_area_struct *prev;
82655
82656 /*
82657 * We must make sure the anon_vma is allocated
82658@@ -2121,6 +2410,15 @@ int expand_downwards(struct vm_area_struct *vma,
82659 if (error)
82660 return error;
82661
82662+ prev = vma->vm_prev;
82663+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
82664+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
82665+#endif
82666+ if (lockprev && anon_vma_prepare(prev))
82667+ return -ENOMEM;
82668+ if (lockprev)
82669+ vma_lock_anon_vma(prev);
82670+
82671 vma_lock_anon_vma(vma);
82672
82673 /*
82674@@ -2130,9 +2428,17 @@ int expand_downwards(struct vm_area_struct *vma,
82675 */
82676
82677 /* Somebody else might have raced and expanded it already */
82678- if (address < vma->vm_start) {
82679+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
82680+ error = -ENOMEM;
82681+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
82682 unsigned long size, grow;
82683
82684+#ifdef CONFIG_PAX_SEGMEXEC
82685+ struct vm_area_struct *vma_m;
82686+
82687+ vma_m = pax_find_mirror_vma(vma);
82688+#endif
82689+
82690 size = vma->vm_end - address;
82691 grow = (vma->vm_start - address) >> PAGE_SHIFT;
82692
82693@@ -2157,6 +2463,18 @@ int expand_downwards(struct vm_area_struct *vma,
82694 vma->vm_pgoff -= grow;
82695 anon_vma_interval_tree_post_update_vma(vma);
82696 vma_gap_update(vma);
82697+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
82698+
82699+#ifdef CONFIG_PAX_SEGMEXEC
82700+ if (vma_m) {
82701+ anon_vma_interval_tree_pre_update_vma(vma_m);
82702+ vma_m->vm_start -= grow << PAGE_SHIFT;
82703+ vma_m->vm_pgoff -= grow;
82704+ anon_vma_interval_tree_post_update_vma(vma_m);
82705+ vma_gap_update(vma_m);
82706+ }
82707+#endif
82708+
82709 spin_unlock(&vma->vm_mm->page_table_lock);
82710
82711 perf_event_mmap(vma);
82712@@ -2263,6 +2581,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
82713 do {
82714 long nrpages = vma_pages(vma);
82715
82716+#ifdef CONFIG_PAX_SEGMEXEC
82717+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
82718+ vma = remove_vma(vma);
82719+ continue;
82720+ }
82721+#endif
82722+
82723 if (vma->vm_flags & VM_ACCOUNT)
82724 nr_accounted += nrpages;
82725 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
82726@@ -2308,6 +2633,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
82727 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
82728 vma->vm_prev = NULL;
82729 do {
82730+
82731+#ifdef CONFIG_PAX_SEGMEXEC
82732+ if (vma->vm_mirror) {
82733+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
82734+ vma->vm_mirror->vm_mirror = NULL;
82735+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
82736+ vma->vm_mirror = NULL;
82737+ }
82738+#endif
82739+
82740 vma_rb_erase(vma, &mm->mm_rb);
82741 mm->map_count--;
82742 tail_vma = vma;
82743@@ -2339,14 +2674,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
82744 struct vm_area_struct *new;
82745 int err = -ENOMEM;
82746
82747+#ifdef CONFIG_PAX_SEGMEXEC
82748+ struct vm_area_struct *vma_m, *new_m = NULL;
82749+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
82750+#endif
82751+
82752 if (is_vm_hugetlb_page(vma) && (addr &
82753 ~(huge_page_mask(hstate_vma(vma)))))
82754 return -EINVAL;
82755
82756+#ifdef CONFIG_PAX_SEGMEXEC
82757+ vma_m = pax_find_mirror_vma(vma);
82758+#endif
82759+
82760 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
82761 if (!new)
82762 goto out_err;
82763
82764+#ifdef CONFIG_PAX_SEGMEXEC
82765+ if (vma_m) {
82766+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
82767+ if (!new_m) {
82768+ kmem_cache_free(vm_area_cachep, new);
82769+ goto out_err;
82770+ }
82771+ }
82772+#endif
82773+
82774 /* most fields are the same, copy all, and then fixup */
82775 *new = *vma;
82776
82777@@ -2359,6 +2713,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
82778 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
82779 }
82780
82781+#ifdef CONFIG_PAX_SEGMEXEC
82782+ if (vma_m) {
82783+ *new_m = *vma_m;
82784+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
82785+ new_m->vm_mirror = new;
82786+ new->vm_mirror = new_m;
82787+
82788+ if (new_below)
82789+ new_m->vm_end = addr_m;
82790+ else {
82791+ new_m->vm_start = addr_m;
82792+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
82793+ }
82794+ }
82795+#endif
82796+
82797 pol = mpol_dup(vma_policy(vma));
82798 if (IS_ERR(pol)) {
82799 err = PTR_ERR(pol);
82800@@ -2381,6 +2751,36 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
82801 else
82802 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
82803
82804+#ifdef CONFIG_PAX_SEGMEXEC
82805+ if (!err && vma_m) {
82806+ if (anon_vma_clone(new_m, vma_m))
82807+ goto out_free_mpol;
82808+
82809+ mpol_get(pol);
82810+ vma_set_policy(new_m, pol);
82811+
82812+ if (new_m->vm_file)
82813+ get_file(new_m->vm_file);
82814+
82815+ if (new_m->vm_ops && new_m->vm_ops->open)
82816+ new_m->vm_ops->open(new_m);
82817+
82818+ if (new_below)
82819+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
82820+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
82821+ else
82822+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
82823+
82824+ if (err) {
82825+ if (new_m->vm_ops && new_m->vm_ops->close)
82826+ new_m->vm_ops->close(new_m);
82827+ if (new_m->vm_file)
82828+ fput(new_m->vm_file);
82829+ mpol_put(pol);
82830+ }
82831+ }
82832+#endif
82833+
82834 /* Success. */
82835 if (!err)
82836 return 0;
82837@@ -2390,10 +2790,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
82838 new->vm_ops->close(new);
82839 if (new->vm_file)
82840 fput(new->vm_file);
82841- unlink_anon_vmas(new);
82842 out_free_mpol:
82843 mpol_put(pol);
82844 out_free_vma:
82845+
82846+#ifdef CONFIG_PAX_SEGMEXEC
82847+ if (new_m) {
82848+ unlink_anon_vmas(new_m);
82849+ kmem_cache_free(vm_area_cachep, new_m);
82850+ }
82851+#endif
82852+
82853+ unlink_anon_vmas(new);
82854 kmem_cache_free(vm_area_cachep, new);
82855 out_err:
82856 return err;
82857@@ -2406,6 +2814,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
82858 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
82859 unsigned long addr, int new_below)
82860 {
82861+
82862+#ifdef CONFIG_PAX_SEGMEXEC
82863+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
82864+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
82865+ if (mm->map_count >= sysctl_max_map_count-1)
82866+ return -ENOMEM;
82867+ } else
82868+#endif
82869+
82870 if (mm->map_count >= sysctl_max_map_count)
82871 return -ENOMEM;
82872
82873@@ -2417,11 +2834,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
82874 * work. This now handles partial unmappings.
82875 * Jeremy Fitzhardinge <jeremy@goop.org>
82876 */
82877+#ifdef CONFIG_PAX_SEGMEXEC
82878 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
82879 {
82880+ int ret = __do_munmap(mm, start, len);
82881+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
82882+ return ret;
82883+
82884+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
82885+}
82886+
82887+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
82888+#else
82889+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
82890+#endif
82891+{
82892 unsigned long end;
82893 struct vm_area_struct *vma, *prev, *last;
82894
82895+ /*
82896+ * mm->mmap_sem is required to protect against another thread
82897+ * changing the mappings in case we sleep.
82898+ */
82899+ verify_mm_writelocked(mm);
82900+
82901 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
82902 return -EINVAL;
82903
82904@@ -2496,6 +2932,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
82905 /* Fix up all other VM information */
82906 remove_vma_list(mm, vma);
82907
82908+ track_exec_limit(mm, start, end, 0UL);
82909+
82910 return 0;
82911 }
82912
82913@@ -2504,6 +2942,13 @@ int vm_munmap(unsigned long start, size_t len)
82914 int ret;
82915 struct mm_struct *mm = current->mm;
82916
82917+
82918+#ifdef CONFIG_PAX_SEGMEXEC
82919+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
82920+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
82921+ return -EINVAL;
82922+#endif
82923+
82924 down_write(&mm->mmap_sem);
82925 ret = do_munmap(mm, start, len);
82926 up_write(&mm->mmap_sem);
82927@@ -2517,16 +2962,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
82928 return vm_munmap(addr, len);
82929 }
82930
82931-static inline void verify_mm_writelocked(struct mm_struct *mm)
82932-{
82933-#ifdef CONFIG_DEBUG_VM
82934- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
82935- WARN_ON(1);
82936- up_read(&mm->mmap_sem);
82937- }
82938-#endif
82939-}
82940-
82941 /*
82942 * this is really a simplified "do_mmap". it only handles
82943 * anonymous maps. eventually we may be able to do some
82944@@ -2540,6 +2975,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
82945 struct rb_node ** rb_link, * rb_parent;
82946 pgoff_t pgoff = addr >> PAGE_SHIFT;
82947 int error;
82948+ unsigned long charged;
82949
82950 len = PAGE_ALIGN(len);
82951 if (!len)
82952@@ -2547,16 +2983,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
82953
82954 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
82955
82956+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
82957+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
82958+ flags &= ~VM_EXEC;
82959+
82960+#ifdef CONFIG_PAX_MPROTECT
82961+ if (mm->pax_flags & MF_PAX_MPROTECT)
82962+ flags &= ~VM_MAYEXEC;
82963+#endif
82964+
82965+ }
82966+#endif
82967+
82968 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
82969 if (error & ~PAGE_MASK)
82970 return error;
82971
82972+ charged = len >> PAGE_SHIFT;
82973+
82974 /*
82975 * mlock MCL_FUTURE?
82976 */
82977 if (mm->def_flags & VM_LOCKED) {
82978 unsigned long locked, lock_limit;
82979- locked = len >> PAGE_SHIFT;
82980+ locked = charged;
82981 locked += mm->locked_vm;
82982 lock_limit = rlimit(RLIMIT_MEMLOCK);
82983 lock_limit >>= PAGE_SHIFT;
82984@@ -2573,21 +3023,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
82985 /*
82986 * Clear old maps. this also does some error checking for us
82987 */
82988- munmap_back:
82989 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
82990 if (do_munmap(mm, addr, len))
82991 return -ENOMEM;
82992- goto munmap_back;
82993+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
82994 }
82995
82996 /* Check against address space limits *after* clearing old maps... */
82997- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
82998+ if (!may_expand_vm(mm, charged))
82999 return -ENOMEM;
83000
83001 if (mm->map_count > sysctl_max_map_count)
83002 return -ENOMEM;
83003
83004- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
83005+ if (security_vm_enough_memory_mm(mm, charged))
83006 return -ENOMEM;
83007
83008 /* Can we just expand an old private anonymous mapping? */
83009@@ -2601,7 +3050,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
83010 */
83011 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
83012 if (!vma) {
83013- vm_unacct_memory(len >> PAGE_SHIFT);
83014+ vm_unacct_memory(charged);
83015 return -ENOMEM;
83016 }
83017
83018@@ -2615,11 +3064,12 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
83019 vma_link(mm, vma, prev, rb_link, rb_parent);
83020 out:
83021 perf_event_mmap(vma);
83022- mm->total_vm += len >> PAGE_SHIFT;
83023+ mm->total_vm += charged;
83024 if (flags & VM_LOCKED) {
83025 if (!mlock_vma_pages_range(vma, addr, addr + len))
83026- mm->locked_vm += (len >> PAGE_SHIFT);
83027+ mm->locked_vm += charged;
83028 }
83029+ track_exec_limit(mm, addr, addr + len, flags);
83030 return addr;
83031 }
83032
83033@@ -2677,6 +3127,7 @@ void exit_mmap(struct mm_struct *mm)
83034 while (vma) {
83035 if (vma->vm_flags & VM_ACCOUNT)
83036 nr_accounted += vma_pages(vma);
83037+ vma->vm_mirror = NULL;
83038 vma = remove_vma(vma);
83039 }
83040 vm_unacct_memory(nr_accounted);
83041@@ -2693,6 +3144,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
83042 struct vm_area_struct *prev;
83043 struct rb_node **rb_link, *rb_parent;
83044
83045+#ifdef CONFIG_PAX_SEGMEXEC
83046+ struct vm_area_struct *vma_m = NULL;
83047+#endif
83048+
83049+ if (security_mmap_addr(vma->vm_start))
83050+ return -EPERM;
83051+
83052 /*
83053 * The vm_pgoff of a purely anonymous vma should be irrelevant
83054 * until its first write fault, when page's anon_vma and index
83055@@ -2716,7 +3174,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
83056 security_vm_enough_memory_mm(mm, vma_pages(vma)))
83057 return -ENOMEM;
83058
83059+#ifdef CONFIG_PAX_SEGMEXEC
83060+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
83061+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
83062+ if (!vma_m)
83063+ return -ENOMEM;
83064+ }
83065+#endif
83066+
83067 vma_link(mm, vma, prev, rb_link, rb_parent);
83068+
83069+#ifdef CONFIG_PAX_SEGMEXEC
83070+ if (vma_m)
83071+ BUG_ON(pax_mirror_vma(vma_m, vma));
83072+#endif
83073+
83074 return 0;
83075 }
83076
83077@@ -2736,6 +3208,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
83078 struct mempolicy *pol;
83079 bool faulted_in_anon_vma = true;
83080
83081+ BUG_ON(vma->vm_mirror);
83082+
83083 /*
83084 * If anonymous vma has not yet been faulted, update new pgoff
83085 * to match new location, to increase its chance of merging.
83086@@ -2802,6 +3276,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
83087 return NULL;
83088 }
83089
83090+#ifdef CONFIG_PAX_SEGMEXEC
83091+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
83092+{
83093+ struct vm_area_struct *prev_m;
83094+ struct rb_node **rb_link_m, *rb_parent_m;
83095+ struct mempolicy *pol_m;
83096+
83097+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
83098+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
83099+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
83100+ *vma_m = *vma;
83101+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
83102+ if (anon_vma_clone(vma_m, vma))
83103+ return -ENOMEM;
83104+ pol_m = vma_policy(vma_m);
83105+ mpol_get(pol_m);
83106+ vma_set_policy(vma_m, pol_m);
83107+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
83108+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
83109+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
83110+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
83111+ if (vma_m->vm_file)
83112+ get_file(vma_m->vm_file);
83113+ if (vma_m->vm_ops && vma_m->vm_ops->open)
83114+ vma_m->vm_ops->open(vma_m);
83115+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
83116+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
83117+ vma_m->vm_mirror = vma;
83118+ vma->vm_mirror = vma_m;
83119+ return 0;
83120+}
83121+#endif
83122+
83123 /*
83124 * Return true if the calling process may expand its vm space by the passed
83125 * number of pages
83126@@ -2813,6 +3320,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
83127
83128 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
83129
83130+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
83131 if (cur + npages > lim)
83132 return 0;
83133 return 1;
83134@@ -2883,6 +3391,22 @@ int install_special_mapping(struct mm_struct *mm,
83135 vma->vm_start = addr;
83136 vma->vm_end = addr + len;
83137
83138+#ifdef CONFIG_PAX_MPROTECT
83139+ if (mm->pax_flags & MF_PAX_MPROTECT) {
83140+#ifndef CONFIG_PAX_MPROTECT_COMPAT
83141+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
83142+ return -EPERM;
83143+ if (!(vm_flags & VM_EXEC))
83144+ vm_flags &= ~VM_MAYEXEC;
83145+#else
83146+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
83147+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
83148+#endif
83149+ else
83150+ vm_flags &= ~VM_MAYWRITE;
83151+ }
83152+#endif
83153+
83154 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
83155 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
83156
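
The mm/mmap.c hunks above revolve around SEGMEXEC mirror vmas: every executable mapping gets a non-writable twin displaced by SEGMEXEC_TASK_SIZE, so instruction fetches and data accesses resolve through different segments. A minimal user-space sketch of that address arithmetic, assuming the conventional i386 value SEGMEXEC_TASK_SIZE = 0x60000000 (half of the 3 GB user address space):

#include <stdio.h>

#define SEGMEXEC_TASK_SIZE 0x60000000UL	/* assumed i386 value: 3 GB / 2 */

/* an executable vma [start, end) in the lower half gets a
 * non-writable mirror at the same offset in the upper half */
static unsigned long mirror_of(unsigned long addr)
{
	return addr + SEGMEXEC_TASK_SIZE;
}

int main(void)
{
	unsigned long start = 0x08048000UL, end = 0x08049000UL;

	printf("vma   : %#lx-%#lx\n", start, end);
	printf("mirror: %#lx-%#lx\n", mirror_of(start), mirror_of(end));
	return 0;
}
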
83157diff --git a/mm/mprotect.c b/mm/mprotect.c
83158index 94722a4..9837984 100644
83159--- a/mm/mprotect.c
83160+++ b/mm/mprotect.c
83161@@ -23,10 +23,17 @@
83162 #include <linux/mmu_notifier.h>
83163 #include <linux/migrate.h>
83164 #include <linux/perf_event.h>
83165+
83166+#ifdef CONFIG_PAX_MPROTECT
83167+#include <linux/elf.h>
83168+#include <linux/binfmts.h>
83169+#endif
83170+
83171 #include <asm/uaccess.h>
83172 #include <asm/pgtable.h>
83173 #include <asm/cacheflush.h>
83174 #include <asm/tlbflush.h>
83175+#include <asm/mmu_context.h>
83176
83177 #ifndef pgprot_modify
83178 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
83179@@ -233,6 +240,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
83180 return pages;
83181 }
83182
83183+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
83184+/* called while holding the mmap semaphore for writing, except for stack expansion */
83185+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
83186+{
83187+ unsigned long oldlimit, newlimit = 0UL;
83188+
83189+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
83190+ return;
83191+
83192+ spin_lock(&mm->page_table_lock);
83193+ oldlimit = mm->context.user_cs_limit;
83194+ if ((prot & VM_EXEC) && oldlimit < end)
83195+ /* USER_CS limit moved up */
83196+ newlimit = end;
83197+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
83198+ /* USER_CS limit moved down */
83199+ newlimit = start;
83200+
83201+ if (newlimit) {
83202+ mm->context.user_cs_limit = newlimit;
83203+
83204+#ifdef CONFIG_SMP
83205+ wmb();
83206+ cpus_clear(mm->context.cpu_user_cs_mask);
83207+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
83208+#endif
83209+
83210+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
83211+ }
83212+ spin_unlock(&mm->page_table_lock);
83213+ if (newlimit == end) {
83214+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
83215+
83216+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
83217+ if (is_vm_hugetlb_page(vma))
83218+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
83219+ else
83220+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
83221+ }
83222+}
83223+#endif
83224+
83225 int
83226 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
83227 unsigned long start, unsigned long end, unsigned long newflags)
83228@@ -245,11 +294,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
83229 int error;
83230 int dirty_accountable = 0;
83231
83232+#ifdef CONFIG_PAX_SEGMEXEC
83233+ struct vm_area_struct *vma_m = NULL;
83234+ unsigned long start_m, end_m;
83235+
83236+ start_m = start + SEGMEXEC_TASK_SIZE;
83237+ end_m = end + SEGMEXEC_TASK_SIZE;
83238+#endif
83239+
83240 if (newflags == oldflags) {
83241 *pprev = vma;
83242 return 0;
83243 }
83244
83245+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
83246+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
83247+
83248+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
83249+ return -ENOMEM;
83250+
83251+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
83252+ return -ENOMEM;
83253+ }
83254+
83255 /*
83256 * If we make a private mapping writable we increase our commit;
83257 * but (without finer accounting) cannot reduce our commit if we
83258@@ -266,6 +333,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
83259 }
83260 }
83261
83262+#ifdef CONFIG_PAX_SEGMEXEC
83263+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
83264+ if (start != vma->vm_start) {
83265+ error = split_vma(mm, vma, start, 1);
83266+ if (error)
83267+ goto fail;
83268+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
83269+ *pprev = (*pprev)->vm_next;
83270+ }
83271+
83272+ if (end != vma->vm_end) {
83273+ error = split_vma(mm, vma, end, 0);
83274+ if (error)
83275+ goto fail;
83276+ }
83277+
83278+ if (pax_find_mirror_vma(vma)) {
83279+ error = __do_munmap(mm, start_m, end_m - start_m);
83280+ if (error)
83281+ goto fail;
83282+ } else {
83283+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
83284+ if (!vma_m) {
83285+ error = -ENOMEM;
83286+ goto fail;
83287+ }
83288+ vma->vm_flags = newflags;
83289+ error = pax_mirror_vma(vma_m, vma);
83290+ if (error) {
83291+ vma->vm_flags = oldflags;
83292+ goto fail;
83293+ }
83294+ }
83295+ }
83296+#endif
83297+
83298 /*
83299 * First try to merge with previous and/or next vma.
83300 */
83301@@ -296,9 +399,21 @@ success:
83302 * vm_flags and vm_page_prot are protected by the mmap_sem
83303 * held in write mode.
83304 */
83305+
83306+#ifdef CONFIG_PAX_SEGMEXEC
83307+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
83308+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
83309+#endif
83310+
83311 vma->vm_flags = newflags;
83312+
83313+#ifdef CONFIG_PAX_MPROTECT
83314+ if (mm->binfmt && mm->binfmt->handle_mprotect)
83315+ mm->binfmt->handle_mprotect(vma, newflags);
83316+#endif
83317+
83318 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
83319- vm_get_page_prot(newflags));
83320+ vm_get_page_prot(vma->vm_flags));
83321
83322 if (vma_wants_writenotify(vma)) {
83323 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
83324@@ -337,6 +452,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
83325 end = start + len;
83326 if (end <= start)
83327 return -ENOMEM;
83328+
83329+#ifdef CONFIG_PAX_SEGMEXEC
83330+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
83331+ if (end > SEGMEXEC_TASK_SIZE)
83332+ return -EINVAL;
83333+ } else
83334+#endif
83335+
83336+ if (end > TASK_SIZE)
83337+ return -EINVAL;
83338+
83339 if (!arch_validate_prot(prot))
83340 return -EINVAL;
83341
83342@@ -344,7 +470,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
83343 /*
83344 * Does the application expect PROT_READ to imply PROT_EXEC:
83345 */
83346- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
83347+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
83348 prot |= PROT_EXEC;
83349
83350 vm_flags = calc_vm_prot_bits(prot);
83351@@ -376,6 +502,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
83352 if (start > vma->vm_start)
83353 prev = vma;
83354
83355+#ifdef CONFIG_PAX_MPROTECT
83356+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
83357+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
83358+#endif
83359+
83360 for (nstart = start ; ; ) {
83361 unsigned long newflags;
83362
83363@@ -386,6 +517,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
83364
83365 /* newflags >> 4 shift VM_MAY% in place of VM_% */
83366 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
83367+ if (prot & (PROT_WRITE | PROT_EXEC))
83368+ gr_log_rwxmprotect(vma->vm_file);
83369+
83370+ error = -EACCES;
83371+ goto out;
83372+ }
83373+
83374+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
83375 error = -EACCES;
83376 goto out;
83377 }
83378@@ -400,6 +539,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
83379 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
83380 if (error)
83381 goto out;
83382+
83383+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
83384+
83385 nstart = tmp;
83386
83387 if (nstart < prev->vm_end)
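
The mprotect_fixup() hunk above refuses to make a range accessible when that would leave fewer than sysctl_heap_stack_gap bytes before an adjacent growing stack. A compact sketch of the two comparisons, with illustrative parameter names and the neighbours assumed correctly ordered around [start, end), as they are while mmap_sem is held for writing:

#include <stdbool.h>

static bool gap_violation(unsigned long start, unsigned long end,
			  bool next_grows_down, unsigned long next_start,
			  bool prev_grows_up, unsigned long prev_end,
			  unsigned long gap)
{
	if (next_grows_down && gap > next_start - end)
		return true;	/* too close to a stack growing down */
	if (prev_grows_up && gap > start - prev_end)
		return true;	/* too close to a stack growing up */
	return false;
}

int main(void)
{
	/* 4 KiB range ending one page short of a grows-down stack,
	 * with a two-page gap required: returns 1 (violation) */
	return gap_violation(0x1000, 0x2000, true, 0x3000, false, 0, 0x2000);
}
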
83388diff --git a/mm/mremap.c b/mm/mremap.c
83389index e1031e1..1f2a0a1 100644
83390--- a/mm/mremap.c
83391+++ b/mm/mremap.c
83392@@ -125,6 +125,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
83393 continue;
83394 pte = ptep_get_and_clear(mm, old_addr, old_pte);
83395 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
83396+
83397+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
83398+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
83399+ pte = pte_exprotect(pte);
83400+#endif
83401+
83402 set_pte_at(mm, new_addr, new_pte, pte);
83403 }
83404
83405@@ -319,6 +325,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
83406 if (is_vm_hugetlb_page(vma))
83407 goto Einval;
83408
83409+#ifdef CONFIG_PAX_SEGMEXEC
83410+ if (pax_find_mirror_vma(vma))
83411+ goto Einval;
83412+#endif
83413+
83414 /* We can't remap across vm area boundaries */
83415 if (old_len > vma->vm_end - addr)
83416 goto Efault;
83417@@ -375,20 +386,25 @@ static unsigned long mremap_to(unsigned long addr,
83418 unsigned long ret = -EINVAL;
83419 unsigned long charged = 0;
83420 unsigned long map_flags;
83421+ unsigned long pax_task_size = TASK_SIZE;
83422
83423 if (new_addr & ~PAGE_MASK)
83424 goto out;
83425
83426- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
83427+#ifdef CONFIG_PAX_SEGMEXEC
83428+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
83429+ pax_task_size = SEGMEXEC_TASK_SIZE;
83430+#endif
83431+
83432+ pax_task_size -= PAGE_SIZE;
83433+
83434+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
83435 goto out;
83436
83437 /* Check if the location we're moving into overlaps the
83438 * old location at all, and fail if it does.
83439 */
83440- if ((new_addr <= addr) && (new_addr+new_len) > addr)
83441- goto out;
83442-
83443- if ((addr <= new_addr) && (addr+old_len) > new_addr)
83444+ if (addr + old_len > new_addr && new_addr + new_len > addr)
83445 goto out;
83446
83447 ret = do_munmap(mm, new_addr, new_len);
83448@@ -456,6 +472,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
83449 struct vm_area_struct *vma;
83450 unsigned long ret = -EINVAL;
83451 unsigned long charged = 0;
83452+ unsigned long pax_task_size = TASK_SIZE;
83453
83454 down_write(&current->mm->mmap_sem);
83455
83456@@ -476,6 +493,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
83457 if (!new_len)
83458 goto out;
83459
83460+#ifdef CONFIG_PAX_SEGMEXEC
83461+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
83462+ pax_task_size = SEGMEXEC_TASK_SIZE;
83463+#endif
83464+
83465+ pax_task_size -= PAGE_SIZE;
83466+
83467+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
83468+ old_len > pax_task_size || addr > pax_task_size-old_len)
83469+ goto out;
83470+
83471 if (flags & MREMAP_FIXED) {
83472 if (flags & MREMAP_MAYMOVE)
83473 ret = mremap_to(addr, old_len, new_addr, new_len);
83474@@ -524,6 +552,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
83475 addr + new_len);
83476 }
83477 ret = addr;
83478+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
83479 goto out;
83480 }
83481 }
83482@@ -547,7 +576,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
83483 goto out;
83484 }
83485
83486+ map_flags = vma->vm_flags;
83487 ret = move_vma(vma, addr, old_len, new_len, new_addr);
83488+ if (!(ret & ~PAGE_MASK)) {
83489+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
83490+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
83491+ }
83492 }
83493 out:
83494 if (ret & ~PAGE_MASK)
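
The mremap_to() hunk collapses the two original overlap tests into the canonical predicate for intersecting half-open intervals: each range must start before the other ends. A small self-check of the equivalence:

#include <assert.h>
#include <stdbool.h>

/* [a, a+alen) and [b, b+blen) intersect exactly when each starts
 * before the other ends -- the single test the hunk substitutes
 * for the two original conditionals */
static bool overlaps(unsigned long a, unsigned long alen,
		     unsigned long b, unsigned long blen)
{
	return a + alen > b && b + blen > a;
}

int main(void)
{
	assert(overlaps(0x1000, 0x2000, 0x2000, 0x1000));	/* [4K,12K) vs [8K,12K) */
	assert(!overlaps(0x1000, 0x1000, 0x2000, 0x1000));	/* merely adjacent */
	return 0;
}
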
83495diff --git a/mm/nommu.c b/mm/nommu.c
83496index 79c3cac..b2601ea 100644
83497--- a/mm/nommu.c
83498+++ b/mm/nommu.c
83499@@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
83500 int sysctl_overcommit_ratio = 50; /* default is 50% */
83501 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
83502 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
83503-int heap_stack_gap = 0;
83504
83505 atomic_long_t mmap_pages_allocated;
83506
83507@@ -819,7 +818,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
83508 struct vm_area_struct *vma;
83509
83510 /* check the cache first */
83511- vma = mm->mmap_cache;
83512+ vma = ACCESS_ONCE(mm->mmap_cache);
83513 if (vma && vma->vm_start <= addr && vma->vm_end > addr)
83514 return vma;
83515
83516@@ -839,15 +838,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
83517 EXPORT_SYMBOL(find_vma);
83518
83519 /*
83520- * find a VMA
83521- * - we don't extend stack VMAs under NOMMU conditions
83522- */
83523-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
83524-{
83525- return find_vma(mm, addr);
83526-}
83527-
83528-/*
83529 * expand a stack to a given address
83530 * - not supported under NOMMU conditions
83531 */
83532@@ -1555,6 +1545,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
83533
83534 /* most fields are the same, copy all, and then fixup */
83535 *new = *vma;
83536+ INIT_LIST_HEAD(&new->anon_vma_chain);
83537 *region = *vma->vm_region;
83538 new->vm_region = region;
83539
83540@@ -1975,8 +1966,8 @@ int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
83541 }
83542 EXPORT_SYMBOL(generic_file_remap_pages);
83543
83544-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
83545- unsigned long addr, void *buf, int len, int write)
83546+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
83547+ unsigned long addr, void *buf, size_t len, int write)
83548 {
83549 struct vm_area_struct *vma;
83550
83551@@ -2017,8 +2008,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
83552 *
83553 * The caller must hold a reference on @mm.
83554 */
83555-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
83556- void *buf, int len, int write)
83557+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
83558+ void *buf, size_t len, int write)
83559 {
83560 return __access_remote_vm(NULL, mm, addr, buf, len, write);
83561 }
83562@@ -2027,7 +2018,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
83563 * Access another process' address space.
83564 * - source/target buffer must be kernel space
83565 */
83566-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
83567+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
83568 {
83569 struct mm_struct *mm;
83570
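
The nommu.c signature changes widen the access_remote_vm()/access_process_vm() length from int to size_t/ssize_t so large lengths cannot be silently truncated. A toy illustration of the hazard, assuming a typical 64-bit (LP64) build:

#include <stdio.h>
#include <stddef.h>

static void take_int(int len)     { printf("int    sees %d\n", len); }
static void take_size(size_t len) { printf("size_t sees %zu\n", len); }

int main(void)
{
	size_t len = 0x100000000UL;	/* 4 GiB; needs a 64-bit size_t */

	take_int((int)len);		/* truncates to 0 on LP64 */
	take_size(len);			/* preserved */
	return 0;
}
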
83571diff --git a/mm/page-writeback.c b/mm/page-writeback.c
83572index 0713bfb..b95bb87 100644
83573--- a/mm/page-writeback.c
83574+++ b/mm/page-writeback.c
83575@@ -655,7 +655,7 @@ unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
83576 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
83577 * - the bdi dirty thresh drops quickly due to change of JBOD workload
83578 */
83579-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
83580+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
83581 unsigned long thresh,
83582 unsigned long bg_thresh,
83583 unsigned long dirty,
83584@@ -1630,7 +1630,7 @@ ratelimit_handler(struct notifier_block *self, unsigned long action,
83585 }
83586 }
83587
83588-static struct notifier_block __cpuinitdata ratelimit_nb = {
83589+static struct notifier_block ratelimit_nb = {
83590 .notifier_call = ratelimit_handler,
83591 .next = NULL,
83592 };
83593diff --git a/mm/page_alloc.c b/mm/page_alloc.c
83594index 6a83cd3..3ab04ef 100644
83595--- a/mm/page_alloc.c
83596+++ b/mm/page_alloc.c
83597@@ -58,6 +58,7 @@
83598 #include <linux/prefetch.h>
83599 #include <linux/migrate.h>
83600 #include <linux/page-debug-flags.h>
83601+#include <linux/random.h>
83602
83603 #include <asm/tlbflush.h>
83604 #include <asm/div64.h>
83605@@ -338,7 +339,7 @@ out:
83606 * This usage means that zero-order pages may not be compound.
83607 */
83608
83609-static void free_compound_page(struct page *page)
83610+void free_compound_page(struct page *page)
83611 {
83612 __free_pages_ok(page, compound_order(page));
83613 }
83614@@ -693,6 +694,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
83615 int i;
83616 int bad = 0;
83617
83618+#ifdef CONFIG_PAX_MEMORY_SANITIZE
83619+ unsigned long index = 1UL << order;
83620+#endif
83621+
83622 trace_mm_page_free(page, order);
83623 kmemcheck_free_shadow(page, order);
83624
83625@@ -708,6 +713,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
83626 debug_check_no_obj_freed(page_address(page),
83627 PAGE_SIZE << order);
83628 }
83629+
83630+#ifdef CONFIG_PAX_MEMORY_SANITIZE
83631+ for (; index; --index)
83632+ sanitize_highpage(page + index - 1);
83633+#endif
83634+
83635 arch_free_page(page, order);
83636 kernel_map_pages(page, 1 << order, 0);
83637
83638@@ -730,6 +741,19 @@ static void __free_pages_ok(struct page *page, unsigned int order)
83639 local_irq_restore(flags);
83640 }
83641
83642+#ifdef CONFIG_PAX_LATENT_ENTROPY
83643+bool __meminitdata extra_latent_entropy;
83644+
83645+static int __init setup_pax_extra_latent_entropy(char *str)
83646+{
83647+ extra_latent_entropy = true;
83648+ return 0;
83649+}
83650+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
83651+
83652+volatile u64 latent_entropy;
83653+#endif
83654+
83655 /*
83656 * Read access to zone->managed_pages is safe because it's unsigned long,
83657 * but we still need to serialize writers. Currently all callers of
83658@@ -752,6 +776,19 @@ void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
83659 set_page_count(p, 0);
83660 }
83661
83662+#ifdef CONFIG_PAX_LATENT_ENTROPY
83663+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
83664+ u64 hash = 0;
83665+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
83666+ const u64 *data = lowmem_page_address(page);
83667+
83668+ for (index = 0; index < end; index++)
83669+ hash ^= hash + data[index];
83670+ latent_entropy ^= hash;
83671+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
83672+ }
83673+#endif
83674+
83675 page_zone(page)->managed_pages += 1 << order;
83676 set_page_refcounted(page);
83677 __free_pages(page, order);
83678@@ -861,8 +898,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
83679 arch_alloc_page(page, order);
83680 kernel_map_pages(page, 1 << order, 1);
83681
83682+#ifndef CONFIG_PAX_MEMORY_SANITIZE
83683 if (gfp_flags & __GFP_ZERO)
83684 prep_zero_page(page, order, gfp_flags);
83685+#endif
83686
83687 if (order && (gfp_flags & __GFP_COMP))
83688 prep_compound_page(page, order);
83689@@ -3752,7 +3791,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
83690 unsigned long pfn;
83691
83692 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
83693+#ifdef CONFIG_X86_32
83694+		/* the switch to pfn_valid_within() caused boot failures in
83695+		   VMware 8 on 32-bit vanilla kernels, so use pfn_valid() here */
83696+ if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
83697+#else
83698 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
83699+#endif
83700 return 1;
83701 }
83702 return 0;
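
The PAX_LATENT_ENTROPY hunk folds the contents of early low-memory pages into a running hash and feeds it to add_device_randomness(). The mixing step is just hash ^= hash + word over the page viewed as u64s; a stand-alone sketch with a plain buffer in place of lowmem_page_address():

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* same per-word step the patch applies to each early page */
static uint64_t mix_words(const uint64_t *data, size_t nwords)
{
	uint64_t hash = 0;
	size_t i;

	for (i = 0; i < nwords; i++)
		hash ^= hash + data[i];
	return hash;
}

int main(void)
{
	uint64_t page[512] = { 0xdeadbeefULL, 42 };	/* stand-in for one 4 KiB page */

	printf("%#llx\n", (unsigned long long)mix_words(page, 512));
	return 0;
}
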
83703diff --git a/mm/percpu.c b/mm/percpu.c
83704index 8c8e08f..73a5cda 100644
83705--- a/mm/percpu.c
83706+++ b/mm/percpu.c
83707@@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
83708 static unsigned int pcpu_high_unit_cpu __read_mostly;
83709
83710 /* the address of the first chunk which starts with the kernel static area */
83711-void *pcpu_base_addr __read_mostly;
83712+void *pcpu_base_addr __read_only;
83713 EXPORT_SYMBOL_GPL(pcpu_base_addr);
83714
83715 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
83716diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
83717index fd26d04..0cea1b0 100644
83718--- a/mm/process_vm_access.c
83719+++ b/mm/process_vm_access.c
83720@@ -13,6 +13,7 @@
83721 #include <linux/uio.h>
83722 #include <linux/sched.h>
83723 #include <linux/highmem.h>
83724+#include <linux/security.h>
83725 #include <linux/ptrace.h>
83726 #include <linux/slab.h>
83727 #include <linux/syscalls.h>
83728@@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
83729 size_t iov_l_curr_offset = 0;
83730 ssize_t iov_len;
83731
83732+	return -ENOSYS;	/* PaX: until properly audited */
83733+
83734 /*
83735 * Work out how many pages of struct pages we're going to need
83736 * when eventually calling get_user_pages
83737 */
83738 for (i = 0; i < riovcnt; i++) {
83739 iov_len = rvec[i].iov_len;
83740- if (iov_len > 0) {
83741- nr_pages_iov = ((unsigned long)rvec[i].iov_base
83742- + iov_len)
83743- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
83744- / PAGE_SIZE + 1;
83745- nr_pages = max(nr_pages, nr_pages_iov);
83746- }
83747+ if (iov_len <= 0)
83748+ continue;
83749+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
83750+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
83751+ nr_pages = max(nr_pages, nr_pages_iov);
83752 }
83753
83754 if (nr_pages == 0)
83755@@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
83756 goto free_proc_pages;
83757 }
83758
83759+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
83760+ rc = -EPERM;
83761+ goto put_task_struct;
83762+ }
83763+
83764 mm = mm_access(task, PTRACE_MODE_ATTACH);
83765 if (!mm || IS_ERR(mm)) {
83766 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
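
The refactored loop in process_vm_rw_core() keeps the same page-count bound as before: for a user range [base, base+len) it counts page frames from the one containing base through the one containing base+len. A sketch with an assumed 4 KiB page size, showing the formula is a safe upper bound rather than an exact count:

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumed for the example */

/* upper bound on the struct page pointers get_user_pages() can return */
static unsigned long pages_spanned(unsigned long base, unsigned long len)
{
	return (base + len) / PAGE_SIZE - base / PAGE_SIZE + 1;
}

int main(void)
{
	printf("%lu\n", pages_spanned(0x1800, 0x1000));	/* 2: crosses one boundary */
	printf("%lu\n", pages_spanned(0x1000, 0x1000));	/* 2: bound; exactly 1 page used */
	return 0;
}
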
83767diff --git a/mm/rmap.c b/mm/rmap.c
83768index 2c78f8c..9e9c624 100644
83769--- a/mm/rmap.c
83770+++ b/mm/rmap.c
83771@@ -163,6 +163,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
83772 struct anon_vma *anon_vma = vma->anon_vma;
83773 struct anon_vma_chain *avc;
83774
83775+#ifdef CONFIG_PAX_SEGMEXEC
83776+ struct anon_vma_chain *avc_m = NULL;
83777+#endif
83778+
83779 might_sleep();
83780 if (unlikely(!anon_vma)) {
83781 struct mm_struct *mm = vma->vm_mm;
83782@@ -172,6 +176,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
83783 if (!avc)
83784 goto out_enomem;
83785
83786+#ifdef CONFIG_PAX_SEGMEXEC
83787+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
83788+ if (!avc_m)
83789+ goto out_enomem_free_avc;
83790+#endif
83791+
83792 anon_vma = find_mergeable_anon_vma(vma);
83793 allocated = NULL;
83794 if (!anon_vma) {
83795@@ -185,6 +195,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
83796 /* page_table_lock to protect against threads */
83797 spin_lock(&mm->page_table_lock);
83798 if (likely(!vma->anon_vma)) {
83799+
83800+#ifdef CONFIG_PAX_SEGMEXEC
83801+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
83802+
83803+ if (vma_m) {
83804+ BUG_ON(vma_m->anon_vma);
83805+ vma_m->anon_vma = anon_vma;
83806+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
83807+ avc_m = NULL;
83808+ }
83809+#endif
83810+
83811 vma->anon_vma = anon_vma;
83812 anon_vma_chain_link(vma, avc, anon_vma);
83813 allocated = NULL;
83814@@ -195,12 +217,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
83815
83816 if (unlikely(allocated))
83817 put_anon_vma(allocated);
83818+
83819+#ifdef CONFIG_PAX_SEGMEXEC
83820+ if (unlikely(avc_m))
83821+ anon_vma_chain_free(avc_m);
83822+#endif
83823+
83824 if (unlikely(avc))
83825 anon_vma_chain_free(avc);
83826 }
83827 return 0;
83828
83829 out_enomem_free_avc:
83830+
83831+#ifdef CONFIG_PAX_SEGMEXEC
83832+ if (avc_m)
83833+ anon_vma_chain_free(avc_m);
83834+#endif
83835+
83836 anon_vma_chain_free(avc);
83837 out_enomem:
83838 return -ENOMEM;
83839@@ -236,7 +270,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
83840 * Attach the anon_vmas from src to dst.
83841 * Returns 0 on success, -ENOMEM on failure.
83842 */
83843-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
83844+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
83845 {
83846 struct anon_vma_chain *avc, *pavc;
83847 struct anon_vma *root = NULL;
83848@@ -269,7 +303,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
83849 * the corresponding VMA in the parent process is attached to.
83850 * Returns 0 on success, non-zero on failure.
83851 */
83852-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
83853+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
83854 {
83855 struct anon_vma_chain *avc;
83856 struct anon_vma *anon_vma;
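
anon_vma_prepare() above allocates the mirror's chain entry (avc_m) with GFP_KERNEL before taking page_table_lock, because whether the mirror needs linking is only knowable under the spinlock, where sleeping allocations are forbidden; the spare is freed once the lock drops. A generic sketch of that allocate-outside/commit-inside pattern, with a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdlib.h>

struct node { struct node *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *list;

static int add_with_optional_second(int (*second_needed)(void))
{
	struct node *a = malloc(sizeof(*a));	/* may sleep: done unlocked */
	struct node *b = malloc(sizeof(*b));	/* pre-allocate the maybe-needed one */

	if (!a || !b) {
		free(a);
		free(b);
		return -1;
	}

	pthread_mutex_lock(&lock);		/* critical section: no allocation */
	a->next = list;
	list = a;
	if (second_needed()) {			/* only decidable under the lock */
		b->next = list;
		list = b;
		b = NULL;
	}
	pthread_mutex_unlock(&lock);

	free(b);				/* drop the spare if it went unused */
	return 0;
}

static int always(void) { return 1; }

int main(void)
{
	return add_with_optional_second(always);
}
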
83857diff --git a/mm/shmem.c b/mm/shmem.c
83858index efd0b3a..994b702 100644
83859--- a/mm/shmem.c
83860+++ b/mm/shmem.c
83861@@ -31,7 +31,7 @@
83862 #include <linux/export.h>
83863 #include <linux/swap.h>
83864
83865-static struct vfsmount *shm_mnt;
83866+struct vfsmount *shm_mnt;
83867
83868 #ifdef CONFIG_SHMEM
83869 /*
83870@@ -75,7 +75,7 @@ static struct vfsmount *shm_mnt;
83871 #define BOGO_DIRENT_SIZE 20
83872
83873 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
83874-#define SHORT_SYMLINK_LEN 128
83875+#define SHORT_SYMLINK_LEN 64
83876
83877 /*
83878 * shmem_fallocate and shmem_writepage communicate via inode->i_private
83879@@ -2202,6 +2202,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
83880 static int shmem_xattr_validate(const char *name)
83881 {
83882 struct { const char *prefix; size_t len; } arr[] = {
83883+
83884+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
83885+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
83886+#endif
83887+
83888 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
83889 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
83890 };
83891@@ -2257,6 +2262,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
83892 if (err)
83893 return err;
83894
83895+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
83896+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
83897+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
83898+ return -EOPNOTSUPP;
83899+ if (size > 8)
83900+ return -EINVAL;
83901+ }
83902+#endif
83903+
83904 return simple_xattr_set(&info->xattrs, name, value, size, flags);
83905 }
83906
83907@@ -2562,8 +2576,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
83908 int err = -ENOMEM;
83909
83910 /* Round up to L1_CACHE_BYTES to resist false sharing */
83911- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
83912- L1_CACHE_BYTES), GFP_KERNEL);
83913+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
83914 if (!sbinfo)
83915 return -ENOMEM;
83916
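
The shmem_xattr hunks whitelist exactly one user-namespace attribute, the PaX flags, and cap its value at 8 bytes. A user-space sketch of what that permits, assuming XATTR_NAME_PAX_FLAGS carries its conventional value "user.pax.flags" and the paxctl convention that lowercase letters disable the corresponding feature:

#include <stdio.h>
#include <sys/xattr.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/dev/shm/testfile";

	/* "pm": disable PAGEEXEC and MPROTECT for this file; two bytes,
	 * well under the 8-byte cap enforced by the hunk above */
	if (setxattr(path, "user.pax.flags", "pm", 2, 0) != 0)
		perror("setxattr");
	return 0;
}
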
83917diff --git a/mm/slab.c b/mm/slab.c
83918index e7667a3..a48e73b 100644
83919--- a/mm/slab.c
83920+++ b/mm/slab.c
83921@@ -306,7 +306,7 @@ struct kmem_list3 {
83922 * Need this for bootstrapping a per node allocator.
83923 */
83924 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
83925-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
83926+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
83927 #define CACHE_CACHE 0
83928 #define SIZE_AC MAX_NUMNODES
83929 #define SIZE_L3 (2 * MAX_NUMNODES)
83930@@ -407,10 +407,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
83931 if ((x)->max_freeable < i) \
83932 (x)->max_freeable = i; \
83933 } while (0)
83934-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
83935-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
83936-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
83937-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
83938+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
83939+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
83940+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
83941+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
83942 #else
83943 #define STATS_INC_ACTIVE(x) do { } while (0)
83944 #define STATS_DEC_ACTIVE(x) do { } while (0)
83945@@ -518,7 +518,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
83946 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
83947 */
83948 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
83949- const struct slab *slab, void *obj)
83950+ const struct slab *slab, const void *obj)
83951 {
83952 u32 offset = (obj - slab->s_mem);
83953 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
83954@@ -539,12 +539,13 @@ EXPORT_SYMBOL(malloc_sizes);
83955 struct cache_names {
83956 char *name;
83957 char *name_dma;
83958+ char *name_usercopy;
83959 };
83960
83961 static struct cache_names __initdata cache_names[] = {
83962-#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
83963+#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)", .name_usercopy = "size-" #x "(USERCOPY)" },
83964 #include <linux/kmalloc_sizes.h>
83965- {NULL,}
83966+ {NULL}
83967 #undef CACHE
83968 };
83969
83970@@ -729,6 +730,12 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
83971 if (unlikely(gfpflags & GFP_DMA))
83972 return csizep->cs_dmacachep;
83973 #endif
83974+
83975+#ifdef CONFIG_PAX_USERCOPY_SLABS
83976+ if (unlikely(gfpflags & GFP_USERCOPY))
83977+ return csizep->cs_usercopycachep;
83978+#endif
83979+
83980 return csizep->cs_cachep;
83981 }
83982
83983@@ -1482,7 +1489,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
83984 return notifier_from_errno(err);
83985 }
83986
83987-static struct notifier_block __cpuinitdata cpucache_notifier = {
83988+static struct notifier_block cpucache_notifier = {
83989 &cpuup_callback, NULL, 0
83990 };
83991
83992@@ -1667,12 +1674,12 @@ void __init kmem_cache_init(void)
83993 */
83994
83995 sizes[INDEX_AC].cs_cachep = create_kmalloc_cache(names[INDEX_AC].name,
83996- sizes[INDEX_AC].cs_size, ARCH_KMALLOC_FLAGS);
83997+ sizes[INDEX_AC].cs_size, ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
83998
83999 if (INDEX_AC != INDEX_L3)
84000 sizes[INDEX_L3].cs_cachep =
84001 create_kmalloc_cache(names[INDEX_L3].name,
84002- sizes[INDEX_L3].cs_size, ARCH_KMALLOC_FLAGS);
84003+ sizes[INDEX_L3].cs_size, ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
84004
84005 slab_early_init = 0;
84006
84007@@ -1686,13 +1693,20 @@ void __init kmem_cache_init(void)
84008 */
84009 if (!sizes->cs_cachep)
84010 sizes->cs_cachep = create_kmalloc_cache(names->name,
84011- sizes->cs_size, ARCH_KMALLOC_FLAGS);
84012+ sizes->cs_size, ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
84013
84014 #ifdef CONFIG_ZONE_DMA
84015 sizes->cs_dmacachep = create_kmalloc_cache(
84016 names->name_dma, sizes->cs_size,
84017 SLAB_CACHE_DMA|ARCH_KMALLOC_FLAGS);
84018 #endif
84019+
84020+#ifdef CONFIG_PAX_USERCOPY_SLABS
84021+ sizes->cs_usercopycachep = create_kmalloc_cache(
84022+ names->name_usercopy, sizes->cs_size,
84023+ ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
84024+#endif
84025+
84026 sizes++;
84027 names++;
84028 }
84029@@ -3924,6 +3938,7 @@ void kfree(const void *objp)
84030
84031 if (unlikely(ZERO_OR_NULL_PTR(objp)))
84032 return;
84033+ VM_BUG_ON(!virt_addr_valid(objp));
84034 local_irq_save(flags);
84035 kfree_debugcheck(objp);
84036 c = virt_to_cache(objp);
84037@@ -4365,10 +4380,10 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
84038 }
84039 /* cpu stats */
84040 {
84041- unsigned long allochit = atomic_read(&cachep->allochit);
84042- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
84043- unsigned long freehit = atomic_read(&cachep->freehit);
84044- unsigned long freemiss = atomic_read(&cachep->freemiss);
84045+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
84046+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
84047+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
84048+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
84049
84050 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
84051 allochit, allocmiss, freehit, freemiss);
84052@@ -4600,13 +4615,71 @@ static const struct file_operations proc_slabstats_operations = {
84053 static int __init slab_proc_init(void)
84054 {
84055 #ifdef CONFIG_DEBUG_SLAB_LEAK
84056- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
84057+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
84058 #endif
84059 return 0;
84060 }
84061 module_init(slab_proc_init);
84062 #endif
84063
84064+bool is_usercopy_object(const void *ptr)
84065+{
84066+ struct page *page;
84067+ struct kmem_cache *cachep;
84068+
84069+ if (ZERO_OR_NULL_PTR(ptr))
84070+ return false;
84071+
84072+ if (!slab_is_available())
84073+ return false;
84074+
84075+ if (!virt_addr_valid(ptr))
84076+ return false;
84077+
84078+ page = virt_to_head_page(ptr);
84079+
84080+ if (!PageSlab(page))
84081+ return false;
84082+
84083+ cachep = page->slab_cache;
84084+ return cachep->flags & SLAB_USERCOPY;
84085+}
84086+
84087+#ifdef CONFIG_PAX_USERCOPY
84088+const char *check_heap_object(const void *ptr, unsigned long n)
84089+{
84090+ struct page *page;
84091+ struct kmem_cache *cachep;
84092+ struct slab *slabp;
84093+ unsigned int objnr;
84094+ unsigned long offset;
84095+
84096+ if (ZERO_OR_NULL_PTR(ptr))
84097+ return "<null>";
84098+
84099+ if (!virt_addr_valid(ptr))
84100+ return NULL;
84101+
84102+ page = virt_to_head_page(ptr);
84103+
84104+ if (!PageSlab(page))
84105+ return NULL;
84106+
84107+ cachep = page->slab_cache;
84108+ if (!(cachep->flags & SLAB_USERCOPY))
84109+ return cachep->name;
84110+
84111+ slabp = page->slab_page;
84112+ objnr = obj_to_index(cachep, slabp, ptr);
84113+ BUG_ON(objnr >= cachep->num);
84114+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
84115+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
84116+ return NULL;
84117+
84118+ return cachep->name;
84119+}
84120+#endif
84121+
84122 /**
84123 * ksize - get the actual amount of memory allocated for a given object
84124 * @objp: Pointer to the object
84125diff --git a/mm/slab.h b/mm/slab.h
84126index 34a98d6..73633d1 100644
84127--- a/mm/slab.h
84128+++ b/mm/slab.h
84129@@ -58,7 +58,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
84130
84131 /* Legal flag mask for kmem_cache_create(), for various configurations */
84132 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
84133- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
84134+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | SLAB_USERCOPY)
84135
84136 #if defined(CONFIG_DEBUG_SLAB)
84137 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
84138@@ -220,6 +220,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
84139 return s;
84140
84141 page = virt_to_head_page(x);
84142+
84143+ BUG_ON(!PageSlab(page));
84144+
84145 cachep = page->slab_cache;
84146 if (slab_equal_or_root(cachep, s))
84147 return cachep;
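
check_heap_object() above resolves ptr to the slab object containing it and rejects any copy that would step outside that object. The decisive arithmetic is the pair of comparisons on the in-object offset; a stand-alone sketch with illustrative stand-ins for the kmem_cache fields:

#include <stdbool.h>
#include <stddef.h>

/* allow copying n bytes at ptr only if the span stays inside the slab
 * object [obj, obj + object_size); the second comparison cannot
 * underflow once the first has passed */
static bool span_fits_object(const char *obj, size_t object_size,
			     const char *ptr, size_t n)
{
	size_t offset = (size_t)(ptr - obj);

	return offset <= object_size && n <= object_size - offset;
}

int main(void)
{
	char obj[64];

	/* 56 bytes starting at offset 8 fit exactly: returns 0 */
	return span_fits_object(obj, sizeof(obj), obj + 8, 56) ? 0 : 1;
}
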
84148diff --git a/mm/slab_common.c b/mm/slab_common.c
84149index 3f3cd97..93b0236 100644
84150--- a/mm/slab_common.c
84151+++ b/mm/slab_common.c
84152@@ -22,7 +22,7 @@
84153
84154 #include "slab.h"
84155
84156-enum slab_state slab_state;
84157+enum slab_state slab_state __read_only;
84158 LIST_HEAD(slab_caches);
84159 DEFINE_MUTEX(slab_mutex);
84160 struct kmem_cache *kmem_cache;
84161@@ -209,7 +209,7 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
84162
84163 err = __kmem_cache_create(s, flags);
84164 if (!err) {
84165- s->refcount = 1;
84166+ atomic_set(&s->refcount, 1);
84167 list_add(&s->list, &slab_caches);
84168 memcg_cache_list_add(memcg, s);
84169 } else {
84170@@ -255,8 +255,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
84171
84172 get_online_cpus();
84173 mutex_lock(&slab_mutex);
84174- s->refcount--;
84175- if (!s->refcount) {
84176+ if (atomic_dec_and_test(&s->refcount)) {
84177 list_del(&s->list);
84178
84179 if (!__kmem_cache_shutdown(s)) {
84180@@ -302,7 +301,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
84181 panic("Creation of kmalloc slab %s size=%zd failed. Reason %d\n",
84182 name, size, err);
84183
84184- s->refcount = -1; /* Exempt from merging for now */
84185+ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
84186 }
84187
84188 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
84189@@ -315,7 +314,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
84190
84191 create_boot_cache(s, name, size, flags);
84192 list_add(&s->list, &slab_caches);
84193- s->refcount = 1;
84194+ atomic_set(&s->refcount, 1);
84195 return s;
84196 }
84197
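
Converting s->refcount to an atomic_t lets kmem_cache_destroy() use atomic_dec_and_test(), which returns true for exactly one caller: the one whose decrement reached zero, so only one thread performs the teardown under concurrent destroys. The C11 equivalent of that primitive:

#include <stdatomic.h>
#include <stdbool.h>

/* atomic_fetch_sub returns the previous value, so exactly one caller
 * observes the transition 1 -> 0 and wins the right to tear down */
static bool dec_and_test(atomic_int *refcount)
{
	return atomic_fetch_sub(refcount, 1) == 1;
}

int main(void)
{
	atomic_int ref = 2;

	(void)dec_and_test(&ref);		/* 2 -> 1: false */
	return dec_and_test(&ref) ? 0 : 1;	/* 1 -> 0: true */
}
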
84198diff --git a/mm/slob.c b/mm/slob.c
84199index a99fdf7..6ee34ec 100644
84200--- a/mm/slob.c
84201+++ b/mm/slob.c
84202@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
84203 /*
84204 * Return the size of a slob block.
84205 */
84206-static slobidx_t slob_units(slob_t *s)
84207+static slobidx_t slob_units(const slob_t *s)
84208 {
84209 if (s->units > 0)
84210 return s->units;
84211@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
84212 /*
84213 * Return the next free slob block pointer after this one.
84214 */
84215-static slob_t *slob_next(slob_t *s)
84216+static slob_t *slob_next(const slob_t *s)
84217 {
84218 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
84219 slobidx_t next;
84220@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
84221 /*
84222 * Returns true if s is the last free block in its page.
84223 */
84224-static int slob_last(slob_t *s)
84225+static int slob_last(const slob_t *s)
84226 {
84227 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
84228 }
84229
84230-static void *slob_new_pages(gfp_t gfp, int order, int node)
84231+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
84232 {
84233- void *page;
84234+ struct page *page;
84235
84236 #ifdef CONFIG_NUMA
84237 if (node != NUMA_NO_NODE)
84238@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
84239 if (!page)
84240 return NULL;
84241
84242- return page_address(page);
84243+ __SetPageSlab(page);
84244+ return page;
84245 }
84246
84247-static void slob_free_pages(void *b, int order)
84248+static void slob_free_pages(struct page *sp, int order)
84249 {
84250 if (current->reclaim_state)
84251 current->reclaim_state->reclaimed_slab += 1 << order;
84252- free_pages((unsigned long)b, order);
84253+ __ClearPageSlab(sp);
84254+ reset_page_mapcount(sp);
84255+ sp->private = 0;
84256+ __free_pages(sp, order);
84257 }
84258
84259 /*
84260@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
84261
84262 /* Not enough space: must allocate a new page */
84263 if (!b) {
84264- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
84265- if (!b)
84266+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
84267+ if (!sp)
84268 return NULL;
84269- sp = virt_to_page(b);
84270- __SetPageSlab(sp);
84271+ b = page_address(sp);
84272
84273 spin_lock_irqsave(&slob_lock, flags);
84274 sp->units = SLOB_UNITS(PAGE_SIZE);
84275 sp->freelist = b;
84276+ sp->private = 0;
84277 INIT_LIST_HEAD(&sp->list);
84278 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
84279 set_slob_page_free(sp, slob_list);
84280@@ -359,9 +363,7 @@ static void slob_free(void *block, int size)
84281 if (slob_page_free(sp))
84282 clear_slob_page_free(sp);
84283 spin_unlock_irqrestore(&slob_lock, flags);
84284- __ClearPageSlab(sp);
84285- reset_page_mapcount(sp);
84286- slob_free_pages(b, 0);
84287+ slob_free_pages(sp, 0);
84288 return;
84289 }
84290
84291@@ -424,11 +426,10 @@ out:
84292 */
84293
84294 static __always_inline void *
84295-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
84296+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
84297 {
84298- unsigned int *m;
84299- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
84300- void *ret;
84301+ slob_t *m;
84302+ void *ret = NULL;
84303
84304 gfp &= gfp_allowed_mask;
84305
84306@@ -442,23 +443,41 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
84307
84308 if (!m)
84309 return NULL;
84310- *m = size;
84311+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
84312+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
84313+ m[0].units = size;
84314+ m[1].units = align;
84315 ret = (void *)m + align;
84316
84317 trace_kmalloc_node(caller, ret,
84318 size, size + align, gfp, node);
84319 } else {
84320 unsigned int order = get_order(size);
84321+ struct page *page;
84322
84323 if (likely(order))
84324 gfp |= __GFP_COMP;
84325- ret = slob_new_pages(gfp, order, node);
84326+ page = slob_new_pages(gfp, order, node);
84327+ if (page) {
84328+ ret = page_address(page);
84329+ page->private = size;
84330+ }
84331
84332 trace_kmalloc_node(caller, ret,
84333 size, PAGE_SIZE << order, gfp, node);
84334 }
84335
84336- kmemleak_alloc(ret, size, 1, gfp);
84337+ return ret;
84338+}
84339+
84340+static __always_inline void *
84341+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
84342+{
84343+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
84344+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
84345+
84346+ if (!ZERO_OR_NULL_PTR(ret))
84347+ kmemleak_alloc(ret, size, 1, gfp);
84348 return ret;
84349 }
84350
84351@@ -493,34 +512,112 @@ void kfree(const void *block)
84352 return;
84353 kmemleak_free(block);
84354
84355+ VM_BUG_ON(!virt_addr_valid(block));
84356 sp = virt_to_page(block);
84357- if (PageSlab(sp)) {
84358+ VM_BUG_ON(!PageSlab(sp));
84359+ if (!sp->private) {
84360 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
84361- unsigned int *m = (unsigned int *)(block - align);
84362- slob_free(m, *m + align);
84363- } else
84364+ slob_t *m = (slob_t *)(block - align);
84365+ slob_free(m, m[0].units + align);
84366+ } else {
84367+ __ClearPageSlab(sp);
84368+ reset_page_mapcount(sp);
84369+ sp->private = 0;
84370 __free_pages(sp, compound_order(sp));
84371+ }
84372 }
84373 EXPORT_SYMBOL(kfree);
84374
84375+bool is_usercopy_object(const void *ptr)
84376+{
84377+ if (!slab_is_available())
84378+ return false;
84379+
84380+	/* PAX: TODO */
84381+
84382+ return false;
84383+}
84384+
84385+#ifdef CONFIG_PAX_USERCOPY
84386+const char *check_heap_object(const void *ptr, unsigned long n)
84387+{
84388+ struct page *page;
84389+ const slob_t *free;
84390+ const void *base;
84391+ unsigned long flags;
84392+
84393+ if (ZERO_OR_NULL_PTR(ptr))
84394+ return "<null>";
84395+
84396+ if (!virt_addr_valid(ptr))
84397+ return NULL;
84398+
84399+ page = virt_to_head_page(ptr);
84400+ if (!PageSlab(page))
84401+ return NULL;
84402+
84403+ if (page->private) {
84404+ base = page;
84405+ if (base <= ptr && n <= page->private - (ptr - base))
84406+ return NULL;
84407+ return "<slob>";
84408+ }
84409+
84410+	/* tricky double walk of free and allocated blocks to find ptr's chunk */
84411+ spin_lock_irqsave(&slob_lock, flags);
84412+ base = (void *)((unsigned long)ptr & PAGE_MASK);
84413+ free = page->freelist;
84414+
84415+ while (!slob_last(free) && (void *)free <= ptr) {
84416+ base = free + slob_units(free);
84417+ free = slob_next(free);
84418+ }
84419+
84420+ while (base < (void *)free) {
84421+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
84422+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
84423+ int offset;
84424+
84425+ if (ptr < base + align)
84426+ break;
84427+
84428+ offset = ptr - base - align;
84429+ if (offset >= m) {
84430+ base += size;
84431+ continue;
84432+ }
84433+
84434+ if (n > m - offset)
84435+ break;
84436+
84437+ spin_unlock_irqrestore(&slob_lock, flags);
84438+ return NULL;
84439+ }
84440+
84441+ spin_unlock_irqrestore(&slob_lock, flags);
84442+ return "<slob>";
84443+}
84444+#endif
84445+
84446 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
84447 size_t ksize(const void *block)
84448 {
84449 struct page *sp;
84450 int align;
84451- unsigned int *m;
84452+ slob_t *m;
84453
84454 BUG_ON(!block);
84455 if (unlikely(block == ZERO_SIZE_PTR))
84456 return 0;
84457
84458 sp = virt_to_page(block);
84459- if (unlikely(!PageSlab(sp)))
84460- return PAGE_SIZE << compound_order(sp);
84461+ VM_BUG_ON(!PageSlab(sp));
84462+ if (sp->private)
84463+ return sp->private;
84464
84465 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
84466- m = (unsigned int *)(block - align);
84467- return SLOB_UNITS(*m) * SLOB_UNIT;
84468+ m = (slob_t *)(block - align);
84469+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
84470 }
84471 EXPORT_SYMBOL(ksize);
84472
84473@@ -536,23 +633,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
84474
84475 void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
84476 {
84477- void *b;
84478+ void *b = NULL;
84479
84480 flags &= gfp_allowed_mask;
84481
84482 lockdep_trace_alloc(flags);
84483
84484+#ifdef CONFIG_PAX_USERCOPY_SLABS
84485+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
84486+#else
84487 if (c->size < PAGE_SIZE) {
84488 b = slob_alloc(c->size, flags, c->align, node);
84489 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
84490 SLOB_UNITS(c->size) * SLOB_UNIT,
84491 flags, node);
84492 } else {
84493- b = slob_new_pages(flags, get_order(c->size), node);
84494+ struct page *sp;
84495+
84496+ sp = slob_new_pages(flags, get_order(c->size), node);
84497+ if (sp) {
84498+ b = page_address(sp);
84499+ sp->private = c->size;
84500+ }
84501 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
84502 PAGE_SIZE << get_order(c->size),
84503 flags, node);
84504 }
84505+#endif
84506
84507 if (c->ctor)
84508 c->ctor(b);
84509@@ -564,10 +671,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
84510
84511 static void __kmem_cache_free(void *b, int size)
84512 {
84513- if (size < PAGE_SIZE)
84514+ struct page *sp;
84515+
84516+ sp = virt_to_page(b);
84517+ BUG_ON(!PageSlab(sp));
84518+ if (!sp->private)
84519 slob_free(b, size);
84520 else
84521- slob_free_pages(b, get_order(size));
84522+ slob_free_pages(sp, get_order(size));
84523 }
84524
84525 static void kmem_rcu_free(struct rcu_head *head)
84526@@ -580,17 +691,31 @@ static void kmem_rcu_free(struct rcu_head *head)
84527
84528 void kmem_cache_free(struct kmem_cache *c, void *b)
84529 {
84530+ int size = c->size;
84531+
84532+#ifdef CONFIG_PAX_USERCOPY_SLABS
84533+ if (size + c->align < PAGE_SIZE) {
84534+ size += c->align;
84535+ b -= c->align;
84536+ }
84537+#endif
84538+
84539 kmemleak_free_recursive(b, c->flags);
84540 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
84541 struct slob_rcu *slob_rcu;
84542- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
84543- slob_rcu->size = c->size;
84544+ slob_rcu = b + (size - sizeof(struct slob_rcu));
84545+ slob_rcu->size = size;
84546 call_rcu(&slob_rcu->head, kmem_rcu_free);
84547 } else {
84548- __kmem_cache_free(b, c->size);
84549+ __kmem_cache_free(b, size);
84550 }
84551
84552+#ifdef CONFIG_PAX_USERCOPY_SLABS
84553+ trace_kfree(_RET_IP_, b);
84554+#else
84555 trace_kmem_cache_free(_RET_IP_, b);
84556+#endif
84557+
84558 }
84559 EXPORT_SYMBOL(kmem_cache_free);
84560
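The mm/slob.c hunks above make large kmalloc allocations record their size in page->private and add a check_heap_object() that walks the SLOB freelist to decide whether a usercopy of n bytes at ptr stays inside one live chunk. Below is a userspace-only model of the final in-chunk bounds test; struct chunk, the header layout and the 16-byte alignment are simplifying assumptions, not the kernel's slob_t definitions, and the freelist walk itself is elided.

/* slob_usercopy_model.c — hedged model of the PAX_USERCOPY bounds test. */
#include <stdio.h>
#include <stddef.h>

/* A "chunk": page base, header/alignment padding, and payload size. */
struct chunk { char *base; size_t align; size_t size; };

/* NULL means the copy is allowed; otherwise a reason string, mirroring
 * check_heap_object()'s contract in the hunk above. */
static const char *check_object(const struct chunk *c, const char *ptr, size_t n)
{
    const char *payload = c->base + c->align;

    if (ptr < payload)
        return "<underflow into header>";
    if ((size_t)(ptr - payload) >= c->size)
        return "<past end of object>";
    if (n > c->size - (size_t)(ptr - payload))
        return "<copy would overrun object>";
    return NULL;
}

int main(void)
{
    char page[4096];
    struct chunk c = { page, 16, 256 };
    const char *r;

    r = check_object(&c, page + 16, 256); printf("%s\n", r ? r : "ok");
    r = check_object(&c, page + 24, 256); printf("%s\n", r ? r : "ok");
    r = check_object(&c, page + 8, 8);    printf("%s\n", r ? r : "ok");
    return 0;
}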
84561diff --git a/mm/slub.c b/mm/slub.c
84562index ba2ca53..991c4f7 100644
84563--- a/mm/slub.c
84564+++ b/mm/slub.c
84565@@ -197,7 +197,7 @@ struct track {
84566
84567 enum track_item { TRACK_ALLOC, TRACK_FREE };
84568
84569-#ifdef CONFIG_SYSFS
84570+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
84571 static int sysfs_slab_add(struct kmem_cache *);
84572 static int sysfs_slab_alias(struct kmem_cache *, const char *);
84573 static void sysfs_slab_remove(struct kmem_cache *);
84574@@ -518,7 +518,7 @@ static void print_track(const char *s, struct track *t)
84575 if (!t->addr)
84576 return;
84577
84578- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
84579+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
84580 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
84581 #ifdef CONFIG_STACKTRACE
84582 {
84583@@ -2653,7 +2653,7 @@ static int slub_min_objects;
84584 * Merge control. If this is set then no merging of slab caches will occur.
84585 * (Could be removed. This was introduced to pacify the merge skeptics.)
84586 */
84587-static int slub_nomerge;
84588+static int slub_nomerge = 1;
84589
84590 /*
84591 * Calculate the order of allocation given an slab object size.
84592@@ -3181,6 +3181,10 @@ EXPORT_SYMBOL(kmalloc_caches);
84593 static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
84594 #endif
84595
84596+#ifdef CONFIG_PAX_USERCOPY_SLABS
84597+static struct kmem_cache *kmalloc_usercopy_caches[SLUB_PAGE_SHIFT];
84598+#endif
84599+
84600 static int __init setup_slub_min_order(char *str)
84601 {
84602 get_option(&str, &slub_min_order);
84603@@ -3272,6 +3276,13 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
84604 return kmalloc_dma_caches[index];
84605
84606 #endif
84607+
84608+#ifdef CONFIG_PAX_USERCOPY_SLABS
84609+ if (flags & SLAB_USERCOPY)
84610+ return kmalloc_usercopy_caches[index];
84611+
84612+#endif
84613+
84614 return kmalloc_caches[index];
84615 }
84616
84617@@ -3340,6 +3351,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
84618 EXPORT_SYMBOL(__kmalloc_node);
84619 #endif
84620
84621+bool is_usercopy_object(const void *ptr)
84622+{
84623+ struct page *page;
84624+ struct kmem_cache *s;
84625+
84626+ if (ZERO_OR_NULL_PTR(ptr))
84627+ return false;
84628+
84629+ if (!slab_is_available())
84630+ return false;
84631+
84632+ if (!virt_addr_valid(ptr))
84633+ return false;
84634+
84635+ page = virt_to_head_page(ptr);
84636+
84637+ if (!PageSlab(page))
84638+ return false;
84639+
84640+ s = page->slab_cache;
84641+ return s->flags & SLAB_USERCOPY;
84642+}
84643+
84644+#ifdef CONFIG_PAX_USERCOPY
84645+const char *check_heap_object(const void *ptr, unsigned long n)
84646+{
84647+ struct page *page;
84648+ struct kmem_cache *s;
84649+ unsigned long offset;
84650+
84651+ if (ZERO_OR_NULL_PTR(ptr))
84652+ return "<null>";
84653+
84654+ if (!virt_addr_valid(ptr))
84655+ return NULL;
84656+
84657+ page = virt_to_head_page(ptr);
84658+
84659+ if (!PageSlab(page))
84660+ return NULL;
84661+
84662+ s = page->slab_cache;
84663+ if (!(s->flags & SLAB_USERCOPY))
84664+ return s->name;
84665+
84666+ offset = (ptr - page_address(page)) % s->size;
84667+ if (offset <= s->object_size && n <= s->object_size - offset)
84668+ return NULL;
84669+
84670+ return s->name;
84671+}
84672+#endif
84673+
84674 size_t ksize(const void *object)
84675 {
84676 struct page *page;
84677@@ -3404,6 +3468,7 @@ void kfree(const void *x)
84678 if (unlikely(ZERO_OR_NULL_PTR(x)))
84679 return;
84680
84681+ VM_BUG_ON(!virt_addr_valid(x));
84682 page = virt_to_head_page(x);
84683 if (unlikely(!PageSlab(page))) {
84684 BUG_ON(!PageCompound(page));
84685@@ -3712,17 +3777,17 @@ void __init kmem_cache_init(void)
84686
84687 /* Caches that are not of the two-to-the-power-of size */
84688 if (KMALLOC_MIN_SIZE <= 32) {
84689- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
84690+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
84691 caches++;
84692 }
84693
84694 if (KMALLOC_MIN_SIZE <= 64) {
84695- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
84696+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
84697 caches++;
84698 }
84699
84700 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
84701- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
84702+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
84703 caches++;
84704 }
84705
84706@@ -3764,6 +3829,22 @@ void __init kmem_cache_init(void)
84707 }
84708 }
84709 #endif
84710+
84711+#ifdef CONFIG_PAX_USERCOPY_SLABS
84712+ for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
84713+ struct kmem_cache *s = kmalloc_caches[i];
84714+
84715+ if (s && s->size) {
84716+ char *name = kasprintf(GFP_NOWAIT,
84717+ "usercopy-kmalloc-%d", s->object_size);
84718+
84719+ BUG_ON(!name);
84720+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(name,
84721+ s->object_size, SLAB_USERCOPY);
84722+ }
84723+ }
84724+#endif
84725+
84726 printk(KERN_INFO
84727 "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
84728 " CPUs=%d, Nodes=%d\n",
84729@@ -3790,7 +3871,7 @@ static int slab_unmergeable(struct kmem_cache *s)
84730 /*
84731 * We may have set a slab to be unmergeable during bootstrap.
84732 */
84733- if (s->refcount < 0)
84734+ if (atomic_read(&s->refcount) < 0)
84735 return 1;
84736
84737 return 0;
84738@@ -3848,7 +3929,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
84739
84740 s = find_mergeable(memcg, size, align, flags, name, ctor);
84741 if (s) {
84742- s->refcount++;
84743+ atomic_inc(&s->refcount);
84744 /*
84745 * Adjust the object sizes so that we clear
84746 * the complete object on kzalloc.
84747@@ -3857,7 +3938,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
84748 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
84749
84750 if (sysfs_slab_alias(s, name)) {
84751- s->refcount--;
84752+ atomic_dec(&s->refcount);
84753 s = NULL;
84754 }
84755 }
84756@@ -3919,7 +4000,7 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
84757 return NOTIFY_OK;
84758 }
84759
84760-static struct notifier_block __cpuinitdata slab_notifier = {
84761+static struct notifier_block slab_notifier = {
84762 .notifier_call = slab_cpuup_callback
84763 };
84764
84765@@ -3977,7 +4058,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
84766 }
84767 #endif
84768
84769-#ifdef CONFIG_SYSFS
84770+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
84771 static int count_inuse(struct page *page)
84772 {
84773 return page->inuse;
84774@@ -4364,12 +4445,12 @@ static void resiliency_test(void)
84775 validate_slab_cache(kmalloc_caches[9]);
84776 }
84777 #else
84778-#ifdef CONFIG_SYSFS
84779+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
84780 static void resiliency_test(void) {};
84781 #endif
84782 #endif
84783
84784-#ifdef CONFIG_SYSFS
84785+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
84786 enum slab_stat_type {
84787 SL_ALL, /* All slabs */
84788 SL_PARTIAL, /* Only partially allocated slabs */
84789@@ -4613,7 +4694,7 @@ SLAB_ATTR_RO(ctor);
84790
84791 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
84792 {
84793- return sprintf(buf, "%d\n", s->refcount - 1);
84794+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
84795 }
84796 SLAB_ATTR_RO(aliases);
84797
84798@@ -5266,6 +5347,7 @@ static char *create_unique_id(struct kmem_cache *s)
84799 return name;
84800 }
84801
84802+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
84803 static int sysfs_slab_add(struct kmem_cache *s)
84804 {
84805 int err;
84806@@ -5323,6 +5405,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
84807 kobject_del(&s->kobj);
84808 kobject_put(&s->kobj);
84809 }
84810+#endif
84811
84812 /*
84813 * Need to buffer aliases during bootup until sysfs becomes
84814@@ -5336,6 +5419,7 @@ struct saved_alias {
84815
84816 static struct saved_alias *alias_list;
84817
84818+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
84819 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
84820 {
84821 struct saved_alias *al;
84822@@ -5358,6 +5442,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
84823 alias_list = al;
84824 return 0;
84825 }
84826+#endif
84827
84828 static int __init slab_sysfs_init(void)
84829 {
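The SLUB variant of check_heap_object() added above is simpler than SLOB's: objects are packed every s->size bytes, so the copy is legal exactly when its offset within the slot and its length both fit in the first object_size usable bytes. A userspace sketch of that arithmetic, using made-up cache parameters:

/* slub_usercopy_model.c — the modulo/offset test from the hunk above. */
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct cache { size_t size; size_t object_size; }; /* slot stride vs. usable bytes */

static bool copy_allowed(const struct cache *s, size_t ptr_off, size_t n)
{
    size_t offset = ptr_off % s->size;  /* offset inside this object's slot */
    /* Same shape as the kernel check: offset <= object_size, n fits after it. */
    return offset <= s->object_size && n <= s->object_size - offset;
}

int main(void)
{
    struct cache kmalloc_192 = { .size = 192, .object_size = 192 };

    printf("%d\n", copy_allowed(&kmalloc_192, 0, 192));   /* 1: whole object   */
    printf("%d\n", copy_allowed(&kmalloc_192, 64, 192));  /* 0: spans two slots */
    printf("%d\n", copy_allowed(&kmalloc_192, 384, 100)); /* 1: third object    */
    return 0;
}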
84830diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
84831index 1b7e22a..3fcd4f3 100644
84832--- a/mm/sparse-vmemmap.c
84833+++ b/mm/sparse-vmemmap.c
84834@@ -128,7 +128,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
84835 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
84836 if (!p)
84837 return NULL;
84838- pud_populate(&init_mm, pud, p);
84839+ pud_populate_kernel(&init_mm, pud, p);
84840 }
84841 return pud;
84842 }
84843@@ -140,7 +140,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
84844 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
84845 if (!p)
84846 return NULL;
84847- pgd_populate(&init_mm, pgd, p);
84848+ pgd_populate_kernel(&init_mm, pgd, p);
84849 }
84850 return pgd;
84851 }
84852diff --git a/mm/sparse.c b/mm/sparse.c
84853index 6b5fb76..db0c190 100644
84854--- a/mm/sparse.c
84855+++ b/mm/sparse.c
84856@@ -782,7 +782,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
84857
84858 for (i = 0; i < PAGES_PER_SECTION; i++) {
84859 if (PageHWPoison(&memmap[i])) {
84860- atomic_long_sub(1, &mce_bad_pages);
84861+ atomic_long_sub_unchecked(1, &mce_bad_pages);
84862 ClearPageHWPoison(&memmap[i]);
84863 }
84864 }
84865diff --git a/mm/swap.c b/mm/swap.c
84866index 6310dc2..3662b3f 100644
84867--- a/mm/swap.c
84868+++ b/mm/swap.c
84869@@ -30,6 +30,7 @@
84870 #include <linux/backing-dev.h>
84871 #include <linux/memcontrol.h>
84872 #include <linux/gfp.h>
84873+#include <linux/hugetlb.h>
84874
84875 #include "internal.h"
84876
84877@@ -72,6 +73,8 @@ static void __put_compound_page(struct page *page)
84878
84879 __page_cache_release(page);
84880 dtor = get_compound_page_dtor(page);
84881+ if (!PageHuge(page))
84882+ BUG_ON(dtor != free_compound_page);
84883 (*dtor)(page);
84884 }
84885
84886diff --git a/mm/swapfile.c b/mm/swapfile.c
84887index e97a0e5..b50e796 100644
84888--- a/mm/swapfile.c
84889+++ b/mm/swapfile.c
84890@@ -64,7 +64,7 @@ static DEFINE_MUTEX(swapon_mutex);
84891
84892 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
84893 /* Activity counter to indicate that a swapon or swapoff has occurred */
84894-static atomic_t proc_poll_event = ATOMIC_INIT(0);
84895+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
84896
84897 static inline unsigned char swap_count(unsigned char ent)
84898 {
84899@@ -1608,7 +1608,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
84900 }
84901 filp_close(swap_file, NULL);
84902 err = 0;
84903- atomic_inc(&proc_poll_event);
84904+ atomic_inc_unchecked(&proc_poll_event);
84905 wake_up_interruptible(&proc_poll_wait);
84906
84907 out_dput:
84908@@ -1625,8 +1625,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
84909
84910 poll_wait(file, &proc_poll_wait, wait);
84911
84912- if (seq->poll_event != atomic_read(&proc_poll_event)) {
84913- seq->poll_event = atomic_read(&proc_poll_event);
84914+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
84915+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
84916 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
84917 }
84918
84919@@ -1724,7 +1724,7 @@ static int swaps_open(struct inode *inode, struct file *file)
84920 return ret;
84921
84922 seq = file->private_data;
84923- seq->poll_event = atomic_read(&proc_poll_event);
84924+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
84925 return 0;
84926 }
84927
84928@@ -2066,7 +2066,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
84929 (frontswap_map) ? "FS" : "");
84930
84931 mutex_unlock(&swapon_mutex);
84932- atomic_inc(&proc_poll_event);
84933+ atomic_inc_unchecked(&proc_poll_event);
84934 wake_up_interruptible(&proc_poll_wait);
84935
84936 if (S_ISREG(inode->i_mode))
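Most atomic_* to atomic_*_unchecked conversions in this patch (proc_poll_event above, the vm_stat and batman-adv sequence counters elsewhere) exist because PaX's REFCOUNT feature instruments plain atomic_t to detect overflow; pure event and statistics counters, where wrap-around is harmless, opt out via the unchecked variants. A rough userspace analogy — assuming a refuse-and-report policy for the checked type, where the real implementation traps:

/* refcount_model.c — why stat counters are opted out of overflow checks. */
#include <stdio.h>
#include <limits.h>

typedef struct { int v; } atomic_checked_t;            /* refcounts          */
typedef struct { unsigned int v; } atomic_unchecked_t; /* event/stat counters */

static void checked_inc(atomic_checked_t *a)
{
    if (a->v == INT_MAX) {                  /* about to overflow: report */
        fprintf(stderr, "refcount overflow detected\n");
        return;
    }
    a->v++;
}

static void unchecked_inc(atomic_unchecked_t *a)
{
    a->v++;                                 /* unsigned wrap is well-defined */
}

int main(void)
{
    atomic_checked_t ref = { INT_MAX };
    atomic_unchecked_t stat = { UINT_MAX };

    checked_inc(&ref);                      /* caught                     */
    unchecked_inc(&stat);                   /* wraps to 0, and that's fine */
    printf("ref=%d stat=%u\n", ref.v, stat.v);
    return 0;
}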
84937diff --git a/mm/util.c b/mm/util.c
84938index c55e26b..3f913a9 100644
84939--- a/mm/util.c
84940+++ b/mm/util.c
84941@@ -292,6 +292,12 @@ done:
84942 void arch_pick_mmap_layout(struct mm_struct *mm)
84943 {
84944 mm->mmap_base = TASK_UNMAPPED_BASE;
84945+
84946+#ifdef CONFIG_PAX_RANDMMAP
84947+ if (mm->pax_flags & MF_PAX_RANDMMAP)
84948+ mm->mmap_base += mm->delta_mmap;
84949+#endif
84950+
84951 mm->get_unmapped_area = arch_get_unmapped_area;
84952 mm->unmap_area = arch_unmap_area;
84953 }
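The mm/util.c hunk shifts the bottom-up mmap base by a per-process random delta when RANDMMAP is active. A toy sketch of how such a base might be derived — TASK_UNMAPPED_BASE, the 16-bit entropy width and the getrandom() usage are illustrative assumptions, not the kernel's actual exec-time computation of delta_mmap:

/* randmmap_sketch.c — toy model of a randomized mmap base. */
#include <stdio.h>
#include <stdint.h>
#include <sys/random.h>

#define PAGE_SHIFT          12
#define TASK_UNMAPPED_BASE  0x40000000UL
#define RAND_BITS           16             /* assumed entropy width */

int main(void)
{
    uint32_t r = 0;
    if (getrandom(&r, sizeof(r), 0) != sizeof(r))
        return 1;

    /* delta_mmap analogue: a random page count within the entropy width */
    unsigned long delta = ((unsigned long)r & ((1UL << RAND_BITS) - 1))
                          << PAGE_SHIFT;
    printf("mmap_base = %#lx\n", TASK_UNMAPPED_BASE + delta);
    return 0;
}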
84954diff --git a/mm/vmalloc.c b/mm/vmalloc.c
84955index 5123a16..f234a48 100644
84956--- a/mm/vmalloc.c
84957+++ b/mm/vmalloc.c
84958@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
84959
84960 pte = pte_offset_kernel(pmd, addr);
84961 do {
84962- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
84963- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
84964+
84965+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
84966+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
84967+ BUG_ON(!pte_exec(*pte));
84968+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
84969+ continue;
84970+ }
84971+#endif
84972+
84973+ {
84974+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
84975+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
84976+ }
84977 } while (pte++, addr += PAGE_SIZE, addr != end);
84978 }
84979
84980@@ -100,16 +111,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
84981 pte = pte_alloc_kernel(pmd, addr);
84982 if (!pte)
84983 return -ENOMEM;
84984+
84985+ pax_open_kernel();
84986 do {
84987 struct page *page = pages[*nr];
84988
84989- if (WARN_ON(!pte_none(*pte)))
84990+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
84991+ if (pgprot_val(prot) & _PAGE_NX)
84992+#endif
84993+
84994+ if (!pte_none(*pte)) {
84995+ pax_close_kernel();
84996+ WARN_ON(1);
84997 return -EBUSY;
84998- if (WARN_ON(!page))
84999+ }
85000+ if (!page) {
85001+ pax_close_kernel();
85002+ WARN_ON(1);
85003 return -ENOMEM;
85004+ }
85005 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
85006 (*nr)++;
85007 } while (pte++, addr += PAGE_SIZE, addr != end);
85008+ pax_close_kernel();
85009 return 0;
85010 }
85011
85012@@ -119,7 +143,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
85013 pmd_t *pmd;
85014 unsigned long next;
85015
85016- pmd = pmd_alloc(&init_mm, pud, addr);
85017+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
85018 if (!pmd)
85019 return -ENOMEM;
85020 do {
85021@@ -136,7 +160,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
85022 pud_t *pud;
85023 unsigned long next;
85024
85025- pud = pud_alloc(&init_mm, pgd, addr);
85026+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
85027 if (!pud)
85028 return -ENOMEM;
85029 do {
85030@@ -191,11 +215,20 @@ int is_vmalloc_or_module_addr(const void *x)
85031 * and fall back on vmalloc() if that fails. Others
85032 * just put it in the vmalloc space.
85033 */
85034-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
85035+#ifdef CONFIG_MODULES
85036+#ifdef MODULES_VADDR
85037 unsigned long addr = (unsigned long)x;
85038 if (addr >= MODULES_VADDR && addr < MODULES_END)
85039 return 1;
85040 #endif
85041+
85042+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
85043+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
85044+ return 1;
85045+#endif
85046+
85047+#endif
85048+
85049 return is_vmalloc_addr(x);
85050 }
85051
85052@@ -216,8 +249,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
85053
85054 if (!pgd_none(*pgd)) {
85055 pud_t *pud = pud_offset(pgd, addr);
85056+#ifdef CONFIG_X86
85057+ if (!pud_large(*pud))
85058+#endif
85059 if (!pud_none(*pud)) {
85060 pmd_t *pmd = pmd_offset(pud, addr);
85061+#ifdef CONFIG_X86
85062+ if (!pmd_large(*pmd))
85063+#endif
85064 if (!pmd_none(*pmd)) {
85065 pte_t *ptep, pte;
85066
85067@@ -329,7 +368,7 @@ static void purge_vmap_area_lazy(void);
85068 * Allocate a region of KVA of the specified size and alignment, within the
85069 * vstart and vend.
85070 */
85071-static struct vmap_area *alloc_vmap_area(unsigned long size,
85072+static __size_overflow(1) struct vmap_area *alloc_vmap_area(unsigned long size,
85073 unsigned long align,
85074 unsigned long vstart, unsigned long vend,
85075 int node, gfp_t gfp_mask)
85076@@ -1328,6 +1367,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
85077 struct vm_struct *area;
85078
85079 BUG_ON(in_interrupt());
85080+
85081+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
85082+ if (flags & VM_KERNEXEC) {
85083+ if (start != VMALLOC_START || end != VMALLOC_END)
85084+ return NULL;
85085+ start = (unsigned long)MODULES_EXEC_VADDR;
85086+ end = (unsigned long)MODULES_EXEC_END;
85087+ }
85088+#endif
85089+
85090 if (flags & VM_IOREMAP) {
85091 int bit = fls(size);
85092
85093@@ -1568,6 +1617,11 @@ void *vmap(struct page **pages, unsigned int count,
85094 if (count > totalram_pages)
85095 return NULL;
85096
85097+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
85098+ if (!(pgprot_val(prot) & _PAGE_NX))
85099+ flags |= VM_KERNEXEC;
85100+#endif
85101+
85102 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
85103 __builtin_return_address(0));
85104 if (!area)
85105@@ -1669,6 +1723,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
85106 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
85107 goto fail;
85108
85109+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
85110+ if (!(pgprot_val(prot) & _PAGE_NX))
85111+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
85112+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
85113+ else
85114+#endif
85115+
85116 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
85117 start, end, node, gfp_mask, caller);
85118 if (!area)
85119@@ -1842,10 +1903,9 @@ EXPORT_SYMBOL(vzalloc_node);
85120 * For tight control over page level allocator and protection flags
85121 * use __vmalloc() instead.
85122 */
85123-
85124 void *vmalloc_exec(unsigned long size)
85125 {
85126- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
85127+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
85128 -1, __builtin_return_address(0));
85129 }
85130
85131@@ -2136,6 +2196,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
85132 unsigned long uaddr = vma->vm_start;
85133 unsigned long usize = vma->vm_end - vma->vm_start;
85134
85135+ BUG_ON(vma->vm_mirror);
85136+
85137 if ((PAGE_SIZE-1) & (unsigned long)addr)
85138 return -EINVAL;
85139
85140@@ -2575,7 +2637,11 @@ static int s_show(struct seq_file *m, void *p)
85141 v->addr, v->addr + v->size, v->size);
85142
85143 if (v->caller)
85144+#ifdef CONFIG_GRKERNSEC_HIDESYM
85145+ seq_printf(m, " %pK", v->caller);
85146+#else
85147 seq_printf(m, " %pS", v->caller);
85148+#endif
85149
85150 if (v->nr_pages)
85151 seq_printf(m, " pages=%d", v->nr_pages);
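The mm/vmalloc.c hunks above route executable mappings (prot without _PAGE_NX) into a dedicated MODULES_EXEC_VADDR..MODULES_EXEC_END window via VM_KERNEXEC, so the rest of vmalloc space can stay permanently non-executable. A minimal sketch of that routing decision — the window bounds below are placeholders, not real kernel addresses:

/* kernexec_route_sketch.c — exec requests get a dedicated VA window. */
#include <stdio.h>
#include <stdbool.h>

#define VMALLOC_START       0xc0000000UL
#define VMALLOC_END         0xf0000000UL
#define MODULES_EXEC_VADDR  0xf0000000UL   /* assumed exec-only window */
#define MODULES_EXEC_END    0xf8000000UL

struct range { unsigned long start, end; };

static struct range pick_range(bool want_exec)
{
    /* Executable mappings come only from the module window, mirroring
     * the VM_KERNEXEC clamp in __get_vm_area_node() above. */
    if (want_exec)
        return (struct range){ MODULES_EXEC_VADDR, MODULES_EXEC_END };
    return (struct range){ VMALLOC_START, VMALLOC_END };
}

int main(void)
{
    struct range r = pick_range(true);
    printf("exec alloc window: %#lx-%#lx\n", r.start, r.end);
    return 0;
}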
85152diff --git a/mm/vmstat.c b/mm/vmstat.c
85153index 9800306..76b4b27 100644
85154--- a/mm/vmstat.c
85155+++ b/mm/vmstat.c
85156@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
85157 *
85158 * vm_stat contains the global counters
85159 */
85160-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
85161+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
85162 EXPORT_SYMBOL(vm_stat);
85163
85164 #ifdef CONFIG_SMP
85165@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
85166 v = p->vm_stat_diff[i];
85167 p->vm_stat_diff[i] = 0;
85168 local_irq_restore(flags);
85169- atomic_long_add(v, &zone->vm_stat[i]);
85170+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
85171 global_diff[i] += v;
85172 #ifdef CONFIG_NUMA
85173 /* 3 seconds idle till flush */
85174@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
85175
85176 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
85177 if (global_diff[i])
85178- atomic_long_add(global_diff[i], &vm_stat[i]);
85179+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
85180 }
85181
85182 void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
85183@@ -503,8 +503,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
85184 if (pset->vm_stat_diff[i]) {
85185 int v = pset->vm_stat_diff[i];
85186 pset->vm_stat_diff[i] = 0;
85187- atomic_long_add(v, &zone->vm_stat[i]);
85188- atomic_long_add(v, &vm_stat[i]);
85189+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
85190+ atomic_long_add_unchecked(v, &vm_stat[i]);
85191 }
85192 }
85193 #endif
85194@@ -1223,7 +1223,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
85195 return NOTIFY_OK;
85196 }
85197
85198-static struct notifier_block __cpuinitdata vmstat_notifier =
85199+static struct notifier_block vmstat_notifier =
85200 { &vmstat_cpuup_callback, NULL, 0 };
85201 #endif
85202
85203@@ -1238,10 +1238,20 @@ static int __init setup_vmstat(void)
85204 start_cpu_timer(cpu);
85205 #endif
85206 #ifdef CONFIG_PROC_FS
85207- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
85208- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
85209- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
85210- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
85211+ {
85212+ mode_t gr_mode = S_IRUGO;
85213+#ifdef CONFIG_GRKERNSEC_PROC_ADD
85214+ gr_mode = S_IRUSR;
85215+#endif
85216+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
85217+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
85218+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
85219+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
85220+#else
85221+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
85222+#endif
85223+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
85224+ }
85225 #endif
85226 return 0;
85227 }
85228diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
85229index acc74ad..be02639 100644
85230--- a/net/8021q/vlan.c
85231+++ b/net/8021q/vlan.c
85232@@ -108,6 +108,13 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
85233 if (vlan_id)
85234 vlan_vid_del(real_dev, vlan_id);
85235
85236+ /* Take it out of our own structures, but be sure to interlock with
85237+ * HW accelerating devices or SW vlan input packet processing if
85238+ * VLAN is not 0 (leave it there for 802.1p).
85239+ */
85240+ if (vlan_id)
85241+ vlan_vid_del(real_dev, vlan_id);
85242+
85243 /* Get rid of the vlan's reference to real_dev */
85244 dev_put(real_dev);
85245 }
85246@@ -485,7 +492,7 @@ out:
85247 return NOTIFY_DONE;
85248 }
85249
85250-static struct notifier_block vlan_notifier_block __read_mostly = {
85251+static struct notifier_block vlan_notifier_block = {
85252 .notifier_call = vlan_device_event,
85253 };
85254
85255@@ -560,8 +567,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
85256 err = -EPERM;
85257 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
85258 break;
85259- if ((args.u.name_type >= 0) &&
85260- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
85261+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
85262 struct vlan_net *vn;
85263
85264 vn = net_generic(net, vlan_net_id);
85265diff --git a/net/9p/mod.c b/net/9p/mod.c
85266index 6ab36ae..6f1841b 100644
85267--- a/net/9p/mod.c
85268+++ b/net/9p/mod.c
85269@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
85270 void v9fs_register_trans(struct p9_trans_module *m)
85271 {
85272 spin_lock(&v9fs_trans_lock);
85273- list_add_tail(&m->list, &v9fs_trans_list);
85274+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
85275 spin_unlock(&v9fs_trans_lock);
85276 }
85277 EXPORT_SYMBOL(v9fs_register_trans);
85278@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
85279 void v9fs_unregister_trans(struct p9_trans_module *m)
85280 {
85281 spin_lock(&v9fs_trans_lock);
85282- list_del_init(&m->list);
85283+ pax_list_del_init((struct list_head *)&m->list);
85284 spin_unlock(&v9fs_trans_lock);
85285 }
85286 EXPORT_SYMBOL(v9fs_unregister_trans);
85287diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
85288index 02efb25..41541a9 100644
85289--- a/net/9p/trans_fd.c
85290+++ b/net/9p/trans_fd.c
85291@@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
85292 oldfs = get_fs();
85293 set_fs(get_ds());
85294 /* The cast to a user pointer is valid due to the set_fs() */
85295- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
85296+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
85297 set_fs(oldfs);
85298
85299 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
85300diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
85301index 876fbe8..8bbea9f 100644
85302--- a/net/atm/atm_misc.c
85303+++ b/net/atm/atm_misc.c
85304@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
85305 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
85306 return 1;
85307 atm_return(vcc, truesize);
85308- atomic_inc(&vcc->stats->rx_drop);
85309+ atomic_inc_unchecked(&vcc->stats->rx_drop);
85310 return 0;
85311 }
85312 EXPORT_SYMBOL(atm_charge);
85313@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
85314 }
85315 }
85316 atm_return(vcc, guess);
85317- atomic_inc(&vcc->stats->rx_drop);
85318+ atomic_inc_unchecked(&vcc->stats->rx_drop);
85319 return NULL;
85320 }
85321 EXPORT_SYMBOL(atm_alloc_charge);
85322@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
85323
85324 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
85325 {
85326-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
85327+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
85328 __SONET_ITEMS
85329 #undef __HANDLE_ITEM
85330 }
85331@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
85332
85333 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
85334 {
85335-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
85336+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
85337 __SONET_ITEMS
85338 #undef __HANDLE_ITEM
85339 }
85340diff --git a/net/atm/lec.h b/net/atm/lec.h
85341index a86aff9..3a0d6f6 100644
85342--- a/net/atm/lec.h
85343+++ b/net/atm/lec.h
85344@@ -48,7 +48,7 @@ struct lane2_ops {
85345 const u8 *tlvs, u32 sizeoftlvs);
85346 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
85347 const u8 *tlvs, u32 sizeoftlvs);
85348-};
85349+} __no_const;
85350
85351 /*
85352 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
85353diff --git a/net/atm/proc.c b/net/atm/proc.c
85354index 0d020de..011c7bb 100644
85355--- a/net/atm/proc.c
85356+++ b/net/atm/proc.c
85357@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
85358 const struct k_atm_aal_stats *stats)
85359 {
85360 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
85361- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
85362- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
85363- atomic_read(&stats->rx_drop));
85364+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
85365+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
85366+ atomic_read_unchecked(&stats->rx_drop));
85367 }
85368
85369 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
85370diff --git a/net/atm/resources.c b/net/atm/resources.c
85371index 0447d5d..3cf4728 100644
85372--- a/net/atm/resources.c
85373+++ b/net/atm/resources.c
85374@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
85375 static void copy_aal_stats(struct k_atm_aal_stats *from,
85376 struct atm_aal_stats *to)
85377 {
85378-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
85379+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
85380 __AAL_STAT_ITEMS
85381 #undef __HANDLE_ITEM
85382 }
85383@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
85384 static void subtract_aal_stats(struct k_atm_aal_stats *from,
85385 struct atm_aal_stats *to)
85386 {
85387-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
85388+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
85389 __AAL_STAT_ITEMS
85390 #undef __HANDLE_ITEM
85391 }
85392diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
85393index d5744b7..506bae3 100644
85394--- a/net/ax25/sysctl_net_ax25.c
85395+++ b/net/ax25/sysctl_net_ax25.c
85396@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
85397 {
85398 char path[sizeof("net/ax25/") + IFNAMSIZ];
85399 int k;
85400- struct ctl_table *table;
85401+ ctl_table_no_const *table;
85402
85403 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
85404 if (!table)
85405diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
85406index 1ee94d0..14beea2 100644
85407--- a/net/batman-adv/bat_iv_ogm.c
85408+++ b/net/batman-adv/bat_iv_ogm.c
85409@@ -63,7 +63,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
85410
85411 /* randomize initial seqno to avoid collision */
85412 get_random_bytes(&random_seqno, sizeof(random_seqno));
85413- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
85414+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
85415
85416 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
85417 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
85418@@ -615,9 +615,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
85419 batadv_ogm_packet = (struct batadv_ogm_packet *)(*ogm_buff);
85420
85421 /* change sequence number to network order */
85422- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
85423+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
85424 batadv_ogm_packet->seqno = htonl(seqno);
85425- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
85426+ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
85427
85428 batadv_ogm_packet->ttvn = atomic_read(&bat_priv->tt.vn);
85429 batadv_ogm_packet->tt_crc = htons(bat_priv->tt.local_crc);
85430@@ -1022,7 +1022,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
85431 return;
85432
85433 /* could be changed by schedule_own_packet() */
85434- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
85435+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
85436
85437 if (batadv_ogm_packet->flags & BATADV_DIRECTLINK)
85438 has_directlink_flag = 1;
85439diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
85440index f1d37cd..4190879 100644
85441--- a/net/batman-adv/hard-interface.c
85442+++ b/net/batman-adv/hard-interface.c
85443@@ -370,7 +370,7 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
85444 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
85445 dev_add_pack(&hard_iface->batman_adv_ptype);
85446
85447- atomic_set(&hard_iface->frag_seqno, 1);
85448+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
85449 batadv_info(hard_iface->soft_iface, "Adding interface: %s\n",
85450 hard_iface->net_dev->name);
85451
85452@@ -493,7 +493,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
85453 /* This can't be called via a bat_priv callback because
85454 * we have no bat_priv yet.
85455 */
85456- atomic_set(&hard_iface->bat_iv.ogm_seqno, 1);
85457+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, 1);
85458 hard_iface->bat_iv.ogm_buff = NULL;
85459
85460 return hard_iface;
85461diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
85462index 6b548fd..fc32c8d 100644
85463--- a/net/batman-adv/soft-interface.c
85464+++ b/net/batman-adv/soft-interface.c
85465@@ -252,7 +252,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
85466 primary_if->net_dev->dev_addr, ETH_ALEN);
85467
85468 /* set broadcast sequence number */
85469- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
85470+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
85471 bcast_packet->seqno = htonl(seqno);
85472
85473 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
85474@@ -497,7 +497,7 @@ struct net_device *batadv_softif_create(const char *name)
85475 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
85476
85477 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
85478- atomic_set(&bat_priv->bcast_seqno, 1);
85479+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
85480 atomic_set(&bat_priv->tt.vn, 0);
85481 atomic_set(&bat_priv->tt.local_changes, 0);
85482 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
85483diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
85484index ae9ac9a..11e0fe7 100644
85485--- a/net/batman-adv/types.h
85486+++ b/net/batman-adv/types.h
85487@@ -48,7 +48,7 @@
85488 struct batadv_hard_iface_bat_iv {
85489 unsigned char *ogm_buff;
85490 int ogm_buff_len;
85491- atomic_t ogm_seqno;
85492+ atomic_unchecked_t ogm_seqno;
85493 };
85494
85495 struct batadv_hard_iface {
85496@@ -56,7 +56,7 @@ struct batadv_hard_iface {
85497 int16_t if_num;
85498 char if_status;
85499 struct net_device *net_dev;
85500- atomic_t frag_seqno;
85501+ atomic_unchecked_t frag_seqno;
85502 struct kobject *hardif_obj;
85503 atomic_t refcount;
85504 struct packet_type batman_adv_ptype;
85505@@ -284,7 +284,7 @@ struct batadv_priv {
85506 atomic_t orig_interval; /* uint */
85507 atomic_t hop_penalty; /* uint */
85508 atomic_t log_level; /* uint */
85509- atomic_t bcast_seqno;
85510+ atomic_unchecked_t bcast_seqno;
85511 atomic_t bcast_queue_left;
85512 atomic_t batman_queue_left;
85513 char num_ifaces;
85514diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
85515index 10aff49..ea8e021 100644
85516--- a/net/batman-adv/unicast.c
85517+++ b/net/batman-adv/unicast.c
85518@@ -272,7 +272,7 @@ int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
85519 frag1->flags = BATADV_UNI_FRAG_HEAD | large_tail;
85520 frag2->flags = large_tail;
85521
85522- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
85523+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
85524 frag1->seqno = htons(seqno - 1);
85525 frag2->seqno = htons(seqno);
85526
85527diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
85528index 07f0739..3c42e34 100644
85529--- a/net/bluetooth/hci_sock.c
85530+++ b/net/bluetooth/hci_sock.c
85531@@ -934,7 +934,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
85532 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
85533 }
85534
85535- len = min_t(unsigned int, len, sizeof(uf));
85536+ len = min((size_t)len, sizeof(uf));
85537 if (copy_from_user(&uf, optval, len)) {
85538 err = -EFAULT;
85539 break;
85540diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
85541index 22e6583..426e2f3 100644
85542--- a/net/bluetooth/l2cap_core.c
85543+++ b/net/bluetooth/l2cap_core.c
85544@@ -3400,8 +3400,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
85545 break;
85546
85547 case L2CAP_CONF_RFC:
85548- if (olen == sizeof(rfc))
85549- memcpy(&rfc, (void *)val, olen);
85550+ if (olen != sizeof(rfc))
85551+ break;
85552+
85553+ memcpy(&rfc, (void *)val, olen);
85554
85555 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
85556 rfc.mode != chan->mode)
85557diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
85558index 1bcfb84..dad9f98 100644
85559--- a/net/bluetooth/l2cap_sock.c
85560+++ b/net/bluetooth/l2cap_sock.c
85561@@ -479,7 +479,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
85562 struct sock *sk = sock->sk;
85563 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
85564 struct l2cap_options opts;
85565- int len, err = 0;
85566+ int err = 0;
85567+ size_t len = optlen;
85568 u32 opt;
85569
85570 BT_DBG("sk %p", sk);
85571@@ -501,7 +502,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
85572 opts.max_tx = chan->max_tx;
85573 opts.txwin_size = chan->tx_win;
85574
85575- len = min_t(unsigned int, sizeof(opts), optlen);
85576+ len = min(sizeof(opts), len);
85577 if (copy_from_user((char *) &opts, optval, len)) {
85578 err = -EFAULT;
85579 break;
85580@@ -581,7 +582,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
85581 struct bt_security sec;
85582 struct bt_power pwr;
85583 struct l2cap_conn *conn;
85584- int len, err = 0;
85585+ int err = 0;
85586+ size_t len = optlen;
85587 u32 opt;
85588
85589 BT_DBG("sk %p", sk);
85590@@ -604,7 +606,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
85591
85592 sec.level = BT_SECURITY_LOW;
85593
85594- len = min_t(unsigned int, sizeof(sec), optlen);
85595+ len = min(sizeof(sec), len);
85596 if (copy_from_user((char *) &sec, optval, len)) {
85597 err = -EFAULT;
85598 break;
85599@@ -701,7 +703,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
85600
85601 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
85602
85603- len = min_t(unsigned int, sizeof(pwr), optlen);
85604+ len = min(sizeof(pwr), len);
85605 if (copy_from_user((char *) &pwr, optval, len)) {
85606 err = -EFAULT;
85607 break;
85608diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
85609index ce3f665..2c7d08f 100644
85610--- a/net/bluetooth/rfcomm/sock.c
85611+++ b/net/bluetooth/rfcomm/sock.c
85612@@ -667,7 +667,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
85613 struct sock *sk = sock->sk;
85614 struct bt_security sec;
85615 int err = 0;
85616- size_t len;
85617+ size_t len = optlen;
85618 u32 opt;
85619
85620 BT_DBG("sk %p", sk);
85621@@ -689,7 +689,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
85622
85623 sec.level = BT_SECURITY_LOW;
85624
85625- len = min_t(unsigned int, sizeof(sec), optlen);
85626+ len = min(sizeof(sec), len);
85627 if (copy_from_user((char *) &sec, optval, len)) {
85628 err = -EFAULT;
85629 break;
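The bluetooth sockopt hunks above (l2cap_sock.c and rfcomm/sock.c) replace int lengths and min_t(unsigned int, ...) casts with a single size_t conversion up front. The point is to keep every later comparison in one unsigned domain; a small demo of how a signed length can be read two ways, with made-up sizes:

/* optlen_signedness.c — why the hunks convert optlen to size_t once. */
#include <stdio.h>

int main(void)
{
    int optlen = -1;                    /* user-supplied sockopt length */
    size_t opts_size = 16;              /* stand-in for sizeof(opts)    */

    /* With int in play, a signed comparison sees -1 as tiny and "valid". */
    if (optlen < (int)opts_size)
        puts("signed view: -1 passes a naive bounds check");

    /* Converted once at the top, as in the hunks above: -1 becomes
     * SIZE_MAX and min() clamps it to the struct size, nothing more.   */
    size_t len = (size_t)optlen;
    size_t copy = len < opts_size ? len : opts_size;
    printf("copy=%zu bytes (never more than the struct)\n", copy);
    return 0;
}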
85630diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
85631index bd6fd0f..6492cba 100644
85632--- a/net/bluetooth/rfcomm/tty.c
85633+++ b/net/bluetooth/rfcomm/tty.c
85634@@ -309,7 +309,7 @@ static void rfcomm_dev_del(struct rfcomm_dev *dev)
85635 BUG_ON(test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags));
85636
85637 spin_lock_irqsave(&dev->port.lock, flags);
85638- if (dev->port.count > 0) {
85639+ if (atomic_read(&dev->port.count) > 0) {
85640 spin_unlock_irqrestore(&dev->port.lock, flags);
85641 return;
85642 }
85643@@ -664,10 +664,10 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
85644 return -ENODEV;
85645
85646 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
85647- dev->channel, dev->port.count);
85648+ dev->channel, atomic_read(&dev->port.count));
85649
85650 spin_lock_irqsave(&dev->port.lock, flags);
85651- if (++dev->port.count > 1) {
85652+ if (atomic_inc_return(&dev->port.count) > 1) {
85653 spin_unlock_irqrestore(&dev->port.lock, flags);
85654 return 0;
85655 }
85656@@ -732,10 +732,10 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
85657 return;
85658
85659 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
85660- dev->port.count);
85661+ atomic_read(&dev->port.count));
85662
85663 spin_lock_irqsave(&dev->port.lock, flags);
85664- if (!--dev->port.count) {
85665+ if (!atomic_dec_return(&dev->port.count)) {
85666 spin_unlock_irqrestore(&dev->port.lock, flags);
85667 if (dev->tty_dev->parent)
85668 device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST);
85669diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
85670index d9576e6..85f4f4e 100644
85671--- a/net/bridge/br_fdb.c
85672+++ b/net/bridge/br_fdb.c
85673@@ -386,7 +386,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
85674 return 0;
85675 br_warn(br, "adding interface %s with same address "
85676 "as a received packet\n",
85677- source->dev->name);
85678+ source ? source->dev->name : br->dev->name);
85679 fdb_delete(br, fdb);
85680 }
85681
85682diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
85683index 5fe2ff3..121d696 100644
85684--- a/net/bridge/netfilter/ebtables.c
85685+++ b/net/bridge/netfilter/ebtables.c
85686@@ -1523,7 +1523,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
85687 tmp.valid_hooks = t->table->valid_hooks;
85688 }
85689 mutex_unlock(&ebt_mutex);
85690- if (copy_to_user(user, &tmp, *len) != 0){
85691+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
85692 BUGPRINT("c2u Didn't work\n");
85693 ret = -EFAULT;
85694 break;
85695@@ -2327,7 +2327,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
85696 goto out;
85697 tmp.valid_hooks = t->valid_hooks;
85698
85699- if (copy_to_user(user, &tmp, *len) != 0) {
85700+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
85701 ret = -EFAULT;
85702 break;
85703 }
85704@@ -2338,7 +2338,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
85705 tmp.entries_size = t->table->entries_size;
85706 tmp.valid_hooks = t->table->valid_hooks;
85707
85708- if (copy_to_user(user, &tmp, *len) != 0) {
85709+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
85710 ret = -EFAULT;
85711 break;
85712 }
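The ebtables hunks above bound the user-controlled *len against sizeof(tmp) before copy_to_user(); without that guard, a larger *len copies kernel stack beyond tmp out to userspace. A userspace model of the guard, with copy_to_user() stubbed as a bounded memcpy:

/* getinfo_clamp.c — user-chosen output length must fit the kernel object. */
#include <stdio.h>
#include <string.h>

struct info { char name[8]; int count; };

static int copy_out(char *dst, const void *src, size_t len)
{
    memcpy(dst, src, len);              /* stand-in for copy_to_user() */
    return 0;
}

static int get_info(char *user_buf, size_t *len)
{
    struct info tmp = { "ebt", 3 };

    if (*len > sizeof(tmp))             /* the added guard: no over-read */
        return -1;
    return copy_out(user_buf, &tmp, *len);
}

int main(void)
{
    char buf[64];
    size_t want = 64;                   /* asks for more than sizeof(tmp) */
    printf("%d\n", get_info(buf, &want));      /* -1: clamped, no leak    */
    want = sizeof(struct info);
    printf("%d\n", get_info(buf, &want));      /* 0: legitimate size ok   */
    return 0;
}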
85713diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
85714index a376ec1..1fbd6be 100644
85715--- a/net/caif/cfctrl.c
85716+++ b/net/caif/cfctrl.c
85717@@ -10,6 +10,7 @@
85718 #include <linux/spinlock.h>
85719 #include <linux/slab.h>
85720 #include <linux/pkt_sched.h>
85721+#include <linux/sched.h>
85722 #include <net/caif/caif_layer.h>
85723 #include <net/caif/cfpkt.h>
85724 #include <net/caif/cfctrl.h>
85725@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
85726 memset(&dev_info, 0, sizeof(dev_info));
85727 dev_info.id = 0xff;
85728 cfsrvl_init(&this->serv, 0, &dev_info, false);
85729- atomic_set(&this->req_seq_no, 1);
85730- atomic_set(&this->rsp_seq_no, 1);
85731+ atomic_set_unchecked(&this->req_seq_no, 1);
85732+ atomic_set_unchecked(&this->rsp_seq_no, 1);
85733 this->serv.layer.receive = cfctrl_recv;
85734 sprintf(this->serv.layer.name, "ctrl");
85735 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
85736@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
85737 struct cfctrl_request_info *req)
85738 {
85739 spin_lock_bh(&ctrl->info_list_lock);
85740- atomic_inc(&ctrl->req_seq_no);
85741- req->sequence_no = atomic_read(&ctrl->req_seq_no);
85742+ atomic_inc_unchecked(&ctrl->req_seq_no);
85743+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
85744 list_add_tail(&req->list, &ctrl->list);
85745 spin_unlock_bh(&ctrl->info_list_lock);
85746 }
85747@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
85748 if (p != first)
85749 pr_warn("Requests are not received in order\n");
85750
85751- atomic_set(&ctrl->rsp_seq_no,
85752+ atomic_set_unchecked(&ctrl->rsp_seq_no,
85753 p->sequence_no);
85754 list_del(&p->list);
85755 goto out;
85756diff --git a/net/can/af_can.c b/net/can/af_can.c
85757index ddac1ee..3ee0a78 100644
85758--- a/net/can/af_can.c
85759+++ b/net/can/af_can.c
85760@@ -872,7 +872,7 @@ static const struct net_proto_family can_family_ops = {
85761 };
85762
85763 /* notifier block for netdevice event */
85764-static struct notifier_block can_netdev_notifier __read_mostly = {
85765+static struct notifier_block can_netdev_notifier = {
85766 .notifier_call = can_notifier,
85767 };
85768
85769diff --git a/net/can/gw.c b/net/can/gw.c
85770index 574dda78e..3d2b3da 100644
85771--- a/net/can/gw.c
85772+++ b/net/can/gw.c
85773@@ -67,7 +67,6 @@ MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
85774 MODULE_ALIAS("can-gw");
85775
85776 static HLIST_HEAD(cgw_list);
85777-static struct notifier_block notifier;
85778
85779 static struct kmem_cache *cgw_cache __read_mostly;
85780
85781@@ -893,6 +892,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
85782 return err;
85783 }
85784
85785+static struct notifier_block notifier = {
85786+ .notifier_call = cgw_notifier
85787+};
85788+
85789 static __init int cgw_module_init(void)
85790 {
85791 printk(banner);
85792@@ -904,7 +907,6 @@ static __init int cgw_module_init(void)
85793 return -ENOMEM;
85794
85795 /* set notifier */
85796- notifier.notifier_call = cgw_notifier;
85797 register_netdevice_notifier(&notifier);
85798
85799 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
85800diff --git a/net/compat.c b/net/compat.c
85801index 79ae884..17c5c09 100644
85802--- a/net/compat.c
85803+++ b/net/compat.c
85804@@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
85805 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
85806 __get_user(kmsg->msg_flags, &umsg->msg_flags))
85807 return -EFAULT;
85808- kmsg->msg_name = compat_ptr(tmp1);
85809- kmsg->msg_iov = compat_ptr(tmp2);
85810- kmsg->msg_control = compat_ptr(tmp3);
85811+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
85812+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
85813+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
85814 return 0;
85815 }
85816
85817@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
85818
85819 if (kern_msg->msg_namelen) {
85820 if (mode == VERIFY_READ) {
85821- int err = move_addr_to_kernel(kern_msg->msg_name,
85822+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
85823 kern_msg->msg_namelen,
85824 kern_address);
85825 if (err < 0)
85826@@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
85827 kern_msg->msg_name = NULL;
85828
85829 tot_len = iov_from_user_compat_to_kern(kern_iov,
85830- (struct compat_iovec __user *)kern_msg->msg_iov,
85831+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
85832 kern_msg->msg_iovlen);
85833 if (tot_len >= 0)
85834 kern_msg->msg_iov = kern_iov;
85835@@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
85836
85837 #define CMSG_COMPAT_FIRSTHDR(msg) \
85838 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
85839- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
85840+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
85841 (struct compat_cmsghdr __user *)NULL)
85842
85843 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
85844 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
85845 (ucmlen) <= (unsigned long) \
85846 ((mhdr)->msg_controllen - \
85847- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
85848+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
85849
85850 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
85851 struct compat_cmsghdr __user *cmsg, int cmsg_len)
85852 {
85853 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
85854- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
85855+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
85856 msg->msg_controllen)
85857 return NULL;
85858 return (struct compat_cmsghdr __user *)ptr;
85859@@ -219,7 +219,7 @@ Efault:
85860
85861 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
85862 {
85863- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
85864+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
85865 struct compat_cmsghdr cmhdr;
85866 struct compat_timeval ctv;
85867 struct compat_timespec cts[3];
85868@@ -275,7 +275,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
85869
85870 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
85871 {
85872- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
85873+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
85874 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
85875 int fdnum = scm->fp->count;
85876 struct file **fp = scm->fp->fp;
85877@@ -363,7 +363,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
85878 return -EFAULT;
85879 old_fs = get_fs();
85880 set_fs(KERNEL_DS);
85881- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
85882+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
85883 set_fs(old_fs);
85884
85885 return err;
85886@@ -424,7 +424,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
85887 len = sizeof(ktime);
85888 old_fs = get_fs();
85889 set_fs(KERNEL_DS);
85890- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
85891+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
85892 set_fs(old_fs);
85893
85894 if (!err) {
85895@@ -567,7 +567,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
85896 case MCAST_JOIN_GROUP:
85897 case MCAST_LEAVE_GROUP:
85898 {
85899- struct compat_group_req __user *gr32 = (void *)optval;
85900+ struct compat_group_req __user *gr32 = (void __user *)optval;
85901 struct group_req __user *kgr =
85902 compat_alloc_user_space(sizeof(struct group_req));
85903 u32 interface;
85904@@ -588,7 +588,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
85905 case MCAST_BLOCK_SOURCE:
85906 case MCAST_UNBLOCK_SOURCE:
85907 {
85908- struct compat_group_source_req __user *gsr32 = (void *)optval;
85909+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
85910 struct group_source_req __user *kgsr = compat_alloc_user_space(
85911 sizeof(struct group_source_req));
85912 u32 interface;
85913@@ -609,7 +609,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
85914 }
85915 case MCAST_MSFILTER:
85916 {
85917- struct compat_group_filter __user *gf32 = (void *)optval;
85918+ struct compat_group_filter __user *gf32 = (void __user *)optval;
85919 struct group_filter __user *kgf;
85920 u32 interface, fmode, numsrc;
85921
85922@@ -647,7 +647,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
85923 char __user *optval, int __user *optlen,
85924 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
85925 {
85926- struct compat_group_filter __user *gf32 = (void *)optval;
85927+ struct compat_group_filter __user *gf32 = (void __user *)optval;
85928 struct group_filter __user *kgf;
85929 int __user *koptlen;
85930 u32 interface, fmode, numsrc;
85931@@ -796,7 +796,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
85932
85933 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
85934 return -EINVAL;
85935- if (copy_from_user(a, args, nas[call]))
85936+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
85937 return -EFAULT;
85938 a0 = a[0];
85939 a1 = a[1];
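The compat_sys_socketcall() hunk above is the inbound counterpart of the ebtables clamp: the per-call byte count from the nas[] table is checked against the destination array before copy_from_user(), so a bad table entry can never overflow the on-stack argument buffer. A sketch with illustrative table values (the real nas[] entries differ):

/* socketcall_bound.c — table-driven copy length bounded by the buffer. */
#include <stdio.h>
#include <string.h>

static const unsigned char nas[3] = { 12, 24, 48 };  /* illustrative sizes */

static int do_call(int call, const void *user_args)
{
    unsigned int a[6];                  /* kernel-side argument buffer */

    if (nas[call] > sizeof(a))          /* the added guard from the hunk */
        return -1;
    memcpy(a, user_args, nas[call]);    /* stands in for copy_from_user() */
    return 0;
}

int main(void)
{
    unsigned int args[12] = { 0 };
    printf("%d\n", do_call(1, args));   /* 0: 24 bytes fit in a[6]       */
    printf("%d\n", do_call(2, args));   /* -1: 48 bytes would overflow   */
    return 0;
}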
85940diff --git a/net/core/datagram.c b/net/core/datagram.c
85941index 368f9c3..f82d4a3 100644
85942--- a/net/core/datagram.c
85943+++ b/net/core/datagram.c
85944@@ -289,7 +289,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
85945 }
85946
85947 kfree_skb(skb);
85948- atomic_inc(&sk->sk_drops);
85949+ atomic_inc_unchecked(&sk->sk_drops);
85950 sk_mem_reclaim_partial(sk);
85951
85952 return err;
85953diff --git a/net/core/dev.c b/net/core/dev.c
85954index 5d9c43d..b471558 100644
85955--- a/net/core/dev.c
85956+++ b/net/core/dev.c
85957@@ -1250,9 +1250,13 @@ void dev_load(struct net *net, const char *name)
85958 if (no_module && capable(CAP_NET_ADMIN))
85959 no_module = request_module("netdev-%s", name);
85960 if (no_module && capable(CAP_SYS_MODULE)) {
85961+#ifdef CONFIG_GRKERNSEC_MODHARDEN
85962+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
85963+#else
85964 if (!request_module("%s", name))
85965 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
85966 name);
85967+#endif
85968 }
85969 }
85970 EXPORT_SYMBOL(dev_load);
85971@@ -1714,7 +1718,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
85972 {
85973 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
85974 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
85975- atomic_long_inc(&dev->rx_dropped);
85976+ atomic_long_inc_unchecked(&dev->rx_dropped);
85977 kfree_skb(skb);
85978 return NET_RX_DROP;
85979 }
85980@@ -1724,7 +1728,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
85981 nf_reset(skb);
85982
85983 if (unlikely(!is_skb_forwardable(dev, skb))) {
85984- atomic_long_inc(&dev->rx_dropped);
85985+ atomic_long_inc_unchecked(&dev->rx_dropped);
85986 kfree_skb(skb);
85987 return NET_RX_DROP;
85988 }
85989@@ -2179,7 +2183,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
85990
85991 struct dev_gso_cb {
85992 void (*destructor)(struct sk_buff *skb);
85993-};
85994+} __no_const;
85995
85996 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
85997
85998@@ -3052,7 +3056,7 @@ enqueue:
85999
86000 local_irq_restore(flags);
86001
86002- atomic_long_inc(&skb->dev->rx_dropped);
86003+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
86004 kfree_skb(skb);
86005 return NET_RX_DROP;
86006 }
86007@@ -3124,7 +3128,7 @@ int netif_rx_ni(struct sk_buff *skb)
86008 }
86009 EXPORT_SYMBOL(netif_rx_ni);
86010
86011-static void net_tx_action(struct softirq_action *h)
86012+static void net_tx_action(void)
86013 {
86014 struct softnet_data *sd = &__get_cpu_var(softnet_data);
86015
86016@@ -3462,7 +3466,7 @@ ncls:
86017 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
86018 } else {
86019 drop:
86020- atomic_long_inc(&skb->dev->rx_dropped);
86021+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
86022 kfree_skb(skb);
86023 /* Jamal, now you will not able to escape explaining
86024 * me how you were going to use this. :-)
86025@@ -4045,7 +4049,7 @@ void netif_napi_del(struct napi_struct *napi)
86026 }
86027 EXPORT_SYMBOL(netif_napi_del);
86028
86029-static void net_rx_action(struct softirq_action *h)
86030+static void net_rx_action(void)
86031 {
86032 struct softnet_data *sd = &__get_cpu_var(softnet_data);
86033 unsigned long time_limit = jiffies + 2;
86034@@ -4529,8 +4533,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
86035 else
86036 seq_printf(seq, "%04x", ntohs(pt->type));
86037
86038+#ifdef CONFIG_GRKERNSEC_HIDESYM
86039+ seq_printf(seq, " %-8s %p\n",
86040+ pt->dev ? pt->dev->name : "", NULL);
86041+#else
86042 seq_printf(seq, " %-8s %pF\n",
86043 pt->dev ? pt->dev->name : "", pt->func);
86044+#endif
86045 }
86046
86047 return 0;
86048@@ -6102,7 +6111,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
86049 } else {
86050 netdev_stats_to_stats64(storage, &dev->stats);
86051 }
86052- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
86053+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
86054 return storage;
86055 }
86056 EXPORT_SYMBOL(dev_get_stats);
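
A pattern that recurs through the rest of the networking diffs starts here: drop counters such as dev->rx_dropped and sk->sk_drops move from atomic_t/atomic_long_t to the patch's *_unchecked variants. Under PaX's REFCOUNT hardening (defined elsewhere in this patch), ordinary atomic increments trap on overflow to catch reference-count bugs; pure statistics counters, where wrapping is harmless, are opted out via atomic_unchecked_t. A rough userspace model of the split (the real trap is architecture-specific and race-free; this only captures the intent):

    #include <limits.h>
    #include <stdlib.h>

    typedef struct { volatile int counter; } atomic_demo_t;           /* overflow == bug */
    typedef struct { volatile int counter; } atomic_unchecked_demo_t; /* may wrap freely */

    static void atomic_demo_inc(atomic_demo_t *v)
    {
        /* Models the REFCOUNT overflow trap; the kernel performs the
         * check and the add atomically, this sketch does not. */
        if (v->counter == INT_MAX)
            abort();
        __sync_fetch_and_add(&v->counter, 1);
    }

    static void atomic_unchecked_demo_inc(atomic_unchecked_demo_t *v)
    {
        __sync_fetch_and_add(&v->counter, 1);   /* wrapping is acceptable here */
    }
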
86057diff --git a/net/core/flow.c b/net/core/flow.c
86058index 3bad824..2071a55 100644
86059--- a/net/core/flow.c
86060+++ b/net/core/flow.c
86061@@ -61,7 +61,7 @@ struct flow_cache {
86062 struct timer_list rnd_timer;
86063 };
86064
86065-atomic_t flow_cache_genid = ATOMIC_INIT(0);
86066+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
86067 EXPORT_SYMBOL(flow_cache_genid);
86068 static struct flow_cache flow_cache_global;
86069 static struct kmem_cache *flow_cachep __read_mostly;
86070@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
86071
86072 static int flow_entry_valid(struct flow_cache_entry *fle)
86073 {
86074- if (atomic_read(&flow_cache_genid) != fle->genid)
86075+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
86076 return 0;
86077 if (fle->object && !fle->object->ops->check(fle->object))
86078 return 0;
86079@@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
86080 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
86081 fcp->hash_count++;
86082 }
86083- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
86084+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
86085 flo = fle->object;
86086 if (!flo)
86087 goto ret_object;
86088@@ -280,7 +280,7 @@ nocache:
86089 }
86090 flo = resolver(net, key, family, dir, flo, ctx);
86091 if (fle) {
86092- fle->genid = atomic_read(&flow_cache_genid);
86093+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
86094 if (!IS_ERR(flo))
86095 fle->object = flo;
86096 else
86097diff --git a/net/core/iovec.c b/net/core/iovec.c
86098index 7e7aeb0..2a998cb 100644
86099--- a/net/core/iovec.c
86100+++ b/net/core/iovec.c
86101@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
86102 if (m->msg_namelen) {
86103 if (mode == VERIFY_READ) {
86104 void __user *namep;
86105- namep = (void __user __force *) m->msg_name;
86106+ namep = (void __force_user *) m->msg_name;
86107 err = move_addr_to_kernel(namep, m->msg_namelen,
86108 address);
86109 if (err < 0)
86110@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
86111 }
86112
86113 size = m->msg_iovlen * sizeof(struct iovec);
86114- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
86115+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
86116 return -EFAULT;
86117
86118 m->msg_iov = iov;
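
The iovec.c hunks (and the scm.c ones below) replace open-coded "__user __force" casts with the patch's single __force_user spelling, and CMSG_DATA() gains a matching __force_kernel cast, so every deliberate crossing between the kernel and user address spaces funnels through one greppable annotation. Assuming the usual grsecurity compiler.h definitions, which this excerpt does not include, the annotations expand for sparse roughly as follows:

    /* Sketch of the sparse address-space annotations assumed by these
     * hunks; the authoritative definitions live in the patch's
     * include/linux/compiler.h changes, not shown in this excerpt. */
    #ifdef __CHECKER__
    # define __kernel       __attribute__((address_space(0)))
    # define __user         __attribute__((noderef, address_space(1)))
    # define __force        __attribute__((force))
    #else
    # define __kernel
    # define __user
    # define __force
    #endif
    #define __force_user    __force __user
    #define __force_kernel  __force __kernel
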
86119diff --git a/net/core/neighbour.c b/net/core/neighbour.c
86120index c815f28..e6403f2 100644
86121--- a/net/core/neighbour.c
86122+++ b/net/core/neighbour.c
86123@@ -2776,7 +2776,7 @@ static int proc_unres_qlen(ctl_table *ctl, int write, void __user *buffer,
86124 size_t *lenp, loff_t *ppos)
86125 {
86126 int size, ret;
86127- ctl_table tmp = *ctl;
86128+ ctl_table_no_const tmp = *ctl;
86129
86130 tmp.extra1 = &zero;
86131 tmp.extra2 = &unres_qlen_max;
86132diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
86133index 28c5f5a..7edf2e2 100644
86134--- a/net/core/net-sysfs.c
86135+++ b/net/core/net-sysfs.c
86136@@ -1455,7 +1455,7 @@ void netdev_class_remove_file(struct class_attribute *class_attr)
86137 }
86138 EXPORT_SYMBOL(netdev_class_remove_file);
86139
86140-int netdev_kobject_init(void)
86141+int __init netdev_kobject_init(void)
86142 {
86143 kobj_ns_type_register(&net_ns_type_operations);
86144 return class_register(&net_class);
86145diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
86146index 8acce01..2e306bb 100644
86147--- a/net/core/net_namespace.c
86148+++ b/net/core/net_namespace.c
86149@@ -442,7 +442,7 @@ static int __register_pernet_operations(struct list_head *list,
86150 int error;
86151 LIST_HEAD(net_exit_list);
86152
86153- list_add_tail(&ops->list, list);
86154+ pax_list_add_tail((struct list_head *)&ops->list, list);
86155 if (ops->init || (ops->id && ops->size)) {
86156 for_each_net(net) {
86157 error = ops_init(ops, net);
86158@@ -455,7 +455,7 @@ static int __register_pernet_operations(struct list_head *list,
86159
86160 out_undo:
86161 /* If I have an error cleanup all namespaces I initialized */
86162- list_del(&ops->list);
86163+ pax_list_del((struct list_head *)&ops->list);
86164 ops_exit_list(ops, &net_exit_list);
86165 ops_free_list(ops, &net_exit_list);
86166 return error;
86167@@ -466,7 +466,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
86168 struct net *net;
86169 LIST_HEAD(net_exit_list);
86170
86171- list_del(&ops->list);
86172+ pax_list_del((struct list_head *)&ops->list);
86173 for_each_net(net)
86174 list_add_tail(&net->exit_list, &net_exit_list);
86175 ops_exit_list(ops, &net_exit_list);
86176@@ -600,7 +600,7 @@ int register_pernet_device(struct pernet_operations *ops)
86177 mutex_lock(&net_mutex);
86178 error = register_pernet_operations(&pernet_list, ops);
86179 if (!error && (first_device == &pernet_list))
86180- first_device = &ops->list;
86181+ first_device = (struct list_head *)&ops->list;
86182 mutex_unlock(&net_mutex);
86183 return error;
86184 }
86185diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
86186index 6212ec9..5ee16b2 100644
86187--- a/net/core/rtnetlink.c
86188+++ b/net/core/rtnetlink.c
86189@@ -58,7 +58,7 @@ struct rtnl_link {
86190 rtnl_doit_func doit;
86191 rtnl_dumpit_func dumpit;
86192 rtnl_calcit_func calcit;
86193-};
86194+} __no_const;
86195
86196 static DEFINE_MUTEX(rtnl_mutex);
86197
86198@@ -299,10 +299,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
86199 if (rtnl_link_ops_get(ops->kind))
86200 return -EEXIST;
86201
86202- if (!ops->dellink)
86203- ops->dellink = unregister_netdevice_queue;
86204+ if (!ops->dellink) {
86205+ pax_open_kernel();
86206+ *(void **)&ops->dellink = unregister_netdevice_queue;
86207+ pax_close_kernel();
86208+ }
86209
86210- list_add_tail(&ops->list, &link_ops);
86211+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
86212 return 0;
86213 }
86214 EXPORT_SYMBOL_GPL(__rtnl_link_register);
86215@@ -349,7 +352,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
86216 for_each_net(net) {
86217 __rtnl_kill_links(net, ops);
86218 }
86219- list_del(&ops->list);
86220+ pax_list_del((struct list_head *)&ops->list);
86221 }
86222 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
86223
86224@@ -1068,7 +1071,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
86225 rcu_read_lock();
86226 cb->seq = net->dev_base_seq;
86227
86228- if (nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
86229+ if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
86230 ifla_policy) >= 0) {
86231
86232 if (tb[IFLA_EXT_MASK])
86233@@ -1924,7 +1927,7 @@ static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
86234 u32 ext_filter_mask = 0;
86235 u16 min_ifinfo_dump_size = 0;
86236
86237- if (nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
86238+ if (nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
86239 ifla_policy) >= 0) {
86240 if (tb[IFLA_EXT_MASK])
86241 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
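
Two distinct things happen in the rtnetlink hunks. First, the rtnl_link_ops tables lose __read_mostly and become effectively read-only, so the lazy dellink default is installed inside a pax_open_kernel()/pax_close_kernel() window and list membership goes through the pax_list_* helpers. Second, the two nlmsg_parse() calls switch from sizeof(struct rtgenmsg) to sizeof(struct ifinfomsg), matching the header that link dump requests actually carry. A userspace mock of the open/patch/close write pattern, with mprotect() standing in for the kernel's architecture-specific write-protection toggle:

    #include <stddef.h>
    #include <sys/mman.h>
    #include <unistd.h>

    struct demo_ops { void (*dellink)(void); };

    static void default_dellink(void) { }

    int main(void)
    {
        size_t pg = (size_t)sysconf(_SC_PAGESIZE);
        struct demo_ops *ops = mmap(NULL, pg, PROT_READ | PROT_WRITE,
                                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (ops == MAP_FAILED)
            return 1;
        mprotect(ops, pg, PROT_READ);               /* table becomes "const"  */

        mprotect(ops, pg, PROT_READ | PROT_WRITE);  /* pax_open_kernel()      */
        ops->dellink = default_dellink;             /* install the default    */
        mprotect(ops, pg, PROT_READ);               /* pax_close_kernel()     */

        ops->dellink();
        return 0;
    }
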
86242diff --git a/net/core/scm.c b/net/core/scm.c
86243index 2dc6cda..2159524 100644
86244--- a/net/core/scm.c
86245+++ b/net/core/scm.c
86246@@ -226,7 +226,7 @@ EXPORT_SYMBOL(__scm_send);
86247 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
86248 {
86249 struct cmsghdr __user *cm
86250- = (__force struct cmsghdr __user *)msg->msg_control;
86251+ = (struct cmsghdr __force_user *)msg->msg_control;
86252 struct cmsghdr cmhdr;
86253 int cmlen = CMSG_LEN(len);
86254 int err;
86255@@ -249,7 +249,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
86256 err = -EFAULT;
86257 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
86258 goto out;
86259- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
86260+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
86261 goto out;
86262 cmlen = CMSG_SPACE(len);
86263 if (msg->msg_controllen < cmlen)
86264@@ -265,7 +265,7 @@ EXPORT_SYMBOL(put_cmsg);
86265 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
86266 {
86267 struct cmsghdr __user *cm
86268- = (__force struct cmsghdr __user*)msg->msg_control;
86269+ = (struct cmsghdr __force_user *)msg->msg_control;
86270
86271 int fdmax = 0;
86272 int fdnum = scm->fp->count;
86273@@ -285,7 +285,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
86274 if (fdnum < fdmax)
86275 fdmax = fdnum;
86276
86277- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
86278+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
86279 i++, cmfptr++)
86280 {
86281 struct socket *sock;
86282diff --git a/net/core/sock.c b/net/core/sock.c
86283index bc131d4..029e378 100644
86284--- a/net/core/sock.c
86285+++ b/net/core/sock.c
86286@@ -388,7 +388,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
86287 struct sk_buff_head *list = &sk->sk_receive_queue;
86288
86289 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
86290- atomic_inc(&sk->sk_drops);
86291+ atomic_inc_unchecked(&sk->sk_drops);
86292 trace_sock_rcvqueue_full(sk, skb);
86293 return -ENOMEM;
86294 }
86295@@ -398,7 +398,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
86296 return err;
86297
86298 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
86299- atomic_inc(&sk->sk_drops);
86300+ atomic_inc_unchecked(&sk->sk_drops);
86301 return -ENOBUFS;
86302 }
86303
86304@@ -418,7 +418,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
86305 skb_dst_force(skb);
86306
86307 spin_lock_irqsave(&list->lock, flags);
86308- skb->dropcount = atomic_read(&sk->sk_drops);
86309+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
86310 __skb_queue_tail(list, skb);
86311 spin_unlock_irqrestore(&list->lock, flags);
86312
86313@@ -438,7 +438,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
86314 skb->dev = NULL;
86315
86316 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
86317- atomic_inc(&sk->sk_drops);
86318+ atomic_inc_unchecked(&sk->sk_drops);
86319 goto discard_and_relse;
86320 }
86321 if (nested)
86322@@ -456,7 +456,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
86323 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
86324 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
86325 bh_unlock_sock(sk);
86326- atomic_inc(&sk->sk_drops);
86327+ atomic_inc_unchecked(&sk->sk_drops);
86328 goto discard_and_relse;
86329 }
86330
86331@@ -930,12 +930,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
86332 struct timeval tm;
86333 } v;
86334
86335- int lv = sizeof(int);
86336- int len;
86337+ unsigned int lv = sizeof(int);
86338+ unsigned int len;
86339
86340 if (get_user(len, optlen))
86341 return -EFAULT;
86342- if (len < 0)
86343+ if (len > INT_MAX)
86344 return -EINVAL;
86345
86346 memset(&v, 0, sizeof(v));
86347@@ -1083,11 +1083,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
86348
86349 case SO_PEERNAME:
86350 {
86351- char address[128];
86352+ char address[_K_SS_MAXSIZE];
86353
86354 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
86355 return -ENOTCONN;
86356- if (lv < len)
86357+ if (lv < len || sizeof address < len)
86358 return -EINVAL;
86359 if (copy_to_user(optval, address, len))
86360 return -EFAULT;
86361@@ -1146,7 +1146,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
86362
86363 if (len > lv)
86364 len = lv;
86365- if (copy_to_user(optval, &v, len))
86366+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
86367 return -EFAULT;
86368 lenout:
86369 if (put_user(len, optlen))
86370@@ -2276,7 +2276,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
86371 */
86372 smp_wmb();
86373 atomic_set(&sk->sk_refcnt, 1);
86374- atomic_set(&sk->sk_drops, 0);
86375+ atomic_set_unchecked(&sk->sk_drops, 0);
86376 }
86377 EXPORT_SYMBOL(sock_init_data);
86378
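
The sock_getsockopt() hunks convert the user-supplied length to unsigned: a negative value now appears as something larger than INT_MAX and is rejected by a single comparison, and the new "len > sizeof(v)" guard in front of copy_to_user() bounds the kernel-side read regardless of which option filled the union. The SO_PEERNAME case gets the same treatment plus a buffer sized to _K_SS_MAXSIZE. The idiom, reduced to a userspace sketch:

    #include <limits.h>
    #include <string.h>
    #include <errno.h>

    /* Userspace reduction of the hardened length handling: treat the
     * caller's length as unsigned so negative ints are caught by one
     * comparison, then bound the copy by the source object's size. */
    static int get_opt(void *dst, const void *src, size_t srcsize,
                       unsigned int len)
    {
        if (len > INT_MAX)       /* was a negative int in disguise */
            return -EINVAL;
        if (len > srcsize)       /* the added copy bound */
            return -EFAULT;
        memcpy(dst, src, len);   /* stands in for copy_to_user() */
        return 0;
    }
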
86379diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
86380index 750f44f..922399c 100644
86381--- a/net/core/sock_diag.c
86382+++ b/net/core/sock_diag.c
86383@@ -9,26 +9,33 @@
86384 #include <linux/inet_diag.h>
86385 #include <linux/sock_diag.h>
86386
86387-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
86388+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
86389 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
86390 static DEFINE_MUTEX(sock_diag_table_mutex);
86391
86392 int sock_diag_check_cookie(void *sk, __u32 *cookie)
86393 {
86394+#ifndef CONFIG_GRKERNSEC_HIDESYM
86395 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
86396 cookie[1] != INET_DIAG_NOCOOKIE) &&
86397 ((u32)(unsigned long)sk != cookie[0] ||
86398 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
86399 return -ESTALE;
86400 else
86401+#endif
86402 return 0;
86403 }
86404 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
86405
86406 void sock_diag_save_cookie(void *sk, __u32 *cookie)
86407 {
86408+#ifdef CONFIG_GRKERNSEC_HIDESYM
86409+ cookie[0] = 0;
86410+ cookie[1] = 0;
86411+#else
86412 cookie[0] = (u32)(unsigned long)sk;
86413 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
86414+#endif
86415 }
86416 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
86417
86418@@ -75,8 +82,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
86419 mutex_lock(&sock_diag_table_mutex);
86420 if (sock_diag_handlers[hndl->family])
86421 err = -EBUSY;
86422- else
86423+ else {
86424+ pax_open_kernel();
86425 sock_diag_handlers[hndl->family] = hndl;
86426+ pax_close_kernel();
86427+ }
86428 mutex_unlock(&sock_diag_table_mutex);
86429
86430 return err;
86431@@ -92,26 +102,13 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
86432
86433 mutex_lock(&sock_diag_table_mutex);
86434 BUG_ON(sock_diag_handlers[family] != hnld);
86435+ pax_open_kernel();
86436 sock_diag_handlers[family] = NULL;
86437+ pax_close_kernel();
86438 mutex_unlock(&sock_diag_table_mutex);
86439 }
86440 EXPORT_SYMBOL_GPL(sock_diag_unregister);
86441
86442-static const inline struct sock_diag_handler *sock_diag_lock_handler(int family)
86443-{
86444- if (sock_diag_handlers[family] == NULL)
86445- request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
86446- NETLINK_SOCK_DIAG, family);
86447-
86448- mutex_lock(&sock_diag_table_mutex);
86449- return sock_diag_handlers[family];
86450-}
86451-
86452-static inline void sock_diag_unlock_handler(const struct sock_diag_handler *h)
86453-{
86454- mutex_unlock(&sock_diag_table_mutex);
86455-}
86456-
86457 static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
86458 {
86459 int err;
86460@@ -124,12 +121,17 @@ static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
86461 if (req->sdiag_family >= AF_MAX)
86462 return -EINVAL;
86463
86464- hndl = sock_diag_lock_handler(req->sdiag_family);
86465+ if (sock_diag_handlers[req->sdiag_family] == NULL)
86466+ request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
86467+ NETLINK_SOCK_DIAG, req->sdiag_family);
86468+
86469+ mutex_lock(&sock_diag_table_mutex);
86470+ hndl = sock_diag_handlers[req->sdiag_family];
86471 if (hndl == NULL)
86472 err = -ENOENT;
86473 else
86474 err = hndl->dump(skb, nlh);
86475- sock_diag_unlock_handler(hndl);
86476+ mutex_unlock(&sock_diag_table_mutex);
86477
86478 return err;
86479 }
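
The sock_diag hunks pair the HIDESYM cookie change with a locking fix: sock_diag_handlers[] becomes __read_only and is written inside pax_open_kernel() sections, and the former sock_diag_lock_handler()/sock_diag_unlock_handler() pair is inlined so the mutex protects both the NULL check and the dump call. Under CONFIG_GRKERNSEC_HIDESYM the cookie no longer encodes the socket's kernel address, which would otherwise leak to userspace. A direct mirror of that hunk:

    #include <stdint.h>

    /* Mirror of sock_diag_save_cookie() above: the stock cookie packs
     * the socket's kernel pointer into two 32-bit words, HIDESYM zeroes
     * it instead.  DEMO_HIDESYM models CONFIG_GRKERNSEC_HIDESYM. */
    static void save_cookie(const void *sk, uint32_t cookie[2])
    {
    #ifdef DEMO_HIDESYM
        cookie[0] = 0;
        cookie[1] = 0;
    #else
        cookie[0] = (uint32_t)(uintptr_t)sk;
        /* >>31 then >>1 avoids an undefined 32-bit shift on 32-bit targets */
        cookie[1] = (uint32_t)((((uintptr_t)sk) >> 31) >> 1);
    #endif
    }
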
86480diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
86481index d1b0804..98cf5f7 100644
86482--- a/net/core/sysctl_net_core.c
86483+++ b/net/core/sysctl_net_core.c
86484@@ -26,7 +26,7 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write,
86485 {
86486 unsigned int orig_size, size;
86487 int ret, i;
86488- ctl_table tmp = {
86489+ ctl_table_no_const tmp = {
86490 .data = &size,
86491 .maxlen = sizeof(size),
86492 .mode = table->mode
86493@@ -205,13 +205,12 @@ static struct ctl_table netns_core_table[] = {
86494
86495 static __net_init int sysctl_core_net_init(struct net *net)
86496 {
86497- struct ctl_table *tbl;
86498+ ctl_table_no_const *tbl = NULL;
86499
86500 net->core.sysctl_somaxconn = SOMAXCONN;
86501
86502- tbl = netns_core_table;
86503 if (!net_eq(net, &init_net)) {
86504- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
86505+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
86506 if (tbl == NULL)
86507 goto err_dup;
86508
86509@@ -221,17 +220,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
86510 if (net->user_ns != &init_user_ns) {
86511 tbl[0].procname = NULL;
86512 }
86513- }
86514-
86515- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
86516+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
86517+ } else
86518+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
86519 if (net->core.sysctl_hdr == NULL)
86520 goto err_reg;
86521
86522 return 0;
86523
86524 err_reg:
86525- if (tbl != netns_core_table)
86526- kfree(tbl);
86527+ kfree(tbl);
86528 err_dup:
86529 return -ENOMEM;
86530 }
86531@@ -246,7 +244,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
86532 kfree(tbl);
86533 }
86534
86535-static __net_initdata struct pernet_operations sysctl_core_ops = {
86536+static __net_initconst struct pernet_operations sysctl_core_ops = {
86537 .init = sysctl_core_net_init,
86538 .exit = sysctl_core_net_exit,
86539 };
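
sysctl_core_net_init() introduces the shape that the later devinet.c, ip_fragment.c, route.c and sysctl_net_ipv4.c hunks repeat: the static template keeps its const qualification, init_net registers the template directly, every other namespace registers a kmemdup()'d mutable copy typed ctl_table_no_const, and the error path may call kfree(tbl) unconditionally because tbl stays NULL in the init_net branch and kfree(NULL) is a no-op. A self-contained userspace reduction of that ownership rule (struct and field names are invented):

    #include <stdlib.h>
    #include <string.h>

    struct demo_entry { const char *procname; int mode; };
    static const struct demo_entry demo_template[] = {
        { "somaxconn", 0644 },
        { NULL, 0 },
    };

    /* Returns the table to register; *owned is set when the caller must
     * free it.  The default namespace always uses the shared template. */
    static const struct demo_entry *instantiate(int is_init_ns, void **owned)
    {
        struct demo_entry *tbl = NULL;

        *owned = NULL;
        if (!is_init_ns) {
            tbl = malloc(sizeof(demo_template));            /* kmemdup() */
            if (tbl == NULL)
                return NULL;
            memcpy(tbl, demo_template, sizeof(demo_template));
            tbl[0].mode = 0444;       /* per-namespace tweak, copy only */
            *owned = tbl;
        }
        return tbl ? tbl : demo_template;
        /* cleanup: free(*owned) is always safe; it is NULL for init_ns */
    }
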
86540diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
86541index 307c322..78a4c6f 100644
86542--- a/net/decnet/af_decnet.c
86543+++ b/net/decnet/af_decnet.c
86544@@ -468,6 +468,7 @@ static struct proto dn_proto = {
86545 .sysctl_rmem = sysctl_decnet_rmem,
86546 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
86547 .obj_size = sizeof(struct dn_sock),
86548+ .slab_flags = SLAB_USERCOPY,
86549 };
86550
86551 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
86552diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
86553index a55eecc..dd8428c 100644
86554--- a/net/decnet/sysctl_net_decnet.c
86555+++ b/net/decnet/sysctl_net_decnet.c
86556@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
86557
86558 if (len > *lenp) len = *lenp;
86559
86560- if (copy_to_user(buffer, addr, len))
86561+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
86562 return -EFAULT;
86563
86564 *lenp = len;
86565@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
86566
86567 if (len > *lenp) len = *lenp;
86568
86569- if (copy_to_user(buffer, devname, len))
86570+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
86571 return -EFAULT;
86572
86573 *lenp = len;
86574diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
86575index fcf104e..95552d4 100644
86576--- a/net/ipv4/af_inet.c
86577+++ b/net/ipv4/af_inet.c
86578@@ -1717,13 +1717,9 @@ static int __init inet_init(void)
86579
86580 BUILD_BUG_ON(sizeof(struct inet_skb_parm) > sizeof(dummy_skb->cb));
86581
86582- sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
86583- if (!sysctl_local_reserved_ports)
86584- goto out;
86585-
86586 rc = proto_register(&tcp_prot, 1);
86587 if (rc)
86588- goto out_free_reserved_ports;
86589+ goto out;
86590
86591 rc = proto_register(&udp_prot, 1);
86592 if (rc)
86593@@ -1832,8 +1828,6 @@ out_unregister_udp_proto:
86594 proto_unregister(&udp_prot);
86595 out_unregister_tcp_proto:
86596 proto_unregister(&tcp_prot);
86597-out_free_reserved_ports:
86598- kfree(sysctl_local_reserved_ports);
86599 goto out;
86600 }
86601
86602diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
86603index a69b4e4..dbccba5 100644
86604--- a/net/ipv4/ah4.c
86605+++ b/net/ipv4/ah4.c
86606@@ -421,7 +421,7 @@ static void ah4_err(struct sk_buff *skb, u32 info)
86607 return;
86608
86609 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
86610- atomic_inc(&flow_cache_genid);
86611+ atomic_inc_unchecked(&flow_cache_genid);
86612 rt_genid_bump(net);
86613
86614 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0);
86615diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
86616index a8e4f26..25e5f40 100644
86617--- a/net/ipv4/devinet.c
86618+++ b/net/ipv4/devinet.c
86619@@ -1763,7 +1763,7 @@ static int ipv4_doint_and_flush(ctl_table *ctl, int write,
86620 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
86621 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
86622
86623-static struct devinet_sysctl_table {
86624+static const struct devinet_sysctl_table {
86625 struct ctl_table_header *sysctl_header;
86626 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
86627 } devinet_sysctl = {
86628@@ -1881,7 +1881,7 @@ static __net_init int devinet_init_net(struct net *net)
86629 int err;
86630 struct ipv4_devconf *all, *dflt;
86631 #ifdef CONFIG_SYSCTL
86632- struct ctl_table *tbl = ctl_forward_entry;
86633+ ctl_table_no_const *tbl = NULL;
86634 struct ctl_table_header *forw_hdr;
86635 #endif
86636
86637@@ -1899,7 +1899,7 @@ static __net_init int devinet_init_net(struct net *net)
86638 goto err_alloc_dflt;
86639
86640 #ifdef CONFIG_SYSCTL
86641- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
86642+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
86643 if (tbl == NULL)
86644 goto err_alloc_ctl;
86645
86646@@ -1919,7 +1919,10 @@ static __net_init int devinet_init_net(struct net *net)
86647 goto err_reg_dflt;
86648
86649 err = -ENOMEM;
86650- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
86651+ if (!net_eq(net, &init_net))
86652+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
86653+ else
86654+ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
86655 if (forw_hdr == NULL)
86656 goto err_reg_ctl;
86657 net->ipv4.forw_hdr = forw_hdr;
86658@@ -1935,8 +1938,7 @@ err_reg_ctl:
86659 err_reg_dflt:
86660 __devinet_sysctl_unregister(all);
86661 err_reg_all:
86662- if (tbl != ctl_forward_entry)
86663- kfree(tbl);
86664+ kfree(tbl);
86665 err_alloc_ctl:
86666 #endif
86667 if (dflt != &ipv4_devconf_dflt)
86668diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
86669index 3b4f0cd..8cb864c 100644
86670--- a/net/ipv4/esp4.c
86671+++ b/net/ipv4/esp4.c
86672@@ -503,7 +503,7 @@ static void esp4_err(struct sk_buff *skb, u32 info)
86673 return;
86674
86675 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
86676- atomic_inc(&flow_cache_genid);
86677+ atomic_inc_unchecked(&flow_cache_genid);
86678 rt_genid_bump(net);
86679
86680 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
86681diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
86682index 5cd75e2..f57ef39 100644
86683--- a/net/ipv4/fib_frontend.c
86684+++ b/net/ipv4/fib_frontend.c
86685@@ -1020,12 +1020,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
86686 #ifdef CONFIG_IP_ROUTE_MULTIPATH
86687 fib_sync_up(dev);
86688 #endif
86689- atomic_inc(&net->ipv4.dev_addr_genid);
86690+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
86691 rt_cache_flush(dev_net(dev));
86692 break;
86693 case NETDEV_DOWN:
86694 fib_del_ifaddr(ifa, NULL);
86695- atomic_inc(&net->ipv4.dev_addr_genid);
86696+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
86697 if (ifa->ifa_dev->ifa_list == NULL) {
86698 /* Last address was deleted from this interface.
86699 * Disable IP.
86700@@ -1061,7 +1061,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
86701 #ifdef CONFIG_IP_ROUTE_MULTIPATH
86702 fib_sync_up(dev);
86703 #endif
86704- atomic_inc(&net->ipv4.dev_addr_genid);
86705+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
86706 rt_cache_flush(net);
86707 break;
86708 case NETDEV_DOWN:
86709diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
86710index 4797a80..2bd54e9 100644
86711--- a/net/ipv4/fib_semantics.c
86712+++ b/net/ipv4/fib_semantics.c
86713@@ -767,7 +767,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
86714 nh->nh_saddr = inet_select_addr(nh->nh_dev,
86715 nh->nh_gw,
86716 nh->nh_parent->fib_scope);
86717- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
86718+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
86719
86720 return nh->nh_saddr;
86721 }
86722diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
86723index d0670f0..744ac80 100644
86724--- a/net/ipv4/inet_connection_sock.c
86725+++ b/net/ipv4/inet_connection_sock.c
86726@@ -37,7 +37,7 @@ struct local_ports sysctl_local_ports __read_mostly = {
86727 .range = { 32768, 61000 },
86728 };
86729
86730-unsigned long *sysctl_local_reserved_ports;
86731+unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
86732 EXPORT_SYMBOL(sysctl_local_reserved_ports);
86733
86734 void inet_get_local_port_range(int *low, int *high)
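
Together with the af_inet.c hunk above that drops the kzalloc() and the sysctl_net_ipv4.c hunk below that wires .data to the array, this turns the reserved-ports bitmap into a static object: one bit per TCP/UDP port, sized at compile time, with no boot-time allocation to fail and no pointer to corrupt. The sizing arithmetic:

    #include <stdio.h>

    /* One bit per port: 65536 bits -> 8192 bytes -> 1024 unsigned longs
     * on LP64 (2048 on 32-bit), exactly the declaration in the hunk. */
    int main(void)
    {
        unsigned long bitmap[65536 / 8 / sizeof(unsigned long)];

        printf("%zu bytes in %zu words\n",
               sizeof(bitmap), sizeof(bitmap) / sizeof(bitmap[0]));
        return 0;
    }
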
86735diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
86736index fa3ae81..0dbe6b8 100644
86737--- a/net/ipv4/inet_hashtables.c
86738+++ b/net/ipv4/inet_hashtables.c
86739@@ -18,12 +18,15 @@
86740 #include <linux/sched.h>
86741 #include <linux/slab.h>
86742 #include <linux/wait.h>
86743+#include <linux/security.h>
86744
86745 #include <net/inet_connection_sock.h>
86746 #include <net/inet_hashtables.h>
86747 #include <net/secure_seq.h>
86748 #include <net/ip.h>
86749
86750+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
86751+
86752 /*
86753 * Allocate and initialize a new local port bind bucket.
86754 * The bindhash mutex for snum's hash chain must be held here.
86755@@ -540,6 +543,8 @@ ok:
86756 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
86757 spin_unlock(&head->lock);
86758
86759+ gr_update_task_in_ip_table(current, inet_sk(sk));
86760+
86761 if (tw) {
86762 inet_twsk_deschedule(tw, death_row);
86763 while (twrefcnt) {
86764diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
86765index 000e3d2..5472da3 100644
86766--- a/net/ipv4/inetpeer.c
86767+++ b/net/ipv4/inetpeer.c
86768@@ -503,8 +503,8 @@ relookup:
86769 if (p) {
86770 p->daddr = *daddr;
86771 atomic_set(&p->refcnt, 1);
86772- atomic_set(&p->rid, 0);
86773- atomic_set(&p->ip_id_count,
86774+ atomic_set_unchecked(&p->rid, 0);
86775+ atomic_set_unchecked(&p->ip_id_count,
86776 (daddr->family == AF_INET) ?
86777 secure_ip_id(daddr->addr.a4) :
86778 secure_ipv6_id(daddr->addr.a6));
86779diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
86780index a8fc332..4ca4ca65 100644
86781--- a/net/ipv4/ip_fragment.c
86782+++ b/net/ipv4/ip_fragment.c
86783@@ -319,7 +319,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
86784 return 0;
86785
86786 start = qp->rid;
86787- end = atomic_inc_return(&peer->rid);
86788+ end = atomic_inc_return_unchecked(&peer->rid);
86789 qp->rid = end;
86790
86791 rc = qp->q.fragments && (end - start) > max;
86792@@ -786,12 +786,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
86793
86794 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
86795 {
86796- struct ctl_table *table;
86797+ ctl_table_no_const *table = NULL;
86798 struct ctl_table_header *hdr;
86799
86800- table = ip4_frags_ns_ctl_table;
86801 if (!net_eq(net, &init_net)) {
86802- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
86803+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
86804 if (table == NULL)
86805 goto err_alloc;
86806
86807@@ -802,9 +801,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
86808 /* Don't export sysctls to unprivileged users */
86809 if (net->user_ns != &init_user_ns)
86810 table[0].procname = NULL;
86811- }
86812+ hdr = register_net_sysctl(net, "net/ipv4", table);
86813+ } else
86814+ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
86815
86816- hdr = register_net_sysctl(net, "net/ipv4", table);
86817 if (hdr == NULL)
86818 goto err_reg;
86819
86820@@ -812,8 +812,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
86821 return 0;
86822
86823 err_reg:
86824- if (!net_eq(net, &init_net))
86825- kfree(table);
86826+ kfree(table);
86827 err_alloc:
86828 return -ENOMEM;
86829 }
86830diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
86831index a85062b..2958a9b 100644
86832--- a/net/ipv4/ip_gre.c
86833+++ b/net/ipv4/ip_gre.c
86834@@ -124,7 +124,7 @@ static bool log_ecn_error = true;
86835 module_param(log_ecn_error, bool, 0644);
86836 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
86837
86838-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
86839+static struct rtnl_link_ops ipgre_link_ops;
86840 static int ipgre_tunnel_init(struct net_device *dev);
86841 static void ipgre_tunnel_setup(struct net_device *dev);
86842 static int ipgre_tunnel_bind_dev(struct net_device *dev);
86843@@ -1753,7 +1753,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
86844 [IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
86845 };
86846
86847-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
86848+static struct rtnl_link_ops ipgre_link_ops = {
86849 .kind = "gre",
86850 .maxtype = IFLA_GRE_MAX,
86851 .policy = ipgre_policy,
86852@@ -1766,7 +1766,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
86853 .fill_info = ipgre_fill_info,
86854 };
86855
86856-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
86857+static struct rtnl_link_ops ipgre_tap_ops = {
86858 .kind = "gretap",
86859 .maxtype = IFLA_GRE_MAX,
86860 .policy = ipgre_policy,
86861diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
86862index d9c4f11..02b82dbc 100644
86863--- a/net/ipv4/ip_sockglue.c
86864+++ b/net/ipv4/ip_sockglue.c
86865@@ -1152,7 +1152,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
86866 len = min_t(unsigned int, len, opt->optlen);
86867 if (put_user(len, optlen))
86868 return -EFAULT;
86869- if (copy_to_user(optval, opt->__data, len))
86870+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
86871+ copy_to_user(optval, opt->__data, len))
86872 return -EFAULT;
86873 return 0;
86874 }
86875@@ -1283,7 +1284,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
86876 if (sk->sk_type != SOCK_STREAM)
86877 return -ENOPROTOOPT;
86878
86879- msg.msg_control = optval;
86880+ msg.msg_control = (void __force_kernel *)optval;
86881 msg.msg_controllen = len;
86882 msg.msg_flags = flags;
86883
86884diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
86885index c3a4233..1412161 100644
86886--- a/net/ipv4/ip_vti.c
86887+++ b/net/ipv4/ip_vti.c
86888@@ -47,7 +47,7 @@
86889 #define HASH_SIZE 16
86890 #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&(HASH_SIZE-1))
86891
86892-static struct rtnl_link_ops vti_link_ops __read_mostly;
86893+static struct rtnl_link_ops vti_link_ops;
86894
86895 static int vti_net_id __read_mostly;
86896 struct vti_net {
86897@@ -886,7 +886,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
86898 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
86899 };
86900
86901-static struct rtnl_link_ops vti_link_ops __read_mostly = {
86902+static struct rtnl_link_ops vti_link_ops = {
86903 .kind = "vti",
86904 .maxtype = IFLA_VTI_MAX,
86905 .policy = vti_policy,
86906diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
86907index 9a46dae..5f793a0 100644
86908--- a/net/ipv4/ipcomp.c
86909+++ b/net/ipv4/ipcomp.c
86910@@ -48,7 +48,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
86911 return;
86912
86913 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
86914- atomic_inc(&flow_cache_genid);
86915+ atomic_inc_unchecked(&flow_cache_genid);
86916 rt_genid_bump(net);
86917
86918 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_COMP, 0);
86919diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
86920index a2e50ae..e152b7c 100644
86921--- a/net/ipv4/ipconfig.c
86922+++ b/net/ipv4/ipconfig.c
86923@@ -323,7 +323,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
86924
86925 mm_segment_t oldfs = get_fs();
86926 set_fs(get_ds());
86927- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
86928+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
86929 set_fs(oldfs);
86930 return res;
86931 }
86932@@ -334,7 +334,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
86933
86934 mm_segment_t oldfs = get_fs();
86935 set_fs(get_ds());
86936- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
86937+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
86938 set_fs(oldfs);
86939 return res;
86940 }
86941@@ -345,7 +345,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
86942
86943 mm_segment_t oldfs = get_fs();
86944 set_fs(get_ds());
86945- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
86946+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
86947 set_fs(oldfs);
86948 return res;
86949 }
86950diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
86951index 191fc24..1b3b804 100644
86952--- a/net/ipv4/ipip.c
86953+++ b/net/ipv4/ipip.c
86954@@ -138,7 +138,7 @@ struct ipip_net {
86955 static int ipip_tunnel_init(struct net_device *dev);
86956 static void ipip_tunnel_setup(struct net_device *dev);
86957 static void ipip_dev_free(struct net_device *dev);
86958-static struct rtnl_link_ops ipip_link_ops __read_mostly;
86959+static struct rtnl_link_ops ipip_link_ops;
86960
86961 static struct rtnl_link_stats64 *ipip_get_stats64(struct net_device *dev,
86962 struct rtnl_link_stats64 *tot)
86963@@ -972,7 +972,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
86964 [IFLA_IPTUN_PMTUDISC] = { .type = NLA_U8 },
86965 };
86966
86967-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
86968+static struct rtnl_link_ops ipip_link_ops = {
86969 .kind = "ipip",
86970 .maxtype = IFLA_IPTUN_MAX,
86971 .policy = ipip_policy,
86972diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
86973index 3ea4127..849297b 100644
86974--- a/net/ipv4/netfilter/arp_tables.c
86975+++ b/net/ipv4/netfilter/arp_tables.c
86976@@ -879,14 +879,14 @@ static int compat_table_info(const struct xt_table_info *info,
86977 #endif
86978
86979 static int get_info(struct net *net, void __user *user,
86980- const int *len, int compat)
86981+ int len, int compat)
86982 {
86983 char name[XT_TABLE_MAXNAMELEN];
86984 struct xt_table *t;
86985 int ret;
86986
86987- if (*len != sizeof(struct arpt_getinfo)) {
86988- duprintf("length %u != %Zu\n", *len,
86989+ if (len != sizeof(struct arpt_getinfo)) {
86990+ duprintf("length %u != %Zu\n", len,
86991 sizeof(struct arpt_getinfo));
86992 return -EINVAL;
86993 }
86994@@ -923,7 +923,7 @@ static int get_info(struct net *net, void __user *user,
86995 info.size = private->size;
86996 strcpy(info.name, name);
86997
86998- if (copy_to_user(user, &info, *len) != 0)
86999+ if (copy_to_user(user, &info, len) != 0)
87000 ret = -EFAULT;
87001 else
87002 ret = 0;
87003@@ -1682,7 +1682,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
87004
87005 switch (cmd) {
87006 case ARPT_SO_GET_INFO:
87007- ret = get_info(sock_net(sk), user, len, 1);
87008+ ret = get_info(sock_net(sk), user, *len, 1);
87009 break;
87010 case ARPT_SO_GET_ENTRIES:
87011 ret = compat_get_entries(sock_net(sk), user, len);
87012@@ -1727,7 +1727,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
87013
87014 switch (cmd) {
87015 case ARPT_SO_GET_INFO:
87016- ret = get_info(sock_net(sk), user, len, 0);
87017+ ret = get_info(sock_net(sk), user, *len, 0);
87018 break;
87019
87020 case ARPT_SO_GET_ENTRIES:
87021diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
87022index 17c5e06..1b91206 100644
87023--- a/net/ipv4/netfilter/ip_tables.c
87024+++ b/net/ipv4/netfilter/ip_tables.c
87025@@ -1068,14 +1068,14 @@ static int compat_table_info(const struct xt_table_info *info,
87026 #endif
87027
87028 static int get_info(struct net *net, void __user *user,
87029- const int *len, int compat)
87030+ int len, int compat)
87031 {
87032 char name[XT_TABLE_MAXNAMELEN];
87033 struct xt_table *t;
87034 int ret;
87035
87036- if (*len != sizeof(struct ipt_getinfo)) {
87037- duprintf("length %u != %zu\n", *len,
87038+ if (len != sizeof(struct ipt_getinfo)) {
87039+ duprintf("length %u != %zu\n", len,
87040 sizeof(struct ipt_getinfo));
87041 return -EINVAL;
87042 }
87043@@ -1112,7 +1112,7 @@ static int get_info(struct net *net, void __user *user,
87044 info.size = private->size;
87045 strcpy(info.name, name);
87046
87047- if (copy_to_user(user, &info, *len) != 0)
87048+ if (copy_to_user(user, &info, len) != 0)
87049 ret = -EFAULT;
87050 else
87051 ret = 0;
87052@@ -1966,7 +1966,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
87053
87054 switch (cmd) {
87055 case IPT_SO_GET_INFO:
87056- ret = get_info(sock_net(sk), user, len, 1);
87057+ ret = get_info(sock_net(sk), user, *len, 1);
87058 break;
87059 case IPT_SO_GET_ENTRIES:
87060 ret = compat_get_entries(sock_net(sk), user, len);
87061@@ -2013,7 +2013,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
87062
87063 switch (cmd) {
87064 case IPT_SO_GET_INFO:
87065- ret = get_info(sock_net(sk), user, len, 0);
87066+ ret = get_info(sock_net(sk), user, *len, 0);
87067 break;
87068
87069 case IPT_SO_GET_ENTRIES:
87070diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
87071index dc454cc..5bb917f 100644
87072--- a/net/ipv4/ping.c
87073+++ b/net/ipv4/ping.c
87074@@ -844,7 +844,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
87075 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
87076 0, sock_i_ino(sp),
87077 atomic_read(&sp->sk_refcnt), sp,
87078- atomic_read(&sp->sk_drops), len);
87079+ atomic_read_unchecked(&sp->sk_drops), len);
87080 }
87081
87082 static int ping_seq_show(struct seq_file *seq, void *v)
87083diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
87084index 6f08991..55867ad 100644
87085--- a/net/ipv4/raw.c
87086+++ b/net/ipv4/raw.c
87087@@ -311,7 +311,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
87088 int raw_rcv(struct sock *sk, struct sk_buff *skb)
87089 {
87090 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
87091- atomic_inc(&sk->sk_drops);
87092+ atomic_inc_unchecked(&sk->sk_drops);
87093 kfree_skb(skb);
87094 return NET_RX_DROP;
87095 }
87096@@ -747,16 +747,20 @@ static int raw_init(struct sock *sk)
87097
87098 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
87099 {
87100+ struct icmp_filter filter;
87101+
87102 if (optlen > sizeof(struct icmp_filter))
87103 optlen = sizeof(struct icmp_filter);
87104- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
87105+ if (copy_from_user(&filter, optval, optlen))
87106 return -EFAULT;
87107+ raw_sk(sk)->filter = filter;
87108 return 0;
87109 }
87110
87111 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
87112 {
87113 int len, ret = -EFAULT;
87114+ struct icmp_filter filter;
87115
87116 if (get_user(len, optlen))
87117 goto out;
87118@@ -766,8 +770,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
87119 if (len > sizeof(struct icmp_filter))
87120 len = sizeof(struct icmp_filter);
87121 ret = -EFAULT;
87122- if (put_user(len, optlen) ||
87123- copy_to_user(optval, &raw_sk(sk)->filter, len))
87124+ filter = raw_sk(sk)->filter;
87125+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
87126 goto out;
87127 ret = 0;
87128 out: return ret;
87129@@ -998,7 +1002,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
87130 0, 0L, 0,
87131 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
87132 0, sock_i_ino(sp),
87133- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
87134+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
87135 }
87136
87137 static int raw_seq_show(struct seq_file *seq, void *v)
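
raw_seticmpfilter() previously let copy_from_user() write straight into the socket's live filter, so a fault partway through left it half-updated while the transient state was visible to the receive path; the hunk stages the copy in a stack local and commits with one assignment only on success. raw_geticmpfilter() mirrors this with a local snapshot plus an explicit "len > sizeof filter" bound. The staging idiom in a self-contained sketch (the fake copy helper never fails, unlike copy_from_user()):

    #include <string.h>
    #include <errno.h>

    struct demo_filter { unsigned int data[8]; };
    static struct demo_filter live_filter;   /* plays raw_sk(sk)->filter */

    static int copy_in(void *dst, const void *src, size_t n)
    {
        memcpy(dst, src, n);  /* copy_from_user() stand-in; cannot fault here */
        return 0;
    }

    static int set_filter(const void *optval, size_t optlen)
    {
        struct demo_filter filter;

        if (optlen > sizeof(filter))
            optlen = sizeof(filter);
        if (copy_in(&filter, optval, optlen))
            return -EFAULT;
        live_filter = filter;  /* commit only after the full copy succeeded */
        return 0;
    }
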
87138diff --git a/net/ipv4/route.c b/net/ipv4/route.c
87139index a0fcc47..32e2c89 100644
87140--- a/net/ipv4/route.c
87141+++ b/net/ipv4/route.c
87142@@ -2552,34 +2552,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
87143 .maxlen = sizeof(int),
87144 .mode = 0200,
87145 .proc_handler = ipv4_sysctl_rtcache_flush,
87146+ .extra1 = &init_net,
87147 },
87148 { },
87149 };
87150
87151 static __net_init int sysctl_route_net_init(struct net *net)
87152 {
87153- struct ctl_table *tbl;
87154+ ctl_table_no_const *tbl = NULL;
87155
87156- tbl = ipv4_route_flush_table;
87157 if (!net_eq(net, &init_net)) {
87158- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
87159+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
87160 if (tbl == NULL)
87161 goto err_dup;
87162
87163 /* Don't export sysctls to unprivileged users */
87164 if (net->user_ns != &init_user_ns)
87165 tbl[0].procname = NULL;
87166- }
87167- tbl[0].extra1 = net;
87168+ tbl[0].extra1 = net;
87169+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
87170+ } else
87171+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
87172
87173- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
87174 if (net->ipv4.route_hdr == NULL)
87175 goto err_reg;
87176 return 0;
87177
87178 err_reg:
87179- if (tbl != ipv4_route_flush_table)
87180- kfree(tbl);
87181+ kfree(tbl);
87182 err_dup:
87183 return -ENOMEM;
87184 }
87185@@ -2602,7 +2602,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
87186
87187 static __net_init int rt_genid_init(struct net *net)
87188 {
87189- atomic_set(&net->rt_genid, 0);
87190+ atomic_set_unchecked(&net->rt_genid, 0);
87191 get_random_bytes(&net->ipv4.dev_addr_genid,
87192 sizeof(net->ipv4.dev_addr_genid));
87193 return 0;
87194diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
87195index d84400b..62e066e 100644
87196--- a/net/ipv4/sysctl_net_ipv4.c
87197+++ b/net/ipv4/sysctl_net_ipv4.c
87198@@ -54,7 +54,7 @@ static int ipv4_local_port_range(ctl_table *table, int write,
87199 {
87200 int ret;
87201 int range[2];
87202- ctl_table tmp = {
87203+ ctl_table_no_const tmp = {
87204 .data = &range,
87205 .maxlen = sizeof(range),
87206 .mode = table->mode,
87207@@ -107,7 +107,7 @@ static int ipv4_ping_group_range(ctl_table *table, int write,
87208 int ret;
87209 gid_t urange[2];
87210 kgid_t low, high;
87211- ctl_table tmp = {
87212+ ctl_table_no_const tmp = {
87213 .data = &urange,
87214 .maxlen = sizeof(urange),
87215 .mode = table->mode,
87216@@ -138,7 +138,7 @@ static int proc_tcp_congestion_control(ctl_table *ctl, int write,
87217 void __user *buffer, size_t *lenp, loff_t *ppos)
87218 {
87219 char val[TCP_CA_NAME_MAX];
87220- ctl_table tbl = {
87221+ ctl_table_no_const tbl = {
87222 .data = val,
87223 .maxlen = TCP_CA_NAME_MAX,
87224 };
87225@@ -157,7 +157,7 @@ static int proc_tcp_available_congestion_control(ctl_table *ctl,
87226 void __user *buffer, size_t *lenp,
87227 loff_t *ppos)
87228 {
87229- ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
87230+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
87231 int ret;
87232
87233 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
87234@@ -174,7 +174,7 @@ static int proc_allowed_congestion_control(ctl_table *ctl,
87235 void __user *buffer, size_t *lenp,
87236 loff_t *ppos)
87237 {
87238- ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
87239+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
87240 int ret;
87241
87242 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
87243@@ -200,15 +200,17 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
87244 struct mem_cgroup *memcg;
87245 #endif
87246
87247- ctl_table tmp = {
87248+ ctl_table_no_const tmp = {
87249 .data = &vec,
87250 .maxlen = sizeof(vec),
87251 .mode = ctl->mode,
87252 };
87253
87254 if (!write) {
87255- ctl->data = &net->ipv4.sysctl_tcp_mem;
87256- return proc_doulongvec_minmax(ctl, write, buffer, lenp, ppos);
87257+ ctl_table_no_const tcp_mem = *ctl;
87258+
87259+ tcp_mem.data = &net->ipv4.sysctl_tcp_mem;
87260+ return proc_doulongvec_minmax(&tcp_mem, write, buffer, lenp, ppos);
87261 }
87262
87263 ret = proc_doulongvec_minmax(&tmp, write, buffer, lenp, ppos);
87264@@ -235,7 +237,7 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
87265 int proc_tcp_fastopen_key(ctl_table *ctl, int write, void __user *buffer,
87266 size_t *lenp, loff_t *ppos)
87267 {
87268- ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
87269+ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
87270 struct tcp_fastopen_context *ctxt;
87271 int ret;
87272 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
87273@@ -476,7 +478,7 @@ static struct ctl_table ipv4_table[] = {
87274 },
87275 {
87276 .procname = "ip_local_reserved_ports",
87277- .data = NULL, /* initialized in sysctl_ipv4_init */
87278+ .data = sysctl_local_reserved_ports,
87279 .maxlen = 65536,
87280 .mode = 0644,
87281 .proc_handler = proc_do_large_bitmap,
87282@@ -860,11 +862,10 @@ static struct ctl_table ipv4_net_table[] = {
87283
87284 static __net_init int ipv4_sysctl_init_net(struct net *net)
87285 {
87286- struct ctl_table *table;
87287+ ctl_table_no_const *table = NULL;
87288
87289- table = ipv4_net_table;
87290 if (!net_eq(net, &init_net)) {
87291- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
87292+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
87293 if (table == NULL)
87294 goto err_alloc;
87295
87296@@ -897,15 +898,17 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
87297
87298 tcp_init_mem(net);
87299
87300- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
87301+ if (!net_eq(net, &init_net))
87302+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
87303+ else
87304+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
87305 if (net->ipv4.ipv4_hdr == NULL)
87306 goto err_reg;
87307
87308 return 0;
87309
87310 err_reg:
87311- if (!net_eq(net, &init_net))
87312- kfree(table);
87313+ kfree(table);
87314 err_alloc:
87315 return -ENOMEM;
87316 }
87317@@ -927,16 +930,6 @@ static __net_initdata struct pernet_operations ipv4_sysctl_ops = {
87318 static __init int sysctl_ipv4_init(void)
87319 {
87320 struct ctl_table_header *hdr;
87321- struct ctl_table *i;
87322-
87323- for (i = ipv4_table; i->procname; i++) {
87324- if (strcmp(i->procname, "ip_local_reserved_ports") == 0) {
87325- i->data = sysctl_local_reserved_ports;
87326- break;
87327- }
87328- }
87329- if (!i->procname)
87330- return -EINVAL;
87331
87332 hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table);
87333 if (hdr == NULL)
87334diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
87335index 9841a71..ef60409 100644
87336--- a/net/ipv4/tcp_input.c
87337+++ b/net/ipv4/tcp_input.c
87338@@ -4730,7 +4730,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
87339 * simplifies code)
87340 */
87341 static void
87342-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
87343+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
87344 struct sk_buff *head, struct sk_buff *tail,
87345 u32 start, u32 end)
87346 {
87347@@ -5847,6 +5847,7 @@ discard:
87348 tcp_paws_reject(&tp->rx_opt, 0))
87349 goto discard_and_undo;
87350
87351+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
87352 if (th->syn) {
87353 /* We see SYN without ACK. It is attempt of
87354 * simultaneous connect with crossed SYNs.
87355@@ -5897,6 +5898,7 @@ discard:
87356 goto discard;
87357 #endif
87358 }
87359+#endif
87360 /* "fifth, if neither of the SYN or RST bits is set then
87361 * drop the segment and return."
87362 */
87363@@ -5941,7 +5943,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
87364 goto discard;
87365
87366 if (th->syn) {
87367- if (th->fin)
87368+ if (th->fin || th->urg || th->psh)
87369 goto discard;
87370 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
87371 return 1;
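
Besides gating the simultaneous-connect handling behind CONFIG_GRKERNSEC_NO_SIMULT_CONNECT, the tcp_rcv_state_process() hunk widens the existing SYN+FIN drop to also discard initial SYNs carrying URG or PSH, flag combinations no legitimate connection attempt sets. As a predicate:

    /* Flag predicate matching the widened check in the hunk above:
     * an initial SYN combined with FIN, URG or PSH is discarded. */
    struct demo_tcp_flags { unsigned syn:1, fin:1, urg:1, psh:1; };

    static int bogus_initial_syn(const struct demo_tcp_flags *th)
    {
        return th->syn && (th->fin || th->urg || th->psh);
    }
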
87372diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
87373index d9130a9..00328ff 100644
87374--- a/net/ipv4/tcp_ipv4.c
87375+++ b/net/ipv4/tcp_ipv4.c
87376@@ -90,6 +90,10 @@ int sysctl_tcp_low_latency __read_mostly;
87377 EXPORT_SYMBOL(sysctl_tcp_low_latency);
87378
87379
87380+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87381+extern int grsec_enable_blackhole;
87382+#endif
87383+
87384 #ifdef CONFIG_TCP_MD5SIG
87385 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
87386 __be32 daddr, __be32 saddr, const struct tcphdr *th);
87387@@ -1895,6 +1899,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
87388 return 0;
87389
87390 reset:
87391+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87392+ if (!grsec_enable_blackhole)
87393+#endif
87394 tcp_v4_send_reset(rsk, skb);
87395 discard:
87396 kfree_skb(skb);
87397@@ -1994,12 +2001,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
87398 TCP_SKB_CB(skb)->sacked = 0;
87399
87400 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
87401- if (!sk)
87402+ if (!sk) {
87403+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87404+ ret = 1;
87405+#endif
87406 goto no_tcp_socket;
87407-
87408+ }
87409 process:
87410- if (sk->sk_state == TCP_TIME_WAIT)
87411+ if (sk->sk_state == TCP_TIME_WAIT) {
87412+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87413+ ret = 2;
87414+#endif
87415 goto do_time_wait;
87416+ }
87417
87418 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
87419 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
87420@@ -2050,6 +2064,10 @@ no_tcp_socket:
87421 bad_packet:
87422 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
87423 } else {
87424+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87425+ if (!grsec_enable_blackhole || (ret == 1 &&
87426+ (skb->dev->flags & IFF_LOOPBACK)))
87427+#endif
87428 tcp_v4_send_reset(NULL, skb);
87429 }
87430
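
The GRKERNSEC_BLACKHOLE hunks make tcp_v4_rcv() remember why a segment fell through: ret becomes 1 when no socket matched and 2 for TIME_WAIT, and the reset at the bottom is then sent only when blackholing is disabled, or when an unmatched segment arrived on a loopback device (where silence would just break local tooling). Reduced to its decision function:

    /* Decision mirrored from the hunk above: suppress the RST unless
     * blackholing is off, or the failed lookup (ret == 1) happened on
     * a loopback interface. */
    static int should_send_reset(int blackhole_enabled, int ret, int on_loopback)
    {
        return !blackhole_enabled || (ret == 1 && on_loopback);
    }
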
87431diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
87432index f35f2df..ccb5ca6 100644
87433--- a/net/ipv4/tcp_minisocks.c
87434+++ b/net/ipv4/tcp_minisocks.c
87435@@ -27,6 +27,10 @@
87436 #include <net/inet_common.h>
87437 #include <net/xfrm.h>
87438
87439+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87440+extern int grsec_enable_blackhole;
87441+#endif
87442+
87443 int sysctl_tcp_syncookies __read_mostly = 1;
87444 EXPORT_SYMBOL(sysctl_tcp_syncookies);
87445
87446@@ -742,7 +746,10 @@ embryonic_reset:
87447 * avoid becoming vulnerable to outside attack aiming at
87448 * resetting legit local connections.
87449 */
87450- req->rsk_ops->send_reset(sk, skb);
87451+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87452+ if (!grsec_enable_blackhole)
87453+#endif
87454+ req->rsk_ops->send_reset(sk, skb);
87455 } else if (fastopen) { /* received a valid RST pkt */
87456 reqsk_fastopen_remove(sk, req, true);
87457 tcp_reset(sk);
87458diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
87459index 4526fe6..1a34e43 100644
87460--- a/net/ipv4/tcp_probe.c
87461+++ b/net/ipv4/tcp_probe.c
87462@@ -204,7 +204,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
87463 if (cnt + width >= len)
87464 break;
87465
87466- if (copy_to_user(buf + cnt, tbuf, width))
87467+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
87468 return -EFAULT;
87469 cnt += width;
87470 }
87471diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
87472index b78aac3..e18230b 100644
87473--- a/net/ipv4/tcp_timer.c
87474+++ b/net/ipv4/tcp_timer.c
87475@@ -22,6 +22,10 @@
87476 #include <linux/gfp.h>
87477 #include <net/tcp.h>
87478
87479+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87480+extern int grsec_lastack_retries;
87481+#endif
87482+
87483 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
87484 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
87485 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
87486@@ -185,6 +189,13 @@ static int tcp_write_timeout(struct sock *sk)
87487 }
87488 }
87489
87490+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87491+ if ((sk->sk_state == TCP_LAST_ACK) &&
87492+ (grsec_lastack_retries > 0) &&
87493+ (grsec_lastack_retries < retry_until))
87494+ retry_until = grsec_lastack_retries;
87495+#endif
87496+
87497 if (retransmits_timed_out(sk, retry_until,
87498 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
87499 /* Has it gone just too far? */
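
tcp_write_timeout() gains a clamp: when the grsec_lastack_retries sysctl is set and tighter than the computed retry budget, sockets stuck in LAST-ACK give up sooner, bounding how long an unresponsive peer can pin an almost-closed connection. The clamp in isolation:

    /* The LAST-ACK retry clamp from the hunk above, as a pure function. */
    static int clamp_lastack(int in_last_ack, int lastack_retries, int retry_until)
    {
        if (in_last_ack && lastack_retries > 0 && lastack_retries < retry_until)
            return lastack_retries;
        return retry_until;
    }
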
87500diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
87501index 1f4d405..3524677 100644
87502--- a/net/ipv4/udp.c
87503+++ b/net/ipv4/udp.c
87504@@ -87,6 +87,7 @@
87505 #include <linux/types.h>
87506 #include <linux/fcntl.h>
87507 #include <linux/module.h>
87508+#include <linux/security.h>
87509 #include <linux/socket.h>
87510 #include <linux/sockios.h>
87511 #include <linux/igmp.h>
87512@@ -111,6 +112,10 @@
87513 #include <trace/events/skb.h>
87514 #include "udp_impl.h"
87515
87516+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87517+extern int grsec_enable_blackhole;
87518+#endif
87519+
87520 struct udp_table udp_table __read_mostly;
87521 EXPORT_SYMBOL(udp_table);
87522
87523@@ -569,6 +574,9 @@ found:
87524 return s;
87525 }
87526
87527+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
87528+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
87529+
87530 /*
87531 * This routine is called by the ICMP module when it gets some
87532 * sort of error condition. If err < 0 then the socket should
87533@@ -864,9 +872,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
87534 dport = usin->sin_port;
87535 if (dport == 0)
87536 return -EINVAL;
87537+
87538+ err = gr_search_udp_sendmsg(sk, usin);
87539+ if (err)
87540+ return err;
87541 } else {
87542 if (sk->sk_state != TCP_ESTABLISHED)
87543 return -EDESTADDRREQ;
87544+
87545+ err = gr_search_udp_sendmsg(sk, NULL);
87546+ if (err)
87547+ return err;
87548+
87549 daddr = inet->inet_daddr;
87550 dport = inet->inet_dport;
87551 /* Open fast path for connected socket.
87552@@ -1108,7 +1125,7 @@ static unsigned int first_packet_length(struct sock *sk)
87553 udp_lib_checksum_complete(skb)) {
87554 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
87555 IS_UDPLITE(sk));
87556- atomic_inc(&sk->sk_drops);
87557+ atomic_inc_unchecked(&sk->sk_drops);
87558 __skb_unlink(skb, rcvq);
87559 __skb_queue_tail(&list_kill, skb);
87560 }
87561@@ -1194,6 +1211,10 @@ try_again:
87562 if (!skb)
87563 goto out;
87564
87565+ err = gr_search_udp_recvmsg(sk, skb);
87566+ if (err)
87567+ goto out_free;
87568+
87569 ulen = skb->len - sizeof(struct udphdr);
87570 copied = len;
87571 if (copied > ulen)
87572@@ -1227,7 +1248,7 @@ try_again:
87573 if (unlikely(err)) {
87574 trace_kfree_skb(skb, udp_recvmsg);
87575 if (!peeked) {
87576- atomic_inc(&sk->sk_drops);
87577+ atomic_inc_unchecked(&sk->sk_drops);
87578 UDP_INC_STATS_USER(sock_net(sk),
87579 UDP_MIB_INERRORS, is_udplite);
87580 }
87581@@ -1510,7 +1531,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
87582
87583 drop:
87584 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
87585- atomic_inc(&sk->sk_drops);
87586+ atomic_inc_unchecked(&sk->sk_drops);
87587 kfree_skb(skb);
87588 return -1;
87589 }
87590@@ -1529,7 +1550,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
87591 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
87592
87593 if (!skb1) {
87594- atomic_inc(&sk->sk_drops);
87595+ atomic_inc_unchecked(&sk->sk_drops);
87596 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
87597 IS_UDPLITE(sk));
87598 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
87599@@ -1698,6 +1719,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
87600 goto csum_error;
87601
87602 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
87603+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87604+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
87605+#endif
87606 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
87607
87608 /*
87609@@ -2120,7 +2144,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
87610 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
87611 0, sock_i_ino(sp),
87612 atomic_read(&sp->sk_refcnt), sp,
87613- atomic_read(&sp->sk_drops), len);
87614+ atomic_read_unchecked(&sp->sk_drops), len);
87615 }
87616
87617 int udp4_seq_show(struct seq_file *seq, void *v)
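Two patterns recur in the udp.c hunks: sendmsg and recvmsg are gated through extern gr_search_udp_*() hooks that can veto the operation with an errno before any work is done, and the ICMP port-unreachable reply for packets hitting an unbound port is only sent when blackhole mode is off or the packet arrived on loopback, so local diagnostics keep working. A sketch of the suppression predicate, assuming only the IFF_LOOPBACK flag value from <linux/if.h>:

    #include <stdbool.h>

    #define IFF_LOOPBACK 0x8    /* matches <linux/if.h> */

    /* true if an ICMP_PORT_UNREACH reply should still be sent */
    static bool should_send_port_unreach(bool blackhole_enabled,
                                         unsigned int dev_flags)
    {
            return !blackhole_enabled || (dev_flags & IFF_LOOPBACK);
    }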
87618diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
87619index a36d17e..96d099f 100644
87620--- a/net/ipv6/addrconf.c
87621+++ b/net/ipv6/addrconf.c
87622@@ -2272,7 +2272,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
87623 p.iph.ihl = 5;
87624 p.iph.protocol = IPPROTO_IPV6;
87625 p.iph.ttl = 64;
87626- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
87627+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
87628
87629 if (ops->ndo_do_ioctl) {
87630 mm_segment_t oldfs = get_fs();
87631@@ -4388,7 +4388,7 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write,
87632 int *valp = ctl->data;
87633 int val = *valp;
87634 loff_t pos = *ppos;
87635- ctl_table lctl;
87636+ ctl_table_no_const lctl;
87637 int ret;
87638
87639 /*
87640@@ -4470,7 +4470,7 @@ int addrconf_sysctl_disable(ctl_table *ctl, int write,
87641 int *valp = ctl->data;
87642 int val = *valp;
87643 loff_t pos = *ppos;
87644- ctl_table lctl;
87645+ ctl_table_no_const lctl;
87646 int ret;
87647
87648 /*
87649diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
87650index fff5bdd..15194fb 100644
87651--- a/net/ipv6/icmp.c
87652+++ b/net/ipv6/icmp.c
87653@@ -973,7 +973,7 @@ ctl_table ipv6_icmp_table_template[] = {
87654
87655 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
87656 {
87657- struct ctl_table *table;
87658+ ctl_table_no_const *table;
87659
87660 table = kmemdup(ipv6_icmp_table_template,
87661 sizeof(ipv6_icmp_table_template),
87662diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
87663index 131dd09..f7ed64f 100644
87664--- a/net/ipv6/ip6_gre.c
87665+++ b/net/ipv6/ip6_gre.c
87666@@ -73,7 +73,7 @@ struct ip6gre_net {
87667 struct net_device *fb_tunnel_dev;
87668 };
87669
87670-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
87671+static struct rtnl_link_ops ip6gre_link_ops;
87672 static int ip6gre_tunnel_init(struct net_device *dev);
87673 static void ip6gre_tunnel_setup(struct net_device *dev);
87674 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
87675@@ -1337,7 +1337,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
87676 }
87677
87678
87679-static struct inet6_protocol ip6gre_protocol __read_mostly = {
87680+static struct inet6_protocol ip6gre_protocol = {
87681 .handler = ip6gre_rcv,
87682 .err_handler = ip6gre_err,
87683 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
87684@@ -1671,7 +1671,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
87685 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
87686 };
87687
87688-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
87689+static struct rtnl_link_ops ip6gre_link_ops = {
87690 .kind = "ip6gre",
87691 .maxtype = IFLA_GRE_MAX,
87692 .policy = ip6gre_policy,
87693@@ -1684,7 +1684,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
87694 .fill_info = ip6gre_fill_info,
87695 };
87696
87697-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
87698+static struct rtnl_link_ops ip6gre_tap_ops = {
87699 .kind = "ip6gretap",
87700 .maxtype = IFLA_GRE_MAX,
87701 .policy = ip6gre_policy,
87702diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
87703index a14f28b..b4b8956 100644
87704--- a/net/ipv6/ip6_tunnel.c
87705+++ b/net/ipv6/ip6_tunnel.c
87706@@ -87,7 +87,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
87707
87708 static int ip6_tnl_dev_init(struct net_device *dev);
87709 static void ip6_tnl_dev_setup(struct net_device *dev);
87710-static struct rtnl_link_ops ip6_link_ops __read_mostly;
87711+static struct rtnl_link_ops ip6_link_ops;
87712
87713 static int ip6_tnl_net_id __read_mostly;
87714 struct ip6_tnl_net {
87715@@ -1686,7 +1686,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
87716 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
87717 };
87718
87719-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
87720+static struct rtnl_link_ops ip6_link_ops = {
87721 .kind = "ip6tnl",
87722 .maxtype = IFLA_IPTUN_MAX,
87723 .policy = ip6_tnl_policy,
87724diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
87725index d1e2e8e..51c19ae 100644
87726--- a/net/ipv6/ipv6_sockglue.c
87727+++ b/net/ipv6/ipv6_sockglue.c
87728@@ -991,7 +991,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
87729 if (sk->sk_type != SOCK_STREAM)
87730 return -ENOPROTOOPT;
87731
87732- msg.msg_control = optval;
87733+ msg.msg_control = (void __force_kernel *)optval;
87734 msg.msg_controllen = len;
87735 msg.msg_flags = flags;
87736
87737diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
87738index 125a90d..2a11f36 100644
87739--- a/net/ipv6/netfilter/ip6_tables.c
87740+++ b/net/ipv6/netfilter/ip6_tables.c
87741@@ -1076,14 +1076,14 @@ static int compat_table_info(const struct xt_table_info *info,
87742 #endif
87743
87744 static int get_info(struct net *net, void __user *user,
87745- const int *len, int compat)
87746+ int len, int compat)
87747 {
87748 char name[XT_TABLE_MAXNAMELEN];
87749 struct xt_table *t;
87750 int ret;
87751
87752- if (*len != sizeof(struct ip6t_getinfo)) {
87753- duprintf("length %u != %zu\n", *len,
87754+ if (len != sizeof(struct ip6t_getinfo)) {
87755+ duprintf("length %u != %zu\n", len,
87756 sizeof(struct ip6t_getinfo));
87757 return -EINVAL;
87758 }
87759@@ -1120,7 +1120,7 @@ static int get_info(struct net *net, void __user *user,
87760 info.size = private->size;
87761 strcpy(info.name, name);
87762
87763- if (copy_to_user(user, &info, *len) != 0)
87764+ if (copy_to_user(user, &info, len) != 0)
87765 ret = -EFAULT;
87766 else
87767 ret = 0;
87768@@ -1974,7 +1974,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
87769
87770 switch (cmd) {
87771 case IP6T_SO_GET_INFO:
87772- ret = get_info(sock_net(sk), user, len, 1);
87773+ ret = get_info(sock_net(sk), user, *len, 1);
87774 break;
87775 case IP6T_SO_GET_ENTRIES:
87776 ret = compat_get_entries(sock_net(sk), user, len);
87777@@ -2021,7 +2021,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
87778
87779 switch (cmd) {
87780 case IP6T_SO_GET_INFO:
87781- ret = get_info(sock_net(sk), user, len, 0);
87782+ ret = get_info(sock_net(sk), user, *len, 0);
87783 break;
87784
87785 case IP6T_SO_GET_ENTRIES:
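The get_info() change is double-fetch hardening: previously the user-influenced length was read through a pointer both when validating and when copying, so a concurrent writer could change it between the two reads. Passing the length by value fetches it exactly once in the caller. The shape of the fix, reduced to plain C with hypothetical names (memcpy stands in for copy_to_user):

    #include <string.h>

    struct info { char name[32]; unsigned int size; };

    /* before: get_info(void *user, const int *len) read *len twice */
    static int get_info_fixed(void *user, int len)
    {
            struct info info = { .name = "filter", .size = 0 };

            if (len != sizeof(info))
                    return -1;              /* -EINVAL in the kernel */
            memcpy(user, &info, len);       /* copy_to_user() there  */
            return 0;
    }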
87786diff --git a/net/ipv6/netfilter/ip6t_NPT.c b/net/ipv6/netfilter/ip6t_NPT.c
87787index 83acc14..0ea43c7 100644
87788--- a/net/ipv6/netfilter/ip6t_NPT.c
87789+++ b/net/ipv6/netfilter/ip6t_NPT.c
87790@@ -57,7 +57,7 @@ static bool ip6t_npt_map_pfx(const struct ip6t_npt_tginfo *npt,
87791 if (pfx_len - i >= 32)
87792 mask = 0;
87793 else
87794- mask = htonl(~((1 << (pfx_len - i)) - 1));
87795+ mask = htonl((1 << (i - pfx_len + 32)) - 1);
87796
87797 idx = i / 32;
87798 addr->s6_addr32[idx] &= mask;
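The replaced mask expression fixes the 32-bit word that straddles the prefix boundary. With r = pfx_len - i prefix bits remaining in the current word, the prefix occupies the top r bits, so the mask must keep the 32 - r low-order host bits before the prefix word is OR-ed in; the old form ~((1 << r) - 1) kept the top 32 - r bits instead, clearing part of the host suffix while preserving stale prefix bits. The new (1 << (i - pfx_len + 32)) - 1 is exactly (1 << (32 - r)) - 1. A runnable check of the two masks for r = 8:

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    int main(void)
    {
            unsigned int r = 8;     /* prefix bits left in this word */
            uint32_t old   = htonl(~((1u << r) - 1));       /* buggy */
            uint32_t fixed = htonl((1u << (32 - r)) - 1);
            printf("old %08x fixed %08x\n", ntohl(old), ntohl(fixed));
            /* old ffffff00 fixed 00ffffff */
            return 0;
    }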
87799diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
87800index 2f3a018..8bca195 100644
87801--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
87802+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
87803@@ -89,12 +89,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
87804
87805 static int nf_ct_frag6_sysctl_register(struct net *net)
87806 {
87807- struct ctl_table *table;
87808+ ctl_table_no_const *table = NULL;
87809 struct ctl_table_header *hdr;
87810
87811- table = nf_ct_frag6_sysctl_table;
87812 if (!net_eq(net, &init_net)) {
87813- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
87814+ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
87815 GFP_KERNEL);
87816 if (table == NULL)
87817 goto err_alloc;
87818@@ -102,9 +101,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
87819 table[0].data = &net->ipv6.frags.high_thresh;
87820 table[1].data = &net->ipv6.frags.low_thresh;
87821 table[2].data = &net->ipv6.frags.timeout;
87822- }
87823-
87824- hdr = register_net_sysctl(net, "net/netfilter", table);
87825+ hdr = register_net_sysctl(net, "net/netfilter", table);
87826+ } else
87827+ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
87828 if (hdr == NULL)
87829 goto err_reg;
87830
87831@@ -112,8 +111,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
87832 return 0;
87833
87834 err_reg:
87835- if (!net_eq(net, &init_net))
87836- kfree(table);
87837+ kfree(table);
87838 err_alloc:
87839 return -ENOMEM;
87840 }
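This registration reshape recurs throughout the patch (net/ipv6/reassembly.c below gets the same treatment): the template table stays const, a writable kmemdup() copy exists only for non-init network namespaces, register_net_sysctl() is handed whichever of the two applies, and the error path can then kfree(table) unconditionally because table stays NULL in the init-netns case and kfree(NULL) is a no-op. Schematic form only — ctl_table_no_const is the typedef this patch introduces, and template_table plus the net-> fields are hypothetical:

    static int frag_sysctl_register(struct net *net)
    {
            ctl_table_no_const *table = NULL;   /* writable copy, if any */
            struct ctl_table_header *hdr;

            if (!net_eq(net, &init_net)) {
                    table = kmemdup(template_table, sizeof(template_table),
                                    GFP_KERNEL);
                    if (table == NULL)
                            return -ENOMEM;
                    table[0].data = &net->frags_high_thresh; /* per-netns */
                    hdr = register_net_sysctl(net, "net/netfilter", table);
            } else
                    hdr = register_net_sysctl(net, "net/netfilter",
                                              template_table);
            if (hdr == NULL) {
                    kfree(table);               /* NULL-safe */
                    return -ENOMEM;
            }
            net->frags_hdr = hdr;
            return 0;
    }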
87841diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
87842index 70fa814..d70c28c 100644
87843--- a/net/ipv6/raw.c
87844+++ b/net/ipv6/raw.c
87845@@ -379,7 +379,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
87846 {
87847 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
87848 skb_checksum_complete(skb)) {
87849- atomic_inc(&sk->sk_drops);
87850+ atomic_inc_unchecked(&sk->sk_drops);
87851 kfree_skb(skb);
87852 return NET_RX_DROP;
87853 }
87854@@ -407,7 +407,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
87855 struct raw6_sock *rp = raw6_sk(sk);
87856
87857 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
87858- atomic_inc(&sk->sk_drops);
87859+ atomic_inc_unchecked(&sk->sk_drops);
87860 kfree_skb(skb);
87861 return NET_RX_DROP;
87862 }
87863@@ -431,7 +431,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
87864
87865 if (inet->hdrincl) {
87866 if (skb_checksum_complete(skb)) {
87867- atomic_inc(&sk->sk_drops);
87868+ atomic_inc_unchecked(&sk->sk_drops);
87869 kfree_skb(skb);
87870 return NET_RX_DROP;
87871 }
87872@@ -604,7 +604,7 @@ out:
87873 return err;
87874 }
87875
87876-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
87877+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
87878 struct flowi6 *fl6, struct dst_entry **dstp,
87879 unsigned int flags)
87880 {
87881@@ -916,12 +916,15 @@ do_confirm:
87882 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
87883 char __user *optval, int optlen)
87884 {
87885+ struct icmp6_filter filter;
87886+
87887 switch (optname) {
87888 case ICMPV6_FILTER:
87889 if (optlen > sizeof(struct icmp6_filter))
87890 optlen = sizeof(struct icmp6_filter);
87891- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
87892+ if (copy_from_user(&filter, optval, optlen))
87893 return -EFAULT;
87894+ raw6_sk(sk)->filter = filter;
87895 return 0;
87896 default:
87897 return -ENOPROTOOPT;
87898@@ -934,6 +937,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
87899 char __user *optval, int __user *optlen)
87900 {
87901 int len;
87902+ struct icmp6_filter filter;
87903
87904 switch (optname) {
87905 case ICMPV6_FILTER:
87906@@ -945,7 +949,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
87907 len = sizeof(struct icmp6_filter);
87908 if (put_user(len, optlen))
87909 return -EFAULT;
87910- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
87911+ filter = raw6_sk(sk)->filter;
87912+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
87913 return -EFAULT;
87914 return 0;
87915 default:
87916@@ -1253,7 +1258,7 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
87917 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
87918 0,
87919 sock_i_ino(sp),
87920- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
87921+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
87922 }
87923
87924 static int raw6_seq_show(struct seq_file *seq, void *v)
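Both icmpfilter hunks stage the user copy through a complete struct on the stack: on the set side a faulting copy_from_user() can no longer leave raw6_sk(sk)->filter half-written, and on the get side the requested length is checked against the stack copy before copy_to_user() runs, so an oversized len cannot read past the filter. A kernel-style sketch of the set side, with zero-initialization added so a short optlen cannot commit stack garbage (that initializer is an assumption of this sketch, not something the hunk itself does):

    static int set_filter_staged(struct icmp6_filter *dst,
                                 const void __user *optval, int optlen)
    {
            struct icmp6_filter filter = { };   /* no stack garbage */

            if (optlen > sizeof(filter))
                    optlen = sizeof(filter);
            if (copy_from_user(&filter, optval, optlen))
                    return -EFAULT;             /* *dst untouched   */
            *dst = filter;                      /* commit in one go */
            return 0;
    }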
87925diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
87926index d9ba8a2..f3f9e14 100644
87927--- a/net/ipv6/reassembly.c
87928+++ b/net/ipv6/reassembly.c
87929@@ -608,12 +608,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
87930
87931 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
87932 {
87933- struct ctl_table *table;
87934+ ctl_table_no_const *table = NULL;
87935 struct ctl_table_header *hdr;
87936
87937- table = ip6_frags_ns_ctl_table;
87938 if (!net_eq(net, &init_net)) {
87939- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
87940+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
87941 if (table == NULL)
87942 goto err_alloc;
87943
87944@@ -624,9 +623,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
87945 /* Don't export sysctls to unprivileged users */
87946 if (net->user_ns != &init_user_ns)
87947 table[0].procname = NULL;
87948- }
87949+ hdr = register_net_sysctl(net, "net/ipv6", table);
87950+ } else
87951+ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
87952
87953- hdr = register_net_sysctl(net, "net/ipv6", table);
87954 if (hdr == NULL)
87955 goto err_reg;
87956
87957@@ -634,8 +634,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
87958 return 0;
87959
87960 err_reg:
87961- if (!net_eq(net, &init_net))
87962- kfree(table);
87963+ kfree(table);
87964 err_alloc:
87965 return -ENOMEM;
87966 }
87967diff --git a/net/ipv6/route.c b/net/ipv6/route.c
87968index 5845613..3af8fc7 100644
87969--- a/net/ipv6/route.c
87970+++ b/net/ipv6/route.c
87971@@ -2966,7 +2966,7 @@ ctl_table ipv6_route_table_template[] = {
87972
87973 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
87974 {
87975- struct ctl_table *table;
87976+ ctl_table_no_const *table;
87977
87978 table = kmemdup(ipv6_route_table_template,
87979 sizeof(ipv6_route_table_template),
87980diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
87981index cfba99b..20ca511 100644
87982--- a/net/ipv6/sit.c
87983+++ b/net/ipv6/sit.c
87984@@ -72,7 +72,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
87985 static int ipip6_tunnel_init(struct net_device *dev);
87986 static void ipip6_tunnel_setup(struct net_device *dev);
87987 static void ipip6_dev_free(struct net_device *dev);
87988-static struct rtnl_link_ops sit_link_ops __read_mostly;
87989+static struct rtnl_link_ops sit_link_ops;
87990
87991 static int sit_net_id __read_mostly;
87992 struct sit_net {
87993@@ -1463,7 +1463,7 @@ static const struct nla_policy ipip6_policy[IFLA_IPTUN_MAX + 1] = {
87994 #endif
87995 };
87996
87997-static struct rtnl_link_ops sit_link_ops __read_mostly = {
87998+static struct rtnl_link_ops sit_link_ops = {
87999 .kind = "sit",
88000 .maxtype = IFLA_IPTUN_MAX,
88001 .policy = ipip6_policy,
88002diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
88003index e85c48b..b8268d3 100644
88004--- a/net/ipv6/sysctl_net_ipv6.c
88005+++ b/net/ipv6/sysctl_net_ipv6.c
88006@@ -40,7 +40,7 @@ static ctl_table ipv6_rotable[] = {
88007
88008 static int __net_init ipv6_sysctl_net_init(struct net *net)
88009 {
88010- struct ctl_table *ipv6_table;
88011+ ctl_table_no_const *ipv6_table;
88012 struct ctl_table *ipv6_route_table;
88013 struct ctl_table *ipv6_icmp_table;
88014 int err;
88015diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
88016index 8d19346..e47216f 100644
88017--- a/net/ipv6/tcp_ipv6.c
88018+++ b/net/ipv6/tcp_ipv6.c
88019@@ -103,6 +103,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
88020 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
88021 }
88022
88023+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88024+extern int grsec_enable_blackhole;
88025+#endif
88026+
88027 static void tcp_v6_hash(struct sock *sk)
88028 {
88029 if (sk->sk_state != TCP_CLOSE) {
88030@@ -1440,6 +1444,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
88031 return 0;
88032
88033 reset:
88034+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88035+ if (!grsec_enable_blackhole)
88036+#endif
88037 tcp_v6_send_reset(sk, skb);
88038 discard:
88039 if (opt_skb)
88040@@ -1521,12 +1528,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
88041 TCP_SKB_CB(skb)->sacked = 0;
88042
88043 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
88044- if (!sk)
88045+ if (!sk) {
88046+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88047+ ret = 1;
88048+#endif
88049 goto no_tcp_socket;
88050+ }
88051
88052 process:
88053- if (sk->sk_state == TCP_TIME_WAIT)
88054+ if (sk->sk_state == TCP_TIME_WAIT) {
88055+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88056+ ret = 2;
88057+#endif
88058 goto do_time_wait;
88059+ }
88060
88061 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
88062 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
88063@@ -1575,6 +1590,10 @@ no_tcp_socket:
88064 bad_packet:
88065 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
88066 } else {
88067+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88068+ if (!grsec_enable_blackhole || (ret == 1 &&
88069+ (skb->dev->flags & IFF_LOOPBACK)))
88070+#endif
88071 tcp_v6_send_reset(NULL, skb);
88072 }
88073
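The IPv6 TCP receive path mirrors the IPv4 blackhole hooks, with ret doubling as a reason code: 1 marks "no listening socket", 2 marks "TIME_WAIT lookup", and the RST emitted at bad_packet is suppressed unless blackhole mode is off or the no-socket case occurred on loopback. The suppression predicate, as a standalone sketch (the enum names are illustrative):

    #include <stdbool.h>

    enum { RST_DEFAULT = 0, NO_SOCKET = 1, TIME_WAIT = 2 };

    static bool suppress_rst(bool blackhole, int reason, bool loopback)
    {
            /* loopback "no socket" probes are still answered */
            return blackhole && !(reason == NO_SOCKET && loopback);
    }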
88074diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
88075index fb08329..2d6919e 100644
88076--- a/net/ipv6/udp.c
88077+++ b/net/ipv6/udp.c
88078@@ -51,6 +51,10 @@
88079 #include <trace/events/skb.h>
88080 #include "udp_impl.h"
88081
88082+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88083+extern int grsec_enable_blackhole;
88084+#endif
88085+
88086 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
88087 {
88088 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
88089@@ -395,7 +399,7 @@ try_again:
88090 if (unlikely(err)) {
88091 trace_kfree_skb(skb, udpv6_recvmsg);
88092 if (!peeked) {
88093- atomic_inc(&sk->sk_drops);
88094+ atomic_inc_unchecked(&sk->sk_drops);
88095 if (is_udp4)
88096 UDP_INC_STATS_USER(sock_net(sk),
88097 UDP_MIB_INERRORS,
88098@@ -633,7 +637,7 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
88099 return rc;
88100 drop:
88101 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
88102- atomic_inc(&sk->sk_drops);
88103+ atomic_inc_unchecked(&sk->sk_drops);
88104 kfree_skb(skb);
88105 return -1;
88106 }
88107@@ -691,7 +695,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
88108 if (likely(skb1 == NULL))
88109 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
88110 if (!skb1) {
88111- atomic_inc(&sk->sk_drops);
88112+ atomic_inc_unchecked(&sk->sk_drops);
88113 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
88114 IS_UDPLITE(sk));
88115 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
88116@@ -862,6 +866,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
88117 goto discard;
88118
88119 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
88120+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88121+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
88122+#endif
88123 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
88124
88125 kfree_skb(skb);
88126@@ -1379,7 +1386,7 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
88127 0,
88128 sock_i_ino(sp),
88129 atomic_read(&sp->sk_refcnt), sp,
88130- atomic_read(&sp->sk_drops));
88131+ atomic_read_unchecked(&sp->sk_drops));
88132 }
88133
88134 int udp6_seq_show(struct seq_file *seq, void *v)
88135diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
88136index a68c88c..d55b0c5 100644
88137--- a/net/irda/ircomm/ircomm_tty.c
88138+++ b/net/irda/ircomm/ircomm_tty.c
88139@@ -312,12 +312,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
88140 add_wait_queue(&port->open_wait, &wait);
88141
88142 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
88143- __FILE__, __LINE__, tty->driver->name, port->count);
88144+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
88145
88146 spin_lock_irqsave(&port->lock, flags);
88147 if (!tty_hung_up_p(filp)) {
88148 extra_count = 1;
88149- port->count--;
88150+ atomic_dec(&port->count);
88151 }
88152 spin_unlock_irqrestore(&port->lock, flags);
88153 port->blocked_open++;
88154@@ -353,7 +353,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
88155 }
88156
88157 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
88158- __FILE__, __LINE__, tty->driver->name, port->count);
88159+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
88160
88161 schedule();
88162 }
88163@@ -364,13 +364,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
88164 if (extra_count) {
88165 /* ++ is not atomic, so this should be protected - Jean II */
88166 spin_lock_irqsave(&port->lock, flags);
88167- port->count++;
88168+ atomic_inc(&port->count);
88169 spin_unlock_irqrestore(&port->lock, flags);
88170 }
88171 port->blocked_open--;
88172
88173 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
88174- __FILE__, __LINE__, tty->driver->name, port->count);
88175+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
88176
88177 if (!retval)
88178 port->flags |= ASYNC_NORMAL_ACTIVE;
88179@@ -444,12 +444,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
88180
88181 /* ++ is not atomic, so this should be protected - Jean II */
88182 spin_lock_irqsave(&self->port.lock, flags);
88183- self->port.count++;
88184+ atomic_inc(&self->port.count);
88185 spin_unlock_irqrestore(&self->port.lock, flags);
88186 tty_port_tty_set(&self->port, tty);
88187
88188 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
88189- self->line, self->port.count);
88190+ self->line, atomic_read(&self->port.count));
88191
88192 /* Not really used by us, but lets do it anyway */
88193 tty->low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
88194@@ -986,7 +986,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
88195 tty_kref_put(port->tty);
88196 }
88197 port->tty = NULL;
88198- port->count = 0;
88199+ atomic_set(&port->count, 0);
88200 spin_unlock_irqrestore(&port->lock, flags);
88201
88202 wake_up_interruptible(&port->open_wait);
88203@@ -1343,7 +1343,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
88204 seq_putc(m, '\n');
88205
88206 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
88207- seq_printf(m, "Open count: %d\n", self->port.count);
88208+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
88209 seq_printf(m, "Max data size: %d\n", self->max_data_size);
88210 seq_printf(m, "Max header size: %d\n", self->max_header_size);
88211
88212diff --git a/net/irda/iriap.c b/net/irda/iriap.c
88213index e71e85b..29340a9 100644
88214--- a/net/irda/iriap.c
88215+++ b/net/irda/iriap.c
88216@@ -495,8 +495,11 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self,
88217 /* case CS_ISO_8859_9: */
88218 /* case CS_UNICODE: */
88219 default:
88220- IRDA_DEBUG(0, "%s(), charset %s, not supported\n",
88221- __func__, ias_charset_types[charset]);
88222+ IRDA_DEBUG(0, "%s(), charset [%d] %s, not supported\n",
88223+ __func__, charset,
88224+ charset < ARRAY_SIZE(ias_charset_types) ?
88225+ ias_charset_types[charset] :
88226+ "(unknown)");
88227
88228 /* Aborting, close connection! */
88229 iriap_disconnect_request(self);
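The iriap hunk range-checks a peer-controlled table index: charset arrives off the wire, so it is validated against ARRAY_SIZE(ias_charset_types) before indexing, with a fallback string for out-of-range values; previously a hostile charset value walked past the end of the array inside a debug printout. The generic shape, runnable:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const char *const charset_names[] = { "ASCII", "ISO-8859-1" };

    static const char *charset_name(unsigned int charset)
    {
            /* index comes from the peer: validate before use */
            return charset < ARRAY_SIZE(charset_names) ?
                   charset_names[charset] : "(unknown)";
    }

    int main(void)
    {
            printf("%s %s\n", charset_name(1), charset_name(200));
            return 0;
    }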
88230diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
88231index cd6f7a9..e63fe89 100644
88232--- a/net/iucv/af_iucv.c
88233+++ b/net/iucv/af_iucv.c
88234@@ -782,10 +782,10 @@ static int iucv_sock_autobind(struct sock *sk)
88235
88236 write_lock_bh(&iucv_sk_list.lock);
88237
88238- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
88239+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
88240 while (__iucv_get_sock_by_name(name)) {
88241 sprintf(name, "%08x",
88242- atomic_inc_return(&iucv_sk_list.autobind_name));
88243+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
88244 }
88245
88246 write_unlock_bh(&iucv_sk_list.lock);
88247diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
88248index df08250..02021fe 100644
88249--- a/net/iucv/iucv.c
88250+++ b/net/iucv/iucv.c
88251@@ -690,7 +690,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
88252 return NOTIFY_OK;
88253 }
88254
88255-static struct notifier_block __refdata iucv_cpu_notifier = {
88256+static struct notifier_block iucv_cpu_notifier = {
88257 .notifier_call = iucv_cpu_notify,
88258 };
88259
88260diff --git a/net/key/af_key.c b/net/key/af_key.c
88261index 5b426a6..970032b 100644
88262--- a/net/key/af_key.c
88263+++ b/net/key/af_key.c
88264@@ -3019,10 +3019,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
88265 static u32 get_acqseq(void)
88266 {
88267 u32 res;
88268- static atomic_t acqseq;
88269+ static atomic_unchecked_t acqseq;
88270
88271 do {
88272- res = atomic_inc_return(&acqseq);
88273+ res = atomic_inc_return_unchecked(&acqseq);
88274 } while (!res);
88275 return res;
88276 }
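get_acqseq() is one of many conversions in this patch to the _unchecked atomic API: under PaX's REFCOUNT hardening an atomic_t that overflows is treated as a refcount bug and trapped, so counters that are allowed to wrap — the PF_KEY acquire sequence number here, the sk_drops statistics elsewhere in this set — move to atomic_unchecked_t to stay exempt. Kernel-style sketch using the types this patch introduces (schematic, not standalone):

    static atomic_unchecked_t acqseq = ATOMIC_INIT(0);   /* may wrap */

    static u32 next_acqseq(void)
    {
            u32 res;

            do {
                    /* wraps past UINT_MAX without tripping REFCOUNT */
                    res = atomic_inc_return_unchecked(&acqseq);
            } while (!res);             /* 0 is reserved, skip it    */
            return res;
    }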
88277diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
88278index 0479c64..9e72ff4 100644
88279--- a/net/mac80211/cfg.c
88280+++ b/net/mac80211/cfg.c
88281@@ -790,7 +790,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
88282 ret = ieee80211_vif_use_channel(sdata, chandef,
88283 IEEE80211_CHANCTX_EXCLUSIVE);
88284 }
88285- } else if (local->open_count == local->monitors) {
88286+ } else if (local_read(&local->open_count) == local->monitors) {
88287 local->_oper_channel = chandef->chan;
88288 local->_oper_channel_type = cfg80211_get_chandef_type(chandef);
88289 ieee80211_hw_config(local, 0);
88290@@ -2499,7 +2499,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,
88291 list_del(&dep->list);
88292 mutex_unlock(&local->mtx);
88293
88294- ieee80211_roc_notify_destroy(dep);
88295+ ieee80211_roc_notify_destroy(dep, true);
88296 return 0;
88297 }
88298
88299@@ -2539,7 +2539,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,
88300 ieee80211_start_next_roc(local);
88301 mutex_unlock(&local->mtx);
88302
88303- ieee80211_roc_notify_destroy(found);
88304+ ieee80211_roc_notify_destroy(found, true);
88305 } else {
88306 /* work may be pending so use it all the time */
88307 found->abort = true;
88308@@ -2549,6 +2549,8 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,
88309
88310 /* work will clean up etc */
88311 flush_delayed_work(&found->work);
88312+ WARN_ON(!found->to_be_freed);
88313+ kfree(found);
88314 }
88315
88316 return 0;
88317@@ -2716,7 +2718,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
88318 else
88319 local->probe_req_reg--;
88320
88321- if (!local->open_count)
88322+ if (!local_read(&local->open_count))
88323 break;
88324
88325 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
88326diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
88327index 2ed065c..bec0c2b 100644
88328--- a/net/mac80211/ieee80211_i.h
88329+++ b/net/mac80211/ieee80211_i.h
88330@@ -28,6 +28,7 @@
88331 #include <net/ieee80211_radiotap.h>
88332 #include <net/cfg80211.h>
88333 #include <net/mac80211.h>
88334+#include <asm/local.h>
88335 #include "key.h"
88336 #include "sta_info.h"
88337 #include "debug.h"
88338@@ -346,6 +347,7 @@ struct ieee80211_roc_work {
88339 struct ieee80211_channel *chan;
88340
88341 bool started, abort, hw_begun, notified;
88342+ bool to_be_freed;
88343
88344 unsigned long hw_start_time;
88345
88346@@ -909,7 +911,7 @@ struct ieee80211_local {
88347 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
88348 spinlock_t queue_stop_reason_lock;
88349
88350- int open_count;
88351+ local_t open_count;
88352 int monitors, cooked_mntrs;
88353 /* number of interfaces with corresponding FIF_ flags */
88354 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
88355@@ -1363,7 +1365,7 @@ void ieee80211_offchannel_return(struct ieee80211_local *local);
88356 void ieee80211_roc_setup(struct ieee80211_local *local);
88357 void ieee80211_start_next_roc(struct ieee80211_local *local);
88358 void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata);
88359-void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc);
88360+void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc, bool free);
88361 void ieee80211_sw_roc_work(struct work_struct *work);
88362 void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc);
88363
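Across mac80211, open_count moves from a plain int to local_t, with readers switched to local_read() and writers to local_inc()/local_dec(); the point is to route the counter through the single-word primitives from <asm/local.h> that the hardening plugins understand, not to add new locking, since these paths already run under RTNL. Schematic usage (drv_stop_sketch() is a hypothetical stand-in for the driver stop call):

    #include <asm/local.h>

    static local_t open_count = LOCAL_INIT(0);

    static void do_stop_sketch(bool going_down)
    {
            if (going_down)
                    local_dec(&open_count);
            if (!local_read(&open_count))
                    drv_stop_sketch();  /* last interface went away */
    }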
88364diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
88365index 8be854e..ad72a69 100644
88366--- a/net/mac80211/iface.c
88367+++ b/net/mac80211/iface.c
88368@@ -546,7 +546,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
88369 break;
88370 }
88371
88372- if (local->open_count == 0) {
88373+ if (local_read(&local->open_count) == 0) {
88374 res = drv_start(local);
88375 if (res)
88376 goto err_del_bss;
88377@@ -591,7 +591,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
88378 break;
88379 }
88380
88381- if (local->monitors == 0 && local->open_count == 0) {
88382+ if (local->monitors == 0 && local_read(&local->open_count) == 0) {
88383 res = ieee80211_add_virtual_monitor(local);
88384 if (res)
88385 goto err_stop;
88386@@ -699,7 +699,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
88387 mutex_unlock(&local->mtx);
88388
88389 if (coming_up)
88390- local->open_count++;
88391+ local_inc(&local->open_count);
88392
88393 if (hw_reconf_flags)
88394 ieee80211_hw_config(local, hw_reconf_flags);
88395@@ -713,7 +713,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
88396 err_del_interface:
88397 drv_remove_interface(local, sdata);
88398 err_stop:
88399- if (!local->open_count)
88400+ if (!local_read(&local->open_count))
88401 drv_stop(local);
88402 err_del_bss:
88403 sdata->bss = NULL;
88404@@ -827,7 +827,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
88405 }
88406
88407 if (going_down)
88408- local->open_count--;
88409+ local_dec(&local->open_count);
88410
88411 switch (sdata->vif.type) {
88412 case NL80211_IFTYPE_AP_VLAN:
88413@@ -884,7 +884,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
88414
88415 ieee80211_recalc_ps(local, -1);
88416
88417- if (local->open_count == 0) {
88418+ if (local_read(&local->open_count) == 0) {
88419 if (local->ops->napi_poll)
88420 napi_disable(&local->napi);
88421 ieee80211_clear_tx_pending(local);
88422@@ -910,7 +910,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
88423 }
88424 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
88425
88426- if (local->monitors == local->open_count && local->monitors > 0)
88427+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
88428 ieee80211_add_virtual_monitor(local);
88429 }
88430
88431diff --git a/net/mac80211/main.c b/net/mac80211/main.c
88432index 1b087ff..bf600e9 100644
88433--- a/net/mac80211/main.c
88434+++ b/net/mac80211/main.c
88435@@ -181,7 +181,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
88436 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
88437 IEEE80211_CONF_CHANGE_POWER);
88438
88439- if (changed && local->open_count) {
88440+ if (changed && local_read(&local->open_count)) {
88441 ret = drv_config(local, changed);
88442 /*
88443 * Goal:
88444diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
88445index a3ad4c3..7acbdaa 100644
88446--- a/net/mac80211/offchannel.c
88447+++ b/net/mac80211/offchannel.c
88448@@ -299,10 +299,13 @@ void ieee80211_start_next_roc(struct ieee80211_local *local)
88449 }
88450 }
88451
88452-void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc)
88453+void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc, bool free)
88454 {
88455 struct ieee80211_roc_work *dep, *tmp;
88456
88457+ if (WARN_ON(roc->to_be_freed))
88458+ return;
88459+
88460 /* was never transmitted */
88461 if (roc->frame) {
88462 cfg80211_mgmt_tx_status(&roc->sdata->wdev,
88463@@ -318,9 +321,12 @@ void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc)
88464 GFP_KERNEL);
88465
88466 list_for_each_entry_safe(dep, tmp, &roc->dependents, list)
88467- ieee80211_roc_notify_destroy(dep);
88468+ ieee80211_roc_notify_destroy(dep, true);
88469
88470- kfree(roc);
88471+ if (free)
88472+ kfree(roc);
88473+ else
88474+ roc->to_be_freed = true;
88475 }
88476
88477 void ieee80211_sw_roc_work(struct work_struct *work)
88478@@ -333,6 +339,9 @@ void ieee80211_sw_roc_work(struct work_struct *work)
88479
88480 mutex_lock(&local->mtx);
88481
88482+ if (roc->to_be_freed)
88483+ goto out_unlock;
88484+
88485 if (roc->abort)
88486 goto finish;
88487
88488@@ -372,7 +381,7 @@ void ieee80211_sw_roc_work(struct work_struct *work)
88489 finish:
88490 list_del(&roc->list);
88491 started = roc->started;
88492- ieee80211_roc_notify_destroy(roc);
88493+ ieee80211_roc_notify_destroy(roc, !roc->abort);
88494
88495 if (started) {
88496 drv_flush(local, false);
88497@@ -412,7 +421,7 @@ static void ieee80211_hw_roc_done(struct work_struct *work)
88498
88499 list_del(&roc->list);
88500
88501- ieee80211_roc_notify_destroy(roc);
88502+ ieee80211_roc_notify_destroy(roc, true);
88503
88504 /* if there's another roc, start it now */
88505 ieee80211_start_next_roc(local);
88506@@ -462,12 +471,14 @@ void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata)
88507 list_for_each_entry_safe(roc, tmp, &tmp_list, list) {
88508 if (local->ops->remain_on_channel) {
88509 list_del(&roc->list);
88510- ieee80211_roc_notify_destroy(roc);
88511+ ieee80211_roc_notify_destroy(roc, true);
88512 } else {
88513 ieee80211_queue_delayed_work(&local->hw, &roc->work, 0);
88514
88515 /* work will clean up etc */
88516 flush_delayed_work(&roc->work);
88517+ WARN_ON(!roc->to_be_freed);
88518+ kfree(roc);
88519 }
88520 }
88521
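The to_be_freed flag turns ieee80211_roc_notify_destroy() into a two-phase teardown: while the delayed work may still be running, destroy only marks the object, and the cancel path that called flush_delayed_work() performs the kfree() afterwards, with WARN_ON()s guarding both a double destroy and a missed mark — closing the use-after-free window between cancellation and the work handler. Skeleton of the pattern, with hypothetical names:

    struct roc_like {
            struct delayed_work work;
            bool to_be_freed;
    };

    static void roc_destroy(struct roc_like *obj, bool free)
    {
            if (WARN_ON(obj->to_be_freed))
                    return;                    /* double destroy     */
            /* ...notify listeners, unlink dependents... */
            if (free)
                    kfree(obj);
            else
                    obj->to_be_freed = true;   /* handler still live */
    }

    static void roc_cancel(struct roc_like *obj)
    {
            flush_delayed_work(&obj->work);    /* let handler finish */
            WARN_ON(!obj->to_be_freed);
            kfree(obj);
    }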
88522diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
88523index 79a48f3..5e185c9 100644
88524--- a/net/mac80211/pm.c
88525+++ b/net/mac80211/pm.c
88526@@ -35,7 +35,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
88527 struct sta_info *sta;
88528 struct ieee80211_chanctx *ctx;
88529
88530- if (!local->open_count)
88531+ if (!local_read(&local->open_count))
88532 goto suspend;
88533
88534 ieee80211_scan_cancel(local);
88535@@ -73,7 +73,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
88536 cancel_work_sync(&local->dynamic_ps_enable_work);
88537 del_timer_sync(&local->dynamic_ps_timer);
88538
88539- local->wowlan = wowlan && local->open_count;
88540+ local->wowlan = wowlan && local_read(&local->open_count);
88541 if (local->wowlan) {
88542 int err = drv_suspend(local, wowlan);
88543 if (err < 0) {
88544@@ -187,7 +187,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
88545 mutex_unlock(&local->chanctx_mtx);
88546
88547 /* stop hardware - this must stop RX */
88548- if (local->open_count)
88549+ if (local_read(&local->open_count))
88550 ieee80211_stop_device(local);
88551
88552 suspend:
88553diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
88554index dd88381..eef4dd6 100644
88555--- a/net/mac80211/rate.c
88556+++ b/net/mac80211/rate.c
88557@@ -493,7 +493,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
88558
88559 ASSERT_RTNL();
88560
88561- if (local->open_count)
88562+ if (local_read(&local->open_count))
88563 return -EBUSY;
88564
88565 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
88566diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
88567index c97a065..ff61928 100644
88568--- a/net/mac80211/rc80211_pid_debugfs.c
88569+++ b/net/mac80211/rc80211_pid_debugfs.c
88570@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
88571
88572 spin_unlock_irqrestore(&events->lock, status);
88573
88574- if (copy_to_user(buf, pb, p))
88575+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
88576 return -EFAULT;
88577
88578 return p;
88579diff --git a/net/mac80211/util.c b/net/mac80211/util.c
88580index f11e8c5..08d0013 100644
88581--- a/net/mac80211/util.c
88582+++ b/net/mac80211/util.c
88583@@ -1380,7 +1380,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
88584 }
88585 #endif
88586 /* everything else happens only if HW was up & running */
88587- if (!local->open_count)
88588+ if (!local_read(&local->open_count))
88589 goto wake_up;
88590
88591 /*
88592diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
88593index 49e96df..63a51c3 100644
88594--- a/net/netfilter/Kconfig
88595+++ b/net/netfilter/Kconfig
88596@@ -936,6 +936,16 @@ config NETFILTER_XT_MATCH_ESP
88597
88598 To compile it as a module, choose M here. If unsure, say N.
88599
88600+config NETFILTER_XT_MATCH_GRADM
88601+ tristate '"gradm" match support'
88602+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
88603+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
88604+ ---help---
88605+	  The gradm match allows matching on whether grsecurity RBAC is
88606+	  enabled. It is useful when iptables rules are applied early during
88607+	  boot to block connections to the machine (except from a trusted
88608+	  host) while the RBAC system is still disabled.
88609+
88610 config NETFILTER_XT_MATCH_HASHLIMIT
88611 tristate '"hashlimit" match support'
88612 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
88613diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
88614index 3259697..54d5393 100644
88615--- a/net/netfilter/Makefile
88616+++ b/net/netfilter/Makefile
88617@@ -109,6 +109,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
88618 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
88619 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
88620 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
88621+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
88622 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
88623 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
88624 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
88625diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
88626index 6d6d8f2..a676749 100644
88627--- a/net/netfilter/ipset/ip_set_core.c
88628+++ b/net/netfilter/ipset/ip_set_core.c
88629@@ -1800,7 +1800,7 @@ done:
88630 return ret;
88631 }
88632
88633-static struct nf_sockopt_ops so_set __read_mostly = {
88634+static struct nf_sockopt_ops so_set = {
88635 .pf = PF_INET,
88636 .get_optmin = SO_IP_SET,
88637 .get_optmax = SO_IP_SET + 1,
88638diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
88639index 30e764a..c3b6a9d 100644
88640--- a/net/netfilter/ipvs/ip_vs_conn.c
88641+++ b/net/netfilter/ipvs/ip_vs_conn.c
88642@@ -554,7 +554,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
88643 /* Increase the refcnt counter of the dest */
88644 atomic_inc(&dest->refcnt);
88645
88646- conn_flags = atomic_read(&dest->conn_flags);
88647+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
88648 if (cp->protocol != IPPROTO_UDP)
88649 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
88650 flags = cp->flags;
88651@@ -899,7 +899,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
88652 atomic_set(&cp->refcnt, 1);
88653
88654 atomic_set(&cp->n_control, 0);
88655- atomic_set(&cp->in_pkts, 0);
88656+ atomic_set_unchecked(&cp->in_pkts, 0);
88657
88658 atomic_inc(&ipvs->conn_count);
88659 if (flags & IP_VS_CONN_F_NO_CPORT)
88660@@ -1180,7 +1180,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
88661
88662 /* Don't drop the entry if its number of incoming packets is not
88663 located in [0, 8] */
88664- i = atomic_read(&cp->in_pkts);
88665+ i = atomic_read_unchecked(&cp->in_pkts);
88666 if (i > 8 || i < 0) return 0;
88667
88668 if (!todrop_rate[i]) return 0;
88669diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
88670index 47edf5a..235b07d 100644
88671--- a/net/netfilter/ipvs/ip_vs_core.c
88672+++ b/net/netfilter/ipvs/ip_vs_core.c
88673@@ -559,7 +559,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
88674 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
88675 /* do not touch skb anymore */
88676
88677- atomic_inc(&cp->in_pkts);
88678+ atomic_inc_unchecked(&cp->in_pkts);
88679 ip_vs_conn_put(cp);
88680 return ret;
88681 }
88682@@ -1691,7 +1691,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
88683 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
88684 pkts = sysctl_sync_threshold(ipvs);
88685 else
88686- pkts = atomic_add_return(1, &cp->in_pkts);
88687+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
88688
88689 if (ipvs->sync_state & IP_VS_STATE_MASTER)
88690 ip_vs_sync_conn(net, cp, pkts);
88691diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
88692index ec664cb..7f34a77 100644
88693--- a/net/netfilter/ipvs/ip_vs_ctl.c
88694+++ b/net/netfilter/ipvs/ip_vs_ctl.c
88695@@ -787,7 +787,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
88696 ip_vs_rs_hash(ipvs, dest);
88697 write_unlock_bh(&ipvs->rs_lock);
88698 }
88699- atomic_set(&dest->conn_flags, conn_flags);
88700+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
88701
88702 /* bind the service */
88703 if (!dest->svc) {
88704@@ -1688,7 +1688,7 @@ proc_do_sync_ports(ctl_table *table, int write,
88705 * align with netns init in ip_vs_control_net_init()
88706 */
88707
88708-static struct ctl_table vs_vars[] = {
88709+static ctl_table_no_const vs_vars[] __read_only = {
88710 {
88711 .procname = "amemthresh",
88712 .maxlen = sizeof(int),
88713@@ -2081,7 +2081,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
88714 " %-7s %-6d %-10d %-10d\n",
88715 &dest->addr.in6,
88716 ntohs(dest->port),
88717- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
88718+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
88719 atomic_read(&dest->weight),
88720 atomic_read(&dest->activeconns),
88721 atomic_read(&dest->inactconns));
88722@@ -2092,7 +2092,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
88723 "%-7s %-6d %-10d %-10d\n",
88724 ntohl(dest->addr.ip),
88725 ntohs(dest->port),
88726- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
88727+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
88728 atomic_read(&dest->weight),
88729 atomic_read(&dest->activeconns),
88730 atomic_read(&dest->inactconns));
88731@@ -2562,7 +2562,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
88732
88733 entry.addr = dest->addr.ip;
88734 entry.port = dest->port;
88735- entry.conn_flags = atomic_read(&dest->conn_flags);
88736+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
88737 entry.weight = atomic_read(&dest->weight);
88738 entry.u_threshold = dest->u_threshold;
88739 entry.l_threshold = dest->l_threshold;
88740@@ -3098,7 +3098,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
88741 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
88742 nla_put_u16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
88743 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
88744- (atomic_read(&dest->conn_flags) &
88745+ (atomic_read_unchecked(&dest->conn_flags) &
88746 IP_VS_CONN_F_FWD_MASK)) ||
88747 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
88748 atomic_read(&dest->weight)) ||
88749@@ -3688,7 +3688,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
88750 {
88751 int idx;
88752 struct netns_ipvs *ipvs = net_ipvs(net);
88753- struct ctl_table *tbl;
88754+ ctl_table_no_const *tbl;
88755
88756 atomic_set(&ipvs->dropentry, 0);
88757 spin_lock_init(&ipvs->dropentry_lock);
88758diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
88759index fdd89b9..bd96aa9 100644
88760--- a/net/netfilter/ipvs/ip_vs_lblc.c
88761+++ b/net/netfilter/ipvs/ip_vs_lblc.c
88762@@ -115,7 +115,7 @@ struct ip_vs_lblc_table {
88763 * IPVS LBLC sysctl table
88764 */
88765 #ifdef CONFIG_SYSCTL
88766-static ctl_table vs_vars_table[] = {
88767+static ctl_table_no_const vs_vars_table[] __read_only = {
88768 {
88769 .procname = "lblc_expiration",
88770 .data = NULL,
88771diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
88772index c03b6a3..8ce3681 100644
88773--- a/net/netfilter/ipvs/ip_vs_lblcr.c
88774+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
88775@@ -288,7 +288,7 @@ struct ip_vs_lblcr_table {
88776 * IPVS LBLCR sysctl table
88777 */
88778
88779-static ctl_table vs_vars_table[] = {
88780+static ctl_table_no_const vs_vars_table[] __read_only = {
88781 {
88782 .procname = "lblcr_expiration",
88783 .data = NULL,
88784diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
88785index 44fd10c..2a163b3 100644
88786--- a/net/netfilter/ipvs/ip_vs_sync.c
88787+++ b/net/netfilter/ipvs/ip_vs_sync.c
88788@@ -596,7 +596,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
88789 cp = cp->control;
88790 if (cp) {
88791 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
88792- pkts = atomic_add_return(1, &cp->in_pkts);
88793+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
88794 else
88795 pkts = sysctl_sync_threshold(ipvs);
88796 ip_vs_sync_conn(net, cp->control, pkts);
88797@@ -758,7 +758,7 @@ control:
88798 if (!cp)
88799 return;
88800 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
88801- pkts = atomic_add_return(1, &cp->in_pkts);
88802+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
88803 else
88804 pkts = sysctl_sync_threshold(ipvs);
88805 goto sloop;
88806@@ -885,7 +885,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
88807
88808 if (opt)
88809 memcpy(&cp->in_seq, opt, sizeof(*opt));
88810- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
88811+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
88812 cp->state = state;
88813 cp->old_state = cp->state;
88814 /*
88815diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
88816index ee6b7a9..f9a89f6 100644
88817--- a/net/netfilter/ipvs/ip_vs_xmit.c
88818+++ b/net/netfilter/ipvs/ip_vs_xmit.c
88819@@ -1210,7 +1210,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
88820 else
88821 rc = NF_ACCEPT;
88822 /* do not touch skb anymore */
88823- atomic_inc(&cp->in_pkts);
88824+ atomic_inc_unchecked(&cp->in_pkts);
88825 goto out;
88826 }
88827
88828@@ -1332,7 +1332,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
88829 else
88830 rc = NF_ACCEPT;
88831 /* do not touch skb anymore */
88832- atomic_inc(&cp->in_pkts);
88833+ atomic_inc_unchecked(&cp->in_pkts);
88834 goto out;
88835 }
88836
88837diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
88838index 7df424e..a527b02 100644
88839--- a/net/netfilter/nf_conntrack_acct.c
88840+++ b/net/netfilter/nf_conntrack_acct.c
88841@@ -60,7 +60,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
88842 #ifdef CONFIG_SYSCTL
88843 static int nf_conntrack_acct_init_sysctl(struct net *net)
88844 {
88845- struct ctl_table *table;
88846+ ctl_table_no_const *table;
88847
88848 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
88849 GFP_KERNEL);
88850diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
88851index e4a0c4f..c263f28 100644
88852--- a/net/netfilter/nf_conntrack_core.c
88853+++ b/net/netfilter/nf_conntrack_core.c
88854@@ -1529,6 +1529,10 @@ err_extend:
88855 #define DYING_NULLS_VAL ((1<<30)+1)
88856 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
88857
88858+#ifdef CONFIG_GRKERNSEC_HIDESYM
88859+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
88860+#endif
88861+
88862 static int nf_conntrack_init_net(struct net *net)
88863 {
88864 int ret;
88865@@ -1543,7 +1547,11 @@ static int nf_conntrack_init_net(struct net *net)
88866 goto err_stat;
88867 }
88868
88869+#ifdef CONFIG_GRKERNSEC_HIDESYM
88870+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08lx", atomic_inc_return_unchecked(&conntrack_cache_id));
88871+#else
88872 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
88873+#endif
88874 if (!net->ct.slabname) {
88875 ret = -ENOMEM;
88876 goto err_slabname;
88877diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
88878index faa978f..1afb18f 100644
88879--- a/net/netfilter/nf_conntrack_ecache.c
88880+++ b/net/netfilter/nf_conntrack_ecache.c
88881@@ -186,7 +186,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
88882 #ifdef CONFIG_SYSCTL
88883 static int nf_conntrack_event_init_sysctl(struct net *net)
88884 {
88885- struct ctl_table *table;
88886+ ctl_table_no_const *table;
88887
88888 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
88889 GFP_KERNEL);
88890diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
88891index 884f2b3..d53b33a 100644
88892--- a/net/netfilter/nf_conntrack_helper.c
88893+++ b/net/netfilter/nf_conntrack_helper.c
88894@@ -55,7 +55,7 @@ static struct ctl_table helper_sysctl_table[] = {
88895
88896 static int nf_conntrack_helper_init_sysctl(struct net *net)
88897 {
88898- struct ctl_table *table;
88899+ ctl_table_no_const *table;
88900
88901 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
88902 GFP_KERNEL);
88903diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
88904index 51e928d..72a413a 100644
88905--- a/net/netfilter/nf_conntrack_proto.c
88906+++ b/net/netfilter/nf_conntrack_proto.c
88907@@ -51,7 +51,7 @@ nf_ct_register_sysctl(struct net *net,
88908
88909 static void
88910 nf_ct_unregister_sysctl(struct ctl_table_header **header,
88911- struct ctl_table **table,
88912+ ctl_table_no_const **table,
88913 unsigned int users)
88914 {
88915 if (users > 0)
88916diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
88917index e7185c6..4ad6c9c 100644
88918--- a/net/netfilter/nf_conntrack_standalone.c
88919+++ b/net/netfilter/nf_conntrack_standalone.c
88920@@ -470,7 +470,7 @@ static ctl_table nf_ct_netfilter_table[] = {
88921
88922 static int nf_conntrack_standalone_init_sysctl(struct net *net)
88923 {
88924- struct ctl_table *table;
88925+ ctl_table_no_const *table;
88926
88927 if (net_eq(net, &init_net)) {
88928 nf_ct_netfilter_header =
88929diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
88930index 7ea8026..bc9512d 100644
88931--- a/net/netfilter/nf_conntrack_timestamp.c
88932+++ b/net/netfilter/nf_conntrack_timestamp.c
88933@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
88934 #ifdef CONFIG_SYSCTL
88935 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
88936 {
88937- struct ctl_table *table;
88938+ ctl_table_no_const *table;
88939
88940 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
88941 GFP_KERNEL);
88942diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
88943index 9e31269..bc4c1b7 100644
88944--- a/net/netfilter/nf_log.c
88945+++ b/net/netfilter/nf_log.c
88946@@ -215,7 +215,7 @@ static const struct file_operations nflog_file_ops = {
88947
88948 #ifdef CONFIG_SYSCTL
88949 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
88950-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
88951+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
88952 static struct ctl_table_header *nf_log_dir_header;
88953
88954 static int nf_log_proc_dostring(ctl_table *table, int write,
88955@@ -246,14 +246,16 @@ static int nf_log_proc_dostring(ctl_table *table, int write,
88956 rcu_assign_pointer(nf_loggers[tindex], logger);
88957 mutex_unlock(&nf_log_mutex);
88958 } else {
88959+ ctl_table_no_const nf_log_table = *table;
88960+
88961 mutex_lock(&nf_log_mutex);
88962 logger = rcu_dereference_protected(nf_loggers[tindex],
88963 lockdep_is_held(&nf_log_mutex));
88964 if (!logger)
88965- table->data = "NONE";
88966+ nf_log_table.data = "NONE";
88967 else
88968- table->data = logger->name;
88969- r = proc_dostring(table, write, buffer, lenp, ppos);
88970+ nf_log_table.data = logger->name;
88971+ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
88972 mutex_unlock(&nf_log_mutex);
88973 }
88974
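nf_log_proc_dostring() used to repoint the shared table's ->data at the current logger name before calling proc_dostring(); with nf_log_sysctl_table now marked read-only that write would fault, so the read path builds a writable stack copy and mutates that instead. The shape of the handler, with ctl_table_no_const again being the patch's typedef and current_logger_name() a hypothetical lookup:

    static int read_handler(ctl_table *table, int write,
                            void __user *buffer, size_t *lenp, loff_t *ppos)
    {
            ctl_table_no_const tmp = *table;    /* writable stack copy */

            tmp.data = current_logger_name();
            return proc_dostring(&tmp, write, buffer, lenp, ppos);
    }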
88975diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
88976index f042ae5..30ea486 100644
88977--- a/net/netfilter/nf_sockopt.c
88978+++ b/net/netfilter/nf_sockopt.c
88979@@ -45,7 +45,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
88980 }
88981 }
88982
88983- list_add(&reg->list, &nf_sockopts);
88984+ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
88985 out:
88986 mutex_unlock(&nf_sockopt_mutex);
88987 return ret;
88988@@ -55,7 +55,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
88989 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
88990 {
88991 mutex_lock(&nf_sockopt_mutex);
88992- list_del(&reg->list);
88993+ pax_list_del((struct list_head *)&reg->list);
88994 mutex_unlock(&nf_sockopt_mutex);
88995 }
88996 EXPORT_SYMBOL(nf_unregister_sockopt);
88997diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
88998index 589d686..dc3fd5d 100644
88999--- a/net/netfilter/nfnetlink_acct.c
89000+++ b/net/netfilter/nfnetlink_acct.c
89001@@ -49,6 +49,8 @@ nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb,
89002 return -EINVAL;
89003
89004 acct_name = nla_data(tb[NFACCT_NAME]);
89005+ if (strlen(acct_name) == 0)
89006+ return -EINVAL;
89007
89008 list_for_each_entry(nfacct, &nfnl_acct_list, head) {
89009 if (strncmp(nfacct->name, acct_name, NFACCT_NAME_MAX) != 0)
89010diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
89011index 92fd8ec..3f6ea4b 100644
89012--- a/net/netfilter/nfnetlink_log.c
89013+++ b/net/netfilter/nfnetlink_log.c
89014@@ -72,7 +72,7 @@ struct nfulnl_instance {
89015 };
89016
89017 static DEFINE_SPINLOCK(instances_lock);
89018-static atomic_t global_seq;
89019+static atomic_unchecked_t global_seq;
89020
89021 #define INSTANCE_BUCKETS 16
89022 static struct hlist_head instance_table[INSTANCE_BUCKETS];
89023@@ -537,7 +537,7 @@ __build_packet_message(struct nfulnl_instance *inst,
89024 /* global sequence number */
89025 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
89026 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
89027- htonl(atomic_inc_return(&global_seq))))
89028+ htonl(atomic_inc_return_unchecked(&global_seq))))
89029 goto nla_put_failure;
89030
89031 if (data_len) {
89032diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
89033index 3158d87..39006c9 100644
89034--- a/net/netfilter/nfnetlink_queue_core.c
89035+++ b/net/netfilter/nfnetlink_queue_core.c
89036@@ -1064,8 +1064,10 @@ static int __init nfnetlink_queue_init(void)
89037
89038 #ifdef CONFIG_PROC_FS
89039 if (!proc_create("nfnetlink_queue", 0440,
89040- proc_net_netfilter, &nfqnl_file_ops))
89041+ proc_net_netfilter, &nfqnl_file_ops)) {
89042+ status = -ENOMEM;
89043 goto cleanup_subsys;
89044+ }
89045 #endif
89046
89047 register_netdevice_notifier(&nfqnl_dev_notifier);
89048diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
89049new file mode 100644
89050index 0000000..c566332
89051--- /dev/null
89052+++ b/net/netfilter/xt_gradm.c
89053@@ -0,0 +1,51 @@
89054+/*
89055+ * gradm match for netfilter
89056