]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-2.9.1-3.8.6-201304061345.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.9.1-3.8.6-201304061345.patch
CommitLineData
cff17952
PK
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index b89a739..b47493f 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -2,9 +2,11 @@
6 *.aux
7 *.bin
8 *.bz2
9+*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17@@ -14,6 +16,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -48,14 +51,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *.xz
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38-.*
39+.[^g]*
40+.gen*
41 .*.d
42 .mm
43 53c700_d.h
44@@ -69,6 +75,7 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48+PERF*
49 SCCS
50 System.map*
51 TAGS
52@@ -80,6 +87,7 @@ aic7*seq.h*
53 aicasm
54 aicdb.h*
55 altivec*.c
56+ashldi3.S
57 asm-offsets.h
58 asm_offsets.h
59 autoconf.h*
60@@ -92,19 +100,24 @@ bounds.h
61 bsetup
62 btfixupprep
63 build
64+builtin-policy.h
65 bvmlinux
66 bzImage*
67 capability_names.h
68 capflags.c
69 classlist.h*
70+clut_vga16.c
71+common-cmds.h
72 comp*.log
73 compile.h*
74 conf
75 config
76 config-*
77 config_data.h*
78+config.c
79 config.mak
80 config.mak.autogen
81+config.tmp
82 conmakehash
83 consolemap_deftbl.c*
84 cpustr.h
85@@ -115,9 +128,11 @@ devlist.h*
86 dnotify_test
87 docproc
88 dslm
89+dtc-lexer.lex.c
90 elf2ecoff
91 elfconfig.h*
92 evergreen_reg_safe.h
93+exception_policy.conf
94 fixdep
95 flask.h
96 fore200e_mkfirm
97@@ -125,12 +140,15 @@ fore200e_pca_fw.c*
98 gconf
99 gconf.glade.h
100 gen-devlist
101+gen-kdb_cmds.c
102 gen_crc32table
103 gen_init_cpio
104 generated
105 genheaders
106 genksyms
107 *_gray256.c
108+hash
109+hid-example
110 hpet_example
111 hugepage-mmap
112 hugepage-shm
113@@ -145,14 +163,14 @@ int32.c
114 int4.c
115 int8.c
116 kallsyms
117-kconfig
118+kern_constants.h
119 keywords.c
120 ksym.c*
121 ksym.h*
122 kxgettext
123 lex.c
124 lex.*.c
125-linux
126+lib1funcs.S
127 logo_*.c
128 logo_*_clut224.c
129 logo_*_mono.c
130@@ -162,14 +180,15 @@ mach-types.h
131 machtypes.h
132 map
133 map_hugetlb
134-media
135 mconf
136+mdp
137 miboot*
138 mk_elfconfig
139 mkboot
140 mkbugboot
141 mkcpustr
142 mkdep
143+mkpiggy
144 mkprep
145 mkregtable
146 mktables
147@@ -185,6 +204,8 @@ oui.c*
148 page-types
149 parse.c
150 parse.h
151+parse-events*
152+pasyms.h
153 patches*
154 pca200e.bin
155 pca200e_ecd.bin2
156@@ -194,6 +215,7 @@ perf-archive
157 piggyback
158 piggy.gzip
159 piggy.S
160+pmu-*
161 pnmtologo
162 ppc_defs.h*
163 pss_boot.h
164@@ -203,7 +225,10 @@ r200_reg_safe.h
165 r300_reg_safe.h
166 r420_reg_safe.h
167 r600_reg_safe.h
168+realmode.lds
169+realmode.relocs
170 recordmcount
171+regdb.c
172 relocs
173 rlim_names.h
174 rn50_reg_safe.h
175@@ -213,8 +238,12 @@ series
176 setup
177 setup.bin
178 setup.elf
179+signing_key*
180+size_overflow_hash.h
181 sImage
182+slabinfo
183 sm_tbl*
184+sortextable
185 split-include
186 syscalltab.h
187 tables.c
188@@ -224,6 +253,7 @@ tftpboot.img
189 timeconst.h
190 times.h*
191 trix_boot.h
192+user_constants.h
193 utsrelease.h*
194 vdso-syms.lds
195 vdso.lds
196@@ -235,13 +265,17 @@ vdso32.lds
197 vdso32.so.dbg
198 vdso64.lds
199 vdso64.so.dbg
200+vdsox32.lds
201+vdsox32-syms.lds
202 version.h*
203 vmImage
204 vmlinux
205 vmlinux-*
206 vmlinux.aout
207 vmlinux.bin.all
208+vmlinux.bin.bz2
209 vmlinux.lds
210+vmlinux.relocs
211 vmlinuz
212 voffset.h
213 vsyscall.lds
214@@ -249,9 +283,12 @@ vsyscall_32.lds
215 wanxlfw.inc
216 uImage
217 unifdef
218+utsrelease.h
219 wakeup.bin
220 wakeup.elf
221 wakeup.lds
222+x509*
223 zImage*
224 zconf.hash.c
225+zconf.lex.c
226 zoffset.h
227diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
228index 986614d..e8bfedc 100644
229--- a/Documentation/kernel-parameters.txt
230+++ b/Documentation/kernel-parameters.txt
231@@ -922,6 +922,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
232 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
233 Default: 1024
234
235+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
236+ ignore grsecurity's /proc restrictions
237+
238+
239 hashdist= [KNL,NUMA] Large hashes allocated during boot
240 are distributed across NUMA nodes. Defaults on
241 for 64-bit NUMA, off otherwise.
242@@ -2121,6 +2125,18 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
243 the specified number of seconds. This is to be used if
244 your oopses keep scrolling off the screen.
245
246+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
247+ virtualization environments that don't cope well with the
248+ expand down segment used by UDEREF on X86-32 or the frequent
249+ page table updates on X86-64.
250+
251+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
252+
253+ pax_extra_latent_entropy
254+ Enable a very simple form of latent entropy extraction
255+ from the first 4GB of memory as the bootmem allocator
256+ passes the memory pages to the buddy allocator.
257+
258 pcbit= [HW,ISDN]
259
260 pcd. [PARIDE]
261diff --git a/Makefile b/Makefile
262index 10075d6..dcb3e14 100644
263--- a/Makefile
264+++ b/Makefile
265@@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
266
267 HOSTCC = gcc
268 HOSTCXX = g++
269-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
270-HOSTCXXFLAGS = -O2
271+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
272+HOSTCLFAGS += $(call cc-option, -Wno-empty-body)
273+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
274
275 # Decide whether to build built-in, modular, or both.
276 # Normally, just do built-in.
277@@ -414,8 +415,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
278 # Rules shared between *config targets and build targets
279
280 # Basic helpers built in scripts/
281-PHONY += scripts_basic
282-scripts_basic:
283+PHONY += scripts_basic gcc-plugins
284+scripts_basic: gcc-plugins
285 $(Q)$(MAKE) $(build)=scripts/basic
286 $(Q)rm -f .tmp_quiet_recordmcount
287
288@@ -575,6 +576,65 @@ else
289 KBUILD_CFLAGS += -O2
290 endif
291
292+ifndef DISABLE_PAX_PLUGINS
293+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
294+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
295+else
296+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
297+endif
298+ifneq ($(PLUGINCC),)
299+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
300+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
301+endif
302+ifdef CONFIG_PAX_MEMORY_STACKLEAK
303+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
304+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
305+endif
306+ifdef CONFIG_KALLOCSTAT_PLUGIN
307+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
308+endif
309+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
310+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
311+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
312+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
313+endif
314+ifdef CONFIG_CHECKER_PLUGIN
315+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
316+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
317+endif
318+endif
319+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
320+ifdef CONFIG_PAX_SIZE_OVERFLOW
321+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
322+endif
323+ifdef CONFIG_PAX_LATENT_ENTROPY
324+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
325+endif
326+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
327+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
328+endif
329+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
330+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
331+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
332+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
333+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN
334+ifeq ($(KBUILD_EXTMOD),)
335+gcc-plugins:
336+ $(Q)$(MAKE) $(build)=tools/gcc
337+else
338+gcc-plugins: ;
339+endif
340+else
341+gcc-plugins:
342+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
343+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
344+else
345+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
346+endif
347+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
348+endif
349+endif
350+
351 include $(srctree)/arch/$(SRCARCH)/Makefile
352
353 ifdef CONFIG_READABLE_ASM
354@@ -731,7 +791,7 @@ export mod_sign_cmd
355
356
357 ifeq ($(KBUILD_EXTMOD),)
358-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
359+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
360
361 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
362 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
363@@ -778,6 +838,8 @@ endif
364
365 # The actual objects are generated when descending,
366 # make sure no implicit rule kicks in
367+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
368+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
369 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
370
371 # Handle descending into subdirectories listed in $(vmlinux-dirs)
372@@ -787,7 +849,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
373 # Error messages still appears in the original language
374
375 PHONY += $(vmlinux-dirs)
376-$(vmlinux-dirs): prepare scripts
377+$(vmlinux-dirs): gcc-plugins prepare scripts
378 $(Q)$(MAKE) $(build)=$@
379
380 # Store (new) KERNELRELASE string in include/config/kernel.release
381@@ -831,6 +893,7 @@ prepare0: archprepare FORCE
382 $(Q)$(MAKE) $(build)=.
383
384 # All the preparing..
385+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
386 prepare: prepare0
387
388 # Generate some files
389@@ -938,6 +1001,8 @@ all: modules
390 # using awk while concatenating to the final file.
391
392 PHONY += modules
393+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
394+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
395 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
396 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
397 @$(kecho) ' Building modules, stage 2.';
398@@ -953,7 +1018,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
399
400 # Target to prepare building external modules
401 PHONY += modules_prepare
402-modules_prepare: prepare scripts
403+modules_prepare: gcc-plugins prepare scripts
404
405 # Target to install modules
406 PHONY += modules_install
407@@ -1019,7 +1084,7 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
408 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
409 signing_key.priv signing_key.x509 x509.genkey \
410 extra_certificates signing_key.x509.keyid \
411- signing_key.x509.signer
412+ signing_key.x509.signer tools/gcc/size_overflow_hash.h
413
414 # clean - Delete most, but leave enough to build external modules
415 #
416@@ -1059,6 +1124,7 @@ distclean: mrproper
417 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
418 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
419 -o -name '.*.rej' \
420+ -o -name '.*.rej' -o -name '*.so' \
421 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
422 -type f -print | xargs rm -f
423
424@@ -1219,6 +1285,8 @@ PHONY += $(module-dirs) modules
425 $(module-dirs): crmodverdir $(objtree)/Module.symvers
426 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
427
428+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
429+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
430 modules: $(module-dirs)
431 @$(kecho) ' Building modules, stage 2.';
432 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
433@@ -1355,17 +1423,21 @@ else
434 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
435 endif
436
437-%.s: %.c prepare scripts FORCE
438+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
439+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
440+%.s: %.c gcc-plugins prepare scripts FORCE
441 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
442 %.i: %.c prepare scripts FORCE
443 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
444-%.o: %.c prepare scripts FORCE
445+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
446+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
447+%.o: %.c gcc-plugins prepare scripts FORCE
448 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
449 %.lst: %.c prepare scripts FORCE
450 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
451-%.s: %.S prepare scripts FORCE
452+%.s: %.S gcc-plugins prepare scripts FORCE
453 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
454-%.o: %.S prepare scripts FORCE
455+%.o: %.S gcc-plugins prepare scripts FORCE
456 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
457 %.symtypes: %.c prepare scripts FORCE
458 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
459@@ -1375,11 +1447,15 @@ endif
460 $(cmd_crmodverdir)
461 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
462 $(build)=$(build-dir)
463-%/: prepare scripts FORCE
464+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
465+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
466+%/: gcc-plugins prepare scripts FORCE
467 $(cmd_crmodverdir)
468 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
469 $(build)=$(build-dir)
470-%.ko: prepare scripts FORCE
471+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
472+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
473+%.ko: gcc-plugins prepare scripts FORCE
474 $(cmd_crmodverdir)
475 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
476 $(build)=$(build-dir) $(@:.ko=.o)
477diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
478index c2cbe4f..f7264b4 100644
479--- a/arch/alpha/include/asm/atomic.h
480+++ b/arch/alpha/include/asm/atomic.h
481@@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
482 #define atomic_dec(v) atomic_sub(1,(v))
483 #define atomic64_dec(v) atomic64_sub(1,(v))
484
485+#define atomic64_read_unchecked(v) atomic64_read(v)
486+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
487+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
488+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
489+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
490+#define atomic64_inc_unchecked(v) atomic64_inc(v)
491+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
492+#define atomic64_dec_unchecked(v) atomic64_dec(v)
493+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
494+
495 #define smp_mb__before_atomic_dec() smp_mb()
496 #define smp_mb__after_atomic_dec() smp_mb()
497 #define smp_mb__before_atomic_inc() smp_mb()
498diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
499index ad368a9..fbe0f25 100644
500--- a/arch/alpha/include/asm/cache.h
501+++ b/arch/alpha/include/asm/cache.h
502@@ -4,19 +4,19 @@
503 #ifndef __ARCH_ALPHA_CACHE_H
504 #define __ARCH_ALPHA_CACHE_H
505
506+#include <linux/const.h>
507
508 /* Bytes per L1 (data) cache line. */
509 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
510-# define L1_CACHE_BYTES 64
511 # define L1_CACHE_SHIFT 6
512 #else
513 /* Both EV4 and EV5 are write-through, read-allocate,
514 direct-mapped, physical.
515 */
516-# define L1_CACHE_BYTES 32
517 # define L1_CACHE_SHIFT 5
518 #endif
519
520+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
521 #define SMP_CACHE_BYTES L1_CACHE_BYTES
522
523 #endif
524diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
525index 968d999..d36b2df 100644
526--- a/arch/alpha/include/asm/elf.h
527+++ b/arch/alpha/include/asm/elf.h
528@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
529
530 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
531
532+#ifdef CONFIG_PAX_ASLR
533+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
534+
535+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
536+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
537+#endif
538+
539 /* $0 is set by ld.so to a pointer to a function which might be
540 registered using atexit. This provides a mean for the dynamic
541 linker to call DT_FINI functions for shared libraries that have
542diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
543index bc2a0da..8ad11ee 100644
544--- a/arch/alpha/include/asm/pgalloc.h
545+++ b/arch/alpha/include/asm/pgalloc.h
546@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
547 pgd_set(pgd, pmd);
548 }
549
550+static inline void
551+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
552+{
553+ pgd_populate(mm, pgd, pmd);
554+}
555+
556 extern pgd_t *pgd_alloc(struct mm_struct *mm);
557
558 static inline void
559diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
560index 81a4342..348b927 100644
561--- a/arch/alpha/include/asm/pgtable.h
562+++ b/arch/alpha/include/asm/pgtable.h
563@@ -102,6 +102,17 @@ struct vm_area_struct;
564 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
565 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
566 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
567+
568+#ifdef CONFIG_PAX_PAGEEXEC
569+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
570+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
571+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
572+#else
573+# define PAGE_SHARED_NOEXEC PAGE_SHARED
574+# define PAGE_COPY_NOEXEC PAGE_COPY
575+# define PAGE_READONLY_NOEXEC PAGE_READONLY
576+#endif
577+
578 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
579
580 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
581diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
582index 2fd00b7..cfd5069 100644
583--- a/arch/alpha/kernel/module.c
584+++ b/arch/alpha/kernel/module.c
585@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
586
587 /* The small sections were sorted to the end of the segment.
588 The following should definitely cover them. */
589- gp = (u64)me->module_core + me->core_size - 0x8000;
590+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
591 got = sechdrs[me->arch.gotsecindex].sh_addr;
592
593 for (i = 0; i < n; i++) {
594diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
595index 14db93e..47bed62 100644
596--- a/arch/alpha/kernel/osf_sys.c
597+++ b/arch/alpha/kernel/osf_sys.c
598@@ -1295,16 +1295,16 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
599 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
600
601 static unsigned long
602-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
603- unsigned long limit)
604+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
605+ unsigned long limit, unsigned long flags)
606 {
607 struct vm_area_struct *vma = find_vma(current->mm, addr);
608-
609+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
610 while (1) {
611 /* At this point: (!vma || addr < vma->vm_end). */
612 if (limit - len < addr)
613 return -ENOMEM;
614- if (!vma || addr + len <= vma->vm_start)
615+ if (check_heap_stack_gap(vma, addr, len, offset))
616 return addr;
617 addr = vma->vm_end;
618 vma = vma->vm_next;
619@@ -1340,20 +1340,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
620 merely specific addresses, but regions of memory -- perhaps
621 this feature should be incorporated into all ports? */
622
623+#ifdef CONFIG_PAX_RANDMMAP
624+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
625+#endif
626+
627 if (addr) {
628- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
629+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
630 if (addr != (unsigned long) -ENOMEM)
631 return addr;
632 }
633
634 /* Next, try allocating at TASK_UNMAPPED_BASE. */
635- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
636- len, limit);
637+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
638+
639 if (addr != (unsigned long) -ENOMEM)
640 return addr;
641
642 /* Finally, try allocating in low memory. */
643- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
644+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
645
646 return addr;
647 }
648diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
649index 0c4132d..88f0d53 100644
650--- a/arch/alpha/mm/fault.c
651+++ b/arch/alpha/mm/fault.c
652@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
653 __reload_thread(pcb);
654 }
655
656+#ifdef CONFIG_PAX_PAGEEXEC
657+/*
658+ * PaX: decide what to do with offenders (regs->pc = fault address)
659+ *
660+ * returns 1 when task should be killed
661+ * 2 when patched PLT trampoline was detected
662+ * 3 when unpatched PLT trampoline was detected
663+ */
664+static int pax_handle_fetch_fault(struct pt_regs *regs)
665+{
666+
667+#ifdef CONFIG_PAX_EMUPLT
668+ int err;
669+
670+ do { /* PaX: patched PLT emulation #1 */
671+ unsigned int ldah, ldq, jmp;
672+
673+ err = get_user(ldah, (unsigned int *)regs->pc);
674+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
675+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
676+
677+ if (err)
678+ break;
679+
680+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
681+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
682+ jmp == 0x6BFB0000U)
683+ {
684+ unsigned long r27, addr;
685+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
686+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
687+
688+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
689+ err = get_user(r27, (unsigned long *)addr);
690+ if (err)
691+ break;
692+
693+ regs->r27 = r27;
694+ regs->pc = r27;
695+ return 2;
696+ }
697+ } while (0);
698+
699+ do { /* PaX: patched PLT emulation #2 */
700+ unsigned int ldah, lda, br;
701+
702+ err = get_user(ldah, (unsigned int *)regs->pc);
703+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
704+ err |= get_user(br, (unsigned int *)(regs->pc+8));
705+
706+ if (err)
707+ break;
708+
709+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
710+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
711+ (br & 0xFFE00000U) == 0xC3E00000U)
712+ {
713+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
714+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
715+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
716+
717+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
718+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
719+ return 2;
720+ }
721+ } while (0);
722+
723+ do { /* PaX: unpatched PLT emulation */
724+ unsigned int br;
725+
726+ err = get_user(br, (unsigned int *)regs->pc);
727+
728+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
729+ unsigned int br2, ldq, nop, jmp;
730+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
731+
732+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
733+ err = get_user(br2, (unsigned int *)addr);
734+ err |= get_user(ldq, (unsigned int *)(addr+4));
735+ err |= get_user(nop, (unsigned int *)(addr+8));
736+ err |= get_user(jmp, (unsigned int *)(addr+12));
737+ err |= get_user(resolver, (unsigned long *)(addr+16));
738+
739+ if (err)
740+ break;
741+
742+ if (br2 == 0xC3600000U &&
743+ ldq == 0xA77B000CU &&
744+ nop == 0x47FF041FU &&
745+ jmp == 0x6B7B0000U)
746+ {
747+ regs->r28 = regs->pc+4;
748+ regs->r27 = addr+16;
749+ regs->pc = resolver;
750+ return 3;
751+ }
752+ }
753+ } while (0);
754+#endif
755+
756+ return 1;
757+}
758+
759+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
760+{
761+ unsigned long i;
762+
763+ printk(KERN_ERR "PAX: bytes at PC: ");
764+ for (i = 0; i < 5; i++) {
765+ unsigned int c;
766+ if (get_user(c, (unsigned int *)pc+i))
767+ printk(KERN_CONT "???????? ");
768+ else
769+ printk(KERN_CONT "%08x ", c);
770+ }
771+ printk("\n");
772+}
773+#endif
774
775 /*
776 * This routine handles page faults. It determines the address,
777@@ -133,8 +251,29 @@ retry:
778 good_area:
779 si_code = SEGV_ACCERR;
780 if (cause < 0) {
781- if (!(vma->vm_flags & VM_EXEC))
782+ if (!(vma->vm_flags & VM_EXEC)) {
783+
784+#ifdef CONFIG_PAX_PAGEEXEC
785+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
786+ goto bad_area;
787+
788+ up_read(&mm->mmap_sem);
789+ switch (pax_handle_fetch_fault(regs)) {
790+
791+#ifdef CONFIG_PAX_EMUPLT
792+ case 2:
793+ case 3:
794+ return;
795+#endif
796+
797+ }
798+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
799+ do_group_exit(SIGKILL);
800+#else
801 goto bad_area;
802+#endif
803+
804+ }
805 } else if (!cause) {
806 /* Allow reads even for write-only mappings */
807 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
808diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
809index 67874b8..9aa2d62 100644
810--- a/arch/arm/Kconfig
811+++ b/arch/arm/Kconfig
812@@ -1427,6 +1427,16 @@ config ARM_ERRATA_775420
813 to deadlock. This workaround puts DSB before executing ISB if
814 an abort may occur on cache maintenance.
815
816+config ARM_ERRATA_798181
817+ bool "ARM errata: TLBI/DSB failure on Cortex-A15"
818+ depends on CPU_V7 && SMP
819+ help
820+ On Cortex-A15 (r0p0..r3p2) the TLBI*IS/DSB operations are not
821+ adequately shooting down all use of the old entries. This
822+ option enables the Linux kernel workaround for this erratum
823+ which sends an IPI to the CPUs that are running the same ASID
824+ as the one being invalidated.
825+
826 endmenu
827
828 source "arch/arm/common/Kconfig"
829@@ -1813,7 +1823,7 @@ config ALIGNMENT_TRAP
830
831 config UACCESS_WITH_MEMCPY
832 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
833- depends on MMU
834+ depends on MMU && !PAX_MEMORY_UDEREF
835 default y if CPU_FEROCEON
836 help
837 Implement faster copy_to_user and clear_user methods for CPU
838diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
839index 87dfa902..3a523fc 100644
840--- a/arch/arm/common/gic.c
841+++ b/arch/arm/common/gic.c
842@@ -81,7 +81,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
843 * Supported arch specific GIC irq extension.
844 * Default make them NULL.
845 */
846-struct irq_chip gic_arch_extn = {
847+irq_chip_no_const gic_arch_extn __read_only = {
848 .irq_eoi = NULL,
849 .irq_mask = NULL,
850 .irq_unmask = NULL,
851@@ -329,7 +329,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
852 chained_irq_exit(chip, desc);
853 }
854
855-static struct irq_chip gic_chip = {
856+static irq_chip_no_const gic_chip __read_only = {
857 .name = "GIC",
858 .irq_mask = gic_mask_irq,
859 .irq_unmask = gic_unmask_irq,
860diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
861index c79f61f..9ac0642 100644
862--- a/arch/arm/include/asm/atomic.h
863+++ b/arch/arm/include/asm/atomic.h
864@@ -17,17 +17,35 @@
865 #include <asm/barrier.h>
866 #include <asm/cmpxchg.h>
867
868+#ifdef CONFIG_GENERIC_ATOMIC64
869+#include <asm-generic/atomic64.h>
870+#endif
871+
872 #define ATOMIC_INIT(i) { (i) }
873
874 #ifdef __KERNEL__
875
876+#define _ASM_EXTABLE(from, to) \
877+" .pushsection __ex_table,\"a\"\n"\
878+" .align 3\n" \
879+" .long " #from ", " #to"\n" \
880+" .popsection"
881+
882 /*
883 * On ARM, ordinary assignment (str instruction) doesn't clear the local
884 * strex/ldrex monitor on some implementations. The reason we can use it for
885 * atomic_set() is the clrex or dummy strex done on every exception return.
886 */
887 #define atomic_read(v) (*(volatile int *)&(v)->counter)
888+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
889+{
890+ return v->counter;
891+}
892 #define atomic_set(v,i) (((v)->counter) = (i))
893+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
894+{
895+ v->counter = i;
896+}
897
898 #if __LINUX_ARM_ARCH__ >= 6
899
900@@ -42,6 +60,35 @@ static inline void atomic_add(int i, atomic_t *v)
901 int result;
902
903 __asm__ __volatile__("@ atomic_add\n"
904+"1: ldrex %1, [%3]\n"
905+" adds %0, %1, %4\n"
906+
907+#ifdef CONFIG_PAX_REFCOUNT
908+" bvc 3f\n"
909+"2: bkpt 0xf103\n"
910+"3:\n"
911+#endif
912+
913+" strex %1, %0, [%3]\n"
914+" teq %1, #0\n"
915+" bne 1b"
916+
917+#ifdef CONFIG_PAX_REFCOUNT
918+"\n4:\n"
919+ _ASM_EXTABLE(2b, 4b)
920+#endif
921+
922+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
923+ : "r" (&v->counter), "Ir" (i)
924+ : "cc");
925+}
926+
927+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
928+{
929+ unsigned long tmp;
930+ int result;
931+
932+ __asm__ __volatile__("@ atomic_add_unchecked\n"
933 "1: ldrex %0, [%3]\n"
934 " add %0, %0, %4\n"
935 " strex %1, %0, [%3]\n"
936@@ -60,6 +107,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
937 smp_mb();
938
939 __asm__ __volatile__("@ atomic_add_return\n"
940+"1: ldrex %1, [%3]\n"
941+" adds %0, %1, %4\n"
942+
943+#ifdef CONFIG_PAX_REFCOUNT
944+" bvc 3f\n"
945+" mov %0, %1\n"
946+"2: bkpt 0xf103\n"
947+"3:\n"
948+#endif
949+
950+" strex %1, %0, [%3]\n"
951+" teq %1, #0\n"
952+" bne 1b"
953+
954+#ifdef CONFIG_PAX_REFCOUNT
955+"\n4:\n"
956+ _ASM_EXTABLE(2b, 4b)
957+#endif
958+
959+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
960+ : "r" (&v->counter), "Ir" (i)
961+ : "cc");
962+
963+ smp_mb();
964+
965+ return result;
966+}
967+
968+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
969+{
970+ unsigned long tmp;
971+ int result;
972+
973+ smp_mb();
974+
975+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
976 "1: ldrex %0, [%3]\n"
977 " add %0, %0, %4\n"
978 " strex %1, %0, [%3]\n"
979@@ -80,6 +163,35 @@ static inline void atomic_sub(int i, atomic_t *v)
980 int result;
981
982 __asm__ __volatile__("@ atomic_sub\n"
983+"1: ldrex %1, [%3]\n"
984+" subs %0, %1, %4\n"
985+
986+#ifdef CONFIG_PAX_REFCOUNT
987+" bvc 3f\n"
988+"2: bkpt 0xf103\n"
989+"3:\n"
990+#endif
991+
992+" strex %1, %0, [%3]\n"
993+" teq %1, #0\n"
994+" bne 1b"
995+
996+#ifdef CONFIG_PAX_REFCOUNT
997+"\n4:\n"
998+ _ASM_EXTABLE(2b, 4b)
999+#endif
1000+
1001+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1002+ : "r" (&v->counter), "Ir" (i)
1003+ : "cc");
1004+}
1005+
1006+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1007+{
1008+ unsigned long tmp;
1009+ int result;
1010+
1011+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
1012 "1: ldrex %0, [%3]\n"
1013 " sub %0, %0, %4\n"
1014 " strex %1, %0, [%3]\n"
1015@@ -98,11 +210,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1016 smp_mb();
1017
1018 __asm__ __volatile__("@ atomic_sub_return\n"
1019-"1: ldrex %0, [%3]\n"
1020-" sub %0, %0, %4\n"
1021+"1: ldrex %1, [%3]\n"
1022+" subs %0, %1, %4\n"
1023+
1024+#ifdef CONFIG_PAX_REFCOUNT
1025+" bvc 3f\n"
1026+" mov %0, %1\n"
1027+"2: bkpt 0xf103\n"
1028+"3:\n"
1029+#endif
1030+
1031 " strex %1, %0, [%3]\n"
1032 " teq %1, #0\n"
1033 " bne 1b"
1034+
1035+#ifdef CONFIG_PAX_REFCOUNT
1036+"\n4:\n"
1037+ _ASM_EXTABLE(2b, 4b)
1038+#endif
1039+
1040 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1041 : "r" (&v->counter), "Ir" (i)
1042 : "cc");
1043@@ -134,6 +260,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
1044 return oldval;
1045 }
1046
1047+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
1048+{
1049+ unsigned long oldval, res;
1050+
1051+ smp_mb();
1052+
1053+ do {
1054+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
1055+ "ldrex %1, [%3]\n"
1056+ "mov %0, #0\n"
1057+ "teq %1, %4\n"
1058+ "strexeq %0, %5, [%3]\n"
1059+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1060+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
1061+ : "cc");
1062+ } while (res);
1063+
1064+ smp_mb();
1065+
1066+ return oldval;
1067+}
1068+
1069 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1070 {
1071 unsigned long tmp, tmp2;
1072@@ -167,7 +315,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
1073
1074 return val;
1075 }
1076+
1077+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
1078+{
1079+ return atomic_add_return(i, v);
1080+}
1081+
1082 #define atomic_add(i, v) (void) atomic_add_return(i, v)
1083+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
1084+{
1085+ (void) atomic_add_return(i, v);
1086+}
1087
1088 static inline int atomic_sub_return(int i, atomic_t *v)
1089 {
1090@@ -182,6 +340,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1091 return val;
1092 }
1093 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
1094+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1095+{
1096+ (void) atomic_sub_return(i, v);
1097+}
1098
1099 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1100 {
1101@@ -197,6 +359,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1102 return ret;
1103 }
1104
1105+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1106+{
1107+ return atomic_cmpxchg(v, old, new);
1108+}
1109+
1110 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1111 {
1112 unsigned long flags;
1113@@ -209,6 +376,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1114 #endif /* __LINUX_ARM_ARCH__ */
1115
1116 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1117+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1118+{
1119+ return xchg(&v->counter, new);
1120+}
1121
1122 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1123 {
1124@@ -221,11 +392,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1125 }
1126
1127 #define atomic_inc(v) atomic_add(1, v)
1128+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1129+{
1130+ atomic_add_unchecked(1, v);
1131+}
1132 #define atomic_dec(v) atomic_sub(1, v)
1133+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1134+{
1135+ atomic_sub_unchecked(1, v);
1136+}
1137
1138 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1139+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1140+{
1141+ return atomic_add_return_unchecked(1, v) == 0;
1142+}
1143 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1144 #define atomic_inc_return(v) (atomic_add_return(1, v))
1145+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1146+{
1147+ return atomic_add_return_unchecked(1, v);
1148+}
1149 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1150 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1151
1152@@ -241,6 +428,14 @@ typedef struct {
1153 u64 __aligned(8) counter;
1154 } atomic64_t;
1155
1156+#ifdef CONFIG_PAX_REFCOUNT
1157+typedef struct {
1158+ u64 __aligned(8) counter;
1159+} atomic64_unchecked_t;
1160+#else
1161+typedef atomic64_t atomic64_unchecked_t;
1162+#endif
1163+
1164 #define ATOMIC64_INIT(i) { (i) }
1165
1166 static inline u64 atomic64_read(const atomic64_t *v)
1167@@ -256,6 +451,19 @@ static inline u64 atomic64_read(const atomic64_t *v)
1168 return result;
1169 }
1170
1171+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
1172+{
1173+ u64 result;
1174+
1175+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1176+" ldrexd %0, %H0, [%1]"
1177+ : "=&r" (result)
1178+ : "r" (&v->counter), "Qo" (v->counter)
1179+ );
1180+
1181+ return result;
1182+}
1183+
1184 static inline void atomic64_set(atomic64_t *v, u64 i)
1185 {
1186 u64 tmp;
1187@@ -270,6 +478,20 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1188 : "cc");
1189 }
1190
1191+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1192+{
1193+ u64 tmp;
1194+
1195+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1196+"1: ldrexd %0, %H0, [%2]\n"
1197+" strexd %0, %3, %H3, [%2]\n"
1198+" teq %0, #0\n"
1199+" bne 1b"
1200+ : "=&r" (tmp), "=Qo" (v->counter)
1201+ : "r" (&v->counter), "r" (i)
1202+ : "cc");
1203+}
1204+
1205 static inline void atomic64_add(u64 i, atomic64_t *v)
1206 {
1207 u64 result;
1208@@ -278,6 +500,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1209 __asm__ __volatile__("@ atomic64_add\n"
1210 "1: ldrexd %0, %H0, [%3]\n"
1211 " adds %0, %0, %4\n"
1212+" adcs %H0, %H0, %H4\n"
1213+
1214+#ifdef CONFIG_PAX_REFCOUNT
1215+" bvc 3f\n"
1216+"2: bkpt 0xf103\n"
1217+"3:\n"
1218+#endif
1219+
1220+" strexd %1, %0, %H0, [%3]\n"
1221+" teq %1, #0\n"
1222+" bne 1b"
1223+
1224+#ifdef CONFIG_PAX_REFCOUNT
1225+"\n4:\n"
1226+ _ASM_EXTABLE(2b, 4b)
1227+#endif
1228+
1229+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1230+ : "r" (&v->counter), "r" (i)
1231+ : "cc");
1232+}
1233+
1234+static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
1235+{
1236+ u64 result;
1237+ unsigned long tmp;
1238+
1239+ __asm__ __volatile__("@ atomic64_add_unchecked\n"
1240+"1: ldrexd %0, %H0, [%3]\n"
1241+" adds %0, %0, %4\n"
1242 " adc %H0, %H0, %H4\n"
1243 " strexd %1, %0, %H0, [%3]\n"
1244 " teq %1, #0\n"
1245@@ -289,12 +541,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1246
1247 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
1248 {
1249- u64 result;
1250- unsigned long tmp;
1251+ u64 result, tmp;
1252
1253 smp_mb();
1254
1255 __asm__ __volatile__("@ atomic64_add_return\n"
1256+"1: ldrexd %1, %H1, [%3]\n"
1257+" adds %0, %1, %4\n"
1258+" adcs %H0, %H1, %H4\n"
1259+
1260+#ifdef CONFIG_PAX_REFCOUNT
1261+" bvc 3f\n"
1262+" mov %0, %1\n"
1263+" mov %H0, %H1\n"
1264+"2: bkpt 0xf103\n"
1265+"3:\n"
1266+#endif
1267+
1268+" strexd %1, %0, %H0, [%3]\n"
1269+" teq %1, #0\n"
1270+" bne 1b"
1271+
1272+#ifdef CONFIG_PAX_REFCOUNT
1273+"\n4:\n"
1274+ _ASM_EXTABLE(2b, 4b)
1275+#endif
1276+
1277+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1278+ : "r" (&v->counter), "r" (i)
1279+ : "cc");
1280+
1281+ smp_mb();
1282+
1283+ return result;
1284+}
1285+
1286+static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
1287+{
1288+ u64 result;
1289+ unsigned long tmp;
1290+
1291+ smp_mb();
1292+
1293+ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1294 "1: ldrexd %0, %H0, [%3]\n"
1295 " adds %0, %0, %4\n"
1296 " adc %H0, %H0, %H4\n"
1297@@ -318,6 +607,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1298 __asm__ __volatile__("@ atomic64_sub\n"
1299 "1: ldrexd %0, %H0, [%3]\n"
1300 " subs %0, %0, %4\n"
1301+" sbcs %H0, %H0, %H4\n"
1302+
1303+#ifdef CONFIG_PAX_REFCOUNT
1304+" bvc 3f\n"
1305+"2: bkpt 0xf103\n"
1306+"3:\n"
1307+#endif
1308+
1309+" strexd %1, %0, %H0, [%3]\n"
1310+" teq %1, #0\n"
1311+" bne 1b"
1312+
1313+#ifdef CONFIG_PAX_REFCOUNT
1314+"\n4:\n"
1315+ _ASM_EXTABLE(2b, 4b)
1316+#endif
1317+
1318+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1319+ : "r" (&v->counter), "r" (i)
1320+ : "cc");
1321+}
1322+
1323+static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
1324+{
1325+ u64 result;
1326+ unsigned long tmp;
1327+
1328+ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1329+"1: ldrexd %0, %H0, [%3]\n"
1330+" subs %0, %0, %4\n"
1331 " sbc %H0, %H0, %H4\n"
1332 " strexd %1, %0, %H0, [%3]\n"
1333 " teq %1, #0\n"
1334@@ -329,18 +648,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1335
1336 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
1337 {
1338- u64 result;
1339- unsigned long tmp;
1340+ u64 result, tmp;
1341
1342 smp_mb();
1343
1344 __asm__ __volatile__("@ atomic64_sub_return\n"
1345-"1: ldrexd %0, %H0, [%3]\n"
1346-" subs %0, %0, %4\n"
1347-" sbc %H0, %H0, %H4\n"
1348+"1: ldrexd %1, %H1, [%3]\n"
1349+" subs %0, %1, %4\n"
1350+" sbcs %H0, %H1, %H4\n"
1351+
1352+#ifdef CONFIG_PAX_REFCOUNT
1353+" bvc 3f\n"
1354+" mov %0, %1\n"
1355+" mov %H0, %H1\n"
1356+"2: bkpt 0xf103\n"
1357+"3:\n"
1358+#endif
1359+
1360 " strexd %1, %0, %H0, [%3]\n"
1361 " teq %1, #0\n"
1362 " bne 1b"
1363+
1364+#ifdef CONFIG_PAX_REFCOUNT
1365+"\n4:\n"
1366+ _ASM_EXTABLE(2b, 4b)
1367+#endif
1368+
1369 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1370 : "r" (&v->counter), "r" (i)
1371 : "cc");
1372@@ -374,6 +707,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
1373 return oldval;
1374 }
1375
1376+static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
1377+{
1378+ u64 oldval;
1379+ unsigned long res;
1380+
1381+ smp_mb();
1382+
1383+ do {
1384+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1385+ "ldrexd %1, %H1, [%3]\n"
1386+ "mov %0, #0\n"
1387+ "teq %1, %4\n"
1388+ "teqeq %H1, %H4\n"
1389+ "strexdeq %0, %5, %H5, [%3]"
1390+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1391+ : "r" (&ptr->counter), "r" (old), "r" (new)
1392+ : "cc");
1393+ } while (res);
1394+
1395+ smp_mb();
1396+
1397+ return oldval;
1398+}
1399+
1400 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1401 {
1402 u64 result;
1403@@ -397,21 +754,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1404
1405 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
1406 {
1407- u64 result;
1408- unsigned long tmp;
1409+ u64 result, tmp;
1410
1411 smp_mb();
1412
1413 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1414-"1: ldrexd %0, %H0, [%3]\n"
1415-" subs %0, %0, #1\n"
1416-" sbc %H0, %H0, #0\n"
1417+"1: ldrexd %1, %H1, [%3]\n"
1418+" subs %0, %1, #1\n"
1419+" sbcs %H0, %H1, #0\n"
1420+
1421+#ifdef CONFIG_PAX_REFCOUNT
1422+" bvc 3f\n"
1423+" mov %0, %1\n"
1424+" mov %H0, %H1\n"
1425+"2: bkpt 0xf103\n"
1426+"3:\n"
1427+#endif
1428+
1429 " teq %H0, #0\n"
1430-" bmi 2f\n"
1431+" bmi 4f\n"
1432 " strexd %1, %0, %H0, [%3]\n"
1433 " teq %1, #0\n"
1434 " bne 1b\n"
1435-"2:"
1436+"4:\n"
1437+
1438+#ifdef CONFIG_PAX_REFCOUNT
1439+ _ASM_EXTABLE(2b, 4b)
1440+#endif
1441+
1442 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1443 : "r" (&v->counter)
1444 : "cc");
1445@@ -434,13 +804,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1446 " teq %0, %5\n"
1447 " teqeq %H0, %H5\n"
1448 " moveq %1, #0\n"
1449-" beq 2f\n"
1450+" beq 4f\n"
1451 " adds %0, %0, %6\n"
1452-" adc %H0, %H0, %H6\n"
1453+" adcs %H0, %H0, %H6\n"
1454+
1455+#ifdef CONFIG_PAX_REFCOUNT
1456+" bvc 3f\n"
1457+"2: bkpt 0xf103\n"
1458+"3:\n"
1459+#endif
1460+
1461 " strexd %2, %0, %H0, [%4]\n"
1462 " teq %2, #0\n"
1463 " bne 1b\n"
1464-"2:"
1465+"4:\n"
1466+
1467+#ifdef CONFIG_PAX_REFCOUNT
1468+ _ASM_EXTABLE(2b, 4b)
1469+#endif
1470+
1471 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1472 : "r" (&v->counter), "r" (u), "r" (a)
1473 : "cc");
1474@@ -453,10 +835,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1475
1476 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1477 #define atomic64_inc(v) atomic64_add(1LL, (v))
1478+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1479 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1480+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1481 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1482 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1483 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1484+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1485 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1486 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1487 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1488diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1489index 75fe66b..ba3dee4 100644
1490--- a/arch/arm/include/asm/cache.h
1491+++ b/arch/arm/include/asm/cache.h
1492@@ -4,8 +4,10 @@
1493 #ifndef __ASMARM_CACHE_H
1494 #define __ASMARM_CACHE_H
1495
1496+#include <linux/const.h>
1497+
1498 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1499-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1500+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1501
1502 /*
1503 * Memory returned by kmalloc() may be used for DMA, so we must make
1504@@ -24,5 +26,6 @@
1505 #endif
1506
1507 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
1508+#define __read_only __attribute__ ((__section__(".data..read_only")))
1509
1510 #endif
1511diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1512index e1489c5..d418304 100644
1513--- a/arch/arm/include/asm/cacheflush.h
1514+++ b/arch/arm/include/asm/cacheflush.h
1515@@ -116,7 +116,7 @@ struct cpu_cache_fns {
1516 void (*dma_unmap_area)(const void *, size_t, int);
1517
1518 void (*dma_flush_range)(const void *, const void *);
1519-};
1520+} __no_const;
1521
1522 /*
1523 * Select the calling method
1524diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
1525index 6dcc164..b14d917 100644
1526--- a/arch/arm/include/asm/checksum.h
1527+++ b/arch/arm/include/asm/checksum.h
1528@@ -37,7 +37,19 @@ __wsum
1529 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
1530
1531 __wsum
1532-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1533+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1534+
1535+static inline __wsum
1536+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
1537+{
1538+ __wsum ret;
1539+ pax_open_userland();
1540+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
1541+ pax_close_userland();
1542+ return ret;
1543+}
1544+
1545+
1546
1547 /*
1548 * Fold a partial checksum without adding pseudo headers
1549diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1550index 7eb18c1..e38b6d2 100644
1551--- a/arch/arm/include/asm/cmpxchg.h
1552+++ b/arch/arm/include/asm/cmpxchg.h
1553@@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1554
1555 #define xchg(ptr,x) \
1556 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1557+#define xchg_unchecked(ptr,x) \
1558+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1559
1560 #include <asm-generic/cmpxchg-local.h>
1561
1562diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h
1563index 720799f..2f67631 100644
1564--- a/arch/arm/include/asm/delay.h
1565+++ b/arch/arm/include/asm/delay.h
1566@@ -25,9 +25,9 @@ extern struct arm_delay_ops {
1567 void (*const_udelay)(unsigned long);
1568 void (*udelay)(unsigned long);
1569 bool const_clock;
1570-} arm_delay_ops;
1571+} *arm_delay_ops;
1572
1573-#define __delay(n) arm_delay_ops.delay(n)
1574+#define __delay(n) arm_delay_ops->delay(n)
1575
1576 /*
1577 * This function intentionally does not exist; if you see references to
1578@@ -48,8 +48,8 @@ extern void __bad_udelay(void);
1579 * first constant multiplications gets optimized away if the delay is
1580 * a constant)
1581 */
1582-#define __udelay(n) arm_delay_ops.udelay(n)
1583-#define __const_udelay(n) arm_delay_ops.const_udelay(n)
1584+#define __udelay(n) arm_delay_ops->udelay(n)
1585+#define __const_udelay(n) arm_delay_ops->const_udelay(n)
1586
1587 #define udelay(n) \
1588 (__builtin_constant_p(n) ? \
1589diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
1590index 6ddbe44..b5e38b1 100644
1591--- a/arch/arm/include/asm/domain.h
1592+++ b/arch/arm/include/asm/domain.h
1593@@ -48,18 +48,37 @@
1594 * Domain types
1595 */
1596 #define DOMAIN_NOACCESS 0
1597-#define DOMAIN_CLIENT 1
1598 #ifdef CONFIG_CPU_USE_DOMAINS
1599+#define DOMAIN_USERCLIENT 1
1600+#define DOMAIN_KERNELCLIENT 1
1601 #define DOMAIN_MANAGER 3
1602+#define DOMAIN_VECTORS DOMAIN_USER
1603 #else
1604+
1605+#ifdef CONFIG_PAX_KERNEXEC
1606 #define DOMAIN_MANAGER 1
1607+#define DOMAIN_KERNEXEC 3
1608+#else
1609+#define DOMAIN_MANAGER 1
1610+#endif
1611+
1612+#ifdef CONFIG_PAX_MEMORY_UDEREF
1613+#define DOMAIN_USERCLIENT 0
1614+#define DOMAIN_UDEREF 1
1615+#define DOMAIN_VECTORS DOMAIN_KERNEL
1616+#else
1617+#define DOMAIN_USERCLIENT 1
1618+#define DOMAIN_VECTORS DOMAIN_USER
1619+#endif
1620+#define DOMAIN_KERNELCLIENT 1
1621+
1622 #endif
1623
1624 #define domain_val(dom,type) ((type) << (2*(dom)))
1625
1626 #ifndef __ASSEMBLY__
1627
1628-#ifdef CONFIG_CPU_USE_DOMAINS
1629+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1630 static inline void set_domain(unsigned val)
1631 {
1632 asm volatile(
1633@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
1634 isb();
1635 }
1636
1637-#define modify_domain(dom,type) \
1638- do { \
1639- struct thread_info *thread = current_thread_info(); \
1640- unsigned int domain = thread->cpu_domain; \
1641- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
1642- thread->cpu_domain = domain | domain_val(dom, type); \
1643- set_domain(thread->cpu_domain); \
1644- } while (0)
1645-
1646+extern void modify_domain(unsigned int dom, unsigned int type);
1647 #else
1648 static inline void set_domain(unsigned val) { }
1649 static inline void modify_domain(unsigned dom, unsigned type) { }
1650diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1651index 38050b1..9d90e8b 100644
1652--- a/arch/arm/include/asm/elf.h
1653+++ b/arch/arm/include/asm/elf.h
1654@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1655 the loader. We need to make sure that it is out of the way of the program
1656 that it will "exec", and that there is sufficient room for the brk. */
1657
1658-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1659+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1660+
1661+#ifdef CONFIG_PAX_ASLR
1662+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1663+
1664+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1665+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1666+#endif
1667
1668 /* When the program starts, a1 contains a pointer to a function to be
1669 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1670@@ -126,8 +133,4 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1671 extern void elf_set_personality(const struct elf32_hdr *);
1672 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1673
1674-struct mm_struct;
1675-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1676-#define arch_randomize_brk arch_randomize_brk
1677-
1678 #endif
1679diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
1680index de53547..52b9a28 100644
1681--- a/arch/arm/include/asm/fncpy.h
1682+++ b/arch/arm/include/asm/fncpy.h
1683@@ -81,7 +81,9 @@
1684 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
1685 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
1686 \
1687+ pax_open_kernel(); \
1688 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
1689+ pax_close_kernel(); \
1690 flush_icache_range((unsigned long)(dest_buf), \
1691 (unsigned long)(dest_buf) + (size)); \
1692 \
1693diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
1694index e42cf59..7b94b8f 100644
1695--- a/arch/arm/include/asm/futex.h
1696+++ b/arch/arm/include/asm/futex.h
1697@@ -50,6 +50,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1698 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1699 return -EFAULT;
1700
1701+ pax_open_userland();
1702+
1703 smp_mb();
1704 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1705 "1: ldrex %1, [%4]\n"
1706@@ -65,6 +67,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1707 : "cc", "memory");
1708 smp_mb();
1709
1710+ pax_close_userland();
1711+
1712 *uval = val;
1713 return ret;
1714 }
1715@@ -95,6 +99,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1716 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1717 return -EFAULT;
1718
1719+ pax_open_userland();
1720+
1721 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1722 "1: " TUSER(ldr) " %1, [%4]\n"
1723 " teq %1, %2\n"
1724@@ -105,6 +111,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1725 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
1726 : "cc", "memory");
1727
1728+ pax_close_userland();
1729+
1730 *uval = val;
1731 return ret;
1732 }
1733@@ -127,6 +135,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1734 return -EFAULT;
1735
1736 pagefault_disable(); /* implies preempt_disable() */
1737+ pax_open_userland();
1738
1739 switch (op) {
1740 case FUTEX_OP_SET:
1741@@ -148,6 +157,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1742 ret = -ENOSYS;
1743 }
1744
1745+ pax_close_userland();
1746 pagefault_enable(); /* subsumes preempt_enable() */
1747
1748 if (!ret) {
1749diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h
1750index 4b1ce6c..bea3f73 100644
1751--- a/arch/arm/include/asm/hardware/gic.h
1752+++ b/arch/arm/include/asm/hardware/gic.h
1753@@ -34,9 +34,10 @@
1754
1755 #ifndef __ASSEMBLY__
1756 #include <linux/irqdomain.h>
1757+#include <linux/irq.h>
1758 struct device_node;
1759
1760-extern struct irq_chip gic_arch_extn;
1761+extern irq_chip_no_const gic_arch_extn;
1762
1763 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
1764 u32 offset, struct device_node *);
1765diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
1766index 8c5e828..91b99ab 100644
1767--- a/arch/arm/include/asm/highmem.h
1768+++ b/arch/arm/include/asm/highmem.h
1769@@ -41,6 +41,13 @@ extern void kunmap_high(struct page *page);
1770 #endif
1771 #endif
1772
1773+/*
1774+ * Needed to be able to broadcast the TLB invalidation for kmap.
1775+ */
1776+#ifdef CONFIG_ARM_ERRATA_798181
1777+#undef ARCH_NEEDS_KMAP_HIGH_GET
1778+#endif
1779+
1780 #ifdef ARCH_NEEDS_KMAP_HIGH_GET
1781 extern void *kmap_high_get(struct page *page);
1782 #else
1783diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1784index 83eb2f7..ed77159 100644
1785--- a/arch/arm/include/asm/kmap_types.h
1786+++ b/arch/arm/include/asm/kmap_types.h
1787@@ -4,6 +4,6 @@
1788 /*
1789 * This is the "bare minimum". AIO seems to require this.
1790 */
1791-#define KM_TYPE_NR 16
1792+#define KM_TYPE_NR 17
1793
1794 #endif
1795diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
1796index 9e614a1..3302cca 100644
1797--- a/arch/arm/include/asm/mach/dma.h
1798+++ b/arch/arm/include/asm/mach/dma.h
1799@@ -22,7 +22,7 @@ struct dma_ops {
1800 int (*residue)(unsigned int, dma_t *); /* optional */
1801 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
1802 const char *type;
1803-};
1804+} __do_const;
1805
1806 struct dma_struct {
1807 void *addr; /* single DMA address */
1808diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1809index 2fe141f..192dc01 100644
1810--- a/arch/arm/include/asm/mach/map.h
1811+++ b/arch/arm/include/asm/mach/map.h
1812@@ -27,13 +27,16 @@ struct map_desc {
1813 #define MT_MINICLEAN 6
1814 #define MT_LOW_VECTORS 7
1815 #define MT_HIGH_VECTORS 8
1816-#define MT_MEMORY 9
1817+#define MT_MEMORY_RWX 9
1818 #define MT_ROM 10
1819-#define MT_MEMORY_NONCACHED 11
1820+#define MT_MEMORY_NONCACHED_RX 11
1821 #define MT_MEMORY_DTCM 12
1822 #define MT_MEMORY_ITCM 13
1823 #define MT_MEMORY_SO 14
1824 #define MT_MEMORY_DMA_READY 15
1825+#define MT_MEMORY_RW 16
1826+#define MT_MEMORY_RX 17
1827+#define MT_MEMORY_NONCACHED_RW 18
1828
1829 #ifdef CONFIG_MMU
1830 extern void iotable_init(struct map_desc *, int);
1831diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
1832index 863a661..a7b85e0 100644
1833--- a/arch/arm/include/asm/mmu_context.h
1834+++ b/arch/arm/include/asm/mmu_context.h
1835@@ -27,6 +27,8 @@ void __check_vmalloc_seq(struct mm_struct *mm);
1836 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
1837 #define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; })
1838
1839+DECLARE_PER_CPU(atomic64_t, active_asids);
1840+
1841 #else /* !CONFIG_CPU_HAS_ASID */
1842
1843 #ifdef CONFIG_MMU
1844diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1845index 53426c6..c7baff3 100644
1846--- a/arch/arm/include/asm/outercache.h
1847+++ b/arch/arm/include/asm/outercache.h
1848@@ -35,7 +35,7 @@ struct outer_cache_fns {
1849 #endif
1850 void (*set_debug)(unsigned long);
1851 void (*resume)(void);
1852-};
1853+} __no_const;
1854
1855 #ifdef CONFIG_OUTER_CACHE
1856
1857diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1858index 812a494..71fc0b6 100644
1859--- a/arch/arm/include/asm/page.h
1860+++ b/arch/arm/include/asm/page.h
1861@@ -114,7 +114,7 @@ struct cpu_user_fns {
1862 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1863 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1864 unsigned long vaddr, struct vm_area_struct *vma);
1865-};
1866+} __no_const;
1867
1868 #ifdef MULTI_USER
1869 extern struct cpu_user_fns cpu_user;
1870diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1871index 943504f..c37a730 100644
1872--- a/arch/arm/include/asm/pgalloc.h
1873+++ b/arch/arm/include/asm/pgalloc.h
1874@@ -17,6 +17,7 @@
1875 #include <asm/processor.h>
1876 #include <asm/cacheflush.h>
1877 #include <asm/tlbflush.h>
1878+#include <asm/system_info.h>
1879
1880 #define check_pgt_cache() do { } while (0)
1881
1882@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1883 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1884 }
1885
1886+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1887+{
1888+ pud_populate(mm, pud, pmd);
1889+}
1890+
1891 #else /* !CONFIG_ARM_LPAE */
1892
1893 /*
1894@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1895 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1896 #define pmd_free(mm, pmd) do { } while (0)
1897 #define pud_populate(mm,pmd,pte) BUG()
1898+#define pud_populate_kernel(mm,pmd,pte) BUG()
1899
1900 #endif /* CONFIG_ARM_LPAE */
1901
1902@@ -126,6 +133,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1903 __free_page(pte);
1904 }
1905
1906+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
1907+{
1908+#ifdef CONFIG_ARM_LPAE
1909+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1910+#else
1911+ if (addr & SECTION_SIZE)
1912+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
1913+ else
1914+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1915+#endif
1916+ flush_pmd_entry(pmdp);
1917+}
1918+
1919 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1920 pmdval_t prot)
1921 {
1922@@ -155,7 +175,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
1923 static inline void
1924 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
1925 {
1926- __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
1927+ __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE | __supported_pmd_mask);
1928 }
1929 #define pmd_pgtable(pmd) pmd_page(pmd)
1930
1931diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1932index 5cfba15..f415e1a 100644
1933--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1934+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1935@@ -20,12 +20,15 @@
1936 #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
1937 #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
1938 #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
1939+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
1940 #define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
1941 #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
1942 #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
1943+
1944 /*
1945 * - section
1946 */
1947+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1948 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1949 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1950 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1951@@ -37,6 +40,7 @@
1952 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1953 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1954 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1955+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
1956
1957 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1958 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1959@@ -66,6 +70,7 @@
1960 * - extended small page/tiny page
1961 */
1962 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
1963+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
1964 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
1965 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
1966 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
1967diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
1968index f97ee02..07f1be5 100644
1969--- a/arch/arm/include/asm/pgtable-2level.h
1970+++ b/arch/arm/include/asm/pgtable-2level.h
1971@@ -125,6 +125,7 @@
1972 #define L_PTE_XN (_AT(pteval_t, 1) << 9)
1973 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
1974 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
1975+#define L_PTE_PXN (_AT(pteval_t, 1) << 12) /* v7*/
1976
1977 /*
1978 * These are the memory types, defined to be compatible with
1979diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
1980index d795282..a43ea90 100644
1981--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
1982+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
1983@@ -32,15 +32,18 @@
1984 #define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0)
1985 #define PMD_BIT4 (_AT(pmdval_t, 0))
1986 #define PMD_DOMAIN(x) (_AT(pmdval_t, 0))
1987+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 59) /* PXNTable */
1988
1989 /*
1990 * - section
1991 */
1992 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1993 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1994+#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7)
1995 #define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
1996 #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
1997 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 11)
1998+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 53)
1999 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 54)
2000 #define PMD_SECT_AP_WRITE (_AT(pmdval_t, 0))
2001 #define PMD_SECT_AP_READ (_AT(pmdval_t, 0))
2002@@ -66,6 +69,7 @@
2003 #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
2004 #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
2005 #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
2006+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
2007 #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
2008
2009 /*
2010diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
2011index a3f3792..7b932a6 100644
2012--- a/arch/arm/include/asm/pgtable-3level.h
2013+++ b/arch/arm/include/asm/pgtable-3level.h
2014@@ -74,6 +74,7 @@
2015 #define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
2016 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
2017 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
2018+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
2019 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
2020 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */
2021 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
2022@@ -82,6 +83,7 @@
2023 /*
2024 * To be used in assembly code with the upper page attributes.
2025 */
2026+#define L_PTE_PXN_HIGH (1 << (53 - 32))
2027 #define L_PTE_XN_HIGH (1 << (54 - 32))
2028 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
2029
2030diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
2031index c094749..a6ff605 100644
2032--- a/arch/arm/include/asm/pgtable.h
2033+++ b/arch/arm/include/asm/pgtable.h
2034@@ -30,6 +30,9 @@
2035 #include <asm/pgtable-2level.h>
2036 #endif
2037
2038+#define ktla_ktva(addr) (addr)
2039+#define ktva_ktla(addr) (addr)
2040+
2041 /*
2042 * Just any arbitrary offset to the start of the vmalloc VM area: the
2043 * current 8MB value just means that there will be a 8MB "hole" after the
2044@@ -45,6 +48,9 @@
2045 #define LIBRARY_TEXT_START 0x0c000000
2046
2047 #ifndef __ASSEMBLY__
2048+extern pteval_t __supported_pte_mask;
2049+extern pmdval_t __supported_pmd_mask;
2050+
2051 extern void __pte_error(const char *file, int line, pte_t);
2052 extern void __pmd_error(const char *file, int line, pmd_t);
2053 extern void __pgd_error(const char *file, int line, pgd_t);
2054@@ -53,6 +59,50 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2055 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
2056 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
2057
2058+#define __HAVE_ARCH_PAX_OPEN_KERNEL
2059+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2060+
2061+#ifdef CONFIG_PAX_KERNEXEC
2062+#include <asm/domain.h>
2063+#include <linux/thread_info.h>
2064+#include <linux/preempt.h>
2065+#endif
2066+
2067+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2068+static inline int test_domain(int domain, int domaintype)
2069+{
2070+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2071+}
2072+#endif
2073+
2074+#ifdef CONFIG_PAX_KERNEXEC
2075+static inline unsigned long pax_open_kernel(void) {
2076+#ifdef CONFIG_ARM_LPAE
2077+ /* TODO */
2078+#else
2079+ preempt_disable();
2080+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2081+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2082+#endif
2083+ return 0;
2084+}
2085+
2086+static inline unsigned long pax_close_kernel(void) {
2087+#ifdef CONFIG_ARM_LPAE
2088+ /* TODO */
2089+#else
2090+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2091+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2092+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2093+ preempt_enable_no_resched();
2094+#endif
2095+ return 0;
2096+}
2097+#else
2098+static inline unsigned long pax_open_kernel(void) { return 0; }
2099+static inline unsigned long pax_close_kernel(void) { return 0; }
2100+#endif
2101+
2102 /*
2103 * This is the lowest virtual address we can permit any user space
2104 * mapping to be mapped at. This is particularly important for
2105@@ -63,8 +113,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2106 /*
2107 * The pgprot_* and protection_map entries will be fixed up in runtime
2108 * to include the cachable and bufferable bits based on memory policy,
2109- * as well as any architecture dependent bits like global/ASID and SMP
2110- * shared mapping bits.
2111+ * as well as any architecture dependent bits like global/ASID, PXN,
2112+ * and SMP shared mapping bits.
2113 */
2114 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2115
2116@@ -241,7 +291,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
2117 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2118 {
2119 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2120- L_PTE_NONE | L_PTE_VALID;
2121+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2122 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2123 return pte;
2124 }
2125diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
2126index f3628fb..a0672dd 100644
2127--- a/arch/arm/include/asm/proc-fns.h
2128+++ b/arch/arm/include/asm/proc-fns.h
2129@@ -75,7 +75,7 @@ extern struct processor {
2130 unsigned int suspend_size;
2131 void (*do_suspend)(void *);
2132 void (*do_resume)(void *);
2133-} processor;
2134+} __do_const processor;
2135
2136 #ifndef MULTI_CPU
2137 extern void cpu_proc_init(void);
2138diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
2139index 06e7d50..8a8e251 100644
2140--- a/arch/arm/include/asm/processor.h
2141+++ b/arch/arm/include/asm/processor.h
2142@@ -65,9 +65,8 @@ struct thread_struct {
2143 regs->ARM_cpsr |= PSR_ENDSTATE; \
2144 regs->ARM_pc = pc & ~1; /* pc */ \
2145 regs->ARM_sp = sp; /* sp */ \
2146- regs->ARM_r2 = stack[2]; /* r2 (envp) */ \
2147- regs->ARM_r1 = stack[1]; /* r1 (argv) */ \
2148- regs->ARM_r0 = stack[0]; /* r0 (argc) */ \
2149+ /* r2 (envp), r1 (argv), r0 (argc) */ \
2150+ (void)copy_from_user(&regs->ARM_r0, (const char __user *)stack, 3 * sizeof(unsigned long)); \
2151 nommu_start_thread(regs); \
2152 })
2153
2154diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2155index d3a22be..3a69ad5 100644
2156--- a/arch/arm/include/asm/smp.h
2157+++ b/arch/arm/include/asm/smp.h
2158@@ -107,7 +107,7 @@ struct smp_operations {
2159 int (*cpu_disable)(unsigned int cpu);
2160 #endif
2161 #endif
2162-};
2163+} __no_const;
2164
2165 /*
2166 * set platform specific SMP operations
2167diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2168index cddda1f..ff357f7 100644
2169--- a/arch/arm/include/asm/thread_info.h
2170+++ b/arch/arm/include/asm/thread_info.h
2171@@ -77,9 +77,9 @@ struct thread_info {
2172 .flags = 0, \
2173 .preempt_count = INIT_PREEMPT_COUNT, \
2174 .addr_limit = KERNEL_DS, \
2175- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2176- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2177- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2178+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2179+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2180+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2181 .restart_block = { \
2182 .fn = do_no_restart_syscall, \
2183 }, \
2184@@ -152,6 +152,12 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2185 #define TIF_SYSCALL_AUDIT 9
2186 #define TIF_SYSCALL_TRACEPOINT 10
2187 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2188+
2189+/* within 8 bits of TIF_SYSCALL_TRACE
2190+ * to meet flexible second operand requirements
2191+ */
2192+#define TIF_GRSEC_SETXID 12
2193+
2194 #define TIF_USING_IWMMXT 17
2195 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2196 #define TIF_RESTORE_SIGMASK 20
2197@@ -165,10 +171,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2198 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2199 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2200 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2201+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2202
2203 /* Checks for any syscall work in entry-common.S */
2204 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2205- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2206+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2207
2208 /*
2209 * Change these and you break ASM code in entry-common.S
2210diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
2211index 6e924d3..a9f3ddf 100644
2212--- a/arch/arm/include/asm/tlbflush.h
2213+++ b/arch/arm/include/asm/tlbflush.h
2214@@ -430,6 +430,21 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
2215 }
2216 }
2217
2218+#ifdef CONFIG_ARM_ERRATA_798181
2219+static inline void dummy_flush_tlb_a15_erratum(void)
2220+{
2221+ /*
2222+ * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0.
2223+ */
2224+ asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
2225+ dsb();
2226+}
2227+#else
2228+static inline void dummy_flush_tlb_a15_erratum(void)
2229+{
2230+}
2231+#endif
2232+
2233 /*
2234 * flush_pmd_entry
2235 *
2236diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2237index 7e1f760..752fcb7 100644
2238--- a/arch/arm/include/asm/uaccess.h
2239+++ b/arch/arm/include/asm/uaccess.h
2240@@ -18,6 +18,7 @@
2241 #include <asm/domain.h>
2242 #include <asm/unified.h>
2243 #include <asm/compiler.h>
2244+#include <asm/pgtable.h>
2245
2246 #define VERIFY_READ 0
2247 #define VERIFY_WRITE 1
2248@@ -60,10 +61,34 @@ extern int __put_user_bad(void);
2249 #define USER_DS TASK_SIZE
2250 #define get_fs() (current_thread_info()->addr_limit)
2251
2252+static inline void pax_open_userland(void)
2253+{
2254+
2255+#ifdef CONFIG_PAX_MEMORY_UDEREF
2256+ if (get_fs() == USER_DS) {
2257+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2258+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2259+ }
2260+#endif
2261+
2262+}
2263+
2264+static inline void pax_close_userland(void)
2265+{
2266+
2267+#ifdef CONFIG_PAX_MEMORY_UDEREF
2268+ if (get_fs() == USER_DS) {
2269+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2270+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2271+ }
2272+#endif
2273+
2274+}
2275+
2276 static inline void set_fs(mm_segment_t fs)
2277 {
2278 current_thread_info()->addr_limit = fs;
2279- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2280+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2281 }
2282
2283 #define segment_eq(a,b) ((a) == (b))
2284@@ -143,8 +168,12 @@ extern int __get_user_4(void *);
2285
2286 #define get_user(x,p) \
2287 ({ \
2288+ int __e; \
2289 might_fault(); \
2290- __get_user_check(x,p); \
2291+ pax_open_userland(); \
2292+ __e = __get_user_check(x,p); \
2293+ pax_close_userland(); \
2294+ __e; \
2295 })
2296
2297 extern int __put_user_1(void *, unsigned int);
2298@@ -188,8 +217,12 @@ extern int __put_user_8(void *, unsigned long long);
2299
2300 #define put_user(x,p) \
2301 ({ \
2302+ int __e; \
2303 might_fault(); \
2304- __put_user_check(x,p); \
2305+ pax_open_userland(); \
2306+ __e = __put_user_check(x,p); \
2307+ pax_close_userland(); \
2308+ __e; \
2309 })
2310
2311 #else /* CONFIG_MMU */
2312@@ -230,13 +263,17 @@ static inline void set_fs(mm_segment_t fs)
2313 #define __get_user(x,ptr) \
2314 ({ \
2315 long __gu_err = 0; \
2316+ pax_open_userland(); \
2317 __get_user_err((x),(ptr),__gu_err); \
2318+ pax_close_userland(); \
2319 __gu_err; \
2320 })
2321
2322 #define __get_user_error(x,ptr,err) \
2323 ({ \
2324+ pax_open_userland(); \
2325 __get_user_err((x),(ptr),err); \
2326+ pax_close_userland(); \
2327 (void) 0; \
2328 })
2329
2330@@ -312,13 +349,17 @@ do { \
2331 #define __put_user(x,ptr) \
2332 ({ \
2333 long __pu_err = 0; \
2334+ pax_open_userland(); \
2335 __put_user_err((x),(ptr),__pu_err); \
2336+ pax_close_userland(); \
2337 __pu_err; \
2338 })
2339
2340 #define __put_user_error(x,ptr,err) \
2341 ({ \
2342+ pax_open_userland(); \
2343 __put_user_err((x),(ptr),err); \
2344+ pax_close_userland(); \
2345 (void) 0; \
2346 })
2347
2348@@ -418,11 +459,44 @@ do { \
2349
2350
2351 #ifdef CONFIG_MMU
2352-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2353-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2354+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2355+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2356+
2357+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2358+{
2359+ unsigned long ret;
2360+
2361+ check_object_size(to, n, false);
2362+ pax_open_userland();
2363+ ret = ___copy_from_user(to, from, n);
2364+ pax_close_userland();
2365+ return ret;
2366+}
2367+
2368+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2369+{
2370+ unsigned long ret;
2371+
2372+ check_object_size(from, n, true);
2373+ pax_open_userland();
2374+ ret = ___copy_to_user(to, from, n);
2375+ pax_close_userland();
2376+ return ret;
2377+}
2378+
2379 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2380-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2381+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2382 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2383+
2384+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2385+{
2386+ unsigned long ret;
2387+ pax_open_userland();
2388+ ret = ___clear_user(addr, n);
2389+ pax_close_userland();
2390+ return ret;
2391+}
2392+
2393 #else
2394 #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
2395 #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
2396@@ -431,6 +505,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2397
2398 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2399 {
2400+ if ((long)n < 0)
2401+ return n;
2402+
2403 if (access_ok(VERIFY_READ, from, n))
2404 n = __copy_from_user(to, from, n);
2405 else /* security hole - plug it */
2406@@ -440,6 +517,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2407
2408 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2409 {
2410+ if ((long)n < 0)
2411+ return n;
2412+
2413 if (access_ok(VERIFY_WRITE, to, n))
2414 n = __copy_to_user(to, from, n);
2415 return n;
2416diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2417index 96ee092..37f1844 100644
2418--- a/arch/arm/include/uapi/asm/ptrace.h
2419+++ b/arch/arm/include/uapi/asm/ptrace.h
2420@@ -73,7 +73,7 @@
2421 * ARMv7 groups of PSR bits
2422 */
2423 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2424-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2425+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2426 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2427 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2428
2429diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2430index 60d3b73..d27ee09 100644
2431--- a/arch/arm/kernel/armksyms.c
2432+++ b/arch/arm/kernel/armksyms.c
2433@@ -89,9 +89,9 @@ EXPORT_SYMBOL(__memzero);
2434 #ifdef CONFIG_MMU
2435 EXPORT_SYMBOL(copy_page);
2436
2437-EXPORT_SYMBOL(__copy_from_user);
2438-EXPORT_SYMBOL(__copy_to_user);
2439-EXPORT_SYMBOL(__clear_user);
2440+EXPORT_SYMBOL(___copy_from_user);
2441+EXPORT_SYMBOL(___copy_to_user);
2442+EXPORT_SYMBOL(___clear_user);
2443
2444 EXPORT_SYMBOL(__get_user_1);
2445 EXPORT_SYMBOL(__get_user_2);
2446diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2447index 0f82098..3dbd3ee 100644
2448--- a/arch/arm/kernel/entry-armv.S
2449+++ b/arch/arm/kernel/entry-armv.S
2450@@ -47,6 +47,87 @@
2451 9997:
2452 .endm
2453
2454+ .macro pax_enter_kernel
2455+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2456+ @ make aligned space for saved DACR
2457+ sub sp, sp, #8
2458+ @ save regs
2459+ stmdb sp!, {r1, r2}
2460+ @ read DACR from cpu_domain into r1
2461+ mov r2, sp
2462+ @ assume 8K pages, since we have to split the immediate in two
2463+ bic r2, r2, #(0x1fc0)
2464+ bic r2, r2, #(0x3f)
2465+ ldr r1, [r2, #TI_CPU_DOMAIN]
2466+ @ store old DACR on stack
2467+ str r1, [sp, #8]
2468+#ifdef CONFIG_PAX_KERNEXEC
2469+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2470+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2471+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2472+#endif
2473+#ifdef CONFIG_PAX_MEMORY_UDEREF
2474+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2475+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2476+#endif
2477+ @ write r1 to current_thread_info()->cpu_domain
2478+ str r1, [r2, #TI_CPU_DOMAIN]
2479+ @ write r1 to DACR
2480+ mcr p15, 0, r1, c3, c0, 0
2481+ @ instruction sync
2482+ instr_sync
2483+ @ restore regs
2484+ ldmia sp!, {r1, r2}
2485+#endif
2486+ .endm
2487+
2488+ .macro pax_open_userland
2489+#ifdef CONFIG_PAX_MEMORY_UDEREF
2490+ @ save regs
2491+ stmdb sp!, {r0, r1}
2492+ @ read DACR from cpu_domain into r1
2493+ mov r0, sp
2494+ @ assume 8K pages, since we have to split the immediate in two
2495+ bic r0, r0, #(0x1fc0)
2496+ bic r0, r0, #(0x3f)
2497+ ldr r1, [r0, #TI_CPU_DOMAIN]
2498+ @ set current DOMAIN_USER to DOMAIN_CLIENT
2499+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2500+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2501+ @ write r1 to current_thread_info()->cpu_domain
2502+ str r1, [r0, #TI_CPU_DOMAIN]
2503+ @ write r1 to DACR
2504+ mcr p15, 0, r1, c3, c0, 0
2505+ @ instruction sync
2506+ instr_sync
2507+ @ restore regs
2508+ ldmia sp!, {r0, r1}
2509+#endif
2510+ .endm
2511+
2512+ .macro pax_close_userland
2513+#ifdef CONFIG_PAX_MEMORY_UDEREF
2514+ @ save regs
2515+ stmdb sp!, {r0, r1}
2516+ @ read DACR from cpu_domain into r1
2517+ mov r0, sp
2518+ @ assume 8K pages, since we have to split the immediate in two
2519+ bic r0, r0, #(0x1fc0)
2520+ bic r0, r0, #(0x3f)
2521+ ldr r1, [r0, #TI_CPU_DOMAIN]
2522+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2523+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2524+ @ write r1 to current_thread_info()->cpu_domain
2525+ str r1, [r0, #TI_CPU_DOMAIN]
2526+ @ write r1 to DACR
2527+ mcr p15, 0, r1, c3, c0, 0
2528+ @ instruction sync
2529+ instr_sync
2530+ @ restore regs
2531+ ldmia sp!, {r0, r1}
2532+#endif
2533+ .endm
2534+
2535 .macro pabt_helper
2536 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2537 #ifdef MULTI_PABORT
2538@@ -89,11 +170,15 @@
2539 * Invalid mode handlers
2540 */
2541 .macro inv_entry, reason
2542+
2543+ pax_enter_kernel
2544+
2545 sub sp, sp, #S_FRAME_SIZE
2546 ARM( stmib sp, {r1 - lr} )
2547 THUMB( stmia sp, {r0 - r12} )
2548 THUMB( str sp, [sp, #S_SP] )
2549 THUMB( str lr, [sp, #S_LR] )
2550+
2551 mov r1, #\reason
2552 .endm
2553
2554@@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
2555 .macro svc_entry, stack_hole=0
2556 UNWIND(.fnstart )
2557 UNWIND(.save {r0 - pc} )
2558+
2559+ pax_enter_kernel
2560+
2561 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2562+
2563 #ifdef CONFIG_THUMB2_KERNEL
2564 SPFIX( str r0, [sp] ) @ temporarily saved
2565 SPFIX( mov r0, sp )
2566@@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
2567 ldmia r0, {r3 - r5}
2568 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2569 mov r6, #-1 @ "" "" "" ""
2570+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2571+ @ offset sp by 8 as done in pax_enter_kernel
2572+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2573+#else
2574 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2575+#endif
2576 SPFIX( addeq r2, r2, #4 )
2577 str r3, [sp, #-4]! @ save the "real" r0 copied
2578 @ from the exception stack
2579@@ -359,6 +453,9 @@ ENDPROC(__pabt_svc)
2580 .macro usr_entry
2581 UNWIND(.fnstart )
2582 UNWIND(.cantunwind ) @ don't unwind the user space
2583+
2584+ pax_enter_kernel_user
2585+
2586 sub sp, sp, #S_FRAME_SIZE
2587 ARM( stmib sp, {r1 - r12} )
2588 THUMB( stmia sp, {r0 - r12} )
2589@@ -456,7 +553,9 @@ __und_usr:
2590 tst r3, #PSR_T_BIT @ Thumb mode?
2591 bne __und_usr_thumb
2592 sub r4, r2, #4 @ ARM instr at LR - 4
2593+ pax_open_userland
2594 1: ldrt r0, [r4]
2595+ pax_close_userland
2596 #ifdef CONFIG_CPU_ENDIAN_BE8
2597 rev r0, r0 @ little endian instruction
2598 #endif
2599@@ -491,10 +590,14 @@ __und_usr_thumb:
2600 */
2601 .arch armv6t2
2602 #endif
2603+ pax_open_userland
2604 2: ldrht r5, [r4]
2605+ pax_close_userland
2606 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2607 blo __und_usr_fault_16 @ 16bit undefined instruction
2608+ pax_open_userland
2609 3: ldrht r0, [r2]
2610+ pax_close_userland
2611 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2612 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2613 orr r0, r0, r5, lsl #16
2614@@ -733,7 +836,7 @@ ENTRY(__switch_to)
2615 THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack
2616 THUMB( str sp, [ip], #4 )
2617 THUMB( str lr, [ip], #4 )
2618-#ifdef CONFIG_CPU_USE_DOMAINS
2619+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC)
2620 ldr r6, [r2, #TI_CPU_DOMAIN]
2621 #endif
2622 set_tls r3, r4, r5
2623@@ -742,7 +845,7 @@ ENTRY(__switch_to)
2624 ldr r8, =__stack_chk_guard
2625 ldr r7, [r7, #TSK_STACK_CANARY]
2626 #endif
2627-#ifdef CONFIG_CPU_USE_DOMAINS
2628+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC)
2629 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2630 #endif
2631 mov r5, r0
2632diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2633index a6c301e..908821b 100644
2634--- a/arch/arm/kernel/entry-common.S
2635+++ b/arch/arm/kernel/entry-common.S
2636@@ -10,18 +10,46 @@
2637
2638 #include <asm/unistd.h>
2639 #include <asm/ftrace.h>
2640+#include <asm/domain.h>
2641 #include <asm/unwind.h>
2642
2643+#include "entry-header.S"
2644+
2645 #ifdef CONFIG_NEED_RET_TO_USER
2646 #include <mach/entry-macro.S>
2647 #else
2648 .macro arch_ret_to_user, tmp1, tmp2
2649+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2650+ @ save regs
2651+ stmdb sp!, {r1, r2}
2652+ @ read DACR from cpu_domain into r1
2653+ mov r2, sp
2654+ @ assume 8K pages, since we have to split the immediate in two
2655+ bic r2, r2, #(0x1fc0)
2656+ bic r2, r2, #(0x3f)
2657+ ldr r1, [r2, #TI_CPU_DOMAIN]
2658+#ifdef CONFIG_PAX_KERNEXEC
2659+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2660+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2661+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2662+#endif
2663+#ifdef CONFIG_PAX_MEMORY_UDEREF
2664+ @ set current DOMAIN_USER to DOMAIN_UDEREF
2665+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2666+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2667+#endif
2668+ @ write r1 to current_thread_info()->cpu_domain
2669+ str r1, [r2, #TI_CPU_DOMAIN]
2670+ @ write r1 to DACR
2671+ mcr p15, 0, r1, c3, c0, 0
2672+ @ instruction sync
2673+ instr_sync
2674+ @ restore regs
2675+ ldmia sp!, {r1, r2}
2676+#endif
2677 .endm
2678 #endif
2679
2680-#include "entry-header.S"
2681-
2682-
2683 .align 5
2684 /*
2685 * This is the fast syscall return path. We do as little as
2686@@ -339,6 +367,7 @@ ENDPROC(ftrace_stub)
2687
2688 .align 5
2689 ENTRY(vector_swi)
2690+
2691 sub sp, sp, #S_FRAME_SIZE
2692 stmia sp, {r0 - r12} @ Calling r0 - r12
2693 ARM( add r8, sp, #S_PC )
2694@@ -388,6 +417,12 @@ ENTRY(vector_swi)
2695 ldr scno, [lr, #-4] @ get SWI instruction
2696 #endif
2697
2698+ /*
2699+ * do this here to avoid a performance hit of wrapping the code above
2700+ * that directly dereferences userland to parse the SWI instruction
2701+ */
2702+ pax_enter_kernel_user
2703+
2704 #ifdef CONFIG_ALIGNMENT_TRAP
2705 ldr ip, __cr_alignment
2706 ldr ip, [ip]
2707diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2708index 9a8531e..812e287 100644
2709--- a/arch/arm/kernel/entry-header.S
2710+++ b/arch/arm/kernel/entry-header.S
2711@@ -73,9 +73,66 @@
2712 msr cpsr_c, \rtemp @ switch back to the SVC mode
2713 .endm
2714
2715+ .macro pax_enter_kernel_user
2716+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2717+ @ save regs
2718+ stmdb sp!, {r0, r1}
2719+ @ read DACR from cpu_domain into r1
2720+ mov r0, sp
2721+ @ assume 8K pages, since we have to split the immediate in two
2722+ bic r0, r0, #(0x1fc0)
2723+ bic r0, r0, #(0x3f)
2724+ ldr r1, [r0, #TI_CPU_DOMAIN]
2725+#ifdef CONFIG_PAX_MEMORY_UDEREF
2726+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2727+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2728+#endif
2729+#ifdef CONFIG_PAX_KERNEXEC
2730+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2731+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2732+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2733+#endif
2734+ @ write r1 to current_thread_info()->cpu_domain
2735+ str r1, [r0, #TI_CPU_DOMAIN]
2736+ @ write r1 to DACR
2737+ mcr p15, 0, r1, c3, c0, 0
2738+ @ instruction sync
2739+ instr_sync
2740+ @ restore regs
2741+ ldmia sp!, {r0, r1}
2742+#endif
2743+ .endm
2744+
2745+ .macro pax_exit_kernel
2746+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2747+ @ save regs
2748+ stmdb sp!, {r0, r1}
2749+ @ read old DACR from stack into r1
2750+ ldr r1, [sp, #(8 + S_SP)]
2751+ sub r1, r1, #8
2752+ ldr r1, [r1]
2753+
2754+ @ write r1 to current_thread_info()->cpu_domain
2755+ mov r0, sp
2756+ @ assume 8K pages, since we have to split the immediate in two
2757+ bic r0, r0, #(0x1fc0)
2758+ bic r0, r0, #(0x3f)
2759+ str r1, [r0, #TI_CPU_DOMAIN]
2760+ @ write r1 to DACR
2761+ mcr p15, 0, r1, c3, c0, 0
2762+ @ instruction sync
2763+ instr_sync
2764+ @ restore regs
2765+ ldmia sp!, {r0, r1}
2766+#endif
2767+ .endm
2768+
2769 #ifndef CONFIG_THUMB2_KERNEL
2770 .macro svc_exit, rpsr
2771 msr spsr_cxsf, \rpsr
2772+
2773+ pax_exit_kernel
2774+
2775 #if defined(CONFIG_CPU_V6)
2776 ldr r0, [sp]
2777 strex r1, r2, [sp] @ clear the exclusive monitor
2778@@ -121,6 +178,9 @@
2779 .endm
2780 #else /* CONFIG_THUMB2_KERNEL */
2781 .macro svc_exit, rpsr
2782+
2783+ pax_exit_kernel
2784+
2785 ldr lr, [sp, #S_SP] @ top of the stack
2786 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2787 clrex @ clear the exclusive monitor
2788diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2789index 2adda11..7fbe958 100644
2790--- a/arch/arm/kernel/fiq.c
2791+++ b/arch/arm/kernel/fiq.c
2792@@ -82,7 +82,9 @@ void set_fiq_handler(void *start, unsigned int length)
2793 #if defined(CONFIG_CPU_USE_DOMAINS)
2794 memcpy((void *)0xffff001c, start, length);
2795 #else
2796+ pax_open_kernel();
2797 memcpy(vectors_page + 0x1c, start, length);
2798+ pax_close_kernel();
2799 #endif
2800 flush_icache_range(0xffff001c, 0xffff001c + length);
2801 if (!vectors_high())
2802diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2803index e0eb9a1..caee108 100644
2804--- a/arch/arm/kernel/head.S
2805+++ b/arch/arm/kernel/head.S
2806@@ -52,7 +52,9 @@
2807 .equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
2808
2809 .macro pgtbl, rd, phys
2810- add \rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE
2811+ mov \rd, #TEXT_OFFSET
2812+ sub \rd, #PG_DIR_SIZE
2813+ add \rd, \rd, \phys
2814 .endm
2815
2816 /*
2817@@ -267,7 +269,7 @@ __create_page_tables:
2818 addne r6, r6, #1 << SECTION_SHIFT
2819 strne r6, [r3]
2820
2821-#if defined(CONFIG_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
2822+#if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
2823 sub r4, r4, #4 @ Fixup page table pointer
2824 @ for 64-bit descriptors
2825 #endif
2826@@ -434,7 +436,7 @@ __enable_mmu:
2827 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2828 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2829 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2830- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2831+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2832 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2833 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2834 #endif
2835diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
2836index 5ff2e77..556d030 100644
2837--- a/arch/arm/kernel/hw_breakpoint.c
2838+++ b/arch/arm/kernel/hw_breakpoint.c
2839@@ -1011,7 +1011,7 @@ static int __cpuinit dbg_reset_notify(struct notifier_block *self,
2840 return NOTIFY_OK;
2841 }
2842
2843-static struct notifier_block __cpuinitdata dbg_reset_nb = {
2844+static struct notifier_block dbg_reset_nb = {
2845 .notifier_call = dbg_reset_notify,
2846 };
2847
2848diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2849index 1e9be5d..03edbc2 100644
2850--- a/arch/arm/kernel/module.c
2851+++ b/arch/arm/kernel/module.c
2852@@ -37,12 +37,37 @@
2853 #endif
2854
2855 #ifdef CONFIG_MMU
2856-void *module_alloc(unsigned long size)
2857+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2858 {
2859+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2860+ return NULL;
2861 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2862- GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
2863+ GFP_KERNEL, prot, -1,
2864 __builtin_return_address(0));
2865 }
2866+
2867+void *module_alloc(unsigned long size)
2868+{
2869+
2870+#ifdef CONFIG_PAX_KERNEXEC
2871+ return __module_alloc(size, PAGE_KERNEL);
2872+#else
2873+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2874+#endif
2875+
2876+}
2877+
2878+#ifdef CONFIG_PAX_KERNEXEC
2879+void module_free_exec(struct module *mod, void *module_region)
2880+{
2881+ module_free(mod, module_region);
2882+}
2883+
2884+void *module_alloc_exec(unsigned long size)
2885+{
2886+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2887+}
2888+#endif
2889 #endif
2890
2891 int
2892diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2893index 07314af..c46655c 100644
2894--- a/arch/arm/kernel/patch.c
2895+++ b/arch/arm/kernel/patch.c
2896@@ -18,6 +18,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2897 bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
2898 int size;
2899
2900+ pax_open_kernel();
2901 if (thumb2 && __opcode_is_thumb16(insn)) {
2902 *(u16 *)addr = __opcode_to_mem_thumb16(insn);
2903 size = sizeof(u16);
2904@@ -39,6 +40,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2905 *(u32 *)addr = insn;
2906 size = sizeof(u32);
2907 }
2908+ pax_close_kernel();
2909
2910 flush_icache_range((uintptr_t)(addr),
2911 (uintptr_t)(addr) + size);
2912diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
2913index 5f66206..dce492f 100644
2914--- a/arch/arm/kernel/perf_event_cpu.c
2915+++ b/arch/arm/kernel/perf_event_cpu.c
2916@@ -171,7 +171,7 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
2917 return NOTIFY_OK;
2918 }
2919
2920-static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = {
2921+static struct notifier_block cpu_pmu_hotplug_notifier = {
2922 .notifier_call = cpu_pmu_notify,
2923 };
2924
2925diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2926index c6dec5f..e0fddd1 100644
2927--- a/arch/arm/kernel/process.c
2928+++ b/arch/arm/kernel/process.c
2929@@ -28,7 +28,6 @@
2930 #include <linux/tick.h>
2931 #include <linux/utsname.h>
2932 #include <linux/uaccess.h>
2933-#include <linux/random.h>
2934 #include <linux/hw_breakpoint.h>
2935 #include <linux/cpuidle.h>
2936 #include <linux/leds.h>
2937@@ -256,9 +255,10 @@ void machine_power_off(void)
2938 machine_shutdown();
2939 if (pm_power_off)
2940 pm_power_off();
2941+ BUG();
2942 }
2943
2944-void machine_restart(char *cmd)
2945+__noreturn void machine_restart(char *cmd)
2946 {
2947 machine_shutdown();
2948
2949@@ -283,8 +283,8 @@ void __show_regs(struct pt_regs *regs)
2950 init_utsname()->release,
2951 (int)strcspn(init_utsname()->version, " "),
2952 init_utsname()->version);
2953- print_symbol("PC is at %s\n", instruction_pointer(regs));
2954- print_symbol("LR is at %s\n", regs->ARM_lr);
2955+ printk("PC is at %pA\n", instruction_pointer(regs));
2956+ printk("LR is at %pA\n", regs->ARM_lr);
2957 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2958 "sp : %08lx ip : %08lx fp : %08lx\n",
2959 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2960@@ -452,12 +452,6 @@ unsigned long get_wchan(struct task_struct *p)
2961 return 0;
2962 }
2963
2964-unsigned long arch_randomize_brk(struct mm_struct *mm)
2965-{
2966- unsigned long range_end = mm->brk + 0x02000000;
2967- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2968-}
2969-
2970 #ifdef CONFIG_MMU
2971 /*
2972 * The vectors page is always readable from user space for the
2973@@ -470,9 +464,8 @@ static int __init gate_vma_init(void)
2974 {
2975 gate_vma.vm_start = 0xffff0000;
2976 gate_vma.vm_end = 0xffff0000 + PAGE_SIZE;
2977- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
2978- gate_vma.vm_flags = VM_READ | VM_EXEC |
2979- VM_MAYREAD | VM_MAYEXEC;
2980+ gate_vma.vm_flags = VM_NONE;
2981+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
2982 return 0;
2983 }
2984 arch_initcall(gate_vma_init);
2985diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
2986index 03deeff..741ce88 100644
2987--- a/arch/arm/kernel/ptrace.c
2988+++ b/arch/arm/kernel/ptrace.c
2989@@ -937,10 +937,19 @@ static int tracehook_report_syscall(struct pt_regs *regs,
2990 return current_thread_info()->syscall;
2991 }
2992
2993+#ifdef CONFIG_GRKERNSEC_SETXID
2994+extern void gr_delayed_cred_worker(void);
2995+#endif
2996+
2997 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
2998 {
2999 current_thread_info()->syscall = scno;
3000
3001+#ifdef CONFIG_GRKERNSEC_SETXID
3002+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3003+ gr_delayed_cred_worker();
3004+#endif
3005+
3006 /* Do the secure computing check first; failures should be fast. */
3007 if (secure_computing(scno) == -1)
3008 return -1;
3009diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
3010index 3f6cbb2..39305c7 100644
3011--- a/arch/arm/kernel/setup.c
3012+++ b/arch/arm/kernel/setup.c
3013@@ -97,21 +97,23 @@ EXPORT_SYMBOL(system_serial_high);
3014 unsigned int elf_hwcap __read_mostly;
3015 EXPORT_SYMBOL(elf_hwcap);
3016
3017+pteval_t __supported_pte_mask __read_only;
3018+pmdval_t __supported_pmd_mask __read_only;
3019
3020 #ifdef MULTI_CPU
3021-struct processor processor __read_mostly;
3022+struct processor processor;
3023 #endif
3024 #ifdef MULTI_TLB
3025-struct cpu_tlb_fns cpu_tlb __read_mostly;
3026+struct cpu_tlb_fns cpu_tlb __read_only;
3027 #endif
3028 #ifdef MULTI_USER
3029-struct cpu_user_fns cpu_user __read_mostly;
3030+struct cpu_user_fns cpu_user __read_only;
3031 #endif
3032 #ifdef MULTI_CACHE
3033-struct cpu_cache_fns cpu_cache __read_mostly;
3034+struct cpu_cache_fns cpu_cache __read_only;
3035 #endif
3036 #ifdef CONFIG_OUTER_CACHE
3037-struct outer_cache_fns outer_cache __read_mostly;
3038+struct outer_cache_fns outer_cache __read_only;
3039 EXPORT_SYMBOL(outer_cache);
3040 #endif
3041
3042@@ -236,9 +238,13 @@ static int __get_cpu_architecture(void)
3043 asm("mrc p15, 0, %0, c0, c1, 4"
3044 : "=r" (mmfr0));
3045 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
3046- (mmfr0 & 0x000000f0) >= 0x00000030)
3047+ (mmfr0 & 0x000000f0) >= 0x00000030) {
3048 cpu_arch = CPU_ARCH_ARMv7;
3049- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3050+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
3051+ __supported_pte_mask |= L_PTE_PXN;
3052+ __supported_pmd_mask |= PMD_PXNTABLE;
3053+ }
3054+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3055 (mmfr0 & 0x000000f0) == 0x00000020)
3056 cpu_arch = CPU_ARCH_ARMv6;
3057 else
3058@@ -462,7 +468,7 @@ static void __init setup_processor(void)
3059 __cpu_architecture = __get_cpu_architecture();
3060
3061 #ifdef MULTI_CPU
3062- processor = *list->proc;
3063+ memcpy((void *)&processor, list->proc, sizeof processor);
3064 #endif
3065 #ifdef MULTI_TLB
3066 cpu_tlb = *list->tlb;
3067@@ -524,7 +530,7 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
3068 size -= start & ~PAGE_MASK;
3069 bank->start = PAGE_ALIGN(start);
3070
3071-#ifndef CONFIG_LPAE
3072+#ifndef CONFIG_ARM_LPAE
3073 if (bank->start + size < bank->start) {
3074 printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
3075 "32-bit physical address space\n", (long long)start);
3076diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3077index 56f72d2..6924200 100644
3078--- a/arch/arm/kernel/signal.c
3079+++ b/arch/arm/kernel/signal.c
3080@@ -433,22 +433,14 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka,
3081 __put_user(sigreturn_codes[idx+1], rc+1))
3082 return 1;
3083
3084- if (cpsr & MODE32_BIT) {
3085- /*
3086- * 32-bit code can use the new high-page
3087- * signal return code support.
3088- */
3089- retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb;
3090- } else {
3091- /*
3092- * Ensure that the instruction cache sees
3093- * the return code written onto the stack.
3094- */
3095- flush_icache_range((unsigned long)rc,
3096- (unsigned long)(rc + 2));
3097+ /*
3098+ * Ensure that the instruction cache sees
3099+ * the return code written onto the stack.
3100+ */
3101+ flush_icache_range((unsigned long)rc,
3102+ (unsigned long)(rc + 2));
3103
3104- retcode = ((unsigned long)rc) + thumb;
3105- }
3106+ retcode = ((unsigned long)rc) + thumb;
3107 }
3108
3109 regs->ARM_r0 = usig;
3110diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3111index 58af91c..343ce99 100644
3112--- a/arch/arm/kernel/smp.c
3113+++ b/arch/arm/kernel/smp.c
3114@@ -70,7 +70,7 @@ enum ipi_msg_type {
3115
3116 static DECLARE_COMPLETION(cpu_running);
3117
3118-static struct smp_operations smp_ops;
3119+static struct smp_operations smp_ops __read_only;
3120
3121 void __init smp_set_ops(struct smp_operations *ops)
3122 {
3123diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
3124index 02c5d2c..e5695ad 100644
3125--- a/arch/arm/kernel/smp_tlb.c
3126+++ b/arch/arm/kernel/smp_tlb.c
3127@@ -12,6 +12,7 @@
3128
3129 #include <asm/smp_plat.h>
3130 #include <asm/tlbflush.h>
3131+#include <asm/mmu_context.h>
3132
3133 /**********************************************************************/
3134
3135@@ -64,12 +65,72 @@ static inline void ipi_flush_tlb_kernel_range(void *arg)
3136 local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
3137 }
3138
3139+#ifdef CONFIG_ARM_ERRATA_798181
3140+static int erratum_a15_798181(void)
3141+{
3142+ unsigned int midr = read_cpuid_id();
3143+
3144+ /* Cortex-A15 r0p0..r3p2 affected */
3145+ if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
3146+ return 0;
3147+ return 1;
3148+}
3149+#else
3150+static int erratum_a15_798181(void)
3151+{
3152+ return 0;
3153+}
3154+#endif
3155+
3156+static void ipi_flush_tlb_a15_erratum(void *arg)
3157+{
3158+ dmb();
3159+}
3160+
3161+static void broadcast_tlb_a15_erratum(void)
3162+{
3163+ if (!erratum_a15_798181())
3164+ return;
3165+
3166+ dummy_flush_tlb_a15_erratum();
3167+ smp_call_function_many(cpu_online_mask, ipi_flush_tlb_a15_erratum,
3168+ NULL, 1);
3169+}
3170+
3171+static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
3172+{
3173+ int cpu;
3174+ cpumask_t mask = { CPU_BITS_NONE };
3175+
3176+ if (!erratum_a15_798181())
3177+ return;
3178+
3179+ dummy_flush_tlb_a15_erratum();
3180+ for_each_online_cpu(cpu) {
3181+ if (cpu == smp_processor_id())
3182+ continue;
3183+ /*
3184+ * We only need to send an IPI if the other CPUs are running
3185+ * the same ASID as the one being invalidated. There is no
3186+ * need for locking around the active_asids check since the
3187+ * switch_mm() function has at least one dmb() (as required by
3188+ * this workaround) in case a context switch happens on
3189+ * another CPU after the condition below.
3190+ */
3191+ if (atomic64_read(&mm->context.id) ==
3192+ atomic64_read(&per_cpu(active_asids, cpu)))
3193+ cpumask_set_cpu(cpu, &mask);
3194+ }
3195+ smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
3196+}
3197+
3198 void flush_tlb_all(void)
3199 {
3200 if (tlb_ops_need_broadcast())
3201 on_each_cpu(ipi_flush_tlb_all, NULL, 1);
3202 else
3203 local_flush_tlb_all();
3204+ broadcast_tlb_a15_erratum();
3205 }
3206
3207 void flush_tlb_mm(struct mm_struct *mm)
3208@@ -78,6 +139,7 @@ void flush_tlb_mm(struct mm_struct *mm)
3209 on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
3210 else
3211 local_flush_tlb_mm(mm);
3212+ broadcast_tlb_mm_a15_erratum(mm);
3213 }
3214
3215 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
3216@@ -90,6 +152,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
3217 &ta, 1);
3218 } else
3219 local_flush_tlb_page(vma, uaddr);
3220+ broadcast_tlb_mm_a15_erratum(vma->vm_mm);
3221 }
3222
3223 void flush_tlb_kernel_page(unsigned long kaddr)
3224@@ -100,6 +163,7 @@ void flush_tlb_kernel_page(unsigned long kaddr)
3225 on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
3226 } else
3227 local_flush_tlb_kernel_page(kaddr);
3228+ broadcast_tlb_a15_erratum();
3229 }
3230
3231 void flush_tlb_range(struct vm_area_struct *vma,
3232@@ -114,6 +178,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
3233 &ta, 1);
3234 } else
3235 local_flush_tlb_range(vma, start, end);
3236+ broadcast_tlb_mm_a15_erratum(vma->vm_mm);
3237 }
3238
3239 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
3240@@ -125,5 +190,6 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
3241 on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
3242 } else
3243 local_flush_tlb_kernel_range(start, end);
3244+ broadcast_tlb_a15_erratum();
3245 }
3246
3247diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3248index b0179b8..829510e 100644
3249--- a/arch/arm/kernel/traps.c
3250+++ b/arch/arm/kernel/traps.c
3251@@ -57,7 +57,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3252 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3253 {
3254 #ifdef CONFIG_KALLSYMS
3255- printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3256+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3257 #else
3258 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3259 #endif
3260@@ -266,6 +266,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3261 static int die_owner = -1;
3262 static unsigned int die_nest_count;
3263
3264+extern void gr_handle_kernel_exploit(void);
3265+
3266 static unsigned long oops_begin(void)
3267 {
3268 int cpu;
3269@@ -308,6 +310,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3270 panic("Fatal exception in interrupt");
3271 if (panic_on_oops)
3272 panic("Fatal exception");
3273+
3274+ gr_handle_kernel_exploit();
3275+
3276 if (signr)
3277 do_exit(signr);
3278 }
3279@@ -601,7 +606,9 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
3280 * The user helper at 0xffff0fe0 must be used instead.
3281 * (see entry-armv.S for details)
3282 */
3283+ pax_open_kernel();
3284 *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
3285+ pax_close_kernel();
3286 }
3287 return 0;
3288
3289@@ -841,13 +848,10 @@ void __init early_trap_init(void *vectors_base)
3290 */
3291 kuser_get_tls_init(vectors);
3292
3293- /*
3294- * Copy signal return handlers into the vector page, and
3295- * set sigreturn to be a pointer to these.
3296- */
3297- memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
3298- sigreturn_codes, sizeof(sigreturn_codes));
3299-
3300 flush_icache_range(vectors, vectors + PAGE_SIZE);
3301- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3302+
3303+#ifndef CONFIG_PAX_MEMORY_UDEREF
3304+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3305+#endif
3306+
3307 }
3308diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3309index 11c1785..c67d54c 100644
3310--- a/arch/arm/kernel/vmlinux.lds.S
3311+++ b/arch/arm/kernel/vmlinux.lds.S
3312@@ -8,7 +8,11 @@
3313 #include <asm/thread_info.h>
3314 #include <asm/memory.h>
3315 #include <asm/page.h>
3316-
3317+
3318+#ifdef CONFIG_PAX_KERNEXEC
3319+#include <asm/pgtable.h>
3320+#endif
3321+
3322 #define PROC_INFO \
3323 . = ALIGN(4); \
3324 VMLINUX_SYMBOL(__proc_info_begin) = .; \
3325@@ -90,6 +94,11 @@ SECTIONS
3326 _text = .;
3327 HEAD_TEXT
3328 }
3329+
3330+#ifdef CONFIG_PAX_KERNEXEC
3331+ . = ALIGN(1<<SECTION_SHIFT);
3332+#endif
3333+
3334 .text : { /* Real text segment */
3335 _stext = .; /* Text and read-only data */
3336 __exception_text_start = .;
3337@@ -144,6 +153,10 @@ SECTIONS
3338
3339 _etext = .; /* End of text and rodata section */
3340
3341+#ifdef CONFIG_PAX_KERNEXEC
3342+ . = ALIGN(1<<SECTION_SHIFT);
3343+#endif
3344+
3345 #ifndef CONFIG_XIP_KERNEL
3346 . = ALIGN(PAGE_SIZE);
3347 __init_begin = .;
3348@@ -203,6 +216,11 @@ SECTIONS
3349 . = PAGE_OFFSET + TEXT_OFFSET;
3350 #else
3351 __init_end = .;
3352+
3353+#ifdef CONFIG_PAX_KERNEXEC
3354+ . = ALIGN(1<<SECTION_SHIFT);
3355+#endif
3356+
3357 . = ALIGN(THREAD_SIZE);
3358 __data_loc = .;
3359 #endif
3360diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3361index 14a0d98..7771a7d 100644
3362--- a/arch/arm/lib/clear_user.S
3363+++ b/arch/arm/lib/clear_user.S
3364@@ -12,14 +12,14 @@
3365
3366 .text
3367
3368-/* Prototype: int __clear_user(void *addr, size_t sz)
3369+/* Prototype: int ___clear_user(void *addr, size_t sz)
3370 * Purpose : clear some user memory
3371 * Params : addr - user memory address to clear
3372 * : sz - number of bytes to clear
3373 * Returns : number of bytes NOT cleared
3374 */
3375 ENTRY(__clear_user_std)
3376-WEAK(__clear_user)
3377+WEAK(___clear_user)
3378 stmfd sp!, {r1, lr}
3379 mov r2, #0
3380 cmp r1, #4
3381@@ -44,7 +44,7 @@ WEAK(__clear_user)
3382 USER( strnebt r2, [r0])
3383 mov r0, #0
3384 ldmfd sp!, {r1, pc}
3385-ENDPROC(__clear_user)
3386+ENDPROC(___clear_user)
3387 ENDPROC(__clear_user_std)
3388
3389 .pushsection .fixup,"ax"
3390diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3391index 66a477a..bee61d3 100644
3392--- a/arch/arm/lib/copy_from_user.S
3393+++ b/arch/arm/lib/copy_from_user.S
3394@@ -16,7 +16,7 @@
3395 /*
3396 * Prototype:
3397 *
3398- * size_t __copy_from_user(void *to, const void *from, size_t n)
3399+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3400 *
3401 * Purpose:
3402 *
3403@@ -84,11 +84,11 @@
3404
3405 .text
3406
3407-ENTRY(__copy_from_user)
3408+ENTRY(___copy_from_user)
3409
3410 #include "copy_template.S"
3411
3412-ENDPROC(__copy_from_user)
3413+ENDPROC(___copy_from_user)
3414
3415 .pushsection .fixup,"ax"
3416 .align 0
3417diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3418index 6ee2f67..d1cce76 100644
3419--- a/arch/arm/lib/copy_page.S
3420+++ b/arch/arm/lib/copy_page.S
3421@@ -10,6 +10,7 @@
3422 * ASM optimised string functions
3423 */
3424 #include <linux/linkage.h>
3425+#include <linux/const.h>
3426 #include <asm/assembler.h>
3427 #include <asm/asm-offsets.h>
3428 #include <asm/cache.h>
3429diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3430index d066df6..df28194 100644
3431--- a/arch/arm/lib/copy_to_user.S
3432+++ b/arch/arm/lib/copy_to_user.S
3433@@ -16,7 +16,7 @@
3434 /*
3435 * Prototype:
3436 *
3437- * size_t __copy_to_user(void *to, const void *from, size_t n)
3438+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3439 *
3440 * Purpose:
3441 *
3442@@ -88,11 +88,11 @@
3443 .text
3444
3445 ENTRY(__copy_to_user_std)
3446-WEAK(__copy_to_user)
3447+WEAK(___copy_to_user)
3448
3449 #include "copy_template.S"
3450
3451-ENDPROC(__copy_to_user)
3452+ENDPROC(___copy_to_user)
3453 ENDPROC(__copy_to_user_std)
3454
3455 .pushsection .fixup,"ax"
3456diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3457index 7d08b43..f7ca7ea 100644
3458--- a/arch/arm/lib/csumpartialcopyuser.S
3459+++ b/arch/arm/lib/csumpartialcopyuser.S
3460@@ -57,8 +57,8 @@
3461 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3462 */
3463
3464-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3465-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3466+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3467+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3468
3469 #include "csumpartialcopygeneric.S"
3470
3471diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3472index 6b93f6a..1aa92d0 100644
3473--- a/arch/arm/lib/delay.c
3474+++ b/arch/arm/lib/delay.c
3475@@ -28,12 +28,15 @@
3476 /*
3477 * Default to the loop-based delay implementation.
3478 */
3479-struct arm_delay_ops arm_delay_ops = {
3480+static struct arm_delay_ops arm_loop_delay_ops = {
3481 .delay = __loop_delay,
3482 .const_udelay = __loop_const_udelay,
3483 .udelay = __loop_udelay,
3484+ .const_clock = false,
3485 };
3486
3487+struct arm_delay_ops *arm_delay_ops __read_only = &arm_loop_delay_ops;
3488+
3489 static const struct delay_timer *delay_timer;
3490 static bool delay_calibrated;
3491
3492@@ -67,6 +70,13 @@ static void __timer_udelay(unsigned long usecs)
3493 __timer_const_udelay(usecs * UDELAY_MULT);
3494 }
3495
3496+static struct arm_delay_ops arm_timer_delay_ops = {
3497+ .delay = __timer_delay,
3498+ .const_udelay = __timer_const_udelay,
3499+ .udelay = __timer_udelay,
3500+ .const_clock = true,
3501+};
3502+
3503 void __init register_current_timer_delay(const struct delay_timer *timer)
3504 {
3505 if (!delay_calibrated) {
3506@@ -74,10 +84,7 @@ void __init register_current_timer_delay(const struct delay_timer *timer)
3507 delay_timer = timer;
3508 lpj_fine = timer->freq / HZ;
3509 loops_per_jiffy = lpj_fine;
3510- arm_delay_ops.delay = __timer_delay;
3511- arm_delay_ops.const_udelay = __timer_const_udelay;
3512- arm_delay_ops.udelay = __timer_udelay;
3513- arm_delay_ops.const_clock = true;
3514+ arm_delay_ops = &arm_timer_delay_ops;
3515 delay_calibrated = true;
3516 } else {
3517 pr_info("Ignoring duplicate/late registration of read_current_timer delay\n");
3518diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3519index 025f742..8432b08 100644
3520--- a/arch/arm/lib/uaccess_with_memcpy.c
3521+++ b/arch/arm/lib/uaccess_with_memcpy.c
3522@@ -104,7 +104,7 @@ out:
3523 }
3524
3525 unsigned long
3526-__copy_to_user(void __user *to, const void *from, unsigned long n)
3527+___copy_to_user(void __user *to, const void *from, unsigned long n)
3528 {
3529 /*
3530 * This test is stubbed out of the main function above to keep
3531diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
3532index bac21a5..b67ef8e 100644
3533--- a/arch/arm/mach-kirkwood/common.c
3534+++ b/arch/arm/mach-kirkwood/common.c
3535@@ -150,7 +150,16 @@ static void clk_gate_fn_disable(struct clk_hw *hw)
3536 clk_gate_ops.disable(hw);
3537 }
3538
3539-static struct clk_ops clk_gate_fn_ops;
3540+static int clk_gate_fn_is_enabled(struct clk_hw *hw)
3541+{
3542+ return clk_gate_ops.is_enabled(hw);
3543+}
3544+
3545+static struct clk_ops clk_gate_fn_ops = {
3546+ .enable = clk_gate_fn_enable,
3547+ .disable = clk_gate_fn_disable,
3548+ .is_enabled = clk_gate_fn_is_enabled,
3549+};
3550
3551 static struct clk __init *clk_register_gate_fn(struct device *dev,
3552 const char *name,
3553@@ -184,14 +193,6 @@ static struct clk __init *clk_register_gate_fn(struct device *dev,
3554 gate_fn->fn_en = fn_en;
3555 gate_fn->fn_dis = fn_dis;
3556
3557- /* ops is the gate ops, but with our enable/disable functions */
3558- if (clk_gate_fn_ops.enable != clk_gate_fn_enable ||
3559- clk_gate_fn_ops.disable != clk_gate_fn_disable) {
3560- clk_gate_fn_ops = clk_gate_ops;
3561- clk_gate_fn_ops.enable = clk_gate_fn_enable;
3562- clk_gate_fn_ops.disable = clk_gate_fn_disable;
3563- }
3564-
3565 clk = clk_register(dev, &gate_fn->gate.hw);
3566
3567 if (IS_ERR(clk))
3568diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3569index 0abb30f..54064da 100644
3570--- a/arch/arm/mach-omap2/board-n8x0.c
3571+++ b/arch/arm/mach-omap2/board-n8x0.c
3572@@ -631,7 +631,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3573 }
3574 #endif
3575
3576-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3577+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3578 .late_init = n8x0_menelaus_late_init,
3579 };
3580
3581diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
3582index 8033cb7..2f7cb62 100644
3583--- a/arch/arm/mach-omap2/gpmc.c
3584+++ b/arch/arm/mach-omap2/gpmc.c
3585@@ -139,7 +139,6 @@ struct omap3_gpmc_regs {
3586 };
3587
3588 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
3589-static struct irq_chip gpmc_irq_chip;
3590 static unsigned gpmc_irq_start;
3591
3592 static struct resource gpmc_mem_root;
3593@@ -700,6 +699,18 @@ static void gpmc_irq_noop(struct irq_data *data) { }
3594
3595 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
3596
3597+static struct irq_chip gpmc_irq_chip = {
3598+ .name = "gpmc",
3599+ .irq_startup = gpmc_irq_noop_ret,
3600+ .irq_enable = gpmc_irq_enable,
3601+ .irq_disable = gpmc_irq_disable,
3602+ .irq_shutdown = gpmc_irq_noop,
3603+ .irq_ack = gpmc_irq_noop,
3604+ .irq_mask = gpmc_irq_noop,
3605+ .irq_unmask = gpmc_irq_noop,
3606+
3607+};
3608+
3609 static int gpmc_setup_irq(void)
3610 {
3611 int i;
3612@@ -714,15 +725,6 @@ static int gpmc_setup_irq(void)
3613 return gpmc_irq_start;
3614 }
3615
3616- gpmc_irq_chip.name = "gpmc";
3617- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
3618- gpmc_irq_chip.irq_enable = gpmc_irq_enable;
3619- gpmc_irq_chip.irq_disable = gpmc_irq_disable;
3620- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
3621- gpmc_irq_chip.irq_ack = gpmc_irq_noop;
3622- gpmc_irq_chip.irq_mask = gpmc_irq_noop;
3623- gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
3624-
3625 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
3626 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
3627
3628diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3629index 5d3b4f4..ddba3c0 100644
3630--- a/arch/arm/mach-omap2/omap-wakeupgen.c
3631+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3632@@ -340,7 +340,7 @@ static int __cpuinit irq_cpu_hotplug_notify(struct notifier_block *self,
3633 return NOTIFY_OK;
3634 }
3635
3636-static struct notifier_block __refdata irq_hotplug_notifier = {
3637+static struct notifier_block irq_hotplug_notifier = {
3638 .notifier_call = irq_cpu_hotplug_notify,
3639 };
3640
3641diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3642index e065daa..7b1ad9b 100644
3643--- a/arch/arm/mach-omap2/omap_device.c
3644+++ b/arch/arm/mach-omap2/omap_device.c
3645@@ -686,7 +686,7 @@ void omap_device_delete(struct omap_device *od)
3646 * passes along the return value of omap_device_build_ss().
3647 */
3648 struct platform_device __init *omap_device_build(const char *pdev_name, int pdev_id,
3649- struct omap_hwmod *oh, void *pdata,
3650+ struct omap_hwmod *oh, const void *pdata,
3651 int pdata_len,
3652 struct omap_device_pm_latency *pm_lats,
3653 int pm_lats_cnt, int is_early_device)
3654@@ -720,7 +720,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name, int pdev
3655 */
3656 struct platform_device __init *omap_device_build_ss(const char *pdev_name, int pdev_id,
3657 struct omap_hwmod **ohs, int oh_cnt,
3658- void *pdata, int pdata_len,
3659+ const void *pdata, int pdata_len,
3660 struct omap_device_pm_latency *pm_lats,
3661 int pm_lats_cnt, int is_early_device)
3662 {
3663diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
3664index 0933c59..42b8e2d 100644
3665--- a/arch/arm/mach-omap2/omap_device.h
3666+++ b/arch/arm/mach-omap2/omap_device.h
3667@@ -91,14 +91,14 @@ int omap_device_shutdown(struct platform_device *pdev);
3668 /* Core code interface */
3669
3670 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
3671- struct omap_hwmod *oh, void *pdata,
3672+ struct omap_hwmod *oh, const void *pdata,
3673 int pdata_len,
3674 struct omap_device_pm_latency *pm_lats,
3675 int pm_lats_cnt, int is_early_device);
3676
3677 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
3678 struct omap_hwmod **oh, int oh_cnt,
3679- void *pdata, int pdata_len,
3680+ const void *pdata, int pdata_len,
3681 struct omap_device_pm_latency *pm_lats,
3682 int pm_lats_cnt, int is_early_device);
3683
3684diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3685index 4653efb..8c60bf7 100644
3686--- a/arch/arm/mach-omap2/omap_hwmod.c
3687+++ b/arch/arm/mach-omap2/omap_hwmod.c
3688@@ -189,10 +189,10 @@ struct omap_hwmod_soc_ops {
3689 int (*init_clkdm)(struct omap_hwmod *oh);
3690 void (*update_context_lost)(struct omap_hwmod *oh);
3691 int (*get_context_lost)(struct omap_hwmod *oh);
3692-};
3693+} __no_const;
3694
3695 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3696-static struct omap_hwmod_soc_ops soc_ops;
3697+static struct omap_hwmod_soc_ops soc_ops __read_only;
3698
3699 /* omap_hwmod_list contains all registered struct omap_hwmods */
3700 static LIST_HEAD(omap_hwmod_list);
3701diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3702index 7c2b4ed..b2ea51f 100644
3703--- a/arch/arm/mach-omap2/wd_timer.c
3704+++ b/arch/arm/mach-omap2/wd_timer.c
3705@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3706 struct omap_hwmod *oh;
3707 char *oh_name = "wd_timer2";
3708 char *dev_name = "omap_wdt";
3709- struct omap_wd_timer_platform_data pdata;
3710+ static struct omap_wd_timer_platform_data pdata = {
3711+ .read_reset_sources = prm_read_reset_sources
3712+ };
3713
3714 if (!cpu_class_is_omap2() || of_have_populated_dt())
3715 return 0;
3716@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3717 return -EINVAL;
3718 }
3719
3720- pdata.read_reset_sources = prm_read_reset_sources;
3721-
3722 pdev = omap_device_build(dev_name, id, oh, &pdata,
3723 sizeof(struct omap_wd_timer_platform_data),
3724 NULL, 0, 0);
3725diff --git a/arch/arm/mach-ux500/include/mach/setup.h b/arch/arm/mach-ux500/include/mach/setup.h
3726index 6be4c4d..32ac32a 100644
3727--- a/arch/arm/mach-ux500/include/mach/setup.h
3728+++ b/arch/arm/mach-ux500/include/mach/setup.h
3729@@ -38,13 +38,6 @@ extern struct sys_timer ux500_timer;
3730 .type = MT_DEVICE, \
3731 }
3732
3733-#define __MEM_DEV_DESC(x, sz) { \
3734- .virtual = IO_ADDRESS(x), \
3735- .pfn = __phys_to_pfn(x), \
3736- .length = sz, \
3737- .type = MT_MEMORY, \
3738-}
3739-
3740 extern struct smp_operations ux500_smp_ops;
3741 extern void ux500_cpu_die(unsigned int cpu);
3742
3743diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3744index 3fd629d..8b1aca9 100644
3745--- a/arch/arm/mm/Kconfig
3746+++ b/arch/arm/mm/Kconfig
3747@@ -425,7 +425,7 @@ config CPU_32v5
3748
3749 config CPU_32v6
3750 bool
3751- select CPU_USE_DOMAINS if CPU_V6 && MMU
3752+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC
3753 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3754
3755 config CPU_32v6K
3756@@ -577,6 +577,7 @@ config CPU_CP15_MPU
3757
3758 config CPU_USE_DOMAINS
3759 bool
3760+ depends on !ARM_LPAE && !PAX_KERNEXEC
3761 help
3762 This option enables or disables the use of domain switching
3763 via the set_fs() function.
3764diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
3765index db26e2e..ee44569 100644
3766--- a/arch/arm/mm/alignment.c
3767+++ b/arch/arm/mm/alignment.c
3768@@ -211,10 +211,12 @@ union offset_union {
3769 #define __get16_unaligned_check(ins,val,addr) \
3770 do { \
3771 unsigned int err = 0, v, a = addr; \
3772+ pax_open_userland(); \
3773 __get8_unaligned_check(ins,v,a,err); \
3774 val = v << ((BE) ? 8 : 0); \
3775 __get8_unaligned_check(ins,v,a,err); \
3776 val |= v << ((BE) ? 0 : 8); \
3777+ pax_close_userland(); \
3778 if (err) \
3779 goto fault; \
3780 } while (0)
3781@@ -228,6 +230,7 @@ union offset_union {
3782 #define __get32_unaligned_check(ins,val,addr) \
3783 do { \
3784 unsigned int err = 0, v, a = addr; \
3785+ pax_open_userland(); \
3786 __get8_unaligned_check(ins,v,a,err); \
3787 val = v << ((BE) ? 24 : 0); \
3788 __get8_unaligned_check(ins,v,a,err); \
3789@@ -236,6 +239,7 @@ union offset_union {
3790 val |= v << ((BE) ? 8 : 16); \
3791 __get8_unaligned_check(ins,v,a,err); \
3792 val |= v << ((BE) ? 0 : 24); \
3793+ pax_close_userland(); \
3794 if (err) \
3795 goto fault; \
3796 } while (0)
3797@@ -249,6 +253,7 @@ union offset_union {
3798 #define __put16_unaligned_check(ins,val,addr) \
3799 do { \
3800 unsigned int err = 0, v = val, a = addr; \
3801+ pax_open_userland(); \
3802 __asm__( FIRST_BYTE_16 \
3803 ARM( "1: "ins" %1, [%2], #1\n" ) \
3804 THUMB( "1: "ins" %1, [%2]\n" ) \
3805@@ -268,6 +273,7 @@ union offset_union {
3806 " .popsection\n" \
3807 : "=r" (err), "=&r" (v), "=&r" (a) \
3808 : "0" (err), "1" (v), "2" (a)); \
3809+ pax_close_userland(); \
3810 if (err) \
3811 goto fault; \
3812 } while (0)
3813@@ -281,6 +287,7 @@ union offset_union {
3814 #define __put32_unaligned_check(ins,val,addr) \
3815 do { \
3816 unsigned int err = 0, v = val, a = addr; \
3817+ pax_open_userland(); \
3818 __asm__( FIRST_BYTE_32 \
3819 ARM( "1: "ins" %1, [%2], #1\n" ) \
3820 THUMB( "1: "ins" %1, [%2]\n" ) \
3821@@ -310,6 +317,7 @@ union offset_union {
3822 " .popsection\n" \
3823 : "=r" (err), "=&r" (v), "=&r" (a) \
3824 : "0" (err), "1" (v), "2" (a)); \
3825+ pax_close_userland(); \
3826 if (err) \
3827 goto fault; \
3828 } while (0)
3829diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
3830index d07df17..59d5493 100644
3831--- a/arch/arm/mm/context.c
3832+++ b/arch/arm/mm/context.c
3833@@ -45,7 +45,7 @@ static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
3834 static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3835 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
3836
3837-static DEFINE_PER_CPU(atomic64_t, active_asids);
3838+DEFINE_PER_CPU(atomic64_t, active_asids);
3839 static DEFINE_PER_CPU(u64, reserved_asids);
3840 static cpumask_t tlb_flush_pending;
3841
3842@@ -209,8 +209,10 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
3843 atomic64_set(&mm->context.id, asid);
3844 }
3845
3846- if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
3847+ if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
3848 local_flush_tlb_all();
3849+ dummy_flush_tlb_a15_erratum();
3850+ }
3851
3852 atomic64_set(&per_cpu(active_asids, cpu), asid);
3853 cpumask_set_cpu(cpu, mm_cpumask(mm));
3854diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
3855index 5dbf13f..1a60561 100644
3856--- a/arch/arm/mm/fault.c
3857+++ b/arch/arm/mm/fault.c
3858@@ -25,6 +25,7 @@
3859 #include <asm/system_misc.h>
3860 #include <asm/system_info.h>
3861 #include <asm/tlbflush.h>
3862+#include <asm/sections.h>
3863
3864 #include "fault.h"
3865
3866@@ -138,6 +139,20 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
3867 if (fixup_exception(regs))
3868 return;
3869
3870+#ifdef CONFIG_PAX_KERNEXEC
3871+ if ((fsr & FSR_WRITE) &&
3872+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
3873+ (MODULES_VADDR <= addr && addr < MODULES_END)))
3874+ {
3875+ if (current->signal->curr_ip)
3876+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3877+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3878+ else
3879+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
3880+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3881+ }
3882+#endif
3883+
3884 /*
3885 * No handler, we'll have to terminate things with extreme prejudice.
3886 */
3887@@ -174,6 +189,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
3888 }
3889 #endif
3890
3891+#ifdef CONFIG_PAX_PAGEEXEC
3892+ if (fsr & FSR_LNX_PF) {
3893+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
3894+ do_group_exit(SIGKILL);
3895+ }
3896+#endif
3897+
3898 tsk->thread.address = addr;
3899 tsk->thread.error_code = fsr;
3900 tsk->thread.trap_no = 14;
3901@@ -398,6 +420,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3902 }
3903 #endif /* CONFIG_MMU */
3904
3905+#ifdef CONFIG_PAX_PAGEEXEC
3906+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3907+{
3908+ long i;
3909+
3910+ printk(KERN_ERR "PAX: bytes at PC: ");
3911+ for (i = 0; i < 20; i++) {
3912+ unsigned char c;
3913+ if (get_user(c, (__force unsigned char __user *)pc+i))
3914+ printk(KERN_CONT "?? ");
3915+ else
3916+ printk(KERN_CONT "%02x ", c);
3917+ }
3918+ printk("\n");
3919+
3920+ printk(KERN_ERR "PAX: bytes at SP-4: ");
3921+ for (i = -1; i < 20; i++) {
3922+ unsigned long c;
3923+ if (get_user(c, (__force unsigned long __user *)sp+i))
3924+ printk(KERN_CONT "???????? ");
3925+ else
3926+ printk(KERN_CONT "%08lx ", c);
3927+ }
3928+ printk("\n");
3929+}
3930+#endif
3931+
3932 /*
3933 * First Level Translation Fault Handler
3934 *
3935@@ -543,9 +592,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3936 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
3937 struct siginfo info;
3938
3939+#ifdef CONFIG_PAX_MEMORY_UDEREF
3940+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
3941+ if (current->signal->curr_ip)
3942+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3943+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3944+ else
3945+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3946+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3947+ goto die;
3948+ }
3949+#endif
3950+
3951 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
3952 return;
3953
3954+die:
3955 printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
3956 inf->name, fsr, addr);
3957
3958@@ -575,9 +637,49 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
3959 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
3960 struct siginfo info;
3961
3962+ if (user_mode(regs)) {
3963+ if (addr == 0xffff0fe0UL) {
3964+ /*
3965+ * PaX: __kuser_get_tls emulation
3966+ */
3967+ regs->ARM_r0 = current_thread_info()->tp_value;
3968+ regs->ARM_pc = regs->ARM_lr;
3969+ return;
3970+ }
3971+ }
3972+
3973+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3974+ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
3975+ if (current->signal->curr_ip)
3976+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3977+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
3978+ addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr);
3979+ else
3980+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
3981+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
3982+ addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr);
3983+ goto die;
3984+ }
3985+#endif
3986+
3987+#ifdef CONFIG_PAX_REFCOUNT
3988+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
3989+ unsigned int bkpt;
3990+
3991+ if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
3992+ current->thread.error_code = ifsr;
3993+ current->thread.trap_no = 0;
3994+ pax_report_refcount_overflow(regs);
3995+ fixup_exception(regs);
3996+ return;
3997+ }
3998+ }
3999+#endif
4000+
4001 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
4002 return;
4003
4004+die:
4005 printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
4006 inf->name, ifsr, addr);
4007
4008diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
4009index cf08bdf..772656c 100644
4010--- a/arch/arm/mm/fault.h
4011+++ b/arch/arm/mm/fault.h
4012@@ -3,6 +3,7 @@
4013
4014 /*
4015 * Fault status register encodings. We steal bit 31 for our own purposes.
4016+ * Set when the FSR value is from an instruction fault.
4017 */
4018 #define FSR_LNX_PF (1 << 31)
4019 #define FSR_WRITE (1 << 11)
4020@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
4021 }
4022 #endif
4023
4024+/* valid for LPAE and !LPAE */
4025+static inline int is_xn_fault(unsigned int fsr)
4026+{
4027+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
4028+}
4029+
4030+static inline int is_domain_fault(unsigned int fsr)
4031+{
4032+ return ((fsr_fs(fsr) & 0xD) == 0x9);
4033+}
4034+
4035 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
4036 unsigned long search_exception_table(unsigned long addr);
4037
4038diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
4039index ad722f1..763fdd3 100644
4040--- a/arch/arm/mm/init.c
4041+++ b/arch/arm/mm/init.c
4042@@ -30,6 +30,8 @@
4043 #include <asm/setup.h>
4044 #include <asm/tlb.h>
4045 #include <asm/fixmap.h>
4046+#include <asm/system_info.h>
4047+#include <asm/cp15.h>
4048
4049 #include <asm/mach/arch.h>
4050 #include <asm/mach/map.h>
4051@@ -736,7 +738,46 @@ void free_initmem(void)
4052 {
4053 #ifdef CONFIG_HAVE_TCM
4054 extern char __tcm_start, __tcm_end;
4055+#endif
4056
4057+#ifdef CONFIG_PAX_KERNEXEC
4058+ unsigned long addr;
4059+ pgd_t *pgd;
4060+ pud_t *pud;
4061+ pmd_t *pmd;
4062+ int cpu_arch = cpu_architecture();
4063+ unsigned int cr = get_cr();
4064+
4065+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
4066+ /* make pages tables, etc before .text NX */
4067+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
4068+ pgd = pgd_offset_k(addr);
4069+ pud = pud_offset(pgd, addr);
4070+ pmd = pmd_offset(pud, addr);
4071+ __section_update(pmd, addr, PMD_SECT_XN);
4072+ }
4073+ /* make init NX */
4074+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
4075+ pgd = pgd_offset_k(addr);
4076+ pud = pud_offset(pgd, addr);
4077+ pmd = pmd_offset(pud, addr);
4078+ __section_update(pmd, addr, PMD_SECT_XN);
4079+ }
4080+ /* make kernel code/rodata RX */
4081+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
4082+ pgd = pgd_offset_k(addr);
4083+ pud = pud_offset(pgd, addr);
4084+ pmd = pmd_offset(pud, addr);
4085+#ifdef CONFIG_ARM_LPAE
4086+ __section_update(pmd, addr, PMD_SECT_RDONLY);
4087+#else
4088+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
4089+#endif
4090+ }
4091+ }
4092+#endif
4093+
4094+#ifdef CONFIG_HAVE_TCM
4095 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
4096 totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
4097 __phys_to_pfn(__pa(&__tcm_end)),
4098diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
4099index 88fd86c..7a224ce 100644
4100--- a/arch/arm/mm/ioremap.c
4101+++ b/arch/arm/mm/ioremap.c
4102@@ -335,9 +335,9 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
4103 unsigned int mtype;
4104
4105 if (cached)
4106- mtype = MT_MEMORY;
4107+ mtype = MT_MEMORY_RX;
4108 else
4109- mtype = MT_MEMORY_NONCACHED;
4110+ mtype = MT_MEMORY_NONCACHED_RX;
4111
4112 return __arm_ioremap_caller(phys_addr, size, mtype,
4113 __builtin_return_address(0));
4114diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
4115index 10062ce..aa96dd7 100644
4116--- a/arch/arm/mm/mmap.c
4117+++ b/arch/arm/mm/mmap.c
4118@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4119 struct vm_area_struct *vma;
4120 int do_align = 0;
4121 int aliasing = cache_is_vipt_aliasing();
4122+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4123 struct vm_unmapped_area_info info;
4124
4125 /*
4126@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4127 if (len > TASK_SIZE)
4128 return -ENOMEM;
4129
4130+#ifdef CONFIG_PAX_RANDMMAP
4131+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4132+#endif
4133+
4134 if (addr) {
4135 if (do_align)
4136 addr = COLOUR_ALIGN(addr, pgoff);
4137@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4138 addr = PAGE_ALIGN(addr);
4139
4140 vma = find_vma(mm, addr);
4141- if (TASK_SIZE - len >= addr &&
4142- (!vma || addr + len <= vma->vm_start))
4143+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4144 return addr;
4145 }
4146
4147@@ -112,6 +116,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4148 unsigned long addr = addr0;
4149 int do_align = 0;
4150 int aliasing = cache_is_vipt_aliasing();
4151+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4152 struct vm_unmapped_area_info info;
4153
4154 /*
4155@@ -132,6 +137,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4156 return addr;
4157 }
4158
4159+#ifdef CONFIG_PAX_RANDMMAP
4160+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4161+#endif
4162+
4163 /* requesting a specific address */
4164 if (addr) {
4165 if (do_align)
4166@@ -139,8 +148,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4167 else
4168 addr = PAGE_ALIGN(addr);
4169 vma = find_vma(mm, addr);
4170- if (TASK_SIZE - len >= addr &&
4171- (!vma || addr + len <= vma->vm_start))
4172+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4173 return addr;
4174 }
4175
4176@@ -162,6 +170,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4177 VM_BUG_ON(addr != -ENOMEM);
4178 info.flags = 0;
4179 info.low_limit = mm->mmap_base;
4180+
4181+#ifdef CONFIG_PAX_RANDMMAP
4182+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4183+ info.low_limit += mm->delta_mmap;
4184+#endif
4185+
4186 info.high_limit = TASK_SIZE;
4187 addr = vm_unmapped_area(&info);
4188 }
4189@@ -173,6 +187,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4190 {
4191 unsigned long random_factor = 0UL;
4192
4193+#ifdef CONFIG_PAX_RANDMMAP
4194+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4195+#endif
4196+
4197 /* 8 bits of randomness in 20 address space bits */
4198 if ((current->flags & PF_RANDOMIZE) &&
4199 !(current->personality & ADDR_NO_RANDOMIZE))
4200@@ -180,10 +198,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4201
4202 if (mmap_is_legacy()) {
4203 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4204+
4205+#ifdef CONFIG_PAX_RANDMMAP
4206+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4207+ mm->mmap_base += mm->delta_mmap;
4208+#endif
4209+
4210 mm->get_unmapped_area = arch_get_unmapped_area;
4211 mm->unmap_area = arch_unmap_area;
4212 } else {
4213 mm->mmap_base = mmap_base(random_factor);
4214+
4215+#ifdef CONFIG_PAX_RANDMMAP
4216+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4217+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4218+#endif
4219+
4220 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4221 mm->unmap_area = arch_unmap_area_topdown;
4222 }
4223diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
4224index ce328c7..35b88dc 100644
4225--- a/arch/arm/mm/mmu.c
4226+++ b/arch/arm/mm/mmu.c
4227@@ -35,6 +35,23 @@
4228
4229 #include "mm.h"
4230
4231+
4232+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4233+void modify_domain(unsigned int dom, unsigned int type)
4234+{
4235+ struct thread_info *thread = current_thread_info();
4236+ unsigned int domain = thread->cpu_domain;
4237+ /*
4238+ * DOMAIN_MANAGER might be defined to some other value,
4239+ * use the arch-defined constant
4240+ */
4241+ domain &= ~domain_val(dom, 3);
4242+ thread->cpu_domain = domain | domain_val(dom, type);
4243+ set_domain(thread->cpu_domain);
4244+}
4245+EXPORT_SYMBOL(modify_domain);
4246+#endif
4247+
4248 /*
4249 * empty_zero_page is a special page that is used for
4250 * zero-initialized data and COW.
4251@@ -195,10 +212,18 @@ void adjust_cr(unsigned long mask, unsigned long set)
4252 }
4253 #endif
4254
4255-#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
4256+#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY
4257 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4258
4259-static struct mem_type mem_types[] = {
4260+#ifdef CONFIG_PAX_KERNEXEC
4261+#define L_PTE_KERNEXEC L_PTE_RDONLY
4262+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4263+#else
4264+#define L_PTE_KERNEXEC L_PTE_DIRTY
4265+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4266+#endif
4267+
4268+static struct mem_type mem_types[] __read_only = {
4269 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4270 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4271 L_PTE_SHARED,
4272@@ -227,16 +252,16 @@ static struct mem_type mem_types[] = {
4273 [MT_UNCACHED] = {
4274 .prot_pte = PROT_PTE_DEVICE,
4275 .prot_l1 = PMD_TYPE_TABLE,
4276- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4277+ .prot_sect = PROT_SECT_DEVICE,
4278 .domain = DOMAIN_IO,
4279 },
4280 [MT_CACHECLEAN] = {
4281- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4282+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4283 .domain = DOMAIN_KERNEL,
4284 },
4285 #ifndef CONFIG_ARM_LPAE
4286 [MT_MINICLEAN] = {
4287- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4288+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_RDONLY,
4289 .domain = DOMAIN_KERNEL,
4290 },
4291 #endif
4292@@ -244,36 +269,54 @@ static struct mem_type mem_types[] = {
4293 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4294 L_PTE_RDONLY,
4295 .prot_l1 = PMD_TYPE_TABLE,
4296- .domain = DOMAIN_USER,
4297+ .domain = DOMAIN_VECTORS,
4298 },
4299 [MT_HIGH_VECTORS] = {
4300 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4301- L_PTE_USER | L_PTE_RDONLY,
4302+ L_PTE_RDONLY,
4303 .prot_l1 = PMD_TYPE_TABLE,
4304- .domain = DOMAIN_USER,
4305+ .domain = DOMAIN_VECTORS,
4306 },
4307- [MT_MEMORY] = {
4308+ [MT_MEMORY_RWX] = {
4309 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4310 .prot_l1 = PMD_TYPE_TABLE,
4311 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4312 .domain = DOMAIN_KERNEL,
4313 },
4314+ [MT_MEMORY_RW] = {
4315+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4316+ .prot_l1 = PMD_TYPE_TABLE,
4317+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4318+ .domain = DOMAIN_KERNEL,
4319+ },
4320+ [MT_MEMORY_RX] = {
4321+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4322+ .prot_l1 = PMD_TYPE_TABLE,
4323+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4324+ .domain = DOMAIN_KERNEL,
4325+ },
4326 [MT_ROM] = {
4327- .prot_sect = PMD_TYPE_SECT,
4328+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4329 .domain = DOMAIN_KERNEL,
4330 },
4331- [MT_MEMORY_NONCACHED] = {
4332+ [MT_MEMORY_NONCACHED_RW] = {
4333 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4334 L_PTE_MT_BUFFERABLE,
4335 .prot_l1 = PMD_TYPE_TABLE,
4336 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4337 .domain = DOMAIN_KERNEL,
4338 },
4339+ [MT_MEMORY_NONCACHED_RX] = {
4340+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4341+ L_PTE_MT_BUFFERABLE,
4342+ .prot_l1 = PMD_TYPE_TABLE,
4343+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4344+ .domain = DOMAIN_KERNEL,
4345+ },
4346 [MT_MEMORY_DTCM] = {
4347- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4348- L_PTE_XN,
4349+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4350 .prot_l1 = PMD_TYPE_TABLE,
4351- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4352+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4353 .domain = DOMAIN_KERNEL,
4354 },
4355 [MT_MEMORY_ITCM] = {
4356@@ -283,10 +326,10 @@ static struct mem_type mem_types[] = {
4357 },
4358 [MT_MEMORY_SO] = {
4359 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4360- L_PTE_MT_UNCACHED | L_PTE_XN,
4361+ L_PTE_MT_UNCACHED,
4362 .prot_l1 = PMD_TYPE_TABLE,
4363 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
4364- PMD_SECT_UNCACHED | PMD_SECT_XN,
4365+ PMD_SECT_UNCACHED,
4366 .domain = DOMAIN_KERNEL,
4367 },
4368 [MT_MEMORY_DMA_READY] = {
4369@@ -371,9 +414,35 @@ static void __init build_mem_type_table(void)
4370 * to prevent speculative instruction fetches.
4371 */
4372 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
4373+ mem_types[MT_DEVICE].prot_pte |= L_PTE_XN;
4374 mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
4375+ mem_types[MT_DEVICE_NONSHARED].prot_pte |= L_PTE_XN;
4376 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
4377+ mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_XN;
4378 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
4379+ mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_XN;
4380+
4381+ /* Mark other regions on ARMv6+ as execute-never */
4382+
4383+#ifdef CONFIG_PAX_KERNEXEC
4384+ mem_types[MT_UNCACHED].prot_sect |= PMD_SECT_XN;
4385+ mem_types[MT_UNCACHED].prot_pte |= L_PTE_XN;
4386+ mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_XN;
4387+ mem_types[MT_CACHECLEAN].prot_pte |= L_PTE_XN;
4388+#ifndef CONFIG_ARM_LPAE
4389+ mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_XN;
4390+ mem_types[MT_MINICLEAN].prot_pte |= L_PTE_XN;
4391+#endif
4392+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
4393+ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_XN;
4394+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_XN;
4395+ mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= PMD_SECT_XN;
4396+ mem_types[MT_MEMORY_DTCM].prot_sect |= PMD_SECT_XN;
4397+ mem_types[MT_MEMORY_DTCM].prot_pte |= L_PTE_XN;
4398+#endif
4399+
4400+ mem_types[MT_MEMORY_SO].prot_sect |= PMD_SECT_XN;
4401+ mem_types[MT_MEMORY_SO].prot_pte |= L_PTE_XN;
4402 }
4403 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4404 /*
4405@@ -432,6 +501,9 @@ static void __init build_mem_type_table(void)
4406 * from SVC mode and no access from userspace.
4407 */
4408 mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4409+#ifdef CONFIG_PAX_KERNEXEC
4410+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4411+#endif
4412 mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4413 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4414 #endif
4415@@ -448,11 +520,17 @@ static void __init build_mem_type_table(void)
4416 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
4417 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
4418 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
4419- mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
4420- mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
4421+ mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4422+ mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4423+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
4424+ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
4425+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
4426+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
4427 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
4428- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
4429- mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
4430+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_S;
4431+ mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= L_PTE_SHARED;
4432+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_S;
4433+ mem_types[MT_MEMORY_NONCACHED_RX].prot_pte |= L_PTE_SHARED;
4434 }
4435 }
4436
4437@@ -463,15 +541,20 @@ static void __init build_mem_type_table(void)
4438 if (cpu_arch >= CPU_ARCH_ARMv6) {
4439 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4440 /* Non-cacheable Normal is XCB = 001 */
4441- mem_types[MT_MEMORY_NONCACHED].prot_sect |=
4442+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
4443+ PMD_SECT_BUFFERED;
4444+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
4445 PMD_SECT_BUFFERED;
4446 } else {
4447 /* For both ARMv6 and non-TEX-remapping ARMv7 */
4448- mem_types[MT_MEMORY_NONCACHED].prot_sect |=
4449+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
4450+ PMD_SECT_TEX(1);
4451+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
4452 PMD_SECT_TEX(1);
4453 }
4454 } else {
4455- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4456+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_BUFFERABLE;
4457+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_BUFFERABLE;
4458 }
4459
4460 #ifdef CONFIG_ARM_LPAE
4461@@ -487,6 +570,8 @@ static void __init build_mem_type_table(void)
4462 vecs_pgprot |= PTE_EXT_AF;
4463 #endif
4464
4465+ user_pgprot |= __supported_pte_mask;
4466+
4467 for (i = 0; i < 16; i++) {
4468 pteval_t v = pgprot_val(protection_map[i]);
4469 protection_map[i] = __pgprot(v | user_pgprot);
4470@@ -501,10 +586,15 @@ static void __init build_mem_type_table(void)
4471
4472 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
4473 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
4474- mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
4475- mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
4476+ mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4477+ mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4478+ mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
4479+ mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
4480+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
4481+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
4482 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
4483- mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
4484+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= ecc_mask;
4485+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= ecc_mask;
4486 mem_types[MT_ROM].prot_sect |= cp->pmd;
4487
4488 switch (cp->pmd) {
4489@@ -1105,18 +1195,15 @@ void __init arm_mm_memblock_reserve(void)
4490 * called function. This means you can't use any function or debugging
4491 * method which may touch any device, otherwise the kernel _will_ crash.
4492 */
4493+
4494+static char vectors[PAGE_SIZE] __read_only __aligned(PAGE_SIZE);
4495+
4496 static void __init devicemaps_init(struct machine_desc *mdesc)
4497 {
4498 struct map_desc map;
4499 unsigned long addr;
4500- void *vectors;
4501
4502- /*
4503- * Allocate the vector page early.
4504- */
4505- vectors = early_alloc(PAGE_SIZE);
4506-
4507- early_trap_init(vectors);
4508+ early_trap_init(&vectors);
4509
4510 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4511 pmd_clear(pmd_off_k(addr));
4512@@ -1156,7 +1243,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
4513 * location (0xffff0000). If we aren't using high-vectors, also
4514 * create a mapping at the low-vectors virtual address.
4515 */
4516- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4517+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4518 map.virtual = 0xffff0000;
4519 map.length = PAGE_SIZE;
4520 map.type = MT_HIGH_VECTORS;
4521@@ -1214,8 +1301,39 @@ static void __init map_lowmem(void)
4522 map.pfn = __phys_to_pfn(start);
4523 map.virtual = __phys_to_virt(start);
4524 map.length = end - start;
4525- map.type = MT_MEMORY;
4526
4527+#ifdef CONFIG_PAX_KERNEXEC
4528+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4529+ struct map_desc kernel;
4530+ struct map_desc initmap;
4531+
4532+ /* when freeing initmem we will make this RW */
4533+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4534+ initmap.virtual = (unsigned long)__init_begin;
4535+ initmap.length = _sdata - __init_begin;
4536+ initmap.type = MT_MEMORY_RWX;
4537+ create_mapping(&initmap);
4538+
4539+ /* when freeing initmem we will make this RX */
4540+ kernel.pfn = __phys_to_pfn(__pa(_stext));
4541+ kernel.virtual = (unsigned long)_stext;
4542+ kernel.length = __init_begin - _stext;
4543+ kernel.type = MT_MEMORY_RWX;
4544+ create_mapping(&kernel);
4545+
4546+ if (map.virtual < (unsigned long)_stext) {
4547+ map.length = (unsigned long)_stext - map.virtual;
4548+ map.type = MT_MEMORY_RWX;
4549+ create_mapping(&map);
4550+ }
4551+
4552+ map.pfn = __phys_to_pfn(__pa(_sdata));
4553+ map.virtual = (unsigned long)_sdata;
4554+ map.length = end - __pa(_sdata);
4555+ }
4556+#endif
4557+
4558+ map.type = MT_MEMORY_RW;
4559 create_mapping(&map);
4560 }
4561 }
4562diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
4563index 6d98c13..3cfb174 100644
4564--- a/arch/arm/mm/proc-v7-2level.S
4565+++ b/arch/arm/mm/proc-v7-2level.S
4566@@ -99,6 +99,9 @@ ENTRY(cpu_v7_set_pte_ext)
4567 tst r1, #L_PTE_XN
4568 orrne r3, r3, #PTE_EXT_XN
4569
4570+ tst r1, #L_PTE_PXN
4571+ orrne r3, r3, #PTE_EXT_PXN
4572+
4573 tst r1, #L_PTE_YOUNG
4574 tstne r1, #L_PTE_VALID
4575 #ifndef CONFIG_CPU_USE_DOMAINS
4576diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4577index a5bc92d..0bb4730 100644
4578--- a/arch/arm/plat-omap/sram.c
4579+++ b/arch/arm/plat-omap/sram.c
4580@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
4581 * Looks like we need to preserve some bootloader code at the
4582 * beginning of SRAM for jumping to flash for reboot to work...
4583 */
4584+ pax_open_kernel();
4585 memset_io(omap_sram_base + omap_sram_skip, 0,
4586 omap_sram_size - omap_sram_skip);
4587+ pax_close_kernel();
4588 }
4589diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
4590index f5144cd..71f6d1f 100644
4591--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
4592+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
4593@@ -47,7 +47,7 @@ struct samsung_dma_ops {
4594 int (*started)(unsigned ch);
4595 int (*flush)(unsigned ch);
4596 int (*stop)(unsigned ch);
4597-};
4598+} __no_const;
4599
4600 extern void *samsung_dmadev_get_ops(void);
4601 extern void *s3c_dma_get_ops(void);
4602diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
4603index 0c3ba9f..95722b3 100644
4604--- a/arch/arm64/kernel/debug-monitors.c
4605+++ b/arch/arm64/kernel/debug-monitors.c
4606@@ -151,7 +151,7 @@ static int __cpuinit os_lock_notify(struct notifier_block *self,
4607 return NOTIFY_OK;
4608 }
4609
4610-static struct notifier_block __cpuinitdata os_lock_nb = {
4611+static struct notifier_block os_lock_nb = {
4612 .notifier_call = os_lock_notify,
4613 };
4614
4615diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
4616index 5ab825c..96aaec8 100644
4617--- a/arch/arm64/kernel/hw_breakpoint.c
4618+++ b/arch/arm64/kernel/hw_breakpoint.c
4619@@ -831,7 +831,7 @@ static int __cpuinit hw_breakpoint_reset_notify(struct notifier_block *self,
4620 return NOTIFY_OK;
4621 }
4622
4623-static struct notifier_block __cpuinitdata hw_breakpoint_reset_nb = {
4624+static struct notifier_block hw_breakpoint_reset_nb = {
4625 .notifier_call = hw_breakpoint_reset_notify,
4626 };
4627
4628diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
4629index c3a58a1..78fbf54 100644
4630--- a/arch/avr32/include/asm/cache.h
4631+++ b/arch/avr32/include/asm/cache.h
4632@@ -1,8 +1,10 @@
4633 #ifndef __ASM_AVR32_CACHE_H
4634 #define __ASM_AVR32_CACHE_H
4635
4636+#include <linux/const.h>
4637+
4638 #define L1_CACHE_SHIFT 5
4639-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4640+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4641
4642 /*
4643 * Memory returned by kmalloc() may be used for DMA, so we must make
4644diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
4645index e2c3287..6c4f98c 100644
4646--- a/arch/avr32/include/asm/elf.h
4647+++ b/arch/avr32/include/asm/elf.h
4648@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
4649 the loader. We need to make sure that it is out of the way of the program
4650 that it will "exec", and that there is sufficient room for the brk. */
4651
4652-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
4653+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
4654
4655+#ifdef CONFIG_PAX_ASLR
4656+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
4657+
4658+#define PAX_DELTA_MMAP_LEN 15
4659+#define PAX_DELTA_STACK_LEN 15
4660+#endif
4661
4662 /* This yields a mask that user programs can use to figure out what
4663 instruction set this CPU supports. This could be done in user space,
4664diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
4665index 479330b..53717a8 100644
4666--- a/arch/avr32/include/asm/kmap_types.h
4667+++ b/arch/avr32/include/asm/kmap_types.h
4668@@ -2,9 +2,9 @@
4669 #define __ASM_AVR32_KMAP_TYPES_H
4670
4671 #ifdef CONFIG_DEBUG_HIGHMEM
4672-# define KM_TYPE_NR 29
4673+# define KM_TYPE_NR 30
4674 #else
4675-# define KM_TYPE_NR 14
4676+# define KM_TYPE_NR 15
4677 #endif
4678
4679 #endif /* __ASM_AVR32_KMAP_TYPES_H */
4680diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
4681index b2f2d2d..d1c85cb 100644
4682--- a/arch/avr32/mm/fault.c
4683+++ b/arch/avr32/mm/fault.c
4684@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
4685
4686 int exception_trace = 1;
4687
4688+#ifdef CONFIG_PAX_PAGEEXEC
4689+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4690+{
4691+ unsigned long i;
4692+
4693+ printk(KERN_ERR "PAX: bytes at PC: ");
4694+ for (i = 0; i < 20; i++) {
4695+ unsigned char c;
4696+ if (get_user(c, (unsigned char *)pc+i))
4697+ printk(KERN_CONT "???????? ");
4698+ else
4699+ printk(KERN_CONT "%02x ", c);
4700+ }
4701+ printk("\n");
4702+}
4703+#endif
4704+
4705 /*
4706 * This routine handles page faults. It determines the address and the
4707 * problem, and then passes it off to one of the appropriate routines.
4708@@ -174,6 +191,16 @@ bad_area:
4709 up_read(&mm->mmap_sem);
4710
4711 if (user_mode(regs)) {
4712+
4713+#ifdef CONFIG_PAX_PAGEEXEC
4714+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4715+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
4716+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
4717+ do_group_exit(SIGKILL);
4718+ }
4719+ }
4720+#endif
4721+
4722 if (exception_trace && printk_ratelimit())
4723 printk("%s%s[%d]: segfault at %08lx pc %08lx "
4724 "sp %08lx ecr %lu\n",
4725diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
4726index 568885a..f8008df 100644
4727--- a/arch/blackfin/include/asm/cache.h
4728+++ b/arch/blackfin/include/asm/cache.h
4729@@ -7,6 +7,7 @@
4730 #ifndef __ARCH_BLACKFIN_CACHE_H
4731 #define __ARCH_BLACKFIN_CACHE_H
4732
4733+#include <linux/const.h>
4734 #include <linux/linkage.h> /* for asmlinkage */
4735
4736 /*
4737@@ -14,7 +15,7 @@
4738 * Blackfin loads 32 bytes for cache
4739 */
4740 #define L1_CACHE_SHIFT 5
4741-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4742+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4743 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4744
4745 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
4746diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
4747index aea2718..3639a60 100644
4748--- a/arch/cris/include/arch-v10/arch/cache.h
4749+++ b/arch/cris/include/arch-v10/arch/cache.h
4750@@ -1,8 +1,9 @@
4751 #ifndef _ASM_ARCH_CACHE_H
4752 #define _ASM_ARCH_CACHE_H
4753
4754+#include <linux/const.h>
4755 /* Etrax 100LX have 32-byte cache-lines. */
4756-#define L1_CACHE_BYTES 32
4757 #define L1_CACHE_SHIFT 5
4758+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4759
4760 #endif /* _ASM_ARCH_CACHE_H */
4761diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
4762index 7caf25d..ee65ac5 100644
4763--- a/arch/cris/include/arch-v32/arch/cache.h
4764+++ b/arch/cris/include/arch-v32/arch/cache.h
4765@@ -1,11 +1,12 @@
4766 #ifndef _ASM_CRIS_ARCH_CACHE_H
4767 #define _ASM_CRIS_ARCH_CACHE_H
4768
4769+#include <linux/const.h>
4770 #include <arch/hwregs/dma.h>
4771
4772 /* A cache-line is 32 bytes. */
4773-#define L1_CACHE_BYTES 32
4774 #define L1_CACHE_SHIFT 5
4775+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4776
4777 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4778
4779diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
4780index b86329d..6709906 100644
4781--- a/arch/frv/include/asm/atomic.h
4782+++ b/arch/frv/include/asm/atomic.h
4783@@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v)
4784 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
4785 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
4786
4787+#define atomic64_read_unchecked(v) atomic64_read(v)
4788+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4789+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4790+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4791+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4792+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4793+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4794+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4795+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4796+
4797 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
4798 {
4799 int c, old;
4800diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
4801index 2797163..c2a401d 100644
4802--- a/arch/frv/include/asm/cache.h
4803+++ b/arch/frv/include/asm/cache.h
4804@@ -12,10 +12,11 @@
4805 #ifndef __ASM_CACHE_H
4806 #define __ASM_CACHE_H
4807
4808+#include <linux/const.h>
4809
4810 /* bytes per L1 cache line */
4811 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
4812-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4813+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4814
4815 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4816 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4817diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
4818index 43901f2..0d8b865 100644
4819--- a/arch/frv/include/asm/kmap_types.h
4820+++ b/arch/frv/include/asm/kmap_types.h
4821@@ -2,6 +2,6 @@
4822 #ifndef _ASM_KMAP_TYPES_H
4823 #define _ASM_KMAP_TYPES_H
4824
4825-#define KM_TYPE_NR 17
4826+#define KM_TYPE_NR 18
4827
4828 #endif
4829diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
4830index 385fd30..3aaf4fe 100644
4831--- a/arch/frv/mm/elf-fdpic.c
4832+++ b/arch/frv/mm/elf-fdpic.c
4833@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4834 {
4835 struct vm_area_struct *vma;
4836 unsigned long limit;
4837+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
4838
4839 if (len > TASK_SIZE)
4840 return -ENOMEM;
4841@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4842 if (addr) {
4843 addr = PAGE_ALIGN(addr);
4844 vma = find_vma(current->mm, addr);
4845- if (TASK_SIZE - len >= addr &&
4846- (!vma || addr + len <= vma->vm_start))
4847+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4848 goto success;
4849 }
4850
4851@@ -89,7 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4852 for (; vma; vma = vma->vm_next) {
4853 if (addr > limit)
4854 break;
4855- if (addr + len <= vma->vm_start)
4856+ if (check_heap_stack_gap(vma, addr, len, offset))
4857 goto success;
4858 addr = vma->vm_end;
4859 }
4860@@ -104,7 +104,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4861 for (; vma; vma = vma->vm_next) {
4862 if (addr > limit)
4863 break;
4864- if (addr + len <= vma->vm_start)
4865+ if (check_heap_stack_gap(vma, addr, len, offset))
4866 goto success;
4867 addr = vma->vm_end;
4868 }
4869diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
4870index f4ca594..adc72fd6 100644
4871--- a/arch/hexagon/include/asm/cache.h
4872+++ b/arch/hexagon/include/asm/cache.h
4873@@ -21,9 +21,11 @@
4874 #ifndef __ASM_CACHE_H
4875 #define __ASM_CACHE_H
4876
4877+#include <linux/const.h>
4878+
4879 /* Bytes per L1 cache line */
4880-#define L1_CACHE_SHIFT (5)
4881-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4882+#define L1_CACHE_SHIFT 5
4883+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4884
4885 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
4886 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
4887diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
4888index 6e6fe18..a6ae668 100644
4889--- a/arch/ia64/include/asm/atomic.h
4890+++ b/arch/ia64/include/asm/atomic.h
4891@@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
4892 #define atomic64_inc(v) atomic64_add(1, (v))
4893 #define atomic64_dec(v) atomic64_sub(1, (v))
4894
4895+#define atomic64_read_unchecked(v) atomic64_read(v)
4896+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4897+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4898+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4899+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4900+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4901+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4902+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4903+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4904+
4905 /* Atomic operations are already serializing */
4906 #define smp_mb__before_atomic_dec() barrier()
4907 #define smp_mb__after_atomic_dec() barrier()
4908diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
4909index 988254a..e1ee885 100644
4910--- a/arch/ia64/include/asm/cache.h
4911+++ b/arch/ia64/include/asm/cache.h
4912@@ -1,6 +1,7 @@
4913 #ifndef _ASM_IA64_CACHE_H
4914 #define _ASM_IA64_CACHE_H
4915
4916+#include <linux/const.h>
4917
4918 /*
4919 * Copyright (C) 1998-2000 Hewlett-Packard Co
4920@@ -9,7 +10,7 @@
4921
4922 /* Bytes per L1 (data) cache line. */
4923 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
4924-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4925+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4926
4927 #ifdef CONFIG_SMP
4928 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
4929diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
4930index b5298eb..67c6e62 100644
4931--- a/arch/ia64/include/asm/elf.h
4932+++ b/arch/ia64/include/asm/elf.h
4933@@ -42,6 +42,13 @@
4934 */
4935 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
4936
4937+#ifdef CONFIG_PAX_ASLR
4938+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
4939+
4940+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4941+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4942+#endif
4943+
4944 #define PT_IA_64_UNWIND 0x70000001
4945
4946 /* IA-64 relocations: */
4947diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
4948index 96a8d92..617a1cf 100644
4949--- a/arch/ia64/include/asm/pgalloc.h
4950+++ b/arch/ia64/include/asm/pgalloc.h
4951@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4952 pgd_val(*pgd_entry) = __pa(pud);
4953 }
4954
4955+static inline void
4956+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4957+{
4958+ pgd_populate(mm, pgd_entry, pud);
4959+}
4960+
4961 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
4962 {
4963 return quicklist_alloc(0, GFP_KERNEL, NULL);
4964@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
4965 pud_val(*pud_entry) = __pa(pmd);
4966 }
4967
4968+static inline void
4969+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
4970+{
4971+ pud_populate(mm, pud_entry, pmd);
4972+}
4973+
4974 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
4975 {
4976 return quicklist_alloc(0, GFP_KERNEL, NULL);
4977diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
4978index 815810c..d60bd4c 100644
4979--- a/arch/ia64/include/asm/pgtable.h
4980+++ b/arch/ia64/include/asm/pgtable.h
4981@@ -12,7 +12,7 @@
4982 * David Mosberger-Tang <davidm@hpl.hp.com>
4983 */
4984
4985-
4986+#include <linux/const.h>
4987 #include <asm/mman.h>
4988 #include <asm/page.h>
4989 #include <asm/processor.h>
4990@@ -142,6 +142,17 @@
4991 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4992 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4993 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
4994+
4995+#ifdef CONFIG_PAX_PAGEEXEC
4996+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
4997+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4998+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4999+#else
5000+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5001+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5002+# define PAGE_COPY_NOEXEC PAGE_COPY
5003+#endif
5004+
5005 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
5006 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
5007 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
5008diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
5009index 54ff557..70c88b7 100644
5010--- a/arch/ia64/include/asm/spinlock.h
5011+++ b/arch/ia64/include/asm/spinlock.h
5012@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
5013 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
5014
5015 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
5016- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
5017+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
5018 }
5019
5020 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
5021diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
5022index 449c8c0..18965fb 100644
5023--- a/arch/ia64/include/asm/uaccess.h
5024+++ b/arch/ia64/include/asm/uaccess.h
5025@@ -240,12 +240,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
5026 static inline unsigned long
5027 __copy_to_user (void __user *to, const void *from, unsigned long count)
5028 {
5029+ if (count > INT_MAX)
5030+ return count;
5031+
5032+ if (!__builtin_constant_p(count))
5033+ check_object_size(from, count, true);
5034+
5035 return __copy_user(to, (__force void __user *) from, count);
5036 }
5037
5038 static inline unsigned long
5039 __copy_from_user (void *to, const void __user *from, unsigned long count)
5040 {
5041+ if (count > INT_MAX)
5042+ return count;
5043+
5044+ if (!__builtin_constant_p(count))
5045+ check_object_size(to, count, false);
5046+
5047 return __copy_user((__force void __user *) to, from, count);
5048 }
5049
5050@@ -255,10 +267,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5051 ({ \
5052 void __user *__cu_to = (to); \
5053 const void *__cu_from = (from); \
5054- long __cu_len = (n); \
5055+ unsigned long __cu_len = (n); \
5056 \
5057- if (__access_ok(__cu_to, __cu_len, get_fs())) \
5058+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
5059+ if (!__builtin_constant_p(n)) \
5060+ check_object_size(__cu_from, __cu_len, true); \
5061 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
5062+ } \
5063 __cu_len; \
5064 })
5065
5066@@ -266,11 +281,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5067 ({ \
5068 void *__cu_to = (to); \
5069 const void __user *__cu_from = (from); \
5070- long __cu_len = (n); \
5071+ unsigned long __cu_len = (n); \
5072 \
5073 __chk_user_ptr(__cu_from); \
5074- if (__access_ok(__cu_from, __cu_len, get_fs())) \
5075+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
5076+ if (!__builtin_constant_p(n)) \
5077+ check_object_size(__cu_to, __cu_len, false); \
5078 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
5079+ } \
5080 __cu_len; \
5081 })
5082
5083diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
5084index 2d67317..07d8bfa 100644
5085--- a/arch/ia64/kernel/err_inject.c
5086+++ b/arch/ia64/kernel/err_inject.c
5087@@ -256,7 +256,7 @@ static int __cpuinit err_inject_cpu_callback(struct notifier_block *nfb,
5088 return NOTIFY_OK;
5089 }
5090
5091-static struct notifier_block __cpuinitdata err_inject_cpu_notifier =
5092+static struct notifier_block err_inject_cpu_notifier =
5093 {
5094 .notifier_call = err_inject_cpu_callback,
5095 };
5096diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
5097index 65bf9cd..794f06b 100644
5098--- a/arch/ia64/kernel/mca.c
5099+++ b/arch/ia64/kernel/mca.c
5100@@ -1922,7 +1922,7 @@ static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
5101 return NOTIFY_OK;
5102 }
5103
5104-static struct notifier_block mca_cpu_notifier __cpuinitdata = {
5105+static struct notifier_block mca_cpu_notifier = {
5106 .notifier_call = mca_cpu_callback
5107 };
5108
5109diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
5110index 24603be..948052d 100644
5111--- a/arch/ia64/kernel/module.c
5112+++ b/arch/ia64/kernel/module.c
5113@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
5114 void
5115 module_free (struct module *mod, void *module_region)
5116 {
5117- if (mod && mod->arch.init_unw_table &&
5118- module_region == mod->module_init) {
5119+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
5120 unw_remove_unwind_table(mod->arch.init_unw_table);
5121 mod->arch.init_unw_table = NULL;
5122 }
5123@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
5124 }
5125
5126 static inline int
5127+in_init_rx (const struct module *mod, uint64_t addr)
5128+{
5129+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
5130+}
5131+
5132+static inline int
5133+in_init_rw (const struct module *mod, uint64_t addr)
5134+{
5135+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
5136+}
5137+
5138+static inline int
5139 in_init (const struct module *mod, uint64_t addr)
5140 {
5141- return addr - (uint64_t) mod->module_init < mod->init_size;
5142+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
5143+}
5144+
5145+static inline int
5146+in_core_rx (const struct module *mod, uint64_t addr)
5147+{
5148+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
5149+}
5150+
5151+static inline int
5152+in_core_rw (const struct module *mod, uint64_t addr)
5153+{
5154+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
5155 }
5156
5157 static inline int
5158 in_core (const struct module *mod, uint64_t addr)
5159 {
5160- return addr - (uint64_t) mod->module_core < mod->core_size;
5161+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
5162 }
5163
5164 static inline int
5165@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
5166 break;
5167
5168 case RV_BDREL:
5169- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
5170+ if (in_init_rx(mod, val))
5171+ val -= (uint64_t) mod->module_init_rx;
5172+ else if (in_init_rw(mod, val))
5173+ val -= (uint64_t) mod->module_init_rw;
5174+ else if (in_core_rx(mod, val))
5175+ val -= (uint64_t) mod->module_core_rx;
5176+ else if (in_core_rw(mod, val))
5177+ val -= (uint64_t) mod->module_core_rw;
5178 break;
5179
5180 case RV_LTV:
5181@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
5182 * addresses have been selected...
5183 */
5184 uint64_t gp;
5185- if (mod->core_size > MAX_LTOFF)
5186+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
5187 /*
5188 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
5189 * at the end of the module.
5190 */
5191- gp = mod->core_size - MAX_LTOFF / 2;
5192+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
5193 else
5194- gp = mod->core_size / 2;
5195- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
5196+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
5197+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
5198 mod->arch.gp = gp;
5199 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
5200 }
5201diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
5202index 77597e5..6f28f3f 100644
5203--- a/arch/ia64/kernel/palinfo.c
5204+++ b/arch/ia64/kernel/palinfo.c
5205@@ -1045,7 +1045,7 @@ static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
5206 return NOTIFY_OK;
5207 }
5208
5209-static struct notifier_block __refdata palinfo_cpu_notifier =
5210+static struct notifier_block palinfo_cpu_notifier =
5211 {
5212 .notifier_call = palinfo_cpu_callback,
5213 .priority = 0,
5214diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
5215index 79802e5..1a89ec5 100644
5216--- a/arch/ia64/kernel/salinfo.c
5217+++ b/arch/ia64/kernel/salinfo.c
5218@@ -616,7 +616,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu
5219 return NOTIFY_OK;
5220 }
5221
5222-static struct notifier_block salinfo_cpu_notifier __cpuinitdata =
5223+static struct notifier_block salinfo_cpu_notifier =
5224 {
5225 .notifier_call = salinfo_cpu_callback,
5226 .priority = 0,
5227diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
5228index d9439ef..d0cac6b 100644
5229--- a/arch/ia64/kernel/sys_ia64.c
5230+++ b/arch/ia64/kernel/sys_ia64.c
5231@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5232 unsigned long start_addr, align_mask = PAGE_SIZE - 1;
5233 struct mm_struct *mm = current->mm;
5234 struct vm_area_struct *vma;
5235+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5236
5237 if (len > RGN_MAP_LIMIT)
5238 return -ENOMEM;
5239@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5240 if (REGION_NUMBER(addr) == RGN_HPAGE)
5241 addr = 0;
5242 #endif
5243+
5244+#ifdef CONFIG_PAX_RANDMMAP
5245+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5246+ addr = mm->free_area_cache;
5247+ else
5248+#endif
5249+
5250 if (!addr)
5251 addr = mm->free_area_cache;
5252
5253@@ -61,14 +69,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5254 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
5255 /* At this point: (!vma || addr < vma->vm_end). */
5256 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
5257- if (start_addr != TASK_UNMAPPED_BASE) {
5258+ if (start_addr != mm->mmap_base) {
5259 /* Start a new search --- just in case we missed some holes. */
5260- addr = TASK_UNMAPPED_BASE;
5261+ addr = mm->mmap_base;
5262 goto full_search;
5263 }
5264 return -ENOMEM;
5265 }
5266- if (!vma || addr + len <= vma->vm_start) {
5267+ if (check_heap_stack_gap(vma, addr, len, offset)) {
5268 /* Remember the address where we stopped this search: */
5269 mm->free_area_cache = addr + len;
5270 return addr;
5271diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
5272index dc00b2c..cce53c2 100644
5273--- a/arch/ia64/kernel/topology.c
5274+++ b/arch/ia64/kernel/topology.c
5275@@ -445,7 +445,7 @@ static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
5276 return NOTIFY_OK;
5277 }
5278
5279-static struct notifier_block __cpuinitdata cache_cpu_notifier =
5280+static struct notifier_block cache_cpu_notifier =
5281 {
5282 .notifier_call = cache_cpu_callback
5283 };
5284diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5285index 0ccb28f..8992469 100644
5286--- a/arch/ia64/kernel/vmlinux.lds.S
5287+++ b/arch/ia64/kernel/vmlinux.lds.S
5288@@ -198,7 +198,7 @@ SECTIONS {
5289 /* Per-cpu data: */
5290 . = ALIGN(PERCPU_PAGE_SIZE);
5291 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5292- __phys_per_cpu_start = __per_cpu_load;
5293+ __phys_per_cpu_start = per_cpu_load;
5294 /*
5295 * ensure percpu data fits
5296 * into percpu page size
5297diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5298index 6cf0341..d352594 100644
5299--- a/arch/ia64/mm/fault.c
5300+++ b/arch/ia64/mm/fault.c
5301@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5302 return pte_present(pte);
5303 }
5304
5305+#ifdef CONFIG_PAX_PAGEEXEC
5306+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5307+{
5308+ unsigned long i;
5309+
5310+ printk(KERN_ERR "PAX: bytes at PC: ");
5311+ for (i = 0; i < 8; i++) {
5312+ unsigned int c;
5313+ if (get_user(c, (unsigned int *)pc+i))
5314+ printk(KERN_CONT "???????? ");
5315+ else
5316+ printk(KERN_CONT "%08x ", c);
5317+ }
5318+ printk("\n");
5319+}
5320+#endif
5321+
5322 # define VM_READ_BIT 0
5323 # define VM_WRITE_BIT 1
5324 # define VM_EXEC_BIT 2
5325@@ -149,8 +166,21 @@ retry:
5326 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5327 goto bad_area;
5328
5329- if ((vma->vm_flags & mask) != mask)
5330+ if ((vma->vm_flags & mask) != mask) {
5331+
5332+#ifdef CONFIG_PAX_PAGEEXEC
5333+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5334+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5335+ goto bad_area;
5336+
5337+ up_read(&mm->mmap_sem);
5338+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5339+ do_group_exit(SIGKILL);
5340+ }
5341+#endif
5342+
5343 goto bad_area;
5344+ }
5345
5346 /*
5347 * If for any reason at all we couldn't handle the fault, make
5348diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5349index 5ca674b..127c3cb 100644
5350--- a/arch/ia64/mm/hugetlbpage.c
5351+++ b/arch/ia64/mm/hugetlbpage.c
5352@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5353 unsigned long pgoff, unsigned long flags)
5354 {
5355 struct vm_area_struct *vmm;
5356+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
5357
5358 if (len > RGN_MAP_LIMIT)
5359 return -ENOMEM;
5360@@ -171,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5361 /* At this point: (!vmm || addr < vmm->vm_end). */
5362 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
5363 return -ENOMEM;
5364- if (!vmm || (addr + len) <= vmm->vm_start)
5365+ if (check_heap_stack_gap(vmm, addr, len, offset))
5366 return addr;
5367 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
5368 }
5369diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
5370index b755ea9..b9a969e 100644
5371--- a/arch/ia64/mm/init.c
5372+++ b/arch/ia64/mm/init.c
5373@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
5374 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
5375 vma->vm_end = vma->vm_start + PAGE_SIZE;
5376 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
5377+
5378+#ifdef CONFIG_PAX_PAGEEXEC
5379+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
5380+ vma->vm_flags &= ~VM_EXEC;
5381+
5382+#ifdef CONFIG_PAX_MPROTECT
5383+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
5384+ vma->vm_flags &= ~VM_MAYEXEC;
5385+#endif
5386+
5387+ }
5388+#endif
5389+
5390 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5391 down_write(&current->mm->mmap_sem);
5392 if (insert_vm_struct(current->mm, vma)) {
5393diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
5394index 40b3ee9..8c2c112 100644
5395--- a/arch/m32r/include/asm/cache.h
5396+++ b/arch/m32r/include/asm/cache.h
5397@@ -1,8 +1,10 @@
5398 #ifndef _ASM_M32R_CACHE_H
5399 #define _ASM_M32R_CACHE_H
5400
5401+#include <linux/const.h>
5402+
5403 /* L1 cache line size */
5404 #define L1_CACHE_SHIFT 4
5405-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5406+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5407
5408 #endif /* _ASM_M32R_CACHE_H */
5409diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
5410index 82abd15..d95ae5d 100644
5411--- a/arch/m32r/lib/usercopy.c
5412+++ b/arch/m32r/lib/usercopy.c
5413@@ -14,6 +14,9 @@
5414 unsigned long
5415 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5416 {
5417+ if ((long)n < 0)
5418+ return n;
5419+
5420 prefetch(from);
5421 if (access_ok(VERIFY_WRITE, to, n))
5422 __copy_user(to,from,n);
5423@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5424 unsigned long
5425 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
5426 {
5427+ if ((long)n < 0)
5428+ return n;
5429+
5430 prefetchw(to);
5431 if (access_ok(VERIFY_READ, from, n))
5432 __copy_user_zeroing(to,from,n);
5433diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
5434index 0395c51..5f26031 100644
5435--- a/arch/m68k/include/asm/cache.h
5436+++ b/arch/m68k/include/asm/cache.h
5437@@ -4,9 +4,11 @@
5438 #ifndef __ARCH_M68K_CACHE_H
5439 #define __ARCH_M68K_CACHE_H
5440
5441+#include <linux/const.h>
5442+
5443 /* bytes per L1 cache line */
5444 #define L1_CACHE_SHIFT 4
5445-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
5446+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5447
5448 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5449
5450diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5451index 4efe96a..60e8699 100644
5452--- a/arch/microblaze/include/asm/cache.h
5453+++ b/arch/microblaze/include/asm/cache.h
5454@@ -13,11 +13,12 @@
5455 #ifndef _ASM_MICROBLAZE_CACHE_H
5456 #define _ASM_MICROBLAZE_CACHE_H
5457
5458+#include <linux/const.h>
5459 #include <asm/registers.h>
5460
5461 #define L1_CACHE_SHIFT 5
5462 /* word-granular cache in microblaze */
5463-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5464+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5465
5466 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5467
5468diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
5469index 01cc6ba..bcb7a5d 100644
5470--- a/arch/mips/include/asm/atomic.h
5471+++ b/arch/mips/include/asm/atomic.h
5472@@ -21,6 +21,10 @@
5473 #include <asm/cmpxchg.h>
5474 #include <asm/war.h>
5475
5476+#ifdef CONFIG_GENERIC_ATOMIC64
5477+#include <asm-generic/atomic64.h>
5478+#endif
5479+
5480 #define ATOMIC_INIT(i) { (i) }
5481
5482 /*
5483@@ -759,6 +763,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
5484 */
5485 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
5486
5487+#define atomic64_read_unchecked(v) atomic64_read(v)
5488+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5489+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5490+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5491+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5492+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5493+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5494+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5495+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5496+
5497 #endif /* CONFIG_64BIT */
5498
5499 /*
5500diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
5501index b4db69f..8f3b093 100644
5502--- a/arch/mips/include/asm/cache.h
5503+++ b/arch/mips/include/asm/cache.h
5504@@ -9,10 +9,11 @@
5505 #ifndef _ASM_CACHE_H
5506 #define _ASM_CACHE_H
5507
5508+#include <linux/const.h>
5509 #include <kmalloc.h>
5510
5511 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
5512-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5513+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5514
5515 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
5516 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5517diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
5518index 455c0ac..ad65fbe 100644
5519--- a/arch/mips/include/asm/elf.h
5520+++ b/arch/mips/include/asm/elf.h
5521@@ -372,13 +372,16 @@ extern const char *__elf_platform;
5522 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
5523 #endif
5524
5525+#ifdef CONFIG_PAX_ASLR
5526+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
5527+
5528+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5529+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5530+#endif
5531+
5532 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
5533 struct linux_binprm;
5534 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
5535 int uses_interp);
5536
5537-struct mm_struct;
5538-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
5539-#define arch_randomize_brk arch_randomize_brk
5540-
5541 #endif /* _ASM_ELF_H */
5542diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
5543index c1f6afa..38cc6e9 100644
5544--- a/arch/mips/include/asm/exec.h
5545+++ b/arch/mips/include/asm/exec.h
5546@@ -12,6 +12,6 @@
5547 #ifndef _ASM_EXEC_H
5548 #define _ASM_EXEC_H
5549
5550-extern unsigned long arch_align_stack(unsigned long sp);
5551+#define arch_align_stack(x) ((x) & ~0xfUL)
5552
5553 #endif /* _ASM_EXEC_H */
5554diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
5555index dbaec94..6a14935 100644
5556--- a/arch/mips/include/asm/page.h
5557+++ b/arch/mips/include/asm/page.h
5558@@ -96,7 +96,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
5559 #ifdef CONFIG_CPU_MIPS32
5560 typedef struct { unsigned long pte_low, pte_high; } pte_t;
5561 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
5562- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
5563+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
5564 #else
5565 typedef struct { unsigned long long pte; } pte_t;
5566 #define pte_val(x) ((x).pte)
5567diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
5568index 881d18b..cea38bc 100644
5569--- a/arch/mips/include/asm/pgalloc.h
5570+++ b/arch/mips/include/asm/pgalloc.h
5571@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
5572 {
5573 set_pud(pud, __pud((unsigned long)pmd));
5574 }
5575+
5576+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
5577+{
5578+ pud_populate(mm, pud, pmd);
5579+}
5580 #endif
5581
5582 /*
5583diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
5584index b2050b9..d71bb1b 100644
5585--- a/arch/mips/include/asm/thread_info.h
5586+++ b/arch/mips/include/asm/thread_info.h
5587@@ -111,6 +111,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
5588 #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
5589 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
5590 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
5591+/* li takes a 32bit immediate */
5592+#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
5593 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
5594
5595 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
5596@@ -126,15 +128,18 @@ register struct thread_info *__current_thread_info __asm__("$28");
5597 #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
5598 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
5599 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
5600+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
5601+
5602+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
5603
5604 /* work to do in syscall_trace_leave() */
5605-#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
5606+#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
5607
5608 /* work to do on interrupt/exception return */
5609 #define _TIF_WORK_MASK \
5610 (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME)
5611 /* work to do on any return to u-space */
5612-#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT)
5613+#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT | _TIF_GRSEC_SETXID)
5614
5615 #endif /* __KERNEL__ */
5616
5617diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
5618index 9fdd8bc..4bd7f1a 100644
5619--- a/arch/mips/kernel/binfmt_elfn32.c
5620+++ b/arch/mips/kernel/binfmt_elfn32.c
5621@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
5622 #undef ELF_ET_DYN_BASE
5623 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
5624
5625+#ifdef CONFIG_PAX_ASLR
5626+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
5627+
5628+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5629+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5630+#endif
5631+
5632 #include <asm/processor.h>
5633 #include <linux/module.h>
5634 #include <linux/elfcore.h>
5635diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
5636index ff44823..97f8906 100644
5637--- a/arch/mips/kernel/binfmt_elfo32.c
5638+++ b/arch/mips/kernel/binfmt_elfo32.c
5639@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
5640 #undef ELF_ET_DYN_BASE
5641 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
5642
5643+#ifdef CONFIG_PAX_ASLR
5644+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
5645+
5646+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5647+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5648+#endif
5649+
5650 #include <asm/processor.h>
5651
5652 /*
5653diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
5654index a11c6f9..be5e164 100644
5655--- a/arch/mips/kernel/process.c
5656+++ b/arch/mips/kernel/process.c
5657@@ -460,15 +460,3 @@ unsigned long get_wchan(struct task_struct *task)
5658 out:
5659 return pc;
5660 }
5661-
5662-/*
5663- * Don't forget that the stack pointer must be aligned on a 8 bytes
5664- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
5665- */
5666-unsigned long arch_align_stack(unsigned long sp)
5667-{
5668- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5669- sp -= get_random_int() & ~PAGE_MASK;
5670-
5671- return sp & ALMASK;
5672-}
5673diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
5674index 4812c6d..2069554 100644
5675--- a/arch/mips/kernel/ptrace.c
5676+++ b/arch/mips/kernel/ptrace.c
5677@@ -528,6 +528,10 @@ static inline int audit_arch(void)
5678 return arch;
5679 }
5680
5681+#ifdef CONFIG_GRKERNSEC_SETXID
5682+extern void gr_delayed_cred_worker(void);
5683+#endif
5684+
5685 /*
5686 * Notification of system call entry/exit
5687 * - triggered by current->work.syscall_trace
5688@@ -537,6 +541,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
5689 /* do the secure computing check first */
5690 secure_computing_strict(regs->regs[2]);
5691
5692+#ifdef CONFIG_GRKERNSEC_SETXID
5693+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5694+ gr_delayed_cred_worker();
5695+#endif
5696+
5697 if (!(current->ptrace & PT_PTRACED))
5698 goto out;
5699
5700diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
5701index d20a4bc..7096ae5 100644
5702--- a/arch/mips/kernel/scall32-o32.S
5703+++ b/arch/mips/kernel/scall32-o32.S
5704@@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
5705
5706 stack_done:
5707 lw t0, TI_FLAGS($28) # syscall tracing enabled?
5708- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5709+ li t1, _TIF_SYSCALL_WORK
5710 and t0, t1
5711 bnez t0, syscall_trace_entry # -> yes
5712
5713diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
5714index b64f642..0fe6eab 100644
5715--- a/arch/mips/kernel/scall64-64.S
5716+++ b/arch/mips/kernel/scall64-64.S
5717@@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
5718
5719 sd a3, PT_R26(sp) # save a3 for syscall restarting
5720
5721- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5722+ li t1, _TIF_SYSCALL_WORK
5723 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
5724 and t0, t1, t0
5725 bnez t0, syscall_trace_entry
5726diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
5727index c29ac19..c592d05 100644
5728--- a/arch/mips/kernel/scall64-n32.S
5729+++ b/arch/mips/kernel/scall64-n32.S
5730@@ -47,7 +47,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
5731
5732 sd a3, PT_R26(sp) # save a3 for syscall restarting
5733
5734- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5735+ li t1, _TIF_SYSCALL_WORK
5736 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
5737 and t0, t1, t0
5738 bnez t0, n32_syscall_trace_entry
5739diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
5740index cf3e75e..72e93fe 100644
5741--- a/arch/mips/kernel/scall64-o32.S
5742+++ b/arch/mips/kernel/scall64-o32.S
5743@@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
5744 PTR 4b, bad_stack
5745 .previous
5746
5747- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5748+ li t1, _TIF_SYSCALL_WORK
5749 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
5750 and t0, t1, t0
5751 bnez t0, trace_a_syscall
5752diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
5753index ddcec1e..c7f983e 100644
5754--- a/arch/mips/mm/fault.c
5755+++ b/arch/mips/mm/fault.c
5756@@ -27,6 +27,23 @@
5757 #include <asm/highmem.h> /* For VMALLOC_END */
5758 #include <linux/kdebug.h>
5759
5760+#ifdef CONFIG_PAX_PAGEEXEC
5761+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5762+{
5763+ unsigned long i;
5764+
5765+ printk(KERN_ERR "PAX: bytes at PC: ");
5766+ for (i = 0; i < 5; i++) {
5767+ unsigned int c;
5768+ if (get_user(c, (unsigned int *)pc+i))
5769+ printk(KERN_CONT "???????? ");
5770+ else
5771+ printk(KERN_CONT "%08x ", c);
5772+ }
5773+ printk("\n");
5774+}
5775+#endif
5776+
5777 /*
5778 * This routine handles page faults. It determines the address,
5779 * and the problem, and then passes it off to one of the appropriate
5780diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
5781index 7e5fe27..479a219 100644
5782--- a/arch/mips/mm/mmap.c
5783+++ b/arch/mips/mm/mmap.c
5784@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
5785 struct vm_area_struct *vma;
5786 unsigned long addr = addr0;
5787 int do_color_align;
5788+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5789 struct vm_unmapped_area_info info;
5790
5791 if (unlikely(len > TASK_SIZE))
5792@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
5793 do_color_align = 1;
5794
5795 /* requesting a specific address */
5796+
5797+#ifdef CONFIG_PAX_RANDMMAP
5798+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
5799+#endif
5800+
5801 if (addr) {
5802 if (do_color_align)
5803 addr = COLOUR_ALIGN(addr, pgoff);
5804@@ -91,8 +97,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
5805 addr = PAGE_ALIGN(addr);
5806
5807 vma = find_vma(mm, addr);
5808- if (TASK_SIZE - len >= addr &&
5809- (!vma || addr + len <= vma->vm_start))
5810+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len, offset))
5811 return addr;
5812 }
5813
5814@@ -146,6 +151,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5815 {
5816 unsigned long random_factor = 0UL;
5817
5818+#ifdef CONFIG_PAX_RANDMMAP
5819+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5820+#endif
5821+
5822 if (current->flags & PF_RANDOMIZE) {
5823 random_factor = get_random_int();
5824 random_factor = random_factor << PAGE_SHIFT;
5825@@ -157,42 +166,27 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5826
5827 if (mmap_is_legacy()) {
5828 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5829+
5830+#ifdef CONFIG_PAX_RANDMMAP
5831+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5832+ mm->mmap_base += mm->delta_mmap;
5833+#endif
5834+
5835 mm->get_unmapped_area = arch_get_unmapped_area;
5836 mm->unmap_area = arch_unmap_area;
5837 } else {
5838 mm->mmap_base = mmap_base(random_factor);
5839+
5840+#ifdef CONFIG_PAX_RANDMMAP
5841+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5842+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5843+#endif
5844+
5845 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5846 mm->unmap_area = arch_unmap_area_topdown;
5847 }
5848 }
5849
5850-static inline unsigned long brk_rnd(void)
5851-{
5852- unsigned long rnd = get_random_int();
5853-
5854- rnd = rnd << PAGE_SHIFT;
5855- /* 8MB for 32bit, 256MB for 64bit */
5856- if (TASK_IS_32BIT_ADDR)
5857- rnd = rnd & 0x7ffffful;
5858- else
5859- rnd = rnd & 0xffffffful;
5860-
5861- return rnd;
5862-}
5863-
5864-unsigned long arch_randomize_brk(struct mm_struct *mm)
5865-{
5866- unsigned long base = mm->brk;
5867- unsigned long ret;
5868-
5869- ret = PAGE_ALIGN(base + brk_rnd());
5870-
5871- if (ret < mm->brk)
5872- return mm->brk;
5873-
5874- return ret;
5875-}
5876-
5877 int __virt_addr_valid(const volatile void *kaddr)
5878 {
5879 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
5880diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
5881index 967d144..db12197 100644
5882--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
5883+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
5884@@ -11,12 +11,14 @@
5885 #ifndef _ASM_PROC_CACHE_H
5886 #define _ASM_PROC_CACHE_H
5887
5888+#include <linux/const.h>
5889+
5890 /* L1 cache */
5891
5892 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
5893 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
5894-#define L1_CACHE_BYTES 16 /* bytes per entry */
5895 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
5896+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
5897 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
5898
5899 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
5900diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
5901index bcb5df2..84fabd2 100644
5902--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
5903+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
5904@@ -16,13 +16,15 @@
5905 #ifndef _ASM_PROC_CACHE_H
5906 #define _ASM_PROC_CACHE_H
5907
5908+#include <linux/const.h>
5909+
5910 /*
5911 * L1 cache
5912 */
5913 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
5914 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
5915-#define L1_CACHE_BYTES 32 /* bytes per entry */
5916 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
5917+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
5918 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
5919
5920 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
5921diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
5922index 4ce7a01..449202a 100644
5923--- a/arch/openrisc/include/asm/cache.h
5924+++ b/arch/openrisc/include/asm/cache.h
5925@@ -19,11 +19,13 @@
5926 #ifndef __ASM_OPENRISC_CACHE_H
5927 #define __ASM_OPENRISC_CACHE_H
5928
5929+#include <linux/const.h>
5930+
5931 /* FIXME: How can we replace these with values from the CPU...
5932 * they shouldn't be hard-coded!
5933 */
5934
5935-#define L1_CACHE_BYTES 16
5936 #define L1_CACHE_SHIFT 4
5937+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5938
5939 #endif /* __ASM_OPENRISC_CACHE_H */
5940diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
5941index af9cf30..2aae9b2 100644
5942--- a/arch/parisc/include/asm/atomic.h
5943+++ b/arch/parisc/include/asm/atomic.h
5944@@ -229,6 +229,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
5945
5946 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
5947
5948+#define atomic64_read_unchecked(v) atomic64_read(v)
5949+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5950+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5951+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5952+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5953+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5954+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5955+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5956+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5957+
5958 #endif /* !CONFIG_64BIT */
5959
5960
5961diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
5962index 47f11c7..3420df2 100644
5963--- a/arch/parisc/include/asm/cache.h
5964+++ b/arch/parisc/include/asm/cache.h
5965@@ -5,6 +5,7 @@
5966 #ifndef __ARCH_PARISC_CACHE_H
5967 #define __ARCH_PARISC_CACHE_H
5968
5969+#include <linux/const.h>
5970
5971 /*
5972 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
5973@@ -15,13 +16,13 @@
5974 * just ruin performance.
5975 */
5976 #ifdef CONFIG_PA20
5977-#define L1_CACHE_BYTES 64
5978 #define L1_CACHE_SHIFT 6
5979 #else
5980-#define L1_CACHE_BYTES 32
5981 #define L1_CACHE_SHIFT 5
5982 #endif
5983
5984+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5985+
5986 #ifndef __ASSEMBLY__
5987
5988 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5989diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
5990index 19f6cb1..6c78cf2 100644
5991--- a/arch/parisc/include/asm/elf.h
5992+++ b/arch/parisc/include/asm/elf.h
5993@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
5994
5995 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
5996
5997+#ifdef CONFIG_PAX_ASLR
5998+#define PAX_ELF_ET_DYN_BASE 0x10000UL
5999+
6000+#define PAX_DELTA_MMAP_LEN 16
6001+#define PAX_DELTA_STACK_LEN 16
6002+#endif
6003+
6004 /* This yields a mask that user programs can use to figure out what
6005 instruction set this CPU supports. This could be done in user space,
6006 but it's not easy, and we've already done it here. */
6007diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
6008index fc987a1..6e068ef 100644
6009--- a/arch/parisc/include/asm/pgalloc.h
6010+++ b/arch/parisc/include/asm/pgalloc.h
6011@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
6012 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
6013 }
6014
6015+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
6016+{
6017+ pgd_populate(mm, pgd, pmd);
6018+}
6019+
6020 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
6021 {
6022 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
6023@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
6024 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
6025 #define pmd_free(mm, x) do { } while (0)
6026 #define pgd_populate(mm, pmd, pte) BUG()
6027+#define pgd_populate_kernel(mm, pmd, pte) BUG()
6028
6029 #endif
6030
6031diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
6032index 7df49fa..38b62bf 100644
6033--- a/arch/parisc/include/asm/pgtable.h
6034+++ b/arch/parisc/include/asm/pgtable.h
6035@@ -218,6 +218,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
6036 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
6037 #define PAGE_COPY PAGE_EXECREAD
6038 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
6039+
6040+#ifdef CONFIG_PAX_PAGEEXEC
6041+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
6042+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
6043+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
6044+#else
6045+# define PAGE_SHARED_NOEXEC PAGE_SHARED
6046+# define PAGE_COPY_NOEXEC PAGE_COPY
6047+# define PAGE_READONLY_NOEXEC PAGE_READONLY
6048+#endif
6049+
6050 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
6051 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
6052 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
6053diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
6054index 4ba2c93..f5e3974 100644
6055--- a/arch/parisc/include/asm/uaccess.h
6056+++ b/arch/parisc/include/asm/uaccess.h
6057@@ -251,10 +251,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
6058 const void __user *from,
6059 unsigned long n)
6060 {
6061- int sz = __compiletime_object_size(to);
6062+ size_t sz = __compiletime_object_size(to);
6063 int ret = -EFAULT;
6064
6065- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
6066+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
6067 ret = __copy_from_user(to, from, n);
6068 else
6069 copy_from_user_overflow();
6070diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
6071index 2a625fb..9908930 100644
6072--- a/arch/parisc/kernel/module.c
6073+++ b/arch/parisc/kernel/module.c
6074@@ -98,16 +98,38 @@
6075
6076 /* three functions to determine where in the module core
6077 * or init pieces the location is */
6078+static inline int in_init_rx(struct module *me, void *loc)
6079+{
6080+ return (loc >= me->module_init_rx &&
6081+ loc < (me->module_init_rx + me->init_size_rx));
6082+}
6083+
6084+static inline int in_init_rw(struct module *me, void *loc)
6085+{
6086+ return (loc >= me->module_init_rw &&
6087+ loc < (me->module_init_rw + me->init_size_rw));
6088+}
6089+
6090 static inline int in_init(struct module *me, void *loc)
6091 {
6092- return (loc >= me->module_init &&
6093- loc <= (me->module_init + me->init_size));
6094+ return in_init_rx(me, loc) || in_init_rw(me, loc);
6095+}
6096+
6097+static inline int in_core_rx(struct module *me, void *loc)
6098+{
6099+ return (loc >= me->module_core_rx &&
6100+ loc < (me->module_core_rx + me->core_size_rx));
6101+}
6102+
6103+static inline int in_core_rw(struct module *me, void *loc)
6104+{
6105+ return (loc >= me->module_core_rw &&
6106+ loc < (me->module_core_rw + me->core_size_rw));
6107 }
6108
6109 static inline int in_core(struct module *me, void *loc)
6110 {
6111- return (loc >= me->module_core &&
6112- loc <= (me->module_core + me->core_size));
6113+ return in_core_rx(me, loc) || in_core_rw(me, loc);
6114 }
6115
6116 static inline int in_local(struct module *me, void *loc)
6117@@ -371,13 +393,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
6118 }
6119
6120 /* align things a bit */
6121- me->core_size = ALIGN(me->core_size, 16);
6122- me->arch.got_offset = me->core_size;
6123- me->core_size += gots * sizeof(struct got_entry);
6124+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
6125+ me->arch.got_offset = me->core_size_rw;
6126+ me->core_size_rw += gots * sizeof(struct got_entry);
6127
6128- me->core_size = ALIGN(me->core_size, 16);
6129- me->arch.fdesc_offset = me->core_size;
6130- me->core_size += fdescs * sizeof(Elf_Fdesc);
6131+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
6132+ me->arch.fdesc_offset = me->core_size_rw;
6133+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
6134
6135 me->arch.got_max = gots;
6136 me->arch.fdesc_max = fdescs;
6137@@ -395,7 +417,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
6138
6139 BUG_ON(value == 0);
6140
6141- got = me->module_core + me->arch.got_offset;
6142+ got = me->module_core_rw + me->arch.got_offset;
6143 for (i = 0; got[i].addr; i++)
6144 if (got[i].addr == value)
6145 goto out;
6146@@ -413,7 +435,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
6147 #ifdef CONFIG_64BIT
6148 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
6149 {
6150- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
6151+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
6152
6153 if (!value) {
6154 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
6155@@ -431,7 +453,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
6156
6157 /* Create new one */
6158 fdesc->addr = value;
6159- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
6160+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
6161 return (Elf_Addr)fdesc;
6162 }
6163 #endif /* CONFIG_64BIT */
6164@@ -843,7 +865,7 @@ register_unwind_table(struct module *me,
6165
6166 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
6167 end = table + sechdrs[me->arch.unwind_section].sh_size;
6168- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
6169+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
6170
6171 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
6172 me->arch.unwind_section, table, end, gp);
6173diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
6174index f76c108..92bad82 100644
6175--- a/arch/parisc/kernel/sys_parisc.c
6176+++ b/arch/parisc/kernel/sys_parisc.c
6177@@ -33,9 +33,11 @@
6178 #include <linux/utsname.h>
6179 #include <linux/personality.h>
6180
6181-static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
6182+static unsigned long get_unshared_area(struct file *filp, unsigned long addr, unsigned long len,
6183+ unsigned long flags)
6184 {
6185 struct vm_area_struct *vma;
6186+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
6187
6188 addr = PAGE_ALIGN(addr);
6189
6190@@ -43,7 +45,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
6191 /* At this point: (!vma || addr < vma->vm_end). */
6192 if (TASK_SIZE - len < addr)
6193 return -ENOMEM;
6194- if (!vma || addr + len <= vma->vm_start)
6195+ if (check_heap_stack_gap(vma, addr, len, offset))
6196 return addr;
6197 addr = vma->vm_end;
6198 }
6199@@ -67,11 +69,12 @@ static int get_offset(struct address_space *mapping)
6200 return offset & 0x3FF000;
6201 }
6202
6203-static unsigned long get_shared_area(struct address_space *mapping,
6204- unsigned long addr, unsigned long len, unsigned long pgoff)
6205+static unsigned long get_shared_area(struct file *filp, struct address_space *mapping,
6206+ unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
6207 {
6208 struct vm_area_struct *vma;
6209 int offset = mapping ? get_offset(mapping) : 0;
6210+ unsigned long rand_offset = gr_rand_threadstack_offset(current->mm, filp, flags);
6211
6212 offset = (offset + (pgoff << PAGE_SHIFT)) & 0x3FF000;
6213
6214@@ -81,7 +84,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
6215 /* At this point: (!vma || addr < vma->vm_end). */
6216 if (TASK_SIZE - len < addr)
6217 return -ENOMEM;
6218- if (!vma || addr + len <= vma->vm_start)
6219+ if (check_heap_stack_gap(vma, addr, len, rand_offset))
6220 return addr;
6221 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
6222 if (addr < vma->vm_end) /* handle wraparound */
6223@@ -100,14 +103,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
6224 if (flags & MAP_FIXED)
6225 return addr;
6226 if (!addr)
6227- addr = TASK_UNMAPPED_BASE;
6228+ addr = current->mm->mmap_base;
6229
6230 if (filp) {
6231- addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
6232+ addr = get_shared_area(filp, filp->f_mapping, addr, len, pgoff, flags);
6233 } else if(flags & MAP_SHARED) {
6234- addr = get_shared_area(NULL, addr, len, pgoff);
6235+ addr = get_shared_area(filp, NULL, addr, len, pgoff, flags);
6236 } else {
6237- addr = get_unshared_area(addr, len);
6238+ addr = get_unshared_area(filp, addr, len, flags);
6239 }
6240 return addr;
6241 }
6242diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
6243index 45ba99f..8e22c33 100644
6244--- a/arch/parisc/kernel/traps.c
6245+++ b/arch/parisc/kernel/traps.c
6246@@ -732,9 +732,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
6247
6248 down_read(&current->mm->mmap_sem);
6249 vma = find_vma(current->mm,regs->iaoq[0]);
6250- if (vma && (regs->iaoq[0] >= vma->vm_start)
6251- && (vma->vm_flags & VM_EXEC)) {
6252-
6253+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
6254 fault_address = regs->iaoq[0];
6255 fault_space = regs->iasq[0];
6256
6257diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
6258index 18162ce..94de376 100644
6259--- a/arch/parisc/mm/fault.c
6260+++ b/arch/parisc/mm/fault.c
6261@@ -15,6 +15,7 @@
6262 #include <linux/sched.h>
6263 #include <linux/interrupt.h>
6264 #include <linux/module.h>
6265+#include <linux/unistd.h>
6266
6267 #include <asm/uaccess.h>
6268 #include <asm/traps.h>
6269@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
6270 static unsigned long
6271 parisc_acctyp(unsigned long code, unsigned int inst)
6272 {
6273- if (code == 6 || code == 16)
6274+ if (code == 6 || code == 7 || code == 16)
6275 return VM_EXEC;
6276
6277 switch (inst & 0xf0000000) {
6278@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
6279 }
6280 #endif
6281
6282+#ifdef CONFIG_PAX_PAGEEXEC
6283+/*
6284+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
6285+ *
6286+ * returns 1 when task should be killed
6287+ * 2 when rt_sigreturn trampoline was detected
6288+ * 3 when unpatched PLT trampoline was detected
6289+ */
6290+static int pax_handle_fetch_fault(struct pt_regs *regs)
6291+{
6292+
6293+#ifdef CONFIG_PAX_EMUPLT
6294+ int err;
6295+
6296+ do { /* PaX: unpatched PLT emulation */
6297+ unsigned int bl, depwi;
6298+
6299+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
6300+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
6301+
6302+ if (err)
6303+ break;
6304+
6305+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
6306+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
6307+
6308+ err = get_user(ldw, (unsigned int *)addr);
6309+ err |= get_user(bv, (unsigned int *)(addr+4));
6310+ err |= get_user(ldw2, (unsigned int *)(addr+8));
6311+
6312+ if (err)
6313+ break;
6314+
6315+ if (ldw == 0x0E801096U &&
6316+ bv == 0xEAC0C000U &&
6317+ ldw2 == 0x0E881095U)
6318+ {
6319+ unsigned int resolver, map;
6320+
6321+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
6322+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
6323+ if (err)
6324+ break;
6325+
6326+ regs->gr[20] = instruction_pointer(regs)+8;
6327+ regs->gr[21] = map;
6328+ regs->gr[22] = resolver;
6329+ regs->iaoq[0] = resolver | 3UL;
6330+ regs->iaoq[1] = regs->iaoq[0] + 4;
6331+ return 3;
6332+ }
6333+ }
6334+ } while (0);
6335+#endif
6336+
6337+#ifdef CONFIG_PAX_EMUTRAMP
6338+
6339+#ifndef CONFIG_PAX_EMUSIGRT
6340+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
6341+ return 1;
6342+#endif
6343+
6344+ do { /* PaX: rt_sigreturn emulation */
6345+ unsigned int ldi1, ldi2, bel, nop;
6346+
6347+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
6348+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
6349+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
6350+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
6351+
6352+ if (err)
6353+ break;
6354+
6355+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
6356+ ldi2 == 0x3414015AU &&
6357+ bel == 0xE4008200U &&
6358+ nop == 0x08000240U)
6359+ {
6360+ regs->gr[25] = (ldi1 & 2) >> 1;
6361+ regs->gr[20] = __NR_rt_sigreturn;
6362+ regs->gr[31] = regs->iaoq[1] + 16;
6363+ regs->sr[0] = regs->iasq[1];
6364+ regs->iaoq[0] = 0x100UL;
6365+ regs->iaoq[1] = regs->iaoq[0] + 4;
6366+ regs->iasq[0] = regs->sr[2];
6367+ regs->iasq[1] = regs->sr[2];
6368+ return 2;
6369+ }
6370+ } while (0);
6371+#endif
6372+
6373+ return 1;
6374+}
6375+
6376+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6377+{
6378+ unsigned long i;
6379+
6380+ printk(KERN_ERR "PAX: bytes at PC: ");
6381+ for (i = 0; i < 5; i++) {
6382+ unsigned int c;
6383+ if (get_user(c, (unsigned int *)pc+i))
6384+ printk(KERN_CONT "???????? ");
6385+ else
6386+ printk(KERN_CONT "%08x ", c);
6387+ }
6388+ printk("\n");
6389+}
6390+#endif
6391+
6392 int fixup_exception(struct pt_regs *regs)
6393 {
6394 const struct exception_table_entry *fix;
6395@@ -192,8 +303,33 @@ good_area:
6396
6397 acc_type = parisc_acctyp(code,regs->iir);
6398
6399- if ((vma->vm_flags & acc_type) != acc_type)
6400+ if ((vma->vm_flags & acc_type) != acc_type) {
6401+
6402+#ifdef CONFIG_PAX_PAGEEXEC
6403+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
6404+ (address & ~3UL) == instruction_pointer(regs))
6405+ {
6406+ up_read(&mm->mmap_sem);
6407+ switch (pax_handle_fetch_fault(regs)) {
6408+
6409+#ifdef CONFIG_PAX_EMUPLT
6410+ case 3:
6411+ return;
6412+#endif
6413+
6414+#ifdef CONFIG_PAX_EMUTRAMP
6415+ case 2:
6416+ return;
6417+#endif
6418+
6419+ }
6420+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
6421+ do_group_exit(SIGKILL);
6422+ }
6423+#endif
6424+
6425 goto bad_area;
6426+ }
6427
6428 /*
6429 * If for any reason at all we couldn't handle the fault, make
6430diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
6431index e3b1d41..8e81edf 100644
6432--- a/arch/powerpc/include/asm/atomic.h
6433+++ b/arch/powerpc/include/asm/atomic.h
6434@@ -523,6 +523,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
6435 return t1;
6436 }
6437
6438+#define atomic64_read_unchecked(v) atomic64_read(v)
6439+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
6440+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
6441+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
6442+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
6443+#define atomic64_inc_unchecked(v) atomic64_inc(v)
6444+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
6445+#define atomic64_dec_unchecked(v) atomic64_dec(v)
6446+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
6447+
6448 #endif /* __powerpc64__ */
6449
6450 #endif /* __KERNEL__ */
6451diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
6452index 9e495c9..b6878e5 100644
6453--- a/arch/powerpc/include/asm/cache.h
6454+++ b/arch/powerpc/include/asm/cache.h
6455@@ -3,6 +3,7 @@
6456
6457 #ifdef __KERNEL__
6458
6459+#include <linux/const.h>
6460
6461 /* bytes per L1 cache line */
6462 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
6463@@ -22,7 +23,7 @@
6464 #define L1_CACHE_SHIFT 7
6465 #endif
6466
6467-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6468+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6469
6470 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6471
6472diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
6473index 6abf0a1..459d0f1 100644
6474--- a/arch/powerpc/include/asm/elf.h
6475+++ b/arch/powerpc/include/asm/elf.h
6476@@ -28,8 +28,19 @@
6477 the loader. We need to make sure that it is out of the way of the program
6478 that it will "exec", and that there is sufficient room for the brk. */
6479
6480-extern unsigned long randomize_et_dyn(unsigned long base);
6481-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
6482+#define ELF_ET_DYN_BASE (0x20000000)
6483+
6484+#ifdef CONFIG_PAX_ASLR
6485+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
6486+
6487+#ifdef __powerpc64__
6488+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
6489+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
6490+#else
6491+#define PAX_DELTA_MMAP_LEN 15
6492+#define PAX_DELTA_STACK_LEN 15
6493+#endif
6494+#endif
6495
6496 /*
6497 * Our registers are always unsigned longs, whether we're a 32 bit
6498@@ -124,10 +135,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6499 (0x7ff >> (PAGE_SHIFT - 12)) : \
6500 (0x3ffff >> (PAGE_SHIFT - 12)))
6501
6502-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
6503-#define arch_randomize_brk arch_randomize_brk
6504-
6505-
6506 #ifdef CONFIG_SPU_BASE
6507 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
6508 #define NT_SPU 1
6509diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
6510index 8196e9c..d83a9f3 100644
6511--- a/arch/powerpc/include/asm/exec.h
6512+++ b/arch/powerpc/include/asm/exec.h
6513@@ -4,6 +4,6 @@
6514 #ifndef _ASM_POWERPC_EXEC_H
6515 #define _ASM_POWERPC_EXEC_H
6516
6517-extern unsigned long arch_align_stack(unsigned long sp);
6518+#define arch_align_stack(x) ((x) & ~0xfUL)
6519
6520 #endif /* _ASM_POWERPC_EXEC_H */
6521diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
6522index 5acabbd..7ea14fa 100644
6523--- a/arch/powerpc/include/asm/kmap_types.h
6524+++ b/arch/powerpc/include/asm/kmap_types.h
6525@@ -10,7 +10,7 @@
6526 * 2 of the License, or (at your option) any later version.
6527 */
6528
6529-#define KM_TYPE_NR 16
6530+#define KM_TYPE_NR 17
6531
6532 #endif /* __KERNEL__ */
6533 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
6534diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
6535index 8565c25..2865190 100644
6536--- a/arch/powerpc/include/asm/mman.h
6537+++ b/arch/powerpc/include/asm/mman.h
6538@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
6539 }
6540 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
6541
6542-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
6543+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
6544 {
6545 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
6546 }
6547diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
6548index f072e97..b436dee 100644
6549--- a/arch/powerpc/include/asm/page.h
6550+++ b/arch/powerpc/include/asm/page.h
6551@@ -220,8 +220,9 @@ extern long long virt_phys_offset;
6552 * and needs to be executable. This means the whole heap ends
6553 * up being executable.
6554 */
6555-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
6556- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6557+#define VM_DATA_DEFAULT_FLAGS32 \
6558+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
6559+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6560
6561 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
6562 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6563@@ -249,6 +250,9 @@ extern long long virt_phys_offset;
6564 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
6565 #endif
6566
6567+#define ktla_ktva(addr) (addr)
6568+#define ktva_ktla(addr) (addr)
6569+
6570 /*
6571 * Use the top bit of the higher-level page table entries to indicate whether
6572 * the entries we point to contain hugepages. This works because we know that
6573diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
6574index cd915d6..c10cee8 100644
6575--- a/arch/powerpc/include/asm/page_64.h
6576+++ b/arch/powerpc/include/asm/page_64.h
6577@@ -154,15 +154,18 @@ do { \
6578 * stack by default, so in the absence of a PT_GNU_STACK program header
6579 * we turn execute permission off.
6580 */
6581-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
6582- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6583+#define VM_STACK_DEFAULT_FLAGS32 \
6584+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
6585+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6586
6587 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
6588 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6589
6590+#ifndef CONFIG_PAX_PAGEEXEC
6591 #define VM_STACK_DEFAULT_FLAGS \
6592 (is_32bit_task() ? \
6593 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
6594+#endif
6595
6596 #include <asm-generic/getorder.h>
6597
6598diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
6599index 292725c..f87ae14 100644
6600--- a/arch/powerpc/include/asm/pgalloc-64.h
6601+++ b/arch/powerpc/include/asm/pgalloc-64.h
6602@@ -50,6 +50,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
6603 #ifndef CONFIG_PPC_64K_PAGES
6604
6605 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
6606+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
6607
6608 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
6609 {
6610@@ -67,6 +68,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6611 pud_set(pud, (unsigned long)pmd);
6612 }
6613
6614+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6615+{
6616+ pud_populate(mm, pud, pmd);
6617+}
6618+
6619 #define pmd_populate(mm, pmd, pte_page) \
6620 pmd_populate_kernel(mm, pmd, page_address(pte_page))
6621 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
6622@@ -76,6 +82,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6623 #else /* CONFIG_PPC_64K_PAGES */
6624
6625 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
6626+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
6627
6628 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
6629 pte_t *pte)
6630diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
6631index a9cbd3b..3b67efa 100644
6632--- a/arch/powerpc/include/asm/pgtable.h
6633+++ b/arch/powerpc/include/asm/pgtable.h
6634@@ -2,6 +2,7 @@
6635 #define _ASM_POWERPC_PGTABLE_H
6636 #ifdef __KERNEL__
6637
6638+#include <linux/const.h>
6639 #ifndef __ASSEMBLY__
6640 #include <asm/processor.h> /* For TASK_SIZE */
6641 #include <asm/mmu.h>
6642diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
6643index 4aad413..85d86bf 100644
6644--- a/arch/powerpc/include/asm/pte-hash32.h
6645+++ b/arch/powerpc/include/asm/pte-hash32.h
6646@@ -21,6 +21,7 @@
6647 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
6648 #define _PAGE_USER 0x004 /* usermode access allowed */
6649 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
6650+#define _PAGE_EXEC _PAGE_GUARDED
6651 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
6652 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
6653 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
6654diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
6655index 3d5c9dc..62f8414 100644
6656--- a/arch/powerpc/include/asm/reg.h
6657+++ b/arch/powerpc/include/asm/reg.h
6658@@ -215,6 +215,7 @@
6659 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
6660 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
6661 #define DSISR_NOHPTE 0x40000000 /* no translation found */
6662+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
6663 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
6664 #define DSISR_ISSTORE 0x02000000 /* access was a store */
6665 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
6666diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
6667index 406b7b9..af63426 100644
6668--- a/arch/powerpc/include/asm/thread_info.h
6669+++ b/arch/powerpc/include/asm/thread_info.h
6670@@ -97,7 +97,6 @@ static inline struct thread_info *current_thread_info(void)
6671 #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
6672 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
6673 #define TIF_SINGLESTEP 8 /* singlestepping active */
6674-#define TIF_MEMDIE 9 /* is terminating due to OOM killer */
6675 #define TIF_SECCOMP 10 /* secure computing */
6676 #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
6677 #define TIF_NOERROR 12 /* Force successful syscall return */
6678@@ -106,6 +105,9 @@ static inline struct thread_info *current_thread_info(void)
6679 #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
6680 #define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation
6681 for stack store? */
6682+#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
6683+/* mask must be expressable within 16 bits to satisfy 'andi' instruction reqs */
6684+#define TIF_GRSEC_SETXID 9 /* update credentials on syscall entry/exit */
6685
6686 /* as above, but as bit values */
6687 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
6688@@ -124,8 +126,10 @@ static inline struct thread_info *current_thread_info(void)
6689 #define _TIF_UPROBE (1<<TIF_UPROBE)
6690 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6691 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
6692+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6693 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
6694- _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
6695+ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
6696+ _TIF_GRSEC_SETXID)
6697
6698 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
6699 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
6700diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
6701index 4db4959..aba5c41 100644
6702--- a/arch/powerpc/include/asm/uaccess.h
6703+++ b/arch/powerpc/include/asm/uaccess.h
6704@@ -318,52 +318,6 @@ do { \
6705 extern unsigned long __copy_tofrom_user(void __user *to,
6706 const void __user *from, unsigned long size);
6707
6708-#ifndef __powerpc64__
6709-
6710-static inline unsigned long copy_from_user(void *to,
6711- const void __user *from, unsigned long n)
6712-{
6713- unsigned long over;
6714-
6715- if (access_ok(VERIFY_READ, from, n))
6716- return __copy_tofrom_user((__force void __user *)to, from, n);
6717- if ((unsigned long)from < TASK_SIZE) {
6718- over = (unsigned long)from + n - TASK_SIZE;
6719- return __copy_tofrom_user((__force void __user *)to, from,
6720- n - over) + over;
6721- }
6722- return n;
6723-}
6724-
6725-static inline unsigned long copy_to_user(void __user *to,
6726- const void *from, unsigned long n)
6727-{
6728- unsigned long over;
6729-
6730- if (access_ok(VERIFY_WRITE, to, n))
6731- return __copy_tofrom_user(to, (__force void __user *)from, n);
6732- if ((unsigned long)to < TASK_SIZE) {
6733- over = (unsigned long)to + n - TASK_SIZE;
6734- return __copy_tofrom_user(to, (__force void __user *)from,
6735- n - over) + over;
6736- }
6737- return n;
6738-}
6739-
6740-#else /* __powerpc64__ */
6741-
6742-#define __copy_in_user(to, from, size) \
6743- __copy_tofrom_user((to), (from), (size))
6744-
6745-extern unsigned long copy_from_user(void *to, const void __user *from,
6746- unsigned long n);
6747-extern unsigned long copy_to_user(void __user *to, const void *from,
6748- unsigned long n);
6749-extern unsigned long copy_in_user(void __user *to, const void __user *from,
6750- unsigned long n);
6751-
6752-#endif /* __powerpc64__ */
6753-
6754 static inline unsigned long __copy_from_user_inatomic(void *to,
6755 const void __user *from, unsigned long n)
6756 {
6757@@ -387,6 +341,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
6758 if (ret == 0)
6759 return 0;
6760 }
6761+
6762+ if (!__builtin_constant_p(n))
6763+ check_object_size(to, n, false);
6764+
6765 return __copy_tofrom_user((__force void __user *)to, from, n);
6766 }
6767
6768@@ -413,6 +371,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
6769 if (ret == 0)
6770 return 0;
6771 }
6772+
6773+ if (!__builtin_constant_p(n))
6774+ check_object_size(from, n, true);
6775+
6776 return __copy_tofrom_user(to, (__force const void __user *)from, n);
6777 }
6778
6779@@ -430,6 +392,92 @@ static inline unsigned long __copy_to_user(void __user *to,
6780 return __copy_to_user_inatomic(to, from, size);
6781 }
6782
6783+#ifndef __powerpc64__
6784+
6785+static inline unsigned long __must_check copy_from_user(void *to,
6786+ const void __user *from, unsigned long n)
6787+{
6788+ unsigned long over;
6789+
6790+ if ((long)n < 0)
6791+ return n;
6792+
6793+ if (access_ok(VERIFY_READ, from, n)) {
6794+ if (!__builtin_constant_p(n))
6795+ check_object_size(to, n, false);
6796+ return __copy_tofrom_user((__force void __user *)to, from, n);
6797+ }
6798+ if ((unsigned long)from < TASK_SIZE) {
6799+ over = (unsigned long)from + n - TASK_SIZE;
6800+ if (!__builtin_constant_p(n - over))
6801+ check_object_size(to, n - over, false);
6802+ return __copy_tofrom_user((__force void __user *)to, from,
6803+ n - over) + over;
6804+ }
6805+ return n;
6806+}
6807+
6808+static inline unsigned long __must_check copy_to_user(void __user *to,
6809+ const void *from, unsigned long n)
6810+{
6811+ unsigned long over;
6812+
6813+ if ((long)n < 0)
6814+ return n;
6815+
6816+ if (access_ok(VERIFY_WRITE, to, n)) {
6817+ if (!__builtin_constant_p(n))
6818+ check_object_size(from, n, true);
6819+ return __copy_tofrom_user(to, (__force void __user *)from, n);
6820+ }
6821+ if ((unsigned long)to < TASK_SIZE) {
6822+ over = (unsigned long)to + n - TASK_SIZE;
6823+ if (!__builtin_constant_p(n))
6824+ check_object_size(from, n - over, true);
6825+ return __copy_tofrom_user(to, (__force void __user *)from,
6826+ n - over) + over;
6827+ }
6828+ return n;
6829+}
6830+
6831+#else /* __powerpc64__ */
6832+
6833+#define __copy_in_user(to, from, size) \
6834+ __copy_tofrom_user((to), (from), (size))
6835+
6836+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
6837+{
6838+ if ((long)n < 0 || n > INT_MAX)
6839+ return n;
6840+
6841+ if (!__builtin_constant_p(n))
6842+ check_object_size(to, n, false);
6843+
6844+ if (likely(access_ok(VERIFY_READ, from, n)))
6845+ n = __copy_from_user(to, from, n);
6846+ else
6847+ memset(to, 0, n);
6848+ return n;
6849+}
6850+
6851+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
6852+{
6853+ if ((long)n < 0 || n > INT_MAX)
6854+ return n;
6855+
6856+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
6857+ if (!__builtin_constant_p(n))
6858+ check_object_size(from, n, true);
6859+ n = __copy_to_user(to, from, n);
6860+ }
6861+ return n;
6862+}
6863+
6864+extern unsigned long copy_in_user(void __user *to, const void __user *from,
6865+ unsigned long n);
6866+
6867+#endif /* __powerpc64__ */
6868+
6869 extern unsigned long __clear_user(void __user *addr, unsigned long size);
6870
6871 static inline unsigned long clear_user(void __user *addr, unsigned long size)
6872diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
6873index 4684e33..acc4d19e 100644
6874--- a/arch/powerpc/kernel/exceptions-64e.S
6875+++ b/arch/powerpc/kernel/exceptions-64e.S
6876@@ -715,6 +715,7 @@ storage_fault_common:
6877 std r14,_DAR(r1)
6878 std r15,_DSISR(r1)
6879 addi r3,r1,STACK_FRAME_OVERHEAD
6880+ bl .save_nvgprs
6881 mr r4,r14
6882 mr r5,r15
6883 ld r14,PACA_EXGEN+EX_R14(r13)
6884@@ -723,8 +724,7 @@ storage_fault_common:
6885 cmpdi r3,0
6886 bne- 1f
6887 b .ret_from_except_lite
6888-1: bl .save_nvgprs
6889- mr r5,r3
6890+1: mr r5,r3
6891 addi r3,r1,STACK_FRAME_OVERHEAD
6892 ld r4,_DAR(r1)
6893 bl .bad_page_fault
6894diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
6895index 3684cbd..bc89eab 100644
6896--- a/arch/powerpc/kernel/exceptions-64s.S
6897+++ b/arch/powerpc/kernel/exceptions-64s.S
6898@@ -1206,10 +1206,10 @@ handle_page_fault:
6899 11: ld r4,_DAR(r1)
6900 ld r5,_DSISR(r1)
6901 addi r3,r1,STACK_FRAME_OVERHEAD
6902+ bl .save_nvgprs
6903 bl .do_page_fault
6904 cmpdi r3,0
6905 beq+ 12f
6906- bl .save_nvgprs
6907 mr r5,r3
6908 addi r3,r1,STACK_FRAME_OVERHEAD
6909 lwz r4,_DAR(r1)
6910diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
6911index 2e3200c..72095ce 100644
6912--- a/arch/powerpc/kernel/module_32.c
6913+++ b/arch/powerpc/kernel/module_32.c
6914@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
6915 me->arch.core_plt_section = i;
6916 }
6917 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
6918- printk("Module doesn't contain .plt or .init.plt sections.\n");
6919+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
6920 return -ENOEXEC;
6921 }
6922
6923@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
6924
6925 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
6926 /* Init, or core PLT? */
6927- if (location >= mod->module_core
6928- && location < mod->module_core + mod->core_size)
6929+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
6930+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
6931 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
6932- else
6933+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
6934+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
6935 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
6936+ else {
6937+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
6938+ return ~0UL;
6939+ }
6940
6941 /* Find this entry, or if that fails, the next avail. entry */
6942 while (entry->jump[0]) {
6943diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
6944index 8143067..21ae55b 100644
6945--- a/arch/powerpc/kernel/process.c
6946+++ b/arch/powerpc/kernel/process.c
6947@@ -680,8 +680,8 @@ void show_regs(struct pt_regs * regs)
6948 * Lookup NIP late so we have the best change of getting the
6949 * above info out without failing
6950 */
6951- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
6952- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
6953+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
6954+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
6955 #endif
6956 show_stack(current, (unsigned long *) regs->gpr[1]);
6957 if (!user_mode(regs))
6958@@ -1129,10 +1129,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
6959 newsp = stack[0];
6960 ip = stack[STACK_FRAME_LR_SAVE];
6961 if (!firstframe || ip != lr) {
6962- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
6963+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
6964 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6965 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
6966- printk(" (%pS)",
6967+ printk(" (%pA)",
6968 (void *)current->ret_stack[curr_frame].ret);
6969 curr_frame--;
6970 }
6971@@ -1152,7 +1152,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
6972 struct pt_regs *regs = (struct pt_regs *)
6973 (sp + STACK_FRAME_OVERHEAD);
6974 lr = regs->link;
6975- printk("--- Exception: %lx at %pS\n LR = %pS\n",
6976+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
6977 regs->trap, (void *)regs->nip, (void *)lr);
6978 firstframe = 1;
6979 }
6980@@ -1194,58 +1194,3 @@ void __ppc64_runlatch_off(void)
6981 mtspr(SPRN_CTRLT, ctrl);
6982 }
6983 #endif /* CONFIG_PPC64 */
6984-
6985-unsigned long arch_align_stack(unsigned long sp)
6986-{
6987- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6988- sp -= get_random_int() & ~PAGE_MASK;
6989- return sp & ~0xf;
6990-}
6991-
6992-static inline unsigned long brk_rnd(void)
6993-{
6994- unsigned long rnd = 0;
6995-
6996- /* 8MB for 32bit, 1GB for 64bit */
6997- if (is_32bit_task())
6998- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
6999- else
7000- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
7001-
7002- return rnd << PAGE_SHIFT;
7003-}
7004-
7005-unsigned long arch_randomize_brk(struct mm_struct *mm)
7006-{
7007- unsigned long base = mm->brk;
7008- unsigned long ret;
7009-
7010-#ifdef CONFIG_PPC_STD_MMU_64
7011- /*
7012- * If we are using 1TB segments and we are allowed to randomise
7013- * the heap, we can put it above 1TB so it is backed by a 1TB
7014- * segment. Otherwise the heap will be in the bottom 1TB
7015- * which always uses 256MB segments and this may result in a
7016- * performance penalty.
7017- */
7018- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
7019- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
7020-#endif
7021-
7022- ret = PAGE_ALIGN(base + brk_rnd());
7023-
7024- if (ret < mm->brk)
7025- return mm->brk;
7026-
7027- return ret;
7028-}
7029-
7030-unsigned long randomize_et_dyn(unsigned long base)
7031-{
7032- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
7033-
7034- if (ret < base)
7035- return base;
7036-
7037- return ret;
7038-}
7039diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
7040index c497000..8fde506 100644
7041--- a/arch/powerpc/kernel/ptrace.c
7042+++ b/arch/powerpc/kernel/ptrace.c
7043@@ -1737,6 +1737,10 @@ long arch_ptrace(struct task_struct *child, long request,
7044 return ret;
7045 }
7046
7047+#ifdef CONFIG_GRKERNSEC_SETXID
7048+extern void gr_delayed_cred_worker(void);
7049+#endif
7050+
7051 /*
7052 * We must return the syscall number to actually look up in the table.
7053 * This can be -1L to skip running any syscall at all.
7054@@ -1747,6 +1751,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
7055
7056 secure_computing_strict(regs->gpr[0]);
7057
7058+#ifdef CONFIG_GRKERNSEC_SETXID
7059+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
7060+ gr_delayed_cred_worker();
7061+#endif
7062+
7063 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
7064 tracehook_report_syscall_entry(regs))
7065 /*
7066@@ -1781,6 +1790,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
7067 {
7068 int step;
7069
7070+#ifdef CONFIG_GRKERNSEC_SETXID
7071+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
7072+ gr_delayed_cred_worker();
7073+#endif
7074+
7075 audit_syscall_exit(regs);
7076
7077 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
7078diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
7079index 804e323..79181c1 100644
7080--- a/arch/powerpc/kernel/signal_32.c
7081+++ b/arch/powerpc/kernel/signal_32.c
7082@@ -851,7 +851,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
7083 /* Save user registers on the stack */
7084 frame = &rt_sf->uc.uc_mcontext;
7085 addr = frame;
7086- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
7087+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
7088 if (save_user_regs(regs, frame, 0, 1))
7089 goto badframe;
7090 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
7091diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
7092index 1ca045d..139c3f7 100644
7093--- a/arch/powerpc/kernel/signal_64.c
7094+++ b/arch/powerpc/kernel/signal_64.c
7095@@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
7096 current->thread.fpscr.val = 0;
7097
7098 /* Set up to return from userspace. */
7099- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
7100+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
7101 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
7102 } else {
7103 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
7104diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
7105index 3ce1f86..c30e629 100644
7106--- a/arch/powerpc/kernel/sysfs.c
7107+++ b/arch/powerpc/kernel/sysfs.c
7108@@ -522,7 +522,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
7109 return NOTIFY_OK;
7110 }
7111
7112-static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
7113+static struct notifier_block sysfs_cpu_nb = {
7114 .notifier_call = sysfs_cpu_notify,
7115 };
7116
7117diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
7118index 3251840..3f7c77a 100644
7119--- a/arch/powerpc/kernel/traps.c
7120+++ b/arch/powerpc/kernel/traps.c
7121@@ -133,6 +133,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
7122 return flags;
7123 }
7124
7125+extern void gr_handle_kernel_exploit(void);
7126+
7127 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
7128 int signr)
7129 {
7130@@ -182,6 +184,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
7131 panic("Fatal exception in interrupt");
7132 if (panic_on_oops)
7133 panic("Fatal exception");
7134+
7135+ gr_handle_kernel_exploit();
7136+
7137 do_exit(signr);
7138 }
7139
7140diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
7141index 1b2076f..835e4be 100644
7142--- a/arch/powerpc/kernel/vdso.c
7143+++ b/arch/powerpc/kernel/vdso.c
7144@@ -34,6 +34,7 @@
7145 #include <asm/firmware.h>
7146 #include <asm/vdso.h>
7147 #include <asm/vdso_datapage.h>
7148+#include <asm/mman.h>
7149
7150 #include "setup.h"
7151
7152@@ -218,7 +219,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
7153 vdso_base = VDSO32_MBASE;
7154 #endif
7155
7156- current->mm->context.vdso_base = 0;
7157+ current->mm->context.vdso_base = ~0UL;
7158
7159 /* vDSO has a problem and was disabled, just don't "enable" it for the
7160 * process
7161@@ -238,7 +239,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
7162 vdso_base = get_unmapped_area(NULL, vdso_base,
7163 (vdso_pages << PAGE_SHIFT) +
7164 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
7165- 0, 0);
7166+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
7167 if (IS_ERR_VALUE(vdso_base)) {
7168 rc = vdso_base;
7169 goto fail_mmapsem;
7170diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
7171index 5eea6f3..5d10396 100644
7172--- a/arch/powerpc/lib/usercopy_64.c
7173+++ b/arch/powerpc/lib/usercopy_64.c
7174@@ -9,22 +9,6 @@
7175 #include <linux/module.h>
7176 #include <asm/uaccess.h>
7177
7178-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
7179-{
7180- if (likely(access_ok(VERIFY_READ, from, n)))
7181- n = __copy_from_user(to, from, n);
7182- else
7183- memset(to, 0, n);
7184- return n;
7185-}
7186-
7187-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
7188-{
7189- if (likely(access_ok(VERIFY_WRITE, to, n)))
7190- n = __copy_to_user(to, from, n);
7191- return n;
7192-}
7193-
7194 unsigned long copy_in_user(void __user *to, const void __user *from,
7195 unsigned long n)
7196 {
7197@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
7198 return n;
7199 }
7200
7201-EXPORT_SYMBOL(copy_from_user);
7202-EXPORT_SYMBOL(copy_to_user);
7203 EXPORT_SYMBOL(copy_in_user);
7204
7205diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
7206index 3a8489a..6a63b3b 100644
7207--- a/arch/powerpc/mm/fault.c
7208+++ b/arch/powerpc/mm/fault.c
7209@@ -32,6 +32,10 @@
7210 #include <linux/perf_event.h>
7211 #include <linux/magic.h>
7212 #include <linux/ratelimit.h>
7213+#include <linux/slab.h>
7214+#include <linux/pagemap.h>
7215+#include <linux/compiler.h>
7216+#include <linux/unistd.h>
7217
7218 #include <asm/firmware.h>
7219 #include <asm/page.h>
7220@@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
7221 }
7222 #endif
7223
7224+#ifdef CONFIG_PAX_PAGEEXEC
7225+/*
7226+ * PaX: decide what to do with offenders (regs->nip = fault address)
7227+ *
7228+ * returns 1 when task should be killed
7229+ */
7230+static int pax_handle_fetch_fault(struct pt_regs *regs)
7231+{
7232+ return 1;
7233+}
7234+
7235+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7236+{
7237+ unsigned long i;
7238+
7239+ printk(KERN_ERR "PAX: bytes at PC: ");
7240+ for (i = 0; i < 5; i++) {
7241+ unsigned int c;
7242+ if (get_user(c, (unsigned int __user *)pc+i))
7243+ printk(KERN_CONT "???????? ");
7244+ else
7245+ printk(KERN_CONT "%08x ", c);
7246+ }
7247+ printk("\n");
7248+}
7249+#endif
7250+
7251 /*
7252 * Check whether the instruction at regs->nip is a store using
7253 * an update addressing form which will update r1.
7254@@ -213,7 +244,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
7255 * indicate errors in DSISR but can validly be set in SRR1.
7256 */
7257 if (trap == 0x400)
7258- error_code &= 0x48200000;
7259+ error_code &= 0x58200000;
7260 else
7261 is_write = error_code & DSISR_ISSTORE;
7262 #else
7263@@ -364,7 +395,7 @@ good_area:
7264 * "undefined". Of those that can be set, this is the only
7265 * one which seems bad.
7266 */
7267- if (error_code & 0x10000000)
7268+ if (error_code & DSISR_GUARDED)
7269 /* Guarded storage error. */
7270 goto bad_area;
7271 #endif /* CONFIG_8xx */
7272@@ -379,7 +410,7 @@ good_area:
7273 * processors use the same I/D cache coherency mechanism
7274 * as embedded.
7275 */
7276- if (error_code & DSISR_PROTFAULT)
7277+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
7278 goto bad_area;
7279 #endif /* CONFIG_PPC_STD_MMU */
7280
7281@@ -462,6 +493,23 @@ bad_area:
7282 bad_area_nosemaphore:
7283 /* User mode accesses cause a SIGSEGV */
7284 if (user_mode(regs)) {
7285+
7286+#ifdef CONFIG_PAX_PAGEEXEC
7287+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
7288+#ifdef CONFIG_PPC_STD_MMU
7289+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
7290+#else
7291+ if (is_exec && regs->nip == address) {
7292+#endif
7293+ switch (pax_handle_fetch_fault(regs)) {
7294+ }
7295+
7296+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
7297+ do_group_exit(SIGKILL);
7298+ }
7299+ }
7300+#endif
7301+
7302 _exception(SIGSEGV, regs, code, address);
7303 return 0;
7304 }
7305diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
7306index 67a42ed..cd463e0 100644
7307--- a/arch/powerpc/mm/mmap_64.c
7308+++ b/arch/powerpc/mm/mmap_64.c
7309@@ -57,6 +57,10 @@ static unsigned long mmap_rnd(void)
7310 {
7311 unsigned long rnd = 0;
7312
7313+#ifdef CONFIG_PAX_RANDMMAP
7314+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7315+#endif
7316+
7317 if (current->flags & PF_RANDOMIZE) {
7318 /* 8MB for 32bit, 1GB for 64bit */
7319 if (is_32bit_task())
7320@@ -91,10 +95,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7321 */
7322 if (mmap_is_legacy()) {
7323 mm->mmap_base = TASK_UNMAPPED_BASE;
7324+
7325+#ifdef CONFIG_PAX_RANDMMAP
7326+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7327+ mm->mmap_base += mm->delta_mmap;
7328+#endif
7329+
7330 mm->get_unmapped_area = arch_get_unmapped_area;
7331 mm->unmap_area = arch_unmap_area;
7332 } else {
7333 mm->mmap_base = mmap_base();
7334+
7335+#ifdef CONFIG_PAX_RANDMMAP
7336+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7337+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7338+#endif
7339+
7340 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7341 mm->unmap_area = arch_unmap_area_topdown;
7342 }
7343diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
7344index e779642..e5bb889 100644
7345--- a/arch/powerpc/mm/mmu_context_nohash.c
7346+++ b/arch/powerpc/mm/mmu_context_nohash.c
7347@@ -363,7 +363,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
7348 return NOTIFY_OK;
7349 }
7350
7351-static struct notifier_block __cpuinitdata mmu_context_cpu_nb = {
7352+static struct notifier_block mmu_context_cpu_nb = {
7353 .notifier_call = mmu_context_cpu_notify,
7354 };
7355
7356diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
7357index bba87ca..c346a33 100644
7358--- a/arch/powerpc/mm/numa.c
7359+++ b/arch/powerpc/mm/numa.c
7360@@ -932,7 +932,7 @@ static void __init *careful_zallocation(int nid, unsigned long size,
7361 return ret;
7362 }
7363
7364-static struct notifier_block __cpuinitdata ppc64_numa_nb = {
7365+static struct notifier_block ppc64_numa_nb = {
7366 .notifier_call = cpu_numa_callback,
7367 .priority = 1 /* Must run before sched domains notifier. */
7368 };
7369diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
7370index cf9dada..241529f 100644
7371--- a/arch/powerpc/mm/slice.c
7372+++ b/arch/powerpc/mm/slice.c
7373@@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
7374 if ((mm->task_size - len) < addr)
7375 return 0;
7376 vma = find_vma(mm, addr);
7377- return (!vma || (addr + len) <= vma->vm_start);
7378+ return check_heap_stack_gap(vma, addr, len, 0);
7379 }
7380
7381 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
7382@@ -272,7 +272,7 @@ full_search:
7383 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
7384 continue;
7385 }
7386- if (!vma || addr + len <= vma->vm_start) {
7387+ if (check_heap_stack_gap(vma, addr, len, 0)) {
7388 /*
7389 * Remember the place where we stopped the search:
7390 */
7391@@ -329,10 +329,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
7392 }
7393 }
7394
7395- addr = mm->mmap_base;
7396- while (addr > len) {
7397+ if (mm->mmap_base < len)
7398+ addr = -ENOMEM;
7399+ else
7400+ addr = mm->mmap_base - len;
7401+
7402+ while (!IS_ERR_VALUE(addr)) {
7403 /* Go down by chunk size */
7404- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
7405+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
7406
7407 /* Check for hit with different page size */
7408 mask = slice_range_to_mask(addr, len);
7409@@ -352,7 +356,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
7410 * return with success:
7411 */
7412 vma = find_vma(mm, addr);
7413- if (!vma || (addr + len) <= vma->vm_start) {
7414+ if (check_heap_stack_gap(vma, addr, len, 0)) {
7415 /* remember the address as a hint for next time */
7416 if (use_cache)
7417 mm->free_area_cache = addr;
7418@@ -364,7 +368,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
7419 mm->cached_hole_size = vma->vm_start - addr;
7420
7421 /* try just below the current vma->vm_start */
7422- addr = vma->vm_start;
7423+ addr = skip_heap_stack_gap(vma, len, 0);
7424 }
7425
7426 /*
7427@@ -442,6 +446,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
7428 if (fixed && addr > (mm->task_size - len))
7429 return -EINVAL;
7430
7431+#ifdef CONFIG_PAX_RANDMMAP
7432+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
7433+ addr = 0;
7434+#endif
7435+
7436 /* If hint, make sure it matches our alignment restrictions */
7437 if (!fixed && addr) {
7438 addr = _ALIGN_UP(addr, 1ul << pshift);
7439diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
7440index 0cfece4..2f1a0e5 100644
7441--- a/arch/powerpc/platforms/cell/spufs/file.c
7442+++ b/arch/powerpc/platforms/cell/spufs/file.c
7443@@ -281,9 +281,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
7444 return VM_FAULT_NOPAGE;
7445 }
7446
7447-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
7448+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
7449 unsigned long address,
7450- void *buf, int len, int write)
7451+ void *buf, size_t len, int write)
7452 {
7453 struct spu_context *ctx = vma->vm_file->private_data;
7454 unsigned long offset = address - vma->vm_start;
7455diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
7456index bdb738a..49c9f95 100644
7457--- a/arch/powerpc/platforms/powermac/smp.c
7458+++ b/arch/powerpc/platforms/powermac/smp.c
7459@@ -885,7 +885,7 @@ static int smp_core99_cpu_notify(struct notifier_block *self,
7460 return NOTIFY_OK;
7461 }
7462
7463-static struct notifier_block __cpuinitdata smp_core99_cpu_nb = {
7464+static struct notifier_block smp_core99_cpu_nb = {
7465 .notifier_call = smp_core99_cpu_notify,
7466 };
7467 #endif /* CONFIG_HOTPLUG_CPU */
7468diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
7469index c797832..ce575c8 100644
7470--- a/arch/s390/include/asm/atomic.h
7471+++ b/arch/s390/include/asm/atomic.h
7472@@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
7473 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
7474 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
7475
7476+#define atomic64_read_unchecked(v) atomic64_read(v)
7477+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7478+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7479+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7480+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7481+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7482+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7483+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7484+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7485+
7486 #define smp_mb__before_atomic_dec() smp_mb()
7487 #define smp_mb__after_atomic_dec() smp_mb()
7488 #define smp_mb__before_atomic_inc() smp_mb()
7489diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
7490index 4d7ccac..d03d0ad 100644
7491--- a/arch/s390/include/asm/cache.h
7492+++ b/arch/s390/include/asm/cache.h
7493@@ -9,8 +9,10 @@
7494 #ifndef __ARCH_S390_CACHE_H
7495 #define __ARCH_S390_CACHE_H
7496
7497-#define L1_CACHE_BYTES 256
7498+#include <linux/const.h>
7499+
7500 #define L1_CACHE_SHIFT 8
7501+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7502 #define NET_SKB_PAD 32
7503
7504 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
7505diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
7506index 178ff96..8c93bd1 100644
7507--- a/arch/s390/include/asm/elf.h
7508+++ b/arch/s390/include/asm/elf.h
7509@@ -160,8 +160,14 @@ extern unsigned int vdso_enabled;
7510 the loader. We need to make sure that it is out of the way of the program
7511 that it will "exec", and that there is sufficient room for the brk. */
7512
7513-extern unsigned long randomize_et_dyn(unsigned long base);
7514-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
7515+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
7516+
7517+#ifdef CONFIG_PAX_ASLR
7518+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
7519+
7520+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
7521+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
7522+#endif
7523
7524 /* This yields a mask that user programs can use to figure out what
7525 instruction set this CPU supports. */
7526@@ -210,9 +216,6 @@ struct linux_binprm;
7527 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
7528 int arch_setup_additional_pages(struct linux_binprm *, int);
7529
7530-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7531-#define arch_randomize_brk arch_randomize_brk
7532-
7533 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
7534
7535 #endif
7536diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
7537index c4a93d6..4d2a9b4 100644
7538--- a/arch/s390/include/asm/exec.h
7539+++ b/arch/s390/include/asm/exec.h
7540@@ -7,6 +7,6 @@
7541 #ifndef __ASM_EXEC_H
7542 #define __ASM_EXEC_H
7543
7544-extern unsigned long arch_align_stack(unsigned long sp);
7545+#define arch_align_stack(x) ((x) & ~0xfUL)
7546
7547 #endif /* __ASM_EXEC_H */
7548diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
7549index 34268df..ea97318 100644
7550--- a/arch/s390/include/asm/uaccess.h
7551+++ b/arch/s390/include/asm/uaccess.h
7552@@ -252,6 +252,10 @@ static inline unsigned long __must_check
7553 copy_to_user(void __user *to, const void *from, unsigned long n)
7554 {
7555 might_fault();
7556+
7557+ if ((long)n < 0)
7558+ return n;
7559+
7560 if (access_ok(VERIFY_WRITE, to, n))
7561 n = __copy_to_user(to, from, n);
7562 return n;
7563@@ -277,6 +281,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
7564 static inline unsigned long __must_check
7565 __copy_from_user(void *to, const void __user *from, unsigned long n)
7566 {
7567+ if ((long)n < 0)
7568+ return n;
7569+
7570 if (__builtin_constant_p(n) && (n <= 256))
7571 return uaccess.copy_from_user_small(n, from, to);
7572 else
7573@@ -308,10 +315,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
7574 static inline unsigned long __must_check
7575 copy_from_user(void *to, const void __user *from, unsigned long n)
7576 {
7577- unsigned int sz = __compiletime_object_size(to);
7578+ size_t sz = __compiletime_object_size(to);
7579
7580 might_fault();
7581- if (unlikely(sz != -1 && sz < n)) {
7582+
7583+ if ((long)n < 0)
7584+ return n;
7585+
7586+ if (unlikely(sz != (size_t)-1 && sz < n)) {
7587 copy_from_user_overflow();
7588 return n;
7589 }
7590diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
7591index 4610dea..cf0af21 100644
7592--- a/arch/s390/kernel/module.c
7593+++ b/arch/s390/kernel/module.c
7594@@ -171,11 +171,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
7595
7596 /* Increase core size by size of got & plt and set start
7597 offsets for got and plt. */
7598- me->core_size = ALIGN(me->core_size, 4);
7599- me->arch.got_offset = me->core_size;
7600- me->core_size += me->arch.got_size;
7601- me->arch.plt_offset = me->core_size;
7602- me->core_size += me->arch.plt_size;
7603+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
7604+ me->arch.got_offset = me->core_size_rw;
7605+ me->core_size_rw += me->arch.got_size;
7606+ me->arch.plt_offset = me->core_size_rx;
7607+ me->core_size_rx += me->arch.plt_size;
7608 return 0;
7609 }
7610
7611@@ -252,7 +252,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7612 if (info->got_initialized == 0) {
7613 Elf_Addr *gotent;
7614
7615- gotent = me->module_core + me->arch.got_offset +
7616+ gotent = me->module_core_rw + me->arch.got_offset +
7617 info->got_offset;
7618 *gotent = val;
7619 info->got_initialized = 1;
7620@@ -276,7 +276,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7621 else if (r_type == R_390_GOTENT ||
7622 r_type == R_390_GOTPLTENT)
7623 *(unsigned int *) loc =
7624- (val + (Elf_Addr) me->module_core - loc) >> 1;
7625+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
7626 else if (r_type == R_390_GOT64 ||
7627 r_type == R_390_GOTPLT64)
7628 *(unsigned long *) loc = val;
7629@@ -290,7 +290,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7630 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
7631 if (info->plt_initialized == 0) {
7632 unsigned int *ip;
7633- ip = me->module_core + me->arch.plt_offset +
7634+ ip = me->module_core_rx + me->arch.plt_offset +
7635 info->plt_offset;
7636 #ifndef CONFIG_64BIT
7637 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
7638@@ -315,7 +315,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7639 val - loc + 0xffffUL < 0x1ffffeUL) ||
7640 (r_type == R_390_PLT32DBL &&
7641 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
7642- val = (Elf_Addr) me->module_core +
7643+ val = (Elf_Addr) me->module_core_rx +
7644 me->arch.plt_offset +
7645 info->plt_offset;
7646 val += rela->r_addend - loc;
7647@@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7648 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
7649 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
7650 val = val + rela->r_addend -
7651- ((Elf_Addr) me->module_core + me->arch.got_offset);
7652+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
7653 if (r_type == R_390_GOTOFF16)
7654 *(unsigned short *) loc = val;
7655 else if (r_type == R_390_GOTOFF32)
7656@@ -347,7 +347,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7657 break;
7658 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
7659 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
7660- val = (Elf_Addr) me->module_core + me->arch.got_offset +
7661+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
7662 rela->r_addend - loc;
7663 if (r_type == R_390_GOTPC)
7664 *(unsigned int *) loc = val;
7665diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
7666index 536d645..4a5bd9e 100644
7667--- a/arch/s390/kernel/process.c
7668+++ b/arch/s390/kernel/process.c
7669@@ -250,39 +250,3 @@ unsigned long get_wchan(struct task_struct *p)
7670 }
7671 return 0;
7672 }
7673-
7674-unsigned long arch_align_stack(unsigned long sp)
7675-{
7676- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7677- sp -= get_random_int() & ~PAGE_MASK;
7678- return sp & ~0xf;
7679-}
7680-
7681-static inline unsigned long brk_rnd(void)
7682-{
7683- /* 8MB for 32bit, 1GB for 64bit */
7684- if (is_32bit_task())
7685- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
7686- else
7687- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
7688-}
7689-
7690-unsigned long arch_randomize_brk(struct mm_struct *mm)
7691-{
7692- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
7693-
7694- if (ret < mm->brk)
7695- return mm->brk;
7696- return ret;
7697-}
7698-
7699-unsigned long randomize_et_dyn(unsigned long base)
7700-{
7701- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
7702-
7703- if (!(current->flags & PF_RANDOMIZE))
7704- return base;
7705- if (ret < base)
7706- return base;
7707- return ret;
7708-}
7709diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
7710index c59a5ef..3fae59c 100644
7711--- a/arch/s390/mm/mmap.c
7712+++ b/arch/s390/mm/mmap.c
7713@@ -90,10 +90,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7714 */
7715 if (mmap_is_legacy()) {
7716 mm->mmap_base = TASK_UNMAPPED_BASE;
7717+
7718+#ifdef CONFIG_PAX_RANDMMAP
7719+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7720+ mm->mmap_base += mm->delta_mmap;
7721+#endif
7722+
7723 mm->get_unmapped_area = arch_get_unmapped_area;
7724 mm->unmap_area = arch_unmap_area;
7725 } else {
7726 mm->mmap_base = mmap_base();
7727+
7728+#ifdef CONFIG_PAX_RANDMMAP
7729+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7730+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7731+#endif
7732+
7733 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7734 mm->unmap_area = arch_unmap_area_topdown;
7735 }
7736@@ -172,10 +184,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7737 */
7738 if (mmap_is_legacy()) {
7739 mm->mmap_base = TASK_UNMAPPED_BASE;
7740+
7741+#ifdef CONFIG_PAX_RANDMMAP
7742+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7743+ mm->mmap_base += mm->delta_mmap;
7744+#endif
7745+
7746 mm->get_unmapped_area = s390_get_unmapped_area;
7747 mm->unmap_area = arch_unmap_area;
7748 } else {
7749 mm->mmap_base = mmap_base();
7750+
7751+#ifdef CONFIG_PAX_RANDMMAP
7752+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7753+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7754+#endif
7755+
7756 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
7757 mm->unmap_area = arch_unmap_area_topdown;
7758 }
7759diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
7760index ae3d59f..f65f075 100644
7761--- a/arch/score/include/asm/cache.h
7762+++ b/arch/score/include/asm/cache.h
7763@@ -1,7 +1,9 @@
7764 #ifndef _ASM_SCORE_CACHE_H
7765 #define _ASM_SCORE_CACHE_H
7766
7767+#include <linux/const.h>
7768+
7769 #define L1_CACHE_SHIFT 4
7770-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7771+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7772
7773 #endif /* _ASM_SCORE_CACHE_H */
7774diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
7775index f9f3cd5..58ff438 100644
7776--- a/arch/score/include/asm/exec.h
7777+++ b/arch/score/include/asm/exec.h
7778@@ -1,6 +1,6 @@
7779 #ifndef _ASM_SCORE_EXEC_H
7780 #define _ASM_SCORE_EXEC_H
7781
7782-extern unsigned long arch_align_stack(unsigned long sp);
7783+#define arch_align_stack(x) (x)
7784
7785 #endif /* _ASM_SCORE_EXEC_H */
7786diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
7787index 7956846..5f37677 100644
7788--- a/arch/score/kernel/process.c
7789+++ b/arch/score/kernel/process.c
7790@@ -134,8 +134,3 @@ unsigned long get_wchan(struct task_struct *task)
7791
7792 return task_pt_regs(task)->cp0_epc;
7793 }
7794-
7795-unsigned long arch_align_stack(unsigned long sp)
7796-{
7797- return sp;
7798-}
7799diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
7800index ef9e555..331bd29 100644
7801--- a/arch/sh/include/asm/cache.h
7802+++ b/arch/sh/include/asm/cache.h
7803@@ -9,10 +9,11 @@
7804 #define __ASM_SH_CACHE_H
7805 #ifdef __KERNEL__
7806
7807+#include <linux/const.h>
7808 #include <linux/init.h>
7809 #include <cpu/cache.h>
7810
7811-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7812+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7813
7814 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
7815
7816diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
7817index 03f2b55..b027032 100644
7818--- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
7819+++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
7820@@ -143,7 +143,7 @@ shx3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
7821 return NOTIFY_OK;
7822 }
7823
7824-static struct notifier_block __cpuinitdata shx3_cpu_notifier = {
7825+static struct notifier_block shx3_cpu_notifier = {
7826 .notifier_call = shx3_cpu_callback,
7827 };
7828
7829diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
7830index 6777177..cb5e44f 100644
7831--- a/arch/sh/mm/mmap.c
7832+++ b/arch/sh/mm/mmap.c
7833@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7834 struct mm_struct *mm = current->mm;
7835 struct vm_area_struct *vma;
7836 int do_colour_align;
7837+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
7838 struct vm_unmapped_area_info info;
7839
7840 if (flags & MAP_FIXED) {
7841@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7842 if (filp || (flags & MAP_SHARED))
7843 do_colour_align = 1;
7844
7845+#ifdef CONFIG_PAX_RANDMMAP
7846+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7847+#endif
7848+
7849 if (addr) {
7850 if (do_colour_align)
7851 addr = COLOUR_ALIGN(addr, pgoff);
7852@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7853 addr = PAGE_ALIGN(addr);
7854
7855 vma = find_vma(mm, addr);
7856- if (TASK_SIZE - len >= addr &&
7857- (!vma || addr + len <= vma->vm_start))
7858+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
7859 return addr;
7860 }
7861
7862 info.flags = 0;
7863 info.length = len;
7864- info.low_limit = TASK_UNMAPPED_BASE;
7865+ info.low_limit = mm->mmap_base;
7866 info.high_limit = TASK_SIZE;
7867 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
7868 info.align_offset = pgoff << PAGE_SHIFT;
7869@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7870 struct mm_struct *mm = current->mm;
7871 unsigned long addr = addr0;
7872 int do_colour_align;
7873+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
7874 struct vm_unmapped_area_info info;
7875
7876 if (flags & MAP_FIXED) {
7877@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7878 if (filp || (flags & MAP_SHARED))
7879 do_colour_align = 1;
7880
7881+#ifdef CONFIG_PAX_RANDMMAP
7882+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7883+#endif
7884+
7885 /* requesting a specific address */
7886 if (addr) {
7887 if (do_colour_align)
7888@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7889 addr = PAGE_ALIGN(addr);
7890
7891 vma = find_vma(mm, addr);
7892- if (TASK_SIZE - len >= addr &&
7893- (!vma || addr + len <= vma->vm_start))
7894+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
7895 return addr;
7896 }
7897
7898@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7899 VM_BUG_ON(addr != -ENOMEM);
7900 info.flags = 0;
7901 info.low_limit = TASK_UNMAPPED_BASE;
7902+
7903+#ifdef CONFIG_PAX_RANDMMAP
7904+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7905+ info.low_limit += mm->delta_mmap;
7906+#endif
7907+
7908 info.high_limit = TASK_SIZE;
7909 addr = vm_unmapped_area(&info);
7910 }
7911diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
7912index be56a24..443328f 100644
7913--- a/arch/sparc/include/asm/atomic_64.h
7914+++ b/arch/sparc/include/asm/atomic_64.h
7915@@ -14,18 +14,40 @@
7916 #define ATOMIC64_INIT(i) { (i) }
7917
7918 #define atomic_read(v) (*(volatile int *)&(v)->counter)
7919+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7920+{
7921+ return v->counter;
7922+}
7923 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
7924+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7925+{
7926+ return v->counter;
7927+}
7928
7929 #define atomic_set(v, i) (((v)->counter) = i)
7930+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7931+{
7932+ v->counter = i;
7933+}
7934 #define atomic64_set(v, i) (((v)->counter) = i)
7935+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7936+{
7937+ v->counter = i;
7938+}
7939
7940 extern void atomic_add(int, atomic_t *);
7941+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
7942 extern void atomic64_add(long, atomic64_t *);
7943+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
7944 extern void atomic_sub(int, atomic_t *);
7945+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
7946 extern void atomic64_sub(long, atomic64_t *);
7947+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
7948
7949 extern int atomic_add_ret(int, atomic_t *);
7950+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
7951 extern long atomic64_add_ret(long, atomic64_t *);
7952+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
7953 extern int atomic_sub_ret(int, atomic_t *);
7954 extern long atomic64_sub_ret(long, atomic64_t *);
7955
7956@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
7957 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
7958
7959 #define atomic_inc_return(v) atomic_add_ret(1, v)
7960+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7961+{
7962+ return atomic_add_ret_unchecked(1, v);
7963+}
7964 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
7965+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7966+{
7967+ return atomic64_add_ret_unchecked(1, v);
7968+}
7969
7970 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
7971 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
7972
7973 #define atomic_add_return(i, v) atomic_add_ret(i, v)
7974+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7975+{
7976+ return atomic_add_ret_unchecked(i, v);
7977+}
7978 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
7979+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
7980+{
7981+ return atomic64_add_ret_unchecked(i, v);
7982+}
7983
7984 /*
7985 * atomic_inc_and_test - increment and test
7986@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
7987 * other cases.
7988 */
7989 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
7990+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7991+{
7992+ return atomic_inc_return_unchecked(v) == 0;
7993+}
7994 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
7995
7996 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
7997@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
7998 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
7999
8000 #define atomic_inc(v) atomic_add(1, v)
8001+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
8002+{
8003+ atomic_add_unchecked(1, v);
8004+}
8005 #define atomic64_inc(v) atomic64_add(1, v)
8006+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
8007+{
8008+ atomic64_add_unchecked(1, v);
8009+}
8010
8011 #define atomic_dec(v) atomic_sub(1, v)
8012+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
8013+{
8014+ atomic_sub_unchecked(1, v);
8015+}
8016 #define atomic64_dec(v) atomic64_sub(1, v)
8017+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
8018+{
8019+ atomic64_sub_unchecked(1, v);
8020+}
8021
8022 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
8023 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
8024
8025 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
8026+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8027+{
8028+ return cmpxchg(&v->counter, old, new);
8029+}
8030 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
8031+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8032+{
8033+ return xchg(&v->counter, new);
8034+}
8035
8036 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
8037 {
8038- int c, old;
8039+ int c, old, new;
8040 c = atomic_read(v);
8041 for (;;) {
8042- if (unlikely(c == (u)))
8043+ if (unlikely(c == u))
8044 break;
8045- old = atomic_cmpxchg((v), c, c + (a));
8046+
8047+ asm volatile("addcc %2, %0, %0\n"
8048+
8049+#ifdef CONFIG_PAX_REFCOUNT
8050+ "tvs %%icc, 6\n"
8051+#endif
8052+
8053+ : "=r" (new)
8054+ : "0" (c), "ir" (a)
8055+ : "cc");
8056+
8057+ old = atomic_cmpxchg(v, c, new);
8058 if (likely(old == c))
8059 break;
8060 c = old;
8061@@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
8062 #define atomic64_cmpxchg(v, o, n) \
8063 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
8064 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
8065+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
8066+{
8067+ return xchg(&v->counter, new);
8068+}
8069
8070 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
8071 {
8072- long c, old;
8073+ long c, old, new;
8074 c = atomic64_read(v);
8075 for (;;) {
8076- if (unlikely(c == (u)))
8077+ if (unlikely(c == u))
8078 break;
8079- old = atomic64_cmpxchg((v), c, c + (a));
8080+
8081+ asm volatile("addcc %2, %0, %0\n"
8082+
8083+#ifdef CONFIG_PAX_REFCOUNT
8084+ "tvs %%xcc, 6\n"
8085+#endif
8086+
8087+ : "=r" (new)
8088+ : "0" (c), "ir" (a)
8089+ : "cc");
8090+
8091+ old = atomic64_cmpxchg(v, c, new);
8092 if (likely(old == c))
8093 break;
8094 c = old;
8095 }
8096- return c != (u);
8097+ return c != u;
8098 }
8099
8100 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
8101diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
8102index 5bb6991..5c2132e 100644
8103--- a/arch/sparc/include/asm/cache.h
8104+++ b/arch/sparc/include/asm/cache.h
8105@@ -7,10 +7,12 @@
8106 #ifndef _SPARC_CACHE_H
8107 #define _SPARC_CACHE_H
8108
8109+#include <linux/const.h>
8110+
8111 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
8112
8113 #define L1_CACHE_SHIFT 5
8114-#define L1_CACHE_BYTES 32
8115+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8116
8117 #ifdef CONFIG_SPARC32
8118 #define SMP_CACHE_BYTES_SHIFT 5
8119diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
8120index ac74a2c..a9e58af 100644
8121--- a/arch/sparc/include/asm/elf_32.h
8122+++ b/arch/sparc/include/asm/elf_32.h
8123@@ -114,6 +114,13 @@ typedef struct {
8124
8125 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
8126
8127+#ifdef CONFIG_PAX_ASLR
8128+#define PAX_ELF_ET_DYN_BASE 0x10000UL
8129+
8130+#define PAX_DELTA_MMAP_LEN 16
8131+#define PAX_DELTA_STACK_LEN 16
8132+#endif
8133+
8134 /* This yields a mask that user programs can use to figure out what
8135 instruction set this cpu supports. This can NOT be done in userspace
8136 on Sparc. */
8137diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
8138index 370ca1e..d4f4a98 100644
8139--- a/arch/sparc/include/asm/elf_64.h
8140+++ b/arch/sparc/include/asm/elf_64.h
8141@@ -189,6 +189,13 @@ typedef struct {
8142 #define ELF_ET_DYN_BASE 0x0000010000000000UL
8143 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
8144
8145+#ifdef CONFIG_PAX_ASLR
8146+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
8147+
8148+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
8149+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
8150+#endif
8151+
8152 extern unsigned long sparc64_elf_hwcap;
8153 #define ELF_HWCAP sparc64_elf_hwcap
8154
8155diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
8156index 9b1c36d..209298b 100644
8157--- a/arch/sparc/include/asm/pgalloc_32.h
8158+++ b/arch/sparc/include/asm/pgalloc_32.h
8159@@ -33,6 +33,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
8160 }
8161
8162 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
8163+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
8164
8165 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
8166 unsigned long address)
8167diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
8168index bcfe063..b333142 100644
8169--- a/arch/sparc/include/asm/pgalloc_64.h
8170+++ b/arch/sparc/include/asm/pgalloc_64.h
8171@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
8172 }
8173
8174 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
8175+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
8176
8177 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
8178 {
8179diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
8180index 6fc1348..390c50a 100644
8181--- a/arch/sparc/include/asm/pgtable_32.h
8182+++ b/arch/sparc/include/asm/pgtable_32.h
8183@@ -50,6 +50,9 @@ extern unsigned long calc_highpages(void);
8184 #define PAGE_SHARED SRMMU_PAGE_SHARED
8185 #define PAGE_COPY SRMMU_PAGE_COPY
8186 #define PAGE_READONLY SRMMU_PAGE_RDONLY
8187+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
8188+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
8189+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
8190 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
8191
8192 /* Top-level page directory - dummy used by init-mm.
8193@@ -62,18 +65,18 @@ extern unsigned long ptr_in_current_pgd;
8194
8195 /* xwr */
8196 #define __P000 PAGE_NONE
8197-#define __P001 PAGE_READONLY
8198-#define __P010 PAGE_COPY
8199-#define __P011 PAGE_COPY
8200+#define __P001 PAGE_READONLY_NOEXEC
8201+#define __P010 PAGE_COPY_NOEXEC
8202+#define __P011 PAGE_COPY_NOEXEC
8203 #define __P100 PAGE_READONLY
8204 #define __P101 PAGE_READONLY
8205 #define __P110 PAGE_COPY
8206 #define __P111 PAGE_COPY
8207
8208 #define __S000 PAGE_NONE
8209-#define __S001 PAGE_READONLY
8210-#define __S010 PAGE_SHARED
8211-#define __S011 PAGE_SHARED
8212+#define __S001 PAGE_READONLY_NOEXEC
8213+#define __S010 PAGE_SHARED_NOEXEC
8214+#define __S011 PAGE_SHARED_NOEXEC
8215 #define __S100 PAGE_READONLY
8216 #define __S101 PAGE_READONLY
8217 #define __S110 PAGE_SHARED
8218diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
8219index 79da178..c2eede8 100644
8220--- a/arch/sparc/include/asm/pgtsrmmu.h
8221+++ b/arch/sparc/include/asm/pgtsrmmu.h
8222@@ -115,6 +115,11 @@
8223 SRMMU_EXEC | SRMMU_REF)
8224 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
8225 SRMMU_EXEC | SRMMU_REF)
8226+
8227+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
8228+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
8229+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
8230+
8231 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
8232 SRMMU_DIRTY | SRMMU_REF)
8233
8234diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
8235index 9689176..63c18ea 100644
8236--- a/arch/sparc/include/asm/spinlock_64.h
8237+++ b/arch/sparc/include/asm/spinlock_64.h
8238@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
8239
8240 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
8241
8242-static void inline arch_read_lock(arch_rwlock_t *lock)
8243+static inline void arch_read_lock(arch_rwlock_t *lock)
8244 {
8245 unsigned long tmp1, tmp2;
8246
8247 __asm__ __volatile__ (
8248 "1: ldsw [%2], %0\n"
8249 " brlz,pn %0, 2f\n"
8250-"4: add %0, 1, %1\n"
8251+"4: addcc %0, 1, %1\n"
8252+
8253+#ifdef CONFIG_PAX_REFCOUNT
8254+" tvs %%icc, 6\n"
8255+#endif
8256+
8257 " cas [%2], %0, %1\n"
8258 " cmp %0, %1\n"
8259 " bne,pn %%icc, 1b\n"
8260@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
8261 " .previous"
8262 : "=&r" (tmp1), "=&r" (tmp2)
8263 : "r" (lock)
8264- : "memory");
8265+ : "memory", "cc");
8266 }
8267
8268-static int inline arch_read_trylock(arch_rwlock_t *lock)
8269+static inline int arch_read_trylock(arch_rwlock_t *lock)
8270 {
8271 int tmp1, tmp2;
8272
8273@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
8274 "1: ldsw [%2], %0\n"
8275 " brlz,a,pn %0, 2f\n"
8276 " mov 0, %0\n"
8277-" add %0, 1, %1\n"
8278+" addcc %0, 1, %1\n"
8279+
8280+#ifdef CONFIG_PAX_REFCOUNT
8281+" tvs %%icc, 6\n"
8282+#endif
8283+
8284 " cas [%2], %0, %1\n"
8285 " cmp %0, %1\n"
8286 " bne,pn %%icc, 1b\n"
8287@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
8288 return tmp1;
8289 }
8290
8291-static void inline arch_read_unlock(arch_rwlock_t *lock)
8292+static inline void arch_read_unlock(arch_rwlock_t *lock)
8293 {
8294 unsigned long tmp1, tmp2;
8295
8296 __asm__ __volatile__(
8297 "1: lduw [%2], %0\n"
8298-" sub %0, 1, %1\n"
8299+" subcc %0, 1, %1\n"
8300+
8301+#ifdef CONFIG_PAX_REFCOUNT
8302+" tvs %%icc, 6\n"
8303+#endif
8304+
8305 " cas [%2], %0, %1\n"
8306 " cmp %0, %1\n"
8307 " bne,pn %%xcc, 1b\n"
8308@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
8309 : "memory");
8310 }
8311
8312-static void inline arch_write_lock(arch_rwlock_t *lock)
8313+static inline void arch_write_lock(arch_rwlock_t *lock)
8314 {
8315 unsigned long mask, tmp1, tmp2;
8316
8317@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
8318 : "memory");
8319 }
8320
8321-static void inline arch_write_unlock(arch_rwlock_t *lock)
8322+static inline void arch_write_unlock(arch_rwlock_t *lock)
8323 {
8324 __asm__ __volatile__(
8325 " stw %%g0, [%0]"
8326@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
8327 : "memory");
8328 }
8329
8330-static int inline arch_write_trylock(arch_rwlock_t *lock)
8331+static inline int arch_write_trylock(arch_rwlock_t *lock)
8332 {
8333 unsigned long mask, tmp1, tmp2, result;
8334
8335diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
8336index 25849ae..924c54b 100644
8337--- a/arch/sparc/include/asm/thread_info_32.h
8338+++ b/arch/sparc/include/asm/thread_info_32.h
8339@@ -49,6 +49,8 @@ struct thread_info {
8340 unsigned long w_saved;
8341
8342 struct restart_block restart_block;
8343+
8344+ unsigned long lowest_stack;
8345 };
8346
8347 /*
8348diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
8349index 269bd92..e46a9b8 100644
8350--- a/arch/sparc/include/asm/thread_info_64.h
8351+++ b/arch/sparc/include/asm/thread_info_64.h
8352@@ -63,6 +63,8 @@ struct thread_info {
8353 struct pt_regs *kern_una_regs;
8354 unsigned int kern_una_insn;
8355
8356+ unsigned long lowest_stack;
8357+
8358 unsigned long fpregs[0] __attribute__ ((aligned(64)));
8359 };
8360
8361@@ -192,10 +194,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
8362 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
8363 /* flag bit 6 is available */
8364 #define TIF_32BIT 7 /* 32-bit binary */
8365-/* flag bit 8 is available */
8366+#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */
8367 #define TIF_SECCOMP 9 /* secure computing */
8368 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
8369 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
8370+
8371 /* NOTE: Thread flags >= 12 should be ones we have no interest
8372 * in using in assembly, else we can't use the mask as
8373 * an immediate value in instructions such as andcc.
8374@@ -214,12 +217,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
8375 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
8376 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
8377 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
8378+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
8379
8380 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
8381 _TIF_DO_NOTIFY_RESUME_MASK | \
8382 _TIF_NEED_RESCHED)
8383 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
8384
8385+#define _TIF_WORK_SYSCALL \
8386+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
8387+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
8388+
8389+
8390 /*
8391 * Thread-synchronous status.
8392 *
8393diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
8394index 0167d26..767bb0c 100644
8395--- a/arch/sparc/include/asm/uaccess.h
8396+++ b/arch/sparc/include/asm/uaccess.h
8397@@ -1,5 +1,6 @@
8398 #ifndef ___ASM_SPARC_UACCESS_H
8399 #define ___ASM_SPARC_UACCESS_H
8400+
8401 #if defined(__sparc__) && defined(__arch64__)
8402 #include <asm/uaccess_64.h>
8403 #else
8404diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
8405index 53a28dd..50c38c3 100644
8406--- a/arch/sparc/include/asm/uaccess_32.h
8407+++ b/arch/sparc/include/asm/uaccess_32.h
8408@@ -250,27 +250,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
8409
8410 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
8411 {
8412- if (n && __access_ok((unsigned long) to, n))
8413+ if ((long)n < 0)
8414+ return n;
8415+
8416+ if (n && __access_ok((unsigned long) to, n)) {
8417+ if (!__builtin_constant_p(n))
8418+ check_object_size(from, n, true);
8419 return __copy_user(to, (__force void __user *) from, n);
8420- else
8421+ } else
8422 return n;
8423 }
8424
8425 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
8426 {
8427+ if ((long)n < 0)
8428+ return n;
8429+
8430+ if (!__builtin_constant_p(n))
8431+ check_object_size(from, n, true);
8432+
8433 return __copy_user(to, (__force void __user *) from, n);
8434 }
8435
8436 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
8437 {
8438- if (n && __access_ok((unsigned long) from, n))
8439+ if ((long)n < 0)
8440+ return n;
8441+
8442+ if (n && __access_ok((unsigned long) from, n)) {
8443+ if (!__builtin_constant_p(n))
8444+ check_object_size(to, n, false);
8445 return __copy_user((__force void __user *) to, from, n);
8446- else
8447+ } else
8448 return n;
8449 }
8450
8451 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
8452 {
8453+ if ((long)n < 0)
8454+ return n;
8455+
8456 return __copy_user((__force void __user *) to, from, n);
8457 }
8458
8459diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
8460index e562d3c..191f176 100644
8461--- a/arch/sparc/include/asm/uaccess_64.h
8462+++ b/arch/sparc/include/asm/uaccess_64.h
8463@@ -10,6 +10,7 @@
8464 #include <linux/compiler.h>
8465 #include <linux/string.h>
8466 #include <linux/thread_info.h>
8467+#include <linux/kernel.h>
8468 #include <asm/asi.h>
8469 #include <asm/spitfire.h>
8470 #include <asm-generic/uaccess-unaligned.h>
8471@@ -214,8 +215,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
8472 static inline unsigned long __must_check
8473 copy_from_user(void *to, const void __user *from, unsigned long size)
8474 {
8475- unsigned long ret = ___copy_from_user(to, from, size);
8476+ unsigned long ret;
8477
8478+ if ((long)size < 0 || size > INT_MAX)
8479+ return size;
8480+
8481+ if (!__builtin_constant_p(size))
8482+ check_object_size(to, size, false);
8483+
8484+ ret = ___copy_from_user(to, from, size);
8485 if (unlikely(ret))
8486 ret = copy_from_user_fixup(to, from, size);
8487
8488@@ -231,8 +239,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
8489 static inline unsigned long __must_check
8490 copy_to_user(void __user *to, const void *from, unsigned long size)
8491 {
8492- unsigned long ret = ___copy_to_user(to, from, size);
8493+ unsigned long ret;
8494
8495+ if ((long)size < 0 || size > INT_MAX)
8496+ return size;
8497+
8498+ if (!__builtin_constant_p(size))
8499+ check_object_size(from, size, true);
8500+
8501+ ret = ___copy_to_user(to, from, size);
8502 if (unlikely(ret))
8503 ret = copy_to_user_fixup(to, from, size);
8504 return ret;
8505diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
8506index 6cf591b..b49e65a 100644
8507--- a/arch/sparc/kernel/Makefile
8508+++ b/arch/sparc/kernel/Makefile
8509@@ -3,7 +3,7 @@
8510 #
8511
8512 asflags-y := -ansi
8513-ccflags-y := -Werror
8514+#ccflags-y := -Werror
8515
8516 extra-y := head_$(BITS).o
8517
8518diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
8519index be8e862..5b50b12 100644
8520--- a/arch/sparc/kernel/process_32.c
8521+++ b/arch/sparc/kernel/process_32.c
8522@@ -126,14 +126,14 @@ void show_regs(struct pt_regs *r)
8523
8524 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
8525 r->psr, r->pc, r->npc, r->y, print_tainted());
8526- printk("PC: <%pS>\n", (void *) r->pc);
8527+ printk("PC: <%pA>\n", (void *) r->pc);
8528 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
8529 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
8530 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
8531 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
8532 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
8533 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
8534- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
8535+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
8536
8537 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
8538 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
8539@@ -168,7 +168,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
8540 rw = (struct reg_window32 *) fp;
8541 pc = rw->ins[7];
8542 printk("[%08lx : ", pc);
8543- printk("%pS ] ", (void *) pc);
8544+ printk("%pA ] ", (void *) pc);
8545 fp = rw->ins[6];
8546 } while (++count < 16);
8547 printk("\n");
8548diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
8549index cdb80b2..5ca141d 100644
8550--- a/arch/sparc/kernel/process_64.c
8551+++ b/arch/sparc/kernel/process_64.c
8552@@ -181,14 +181,14 @@ static void show_regwindow(struct pt_regs *regs)
8553 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
8554 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
8555 if (regs->tstate & TSTATE_PRIV)
8556- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
8557+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
8558 }
8559
8560 void show_regs(struct pt_regs *regs)
8561 {
8562 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
8563 regs->tpc, regs->tnpc, regs->y, print_tainted());
8564- printk("TPC: <%pS>\n", (void *) regs->tpc);
8565+ printk("TPC: <%pA>\n", (void *) regs->tpc);
8566 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
8567 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
8568 regs->u_regs[3]);
8569@@ -201,7 +201,7 @@ void show_regs(struct pt_regs *regs)
8570 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
8571 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
8572 regs->u_regs[15]);
8573- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
8574+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
8575 show_regwindow(regs);
8576 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
8577 }
8578@@ -290,7 +290,7 @@ void arch_trigger_all_cpu_backtrace(void)
8579 ((tp && tp->task) ? tp->task->pid : -1));
8580
8581 if (gp->tstate & TSTATE_PRIV) {
8582- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
8583+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
8584 (void *) gp->tpc,
8585 (void *) gp->o7,
8586 (void *) gp->i7,
8587diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
8588index 1303021..c2a6321 100644
8589--- a/arch/sparc/kernel/prom_common.c
8590+++ b/arch/sparc/kernel/prom_common.c
8591@@ -143,7 +143,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
8592
8593 unsigned int prom_early_allocated __initdata;
8594
8595-static struct of_pdt_ops prom_sparc_ops __initdata = {
8596+static struct of_pdt_ops prom_sparc_ops __initconst = {
8597 .nextprop = prom_common_nextprop,
8598 .getproplen = prom_getproplen,
8599 .getproperty = prom_getproperty,
8600diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
8601index 7ff45e4..a58f271 100644
8602--- a/arch/sparc/kernel/ptrace_64.c
8603+++ b/arch/sparc/kernel/ptrace_64.c
8604@@ -1057,6 +1057,10 @@ long arch_ptrace(struct task_struct *child, long request,
8605 return ret;
8606 }
8607
8608+#ifdef CONFIG_GRKERNSEC_SETXID
8609+extern void gr_delayed_cred_worker(void);
8610+#endif
8611+
8612 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
8613 {
8614 int ret = 0;
8615@@ -1064,6 +1068,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
8616 /* do the secure computing check first */
8617 secure_computing_strict(regs->u_regs[UREG_G1]);
8618
8619+#ifdef CONFIG_GRKERNSEC_SETXID
8620+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8621+ gr_delayed_cred_worker();
8622+#endif
8623+
8624 if (test_thread_flag(TIF_SYSCALL_TRACE))
8625 ret = tracehook_report_syscall_entry(regs);
8626
8627@@ -1084,6 +1093,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
8628
8629 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
8630 {
8631+#ifdef CONFIG_GRKERNSEC_SETXID
8632+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8633+ gr_delayed_cred_worker();
8634+#endif
8635+
8636 audit_syscall_exit(regs);
8637
8638 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
8639diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
8640index 2da0bdc..79128d2 100644
8641--- a/arch/sparc/kernel/sys_sparc_32.c
8642+++ b/arch/sparc/kernel/sys_sparc_32.c
8643@@ -52,7 +52,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8644 if (len > TASK_SIZE - PAGE_SIZE)
8645 return -ENOMEM;
8646 if (!addr)
8647- addr = TASK_UNMAPPED_BASE;
8648+ addr = current->mm->mmap_base;
8649
8650 info.flags = 0;
8651 info.length = len;
8652diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
8653index 708bc29..f0129cb 100644
8654--- a/arch/sparc/kernel/sys_sparc_64.c
8655+++ b/arch/sparc/kernel/sys_sparc_64.c
8656@@ -90,13 +90,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8657 struct vm_area_struct * vma;
8658 unsigned long task_size = TASK_SIZE;
8659 int do_color_align;
8660+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8661 struct vm_unmapped_area_info info;
8662
8663 if (flags & MAP_FIXED) {
8664 /* We do not accept a shared mapping if it would violate
8665 * cache aliasing constraints.
8666 */
8667- if ((flags & MAP_SHARED) &&
8668+ if ((filp || (flags & MAP_SHARED)) &&
8669 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
8670 return -EINVAL;
8671 return addr;
8672@@ -111,6 +112,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8673 if (filp || (flags & MAP_SHARED))
8674 do_color_align = 1;
8675
8676+#ifdef CONFIG_PAX_RANDMMAP
8677+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8678+#endif
8679+
8680 if (addr) {
8681 if (do_color_align)
8682 addr = COLOR_ALIGN(addr, pgoff);
8683@@ -118,14 +123,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8684 addr = PAGE_ALIGN(addr);
8685
8686 vma = find_vma(mm, addr);
8687- if (task_size - len >= addr &&
8688- (!vma || addr + len <= vma->vm_start))
8689+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8690 return addr;
8691 }
8692
8693 info.flags = 0;
8694 info.length = len;
8695- info.low_limit = TASK_UNMAPPED_BASE;
8696+ info.low_limit = mm->mmap_base;
8697 info.high_limit = min(task_size, VA_EXCLUDE_START);
8698 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
8699 info.align_offset = pgoff << PAGE_SHIFT;
8700@@ -134,6 +138,12 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8701 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
8702 VM_BUG_ON(addr != -ENOMEM);
8703 info.low_limit = VA_EXCLUDE_END;
8704+
8705+#ifdef CONFIG_PAX_RANDMMAP
8706+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8707+ info.low_limit += mm->delta_mmap;
8708+#endif
8709+
8710 info.high_limit = task_size;
8711 addr = vm_unmapped_area(&info);
8712 }
8713@@ -151,6 +161,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8714 unsigned long task_size = STACK_TOP32;
8715 unsigned long addr = addr0;
8716 int do_color_align;
8717+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8718 struct vm_unmapped_area_info info;
8719
8720 /* This should only ever run for 32-bit processes. */
8721@@ -160,7 +171,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8722 /* We do not accept a shared mapping if it would violate
8723 * cache aliasing constraints.
8724 */
8725- if ((flags & MAP_SHARED) &&
8726+ if ((filp || (flags & MAP_SHARED)) &&
8727 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
8728 return -EINVAL;
8729 return addr;
8730@@ -173,6 +184,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8731 if (filp || (flags & MAP_SHARED))
8732 do_color_align = 1;
8733
8734+#ifdef CONFIG_PAX_RANDMMAP
8735+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8736+#endif
8737+
8738 /* requesting a specific address */
8739 if (addr) {
8740 if (do_color_align)
8741@@ -181,8 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8742 addr = PAGE_ALIGN(addr);
8743
8744 vma = find_vma(mm, addr);
8745- if (task_size - len >= addr &&
8746- (!vma || addr + len <= vma->vm_start))
8747+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8748 return addr;
8749 }
8750
8751@@ -204,6 +218,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8752 VM_BUG_ON(addr != -ENOMEM);
8753 info.flags = 0;
8754 info.low_limit = TASK_UNMAPPED_BASE;
8755+
8756+#ifdef CONFIG_PAX_RANDMMAP
8757+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8758+ info.low_limit += mm->delta_mmap;
8759+#endif
8760+
8761 info.high_limit = STACK_TOP32;
8762 addr = vm_unmapped_area(&info);
8763 }
8764@@ -264,6 +284,10 @@ static unsigned long mmap_rnd(void)
8765 {
8766 unsigned long rnd = 0UL;
8767
8768+#ifdef CONFIG_PAX_RANDMMAP
8769+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8770+#endif
8771+
8772 if (current->flags & PF_RANDOMIZE) {
8773 unsigned long val = get_random_int();
8774 if (test_thread_flag(TIF_32BIT))
8775@@ -289,6 +313,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8776 gap == RLIM_INFINITY ||
8777 sysctl_legacy_va_layout) {
8778 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
8779+
8780+#ifdef CONFIG_PAX_RANDMMAP
8781+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8782+ mm->mmap_base += mm->delta_mmap;
8783+#endif
8784+
8785 mm->get_unmapped_area = arch_get_unmapped_area;
8786 mm->unmap_area = arch_unmap_area;
8787 } else {
8788@@ -301,6 +331,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8789 gap = (task_size / 6 * 5);
8790
8791 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
8792+
8793+#ifdef CONFIG_PAX_RANDMMAP
8794+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8795+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
8796+#endif
8797+
8798 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
8799 mm->unmap_area = arch_unmap_area_topdown;
8800 }
8801diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
8802index e0fed77..604a7e5 100644
8803--- a/arch/sparc/kernel/syscalls.S
8804+++ b/arch/sparc/kernel/syscalls.S
8805@@ -58,7 +58,7 @@ sys32_rt_sigreturn:
8806 #endif
8807 .align 32
8808 1: ldx [%g6 + TI_FLAGS], %l5
8809- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8810+ andcc %l5, _TIF_WORK_SYSCALL, %g0
8811 be,pt %icc, rtrap
8812 nop
8813 call syscall_trace_leave
8814@@ -190,7 +190,7 @@ linux_sparc_syscall32:
8815
8816 srl %i5, 0, %o5 ! IEU1
8817 srl %i2, 0, %o2 ! IEU0 Group
8818- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8819+ andcc %l0, _TIF_WORK_SYSCALL, %g0
8820 bne,pn %icc, linux_syscall_trace32 ! CTI
8821 mov %i0, %l5 ! IEU1
8822 call %l7 ! CTI Group brk forced
8823@@ -213,7 +213,7 @@ linux_sparc_syscall:
8824
8825 mov %i3, %o3 ! IEU1
8826 mov %i4, %o4 ! IEU0 Group
8827- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8828+ andcc %l0, _TIF_WORK_SYSCALL, %g0
8829 bne,pn %icc, linux_syscall_trace ! CTI Group
8830 mov %i0, %l5 ! IEU0
8831 2: call %l7 ! CTI Group brk forced
8832@@ -229,7 +229,7 @@ ret_sys_call:
8833
8834 cmp %o0, -ERESTART_RESTARTBLOCK
8835 bgeu,pn %xcc, 1f
8836- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8837+ andcc %l0, _TIF_WORK_SYSCALL, %g0
8838 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
8839
8840 2:
8841diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
8842index 654e8aa..45f431b 100644
8843--- a/arch/sparc/kernel/sysfs.c
8844+++ b/arch/sparc/kernel/sysfs.c
8845@@ -266,7 +266,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
8846 return NOTIFY_OK;
8847 }
8848
8849-static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
8850+static struct notifier_block sysfs_cpu_nb = {
8851 .notifier_call = sysfs_cpu_notify,
8852 };
8853
8854diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
8855index a5785ea..405c5f7 100644
8856--- a/arch/sparc/kernel/traps_32.c
8857+++ b/arch/sparc/kernel/traps_32.c
8858@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
8859 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
8860 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
8861
8862+extern void gr_handle_kernel_exploit(void);
8863+
8864 void die_if_kernel(char *str, struct pt_regs *regs)
8865 {
8866 static int die_counter;
8867@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
8868 count++ < 30 &&
8869 (((unsigned long) rw) >= PAGE_OFFSET) &&
8870 !(((unsigned long) rw) & 0x7)) {
8871- printk("Caller[%08lx]: %pS\n", rw->ins[7],
8872+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
8873 (void *) rw->ins[7]);
8874 rw = (struct reg_window32 *)rw->ins[6];
8875 }
8876 }
8877 printk("Instruction DUMP:");
8878 instruction_dump ((unsigned long *) regs->pc);
8879- if(regs->psr & PSR_PS)
8880+ if(regs->psr & PSR_PS) {
8881+ gr_handle_kernel_exploit();
8882 do_exit(SIGKILL);
8883+ }
8884 do_exit(SIGSEGV);
8885 }
8886
8887diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
8888index e7ecf15..6520e65 100644
8889--- a/arch/sparc/kernel/traps_64.c
8890+++ b/arch/sparc/kernel/traps_64.c
8891@@ -76,7 +76,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
8892 i + 1,
8893 p->trapstack[i].tstate, p->trapstack[i].tpc,
8894 p->trapstack[i].tnpc, p->trapstack[i].tt);
8895- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
8896+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
8897 }
8898 }
8899
8900@@ -96,6 +96,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
8901
8902 lvl -= 0x100;
8903 if (regs->tstate & TSTATE_PRIV) {
8904+
8905+#ifdef CONFIG_PAX_REFCOUNT
8906+ if (lvl == 6)
8907+ pax_report_refcount_overflow(regs);
8908+#endif
8909+
8910 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
8911 die_if_kernel(buffer, regs);
8912 }
8913@@ -114,11 +120,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
8914 void bad_trap_tl1(struct pt_regs *regs, long lvl)
8915 {
8916 char buffer[32];
8917-
8918+
8919 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
8920 0, lvl, SIGTRAP) == NOTIFY_STOP)
8921 return;
8922
8923+#ifdef CONFIG_PAX_REFCOUNT
8924+ if (lvl == 6)
8925+ pax_report_refcount_overflow(regs);
8926+#endif
8927+
8928 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
8929
8930 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
8931@@ -1142,7 +1153,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
8932 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
8933 printk("%s" "ERROR(%d): ",
8934 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
8935- printk("TPC<%pS>\n", (void *) regs->tpc);
8936+ printk("TPC<%pA>\n", (void *) regs->tpc);
8937 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
8938 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
8939 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
8940@@ -1749,7 +1760,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
8941 smp_processor_id(),
8942 (type & 0x1) ? 'I' : 'D',
8943 regs->tpc);
8944- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
8945+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
8946 panic("Irrecoverable Cheetah+ parity error.");
8947 }
8948
8949@@ -1757,7 +1768,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
8950 smp_processor_id(),
8951 (type & 0x1) ? 'I' : 'D',
8952 regs->tpc);
8953- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
8954+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
8955 }
8956
8957 struct sun4v_error_entry {
8958@@ -2104,9 +2115,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
8959
8960 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
8961 regs->tpc, tl);
8962- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
8963+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
8964 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
8965- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
8966+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
8967 (void *) regs->u_regs[UREG_I7]);
8968 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
8969 "pte[%lx] error[%lx]\n",
8970@@ -2128,9 +2139,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
8971
8972 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
8973 regs->tpc, tl);
8974- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
8975+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
8976 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
8977- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
8978+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
8979 (void *) regs->u_regs[UREG_I7]);
8980 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
8981 "pte[%lx] error[%lx]\n",
8982@@ -2336,13 +2347,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
8983 fp = (unsigned long)sf->fp + STACK_BIAS;
8984 }
8985
8986- printk(" [%016lx] %pS\n", pc, (void *) pc);
8987+ printk(" [%016lx] %pA\n", pc, (void *) pc);
8988 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
8989 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
8990 int index = tsk->curr_ret_stack;
8991 if (tsk->ret_stack && index >= graph) {
8992 pc = tsk->ret_stack[index - graph].ret;
8993- printk(" [%016lx] %pS\n", pc, (void *) pc);
8994+ printk(" [%016lx] %pA\n", pc, (void *) pc);
8995 graph++;
8996 }
8997 }
8998@@ -2367,6 +2378,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
8999 return (struct reg_window *) (fp + STACK_BIAS);
9000 }
9001
9002+extern void gr_handle_kernel_exploit(void);
9003+
9004 void die_if_kernel(char *str, struct pt_regs *regs)
9005 {
9006 static int die_counter;
9007@@ -2395,7 +2408,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
9008 while (rw &&
9009 count++ < 30 &&
9010 kstack_valid(tp, (unsigned long) rw)) {
9011- printk("Caller[%016lx]: %pS\n", rw->ins[7],
9012+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
9013 (void *) rw->ins[7]);
9014
9015 rw = kernel_stack_up(rw);
9016@@ -2408,8 +2421,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
9017 }
9018 user_instruction_dump ((unsigned int __user *) regs->tpc);
9019 }
9020- if (regs->tstate & TSTATE_PRIV)
9021+ if (regs->tstate & TSTATE_PRIV) {
9022+ gr_handle_kernel_exploit();
9023 do_exit(SIGKILL);
9024+ }
9025 do_exit(SIGSEGV);
9026 }
9027 EXPORT_SYMBOL(die_if_kernel);
9028diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
9029index 8201c25e..072a2a7 100644
9030--- a/arch/sparc/kernel/unaligned_64.c
9031+++ b/arch/sparc/kernel/unaligned_64.c
9032@@ -286,7 +286,7 @@ static void log_unaligned(struct pt_regs *regs)
9033 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
9034
9035 if (__ratelimit(&ratelimit)) {
9036- printk("Kernel unaligned access at TPC[%lx] %pS\n",
9037+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
9038 regs->tpc, (void *) regs->tpc);
9039 }
9040 }
9041diff --git a/arch/sparc/kernel/us3_cpufreq.c b/arch/sparc/kernel/us3_cpufreq.c
9042index eb1624b..9f3d607 100644
9043--- a/arch/sparc/kernel/us3_cpufreq.c
9044+++ b/arch/sparc/kernel/us3_cpufreq.c
9045@@ -197,6 +197,20 @@ static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
9046 return 0;
9047 }
9048
9049+static int __init us3_freq_init(void);
9050+static void __exit us3_freq_exit(void);
9051+
9052+static struct cpufreq_driver _cpufreq_us3_driver = {
9053+ .init = us3_freq_cpu_init,
9054+ .verify = us3_freq_verify,
9055+ .target = us3_freq_target,
9056+ .get = us3_freq_get,
9057+ .exit = us3_freq_cpu_exit,
9058+ .owner = THIS_MODULE,
9059+ .name = "UltraSPARC-III",
9060+
9061+};
9062+
9063 static int __init us3_freq_init(void)
9064 {
9065 unsigned long manuf, impl, ver;
9066@@ -214,39 +228,22 @@ static int __init us3_freq_init(void)
9067 impl == CHEETAH_PLUS_IMPL ||
9068 impl == JAGUAR_IMPL ||
9069 impl == PANTHER_IMPL)) {
9070- struct cpufreq_driver *driver;
9071-
9072 ret = -ENOMEM;
9073- driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
9074- if (!driver)
9075- goto err_out;
9076-
9077 us3_freq_table = kzalloc(
9078 (NR_CPUS * sizeof(struct us3_freq_percpu_info)),
9079 GFP_KERNEL);
9080 if (!us3_freq_table)
9081 goto err_out;
9082
9083- driver->init = us3_freq_cpu_init;
9084- driver->verify = us3_freq_verify;
9085- driver->target = us3_freq_target;
9086- driver->get = us3_freq_get;
9087- driver->exit = us3_freq_cpu_exit;
9088- driver->owner = THIS_MODULE,
9089- strcpy(driver->name, "UltraSPARC-III");
9090-
9091- cpufreq_us3_driver = driver;
9092- ret = cpufreq_register_driver(driver);
9093+ cpufreq_us3_driver = &_cpufreq_us3_driver;
9094+ ret = cpufreq_register_driver(cpufreq_us3_driver);
9095 if (ret)
9096 goto err_out;
9097
9098 return 0;
9099
9100 err_out:
9101- if (driver) {
9102- kfree(driver);
9103- cpufreq_us3_driver = NULL;
9104- }
9105+ cpufreq_us3_driver = NULL;
9106 kfree(us3_freq_table);
9107 us3_freq_table = NULL;
9108 return ret;
9109@@ -259,7 +256,6 @@ static void __exit us3_freq_exit(void)
9110 {
9111 if (cpufreq_us3_driver) {
9112 cpufreq_unregister_driver(cpufreq_us3_driver);
9113- kfree(cpufreq_us3_driver);
9114 cpufreq_us3_driver = NULL;
9115 kfree(us3_freq_table);
9116 us3_freq_table = NULL;
9117diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
9118index 8410065f2..4fd4ca22 100644
9119--- a/arch/sparc/lib/Makefile
9120+++ b/arch/sparc/lib/Makefile
9121@@ -2,7 +2,7 @@
9122 #
9123
9124 asflags-y := -ansi -DST_DIV0=0x02
9125-ccflags-y := -Werror
9126+#ccflags-y := -Werror
9127
9128 lib-$(CONFIG_SPARC32) += ashrdi3.o
9129 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
9130diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
9131index 85c233d..68500e0 100644
9132--- a/arch/sparc/lib/atomic_64.S
9133+++ b/arch/sparc/lib/atomic_64.S
9134@@ -17,7 +17,12 @@
9135 ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
9136 BACKOFF_SETUP(%o2)
9137 1: lduw [%o1], %g1
9138- add %g1, %o0, %g7
9139+ addcc %g1, %o0, %g7
9140+
9141+#ifdef CONFIG_PAX_REFCOUNT
9142+ tvs %icc, 6
9143+#endif
9144+
9145 cas [%o1], %g1, %g7
9146 cmp %g1, %g7
9147 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
9148@@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
9149 2: BACKOFF_SPIN(%o2, %o3, 1b)
9150 ENDPROC(atomic_add)
9151
9152+ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
9153+ BACKOFF_SETUP(%o2)
9154+1: lduw [%o1], %g1
9155+ add %g1, %o0, %g7
9156+ cas [%o1], %g1, %g7
9157+ cmp %g1, %g7
9158+ bne,pn %icc, 2f
9159+ nop
9160+ retl
9161+ nop
9162+2: BACKOFF_SPIN(%o2, %o3, 1b)
9163+ENDPROC(atomic_add_unchecked)
9164+
9165 ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
9166 BACKOFF_SETUP(%o2)
9167 1: lduw [%o1], %g1
9168- sub %g1, %o0, %g7
9169+ subcc %g1, %o0, %g7
9170+
9171+#ifdef CONFIG_PAX_REFCOUNT
9172+ tvs %icc, 6
9173+#endif
9174+
9175 cas [%o1], %g1, %g7
9176 cmp %g1, %g7
9177 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
9178@@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
9179 2: BACKOFF_SPIN(%o2, %o3, 1b)
9180 ENDPROC(atomic_sub)
9181
9182+ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
9183+ BACKOFF_SETUP(%o2)
9184+1: lduw [%o1], %g1
9185+ sub %g1, %o0, %g7
9186+ cas [%o1], %g1, %g7
9187+ cmp %g1, %g7
9188+ bne,pn %icc, 2f
9189+ nop
9190+ retl
9191+ nop
9192+2: BACKOFF_SPIN(%o2, %o3, 1b)
9193+ENDPROC(atomic_sub_unchecked)
9194+
9195 ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
9196 BACKOFF_SETUP(%o2)
9197 1: lduw [%o1], %g1
9198- add %g1, %o0, %g7
9199+ addcc %g1, %o0, %g7
9200+
9201+#ifdef CONFIG_PAX_REFCOUNT
9202+ tvs %icc, 6
9203+#endif
9204+
9205 cas [%o1], %g1, %g7
9206 cmp %g1, %g7
9207 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
9208@@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
9209 2: BACKOFF_SPIN(%o2, %o3, 1b)
9210 ENDPROC(atomic_add_ret)
9211
9212+ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
9213+ BACKOFF_SETUP(%o2)
9214+1: lduw [%o1], %g1
9215+ addcc %g1, %o0, %g7
9216+ cas [%o1], %g1, %g7
9217+ cmp %g1, %g7
9218+ bne,pn %icc, 2f
9219+ add %g7, %o0, %g7
9220+ sra %g7, 0, %o0
9221+ retl
9222+ nop
9223+2: BACKOFF_SPIN(%o2, %o3, 1b)
9224+ENDPROC(atomic_add_ret_unchecked)
9225+
9226 ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
9227 BACKOFF_SETUP(%o2)
9228 1: lduw [%o1], %g1
9229- sub %g1, %o0, %g7
9230+ subcc %g1, %o0, %g7
9231+
9232+#ifdef CONFIG_PAX_REFCOUNT
9233+ tvs %icc, 6
9234+#endif
9235+
9236 cas [%o1], %g1, %g7
9237 cmp %g1, %g7
9238 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
9239@@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret)
9240 ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
9241 BACKOFF_SETUP(%o2)
9242 1: ldx [%o1], %g1
9243- add %g1, %o0, %g7
9244+ addcc %g1, %o0, %g7
9245+
9246+#ifdef CONFIG_PAX_REFCOUNT
9247+ tvs %xcc, 6
9248+#endif
9249+
9250 casx [%o1], %g1, %g7
9251 cmp %g1, %g7
9252 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
9253@@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
9254 2: BACKOFF_SPIN(%o2, %o3, 1b)
9255 ENDPROC(atomic64_add)
9256
9257+ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
9258+ BACKOFF_SETUP(%o2)
9259+1: ldx [%o1], %g1
9260+ addcc %g1, %o0, %g7
9261+ casx [%o1], %g1, %g7
9262+ cmp %g1, %g7
9263+ bne,pn %xcc, 2f
9264+ nop
9265+ retl
9266+ nop
9267+2: BACKOFF_SPIN(%o2, %o3, 1b)
9268+ENDPROC(atomic64_add_unchecked)
9269+
9270 ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
9271 BACKOFF_SETUP(%o2)
9272 1: ldx [%o1], %g1
9273- sub %g1, %o0, %g7
9274+ subcc %g1, %o0, %g7
9275+
9276+#ifdef CONFIG_PAX_REFCOUNT
9277+ tvs %xcc, 6
9278+#endif
9279+
9280 casx [%o1], %g1, %g7
9281 cmp %g1, %g7
9282 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
9283@@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
9284 2: BACKOFF_SPIN(%o2, %o3, 1b)
9285 ENDPROC(atomic64_sub)
9286
9287+ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
9288+ BACKOFF_SETUP(%o2)
9289+1: ldx [%o1], %g1
9290+ subcc %g1, %o0, %g7
9291+ casx [%o1], %g1, %g7
9292+ cmp %g1, %g7
9293+ bne,pn %xcc, 2f
9294+ nop
9295+ retl
9296+ nop
9297+2: BACKOFF_SPIN(%o2, %o3, 1b)
9298+ENDPROC(atomic64_sub_unchecked)
9299+
9300 ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
9301 BACKOFF_SETUP(%o2)
9302 1: ldx [%o1], %g1
9303- add %g1, %o0, %g7
9304+ addcc %g1, %o0, %g7
9305+
9306+#ifdef CONFIG_PAX_REFCOUNT
9307+ tvs %xcc, 6
9308+#endif
9309+
9310 casx [%o1], %g1, %g7
9311 cmp %g1, %g7
9312 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
9313@@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
9314 2: BACKOFF_SPIN(%o2, %o3, 1b)
9315 ENDPROC(atomic64_add_ret)
9316
9317+ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
9318+ BACKOFF_SETUP(%o2)
9319+1: ldx [%o1], %g1
9320+ addcc %g1, %o0, %g7
9321+ casx [%o1], %g1, %g7
9322+ cmp %g1, %g7
9323+ bne,pn %xcc, 2f
9324+ add %g7, %o0, %g7
9325+ mov %g7, %o0
9326+ retl
9327+ nop
9328+2: BACKOFF_SPIN(%o2, %o3, 1b)
9329+ENDPROC(atomic64_add_ret_unchecked)
9330+
9331 ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
9332 BACKOFF_SETUP(%o2)
9333 1: ldx [%o1], %g1
9334- sub %g1, %o0, %g7
9335+ subcc %g1, %o0, %g7
9336+
9337+#ifdef CONFIG_PAX_REFCOUNT
9338+ tvs %xcc, 6
9339+#endif
9340+
9341 casx [%o1], %g1, %g7
9342 cmp %g1, %g7
9343 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
9344diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
9345index 0c4e35e..745d3e4 100644
9346--- a/arch/sparc/lib/ksyms.c
9347+++ b/arch/sparc/lib/ksyms.c
9348@@ -109,12 +109,18 @@ EXPORT_SYMBOL(__downgrade_write);
9349
9350 /* Atomic counter implementation. */
9351 EXPORT_SYMBOL(atomic_add);
9352+EXPORT_SYMBOL(atomic_add_unchecked);
9353 EXPORT_SYMBOL(atomic_add_ret);
9354+EXPORT_SYMBOL(atomic_add_ret_unchecked);
9355 EXPORT_SYMBOL(atomic_sub);
9356+EXPORT_SYMBOL(atomic_sub_unchecked);
9357 EXPORT_SYMBOL(atomic_sub_ret);
9358 EXPORT_SYMBOL(atomic64_add);
9359+EXPORT_SYMBOL(atomic64_add_unchecked);
9360 EXPORT_SYMBOL(atomic64_add_ret);
9361+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
9362 EXPORT_SYMBOL(atomic64_sub);
9363+EXPORT_SYMBOL(atomic64_sub_unchecked);
9364 EXPORT_SYMBOL(atomic64_sub_ret);
9365 EXPORT_SYMBOL(atomic64_dec_if_positive);
9366
9367diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
9368index 30c3ecc..736f015 100644
9369--- a/arch/sparc/mm/Makefile
9370+++ b/arch/sparc/mm/Makefile
9371@@ -2,7 +2,7 @@
9372 #
9373
9374 asflags-y := -ansi
9375-ccflags-y := -Werror
9376+#ccflags-y := -Werror
9377
9378 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
9379 obj-y += fault_$(BITS).o
9380diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
9381index e98bfda..ea8d221 100644
9382--- a/arch/sparc/mm/fault_32.c
9383+++ b/arch/sparc/mm/fault_32.c
9384@@ -21,6 +21,9 @@
9385 #include <linux/perf_event.h>
9386 #include <linux/interrupt.h>
9387 #include <linux/kdebug.h>
9388+#include <linux/slab.h>
9389+#include <linux/pagemap.h>
9390+#include <linux/compiler.h>
9391
9392 #include <asm/page.h>
9393 #include <asm/pgtable.h>
9394@@ -159,6 +162,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
9395 return safe_compute_effective_address(regs, insn);
9396 }
9397
9398+#ifdef CONFIG_PAX_PAGEEXEC
9399+#ifdef CONFIG_PAX_DLRESOLVE
9400+static void pax_emuplt_close(struct vm_area_struct *vma)
9401+{
9402+ vma->vm_mm->call_dl_resolve = 0UL;
9403+}
9404+
9405+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9406+{
9407+ unsigned int *kaddr;
9408+
9409+ vmf->page = alloc_page(GFP_HIGHUSER);
9410+ if (!vmf->page)
9411+ return VM_FAULT_OOM;
9412+
9413+ kaddr = kmap(vmf->page);
9414+ memset(kaddr, 0, PAGE_SIZE);
9415+ kaddr[0] = 0x9DE3BFA8U; /* save */
9416+ flush_dcache_page(vmf->page);
9417+ kunmap(vmf->page);
9418+ return VM_FAULT_MAJOR;
9419+}
9420+
9421+static const struct vm_operations_struct pax_vm_ops = {
9422+ .close = pax_emuplt_close,
9423+ .fault = pax_emuplt_fault
9424+};
9425+
9426+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
9427+{
9428+ int ret;
9429+
9430+ INIT_LIST_HEAD(&vma->anon_vma_chain);
9431+ vma->vm_mm = current->mm;
9432+ vma->vm_start = addr;
9433+ vma->vm_end = addr + PAGE_SIZE;
9434+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
9435+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
9436+ vma->vm_ops = &pax_vm_ops;
9437+
9438+ ret = insert_vm_struct(current->mm, vma);
9439+ if (ret)
9440+ return ret;
9441+
9442+ ++current->mm->total_vm;
9443+ return 0;
9444+}
9445+#endif
9446+
9447+/*
9448+ * PaX: decide what to do with offenders (regs->pc = fault address)
9449+ *
9450+ * returns 1 when task should be killed
9451+ * 2 when patched PLT trampoline was detected
9452+ * 3 when unpatched PLT trampoline was detected
9453+ */
9454+static int pax_handle_fetch_fault(struct pt_regs *regs)
9455+{
9456+
9457+#ifdef CONFIG_PAX_EMUPLT
9458+ int err;
9459+
9460+ do { /* PaX: patched PLT emulation #1 */
9461+ unsigned int sethi1, sethi2, jmpl;
9462+
9463+ err = get_user(sethi1, (unsigned int *)regs->pc);
9464+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
9465+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
9466+
9467+ if (err)
9468+ break;
9469+
9470+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
9471+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
9472+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
9473+ {
9474+ unsigned int addr;
9475+
9476+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
9477+ addr = regs->u_regs[UREG_G1];
9478+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
9479+ regs->pc = addr;
9480+ regs->npc = addr+4;
9481+ return 2;
9482+ }
9483+ } while (0);
9484+
9485+ do { /* PaX: patched PLT emulation #2 */
9486+ unsigned int ba;
9487+
9488+ err = get_user(ba, (unsigned int *)regs->pc);
9489+
9490+ if (err)
9491+ break;
9492+
9493+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
9494+ unsigned int addr;
9495+
9496+ if ((ba & 0xFFC00000U) == 0x30800000U)
9497+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
9498+ else
9499+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
9500+ regs->pc = addr;
9501+ regs->npc = addr+4;
9502+ return 2;
9503+ }
9504+ } while (0);
9505+
9506+ do { /* PaX: patched PLT emulation #3 */
9507+ unsigned int sethi, bajmpl, nop;
9508+
9509+ err = get_user(sethi, (unsigned int *)regs->pc);
9510+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
9511+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
9512+
9513+ if (err)
9514+ break;
9515+
9516+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9517+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
9518+ nop == 0x01000000U)
9519+ {
9520+ unsigned int addr;
9521+
9522+ addr = (sethi & 0x003FFFFFU) << 10;
9523+ regs->u_regs[UREG_G1] = addr;
9524+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
9525+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
9526+ else
9527+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
9528+ regs->pc = addr;
9529+ regs->npc = addr+4;
9530+ return 2;
9531+ }
9532+ } while (0);
9533+
9534+ do { /* PaX: unpatched PLT emulation step 1 */
9535+ unsigned int sethi, ba, nop;
9536+
9537+ err = get_user(sethi, (unsigned int *)regs->pc);
9538+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
9539+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
9540+
9541+ if (err)
9542+ break;
9543+
9544+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9545+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
9546+ nop == 0x01000000U)
9547+ {
9548+ unsigned int addr, save, call;
9549+
9550+ if ((ba & 0xFFC00000U) == 0x30800000U)
9551+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
9552+ else
9553+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
9554+
9555+ err = get_user(save, (unsigned int *)addr);
9556+ err |= get_user(call, (unsigned int *)(addr+4));
9557+ err |= get_user(nop, (unsigned int *)(addr+8));
9558+ if (err)
9559+ break;
9560+
9561+#ifdef CONFIG_PAX_DLRESOLVE
9562+ if (save == 0x9DE3BFA8U &&
9563+ (call & 0xC0000000U) == 0x40000000U &&
9564+ nop == 0x01000000U)
9565+ {
9566+ struct vm_area_struct *vma;
9567+ unsigned long call_dl_resolve;
9568+
9569+ down_read(&current->mm->mmap_sem);
9570+ call_dl_resolve = current->mm->call_dl_resolve;
9571+ up_read(&current->mm->mmap_sem);
9572+ if (likely(call_dl_resolve))
9573+ goto emulate;
9574+
9575+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
9576+
9577+ down_write(&current->mm->mmap_sem);
9578+ if (current->mm->call_dl_resolve) {
9579+ call_dl_resolve = current->mm->call_dl_resolve;
9580+ up_write(&current->mm->mmap_sem);
9581+ if (vma)
9582+ kmem_cache_free(vm_area_cachep, vma);
9583+ goto emulate;
9584+ }
9585+
9586+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
9587+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
9588+ up_write(&current->mm->mmap_sem);
9589+ if (vma)
9590+ kmem_cache_free(vm_area_cachep, vma);
9591+ return 1;
9592+ }
9593+
9594+ if (pax_insert_vma(vma, call_dl_resolve)) {
9595+ up_write(&current->mm->mmap_sem);
9596+ kmem_cache_free(vm_area_cachep, vma);
9597+ return 1;
9598+ }
9599+
9600+ current->mm->call_dl_resolve = call_dl_resolve;
9601+ up_write(&current->mm->mmap_sem);
9602+
9603+emulate:
9604+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9605+ regs->pc = call_dl_resolve;
9606+ regs->npc = addr+4;
9607+ return 3;
9608+ }
9609+#endif
9610+
9611+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
9612+ if ((save & 0xFFC00000U) == 0x05000000U &&
9613+ (call & 0xFFFFE000U) == 0x85C0A000U &&
9614+ nop == 0x01000000U)
9615+ {
9616+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9617+ regs->u_regs[UREG_G2] = addr + 4;
9618+ addr = (save & 0x003FFFFFU) << 10;
9619+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
9620+ regs->pc = addr;
9621+ regs->npc = addr+4;
9622+ return 3;
9623+ }
9624+ }
9625+ } while (0);
9626+
9627+ do { /* PaX: unpatched PLT emulation step 2 */
9628+ unsigned int save, call, nop;
9629+
9630+ err = get_user(save, (unsigned int *)(regs->pc-4));
9631+ err |= get_user(call, (unsigned int *)regs->pc);
9632+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
9633+ if (err)
9634+ break;
9635+
9636+ if (save == 0x9DE3BFA8U &&
9637+ (call & 0xC0000000U) == 0x40000000U &&
9638+ nop == 0x01000000U)
9639+ {
9640+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
9641+
9642+ regs->u_regs[UREG_RETPC] = regs->pc;
9643+ regs->pc = dl_resolve;
9644+ regs->npc = dl_resolve+4;
9645+ return 3;
9646+ }
9647+ } while (0);
9648+#endif
9649+
9650+ return 1;
9651+}
9652+
9653+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9654+{
9655+ unsigned long i;
9656+
9657+ printk(KERN_ERR "PAX: bytes at PC: ");
9658+ for (i = 0; i < 8; i++) {
9659+ unsigned int c;
9660+ if (get_user(c, (unsigned int *)pc+i))
9661+ printk(KERN_CONT "???????? ");
9662+ else
9663+ printk(KERN_CONT "%08x ", c);
9664+ }
9665+ printk("\n");
9666+}
9667+#endif
9668+
9669 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
9670 int text_fault)
9671 {
9672@@ -230,6 +504,24 @@ good_area:
9673 if (!(vma->vm_flags & VM_WRITE))
9674 goto bad_area;
9675 } else {
9676+
9677+#ifdef CONFIG_PAX_PAGEEXEC
9678+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
9679+ up_read(&mm->mmap_sem);
9680+ switch (pax_handle_fetch_fault(regs)) {
9681+
9682+#ifdef CONFIG_PAX_EMUPLT
9683+ case 2:
9684+ case 3:
9685+ return;
9686+#endif
9687+
9688+ }
9689+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
9690+ do_group_exit(SIGKILL);
9691+ }
9692+#endif
9693+
9694 /* Allow reads even for write-only mappings */
9695 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
9696 goto bad_area;
9697diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
9698index 5062ff3..e0b75f3 100644
9699--- a/arch/sparc/mm/fault_64.c
9700+++ b/arch/sparc/mm/fault_64.c
9701@@ -21,6 +21,9 @@
9702 #include <linux/kprobes.h>
9703 #include <linux/kdebug.h>
9704 #include <linux/percpu.h>
9705+#include <linux/slab.h>
9706+#include <linux/pagemap.h>
9707+#include <linux/compiler.h>
9708
9709 #include <asm/page.h>
9710 #include <asm/pgtable.h>
9711@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
9712 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
9713 regs->tpc);
9714 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
9715- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
9716+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
9717 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
9718 dump_stack();
9719 unhandled_fault(regs->tpc, current, regs);
9720@@ -270,6 +273,466 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
9721 show_regs(regs);
9722 }
9723
9724+#ifdef CONFIG_PAX_PAGEEXEC
9725+#ifdef CONFIG_PAX_DLRESOLVE
9726+static void pax_emuplt_close(struct vm_area_struct *vma)
9727+{
9728+ vma->vm_mm->call_dl_resolve = 0UL;
9729+}
9730+
9731+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9732+{
9733+ unsigned int *kaddr;
9734+
9735+ vmf->page = alloc_page(GFP_HIGHUSER);
9736+ if (!vmf->page)
9737+ return VM_FAULT_OOM;
9738+
9739+ kaddr = kmap(vmf->page);
9740+ memset(kaddr, 0, PAGE_SIZE);
9741+ kaddr[0] = 0x9DE3BFA8U; /* save */
9742+ flush_dcache_page(vmf->page);
9743+ kunmap(vmf->page);
9744+ return VM_FAULT_MAJOR;
9745+}
9746+
9747+static const struct vm_operations_struct pax_vm_ops = {
9748+ .close = pax_emuplt_close,
9749+ .fault = pax_emuplt_fault
9750+};
9751+
9752+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
9753+{
9754+ int ret;
9755+
9756+ INIT_LIST_HEAD(&vma->anon_vma_chain);
9757+ vma->vm_mm = current->mm;
9758+ vma->vm_start = addr;
9759+ vma->vm_end = addr + PAGE_SIZE;
9760+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
9761+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
9762+ vma->vm_ops = &pax_vm_ops;
9763+
9764+ ret = insert_vm_struct(current->mm, vma);
9765+ if (ret)
9766+ return ret;
9767+
9768+ ++current->mm->total_vm;
9769+ return 0;
9770+}
9771+#endif
9772+
9773+/*
9774+ * PaX: decide what to do with offenders (regs->tpc = fault address)
9775+ *
9776+ * returns 1 when task should be killed
9777+ * 2 when patched PLT trampoline was detected
9778+ * 3 when unpatched PLT trampoline was detected
9779+ */
9780+static int pax_handle_fetch_fault(struct pt_regs *regs)
9781+{
9782+
9783+#ifdef CONFIG_PAX_EMUPLT
9784+ int err;
9785+
9786+ do { /* PaX: patched PLT emulation #1 */
9787+ unsigned int sethi1, sethi2, jmpl;
9788+
9789+ err = get_user(sethi1, (unsigned int *)regs->tpc);
9790+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
9791+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
9792+
9793+ if (err)
9794+ break;
9795+
9796+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
9797+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
9798+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
9799+ {
9800+ unsigned long addr;
9801+
9802+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
9803+ addr = regs->u_regs[UREG_G1];
9804+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
9805+
9806+ if (test_thread_flag(TIF_32BIT))
9807+ addr &= 0xFFFFFFFFUL;
9808+
9809+ regs->tpc = addr;
9810+ regs->tnpc = addr+4;
9811+ return 2;
9812+ }
9813+ } while (0);
9814+
9815+ do { /* PaX: patched PLT emulation #2 */
9816+ unsigned int ba;
9817+
9818+ err = get_user(ba, (unsigned int *)regs->tpc);
9819+
9820+ if (err)
9821+ break;
9822+
9823+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
9824+ unsigned long addr;
9825+
9826+ if ((ba & 0xFFC00000U) == 0x30800000U)
9827+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
9828+ else
9829+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
9830+
9831+ if (test_thread_flag(TIF_32BIT))
9832+ addr &= 0xFFFFFFFFUL;
9833+
9834+ regs->tpc = addr;
9835+ regs->tnpc = addr+4;
9836+ return 2;
9837+ }
9838+ } while (0);
9839+
9840+ do { /* PaX: patched PLT emulation #3 */
9841+ unsigned int sethi, bajmpl, nop;
9842+
9843+ err = get_user(sethi, (unsigned int *)regs->tpc);
9844+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
9845+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
9846+
9847+ if (err)
9848+ break;
9849+
9850+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9851+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
9852+ nop == 0x01000000U)
9853+ {
9854+ unsigned long addr;
9855+
9856+ addr = (sethi & 0x003FFFFFU) << 10;
9857+ regs->u_regs[UREG_G1] = addr;
9858+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
9859+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
9860+ else
9861+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
9862+
9863+ if (test_thread_flag(TIF_32BIT))
9864+ addr &= 0xFFFFFFFFUL;
9865+
9866+ regs->tpc = addr;
9867+ regs->tnpc = addr+4;
9868+ return 2;
9869+ }
9870+ } while (0);
9871+
9872+ do { /* PaX: patched PLT emulation #4 */
9873+ unsigned int sethi, mov1, call, mov2;
9874+
9875+ err = get_user(sethi, (unsigned int *)regs->tpc);
9876+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
9877+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
9878+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
9879+
9880+ if (err)
9881+ break;
9882+
9883+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9884+ mov1 == 0x8210000FU &&
9885+ (call & 0xC0000000U) == 0x40000000U &&
9886+ mov2 == 0x9E100001U)
9887+ {
9888+ unsigned long addr;
9889+
9890+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
9891+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
9892+
9893+ if (test_thread_flag(TIF_32BIT))
9894+ addr &= 0xFFFFFFFFUL;
9895+
9896+ regs->tpc = addr;
9897+ regs->tnpc = addr+4;
9898+ return 2;
9899+ }
9900+ } while (0);
9901+
9902+ do { /* PaX: patched PLT emulation #5 */
9903+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
9904+
9905+ err = get_user(sethi, (unsigned int *)regs->tpc);
9906+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
9907+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
9908+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
9909+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
9910+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
9911+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
9912+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
9913+
9914+ if (err)
9915+ break;
9916+
9917+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9918+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
9919+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
9920+ (or1 & 0xFFFFE000U) == 0x82106000U &&
9921+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
9922+ sllx == 0x83287020U &&
9923+ jmpl == 0x81C04005U &&
9924+ nop == 0x01000000U)
9925+ {
9926+ unsigned long addr;
9927+
9928+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
9929+ regs->u_regs[UREG_G1] <<= 32;
9930+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
9931+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
9932+ regs->tpc = addr;
9933+ regs->tnpc = addr+4;
9934+ return 2;
9935+ }
9936+ } while (0);
9937+
9938+ do { /* PaX: patched PLT emulation #6 */
9939+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
9940+
9941+ err = get_user(sethi, (unsigned int *)regs->tpc);
9942+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
9943+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
9944+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
9945+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
9946+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
9947+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
9948+
9949+ if (err)
9950+ break;
9951+
9952+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9953+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
9954+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
9955+ sllx == 0x83287020U &&
9956+ (or & 0xFFFFE000U) == 0x8A116000U &&
9957+ jmpl == 0x81C04005U &&
9958+ nop == 0x01000000U)
9959+ {
9960+ unsigned long addr;
9961+
9962+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
9963+ regs->u_regs[UREG_G1] <<= 32;
9964+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
9965+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
9966+ regs->tpc = addr;
9967+ regs->tnpc = addr+4;
9968+ return 2;
9969+ }
9970+ } while (0);
9971+
9972+ do { /* PaX: unpatched PLT emulation step 1 */
9973+ unsigned int sethi, ba, nop;
9974+
9975+ err = get_user(sethi, (unsigned int *)regs->tpc);
9976+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
9977+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
9978+
9979+ if (err)
9980+ break;
9981+
9982+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9983+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
9984+ nop == 0x01000000U)
9985+ {
9986+ unsigned long addr;
9987+ unsigned int save, call;
9988+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
9989+
9990+ if ((ba & 0xFFC00000U) == 0x30800000U)
9991+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
9992+ else
9993+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
9994+
9995+ if (test_thread_flag(TIF_32BIT))
9996+ addr &= 0xFFFFFFFFUL;
9997+
9998+ err = get_user(save, (unsigned int *)addr);
9999+ err |= get_user(call, (unsigned int *)(addr+4));
10000+ err |= get_user(nop, (unsigned int *)(addr+8));
10001+ if (err)
10002+ break;
10003+
10004+#ifdef CONFIG_PAX_DLRESOLVE
10005+ if (save == 0x9DE3BFA8U &&
10006+ (call & 0xC0000000U) == 0x40000000U &&
10007+ nop == 0x01000000U)
10008+ {
10009+ struct vm_area_struct *vma;
10010+ unsigned long call_dl_resolve;
10011+
10012+ down_read(&current->mm->mmap_sem);
10013+ call_dl_resolve = current->mm->call_dl_resolve;
10014+ up_read(&current->mm->mmap_sem);
10015+ if (likely(call_dl_resolve))
10016+ goto emulate;
10017+
10018+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
10019+
10020+ down_write(&current->mm->mmap_sem);
10021+ if (current->mm->call_dl_resolve) {
10022+ call_dl_resolve = current->mm->call_dl_resolve;
10023+ up_write(&current->mm->mmap_sem);
10024+ if (vma)
10025+ kmem_cache_free(vm_area_cachep, vma);
10026+ goto emulate;
10027+ }
10028+
10029+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
10030+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
10031+ up_write(&current->mm->mmap_sem);
10032+ if (vma)
10033+ kmem_cache_free(vm_area_cachep, vma);
10034+ return 1;
10035+ }
10036+
10037+ if (pax_insert_vma(vma, call_dl_resolve)) {
10038+ up_write(&current->mm->mmap_sem);
10039+ kmem_cache_free(vm_area_cachep, vma);
10040+ return 1;
10041+ }
10042+
10043+ current->mm->call_dl_resolve = call_dl_resolve;
10044+ up_write(&current->mm->mmap_sem);
10045+
10046+emulate:
10047+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
10048+ regs->tpc = call_dl_resolve;
10049+ regs->tnpc = addr+4;
10050+ return 3;
10051+ }
10052+#endif
10053+
10054+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
10055+ if ((save & 0xFFC00000U) == 0x05000000U &&
10056+ (call & 0xFFFFE000U) == 0x85C0A000U &&
10057+ nop == 0x01000000U)
10058+ {
10059+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
10060+ regs->u_regs[UREG_G2] = addr + 4;
10061+ addr = (save & 0x003FFFFFU) << 10;
10062+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
10063+
10064+ if (test_thread_flag(TIF_32BIT))
10065+ addr &= 0xFFFFFFFFUL;
10066+
10067+ regs->tpc = addr;
10068+ regs->tnpc = addr+4;
10069+ return 3;
10070+ }
10071+
10072+ /* PaX: 64-bit PLT stub */
10073+ err = get_user(sethi1, (unsigned int *)addr);
10074+ err |= get_user(sethi2, (unsigned int *)(addr+4));
10075+ err |= get_user(or1, (unsigned int *)(addr+8));
10076+ err |= get_user(or2, (unsigned int *)(addr+12));
10077+ err |= get_user(sllx, (unsigned int *)(addr+16));
10078+ err |= get_user(add, (unsigned int *)(addr+20));
10079+ err |= get_user(jmpl, (unsigned int *)(addr+24));
10080+ err |= get_user(nop, (unsigned int *)(addr+28));
10081+ if (err)
10082+ break;
10083+
10084+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
10085+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
10086+ (or1 & 0xFFFFE000U) == 0x88112000U &&
10087+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
10088+ sllx == 0x89293020U &&
10089+ add == 0x8A010005U &&
10090+ jmpl == 0x89C14000U &&
10091+ nop == 0x01000000U)
10092+ {
10093+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
10094+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
10095+ regs->u_regs[UREG_G4] <<= 32;
10096+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
10097+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
10098+ regs->u_regs[UREG_G4] = addr + 24;
10099+ addr = regs->u_regs[UREG_G5];
10100+ regs->tpc = addr;
10101+ regs->tnpc = addr+4;
10102+ return 3;
10103+ }
10104+ }
10105+ } while (0);
10106+
10107+#ifdef CONFIG_PAX_DLRESOLVE
10108+ do { /* PaX: unpatched PLT emulation step 2 */
10109+ unsigned int save, call, nop;
10110+
10111+ err = get_user(save, (unsigned int *)(regs->tpc-4));
10112+ err |= get_user(call, (unsigned int *)regs->tpc);
10113+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
10114+ if (err)
10115+ break;
10116+
10117+ if (save == 0x9DE3BFA8U &&
10118+ (call & 0xC0000000U) == 0x40000000U &&
10119+ nop == 0x01000000U)
10120+ {
10121+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
10122+
10123+ if (test_thread_flag(TIF_32BIT))
10124+ dl_resolve &= 0xFFFFFFFFUL;
10125+
10126+ regs->u_regs[UREG_RETPC] = regs->tpc;
10127+ regs->tpc = dl_resolve;
10128+ regs->tnpc = dl_resolve+4;
10129+ return 3;
10130+ }
10131+ } while (0);
10132+#endif
10133+
10134+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
10135+ unsigned int sethi, ba, nop;
10136+
10137+ err = get_user(sethi, (unsigned int *)regs->tpc);
10138+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
10139+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
10140+
10141+ if (err)
10142+ break;
10143+
10144+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10145+ (ba & 0xFFF00000U) == 0x30600000U &&
10146+ nop == 0x01000000U)
10147+ {
10148+ unsigned long addr;
10149+
10150+ addr = (sethi & 0x003FFFFFU) << 10;
10151+ regs->u_regs[UREG_G1] = addr;
10152+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
10153+
10154+ if (test_thread_flag(TIF_32BIT))
10155+ addr &= 0xFFFFFFFFUL;
10156+
10157+ regs->tpc = addr;
10158+ regs->tnpc = addr+4;
10159+ return 2;
10160+ }
10161+ } while (0);
10162+
10163+#endif
10164+
10165+ return 1;
10166+}
10167+
10168+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
10169+{
10170+ unsigned long i;
10171+
10172+ printk(KERN_ERR "PAX: bytes at PC: ");
10173+ for (i = 0; i < 8; i++) {
10174+ unsigned int c;
10175+ if (get_user(c, (unsigned int *)pc+i))
10176+ printk(KERN_CONT "???????? ");
10177+ else
10178+ printk(KERN_CONT "%08x ", c);
10179+ }
10180+ printk("\n");
10181+}
10182+#endif
10183+
10184 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
10185 {
10186 struct mm_struct *mm = current->mm;
10187@@ -341,6 +804,29 @@ retry:
10188 if (!vma)
10189 goto bad_area;
10190
10191+#ifdef CONFIG_PAX_PAGEEXEC
10192+ /* PaX: detect ITLB misses on non-exec pages */
10193+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
10194+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
10195+ {
10196+ if (address != regs->tpc)
10197+ goto good_area;
10198+
10199+ up_read(&mm->mmap_sem);
10200+ switch (pax_handle_fetch_fault(regs)) {
10201+
10202+#ifdef CONFIG_PAX_EMUPLT
10203+ case 2:
10204+ case 3:
10205+ return;
10206+#endif
10207+
10208+ }
10209+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
10210+ do_group_exit(SIGKILL);
10211+ }
10212+#endif
10213+
10214 /* Pure DTLB misses do not tell us whether the fault causing
10215 * load/store/atomic was a write or not, it only says that there
10216 * was no match. So in such a case we (carefully) read the
10217diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
10218index d2b5944..bd813f2 100644
10219--- a/arch/sparc/mm/hugetlbpage.c
10220+++ b/arch/sparc/mm/hugetlbpage.c
10221@@ -38,7 +38,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
10222
10223 info.flags = 0;
10224 info.length = len;
10225- info.low_limit = TASK_UNMAPPED_BASE;
10226+ info.low_limit = mm->mmap_base;
10227 info.high_limit = min(task_size, VA_EXCLUDE_START);
10228 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
10229 info.align_offset = 0;
10230@@ -47,6 +47,12 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
10231 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
10232 VM_BUG_ON(addr != -ENOMEM);
10233 info.low_limit = VA_EXCLUDE_END;
10234+
10235+#ifdef CONFIG_PAX_RANDMMAP
10236+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10237+ info.low_limit += mm->delta_mmap;
10238+#endif
10239+
10240 info.high_limit = task_size;
10241 addr = vm_unmapped_area(&info);
10242 }
10243@@ -85,6 +91,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10244 VM_BUG_ON(addr != -ENOMEM);
10245 info.flags = 0;
10246 info.low_limit = TASK_UNMAPPED_BASE;
10247+
10248+#ifdef CONFIG_PAX_RANDMMAP
10249+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10250+ info.low_limit += mm->delta_mmap;
10251+#endif
10252+
10253 info.high_limit = STACK_TOP32;
10254 addr = vm_unmapped_area(&info);
10255 }
10256@@ -99,6 +111,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
10257 struct mm_struct *mm = current->mm;
10258 struct vm_area_struct *vma;
10259 unsigned long task_size = TASK_SIZE;
10260+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
10261
10262 if (test_thread_flag(TIF_32BIT))
10263 task_size = STACK_TOP32;
10264@@ -114,11 +127,14 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
10265 return addr;
10266 }
10267
10268+#ifdef CONFIG_PAX_RANDMMAP
10269+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10270+#endif
10271+
10272 if (addr) {
10273 addr = ALIGN(addr, HPAGE_SIZE);
10274 vma = find_vma(mm, addr);
10275- if (task_size - len >= addr &&
10276- (!vma || addr + len <= vma->vm_start))
10277+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10278 return addr;
10279 }
10280 if (mm->get_unmapped_area == arch_get_unmapped_area)
10281diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
10282index f4500c6..889656c 100644
10283--- a/arch/tile/include/asm/atomic_64.h
10284+++ b/arch/tile/include/asm/atomic_64.h
10285@@ -143,6 +143,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
10286
10287 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
10288
10289+#define atomic64_read_unchecked(v) atomic64_read(v)
10290+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
10291+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
10292+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
10293+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
10294+#define atomic64_inc_unchecked(v) atomic64_inc(v)
10295+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
10296+#define atomic64_dec_unchecked(v) atomic64_dec(v)
10297+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
10298+
10299 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
10300 #define smp_mb__before_atomic_dec() smp_mb()
10301 #define smp_mb__after_atomic_dec() smp_mb()
10302diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
10303index a9a5299..0fce79e 100644
10304--- a/arch/tile/include/asm/cache.h
10305+++ b/arch/tile/include/asm/cache.h
10306@@ -15,11 +15,12 @@
10307 #ifndef _ASM_TILE_CACHE_H
10308 #define _ASM_TILE_CACHE_H
10309
10310+#include <linux/const.h>
10311 #include <arch/chip.h>
10312
10313 /* bytes per L1 data cache line */
10314 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
10315-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10316+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10317
10318 /* bytes per L2 cache line */
10319 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
10320diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
10321index 9ab078a..d6635c2 100644
10322--- a/arch/tile/include/asm/uaccess.h
10323+++ b/arch/tile/include/asm/uaccess.h
10324@@ -403,9 +403,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
10325 const void __user *from,
10326 unsigned long n)
10327 {
10328- int sz = __compiletime_object_size(to);
10329+ size_t sz = __compiletime_object_size(to);
10330
10331- if (likely(sz == -1 || sz >= n))
10332+ if (likely(sz == (size_t)-1 || sz >= n))
10333 n = _copy_from_user(to, from, n);
10334 else
10335 copy_from_user_overflow();
10336diff --git a/arch/um/Makefile b/arch/um/Makefile
10337index 133f7de..1d6f2f1 100644
10338--- a/arch/um/Makefile
10339+++ b/arch/um/Makefile
10340@@ -62,6 +62,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
10341 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
10342 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
10343
10344+ifdef CONSTIFY_PLUGIN
10345+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
10346+endif
10347+
10348 #This will adjust *FLAGS accordingly to the platform.
10349 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
10350
10351diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
10352index 19e1bdd..3665b77 100644
10353--- a/arch/um/include/asm/cache.h
10354+++ b/arch/um/include/asm/cache.h
10355@@ -1,6 +1,7 @@
10356 #ifndef __UM_CACHE_H
10357 #define __UM_CACHE_H
10358
10359+#include <linux/const.h>
10360
10361 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
10362 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
10363@@ -12,6 +13,6 @@
10364 # define L1_CACHE_SHIFT 5
10365 #endif
10366
10367-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10368+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10369
10370 #endif
10371diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
10372index 2e0a6b1..a64d0f5 100644
10373--- a/arch/um/include/asm/kmap_types.h
10374+++ b/arch/um/include/asm/kmap_types.h
10375@@ -8,6 +8,6 @@
10376
10377 /* No more #include "asm/arch/kmap_types.h" ! */
10378
10379-#define KM_TYPE_NR 14
10380+#define KM_TYPE_NR 15
10381
10382 #endif
10383diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
10384index 5ff53d9..5850cdf 100644
10385--- a/arch/um/include/asm/page.h
10386+++ b/arch/um/include/asm/page.h
10387@@ -14,6 +14,9 @@
10388 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
10389 #define PAGE_MASK (~(PAGE_SIZE-1))
10390
10391+#define ktla_ktva(addr) (addr)
10392+#define ktva_ktla(addr) (addr)
10393+
10394 #ifndef __ASSEMBLY__
10395
10396 struct page;
10397diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
10398index 0032f92..cd151e0 100644
10399--- a/arch/um/include/asm/pgtable-3level.h
10400+++ b/arch/um/include/asm/pgtable-3level.h
10401@@ -58,6 +58,7 @@
10402 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
10403 #define pud_populate(mm, pud, pmd) \
10404 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
10405+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
10406
10407 #ifdef CONFIG_64BIT
10408 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
10409diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
10410index b462b13..e7a19aa 100644
10411--- a/arch/um/kernel/process.c
10412+++ b/arch/um/kernel/process.c
10413@@ -386,22 +386,6 @@ int singlestepping(void * t)
10414 return 2;
10415 }
10416
10417-/*
10418- * Only x86 and x86_64 have an arch_align_stack().
10419- * All other arches have "#define arch_align_stack(x) (x)"
10420- * in their asm/system.h
10421- * As this is included in UML from asm-um/system-generic.h,
10422- * we can use it to behave as the subarch does.
10423- */
10424-#ifndef arch_align_stack
10425-unsigned long arch_align_stack(unsigned long sp)
10426-{
10427- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
10428- sp -= get_random_int() % 8192;
10429- return sp & ~0xf;
10430-}
10431-#endif
10432-
10433 unsigned long get_wchan(struct task_struct *p)
10434 {
10435 unsigned long stack_page, sp, ip;
10436diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
10437index ad8f795..2c7eec6 100644
10438--- a/arch/unicore32/include/asm/cache.h
10439+++ b/arch/unicore32/include/asm/cache.h
10440@@ -12,8 +12,10 @@
10441 #ifndef __UNICORE_CACHE_H__
10442 #define __UNICORE_CACHE_H__
10443
10444-#define L1_CACHE_SHIFT (5)
10445-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10446+#include <linux/const.h>
10447+
10448+#define L1_CACHE_SHIFT 5
10449+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10450
10451 /*
10452 * Memory returned by kmalloc() may be used for DMA, so we must make
10453diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
10454index 0694d09..b58b3aa 100644
10455--- a/arch/x86/Kconfig
10456+++ b/arch/x86/Kconfig
10457@@ -238,7 +238,7 @@ config X86_HT
10458
10459 config X86_32_LAZY_GS
10460 def_bool y
10461- depends on X86_32 && !CC_STACKPROTECTOR
10462+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10463
10464 config ARCH_HWEIGHT_CFLAGS
10465 string
10466@@ -1031,6 +1031,7 @@ config MICROCODE_OLD_INTERFACE
10467
10468 config X86_MSR
10469 tristate "/dev/cpu/*/msr - Model-specific register support"
10470+ depends on !GRKERNSEC_KMEM
10471 ---help---
10472 This device gives privileged processes access to the x86
10473 Model-Specific Registers (MSRs). It is a character device with
10474@@ -1054,7 +1055,7 @@ choice
10475
10476 config NOHIGHMEM
10477 bool "off"
10478- depends on !X86_NUMAQ
10479+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10480 ---help---
10481 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
10482 However, the address space of 32-bit x86 processors is only 4
10483@@ -1091,7 +1092,7 @@ config NOHIGHMEM
10484
10485 config HIGHMEM4G
10486 bool "4GB"
10487- depends on !X86_NUMAQ
10488+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10489 ---help---
10490 Select this if you have a 32-bit processor and between 1 and 4
10491 gigabytes of physical RAM.
10492@@ -1145,7 +1146,7 @@ config PAGE_OFFSET
10493 hex
10494 default 0xB0000000 if VMSPLIT_3G_OPT
10495 default 0x80000000 if VMSPLIT_2G
10496- default 0x78000000 if VMSPLIT_2G_OPT
10497+ default 0x70000000 if VMSPLIT_2G_OPT
10498 default 0x40000000 if VMSPLIT_1G
10499 default 0xC0000000
10500 depends on X86_32
10501@@ -1542,6 +1543,7 @@ config SECCOMP
10502
10503 config CC_STACKPROTECTOR
10504 bool "Enable -fstack-protector buffer overflow detection"
10505+ depends on X86_64 || !PAX_MEMORY_UDEREF
10506 ---help---
10507 This option turns on the -fstack-protector GCC feature. This
10508 feature puts, at the beginning of functions, a canary value on
10509@@ -1599,6 +1601,7 @@ config KEXEC_JUMP
10510 config PHYSICAL_START
10511 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
10512 default "0x1000000"
10513+ range 0x400000 0x40000000
10514 ---help---
10515 This gives the physical address where the kernel is loaded.
10516
10517@@ -1662,6 +1665,7 @@ config X86_NEED_RELOCS
10518 config PHYSICAL_ALIGN
10519 hex "Alignment value to which kernel should be aligned" if X86_32
10520 default "0x1000000"
10521+ range 0x400000 0x1000000 if PAX_KERNEXEC
10522 range 0x2000 0x1000000
10523 ---help---
10524 This value puts the alignment restrictions on physical address
10525@@ -1737,9 +1741,10 @@ config DEBUG_HOTPLUG_CPU0
10526 If unsure, say N.
10527
10528 config COMPAT_VDSO
10529- def_bool y
10530+ def_bool n
10531 prompt "Compat VDSO support"
10532 depends on X86_32 || IA32_EMULATION
10533+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
10534 ---help---
10535 Map the 32-bit VDSO to the predictable old-style address too.
10536
10537diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
10538index c026cca..14657ae 100644
10539--- a/arch/x86/Kconfig.cpu
10540+++ b/arch/x86/Kconfig.cpu
10541@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
10542
10543 config X86_F00F_BUG
10544 def_bool y
10545- depends on M586MMX || M586TSC || M586 || M486
10546+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
10547
10548 config X86_INVD_BUG
10549 def_bool y
10550@@ -327,7 +327,7 @@ config X86_INVD_BUG
10551
10552 config X86_ALIGNMENT_16
10553 def_bool y
10554- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10555+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10556
10557 config X86_INTEL_USERCOPY
10558 def_bool y
10559@@ -373,7 +373,7 @@ config X86_CMPXCHG64
10560 # generates cmov.
10561 config X86_CMOV
10562 def_bool y
10563- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10564+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10565
10566 config X86_MINIMUM_CPU_FAMILY
10567 int
10568diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
10569index b322f12..652d0d9 100644
10570--- a/arch/x86/Kconfig.debug
10571+++ b/arch/x86/Kconfig.debug
10572@@ -84,7 +84,7 @@ config X86_PTDUMP
10573 config DEBUG_RODATA
10574 bool "Write protect kernel read-only data structures"
10575 default y
10576- depends on DEBUG_KERNEL
10577+ depends on DEBUG_KERNEL && BROKEN
10578 ---help---
10579 Mark the kernel read-only data as write-protected in the pagetables,
10580 in order to catch accidental (and incorrect) writes to such const
10581@@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
10582
10583 config DEBUG_SET_MODULE_RONX
10584 bool "Set loadable kernel module data as NX and text as RO"
10585- depends on MODULES
10586+ depends on MODULES && BROKEN
10587 ---help---
10588 This option helps catch unintended modifications to loadable
10589 kernel module's text and read-only data. It also prevents execution
10590@@ -294,7 +294,7 @@ config OPTIMIZE_INLINING
10591
10592 config DEBUG_STRICT_USER_COPY_CHECKS
10593 bool "Strict copy size checks"
10594- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
10595+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
10596 ---help---
10597 Enabling this option turns a certain set of sanity checks for user
10598 copy operations into compile time failures.
10599diff --git a/arch/x86/Makefile b/arch/x86/Makefile
10600index e71fc42..7829607 100644
10601--- a/arch/x86/Makefile
10602+++ b/arch/x86/Makefile
10603@@ -50,6 +50,7 @@ else
10604 UTS_MACHINE := x86_64
10605 CHECKFLAGS += -D__x86_64__ -m64
10606
10607+ biarch := $(call cc-option,-m64)
10608 KBUILD_AFLAGS += -m64
10609 KBUILD_CFLAGS += -m64
10610
10611@@ -230,3 +231,12 @@ define archhelp
10612 echo ' FDARGS="..." arguments for the booted kernel'
10613 echo ' FDINITRD=file initrd for the booted kernel'
10614 endef
10615+
10616+define OLD_LD
10617+
10618+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
10619+*** Please upgrade your binutils to 2.18 or newer
10620+endef
10621+
10622+archprepare:
10623+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
10624diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
10625index 379814b..add62ce 100644
10626--- a/arch/x86/boot/Makefile
10627+++ b/arch/x86/boot/Makefile
10628@@ -65,6 +65,9 @@ KBUILD_CFLAGS := $(USERINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
10629 $(call cc-option, -fno-stack-protector) \
10630 $(call cc-option, -mpreferred-stack-boundary=2)
10631 KBUILD_CFLAGS += $(call cc-option, -m32)
10632+ifdef CONSTIFY_PLUGIN
10633+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
10634+endif
10635 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10636 GCOV_PROFILE := n
10637
10638diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
10639index 878e4b9..20537ab 100644
10640--- a/arch/x86/boot/bitops.h
10641+++ b/arch/x86/boot/bitops.h
10642@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
10643 u8 v;
10644 const u32 *p = (const u32 *)addr;
10645
10646- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
10647+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
10648 return v;
10649 }
10650
10651@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
10652
10653 static inline void set_bit(int nr, void *addr)
10654 {
10655- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
10656+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
10657 }
10658
10659 #endif /* BOOT_BITOPS_H */
10660diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
10661index 18997e5..83d9c67 100644
10662--- a/arch/x86/boot/boot.h
10663+++ b/arch/x86/boot/boot.h
10664@@ -85,7 +85,7 @@ static inline void io_delay(void)
10665 static inline u16 ds(void)
10666 {
10667 u16 seg;
10668- asm("movw %%ds,%0" : "=rm" (seg));
10669+ asm volatile("movw %%ds,%0" : "=rm" (seg));
10670 return seg;
10671 }
10672
10673@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
10674 static inline int memcmp(const void *s1, const void *s2, size_t len)
10675 {
10676 u8 diff;
10677- asm("repe; cmpsb; setnz %0"
10678+ asm volatile("repe; cmpsb; setnz %0"
10679 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
10680 return diff;
10681 }
10682diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
10683index 8a84501..b2d165f 100644
10684--- a/arch/x86/boot/compressed/Makefile
10685+++ b/arch/x86/boot/compressed/Makefile
10686@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
10687 KBUILD_CFLAGS += $(cflags-y)
10688 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
10689 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
10690+ifdef CONSTIFY_PLUGIN
10691+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
10692+endif
10693
10694 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10695 GCOV_PROFILE := n
10696diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
10697index c205035..5853587 100644
10698--- a/arch/x86/boot/compressed/eboot.c
10699+++ b/arch/x86/boot/compressed/eboot.c
10700@@ -150,7 +150,6 @@ again:
10701 *addr = max_addr;
10702 }
10703
10704-free_pool:
10705 efi_call_phys1(sys_table->boottime->free_pool, map);
10706
10707 fail:
10708@@ -214,7 +213,6 @@ static efi_status_t low_alloc(unsigned long size, unsigned long align,
10709 if (i == map_size / desc_size)
10710 status = EFI_NOT_FOUND;
10711
10712-free_pool:
10713 efi_call_phys1(sys_table->boottime->free_pool, map);
10714 fail:
10715 return status;
10716diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
10717index 1e3184f..0d11e2e 100644
10718--- a/arch/x86/boot/compressed/head_32.S
10719+++ b/arch/x86/boot/compressed/head_32.S
10720@@ -118,7 +118,7 @@ preferred_addr:
10721 notl %eax
10722 andl %eax, %ebx
10723 #else
10724- movl $LOAD_PHYSICAL_ADDR, %ebx
10725+ movl $____LOAD_PHYSICAL_ADDR, %ebx
10726 #endif
10727
10728 /* Target address to relocate to for decompression */
10729@@ -204,7 +204,7 @@ relocated:
10730 * and where it was actually loaded.
10731 */
10732 movl %ebp, %ebx
10733- subl $LOAD_PHYSICAL_ADDR, %ebx
10734+ subl $____LOAD_PHYSICAL_ADDR, %ebx
10735 jz 2f /* Nothing to be done if loaded at compiled addr. */
10736 /*
10737 * Process relocations.
10738@@ -212,8 +212,7 @@ relocated:
10739
10740 1: subl $4, %edi
10741 movl (%edi), %ecx
10742- testl %ecx, %ecx
10743- jz 2f
10744+ jecxz 2f
10745 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
10746 jmp 1b
10747 2:
10748diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
10749index f5d1aaa..cce11dc 100644
10750--- a/arch/x86/boot/compressed/head_64.S
10751+++ b/arch/x86/boot/compressed/head_64.S
10752@@ -91,7 +91,7 @@ ENTRY(startup_32)
10753 notl %eax
10754 andl %eax, %ebx
10755 #else
10756- movl $LOAD_PHYSICAL_ADDR, %ebx
10757+ movl $____LOAD_PHYSICAL_ADDR, %ebx
10758 #endif
10759
10760 /* Target address to relocate to for decompression */
10761@@ -273,7 +273,7 @@ preferred_addr:
10762 notq %rax
10763 andq %rax, %rbp
10764 #else
10765- movq $LOAD_PHYSICAL_ADDR, %rbp
10766+ movq $____LOAD_PHYSICAL_ADDR, %rbp
10767 #endif
10768
10769 /* Target address to relocate to for decompression */
10770diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
10771index 88f7ff6..ed695dd 100644
10772--- a/arch/x86/boot/compressed/misc.c
10773+++ b/arch/x86/boot/compressed/misc.c
10774@@ -303,7 +303,7 @@ static void parse_elf(void *output)
10775 case PT_LOAD:
10776 #ifdef CONFIG_RELOCATABLE
10777 dest = output;
10778- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
10779+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
10780 #else
10781 dest = (void *)(phdr->p_paddr);
10782 #endif
10783@@ -352,7 +352,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
10784 error("Destination address too large");
10785 #endif
10786 #ifndef CONFIG_RELOCATABLE
10787- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
10788+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
10789 error("Wrong destination address");
10790 #endif
10791
10792diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
10793index 4d3ff03..e4972ff 100644
10794--- a/arch/x86/boot/cpucheck.c
10795+++ b/arch/x86/boot/cpucheck.c
10796@@ -74,7 +74,7 @@ static int has_fpu(void)
10797 u16 fcw = -1, fsw = -1;
10798 u32 cr0;
10799
10800- asm("movl %%cr0,%0" : "=r" (cr0));
10801+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
10802 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
10803 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
10804 asm volatile("movl %0,%%cr0" : : "r" (cr0));
10805@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
10806 {
10807 u32 f0, f1;
10808
10809- asm("pushfl ; "
10810+ asm volatile("pushfl ; "
10811 "pushfl ; "
10812 "popl %0 ; "
10813 "movl %0,%1 ; "
10814@@ -115,7 +115,7 @@ static void get_flags(void)
10815 set_bit(X86_FEATURE_FPU, cpu.flags);
10816
10817 if (has_eflag(X86_EFLAGS_ID)) {
10818- asm("cpuid"
10819+ asm volatile("cpuid"
10820 : "=a" (max_intel_level),
10821 "=b" (cpu_vendor[0]),
10822 "=d" (cpu_vendor[1]),
10823@@ -124,7 +124,7 @@ static void get_flags(void)
10824
10825 if (max_intel_level >= 0x00000001 &&
10826 max_intel_level <= 0x0000ffff) {
10827- asm("cpuid"
10828+ asm volatile("cpuid"
10829 : "=a" (tfms),
10830 "=c" (cpu.flags[4]),
10831 "=d" (cpu.flags[0])
10832@@ -136,7 +136,7 @@ static void get_flags(void)
10833 cpu.model += ((tfms >> 16) & 0xf) << 4;
10834 }
10835
10836- asm("cpuid"
10837+ asm volatile("cpuid"
10838 : "=a" (max_amd_level)
10839 : "a" (0x80000000)
10840 : "ebx", "ecx", "edx");
10841@@ -144,7 +144,7 @@ static void get_flags(void)
10842 if (max_amd_level >= 0x80000001 &&
10843 max_amd_level <= 0x8000ffff) {
10844 u32 eax = 0x80000001;
10845- asm("cpuid"
10846+ asm volatile("cpuid"
10847 : "+a" (eax),
10848 "=c" (cpu.flags[6]),
10849 "=d" (cpu.flags[1])
10850@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
10851 u32 ecx = MSR_K7_HWCR;
10852 u32 eax, edx;
10853
10854- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10855+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10856 eax &= ~(1 << 15);
10857- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10858+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10859
10860 get_flags(); /* Make sure it really did something */
10861 err = check_flags();
10862@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
10863 u32 ecx = MSR_VIA_FCR;
10864 u32 eax, edx;
10865
10866- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10867+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10868 eax |= (1<<1)|(1<<7);
10869- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10870+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10871
10872 set_bit(X86_FEATURE_CX8, cpu.flags);
10873 err = check_flags();
10874@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
10875 u32 eax, edx;
10876 u32 level = 1;
10877
10878- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10879- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
10880- asm("cpuid"
10881+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10882+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
10883+ asm volatile("cpuid"
10884 : "+a" (level), "=d" (cpu.flags[0])
10885 : : "ecx", "ebx");
10886- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10887+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10888
10889 err = check_flags();
10890 }
10891diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
10892index 944ce59..87ee37a 100644
10893--- a/arch/x86/boot/header.S
10894+++ b/arch/x86/boot/header.S
10895@@ -401,10 +401,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
10896 # single linked list of
10897 # struct setup_data
10898
10899-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
10900+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
10901
10902 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
10903+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
10904+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
10905+#else
10906 #define VO_INIT_SIZE (VO__end - VO__text)
10907+#endif
10908 #if ZO_INIT_SIZE > VO_INIT_SIZE
10909 #define INIT_SIZE ZO_INIT_SIZE
10910 #else
10911diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
10912index db75d07..8e6d0af 100644
10913--- a/arch/x86/boot/memory.c
10914+++ b/arch/x86/boot/memory.c
10915@@ -19,7 +19,7 @@
10916
10917 static int detect_memory_e820(void)
10918 {
10919- int count = 0;
10920+ unsigned int count = 0;
10921 struct biosregs ireg, oreg;
10922 struct e820entry *desc = boot_params.e820_map;
10923 static struct e820entry buf; /* static so it is zeroed */
10924diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
10925index 11e8c6e..fdbb1ed 100644
10926--- a/arch/x86/boot/video-vesa.c
10927+++ b/arch/x86/boot/video-vesa.c
10928@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
10929
10930 boot_params.screen_info.vesapm_seg = oreg.es;
10931 boot_params.screen_info.vesapm_off = oreg.di;
10932+ boot_params.screen_info.vesapm_size = oreg.cx;
10933 }
10934
10935 /*
10936diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
10937index 43eda28..5ab5fdb 100644
10938--- a/arch/x86/boot/video.c
10939+++ b/arch/x86/boot/video.c
10940@@ -96,7 +96,7 @@ static void store_mode_params(void)
10941 static unsigned int get_entry(void)
10942 {
10943 char entry_buf[4];
10944- int i, len = 0;
10945+ unsigned int i, len = 0;
10946 int key;
10947 unsigned int v;
10948
10949diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
10950index 5b577d5..3c1fed4 100644
10951--- a/arch/x86/crypto/aes-x86_64-asm_64.S
10952+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
10953@@ -8,6 +8,8 @@
10954 * including this sentence is retained in full.
10955 */
10956
10957+#include <asm/alternative-asm.h>
10958+
10959 .extern crypto_ft_tab
10960 .extern crypto_it_tab
10961 .extern crypto_fl_tab
10962@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
10963 je B192; \
10964 leaq 32(r9),r9;
10965
10966+#define ret pax_force_retaddr 0, 1; ret
10967+
10968 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
10969 movq r1,r2; \
10970 movq r3,r4; \
10971diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
10972index 3470624..201259d 100644
10973--- a/arch/x86/crypto/aesni-intel_asm.S
10974+++ b/arch/x86/crypto/aesni-intel_asm.S
10975@@ -31,6 +31,7 @@
10976
10977 #include <linux/linkage.h>
10978 #include <asm/inst.h>
10979+#include <asm/alternative-asm.h>
10980
10981 #ifdef __x86_64__
10982 .data
10983@@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
10984 pop %r14
10985 pop %r13
10986 pop %r12
10987+ pax_force_retaddr 0, 1
10988 ret
10989+ENDPROC(aesni_gcm_dec)
10990
10991
10992 /*****************************************************************************
10993@@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
10994 pop %r14
10995 pop %r13
10996 pop %r12
10997+ pax_force_retaddr 0, 1
10998 ret
10999+ENDPROC(aesni_gcm_enc)
11000
11001 #endif
11002
11003@@ -1714,6 +1719,7 @@ _key_expansion_256a:
11004 pxor %xmm1, %xmm0
11005 movaps %xmm0, (TKEYP)
11006 add $0x10, TKEYP
11007+ pax_force_retaddr_bts
11008 ret
11009
11010 .align 4
11011@@ -1738,6 +1744,7 @@ _key_expansion_192a:
11012 shufps $0b01001110, %xmm2, %xmm1
11013 movaps %xmm1, 0x10(TKEYP)
11014 add $0x20, TKEYP
11015+ pax_force_retaddr_bts
11016 ret
11017
11018 .align 4
11019@@ -1757,6 +1764,7 @@ _key_expansion_192b:
11020
11021 movaps %xmm0, (TKEYP)
11022 add $0x10, TKEYP
11023+ pax_force_retaddr_bts
11024 ret
11025
11026 .align 4
11027@@ -1769,6 +1777,7 @@ _key_expansion_256b:
11028 pxor %xmm1, %xmm2
11029 movaps %xmm2, (TKEYP)
11030 add $0x10, TKEYP
11031+ pax_force_retaddr_bts
11032 ret
11033
11034 /*
11035@@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
11036 #ifndef __x86_64__
11037 popl KEYP
11038 #endif
11039+ pax_force_retaddr 0, 1
11040 ret
11041+ENDPROC(aesni_set_key)
11042
11043 /*
11044 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
11045@@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
11046 popl KLEN
11047 popl KEYP
11048 #endif
11049+ pax_force_retaddr 0, 1
11050 ret
11051+ENDPROC(aesni_enc)
11052
11053 /*
11054 * _aesni_enc1: internal ABI
11055@@ -1959,6 +1972,7 @@ _aesni_enc1:
11056 AESENC KEY STATE
11057 movaps 0x70(TKEYP), KEY
11058 AESENCLAST KEY STATE
11059+ pax_force_retaddr_bts
11060 ret
11061
11062 /*
11063@@ -2067,6 +2081,7 @@ _aesni_enc4:
11064 AESENCLAST KEY STATE2
11065 AESENCLAST KEY STATE3
11066 AESENCLAST KEY STATE4
11067+ pax_force_retaddr_bts
11068 ret
11069
11070 /*
11071@@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
11072 popl KLEN
11073 popl KEYP
11074 #endif
11075+ pax_force_retaddr 0, 1
11076 ret
11077+ENDPROC(aesni_dec)
11078
11079 /*
11080 * _aesni_dec1: internal ABI
11081@@ -2146,6 +2163,7 @@ _aesni_dec1:
11082 AESDEC KEY STATE
11083 movaps 0x70(TKEYP), KEY
11084 AESDECLAST KEY STATE
11085+ pax_force_retaddr_bts
11086 ret
11087
11088 /*
11089@@ -2254,6 +2272,7 @@ _aesni_dec4:
11090 AESDECLAST KEY STATE2
11091 AESDECLAST KEY STATE3
11092 AESDECLAST KEY STATE4
11093+ pax_force_retaddr_bts
11094 ret
11095
11096 /*
11097@@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
11098 popl KEYP
11099 popl LEN
11100 #endif
11101+ pax_force_retaddr 0, 1
11102 ret
11103+ENDPROC(aesni_ecb_enc)
11104
11105 /*
11106 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
11107@@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
11108 popl KEYP
11109 popl LEN
11110 #endif
11111+ pax_force_retaddr 0, 1
11112 ret
11113+ENDPROC(aesni_ecb_dec)
11114
11115 /*
11116 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
11117@@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
11118 popl LEN
11119 popl IVP
11120 #endif
11121+ pax_force_retaddr 0, 1
11122 ret
11123+ENDPROC(aesni_cbc_enc)
11124
11125 /*
11126 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
11127@@ -2500,7 +2525,9 @@ ENTRY(aesni_cbc_dec)
11128 popl LEN
11129 popl IVP
11130 #endif
11131+ pax_force_retaddr 0, 1
11132 ret
11133+ENDPROC(aesni_cbc_dec)
11134
11135 #ifdef __x86_64__
11136 .align 16
11137@@ -2526,6 +2553,7 @@ _aesni_inc_init:
11138 mov $1, TCTR_LOW
11139 MOVQ_R64_XMM TCTR_LOW INC
11140 MOVQ_R64_XMM CTR TCTR_LOW
11141+ pax_force_retaddr_bts
11142 ret
11143
11144 /*
11145@@ -2554,6 +2582,7 @@ _aesni_inc:
11146 .Linc_low:
11147 movaps CTR, IV
11148 PSHUFB_XMM BSWAP_MASK IV
11149+ pax_force_retaddr_bts
11150 ret
11151
11152 /*
11153@@ -2614,5 +2643,7 @@ ENTRY(aesni_ctr_enc)
11154 .Lctr_enc_ret:
11155 movups IV, (IVP)
11156 .Lctr_enc_just_ret:
11157+ pax_force_retaddr 0, 1
11158 ret
11159+ENDPROC(aesni_ctr_enc)
11160 #endif
11161diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
11162index 391d245..67f35c2 100644
11163--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
11164+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
11165@@ -20,6 +20,8 @@
11166 *
11167 */
11168
11169+#include <asm/alternative-asm.h>
11170+
11171 .file "blowfish-x86_64-asm.S"
11172 .text
11173
11174@@ -151,9 +153,11 @@ __blowfish_enc_blk:
11175 jnz __enc_xor;
11176
11177 write_block();
11178+ pax_force_retaddr 0, 1
11179 ret;
11180 __enc_xor:
11181 xor_block();
11182+ pax_force_retaddr 0, 1
11183 ret;
11184
11185 .align 8
11186@@ -188,6 +192,7 @@ blowfish_dec_blk:
11187
11188 movq %r11, %rbp;
11189
11190+ pax_force_retaddr 0, 1
11191 ret;
11192
11193 /**********************************************************************
11194@@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
11195
11196 popq %rbx;
11197 popq %rbp;
11198+ pax_force_retaddr 0, 1
11199 ret;
11200
11201 __enc_xor4:
11202@@ -349,6 +355,7 @@ __enc_xor4:
11203
11204 popq %rbx;
11205 popq %rbp;
11206+ pax_force_retaddr 0, 1
11207 ret;
11208
11209 .align 8
11210@@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
11211 popq %rbx;
11212 popq %rbp;
11213
11214+ pax_force_retaddr 0, 1
11215 ret;
11216
11217diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
11218index 0b33743..7a56206 100644
11219--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
11220+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
11221@@ -20,6 +20,8 @@
11222 *
11223 */
11224
11225+#include <asm/alternative-asm.h>
11226+
11227 .file "camellia-x86_64-asm_64.S"
11228 .text
11229
11230@@ -229,12 +231,14 @@ __enc_done:
11231 enc_outunpack(mov, RT1);
11232
11233 movq RRBP, %rbp;
11234+ pax_force_retaddr 0, 1
11235 ret;
11236
11237 __enc_xor:
11238 enc_outunpack(xor, RT1);
11239
11240 movq RRBP, %rbp;
11241+ pax_force_retaddr 0, 1
11242 ret;
11243
11244 .global camellia_dec_blk;
11245@@ -275,6 +279,7 @@ __dec_rounds16:
11246 dec_outunpack();
11247
11248 movq RRBP, %rbp;
11249+ pax_force_retaddr 0, 1
11250 ret;
11251
11252 /**********************************************************************
11253@@ -468,6 +473,7 @@ __enc2_done:
11254
11255 movq RRBP, %rbp;
11256 popq %rbx;
11257+ pax_force_retaddr 0, 1
11258 ret;
11259
11260 __enc2_xor:
11261@@ -475,6 +481,7 @@ __enc2_xor:
11262
11263 movq RRBP, %rbp;
11264 popq %rbx;
11265+ pax_force_retaddr 0, 1
11266 ret;
11267
11268 .global camellia_dec_blk_2way;
11269@@ -517,4 +524,5 @@ __dec2_rounds16:
11270
11271 movq RRBP, %rbp;
11272 movq RXOR, %rbx;
11273+ pax_force_retaddr 0, 1
11274 ret;
11275diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
11276index 15b00ac..2071784 100644
11277--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
11278+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
11279@@ -23,6 +23,8 @@
11280 *
11281 */
11282
11283+#include <asm/alternative-asm.h>
11284+
11285 .file "cast5-avx-x86_64-asm_64.S"
11286
11287 .extern cast_s1
11288@@ -281,6 +283,7 @@ __skip_enc:
11289 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
11290 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
11291
11292+ pax_force_retaddr 0, 1
11293 ret;
11294
11295 .align 16
11296@@ -353,6 +356,7 @@ __dec_tail:
11297 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
11298 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
11299
11300+ pax_force_retaddr 0, 1
11301 ret;
11302
11303 __skip_dec:
11304@@ -392,6 +396,7 @@ cast5_ecb_enc_16way:
11305 vmovdqu RR4, (6*4*4)(%r11);
11306 vmovdqu RL4, (7*4*4)(%r11);
11307
11308+ pax_force_retaddr
11309 ret;
11310
11311 .align 16
11312@@ -427,6 +432,7 @@ cast5_ecb_dec_16way:
11313 vmovdqu RR4, (6*4*4)(%r11);
11314 vmovdqu RL4, (7*4*4)(%r11);
11315
11316+ pax_force_retaddr
11317 ret;
11318
11319 .align 16
11320@@ -479,6 +485,7 @@ cast5_cbc_dec_16way:
11321
11322 popq %r12;
11323
11324+ pax_force_retaddr
11325 ret;
11326
11327 .align 16
11328@@ -555,4 +562,5 @@ cast5_ctr_16way:
11329
11330 popq %r12;
11331
11332+ pax_force_retaddr
11333 ret;
11334diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
11335index 2569d0d..637c289 100644
11336--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
11337+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
11338@@ -23,6 +23,8 @@
11339 *
11340 */
11341
11342+#include <asm/alternative-asm.h>
11343+
11344 #include "glue_helper-asm-avx.S"
11345
11346 .file "cast6-avx-x86_64-asm_64.S"
11347@@ -294,6 +296,7 @@ __cast6_enc_blk8:
11348 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
11349 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
11350
11351+ pax_force_retaddr 0, 1
11352 ret;
11353
11354 .align 8
11355@@ -340,6 +343,7 @@ __cast6_dec_blk8:
11356 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
11357 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
11358
11359+ pax_force_retaddr 0, 1
11360 ret;
11361
11362 .align 8
11363@@ -361,6 +365,7 @@ cast6_ecb_enc_8way:
11364
11365 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11366
11367+ pax_force_retaddr
11368 ret;
11369
11370 .align 8
11371@@ -382,6 +387,7 @@ cast6_ecb_dec_8way:
11372
11373 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11374
11375+ pax_force_retaddr
11376 ret;
11377
11378 .align 8
11379@@ -408,6 +414,7 @@ cast6_cbc_dec_8way:
11380
11381 popq %r12;
11382
11383+ pax_force_retaddr
11384 ret;
11385
11386 .align 8
11387@@ -436,4 +443,5 @@ cast6_ctr_8way:
11388
11389 popq %r12;
11390
11391+ pax_force_retaddr
11392 ret;
11393diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
11394index 6214a9b..1f4fc9a 100644
11395--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
11396+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
11397@@ -1,3 +1,5 @@
11398+#include <asm/alternative-asm.h>
11399+
11400 # enter ECRYPT_encrypt_bytes
11401 .text
11402 .p2align 5
11403@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
11404 add %r11,%rsp
11405 mov %rdi,%rax
11406 mov %rsi,%rdx
11407+ pax_force_retaddr 0, 1
11408 ret
11409 # bytesatleast65:
11410 ._bytesatleast65:
11411@@ -891,6 +894,7 @@ ECRYPT_keysetup:
11412 add %r11,%rsp
11413 mov %rdi,%rax
11414 mov %rsi,%rdx
11415+ pax_force_retaddr
11416 ret
11417 # enter ECRYPT_ivsetup
11418 .text
11419@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
11420 add %r11,%rsp
11421 mov %rdi,%rax
11422 mov %rsi,%rdx
11423+ pax_force_retaddr
11424 ret
11425diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
11426index 02b0e9f..cf4cf5c 100644
11427--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
11428+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
11429@@ -24,6 +24,8 @@
11430 *
11431 */
11432
11433+#include <asm/alternative-asm.h>
11434+
11435 #include "glue_helper-asm-avx.S"
11436
11437 .file "serpent-avx-x86_64-asm_64.S"
11438@@ -618,6 +620,7 @@ __serpent_enc_blk8_avx:
11439 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
11440 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
11441
11442+ pax_force_retaddr
11443 ret;
11444
11445 .align 8
11446@@ -673,6 +676,7 @@ __serpent_dec_blk8_avx:
11447 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
11448 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
11449
11450+ pax_force_retaddr
11451 ret;
11452
11453 .align 8
11454@@ -692,6 +696,7 @@ serpent_ecb_enc_8way_avx:
11455
11456 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11457
11458+ pax_force_retaddr
11459 ret;
11460
11461 .align 8
11462@@ -711,6 +716,7 @@ serpent_ecb_dec_8way_avx:
11463
11464 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
11465
11466+ pax_force_retaddr
11467 ret;
11468
11469 .align 8
11470@@ -730,6 +736,7 @@ serpent_cbc_dec_8way_avx:
11471
11472 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
11473
11474+ pax_force_retaddr
11475 ret;
11476
11477 .align 8
11478@@ -751,4 +758,5 @@ serpent_ctr_8way_avx:
11479
11480 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11481
11482+ pax_force_retaddr
11483 ret;
11484diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
11485index 3ee1ff0..cbc568b 100644
11486--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
11487+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
11488@@ -24,6 +24,8 @@
11489 *
11490 */
11491
11492+#include <asm/alternative-asm.h>
11493+
11494 .file "serpent-sse2-x86_64-asm_64.S"
11495 .text
11496
11497@@ -692,12 +694,14 @@ __serpent_enc_blk_8way:
11498 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
11499 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
11500
11501+ pax_force_retaddr
11502 ret;
11503
11504 __enc_xor8:
11505 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
11506 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
11507
11508+ pax_force_retaddr
11509 ret;
11510
11511 .align 8
11512@@ -755,4 +759,5 @@ serpent_dec_blk_8way:
11513 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
11514 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
11515
11516+ pax_force_retaddr
11517 ret;
11518diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
11519index 49d6987..df66bd4 100644
11520--- a/arch/x86/crypto/sha1_ssse3_asm.S
11521+++ b/arch/x86/crypto/sha1_ssse3_asm.S
11522@@ -28,6 +28,8 @@
11523 * (at your option) any later version.
11524 */
11525
11526+#include <asm/alternative-asm.h>
11527+
11528 #define CTX %rdi // arg1
11529 #define BUF %rsi // arg2
11530 #define CNT %rdx // arg3
11531@@ -104,6 +106,7 @@
11532 pop %r12
11533 pop %rbp
11534 pop %rbx
11535+ pax_force_retaddr 0, 1
11536 ret
11537
11538 .size \name, .-\name
11539diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
11540index ebac16b..8092eb9 100644
11541--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
11542+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
11543@@ -23,6 +23,8 @@
11544 *
11545 */
11546
11547+#include <asm/alternative-asm.h>
11548+
11549 #include "glue_helper-asm-avx.S"
11550
11551 .file "twofish-avx-x86_64-asm_64.S"
11552@@ -283,6 +285,7 @@ __twofish_enc_blk8:
11553 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
11554 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
11555
11556+ pax_force_retaddr 0, 1
11557 ret;
11558
11559 .align 8
11560@@ -324,6 +327,7 @@ __twofish_dec_blk8:
11561 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
11562 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
11563
11564+ pax_force_retaddr 0, 1
11565 ret;
11566
11567 .align 8
11568@@ -345,6 +349,7 @@ twofish_ecb_enc_8way:
11569
11570 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
11571
11572+ pax_force_retaddr 0, 1
11573 ret;
11574
11575 .align 8
11576@@ -366,6 +371,7 @@ twofish_ecb_dec_8way:
11577
11578 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11579
11580+ pax_force_retaddr 0, 1
11581 ret;
11582
11583 .align 8
11584@@ -392,6 +398,7 @@ twofish_cbc_dec_8way:
11585
11586 popq %r12;
11587
11588+ pax_force_retaddr 0, 1
11589 ret;
11590
11591 .align 8
11592@@ -420,4 +427,5 @@ twofish_ctr_8way:
11593
11594 popq %r12;
11595
11596+ pax_force_retaddr 0, 1
11597 ret;
11598diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
11599index 5b012a2..36d5364 100644
11600--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
11601+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
11602@@ -20,6 +20,8 @@
11603 *
11604 */
11605
11606+#include <asm/alternative-asm.h>
11607+
11608 .file "twofish-x86_64-asm-3way.S"
11609 .text
11610
11611@@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
11612 popq %r13;
11613 popq %r14;
11614 popq %r15;
11615+ pax_force_retaddr 0, 1
11616 ret;
11617
11618 __enc_xor3:
11619@@ -271,6 +274,7 @@ __enc_xor3:
11620 popq %r13;
11621 popq %r14;
11622 popq %r15;
11623+ pax_force_retaddr 0, 1
11624 ret;
11625
11626 .global twofish_dec_blk_3way
11627@@ -312,5 +316,6 @@ twofish_dec_blk_3way:
11628 popq %r13;
11629 popq %r14;
11630 popq %r15;
11631+ pax_force_retaddr 0, 1
11632 ret;
11633
11634diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
11635index 7bcf3fc..f53832f 100644
11636--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
11637+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
11638@@ -21,6 +21,7 @@
11639 .text
11640
11641 #include <asm/asm-offsets.h>
11642+#include <asm/alternative-asm.h>
11643
11644 #define a_offset 0
11645 #define b_offset 4
11646@@ -268,6 +269,7 @@ twofish_enc_blk:
11647
11648 popq R1
11649 movq $1,%rax
11650+ pax_force_retaddr 0, 1
11651 ret
11652
11653 twofish_dec_blk:
11654@@ -319,4 +321,5 @@ twofish_dec_blk:
11655
11656 popq R1
11657 movq $1,%rax
11658+ pax_force_retaddr 0, 1
11659 ret
11660diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
11661index a703af1..f5b9c36 100644
11662--- a/arch/x86/ia32/ia32_aout.c
11663+++ b/arch/x86/ia32/ia32_aout.c
11664@@ -159,6 +159,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
11665 unsigned long dump_start, dump_size;
11666 struct user32 dump;
11667
11668+ memset(&dump, 0, sizeof(dump));
11669+
11670 fs = get_fs();
11671 set_fs(KERNEL_DS);
11672 has_dumped = 1;
11673diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
11674index a1daf4a..f8c4537 100644
11675--- a/arch/x86/ia32/ia32_signal.c
11676+++ b/arch/x86/ia32/ia32_signal.c
11677@@ -348,7 +348,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
11678 sp -= frame_size;
11679 /* Align the stack pointer according to the i386 ABI,
11680 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
11681- sp = ((sp + 4) & -16ul) - 4;
11682+ sp = ((sp - 12) & -16ul) - 4;
11683 return (void __user *) sp;
11684 }
11685
11686@@ -406,7 +406,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
11687 * These are actually not used anymore, but left because some
11688 * gdb versions depend on them as a marker.
11689 */
11690- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
11691+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
11692 } put_user_catch(err);
11693
11694 if (err)
11695@@ -448,7 +448,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
11696 0xb8,
11697 __NR_ia32_rt_sigreturn,
11698 0x80cd,
11699- 0,
11700+ 0
11701 };
11702
11703 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
11704@@ -471,16 +471,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
11705
11706 if (ka->sa.sa_flags & SA_RESTORER)
11707 restorer = ka->sa.sa_restorer;
11708+ else if (current->mm->context.vdso)
11709+ /* Return stub is in 32bit vsyscall page */
11710+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
11711 else
11712- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
11713- rt_sigreturn);
11714+ restorer = &frame->retcode;
11715 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
11716
11717 /*
11718 * Not actually used anymore, but left because some gdb
11719 * versions need it.
11720 */
11721- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
11722+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
11723 } put_user_catch(err);
11724
11725 err |= copy_siginfo_to_user32(&frame->info, info);
11726diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
11727index 142c4ce..19b683f 100644
11728--- a/arch/x86/ia32/ia32entry.S
11729+++ b/arch/x86/ia32/ia32entry.S
11730@@ -15,8 +15,10 @@
11731 #include <asm/irqflags.h>
11732 #include <asm/asm.h>
11733 #include <asm/smap.h>
11734+#include <asm/pgtable.h>
11735 #include <linux/linkage.h>
11736 #include <linux/err.h>
11737+#include <asm/alternative-asm.h>
11738
11739 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
11740 #include <linux/elf-em.h>
11741@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
11742 ENDPROC(native_irq_enable_sysexit)
11743 #endif
11744
11745+ .macro pax_enter_kernel_user
11746+ pax_set_fptr_mask
11747+#ifdef CONFIG_PAX_MEMORY_UDEREF
11748+ call pax_enter_kernel_user
11749+#endif
11750+ .endm
11751+
11752+ .macro pax_exit_kernel_user
11753+#ifdef CONFIG_PAX_MEMORY_UDEREF
11754+ call pax_exit_kernel_user
11755+#endif
11756+#ifdef CONFIG_PAX_RANDKSTACK
11757+ pushq %rax
11758+ pushq %r11
11759+ call pax_randomize_kstack
11760+ popq %r11
11761+ popq %rax
11762+#endif
11763+ .endm
11764+
11765+.macro pax_erase_kstack
11766+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11767+ call pax_erase_kstack
11768+#endif
11769+.endm
11770+
11771 /*
11772 * 32bit SYSENTER instruction entry.
11773 *
11774@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
11775 CFI_REGISTER rsp,rbp
11776 SWAPGS_UNSAFE_STACK
11777 movq PER_CPU_VAR(kernel_stack), %rsp
11778- addq $(KERNEL_STACK_OFFSET),%rsp
11779- /*
11780- * No need to follow this irqs on/off section: the syscall
11781- * disabled irqs, here we enable it straight after entry:
11782- */
11783- ENABLE_INTERRUPTS(CLBR_NONE)
11784 movl %ebp,%ebp /* zero extension */
11785 pushq_cfi $__USER32_DS
11786 /*CFI_REL_OFFSET ss,0*/
11787@@ -135,24 +157,44 @@ ENTRY(ia32_sysenter_target)
11788 CFI_REL_OFFSET rsp,0
11789 pushfq_cfi
11790 /*CFI_REL_OFFSET rflags,0*/
11791- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
11792- CFI_REGISTER rip,r10
11793+ orl $X86_EFLAGS_IF,(%rsp)
11794+ GET_THREAD_INFO(%r11)
11795+ movl TI_sysenter_return(%r11), %r11d
11796+ CFI_REGISTER rip,r11
11797 pushq_cfi $__USER32_CS
11798 /*CFI_REL_OFFSET cs,0*/
11799 movl %eax, %eax
11800- pushq_cfi %r10
11801+ pushq_cfi %r11
11802 CFI_REL_OFFSET rip,0
11803 pushq_cfi %rax
11804 cld
11805 SAVE_ARGS 0,1,0
11806+ pax_enter_kernel_user
11807+
11808+#ifdef CONFIG_PAX_RANDKSTACK
11809+ pax_erase_kstack
11810+#endif
11811+
11812+ /*
11813+ * No need to follow this irqs on/off section: the syscall
11814+ * disabled irqs, here we enable it straight after entry:
11815+ */
11816+ ENABLE_INTERRUPTS(CLBR_NONE)
11817 /* no need to do an access_ok check here because rbp has been
11818 32bit zero extended */
11819+
11820+#ifdef CONFIG_PAX_MEMORY_UDEREF
11821+ mov $PAX_USER_SHADOW_BASE,%r11
11822+ add %r11,%rbp
11823+#endif
11824+
11825 ASM_STAC
11826 1: movl (%rbp),%ebp
11827 _ASM_EXTABLE(1b,ia32_badarg)
11828 ASM_CLAC
11829- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11830- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11831+ GET_THREAD_INFO(%r11)
11832+ orl $TS_COMPAT,TI_status(%r11)
11833+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
11834 CFI_REMEMBER_STATE
11835 jnz sysenter_tracesys
11836 cmpq $(IA32_NR_syscalls-1),%rax
11837@@ -162,12 +204,15 @@ sysenter_do_call:
11838 sysenter_dispatch:
11839 call *ia32_sys_call_table(,%rax,8)
11840 movq %rax,RAX-ARGOFFSET(%rsp)
11841+ GET_THREAD_INFO(%r11)
11842 DISABLE_INTERRUPTS(CLBR_NONE)
11843 TRACE_IRQS_OFF
11844- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11845+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
11846 jnz sysexit_audit
11847 sysexit_from_sys_call:
11848- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11849+ pax_exit_kernel_user
11850+ pax_erase_kstack
11851+ andl $~TS_COMPAT,TI_status(%r11)
11852 /* clear IF, that popfq doesn't enable interrupts early */
11853 andl $~0x200,EFLAGS-R11(%rsp)
11854 movl RIP-R11(%rsp),%edx /* User %eip */
11855@@ -193,6 +238,9 @@ sysexit_from_sys_call:
11856 movl %eax,%esi /* 2nd arg: syscall number */
11857 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
11858 call __audit_syscall_entry
11859+
11860+ pax_erase_kstack
11861+
11862 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
11863 cmpq $(IA32_NR_syscalls-1),%rax
11864 ja ia32_badsys
11865@@ -204,7 +252,7 @@ sysexit_from_sys_call:
11866 .endm
11867
11868 .macro auditsys_exit exit
11869- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11870+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
11871 jnz ia32_ret_from_sys_call
11872 TRACE_IRQS_ON
11873 ENABLE_INTERRUPTS(CLBR_NONE)
11874@@ -215,11 +263,12 @@ sysexit_from_sys_call:
11875 1: setbe %al /* 1 if error, 0 if not */
11876 movzbl %al,%edi /* zero-extend that into %edi */
11877 call __audit_syscall_exit
11878+ GET_THREAD_INFO(%r11)
11879 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
11880 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
11881 DISABLE_INTERRUPTS(CLBR_NONE)
11882 TRACE_IRQS_OFF
11883- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11884+ testl %edi,TI_flags(%r11)
11885 jz \exit
11886 CLEAR_RREGS -ARGOFFSET
11887 jmp int_with_check
11888@@ -237,7 +286,7 @@ sysexit_audit:
11889
11890 sysenter_tracesys:
11891 #ifdef CONFIG_AUDITSYSCALL
11892- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11893+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
11894 jz sysenter_auditsys
11895 #endif
11896 SAVE_REST
11897@@ -249,6 +298,9 @@ sysenter_tracesys:
11898 RESTORE_REST
11899 cmpq $(IA32_NR_syscalls-1),%rax
11900 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
11901+
11902+ pax_erase_kstack
11903+
11904 jmp sysenter_do_call
11905 CFI_ENDPROC
11906 ENDPROC(ia32_sysenter_target)
11907@@ -276,19 +328,25 @@ ENDPROC(ia32_sysenter_target)
11908 ENTRY(ia32_cstar_target)
11909 CFI_STARTPROC32 simple
11910 CFI_SIGNAL_FRAME
11911- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
11912+ CFI_DEF_CFA rsp,0
11913 CFI_REGISTER rip,rcx
11914 /*CFI_REGISTER rflags,r11*/
11915 SWAPGS_UNSAFE_STACK
11916 movl %esp,%r8d
11917 CFI_REGISTER rsp,r8
11918 movq PER_CPU_VAR(kernel_stack),%rsp
11919+ SAVE_ARGS 8*6,0,0
11920+ pax_enter_kernel_user
11921+
11922+#ifdef CONFIG_PAX_RANDKSTACK
11923+ pax_erase_kstack
11924+#endif
11925+
11926 /*
11927 * No need to follow this irqs on/off section: the syscall
11928 * disabled irqs and here we enable it straight after entry:
11929 */
11930 ENABLE_INTERRUPTS(CLBR_NONE)
11931- SAVE_ARGS 8,0,0
11932 movl %eax,%eax /* zero extension */
11933 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
11934 movq %rcx,RIP-ARGOFFSET(%rsp)
11935@@ -304,12 +362,19 @@ ENTRY(ia32_cstar_target)
11936 /* no need to do an access_ok check here because r8 has been
11937 32bit zero extended */
11938 /* hardware stack frame is complete now */
11939+
11940+#ifdef CONFIG_PAX_MEMORY_UDEREF
11941+ mov $PAX_USER_SHADOW_BASE,%r11
11942+ add %r11,%r8
11943+#endif
11944+
11945 ASM_STAC
11946 1: movl (%r8),%r9d
11947 _ASM_EXTABLE(1b,ia32_badarg)
11948 ASM_CLAC
11949- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11950- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11951+ GET_THREAD_INFO(%r11)
11952+ orl $TS_COMPAT,TI_status(%r11)
11953+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
11954 CFI_REMEMBER_STATE
11955 jnz cstar_tracesys
11956 cmpq $IA32_NR_syscalls-1,%rax
11957@@ -319,12 +384,15 @@ cstar_do_call:
11958 cstar_dispatch:
11959 call *ia32_sys_call_table(,%rax,8)
11960 movq %rax,RAX-ARGOFFSET(%rsp)
11961+ GET_THREAD_INFO(%r11)
11962 DISABLE_INTERRUPTS(CLBR_NONE)
11963 TRACE_IRQS_OFF
11964- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11965+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
11966 jnz sysretl_audit
11967 sysretl_from_sys_call:
11968- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11969+ pax_exit_kernel_user
11970+ pax_erase_kstack
11971+ andl $~TS_COMPAT,TI_status(%r11)
11972 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
11973 movl RIP-ARGOFFSET(%rsp),%ecx
11974 CFI_REGISTER rip,rcx
11975@@ -352,7 +420,7 @@ sysretl_audit:
11976
11977 cstar_tracesys:
11978 #ifdef CONFIG_AUDITSYSCALL
11979- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11980+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
11981 jz cstar_auditsys
11982 #endif
11983 xchgl %r9d,%ebp
11984@@ -366,6 +434,9 @@ cstar_tracesys:
11985 xchgl %ebp,%r9d
11986 cmpq $(IA32_NR_syscalls-1),%rax
11987 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
11988+
11989+ pax_erase_kstack
11990+
11991 jmp cstar_do_call
11992 END(ia32_cstar_target)
11993
11994@@ -407,19 +478,26 @@ ENTRY(ia32_syscall)
11995 CFI_REL_OFFSET rip,RIP-RIP
11996 PARAVIRT_ADJUST_EXCEPTION_FRAME
11997 SWAPGS
11998- /*
11999- * No need to follow this irqs on/off section: the syscall
12000- * disabled irqs and here we enable it straight after entry:
12001- */
12002- ENABLE_INTERRUPTS(CLBR_NONE)
12003 movl %eax,%eax
12004 pushq_cfi %rax
12005 cld
12006 /* note the registers are not zero extended to the sf.
12007 this could be a problem. */
12008 SAVE_ARGS 0,1,0
12009- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12010- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
12011+ pax_enter_kernel_user
12012+
12013+#ifdef CONFIG_PAX_RANDKSTACK
12014+ pax_erase_kstack
12015+#endif
12016+
12017+ /*
12018+ * No need to follow this irqs on/off section: the syscall
12019+ * disabled irqs and here we enable it straight after entry:
12020+ */
12021+ ENABLE_INTERRUPTS(CLBR_NONE)
12022+ GET_THREAD_INFO(%r11)
12023+ orl $TS_COMPAT,TI_status(%r11)
12024+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
12025 jnz ia32_tracesys
12026 cmpq $(IA32_NR_syscalls-1),%rax
12027 ja ia32_badsys
12028@@ -442,6 +520,9 @@ ia32_tracesys:
12029 RESTORE_REST
12030 cmpq $(IA32_NR_syscalls-1),%rax
12031 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
12032+
12033+ pax_erase_kstack
12034+
12035 jmp ia32_do_call
12036 END(ia32_syscall)
12037
12038diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
12039index d0b689b..6811ddc 100644
12040--- a/arch/x86/ia32/sys_ia32.c
12041+++ b/arch/x86/ia32/sys_ia32.c
12042@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
12043 */
12044 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
12045 {
12046- typeof(ubuf->st_uid) uid = 0;
12047- typeof(ubuf->st_gid) gid = 0;
12048+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
12049+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
12050 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
12051 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
12052 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
12053@@ -303,7 +303,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
12054 mm_segment_t old_fs = get_fs();
12055
12056 set_fs(KERNEL_DS);
12057- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
12058+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
12059 set_fs(old_fs);
12060 if (put_compat_timespec(&t, interval))
12061 return -EFAULT;
12062@@ -313,13 +313,13 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
12063 asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
12064 compat_size_t sigsetsize)
12065 {
12066- sigset_t s;
12067+ sigset_t s = { };
12068 compat_sigset_t s32;
12069 int ret;
12070 mm_segment_t old_fs = get_fs();
12071
12072 set_fs(KERNEL_DS);
12073- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
12074+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
12075 set_fs(old_fs);
12076 if (!ret) {
12077 switch (_NSIG_WORDS) {
12078@@ -344,7 +344,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
12079 if (copy_siginfo_from_user32(&info, uinfo))
12080 return -EFAULT;
12081 set_fs(KERNEL_DS);
12082- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
12083+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
12084 set_fs(old_fs);
12085 return ret;
12086 }
12087@@ -376,7 +376,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
12088 return -EFAULT;
12089
12090 set_fs(KERNEL_DS);
12091- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
12092+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
12093 count);
12094 set_fs(old_fs);
12095
12096diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
12097index 372231c..a5aa1a1 100644
12098--- a/arch/x86/include/asm/alternative-asm.h
12099+++ b/arch/x86/include/asm/alternative-asm.h
12100@@ -18,6 +18,45 @@
12101 .endm
12102 #endif
12103
12104+#ifdef KERNEXEC_PLUGIN
12105+ .macro pax_force_retaddr_bts rip=0
12106+ btsq $63,\rip(%rsp)
12107+ .endm
12108+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
12109+ .macro pax_force_retaddr rip=0, reload=0
12110+ btsq $63,\rip(%rsp)
12111+ .endm
12112+ .macro pax_force_fptr ptr
12113+ btsq $63,\ptr
12114+ .endm
12115+ .macro pax_set_fptr_mask
12116+ .endm
12117+#endif
12118+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
12119+ .macro pax_force_retaddr rip=0, reload=0
12120+ .if \reload
12121+ pax_set_fptr_mask
12122+ .endif
12123+ orq %r10,\rip(%rsp)
12124+ .endm
12125+ .macro pax_force_fptr ptr
12126+ orq %r10,\ptr
12127+ .endm
12128+ .macro pax_set_fptr_mask
12129+ movabs $0x8000000000000000,%r10
12130+ .endm
12131+#endif
12132+#else
12133+ .macro pax_force_retaddr rip=0, reload=0
12134+ .endm
12135+ .macro pax_force_fptr ptr
12136+ .endm
12137+ .macro pax_force_retaddr_bts rip=0
12138+ .endm
12139+ .macro pax_set_fptr_mask
12140+ .endm
12141+#endif
12142+
12143 .macro altinstruction_entry orig alt feature orig_len alt_len
12144 .long \orig - .
12145 .long \alt - .
12146diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
12147index 58ed6d9..f1cbe58 100644
12148--- a/arch/x86/include/asm/alternative.h
12149+++ b/arch/x86/include/asm/alternative.h
12150@@ -105,7 +105,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
12151 ".pushsection .discard,\"aw\",@progbits\n" \
12152 DISCARD_ENTRY(1) \
12153 ".popsection\n" \
12154- ".pushsection .altinstr_replacement, \"ax\"\n" \
12155+ ".pushsection .altinstr_replacement, \"a\"\n" \
12156 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
12157 ".popsection"
12158
12159@@ -119,7 +119,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
12160 DISCARD_ENTRY(1) \
12161 DISCARD_ENTRY(2) \
12162 ".popsection\n" \
12163- ".pushsection .altinstr_replacement, \"ax\"\n" \
12164+ ".pushsection .altinstr_replacement, \"a\"\n" \
12165 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
12166 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
12167 ".popsection"
12168diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
12169index 3388034..050f0b9 100644
12170--- a/arch/x86/include/asm/apic.h
12171+++ b/arch/x86/include/asm/apic.h
12172@@ -44,7 +44,7 @@ static inline void generic_apic_probe(void)
12173
12174 #ifdef CONFIG_X86_LOCAL_APIC
12175
12176-extern unsigned int apic_verbosity;
12177+extern int apic_verbosity;
12178 extern int local_apic_timer_c2_ok;
12179
12180 extern int disable_apic;
12181diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
12182index 20370c6..a2eb9b0 100644
12183--- a/arch/x86/include/asm/apm.h
12184+++ b/arch/x86/include/asm/apm.h
12185@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
12186 __asm__ __volatile__(APM_DO_ZERO_SEGS
12187 "pushl %%edi\n\t"
12188 "pushl %%ebp\n\t"
12189- "lcall *%%cs:apm_bios_entry\n\t"
12190+ "lcall *%%ss:apm_bios_entry\n\t"
12191 "setc %%al\n\t"
12192 "popl %%ebp\n\t"
12193 "popl %%edi\n\t"
12194@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
12195 __asm__ __volatile__(APM_DO_ZERO_SEGS
12196 "pushl %%edi\n\t"
12197 "pushl %%ebp\n\t"
12198- "lcall *%%cs:apm_bios_entry\n\t"
12199+ "lcall *%%ss:apm_bios_entry\n\t"
12200 "setc %%bl\n\t"
12201 "popl %%ebp\n\t"
12202 "popl %%edi\n\t"
12203diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
12204index 722aa3b..3a0bb27 100644
12205--- a/arch/x86/include/asm/atomic.h
12206+++ b/arch/x86/include/asm/atomic.h
12207@@ -22,7 +22,18 @@
12208 */
12209 static inline int atomic_read(const atomic_t *v)
12210 {
12211- return (*(volatile int *)&(v)->counter);
12212+ return (*(volatile const int *)&(v)->counter);
12213+}
12214+
12215+/**
12216+ * atomic_read_unchecked - read atomic variable
12217+ * @v: pointer of type atomic_unchecked_t
12218+ *
12219+ * Atomically reads the value of @v.
12220+ */
12221+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
12222+{
12223+ return (*(volatile const int *)&(v)->counter);
12224 }
12225
12226 /**
12227@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
12228 }
12229
12230 /**
12231+ * atomic_set_unchecked - set atomic variable
12232+ * @v: pointer of type atomic_unchecked_t
12233+ * @i: required value
12234+ *
12235+ * Atomically sets the value of @v to @i.
12236+ */
12237+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
12238+{
12239+ v->counter = i;
12240+}
12241+
12242+/**
12243 * atomic_add - add integer to atomic variable
12244 * @i: integer value to add
12245 * @v: pointer of type atomic_t
12246@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
12247 */
12248 static inline void atomic_add(int i, atomic_t *v)
12249 {
12250- asm volatile(LOCK_PREFIX "addl %1,%0"
12251+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
12252+
12253+#ifdef CONFIG_PAX_REFCOUNT
12254+ "jno 0f\n"
12255+ LOCK_PREFIX "subl %1,%0\n"
12256+ "int $4\n0:\n"
12257+ _ASM_EXTABLE(0b, 0b)
12258+#endif
12259+
12260+ : "+m" (v->counter)
12261+ : "ir" (i));
12262+}
12263+
12264+/**
12265+ * atomic_add_unchecked - add integer to atomic variable
12266+ * @i: integer value to add
12267+ * @v: pointer of type atomic_unchecked_t
12268+ *
12269+ * Atomically adds @i to @v.
12270+ */
12271+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
12272+{
12273+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
12274 : "+m" (v->counter)
12275 : "ir" (i));
12276 }
12277@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
12278 */
12279 static inline void atomic_sub(int i, atomic_t *v)
12280 {
12281- asm volatile(LOCK_PREFIX "subl %1,%0"
12282+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
12283+
12284+#ifdef CONFIG_PAX_REFCOUNT
12285+ "jno 0f\n"
12286+ LOCK_PREFIX "addl %1,%0\n"
12287+ "int $4\n0:\n"
12288+ _ASM_EXTABLE(0b, 0b)
12289+#endif
12290+
12291+ : "+m" (v->counter)
12292+ : "ir" (i));
12293+}
12294+
12295+/**
12296+ * atomic_sub_unchecked - subtract integer from atomic variable
12297+ * @i: integer value to subtract
12298+ * @v: pointer of type atomic_unchecked_t
12299+ *
12300+ * Atomically subtracts @i from @v.
12301+ */
12302+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
12303+{
12304+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
12305 : "+m" (v->counter)
12306 : "ir" (i));
12307 }
12308@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
12309 {
12310 unsigned char c;
12311
12312- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
12313+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
12314+
12315+#ifdef CONFIG_PAX_REFCOUNT
12316+ "jno 0f\n"
12317+ LOCK_PREFIX "addl %2,%0\n"
12318+ "int $4\n0:\n"
12319+ _ASM_EXTABLE(0b, 0b)
12320+#endif
12321+
12322+ "sete %1\n"
12323 : "+m" (v->counter), "=qm" (c)
12324 : "ir" (i) : "memory");
12325 return c;
12326@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
12327 */
12328 static inline void atomic_inc(atomic_t *v)
12329 {
12330- asm volatile(LOCK_PREFIX "incl %0"
12331+ asm volatile(LOCK_PREFIX "incl %0\n"
12332+
12333+#ifdef CONFIG_PAX_REFCOUNT
12334+ "jno 0f\n"
12335+ LOCK_PREFIX "decl %0\n"
12336+ "int $4\n0:\n"
12337+ _ASM_EXTABLE(0b, 0b)
12338+#endif
12339+
12340+ : "+m" (v->counter));
12341+}
12342+
12343+/**
12344+ * atomic_inc_unchecked - increment atomic variable
12345+ * @v: pointer of type atomic_unchecked_t
12346+ *
12347+ * Atomically increments @v by 1.
12348+ */
12349+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
12350+{
12351+ asm volatile(LOCK_PREFIX "incl %0\n"
12352 : "+m" (v->counter));
12353 }
12354
12355@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
12356 */
12357 static inline void atomic_dec(atomic_t *v)
12358 {
12359- asm volatile(LOCK_PREFIX "decl %0"
12360+ asm volatile(LOCK_PREFIX "decl %0\n"
12361+
12362+#ifdef CONFIG_PAX_REFCOUNT
12363+ "jno 0f\n"
12364+ LOCK_PREFIX "incl %0\n"
12365+ "int $4\n0:\n"
12366+ _ASM_EXTABLE(0b, 0b)
12367+#endif
12368+
12369+ : "+m" (v->counter));
12370+}
12371+
12372+/**
12373+ * atomic_dec_unchecked - decrement atomic variable
12374+ * @v: pointer of type atomic_unchecked_t
12375+ *
12376+ * Atomically decrements @v by 1.
12377+ */
12378+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
12379+{
12380+ asm volatile(LOCK_PREFIX "decl %0\n"
12381 : "+m" (v->counter));
12382 }
12383
12384@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
12385 {
12386 unsigned char c;
12387
12388- asm volatile(LOCK_PREFIX "decl %0; sete %1"
12389+ asm volatile(LOCK_PREFIX "decl %0\n"
12390+
12391+#ifdef CONFIG_PAX_REFCOUNT
12392+ "jno 0f\n"
12393+ LOCK_PREFIX "incl %0\n"
12394+ "int $4\n0:\n"
12395+ _ASM_EXTABLE(0b, 0b)
12396+#endif
12397+
12398+ "sete %1\n"
12399 : "+m" (v->counter), "=qm" (c)
12400 : : "memory");
12401 return c != 0;
12402@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
12403 {
12404 unsigned char c;
12405
12406- asm volatile(LOCK_PREFIX "incl %0; sete %1"
12407+ asm volatile(LOCK_PREFIX "incl %0\n"
12408+
12409+#ifdef CONFIG_PAX_REFCOUNT
12410+ "jno 0f\n"
12411+ LOCK_PREFIX "decl %0\n"
12412+ "int $4\n0:\n"
12413+ _ASM_EXTABLE(0b, 0b)
12414+#endif
12415+
12416+ "sete %1\n"
12417+ : "+m" (v->counter), "=qm" (c)
12418+ : : "memory");
12419+ return c != 0;
12420+}
12421+
12422+/**
12423+ * atomic_inc_and_test_unchecked - increment and test
12424+ * @v: pointer of type atomic_unchecked_t
12425+ *
12426+ * Atomically increments @v by 1
12427+ * and returns true if the result is zero, or false for all
12428+ * other cases.
12429+ */
12430+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
12431+{
12432+ unsigned char c;
12433+
12434+ asm volatile(LOCK_PREFIX "incl %0\n"
12435+ "sete %1\n"
12436 : "+m" (v->counter), "=qm" (c)
12437 : : "memory");
12438 return c != 0;
12439@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
12440 {
12441 unsigned char c;
12442
12443- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
12444+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
12445+
12446+#ifdef CONFIG_PAX_REFCOUNT
12447+ "jno 0f\n"
12448+ LOCK_PREFIX "subl %2,%0\n"
12449+ "int $4\n0:\n"
12450+ _ASM_EXTABLE(0b, 0b)
12451+#endif
12452+
12453+ "sets %1\n"
12454 : "+m" (v->counter), "=qm" (c)
12455 : "ir" (i) : "memory");
12456 return c;
12457@@ -172,6 +334,18 @@ static inline int atomic_add_negative(int i, atomic_t *v)
12458 */
12459 static inline int atomic_add_return(int i, atomic_t *v)
12460 {
12461+ return i + xadd_check_overflow(&v->counter, i);
12462+}
12463+
12464+/**
12465+ * atomic_add_return_unchecked - add integer and return
12466+ * @i: integer value to add
12467+ * @v: pointer of type atomic_unchecked_t
12468+ *
12469+ * Atomically adds @i to @v and returns @i + @v
12470+ */
12471+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
12472+{
12473 return i + xadd(&v->counter, i);
12474 }
12475
12476@@ -188,6 +362,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
12477 }
12478
12479 #define atomic_inc_return(v) (atomic_add_return(1, v))
12480+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
12481+{
12482+ return atomic_add_return_unchecked(1, v);
12483+}
12484 #define atomic_dec_return(v) (atomic_sub_return(1, v))
12485
12486 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
12487@@ -195,11 +373,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
12488 return cmpxchg(&v->counter, old, new);
12489 }
12490
12491+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
12492+{
12493+ return cmpxchg(&v->counter, old, new);
12494+}
12495+
12496 static inline int atomic_xchg(atomic_t *v, int new)
12497 {
12498 return xchg(&v->counter, new);
12499 }
12500
12501+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
12502+{
12503+ return xchg(&v->counter, new);
12504+}
12505+
12506 /**
12507 * __atomic_add_unless - add unless the number is already a given value
12508 * @v: pointer of type atomic_t
12509@@ -211,12 +399,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
12510 */
12511 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
12512 {
12513- int c, old;
12514+ int c, old, new;
12515 c = atomic_read(v);
12516 for (;;) {
12517- if (unlikely(c == (u)))
12518+ if (unlikely(c == u))
12519 break;
12520- old = atomic_cmpxchg((v), c, c + (a));
12521+
12522+ asm volatile("addl %2,%0\n"
12523+
12524+#ifdef CONFIG_PAX_REFCOUNT
12525+ "jno 0f\n"
12526+ "subl %2,%0\n"
12527+ "int $4\n0:\n"
12528+ _ASM_EXTABLE(0b, 0b)
12529+#endif
12530+
12531+ : "=r" (new)
12532+ : "0" (c), "ir" (a));
12533+
12534+ old = atomic_cmpxchg(v, c, new);
12535 if (likely(old == c))
12536 break;
12537 c = old;
12538@@ -225,6 +426,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
12539 }
12540
12541 /**
12542+ * atomic_inc_not_zero_hint - increment if not null
12543+ * @v: pointer of type atomic_t
12544+ * @hint: probable value of the atomic before the increment
12545+ *
12546+ * This version of atomic_inc_not_zero() gives a hint of probable
12547+ * value of the atomic. This helps processor to not read the memory
12548+ * before doing the atomic read/modify/write cycle, lowering
12549+ * number of bus transactions on some arches.
12550+ *
12551+ * Returns: 0 if increment was not done, 1 otherwise.
12552+ */
12553+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
12554+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
12555+{
12556+ int val, c = hint, new;
12557+
12558+ /* sanity test, should be removed by compiler if hint is a constant */
12559+ if (!hint)
12560+ return __atomic_add_unless(v, 1, 0);
12561+
12562+ do {
12563+ asm volatile("incl %0\n"
12564+
12565+#ifdef CONFIG_PAX_REFCOUNT
12566+ "jno 0f\n"
12567+ "decl %0\n"
12568+ "int $4\n0:\n"
12569+ _ASM_EXTABLE(0b, 0b)
12570+#endif
12571+
12572+ : "=r" (new)
12573+ : "0" (c));
12574+
12575+ val = atomic_cmpxchg(v, c, new);
12576+ if (val == c)
12577+ return 1;
12578+ c = val;
12579+ } while (c);
12580+
12581+ return 0;
12582+}
12583+
12584+/**
12585 * atomic_inc_short - increment of a short integer
12586 * @v: pointer to type int
12587 *
12588@@ -253,14 +497,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
12589 #endif
12590
12591 /* These are x86-specific, used by some header files */
12592-#define atomic_clear_mask(mask, addr) \
12593- asm volatile(LOCK_PREFIX "andl %0,%1" \
12594- : : "r" (~(mask)), "m" (*(addr)) : "memory")
12595+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
12596+{
12597+ asm volatile(LOCK_PREFIX "andl %1,%0"
12598+ : "+m" (v->counter)
12599+ : "r" (~(mask))
12600+ : "memory");
12601+}
12602
12603-#define atomic_set_mask(mask, addr) \
12604- asm volatile(LOCK_PREFIX "orl %0,%1" \
12605- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
12606- : "memory")
12607+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
12608+{
12609+ asm volatile(LOCK_PREFIX "andl %1,%0"
12610+ : "+m" (v->counter)
12611+ : "r" (~(mask))
12612+ : "memory");
12613+}
12614+
12615+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
12616+{
12617+ asm volatile(LOCK_PREFIX "orl %1,%0"
12618+ : "+m" (v->counter)
12619+ : "r" (mask)
12620+ : "memory");
12621+}
12622+
12623+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
12624+{
12625+ asm volatile(LOCK_PREFIX "orl %1,%0"
12626+ : "+m" (v->counter)
12627+ : "r" (mask)
12628+ : "memory");
12629+}
12630
12631 /* Atomic operations are already serializing on x86 */
12632 #define smp_mb__before_atomic_dec() barrier()
12633diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
12634index b154de7..aadebd8 100644
12635--- a/arch/x86/include/asm/atomic64_32.h
12636+++ b/arch/x86/include/asm/atomic64_32.h
12637@@ -12,6 +12,14 @@ typedef struct {
12638 u64 __aligned(8) counter;
12639 } atomic64_t;
12640
12641+#ifdef CONFIG_PAX_REFCOUNT
12642+typedef struct {
12643+ u64 __aligned(8) counter;
12644+} atomic64_unchecked_t;
12645+#else
12646+typedef atomic64_t atomic64_unchecked_t;
12647+#endif
12648+
12649 #define ATOMIC64_INIT(val) { (val) }
12650
12651 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
12652@@ -37,21 +45,31 @@ typedef struct {
12653 ATOMIC64_DECL_ONE(sym##_386)
12654
12655 ATOMIC64_DECL_ONE(add_386);
12656+ATOMIC64_DECL_ONE(add_unchecked_386);
12657 ATOMIC64_DECL_ONE(sub_386);
12658+ATOMIC64_DECL_ONE(sub_unchecked_386);
12659 ATOMIC64_DECL_ONE(inc_386);
12660+ATOMIC64_DECL_ONE(inc_unchecked_386);
12661 ATOMIC64_DECL_ONE(dec_386);
12662+ATOMIC64_DECL_ONE(dec_unchecked_386);
12663 #endif
12664
12665 #define alternative_atomic64(f, out, in...) \
12666 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
12667
12668 ATOMIC64_DECL(read);
12669+ATOMIC64_DECL(read_unchecked);
12670 ATOMIC64_DECL(set);
12671+ATOMIC64_DECL(set_unchecked);
12672 ATOMIC64_DECL(xchg);
12673 ATOMIC64_DECL(add_return);
12674+ATOMIC64_DECL(add_return_unchecked);
12675 ATOMIC64_DECL(sub_return);
12676+ATOMIC64_DECL(sub_return_unchecked);
12677 ATOMIC64_DECL(inc_return);
12678+ATOMIC64_DECL(inc_return_unchecked);
12679 ATOMIC64_DECL(dec_return);
12680+ATOMIC64_DECL(dec_return_unchecked);
12681 ATOMIC64_DECL(dec_if_positive);
12682 ATOMIC64_DECL(inc_not_zero);
12683 ATOMIC64_DECL(add_unless);
12684@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
12685 }
12686
12687 /**
12688+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
12689+ * @p: pointer to type atomic64_unchecked_t
12690+ * @o: expected value
12691+ * @n: new value
12692+ *
12693+ * Atomically sets @v to @n if it was equal to @o and returns
12694+ * the old value.
12695+ */
12696+
12697+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
12698+{
12699+ return cmpxchg64(&v->counter, o, n);
12700+}
12701+
12702+/**
12703 * atomic64_xchg - xchg atomic64 variable
12704 * @v: pointer to type atomic64_t
12705 * @n: value to assign
12706@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
12707 }
12708
12709 /**
12710+ * atomic64_set_unchecked - set atomic64 variable
12711+ * @v: pointer to type atomic64_unchecked_t
12712+ * @n: value to assign
12713+ *
12714+ * Atomically sets the value of @v to @n.
12715+ */
12716+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
12717+{
12718+ unsigned high = (unsigned)(i >> 32);
12719+ unsigned low = (unsigned)i;
12720+ alternative_atomic64(set, /* no output */,
12721+ "S" (v), "b" (low), "c" (high)
12722+ : "eax", "edx", "memory");
12723+}
12724+
12725+/**
12726 * atomic64_read - read atomic64 variable
12727 * @v: pointer to type atomic64_t
12728 *
12729@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
12730 }
12731
12732 /**
12733+ * atomic64_read_unchecked - read atomic64 variable
12734+ * @v: pointer to type atomic64_unchecked_t
12735+ *
12736+ * Atomically reads the value of @v and returns it.
12737+ */
12738+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
12739+{
12740+ long long r;
12741+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
12742+ return r;
12743+ }
12744+
12745+/**
12746 * atomic64_add_return - add and return
12747 * @i: integer value to add
12748 * @v: pointer to type atomic64_t
12749@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
12750 return i;
12751 }
12752
12753+/**
12754+ * atomic64_add_return_unchecked - add and return
12755+ * @i: integer value to add
12756+ * @v: pointer to type atomic64_unchecked_t
12757+ *
12758+ * Atomically adds @i to @v and returns @i + *@v
12759+ */
12760+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
12761+{
12762+ alternative_atomic64(add_return_unchecked,
12763+ ASM_OUTPUT2("+A" (i), "+c" (v)),
12764+ ASM_NO_INPUT_CLOBBER("memory"));
12765+ return i;
12766+}
12767+
12768 /*
12769 * Other variants with different arithmetic operators:
12770 */
12771@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
12772 return a;
12773 }
12774
12775+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
12776+{
12777+ long long a;
12778+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
12779+ "S" (v) : "memory", "ecx");
12780+ return a;
12781+}
12782+
12783 static inline long long atomic64_dec_return(atomic64_t *v)
12784 {
12785 long long a;
12786@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
12787 }
12788
12789 /**
12790+ * atomic64_add_unchecked - add integer to atomic64 variable
12791+ * @i: integer value to add
12792+ * @v: pointer to type atomic64_unchecked_t
12793+ *
12794+ * Atomically adds @i to @v.
12795+ */
12796+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
12797+{
12798+ __alternative_atomic64(add_unchecked, add_return_unchecked,
12799+ ASM_OUTPUT2("+A" (i), "+c" (v)),
12800+ ASM_NO_INPUT_CLOBBER("memory"));
12801+ return i;
12802+}
12803+
12804+/**
12805 * atomic64_sub - subtract the atomic64 variable
12806 * @i: integer value to subtract
12807 * @v: pointer to type atomic64_t
12808diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
12809index 0e1cbfc..5623683 100644
12810--- a/arch/x86/include/asm/atomic64_64.h
12811+++ b/arch/x86/include/asm/atomic64_64.h
12812@@ -18,7 +18,19 @@
12813 */
12814 static inline long atomic64_read(const atomic64_t *v)
12815 {
12816- return (*(volatile long *)&(v)->counter);
12817+ return (*(volatile const long *)&(v)->counter);
12818+}
12819+
12820+/**
12821+ * atomic64_read_unchecked - read atomic64 variable
12822+ * @v: pointer of type atomic64_unchecked_t
12823+ *
12824+ * Atomically reads the value of @v.
12825+ * Doesn't imply a read memory barrier.
12826+ */
12827+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
12828+{
12829+ return (*(volatile const long *)&(v)->counter);
12830 }
12831
12832 /**
12833@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
12834 }
12835
12836 /**
12837+ * atomic64_set_unchecked - set atomic64 variable
12838+ * @v: pointer to type atomic64_unchecked_t
12839+ * @i: required value
12840+ *
12841+ * Atomically sets the value of @v to @i.
12842+ */
12843+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
12844+{
12845+ v->counter = i;
12846+}
12847+
12848+/**
12849 * atomic64_add - add integer to atomic64 variable
12850 * @i: integer value to add
12851 * @v: pointer to type atomic64_t
12852@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
12853 */
12854 static inline void atomic64_add(long i, atomic64_t *v)
12855 {
12856+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
12857+
12858+#ifdef CONFIG_PAX_REFCOUNT
12859+ "jno 0f\n"
12860+ LOCK_PREFIX "subq %1,%0\n"
12861+ "int $4\n0:\n"
12862+ _ASM_EXTABLE(0b, 0b)
12863+#endif
12864+
12865+ : "=m" (v->counter)
12866+ : "er" (i), "m" (v->counter));
12867+}
12868+
12869+/**
12870+ * atomic64_add_unchecked - add integer to atomic64 variable
12871+ * @i: integer value to add
12872+ * @v: pointer to type atomic64_unchecked_t
12873+ *
12874+ * Atomically adds @i to @v.
12875+ */
12876+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
12877+{
12878 asm volatile(LOCK_PREFIX "addq %1,%0"
12879 : "=m" (v->counter)
12880 : "er" (i), "m" (v->counter));
12881@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
12882 */
12883 static inline void atomic64_sub(long i, atomic64_t *v)
12884 {
12885- asm volatile(LOCK_PREFIX "subq %1,%0"
12886+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
12887+
12888+#ifdef CONFIG_PAX_REFCOUNT
12889+ "jno 0f\n"
12890+ LOCK_PREFIX "addq %1,%0\n"
12891+ "int $4\n0:\n"
12892+ _ASM_EXTABLE(0b, 0b)
12893+#endif
12894+
12895+ : "=m" (v->counter)
12896+ : "er" (i), "m" (v->counter));
12897+}
12898+
12899+/**
12900+ * atomic64_sub_unchecked - subtract the atomic64 variable
12901+ * @i: integer value to subtract
12902+ * @v: pointer to type atomic64_unchecked_t
12903+ *
12904+ * Atomically subtracts @i from @v.
12905+ */
12906+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
12907+{
12908+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
12909 : "=m" (v->counter)
12910 : "er" (i), "m" (v->counter));
12911 }
12912@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
12913 {
12914 unsigned char c;
12915
12916- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
12917+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
12918+
12919+#ifdef CONFIG_PAX_REFCOUNT
12920+ "jno 0f\n"
12921+ LOCK_PREFIX "addq %2,%0\n"
12922+ "int $4\n0:\n"
12923+ _ASM_EXTABLE(0b, 0b)
12924+#endif
12925+
12926+ "sete %1\n"
12927 : "=m" (v->counter), "=qm" (c)
12928 : "er" (i), "m" (v->counter) : "memory");
12929 return c;
12930@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
12931 */
12932 static inline void atomic64_inc(atomic64_t *v)
12933 {
12934+ asm volatile(LOCK_PREFIX "incq %0\n"
12935+
12936+#ifdef CONFIG_PAX_REFCOUNT
12937+ "jno 0f\n"
12938+ LOCK_PREFIX "decq %0\n"
12939+ "int $4\n0:\n"
12940+ _ASM_EXTABLE(0b, 0b)
12941+#endif
12942+
12943+ : "=m" (v->counter)
12944+ : "m" (v->counter));
12945+}
12946+
12947+/**
12948+ * atomic64_inc_unchecked - increment atomic64 variable
12949+ * @v: pointer to type atomic64_unchecked_t
12950+ *
12951+ * Atomically increments @v by 1.
12952+ */
12953+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
12954+{
12955 asm volatile(LOCK_PREFIX "incq %0"
12956 : "=m" (v->counter)
12957 : "m" (v->counter));
12958@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
12959 */
12960 static inline void atomic64_dec(atomic64_t *v)
12961 {
12962- asm volatile(LOCK_PREFIX "decq %0"
12963+ asm volatile(LOCK_PREFIX "decq %0\n"
12964+
12965+#ifdef CONFIG_PAX_REFCOUNT
12966+ "jno 0f\n"
12967+ LOCK_PREFIX "incq %0\n"
12968+ "int $4\n0:\n"
12969+ _ASM_EXTABLE(0b, 0b)
12970+#endif
12971+
12972+ : "=m" (v->counter)
12973+ : "m" (v->counter));
12974+}
12975+
12976+/**
12977+ * atomic64_dec_unchecked - decrement atomic64 variable
12978+ * @v: pointer to type atomic64_t
12979+ *
12980+ * Atomically decrements @v by 1.
12981+ */
12982+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
12983+{
12984+ asm volatile(LOCK_PREFIX "decq %0\n"
12985 : "=m" (v->counter)
12986 : "m" (v->counter));
12987 }
12988@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
12989 {
12990 unsigned char c;
12991
12992- asm volatile(LOCK_PREFIX "decq %0; sete %1"
12993+ asm volatile(LOCK_PREFIX "decq %0\n"
12994+
12995+#ifdef CONFIG_PAX_REFCOUNT
12996+ "jno 0f\n"
12997+ LOCK_PREFIX "incq %0\n"
12998+ "int $4\n0:\n"
12999+ _ASM_EXTABLE(0b, 0b)
13000+#endif
13001+
13002+ "sete %1\n"
13003 : "=m" (v->counter), "=qm" (c)
13004 : "m" (v->counter) : "memory");
13005 return c != 0;
13006@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
13007 {
13008 unsigned char c;
13009
13010- asm volatile(LOCK_PREFIX "incq %0; sete %1"
13011+ asm volatile(LOCK_PREFIX "incq %0\n"
13012+
13013+#ifdef CONFIG_PAX_REFCOUNT
13014+ "jno 0f\n"
13015+ LOCK_PREFIX "decq %0\n"
13016+ "int $4\n0:\n"
13017+ _ASM_EXTABLE(0b, 0b)
13018+#endif
13019+
13020+ "sete %1\n"
13021 : "=m" (v->counter), "=qm" (c)
13022 : "m" (v->counter) : "memory");
13023 return c != 0;
13024@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
13025 {
13026 unsigned char c;
13027
13028- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
13029+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
13030+
13031+#ifdef CONFIG_PAX_REFCOUNT
13032+ "jno 0f\n"
13033+ LOCK_PREFIX "subq %2,%0\n"
13034+ "int $4\n0:\n"
13035+ _ASM_EXTABLE(0b, 0b)
13036+#endif
13037+
13038+ "sets %1\n"
13039 : "=m" (v->counter), "=qm" (c)
13040 : "er" (i), "m" (v->counter) : "memory");
13041 return c;
13042@@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
13043 */
13044 static inline long atomic64_add_return(long i, atomic64_t *v)
13045 {
13046+ return i + xadd_check_overflow(&v->counter, i);
13047+}
13048+
13049+/**
13050+ * atomic64_add_return_unchecked - add and return
13051+ * @i: integer value to add
13052+ * @v: pointer to type atomic64_unchecked_t
13053+ *
13054+ * Atomically adds @i to @v and returns @i + @v
13055+ */
13056+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
13057+{
13058 return i + xadd(&v->counter, i);
13059 }
13060
13061@@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
13062 }
13063
13064 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
13065+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
13066+{
13067+ return atomic64_add_return_unchecked(1, v);
13068+}
13069 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
13070
13071 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
13072@@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
13073 return cmpxchg(&v->counter, old, new);
13074 }
13075
13076+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
13077+{
13078+ return cmpxchg(&v->counter, old, new);
13079+}
13080+
13081 static inline long atomic64_xchg(atomic64_t *v, long new)
13082 {
13083 return xchg(&v->counter, new);
13084@@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
13085 */
13086 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
13087 {
13088- long c, old;
13089+ long c, old, new;
13090 c = atomic64_read(v);
13091 for (;;) {
13092- if (unlikely(c == (u)))
13093+ if (unlikely(c == u))
13094 break;
13095- old = atomic64_cmpxchg((v), c, c + (a));
13096+
13097+ asm volatile("add %2,%0\n"
13098+
13099+#ifdef CONFIG_PAX_REFCOUNT
13100+ "jno 0f\n"
13101+ "sub %2,%0\n"
13102+ "int $4\n0:\n"
13103+ _ASM_EXTABLE(0b, 0b)
13104+#endif
13105+
13106+ : "=r" (new)
13107+ : "0" (c), "ir" (a));
13108+
13109+ old = atomic64_cmpxchg(v, c, new);
13110 if (likely(old == c))
13111 break;
13112 c = old;
13113 }
13114- return c != (u);
13115+ return c != u;
13116 }
13117
13118 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
13119diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
13120index 6dfd019..28e188d 100644
13121--- a/arch/x86/include/asm/bitops.h
13122+++ b/arch/x86/include/asm/bitops.h
13123@@ -40,7 +40,7 @@
13124 * a mask operation on a byte.
13125 */
13126 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
13127-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
13128+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
13129 #define CONST_MASK(nr) (1 << ((nr) & 7))
13130
13131 /**
13132@@ -486,7 +486,7 @@ static inline int fls(int x)
13133 * at position 64.
13134 */
13135 #ifdef CONFIG_X86_64
13136-static __always_inline int fls64(__u64 x)
13137+static __always_inline long fls64(__u64 x)
13138 {
13139 int bitpos = -1;
13140 /*
13141diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
13142index 4fa687a..60f2d39 100644
13143--- a/arch/x86/include/asm/boot.h
13144+++ b/arch/x86/include/asm/boot.h
13145@@ -6,10 +6,15 @@
13146 #include <uapi/asm/boot.h>
13147
13148 /* Physical address where kernel should be loaded. */
13149-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
13150+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
13151 + (CONFIG_PHYSICAL_ALIGN - 1)) \
13152 & ~(CONFIG_PHYSICAL_ALIGN - 1))
13153
13154+#ifndef __ASSEMBLY__
13155+extern unsigned char __LOAD_PHYSICAL_ADDR[];
13156+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
13157+#endif
13158+
13159 /* Minimum kernel alignment, as a power of two */
13160 #ifdef CONFIG_X86_64
13161 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
13162diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
13163index 48f99f1..d78ebf9 100644
13164--- a/arch/x86/include/asm/cache.h
13165+++ b/arch/x86/include/asm/cache.h
13166@@ -5,12 +5,13 @@
13167
13168 /* L1 cache line size */
13169 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
13170-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
13171+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
13172
13173 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
13174+#define __read_only __attribute__((__section__(".data..read_only")))
13175
13176 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
13177-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
13178+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
13179
13180 #ifdef CONFIG_X86_VSMP
13181 #ifdef CONFIG_SMP
13182diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
13183index 9863ee3..4a1f8e1 100644
13184--- a/arch/x86/include/asm/cacheflush.h
13185+++ b/arch/x86/include/asm/cacheflush.h
13186@@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
13187 unsigned long pg_flags = pg->flags & _PGMT_MASK;
13188
13189 if (pg_flags == _PGMT_DEFAULT)
13190- return -1;
13191+ return ~0UL;
13192 else if (pg_flags == _PGMT_WC)
13193 return _PAGE_CACHE_WC;
13194 else if (pg_flags == _PGMT_UC_MINUS)
13195diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
13196index 46fc474..b02b0f9 100644
13197--- a/arch/x86/include/asm/checksum_32.h
13198+++ b/arch/x86/include/asm/checksum_32.h
13199@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
13200 int len, __wsum sum,
13201 int *src_err_ptr, int *dst_err_ptr);
13202
13203+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
13204+ int len, __wsum sum,
13205+ int *src_err_ptr, int *dst_err_ptr);
13206+
13207+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
13208+ int len, __wsum sum,
13209+ int *src_err_ptr, int *dst_err_ptr);
13210+
13211 /*
13212 * Note: when you get a NULL pointer exception here this means someone
13213 * passed in an incorrect kernel address to one of these functions.
13214@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
13215 int *err_ptr)
13216 {
13217 might_sleep();
13218- return csum_partial_copy_generic((__force void *)src, dst,
13219+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
13220 len, sum, err_ptr, NULL);
13221 }
13222
13223@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
13224 {
13225 might_sleep();
13226 if (access_ok(VERIFY_WRITE, dst, len))
13227- return csum_partial_copy_generic(src, (__force void *)dst,
13228+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
13229 len, sum, NULL, err_ptr);
13230
13231 if (len)
13232diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
13233index 8d871ea..c1a0dc9 100644
13234--- a/arch/x86/include/asm/cmpxchg.h
13235+++ b/arch/x86/include/asm/cmpxchg.h
13236@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
13237 __compiletime_error("Bad argument size for cmpxchg");
13238 extern void __xadd_wrong_size(void)
13239 __compiletime_error("Bad argument size for xadd");
13240+extern void __xadd_check_overflow_wrong_size(void)
13241+ __compiletime_error("Bad argument size for xadd_check_overflow");
13242 extern void __add_wrong_size(void)
13243 __compiletime_error("Bad argument size for add");
13244+extern void __add_check_overflow_wrong_size(void)
13245+ __compiletime_error("Bad argument size for add_check_overflow");
13246
13247 /*
13248 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
13249@@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
13250 __ret; \
13251 })
13252
13253+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
13254+ ({ \
13255+ __typeof__ (*(ptr)) __ret = (arg); \
13256+ switch (sizeof(*(ptr))) { \
13257+ case __X86_CASE_L: \
13258+ asm volatile (lock #op "l %0, %1\n" \
13259+ "jno 0f\n" \
13260+ "mov %0,%1\n" \
13261+ "int $4\n0:\n" \
13262+ _ASM_EXTABLE(0b, 0b) \
13263+ : "+r" (__ret), "+m" (*(ptr)) \
13264+ : : "memory", "cc"); \
13265+ break; \
13266+ case __X86_CASE_Q: \
13267+ asm volatile (lock #op "q %q0, %1\n" \
13268+ "jno 0f\n" \
13269+ "mov %0,%1\n" \
13270+ "int $4\n0:\n" \
13271+ _ASM_EXTABLE(0b, 0b) \
13272+ : "+r" (__ret), "+m" (*(ptr)) \
13273+ : : "memory", "cc"); \
13274+ break; \
13275+ default: \
13276+ __ ## op ## _check_overflow_wrong_size(); \
13277+ } \
13278+ __ret; \
13279+ })
13280+
13281 /*
13282 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
13283 * Since this is generally used to protect other memory information, we
13284@@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
13285 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
13286 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
13287
13288+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
13289+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
13290+
13291 #define __add(ptr, inc, lock) \
13292 ({ \
13293 __typeof__ (*(ptr)) __ret = (inc); \
13294diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
13295index 59c6c40..5e0b22c 100644
13296--- a/arch/x86/include/asm/compat.h
13297+++ b/arch/x86/include/asm/compat.h
13298@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
13299 typedef u32 compat_uint_t;
13300 typedef u32 compat_ulong_t;
13301 typedef u64 __attribute__((aligned(4))) compat_u64;
13302-typedef u32 compat_uptr_t;
13303+typedef u32 __user compat_uptr_t;
13304
13305 struct compat_timespec {
13306 compat_time_t tv_sec;
13307diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
13308index 2d9075e..b75a844 100644
13309--- a/arch/x86/include/asm/cpufeature.h
13310+++ b/arch/x86/include/asm/cpufeature.h
13311@@ -206,7 +206,7 @@
13312 #define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
13313 #define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
13314 #define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
13315-#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
13316+#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Prevention */
13317 #define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
13318 #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
13319 #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
13320@@ -375,7 +375,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
13321 ".section .discard,\"aw\",@progbits\n"
13322 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
13323 ".previous\n"
13324- ".section .altinstr_replacement,\"ax\"\n"
13325+ ".section .altinstr_replacement,\"a\"\n"
13326 "3: movb $1,%0\n"
13327 "4:\n"
13328 ".previous\n"
13329diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
13330index 8bf1c06..b6ae785 100644
13331--- a/arch/x86/include/asm/desc.h
13332+++ b/arch/x86/include/asm/desc.h
13333@@ -4,6 +4,7 @@
13334 #include <asm/desc_defs.h>
13335 #include <asm/ldt.h>
13336 #include <asm/mmu.h>
13337+#include <asm/pgtable.h>
13338
13339 #include <linux/smp.h>
13340 #include <linux/percpu.h>
13341@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
13342
13343 desc->type = (info->read_exec_only ^ 1) << 1;
13344 desc->type |= info->contents << 2;
13345+ desc->type |= info->seg_not_present ^ 1;
13346
13347 desc->s = 1;
13348 desc->dpl = 0x3;
13349@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
13350 }
13351
13352 extern struct desc_ptr idt_descr;
13353-extern gate_desc idt_table[];
13354 extern struct desc_ptr nmi_idt_descr;
13355-extern gate_desc nmi_idt_table[];
13356-
13357-struct gdt_page {
13358- struct desc_struct gdt[GDT_ENTRIES];
13359-} __attribute__((aligned(PAGE_SIZE)));
13360-
13361-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
13362+extern gate_desc idt_table[256];
13363+extern gate_desc nmi_idt_table[256];
13364
13365+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
13366 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
13367 {
13368- return per_cpu(gdt_page, cpu).gdt;
13369+ return cpu_gdt_table[cpu];
13370 }
13371
13372 #ifdef CONFIG_X86_64
13373@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
13374 unsigned long base, unsigned dpl, unsigned flags,
13375 unsigned short seg)
13376 {
13377- gate->a = (seg << 16) | (base & 0xffff);
13378- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
13379+ gate->gate.offset_low = base;
13380+ gate->gate.seg = seg;
13381+ gate->gate.reserved = 0;
13382+ gate->gate.type = type;
13383+ gate->gate.s = 0;
13384+ gate->gate.dpl = dpl;
13385+ gate->gate.p = 1;
13386+ gate->gate.offset_high = base >> 16;
13387 }
13388
13389 #endif
13390@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
13391
13392 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
13393 {
13394+ pax_open_kernel();
13395 memcpy(&idt[entry], gate, sizeof(*gate));
13396+ pax_close_kernel();
13397 }
13398
13399 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
13400 {
13401+ pax_open_kernel();
13402 memcpy(&ldt[entry], desc, 8);
13403+ pax_close_kernel();
13404 }
13405
13406 static inline void
13407@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
13408 default: size = sizeof(*gdt); break;
13409 }
13410
13411+ pax_open_kernel();
13412 memcpy(&gdt[entry], desc, size);
13413+ pax_close_kernel();
13414 }
13415
13416 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
13417@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
13418
13419 static inline void native_load_tr_desc(void)
13420 {
13421+ pax_open_kernel();
13422 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
13423+ pax_close_kernel();
13424 }
13425
13426 static inline void native_load_gdt(const struct desc_ptr *dtr)
13427@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
13428 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
13429 unsigned int i;
13430
13431+ pax_open_kernel();
13432 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
13433 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
13434+ pax_close_kernel();
13435 }
13436
13437 #define _LDT_empty(info) \
13438@@ -287,7 +300,7 @@ static inline void load_LDT(mm_context_t *pc)
13439 preempt_enable();
13440 }
13441
13442-static inline unsigned long get_desc_base(const struct desc_struct *desc)
13443+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
13444 {
13445 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
13446 }
13447@@ -311,7 +324,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
13448 }
13449
13450 #ifdef CONFIG_X86_64
13451-static inline void set_nmi_gate(int gate, void *addr)
13452+static inline void set_nmi_gate(int gate, const void *addr)
13453 {
13454 gate_desc s;
13455
13456@@ -320,7 +333,7 @@ static inline void set_nmi_gate(int gate, void *addr)
13457 }
13458 #endif
13459
13460-static inline void _set_gate(int gate, unsigned type, void *addr,
13461+static inline void _set_gate(int gate, unsigned type, const void *addr,
13462 unsigned dpl, unsigned ist, unsigned seg)
13463 {
13464 gate_desc s;
13465@@ -339,7 +352,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
13466 * Pentium F0 0F bugfix can have resulted in the mapped
13467 * IDT being write-protected.
13468 */
13469-static inline void set_intr_gate(unsigned int n, void *addr)
13470+static inline void set_intr_gate(unsigned int n, const void *addr)
13471 {
13472 BUG_ON((unsigned)n > 0xFF);
13473 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
13474@@ -369,19 +382,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
13475 /*
13476 * This routine sets up an interrupt gate at directory privilege level 3.
13477 */
13478-static inline void set_system_intr_gate(unsigned int n, void *addr)
13479+static inline void set_system_intr_gate(unsigned int n, const void *addr)
13480 {
13481 BUG_ON((unsigned)n > 0xFF);
13482 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
13483 }
13484
13485-static inline void set_system_trap_gate(unsigned int n, void *addr)
13486+static inline void set_system_trap_gate(unsigned int n, const void *addr)
13487 {
13488 BUG_ON((unsigned)n > 0xFF);
13489 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
13490 }
13491
13492-static inline void set_trap_gate(unsigned int n, void *addr)
13493+static inline void set_trap_gate(unsigned int n, const void *addr)
13494 {
13495 BUG_ON((unsigned)n > 0xFF);
13496 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
13497@@ -390,19 +403,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
13498 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
13499 {
13500 BUG_ON((unsigned)n > 0xFF);
13501- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
13502+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
13503 }
13504
13505-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
13506+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
13507 {
13508 BUG_ON((unsigned)n > 0xFF);
13509 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
13510 }
13511
13512-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
13513+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
13514 {
13515 BUG_ON((unsigned)n > 0xFF);
13516 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
13517 }
13518
13519+#ifdef CONFIG_X86_32
13520+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
13521+{
13522+ struct desc_struct d;
13523+
13524+ if (likely(limit))
13525+ limit = (limit - 1UL) >> PAGE_SHIFT;
13526+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
13527+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
13528+}
13529+#endif
13530+
13531 #endif /* _ASM_X86_DESC_H */
13532diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
13533index 278441f..b95a174 100644
13534--- a/arch/x86/include/asm/desc_defs.h
13535+++ b/arch/x86/include/asm/desc_defs.h
13536@@ -31,6 +31,12 @@ struct desc_struct {
13537 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
13538 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
13539 };
13540+ struct {
13541+ u16 offset_low;
13542+ u16 seg;
13543+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
13544+ unsigned offset_high: 16;
13545+ } gate;
13546 };
13547 } __attribute__((packed));
13548
13549diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
13550index ced283a..ffe04cc 100644
13551--- a/arch/x86/include/asm/div64.h
13552+++ b/arch/x86/include/asm/div64.h
13553@@ -39,7 +39,7 @@
13554 __mod; \
13555 })
13556
13557-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
13558+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
13559 {
13560 union {
13561 u64 v64;
13562diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
13563index 9c999c1..3860cb8 100644
13564--- a/arch/x86/include/asm/elf.h
13565+++ b/arch/x86/include/asm/elf.h
13566@@ -243,7 +243,25 @@ extern int force_personality32;
13567 the loader. We need to make sure that it is out of the way of the program
13568 that it will "exec", and that there is sufficient room for the brk. */
13569
13570+#ifdef CONFIG_PAX_SEGMEXEC
13571+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
13572+#else
13573 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
13574+#endif
13575+
13576+#ifdef CONFIG_PAX_ASLR
13577+#ifdef CONFIG_X86_32
13578+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
13579+
13580+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
13581+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
13582+#else
13583+#define PAX_ELF_ET_DYN_BASE 0x400000UL
13584+
13585+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
13586+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
13587+#endif
13588+#endif
13589
13590 /* This yields a mask that user programs can use to figure out what
13591 instruction set this CPU supports. This could be done in user space,
13592@@ -296,16 +314,12 @@ do { \
13593
13594 #define ARCH_DLINFO \
13595 do { \
13596- if (vdso_enabled) \
13597- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
13598- (unsigned long)current->mm->context.vdso); \
13599+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
13600 } while (0)
13601
13602 #define ARCH_DLINFO_X32 \
13603 do { \
13604- if (vdso_enabled) \
13605- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
13606- (unsigned long)current->mm->context.vdso); \
13607+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
13608 } while (0)
13609
13610 #define AT_SYSINFO 32
13611@@ -320,7 +334,7 @@ else \
13612
13613 #endif /* !CONFIG_X86_32 */
13614
13615-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
13616+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
13617
13618 #define VDSO_ENTRY \
13619 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
13620@@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
13621 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
13622 #define compat_arch_setup_additional_pages syscall32_setup_pages
13623
13624-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
13625-#define arch_randomize_brk arch_randomize_brk
13626-
13627 /*
13628 * True on X86_32 or when emulating IA32 on X86_64
13629 */
13630diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
13631index 75ce3f4..882e801 100644
13632--- a/arch/x86/include/asm/emergency-restart.h
13633+++ b/arch/x86/include/asm/emergency-restart.h
13634@@ -13,6 +13,6 @@ enum reboot_type {
13635
13636 extern enum reboot_type reboot_type;
13637
13638-extern void machine_emergency_restart(void);
13639+extern void machine_emergency_restart(void) __noreturn;
13640
13641 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
13642diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
13643index 41ab26e..a88c9e6 100644
13644--- a/arch/x86/include/asm/fpu-internal.h
13645+++ b/arch/x86/include/asm/fpu-internal.h
13646@@ -126,7 +126,9 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
13647 ({ \
13648 int err; \
13649 asm volatile(ASM_STAC "\n" \
13650- "1:" #insn "\n\t" \
13651+ "1:" \
13652+ __copyuser_seg \
13653+ #insn "\n\t" \
13654 "2: " ASM_CLAC "\n" \
13655 ".section .fixup,\"ax\"\n" \
13656 "3: movl $-1,%[err]\n" \
13657@@ -299,7 +301,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
13658 "emms\n\t" /* clear stack tags */
13659 "fildl %P[addr]", /* set F?P to defined value */
13660 X86_FEATURE_FXSAVE_LEAK,
13661- [addr] "m" (tsk->thread.fpu.has_fpu));
13662+ [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
13663
13664 return fpu_restore_checking(&tsk->thread.fpu);
13665 }
13666diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
13667index be27ba1..8f13ff9 100644
13668--- a/arch/x86/include/asm/futex.h
13669+++ b/arch/x86/include/asm/futex.h
13670@@ -12,6 +12,7 @@
13671 #include <asm/smap.h>
13672
13673 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
13674+ typecheck(u32 __user *, uaddr); \
13675 asm volatile("\t" ASM_STAC "\n" \
13676 "1:\t" insn "\n" \
13677 "2:\t" ASM_CLAC "\n" \
13678@@ -20,15 +21,16 @@
13679 "\tjmp\t2b\n" \
13680 "\t.previous\n" \
13681 _ASM_EXTABLE(1b, 3b) \
13682- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
13683+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
13684 : "i" (-EFAULT), "0" (oparg), "1" (0))
13685
13686 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
13687+ typecheck(u32 __user *, uaddr); \
13688 asm volatile("\t" ASM_STAC "\n" \
13689 "1:\tmovl %2, %0\n" \
13690 "\tmovl\t%0, %3\n" \
13691 "\t" insn "\n" \
13692- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
13693+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
13694 "\tjnz\t1b\n" \
13695 "3:\t" ASM_CLAC "\n" \
13696 "\t.section .fixup,\"ax\"\n" \
13697@@ -38,7 +40,7 @@
13698 _ASM_EXTABLE(1b, 4b) \
13699 _ASM_EXTABLE(2b, 4b) \
13700 : "=&a" (oldval), "=&r" (ret), \
13701- "+m" (*uaddr), "=&r" (tem) \
13702+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
13703 : "r" (oparg), "i" (-EFAULT), "1" (0))
13704
13705 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
13706@@ -59,10 +61,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
13707
13708 switch (op) {
13709 case FUTEX_OP_SET:
13710- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
13711+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
13712 break;
13713 case FUTEX_OP_ADD:
13714- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
13715+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
13716 uaddr, oparg);
13717 break;
13718 case FUTEX_OP_OR:
13719@@ -116,14 +118,14 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
13720 return -EFAULT;
13721
13722 asm volatile("\t" ASM_STAC "\n"
13723- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
13724+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
13725 "2:\t" ASM_CLAC "\n"
13726 "\t.section .fixup, \"ax\"\n"
13727 "3:\tmov %3, %0\n"
13728 "\tjmp 2b\n"
13729 "\t.previous\n"
13730 _ASM_EXTABLE(1b, 3b)
13731- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
13732+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
13733 : "i" (-EFAULT), "r" (newval), "1" (oldval)
13734 : "memory"
13735 );
13736diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
13737index eb92a6e..b98b2f4 100644
13738--- a/arch/x86/include/asm/hw_irq.h
13739+++ b/arch/x86/include/asm/hw_irq.h
13740@@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
13741 extern void enable_IO_APIC(void);
13742
13743 /* Statistics */
13744-extern atomic_t irq_err_count;
13745-extern atomic_t irq_mis_count;
13746+extern atomic_unchecked_t irq_err_count;
13747+extern atomic_unchecked_t irq_mis_count;
13748
13749 /* EISA */
13750 extern void eisa_set_level_irq(unsigned int irq);
13751diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
13752index a203659..9889f1c 100644
13753--- a/arch/x86/include/asm/i8259.h
13754+++ b/arch/x86/include/asm/i8259.h
13755@@ -62,7 +62,7 @@ struct legacy_pic {
13756 void (*init)(int auto_eoi);
13757 int (*irq_pending)(unsigned int irq);
13758 void (*make_irq)(unsigned int irq);
13759-};
13760+} __do_const;
13761
13762 extern struct legacy_pic *legacy_pic;
13763 extern struct legacy_pic null_legacy_pic;
13764diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
13765index d8e8eef..1765f78 100644
13766--- a/arch/x86/include/asm/io.h
13767+++ b/arch/x86/include/asm/io.h
13768@@ -51,12 +51,12 @@ static inline void name(type val, volatile void __iomem *addr) \
13769 "m" (*(volatile type __force *)addr) barrier); }
13770
13771 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
13772-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
13773-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
13774+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
13775+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
13776
13777 build_mmio_read(__readb, "b", unsigned char, "=q", )
13778-build_mmio_read(__readw, "w", unsigned short, "=r", )
13779-build_mmio_read(__readl, "l", unsigned int, "=r", )
13780+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
13781+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
13782
13783 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
13784 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
13785@@ -184,7 +184,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
13786 return ioremap_nocache(offset, size);
13787 }
13788
13789-extern void iounmap(volatile void __iomem *addr);
13790+extern void iounmap(const volatile void __iomem *addr);
13791
13792 extern void set_iounmap_nonlazy(void);
13793
13794@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
13795
13796 #include <linux/vmalloc.h>
13797
13798+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
13799+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
13800+{
13801+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
13802+}
13803+
13804+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
13805+{
13806+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
13807+}
13808+
13809 /*
13810 * Convert a virtual cached pointer to an uncached pointer
13811 */
13812diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
13813index bba3cf8..06bc8da 100644
13814--- a/arch/x86/include/asm/irqflags.h
13815+++ b/arch/x86/include/asm/irqflags.h
13816@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
13817 sti; \
13818 sysexit
13819
13820+#define GET_CR0_INTO_RDI mov %cr0, %rdi
13821+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
13822+#define GET_CR3_INTO_RDI mov %cr3, %rdi
13823+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
13824+
13825 #else
13826 #define INTERRUPT_RETURN iret
13827 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
13828diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
13829index d3ddd17..c9fb0cc 100644
13830--- a/arch/x86/include/asm/kprobes.h
13831+++ b/arch/x86/include/asm/kprobes.h
13832@@ -38,13 +38,8 @@ typedef u8 kprobe_opcode_t;
13833 #define RELATIVEJUMP_SIZE 5
13834 #define RELATIVECALL_OPCODE 0xe8
13835 #define RELATIVE_ADDR_SIZE 4
13836-#define MAX_STACK_SIZE 64
13837-#define MIN_STACK_SIZE(ADDR) \
13838- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
13839- THREAD_SIZE - (unsigned long)(ADDR))) \
13840- ? (MAX_STACK_SIZE) \
13841- : (((unsigned long)current_thread_info()) + \
13842- THREAD_SIZE - (unsigned long)(ADDR)))
13843+#define MAX_STACK_SIZE 64UL
13844+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
13845
13846 #define flush_insn_slot(p) do { } while (0)
13847
13848diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
13849index dc87b65..85039f9 100644
13850--- a/arch/x86/include/asm/kvm_host.h
13851+++ b/arch/x86/include/asm/kvm_host.h
13852@@ -419,8 +419,8 @@ struct kvm_vcpu_arch {
13853 gpa_t time;
13854 struct pvclock_vcpu_time_info hv_clock;
13855 unsigned int hw_tsc_khz;
13856- unsigned int time_offset;
13857- struct page *time_page;
13858+ struct gfn_to_hva_cache pv_time;
13859+ bool pv_time_enabled;
13860 /* set guest stopped flag in pvclock flags field */
13861 bool pvclock_set_guest_stopped_request;
13862
13863diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
13864index 2d89e39..baee879 100644
13865--- a/arch/x86/include/asm/local.h
13866+++ b/arch/x86/include/asm/local.h
13867@@ -10,33 +10,97 @@ typedef struct {
13868 atomic_long_t a;
13869 } local_t;
13870
13871+typedef struct {
13872+ atomic_long_unchecked_t a;
13873+} local_unchecked_t;
13874+
13875 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
13876
13877 #define local_read(l) atomic_long_read(&(l)->a)
13878+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
13879 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
13880+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
13881
13882 static inline void local_inc(local_t *l)
13883 {
13884- asm volatile(_ASM_INC "%0"
13885+ asm volatile(_ASM_INC "%0\n"
13886+
13887+#ifdef CONFIG_PAX_REFCOUNT
13888+ "jno 0f\n"
13889+ _ASM_DEC "%0\n"
13890+ "int $4\n0:\n"
13891+ _ASM_EXTABLE(0b, 0b)
13892+#endif
13893+
13894+ : "+m" (l->a.counter));
13895+}
13896+
13897+static inline void local_inc_unchecked(local_unchecked_t *l)
13898+{
13899+ asm volatile(_ASM_INC "%0\n"
13900 : "+m" (l->a.counter));
13901 }
13902
13903 static inline void local_dec(local_t *l)
13904 {
13905- asm volatile(_ASM_DEC "%0"
13906+ asm volatile(_ASM_DEC "%0\n"
13907+
13908+#ifdef CONFIG_PAX_REFCOUNT
13909+ "jno 0f\n"
13910+ _ASM_INC "%0\n"
13911+ "int $4\n0:\n"
13912+ _ASM_EXTABLE(0b, 0b)
13913+#endif
13914+
13915+ : "+m" (l->a.counter));
13916+}
13917+
13918+static inline void local_dec_unchecked(local_unchecked_t *l)
13919+{
13920+ asm volatile(_ASM_DEC "%0\n"
13921 : "+m" (l->a.counter));
13922 }
13923
13924 static inline void local_add(long i, local_t *l)
13925 {
13926- asm volatile(_ASM_ADD "%1,%0"
13927+ asm volatile(_ASM_ADD "%1,%0\n"
13928+
13929+#ifdef CONFIG_PAX_REFCOUNT
13930+ "jno 0f\n"
13931+ _ASM_SUB "%1,%0\n"
13932+ "int $4\n0:\n"
13933+ _ASM_EXTABLE(0b, 0b)
13934+#endif
13935+
13936+ : "+m" (l->a.counter)
13937+ : "ir" (i));
13938+}
13939+
13940+static inline void local_add_unchecked(long i, local_unchecked_t *l)
13941+{
13942+ asm volatile(_ASM_ADD "%1,%0\n"
13943 : "+m" (l->a.counter)
13944 : "ir" (i));
13945 }
13946
13947 static inline void local_sub(long i, local_t *l)
13948 {
13949- asm volatile(_ASM_SUB "%1,%0"
13950+ asm volatile(_ASM_SUB "%1,%0\n"
13951+
13952+#ifdef CONFIG_PAX_REFCOUNT
13953+ "jno 0f\n"
13954+ _ASM_ADD "%1,%0\n"
13955+ "int $4\n0:\n"
13956+ _ASM_EXTABLE(0b, 0b)
13957+#endif
13958+
13959+ : "+m" (l->a.counter)
13960+ : "ir" (i));
13961+}
13962+
13963+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
13964+{
13965+ asm volatile(_ASM_SUB "%1,%0\n"
13966 : "+m" (l->a.counter)
13967 : "ir" (i));
13968 }
13969@@ -54,7 +118,16 @@ static inline int local_sub_and_test(long i, local_t *l)
13970 {
13971 unsigned char c;
13972
13973- asm volatile(_ASM_SUB "%2,%0; sete %1"
13974+ asm volatile(_ASM_SUB "%2,%0\n"
13975+
13976+#ifdef CONFIG_PAX_REFCOUNT
13977+ "jno 0f\n"
13978+ _ASM_ADD "%2,%0\n"
13979+ "int $4\n0:\n"
13980+ _ASM_EXTABLE(0b, 0b)
13981+#endif
13982+
13983+ "sete %1\n"
13984 : "+m" (l->a.counter), "=qm" (c)
13985 : "ir" (i) : "memory");
13986 return c;
13987@@ -72,7 +145,16 @@ static inline int local_dec_and_test(local_t *l)
13988 {
13989 unsigned char c;
13990
13991- asm volatile(_ASM_DEC "%0; sete %1"
13992+ asm volatile(_ASM_DEC "%0\n"
13993+
13994+#ifdef CONFIG_PAX_REFCOUNT
13995+ "jno 0f\n"
13996+ _ASM_INC "%0\n"
13997+ "int $4\n0:\n"
13998+ _ASM_EXTABLE(0b, 0b)
13999+#endif
14000+
14001+ "sete %1\n"
14002 : "+m" (l->a.counter), "=qm" (c)
14003 : : "memory");
14004 return c != 0;
14005@@ -90,7 +172,16 @@ static inline int local_inc_and_test(local_t *l)
14006 {
14007 unsigned char c;
14008
14009- asm volatile(_ASM_INC "%0; sete %1"
14010+ asm volatile(_ASM_INC "%0\n"
14011+
14012+#ifdef CONFIG_PAX_REFCOUNT
14013+ "jno 0f\n"
14014+ _ASM_DEC "%0\n"
14015+ "int $4\n0:\n"
14016+ _ASM_EXTABLE(0b, 0b)
14017+#endif
14018+
14019+ "sete %1\n"
14020 : "+m" (l->a.counter), "=qm" (c)
14021 : : "memory");
14022 return c != 0;
14023@@ -109,7 +200,16 @@ static inline int local_add_negative(long i, local_t *l)
14024 {
14025 unsigned char c;
14026
14027- asm volatile(_ASM_ADD "%2,%0; sets %1"
14028+ asm volatile(_ASM_ADD "%2,%0\n"
14029+
14030+#ifdef CONFIG_PAX_REFCOUNT
14031+ "jno 0f\n"
14032+ _ASM_SUB "%2,%0\n"
14033+ "int $4\n0:\n"
14034+ _ASM_EXTABLE(0b, 0b)
14035+#endif
14036+
14037+ "sets %1\n"
14038 : "+m" (l->a.counter), "=qm" (c)
14039 : "ir" (i) : "memory");
14040 return c;
14041@@ -125,6 +225,30 @@ static inline int local_add_negative(long i, local_t *l)
14042 static inline long local_add_return(long i, local_t *l)
14043 {
14044 long __i = i;
14045+ asm volatile(_ASM_XADD "%0, %1\n"
14046+
14047+#ifdef CONFIG_PAX_REFCOUNT
14048+ "jno 0f\n"
14049+ _ASM_MOV "%0,%1\n"
14050+ "int $4\n0:\n"
14051+ _ASM_EXTABLE(0b, 0b)
14052+#endif
14053+
14054+ : "+r" (i), "+m" (l->a.counter)
14055+ : : "memory");
14056+ return i + __i;
14057+}
14058+
14059+/**
14060+ * local_add_return_unchecked - add and return
14061+ * @i: integer value to add
14062+ * @l: pointer to type local_unchecked_t
14063+ *
14064+ * Atomically adds @i to @l and returns @i + @l
14065+ */
14066+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
14067+{
14068+ long __i = i;
14069 asm volatile(_ASM_XADD "%0, %1;"
14070 : "+r" (i), "+m" (l->a.counter)
14071 : : "memory");
14072@@ -141,6 +265,8 @@ static inline long local_sub_return(long i, local_t *l)
14073
14074 #define local_cmpxchg(l, o, n) \
14075 (cmpxchg_local(&((l)->a.counter), (o), (n)))
14076+#define local_cmpxchg_unchecked(l, o, n) \
14077+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
14078 /* Always has a lock prefix */
14079 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
14080
14081diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
14082new file mode 100644
14083index 0000000..2bfd3ba
14084--- /dev/null
14085+++ b/arch/x86/include/asm/mman.h
14086@@ -0,0 +1,15 @@
14087+#ifndef _X86_MMAN_H
14088+#define _X86_MMAN_H
14089+
14090+#include <uapi/asm/mman.h>
14091+
14092+#ifdef __KERNEL__
14093+#ifndef __ASSEMBLY__
14094+#ifdef CONFIG_X86_32
14095+#define arch_mmap_check i386_mmap_check
14096+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
14097+#endif
14098+#endif
14099+#endif
14100+
14101+#endif /* X86_MMAN_H */
14102diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
14103index 5f55e69..e20bfb1 100644
14104--- a/arch/x86/include/asm/mmu.h
14105+++ b/arch/x86/include/asm/mmu.h
14106@@ -9,7 +9,7 @@
14107 * we put the segment information here.
14108 */
14109 typedef struct {
14110- void *ldt;
14111+ struct desc_struct *ldt;
14112 int size;
14113
14114 #ifdef CONFIG_X86_64
14115@@ -18,7 +18,19 @@ typedef struct {
14116 #endif
14117
14118 struct mutex lock;
14119- void *vdso;
14120+ unsigned long vdso;
14121+
14122+#ifdef CONFIG_X86_32
14123+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
14124+ unsigned long user_cs_base;
14125+ unsigned long user_cs_limit;
14126+
14127+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
14128+ cpumask_t cpu_user_cs_mask;
14129+#endif
14130+
14131+#endif
14132+#endif
14133 } mm_context_t;
14134
14135 #ifdef CONFIG_SMP
14136diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
14137index cdbf367..adb37ac 100644
14138--- a/arch/x86/include/asm/mmu_context.h
14139+++ b/arch/x86/include/asm/mmu_context.h
14140@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
14141
14142 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
14143 {
14144+
14145+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
14146+ unsigned int i;
14147+ pgd_t *pgd;
14148+
14149+ pax_open_kernel();
14150+ pgd = get_cpu_pgd(smp_processor_id());
14151+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
14152+ set_pgd_batched(pgd+i, native_make_pgd(0));
14153+ pax_close_kernel();
14154+#endif
14155+
14156 #ifdef CONFIG_SMP
14157 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
14158 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
14159@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
14160 struct task_struct *tsk)
14161 {
14162 unsigned cpu = smp_processor_id();
14163+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
14164+ int tlbstate = TLBSTATE_OK;
14165+#endif
14166
14167 if (likely(prev != next)) {
14168 #ifdef CONFIG_SMP
14169+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
14170+ tlbstate = this_cpu_read(cpu_tlbstate.state);
14171+#endif
14172 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
14173 this_cpu_write(cpu_tlbstate.active_mm, next);
14174 #endif
14175 cpumask_set_cpu(cpu, mm_cpumask(next));
14176
14177 /* Re-load page tables */
14178+#ifdef CONFIG_PAX_PER_CPU_PGD
14179+ pax_open_kernel();
14180+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
14181+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
14182+ pax_close_kernel();
14183+ load_cr3(get_cpu_pgd(cpu));
14184+#else
14185 load_cr3(next->pgd);
14186+#endif
14187
14188 /* stop flush ipis for the previous mm */
14189 cpumask_clear_cpu(cpu, mm_cpumask(prev));
14190@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
14191 */
14192 if (unlikely(prev->context.ldt != next->context.ldt))
14193 load_LDT_nolock(&next->context);
14194- }
14195+
14196+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
14197+ if (!(__supported_pte_mask & _PAGE_NX)) {
14198+ smp_mb__before_clear_bit();
14199+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
14200+ smp_mb__after_clear_bit();
14201+ cpu_set(cpu, next->context.cpu_user_cs_mask);
14202+ }
14203+#endif
14204+
14205+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
14206+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
14207+ prev->context.user_cs_limit != next->context.user_cs_limit))
14208+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
14209 #ifdef CONFIG_SMP
14210+ else if (unlikely(tlbstate != TLBSTATE_OK))
14211+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
14212+#endif
14213+#endif
14214+
14215+ }
14216 else {
14217+
14218+#ifdef CONFIG_PAX_PER_CPU_PGD
14219+ pax_open_kernel();
14220+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
14221+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
14222+ pax_close_kernel();
14223+ load_cr3(get_cpu_pgd(cpu));
14224+#endif
14225+
14226+#ifdef CONFIG_SMP
14227 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
14228 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
14229
14230@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
14231 * tlb flush IPI delivery. We must reload CR3
14232 * to make sure to use no freed page tables.
14233 */
14234+
14235+#ifndef CONFIG_PAX_PER_CPU_PGD
14236 load_cr3(next->pgd);
14237+#endif
14238+
14239 load_LDT_nolock(&next->context);
14240+
14241+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
14242+ if (!(__supported_pte_mask & _PAGE_NX))
14243+ cpu_set(cpu, next->context.cpu_user_cs_mask);
14244+#endif
14245+
14246+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
14247+#ifdef CONFIG_PAX_PAGEEXEC
14248+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
14249+#endif
14250+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
14251+#endif
14252+
14253 }
14254+#endif
14255 }
14256-#endif
14257 }
14258
14259 #define activate_mm(prev, next) \
14260diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
14261index e3b7819..b257c64 100644
14262--- a/arch/x86/include/asm/module.h
14263+++ b/arch/x86/include/asm/module.h
14264@@ -5,6 +5,7 @@
14265
14266 #ifdef CONFIG_X86_64
14267 /* X86_64 does not define MODULE_PROC_FAMILY */
14268+#define MODULE_PROC_FAMILY ""
14269 #elif defined CONFIG_M486
14270 #define MODULE_PROC_FAMILY "486 "
14271 #elif defined CONFIG_M586
14272@@ -57,8 +58,20 @@
14273 #error unknown processor family
14274 #endif
14275
14276-#ifdef CONFIG_X86_32
14277-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
14278+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
14279+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
14280+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
14281+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
14282+#else
14283+#define MODULE_PAX_KERNEXEC ""
14284 #endif
14285
14286+#ifdef CONFIG_PAX_MEMORY_UDEREF
14287+#define MODULE_PAX_UDEREF "UDEREF "
14288+#else
14289+#define MODULE_PAX_UDEREF ""
14290+#endif
14291+
14292+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
14293+
14294 #endif /* _ASM_X86_MODULE_H */
14295diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
14296index c0fa356..07a498a 100644
14297--- a/arch/x86/include/asm/nmi.h
14298+++ b/arch/x86/include/asm/nmi.h
14299@@ -42,11 +42,11 @@ struct nmiaction {
14300 nmi_handler_t handler;
14301 unsigned long flags;
14302 const char *name;
14303-};
14304+} __do_const;
14305
14306 #define register_nmi_handler(t, fn, fg, n, init...) \
14307 ({ \
14308- static struct nmiaction init fn##_na = { \
14309+ static const struct nmiaction init fn##_na = { \
14310 .handler = (fn), \
14311 .name = (n), \
14312 .flags = (fg), \
14313@@ -54,7 +54,7 @@ struct nmiaction {
14314 __register_nmi_handler((t), &fn##_na); \
14315 })
14316
14317-int __register_nmi_handler(unsigned int, struct nmiaction *);
14318+int __register_nmi_handler(unsigned int, const struct nmiaction *);
14319
14320 void unregister_nmi_handler(unsigned int, const char *);
14321
14322diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
14323index 320f7bb..e89f8f8 100644
14324--- a/arch/x86/include/asm/page_64_types.h
14325+++ b/arch/x86/include/asm/page_64_types.h
14326@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
14327
14328 /* duplicated to the one in bootmem.h */
14329 extern unsigned long max_pfn;
14330-extern unsigned long phys_base;
14331+extern const unsigned long phys_base;
14332
14333 extern unsigned long __phys_addr(unsigned long);
14334 #define __phys_reloc_hide(x) (x)
14335diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
14336index 5edd174..c395822 100644
14337--- a/arch/x86/include/asm/paravirt.h
14338+++ b/arch/x86/include/asm/paravirt.h
14339@@ -564,7 +564,7 @@ static inline pmd_t __pmd(pmdval_t val)
14340 return (pmd_t) { ret };
14341 }
14342
14343-static inline pmdval_t pmd_val(pmd_t pmd)
14344+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
14345 {
14346 pmdval_t ret;
14347
14348@@ -630,6 +630,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
14349 val);
14350 }
14351
14352+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
14353+{
14354+ pgdval_t val = native_pgd_val(pgd);
14355+
14356+ if (sizeof(pgdval_t) > sizeof(long))
14357+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
14358+ val, (u64)val >> 32);
14359+ else
14360+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
14361+ val);
14362+}
14363+
14364 static inline void pgd_clear(pgd_t *pgdp)
14365 {
14366 set_pgd(pgdp, __pgd(0));
14367@@ -711,6 +723,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
14368 pv_mmu_ops.set_fixmap(idx, phys, flags);
14369 }
14370
14371+#ifdef CONFIG_PAX_KERNEXEC
14372+static inline unsigned long pax_open_kernel(void)
14373+{
14374+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
14375+}
14376+
14377+static inline unsigned long pax_close_kernel(void)
14378+{
14379+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
14380+}
14381+#else
14382+static inline unsigned long pax_open_kernel(void) { return 0; }
14383+static inline unsigned long pax_close_kernel(void) { return 0; }
14384+#endif
14385+
14386 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
14387
14388 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
14389@@ -927,7 +954,7 @@ extern void default_banner(void);
14390
14391 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
14392 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
14393-#define PARA_INDIRECT(addr) *%cs:addr
14394+#define PARA_INDIRECT(addr) *%ss:addr
14395 #endif
14396
14397 #define INTERRUPT_RETURN \
14398@@ -1002,6 +1029,21 @@ extern void default_banner(void);
14399 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
14400 CLBR_NONE, \
14401 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
14402+
14403+#define GET_CR0_INTO_RDI \
14404+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
14405+ mov %rax,%rdi
14406+
14407+#define SET_RDI_INTO_CR0 \
14408+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
14409+
14410+#define GET_CR3_INTO_RDI \
14411+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
14412+ mov %rax,%rdi
14413+
14414+#define SET_RDI_INTO_CR3 \
14415+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
14416+
14417 #endif /* CONFIG_X86_32 */
14418
14419 #endif /* __ASSEMBLY__ */
14420diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
14421index 142236e..5446ffbc 100644
14422--- a/arch/x86/include/asm/paravirt_types.h
14423+++ b/arch/x86/include/asm/paravirt_types.h
14424@@ -84,7 +84,7 @@ struct pv_init_ops {
14425 */
14426 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
14427 unsigned long addr, unsigned len);
14428-};
14429+} __no_const;
14430
14431
14432 struct pv_lazy_ops {
14433@@ -97,7 +97,7 @@ struct pv_time_ops {
14434 unsigned long long (*sched_clock)(void);
14435 unsigned long long (*steal_clock)(int cpu);
14436 unsigned long (*get_tsc_khz)(void);
14437-};
14438+} __no_const;
14439
14440 struct pv_cpu_ops {
14441 /* hooks for various privileged instructions */
14442@@ -191,7 +191,7 @@ struct pv_cpu_ops {
14443
14444 void (*start_context_switch)(struct task_struct *prev);
14445 void (*end_context_switch)(struct task_struct *next);
14446-};
14447+} __no_const;
14448
14449 struct pv_irq_ops {
14450 /*
14451@@ -222,7 +222,7 @@ struct pv_apic_ops {
14452 unsigned long start_eip,
14453 unsigned long start_esp);
14454 #endif
14455-};
14456+} __no_const;
14457
14458 struct pv_mmu_ops {
14459 unsigned long (*read_cr2)(void);
14460@@ -312,6 +312,7 @@ struct pv_mmu_ops {
14461 struct paravirt_callee_save make_pud;
14462
14463 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
14464+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
14465 #endif /* PAGETABLE_LEVELS == 4 */
14466 #endif /* PAGETABLE_LEVELS >= 3 */
14467
14468@@ -323,6 +324,12 @@ struct pv_mmu_ops {
14469 an mfn. We can tell which is which from the index. */
14470 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
14471 phys_addr_t phys, pgprot_t flags);
14472+
14473+#ifdef CONFIG_PAX_KERNEXEC
14474+ unsigned long (*pax_open_kernel)(void);
14475+ unsigned long (*pax_close_kernel)(void);
14476+#endif
14477+
14478 };
14479
14480 struct arch_spinlock;
14481@@ -333,7 +340,7 @@ struct pv_lock_ops {
14482 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
14483 int (*spin_trylock)(struct arch_spinlock *lock);
14484 void (*spin_unlock)(struct arch_spinlock *lock);
14485-};
14486+} __no_const;
14487
14488 /* This contains all the paravirt structures: we get a convenient
14489 * number for each function using the offset which we use to indicate
14490diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
14491index b4389a4..7024269 100644
14492--- a/arch/x86/include/asm/pgalloc.h
14493+++ b/arch/x86/include/asm/pgalloc.h
14494@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
14495 pmd_t *pmd, pte_t *pte)
14496 {
14497 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
14498+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
14499+}
14500+
14501+static inline void pmd_populate_user(struct mm_struct *mm,
14502+ pmd_t *pmd, pte_t *pte)
14503+{
14504+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
14505 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
14506 }
14507
14508@@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
14509
14510 #ifdef CONFIG_X86_PAE
14511 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
14512+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
14513+{
14514+ pud_populate(mm, pudp, pmd);
14515+}
14516 #else /* !CONFIG_X86_PAE */
14517 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
14518 {
14519 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
14520 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
14521 }
14522+
14523+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
14524+{
14525+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
14526+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
14527+}
14528 #endif /* CONFIG_X86_PAE */
14529
14530 #if PAGETABLE_LEVELS > 3
14531@@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
14532 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
14533 }
14534
14535+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
14536+{
14537+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
14538+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
14539+}
14540+
14541 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
14542 {
14543 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
14544diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
14545index f2b489c..4f7e2e5 100644
14546--- a/arch/x86/include/asm/pgtable-2level.h
14547+++ b/arch/x86/include/asm/pgtable-2level.h
14548@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
14549
14550 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
14551 {
14552+ pax_open_kernel();
14553 *pmdp = pmd;
14554+ pax_close_kernel();
14555 }
14556
14557 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
14558diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
14559index 4cc9f2b..5fd9226 100644
14560--- a/arch/x86/include/asm/pgtable-3level.h
14561+++ b/arch/x86/include/asm/pgtable-3level.h
14562@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
14563
14564 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
14565 {
14566+ pax_open_kernel();
14567 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
14568+ pax_close_kernel();
14569 }
14570
14571 static inline void native_set_pud(pud_t *pudp, pud_t pud)
14572 {
14573+ pax_open_kernel();
14574 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
14575+ pax_close_kernel();
14576 }
14577
14578 /*
14579diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
14580index 1c1a955..50f828c 100644
14581--- a/arch/x86/include/asm/pgtable.h
14582+++ b/arch/x86/include/asm/pgtable.h
14583@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
14584
14585 #ifndef __PAGETABLE_PUD_FOLDED
14586 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
14587+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
14588 #define pgd_clear(pgd) native_pgd_clear(pgd)
14589 #endif
14590
14591@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
14592
14593 #define arch_end_context_switch(prev) do {} while(0)
14594
14595+#define pax_open_kernel() native_pax_open_kernel()
14596+#define pax_close_kernel() native_pax_close_kernel()
14597 #endif /* CONFIG_PARAVIRT */
14598
14599+#define __HAVE_ARCH_PAX_OPEN_KERNEL
14600+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
14601+
14602+#ifdef CONFIG_PAX_KERNEXEC
14603+static inline unsigned long native_pax_open_kernel(void)
14604+{
14605+ unsigned long cr0;
14606+
14607+ preempt_disable();
14608+ barrier();
14609+ cr0 = read_cr0() ^ X86_CR0_WP;
14610+ BUG_ON(cr0 & X86_CR0_WP);
14611+ write_cr0(cr0);
14612+ return cr0 ^ X86_CR0_WP;
14613+}
14614+
14615+static inline unsigned long native_pax_close_kernel(void)
14616+{
14617+ unsigned long cr0;
14618+
14619+ cr0 = read_cr0() ^ X86_CR0_WP;
14620+ BUG_ON(!(cr0 & X86_CR0_WP));
14621+ write_cr0(cr0);
14622+ barrier();
14623+ preempt_enable_no_resched();
14624+ return cr0 ^ X86_CR0_WP;
14625+}
14626+#else
14627+static inline unsigned long native_pax_open_kernel(void) { return 0; }
14628+static inline unsigned long native_pax_close_kernel(void) { return 0; }
14629+#endif
14630+
14631 /*
14632 * The following only work if pte_present() is true.
14633 * Undefined behaviour if not..
14634 */
14635+static inline int pte_user(pte_t pte)
14636+{
14637+ return pte_val(pte) & _PAGE_USER;
14638+}
14639+
14640 static inline int pte_dirty(pte_t pte)
14641 {
14642 return pte_flags(pte) & _PAGE_DIRTY;
14643@@ -200,9 +240,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
14644 return pte_clear_flags(pte, _PAGE_RW);
14645 }
14646
14647+static inline pte_t pte_mkread(pte_t pte)
14648+{
14649+ return __pte(pte_val(pte) | _PAGE_USER);
14650+}
14651+
14652 static inline pte_t pte_mkexec(pte_t pte)
14653 {
14654- return pte_clear_flags(pte, _PAGE_NX);
14655+#ifdef CONFIG_X86_PAE
14656+ if (__supported_pte_mask & _PAGE_NX)
14657+ return pte_clear_flags(pte, _PAGE_NX);
14658+ else
14659+#endif
14660+ return pte_set_flags(pte, _PAGE_USER);
14661+}
14662+
14663+static inline pte_t pte_exprotect(pte_t pte)
14664+{
14665+#ifdef CONFIG_X86_PAE
14666+ if (__supported_pte_mask & _PAGE_NX)
14667+ return pte_set_flags(pte, _PAGE_NX);
14668+ else
14669+#endif
14670+ return pte_clear_flags(pte, _PAGE_USER);
14671 }
14672
14673 static inline pte_t pte_mkdirty(pte_t pte)
14674@@ -394,6 +454,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
14675 #endif
14676
14677 #ifndef __ASSEMBLY__
14678+
14679+#ifdef CONFIG_PAX_PER_CPU_PGD
14680+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
14681+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
14682+{
14683+ return cpu_pgd[cpu];
14684+}
14685+#endif
14686+
14687 #include <linux/mm_types.h>
14688
14689 static inline int pte_none(pte_t pte)
14690@@ -583,7 +652,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
14691
14692 static inline int pgd_bad(pgd_t pgd)
14693 {
14694- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
14695+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
14696 }
14697
14698 static inline int pgd_none(pgd_t pgd)
14699@@ -606,7 +675,12 @@ static inline int pgd_none(pgd_t pgd)
14700 * pgd_offset() returns a (pgd_t *)
14701 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
14702 */
14703-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
14704+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
14705+
14706+#ifdef CONFIG_PAX_PER_CPU_PGD
14707+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
14708+#endif
14709+
14710 /*
14711 * a shortcut which implies the use of the kernel's pgd, instead
14712 * of a process's
14713@@ -617,6 +691,20 @@ static inline int pgd_none(pgd_t pgd)
14714 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
14715 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
14716
14717+#ifdef CONFIG_X86_32
14718+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
14719+#else
14720+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
14721+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
14722+
14723+#ifdef CONFIG_PAX_MEMORY_UDEREF
14724+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
14725+#else
14726+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
14727+#endif
14728+
14729+#endif
14730+
14731 #ifndef __ASSEMBLY__
14732
14733 extern int direct_gbpages;
14734@@ -781,11 +869,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
14735 * dst and src can be on the same page, but the range must not overlap,
14736 * and must not cross a page boundary.
14737 */
14738-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
14739+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
14740 {
14741- memcpy(dst, src, count * sizeof(pgd_t));
14742+ pax_open_kernel();
14743+ while (count--)
14744+ *dst++ = *src++;
14745+ pax_close_kernel();
14746 }
14747
14748+#ifdef CONFIG_PAX_PER_CPU_PGD
14749+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
14750+#endif
14751+
14752+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
14753+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
14754+#else
14755+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
14756+#endif
14757
14758 #include <asm-generic/pgtable.h>
14759 #endif /* __ASSEMBLY__ */
14760diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
14761index 8faa215..a8a17ea 100644
14762--- a/arch/x86/include/asm/pgtable_32.h
14763+++ b/arch/x86/include/asm/pgtable_32.h
14764@@ -25,9 +25,6 @@
14765 struct mm_struct;
14766 struct vm_area_struct;
14767
14768-extern pgd_t swapper_pg_dir[1024];
14769-extern pgd_t initial_page_table[1024];
14770-
14771 static inline void pgtable_cache_init(void) { }
14772 static inline void check_pgt_cache(void) { }
14773 void paging_init(void);
14774@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
14775 # include <asm/pgtable-2level.h>
14776 #endif
14777
14778+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
14779+extern pgd_t initial_page_table[PTRS_PER_PGD];
14780+#ifdef CONFIG_X86_PAE
14781+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
14782+#endif
14783+
14784 #if defined(CONFIG_HIGHPTE)
14785 #define pte_offset_map(dir, address) \
14786 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
14787@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
14788 /* Clear a kernel PTE and flush it from the TLB */
14789 #define kpte_clear_flush(ptep, vaddr) \
14790 do { \
14791+ pax_open_kernel(); \
14792 pte_clear(&init_mm, (vaddr), (ptep)); \
14793+ pax_close_kernel(); \
14794 __flush_tlb_one((vaddr)); \
14795 } while (0)
14796
14797@@ -75,6 +80,9 @@ do { \
14798
14799 #endif /* !__ASSEMBLY__ */
14800
14801+#define HAVE_ARCH_UNMAPPED_AREA
14802+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
14803+
14804 /*
14805 * kern_addr_valid() is (1) for FLATMEM and (0) for
14806 * SPARSEMEM and DISCONTIGMEM
14807diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
14808index ed5903b..c7fe163 100644
14809--- a/arch/x86/include/asm/pgtable_32_types.h
14810+++ b/arch/x86/include/asm/pgtable_32_types.h
14811@@ -8,7 +8,7 @@
14812 */
14813 #ifdef CONFIG_X86_PAE
14814 # include <asm/pgtable-3level_types.h>
14815-# define PMD_SIZE (1UL << PMD_SHIFT)
14816+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
14817 # define PMD_MASK (~(PMD_SIZE - 1))
14818 #else
14819 # include <asm/pgtable-2level_types.h>
14820@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
14821 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
14822 #endif
14823
14824+#ifdef CONFIG_PAX_KERNEXEC
14825+#ifndef __ASSEMBLY__
14826+extern unsigned char MODULES_EXEC_VADDR[];
14827+extern unsigned char MODULES_EXEC_END[];
14828+#endif
14829+#include <asm/boot.h>
14830+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
14831+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
14832+#else
14833+#define ktla_ktva(addr) (addr)
14834+#define ktva_ktla(addr) (addr)
14835+#endif
14836+
14837 #define MODULES_VADDR VMALLOC_START
14838 #define MODULES_END VMALLOC_END
14839 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
14840diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
14841index 47356f9..deb94a2 100644
14842--- a/arch/x86/include/asm/pgtable_64.h
14843+++ b/arch/x86/include/asm/pgtable_64.h
14844@@ -16,10 +16,14 @@
14845
14846 extern pud_t level3_kernel_pgt[512];
14847 extern pud_t level3_ident_pgt[512];
14848+extern pud_t level3_vmalloc_start_pgt[512];
14849+extern pud_t level3_vmalloc_end_pgt[512];
14850+extern pud_t level3_vmemmap_pgt[512];
14851+extern pud_t level2_vmemmap_pgt[512];
14852 extern pmd_t level2_kernel_pgt[512];
14853 extern pmd_t level2_fixmap_pgt[512];
14854-extern pmd_t level2_ident_pgt[512];
14855-extern pgd_t init_level4_pgt[];
14856+extern pmd_t level2_ident_pgt[512*2];
14857+extern pgd_t init_level4_pgt[512];
14858
14859 #define swapper_pg_dir init_level4_pgt
14860
14861@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
14862
14863 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
14864 {
14865+ pax_open_kernel();
14866 *pmdp = pmd;
14867+ pax_close_kernel();
14868 }
14869
14870 static inline void native_pmd_clear(pmd_t *pmd)
14871@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
14872
14873 static inline void native_set_pud(pud_t *pudp, pud_t pud)
14874 {
14875+ pax_open_kernel();
14876 *pudp = pud;
14877+ pax_close_kernel();
14878 }
14879
14880 static inline void native_pud_clear(pud_t *pud)
14881@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
14882
14883 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
14884 {
14885+ pax_open_kernel();
14886+ *pgdp = pgd;
14887+ pax_close_kernel();
14888+}
14889+
14890+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
14891+{
14892 *pgdp = pgd;
14893 }
14894
14895diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
14896index 766ea16..5b96cb3 100644
14897--- a/arch/x86/include/asm/pgtable_64_types.h
14898+++ b/arch/x86/include/asm/pgtable_64_types.h
14899@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
14900 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
14901 #define MODULES_END _AC(0xffffffffff000000, UL)
14902 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
14903+#define MODULES_EXEC_VADDR MODULES_VADDR
14904+#define MODULES_EXEC_END MODULES_END
14905+
14906+#define ktla_ktva(addr) (addr)
14907+#define ktva_ktla(addr) (addr)
14908
14909 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
14910diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
14911index 3c32db8..1ddccf5 100644
14912--- a/arch/x86/include/asm/pgtable_types.h
14913+++ b/arch/x86/include/asm/pgtable_types.h
14914@@ -16,13 +16,12 @@
14915 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
14916 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
14917 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
14918-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
14919+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
14920 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
14921 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
14922 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
14923-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
14924-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
14925-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
14926+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
14927+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
14928 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
14929
14930 /* If _PAGE_BIT_PRESENT is clear, we use these: */
14931@@ -40,7 +39,6 @@
14932 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
14933 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
14934 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
14935-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
14936 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
14937 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
14938 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
14939@@ -57,8 +55,10 @@
14940
14941 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
14942 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
14943-#else
14944+#elif defined(CONFIG_KMEMCHECK)
14945 #define _PAGE_NX (_AT(pteval_t, 0))
14946+#else
14947+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
14948 #endif
14949
14950 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
14951@@ -116,6 +116,9 @@
14952 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
14953 _PAGE_ACCESSED)
14954
14955+#define PAGE_READONLY_NOEXEC PAGE_READONLY
14956+#define PAGE_SHARED_NOEXEC PAGE_SHARED
14957+
14958 #define __PAGE_KERNEL_EXEC \
14959 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
14960 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
14961@@ -126,7 +129,7 @@
14962 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
14963 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
14964 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
14965-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
14966+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
14967 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
14968 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
14969 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
14970@@ -188,8 +191,8 @@
14971 * bits are combined, this will alow user to access the high address mapped
14972 * VDSO in the presence of CONFIG_COMPAT_VDSO
14973 */
14974-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
14975-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
14976+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
14977+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
14978 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
14979 #endif
14980
14981@@ -227,7 +230,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
14982 {
14983 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
14984 }
14985+#endif
14986
14987+#if PAGETABLE_LEVELS == 3
14988+#include <asm-generic/pgtable-nopud.h>
14989+#endif
14990+
14991+#if PAGETABLE_LEVELS == 2
14992+#include <asm-generic/pgtable-nopmd.h>
14993+#endif
14994+
14995+#ifndef __ASSEMBLY__
14996 #if PAGETABLE_LEVELS > 3
14997 typedef struct { pudval_t pud; } pud_t;
14998
14999@@ -241,8 +254,6 @@ static inline pudval_t native_pud_val(pud_t pud)
15000 return pud.pud;
15001 }
15002 #else
15003-#include <asm-generic/pgtable-nopud.h>
15004-
15005 static inline pudval_t native_pud_val(pud_t pud)
15006 {
15007 return native_pgd_val(pud.pgd);
15008@@ -262,8 +273,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
15009 return pmd.pmd;
15010 }
15011 #else
15012-#include <asm-generic/pgtable-nopmd.h>
15013-
15014 static inline pmdval_t native_pmd_val(pmd_t pmd)
15015 {
15016 return native_pgd_val(pmd.pud.pgd);
15017@@ -303,7 +312,6 @@ typedef struct page *pgtable_t;
15018
15019 extern pteval_t __supported_pte_mask;
15020 extern void set_nx(void);
15021-extern int nx_enabled;
15022
15023 #define pgprot_writecombine pgprot_writecombine
15024 extern pgprot_t pgprot_writecombine(pgprot_t prot);
15025diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
15026index 888184b..a07ac89 100644
15027--- a/arch/x86/include/asm/processor.h
15028+++ b/arch/x86/include/asm/processor.h
15029@@ -287,7 +287,7 @@ struct tss_struct {
15030
15031 } ____cacheline_aligned;
15032
15033-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
15034+extern struct tss_struct init_tss[NR_CPUS];
15035
15036 /*
15037 * Save the original ist values for checking stack pointers during debugging
15038@@ -827,11 +827,18 @@ static inline void spin_lock_prefetch(const void *x)
15039 */
15040 #define TASK_SIZE PAGE_OFFSET
15041 #define TASK_SIZE_MAX TASK_SIZE
15042+
15043+#ifdef CONFIG_PAX_SEGMEXEC
15044+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
15045+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
15046+#else
15047 #define STACK_TOP TASK_SIZE
15048-#define STACK_TOP_MAX STACK_TOP
15049+#endif
15050+
15051+#define STACK_TOP_MAX TASK_SIZE
15052
15053 #define INIT_THREAD { \
15054- .sp0 = sizeof(init_stack) + (long)&init_stack, \
15055+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
15056 .vm86_info = NULL, \
15057 .sysenter_cs = __KERNEL_CS, \
15058 .io_bitmap_ptr = NULL, \
15059@@ -845,7 +852,7 @@ static inline void spin_lock_prefetch(const void *x)
15060 */
15061 #define INIT_TSS { \
15062 .x86_tss = { \
15063- .sp0 = sizeof(init_stack) + (long)&init_stack, \
15064+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
15065 .ss0 = __KERNEL_DS, \
15066 .ss1 = __KERNEL_CS, \
15067 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
15068@@ -856,11 +863,7 @@ static inline void spin_lock_prefetch(const void *x)
15069 extern unsigned long thread_saved_pc(struct task_struct *tsk);
15070
15071 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
15072-#define KSTK_TOP(info) \
15073-({ \
15074- unsigned long *__ptr = (unsigned long *)(info); \
15075- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
15076-})
15077+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
15078
15079 /*
15080 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
15081@@ -875,7 +878,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
15082 #define task_pt_regs(task) \
15083 ({ \
15084 struct pt_regs *__regs__; \
15085- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
15086+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
15087 __regs__ - 1; \
15088 })
15089
15090@@ -885,13 +888,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
15091 /*
15092 * User space process size. 47bits minus one guard page.
15093 */
15094-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
15095+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
15096
15097 /* This decides where the kernel will search for a free chunk of vm
15098 * space during mmap's.
15099 */
15100 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
15101- 0xc0000000 : 0xFFFFe000)
15102+ 0xc0000000 : 0xFFFFf000)
15103
15104 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
15105 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
15106@@ -902,11 +905,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
15107 #define STACK_TOP_MAX TASK_SIZE_MAX
15108
15109 #define INIT_THREAD { \
15110- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
15111+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
15112 }
15113
15114 #define INIT_TSS { \
15115- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
15116+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
15117 }
15118
15119 /*
15120@@ -934,6 +937,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
15121 */
15122 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
15123
15124+#ifdef CONFIG_PAX_SEGMEXEC
15125+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
15126+#endif
15127+
15128 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
15129
15130 /* Get/set a process' ability to use the timestamp counter instruction */
15131@@ -994,12 +1001,12 @@ extern bool cpu_has_amd_erratum(const int *);
15132 #define cpu_has_amd_erratum(x) (false)
15133 #endif /* CONFIG_CPU_SUP_AMD */
15134
15135-extern unsigned long arch_align_stack(unsigned long sp);
15136+#define arch_align_stack(x) ((x) & ~0xfUL)
15137 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
15138
15139 void default_idle(void);
15140 bool set_pm_idle_to_default(void);
15141
15142-void stop_this_cpu(void *dummy);
15143+void stop_this_cpu(void *dummy) __noreturn;
15144
15145 #endif /* _ASM_X86_PROCESSOR_H */
15146diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
15147index 942a086..6c26446 100644
15148--- a/arch/x86/include/asm/ptrace.h
15149+++ b/arch/x86/include/asm/ptrace.h
15150@@ -85,28 +85,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
15151 }
15152
15153 /*
15154- * user_mode_vm(regs) determines whether a register set came from user mode.
15155+ * user_mode(regs) determines whether a register set came from user mode.
15156 * This is true if V8086 mode was enabled OR if the register set was from
15157 * protected mode with RPL-3 CS value. This tricky test checks that with
15158 * one comparison. Many places in the kernel can bypass this full check
15159- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
15160+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
15161+ * be used.
15162 */
15163-static inline int user_mode(struct pt_regs *regs)
15164+static inline int user_mode_novm(struct pt_regs *regs)
15165 {
15166 #ifdef CONFIG_X86_32
15167 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
15168 #else
15169- return !!(regs->cs & 3);
15170+ return !!(regs->cs & SEGMENT_RPL_MASK);
15171 #endif
15172 }
15173
15174-static inline int user_mode_vm(struct pt_regs *regs)
15175+static inline int user_mode(struct pt_regs *regs)
15176 {
15177 #ifdef CONFIG_X86_32
15178 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
15179 USER_RPL;
15180 #else
15181- return user_mode(regs);
15182+ return user_mode_novm(regs);
15183 #endif
15184 }
15185
15186@@ -122,15 +123,16 @@ static inline int v8086_mode(struct pt_regs *regs)
15187 #ifdef CONFIG_X86_64
15188 static inline bool user_64bit_mode(struct pt_regs *regs)
15189 {
15190+ unsigned long cs = regs->cs & 0xffff;
15191 #ifndef CONFIG_PARAVIRT
15192 /*
15193 * On non-paravirt systems, this is the only long mode CPL 3
15194 * selector. We do not allow long mode selectors in the LDT.
15195 */
15196- return regs->cs == __USER_CS;
15197+ return cs == __USER_CS;
15198 #else
15199 /* Headers are too twisted for this to go in paravirt.h. */
15200- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
15201+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
15202 #endif
15203 }
15204
15205@@ -181,9 +183,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
15206 * Traps from the kernel do not save sp and ss.
15207 * Use the helper function to retrieve sp.
15208 */
15209- if (offset == offsetof(struct pt_regs, sp) &&
15210- regs->cs == __KERNEL_CS)
15211- return kernel_stack_pointer(regs);
15212+ if (offset == offsetof(struct pt_regs, sp)) {
15213+ unsigned long cs = regs->cs & 0xffff;
15214+ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
15215+ return kernel_stack_pointer(regs);
15216+ }
15217 #endif
15218 return *(unsigned long *)((unsigned long)regs + offset);
15219 }
15220diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
15221index fe1ec5b..dc5c3fe 100644
15222--- a/arch/x86/include/asm/realmode.h
15223+++ b/arch/x86/include/asm/realmode.h
15224@@ -22,16 +22,14 @@ struct real_mode_header {
15225 #endif
15226 /* APM/BIOS reboot */
15227 u32 machine_real_restart_asm;
15228-#ifdef CONFIG_X86_64
15229 u32 machine_real_restart_seg;
15230-#endif
15231 };
15232
15233 /* This must match data at trampoline_32/64.S */
15234 struct trampoline_header {
15235 #ifdef CONFIG_X86_32
15236 u32 start;
15237- u16 gdt_pad;
15238+ u16 boot_cs;
15239 u16 gdt_limit;
15240 u32 gdt_base;
15241 #else
15242diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
15243index a82c4f1..ac45053 100644
15244--- a/arch/x86/include/asm/reboot.h
15245+++ b/arch/x86/include/asm/reboot.h
15246@@ -6,13 +6,13 @@
15247 struct pt_regs;
15248
15249 struct machine_ops {
15250- void (*restart)(char *cmd);
15251- void (*halt)(void);
15252- void (*power_off)(void);
15253+ void (* __noreturn restart)(char *cmd);
15254+ void (* __noreturn halt)(void);
15255+ void (* __noreturn power_off)(void);
15256 void (*shutdown)(void);
15257 void (*crash_shutdown)(struct pt_regs *);
15258- void (*emergency_restart)(void);
15259-};
15260+ void (* __noreturn emergency_restart)(void);
15261+} __no_const;
15262
15263 extern struct machine_ops machine_ops;
15264
15265diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
15266index 2dbe4a7..ce1db00 100644
15267--- a/arch/x86/include/asm/rwsem.h
15268+++ b/arch/x86/include/asm/rwsem.h
15269@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
15270 {
15271 asm volatile("# beginning down_read\n\t"
15272 LOCK_PREFIX _ASM_INC "(%1)\n\t"
15273+
15274+#ifdef CONFIG_PAX_REFCOUNT
15275+ "jno 0f\n"
15276+ LOCK_PREFIX _ASM_DEC "(%1)\n"
15277+ "int $4\n0:\n"
15278+ _ASM_EXTABLE(0b, 0b)
15279+#endif
15280+
15281 /* adds 0x00000001 */
15282 " jns 1f\n"
15283 " call call_rwsem_down_read_failed\n"
15284@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
15285 "1:\n\t"
15286 " mov %1,%2\n\t"
15287 " add %3,%2\n\t"
15288+
15289+#ifdef CONFIG_PAX_REFCOUNT
15290+ "jno 0f\n"
15291+ "sub %3,%2\n"
15292+ "int $4\n0:\n"
15293+ _ASM_EXTABLE(0b, 0b)
15294+#endif
15295+
15296 " jle 2f\n\t"
15297 LOCK_PREFIX " cmpxchg %2,%0\n\t"
15298 " jnz 1b\n\t"
15299@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
15300 long tmp;
15301 asm volatile("# beginning down_write\n\t"
15302 LOCK_PREFIX " xadd %1,(%2)\n\t"
15303+
15304+#ifdef CONFIG_PAX_REFCOUNT
15305+ "jno 0f\n"
15306+ "mov %1,(%2)\n"
15307+ "int $4\n0:\n"
15308+ _ASM_EXTABLE(0b, 0b)
15309+#endif
15310+
15311 /* adds 0xffff0001, returns the old value */
15312 " test %1,%1\n\t"
15313 /* was the count 0 before? */
15314@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
15315 long tmp;
15316 asm volatile("# beginning __up_read\n\t"
15317 LOCK_PREFIX " xadd %1,(%2)\n\t"
15318+
15319+#ifdef CONFIG_PAX_REFCOUNT
15320+ "jno 0f\n"
15321+ "mov %1,(%2)\n"
15322+ "int $4\n0:\n"
15323+ _ASM_EXTABLE(0b, 0b)
15324+#endif
15325+
15326 /* subtracts 1, returns the old value */
15327 " jns 1f\n\t"
15328 " call call_rwsem_wake\n" /* expects old value in %edx */
15329@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
15330 long tmp;
15331 asm volatile("# beginning __up_write\n\t"
15332 LOCK_PREFIX " xadd %1,(%2)\n\t"
15333+
15334+#ifdef CONFIG_PAX_REFCOUNT
15335+ "jno 0f\n"
15336+ "mov %1,(%2)\n"
15337+ "int $4\n0:\n"
15338+ _ASM_EXTABLE(0b, 0b)
15339+#endif
15340+
15341 /* subtracts 0xffff0001, returns the old value */
15342 " jns 1f\n\t"
15343 " call call_rwsem_wake\n" /* expects old value in %edx */
15344@@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
15345 {
15346 asm volatile("# beginning __downgrade_write\n\t"
15347 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
15348+
15349+#ifdef CONFIG_PAX_REFCOUNT
15350+ "jno 0f\n"
15351+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
15352+ "int $4\n0:\n"
15353+ _ASM_EXTABLE(0b, 0b)
15354+#endif
15355+
15356 /*
15357 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
15358 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
15359@@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
15360 */
15361 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
15362 {
15363- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
15364+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
15365+
15366+#ifdef CONFIG_PAX_REFCOUNT
15367+ "jno 0f\n"
15368+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
15369+ "int $4\n0:\n"
15370+ _ASM_EXTABLE(0b, 0b)
15371+#endif
15372+
15373 : "+m" (sem->count)
15374 : "er" (delta));
15375 }
15376@@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
15377 */
15378 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
15379 {
15380- return delta + xadd(&sem->count, delta);
15381+ return delta + xadd_check_overflow(&sem->count, delta);
15382 }
15383
15384 #endif /* __KERNEL__ */
15385diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
15386index c48a950..c6d7468 100644
15387--- a/arch/x86/include/asm/segment.h
15388+++ b/arch/x86/include/asm/segment.h
15389@@ -64,10 +64,15 @@
15390 * 26 - ESPFIX small SS
15391 * 27 - per-cpu [ offset to per-cpu data area ]
15392 * 28 - stack_canary-20 [ for stack protector ]
15393- * 29 - unused
15394- * 30 - unused
15395+ * 29 - PCI BIOS CS
15396+ * 30 - PCI BIOS DS
15397 * 31 - TSS for double fault handler
15398 */
15399+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
15400+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
15401+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
15402+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
15403+
15404 #define GDT_ENTRY_TLS_MIN 6
15405 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
15406
15407@@ -79,6 +84,8 @@
15408
15409 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
15410
15411+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
15412+
15413 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
15414
15415 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
15416@@ -104,6 +111,12 @@
15417 #define __KERNEL_STACK_CANARY 0
15418 #endif
15419
15420+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
15421+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
15422+
15423+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
15424+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
15425+
15426 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
15427
15428 /*
15429@@ -141,7 +154,7 @@
15430 */
15431
15432 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
15433-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
15434+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
15435
15436
15437 #else
15438@@ -165,6 +178,8 @@
15439 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
15440 #define __USER32_DS __USER_DS
15441
15442+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
15443+
15444 #define GDT_ENTRY_TSS 8 /* needs two entries */
15445 #define GDT_ENTRY_LDT 10 /* needs two entries */
15446 #define GDT_ENTRY_TLS_MIN 12
15447@@ -185,6 +200,7 @@
15448 #endif
15449
15450 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
15451+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
15452 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
15453 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
15454 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
15455@@ -265,7 +281,7 @@ static inline unsigned long get_limit(unsigned long segment)
15456 {
15457 unsigned long __limit;
15458 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
15459- return __limit + 1;
15460+ return __limit;
15461 }
15462
15463 #endif /* !__ASSEMBLY__ */
15464diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
15465index b073aae..39f9bdd 100644
15466--- a/arch/x86/include/asm/smp.h
15467+++ b/arch/x86/include/asm/smp.h
15468@@ -36,7 +36,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
15469 /* cpus sharing the last level cache: */
15470 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
15471 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
15472-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
15473+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
15474
15475 static inline struct cpumask *cpu_sibling_mask(int cpu)
15476 {
15477@@ -79,7 +79,7 @@ struct smp_ops {
15478
15479 void (*send_call_func_ipi)(const struct cpumask *mask);
15480 void (*send_call_func_single_ipi)(int cpu);
15481-};
15482+} __no_const;
15483
15484 /* Globals due to paravirt */
15485 extern void set_cpu_sibling_map(int cpu);
15486@@ -191,14 +191,8 @@ extern unsigned disabled_cpus __cpuinitdata;
15487 extern int safe_smp_processor_id(void);
15488
15489 #elif defined(CONFIG_X86_64_SMP)
15490-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
15491-
15492-#define stack_smp_processor_id() \
15493-({ \
15494- struct thread_info *ti; \
15495- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
15496- ti->cpu; \
15497-})
15498+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
15499+#define stack_smp_processor_id() raw_smp_processor_id()
15500 #define safe_smp_processor_id() smp_processor_id()
15501
15502 #endif
15503diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
15504index 33692ea..350a534 100644
15505--- a/arch/x86/include/asm/spinlock.h
15506+++ b/arch/x86/include/asm/spinlock.h
15507@@ -172,6 +172,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
15508 static inline void arch_read_lock(arch_rwlock_t *rw)
15509 {
15510 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
15511+
15512+#ifdef CONFIG_PAX_REFCOUNT
15513+ "jno 0f\n"
15514+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
15515+ "int $4\n0:\n"
15516+ _ASM_EXTABLE(0b, 0b)
15517+#endif
15518+
15519 "jns 1f\n"
15520 "call __read_lock_failed\n\t"
15521 "1:\n"
15522@@ -181,6 +189,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
15523 static inline void arch_write_lock(arch_rwlock_t *rw)
15524 {
15525 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
15526+
15527+#ifdef CONFIG_PAX_REFCOUNT
15528+ "jno 0f\n"
15529+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
15530+ "int $4\n0:\n"
15531+ _ASM_EXTABLE(0b, 0b)
15532+#endif
15533+
15534 "jz 1f\n"
15535 "call __write_lock_failed\n\t"
15536 "1:\n"
15537@@ -210,13 +226,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
15538
15539 static inline void arch_read_unlock(arch_rwlock_t *rw)
15540 {
15541- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
15542+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
15543+
15544+#ifdef CONFIG_PAX_REFCOUNT
15545+ "jno 0f\n"
15546+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
15547+ "int $4\n0:\n"
15548+ _ASM_EXTABLE(0b, 0b)
15549+#endif
15550+
15551 :"+m" (rw->lock) : : "memory");
15552 }
15553
15554 static inline void arch_write_unlock(arch_rwlock_t *rw)
15555 {
15556- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
15557+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
15558+
15559+#ifdef CONFIG_PAX_REFCOUNT
15560+ "jno 0f\n"
15561+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
15562+ "int $4\n0:\n"
15563+ _ASM_EXTABLE(0b, 0b)
15564+#endif
15565+
15566 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
15567 }
15568
15569diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
15570index 6a99859..03cb807 100644
15571--- a/arch/x86/include/asm/stackprotector.h
15572+++ b/arch/x86/include/asm/stackprotector.h
15573@@ -47,7 +47,7 @@
15574 * head_32 for boot CPU and setup_per_cpu_areas() for others.
15575 */
15576 #define GDT_STACK_CANARY_INIT \
15577- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
15578+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
15579
15580 /*
15581 * Initialize the stackprotector canary value.
15582@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
15583
15584 static inline void load_stack_canary_segment(void)
15585 {
15586-#ifdef CONFIG_X86_32
15587+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
15588 asm volatile ("mov %0, %%gs" : : "r" (0));
15589 #endif
15590 }
15591diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
15592index 70bbe39..4ae2bd4 100644
15593--- a/arch/x86/include/asm/stacktrace.h
15594+++ b/arch/x86/include/asm/stacktrace.h
15595@@ -11,28 +11,20 @@
15596
15597 extern int kstack_depth_to_print;
15598
15599-struct thread_info;
15600+struct task_struct;
15601 struct stacktrace_ops;
15602
15603-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
15604- unsigned long *stack,
15605- unsigned long bp,
15606- const struct stacktrace_ops *ops,
15607- void *data,
15608- unsigned long *end,
15609- int *graph);
15610+typedef unsigned long walk_stack_t(struct task_struct *task,
15611+ void *stack_start,
15612+ unsigned long *stack,
15613+ unsigned long bp,
15614+ const struct stacktrace_ops *ops,
15615+ void *data,
15616+ unsigned long *end,
15617+ int *graph);
15618
15619-extern unsigned long
15620-print_context_stack(struct thread_info *tinfo,
15621- unsigned long *stack, unsigned long bp,
15622- const struct stacktrace_ops *ops, void *data,
15623- unsigned long *end, int *graph);
15624-
15625-extern unsigned long
15626-print_context_stack_bp(struct thread_info *tinfo,
15627- unsigned long *stack, unsigned long bp,
15628- const struct stacktrace_ops *ops, void *data,
15629- unsigned long *end, int *graph);
15630+extern walk_stack_t print_context_stack;
15631+extern walk_stack_t print_context_stack_bp;
15632
15633 /* Generic stack tracer with callbacks */
15634
15635@@ -40,7 +32,7 @@ struct stacktrace_ops {
15636 void (*address)(void *data, unsigned long address, int reliable);
15637 /* On negative return stop dumping */
15638 int (*stack)(void *data, char *name);
15639- walk_stack_t walk_stack;
15640+ walk_stack_t *walk_stack;
15641 };
15642
15643 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
15644diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
15645index 4ec45b3..a4f0a8a 100644
15646--- a/arch/x86/include/asm/switch_to.h
15647+++ b/arch/x86/include/asm/switch_to.h
15648@@ -108,7 +108,7 @@ do { \
15649 "call __switch_to\n\t" \
15650 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
15651 __switch_canary \
15652- "movq %P[thread_info](%%rsi),%%r8\n\t" \
15653+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
15654 "movq %%rax,%%rdi\n\t" \
15655 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
15656 "jnz ret_from_fork\n\t" \
15657@@ -119,7 +119,7 @@ do { \
15658 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
15659 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
15660 [_tif_fork] "i" (_TIF_FORK), \
15661- [thread_info] "i" (offsetof(struct task_struct, stack)), \
15662+ [thread_info] "m" (current_tinfo), \
15663 [current_task] "m" (current_task) \
15664 __switch_canary_iparam \
15665 : "memory", "cc" __EXTRA_CLOBBER)
15666diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
15667index 2d946e6..e453ec4 100644
15668--- a/arch/x86/include/asm/thread_info.h
15669+++ b/arch/x86/include/asm/thread_info.h
15670@@ -10,6 +10,7 @@
15671 #include <linux/compiler.h>
15672 #include <asm/page.h>
15673 #include <asm/types.h>
15674+#include <asm/percpu.h>
15675
15676 /*
15677 * low level task data that entry.S needs immediate access to
15678@@ -24,7 +25,6 @@ struct exec_domain;
15679 #include <linux/atomic.h>
15680
15681 struct thread_info {
15682- struct task_struct *task; /* main task structure */
15683 struct exec_domain *exec_domain; /* execution domain */
15684 __u32 flags; /* low level flags */
15685 __u32 status; /* thread synchronous flags */
15686@@ -34,19 +34,13 @@ struct thread_info {
15687 mm_segment_t addr_limit;
15688 struct restart_block restart_block;
15689 void __user *sysenter_return;
15690-#ifdef CONFIG_X86_32
15691- unsigned long previous_esp; /* ESP of the previous stack in
15692- case of nested (IRQ) stacks
15693- */
15694- __u8 supervisor_stack[0];
15695-#endif
15696+ unsigned long lowest_stack;
15697 unsigned int sig_on_uaccess_error:1;
15698 unsigned int uaccess_err:1; /* uaccess failed */
15699 };
15700
15701-#define INIT_THREAD_INFO(tsk) \
15702+#define INIT_THREAD_INFO \
15703 { \
15704- .task = &tsk, \
15705 .exec_domain = &default_exec_domain, \
15706 .flags = 0, \
15707 .cpu = 0, \
15708@@ -57,7 +51,7 @@ struct thread_info {
15709 }, \
15710 }
15711
15712-#define init_thread_info (init_thread_union.thread_info)
15713+#define init_thread_info (init_thread_union.stack)
15714 #define init_stack (init_thread_union.stack)
15715
15716 #else /* !__ASSEMBLY__ */
15717@@ -98,6 +92,7 @@ struct thread_info {
15718 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
15719 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
15720 #define TIF_X32 30 /* 32-bit native x86-64 binary */
15721+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
15722
15723 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
15724 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
15725@@ -122,17 +117,18 @@ struct thread_info {
15726 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
15727 #define _TIF_ADDR32 (1 << TIF_ADDR32)
15728 #define _TIF_X32 (1 << TIF_X32)
15729+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
15730
15731 /* work to do in syscall_trace_enter() */
15732 #define _TIF_WORK_SYSCALL_ENTRY \
15733 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
15734 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
15735- _TIF_NOHZ)
15736+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
15737
15738 /* work to do in syscall_trace_leave() */
15739 #define _TIF_WORK_SYSCALL_EXIT \
15740 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
15741- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
15742+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
15743
15744 /* work to do on interrupt/exception return */
15745 #define _TIF_WORK_MASK \
15746@@ -143,7 +139,7 @@ struct thread_info {
15747 /* work to do on any return to user space */
15748 #define _TIF_ALLWORK_MASK \
15749 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
15750- _TIF_NOHZ)
15751+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
15752
15753 /* Only used for 64 bit */
15754 #define _TIF_DO_NOTIFY_MASK \
15755@@ -159,45 +155,40 @@ struct thread_info {
15756
15757 #define PREEMPT_ACTIVE 0x10000000
15758
15759-#ifdef CONFIG_X86_32
15760-
15761-#define STACK_WARN (THREAD_SIZE/8)
15762-/*
15763- * macros/functions for gaining access to the thread information structure
15764- *
15765- * preempt_count needs to be 1 initially, until the scheduler is functional.
15766- */
15767-#ifndef __ASSEMBLY__
15768-
15769-
15770-/* how to get the current stack pointer from C */
15771-register unsigned long current_stack_pointer asm("esp") __used;
15772-
15773-/* how to get the thread information struct from C */
15774-static inline struct thread_info *current_thread_info(void)
15775-{
15776- return (struct thread_info *)
15777- (current_stack_pointer & ~(THREAD_SIZE - 1));
15778-}
15779-
15780-#else /* !__ASSEMBLY__ */
15781-
15782+#ifdef __ASSEMBLY__
15783 /* how to get the thread information struct from ASM */
15784 #define GET_THREAD_INFO(reg) \
15785- movl $-THREAD_SIZE, reg; \
15786- andl %esp, reg
15787+ mov PER_CPU_VAR(current_tinfo), reg
15788
15789 /* use this one if reg already contains %esp */
15790-#define GET_THREAD_INFO_WITH_ESP(reg) \
15791- andl $-THREAD_SIZE, reg
15792+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
15793+#else
15794+/* how to get the thread information struct from C */
15795+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
15796+
15797+static __always_inline struct thread_info *current_thread_info(void)
15798+{
15799+ return this_cpu_read_stable(current_tinfo);
15800+}
15801+#endif
15802+
15803+#ifdef CONFIG_X86_32
15804+
15805+#define STACK_WARN (THREAD_SIZE/8)
15806+/*
15807+ * macros/functions for gaining access to the thread information structure
15808+ *
15809+ * preempt_count needs to be 1 initially, until the scheduler is functional.
15810+ */
15811+#ifndef __ASSEMBLY__
15812+
15813+/* how to get the current stack pointer from C */
15814+register unsigned long current_stack_pointer asm("esp") __used;
15815
15816 #endif
15817
15818 #else /* X86_32 */
15819
15820-#include <asm/percpu.h>
15821-#define KERNEL_STACK_OFFSET (5*8)
15822-
15823 /*
15824 * macros/functions for gaining access to the thread information structure
15825 * preempt_count needs to be 1 initially, until the scheduler is functional.
15826@@ -205,27 +196,8 @@ static inline struct thread_info *current_thread_info(void)
15827 #ifndef __ASSEMBLY__
15828 DECLARE_PER_CPU(unsigned long, kernel_stack);
15829
15830-static inline struct thread_info *current_thread_info(void)
15831-{
15832- struct thread_info *ti;
15833- ti = (void *)(this_cpu_read_stable(kernel_stack) +
15834- KERNEL_STACK_OFFSET - THREAD_SIZE);
15835- return ti;
15836-}
15837-
15838-#else /* !__ASSEMBLY__ */
15839-
15840-/* how to get the thread information struct from ASM */
15841-#define GET_THREAD_INFO(reg) \
15842- movq PER_CPU_VAR(kernel_stack),reg ; \
15843- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
15844-
15845-/*
15846- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
15847- * a certain register (to be used in assembler memory operands).
15848- */
15849-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
15850-
15851+/* how to get the current stack pointer from C */
15852+register unsigned long current_stack_pointer asm("rsp") __used;
15853 #endif
15854
15855 #endif /* !X86_32 */
15856@@ -286,5 +258,12 @@ static inline bool is_ia32_task(void)
15857 extern void arch_task_cache_init(void);
15858 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
15859 extern void arch_release_task_struct(struct task_struct *tsk);
15860+
15861+#define __HAVE_THREAD_FUNCTIONS
15862+#define task_thread_info(task) (&(task)->tinfo)
15863+#define task_stack_page(task) ((task)->stack)
15864+#define setup_thread_stack(p, org) do {} while (0)
15865+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
15866+
15867 #endif
15868 #endif /* _ASM_X86_THREAD_INFO_H */
15869diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
15870index 1709801..0a60f2f 100644
15871--- a/arch/x86/include/asm/uaccess.h
15872+++ b/arch/x86/include/asm/uaccess.h
15873@@ -7,6 +7,7 @@
15874 #include <linux/compiler.h>
15875 #include <linux/thread_info.h>
15876 #include <linux/string.h>
15877+#include <linux/sched.h>
15878 #include <asm/asm.h>
15879 #include <asm/page.h>
15880 #include <asm/smap.h>
15881@@ -29,7 +30,12 @@
15882
15883 #define get_ds() (KERNEL_DS)
15884 #define get_fs() (current_thread_info()->addr_limit)
15885+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15886+void __set_fs(mm_segment_t x);
15887+void set_fs(mm_segment_t x);
15888+#else
15889 #define set_fs(x) (current_thread_info()->addr_limit = (x))
15890+#endif
15891
15892 #define segment_eq(a, b) ((a).seg == (b).seg)
15893
15894@@ -77,8 +83,33 @@
15895 * checks that the pointer is in the user space range - after calling
15896 * this function, memory access functions may still return -EFAULT.
15897 */
15898-#define access_ok(type, addr, size) \
15899- (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
15900+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
15901+#define access_ok(type, addr, size) \
15902+({ \
15903+ long __size = size; \
15904+ unsigned long __addr = (unsigned long)addr; \
15905+ unsigned long __addr_ao = __addr & PAGE_MASK; \
15906+ unsigned long __end_ao = __addr + __size - 1; \
15907+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
15908+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
15909+ while(__addr_ao <= __end_ao) { \
15910+ char __c_ao; \
15911+ __addr_ao += PAGE_SIZE; \
15912+ if (__size > PAGE_SIZE) \
15913+ cond_resched(); \
15914+ if (__get_user(__c_ao, (char __user *)__addr)) \
15915+ break; \
15916+ if (type != VERIFY_WRITE) { \
15917+ __addr = __addr_ao; \
15918+ continue; \
15919+ } \
15920+ if (__put_user(__c_ao, (char __user *)__addr)) \
15921+ break; \
15922+ __addr = __addr_ao; \
15923+ } \
15924+ } \
15925+ __ret_ao; \
15926+})
15927
15928 /*
15929 * The exception table consists of pairs of addresses relative to the
15930@@ -189,13 +220,21 @@ extern int __get_user_bad(void);
15931 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
15932 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
15933
15934-
15935+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15936+#define __copyuser_seg "gs;"
15937+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
15938+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
15939+#else
15940+#define __copyuser_seg
15941+#define __COPYUSER_SET_ES
15942+#define __COPYUSER_RESTORE_ES
15943+#endif
15944
15945 #ifdef CONFIG_X86_32
15946 #define __put_user_asm_u64(x, addr, err, errret) \
15947 asm volatile(ASM_STAC "\n" \
15948- "1: movl %%eax,0(%2)\n" \
15949- "2: movl %%edx,4(%2)\n" \
15950+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
15951+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
15952 "3: " ASM_CLAC "\n" \
15953 ".section .fixup,\"ax\"\n" \
15954 "4: movl %3,%0\n" \
15955@@ -208,8 +247,8 @@ extern int __get_user_bad(void);
15956
15957 #define __put_user_asm_ex_u64(x, addr) \
15958 asm volatile(ASM_STAC "\n" \
15959- "1: movl %%eax,0(%1)\n" \
15960- "2: movl %%edx,4(%1)\n" \
15961+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
15962+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
15963 "3: " ASM_CLAC "\n" \
15964 _ASM_EXTABLE_EX(1b, 2b) \
15965 _ASM_EXTABLE_EX(2b, 3b) \
15966@@ -259,7 +298,7 @@ extern void __put_user_8(void);
15967 __typeof__(*(ptr)) __pu_val; \
15968 __chk_user_ptr(ptr); \
15969 might_fault(); \
15970- __pu_val = x; \
15971+ __pu_val = (x); \
15972 switch (sizeof(*(ptr))) { \
15973 case 1: \
15974 __put_user_x(1, __pu_val, ptr, __ret_pu); \
15975@@ -358,7 +397,7 @@ do { \
15976
15977 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
15978 asm volatile(ASM_STAC "\n" \
15979- "1: mov"itype" %2,%"rtype"1\n" \
15980+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
15981 "2: " ASM_CLAC "\n" \
15982 ".section .fixup,\"ax\"\n" \
15983 "3: mov %3,%0\n" \
15984@@ -366,7 +405,7 @@ do { \
15985 " jmp 2b\n" \
15986 ".previous\n" \
15987 _ASM_EXTABLE(1b, 3b) \
15988- : "=r" (err), ltype(x) \
15989+ : "=r" (err), ltype (x) \
15990 : "m" (__m(addr)), "i" (errret), "0" (err))
15991
15992 #define __get_user_size_ex(x, ptr, size) \
15993@@ -391,7 +430,7 @@ do { \
15994 } while (0)
15995
15996 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
15997- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
15998+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
15999 "2:\n" \
16000 _ASM_EXTABLE_EX(1b, 2b) \
16001 : ltype(x) : "m" (__m(addr)))
16002@@ -408,13 +447,24 @@ do { \
16003 int __gu_err; \
16004 unsigned long __gu_val; \
16005 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
16006- (x) = (__force __typeof__(*(ptr)))__gu_val; \
16007+ (x) = (__typeof__(*(ptr)))__gu_val; \
16008 __gu_err; \
16009 })
16010
16011 /* FIXME: this hack is definitely wrong -AK */
16012 struct __large_struct { unsigned long buf[100]; };
16013-#define __m(x) (*(struct __large_struct __user *)(x))
16014+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16015+#define ____m(x) \
16016+({ \
16017+ unsigned long ____x = (unsigned long)(x); \
16018+ if (____x < PAX_USER_SHADOW_BASE) \
16019+ ____x += PAX_USER_SHADOW_BASE; \
16020+ (void __user *)____x; \
16021+})
16022+#else
16023+#define ____m(x) (x)
16024+#endif
16025+#define __m(x) (*(struct __large_struct __user *)____m(x))
16026
16027 /*
16028 * Tell gcc we read from memory instead of writing: this is because
16029@@ -423,7 +473,7 @@ struct __large_struct { unsigned long buf[100]; };
16030 */
16031 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
16032 asm volatile(ASM_STAC "\n" \
16033- "1: mov"itype" %"rtype"1,%2\n" \
16034+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
16035 "2: " ASM_CLAC "\n" \
16036 ".section .fixup,\"ax\"\n" \
16037 "3: mov %3,%0\n" \
16038@@ -431,10 +481,10 @@ struct __large_struct { unsigned long buf[100]; };
16039 ".previous\n" \
16040 _ASM_EXTABLE(1b, 3b) \
16041 : "=r"(err) \
16042- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
16043+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
16044
16045 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
16046- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
16047+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
16048 "2:\n" \
16049 _ASM_EXTABLE_EX(1b, 2b) \
16050 : : ltype(x), "m" (__m(addr)))
16051@@ -473,8 +523,12 @@ struct __large_struct { unsigned long buf[100]; };
16052 * On error, the variable @x is set to zero.
16053 */
16054
16055+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16056+#define __get_user(x, ptr) get_user((x), (ptr))
16057+#else
16058 #define __get_user(x, ptr) \
16059 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
16060+#endif
16061
16062 /**
16063 * __put_user: - Write a simple value into user space, with less checking.
16064@@ -496,8 +550,12 @@ struct __large_struct { unsigned long buf[100]; };
16065 * Returns zero on success, or -EFAULT on error.
16066 */
16067
16068+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16069+#define __put_user(x, ptr) put_user((x), (ptr))
16070+#else
16071 #define __put_user(x, ptr) \
16072 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
16073+#endif
16074
16075 #define __get_user_unaligned __get_user
16076 #define __put_user_unaligned __put_user
16077@@ -515,7 +573,7 @@ struct __large_struct { unsigned long buf[100]; };
16078 #define get_user_ex(x, ptr) do { \
16079 unsigned long __gue_val; \
16080 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
16081- (x) = (__force __typeof__(*(ptr)))__gue_val; \
16082+ (x) = (__typeof__(*(ptr)))__gue_val; \
16083 } while (0)
16084
16085 #define put_user_try uaccess_try
16086@@ -532,8 +590,8 @@ strncpy_from_user(char *dst, const char __user *src, long count);
16087 extern __must_check long strlen_user(const char __user *str);
16088 extern __must_check long strnlen_user(const char __user *str, long n);
16089
16090-unsigned long __must_check clear_user(void __user *mem, unsigned long len);
16091-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
16092+unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
16093+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
16094
16095 /*
16096 * movsl can be slow when source and dest are not both 8-byte aligned
16097diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
16098index 7f760a9..04b1c65 100644
16099--- a/arch/x86/include/asm/uaccess_32.h
16100+++ b/arch/x86/include/asm/uaccess_32.h
16101@@ -11,15 +11,15 @@
16102 #include <asm/page.h>
16103
16104 unsigned long __must_check __copy_to_user_ll
16105- (void __user *to, const void *from, unsigned long n);
16106+ (void __user *to, const void *from, unsigned long n) __size_overflow(3);
16107 unsigned long __must_check __copy_from_user_ll
16108- (void *to, const void __user *from, unsigned long n);
16109+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
16110 unsigned long __must_check __copy_from_user_ll_nozero
16111- (void *to, const void __user *from, unsigned long n);
16112+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
16113 unsigned long __must_check __copy_from_user_ll_nocache
16114- (void *to, const void __user *from, unsigned long n);
16115+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
16116 unsigned long __must_check __copy_from_user_ll_nocache_nozero
16117- (void *to, const void __user *from, unsigned long n);
16118+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
16119
16120 /**
16121 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
16122@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
16123 static __always_inline unsigned long __must_check
16124 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
16125 {
16126+ if ((long)n < 0)
16127+ return n;
16128+
16129+ check_object_size(from, n, true);
16130+
16131 if (__builtin_constant_p(n)) {
16132 unsigned long ret;
16133
16134@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
16135 __copy_to_user(void __user *to, const void *from, unsigned long n)
16136 {
16137 might_fault();
16138+
16139 return __copy_to_user_inatomic(to, from, n);
16140 }
16141
16142 static __always_inline unsigned long
16143 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
16144 {
16145+ if ((long)n < 0)
16146+ return n;
16147+
16148 /* Avoid zeroing the tail if the copy fails..
16149 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
16150 * but as the zeroing behaviour is only significant when n is not
16151@@ -137,6 +146,12 @@ static __always_inline unsigned long
16152 __copy_from_user(void *to, const void __user *from, unsigned long n)
16153 {
16154 might_fault();
16155+
16156+ if ((long)n < 0)
16157+ return n;
16158+
16159+ check_object_size(to, n, false);
16160+
16161 if (__builtin_constant_p(n)) {
16162 unsigned long ret;
16163
16164@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
16165 const void __user *from, unsigned long n)
16166 {
16167 might_fault();
16168+
16169+ if ((long)n < 0)
16170+ return n;
16171+
16172 if (__builtin_constant_p(n)) {
16173 unsigned long ret;
16174
16175@@ -181,15 +200,19 @@ static __always_inline unsigned long
16176 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
16177 unsigned long n)
16178 {
16179- return __copy_from_user_ll_nocache_nozero(to, from, n);
16180+ if ((long)n < 0)
16181+ return n;
16182+
16183+ return __copy_from_user_ll_nocache_nozero(to, from, n);
16184 }
16185
16186-unsigned long __must_check copy_to_user(void __user *to,
16187- const void *from, unsigned long n);
16188-unsigned long __must_check _copy_from_user(void *to,
16189- const void __user *from,
16190- unsigned long n);
16191-
16192+extern void copy_to_user_overflow(void)
16193+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
16194+ __compiletime_error("copy_to_user() buffer size is not provably correct")
16195+#else
16196+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
16197+#endif
16198+;
16199
16200 extern void copy_from_user_overflow(void)
16201 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
16202@@ -199,17 +222,60 @@ extern void copy_from_user_overflow(void)
16203 #endif
16204 ;
16205
16206-static inline unsigned long __must_check copy_from_user(void *to,
16207- const void __user *from,
16208- unsigned long n)
16209+/**
16210+ * copy_to_user: - Copy a block of data into user space.
16211+ * @to: Destination address, in user space.
16212+ * @from: Source address, in kernel space.
16213+ * @n: Number of bytes to copy.
16214+ *
16215+ * Context: User context only. This function may sleep.
16216+ *
16217+ * Copy data from kernel space to user space.
16218+ *
16219+ * Returns number of bytes that could not be copied.
16220+ * On success, this will be zero.
16221+ */
16222+static inline unsigned long __must_check
16223+copy_to_user(void __user *to, const void *from, unsigned long n)
16224 {
16225- int sz = __compiletime_object_size(to);
16226+ size_t sz = __compiletime_object_size(from);
16227
16228- if (likely(sz == -1 || sz >= n))
16229- n = _copy_from_user(to, from, n);
16230- else
16231+ if (unlikely(sz != (size_t)-1 && sz < n))
16232+ copy_to_user_overflow();
16233+ else if (access_ok(VERIFY_WRITE, to, n))
16234+ n = __copy_to_user(to, from, n);
16235+ return n;
16236+}
16237+
16238+/**
16239+ * copy_from_user: - Copy a block of data from user space.
16240+ * @to: Destination address, in kernel space.
16241+ * @from: Source address, in user space.
16242+ * @n: Number of bytes to copy.
16243+ *
16244+ * Context: User context only. This function may sleep.
16245+ *
16246+ * Copy data from user space to kernel space.
16247+ *
16248+ * Returns number of bytes that could not be copied.
16249+ * On success, this will be zero.
16250+ *
16251+ * If some data could not be copied, this function will pad the copied
16252+ * data to the requested size using zero bytes.
16253+ */
16254+static inline unsigned long __must_check
16255+copy_from_user(void *to, const void __user *from, unsigned long n)
16256+{
16257+ size_t sz = __compiletime_object_size(to);
16258+
16259+ check_object_size(to, n, false);
16260+
16261+ if (unlikely(sz != (size_t)-1 && sz < n))
16262 copy_from_user_overflow();
16263-
16264+ else if (access_ok(VERIFY_READ, from, n))
16265+ n = __copy_from_user(to, from, n);
16266+ else if ((long)n > 0)
16267+ memset(to, 0, n);
16268 return n;
16269 }
16270
16271diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
16272index 142810c..1f2a0a7 100644
16273--- a/arch/x86/include/asm/uaccess_64.h
16274+++ b/arch/x86/include/asm/uaccess_64.h
16275@@ -10,6 +10,9 @@
16276 #include <asm/alternative.h>
16277 #include <asm/cpufeature.h>
16278 #include <asm/page.h>
16279+#include <asm/pgtable.h>
16280+
16281+#define set_fs(x) (current_thread_info()->addr_limit = (x))
16282
16283 /*
16284 * Copy To/From Userspace
16285@@ -17,13 +20,13 @@
16286
16287 /* Handles exceptions in both to and from, but doesn't do access_ok */
16288 __must_check unsigned long
16289-copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
16290+copy_user_enhanced_fast_string(void *to, const void *from, unsigned len) __size_overflow(3);
16291 __must_check unsigned long
16292-copy_user_generic_string(void *to, const void *from, unsigned len);
16293+copy_user_generic_string(void *to, const void *from, unsigned len) __size_overflow(3);
16294 __must_check unsigned long
16295-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
16296+copy_user_generic_unrolled(void *to, const void *from, unsigned len) __size_overflow(3);
16297
16298-static __always_inline __must_check unsigned long
16299+static __always_inline __must_check __size_overflow(3) unsigned long
16300 copy_user_generic(void *to, const void *from, unsigned len)
16301 {
16302 unsigned ret;
16303@@ -41,142 +44,204 @@ copy_user_generic(void *to, const void *from, unsigned len)
16304 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
16305 "=d" (len)),
16306 "1" (to), "2" (from), "3" (len)
16307- : "memory", "rcx", "r8", "r9", "r10", "r11");
16308+ : "memory", "rcx", "r8", "r9", "r11");
16309 return ret;
16310 }
16311
16312+static __always_inline __must_check unsigned long
16313+__copy_to_user(void __user *to, const void *from, unsigned long len);
16314+static __always_inline __must_check unsigned long
16315+__copy_from_user(void *to, const void __user *from, unsigned long len);
16316 __must_check unsigned long
16317-_copy_to_user(void __user *to, const void *from, unsigned len);
16318-__must_check unsigned long
16319-_copy_from_user(void *to, const void __user *from, unsigned len);
16320-__must_check unsigned long
16321-copy_in_user(void __user *to, const void __user *from, unsigned len);
16322+copy_in_user(void __user *to, const void __user *from, unsigned long len);
16323+
16324+extern void copy_to_user_overflow(void)
16325+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
16326+ __compiletime_error("copy_to_user() buffer size is not provably correct")
16327+#else
16328+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
16329+#endif
16330+;
16331+
16332+extern void copy_from_user_overflow(void)
16333+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
16334+ __compiletime_error("copy_from_user() buffer size is not provably correct")
16335+#else
16336+ __compiletime_warning("copy_from_user() buffer size is not provably correct")
16337+#endif
16338+;
16339
16340 static inline unsigned long __must_check copy_from_user(void *to,
16341 const void __user *from,
16342 unsigned long n)
16343 {
16344- int sz = __compiletime_object_size(to);
16345-
16346 might_fault();
16347- if (likely(sz == -1 || sz >= n))
16348- n = _copy_from_user(to, from, n);
16349-#ifdef CONFIG_DEBUG_VM
16350- else
16351- WARN(1, "Buffer overflow detected!\n");
16352-#endif
16353+
16354+ check_object_size(to, n, false);
16355+
16356+ if (access_ok(VERIFY_READ, from, n))
16357+ n = __copy_from_user(to, from, n);
16358+ else if (n < INT_MAX)
16359+ memset(to, 0, n);
16360 return n;
16361 }
16362
16363 static __always_inline __must_check
16364-int copy_to_user(void __user *dst, const void *src, unsigned size)
16365+int copy_to_user(void __user *dst, const void *src, unsigned long size)
16366 {
16367 might_fault();
16368
16369- return _copy_to_user(dst, src, size);
16370+ if (access_ok(VERIFY_WRITE, dst, size))
16371+ size = __copy_to_user(dst, src, size);
16372+ return size;
16373 }
16374
16375 static __always_inline __must_check
16376-int __copy_from_user(void *dst, const void __user *src, unsigned size)
16377+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
16378 {
16379- int ret = 0;
16380+ size_t sz = __compiletime_object_size(dst);
16381+ unsigned ret = 0;
16382
16383 might_fault();
16384+
16385+ if (size > INT_MAX)
16386+ return size;
16387+
16388+ check_object_size(dst, size, false);
16389+
16390+#ifdef CONFIG_PAX_MEMORY_UDEREF
16391+ if (!__access_ok(VERIFY_READ, src, size))
16392+ return size;
16393+#endif
16394+
16395+ if (unlikely(sz != (size_t)-1 && sz < size)) {
16396+ copy_from_user_overflow();
16397+ return size;
16398+ }
16399+
16400 if (!__builtin_constant_p(size))
16401- return copy_user_generic(dst, (__force void *)src, size);
16402+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
16403 switch (size) {
16404- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
16405+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
16406 ret, "b", "b", "=q", 1);
16407 return ret;
16408- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
16409+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
16410 ret, "w", "w", "=r", 2);
16411 return ret;
16412- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
16413+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
16414 ret, "l", "k", "=r", 4);
16415 return ret;
16416- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
16417+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
16418 ret, "q", "", "=r", 8);
16419 return ret;
16420 case 10:
16421- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
16422+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
16423 ret, "q", "", "=r", 10);
16424 if (unlikely(ret))
16425 return ret;
16426 __get_user_asm(*(u16 *)(8 + (char *)dst),
16427- (u16 __user *)(8 + (char __user *)src),
16428+ (const u16 __user *)(8 + (const char __user *)src),
16429 ret, "w", "w", "=r", 2);
16430 return ret;
16431 case 16:
16432- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
16433+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
16434 ret, "q", "", "=r", 16);
16435 if (unlikely(ret))
16436 return ret;
16437 __get_user_asm(*(u64 *)(8 + (char *)dst),
16438- (u64 __user *)(8 + (char __user *)src),
16439+ (const u64 __user *)(8 + (const char __user *)src),
16440 ret, "q", "", "=r", 8);
16441 return ret;
16442 default:
16443- return copy_user_generic(dst, (__force void *)src, size);
16444+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
16445 }
16446 }
16447
16448 static __always_inline __must_check
16449-int __copy_to_user(void __user *dst, const void *src, unsigned size)
16450+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
16451 {
16452- int ret = 0;
16453+ size_t sz = __compiletime_object_size(src);
16454+ unsigned ret = 0;
16455
16456 might_fault();
16457+
16458+ if (size > INT_MAX)
16459+ return size;
16460+
16461+ check_object_size(src, size, true);
16462+
16463+#ifdef CONFIG_PAX_MEMORY_UDEREF
16464+ if (!__access_ok(VERIFY_WRITE, dst, size))
16465+ return size;
16466+#endif
16467+
16468+ if (unlikely(sz != (size_t)-1 && sz < size)) {
16469+ copy_to_user_overflow();
16470+ return size;
16471+ }
16472+
16473 if (!__builtin_constant_p(size))
16474- return copy_user_generic((__force void *)dst, src, size);
16475+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
16476 switch (size) {
16477- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
16478+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
16479 ret, "b", "b", "iq", 1);
16480 return ret;
16481- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
16482+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
16483 ret, "w", "w", "ir", 2);
16484 return ret;
16485- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
16486+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
16487 ret, "l", "k", "ir", 4);
16488 return ret;
16489- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
16490+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
16491 ret, "q", "", "er", 8);
16492 return ret;
16493 case 10:
16494- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
16495+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
16496 ret, "q", "", "er", 10);
16497 if (unlikely(ret))
16498 return ret;
16499 asm("":::"memory");
16500- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
16501+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
16502 ret, "w", "w", "ir", 2);
16503 return ret;
16504 case 16:
16505- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
16506+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
16507 ret, "q", "", "er", 16);
16508 if (unlikely(ret))
16509 return ret;
16510 asm("":::"memory");
16511- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
16512+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
16513 ret, "q", "", "er", 8);
16514 return ret;
16515 default:
16516- return copy_user_generic((__force void *)dst, src, size);
16517+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
16518 }
16519 }
16520
16521 static __always_inline __must_check
16522-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
16523+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
16524 {
16525- int ret = 0;
16526+ unsigned ret = 0;
16527
16528 might_fault();
16529+
16530+ if (size > INT_MAX)
16531+ return size;
16532+
16533+#ifdef CONFIG_PAX_MEMORY_UDEREF
16534+ if (!__access_ok(VERIFY_READ, src, size))
16535+ return size;
16536+ if (!__access_ok(VERIFY_WRITE, dst, size))
16537+ return size;
16538+#endif
16539+
16540 if (!__builtin_constant_p(size))
16541- return copy_user_generic((__force void *)dst,
16542- (__force void *)src, size);
16543+ return copy_user_generic((__force_kernel void *)____m(dst),
16544+ (__force_kernel const void *)____m(src), size);
16545 switch (size) {
16546 case 1: {
16547 u8 tmp;
16548- __get_user_asm(tmp, (u8 __user *)src,
16549+ __get_user_asm(tmp, (const u8 __user *)src,
16550 ret, "b", "b", "=q", 1);
16551 if (likely(!ret))
16552 __put_user_asm(tmp, (u8 __user *)dst,
16553@@ -185,7 +250,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
16554 }
16555 case 2: {
16556 u16 tmp;
16557- __get_user_asm(tmp, (u16 __user *)src,
16558+ __get_user_asm(tmp, (const u16 __user *)src,
16559 ret, "w", "w", "=r", 2);
16560 if (likely(!ret))
16561 __put_user_asm(tmp, (u16 __user *)dst,
16562@@ -195,7 +260,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
16563
16564 case 4: {
16565 u32 tmp;
16566- __get_user_asm(tmp, (u32 __user *)src,
16567+ __get_user_asm(tmp, (const u32 __user *)src,
16568 ret, "l", "k", "=r", 4);
16569 if (likely(!ret))
16570 __put_user_asm(tmp, (u32 __user *)dst,
16571@@ -204,7 +269,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
16572 }
16573 case 8: {
16574 u64 tmp;
16575- __get_user_asm(tmp, (u64 __user *)src,
16576+ __get_user_asm(tmp, (const u64 __user *)src,
16577 ret, "q", "", "=r", 8);
16578 if (likely(!ret))
16579 __put_user_asm(tmp, (u64 __user *)dst,
16580@@ -212,41 +277,72 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
16581 return ret;
16582 }
16583 default:
16584- return copy_user_generic((__force void *)dst,
16585- (__force void *)src, size);
16586+ return copy_user_generic((__force_kernel void *)____m(dst),
16587+ (__force_kernel const void *)____m(src), size);
16588 }
16589 }
16590
16591 static __must_check __always_inline int
16592-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
16593+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
16594 {
16595- return copy_user_generic(dst, (__force const void *)src, size);
16596+ if (size > INT_MAX)
16597+ return size;
16598+
16599+#ifdef CONFIG_PAX_MEMORY_UDEREF
16600+ if (!__access_ok(VERIFY_READ, src, size))
16601+ return size;
16602+#endif
16603+
16604+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
16605 }
16606
16607-static __must_check __always_inline int
16608-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
16609+static __must_check __always_inline unsigned long
16610+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
16611 {
16612- return copy_user_generic((__force void *)dst, src, size);
16613+ if (size > INT_MAX)
16614+ return size;
16615+
16616+#ifdef CONFIG_PAX_MEMORY_UDEREF
16617+ if (!__access_ok(VERIFY_WRITE, dst, size))
16618+ return size;
16619+#endif
16620+
16621+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
16622 }
16623
16624-extern long __copy_user_nocache(void *dst, const void __user *src,
16625- unsigned size, int zerorest);
16626+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
16627+ unsigned long size, int zerorest) __size_overflow(3);
16628
16629-static inline int
16630-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
16631+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
16632 {
16633 might_sleep();
16634+
16635+ if (size > INT_MAX)
16636+ return size;
16637+
16638+#ifdef CONFIG_PAX_MEMORY_UDEREF
16639+ if (!__access_ok(VERIFY_READ, src, size))
16640+ return size;
16641+#endif
16642+
16643 return __copy_user_nocache(dst, src, size, 1);
16644 }
16645
16646-static inline int
16647-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
16648- unsigned size)
16649+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
16650+ unsigned long size)
16651 {
16652+ if (size > INT_MAX)
16653+ return size;
16654+
16655+#ifdef CONFIG_PAX_MEMORY_UDEREF
16656+ if (!__access_ok(VERIFY_READ, src, size))
16657+ return size;
16658+#endif
16659+
16660 return __copy_user_nocache(dst, src, size, 0);
16661 }
16662
16663-unsigned long
16664-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
16665+extern unsigned long
16666+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
16667
16668 #endif /* _ASM_X86_UACCESS_64_H */
16669diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
16670index 5b238981..77fdd78 100644
16671--- a/arch/x86/include/asm/word-at-a-time.h
16672+++ b/arch/x86/include/asm/word-at-a-time.h
16673@@ -11,7 +11,7 @@
16674 * and shift, for example.
16675 */
16676 struct word_at_a_time {
16677- const unsigned long one_bits, high_bits;
16678+ unsigned long one_bits, high_bits;
16679 };
16680
16681 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
16682diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
16683index 5769349..a3d3e2a 100644
16684--- a/arch/x86/include/asm/x86_init.h
16685+++ b/arch/x86/include/asm/x86_init.h
16686@@ -141,7 +141,7 @@ struct x86_init_ops {
16687 struct x86_init_timers timers;
16688 struct x86_init_iommu iommu;
16689 struct x86_init_pci pci;
16690-};
16691+} __no_const;
16692
16693 /**
16694 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
16695@@ -152,7 +152,7 @@ struct x86_cpuinit_ops {
16696 void (*setup_percpu_clockev)(void);
16697 void (*early_percpu_clock_init)(void);
16698 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
16699-};
16700+} __no_const;
16701
16702 /**
16703 * struct x86_platform_ops - platform specific runtime functions
16704@@ -178,7 +178,7 @@ struct x86_platform_ops {
16705 void (*save_sched_clock_state)(void);
16706 void (*restore_sched_clock_state)(void);
16707 void (*apic_post_init)(void);
16708-};
16709+} __no_const;
16710
16711 struct pci_dev;
16712
16713@@ -187,14 +187,14 @@ struct x86_msi_ops {
16714 void (*teardown_msi_irq)(unsigned int irq);
16715 void (*teardown_msi_irqs)(struct pci_dev *dev);
16716 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
16717-};
16718+} __no_const;
16719
16720 struct x86_io_apic_ops {
16721 void (*init) (void);
16722 unsigned int (*read) (unsigned int apic, unsigned int reg);
16723 void (*write) (unsigned int apic, unsigned int reg, unsigned int value);
16724 void (*modify)(unsigned int apic, unsigned int reg, unsigned int value);
16725-};
16726+} __no_const;
16727
16728 extern struct x86_init_ops x86_init;
16729 extern struct x86_cpuinit_ops x86_cpuinit;
16730diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
16731index 0415cda..b43d877 100644
16732--- a/arch/x86/include/asm/xsave.h
16733+++ b/arch/x86/include/asm/xsave.h
16734@@ -71,7 +71,9 @@ static inline int xsave_user(struct xsave_struct __user *buf)
16735 return -EFAULT;
16736
16737 __asm__ __volatile__(ASM_STAC "\n"
16738- "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
16739+ "1:"
16740+ __copyuser_seg
16741+ ".byte " REX_PREFIX "0x0f,0xae,0x27\n"
16742 "2: " ASM_CLAC "\n"
16743 ".section .fixup,\"ax\"\n"
16744 "3: movl $-1,%[err]\n"
16745@@ -87,12 +89,14 @@ static inline int xsave_user(struct xsave_struct __user *buf)
16746 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
16747 {
16748 int err;
16749- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
16750+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
16751 u32 lmask = mask;
16752 u32 hmask = mask >> 32;
16753
16754 __asm__ __volatile__(ASM_STAC "\n"
16755- "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
16756+ "1:"
16757+ __copyuser_seg
16758+ ".byte " REX_PREFIX "0x0f,0xae,0x2f\n"
16759 "2: " ASM_CLAC "\n"
16760 ".section .fixup,\"ax\"\n"
16761 "3: movl $-1,%[err]\n"
16762diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
16763index bbae024..e1528f9 100644
16764--- a/arch/x86/include/uapi/asm/e820.h
16765+++ b/arch/x86/include/uapi/asm/e820.h
16766@@ -63,7 +63,7 @@ struct e820map {
16767 #define ISA_START_ADDRESS 0xa0000
16768 #define ISA_END_ADDRESS 0x100000
16769
16770-#define BIOS_BEGIN 0x000a0000
16771+#define BIOS_BEGIN 0x000c0000
16772 #define BIOS_END 0x00100000
16773
16774 #define BIOS_ROM_BASE 0xffe00000
16775diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
16776index 34e923a..0c6bb6e 100644
16777--- a/arch/x86/kernel/Makefile
16778+++ b/arch/x86/kernel/Makefile
16779@@ -22,7 +22,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
16780 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
16781 obj-$(CONFIG_IRQ_WORK) += irq_work.o
16782 obj-y += probe_roms.o
16783-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
16784+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
16785 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
16786 obj-y += syscall_$(BITS).o
16787 obj-$(CONFIG_X86_64) += vsyscall_64.o
16788diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
16789index bacf4b0..4ede72e 100644
16790--- a/arch/x86/kernel/acpi/boot.c
16791+++ b/arch/x86/kernel/acpi/boot.c
16792@@ -1358,7 +1358,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
16793 * If your system is blacklisted here, but you find that acpi=force
16794 * works for you, please contact linux-acpi@vger.kernel.org
16795 */
16796-static struct dmi_system_id __initdata acpi_dmi_table[] = {
16797+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
16798 /*
16799 * Boxes that need ACPI disabled
16800 */
16801@@ -1433,7 +1433,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
16802 };
16803
16804 /* second table for DMI checks that should run after early-quirks */
16805-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
16806+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
16807 /*
16808 * HP laptops which use a DSDT reporting as HP/SB400/10000,
16809 * which includes some code which overrides all temperature
16810diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
16811index d5e0d71..6533e08 100644
16812--- a/arch/x86/kernel/acpi/sleep.c
16813+++ b/arch/x86/kernel/acpi/sleep.c
16814@@ -74,8 +74,12 @@ int acpi_suspend_lowlevel(void)
16815 #else /* CONFIG_64BIT */
16816 #ifdef CONFIG_SMP
16817 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
16818+
16819+ pax_open_kernel();
16820 early_gdt_descr.address =
16821 (unsigned long)get_cpu_gdt_table(smp_processor_id());
16822+ pax_close_kernel();
16823+
16824 initial_gs = per_cpu_offset(smp_processor_id());
16825 #endif
16826 initial_code = (unsigned long)wakeup_long64;
16827diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
16828index 13ab720..95d5442 100644
16829--- a/arch/x86/kernel/acpi/wakeup_32.S
16830+++ b/arch/x86/kernel/acpi/wakeup_32.S
16831@@ -30,13 +30,11 @@ wakeup_pmode_return:
16832 # and restore the stack ... but you need gdt for this to work
16833 movl saved_context_esp, %esp
16834
16835- movl %cs:saved_magic, %eax
16836- cmpl $0x12345678, %eax
16837+ cmpl $0x12345678, saved_magic
16838 jne bogus_magic
16839
16840 # jump to place where we left off
16841- movl saved_eip, %eax
16842- jmp *%eax
16843+ jmp *(saved_eip)
16844
16845 bogus_magic:
16846 jmp bogus_magic
16847diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
16848index ef5ccca..bd83949 100644
16849--- a/arch/x86/kernel/alternative.c
16850+++ b/arch/x86/kernel/alternative.c
16851@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
16852 */
16853 for (a = start; a < end; a++) {
16854 instr = (u8 *)&a->instr_offset + a->instr_offset;
16855+
16856+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16857+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16858+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
16859+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16860+#endif
16861+
16862 replacement = (u8 *)&a->repl_offset + a->repl_offset;
16863 BUG_ON(a->replacementlen > a->instrlen);
16864 BUG_ON(a->instrlen > sizeof(insnbuf));
16865@@ -299,10 +306,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
16866 for (poff = start; poff < end; poff++) {
16867 u8 *ptr = (u8 *)poff + *poff;
16868
16869+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16870+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16871+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
16872+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16873+#endif
16874+
16875 if (!*poff || ptr < text || ptr >= text_end)
16876 continue;
16877 /* turn DS segment override prefix into lock prefix */
16878- if (*ptr == 0x3e)
16879+ if (*ktla_ktva(ptr) == 0x3e)
16880 text_poke(ptr, ((unsigned char []){0xf0}), 1);
16881 }
16882 mutex_unlock(&text_mutex);
16883@@ -317,10 +330,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
16884 for (poff = start; poff < end; poff++) {
16885 u8 *ptr = (u8 *)poff + *poff;
16886
16887+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16888+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16889+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
16890+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16891+#endif
16892+
16893 if (!*poff || ptr < text || ptr >= text_end)
16894 continue;
16895 /* turn lock prefix into DS segment override prefix */
16896- if (*ptr == 0xf0)
16897+ if (*ktla_ktva(ptr) == 0xf0)
16898 text_poke(ptr, ((unsigned char []){0x3E}), 1);
16899 }
16900 mutex_unlock(&text_mutex);
16901@@ -468,7 +487,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
16902
16903 BUG_ON(p->len > MAX_PATCH_LEN);
16904 /* prep the buffer with the original instructions */
16905- memcpy(insnbuf, p->instr, p->len);
16906+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
16907 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
16908 (unsigned long)p->instr, p->len);
16909
16910@@ -515,7 +534,7 @@ void __init alternative_instructions(void)
16911 if (!uniproc_patched || num_possible_cpus() == 1)
16912 free_init_pages("SMP alternatives",
16913 (unsigned long)__smp_locks,
16914- (unsigned long)__smp_locks_end);
16915+ PAGE_ALIGN((unsigned long)__smp_locks_end));
16916 #endif
16917
16918 apply_paravirt(__parainstructions, __parainstructions_end);
16919@@ -535,13 +554,17 @@ void __init alternative_instructions(void)
16920 * instructions. And on the local CPU you need to be protected again NMI or MCE
16921 * handlers seeing an inconsistent instruction while you patch.
16922 */
16923-void *__init_or_module text_poke_early(void *addr, const void *opcode,
16924+void *__kprobes text_poke_early(void *addr, const void *opcode,
16925 size_t len)
16926 {
16927 unsigned long flags;
16928 local_irq_save(flags);
16929- memcpy(addr, opcode, len);
16930+
16931+ pax_open_kernel();
16932+ memcpy(ktla_ktva(addr), opcode, len);
16933 sync_core();
16934+ pax_close_kernel();
16935+
16936 local_irq_restore(flags);
16937 /* Could also do a CLFLUSH here to speed up CPU recovery; but
16938 that causes hangs on some VIA CPUs. */
16939@@ -563,36 +586,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
16940 */
16941 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
16942 {
16943- unsigned long flags;
16944- char *vaddr;
16945+ unsigned char *vaddr = ktla_ktva(addr);
16946 struct page *pages[2];
16947- int i;
16948+ size_t i;
16949
16950 if (!core_kernel_text((unsigned long)addr)) {
16951- pages[0] = vmalloc_to_page(addr);
16952- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
16953+ pages[0] = vmalloc_to_page(vaddr);
16954+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
16955 } else {
16956- pages[0] = virt_to_page(addr);
16957+ pages[0] = virt_to_page(vaddr);
16958 WARN_ON(!PageReserved(pages[0]));
16959- pages[1] = virt_to_page(addr + PAGE_SIZE);
16960+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
16961 }
16962 BUG_ON(!pages[0]);
16963- local_irq_save(flags);
16964- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
16965- if (pages[1])
16966- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
16967- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
16968- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
16969- clear_fixmap(FIX_TEXT_POKE0);
16970- if (pages[1])
16971- clear_fixmap(FIX_TEXT_POKE1);
16972- local_flush_tlb();
16973- sync_core();
16974- /* Could also do a CLFLUSH here to speed up CPU recovery; but
16975- that causes hangs on some VIA CPUs. */
16976+ text_poke_early(addr, opcode, len);
16977 for (i = 0; i < len; i++)
16978- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
16979- local_irq_restore(flags);
16980+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
16981 return addr;
16982 }
16983
16984diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
16985index cbf5121..812b537 100644
16986--- a/arch/x86/kernel/apic/apic.c
16987+++ b/arch/x86/kernel/apic/apic.c
16988@@ -189,7 +189,7 @@ int first_system_vector = 0xfe;
16989 /*
16990 * Debug level, exported for io_apic.c
16991 */
16992-unsigned int apic_verbosity;
16993+int apic_verbosity;
16994
16995 int pic_mode;
16996
16997@@ -1956,7 +1956,7 @@ void smp_error_interrupt(struct pt_regs *regs)
16998 apic_write(APIC_ESR, 0);
16999 v1 = apic_read(APIC_ESR);
17000 ack_APIC_irq();
17001- atomic_inc(&irq_err_count);
17002+ atomic_inc_unchecked(&irq_err_count);
17003
17004 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
17005 smp_processor_id(), v0 , v1);
17006diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
17007index 00c77cf..2dc6a2d 100644
17008--- a/arch/x86/kernel/apic/apic_flat_64.c
17009+++ b/arch/x86/kernel/apic/apic_flat_64.c
17010@@ -157,7 +157,7 @@ static int flat_probe(void)
17011 return 1;
17012 }
17013
17014-static struct apic apic_flat = {
17015+static struct apic apic_flat __read_only = {
17016 .name = "flat",
17017 .probe = flat_probe,
17018 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
17019@@ -271,7 +271,7 @@ static int physflat_probe(void)
17020 return 0;
17021 }
17022
17023-static struct apic apic_physflat = {
17024+static struct apic apic_physflat __read_only = {
17025
17026 .name = "physical flat",
17027 .probe = physflat_probe,
17028diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
17029index e145f28..2752888 100644
17030--- a/arch/x86/kernel/apic/apic_noop.c
17031+++ b/arch/x86/kernel/apic/apic_noop.c
17032@@ -119,7 +119,7 @@ static void noop_apic_write(u32 reg, u32 v)
17033 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
17034 }
17035
17036-struct apic apic_noop = {
17037+struct apic apic_noop __read_only = {
17038 .name = "noop",
17039 .probe = noop_probe,
17040 .acpi_madt_oem_check = NULL,
17041diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
17042index d50e364..543bee3 100644
17043--- a/arch/x86/kernel/apic/bigsmp_32.c
17044+++ b/arch/x86/kernel/apic/bigsmp_32.c
17045@@ -152,7 +152,7 @@ static int probe_bigsmp(void)
17046 return dmi_bigsmp;
17047 }
17048
17049-static struct apic apic_bigsmp = {
17050+static struct apic apic_bigsmp __read_only = {
17051
17052 .name = "bigsmp",
17053 .probe = probe_bigsmp,
17054diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
17055index 0874799..a7a7892 100644
17056--- a/arch/x86/kernel/apic/es7000_32.c
17057+++ b/arch/x86/kernel/apic/es7000_32.c
17058@@ -608,8 +608,7 @@ static int es7000_mps_oem_check_cluster(struct mpc_table *mpc, char *oem,
17059 return ret && es7000_apic_is_cluster();
17060 }
17061
17062-/* We've been warned by a false positive warning.Use __refdata to keep calm. */
17063-static struct apic __refdata apic_es7000_cluster = {
17064+static struct apic apic_es7000_cluster __read_only = {
17065
17066 .name = "es7000",
17067 .probe = probe_es7000,
17068@@ -675,7 +674,7 @@ static struct apic __refdata apic_es7000_cluster = {
17069 .x86_32_early_logical_apicid = es7000_early_logical_apicid,
17070 };
17071
17072-static struct apic __refdata apic_es7000 = {
17073+static struct apic apic_es7000 __read_only = {
17074
17075 .name = "es7000",
17076 .probe = probe_es7000,
17077diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
17078index b739d39..aebc14c 100644
17079--- a/arch/x86/kernel/apic/io_apic.c
17080+++ b/arch/x86/kernel/apic/io_apic.c
17081@@ -1084,7 +1084,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
17082 }
17083 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
17084
17085-void lock_vector_lock(void)
17086+void lock_vector_lock(void) __acquires(vector_lock)
17087 {
17088 /* Used to the online set of cpus does not change
17089 * during assign_irq_vector.
17090@@ -1092,7 +1092,7 @@ void lock_vector_lock(void)
17091 raw_spin_lock(&vector_lock);
17092 }
17093
17094-void unlock_vector_lock(void)
17095+void unlock_vector_lock(void) __releases(vector_lock)
17096 {
17097 raw_spin_unlock(&vector_lock);
17098 }
17099@@ -2399,7 +2399,7 @@ static void ack_apic_edge(struct irq_data *data)
17100 ack_APIC_irq();
17101 }
17102
17103-atomic_t irq_mis_count;
17104+atomic_unchecked_t irq_mis_count;
17105
17106 #ifdef CONFIG_GENERIC_PENDING_IRQ
17107 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
17108@@ -2540,7 +2540,7 @@ static void ack_apic_level(struct irq_data *data)
17109 * at the cpu.
17110 */
17111 if (!(v & (1 << (i & 0x1f)))) {
17112- atomic_inc(&irq_mis_count);
17113+ atomic_inc_unchecked(&irq_mis_count);
17114
17115 eoi_ioapic_irq(irq, cfg);
17116 }
17117@@ -2567,11 +2567,13 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
17118
17119 static void irq_remap_modify_chip_defaults(struct irq_chip *chip)
17120 {
17121- chip->irq_print_chip = ir_print_prefix;
17122- chip->irq_ack = ir_ack_apic_edge;
17123- chip->irq_eoi = ir_ack_apic_level;
17124+ pax_open_kernel();
17125+ *(void **)&chip->irq_print_chip = ir_print_prefix;
17126+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
17127+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
17128
17129- chip->irq_set_affinity = set_remapped_irq_affinity;
17130+ *(void **)&chip->irq_set_affinity = set_remapped_irq_affinity;
17131+ pax_close_kernel();
17132 }
17133 #endif /* CONFIG_IRQ_REMAP */
17134
17135diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
17136index d661ee9..791fd33 100644
17137--- a/arch/x86/kernel/apic/numaq_32.c
17138+++ b/arch/x86/kernel/apic/numaq_32.c
17139@@ -455,8 +455,7 @@ static void numaq_setup_portio_remap(void)
17140 (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
17141 }
17142
17143-/* Use __refdata to keep false positive warning calm. */
17144-static struct apic __refdata apic_numaq = {
17145+static struct apic apic_numaq __read_only = {
17146
17147 .name = "NUMAQ",
17148 .probe = probe_numaq,
17149diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
17150index eb35ef9..f184a21 100644
17151--- a/arch/x86/kernel/apic/probe_32.c
17152+++ b/arch/x86/kernel/apic/probe_32.c
17153@@ -72,7 +72,7 @@ static int probe_default(void)
17154 return 1;
17155 }
17156
17157-static struct apic apic_default = {
17158+static struct apic apic_default __read_only = {
17159
17160 .name = "default",
17161 .probe = probe_default,
17162diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
17163index 77c95c0..434f8a4 100644
17164--- a/arch/x86/kernel/apic/summit_32.c
17165+++ b/arch/x86/kernel/apic/summit_32.c
17166@@ -486,7 +486,7 @@ void setup_summit(void)
17167 }
17168 #endif
17169
17170-static struct apic apic_summit = {
17171+static struct apic apic_summit __read_only = {
17172
17173 .name = "summit",
17174 .probe = probe_summit,
17175diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
17176index c88baa4..757aee1 100644
17177--- a/arch/x86/kernel/apic/x2apic_cluster.c
17178+++ b/arch/x86/kernel/apic/x2apic_cluster.c
17179@@ -183,7 +183,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
17180 return notifier_from_errno(err);
17181 }
17182
17183-static struct notifier_block __refdata x2apic_cpu_notifier = {
17184+static struct notifier_block x2apic_cpu_notifier = {
17185 .notifier_call = update_clusterinfo,
17186 };
17187
17188@@ -235,7 +235,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
17189 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
17190 }
17191
17192-static struct apic apic_x2apic_cluster = {
17193+static struct apic apic_x2apic_cluster __read_only = {
17194
17195 .name = "cluster x2apic",
17196 .probe = x2apic_cluster_probe,
17197diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
17198index 562a76d..a003c0f 100644
17199--- a/arch/x86/kernel/apic/x2apic_phys.c
17200+++ b/arch/x86/kernel/apic/x2apic_phys.c
17201@@ -89,7 +89,7 @@ static int x2apic_phys_probe(void)
17202 return apic == &apic_x2apic_phys;
17203 }
17204
17205-static struct apic apic_x2apic_phys = {
17206+static struct apic apic_x2apic_phys __read_only = {
17207
17208 .name = "physical x2apic",
17209 .probe = x2apic_phys_probe,
17210diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
17211index 8cfade9..b9d04fc 100644
17212--- a/arch/x86/kernel/apic/x2apic_uv_x.c
17213+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
17214@@ -333,7 +333,7 @@ static int uv_probe(void)
17215 return apic == &apic_x2apic_uv_x;
17216 }
17217
17218-static struct apic __refdata apic_x2apic_uv_x = {
17219+static struct apic apic_x2apic_uv_x __read_only = {
17220
17221 .name = "UV large system",
17222 .probe = uv_probe,
17223diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
17224index d65464e..1035d31 100644
17225--- a/arch/x86/kernel/apm_32.c
17226+++ b/arch/x86/kernel/apm_32.c
17227@@ -412,7 +412,7 @@ static DEFINE_MUTEX(apm_mutex);
17228 * This is for buggy BIOS's that refer to (real mode) segment 0x40
17229 * even though they are called in protected mode.
17230 */
17231-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
17232+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
17233 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
17234
17235 static const char driver_version[] = "1.16ac"; /* no spaces */
17236@@ -590,7 +590,10 @@ static long __apm_bios_call(void *_call)
17237 BUG_ON(cpu != 0);
17238 gdt = get_cpu_gdt_table(cpu);
17239 save_desc_40 = gdt[0x40 / 8];
17240+
17241+ pax_open_kernel();
17242 gdt[0x40 / 8] = bad_bios_desc;
17243+ pax_close_kernel();
17244
17245 apm_irq_save(flags);
17246 APM_DO_SAVE_SEGS;
17247@@ -599,7 +602,11 @@ static long __apm_bios_call(void *_call)
17248 &call->esi);
17249 APM_DO_RESTORE_SEGS;
17250 apm_irq_restore(flags);
17251+
17252+ pax_open_kernel();
17253 gdt[0x40 / 8] = save_desc_40;
17254+ pax_close_kernel();
17255+
17256 put_cpu();
17257
17258 return call->eax & 0xff;
17259@@ -666,7 +673,10 @@ static long __apm_bios_call_simple(void *_call)
17260 BUG_ON(cpu != 0);
17261 gdt = get_cpu_gdt_table(cpu);
17262 save_desc_40 = gdt[0x40 / 8];
17263+
17264+ pax_open_kernel();
17265 gdt[0x40 / 8] = bad_bios_desc;
17266+ pax_close_kernel();
17267
17268 apm_irq_save(flags);
17269 APM_DO_SAVE_SEGS;
17270@@ -674,7 +684,11 @@ static long __apm_bios_call_simple(void *_call)
17271 &call->eax);
17272 APM_DO_RESTORE_SEGS;
17273 apm_irq_restore(flags);
17274+
17275+ pax_open_kernel();
17276 gdt[0x40 / 8] = save_desc_40;
17277+ pax_close_kernel();
17278+
17279 put_cpu();
17280 return error;
17281 }
17282@@ -2345,12 +2359,15 @@ static int __init apm_init(void)
17283 * code to that CPU.
17284 */
17285 gdt = get_cpu_gdt_table(0);
17286+
17287+ pax_open_kernel();
17288 set_desc_base(&gdt[APM_CS >> 3],
17289 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
17290 set_desc_base(&gdt[APM_CS_16 >> 3],
17291 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
17292 set_desc_base(&gdt[APM_DS >> 3],
17293 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
17294+ pax_close_kernel();
17295
17296 proc_create("apm", 0, NULL, &apm_file_ops);
17297
17298diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
17299index 2861082..6d4718e 100644
17300--- a/arch/x86/kernel/asm-offsets.c
17301+++ b/arch/x86/kernel/asm-offsets.c
17302@@ -33,6 +33,8 @@ void common(void) {
17303 OFFSET(TI_status, thread_info, status);
17304 OFFSET(TI_addr_limit, thread_info, addr_limit);
17305 OFFSET(TI_preempt_count, thread_info, preempt_count);
17306+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
17307+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
17308
17309 BLANK();
17310 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
17311@@ -53,8 +55,26 @@ void common(void) {
17312 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
17313 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
17314 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
17315+
17316+#ifdef CONFIG_PAX_KERNEXEC
17317+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
17318 #endif
17319
17320+#ifdef CONFIG_PAX_MEMORY_UDEREF
17321+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
17322+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
17323+#ifdef CONFIG_X86_64
17324+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
17325+#endif
17326+#endif
17327+
17328+#endif
17329+
17330+ BLANK();
17331+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
17332+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
17333+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
17334+
17335 #ifdef CONFIG_XEN
17336 BLANK();
17337 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
17338diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
17339index 1b4754f..fbb4227 100644
17340--- a/arch/x86/kernel/asm-offsets_64.c
17341+++ b/arch/x86/kernel/asm-offsets_64.c
17342@@ -76,6 +76,7 @@ int main(void)
17343 BLANK();
17344 #undef ENTRY
17345
17346+ DEFINE(TSS_size, sizeof(struct tss_struct));
17347 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
17348 BLANK();
17349
17350diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
17351index a0e067d..9c7db16 100644
17352--- a/arch/x86/kernel/cpu/Makefile
17353+++ b/arch/x86/kernel/cpu/Makefile
17354@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
17355 CFLAGS_REMOVE_perf_event.o = -pg
17356 endif
17357
17358-# Make sure load_percpu_segment has no stackprotector
17359-nostackp := $(call cc-option, -fno-stack-protector)
17360-CFLAGS_common.o := $(nostackp)
17361-
17362 obj-y := intel_cacheinfo.o scattered.o topology.o
17363 obj-y += proc.o capflags.o powerflags.o common.o
17364 obj-y += vmware.o hypervisor.o mshyperv.o
17365diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
17366index 15239ff..e23e04e 100644
17367--- a/arch/x86/kernel/cpu/amd.c
17368+++ b/arch/x86/kernel/cpu/amd.c
17369@@ -733,7 +733,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
17370 unsigned int size)
17371 {
17372 /* AMD errata T13 (order #21922) */
17373- if ((c->x86 == 6)) {
17374+ if (c->x86 == 6) {
17375 /* Duron Rev A0 */
17376 if (c->x86_model == 3 && c->x86_mask == 0)
17377 size = 64;
17378diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
17379index 9c3ab43..51e6366 100644
17380--- a/arch/x86/kernel/cpu/common.c
17381+++ b/arch/x86/kernel/cpu/common.c
17382@@ -86,60 +86,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
17383
17384 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
17385
17386-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
17387-#ifdef CONFIG_X86_64
17388- /*
17389- * We need valid kernel segments for data and code in long mode too
17390- * IRET will check the segment types kkeil 2000/10/28
17391- * Also sysret mandates a special GDT layout
17392- *
17393- * TLS descriptors are currently at a different place compared to i386.
17394- * Hopefully nobody expects them at a fixed place (Wine?)
17395- */
17396- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
17397- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
17398- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
17399- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
17400- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
17401- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
17402-#else
17403- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
17404- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
17405- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
17406- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
17407- /*
17408- * Segments used for calling PnP BIOS have byte granularity.
17409- * They code segments and data segments have fixed 64k limits,
17410- * the transfer segment sizes are set at run time.
17411- */
17412- /* 32-bit code */
17413- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
17414- /* 16-bit code */
17415- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
17416- /* 16-bit data */
17417- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
17418- /* 16-bit data */
17419- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
17420- /* 16-bit data */
17421- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
17422- /*
17423- * The APM segments have byte granularity and their bases
17424- * are set at run time. All have 64k limits.
17425- */
17426- /* 32-bit code */
17427- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
17428- /* 16-bit code */
17429- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
17430- /* data */
17431- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
17432-
17433- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
17434- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
17435- GDT_STACK_CANARY_INIT
17436-#endif
17437-} };
17438-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
17439-
17440 static int __init x86_xsave_setup(char *s)
17441 {
17442 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
17443@@ -389,7 +335,7 @@ void switch_to_new_gdt(int cpu)
17444 {
17445 struct desc_ptr gdt_descr;
17446
17447- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
17448+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
17449 gdt_descr.size = GDT_SIZE - 1;
17450 load_gdt(&gdt_descr);
17451 /* Reload the per-cpu base */
17452@@ -885,6 +831,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
17453 /* Filter out anything that depends on CPUID levels we don't have */
17454 filter_cpuid_features(c, true);
17455
17456+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
17457+ setup_clear_cpu_cap(X86_FEATURE_SEP);
17458+#endif
17459+
17460 /* If the model name is still unset, do table lookup. */
17461 if (!c->x86_model_id[0]) {
17462 const char *p;
17463@@ -1068,10 +1018,12 @@ static __init int setup_disablecpuid(char *arg)
17464 }
17465 __setup("clearcpuid=", setup_disablecpuid);
17466
17467+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
17468+EXPORT_PER_CPU_SYMBOL(current_tinfo);
17469+
17470 #ifdef CONFIG_X86_64
17471 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
17472-struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
17473- (unsigned long) nmi_idt_table };
17474+struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
17475
17476 DEFINE_PER_CPU_FIRST(union irq_stack_union,
17477 irq_stack_union) __aligned(PAGE_SIZE);
17478@@ -1085,7 +1037,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
17479 EXPORT_PER_CPU_SYMBOL(current_task);
17480
17481 DEFINE_PER_CPU(unsigned long, kernel_stack) =
17482- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
17483+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
17484 EXPORT_PER_CPU_SYMBOL(kernel_stack);
17485
17486 DEFINE_PER_CPU(char *, irq_stack_ptr) =
17487@@ -1224,7 +1176,7 @@ void __cpuinit cpu_init(void)
17488 int i;
17489
17490 cpu = stack_smp_processor_id();
17491- t = &per_cpu(init_tss, cpu);
17492+ t = init_tss + cpu;
17493 oist = &per_cpu(orig_ist, cpu);
17494
17495 #ifdef CONFIG_NUMA
17496@@ -1250,7 +1202,7 @@ void __cpuinit cpu_init(void)
17497 switch_to_new_gdt(cpu);
17498 loadsegment(fs, 0);
17499
17500- load_idt((const struct desc_ptr *)&idt_descr);
17501+ load_idt(&idt_descr);
17502
17503 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
17504 syscall_init();
17505@@ -1259,7 +1211,6 @@ void __cpuinit cpu_init(void)
17506 wrmsrl(MSR_KERNEL_GS_BASE, 0);
17507 barrier();
17508
17509- x86_configure_nx();
17510 enable_x2apic();
17511
17512 /*
17513@@ -1311,7 +1262,7 @@ void __cpuinit cpu_init(void)
17514 {
17515 int cpu = smp_processor_id();
17516 struct task_struct *curr = current;
17517- struct tss_struct *t = &per_cpu(init_tss, cpu);
17518+ struct tss_struct *t = init_tss + cpu;
17519 struct thread_struct *thread = &curr->thread;
17520
17521 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
17522diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
17523index fcaabd0..7b55a26 100644
17524--- a/arch/x86/kernel/cpu/intel.c
17525+++ b/arch/x86/kernel/cpu/intel.c
17526@@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
17527 * Update the IDT descriptor and reload the IDT so that
17528 * it uses the read-only mapped virtual address.
17529 */
17530- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
17531+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
17532 load_idt(&idt_descr);
17533 }
17534 #endif
17535diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
17536index 84c1309..39b7224 100644
17537--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
17538+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
17539@@ -1017,6 +1017,22 @@ static struct attribute *default_attrs[] = {
17540 };
17541
17542 #ifdef CONFIG_AMD_NB
17543+static struct attribute *default_attrs_amd_nb[] = {
17544+ &type.attr,
17545+ &level.attr,
17546+ &coherency_line_size.attr,
17547+ &physical_line_partition.attr,
17548+ &ways_of_associativity.attr,
17549+ &number_of_sets.attr,
17550+ &size.attr,
17551+ &shared_cpu_map.attr,
17552+ &shared_cpu_list.attr,
17553+ NULL,
17554+ NULL,
17555+ NULL,
17556+ NULL
17557+};
17558+
17559 static struct attribute ** __cpuinit amd_l3_attrs(void)
17560 {
17561 static struct attribute **attrs;
17562@@ -1027,18 +1043,7 @@ static struct attribute ** __cpuinit amd_l3_attrs(void)
17563
17564 n = ARRAY_SIZE(default_attrs);
17565
17566- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
17567- n += 2;
17568-
17569- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
17570- n += 1;
17571-
17572- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
17573- if (attrs == NULL)
17574- return attrs = default_attrs;
17575-
17576- for (n = 0; default_attrs[n]; n++)
17577- attrs[n] = default_attrs[n];
17578+ attrs = default_attrs_amd_nb;
17579
17580 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
17581 attrs[n++] = &cache_disable_0.attr;
17582@@ -1089,6 +1094,13 @@ static struct kobj_type ktype_cache = {
17583 .default_attrs = default_attrs,
17584 };
17585
17586+#ifdef CONFIG_AMD_NB
17587+static struct kobj_type ktype_cache_amd_nb = {
17588+ .sysfs_ops = &sysfs_ops,
17589+ .default_attrs = default_attrs_amd_nb,
17590+};
17591+#endif
17592+
17593 static struct kobj_type ktype_percpu_entry = {
17594 .sysfs_ops = &sysfs_ops,
17595 };
17596@@ -1154,20 +1166,26 @@ static int __cpuinit cache_add_dev(struct device *dev)
17597 return retval;
17598 }
17599
17600+#ifdef CONFIG_AMD_NB
17601+ amd_l3_attrs();
17602+#endif
17603+
17604 for (i = 0; i < num_cache_leaves; i++) {
17605+ struct kobj_type *ktype;
17606+
17607 this_object = INDEX_KOBJECT_PTR(cpu, i);
17608 this_object->cpu = cpu;
17609 this_object->index = i;
17610
17611 this_leaf = CPUID4_INFO_IDX(cpu, i);
17612
17613- ktype_cache.default_attrs = default_attrs;
17614+ ktype = &ktype_cache;
17615 #ifdef CONFIG_AMD_NB
17616 if (this_leaf->base.nb)
17617- ktype_cache.default_attrs = amd_l3_attrs();
17618+ ktype = &ktype_cache_amd_nb;
17619 #endif
17620 retval = kobject_init_and_add(&(this_object->kobj),
17621- &ktype_cache,
17622+ ktype,
17623 per_cpu(ici_cache_kobject, cpu),
17624 "index%1lu", i);
17625 if (unlikely(retval)) {
17626@@ -1222,7 +1240,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
17627 return NOTIFY_OK;
17628 }
17629
17630-static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
17631+static struct notifier_block cacheinfo_cpu_notifier = {
17632 .notifier_call = cacheinfo_cpu_callback,
17633 };
17634
17635diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
17636index 80dbda8..be16652 100644
17637--- a/arch/x86/kernel/cpu/mcheck/mce.c
17638+++ b/arch/x86/kernel/cpu/mcheck/mce.c
17639@@ -45,6 +45,7 @@
17640 #include <asm/processor.h>
17641 #include <asm/mce.h>
17642 #include <asm/msr.h>
17643+#include <asm/local.h>
17644
17645 #include "mce-internal.h"
17646
17647@@ -246,7 +247,7 @@ static void print_mce(struct mce *m)
17648 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
17649 m->cs, m->ip);
17650
17651- if (m->cs == __KERNEL_CS)
17652+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
17653 print_symbol("{%s}", m->ip);
17654 pr_cont("\n");
17655 }
17656@@ -279,10 +280,10 @@ static void print_mce(struct mce *m)
17657
17658 #define PANIC_TIMEOUT 5 /* 5 seconds */
17659
17660-static atomic_t mce_paniced;
17661+static atomic_unchecked_t mce_paniced;
17662
17663 static int fake_panic;
17664-static atomic_t mce_fake_paniced;
17665+static atomic_unchecked_t mce_fake_paniced;
17666
17667 /* Panic in progress. Enable interrupts and wait for final IPI */
17668 static void wait_for_panic(void)
17669@@ -306,7 +307,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
17670 /*
17671 * Make sure only one CPU runs in machine check panic
17672 */
17673- if (atomic_inc_return(&mce_paniced) > 1)
17674+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
17675 wait_for_panic();
17676 barrier();
17677
17678@@ -314,7 +315,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
17679 console_verbose();
17680 } else {
17681 /* Don't log too much for fake panic */
17682- if (atomic_inc_return(&mce_fake_paniced) > 1)
17683+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
17684 return;
17685 }
17686 /* First print corrected ones that are still unlogged */
17687@@ -686,7 +687,7 @@ static int mce_timed_out(u64 *t)
17688 * might have been modified by someone else.
17689 */
17690 rmb();
17691- if (atomic_read(&mce_paniced))
17692+ if (atomic_read_unchecked(&mce_paniced))
17693 wait_for_panic();
17694 if (!mca_cfg.monarch_timeout)
17695 goto out;
17696@@ -1662,7 +1663,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
17697 }
17698
17699 /* Call the installed machine check handler for this CPU setup. */
17700-void (*machine_check_vector)(struct pt_regs *, long error_code) =
17701+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
17702 unexpected_machine_check;
17703
17704 /*
17705@@ -1685,7 +1686,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
17706 return;
17707 }
17708
17709+ pax_open_kernel();
17710 machine_check_vector = do_machine_check;
17711+ pax_close_kernel();
17712
17713 __mcheck_cpu_init_generic();
17714 __mcheck_cpu_init_vendor(c);
17715@@ -1699,7 +1702,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
17716 */
17717
17718 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
17719-static int mce_chrdev_open_count; /* #times opened */
17720+static local_t mce_chrdev_open_count; /* #times opened */
17721 static int mce_chrdev_open_exclu; /* already open exclusive? */
17722
17723 static int mce_chrdev_open(struct inode *inode, struct file *file)
17724@@ -1707,7 +1710,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
17725 spin_lock(&mce_chrdev_state_lock);
17726
17727 if (mce_chrdev_open_exclu ||
17728- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
17729+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
17730 spin_unlock(&mce_chrdev_state_lock);
17731
17732 return -EBUSY;
17733@@ -1715,7 +1718,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
17734
17735 if (file->f_flags & O_EXCL)
17736 mce_chrdev_open_exclu = 1;
17737- mce_chrdev_open_count++;
17738+ local_inc(&mce_chrdev_open_count);
17739
17740 spin_unlock(&mce_chrdev_state_lock);
17741
17742@@ -1726,7 +1729,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
17743 {
17744 spin_lock(&mce_chrdev_state_lock);
17745
17746- mce_chrdev_open_count--;
17747+ local_dec(&mce_chrdev_open_count);
17748 mce_chrdev_open_exclu = 0;
17749
17750 spin_unlock(&mce_chrdev_state_lock);
17751@@ -2372,7 +2375,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
17752 return NOTIFY_OK;
17753 }
17754
17755-static struct notifier_block mce_cpu_notifier __cpuinitdata = {
17756+static struct notifier_block mce_cpu_notifier = {
17757 .notifier_call = mce_cpu_callback,
17758 };
17759
17760@@ -2382,7 +2385,7 @@ static __init void mce_init_banks(void)
17761
17762 for (i = 0; i < mca_cfg.banks; i++) {
17763 struct mce_bank *b = &mce_banks[i];
17764- struct device_attribute *a = &b->attr;
17765+ device_attribute_no_const *a = &b->attr;
17766
17767 sysfs_attr_init(&a->attr);
17768 a->attr.name = b->attrname;
17769@@ -2450,7 +2453,7 @@ struct dentry *mce_get_debugfs_dir(void)
17770 static void mce_reset(void)
17771 {
17772 cpu_missing = 0;
17773- atomic_set(&mce_fake_paniced, 0);
17774+ atomic_set_unchecked(&mce_fake_paniced, 0);
17775 atomic_set(&mce_executing, 0);
17776 atomic_set(&mce_callin, 0);
17777 atomic_set(&global_nwo, 0);
17778diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
17779index 2d5454c..51987eb 100644
17780--- a/arch/x86/kernel/cpu/mcheck/p5.c
17781+++ b/arch/x86/kernel/cpu/mcheck/p5.c
17782@@ -11,6 +11,7 @@
17783 #include <asm/processor.h>
17784 #include <asm/mce.h>
17785 #include <asm/msr.h>
17786+#include <asm/pgtable.h>
17787
17788 /* By default disabled */
17789 int mce_p5_enabled __read_mostly;
17790@@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
17791 if (!cpu_has(c, X86_FEATURE_MCE))
17792 return;
17793
17794+ pax_open_kernel();
17795 machine_check_vector = pentium_machine_check;
17796+ pax_close_kernel();
17797 /* Make sure the vector pointer is visible before we enable MCEs: */
17798 wmb();
17799
17800diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
17801index 47a1870..8c019a7 100644
17802--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
17803+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
17804@@ -288,7 +288,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
17805 return notifier_from_errno(err);
17806 }
17807
17808-static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
17809+static struct notifier_block thermal_throttle_cpu_notifier =
17810 {
17811 .notifier_call = thermal_throttle_cpu_callback,
17812 };
17813diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
17814index 2d7998f..17c9de1 100644
17815--- a/arch/x86/kernel/cpu/mcheck/winchip.c
17816+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
17817@@ -10,6 +10,7 @@
17818 #include <asm/processor.h>
17819 #include <asm/mce.h>
17820 #include <asm/msr.h>
17821+#include <asm/pgtable.h>
17822
17823 /* Machine check handler for WinChip C6: */
17824 static void winchip_machine_check(struct pt_regs *regs, long error_code)
17825@@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
17826 {
17827 u32 lo, hi;
17828
17829+ pax_open_kernel();
17830 machine_check_vector = winchip_machine_check;
17831+ pax_close_kernel();
17832 /* Make sure the vector pointer is visible before we enable MCEs: */
17833 wmb();
17834
17835diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
17836index 726bf96..81f0526 100644
17837--- a/arch/x86/kernel/cpu/mtrr/main.c
17838+++ b/arch/x86/kernel/cpu/mtrr/main.c
17839@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
17840 u64 size_or_mask, size_and_mask;
17841 static bool mtrr_aps_delayed_init;
17842
17843-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
17844+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
17845
17846 const struct mtrr_ops *mtrr_if;
17847
17848diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
17849index df5e41f..816c719 100644
17850--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
17851+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
17852@@ -25,7 +25,7 @@ struct mtrr_ops {
17853 int (*validate_add_page)(unsigned long base, unsigned long size,
17854 unsigned int type);
17855 int (*have_wrcomb)(void);
17856-};
17857+} __do_const;
17858
17859 extern int generic_get_free_region(unsigned long base, unsigned long size,
17860 int replace_reg);
17861diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
17862index 6774c17..72c1b22 100644
17863--- a/arch/x86/kernel/cpu/perf_event.c
17864+++ b/arch/x86/kernel/cpu/perf_event.c
17865@@ -1305,7 +1305,7 @@ static void __init pmu_check_apic(void)
17866 pr_info("no hardware sampling interrupt available.\n");
17867 }
17868
17869-static struct attribute_group x86_pmu_format_group = {
17870+static attribute_group_no_const x86_pmu_format_group = {
17871 .name = "format",
17872 .attrs = NULL,
17873 };
17874@@ -1313,7 +1313,7 @@ static struct attribute_group x86_pmu_format_group = {
17875 struct perf_pmu_events_attr {
17876 struct device_attribute attr;
17877 u64 id;
17878-};
17879+} __do_const;
17880
17881 /*
17882 * Remove all undefined events (x86_pmu.event_map(id) == 0)
17883@@ -1381,7 +1381,7 @@ static struct attribute *events_attr[] = {
17884 NULL,
17885 };
17886
17887-static struct attribute_group x86_pmu_events_group = {
17888+static attribute_group_no_const x86_pmu_events_group = {
17889 .name = "events",
17890 .attrs = events_attr,
17891 };
17892@@ -1880,7 +1880,7 @@ static unsigned long get_segment_base(unsigned int segment)
17893 if (idx > GDT_ENTRIES)
17894 return 0;
17895
17896- desc = __this_cpu_ptr(&gdt_page.gdt[0]);
17897+ desc = get_cpu_gdt_table(smp_processor_id());
17898 }
17899
17900 return get_desc_base(desc + idx);
17901@@ -1970,7 +1970,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
17902 break;
17903
17904 perf_callchain_store(entry, frame.return_address);
17905- fp = frame.next_frame;
17906+ fp = (const void __force_user *)frame.next_frame;
17907 }
17908 }
17909
17910diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
17911index 4914e94..60b06e3 100644
17912--- a/arch/x86/kernel/cpu/perf_event_intel.c
17913+++ b/arch/x86/kernel/cpu/perf_event_intel.c
17914@@ -1958,10 +1958,10 @@ __init int intel_pmu_init(void)
17915 * v2 and above have a perf capabilities MSR
17916 */
17917 if (version > 1) {
17918- u64 capabilities;
17919+ u64 capabilities = x86_pmu.intel_cap.capabilities;
17920
17921- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
17922- x86_pmu.intel_cap.capabilities = capabilities;
17923+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
17924+ x86_pmu.intel_cap.capabilities = capabilities;
17925 }
17926
17927 intel_ds_init();
17928diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
17929index b43200d..d235b3e 100644
17930--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
17931+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
17932@@ -2428,7 +2428,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
17933 static int __init uncore_type_init(struct intel_uncore_type *type)
17934 {
17935 struct intel_uncore_pmu *pmus;
17936- struct attribute_group *events_group;
17937+ attribute_group_no_const *attr_group;
17938 struct attribute **attrs;
17939 int i, j;
17940
17941@@ -2455,19 +2455,19 @@ static int __init uncore_type_init(struct intel_uncore_type *type)
17942 while (type->event_descs[i].attr.attr.name)
17943 i++;
17944
17945- events_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
17946- sizeof(*events_group), GFP_KERNEL);
17947- if (!events_group)
17948+ attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
17949+ sizeof(*attr_group), GFP_KERNEL);
17950+ if (!attr_group)
17951 goto fail;
17952
17953- attrs = (struct attribute **)(events_group + 1);
17954- events_group->name = "events";
17955- events_group->attrs = attrs;
17956+ attrs = (struct attribute **)(attr_group + 1);
17957+ attr_group->name = "events";
17958+ attr_group->attrs = attrs;
17959
17960 for (j = 0; j < i; j++)
17961 attrs[j] = &type->event_descs[j].attr.attr;
17962
17963- type->events_group = events_group;
17964+ type->events_group = attr_group;
17965 }
17966
17967 type->pmu_group = &uncore_pmu_attr_group;
17968@@ -2826,7 +2826,7 @@ static int
17969 return NOTIFY_OK;
17970 }
17971
17972-static struct notifier_block uncore_cpu_nb __cpuinitdata = {
17973+static struct notifier_block uncore_cpu_nb = {
17974 .notifier_call = uncore_cpu_notifier,
17975 /*
17976 * to migrate uncore events, our notifier should be executed
17977diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
17978index e68a455..975a932 100644
17979--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
17980+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
17981@@ -428,7 +428,7 @@ struct intel_uncore_box {
17982 struct uncore_event_desc {
17983 struct kobj_attribute attr;
17984 const char *config;
17985-};
17986+} __do_const;
17987
17988 #define INTEL_UNCORE_EVENT_DESC(_name, _config) \
17989 { \
17990diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
17991index 60c7891..9e911d3 100644
17992--- a/arch/x86/kernel/cpuid.c
17993+++ b/arch/x86/kernel/cpuid.c
17994@@ -171,7 +171,7 @@ static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
17995 return notifier_from_errno(err);
17996 }
17997
17998-static struct notifier_block __refdata cpuid_class_cpu_notifier =
17999+static struct notifier_block cpuid_class_cpu_notifier =
18000 {
18001 .notifier_call = cpuid_class_cpu_callback,
18002 };
18003diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
18004index 74467fe..18793d5 100644
18005--- a/arch/x86/kernel/crash.c
18006+++ b/arch/x86/kernel/crash.c
18007@@ -58,10 +58,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
18008 {
18009 #ifdef CONFIG_X86_32
18010 struct pt_regs fixed_regs;
18011-#endif
18012
18013-#ifdef CONFIG_X86_32
18014- if (!user_mode_vm(regs)) {
18015+ if (!user_mode(regs)) {
18016 crash_fixup_ss_esp(&fixed_regs, regs);
18017 regs = &fixed_regs;
18018 }
18019diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
18020index 37250fe..bf2ec74 100644
18021--- a/arch/x86/kernel/doublefault_32.c
18022+++ b/arch/x86/kernel/doublefault_32.c
18023@@ -11,7 +11,7 @@
18024
18025 #define DOUBLEFAULT_STACKSIZE (1024)
18026 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
18027-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
18028+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
18029
18030 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
18031
18032@@ -21,7 +21,7 @@ static void doublefault_fn(void)
18033 unsigned long gdt, tss;
18034
18035 store_gdt(&gdt_desc);
18036- gdt = gdt_desc.address;
18037+ gdt = (unsigned long)gdt_desc.address;
18038
18039 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
18040
18041@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
18042 /* 0x2 bit is always set */
18043 .flags = X86_EFLAGS_SF | 0x2,
18044 .sp = STACK_START,
18045- .es = __USER_DS,
18046+ .es = __KERNEL_DS,
18047 .cs = __KERNEL_CS,
18048 .ss = __KERNEL_DS,
18049- .ds = __USER_DS,
18050+ .ds = __KERNEL_DS,
18051 .fs = __KERNEL_PERCPU,
18052
18053 .__cr3 = __pa_nodebug(swapper_pg_dir),
18054diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
18055index ae42418b..787c16b 100644
18056--- a/arch/x86/kernel/dumpstack.c
18057+++ b/arch/x86/kernel/dumpstack.c
18058@@ -2,6 +2,9 @@
18059 * Copyright (C) 1991, 1992 Linus Torvalds
18060 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
18061 */
18062+#ifdef CONFIG_GRKERNSEC_HIDESYM
18063+#define __INCLUDED_BY_HIDESYM 1
18064+#endif
18065 #include <linux/kallsyms.h>
18066 #include <linux/kprobes.h>
18067 #include <linux/uaccess.h>
18068@@ -35,16 +38,14 @@ void printk_address(unsigned long address, int reliable)
18069 static void
18070 print_ftrace_graph_addr(unsigned long addr, void *data,
18071 const struct stacktrace_ops *ops,
18072- struct thread_info *tinfo, int *graph)
18073+ struct task_struct *task, int *graph)
18074 {
18075- struct task_struct *task;
18076 unsigned long ret_addr;
18077 int index;
18078
18079 if (addr != (unsigned long)return_to_handler)
18080 return;
18081
18082- task = tinfo->task;
18083 index = task->curr_ret_stack;
18084
18085 if (!task->ret_stack || index < *graph)
18086@@ -61,7 +62,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
18087 static inline void
18088 print_ftrace_graph_addr(unsigned long addr, void *data,
18089 const struct stacktrace_ops *ops,
18090- struct thread_info *tinfo, int *graph)
18091+ struct task_struct *task, int *graph)
18092 { }
18093 #endif
18094
18095@@ -72,10 +73,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
18096 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
18097 */
18098
18099-static inline int valid_stack_ptr(struct thread_info *tinfo,
18100- void *p, unsigned int size, void *end)
18101+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
18102 {
18103- void *t = tinfo;
18104 if (end) {
18105 if (p < end && p >= (end-THREAD_SIZE))
18106 return 1;
18107@@ -86,14 +85,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
18108 }
18109
18110 unsigned long
18111-print_context_stack(struct thread_info *tinfo,
18112+print_context_stack(struct task_struct *task, void *stack_start,
18113 unsigned long *stack, unsigned long bp,
18114 const struct stacktrace_ops *ops, void *data,
18115 unsigned long *end, int *graph)
18116 {
18117 struct stack_frame *frame = (struct stack_frame *)bp;
18118
18119- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
18120+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
18121 unsigned long addr;
18122
18123 addr = *stack;
18124@@ -105,7 +104,7 @@ print_context_stack(struct thread_info *tinfo,
18125 } else {
18126 ops->address(data, addr, 0);
18127 }
18128- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
18129+ print_ftrace_graph_addr(addr, data, ops, task, graph);
18130 }
18131 stack++;
18132 }
18133@@ -114,7 +113,7 @@ print_context_stack(struct thread_info *tinfo,
18134 EXPORT_SYMBOL_GPL(print_context_stack);
18135
18136 unsigned long
18137-print_context_stack_bp(struct thread_info *tinfo,
18138+print_context_stack_bp(struct task_struct *task, void *stack_start,
18139 unsigned long *stack, unsigned long bp,
18140 const struct stacktrace_ops *ops, void *data,
18141 unsigned long *end, int *graph)
18142@@ -122,7 +121,7 @@ print_context_stack_bp(struct thread_info *tinfo,
18143 struct stack_frame *frame = (struct stack_frame *)bp;
18144 unsigned long *ret_addr = &frame->return_address;
18145
18146- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
18147+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
18148 unsigned long addr = *ret_addr;
18149
18150 if (!__kernel_text_address(addr))
18151@@ -131,7 +130,7 @@ print_context_stack_bp(struct thread_info *tinfo,
18152 ops->address(data, addr, 1);
18153 frame = frame->next_frame;
18154 ret_addr = &frame->return_address;
18155- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
18156+ print_ftrace_graph_addr(addr, data, ops, task, graph);
18157 }
18158
18159 return (unsigned long)frame;
18160@@ -189,7 +188,7 @@ void dump_stack(void)
18161
18162 bp = stack_frame(current, NULL);
18163 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
18164- current->pid, current->comm, print_tainted(),
18165+ task_pid_nr(current), current->comm, print_tainted(),
18166 init_utsname()->release,
18167 (int)strcspn(init_utsname()->version, " "),
18168 init_utsname()->version);
18169@@ -225,6 +224,8 @@ unsigned __kprobes long oops_begin(void)
18170 }
18171 EXPORT_SYMBOL_GPL(oops_begin);
18172
18173+extern void gr_handle_kernel_exploit(void);
18174+
18175 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
18176 {
18177 if (regs && kexec_should_crash(current))
18178@@ -246,7 +247,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
18179 panic("Fatal exception in interrupt");
18180 if (panic_on_oops)
18181 panic("Fatal exception");
18182- do_exit(signr);
18183+
18184+ gr_handle_kernel_exploit();
18185+
18186+ do_group_exit(signr);
18187 }
18188
18189 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
18190@@ -274,7 +278,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
18191 print_modules();
18192 show_regs(regs);
18193 #ifdef CONFIG_X86_32
18194- if (user_mode_vm(regs)) {
18195+ if (user_mode(regs)) {
18196 sp = regs->sp;
18197 ss = regs->ss & 0xffff;
18198 } else {
18199@@ -302,7 +306,7 @@ void die(const char *str, struct pt_regs *regs, long err)
18200 unsigned long flags = oops_begin();
18201 int sig = SIGSEGV;
18202
18203- if (!user_mode_vm(regs))
18204+ if (!user_mode(regs))
18205 report_bug(regs->ip, regs);
18206
18207 if (__die(str, regs, err))
18208diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
18209index 1038a41..db2c12b 100644
18210--- a/arch/x86/kernel/dumpstack_32.c
18211+++ b/arch/x86/kernel/dumpstack_32.c
18212@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18213 bp = stack_frame(task, regs);
18214
18215 for (;;) {
18216- struct thread_info *context;
18217+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
18218
18219- context = (struct thread_info *)
18220- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
18221- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
18222+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
18223
18224- stack = (unsigned long *)context->previous_esp;
18225- if (!stack)
18226+ if (stack_start == task_stack_page(task))
18227 break;
18228+ stack = *(unsigned long **)stack_start;
18229 if (ops->stack(data, "IRQ") < 0)
18230 break;
18231 touch_nmi_watchdog();
18232@@ -86,7 +84,7 @@ void show_regs(struct pt_regs *regs)
18233 {
18234 int i;
18235
18236- __show_regs(regs, !user_mode_vm(regs));
18237+ __show_regs(regs, !user_mode(regs));
18238
18239 pr_emerg("Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n",
18240 TASK_COMM_LEN, current->comm, task_pid_nr(current),
18241@@ -95,21 +93,22 @@ void show_regs(struct pt_regs *regs)
18242 * When in-kernel, we also print out the stack and code at the
18243 * time of the fault..
18244 */
18245- if (!user_mode_vm(regs)) {
18246+ if (!user_mode(regs)) {
18247 unsigned int code_prologue = code_bytes * 43 / 64;
18248 unsigned int code_len = code_bytes;
18249 unsigned char c;
18250 u8 *ip;
18251+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
18252
18253 pr_emerg("Stack:\n");
18254 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
18255
18256 pr_emerg("Code:");
18257
18258- ip = (u8 *)regs->ip - code_prologue;
18259+ ip = (u8 *)regs->ip - code_prologue + cs_base;
18260 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
18261 /* try starting at IP */
18262- ip = (u8 *)regs->ip;
18263+ ip = (u8 *)regs->ip + cs_base;
18264 code_len = code_len - code_prologue + 1;
18265 }
18266 for (i = 0; i < code_len; i++, ip++) {
18267@@ -118,7 +117,7 @@ void show_regs(struct pt_regs *regs)
18268 pr_cont(" Bad EIP value.");
18269 break;
18270 }
18271- if (ip == (u8 *)regs->ip)
18272+ if (ip == (u8 *)regs->ip + cs_base)
18273 pr_cont(" <%02x>", c);
18274 else
18275 pr_cont(" %02x", c);
18276@@ -131,6 +130,7 @@ int is_valid_bugaddr(unsigned long ip)
18277 {
18278 unsigned short ud2;
18279
18280+ ip = ktla_ktva(ip);
18281 if (ip < PAGE_OFFSET)
18282 return 0;
18283 if (probe_kernel_address((unsigned short *)ip, ud2))
18284@@ -138,3 +138,15 @@ int is_valid_bugaddr(unsigned long ip)
18285
18286 return ud2 == 0x0b0f;
18287 }
18288+
18289+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
18290+void pax_check_alloca(unsigned long size)
18291+{
18292+ unsigned long sp = (unsigned long)&sp, stack_left;
18293+
18294+ /* all kernel stacks are of the same size */
18295+ stack_left = sp & (THREAD_SIZE - 1);
18296+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
18297+}
18298+EXPORT_SYMBOL(pax_check_alloca);
18299+#endif
18300diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
18301index b653675..51cc8c0 100644
18302--- a/arch/x86/kernel/dumpstack_64.c
18303+++ b/arch/x86/kernel/dumpstack_64.c
18304@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18305 unsigned long *irq_stack_end =
18306 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
18307 unsigned used = 0;
18308- struct thread_info *tinfo;
18309 int graph = 0;
18310 unsigned long dummy;
18311+ void *stack_start;
18312
18313 if (!task)
18314 task = current;
18315@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18316 * current stack address. If the stacks consist of nested
18317 * exceptions
18318 */
18319- tinfo = task_thread_info(task);
18320 for (;;) {
18321 char *id;
18322 unsigned long *estack_end;
18323+
18324 estack_end = in_exception_stack(cpu, (unsigned long)stack,
18325 &used, &id);
18326
18327@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18328 if (ops->stack(data, id) < 0)
18329 break;
18330
18331- bp = ops->walk_stack(tinfo, stack, bp, ops,
18332+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
18333 data, estack_end, &graph);
18334 ops->stack(data, "<EOE>");
18335 /*
18336@@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18337 * second-to-last pointer (index -2 to end) in the
18338 * exception stack:
18339 */
18340+ if ((u16)estack_end[-1] != __KERNEL_DS)
18341+ goto out;
18342 stack = (unsigned long *) estack_end[-2];
18343 continue;
18344 }
18345@@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18346 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
18347 if (ops->stack(data, "IRQ") < 0)
18348 break;
18349- bp = ops->walk_stack(tinfo, stack, bp,
18350+ bp = ops->walk_stack(task, irq_stack, stack, bp,
18351 ops, data, irq_stack_end, &graph);
18352 /*
18353 * We link to the next stack (which would be
18354@@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18355 /*
18356 * This handles the process stack:
18357 */
18358- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
18359+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
18360+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
18361+out:
18362 put_cpu();
18363 }
18364 EXPORT_SYMBOL(dump_trace);
18365@@ -249,7 +253,7 @@ void show_regs(struct pt_regs *regs)
18366 {
18367 int i;
18368 unsigned long sp;
18369- const int cpu = smp_processor_id();
18370+ const int cpu = raw_smp_processor_id();
18371 struct task_struct *cur = current;
18372
18373 sp = regs->sp;
18374@@ -304,3 +308,50 @@ int is_valid_bugaddr(unsigned long ip)
18375
18376 return ud2 == 0x0b0f;
18377 }
18378+
18379+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
18380+void pax_check_alloca(unsigned long size)
18381+{
18382+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
18383+ unsigned cpu, used;
18384+ char *id;
18385+
18386+ /* check the process stack first */
18387+ stack_start = (unsigned long)task_stack_page(current);
18388+ stack_end = stack_start + THREAD_SIZE;
18389+ if (likely(stack_start <= sp && sp < stack_end)) {
18390+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
18391+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
18392+ return;
18393+ }
18394+
18395+ cpu = get_cpu();
18396+
18397+ /* check the irq stacks */
18398+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
18399+ stack_start = stack_end - IRQ_STACK_SIZE;
18400+ if (stack_start <= sp && sp < stack_end) {
18401+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
18402+ put_cpu();
18403+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
18404+ return;
18405+ }
18406+
18407+ /* check the exception stacks */
18408+ used = 0;
18409+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
18410+ stack_start = stack_end - EXCEPTION_STKSZ;
18411+ if (stack_end && stack_start <= sp && sp < stack_end) {
18412+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
18413+ put_cpu();
18414+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
18415+ return;
18416+ }
18417+
18418+ put_cpu();
18419+
18420+ /* unknown stack */
18421+ BUG();
18422+}
18423+EXPORT_SYMBOL(pax_check_alloca);
18424+#endif
18425diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
18426index 9b9f18b..9fcaa04 100644
18427--- a/arch/x86/kernel/early_printk.c
18428+++ b/arch/x86/kernel/early_printk.c
18429@@ -7,6 +7,7 @@
18430 #include <linux/pci_regs.h>
18431 #include <linux/pci_ids.h>
18432 #include <linux/errno.h>
18433+#include <linux/sched.h>
18434 #include <asm/io.h>
18435 #include <asm/processor.h>
18436 #include <asm/fcntl.h>
18437diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
18438index 6ed91d9..6cc365b 100644
18439--- a/arch/x86/kernel/entry_32.S
18440+++ b/arch/x86/kernel/entry_32.S
18441@@ -177,13 +177,153 @@
18442 /*CFI_REL_OFFSET gs, PT_GS*/
18443 .endm
18444 .macro SET_KERNEL_GS reg
18445+
18446+#ifdef CONFIG_CC_STACKPROTECTOR
18447 movl $(__KERNEL_STACK_CANARY), \reg
18448+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
18449+ movl $(__USER_DS), \reg
18450+#else
18451+ xorl \reg, \reg
18452+#endif
18453+
18454 movl \reg, %gs
18455 .endm
18456
18457 #endif /* CONFIG_X86_32_LAZY_GS */
18458
18459-.macro SAVE_ALL
18460+.macro pax_enter_kernel
18461+#ifdef CONFIG_PAX_KERNEXEC
18462+ call pax_enter_kernel
18463+#endif
18464+.endm
18465+
18466+.macro pax_exit_kernel
18467+#ifdef CONFIG_PAX_KERNEXEC
18468+ call pax_exit_kernel
18469+#endif
18470+.endm
18471+
18472+#ifdef CONFIG_PAX_KERNEXEC
18473+ENTRY(pax_enter_kernel)
18474+#ifdef CONFIG_PARAVIRT
18475+ pushl %eax
18476+ pushl %ecx
18477+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
18478+ mov %eax, %esi
18479+#else
18480+ mov %cr0, %esi
18481+#endif
18482+ bts $16, %esi
18483+ jnc 1f
18484+ mov %cs, %esi
18485+ cmp $__KERNEL_CS, %esi
18486+ jz 3f
18487+ ljmp $__KERNEL_CS, $3f
18488+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
18489+2:
18490+#ifdef CONFIG_PARAVIRT
18491+ mov %esi, %eax
18492+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
18493+#else
18494+ mov %esi, %cr0
18495+#endif
18496+3:
18497+#ifdef CONFIG_PARAVIRT
18498+ popl %ecx
18499+ popl %eax
18500+#endif
18501+ ret
18502+ENDPROC(pax_enter_kernel)
18503+
18504+ENTRY(pax_exit_kernel)
18505+#ifdef CONFIG_PARAVIRT
18506+ pushl %eax
18507+ pushl %ecx
18508+#endif
18509+ mov %cs, %esi
18510+ cmp $__KERNEXEC_KERNEL_CS, %esi
18511+ jnz 2f
18512+#ifdef CONFIG_PARAVIRT
18513+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
18514+ mov %eax, %esi
18515+#else
18516+ mov %cr0, %esi
18517+#endif
18518+ btr $16, %esi
18519+ ljmp $__KERNEL_CS, $1f
18520+1:
18521+#ifdef CONFIG_PARAVIRT
18522+ mov %esi, %eax
18523+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
18524+#else
18525+ mov %esi, %cr0
18526+#endif
18527+2:
18528+#ifdef CONFIG_PARAVIRT
18529+ popl %ecx
18530+ popl %eax
18531+#endif
18532+ ret
18533+ENDPROC(pax_exit_kernel)
18534+#endif
18535+
18536+.macro pax_erase_kstack
18537+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
18538+ call pax_erase_kstack
18539+#endif
18540+.endm
18541+
18542+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
18543+/*
18544+ * ebp: thread_info
18545+ */
18546+ENTRY(pax_erase_kstack)
18547+ pushl %edi
18548+ pushl %ecx
18549+ pushl %eax
18550+
18551+ mov TI_lowest_stack(%ebp), %edi
18552+ mov $-0xBEEF, %eax
18553+ std
18554+
18555+1: mov %edi, %ecx
18556+ and $THREAD_SIZE_asm - 1, %ecx
18557+ shr $2, %ecx
18558+ repne scasl
18559+ jecxz 2f
18560+
18561+ cmp $2*16, %ecx
18562+ jc 2f
18563+
18564+ mov $2*16, %ecx
18565+ repe scasl
18566+ jecxz 2f
18567+ jne 1b
18568+
18569+2: cld
18570+ mov %esp, %ecx
18571+ sub %edi, %ecx
18572+
18573+ cmp $THREAD_SIZE_asm, %ecx
18574+ jb 3f
18575+ ud2
18576+3:
18577+
18578+ shr $2, %ecx
18579+ rep stosl
18580+
18581+ mov TI_task_thread_sp0(%ebp), %edi
18582+ sub $128, %edi
18583+ mov %edi, TI_lowest_stack(%ebp)
18584+
18585+ popl %eax
18586+ popl %ecx
18587+ popl %edi
18588+ ret
18589+ENDPROC(pax_erase_kstack)
18590+#endif
18591+
18592+.macro __SAVE_ALL _DS
18593 cld
18594 PUSH_GS
18595 pushl_cfi %fs
18596@@ -206,7 +346,7 @@
18597 CFI_REL_OFFSET ecx, 0
18598 pushl_cfi %ebx
18599 CFI_REL_OFFSET ebx, 0
18600- movl $(__USER_DS), %edx
18601+ movl $\_DS, %edx
18602 movl %edx, %ds
18603 movl %edx, %es
18604 movl $(__KERNEL_PERCPU), %edx
18605@@ -214,6 +354,15 @@
18606 SET_KERNEL_GS %edx
18607 .endm
18608
18609+.macro SAVE_ALL
18610+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
18611+ __SAVE_ALL __KERNEL_DS
18612+ pax_enter_kernel
18613+#else
18614+ __SAVE_ALL __USER_DS
18615+#endif
18616+.endm
18617+
18618 .macro RESTORE_INT_REGS
18619 popl_cfi %ebx
18620 CFI_RESTORE ebx
18621@@ -297,7 +446,7 @@ ENTRY(ret_from_fork)
18622 popfl_cfi
18623 jmp syscall_exit
18624 CFI_ENDPROC
18625-END(ret_from_fork)
18626+ENDPROC(ret_from_fork)
18627
18628 ENTRY(ret_from_kernel_thread)
18629 CFI_STARTPROC
18630@@ -344,7 +493,15 @@ ret_from_intr:
18631 andl $SEGMENT_RPL_MASK, %eax
18632 #endif
18633 cmpl $USER_RPL, %eax
18634+
18635+#ifdef CONFIG_PAX_KERNEXEC
18636+ jae resume_userspace
18637+
18638+ pax_exit_kernel
18639+ jmp resume_kernel
18640+#else
18641 jb resume_kernel # not returning to v8086 or userspace
18642+#endif
18643
18644 ENTRY(resume_userspace)
18645 LOCKDEP_SYS_EXIT
18646@@ -356,8 +513,8 @@ ENTRY(resume_userspace)
18647 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
18648 # int/exception return?
18649 jne work_pending
18650- jmp restore_all
18651-END(ret_from_exception)
18652+ jmp restore_all_pax
18653+ENDPROC(ret_from_exception)
18654
18655 #ifdef CONFIG_PREEMPT
18656 ENTRY(resume_kernel)
18657@@ -372,7 +529,7 @@ need_resched:
18658 jz restore_all
18659 call preempt_schedule_irq
18660 jmp need_resched
18661-END(resume_kernel)
18662+ENDPROC(resume_kernel)
18663 #endif
18664 CFI_ENDPROC
18665 /*
18666@@ -406,30 +563,45 @@ sysenter_past_esp:
18667 /*CFI_REL_OFFSET cs, 0*/
18668 /*
18669 * Push current_thread_info()->sysenter_return to the stack.
18670- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
18671- * pushed above; +8 corresponds to copy_thread's esp0 setting.
18672 */
18673- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
18674+ pushl_cfi $0
18675 CFI_REL_OFFSET eip, 0
18676
18677 pushl_cfi %eax
18678 SAVE_ALL
18679+ GET_THREAD_INFO(%ebp)
18680+ movl TI_sysenter_return(%ebp),%ebp
18681+ movl %ebp,PT_EIP(%esp)
18682 ENABLE_INTERRUPTS(CLBR_NONE)
18683
18684 /*
18685 * Load the potential sixth argument from user stack.
18686 * Careful about security.
18687 */
18688+ movl PT_OLDESP(%esp),%ebp
18689+
18690+#ifdef CONFIG_PAX_MEMORY_UDEREF
18691+ mov PT_OLDSS(%esp),%ds
18692+1: movl %ds:(%ebp),%ebp
18693+ push %ss
18694+ pop %ds
18695+#else
18696 cmpl $__PAGE_OFFSET-3,%ebp
18697 jae syscall_fault
18698 ASM_STAC
18699 1: movl (%ebp),%ebp
18700 ASM_CLAC
18701+#endif
18702+
18703 movl %ebp,PT_EBP(%esp)
18704 _ASM_EXTABLE(1b,syscall_fault)
18705
18706 GET_THREAD_INFO(%ebp)
18707
18708+#ifdef CONFIG_PAX_RANDKSTACK
18709+ pax_erase_kstack
18710+#endif
18711+
18712 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
18713 jnz sysenter_audit
18714 sysenter_do_call:
18715@@ -444,12 +616,24 @@ sysenter_do_call:
18716 testl $_TIF_ALLWORK_MASK, %ecx
18717 jne sysexit_audit
18718 sysenter_exit:
18719+
18720+#ifdef CONFIG_PAX_RANDKSTACK
18721+ pushl_cfi %eax
18722+ movl %esp, %eax
18723+ call pax_randomize_kstack
18724+ popl_cfi %eax
18725+#endif
18726+
18727+ pax_erase_kstack
18728+
18729 /* if something modifies registers it must also disable sysexit */
18730 movl PT_EIP(%esp), %edx
18731 movl PT_OLDESP(%esp), %ecx
18732 xorl %ebp,%ebp
18733 TRACE_IRQS_ON
18734 1: mov PT_FS(%esp), %fs
18735+2: mov PT_DS(%esp), %ds
18736+3: mov PT_ES(%esp), %es
18737 PTGS_TO_GS
18738 ENABLE_INTERRUPTS_SYSEXIT
18739
18740@@ -466,6 +650,9 @@ sysenter_audit:
18741 movl %eax,%edx /* 2nd arg: syscall number */
18742 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
18743 call __audit_syscall_entry
18744+
18745+ pax_erase_kstack
18746+
18747 pushl_cfi %ebx
18748 movl PT_EAX(%esp),%eax /* reload syscall number */
18749 jmp sysenter_do_call
18750@@ -491,10 +678,16 @@ sysexit_audit:
18751
18752 CFI_ENDPROC
18753 .pushsection .fixup,"ax"
18754-2: movl $0,PT_FS(%esp)
18755+4: movl $0,PT_FS(%esp)
18756+ jmp 1b
18757+5: movl $0,PT_DS(%esp)
18758+ jmp 1b
18759+6: movl $0,PT_ES(%esp)
18760 jmp 1b
18761 .popsection
18762- _ASM_EXTABLE(1b,2b)
18763+ _ASM_EXTABLE(1b,4b)
18764+ _ASM_EXTABLE(2b,5b)
18765+ _ASM_EXTABLE(3b,6b)
18766 PTGS_TO_GS_EX
18767 ENDPROC(ia32_sysenter_target)
18768
18769@@ -509,6 +702,11 @@ ENTRY(system_call)
18770 pushl_cfi %eax # save orig_eax
18771 SAVE_ALL
18772 GET_THREAD_INFO(%ebp)
18773+
18774+#ifdef CONFIG_PAX_RANDKSTACK
18775+ pax_erase_kstack
18776+#endif
18777+
18778 # system call tracing in operation / emulation
18779 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
18780 jnz syscall_trace_entry
18781@@ -527,6 +725,15 @@ syscall_exit:
18782 testl $_TIF_ALLWORK_MASK, %ecx # current->work
18783 jne syscall_exit_work
18784
18785+restore_all_pax:
18786+
18787+#ifdef CONFIG_PAX_RANDKSTACK
18788+ movl %esp, %eax
18789+ call pax_randomize_kstack
18790+#endif
18791+
18792+ pax_erase_kstack
18793+
18794 restore_all:
18795 TRACE_IRQS_IRET
18796 restore_all_notrace:
18797@@ -583,14 +790,34 @@ ldt_ss:
18798 * compensating for the offset by changing to the ESPFIX segment with
18799 * a base address that matches for the difference.
18800 */
18801-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
18802+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
18803 mov %esp, %edx /* load kernel esp */
18804 mov PT_OLDESP(%esp), %eax /* load userspace esp */
18805 mov %dx, %ax /* eax: new kernel esp */
18806 sub %eax, %edx /* offset (low word is 0) */
18807+#ifdef CONFIG_SMP
18808+ movl PER_CPU_VAR(cpu_number), %ebx
18809+ shll $PAGE_SHIFT_asm, %ebx
18810+ addl $cpu_gdt_table, %ebx
18811+#else
18812+ movl $cpu_gdt_table, %ebx
18813+#endif
18814 shr $16, %edx
18815- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
18816- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
18817+
18818+#ifdef CONFIG_PAX_KERNEXEC
18819+ mov %cr0, %esi
18820+ btr $16, %esi
18821+ mov %esi, %cr0
18822+#endif
18823+
18824+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
18825+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
18826+
18827+#ifdef CONFIG_PAX_KERNEXEC
18828+ bts $16, %esi
18829+ mov %esi, %cr0
18830+#endif
18831+
18832 pushl_cfi $__ESPFIX_SS
18833 pushl_cfi %eax /* new kernel esp */
18834 /* Disable interrupts, but do not irqtrace this section: we
18835@@ -619,20 +846,18 @@ work_resched:
18836 movl TI_flags(%ebp), %ecx
18837 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
18838 # than syscall tracing?
18839- jz restore_all
18840+ jz restore_all_pax
18841 testb $_TIF_NEED_RESCHED, %cl
18842 jnz work_resched
18843
18844 work_notifysig: # deal with pending signals and
18845 # notify-resume requests
18846+ movl %esp, %eax
18847 #ifdef CONFIG_VM86
18848 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
18849- movl %esp, %eax
18850 jne work_notifysig_v86 # returning to kernel-space or
18851 # vm86-space
18852 1:
18853-#else
18854- movl %esp, %eax
18855 #endif
18856 TRACE_IRQS_ON
18857 ENABLE_INTERRUPTS(CLBR_NONE)
18858@@ -653,7 +878,7 @@ work_notifysig_v86:
18859 movl %eax, %esp
18860 jmp 1b
18861 #endif
18862-END(work_pending)
18863+ENDPROC(work_pending)
18864
18865 # perform syscall exit tracing
18866 ALIGN
18867@@ -661,11 +886,14 @@ syscall_trace_entry:
18868 movl $-ENOSYS,PT_EAX(%esp)
18869 movl %esp, %eax
18870 call syscall_trace_enter
18871+
18872+ pax_erase_kstack
18873+
18874 /* What it returned is what we'll actually use. */
18875 cmpl $(NR_syscalls), %eax
18876 jnae syscall_call
18877 jmp syscall_exit
18878-END(syscall_trace_entry)
18879+ENDPROC(syscall_trace_entry)
18880
18881 # perform syscall exit tracing
18882 ALIGN
18883@@ -678,21 +906,25 @@ syscall_exit_work:
18884 movl %esp, %eax
18885 call syscall_trace_leave
18886 jmp resume_userspace
18887-END(syscall_exit_work)
18888+ENDPROC(syscall_exit_work)
18889 CFI_ENDPROC
18890
18891 RING0_INT_FRAME # can't unwind into user space anyway
18892 syscall_fault:
18893+#ifdef CONFIG_PAX_MEMORY_UDEREF
18894+ push %ss
18895+ pop %ds
18896+#endif
18897 ASM_CLAC
18898 GET_THREAD_INFO(%ebp)
18899 movl $-EFAULT,PT_EAX(%esp)
18900 jmp resume_userspace
18901-END(syscall_fault)
18902+ENDPROC(syscall_fault)
18903
18904 syscall_badsys:
18905 movl $-ENOSYS,PT_EAX(%esp)
18906 jmp resume_userspace
18907-END(syscall_badsys)
18908+ENDPROC(syscall_badsys)
18909 CFI_ENDPROC
18910 /*
18911 * End of kprobes section
18912@@ -753,8 +985,15 @@ PTREGSCALL1(vm86old)
18913 * normal stack and adjusts ESP with the matching offset.
18914 */
18915 /* fixup the stack */
18916- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
18917- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
18918+#ifdef CONFIG_SMP
18919+ movl PER_CPU_VAR(cpu_number), %ebx
18920+ shll $PAGE_SHIFT_asm, %ebx
18921+ addl $cpu_gdt_table, %ebx
18922+#else
18923+ movl $cpu_gdt_table, %ebx
18924+#endif
18925+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
18926+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
18927 shl $16, %eax
18928 addl %esp, %eax /* the adjusted stack pointer */
18929 pushl_cfi $__KERNEL_DS
18930@@ -807,7 +1046,7 @@ vector=vector+1
18931 .endr
18932 2: jmp common_interrupt
18933 .endr
18934-END(irq_entries_start)
18935+ENDPROC(irq_entries_start)
18936
18937 .previous
18938 END(interrupt)
18939@@ -858,7 +1097,7 @@ ENTRY(coprocessor_error)
18940 pushl_cfi $do_coprocessor_error
18941 jmp error_code
18942 CFI_ENDPROC
18943-END(coprocessor_error)
18944+ENDPROC(coprocessor_error)
18945
18946 ENTRY(simd_coprocessor_error)
18947 RING0_INT_FRAME
18948@@ -880,7 +1119,7 @@ ENTRY(simd_coprocessor_error)
18949 #endif
18950 jmp error_code
18951 CFI_ENDPROC
18952-END(simd_coprocessor_error)
18953+ENDPROC(simd_coprocessor_error)
18954
18955 ENTRY(device_not_available)
18956 RING0_INT_FRAME
18957@@ -889,18 +1128,18 @@ ENTRY(device_not_available)
18958 pushl_cfi $do_device_not_available
18959 jmp error_code
18960 CFI_ENDPROC
18961-END(device_not_available)
18962+ENDPROC(device_not_available)
18963
18964 #ifdef CONFIG_PARAVIRT
18965 ENTRY(native_iret)
18966 iret
18967 _ASM_EXTABLE(native_iret, iret_exc)
18968-END(native_iret)
18969+ENDPROC(native_iret)
18970
18971 ENTRY(native_irq_enable_sysexit)
18972 sti
18973 sysexit
18974-END(native_irq_enable_sysexit)
18975+ENDPROC(native_irq_enable_sysexit)
18976 #endif
18977
18978 ENTRY(overflow)
18979@@ -910,7 +1149,7 @@ ENTRY(overflow)
18980 pushl_cfi $do_overflow
18981 jmp error_code
18982 CFI_ENDPROC
18983-END(overflow)
18984+ENDPROC(overflow)
18985
18986 ENTRY(bounds)
18987 RING0_INT_FRAME
18988@@ -919,7 +1158,7 @@ ENTRY(bounds)
18989 pushl_cfi $do_bounds
18990 jmp error_code
18991 CFI_ENDPROC
18992-END(bounds)
18993+ENDPROC(bounds)
18994
18995 ENTRY(invalid_op)
18996 RING0_INT_FRAME
18997@@ -928,7 +1167,7 @@ ENTRY(invalid_op)
18998 pushl_cfi $do_invalid_op
18999 jmp error_code
19000 CFI_ENDPROC
19001-END(invalid_op)
19002+ENDPROC(invalid_op)
19003
19004 ENTRY(coprocessor_segment_overrun)
19005 RING0_INT_FRAME
19006@@ -937,7 +1176,7 @@ ENTRY(coprocessor_segment_overrun)
19007 pushl_cfi $do_coprocessor_segment_overrun
19008 jmp error_code
19009 CFI_ENDPROC
19010-END(coprocessor_segment_overrun)
19011+ENDPROC(coprocessor_segment_overrun)
19012
19013 ENTRY(invalid_TSS)
19014 RING0_EC_FRAME
19015@@ -945,7 +1184,7 @@ ENTRY(invalid_TSS)
19016 pushl_cfi $do_invalid_TSS
19017 jmp error_code
19018 CFI_ENDPROC
19019-END(invalid_TSS)
19020+ENDPROC(invalid_TSS)
19021
19022 ENTRY(segment_not_present)
19023 RING0_EC_FRAME
19024@@ -953,7 +1192,7 @@ ENTRY(segment_not_present)
19025 pushl_cfi $do_segment_not_present
19026 jmp error_code
19027 CFI_ENDPROC
19028-END(segment_not_present)
19029+ENDPROC(segment_not_present)
19030
19031 ENTRY(stack_segment)
19032 RING0_EC_FRAME
19033@@ -961,7 +1200,7 @@ ENTRY(stack_segment)
19034 pushl_cfi $do_stack_segment
19035 jmp error_code
19036 CFI_ENDPROC
19037-END(stack_segment)
19038+ENDPROC(stack_segment)
19039
19040 ENTRY(alignment_check)
19041 RING0_EC_FRAME
19042@@ -969,7 +1208,7 @@ ENTRY(alignment_check)
19043 pushl_cfi $do_alignment_check
19044 jmp error_code
19045 CFI_ENDPROC
19046-END(alignment_check)
19047+ENDPROC(alignment_check)
19048
19049 ENTRY(divide_error)
19050 RING0_INT_FRAME
19051@@ -978,7 +1217,7 @@ ENTRY(divide_error)
19052 pushl_cfi $do_divide_error
19053 jmp error_code
19054 CFI_ENDPROC
19055-END(divide_error)
19056+ENDPROC(divide_error)
19057
19058 #ifdef CONFIG_X86_MCE
19059 ENTRY(machine_check)
19060@@ -988,7 +1227,7 @@ ENTRY(machine_check)
19061 pushl_cfi machine_check_vector
19062 jmp error_code
19063 CFI_ENDPROC
19064-END(machine_check)
19065+ENDPROC(machine_check)
19066 #endif
19067
19068 ENTRY(spurious_interrupt_bug)
19069@@ -998,7 +1237,7 @@ ENTRY(spurious_interrupt_bug)
19070 pushl_cfi $do_spurious_interrupt_bug
19071 jmp error_code
19072 CFI_ENDPROC
19073-END(spurious_interrupt_bug)
19074+ENDPROC(spurious_interrupt_bug)
19075 /*
19076 * End of kprobes section
19077 */
19078@@ -1101,7 +1340,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
19079
19080 ENTRY(mcount)
19081 ret
19082-END(mcount)
19083+ENDPROC(mcount)
19084
19085 ENTRY(ftrace_caller)
19086 cmpl $0, function_trace_stop
19087@@ -1134,7 +1373,7 @@ ftrace_graph_call:
19088 .globl ftrace_stub
19089 ftrace_stub:
19090 ret
19091-END(ftrace_caller)
19092+ENDPROC(ftrace_caller)
19093
19094 ENTRY(ftrace_regs_caller)
19095 pushf /* push flags before compare (in cs location) */
19096@@ -1235,7 +1474,7 @@ trace:
19097 popl %ecx
19098 popl %eax
19099 jmp ftrace_stub
19100-END(mcount)
19101+ENDPROC(mcount)
19102 #endif /* CONFIG_DYNAMIC_FTRACE */
19103 #endif /* CONFIG_FUNCTION_TRACER */
19104
19105@@ -1253,7 +1492,7 @@ ENTRY(ftrace_graph_caller)
19106 popl %ecx
19107 popl %eax
19108 ret
19109-END(ftrace_graph_caller)
19110+ENDPROC(ftrace_graph_caller)
19111
19112 .globl return_to_handler
19113 return_to_handler:
19114@@ -1309,15 +1548,18 @@ error_code:
19115 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
19116 REG_TO_PTGS %ecx
19117 SET_KERNEL_GS %ecx
19118- movl $(__USER_DS), %ecx
19119+ movl $(__KERNEL_DS), %ecx
19120 movl %ecx, %ds
19121 movl %ecx, %es
19122+
19123+ pax_enter_kernel
19124+
19125 TRACE_IRQS_OFF
19126 movl %esp,%eax # pt_regs pointer
19127 call *%edi
19128 jmp ret_from_exception
19129 CFI_ENDPROC
19130-END(page_fault)
19131+ENDPROC(page_fault)
19132
19133 /*
19134 * Debug traps and NMI can happen at the one SYSENTER instruction
19135@@ -1360,7 +1602,7 @@ debug_stack_correct:
19136 call do_debug
19137 jmp ret_from_exception
19138 CFI_ENDPROC
19139-END(debug)
19140+ENDPROC(debug)
19141
19142 /*
19143 * NMI is doubly nasty. It can happen _while_ we're handling
19144@@ -1398,6 +1640,9 @@ nmi_stack_correct:
19145 xorl %edx,%edx # zero error code
19146 movl %esp,%eax # pt_regs pointer
19147 call do_nmi
19148+
19149+ pax_exit_kernel
19150+
19151 jmp restore_all_notrace
19152 CFI_ENDPROC
19153
19154@@ -1434,12 +1679,15 @@ nmi_espfix_stack:
19155 FIXUP_ESPFIX_STACK # %eax == %esp
19156 xorl %edx,%edx # zero error code
19157 call do_nmi
19158+
19159+ pax_exit_kernel
19160+
19161 RESTORE_REGS
19162 lss 12+4(%esp), %esp # back to espfix stack
19163 CFI_ADJUST_CFA_OFFSET -24
19164 jmp irq_return
19165 CFI_ENDPROC
19166-END(nmi)
19167+ENDPROC(nmi)
19168
19169 ENTRY(int3)
19170 RING0_INT_FRAME
19171@@ -1452,14 +1700,14 @@ ENTRY(int3)
19172 call do_int3
19173 jmp ret_from_exception
19174 CFI_ENDPROC
19175-END(int3)
19176+ENDPROC(int3)
19177
19178 ENTRY(general_protection)
19179 RING0_EC_FRAME
19180 pushl_cfi $do_general_protection
19181 jmp error_code
19182 CFI_ENDPROC
19183-END(general_protection)
19184+ENDPROC(general_protection)
19185
19186 #ifdef CONFIG_KVM_GUEST
19187 ENTRY(async_page_fault)
19188@@ -1468,7 +1716,7 @@ ENTRY(async_page_fault)
19189 pushl_cfi $do_async_page_fault
19190 jmp error_code
19191 CFI_ENDPROC
19192-END(async_page_fault)
19193+ENDPROC(async_page_fault)
19194 #endif
19195
19196 /*
19197diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
19198index cb3c591..bc63707 100644
19199--- a/arch/x86/kernel/entry_64.S
19200+++ b/arch/x86/kernel/entry_64.S
19201@@ -59,6 +59,8 @@
19202 #include <asm/context_tracking.h>
19203 #include <asm/smap.h>
19204 #include <linux/err.h>
19205+#include <asm/pgtable.h>
19206+#include <asm/alternative-asm.h>
19207
19208 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
19209 #include <linux/elf-em.h>
19210@@ -80,8 +82,9 @@
19211 #ifdef CONFIG_DYNAMIC_FTRACE
19212
19213 ENTRY(function_hook)
19214+ pax_force_retaddr
19215 retq
19216-END(function_hook)
19217+ENDPROC(function_hook)
19218
19219 /* skip is set if stack has been adjusted */
19220 .macro ftrace_caller_setup skip=0
19221@@ -122,8 +125,9 @@ GLOBAL(ftrace_graph_call)
19222 #endif
19223
19224 GLOBAL(ftrace_stub)
19225+ pax_force_retaddr
19226 retq
19227-END(ftrace_caller)
19228+ENDPROC(ftrace_caller)
19229
19230 ENTRY(ftrace_regs_caller)
19231 /* Save the current flags before compare (in SS location)*/
19232@@ -191,7 +195,7 @@ ftrace_restore_flags:
19233 popfq
19234 jmp ftrace_stub
19235
19236-END(ftrace_regs_caller)
19237+ENDPROC(ftrace_regs_caller)
19238
19239
19240 #else /* ! CONFIG_DYNAMIC_FTRACE */
19241@@ -212,6 +216,7 @@ ENTRY(function_hook)
19242 #endif
19243
19244 GLOBAL(ftrace_stub)
19245+ pax_force_retaddr
19246 retq
19247
19248 trace:
19249@@ -225,12 +230,13 @@ trace:
19250 #endif
19251 subq $MCOUNT_INSN_SIZE, %rdi
19252
19253+ pax_force_fptr ftrace_trace_function
19254 call *ftrace_trace_function
19255
19256 MCOUNT_RESTORE_FRAME
19257
19258 jmp ftrace_stub
19259-END(function_hook)
19260+ENDPROC(function_hook)
19261 #endif /* CONFIG_DYNAMIC_FTRACE */
19262 #endif /* CONFIG_FUNCTION_TRACER */
19263
19264@@ -252,8 +258,9 @@ ENTRY(ftrace_graph_caller)
19265
19266 MCOUNT_RESTORE_FRAME
19267
19268+ pax_force_retaddr
19269 retq
19270-END(ftrace_graph_caller)
19271+ENDPROC(ftrace_graph_caller)
19272
19273 GLOBAL(return_to_handler)
19274 subq $24, %rsp
19275@@ -269,7 +276,9 @@ GLOBAL(return_to_handler)
19276 movq 8(%rsp), %rdx
19277 movq (%rsp), %rax
19278 addq $24, %rsp
19279+ pax_force_fptr %rdi
19280 jmp *%rdi
19281+ENDPROC(return_to_handler)
19282 #endif
19283
19284
19285@@ -284,6 +293,273 @@ ENTRY(native_usergs_sysret64)
19286 ENDPROC(native_usergs_sysret64)
19287 #endif /* CONFIG_PARAVIRT */
19288
19289+ .macro ljmpq sel, off
19290+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
19291+ .byte 0x48; ljmp *1234f(%rip)
19292+ .pushsection .rodata
19293+ .align 16
19294+ 1234: .quad \off; .word \sel
19295+ .popsection
19296+#else
19297+ pushq $\sel
19298+ pushq $\off
19299+ lretq
19300+#endif
19301+ .endm
19302+
19303+ .macro pax_enter_kernel
19304+ pax_set_fptr_mask
19305+#ifdef CONFIG_PAX_KERNEXEC
19306+ call pax_enter_kernel
19307+#endif
19308+ .endm
19309+
19310+ .macro pax_exit_kernel
19311+#ifdef CONFIG_PAX_KERNEXEC
19312+ call pax_exit_kernel
19313+#endif
19314+ .endm
19315+
19316+#ifdef CONFIG_PAX_KERNEXEC
19317+ENTRY(pax_enter_kernel)
19318+ pushq %rdi
19319+
19320+#ifdef CONFIG_PARAVIRT
19321+ PV_SAVE_REGS(CLBR_RDI)
19322+#endif
19323+
19324+ GET_CR0_INTO_RDI
19325+ bts $16,%rdi
19326+ jnc 3f
19327+ mov %cs,%edi
19328+ cmp $__KERNEL_CS,%edi
19329+ jnz 2f
19330+1:
19331+
19332+#ifdef CONFIG_PARAVIRT
19333+ PV_RESTORE_REGS(CLBR_RDI)
19334+#endif
19335+
19336+ popq %rdi
19337+ pax_force_retaddr
19338+ retq
19339+
19340+2: ljmpq __KERNEL_CS,1f
19341+3: ljmpq __KERNEXEC_KERNEL_CS,4f
19342+4: SET_RDI_INTO_CR0
19343+ jmp 1b
19344+ENDPROC(pax_enter_kernel)
19345+
19346+ENTRY(pax_exit_kernel)
19347+ pushq %rdi
19348+
19349+#ifdef CONFIG_PARAVIRT
19350+ PV_SAVE_REGS(CLBR_RDI)
19351+#endif
19352+
19353+ mov %cs,%rdi
19354+ cmp $__KERNEXEC_KERNEL_CS,%edi
19355+ jz 2f
19356+1:
19357+
19358+#ifdef CONFIG_PARAVIRT
19359+ PV_RESTORE_REGS(CLBR_RDI);
19360+#endif
19361+
19362+ popq %rdi
19363+ pax_force_retaddr
19364+ retq
19365+
19366+2: GET_CR0_INTO_RDI
19367+ btr $16,%rdi
19368+ ljmpq __KERNEL_CS,3f
19369+3: SET_RDI_INTO_CR0
19370+ jmp 1b
19371+ENDPROC(pax_exit_kernel)
19372+#endif
19373+
19374+ .macro pax_enter_kernel_user
19375+ pax_set_fptr_mask
19376+#ifdef CONFIG_PAX_MEMORY_UDEREF
19377+ call pax_enter_kernel_user
19378+#endif
19379+ .endm
19380+
19381+ .macro pax_exit_kernel_user
19382+#ifdef CONFIG_PAX_MEMORY_UDEREF
19383+ call pax_exit_kernel_user
19384+#endif
19385+#ifdef CONFIG_PAX_RANDKSTACK
19386+ pushq %rax
19387+ call pax_randomize_kstack
19388+ popq %rax
19389+#endif
19390+ .endm
19391+
19392+#ifdef CONFIG_PAX_MEMORY_UDEREF
19393+ENTRY(pax_enter_kernel_user)
19394+ pushq %rdi
19395+ pushq %rbx
19396+
19397+#ifdef CONFIG_PARAVIRT
19398+ PV_SAVE_REGS(CLBR_RDI)
19399+#endif
19400+
19401+ GET_CR3_INTO_RDI
19402+ mov %rdi,%rbx
19403+ add $__START_KERNEL_map,%rbx
19404+ sub phys_base(%rip),%rbx
19405+
19406+#ifdef CONFIG_PARAVIRT
19407+ pushq %rdi
19408+ cmpl $0, pv_info+PARAVIRT_enabled
19409+ jz 1f
19410+ i = 0
19411+ .rept USER_PGD_PTRS
19412+ mov i*8(%rbx),%rsi
19413+ mov $0,%sil
19414+ lea i*8(%rbx),%rdi
19415+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
19416+ i = i + 1
19417+ .endr
19418+ jmp 2f
19419+1:
19420+#endif
19421+
19422+ i = 0
19423+ .rept USER_PGD_PTRS
19424+ movb $0,i*8(%rbx)
19425+ i = i + 1
19426+ .endr
19427+
19428+#ifdef CONFIG_PARAVIRT
19429+2: popq %rdi
19430+#endif
19431+ SET_RDI_INTO_CR3
19432+
19433+#ifdef CONFIG_PAX_KERNEXEC
19434+ GET_CR0_INTO_RDI
19435+ bts $16,%rdi
19436+ SET_RDI_INTO_CR0
19437+#endif
19438+
19439+#ifdef CONFIG_PARAVIRT
19440+ PV_RESTORE_REGS(CLBR_RDI)
19441+#endif
19442+
19443+ popq %rbx
19444+ popq %rdi
19445+ pax_force_retaddr
19446+ retq
19447+ENDPROC(pax_enter_kernel_user)
19448+
19449+ENTRY(pax_exit_kernel_user)
19450+ push %rdi
19451+
19452+#ifdef CONFIG_PARAVIRT
19453+ pushq %rbx
19454+ PV_SAVE_REGS(CLBR_RDI)
19455+#endif
19456+
19457+#ifdef CONFIG_PAX_KERNEXEC
19458+ GET_CR0_INTO_RDI
19459+ btr $16,%rdi
19460+ SET_RDI_INTO_CR0
19461+#endif
19462+
19463+ GET_CR3_INTO_RDI
19464+ add $__START_KERNEL_map,%rdi
19465+ sub phys_base(%rip),%rdi
19466+
19467+#ifdef CONFIG_PARAVIRT
19468+ cmpl $0, pv_info+PARAVIRT_enabled
19469+ jz 1f
19470+ mov %rdi,%rbx
19471+ i = 0
19472+ .rept USER_PGD_PTRS
19473+ mov i*8(%rbx),%rsi
19474+ mov $0x67,%sil
19475+ lea i*8(%rbx),%rdi
19476+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
19477+ i = i + 1
19478+ .endr
19479+ jmp 2f
19480+1:
19481+#endif
19482+
19483+ i = 0
19484+ .rept USER_PGD_PTRS
19485+ movb $0x67,i*8(%rdi)
19486+ i = i + 1
19487+ .endr
19488+
19489+#ifdef CONFIG_PARAVIRT
19490+2: PV_RESTORE_REGS(CLBR_RDI)
19491+ popq %rbx
19492+#endif
19493+
19494+ popq %rdi
19495+ pax_force_retaddr
19496+ retq
19497+ENDPROC(pax_exit_kernel_user)
19498+#endif
19499+
19500+.macro pax_erase_kstack
19501+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
19502+ call pax_erase_kstack
19503+#endif
19504+.endm
19505+
19506+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
19507+ENTRY(pax_erase_kstack)
19508+ pushq %rdi
19509+ pushq %rcx
19510+ pushq %rax
19511+ pushq %r11
19512+
19513+ GET_THREAD_INFO(%r11)
19514+ mov TI_lowest_stack(%r11), %rdi
19515+ mov $-0xBEEF, %rax
19516+ std
19517+
19518+1: mov %edi, %ecx
19519+ and $THREAD_SIZE_asm - 1, %ecx
19520+ shr $3, %ecx
19521+ repne scasq
19522+ jecxz 2f
19523+
19524+ cmp $2*8, %ecx
19525+ jc 2f
19526+
19527+ mov $2*8, %ecx
19528+ repe scasq
19529+ jecxz 2f
19530+ jne 1b
19531+
19532+2: cld
19533+ mov %esp, %ecx
19534+ sub %edi, %ecx
19535+
19536+ cmp $THREAD_SIZE_asm, %rcx
19537+ jb 3f
19538+ ud2
19539+3:
19540+
19541+ shr $3, %ecx
19542+ rep stosq
19543+
19544+ mov TI_task_thread_sp0(%r11), %rdi
19545+ sub $256, %rdi
19546+ mov %rdi, TI_lowest_stack(%r11)
19547+
19548+ popq %r11
19549+ popq %rax
19550+ popq %rcx
19551+ popq %rdi
19552+ pax_force_retaddr
19553+ ret
19554+ENDPROC(pax_erase_kstack)
19555+#endif
19556
19557 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
19558 #ifdef CONFIG_TRACE_IRQFLAGS
19559@@ -375,8 +651,8 @@ ENDPROC(native_usergs_sysret64)
19560 .endm
19561
19562 .macro UNFAKE_STACK_FRAME
19563- addq $8*6, %rsp
19564- CFI_ADJUST_CFA_OFFSET -(6*8)
19565+ addq $8*6 + ARG_SKIP, %rsp
19566+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
19567 .endm
19568
19569 /*
19570@@ -463,7 +739,7 @@ ENDPROC(native_usergs_sysret64)
19571 movq %rsp, %rsi
19572
19573 leaq -RBP(%rsp),%rdi /* arg1 for handler */
19574- testl $3, CS-RBP(%rsi)
19575+ testb $3, CS-RBP(%rsi)
19576 je 1f
19577 SWAPGS
19578 /*
19579@@ -498,9 +774,10 @@ ENTRY(save_rest)
19580 movq_cfi r15, R15+16
19581 movq %r11, 8(%rsp) /* return address */
19582 FIXUP_TOP_OF_STACK %r11, 16
19583+ pax_force_retaddr
19584 ret
19585 CFI_ENDPROC
19586-END(save_rest)
19587+ENDPROC(save_rest)
19588
19589 /* save complete stack frame */
19590 .pushsection .kprobes.text, "ax"
19591@@ -529,9 +806,10 @@ ENTRY(save_paranoid)
19592 js 1f /* negative -> in kernel */
19593 SWAPGS
19594 xorl %ebx,%ebx
19595-1: ret
19596+1: pax_force_retaddr_bts
19597+ ret
19598 CFI_ENDPROC
19599-END(save_paranoid)
19600+ENDPROC(save_paranoid)
19601 .popsection
19602
19603 /*
19604@@ -553,7 +831,7 @@ ENTRY(ret_from_fork)
19605
19606 RESTORE_REST
19607
19608- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
19609+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
19610 jz 1f
19611
19612 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
19613@@ -571,7 +849,7 @@ ENTRY(ret_from_fork)
19614 RESTORE_REST
19615 jmp int_ret_from_sys_call
19616 CFI_ENDPROC
19617-END(ret_from_fork)
19618+ENDPROC(ret_from_fork)
19619
19620 /*
19621 * System call entry. Up to 6 arguments in registers are supported.
19622@@ -608,7 +886,7 @@ END(ret_from_fork)
19623 ENTRY(system_call)
19624 CFI_STARTPROC simple
19625 CFI_SIGNAL_FRAME
19626- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
19627+ CFI_DEF_CFA rsp,0
19628 CFI_REGISTER rip,rcx
19629 /*CFI_REGISTER rflags,r11*/
19630 SWAPGS_UNSAFE_STACK
19631@@ -621,16 +899,23 @@ GLOBAL(system_call_after_swapgs)
19632
19633 movq %rsp,PER_CPU_VAR(old_rsp)
19634 movq PER_CPU_VAR(kernel_stack),%rsp
19635+ SAVE_ARGS 8*6,0
19636+ pax_enter_kernel_user
19637+
19638+#ifdef CONFIG_PAX_RANDKSTACK
19639+ pax_erase_kstack
19640+#endif
19641+
19642 /*
19643 * No need to follow this irqs off/on section - it's straight
19644 * and short:
19645 */
19646 ENABLE_INTERRUPTS(CLBR_NONE)
19647- SAVE_ARGS 8,0
19648 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
19649 movq %rcx,RIP-ARGOFFSET(%rsp)
19650 CFI_REL_OFFSET rip,RIP-ARGOFFSET
19651- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
19652+ GET_THREAD_INFO(%rcx)
19653+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
19654 jnz tracesys
19655 system_call_fastpath:
19656 #if __SYSCALL_MASK == ~0
19657@@ -640,7 +925,7 @@ system_call_fastpath:
19658 cmpl $__NR_syscall_max,%eax
19659 #endif
19660 ja badsys
19661- movq %r10,%rcx
19662+ movq R10-ARGOFFSET(%rsp),%rcx
19663 call *sys_call_table(,%rax,8) # XXX: rip relative
19664 movq %rax,RAX-ARGOFFSET(%rsp)
19665 /*
19666@@ -654,10 +939,13 @@ sysret_check:
19667 LOCKDEP_SYS_EXIT
19668 DISABLE_INTERRUPTS(CLBR_NONE)
19669 TRACE_IRQS_OFF
19670- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
19671+ GET_THREAD_INFO(%rcx)
19672+ movl TI_flags(%rcx),%edx
19673 andl %edi,%edx
19674 jnz sysret_careful
19675 CFI_REMEMBER_STATE
19676+ pax_exit_kernel_user
19677+ pax_erase_kstack
19678 /*
19679 * sysretq will re-enable interrupts:
19680 */
19681@@ -709,14 +997,18 @@ badsys:
19682 * jump back to the normal fast path.
19683 */
19684 auditsys:
19685- movq %r10,%r9 /* 6th arg: 4th syscall arg */
19686+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
19687 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
19688 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
19689 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
19690 movq %rax,%rsi /* 2nd arg: syscall number */
19691 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
19692 call __audit_syscall_entry
19693+
19694+ pax_erase_kstack
19695+
19696 LOAD_ARGS 0 /* reload call-clobbered registers */
19697+ pax_set_fptr_mask
19698 jmp system_call_fastpath
19699
19700 /*
19701@@ -737,7 +1029,7 @@ sysret_audit:
19702 /* Do syscall tracing */
19703 tracesys:
19704 #ifdef CONFIG_AUDITSYSCALL
19705- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
19706+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
19707 jz auditsys
19708 #endif
19709 SAVE_REST
19710@@ -745,12 +1037,16 @@ tracesys:
19711 FIXUP_TOP_OF_STACK %rdi
19712 movq %rsp,%rdi
19713 call syscall_trace_enter
19714+
19715+ pax_erase_kstack
19716+
19717 /*
19718 * Reload arg registers from stack in case ptrace changed them.
19719 * We don't reload %rax because syscall_trace_enter() returned
19720 * the value it wants us to use in the table lookup.
19721 */
19722 LOAD_ARGS ARGOFFSET, 1
19723+ pax_set_fptr_mask
19724 RESTORE_REST
19725 #if __SYSCALL_MASK == ~0
19726 cmpq $__NR_syscall_max,%rax
19727@@ -759,7 +1055,7 @@ tracesys:
19728 cmpl $__NR_syscall_max,%eax
19729 #endif
19730 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
19731- movq %r10,%rcx /* fixup for C */
19732+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
19733 call *sys_call_table(,%rax,8)
19734 movq %rax,RAX-ARGOFFSET(%rsp)
19735 /* Use IRET because user could have changed frame */
19736@@ -780,7 +1076,9 @@ GLOBAL(int_with_check)
19737 andl %edi,%edx
19738 jnz int_careful
19739 andl $~TS_COMPAT,TI_status(%rcx)
19740- jmp retint_swapgs
19741+ pax_exit_kernel_user
19742+ pax_erase_kstack
19743+ jmp retint_swapgs_pax
19744
19745 /* Either reschedule or signal or syscall exit tracking needed. */
19746 /* First do a reschedule test. */
19747@@ -826,7 +1124,7 @@ int_restore_rest:
19748 TRACE_IRQS_OFF
19749 jmp int_with_check
19750 CFI_ENDPROC
19751-END(system_call)
19752+ENDPROC(system_call)
19753
19754 /*
19755 * Certain special system calls that need to save a complete full stack frame.
19756@@ -842,7 +1140,7 @@ ENTRY(\label)
19757 call \func
19758 jmp ptregscall_common
19759 CFI_ENDPROC
19760-END(\label)
19761+ENDPROC(\label)
19762 .endm
19763
19764 .macro FORK_LIKE func
19765@@ -856,9 +1154,10 @@ ENTRY(stub_\func)
19766 DEFAULT_FRAME 0 8 /* offset 8: return address */
19767 call sys_\func
19768 RESTORE_TOP_OF_STACK %r11, 8
19769+ pax_force_retaddr
19770 ret $REST_SKIP /* pop extended registers */
19771 CFI_ENDPROC
19772-END(stub_\func)
19773+ENDPROC(stub_\func)
19774 .endm
19775
19776 FORK_LIKE clone
19777@@ -875,9 +1174,10 @@ ENTRY(ptregscall_common)
19778 movq_cfi_restore R12+8, r12
19779 movq_cfi_restore RBP+8, rbp
19780 movq_cfi_restore RBX+8, rbx
19781+ pax_force_retaddr
19782 ret $REST_SKIP /* pop extended registers */
19783 CFI_ENDPROC
19784-END(ptregscall_common)
19785+ENDPROC(ptregscall_common)
19786
19787 ENTRY(stub_execve)
19788 CFI_STARTPROC
19789@@ -891,7 +1191,7 @@ ENTRY(stub_execve)
19790 RESTORE_REST
19791 jmp int_ret_from_sys_call
19792 CFI_ENDPROC
19793-END(stub_execve)
19794+ENDPROC(stub_execve)
19795
19796 /*
19797 * sigreturn is special because it needs to restore all registers on return.
19798@@ -909,7 +1209,7 @@ ENTRY(stub_rt_sigreturn)
19799 RESTORE_REST
19800 jmp int_ret_from_sys_call
19801 CFI_ENDPROC
19802-END(stub_rt_sigreturn)
19803+ENDPROC(stub_rt_sigreturn)
19804
19805 #ifdef CONFIG_X86_X32_ABI
19806 ENTRY(stub_x32_rt_sigreturn)
19807@@ -975,7 +1275,7 @@ vector=vector+1
19808 2: jmp common_interrupt
19809 .endr
19810 CFI_ENDPROC
19811-END(irq_entries_start)
19812+ENDPROC(irq_entries_start)
19813
19814 .previous
19815 END(interrupt)
19816@@ -995,6 +1295,16 @@ END(interrupt)
19817 subq $ORIG_RAX-RBP, %rsp
19818 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
19819 SAVE_ARGS_IRQ
19820+#ifdef CONFIG_PAX_MEMORY_UDEREF
19821+ testb $3, CS(%rdi)
19822+ jnz 1f
19823+ pax_enter_kernel
19824+ jmp 2f
19825+1: pax_enter_kernel_user
19826+2:
19827+#else
19828+ pax_enter_kernel
19829+#endif
19830 call \func
19831 .endm
19832
19833@@ -1027,7 +1337,7 @@ ret_from_intr:
19834
19835 exit_intr:
19836 GET_THREAD_INFO(%rcx)
19837- testl $3,CS-ARGOFFSET(%rsp)
19838+ testb $3,CS-ARGOFFSET(%rsp)
19839 je retint_kernel
19840
19841 /* Interrupt came from user space */
19842@@ -1049,12 +1359,16 @@ retint_swapgs: /* return to user-space */
19843 * The iretq could re-enable interrupts:
19844 */
19845 DISABLE_INTERRUPTS(CLBR_ANY)
19846+ pax_exit_kernel_user
19847+retint_swapgs_pax:
19848 TRACE_IRQS_IRETQ
19849 SWAPGS
19850 jmp restore_args
19851
19852 retint_restore_args: /* return to kernel space */
19853 DISABLE_INTERRUPTS(CLBR_ANY)
19854+ pax_exit_kernel
19855+ pax_force_retaddr (RIP-ARGOFFSET)
19856 /*
19857 * The iretq could re-enable interrupts:
19858 */
19859@@ -1137,7 +1451,7 @@ ENTRY(retint_kernel)
19860 #endif
19861
19862 CFI_ENDPROC
19863-END(common_interrupt)
19864+ENDPROC(common_interrupt)
19865 /*
19866 * End of kprobes section
19867 */
19868@@ -1155,7 +1469,7 @@ ENTRY(\sym)
19869 interrupt \do_sym
19870 jmp ret_from_intr
19871 CFI_ENDPROC
19872-END(\sym)
19873+ENDPROC(\sym)
19874 .endm
19875
19876 #ifdef CONFIG_SMP
19877@@ -1211,12 +1525,22 @@ ENTRY(\sym)
19878 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
19879 call error_entry
19880 DEFAULT_FRAME 0
19881+#ifdef CONFIG_PAX_MEMORY_UDEREF
19882+ testb $3, CS(%rsp)
19883+ jnz 1f
19884+ pax_enter_kernel
19885+ jmp 2f
19886+1: pax_enter_kernel_user
19887+2:
19888+#else
19889+ pax_enter_kernel
19890+#endif
19891 movq %rsp,%rdi /* pt_regs pointer */
19892 xorl %esi,%esi /* no error code */
19893 call \do_sym
19894 jmp error_exit /* %ebx: no swapgs flag */
19895 CFI_ENDPROC
19896-END(\sym)
19897+ENDPROC(\sym)
19898 .endm
19899
19900 .macro paranoidzeroentry sym do_sym
19901@@ -1229,15 +1553,25 @@ ENTRY(\sym)
19902 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
19903 call save_paranoid
19904 TRACE_IRQS_OFF
19905+#ifdef CONFIG_PAX_MEMORY_UDEREF
19906+ testb $3, CS(%rsp)
19907+ jnz 1f
19908+ pax_enter_kernel
19909+ jmp 2f
19910+1: pax_enter_kernel_user
19911+2:
19912+#else
19913+ pax_enter_kernel
19914+#endif
19915 movq %rsp,%rdi /* pt_regs pointer */
19916 xorl %esi,%esi /* no error code */
19917 call \do_sym
19918 jmp paranoid_exit /* %ebx: no swapgs flag */
19919 CFI_ENDPROC
19920-END(\sym)
19921+ENDPROC(\sym)
19922 .endm
19923
19924-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
19925+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
19926 .macro paranoidzeroentry_ist sym do_sym ist
19927 ENTRY(\sym)
19928 INTR_FRAME
19929@@ -1248,14 +1582,30 @@ ENTRY(\sym)
19930 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
19931 call save_paranoid
19932 TRACE_IRQS_OFF_DEBUG
19933+#ifdef CONFIG_PAX_MEMORY_UDEREF
19934+ testb $3, CS(%rsp)
19935+ jnz 1f
19936+ pax_enter_kernel
19937+ jmp 2f
19938+1: pax_enter_kernel_user
19939+2:
19940+#else
19941+ pax_enter_kernel
19942+#endif
19943 movq %rsp,%rdi /* pt_regs pointer */
19944 xorl %esi,%esi /* no error code */
19945+#ifdef CONFIG_SMP
19946+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
19947+ lea init_tss(%r12), %r12
19948+#else
19949+ lea init_tss(%rip), %r12
19950+#endif
19951 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
19952 call \do_sym
19953 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
19954 jmp paranoid_exit /* %ebx: no swapgs flag */
19955 CFI_ENDPROC
19956-END(\sym)
19957+ENDPROC(\sym)
19958 .endm
19959
19960 .macro errorentry sym do_sym
19961@@ -1267,13 +1617,23 @@ ENTRY(\sym)
19962 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
19963 call error_entry
19964 DEFAULT_FRAME 0
19965+#ifdef CONFIG_PAX_MEMORY_UDEREF
19966+ testb $3, CS(%rsp)
19967+ jnz 1f
19968+ pax_enter_kernel
19969+ jmp 2f
19970+1: pax_enter_kernel_user
19971+2:
19972+#else
19973+ pax_enter_kernel
19974+#endif
19975 movq %rsp,%rdi /* pt_regs pointer */
19976 movq ORIG_RAX(%rsp),%rsi /* get error code */
19977 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
19978 call \do_sym
19979 jmp error_exit /* %ebx: no swapgs flag */
19980 CFI_ENDPROC
19981-END(\sym)
19982+ENDPROC(\sym)
19983 .endm
19984
19985 /* error code is on the stack already */
19986@@ -1287,13 +1647,23 @@ ENTRY(\sym)
19987 call save_paranoid
19988 DEFAULT_FRAME 0
19989 TRACE_IRQS_OFF
19990+#ifdef CONFIG_PAX_MEMORY_UDEREF
19991+ testb $3, CS(%rsp)
19992+ jnz 1f
19993+ pax_enter_kernel
19994+ jmp 2f
19995+1: pax_enter_kernel_user
19996+2:
19997+#else
19998+ pax_enter_kernel
19999+#endif
20000 movq %rsp,%rdi /* pt_regs pointer */
20001 movq ORIG_RAX(%rsp),%rsi /* get error code */
20002 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
20003 call \do_sym
20004 jmp paranoid_exit /* %ebx: no swapgs flag */
20005 CFI_ENDPROC
20006-END(\sym)
20007+ENDPROC(\sym)
20008 .endm
20009
20010 zeroentry divide_error do_divide_error
20011@@ -1323,9 +1693,10 @@ gs_change:
20012 2: mfence /* workaround */
20013 SWAPGS
20014 popfq_cfi
20015+ pax_force_retaddr
20016 ret
20017 CFI_ENDPROC
20018-END(native_load_gs_index)
20019+ENDPROC(native_load_gs_index)
20020
20021 _ASM_EXTABLE(gs_change,bad_gs)
20022 .section .fixup,"ax"
20023@@ -1353,9 +1724,10 @@ ENTRY(call_softirq)
20024 CFI_DEF_CFA_REGISTER rsp
20025 CFI_ADJUST_CFA_OFFSET -8
20026 decl PER_CPU_VAR(irq_count)
20027+ pax_force_retaddr
20028 ret
20029 CFI_ENDPROC
20030-END(call_softirq)
20031+ENDPROC(call_softirq)
20032
20033 #ifdef CONFIG_XEN
20034 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
20035@@ -1393,7 +1765,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
20036 decl PER_CPU_VAR(irq_count)
20037 jmp error_exit
20038 CFI_ENDPROC
20039-END(xen_do_hypervisor_callback)
20040+ENDPROC(xen_do_hypervisor_callback)
20041
20042 /*
20043 * Hypervisor uses this for application faults while it executes.
20044@@ -1452,7 +1824,7 @@ ENTRY(xen_failsafe_callback)
20045 SAVE_ALL
20046 jmp error_exit
20047 CFI_ENDPROC
20048-END(xen_failsafe_callback)
20049+ENDPROC(xen_failsafe_callback)
20050
20051 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
20052 xen_hvm_callback_vector xen_evtchn_do_upcall
20053@@ -1501,16 +1873,31 @@ ENTRY(paranoid_exit)
20054 TRACE_IRQS_OFF_DEBUG
20055 testl %ebx,%ebx /* swapgs needed? */
20056 jnz paranoid_restore
20057- testl $3,CS(%rsp)
20058+ testb $3,CS(%rsp)
20059 jnz paranoid_userspace
20060+#ifdef CONFIG_PAX_MEMORY_UDEREF
20061+ pax_exit_kernel
20062+ TRACE_IRQS_IRETQ 0
20063+ SWAPGS_UNSAFE_STACK
20064+ RESTORE_ALL 8
20065+ pax_force_retaddr_bts
20066+ jmp irq_return
20067+#endif
20068 paranoid_swapgs:
20069+#ifdef CONFIG_PAX_MEMORY_UDEREF
20070+ pax_exit_kernel_user
20071+#else
20072+ pax_exit_kernel
20073+#endif
20074 TRACE_IRQS_IRETQ 0
20075 SWAPGS_UNSAFE_STACK
20076 RESTORE_ALL 8
20077 jmp irq_return
20078 paranoid_restore:
20079+ pax_exit_kernel
20080 TRACE_IRQS_IRETQ_DEBUG 0
20081 RESTORE_ALL 8
20082+ pax_force_retaddr_bts
20083 jmp irq_return
20084 paranoid_userspace:
20085 GET_THREAD_INFO(%rcx)
20086@@ -1539,7 +1926,7 @@ paranoid_schedule:
20087 TRACE_IRQS_OFF
20088 jmp paranoid_userspace
20089 CFI_ENDPROC
20090-END(paranoid_exit)
20091+ENDPROC(paranoid_exit)
20092
20093 /*
20094 * Exception entry point. This expects an error code/orig_rax on the stack.
20095@@ -1566,12 +1953,13 @@ ENTRY(error_entry)
20096 movq_cfi r14, R14+8
20097 movq_cfi r15, R15+8
20098 xorl %ebx,%ebx
20099- testl $3,CS+8(%rsp)
20100+ testb $3,CS+8(%rsp)
20101 je error_kernelspace
20102 error_swapgs:
20103 SWAPGS
20104 error_sti:
20105 TRACE_IRQS_OFF
20106+ pax_force_retaddr_bts
20107 ret
20108
20109 /*
20110@@ -1598,7 +1986,7 @@ bstep_iret:
20111 movq %rcx,RIP+8(%rsp)
20112 jmp error_swapgs
20113 CFI_ENDPROC
20114-END(error_entry)
20115+ENDPROC(error_entry)
20116
20117
20118 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
20119@@ -1618,7 +2006,7 @@ ENTRY(error_exit)
20120 jnz retint_careful
20121 jmp retint_swapgs
20122 CFI_ENDPROC
20123-END(error_exit)
20124+ENDPROC(error_exit)
20125
20126 /*
20127 * Test if a given stack is an NMI stack or not.
20128@@ -1676,9 +2064,11 @@ ENTRY(nmi)
20129 * If %cs was not the kernel segment, then the NMI triggered in user
20130 * space, which means it is definitely not nested.
20131 */
20132+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
20133+ je 1f
20134 cmpl $__KERNEL_CS, 16(%rsp)
20135 jne first_nmi
20136-
20137+1:
20138 /*
20139 * Check the special variable on the stack to see if NMIs are
20140 * executing.
20141@@ -1847,6 +2237,17 @@ end_repeat_nmi:
20142 */
20143 movq %cr2, %r12
20144
20145+#ifdef CONFIG_PAX_MEMORY_UDEREF
20146+ testb $3, CS(%rsp)
20147+ jnz 1f
20148+ pax_enter_kernel
20149+ jmp 2f
20150+1: pax_enter_kernel_user
20151+2:
20152+#else
20153+ pax_enter_kernel
20154+#endif
20155+
20156 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
20157 movq %rsp,%rdi
20158 movq $-1,%rsi
20159@@ -1862,23 +2263,34 @@ end_repeat_nmi:
20160 testl %ebx,%ebx /* swapgs needed? */
20161 jnz nmi_restore
20162 nmi_swapgs:
20163+#ifdef CONFIG_PAX_MEMORY_UDEREF
20164+ pax_exit_kernel_user
20165+#else
20166+ pax_exit_kernel
20167+#endif
20168 SWAPGS_UNSAFE_STACK
20169+ RESTORE_ALL 6*8
20170+ /* Clear the NMI executing stack variable */
20171+ movq $0, 5*8(%rsp)
20172+ jmp irq_return
20173 nmi_restore:
20174+ pax_exit_kernel
20175 /* Pop the extra iret frame at once */
20176 RESTORE_ALL 6*8
20177+ pax_force_retaddr_bts
20178
20179 /* Clear the NMI executing stack variable */
20180 movq $0, 5*8(%rsp)
20181 jmp irq_return
20182 CFI_ENDPROC
20183-END(nmi)
20184+ENDPROC(nmi)
20185
20186 ENTRY(ignore_sysret)
20187 CFI_STARTPROC
20188 mov $-ENOSYS,%eax
20189 sysret
20190 CFI_ENDPROC
20191-END(ignore_sysret)
20192+ENDPROC(ignore_sysret)
20193
20194 /*
20195 * End of kprobes section
20196diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
20197index 1d41402..af9a46a 100644
20198--- a/arch/x86/kernel/ftrace.c
20199+++ b/arch/x86/kernel/ftrace.c
20200@@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
20201 {
20202 unsigned char replaced[MCOUNT_INSN_SIZE];
20203
20204+ ip = ktla_ktva(ip);
20205+
20206 /*
20207 * Note: Due to modules and __init, code can
20208 * disappear and change, we need to protect against faulting
20209@@ -227,7 +229,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
20210 unsigned char old[MCOUNT_INSN_SIZE], *new;
20211 int ret;
20212
20213- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
20214+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
20215 new = ftrace_call_replace(ip, (unsigned long)func);
20216
20217 /* See comment above by declaration of modifying_ftrace_code */
20218@@ -238,7 +240,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
20219 /* Also update the regs callback function */
20220 if (!ret) {
20221 ip = (unsigned long)(&ftrace_regs_call);
20222- memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE);
20223+ memcpy(old, ktla_ktva((void *)&ftrace_regs_call), MCOUNT_INSN_SIZE);
20224 new = ftrace_call_replace(ip, (unsigned long)func);
20225 ret = ftrace_modify_code(ip, old, new);
20226 }
20227@@ -279,7 +281,7 @@ static int ftrace_write(unsigned long ip, const char *val, int size)
20228 * kernel identity mapping to modify code.
20229 */
20230 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
20231- ip = (unsigned long)__va(__pa(ip));
20232+ ip = (unsigned long)__va(__pa(ktla_ktva(ip)));
20233
20234 return probe_kernel_write((void *)ip, val, size);
20235 }
20236@@ -289,7 +291,7 @@ static int add_break(unsigned long ip, const char *old)
20237 unsigned char replaced[MCOUNT_INSN_SIZE];
20238 unsigned char brk = BREAKPOINT_INSTRUCTION;
20239
20240- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
20241+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
20242 return -EFAULT;
20243
20244 /* Make sure it is what we expect it to be */
20245@@ -637,7 +639,7 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
20246 return ret;
20247
20248 fail_update:
20249- probe_kernel_write((void *)ip, &old_code[0], 1);
20250+ probe_kernel_write((void *)ktla_ktva(ip), &old_code[0], 1);
20251 goto out;
20252 }
20253
20254@@ -670,6 +672,8 @@ static int ftrace_mod_jmp(unsigned long ip,
20255 {
20256 unsigned char code[MCOUNT_INSN_SIZE];
20257
20258+ ip = ktla_ktva(ip);
20259+
20260 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
20261 return -EFAULT;
20262
20263diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
20264index c18f59d..9c0c9f6 100644
20265--- a/arch/x86/kernel/head32.c
20266+++ b/arch/x86/kernel/head32.c
20267@@ -18,6 +18,7 @@
20268 #include <asm/io_apic.h>
20269 #include <asm/bios_ebda.h>
20270 #include <asm/tlbflush.h>
20271+#include <asm/boot.h>
20272
20273 static void __init i386_default_early_setup(void)
20274 {
20275@@ -30,8 +31,7 @@ static void __init i386_default_early_setup(void)
20276
20277 void __init i386_start_kernel(void)
20278 {
20279- memblock_reserve(__pa_symbol(&_text),
20280- __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
20281+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop) - LOAD_PHYSICAL_ADDR);
20282
20283 #ifdef CONFIG_BLK_DEV_INITRD
20284 /* Reserve INITRD */
20285diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
20286index c8932c7..d56b622 100644
20287--- a/arch/x86/kernel/head_32.S
20288+++ b/arch/x86/kernel/head_32.S
20289@@ -26,6 +26,12 @@
20290 /* Physical address */
20291 #define pa(X) ((X) - __PAGE_OFFSET)
20292
20293+#ifdef CONFIG_PAX_KERNEXEC
20294+#define ta(X) (X)
20295+#else
20296+#define ta(X) ((X) - __PAGE_OFFSET)
20297+#endif
20298+
20299 /*
20300 * References to members of the new_cpu_data structure.
20301 */
20302@@ -55,11 +61,7 @@
20303 * and small than max_low_pfn, otherwise will waste some page table entries
20304 */
20305
20306-#if PTRS_PER_PMD > 1
20307-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
20308-#else
20309-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
20310-#endif
20311+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
20312
20313 /* Number of possible pages in the lowmem region */
20314 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
20315@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
20316 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
20317
20318 /*
20319+ * Real beginning of normal "text" segment
20320+ */
20321+ENTRY(stext)
20322+ENTRY(_stext)
20323+
20324+/*
20325 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
20326 * %esi points to the real-mode code as a 32-bit pointer.
20327 * CS and DS must be 4 GB flat segments, but we don't depend on
20328@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
20329 * can.
20330 */
20331 __HEAD
20332+
20333+#ifdef CONFIG_PAX_KERNEXEC
20334+ jmp startup_32
20335+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
20336+.fill PAGE_SIZE-5,1,0xcc
20337+#endif
20338+
20339 ENTRY(startup_32)
20340 movl pa(stack_start),%ecx
20341
20342@@ -106,6 +121,59 @@ ENTRY(startup_32)
20343 2:
20344 leal -__PAGE_OFFSET(%ecx),%esp
20345
20346+#ifdef CONFIG_SMP
20347+ movl $pa(cpu_gdt_table),%edi
20348+ movl $__per_cpu_load,%eax
20349+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
20350+ rorl $16,%eax
20351+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
20352+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
20353+ movl $__per_cpu_end - 1,%eax
20354+ subl $__per_cpu_start,%eax
20355+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
20356+#endif
20357+
20358+#ifdef CONFIG_PAX_MEMORY_UDEREF
20359+ movl $NR_CPUS,%ecx
20360+ movl $pa(cpu_gdt_table),%edi
20361+1:
20362+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
20363+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
20364+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
20365+ addl $PAGE_SIZE_asm,%edi
20366+ loop 1b
20367+#endif
20368+
20369+#ifdef CONFIG_PAX_KERNEXEC
20370+ movl $pa(boot_gdt),%edi
20371+ movl $__LOAD_PHYSICAL_ADDR,%eax
20372+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
20373+ rorl $16,%eax
20374+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
20375+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
20376+ rorl $16,%eax
20377+
20378+ ljmp $(__BOOT_CS),$1f
20379+1:
20380+
20381+ movl $NR_CPUS,%ecx
20382+ movl $pa(cpu_gdt_table),%edi
20383+ addl $__PAGE_OFFSET,%eax
20384+1:
20385+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
20386+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
20387+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
20388+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
20389+ rorl $16,%eax
20390+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
20391+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
20392+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
20393+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
20394+ rorl $16,%eax
20395+ addl $PAGE_SIZE_asm,%edi
20396+ loop 1b
20397+#endif
20398+
20399 /*
20400 * Clear BSS first so that there are no surprises...
20401 */
20402@@ -196,8 +264,11 @@ ENTRY(startup_32)
20403 movl %eax, pa(max_pfn_mapped)
20404
20405 /* Do early initialization of the fixmap area */
20406- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
20407- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
20408+#ifdef CONFIG_COMPAT_VDSO
20409+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
20410+#else
20411+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
20412+#endif
20413 #else /* Not PAE */
20414
20415 page_pde_offset = (__PAGE_OFFSET >> 20);
20416@@ -227,8 +298,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
20417 movl %eax, pa(max_pfn_mapped)
20418
20419 /* Do early initialization of the fixmap area */
20420- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
20421- movl %eax,pa(initial_page_table+0xffc)
20422+#ifdef CONFIG_COMPAT_VDSO
20423+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
20424+#else
20425+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
20426+#endif
20427 #endif
20428
20429 #ifdef CONFIG_PARAVIRT
20430@@ -242,9 +316,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
20431 cmpl $num_subarch_entries, %eax
20432 jae bad_subarch
20433
20434- movl pa(subarch_entries)(,%eax,4), %eax
20435- subl $__PAGE_OFFSET, %eax
20436- jmp *%eax
20437+ jmp *pa(subarch_entries)(,%eax,4)
20438
20439 bad_subarch:
20440 WEAK(lguest_entry)
20441@@ -256,10 +328,10 @@ WEAK(xen_entry)
20442 __INITDATA
20443
20444 subarch_entries:
20445- .long default_entry /* normal x86/PC */
20446- .long lguest_entry /* lguest hypervisor */
20447- .long xen_entry /* Xen hypervisor */
20448- .long default_entry /* Moorestown MID */
20449+ .long ta(default_entry) /* normal x86/PC */
20450+ .long ta(lguest_entry) /* lguest hypervisor */
20451+ .long ta(xen_entry) /* Xen hypervisor */
20452+ .long ta(default_entry) /* Moorestown MID */
20453 num_subarch_entries = (. - subarch_entries) / 4
20454 .previous
20455 #else
20456@@ -335,6 +407,7 @@ default_entry:
20457 movl pa(mmu_cr4_features),%eax
20458 movl %eax,%cr4
20459
20460+#ifdef CONFIG_X86_PAE
20461 testb $X86_CR4_PAE, %al # check if PAE is enabled
20462 jz 6f
20463
20464@@ -363,6 +436,9 @@ default_entry:
20465 /* Make changes effective */
20466 wrmsr
20467
20468+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
20469+#endif
20470+
20471 6:
20472
20473 /*
20474@@ -460,14 +536,20 @@ is386: movl $2,%ecx # set MP
20475 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
20476 movl %eax,%ss # after changing gdt.
20477
20478- movl $(__USER_DS),%eax # DS/ES contains default USER segment
20479+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
20480 movl %eax,%ds
20481 movl %eax,%es
20482
20483 movl $(__KERNEL_PERCPU), %eax
20484 movl %eax,%fs # set this cpu's percpu
20485
20486+#ifdef CONFIG_CC_STACKPROTECTOR
20487 movl $(__KERNEL_STACK_CANARY),%eax
20488+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
20489+ movl $(__USER_DS),%eax
20490+#else
20491+ xorl %eax,%eax
20492+#endif
20493 movl %eax,%gs
20494
20495 xorl %eax,%eax # Clear LDT
20496@@ -544,8 +626,11 @@ setup_once:
20497 * relocation. Manually set base address in stack canary
20498 * segment descriptor.
20499 */
20500- movl $gdt_page,%eax
20501+ movl $cpu_gdt_table,%eax
20502 movl $stack_canary,%ecx
20503+#ifdef CONFIG_SMP
20504+ addl $__per_cpu_load,%ecx
20505+#endif
20506 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
20507 shrl $16, %ecx
20508 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
20509@@ -576,7 +661,7 @@ ENDPROC(early_idt_handlers)
20510 /* This is global to keep gas from relaxing the jumps */
20511 ENTRY(early_idt_handler)
20512 cld
20513- cmpl $2,%ss:early_recursion_flag
20514+ cmpl $1,%ss:early_recursion_flag
20515 je hlt_loop
20516 incl %ss:early_recursion_flag
20517
20518@@ -614,8 +699,8 @@ ENTRY(early_idt_handler)
20519 pushl (20+6*4)(%esp) /* trapno */
20520 pushl $fault_msg
20521 call printk
20522-#endif
20523 call dump_stack
20524+#endif
20525 hlt_loop:
20526 hlt
20527 jmp hlt_loop
20528@@ -634,8 +719,11 @@ ENDPROC(early_idt_handler)
20529 /* This is the default interrupt "handler" :-) */
20530 ALIGN
20531 ignore_int:
20532- cld
20533 #ifdef CONFIG_PRINTK
20534+ cmpl $2,%ss:early_recursion_flag
20535+ je hlt_loop
20536+ incl %ss:early_recursion_flag
20537+ cld
20538 pushl %eax
20539 pushl %ecx
20540 pushl %edx
20541@@ -644,9 +732,6 @@ ignore_int:
20542 movl $(__KERNEL_DS),%eax
20543 movl %eax,%ds
20544 movl %eax,%es
20545- cmpl $2,early_recursion_flag
20546- je hlt_loop
20547- incl early_recursion_flag
20548 pushl 16(%esp)
20549 pushl 24(%esp)
20550 pushl 32(%esp)
20551@@ -680,29 +765,43 @@ ENTRY(setup_once_ref)
20552 /*
20553 * BSS section
20554 */
20555-__PAGE_ALIGNED_BSS
20556- .align PAGE_SIZE
20557 #ifdef CONFIG_X86_PAE
20558+.section .initial_pg_pmd,"a",@progbits
20559 initial_pg_pmd:
20560 .fill 1024*KPMDS,4,0
20561 #else
20562+.section .initial_page_table,"a",@progbits
20563 ENTRY(initial_page_table)
20564 .fill 1024,4,0
20565 #endif
20566+.section .initial_pg_fixmap,"a",@progbits
20567 initial_pg_fixmap:
20568 .fill 1024,4,0
20569+.section .empty_zero_page,"a",@progbits
20570 ENTRY(empty_zero_page)
20571 .fill 4096,1,0
20572+.section .swapper_pg_dir,"a",@progbits
20573 ENTRY(swapper_pg_dir)
20574+#ifdef CONFIG_X86_PAE
20575+ .fill 4,8,0
20576+#else
20577 .fill 1024,4,0
20578+#endif
20579+
20580+/*
20581+ * The IDT has to be page-aligned to simplify the Pentium
20582+ * F0 0F bug workaround.. We have a special link segment
20583+ * for this.
20584+ */
20585+.section .idt,"a",@progbits
20586+ENTRY(idt_table)
20587+ .fill 256,8,0
20588
20589 /*
20590 * This starts the data section.
20591 */
20592 #ifdef CONFIG_X86_PAE
20593-__PAGE_ALIGNED_DATA
20594- /* Page-aligned for the benefit of paravirt? */
20595- .align PAGE_SIZE
20596+.section .initial_page_table,"a",@progbits
20597 ENTRY(initial_page_table)
20598 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
20599 # if KPMDS == 3
20600@@ -721,12 +820,20 @@ ENTRY(initial_page_table)
20601 # error "Kernel PMDs should be 1, 2 or 3"
20602 # endif
20603 .align PAGE_SIZE /* needs to be page-sized too */
20604+
20605+#ifdef CONFIG_PAX_PER_CPU_PGD
20606+ENTRY(cpu_pgd)
20607+ .rept NR_CPUS
20608+ .fill 4,8,0
20609+ .endr
20610+#endif
20611+
20612 #endif
20613
20614 .data
20615 .balign 4
20616 ENTRY(stack_start)
20617- .long init_thread_union+THREAD_SIZE
20618+ .long init_thread_union+THREAD_SIZE-8
20619
20620 __INITRODATA
20621 int_msg:
20622@@ -754,7 +861,7 @@ fault_msg:
20623 * segment size, and 32-bit linear address value:
20624 */
20625
20626- .data
20627+.section .rodata,"a",@progbits
20628 .globl boot_gdt_descr
20629 .globl idt_descr
20630
20631@@ -763,7 +870,7 @@ fault_msg:
20632 .word 0 # 32 bit align gdt_desc.address
20633 boot_gdt_descr:
20634 .word __BOOT_DS+7
20635- .long boot_gdt - __PAGE_OFFSET
20636+ .long pa(boot_gdt)
20637
20638 .word 0 # 32-bit align idt_desc.address
20639 idt_descr:
20640@@ -774,7 +881,7 @@ idt_descr:
20641 .word 0 # 32 bit align gdt_desc.address
20642 ENTRY(early_gdt_descr)
20643 .word GDT_ENTRIES*8-1
20644- .long gdt_page /* Overwritten for secondary CPUs */
20645+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
20646
20647 /*
20648 * The boot_gdt must mirror the equivalent in setup.S and is
20649@@ -783,5 +890,65 @@ ENTRY(early_gdt_descr)
20650 .align L1_CACHE_BYTES
20651 ENTRY(boot_gdt)
20652 .fill GDT_ENTRY_BOOT_CS,8,0
20653- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
20654- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
20655+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
20656+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
20657+
20658+ .align PAGE_SIZE_asm
20659+ENTRY(cpu_gdt_table)
20660+ .rept NR_CPUS
20661+ .quad 0x0000000000000000 /* NULL descriptor */
20662+ .quad 0x0000000000000000 /* 0x0b reserved */
20663+ .quad 0x0000000000000000 /* 0x13 reserved */
20664+ .quad 0x0000000000000000 /* 0x1b reserved */
20665+
20666+#ifdef CONFIG_PAX_KERNEXEC
20667+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
20668+#else
20669+ .quad 0x0000000000000000 /* 0x20 unused */
20670+#endif
20671+
20672+ .quad 0x0000000000000000 /* 0x28 unused */
20673+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
20674+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
20675+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
20676+ .quad 0x0000000000000000 /* 0x4b reserved */
20677+ .quad 0x0000000000000000 /* 0x53 reserved */
20678+ .quad 0x0000000000000000 /* 0x5b reserved */
20679+
20680+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
20681+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
20682+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
20683+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
20684+
20685+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
20686+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
20687+
20688+ /*
20689+ * Segments used for calling PnP BIOS have byte granularity.
20690+ * The code segments and data segments have fixed 64k limits,
20691+ * the transfer segment sizes are set at run time.
20692+ */
20693+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
20694+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
20695+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
20696+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
20697+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
20698+
20699+ /*
20700+ * The APM segments have byte granularity and their bases
20701+ * are set at run time. All have 64k limits.
20702+ */
20703+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
20704+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
20705+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
20706+
20707+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
20708+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
20709+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
20710+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
20711+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
20712+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
20713+
20714+ /* Be sure this is zeroed to avoid false validations in Xen */
20715+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
20716+ .endr
20717diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
20718index 980053c..74d3b44 100644
20719--- a/arch/x86/kernel/head_64.S
20720+++ b/arch/x86/kernel/head_64.S
20721@@ -20,6 +20,8 @@
20722 #include <asm/processor-flags.h>
20723 #include <asm/percpu.h>
20724 #include <asm/nops.h>
20725+#include <asm/cpufeature.h>
20726+#include <asm/alternative-asm.h>
20727
20728 #ifdef CONFIG_PARAVIRT
20729 #include <asm/asm-offsets.h>
20730@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
20731 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
20732 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
20733 L3_START_KERNEL = pud_index(__START_KERNEL_map)
20734+L4_VMALLOC_START = pgd_index(VMALLOC_START)
20735+L3_VMALLOC_START = pud_index(VMALLOC_START)
20736+L4_VMALLOC_END = pgd_index(VMALLOC_END)
20737+L3_VMALLOC_END = pud_index(VMALLOC_END)
20738+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
20739+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
20740
20741 .text
20742 __HEAD
20743@@ -88,35 +96,23 @@ startup_64:
20744 */
20745 addq %rbp, init_level4_pgt + 0(%rip)
20746 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
20747+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
20748+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
20749+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
20750 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
20751
20752 addq %rbp, level3_ident_pgt + 0(%rip)
20753+#ifndef CONFIG_XEN
20754+ addq %rbp, level3_ident_pgt + 8(%rip)
20755+#endif
20756
20757- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
20758- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
20759+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
20760+
20761+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
20762+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
20763
20764 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
20765-
20766- /* Add an Identity mapping if I am above 1G */
20767- leaq _text(%rip), %rdi
20768- andq $PMD_PAGE_MASK, %rdi
20769-
20770- movq %rdi, %rax
20771- shrq $PUD_SHIFT, %rax
20772- andq $(PTRS_PER_PUD - 1), %rax
20773- jz ident_complete
20774-
20775- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
20776- leaq level3_ident_pgt(%rip), %rbx
20777- movq %rdx, 0(%rbx, %rax, 8)
20778-
20779- movq %rdi, %rax
20780- shrq $PMD_SHIFT, %rax
20781- andq $(PTRS_PER_PMD - 1), %rax
20782- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
20783- leaq level2_spare_pgt(%rip), %rbx
20784- movq %rdx, 0(%rbx, %rax, 8)
20785-ident_complete:
20786+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
20787
20788 /*
20789 * Fixup the kernel text+data virtual addresses. Note that
20790@@ -159,8 +155,8 @@ ENTRY(secondary_startup_64)
20791 * after the boot processor executes this code.
20792 */
20793
20794- /* Enable PAE mode and PGE */
20795- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
20796+ /* Enable PAE mode and PSE/PGE */
20797+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
20798 movq %rax, %cr4
20799
20800 /* Setup early boot stage 4 level pagetables. */
20801@@ -182,9 +178,17 @@ ENTRY(secondary_startup_64)
20802 movl $MSR_EFER, %ecx
20803 rdmsr
20804 btsl $_EFER_SCE, %eax /* Enable System Call */
20805- btl $20,%edi /* No Execute supported? */
20806+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
20807 jnc 1f
20808 btsl $_EFER_NX, %eax
20809+ leaq init_level4_pgt(%rip), %rdi
20810+#ifndef CONFIG_EFI
20811+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
20812+#endif
20813+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
20814+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
20815+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
20816+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
20817 1: wrmsr /* Make changes effective */
20818
20819 /* Setup cr0 */
20820@@ -246,6 +250,7 @@ ENTRY(secondary_startup_64)
20821 * jump. In addition we need to ensure %cs is set so we make this
20822 * a far return.
20823 */
20824+ pax_set_fptr_mask
20825 movq initial_code(%rip),%rax
20826 pushq $0 # fake return address to stop unwinder
20827 pushq $__KERNEL_CS # set correct cs
20828@@ -284,7 +289,7 @@ ENDPROC(start_cpu0)
20829 bad_address:
20830 jmp bad_address
20831
20832- .section ".init.text","ax"
20833+ __INIT
20834 .globl early_idt_handlers
20835 early_idt_handlers:
20836 # 104(%rsp) %rflags
20837@@ -343,7 +348,7 @@ ENTRY(early_idt_handler)
20838 call dump_stack
20839 #ifdef CONFIG_KALLSYMS
20840 leaq early_idt_ripmsg(%rip),%rdi
20841- movq 40(%rsp),%rsi # %rip again
20842+ movq 88(%rsp),%rsi # %rip again
20843 call __print_symbol
20844 #endif
20845 #endif /* EARLY_PRINTK */
20846@@ -363,11 +368,15 @@ ENTRY(early_idt_handler)
20847 addq $16,%rsp # drop vector number and error code
20848 decl early_recursion_flag(%rip)
20849 INTERRUPT_RETURN
20850+ .previous
20851
20852+ __INITDATA
20853 .balign 4
20854 early_recursion_flag:
20855 .long 0
20856+ .previous
20857
20858+ .section .rodata,"a",@progbits
20859 #ifdef CONFIG_EARLY_PRINTK
20860 early_idt_msg:
20861 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
20862@@ -376,6 +385,7 @@ early_idt_ripmsg:
20863 #endif /* CONFIG_EARLY_PRINTK */
20864 .previous
20865
20866+ .section .rodata,"a",@progbits
20867 #define NEXT_PAGE(name) \
20868 .balign PAGE_SIZE; \
20869 ENTRY(name)
20870@@ -388,7 +398,6 @@ ENTRY(name)
20871 i = i + 1 ; \
20872 .endr
20873
20874- .data
20875 /*
20876 * This default setting generates an ident mapping at address 0x100000
20877 * and a mapping for the kernel that precisely maps virtual address
20878@@ -399,13 +408,41 @@ NEXT_PAGE(init_level4_pgt)
20879 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
20880 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
20881 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
20882+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
20883+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
20884+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
20885+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
20886+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
20887+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
20888 .org init_level4_pgt + L4_START_KERNEL*8, 0
20889 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
20890 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
20891
20892+#ifdef CONFIG_PAX_PER_CPU_PGD
20893+NEXT_PAGE(cpu_pgd)
20894+ .rept NR_CPUS
20895+ .fill 512,8,0
20896+ .endr
20897+#endif
20898+
20899 NEXT_PAGE(level3_ident_pgt)
20900 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
20901+#ifdef CONFIG_XEN
20902 .fill 511,8,0
20903+#else
20904+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
20905+ .fill 510,8,0
20906+#endif
20907+
20908+NEXT_PAGE(level3_vmalloc_start_pgt)
20909+ .fill 512,8,0
20910+
20911+NEXT_PAGE(level3_vmalloc_end_pgt)
20912+ .fill 512,8,0
20913+
20914+NEXT_PAGE(level3_vmemmap_pgt)
20915+ .fill L3_VMEMMAP_START,8,0
20916+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
20917
20918 NEXT_PAGE(level3_kernel_pgt)
20919 .fill L3_START_KERNEL,8,0
20920@@ -413,20 +450,23 @@ NEXT_PAGE(level3_kernel_pgt)
20921 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
20922 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
20923
20924+NEXT_PAGE(level2_vmemmap_pgt)
20925+ .fill 512,8,0
20926+
20927 NEXT_PAGE(level2_fixmap_pgt)
20928- .fill 506,8,0
20929- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
20930- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
20931- .fill 5,8,0
20932+ .fill 507,8,0
20933+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
20934+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
20935+ .fill 4,8,0
20936
20937-NEXT_PAGE(level1_fixmap_pgt)
20938+NEXT_PAGE(level1_vsyscall_pgt)
20939 .fill 512,8,0
20940
20941-NEXT_PAGE(level2_ident_pgt)
20942- /* Since I easily can, map the first 1G.
20943+ /* Since I easily can, map the first 2G.
20944 * Don't set NX because code runs from these pages.
20945 */
20946- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
20947+NEXT_PAGE(level2_ident_pgt)
20948+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
20949
20950 NEXT_PAGE(level2_kernel_pgt)
20951 /*
20952@@ -439,37 +479,59 @@ NEXT_PAGE(level2_kernel_pgt)
20953 * If you want to increase this then increase MODULES_VADDR
20954 * too.)
20955 */
20956- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
20957- KERNEL_IMAGE_SIZE/PMD_SIZE)
20958-
20959-NEXT_PAGE(level2_spare_pgt)
20960- .fill 512, 8, 0
20961+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
20962
20963 #undef PMDS
20964 #undef NEXT_PAGE
20965
20966- .data
20967+ .align PAGE_SIZE
20968+ENTRY(cpu_gdt_table)
20969+ .rept NR_CPUS
20970+ .quad 0x0000000000000000 /* NULL descriptor */
20971+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
20972+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
20973+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
20974+ .quad 0x00cffb000000ffff /* __USER32_CS */
20975+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
20976+ .quad 0x00affb000000ffff /* __USER_CS */
20977+
20978+#ifdef CONFIG_PAX_KERNEXEC
20979+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
20980+#else
20981+ .quad 0x0 /* unused */
20982+#endif
20983+
20984+ .quad 0,0 /* TSS */
20985+ .quad 0,0 /* LDT */
20986+ .quad 0,0,0 /* three TLS descriptors */
20987+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
20988+ /* asm/segment.h:GDT_ENTRIES must match this */
20989+
20990+ /* zero the remaining page */
20991+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
20992+ .endr
20993+
20994 .align 16
20995 .globl early_gdt_descr
20996 early_gdt_descr:
20997 .word GDT_ENTRIES*8-1
20998 early_gdt_descr_base:
20999- .quad INIT_PER_CPU_VAR(gdt_page)
21000+ .quad cpu_gdt_table
21001
21002 ENTRY(phys_base)
21003 /* This must match the first entry in level2_kernel_pgt */
21004 .quad 0x0000000000000000
21005
21006 #include "../../x86/xen/xen-head.S"
21007-
21008- .section .bss, "aw", @nobits
21009+
21010+ .section .rodata,"a",@progbits
21011 .align L1_CACHE_BYTES
21012 ENTRY(idt_table)
21013- .skip IDT_ENTRIES * 16
21014+ .fill 512,8,0
21015
21016 .align L1_CACHE_BYTES
21017 ENTRY(nmi_idt_table)
21018- .skip IDT_ENTRIES * 16
21019+ .fill 512,8,0
21020
21021 __PAGE_ALIGNED_BSS
21022 .align PAGE_SIZE
21023diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
21024index 9c3bd4a..e1d9b35 100644
21025--- a/arch/x86/kernel/i386_ksyms_32.c
21026+++ b/arch/x86/kernel/i386_ksyms_32.c
21027@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
21028 EXPORT_SYMBOL(cmpxchg8b_emu);
21029 #endif
21030
21031+EXPORT_SYMBOL_GPL(cpu_gdt_table);
21032+
21033 /* Networking helper routines. */
21034 EXPORT_SYMBOL(csum_partial_copy_generic);
21035+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
21036+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
21037
21038 EXPORT_SYMBOL(__get_user_1);
21039 EXPORT_SYMBOL(__get_user_2);
21040@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
21041
21042 EXPORT_SYMBOL(csum_partial);
21043 EXPORT_SYMBOL(empty_zero_page);
21044+
21045+#ifdef CONFIG_PAX_KERNEXEC
21046+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
21047+#endif
21048diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
21049index 245a71d..89d9ce4 100644
21050--- a/arch/x86/kernel/i387.c
21051+++ b/arch/x86/kernel/i387.c
21052@@ -55,7 +55,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
21053 static inline bool interrupted_user_mode(void)
21054 {
21055 struct pt_regs *regs = get_irq_regs();
21056- return regs && user_mode_vm(regs);
21057+ return regs && user_mode(regs);
21058 }
21059
21060 /*
21061diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
21062index 9a5c460..b332a4b 100644
21063--- a/arch/x86/kernel/i8259.c
21064+++ b/arch/x86/kernel/i8259.c
21065@@ -209,7 +209,7 @@ spurious_8259A_irq:
21066 "spurious 8259A interrupt: IRQ%d.\n", irq);
21067 spurious_irq_mask |= irqmask;
21068 }
21069- atomic_inc(&irq_err_count);
21070+ atomic_inc_unchecked(&irq_err_count);
21071 /*
21072 * Theoretically we do not have to handle this IRQ,
21073 * but in Linux this does not cause problems and is
21074@@ -333,14 +333,16 @@ static void init_8259A(int auto_eoi)
21075 /* (slave's support for AEOI in flat mode is to be investigated) */
21076 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
21077
21078+ pax_open_kernel();
21079 if (auto_eoi)
21080 /*
21081 * In AEOI mode we just have to mask the interrupt
21082 * when acking.
21083 */
21084- i8259A_chip.irq_mask_ack = disable_8259A_irq;
21085+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
21086 else
21087- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
21088+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
21089+ pax_close_kernel();
21090
21091 udelay(100); /* wait for 8259A to initialize */
21092
21093diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
21094index a979b5b..1d6db75 100644
21095--- a/arch/x86/kernel/io_delay.c
21096+++ b/arch/x86/kernel/io_delay.c
21097@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
21098 * Quirk table for systems that misbehave (lock up, etc.) if port
21099 * 0x80 is used:
21100 */
21101-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
21102+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
21103 {
21104 .callback = dmi_io_delay_0xed_port,
21105 .ident = "Compaq Presario V6000",
21106diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
21107index 8c96897..be66bfa 100644
21108--- a/arch/x86/kernel/ioport.c
21109+++ b/arch/x86/kernel/ioport.c
21110@@ -6,6 +6,7 @@
21111 #include <linux/sched.h>
21112 #include <linux/kernel.h>
21113 #include <linux/capability.h>
21114+#include <linux/security.h>
21115 #include <linux/errno.h>
21116 #include <linux/types.h>
21117 #include <linux/ioport.h>
21118@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
21119
21120 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
21121 return -EINVAL;
21122+#ifdef CONFIG_GRKERNSEC_IO
21123+ if (turn_on && grsec_disable_privio) {
21124+ gr_handle_ioperm();
21125+ return -EPERM;
21126+ }
21127+#endif
21128 if (turn_on && !capable(CAP_SYS_RAWIO))
21129 return -EPERM;
21130
21131@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
21132 * because the ->io_bitmap_max value must match the bitmap
21133 * contents:
21134 */
21135- tss = &per_cpu(init_tss, get_cpu());
21136+ tss = init_tss + get_cpu();
21137
21138 if (turn_on)
21139 bitmap_clear(t->io_bitmap_ptr, from, num);
21140@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
21141 return -EINVAL;
21142 /* Trying to gain more privileges? */
21143 if (level > old) {
21144+#ifdef CONFIG_GRKERNSEC_IO
21145+ if (grsec_disable_privio) {
21146+ gr_handle_iopl();
21147+ return -EPERM;
21148+ }
21149+#endif
21150 if (!capable(CAP_SYS_RAWIO))
21151 return -EPERM;
21152 }
21153diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
21154index e4595f1..ee3bfb8 100644
21155--- a/arch/x86/kernel/irq.c
21156+++ b/arch/x86/kernel/irq.c
21157@@ -18,7 +18,7 @@
21158 #include <asm/mce.h>
21159 #include <asm/hw_irq.h>
21160
21161-atomic_t irq_err_count;
21162+atomic_unchecked_t irq_err_count;
21163
21164 /* Function pointer for generic interrupt vector handling */
21165 void (*x86_platform_ipi_callback)(void) = NULL;
21166@@ -122,9 +122,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
21167 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
21168 seq_printf(p, " Machine check polls\n");
21169 #endif
21170- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
21171+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
21172 #if defined(CONFIG_X86_IO_APIC)
21173- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
21174+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
21175 #endif
21176 return 0;
21177 }
21178@@ -164,10 +164,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
21179
21180 u64 arch_irq_stat(void)
21181 {
21182- u64 sum = atomic_read(&irq_err_count);
21183+ u64 sum = atomic_read_unchecked(&irq_err_count);
21184
21185 #ifdef CONFIG_X86_IO_APIC
21186- sum += atomic_read(&irq_mis_count);
21187+ sum += atomic_read_unchecked(&irq_mis_count);
21188 #endif
21189 return sum;
21190 }
21191diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
21192index 344faf8..355f60d 100644
21193--- a/arch/x86/kernel/irq_32.c
21194+++ b/arch/x86/kernel/irq_32.c
21195@@ -39,7 +39,7 @@ static int check_stack_overflow(void)
21196 __asm__ __volatile__("andl %%esp,%0" :
21197 "=r" (sp) : "0" (THREAD_SIZE - 1));
21198
21199- return sp < (sizeof(struct thread_info) + STACK_WARN);
21200+ return sp < STACK_WARN;
21201 }
21202
21203 static void print_stack_overflow(void)
21204@@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
21205 * per-CPU IRQ handling contexts (thread information and stack)
21206 */
21207 union irq_ctx {
21208- struct thread_info tinfo;
21209- u32 stack[THREAD_SIZE/sizeof(u32)];
21210+ unsigned long previous_esp;
21211+ u32 stack[THREAD_SIZE/sizeof(u32)];
21212 } __attribute__((aligned(THREAD_SIZE)));
21213
21214 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
21215@@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
21216 static inline int
21217 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
21218 {
21219- union irq_ctx *curctx, *irqctx;
21220+ union irq_ctx *irqctx;
21221 u32 *isp, arg1, arg2;
21222
21223- curctx = (union irq_ctx *) current_thread_info();
21224 irqctx = __this_cpu_read(hardirq_ctx);
21225
21226 /*
21227@@ -92,16 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
21228 * handler) we can't do that and just have to keep using the
21229 * current stack (which is the irq stack already after all)
21230 */
21231- if (unlikely(curctx == irqctx))
21232+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
21233 return 0;
21234
21235 /* build the stack frame on the IRQ stack */
21236- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
21237- irqctx->tinfo.task = curctx->tinfo.task;
21238- irqctx->tinfo.previous_esp = current_stack_pointer;
21239+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
21240+ irqctx->previous_esp = current_stack_pointer;
21241
21242- /* Copy the preempt_count so that the [soft]irq checks work. */
21243- irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
21244+#ifdef CONFIG_PAX_MEMORY_UDEREF
21245+ __set_fs(MAKE_MM_SEG(0));
21246+#endif
21247
21248 if (unlikely(overflow))
21249 call_on_stack(print_stack_overflow, isp);
21250@@ -113,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
21251 : "0" (irq), "1" (desc), "2" (isp),
21252 "D" (desc->handle_irq)
21253 : "memory", "cc", "ecx");
21254+
21255+#ifdef CONFIG_PAX_MEMORY_UDEREF
21256+ __set_fs(current_thread_info()->addr_limit);
21257+#endif
21258+
21259 return 1;
21260 }
21261
21262@@ -121,29 +125,14 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
21263 */
21264 void __cpuinit irq_ctx_init(int cpu)
21265 {
21266- union irq_ctx *irqctx;
21267-
21268 if (per_cpu(hardirq_ctx, cpu))
21269 return;
21270
21271- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
21272- THREADINFO_GFP,
21273- THREAD_SIZE_ORDER));
21274- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
21275- irqctx->tinfo.cpu = cpu;
21276- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
21277- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
21278-
21279- per_cpu(hardirq_ctx, cpu) = irqctx;
21280-
21281- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
21282- THREADINFO_GFP,
21283- THREAD_SIZE_ORDER));
21284- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
21285- irqctx->tinfo.cpu = cpu;
21286- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
21287-
21288- per_cpu(softirq_ctx, cpu) = irqctx;
21289+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
21290+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
21291+
21292+ printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
21293+ cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
21294
21295 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
21296 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
21297@@ -152,7 +141,6 @@ void __cpuinit irq_ctx_init(int cpu)
21298 asmlinkage void do_softirq(void)
21299 {
21300 unsigned long flags;
21301- struct thread_info *curctx;
21302 union irq_ctx *irqctx;
21303 u32 *isp;
21304
21305@@ -162,15 +150,22 @@ asmlinkage void do_softirq(void)
21306 local_irq_save(flags);
21307
21308 if (local_softirq_pending()) {
21309- curctx = current_thread_info();
21310 irqctx = __this_cpu_read(softirq_ctx);
21311- irqctx->tinfo.task = curctx->task;
21312- irqctx->tinfo.previous_esp = current_stack_pointer;
21313+ irqctx->previous_esp = current_stack_pointer;
21314
21315 /* build the stack frame on the softirq stack */
21316- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
21317+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
21318+
21319+#ifdef CONFIG_PAX_MEMORY_UDEREF
21320+ __set_fs(MAKE_MM_SEG(0));
21321+#endif
21322
21323 call_on_stack(__do_softirq, isp);
21324+
21325+#ifdef CONFIG_PAX_MEMORY_UDEREF
21326+ __set_fs(current_thread_info()->addr_limit);
21327+#endif
21328+
21329 /*
21330 * Shouldn't happen, we returned above if in_interrupt():
21331 */
21332@@ -191,7 +186,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
21333 if (unlikely(!desc))
21334 return false;
21335
21336- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
21337+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
21338 if (unlikely(overflow))
21339 print_stack_overflow();
21340 desc->handle_irq(irq, desc);
21341diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
21342index d04d3ec..ea4b374 100644
21343--- a/arch/x86/kernel/irq_64.c
21344+++ b/arch/x86/kernel/irq_64.c
21345@@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
21346 u64 estack_top, estack_bottom;
21347 u64 curbase = (u64)task_stack_page(current);
21348
21349- if (user_mode_vm(regs))
21350+ if (user_mode(regs))
21351 return;
21352
21353 if (regs->sp >= curbase + sizeof(struct thread_info) +
21354diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
21355index dc1404b..bbc43e7 100644
21356--- a/arch/x86/kernel/kdebugfs.c
21357+++ b/arch/x86/kernel/kdebugfs.c
21358@@ -27,7 +27,7 @@ struct setup_data_node {
21359 u32 len;
21360 };
21361
21362-static ssize_t setup_data_read(struct file *file, char __user *user_buf,
21363+static ssize_t __size_overflow(3) setup_data_read(struct file *file, char __user *user_buf,
21364 size_t count, loff_t *ppos)
21365 {
21366 struct setup_data_node *node = file->private_data;
21367diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
21368index 836f832..a8bda67 100644
21369--- a/arch/x86/kernel/kgdb.c
21370+++ b/arch/x86/kernel/kgdb.c
21371@@ -127,11 +127,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
21372 #ifdef CONFIG_X86_32
21373 switch (regno) {
21374 case GDB_SS:
21375- if (!user_mode_vm(regs))
21376+ if (!user_mode(regs))
21377 *(unsigned long *)mem = __KERNEL_DS;
21378 break;
21379 case GDB_SP:
21380- if (!user_mode_vm(regs))
21381+ if (!user_mode(regs))
21382 *(unsigned long *)mem = kernel_stack_pointer(regs);
21383 break;
21384 case GDB_GS:
21385@@ -229,7 +229,10 @@ static void kgdb_correct_hw_break(void)
21386 bp->attr.bp_addr = breakinfo[breakno].addr;
21387 bp->attr.bp_len = breakinfo[breakno].len;
21388 bp->attr.bp_type = breakinfo[breakno].type;
21389- info->address = breakinfo[breakno].addr;
21390+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
21391+ info->address = ktla_ktva(breakinfo[breakno].addr);
21392+ else
21393+ info->address = breakinfo[breakno].addr;
21394 info->len = breakinfo[breakno].len;
21395 info->type = breakinfo[breakno].type;
21396 val = arch_install_hw_breakpoint(bp);
21397@@ -476,12 +479,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
21398 case 'k':
21399 /* clear the trace bit */
21400 linux_regs->flags &= ~X86_EFLAGS_TF;
21401- atomic_set(&kgdb_cpu_doing_single_step, -1);
21402+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
21403
21404 /* set the trace bit if we're stepping */
21405 if (remcomInBuffer[0] == 's') {
21406 linux_regs->flags |= X86_EFLAGS_TF;
21407- atomic_set(&kgdb_cpu_doing_single_step,
21408+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
21409 raw_smp_processor_id());
21410 }
21411
21412@@ -546,7 +549,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
21413
21414 switch (cmd) {
21415 case DIE_DEBUG:
21416- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
21417+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
21418 if (user_mode(regs))
21419 return single_step_cont(regs, args);
21420 break;
21421@@ -751,11 +754,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
21422 #endif /* CONFIG_DEBUG_RODATA */
21423
21424 bpt->type = BP_BREAKPOINT;
21425- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
21426+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
21427 BREAK_INSTR_SIZE);
21428 if (err)
21429 return err;
21430- err = probe_kernel_write((char *)bpt->bpt_addr,
21431+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
21432 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
21433 #ifdef CONFIG_DEBUG_RODATA
21434 if (!err)
21435@@ -768,7 +771,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
21436 return -EBUSY;
21437 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
21438 BREAK_INSTR_SIZE);
21439- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
21440+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
21441 if (err)
21442 return err;
21443 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
21444@@ -793,13 +796,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
21445 if (mutex_is_locked(&text_mutex))
21446 goto knl_write;
21447 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
21448- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
21449+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
21450 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
21451 goto knl_write;
21452 return err;
21453 knl_write:
21454 #endif /* CONFIG_DEBUG_RODATA */
21455- return probe_kernel_write((char *)bpt->bpt_addr,
21456+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
21457 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
21458 }
21459
21460diff --git a/arch/x86/kernel/kprobes-opt.c b/arch/x86/kernel/kprobes-opt.c
21461index c5e410e..ed5a7f0 100644
21462--- a/arch/x86/kernel/kprobes-opt.c
21463+++ b/arch/x86/kernel/kprobes-opt.c
21464@@ -338,7 +338,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
21465 * Verify if the address gap is in 2GB range, because this uses
21466 * a relative jump.
21467 */
21468- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
21469+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
21470 if (abs(rel) > 0x7fffffff)
21471 return -ERANGE;
21472
21473@@ -353,16 +353,16 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
21474 op->optinsn.size = ret;
21475
21476 /* Copy arch-dep-instance from template */
21477- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
21478+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
21479
21480 /* Set probe information */
21481 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
21482
21483 /* Set probe function call */
21484- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
21485+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
21486
21487 /* Set returning jmp instruction at the tail of out-of-line buffer */
21488- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
21489+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
21490 (u8 *)op->kp.addr + op->optinsn.size);
21491
21492 flush_icache_range((unsigned long) buf,
21493@@ -385,7 +385,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
21494 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
21495
21496 /* Backup instructions which will be replaced by jump address */
21497- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
21498+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
21499 RELATIVE_ADDR_SIZE);
21500
21501 insn_buf[0] = RELATIVEJUMP_OPCODE;
21502@@ -483,7 +483,7 @@ setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
21503 /* This kprobe is really able to run optimized path. */
21504 op = container_of(p, struct optimized_kprobe, kp);
21505 /* Detour through copied instructions */
21506- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
21507+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
21508 if (!reenter)
21509 reset_current_kprobe();
21510 preempt_enable_no_resched();
21511diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
21512index 57916c0..9e0b9d0 100644
21513--- a/arch/x86/kernel/kprobes.c
21514+++ b/arch/x86/kernel/kprobes.c
21515@@ -119,9 +119,12 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
21516 s32 raddr;
21517 } __attribute__((packed)) *insn;
21518
21519- insn = (struct __arch_relative_insn *)from;
21520+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
21521+
21522+ pax_open_kernel();
21523 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
21524 insn->op = op;
21525+ pax_close_kernel();
21526 }
21527
21528 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
21529@@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t *opcodes)
21530 kprobe_opcode_t opcode;
21531 kprobe_opcode_t *orig_opcodes = opcodes;
21532
21533- if (search_exception_tables((unsigned long)opcodes))
21534+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
21535 return 0; /* Page fault may occur on this address. */
21536
21537 retry:
21538@@ -238,9 +241,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
21539 * for the first byte, we can recover the original instruction
21540 * from it and kp->opcode.
21541 */
21542- memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
21543+ memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
21544 buf[0] = kp->opcode;
21545- return (unsigned long)buf;
21546+ return ktva_ktla((unsigned long)buf);
21547 }
21548
21549 /*
21550@@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
21551 /* Another subsystem puts a breakpoint, failed to recover */
21552 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
21553 return 0;
21554+ pax_open_kernel();
21555 memcpy(dest, insn.kaddr, insn.length);
21556+ pax_close_kernel();
21557
21558 #ifdef CONFIG_X86_64
21559 if (insn_rip_relative(&insn)) {
21560@@ -355,7 +360,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
21561 newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
21562 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
21563 disp = (u8 *) dest + insn_offset_displacement(&insn);
21564+ pax_open_kernel();
21565 *(s32 *) disp = (s32) newdisp;
21566+ pax_close_kernel();
21567 }
21568 #endif
21569 return insn.length;
21570@@ -485,7 +492,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
21571 * nor set current_kprobe, because it doesn't use single
21572 * stepping.
21573 */
21574- regs->ip = (unsigned long)p->ainsn.insn;
21575+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
21576 preempt_enable_no_resched();
21577 return;
21578 }
21579@@ -502,9 +509,9 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
21580 regs->flags &= ~X86_EFLAGS_IF;
21581 /* single step inline if the instruction is an int3 */
21582 if (p->opcode == BREAKPOINT_INSTRUCTION)
21583- regs->ip = (unsigned long)p->addr;
21584+ regs->ip = ktla_ktva((unsigned long)p->addr);
21585 else
21586- regs->ip = (unsigned long)p->ainsn.insn;
21587+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
21588 }
21589
21590 /*
21591@@ -600,7 +607,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
21592 setup_singlestep(p, regs, kcb, 0);
21593 return 1;
21594 }
21595- } else if (*addr != BREAKPOINT_INSTRUCTION) {
21596+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
21597 /*
21598 * The breakpoint instruction was removed right
21599 * after we hit it. Another cpu has removed
21600@@ -651,6 +658,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
21601 " movq %rax, 152(%rsp)\n"
21602 RESTORE_REGS_STRING
21603 " popfq\n"
21604+#ifdef KERNEXEC_PLUGIN
21605+ " btsq $63,(%rsp)\n"
21606+#endif
21607 #else
21608 " pushf\n"
21609 SAVE_REGS_STRING
21610@@ -788,7 +798,7 @@ static void __kprobes
21611 resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
21612 {
21613 unsigned long *tos = stack_addr(regs);
21614- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
21615+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
21616 unsigned long orig_ip = (unsigned long)p->addr;
21617 kprobe_opcode_t *insn = p->ainsn.insn;
21618
21619@@ -970,7 +980,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
21620 struct die_args *args = data;
21621 int ret = NOTIFY_DONE;
21622
21623- if (args->regs && user_mode_vm(args->regs))
21624+ if (args->regs && user_mode(args->regs))
21625 return ret;
21626
21627 switch (val) {
21628diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
21629index 9c2bd8b..bb1131c 100644
21630--- a/arch/x86/kernel/kvm.c
21631+++ b/arch/x86/kernel/kvm.c
21632@@ -452,7 +452,7 @@ static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
21633 return NOTIFY_OK;
21634 }
21635
21636-static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
21637+static struct notifier_block kvm_cpu_notifier = {
21638 .notifier_call = kvm_cpu_notify,
21639 };
21640 #endif
21641diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
21642index ebc9873..1b9724b 100644
21643--- a/arch/x86/kernel/ldt.c
21644+++ b/arch/x86/kernel/ldt.c
21645@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
21646 if (reload) {
21647 #ifdef CONFIG_SMP
21648 preempt_disable();
21649- load_LDT(pc);
21650+ load_LDT_nolock(pc);
21651 if (!cpumask_equal(mm_cpumask(current->mm),
21652 cpumask_of(smp_processor_id())))
21653 smp_call_function(flush_ldt, current->mm, 1);
21654 preempt_enable();
21655 #else
21656- load_LDT(pc);
21657+ load_LDT_nolock(pc);
21658 #endif
21659 }
21660 if (oldsize) {
21661@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
21662 return err;
21663
21664 for (i = 0; i < old->size; i++)
21665- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
21666+ write_ldt_entry(new->ldt, i, old->ldt + i);
21667 return 0;
21668 }
21669
21670@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
21671 retval = copy_ldt(&mm->context, &old_mm->context);
21672 mutex_unlock(&old_mm->context.lock);
21673 }
21674+
21675+ if (tsk == current) {
21676+ mm->context.vdso = 0;
21677+
21678+#ifdef CONFIG_X86_32
21679+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21680+ mm->context.user_cs_base = 0UL;
21681+ mm->context.user_cs_limit = ~0UL;
21682+
21683+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
21684+ cpus_clear(mm->context.cpu_user_cs_mask);
21685+#endif
21686+
21687+#endif
21688+#endif
21689+
21690+ }
21691+
21692 return retval;
21693 }
21694
21695@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
21696 }
21697 }
21698
21699+#ifdef CONFIG_PAX_SEGMEXEC
21700+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
21701+ error = -EINVAL;
21702+ goto out_unlock;
21703+ }
21704+#endif
21705+
21706 fill_ldt(&ldt, &ldt_info);
21707 if (oldmode)
21708 ldt.avl = 0;
21709diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
21710index 5b19e4d..6476a76 100644
21711--- a/arch/x86/kernel/machine_kexec_32.c
21712+++ b/arch/x86/kernel/machine_kexec_32.c
21713@@ -26,7 +26,7 @@
21714 #include <asm/cacheflush.h>
21715 #include <asm/debugreg.h>
21716
21717-static void set_idt(void *newidt, __u16 limit)
21718+static void set_idt(struct desc_struct *newidt, __u16 limit)
21719 {
21720 struct desc_ptr curidt;
21721
21722@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
21723 }
21724
21725
21726-static void set_gdt(void *newgdt, __u16 limit)
21727+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
21728 {
21729 struct desc_ptr curgdt;
21730
21731@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
21732 }
21733
21734 control_page = page_address(image->control_code_page);
21735- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
21736+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
21737
21738 relocate_kernel_ptr = control_page;
21739 page_list[PA_CONTROL_PAGE] = __pa(control_page);
21740diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
21741index 3a04b22..1d2eb09 100644
21742--- a/arch/x86/kernel/microcode_core.c
21743+++ b/arch/x86/kernel/microcode_core.c
21744@@ -512,7 +512,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
21745 return NOTIFY_OK;
21746 }
21747
21748-static struct notifier_block __refdata mc_cpu_notifier = {
21749+static struct notifier_block mc_cpu_notifier = {
21750 .notifier_call = mc_cpu_callback,
21751 };
21752
21753diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
21754index 3544aed..01ddc1c 100644
21755--- a/arch/x86/kernel/microcode_intel.c
21756+++ b/arch/x86/kernel/microcode_intel.c
21757@@ -431,13 +431,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
21758
21759 static int get_ucode_user(void *to, const void *from, size_t n)
21760 {
21761- return copy_from_user(to, from, n);
21762+ return copy_from_user(to, (const void __force_user *)from, n);
21763 }
21764
21765 static enum ucode_state
21766 request_microcode_user(int cpu, const void __user *buf, size_t size)
21767 {
21768- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
21769+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
21770 }
21771
21772 static void microcode_fini_cpu(int cpu)
21773diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
21774index 216a4d7..228255a 100644
21775--- a/arch/x86/kernel/module.c
21776+++ b/arch/x86/kernel/module.c
21777@@ -43,15 +43,60 @@ do { \
21778 } while (0)
21779 #endif
21780
21781-void *module_alloc(unsigned long size)
21782+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
21783 {
21784- if (PAGE_ALIGN(size) > MODULES_LEN)
21785+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
21786 return NULL;
21787 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
21788- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
21789+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
21790 -1, __builtin_return_address(0));
21791 }
21792
21793+void *module_alloc(unsigned long size)
21794+{
21795+
21796+#ifdef CONFIG_PAX_KERNEXEC
21797+ return __module_alloc(size, PAGE_KERNEL);
21798+#else
21799+ return __module_alloc(size, PAGE_KERNEL_EXEC);
21800+#endif
21801+
21802+}
21803+
21804+#ifdef CONFIG_PAX_KERNEXEC
21805+#ifdef CONFIG_X86_32
21806+void *module_alloc_exec(unsigned long size)
21807+{
21808+ struct vm_struct *area;
21809+
21810+ if (size == 0)
21811+ return NULL;
21812+
21813+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
21814+ return area ? area->addr : NULL;
21815+}
21816+EXPORT_SYMBOL(module_alloc_exec);
21817+
21818+void module_free_exec(struct module *mod, void *module_region)
21819+{
21820+ vunmap(module_region);
21821+}
21822+EXPORT_SYMBOL(module_free_exec);
21823+#else
21824+void module_free_exec(struct module *mod, void *module_region)
21825+{
21826+ module_free(mod, module_region);
21827+}
21828+EXPORT_SYMBOL(module_free_exec);
21829+
21830+void *module_alloc_exec(unsigned long size)
21831+{
21832+ return __module_alloc(size, PAGE_KERNEL_RX);
21833+}
21834+EXPORT_SYMBOL(module_alloc_exec);
21835+#endif
21836+#endif
21837+
21838 #ifdef CONFIG_X86_32
21839 int apply_relocate(Elf32_Shdr *sechdrs,
21840 const char *strtab,
21841@@ -62,14 +107,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
21842 unsigned int i;
21843 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
21844 Elf32_Sym *sym;
21845- uint32_t *location;
21846+ uint32_t *plocation, location;
21847
21848 DEBUGP("Applying relocate section %u to %u\n",
21849 relsec, sechdrs[relsec].sh_info);
21850 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
21851 /* This is where to make the change */
21852- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
21853- + rel[i].r_offset;
21854+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
21855+ location = (uint32_t)plocation;
21856+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
21857+ plocation = ktla_ktva((void *)plocation);
21858 /* This is the symbol it is referring to. Note that all
21859 undefined symbols have been resolved. */
21860 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
21861@@ -78,11 +125,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
21862 switch (ELF32_R_TYPE(rel[i].r_info)) {
21863 case R_386_32:
21864 /* We add the value into the location given */
21865- *location += sym->st_value;
21866+ pax_open_kernel();
21867+ *plocation += sym->st_value;
21868+ pax_close_kernel();
21869 break;
21870 case R_386_PC32:
21871 /* Add the value, subtract its position */
21872- *location += sym->st_value - (uint32_t)location;
21873+ pax_open_kernel();
21874+ *plocation += sym->st_value - location;
21875+ pax_close_kernel();
21876 break;
21877 default:
21878 pr_err("%s: Unknown relocation: %u\n",
21879@@ -127,21 +178,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
21880 case R_X86_64_NONE:
21881 break;
21882 case R_X86_64_64:
21883+ pax_open_kernel();
21884 *(u64 *)loc = val;
21885+ pax_close_kernel();
21886 break;
21887 case R_X86_64_32:
21888+ pax_open_kernel();
21889 *(u32 *)loc = val;
21890+ pax_close_kernel();
21891 if (val != *(u32 *)loc)
21892 goto overflow;
21893 break;
21894 case R_X86_64_32S:
21895+ pax_open_kernel();
21896 *(s32 *)loc = val;
21897+ pax_close_kernel();
21898 if ((s64)val != *(s32 *)loc)
21899 goto overflow;
21900 break;
21901 case R_X86_64_PC32:
21902 val -= (u64)loc;
21903+ pax_open_kernel();
21904 *(u32 *)loc = val;
21905+ pax_close_kernel();
21906+
21907 #if 0
21908 if ((s64)val != *(s32 *)loc)
21909 goto overflow;
21910diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
21911index 4929502..686c291 100644
21912--- a/arch/x86/kernel/msr.c
21913+++ b/arch/x86/kernel/msr.c
21914@@ -234,7 +234,7 @@ static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb,
21915 return notifier_from_errno(err);
21916 }
21917
21918-static struct notifier_block __refdata msr_class_cpu_notifier = {
21919+static struct notifier_block msr_class_cpu_notifier = {
21920 .notifier_call = msr_class_cpu_callback,
21921 };
21922
21923diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
21924index f84f5c5..f404e81 100644
21925--- a/arch/x86/kernel/nmi.c
21926+++ b/arch/x86/kernel/nmi.c
21927@@ -105,7 +105,7 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2
21928 return handled;
21929 }
21930
21931-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
21932+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
21933 {
21934 struct nmi_desc *desc = nmi_to_desc(type);
21935 unsigned long flags;
21936@@ -129,9 +129,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
21937 * event confuses some handlers (kdump uses this flag)
21938 */
21939 if (action->flags & NMI_FLAG_FIRST)
21940- list_add_rcu(&action->list, &desc->head);
21941+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
21942 else
21943- list_add_tail_rcu(&action->list, &desc->head);
21944+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
21945
21946 spin_unlock_irqrestore(&desc->lock, flags);
21947 return 0;
21948@@ -154,7 +154,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
21949 if (!strcmp(n->name, name)) {
21950 WARN(in_nmi(),
21951 "Trying to free NMI (%s) from NMI context!\n", n->name);
21952- list_del_rcu(&n->list);
21953+ pax_list_del_rcu((struct list_head *)&n->list);
21954 break;
21955 }
21956 }
21957@@ -479,6 +479,17 @@ static inline void nmi_nesting_postprocess(void)
21958 dotraplinkage notrace __kprobes void
21959 do_nmi(struct pt_regs *regs, long error_code)
21960 {
21961+
21962+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21963+ if (!user_mode(regs)) {
21964+ unsigned long cs = regs->cs & 0xFFFF;
21965+ unsigned long ip = ktva_ktla(regs->ip);
21966+
21967+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
21968+ regs->ip = ip;
21969+ }
21970+#endif
21971+
21972 nmi_nesting_preprocess(regs);
21973
21974 nmi_enter();
21975diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
21976index 6d9582e..f746287 100644
21977--- a/arch/x86/kernel/nmi_selftest.c
21978+++ b/arch/x86/kernel/nmi_selftest.c
21979@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
21980 {
21981 /* trap all the unknown NMIs we may generate */
21982 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
21983- __initdata);
21984+ __initconst);
21985 }
21986
21987 static void __init cleanup_nmi_testsuite(void)
21988@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
21989 unsigned long timeout;
21990
21991 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
21992- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
21993+ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
21994 nmi_fail = FAILURE;
21995 return;
21996 }
21997diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
21998index 676b8c7..870ba04 100644
21999--- a/arch/x86/kernel/paravirt-spinlocks.c
22000+++ b/arch/x86/kernel/paravirt-spinlocks.c
22001@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
22002 arch_spin_lock(lock);
22003 }
22004
22005-struct pv_lock_ops pv_lock_ops = {
22006+struct pv_lock_ops pv_lock_ops __read_only = {
22007 #ifdef CONFIG_SMP
22008 .spin_is_locked = __ticket_spin_is_locked,
22009 .spin_is_contended = __ticket_spin_is_contended,
22010diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
22011index 17fff18..5cfa0f4 100644
22012--- a/arch/x86/kernel/paravirt.c
22013+++ b/arch/x86/kernel/paravirt.c
22014@@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
22015 {
22016 return x;
22017 }
22018+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
22019+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
22020+#endif
22021
22022 void __init default_banner(void)
22023 {
22024@@ -147,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
22025 if (opfunc == NULL)
22026 /* If there's no function, patch it with a ud2a (BUG) */
22027 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
22028- else if (opfunc == _paravirt_nop)
22029+ else if (opfunc == (void *)_paravirt_nop)
22030 /* If the operation is a nop, then nop the callsite */
22031 ret = paravirt_patch_nop();
22032
22033 /* identity functions just return their single argument */
22034- else if (opfunc == _paravirt_ident_32)
22035+ else if (opfunc == (void *)_paravirt_ident_32)
22036 ret = paravirt_patch_ident_32(insnbuf, len);
22037- else if (opfunc == _paravirt_ident_64)
22038+ else if (opfunc == (void *)_paravirt_ident_64)
22039 ret = paravirt_patch_ident_64(insnbuf, len);
22040+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
22041+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
22042+ ret = paravirt_patch_ident_64(insnbuf, len);
22043+#endif
22044
22045 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
22046 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
22047@@ -180,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
22048 if (insn_len > len || start == NULL)
22049 insn_len = len;
22050 else
22051- memcpy(insnbuf, start, insn_len);
22052+ memcpy(insnbuf, ktla_ktva(start), insn_len);
22053
22054 return insn_len;
22055 }
22056@@ -304,7 +311,7 @@ void arch_flush_lazy_mmu_mode(void)
22057 preempt_enable();
22058 }
22059
22060-struct pv_info pv_info = {
22061+struct pv_info pv_info __read_only = {
22062 .name = "bare hardware",
22063 .paravirt_enabled = 0,
22064 .kernel_rpl = 0,
22065@@ -315,16 +322,16 @@ struct pv_info pv_info = {
22066 #endif
22067 };
22068
22069-struct pv_init_ops pv_init_ops = {
22070+struct pv_init_ops pv_init_ops __read_only = {
22071 .patch = native_patch,
22072 };
22073
22074-struct pv_time_ops pv_time_ops = {
22075+struct pv_time_ops pv_time_ops __read_only = {
22076 .sched_clock = native_sched_clock,
22077 .steal_clock = native_steal_clock,
22078 };
22079
22080-struct pv_irq_ops pv_irq_ops = {
22081+struct pv_irq_ops pv_irq_ops __read_only = {
22082 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
22083 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
22084 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
22085@@ -336,7 +343,7 @@ struct pv_irq_ops pv_irq_ops = {
22086 #endif
22087 };
22088
22089-struct pv_cpu_ops pv_cpu_ops = {
22090+struct pv_cpu_ops pv_cpu_ops __read_only = {
22091 .cpuid = native_cpuid,
22092 .get_debugreg = native_get_debugreg,
22093 .set_debugreg = native_set_debugreg,
22094@@ -395,21 +402,26 @@ struct pv_cpu_ops pv_cpu_ops = {
22095 .end_context_switch = paravirt_nop,
22096 };
22097
22098-struct pv_apic_ops pv_apic_ops = {
22099+struct pv_apic_ops pv_apic_ops __read_only= {
22100 #ifdef CONFIG_X86_LOCAL_APIC
22101 .startup_ipi_hook = paravirt_nop,
22102 #endif
22103 };
22104
22105-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
22106+#ifdef CONFIG_X86_32
22107+#ifdef CONFIG_X86_PAE
22108+/* 64-bit pagetable entries */
22109+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
22110+#else
22111 /* 32-bit pagetable entries */
22112 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
22113+#endif
22114 #else
22115 /* 64-bit pagetable entries */
22116 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
22117 #endif
22118
22119-struct pv_mmu_ops pv_mmu_ops = {
22120+struct pv_mmu_ops pv_mmu_ops __read_only = {
22121
22122 .read_cr2 = native_read_cr2,
22123 .write_cr2 = native_write_cr2,
22124@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
22125 .make_pud = PTE_IDENT,
22126
22127 .set_pgd = native_set_pgd,
22128+ .set_pgd_batched = native_set_pgd_batched,
22129 #endif
22130 #endif /* PAGETABLE_LEVELS >= 3 */
22131
22132@@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
22133 },
22134
22135 .set_fixmap = native_set_fixmap,
22136+
22137+#ifdef CONFIG_PAX_KERNEXEC
22138+ .pax_open_kernel = native_pax_open_kernel,
22139+ .pax_close_kernel = native_pax_close_kernel,
22140+#endif
22141+
22142 };
22143
22144 EXPORT_SYMBOL_GPL(pv_time_ops);
22145diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
22146index 35ccf75..7a15747 100644
22147--- a/arch/x86/kernel/pci-iommu_table.c
22148+++ b/arch/x86/kernel/pci-iommu_table.c
22149@@ -2,7 +2,7 @@
22150 #include <asm/iommu_table.h>
22151 #include <linux/string.h>
22152 #include <linux/kallsyms.h>
22153-
22154+#include <linux/sched.h>
22155
22156 #define DEBUG 1
22157
22158diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
22159index 6c483ba..d10ce2f 100644
22160--- a/arch/x86/kernel/pci-swiotlb.c
22161+++ b/arch/x86/kernel/pci-swiotlb.c
22162@@ -32,7 +32,7 @@ static void x86_swiotlb_free_coherent(struct device *dev, size_t size,
22163 void *vaddr, dma_addr_t dma_addr,
22164 struct dma_attrs *attrs)
22165 {
22166- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
22167+ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
22168 }
22169
22170 static struct dma_map_ops swiotlb_dma_ops = {
22171diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
22172index 2ed787f..f70c9f6 100644
22173--- a/arch/x86/kernel/process.c
22174+++ b/arch/x86/kernel/process.c
22175@@ -36,7 +36,8 @@
22176 * section. Since TSS's are completely CPU-local, we want them
22177 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
22178 */
22179-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
22180+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
22181+EXPORT_SYMBOL(init_tss);
22182
22183 #ifdef CONFIG_X86_64
22184 static DEFINE_PER_CPU(unsigned char, is_idle);
22185@@ -92,7 +93,7 @@ void arch_task_cache_init(void)
22186 task_xstate_cachep =
22187 kmem_cache_create("task_xstate", xstate_size,
22188 __alignof__(union thread_xstate),
22189- SLAB_PANIC | SLAB_NOTRACK, NULL);
22190+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
22191 }
22192
22193 /*
22194@@ -105,7 +106,7 @@ void exit_thread(void)
22195 unsigned long *bp = t->io_bitmap_ptr;
22196
22197 if (bp) {
22198- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
22199+ struct tss_struct *tss = init_tss + get_cpu();
22200
22201 t->io_bitmap_ptr = NULL;
22202 clear_thread_flag(TIF_IO_BITMAP);
22203@@ -136,7 +137,7 @@ void show_regs_common(void)
22204 board = dmi_get_system_info(DMI_BOARD_NAME);
22205
22206 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s %s%s%s\n",
22207- current->pid, current->comm, print_tainted(),
22208+ task_pid_nr(current), current->comm, print_tainted(),
22209 init_utsname()->release,
22210 (int)strcspn(init_utsname()->version, " "),
22211 init_utsname()->version,
22212@@ -149,6 +150,9 @@ void flush_thread(void)
22213 {
22214 struct task_struct *tsk = current;
22215
22216+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
22217+ loadsegment(gs, 0);
22218+#endif
22219 flush_ptrace_hw_breakpoint(tsk);
22220 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
22221 drop_init_fpu(tsk);
22222@@ -301,7 +305,7 @@ static void __exit_idle(void)
22223 void exit_idle(void)
22224 {
22225 /* idle loop has pid 0 */
22226- if (current->pid)
22227+ if (task_pid_nr(current))
22228 return;
22229 __exit_idle();
22230 }
22231@@ -404,7 +408,7 @@ bool set_pm_idle_to_default(void)
22232
22233 return ret;
22234 }
22235-void stop_this_cpu(void *dummy)
22236+__noreturn void stop_this_cpu(void *dummy)
22237 {
22238 local_irq_disable();
22239 /*
22240@@ -632,16 +636,37 @@ static int __init idle_setup(char *str)
22241 }
22242 early_param("idle", idle_setup);
22243
22244-unsigned long arch_align_stack(unsigned long sp)
22245+#ifdef CONFIG_PAX_RANDKSTACK
22246+void pax_randomize_kstack(struct pt_regs *regs)
22247 {
22248- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
22249- sp -= get_random_int() % 8192;
22250- return sp & ~0xf;
22251-}
22252+ struct thread_struct *thread = &current->thread;
22253+ unsigned long time;
22254
22255-unsigned long arch_randomize_brk(struct mm_struct *mm)
22256-{
22257- unsigned long range_end = mm->brk + 0x02000000;
22258- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
22259-}
22260+ if (!randomize_va_space)
22261+ return;
22262+
22263+ if (v8086_mode(regs))
22264+ return;
22265
22266+ rdtscl(time);
22267+
22268+ /* P4 seems to return a 0 LSB, ignore it */
22269+#ifdef CONFIG_MPENTIUM4
22270+ time &= 0x3EUL;
22271+ time <<= 2;
22272+#elif defined(CONFIG_X86_64)
22273+ time &= 0xFUL;
22274+ time <<= 4;
22275+#else
22276+ time &= 0x1FUL;
22277+ time <<= 3;
22278+#endif
22279+
22280+ thread->sp0 ^= time;
22281+ load_sp0(init_tss + smp_processor_id(), thread);
22282+
22283+#ifdef CONFIG_X86_64
22284+ this_cpu_write(kernel_stack, thread->sp0);
22285+#endif
22286+}
22287+#endif
22288diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
22289index b5a8905..d9cacac 100644
22290--- a/arch/x86/kernel/process_32.c
22291+++ b/arch/x86/kernel/process_32.c
22292@@ -65,6 +65,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
22293 unsigned long thread_saved_pc(struct task_struct *tsk)
22294 {
22295 return ((unsigned long *)tsk->thread.sp)[3];
22296+//XXX return tsk->thread.eip;
22297 }
22298
22299 void __show_regs(struct pt_regs *regs, int all)
22300@@ -74,21 +75,20 @@ void __show_regs(struct pt_regs *regs, int all)
22301 unsigned long sp;
22302 unsigned short ss, gs;
22303
22304- if (user_mode_vm(regs)) {
22305+ if (user_mode(regs)) {
22306 sp = regs->sp;
22307 ss = regs->ss & 0xffff;
22308- gs = get_user_gs(regs);
22309 } else {
22310 sp = kernel_stack_pointer(regs);
22311 savesegment(ss, ss);
22312- savesegment(gs, gs);
22313 }
22314+ gs = get_user_gs(regs);
22315
22316 show_regs_common();
22317
22318 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
22319 (u16)regs->cs, regs->ip, regs->flags,
22320- smp_processor_id());
22321+ raw_smp_processor_id());
22322 print_symbol("EIP is at %s\n", regs->ip);
22323
22324 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
22325@@ -130,20 +130,21 @@ void release_thread(struct task_struct *dead_task)
22326 int copy_thread(unsigned long clone_flags, unsigned long sp,
22327 unsigned long arg, struct task_struct *p)
22328 {
22329- struct pt_regs *childregs = task_pt_regs(p);
22330+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
22331 struct task_struct *tsk;
22332 int err;
22333
22334 p->thread.sp = (unsigned long) childregs;
22335 p->thread.sp0 = (unsigned long) (childregs+1);
22336+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
22337
22338 if (unlikely(p->flags & PF_KTHREAD)) {
22339 /* kernel thread */
22340 memset(childregs, 0, sizeof(struct pt_regs));
22341 p->thread.ip = (unsigned long) ret_from_kernel_thread;
22342- task_user_gs(p) = __KERNEL_STACK_CANARY;
22343- childregs->ds = __USER_DS;
22344- childregs->es = __USER_DS;
22345+ savesegment(gs, childregs->gs);
22346+ childregs->ds = __KERNEL_DS;
22347+ childregs->es = __KERNEL_DS;
22348 childregs->fs = __KERNEL_PERCPU;
22349 childregs->bx = sp; /* function */
22350 childregs->bp = arg;
22351@@ -250,7 +251,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22352 struct thread_struct *prev = &prev_p->thread,
22353 *next = &next_p->thread;
22354 int cpu = smp_processor_id();
22355- struct tss_struct *tss = &per_cpu(init_tss, cpu);
22356+ struct tss_struct *tss = init_tss + cpu;
22357 fpu_switch_t fpu;
22358
22359 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
22360@@ -274,6 +275,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22361 */
22362 lazy_save_gs(prev->gs);
22363
22364+#ifdef CONFIG_PAX_MEMORY_UDEREF
22365+ __set_fs(task_thread_info(next_p)->addr_limit);
22366+#endif
22367+
22368 /*
22369 * Load the per-thread Thread-Local Storage descriptor.
22370 */
22371@@ -304,6 +309,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22372 */
22373 arch_end_context_switch(next_p);
22374
22375+ this_cpu_write(current_task, next_p);
22376+ this_cpu_write(current_tinfo, &next_p->tinfo);
22377+
22378 /*
22379 * Restore %gs if needed (which is common)
22380 */
22381@@ -312,8 +320,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22382
22383 switch_fpu_finish(next_p, fpu);
22384
22385- this_cpu_write(current_task, next_p);
22386-
22387 return prev_p;
22388 }
22389
22390@@ -343,4 +349,3 @@ unsigned long get_wchan(struct task_struct *p)
22391 } while (count++ < 16);
22392 return 0;
22393 }
22394-
22395diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
22396index 6e68a61..955a9a5 100644
22397--- a/arch/x86/kernel/process_64.c
22398+++ b/arch/x86/kernel/process_64.c
22399@@ -152,10 +152,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
22400 struct pt_regs *childregs;
22401 struct task_struct *me = current;
22402
22403- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
22404+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
22405 childregs = task_pt_regs(p);
22406 p->thread.sp = (unsigned long) childregs;
22407 p->thread.usersp = me->thread.usersp;
22408+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
22409 set_tsk_thread_flag(p, TIF_FORK);
22410 p->fpu_counter = 0;
22411 p->thread.io_bitmap_ptr = NULL;
22412@@ -274,7 +275,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22413 struct thread_struct *prev = &prev_p->thread;
22414 struct thread_struct *next = &next_p->thread;
22415 int cpu = smp_processor_id();
22416- struct tss_struct *tss = &per_cpu(init_tss, cpu);
22417+ struct tss_struct *tss = init_tss + cpu;
22418 unsigned fsindex, gsindex;
22419 fpu_switch_t fpu;
22420
22421@@ -356,10 +357,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22422 prev->usersp = this_cpu_read(old_rsp);
22423 this_cpu_write(old_rsp, next->usersp);
22424 this_cpu_write(current_task, next_p);
22425+ this_cpu_write(current_tinfo, &next_p->tinfo);
22426
22427- this_cpu_write(kernel_stack,
22428- (unsigned long)task_stack_page(next_p) +
22429- THREAD_SIZE - KERNEL_STACK_OFFSET);
22430+ this_cpu_write(kernel_stack, next->sp0);
22431
22432 /*
22433 * Now maybe reload the debug registers and handle I/O bitmaps
22434@@ -428,12 +428,11 @@ unsigned long get_wchan(struct task_struct *p)
22435 if (!p || p == current || p->state == TASK_RUNNING)
22436 return 0;
22437 stack = (unsigned long)task_stack_page(p);
22438- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
22439+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
22440 return 0;
22441 fp = *(u64 *)(p->thread.sp);
22442 do {
22443- if (fp < (unsigned long)stack ||
22444- fp >= (unsigned long)stack+THREAD_SIZE)
22445+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
22446 return 0;
22447 ip = *(u64 *)(fp+8);
22448 if (!in_sched_functions(ip))
22449diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
22450index b629bbe..0fa615a 100644
22451--- a/arch/x86/kernel/ptrace.c
22452+++ b/arch/x86/kernel/ptrace.c
22453@@ -184,14 +184,13 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
22454 {
22455 unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
22456 unsigned long sp = (unsigned long)&regs->sp;
22457- struct thread_info *tinfo;
22458
22459- if (context == (sp & ~(THREAD_SIZE - 1)))
22460+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
22461 return sp;
22462
22463- tinfo = (struct thread_info *)context;
22464- if (tinfo->previous_esp)
22465- return tinfo->previous_esp;
22466+ sp = *(unsigned long *)context;
22467+ if (sp)
22468+ return sp;
22469
22470 return (unsigned long)regs;
22471 }
22472@@ -588,7 +587,7 @@ static void ptrace_triggered(struct perf_event *bp,
22473 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
22474 {
22475 int i;
22476- int dr7 = 0;
22477+ unsigned long dr7 = 0;
22478 struct arch_hw_breakpoint *info;
22479
22480 for (i = 0; i < HBP_NUM; i++) {
22481@@ -856,7 +855,7 @@ long arch_ptrace(struct task_struct *child, long request,
22482 unsigned long addr, unsigned long data)
22483 {
22484 int ret;
22485- unsigned long __user *datap = (unsigned long __user *)data;
22486+ unsigned long __user *datap = (__force unsigned long __user *)data;
22487
22488 switch (request) {
22489 /* read the word at location addr in the USER area. */
22490@@ -941,14 +940,14 @@ long arch_ptrace(struct task_struct *child, long request,
22491 if ((int) addr < 0)
22492 return -EIO;
22493 ret = do_get_thread_area(child, addr,
22494- (struct user_desc __user *)data);
22495+ (__force struct user_desc __user *) data);
22496 break;
22497
22498 case PTRACE_SET_THREAD_AREA:
22499 if ((int) addr < 0)
22500 return -EIO;
22501 ret = do_set_thread_area(child, addr,
22502- (struct user_desc __user *)data, 0);
22503+ (__force struct user_desc __user *) data, 0);
22504 break;
22505 #endif
22506
22507@@ -1326,7 +1325,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
22508
22509 #ifdef CONFIG_X86_64
22510
22511-static struct user_regset x86_64_regsets[] __read_mostly = {
22512+static user_regset_no_const x86_64_regsets[] __read_only = {
22513 [REGSET_GENERAL] = {
22514 .core_note_type = NT_PRSTATUS,
22515 .n = sizeof(struct user_regs_struct) / sizeof(long),
22516@@ -1367,7 +1366,7 @@ static const struct user_regset_view user_x86_64_view = {
22517 #endif /* CONFIG_X86_64 */
22518
22519 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
22520-static struct user_regset x86_32_regsets[] __read_mostly = {
22521+static user_regset_no_const x86_32_regsets[] __read_only = {
22522 [REGSET_GENERAL] = {
22523 .core_note_type = NT_PRSTATUS,
22524 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
22525@@ -1420,7 +1419,7 @@ static const struct user_regset_view user_x86_32_view = {
22526 */
22527 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
22528
22529-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
22530+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
22531 {
22532 #ifdef CONFIG_X86_64
22533 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
22534@@ -1455,7 +1454,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
22535 memset(info, 0, sizeof(*info));
22536 info->si_signo = SIGTRAP;
22537 info->si_code = si_code;
22538- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
22539+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
22540 }
22541
22542 void user_single_step_siginfo(struct task_struct *tsk,
22543@@ -1484,6 +1483,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
22544 # define IS_IA32 0
22545 #endif
22546
22547+#ifdef CONFIG_GRKERNSEC_SETXID
22548+extern void gr_delayed_cred_worker(void);
22549+#endif
22550+
22551 /*
22552 * We must return the syscall number to actually look up in the table.
22553 * This can be -1L to skip running any syscall at all.
22554@@ -1494,6 +1497,11 @@ long syscall_trace_enter(struct pt_regs *regs)
22555
22556 user_exit();
22557
22558+#ifdef CONFIG_GRKERNSEC_SETXID
22559+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
22560+ gr_delayed_cred_worker();
22561+#endif
22562+
22563 /*
22564 * If we stepped into a sysenter/syscall insn, it trapped in
22565 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
22566@@ -1549,6 +1557,11 @@ void syscall_trace_leave(struct pt_regs *regs)
22567 */
22568 user_exit();
22569
22570+#ifdef CONFIG_GRKERNSEC_SETXID
22571+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
22572+ gr_delayed_cred_worker();
22573+#endif
22574+
22575 audit_syscall_exit(regs);
22576
22577 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
22578diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
22579index 2cb9470..ff1fd80 100644
22580--- a/arch/x86/kernel/pvclock.c
22581+++ b/arch/x86/kernel/pvclock.c
22582@@ -43,11 +43,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
22583 return pv_tsc_khz;
22584 }
22585
22586-static atomic64_t last_value = ATOMIC64_INIT(0);
22587+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
22588
22589 void pvclock_resume(void)
22590 {
22591- atomic64_set(&last_value, 0);
22592+ atomic64_set_unchecked(&last_value, 0);
22593 }
22594
22595 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
22596@@ -92,11 +92,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
22597 * updating at the same time, and one of them could be slightly behind,
22598 * making the assumption that last_value always go forward fail to hold.
22599 */
22600- last = atomic64_read(&last_value);
22601+ last = atomic64_read_unchecked(&last_value);
22602 do {
22603 if (ret < last)
22604 return last;
22605- last = atomic64_cmpxchg(&last_value, last, ret);
22606+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
22607 } while (unlikely(last != ret));
22608
22609 return ret;
22610diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
22611index 76fa1e9..abf09ea 100644
22612--- a/arch/x86/kernel/reboot.c
22613+++ b/arch/x86/kernel/reboot.c
22614@@ -36,7 +36,7 @@ void (*pm_power_off)(void);
22615 EXPORT_SYMBOL(pm_power_off);
22616
22617 static const struct desc_ptr no_idt = {};
22618-static int reboot_mode;
22619+static unsigned short reboot_mode;
22620 enum reboot_type reboot_type = BOOT_ACPI;
22621 int reboot_force;
22622
22623@@ -157,6 +157,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
22624
22625 void __noreturn machine_real_restart(unsigned int type)
22626 {
22627+
22628+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
22629+ struct desc_struct *gdt;
22630+#endif
22631+
22632 local_irq_disable();
22633
22634 /*
22635@@ -184,7 +189,29 @@ void __noreturn machine_real_restart(unsigned int type)
22636
22637 /* Jump to the identity-mapped low memory code */
22638 #ifdef CONFIG_X86_32
22639- asm volatile("jmpl *%0" : :
22640+
22641+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22642+ gdt = get_cpu_gdt_table(smp_processor_id());
22643+ pax_open_kernel();
22644+#ifdef CONFIG_PAX_MEMORY_UDEREF
22645+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
22646+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
22647+ loadsegment(ds, __KERNEL_DS);
22648+ loadsegment(es, __KERNEL_DS);
22649+ loadsegment(ss, __KERNEL_DS);
22650+#endif
22651+#ifdef CONFIG_PAX_KERNEXEC
22652+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
22653+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
22654+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
22655+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
22656+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
22657+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
22658+#endif
22659+ pax_close_kernel();
22660+#endif
22661+
22662+ asm volatile("ljmpl *%0" : :
22663 "rm" (real_mode_header->machine_real_restart_asm),
22664 "a" (type));
22665 #else
22666@@ -531,7 +558,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
22667 * try to force a triple fault and then cycle between hitting the keyboard
22668 * controller and doing that
22669 */
22670-static void native_machine_emergency_restart(void)
22671+static void __noreturn native_machine_emergency_restart(void)
22672 {
22673 int i;
22674 int attempt = 0;
22675@@ -654,13 +681,13 @@ void native_machine_shutdown(void)
22676 #endif
22677 }
22678
22679-static void __machine_emergency_restart(int emergency)
22680+static void __noreturn __machine_emergency_restart(int emergency)
22681 {
22682 reboot_emergency = emergency;
22683 machine_ops.emergency_restart();
22684 }
22685
22686-static void native_machine_restart(char *__unused)
22687+static void __noreturn native_machine_restart(char *__unused)
22688 {
22689 pr_notice("machine restart\n");
22690
22691@@ -669,7 +696,7 @@ static void native_machine_restart(char *__unused)
22692 __machine_emergency_restart(0);
22693 }
22694
22695-static void native_machine_halt(void)
22696+static void __noreturn native_machine_halt(void)
22697 {
22698 /* Stop other cpus and apics */
22699 machine_shutdown();
22700@@ -679,7 +706,7 @@ static void native_machine_halt(void)
22701 stop_this_cpu(NULL);
22702 }
22703
22704-static void native_machine_power_off(void)
22705+static void __noreturn native_machine_power_off(void)
22706 {
22707 if (pm_power_off) {
22708 if (!reboot_force)
22709@@ -688,9 +715,10 @@ static void native_machine_power_off(void)
22710 }
22711 /* A fallback in case there is no PM info available */
22712 tboot_shutdown(TB_SHUTDOWN_HALT);
22713+ unreachable();
22714 }
22715
22716-struct machine_ops machine_ops = {
22717+struct machine_ops machine_ops __read_only = {
22718 .power_off = native_machine_power_off,
22719 .shutdown = native_machine_shutdown,
22720 .emergency_restart = native_machine_emergency_restart,
22721diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
22722index 7a6f3b3..bed145d7 100644
22723--- a/arch/x86/kernel/relocate_kernel_64.S
22724+++ b/arch/x86/kernel/relocate_kernel_64.S
22725@@ -11,6 +11,7 @@
22726 #include <asm/kexec.h>
22727 #include <asm/processor-flags.h>
22728 #include <asm/pgtable_types.h>
22729+#include <asm/alternative-asm.h>
22730
22731 /*
22732 * Must be relocatable PIC code callable as a C function
22733@@ -160,13 +161,14 @@ identity_mapped:
22734 xorq %rbp, %rbp
22735 xorq %r8, %r8
22736 xorq %r9, %r9
22737- xorq %r10, %r9
22738+ xorq %r10, %r10
22739 xorq %r11, %r11
22740 xorq %r12, %r12
22741 xorq %r13, %r13
22742 xorq %r14, %r14
22743 xorq %r15, %r15
22744
22745+ pax_force_retaddr 0, 1
22746 ret
22747
22748 1:
22749diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
22750index 8b24289..d37b58b 100644
22751--- a/arch/x86/kernel/setup.c
22752+++ b/arch/x86/kernel/setup.c
22753@@ -437,7 +437,7 @@ static void __init parse_setup_data(void)
22754
22755 switch (data->type) {
22756 case SETUP_E820_EXT:
22757- parse_e820_ext(data);
22758+ parse_e820_ext((struct setup_data __force_kernel *)data);
22759 break;
22760 case SETUP_DTB:
22761 add_dtb(pa_data);
22762@@ -706,7 +706,7 @@ static void __init trim_bios_range(void)
22763 * area (640->1Mb) as ram even though it is not.
22764 * take them out.
22765 */
22766- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
22767+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
22768
22769 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
22770 }
22771@@ -830,14 +830,14 @@ void __init setup_arch(char **cmdline_p)
22772
22773 if (!boot_params.hdr.root_flags)
22774 root_mountflags &= ~MS_RDONLY;
22775- init_mm.start_code = (unsigned long) _text;
22776- init_mm.end_code = (unsigned long) _etext;
22777+ init_mm.start_code = ktla_ktva((unsigned long) _text);
22778+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
22779 init_mm.end_data = (unsigned long) _edata;
22780 init_mm.brk = _brk_end;
22781
22782- code_resource.start = virt_to_phys(_text);
22783- code_resource.end = virt_to_phys(_etext)-1;
22784- data_resource.start = virt_to_phys(_etext);
22785+ code_resource.start = virt_to_phys(ktla_ktva(_text));
22786+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
22787+ data_resource.start = virt_to_phys(_sdata);
22788 data_resource.end = virt_to_phys(_edata)-1;
22789 bss_resource.start = virt_to_phys(&__bss_start);
22790 bss_resource.end = virt_to_phys(&__bss_stop)-1;
22791diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
22792index 5cdff03..80fa283 100644
22793--- a/arch/x86/kernel/setup_percpu.c
22794+++ b/arch/x86/kernel/setup_percpu.c
22795@@ -21,19 +21,17 @@
22796 #include <asm/cpu.h>
22797 #include <asm/stackprotector.h>
22798
22799-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
22800+#ifdef CONFIG_SMP
22801+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
22802 EXPORT_PER_CPU_SYMBOL(cpu_number);
22803+#endif
22804
22805-#ifdef CONFIG_X86_64
22806 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
22807-#else
22808-#define BOOT_PERCPU_OFFSET 0
22809-#endif
22810
22811 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
22812 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
22813
22814-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
22815+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
22816 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
22817 };
22818 EXPORT_SYMBOL(__per_cpu_offset);
22819@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
22820 {
22821 #ifdef CONFIG_NEED_MULTIPLE_NODES
22822 pg_data_t *last = NULL;
22823- unsigned int cpu;
22824+ int cpu;
22825
22826 for_each_possible_cpu(cpu) {
22827 int node = early_cpu_to_node(cpu);
22828@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
22829 {
22830 #ifdef CONFIG_X86_32
22831 struct desc_struct gdt;
22832+ unsigned long base = per_cpu_offset(cpu);
22833
22834- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
22835- 0x2 | DESCTYPE_S, 0x8);
22836- gdt.s = 1;
22837+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
22838+ 0x83 | DESCTYPE_S, 0xC);
22839 write_gdt_entry(get_cpu_gdt_table(cpu),
22840 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
22841 #endif
22842@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
22843 /* alrighty, percpu areas up and running */
22844 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
22845 for_each_possible_cpu(cpu) {
22846+#ifdef CONFIG_CC_STACKPROTECTOR
22847+#ifdef CONFIG_X86_32
22848+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
22849+#endif
22850+#endif
22851 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
22852 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
22853 per_cpu(cpu_number, cpu) = cpu;
22854@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
22855 */
22856 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
22857 #endif
22858+#ifdef CONFIG_CC_STACKPROTECTOR
22859+#ifdef CONFIG_X86_32
22860+ if (!cpu)
22861+ per_cpu(stack_canary.canary, cpu) = canary;
22862+#endif
22863+#endif
22864 /*
22865 * Up to this point, the boot CPU has been using .init.data
22866 * area. Reload any changed state for the boot CPU.
22867diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
22868index d6bf1f3..3ffce5a 100644
22869--- a/arch/x86/kernel/signal.c
22870+++ b/arch/x86/kernel/signal.c
22871@@ -196,7 +196,7 @@ static unsigned long align_sigframe(unsigned long sp)
22872 * Align the stack pointer according to the i386 ABI,
22873 * i.e. so that on function entry ((sp + 4) & 15) == 0.
22874 */
22875- sp = ((sp + 4) & -16ul) - 4;
22876+ sp = ((sp - 12) & -16ul) - 4;
22877 #else /* !CONFIG_X86_32 */
22878 sp = round_down(sp, 16) - 8;
22879 #endif
22880@@ -304,9 +304,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
22881 }
22882
22883 if (current->mm->context.vdso)
22884- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
22885+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
22886 else
22887- restorer = &frame->retcode;
22888+ restorer = (void __user *)&frame->retcode;
22889 if (ka->sa.sa_flags & SA_RESTORER)
22890 restorer = ka->sa.sa_restorer;
22891
22892@@ -320,7 +320,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
22893 * reasons and because gdb uses it as a signature to notice
22894 * signal handler stack frames.
22895 */
22896- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
22897+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
22898
22899 if (err)
22900 return -EFAULT;
22901@@ -367,7 +367,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
22902 err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
22903
22904 /* Set up to return from userspace. */
22905- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
22906+ if (current->mm->context.vdso)
22907+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
22908+ else
22909+ restorer = (void __user *)&frame->retcode;
22910 if (ka->sa.sa_flags & SA_RESTORER)
22911 restorer = ka->sa.sa_restorer;
22912 put_user_ex(restorer, &frame->pretcode);
22913@@ -379,7 +382,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
22914 * reasons and because gdb uses it as a signature to notice
22915 * signal handler stack frames.
22916 */
22917- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
22918+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
22919 } put_user_catch(err);
22920
22921 err |= copy_siginfo_to_user(&frame->info, info);
22922diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
22923index 48d2b7d..90d328a 100644
22924--- a/arch/x86/kernel/smp.c
22925+++ b/arch/x86/kernel/smp.c
22926@@ -285,7 +285,7 @@ static int __init nonmi_ipi_setup(char *str)
22927
22928 __setup("nonmi_ipi", nonmi_ipi_setup);
22929
22930-struct smp_ops smp_ops = {
22931+struct smp_ops smp_ops __read_only = {
22932 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
22933 .smp_prepare_cpus = native_smp_prepare_cpus,
22934 .smp_cpus_done = native_smp_cpus_done,
22935diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
22936index ed0fe38..87fc692 100644
22937--- a/arch/x86/kernel/smpboot.c
22938+++ b/arch/x86/kernel/smpboot.c
22939@@ -748,6 +748,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
22940 idle->thread.sp = (unsigned long) (((struct pt_regs *)
22941 (THREAD_SIZE + task_stack_page(idle))) - 1);
22942 per_cpu(current_task, cpu) = idle;
22943+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
22944
22945 #ifdef CONFIG_X86_32
22946 /* Stack for startup_32 can be just as for start_secondary onwards */
22947@@ -755,11 +756,13 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
22948 #else
22949 clear_tsk_thread_flag(idle, TIF_FORK);
22950 initial_gs = per_cpu_offset(cpu);
22951- per_cpu(kernel_stack, cpu) =
22952- (unsigned long)task_stack_page(idle) -
22953- KERNEL_STACK_OFFSET + THREAD_SIZE;
22954+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
22955 #endif
22956+
22957+ pax_open_kernel();
22958 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
22959+ pax_close_kernel();
22960+
22961 initial_code = (unsigned long)start_secondary;
22962 stack_start = idle->thread.sp;
22963
22964@@ -908,6 +911,15 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
22965 /* the FPU context is blank, nobody can own it */
22966 __cpu_disable_lazy_restore(cpu);
22967
22968+#ifdef CONFIG_PAX_PER_CPU_PGD
22969+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
22970+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22971+ KERNEL_PGD_PTRS);
22972+#endif
22973+
22974+ /* the FPU context is blank, nobody can own it */
22975+ __cpu_disable_lazy_restore(cpu);
22976+
22977 err = do_boot_cpu(apicid, cpu, tidle);
22978 if (err) {
22979 pr_debug("do_boot_cpu failed %d\n", err);
22980diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
22981index 9b4d51d..5d28b58 100644
22982--- a/arch/x86/kernel/step.c
22983+++ b/arch/x86/kernel/step.c
22984@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
22985 struct desc_struct *desc;
22986 unsigned long base;
22987
22988- seg &= ~7UL;
22989+ seg >>= 3;
22990
22991 mutex_lock(&child->mm->context.lock);
22992- if (unlikely((seg >> 3) >= child->mm->context.size))
22993+ if (unlikely(seg >= child->mm->context.size))
22994 addr = -1L; /* bogus selector, access would fault */
22995 else {
22996 desc = child->mm->context.ldt + seg;
22997@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
22998 addr += base;
22999 }
23000 mutex_unlock(&child->mm->context.lock);
23001- }
23002+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
23003+ addr = ktla_ktva(addr);
23004
23005 return addr;
23006 }
23007@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
23008 unsigned char opcode[15];
23009 unsigned long addr = convert_ip_to_linear(child, regs);
23010
23011+ if (addr == -EINVAL)
23012+ return 0;
23013+
23014 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
23015 for (i = 0; i < copied; i++) {
23016 switch (opcode[i]) {
23017diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
23018new file mode 100644
23019index 0000000..207bec6
23020--- /dev/null
23021+++ b/arch/x86/kernel/sys_i386_32.c
23022@@ -0,0 +1,250 @@
23023+/*
23024+ * This file contains various random system calls that
23025+ * have a non-standard calling sequence on the Linux/i386
23026+ * platform.
23027+ */
23028+
23029+#include <linux/errno.h>
23030+#include <linux/sched.h>
23031+#include <linux/mm.h>
23032+#include <linux/fs.h>
23033+#include <linux/smp.h>
23034+#include <linux/sem.h>
23035+#include <linux/msg.h>
23036+#include <linux/shm.h>
23037+#include <linux/stat.h>
23038+#include <linux/syscalls.h>
23039+#include <linux/mman.h>
23040+#include <linux/file.h>
23041+#include <linux/utsname.h>
23042+#include <linux/ipc.h>
23043+
23044+#include <linux/uaccess.h>
23045+#include <linux/unistd.h>
23046+
23047+#include <asm/syscalls.h>
23048+
23049+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
23050+{
23051+ unsigned long pax_task_size = TASK_SIZE;
23052+
23053+#ifdef CONFIG_PAX_SEGMEXEC
23054+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
23055+ pax_task_size = SEGMEXEC_TASK_SIZE;
23056+#endif
23057+
23058+ if (flags & MAP_FIXED)
23059+ if (len > pax_task_size || addr > pax_task_size - len)
23060+ return -EINVAL;
23061+
23062+ return 0;
23063+}
23064+
23065+unsigned long
23066+arch_get_unmapped_area(struct file *filp, unsigned long addr,
23067+ unsigned long len, unsigned long pgoff, unsigned long flags)
23068+{
23069+ struct mm_struct *mm = current->mm;
23070+ struct vm_area_struct *vma;
23071+ unsigned long start_addr, pax_task_size = TASK_SIZE;
23072+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
23073+
23074+#ifdef CONFIG_PAX_SEGMEXEC
23075+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
23076+ pax_task_size = SEGMEXEC_TASK_SIZE;
23077+#endif
23078+
23079+ pax_task_size -= PAGE_SIZE;
23080+
23081+ if (len > pax_task_size)
23082+ return -ENOMEM;
23083+
23084+ if (flags & MAP_FIXED)
23085+ return addr;
23086+
23087+#ifdef CONFIG_PAX_RANDMMAP
23088+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
23089+#endif
23090+
23091+ if (addr) {
23092+ addr = PAGE_ALIGN(addr);
23093+ if (pax_task_size - len >= addr) {
23094+ vma = find_vma(mm, addr);
23095+ if (check_heap_stack_gap(vma, addr, len, offset))
23096+ return addr;
23097+ }
23098+ }
23099+ if (len > mm->cached_hole_size) {
23100+ start_addr = addr = mm->free_area_cache;
23101+ } else {
23102+ start_addr = addr = mm->mmap_base;
23103+ mm->cached_hole_size = 0;
23104+ }
23105+
23106+#ifdef CONFIG_PAX_PAGEEXEC
23107+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
23108+ start_addr = 0x00110000UL;
23109+
23110+#ifdef CONFIG_PAX_RANDMMAP
23111+ if (mm->pax_flags & MF_PAX_RANDMMAP)
23112+ start_addr += mm->delta_mmap & 0x03FFF000UL;
23113+#endif
23114+
23115+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
23116+ start_addr = addr = mm->mmap_base;
23117+ else
23118+ addr = start_addr;
23119+ }
23120+#endif
23121+
23122+full_search:
23123+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
23124+ /* At this point: (!vma || addr < vma->vm_end). */
23125+ if (pax_task_size - len < addr) {
23126+ /*
23127+ * Start a new search - just in case we missed
23128+ * some holes.
23129+ */
23130+ if (start_addr != mm->mmap_base) {
23131+ start_addr = addr = mm->mmap_base;
23132+ mm->cached_hole_size = 0;
23133+ goto full_search;
23134+ }
23135+ return -ENOMEM;
23136+ }
23137+ if (check_heap_stack_gap(vma, addr, len, offset))
23138+ break;
23139+ if (addr + mm->cached_hole_size < vma->vm_start)
23140+ mm->cached_hole_size = vma->vm_start - addr;
23141+ addr = vma->vm_end;
23142+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
23143+ start_addr = addr = mm->mmap_base;
23144+ mm->cached_hole_size = 0;
23145+ goto full_search;
23146+ }
23147+ }
23148+
23149+ /*
23150+ * Remember the place where we stopped the search:
23151+ */
23152+ mm->free_area_cache = addr + len;
23153+ return addr;
23154+}
23155+
23156+unsigned long
23157+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
23158+ const unsigned long len, const unsigned long pgoff,
23159+ const unsigned long flags)
23160+{
23161+ struct vm_area_struct *vma;
23162+ struct mm_struct *mm = current->mm;
23163+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
23164+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
23165+
23166+#ifdef CONFIG_PAX_SEGMEXEC
23167+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
23168+ pax_task_size = SEGMEXEC_TASK_SIZE;
23169+#endif
23170+
23171+ pax_task_size -= PAGE_SIZE;
23172+
23173+ /* requested length too big for entire address space */
23174+ if (len > pax_task_size)
23175+ return -ENOMEM;
23176+
23177+ if (flags & MAP_FIXED)
23178+ return addr;
23179+
23180+#ifdef CONFIG_PAX_PAGEEXEC
23181+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
23182+ goto bottomup;
23183+#endif
23184+
23185+#ifdef CONFIG_PAX_RANDMMAP
23186+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
23187+#endif
23188+
23189+ /* requesting a specific address */
23190+ if (addr) {
23191+ addr = PAGE_ALIGN(addr);
23192+ if (pax_task_size - len >= addr) {
23193+ vma = find_vma(mm, addr);
23194+ if (check_heap_stack_gap(vma, addr, len, offset))
23195+ return addr;
23196+ }
23197+ }
23198+
23199+ /* check if free_area_cache is useful for us */
23200+ if (len <= mm->cached_hole_size) {
23201+ mm->cached_hole_size = 0;
23202+ mm->free_area_cache = mm->mmap_base;
23203+ }
23204+
23205+ /* either no address requested or can't fit in requested address hole */
23206+ addr = mm->free_area_cache;
23207+
23208+ /* make sure it can fit in the remaining address space */
23209+ if (addr > len) {
23210+ vma = find_vma(mm, addr-len);
23211+ if (check_heap_stack_gap(vma, addr - len, len, offset))
23212+ /* remember the address as a hint for next time */
23213+ return (mm->free_area_cache = addr-len);
23214+ }
23215+
23216+ if (mm->mmap_base < len)
23217+ goto bottomup;
23218+
23219+ addr = mm->mmap_base-len;
23220+
23221+ do {
23222+ /*
23223+ * Lookup failure means no vma is above this address,
23224+ * else if new region fits below vma->vm_start,
23225+ * return with success:
23226+ */
23227+ vma = find_vma(mm, addr);
23228+ if (check_heap_stack_gap(vma, addr, len, offset))
23229+ /* remember the address as a hint for next time */
23230+ return (mm->free_area_cache = addr);
23231+
23232+ /* remember the largest hole we saw so far */
23233+ if (addr + mm->cached_hole_size < vma->vm_start)
23234+ mm->cached_hole_size = vma->vm_start - addr;
23235+
23236+ /* try just below the current vma->vm_start */
23237+ addr = skip_heap_stack_gap(vma, len, offset);
23238+ } while (!IS_ERR_VALUE(addr));
23239+
23240+bottomup:
23241+ /*
23242+ * A failed mmap() very likely causes application failure,
23243+ * so fall back to the bottom-up function here. This scenario
23244+ * can happen with large stack limits and large mmap()
23245+ * allocations.
23246+ */
23247+
23248+#ifdef CONFIG_PAX_SEGMEXEC
23249+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
23250+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
23251+ else
23252+#endif
23253+
23254+ mm->mmap_base = TASK_UNMAPPED_BASE;
23255+
23256+#ifdef CONFIG_PAX_RANDMMAP
23257+ if (mm->pax_flags & MF_PAX_RANDMMAP)
23258+ mm->mmap_base += mm->delta_mmap;
23259+#endif
23260+
23261+ mm->free_area_cache = mm->mmap_base;
23262+ mm->cached_hole_size = ~0UL;
23263+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
23264+ /*
23265+ * Restore the topdown base:
23266+ */
23267+ mm->mmap_base = base;
23268+ mm->free_area_cache = base;
23269+ mm->cached_hole_size = ~0UL;
23270+
23271+ return addr;
23272+}
23273diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
23274index 97ef74b..57a1882 100644
23275--- a/arch/x86/kernel/sys_x86_64.c
23276+++ b/arch/x86/kernel/sys_x86_64.c
23277@@ -81,8 +81,8 @@ out:
23278 return error;
23279 }
23280
23281-static void find_start_end(unsigned long flags, unsigned long *begin,
23282- unsigned long *end)
23283+static void find_start_end(struct mm_struct *mm, unsigned long flags,
23284+ unsigned long *begin, unsigned long *end)
23285 {
23286 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
23287 unsigned long new_begin;
23288@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
23289 *begin = new_begin;
23290 }
23291 } else {
23292- *begin = TASK_UNMAPPED_BASE;
23293+ *begin = mm->mmap_base;
23294 *end = TASK_SIZE;
23295 }
23296 }
23297@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
23298 struct vm_area_struct *vma;
23299 struct vm_unmapped_area_info info;
23300 unsigned long begin, end;
23301+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
23302
23303 if (flags & MAP_FIXED)
23304 return addr;
23305
23306- find_start_end(flags, &begin, &end);
23307+ find_start_end(mm, flags, &begin, &end);
23308
23309 if (len > end)
23310 return -ENOMEM;
23311
23312+#ifdef CONFIG_PAX_RANDMMAP
23313+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
23314+#endif
23315+
23316 if (addr) {
23317 addr = PAGE_ALIGN(addr);
23318 vma = find_vma(mm, addr);
23319- if (end - len >= addr &&
23320- (!vma || addr + len <= vma->vm_start))
23321+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
23322 return addr;
23323 }
23324
23325@@ -161,6 +165,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
23326 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
23327 goto bottomup;
23328
23329+#ifdef CONFIG_PAX_RANDMMAP
23330+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
23331+#endif
23332+
23333 /* requesting a specific address */
23334 if (addr) {
23335 addr = PAGE_ALIGN(addr);
23336diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
23337index f84fe00..f41d9f1 100644
23338--- a/arch/x86/kernel/tboot.c
23339+++ b/arch/x86/kernel/tboot.c
23340@@ -220,7 +220,7 @@ static int tboot_setup_sleep(void)
23341
23342 void tboot_shutdown(u32 shutdown_type)
23343 {
23344- void (*shutdown)(void);
23345+ void (* __noreturn shutdown)(void);
23346
23347 if (!tboot_enabled())
23348 return;
23349@@ -242,7 +242,7 @@ void tboot_shutdown(u32 shutdown_type)
23350
23351 switch_to_tboot_pt();
23352
23353- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
23354+ shutdown = (void *)tboot->shutdown_entry;
23355 shutdown();
23356
23357 /* should not reach here */
23358@@ -300,7 +300,7 @@ static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
23359 return 0;
23360 }
23361
23362-static atomic_t ap_wfs_count;
23363+static atomic_unchecked_t ap_wfs_count;
23364
23365 static int tboot_wait_for_aps(int num_aps)
23366 {
23367@@ -324,16 +324,16 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
23368 {
23369 switch (action) {
23370 case CPU_DYING:
23371- atomic_inc(&ap_wfs_count);
23372+ atomic_inc_unchecked(&ap_wfs_count);
23373 if (num_online_cpus() == 1)
23374- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
23375+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
23376 return NOTIFY_BAD;
23377 break;
23378 }
23379 return NOTIFY_OK;
23380 }
23381
23382-static struct notifier_block tboot_cpu_notifier __cpuinitdata =
23383+static struct notifier_block tboot_cpu_notifier =
23384 {
23385 .notifier_call = tboot_cpu_callback,
23386 };
23387@@ -345,7 +345,7 @@ static __init int tboot_late_init(void)
23388
23389 tboot_create_trampoline();
23390
23391- atomic_set(&ap_wfs_count, 0);
23392+ atomic_set_unchecked(&ap_wfs_count, 0);
23393 register_hotcpu_notifier(&tboot_cpu_notifier);
23394
23395 acpi_os_set_prepare_sleep(&tboot_sleep);
23396diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
23397index 24d3c91..d06b473 100644
23398--- a/arch/x86/kernel/time.c
23399+++ b/arch/x86/kernel/time.c
23400@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
23401 {
23402 unsigned long pc = instruction_pointer(regs);
23403
23404- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
23405+ if (!user_mode(regs) && in_lock_functions(pc)) {
23406 #ifdef CONFIG_FRAME_POINTER
23407- return *(unsigned long *)(regs->bp + sizeof(long));
23408+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
23409 #else
23410 unsigned long *sp =
23411 (unsigned long *)kernel_stack_pointer(regs);
23412@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
23413 * or above a saved flags. Eflags has bits 22-31 zero,
23414 * kernel addresses don't.
23415 */
23416+
23417+#ifdef CONFIG_PAX_KERNEXEC
23418+ return ktla_ktva(sp[0]);
23419+#else
23420 if (sp[0] >> 22)
23421 return sp[0];
23422 if (sp[1] >> 22)
23423 return sp[1];
23424 #endif
23425+
23426+#endif
23427 }
23428 return pc;
23429 }
23430diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
23431index 9d9d2f9..cad418a 100644
23432--- a/arch/x86/kernel/tls.c
23433+++ b/arch/x86/kernel/tls.c
23434@@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
23435 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
23436 return -EINVAL;
23437
23438+#ifdef CONFIG_PAX_SEGMEXEC
23439+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
23440+ return -EINVAL;
23441+#endif
23442+
23443 set_tls_desc(p, idx, &info, 1);
23444
23445 return 0;
23446@@ -204,7 +209,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
23447
23448 if (kbuf)
23449 info = kbuf;
23450- else if (__copy_from_user(infobuf, ubuf, count))
23451+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
23452 return -EFAULT;
23453 else
23454 info = infobuf;
23455diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
23456index ecffca1..95c4d13 100644
23457--- a/arch/x86/kernel/traps.c
23458+++ b/arch/x86/kernel/traps.c
23459@@ -68,12 +68,6 @@
23460 #include <asm/setup.h>
23461
23462 asmlinkage int system_call(void);
23463-
23464-/*
23465- * The IDT has to be page-aligned to simplify the Pentium
23466- * F0 0F bug workaround.
23467- */
23468-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
23469 #endif
23470
23471 DECLARE_BITMAP(used_vectors, NR_VECTORS);
23472@@ -106,11 +100,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
23473 }
23474
23475 static int __kprobes
23476-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
23477+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
23478 struct pt_regs *regs, long error_code)
23479 {
23480 #ifdef CONFIG_X86_32
23481- if (regs->flags & X86_VM_MASK) {
23482+ if (v8086_mode(regs)) {
23483 /*
23484 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
23485 * On nmi (interrupt 2), do_trap should not be called.
23486@@ -123,12 +117,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
23487 return -1;
23488 }
23489 #endif
23490- if (!user_mode(regs)) {
23491+ if (!user_mode_novm(regs)) {
23492 if (!fixup_exception(regs)) {
23493 tsk->thread.error_code = error_code;
23494 tsk->thread.trap_nr = trapnr;
23495+
23496+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23497+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
23498+ str = "PAX: suspicious stack segment fault";
23499+#endif
23500+
23501 die(str, regs, error_code);
23502 }
23503+
23504+#ifdef CONFIG_PAX_REFCOUNT
23505+ if (trapnr == 4)
23506+ pax_report_refcount_overflow(regs);
23507+#endif
23508+
23509 return 0;
23510 }
23511
23512@@ -136,7 +142,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
23513 }
23514
23515 static void __kprobes
23516-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
23517+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
23518 long error_code, siginfo_t *info)
23519 {
23520 struct task_struct *tsk = current;
23521@@ -160,7 +166,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
23522 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
23523 printk_ratelimit()) {
23524 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
23525- tsk->comm, tsk->pid, str,
23526+ tsk->comm, task_pid_nr(tsk), str,
23527 regs->ip, regs->sp, error_code);
23528 print_vma_addr(" in ", regs->ip);
23529 pr_cont("\n");
23530@@ -266,7 +272,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
23531 conditional_sti(regs);
23532
23533 #ifdef CONFIG_X86_32
23534- if (regs->flags & X86_VM_MASK) {
23535+ if (v8086_mode(regs)) {
23536 local_irq_enable();
23537 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
23538 goto exit;
23539@@ -274,18 +280,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
23540 #endif
23541
23542 tsk = current;
23543- if (!user_mode(regs)) {
23544+ if (!user_mode_novm(regs)) {
23545 if (fixup_exception(regs))
23546 goto exit;
23547
23548 tsk->thread.error_code = error_code;
23549 tsk->thread.trap_nr = X86_TRAP_GP;
23550 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
23551- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
23552+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
23553+
23554+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23555+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
23556+ die("PAX: suspicious general protection fault", regs, error_code);
23557+ else
23558+#endif
23559+
23560 die("general protection fault", regs, error_code);
23561+ }
23562 goto exit;
23563 }
23564
23565+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23566+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
23567+ struct mm_struct *mm = tsk->mm;
23568+ unsigned long limit;
23569+
23570+ down_write(&mm->mmap_sem);
23571+ limit = mm->context.user_cs_limit;
23572+ if (limit < TASK_SIZE) {
23573+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
23574+ up_write(&mm->mmap_sem);
23575+ return;
23576+ }
23577+ up_write(&mm->mmap_sem);
23578+ }
23579+#endif
23580+
23581 tsk->thread.error_code = error_code;
23582 tsk->thread.trap_nr = X86_TRAP_GP;
23583
23584@@ -440,7 +470,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
23585 /* It's safe to allow irq's after DR6 has been saved */
23586 preempt_conditional_sti(regs);
23587
23588- if (regs->flags & X86_VM_MASK) {
23589+ if (v8086_mode(regs)) {
23590 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
23591 X86_TRAP_DB);
23592 preempt_conditional_cli(regs);
23593@@ -455,7 +485,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
23594 * We already checked v86 mode above, so we can check for kernel mode
23595 * by just checking the CPL of CS.
23596 */
23597- if ((dr6 & DR_STEP) && !user_mode(regs)) {
23598+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
23599 tsk->thread.debugreg6 &= ~DR_STEP;
23600 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
23601 regs->flags &= ~X86_EFLAGS_TF;
23602@@ -487,7 +517,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
23603 return;
23604 conditional_sti(regs);
23605
23606- if (!user_mode_vm(regs))
23607+ if (!user_mode(regs))
23608 {
23609 if (!fixup_exception(regs)) {
23610 task->thread.error_code = error_code;
23611diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
23612index c71025b..b117501 100644
23613--- a/arch/x86/kernel/uprobes.c
23614+++ b/arch/x86/kernel/uprobes.c
23615@@ -629,7 +629,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
23616 int ret = NOTIFY_DONE;
23617
23618 /* We are only interested in userspace traps */
23619- if (regs && !user_mode_vm(regs))
23620+ if (regs && !user_mode(regs))
23621 return NOTIFY_DONE;
23622
23623 switch (val) {
23624diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
23625index b9242ba..50c5edd 100644
23626--- a/arch/x86/kernel/verify_cpu.S
23627+++ b/arch/x86/kernel/verify_cpu.S
23628@@ -20,6 +20,7 @@
23629 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
23630 * arch/x86/kernel/trampoline_64.S: secondary processor verification
23631 * arch/x86/kernel/head_32.S: processor startup
23632+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
23633 *
23634 * verify_cpu, returns the status of longmode and SSE in register %eax.
23635 * 0: Success 1: Failure
23636diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
23637index 1dfe69c..a3df6f6 100644
23638--- a/arch/x86/kernel/vm86_32.c
23639+++ b/arch/x86/kernel/vm86_32.c
23640@@ -43,6 +43,7 @@
23641 #include <linux/ptrace.h>
23642 #include <linux/audit.h>
23643 #include <linux/stddef.h>
23644+#include <linux/grsecurity.h>
23645
23646 #include <asm/uaccess.h>
23647 #include <asm/io.h>
23648@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
23649 do_exit(SIGSEGV);
23650 }
23651
23652- tss = &per_cpu(init_tss, get_cpu());
23653+ tss = init_tss + get_cpu();
23654 current->thread.sp0 = current->thread.saved_sp0;
23655 current->thread.sysenter_cs = __KERNEL_CS;
23656 load_sp0(tss, &current->thread);
23657@@ -212,6 +213,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
23658 struct task_struct *tsk;
23659 int tmp, ret = -EPERM;
23660
23661+#ifdef CONFIG_GRKERNSEC_VM86
23662+ if (!capable(CAP_SYS_RAWIO)) {
23663+ gr_handle_vm86();
23664+ goto out;
23665+ }
23666+#endif
23667+
23668 tsk = current;
23669 if (tsk->thread.saved_sp0)
23670 goto out;
23671@@ -242,6 +250,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
23672 int tmp, ret;
23673 struct vm86plus_struct __user *v86;
23674
23675+#ifdef CONFIG_GRKERNSEC_VM86
23676+ if (!capable(CAP_SYS_RAWIO)) {
23677+ gr_handle_vm86();
23678+ ret = -EPERM;
23679+ goto out;
23680+ }
23681+#endif
23682+
23683 tsk = current;
23684 switch (cmd) {
23685 case VM86_REQUEST_IRQ:
23686@@ -328,7 +344,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
23687 tsk->thread.saved_fs = info->regs32->fs;
23688 tsk->thread.saved_gs = get_user_gs(info->regs32);
23689
23690- tss = &per_cpu(init_tss, get_cpu());
23691+ tss = init_tss + get_cpu();
23692 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
23693 if (cpu_has_sep)
23694 tsk->thread.sysenter_cs = 0;
23695@@ -535,7 +551,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
23696 goto cannot_handle;
23697 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
23698 goto cannot_handle;
23699- intr_ptr = (unsigned long __user *) (i << 2);
23700+ intr_ptr = (__force unsigned long __user *) (i << 2);
23701 if (get_user(segoffs, intr_ptr))
23702 goto cannot_handle;
23703 if ((segoffs >> 16) == BIOSSEG)
23704diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
23705index 22a1530..8fbaaad 100644
23706--- a/arch/x86/kernel/vmlinux.lds.S
23707+++ b/arch/x86/kernel/vmlinux.lds.S
23708@@ -26,6 +26,13 @@
23709 #include <asm/page_types.h>
23710 #include <asm/cache.h>
23711 #include <asm/boot.h>
23712+#include <asm/segment.h>
23713+
23714+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23715+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
23716+#else
23717+#define __KERNEL_TEXT_OFFSET 0
23718+#endif
23719
23720 #undef i386 /* in case the preprocessor is a 32bit one */
23721
23722@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
23723
23724 PHDRS {
23725 text PT_LOAD FLAGS(5); /* R_E */
23726+#ifdef CONFIG_X86_32
23727+ module PT_LOAD FLAGS(5); /* R_E */
23728+#endif
23729+#ifdef CONFIG_XEN
23730+ rodata PT_LOAD FLAGS(5); /* R_E */
23731+#else
23732+ rodata PT_LOAD FLAGS(4); /* R__ */
23733+#endif
23734 data PT_LOAD FLAGS(6); /* RW_ */
23735-#ifdef CONFIG_X86_64
23736+ init.begin PT_LOAD FLAGS(6); /* RW_ */
23737 #ifdef CONFIG_SMP
23738 percpu PT_LOAD FLAGS(6); /* RW_ */
23739 #endif
23740+ text.init PT_LOAD FLAGS(5); /* R_E */
23741+ text.exit PT_LOAD FLAGS(5); /* R_E */
23742 init PT_LOAD FLAGS(7); /* RWE */
23743-#endif
23744 note PT_NOTE FLAGS(0); /* ___ */
23745 }
23746
23747 SECTIONS
23748 {
23749 #ifdef CONFIG_X86_32
23750- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
23751- phys_startup_32 = startup_32 - LOAD_OFFSET;
23752+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
23753 #else
23754- . = __START_KERNEL;
23755- phys_startup_64 = startup_64 - LOAD_OFFSET;
23756+ . = __START_KERNEL;
23757 #endif
23758
23759 /* Text and read-only data */
23760- .text : AT(ADDR(.text) - LOAD_OFFSET) {
23761- _text = .;
23762+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
23763 /* bootstrapping code */
23764+#ifdef CONFIG_X86_32
23765+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
23766+#else
23767+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
23768+#endif
23769+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
23770+ _text = .;
23771 HEAD_TEXT
23772 #ifdef CONFIG_X86_32
23773 . = ALIGN(PAGE_SIZE);
23774@@ -108,13 +128,48 @@ SECTIONS
23775 IRQENTRY_TEXT
23776 *(.fixup)
23777 *(.gnu.warning)
23778- /* End of text section */
23779- _etext = .;
23780 } :text = 0x9090
23781
23782- NOTES :text :note
23783+ . += __KERNEL_TEXT_OFFSET;
23784
23785- EXCEPTION_TABLE(16) :text = 0x9090
23786+#ifdef CONFIG_X86_32
23787+ . = ALIGN(PAGE_SIZE);
23788+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
23789+
23790+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
23791+ MODULES_EXEC_VADDR = .;
23792+ BYTE(0)
23793+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
23794+ . = ALIGN(HPAGE_SIZE) - 1;
23795+ MODULES_EXEC_END = .;
23796+#endif
23797+
23798+ } :module
23799+#endif
23800+
23801+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
23802+ /* End of text section */
23803+ BYTE(0)
23804+ _etext = . - __KERNEL_TEXT_OFFSET;
23805+ }
23806+
23807+#ifdef CONFIG_X86_32
23808+ . = ALIGN(PAGE_SIZE);
23809+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
23810+ *(.idt)
23811+ . = ALIGN(PAGE_SIZE);
23812+ *(.empty_zero_page)
23813+ *(.initial_pg_fixmap)
23814+ *(.initial_pg_pmd)
23815+ *(.initial_page_table)
23816+ *(.swapper_pg_dir)
23817+ } :rodata
23818+#endif
23819+
23820+ . = ALIGN(PAGE_SIZE);
23821+ NOTES :rodata :note
23822+
23823+ EXCEPTION_TABLE(16) :rodata
23824
23825 #if defined(CONFIG_DEBUG_RODATA)
23826 /* .text should occupy whole number of pages */
23827@@ -126,16 +181,20 @@ SECTIONS
23828
23829 /* Data */
23830 .data : AT(ADDR(.data) - LOAD_OFFSET) {
23831+
23832+#ifdef CONFIG_PAX_KERNEXEC
23833+ . = ALIGN(HPAGE_SIZE);
23834+#else
23835+ . = ALIGN(PAGE_SIZE);
23836+#endif
23837+
23838 /* Start of data section */
23839 _sdata = .;
23840
23841 /* init_task */
23842 INIT_TASK_DATA(THREAD_SIZE)
23843
23844-#ifdef CONFIG_X86_32
23845- /* 32 bit has nosave before _edata */
23846 NOSAVE_DATA
23847-#endif
23848
23849 PAGE_ALIGNED_DATA(PAGE_SIZE)
23850
23851@@ -176,12 +235,19 @@ SECTIONS
23852 #endif /* CONFIG_X86_64 */
23853
23854 /* Init code and data - will be freed after init */
23855- . = ALIGN(PAGE_SIZE);
23856 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
23857+ BYTE(0)
23858+
23859+#ifdef CONFIG_PAX_KERNEXEC
23860+ . = ALIGN(HPAGE_SIZE);
23861+#else
23862+ . = ALIGN(PAGE_SIZE);
23863+#endif
23864+
23865 __init_begin = .; /* paired with __init_end */
23866- }
23867+ } :init.begin
23868
23869-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
23870+#ifdef CONFIG_SMP
23871 /*
23872 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
23873 * output PHDR, so the next output section - .init.text - should
23874@@ -190,12 +256,27 @@ SECTIONS
23875 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
23876 #endif
23877
23878- INIT_TEXT_SECTION(PAGE_SIZE)
23879-#ifdef CONFIG_X86_64
23880- :init
23881-#endif
23882+ . = ALIGN(PAGE_SIZE);
23883+ init_begin = .;
23884+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
23885+ VMLINUX_SYMBOL(_sinittext) = .;
23886+ INIT_TEXT
23887+ VMLINUX_SYMBOL(_einittext) = .;
23888+ . = ALIGN(PAGE_SIZE);
23889+ } :text.init
23890
23891- INIT_DATA_SECTION(16)
23892+ /*
23893+ * .exit.text is discard at runtime, not link time, to deal with
23894+ * references from .altinstructions and .eh_frame
23895+ */
23896+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
23897+ EXIT_TEXT
23898+ . = ALIGN(16);
23899+ } :text.exit
23900+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
23901+
23902+ . = ALIGN(PAGE_SIZE);
23903+ INIT_DATA_SECTION(16) :init
23904
23905 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
23906 __x86_cpu_dev_start = .;
23907@@ -257,19 +338,12 @@ SECTIONS
23908 }
23909
23910 . = ALIGN(8);
23911- /*
23912- * .exit.text is discard at runtime, not link time, to deal with
23913- * references from .altinstructions and .eh_frame
23914- */
23915- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
23916- EXIT_TEXT
23917- }
23918
23919 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
23920 EXIT_DATA
23921 }
23922
23923-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
23924+#ifndef CONFIG_SMP
23925 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
23926 #endif
23927
23928@@ -288,16 +362,10 @@ SECTIONS
23929 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
23930 __smp_locks = .;
23931 *(.smp_locks)
23932- . = ALIGN(PAGE_SIZE);
23933 __smp_locks_end = .;
23934+ . = ALIGN(PAGE_SIZE);
23935 }
23936
23937-#ifdef CONFIG_X86_64
23938- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
23939- NOSAVE_DATA
23940- }
23941-#endif
23942-
23943 /* BSS */
23944 . = ALIGN(PAGE_SIZE);
23945 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
23946@@ -313,6 +381,7 @@ SECTIONS
23947 __brk_base = .;
23948 . += 64 * 1024; /* 64k alignment slop space */
23949 *(.brk_reservation) /* areas brk users have reserved */
23950+ . = ALIGN(HPAGE_SIZE);
23951 __brk_limit = .;
23952 }
23953
23954@@ -339,13 +408,12 @@ SECTIONS
23955 * for the boot processor.
23956 */
23957 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
23958-INIT_PER_CPU(gdt_page);
23959 INIT_PER_CPU(irq_stack_union);
23960
23961 /*
23962 * Build-time check on the image size:
23963 */
23964-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
23965+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
23966 "kernel image bigger than KERNEL_IMAGE_SIZE");
23967
23968 #ifdef CONFIG_SMP
23969diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
23970index 9a907a6..f83f921 100644
23971--- a/arch/x86/kernel/vsyscall_64.c
23972+++ b/arch/x86/kernel/vsyscall_64.c
23973@@ -56,15 +56,13 @@
23974 DEFINE_VVAR(int, vgetcpu_mode);
23975 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
23976
23977-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
23978+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
23979
23980 static int __init vsyscall_setup(char *str)
23981 {
23982 if (str) {
23983 if (!strcmp("emulate", str))
23984 vsyscall_mode = EMULATE;
23985- else if (!strcmp("native", str))
23986- vsyscall_mode = NATIVE;
23987 else if (!strcmp("none", str))
23988 vsyscall_mode = NONE;
23989 else
23990@@ -323,8 +321,7 @@ do_ret:
23991 return true;
23992
23993 sigsegv:
23994- force_sig(SIGSEGV, current);
23995- return true;
23996+ do_group_exit(SIGKILL);
23997 }
23998
23999 /*
24000@@ -377,10 +374,7 @@ void __init map_vsyscall(void)
24001 extern char __vvar_page;
24002 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
24003
24004- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
24005- vsyscall_mode == NATIVE
24006- ? PAGE_KERNEL_VSYSCALL
24007- : PAGE_KERNEL_VVAR);
24008+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
24009 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
24010 (unsigned long)VSYSCALL_START);
24011
24012diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
24013index 1330dd1..d220b99 100644
24014--- a/arch/x86/kernel/x8664_ksyms_64.c
24015+++ b/arch/x86/kernel/x8664_ksyms_64.c
24016@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
24017 EXPORT_SYMBOL(copy_user_generic_unrolled);
24018 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
24019 EXPORT_SYMBOL(__copy_user_nocache);
24020-EXPORT_SYMBOL(_copy_from_user);
24021-EXPORT_SYMBOL(_copy_to_user);
24022
24023 EXPORT_SYMBOL(copy_page);
24024 EXPORT_SYMBOL(clear_page);
24025diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
24026index 7a3d075..6cb373d 100644
24027--- a/arch/x86/kernel/x86_init.c
24028+++ b/arch/x86/kernel/x86_init.c
24029@@ -88,7 +88,7 @@ struct x86_init_ops x86_init __initdata = {
24030 },
24031 };
24032
24033-struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
24034+struct x86_cpuinit_ops x86_cpuinit __cpuinitconst = {
24035 .early_percpu_clock_init = x86_init_noop,
24036 .setup_percpu_clockev = setup_secondary_APIC_clock,
24037 };
24038@@ -96,7 +96,7 @@ struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
24039 static void default_nmi_init(void) { };
24040 static int default_i8042_detect(void) { return 1; };
24041
24042-struct x86_platform_ops x86_platform = {
24043+struct x86_platform_ops x86_platform __read_only = {
24044 .calibrate_tsc = native_calibrate_tsc,
24045 .get_wallclock = mach_get_cmos_time,
24046 .set_wallclock = mach_set_rtc_mmss,
24047@@ -110,14 +110,14 @@ struct x86_platform_ops x86_platform = {
24048 };
24049
24050 EXPORT_SYMBOL_GPL(x86_platform);
24051-struct x86_msi_ops x86_msi = {
24052+struct x86_msi_ops x86_msi __read_only = {
24053 .setup_msi_irqs = native_setup_msi_irqs,
24054 .teardown_msi_irq = native_teardown_msi_irq,
24055 .teardown_msi_irqs = default_teardown_msi_irqs,
24056 .restore_msi_irqs = default_restore_msi_irqs,
24057 };
24058
24059-struct x86_io_apic_ops x86_io_apic_ops = {
24060+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
24061 .init = native_io_apic_init_mappings,
24062 .read = native_io_apic_read,
24063 .write = native_io_apic_write,
24064diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
24065index ada87a3..afea76d 100644
24066--- a/arch/x86/kernel/xsave.c
24067+++ b/arch/x86/kernel/xsave.c
24068@@ -199,6 +199,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
24069 {
24070 int err;
24071
24072+ buf = (struct xsave_struct __user *)____m(buf);
24073 if (use_xsave())
24074 err = xsave_user(buf);
24075 else if (use_fxsr())
24076@@ -311,6 +312,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
24077 */
24078 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
24079 {
24080+ buf = (void __user *)____m(buf);
24081 if (use_xsave()) {
24082 if ((unsigned long)buf % 64 || fx_only) {
24083 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
24084diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
24085index a20ecb5..d0e2194 100644
24086--- a/arch/x86/kvm/cpuid.c
24087+++ b/arch/x86/kvm/cpuid.c
24088@@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
24089 struct kvm_cpuid2 *cpuid,
24090 struct kvm_cpuid_entry2 __user *entries)
24091 {
24092- int r;
24093+ int r, i;
24094
24095 r = -E2BIG;
24096 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
24097 goto out;
24098 r = -EFAULT;
24099- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
24100- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
24101+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
24102 goto out;
24103+ for (i = 0; i < cpuid->nent; ++i) {
24104+ struct kvm_cpuid_entry2 cpuid_entry;
24105+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
24106+ goto out;
24107+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
24108+ }
24109 vcpu->arch.cpuid_nent = cpuid->nent;
24110 kvm_apic_set_version(vcpu);
24111 kvm_x86_ops->cpuid_update(vcpu);
24112@@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
24113 struct kvm_cpuid2 *cpuid,
24114 struct kvm_cpuid_entry2 __user *entries)
24115 {
24116- int r;
24117+ int r, i;
24118
24119 r = -E2BIG;
24120 if (cpuid->nent < vcpu->arch.cpuid_nent)
24121 goto out;
24122 r = -EFAULT;
24123- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
24124- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
24125+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
24126 goto out;
24127+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
24128+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
24129+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
24130+ goto out;
24131+ }
24132 return 0;
24133
24134 out:
24135diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
24136index a27e763..54bfe43 100644
24137--- a/arch/x86/kvm/emulate.c
24138+++ b/arch/x86/kvm/emulate.c
24139@@ -292,6 +292,7 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
24140
24141 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
24142 do { \
24143+ unsigned long _tmp; \
24144 __asm__ __volatile__ ( \
24145 _PRE_EFLAGS("0", "4", "2") \
24146 _op _suffix " %"_x"3,%1; " \
24147@@ -306,8 +307,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
24148 /* Raw emulation: instruction has two explicit operands. */
24149 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
24150 do { \
24151- unsigned long _tmp; \
24152- \
24153 switch ((ctxt)->dst.bytes) { \
24154 case 2: \
24155 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
24156@@ -323,7 +322,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
24157
24158 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
24159 do { \
24160- unsigned long _tmp; \
24161 switch ((ctxt)->dst.bytes) { \
24162 case 1: \
24163 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
24164diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
24165index 9392f52..0e56d77 100644
24166--- a/arch/x86/kvm/lapic.c
24167+++ b/arch/x86/kvm/lapic.c
24168@@ -55,7 +55,7 @@
24169 #define APIC_BUS_CYCLE_NS 1
24170
24171 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
24172-#define apic_debug(fmt, arg...)
24173+#define apic_debug(fmt, arg...) do {} while (0)
24174
24175 #define APIC_LVT_NUM 6
24176 /* 14 is the version for Xeon and Pentium 8.4.8*/
24177diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
24178index 891eb6d..e027900 100644
24179--- a/arch/x86/kvm/paging_tmpl.h
24180+++ b/arch/x86/kvm/paging_tmpl.h
24181@@ -208,7 +208,7 @@ retry_walk:
24182 if (unlikely(kvm_is_error_hva(host_addr)))
24183 goto error;
24184
24185- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
24186+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
24187 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
24188 goto error;
24189 walker->ptep_user[walker->level - 1] = ptep_user;
24190diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
24191index d29d3cd..ec9d522 100644
24192--- a/arch/x86/kvm/svm.c
24193+++ b/arch/x86/kvm/svm.c
24194@@ -3507,7 +3507,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
24195 int cpu = raw_smp_processor_id();
24196
24197 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
24198+
24199+ pax_open_kernel();
24200 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
24201+ pax_close_kernel();
24202+
24203 load_TR_desc();
24204 }
24205
24206@@ -3881,6 +3885,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
24207 #endif
24208 #endif
24209
24210+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
24211+ __set_fs(current_thread_info()->addr_limit);
24212+#endif
24213+
24214 reload_tss(vcpu);
24215
24216 local_irq_disable();
24217diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
24218index 9120ae1..238abc0 100644
24219--- a/arch/x86/kvm/vmx.c
24220+++ b/arch/x86/kvm/vmx.c
24221@@ -1370,7 +1370,11 @@ static void reload_tss(void)
24222 struct desc_struct *descs;
24223
24224 descs = (void *)gdt->address;
24225+
24226+ pax_open_kernel();
24227 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
24228+ pax_close_kernel();
24229+
24230 load_TR_desc();
24231 }
24232
24233@@ -1594,6 +1598,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
24234 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
24235 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
24236
24237+#ifdef CONFIG_PAX_PER_CPU_PGD
24238+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
24239+#endif
24240+
24241 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
24242 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
24243 vmx->loaded_vmcs->cpu = cpu;
24244@@ -2738,8 +2746,11 @@ static __init int hardware_setup(void)
24245 if (!cpu_has_vmx_flexpriority())
24246 flexpriority_enabled = 0;
24247
24248- if (!cpu_has_vmx_tpr_shadow())
24249- kvm_x86_ops->update_cr8_intercept = NULL;
24250+ if (!cpu_has_vmx_tpr_shadow()) {
24251+ pax_open_kernel();
24252+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
24253+ pax_close_kernel();
24254+ }
24255
24256 if (enable_ept && !cpu_has_vmx_ept_2m_page())
24257 kvm_disable_largepages();
24258@@ -3782,7 +3793,10 @@ static void vmx_set_constant_host_state(void)
24259
24260 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
24261 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
24262+
24263+#ifndef CONFIG_PAX_PER_CPU_PGD
24264 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
24265+#endif
24266
24267 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
24268 #ifdef CONFIG_X86_64
24269@@ -3803,7 +3817,7 @@ static void vmx_set_constant_host_state(void)
24270 native_store_idt(&dt);
24271 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
24272
24273- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
24274+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
24275
24276 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
24277 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
24278@@ -6355,6 +6369,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
24279 "jmp 2f \n\t"
24280 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
24281 "2: "
24282+
24283+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
24284+ "ljmp %[cs],$3f\n\t"
24285+ "3: "
24286+#endif
24287+
24288 /* Save guest registers, load host registers, keep flags */
24289 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
24290 "pop %0 \n\t"
24291@@ -6407,6 +6427,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
24292 #endif
24293 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
24294 [wordsize]"i"(sizeof(ulong))
24295+
24296+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
24297+ ,[cs]"i"(__KERNEL_CS)
24298+#endif
24299+
24300 : "cc", "memory"
24301 #ifdef CONFIG_X86_64
24302 , "rax", "rbx", "rdi", "rsi"
24303@@ -6420,7 +6445,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
24304 if (debugctlmsr)
24305 update_debugctlmsr(debugctlmsr);
24306
24307-#ifndef CONFIG_X86_64
24308+#ifdef CONFIG_X86_32
24309 /*
24310 * The sysexit path does not restore ds/es, so we must set them to
24311 * a reasonable value ourselves.
24312@@ -6429,8 +6454,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
24313 * may be executed in interrupt context, which saves and restore segments
24314 * around it, nullifying its effect.
24315 */
24316- loadsegment(ds, __USER_DS);
24317- loadsegment(es, __USER_DS);
24318+ loadsegment(ds, __KERNEL_DS);
24319+ loadsegment(es, __KERNEL_DS);
24320+ loadsegment(ss, __KERNEL_DS);
24321+
24322+#ifdef CONFIG_PAX_KERNEXEC
24323+ loadsegment(fs, __KERNEL_PERCPU);
24324+#endif
24325+
24326+#ifdef CONFIG_PAX_MEMORY_UDEREF
24327+ __set_fs(current_thread_info()->addr_limit);
24328+#endif
24329+
24330 #endif
24331
24332 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
24333diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
24334index c243b81..b692af3 100644
24335--- a/arch/x86/kvm/x86.c
24336+++ b/arch/x86/kvm/x86.c
24337@@ -1408,10 +1408,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
24338 unsigned long flags, this_tsc_khz;
24339 struct kvm_vcpu_arch *vcpu = &v->arch;
24340 struct kvm_arch *ka = &v->kvm->arch;
24341- void *shared_kaddr;
24342 s64 kernel_ns, max_kernel_ns;
24343 u64 tsc_timestamp, host_tsc;
24344- struct pvclock_vcpu_time_info *guest_hv_clock;
24345+ struct pvclock_vcpu_time_info guest_hv_clock;
24346 u8 pvclock_flags;
24347 bool use_master_clock;
24348
24349@@ -1465,7 +1464,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
24350
24351 local_irq_restore(flags);
24352
24353- if (!vcpu->time_page)
24354+ if (!vcpu->pv_time_enabled)
24355 return 0;
24356
24357 /*
24358@@ -1527,12 +1526,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
24359 */
24360 vcpu->hv_clock.version += 2;
24361
24362- shared_kaddr = kmap_atomic(vcpu->time_page);
24363-
24364- guest_hv_clock = shared_kaddr + vcpu->time_offset;
24365+ if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
24366+ &guest_hv_clock, sizeof(guest_hv_clock))))
24367+ return 0;
24368
24369 /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
24370- pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
24371+ pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
24372
24373 if (vcpu->pvclock_set_guest_stopped_request) {
24374 pvclock_flags |= PVCLOCK_GUEST_STOPPED;
24375@@ -1545,12 +1544,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
24376
24377 vcpu->hv_clock.flags = pvclock_flags;
24378
24379- memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
24380- sizeof(vcpu->hv_clock));
24381-
24382- kunmap_atomic(shared_kaddr);
24383-
24384- mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
24385+ kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
24386+ &vcpu->hv_clock,
24387+ sizeof(vcpu->hv_clock));
24388 return 0;
24389 }
24390
24391@@ -1692,8 +1688,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
24392 {
24393 struct kvm *kvm = vcpu->kvm;
24394 int lm = is_long_mode(vcpu);
24395- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
24396- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
24397+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
24398+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
24399 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
24400 : kvm->arch.xen_hvm_config.blob_size_32;
24401 u32 page_num = data & ~PAGE_MASK;
24402@@ -1839,10 +1835,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
24403
24404 static void kvmclock_reset(struct kvm_vcpu *vcpu)
24405 {
24406- if (vcpu->arch.time_page) {
24407- kvm_release_page_dirty(vcpu->arch.time_page);
24408- vcpu->arch.time_page = NULL;
24409- }
24410+ vcpu->arch.pv_time_enabled = false;
24411 }
24412
24413 static void accumulate_steal_time(struct kvm_vcpu *vcpu)
24414@@ -1948,6 +1941,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
24415 break;
24416 case MSR_KVM_SYSTEM_TIME_NEW:
24417 case MSR_KVM_SYSTEM_TIME: {
24418+ u64 gpa_offset;
24419 kvmclock_reset(vcpu);
24420
24421 vcpu->arch.time = data;
24422@@ -1957,14 +1951,17 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
24423 if (!(data & 1))
24424 break;
24425
24426- /* ...but clean it before doing the actual write */
24427- vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
24428+ gpa_offset = data & ~(PAGE_MASK | 1);
24429
24430- vcpu->arch.time_page =
24431- gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
24432+ /* Check that the address is 32-byte aligned. */
24433+ if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
24434+ break;
24435
24436- if (is_error_page(vcpu->arch.time_page))
24437- vcpu->arch.time_page = NULL;
24438+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
24439+ &vcpu->arch.pv_time, data & ~1ULL))
24440+ vcpu->arch.pv_time_enabled = false;
24441+ else
24442+ vcpu->arch.pv_time_enabled = true;
24443
24444 break;
24445 }
24446@@ -2571,6 +2568,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
24447 if (n < msr_list.nmsrs)
24448 goto out;
24449 r = -EFAULT;
24450+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
24451+ goto out;
24452 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
24453 num_msrs_to_save * sizeof(u32)))
24454 goto out;
24455@@ -2700,7 +2699,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
24456 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
24457 struct kvm_interrupt *irq)
24458 {
24459- if (irq->irq < 0 || irq->irq >= KVM_NR_INTERRUPTS)
24460+ if (irq->irq >= KVM_NR_INTERRUPTS)
24461 return -EINVAL;
24462 if (irqchip_in_kernel(vcpu->kvm))
24463 return -ENXIO;
24464@@ -2967,7 +2966,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
24465 */
24466 static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
24467 {
24468- if (!vcpu->arch.time_page)
24469+ if (!vcpu->arch.pv_time_enabled)
24470 return -EINVAL;
24471 vcpu->arch.pvclock_set_guest_stopped_request = true;
24472 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
24473@@ -5213,7 +5212,7 @@ static struct notifier_block pvclock_gtod_notifier = {
24474 };
24475 #endif
24476
24477-int kvm_arch_init(void *opaque)
24478+int kvm_arch_init(const void *opaque)
24479 {
24480 int r;
24481 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
24482@@ -6661,6 +6660,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
24483 goto fail_free_wbinvd_dirty_mask;
24484
24485 vcpu->arch.ia32_tsc_adjust_msr = 0x0;
24486+ vcpu->arch.pv_time_enabled = false;
24487 kvm_async_pf_hash_reset(vcpu);
24488 kvm_pmu_init(vcpu);
24489
24490diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
24491index df4176c..23ce092 100644
24492--- a/arch/x86/lguest/boot.c
24493+++ b/arch/x86/lguest/boot.c
24494@@ -1200,9 +1200,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
24495 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
24496 * Launcher to reboot us.
24497 */
24498-static void lguest_restart(char *reason)
24499+static __noreturn void lguest_restart(char *reason)
24500 {
24501 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
24502+ BUG();
24503 }
24504
24505 /*G:050
24506diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
24507index 00933d5..3a64af9 100644
24508--- a/arch/x86/lib/atomic64_386_32.S
24509+++ b/arch/x86/lib/atomic64_386_32.S
24510@@ -48,6 +48,10 @@ BEGIN(read)
24511 movl (v), %eax
24512 movl 4(v), %edx
24513 RET_ENDP
24514+BEGIN(read_unchecked)
24515+ movl (v), %eax
24516+ movl 4(v), %edx
24517+RET_ENDP
24518 #undef v
24519
24520 #define v %esi
24521@@ -55,6 +59,10 @@ BEGIN(set)
24522 movl %ebx, (v)
24523 movl %ecx, 4(v)
24524 RET_ENDP
24525+BEGIN(set_unchecked)
24526+ movl %ebx, (v)
24527+ movl %ecx, 4(v)
24528+RET_ENDP
24529 #undef v
24530
24531 #define v %esi
24532@@ -70,6 +78,20 @@ RET_ENDP
24533 BEGIN(add)
24534 addl %eax, (v)
24535 adcl %edx, 4(v)
24536+
24537+#ifdef CONFIG_PAX_REFCOUNT
24538+ jno 0f
24539+ subl %eax, (v)
24540+ sbbl %edx, 4(v)
24541+ int $4
24542+0:
24543+ _ASM_EXTABLE(0b, 0b)
24544+#endif
24545+
24546+RET_ENDP
24547+BEGIN(add_unchecked)
24548+ addl %eax, (v)
24549+ adcl %edx, 4(v)
24550 RET_ENDP
24551 #undef v
24552
24553@@ -77,6 +99,24 @@ RET_ENDP
24554 BEGIN(add_return)
24555 addl (v), %eax
24556 adcl 4(v), %edx
24557+
24558+#ifdef CONFIG_PAX_REFCOUNT
24559+ into
24560+1234:
24561+ _ASM_EXTABLE(1234b, 2f)
24562+#endif
24563+
24564+ movl %eax, (v)
24565+ movl %edx, 4(v)
24566+
24567+#ifdef CONFIG_PAX_REFCOUNT
24568+2:
24569+#endif
24570+
24571+RET_ENDP
24572+BEGIN(add_return_unchecked)
24573+ addl (v), %eax
24574+ adcl 4(v), %edx
24575 movl %eax, (v)
24576 movl %edx, 4(v)
24577 RET_ENDP
24578@@ -86,6 +126,20 @@ RET_ENDP
24579 BEGIN(sub)
24580 subl %eax, (v)
24581 sbbl %edx, 4(v)
24582+
24583+#ifdef CONFIG_PAX_REFCOUNT
24584+ jno 0f
24585+ addl %eax, (v)
24586+ adcl %edx, 4(v)
24587+ int $4
24588+0:
24589+ _ASM_EXTABLE(0b, 0b)
24590+#endif
24591+
24592+RET_ENDP
24593+BEGIN(sub_unchecked)
24594+ subl %eax, (v)
24595+ sbbl %edx, 4(v)
24596 RET_ENDP
24597 #undef v
24598
24599@@ -96,6 +150,27 @@ BEGIN(sub_return)
24600 sbbl $0, %edx
24601 addl (v), %eax
24602 adcl 4(v), %edx
24603+
24604+#ifdef CONFIG_PAX_REFCOUNT
24605+ into
24606+1234:
24607+ _ASM_EXTABLE(1234b, 2f)
24608+#endif
24609+
24610+ movl %eax, (v)
24611+ movl %edx, 4(v)
24612+
24613+#ifdef CONFIG_PAX_REFCOUNT
24614+2:
24615+#endif
24616+
24617+RET_ENDP
24618+BEGIN(sub_return_unchecked)
24619+ negl %edx
24620+ negl %eax
24621+ sbbl $0, %edx
24622+ addl (v), %eax
24623+ adcl 4(v), %edx
24624 movl %eax, (v)
24625 movl %edx, 4(v)
24626 RET_ENDP
24627@@ -105,6 +180,20 @@ RET_ENDP
24628 BEGIN(inc)
24629 addl $1, (v)
24630 adcl $0, 4(v)
24631+
24632+#ifdef CONFIG_PAX_REFCOUNT
24633+ jno 0f
24634+ subl $1, (v)
24635+ sbbl $0, 4(v)
24636+ int $4
24637+0:
24638+ _ASM_EXTABLE(0b, 0b)
24639+#endif
24640+
24641+RET_ENDP
24642+BEGIN(inc_unchecked)
24643+ addl $1, (v)
24644+ adcl $0, 4(v)
24645 RET_ENDP
24646 #undef v
24647
24648@@ -114,6 +203,26 @@ BEGIN(inc_return)
24649 movl 4(v), %edx
24650 addl $1, %eax
24651 adcl $0, %edx
24652+
24653+#ifdef CONFIG_PAX_REFCOUNT
24654+ into
24655+1234:
24656+ _ASM_EXTABLE(1234b, 2f)
24657+#endif
24658+
24659+ movl %eax, (v)
24660+ movl %edx, 4(v)
24661+
24662+#ifdef CONFIG_PAX_REFCOUNT
24663+2:
24664+#endif
24665+
24666+RET_ENDP
24667+BEGIN(inc_return_unchecked)
24668+ movl (v), %eax
24669+ movl 4(v), %edx
24670+ addl $1, %eax
24671+ adcl $0, %edx
24672 movl %eax, (v)
24673 movl %edx, 4(v)
24674 RET_ENDP
24675@@ -123,6 +232,20 @@ RET_ENDP
24676 BEGIN(dec)
24677 subl $1, (v)
24678 sbbl $0, 4(v)
24679+
24680+#ifdef CONFIG_PAX_REFCOUNT
24681+ jno 0f
24682+ addl $1, (v)
24683+ adcl $0, 4(v)
24684+ int $4
24685+0:
24686+ _ASM_EXTABLE(0b, 0b)
24687+#endif
24688+
24689+RET_ENDP
24690+BEGIN(dec_unchecked)
24691+ subl $1, (v)
24692+ sbbl $0, 4(v)
24693 RET_ENDP
24694 #undef v
24695
24696@@ -132,6 +255,26 @@ BEGIN(dec_return)
24697 movl 4(v), %edx
24698 subl $1, %eax
24699 sbbl $0, %edx
24700+
24701+#ifdef CONFIG_PAX_REFCOUNT
24702+ into
24703+1234:
24704+ _ASM_EXTABLE(1234b, 2f)
24705+#endif
24706+
24707+ movl %eax, (v)
24708+ movl %edx, 4(v)
24709+
24710+#ifdef CONFIG_PAX_REFCOUNT
24711+2:
24712+#endif
24713+
24714+RET_ENDP
24715+BEGIN(dec_return_unchecked)
24716+ movl (v), %eax
24717+ movl 4(v), %edx
24718+ subl $1, %eax
24719+ sbbl $0, %edx
24720 movl %eax, (v)
24721 movl %edx, 4(v)
24722 RET_ENDP
24723@@ -143,6 +286,13 @@ BEGIN(add_unless)
24724 adcl %edx, %edi
24725 addl (v), %eax
24726 adcl 4(v), %edx
24727+
24728+#ifdef CONFIG_PAX_REFCOUNT
24729+ into
24730+1234:
24731+ _ASM_EXTABLE(1234b, 2f)
24732+#endif
24733+
24734 cmpl %eax, %ecx
24735 je 3f
24736 1:
24737@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
24738 1:
24739 addl $1, %eax
24740 adcl $0, %edx
24741+
24742+#ifdef CONFIG_PAX_REFCOUNT
24743+ into
24744+1234:
24745+ _ASM_EXTABLE(1234b, 2f)
24746+#endif
24747+
24748 movl %eax, (v)
24749 movl %edx, 4(v)
24750 movl $1, %eax
24751@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
24752 movl 4(v), %edx
24753 subl $1, %eax
24754 sbbl $0, %edx
24755+
24756+#ifdef CONFIG_PAX_REFCOUNT
24757+ into
24758+1234:
24759+ _ASM_EXTABLE(1234b, 1f)
24760+#endif
24761+
24762 js 1f
24763 movl %eax, (v)
24764 movl %edx, 4(v)
24765diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
24766index f5cc9eb..51fa319 100644
24767--- a/arch/x86/lib/atomic64_cx8_32.S
24768+++ b/arch/x86/lib/atomic64_cx8_32.S
24769@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
24770 CFI_STARTPROC
24771
24772 read64 %ecx
24773+ pax_force_retaddr
24774 ret
24775 CFI_ENDPROC
24776 ENDPROC(atomic64_read_cx8)
24777
24778+ENTRY(atomic64_read_unchecked_cx8)
24779+ CFI_STARTPROC
24780+
24781+ read64 %ecx
24782+ pax_force_retaddr
24783+ ret
24784+ CFI_ENDPROC
24785+ENDPROC(atomic64_read_unchecked_cx8)
24786+
24787 ENTRY(atomic64_set_cx8)
24788 CFI_STARTPROC
24789
24790@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
24791 cmpxchg8b (%esi)
24792 jne 1b
24793
24794+ pax_force_retaddr
24795 ret
24796 CFI_ENDPROC
24797 ENDPROC(atomic64_set_cx8)
24798
24799+ENTRY(atomic64_set_unchecked_cx8)
24800+ CFI_STARTPROC
24801+
24802+1:
24803+/* we don't need LOCK_PREFIX since aligned 64-bit writes
24804+ * are atomic on 586 and newer */
24805+ cmpxchg8b (%esi)
24806+ jne 1b
24807+
24808+ pax_force_retaddr
24809+ ret
24810+ CFI_ENDPROC
24811+ENDPROC(atomic64_set_unchecked_cx8)
24812+
24813 ENTRY(atomic64_xchg_cx8)
24814 CFI_STARTPROC
24815
24816@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
24817 cmpxchg8b (%esi)
24818 jne 1b
24819
24820+ pax_force_retaddr
24821 ret
24822 CFI_ENDPROC
24823 ENDPROC(atomic64_xchg_cx8)
24824
24825-.macro addsub_return func ins insc
24826-ENTRY(atomic64_\func\()_return_cx8)
24827+.macro addsub_return func ins insc unchecked=""
24828+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
24829 CFI_STARTPROC
24830 SAVE ebp
24831 SAVE ebx
24832@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
24833 movl %edx, %ecx
24834 \ins\()l %esi, %ebx
24835 \insc\()l %edi, %ecx
24836+
24837+.ifb \unchecked
24838+#ifdef CONFIG_PAX_REFCOUNT
24839+ into
24840+2:
24841+ _ASM_EXTABLE(2b, 3f)
24842+#endif
24843+.endif
24844+
24845 LOCK_PREFIX
24846 cmpxchg8b (%ebp)
24847 jne 1b
24848-
24849-10:
24850 movl %ebx, %eax
24851 movl %ecx, %edx
24852+
24853+.ifb \unchecked
24854+#ifdef CONFIG_PAX_REFCOUNT
24855+3:
24856+#endif
24857+.endif
24858+
24859 RESTORE edi
24860 RESTORE esi
24861 RESTORE ebx
24862 RESTORE ebp
24863+ pax_force_retaddr
24864 ret
24865 CFI_ENDPROC
24866-ENDPROC(atomic64_\func\()_return_cx8)
24867+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
24868 .endm
24869
24870 addsub_return add add adc
24871 addsub_return sub sub sbb
24872+addsub_return add add adc _unchecked
24873+addsub_return sub sub sbb _unchecked
24874
24875-.macro incdec_return func ins insc
24876-ENTRY(atomic64_\func\()_return_cx8)
24877+.macro incdec_return func ins insc unchecked=""
24878+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
24879 CFI_STARTPROC
24880 SAVE ebx
24881
24882@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
24883 movl %edx, %ecx
24884 \ins\()l $1, %ebx
24885 \insc\()l $0, %ecx
24886+
24887+.ifb \unchecked
24888+#ifdef CONFIG_PAX_REFCOUNT
24889+ into
24890+2:
24891+ _ASM_EXTABLE(2b, 3f)
24892+#endif
24893+.endif
24894+
24895 LOCK_PREFIX
24896 cmpxchg8b (%esi)
24897 jne 1b
24898
24899-10:
24900 movl %ebx, %eax
24901 movl %ecx, %edx
24902+
24903+.ifb \unchecked
24904+#ifdef CONFIG_PAX_REFCOUNT
24905+3:
24906+#endif
24907+.endif
24908+
24909 RESTORE ebx
24910+ pax_force_retaddr
24911 ret
24912 CFI_ENDPROC
24913-ENDPROC(atomic64_\func\()_return_cx8)
24914+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
24915 .endm
24916
24917 incdec_return inc add adc
24918 incdec_return dec sub sbb
24919+incdec_return inc add adc _unchecked
24920+incdec_return dec sub sbb _unchecked
24921
24922 ENTRY(atomic64_dec_if_positive_cx8)
24923 CFI_STARTPROC
24924@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
24925 movl %edx, %ecx
24926 subl $1, %ebx
24927 sbb $0, %ecx
24928+
24929+#ifdef CONFIG_PAX_REFCOUNT
24930+ into
24931+1234:
24932+ _ASM_EXTABLE(1234b, 2f)
24933+#endif
24934+
24935 js 2f
24936 LOCK_PREFIX
24937 cmpxchg8b (%esi)
24938@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
24939 movl %ebx, %eax
24940 movl %ecx, %edx
24941 RESTORE ebx
24942+ pax_force_retaddr
24943 ret
24944 CFI_ENDPROC
24945 ENDPROC(atomic64_dec_if_positive_cx8)
24946@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
24947 movl %edx, %ecx
24948 addl %ebp, %ebx
24949 adcl %edi, %ecx
24950+
24951+#ifdef CONFIG_PAX_REFCOUNT
24952+ into
24953+1234:
24954+ _ASM_EXTABLE(1234b, 3f)
24955+#endif
24956+
24957 LOCK_PREFIX
24958 cmpxchg8b (%esi)
24959 jne 1b
24960@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
24961 CFI_ADJUST_CFA_OFFSET -8
24962 RESTORE ebx
24963 RESTORE ebp
24964+ pax_force_retaddr
24965 ret
24966 4:
24967 cmpl %edx, 4(%esp)
24968@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
24969 xorl %ecx, %ecx
24970 addl $1, %ebx
24971 adcl %edx, %ecx
24972+
24973+#ifdef CONFIG_PAX_REFCOUNT
24974+ into
24975+1234:
24976+ _ASM_EXTABLE(1234b, 3f)
24977+#endif
24978+
24979 LOCK_PREFIX
24980 cmpxchg8b (%esi)
24981 jne 1b
24982@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
24983 movl $1, %eax
24984 3:
24985 RESTORE ebx
24986+ pax_force_retaddr
24987 ret
24988 CFI_ENDPROC
24989 ENDPROC(atomic64_inc_not_zero_cx8)
24990diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
24991index 2af5df3..62b1a5a 100644
24992--- a/arch/x86/lib/checksum_32.S
24993+++ b/arch/x86/lib/checksum_32.S
24994@@ -29,7 +29,8 @@
24995 #include <asm/dwarf2.h>
24996 #include <asm/errno.h>
24997 #include <asm/asm.h>
24998-
24999+#include <asm/segment.h>
25000+
25001 /*
25002 * computes a partial checksum, e.g. for TCP/UDP fragments
25003 */
25004@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
25005
25006 #define ARGBASE 16
25007 #define FP 12
25008-
25009-ENTRY(csum_partial_copy_generic)
25010+
25011+ENTRY(csum_partial_copy_generic_to_user)
25012 CFI_STARTPROC
25013+
25014+#ifdef CONFIG_PAX_MEMORY_UDEREF
25015+ pushl_cfi %gs
25016+ popl_cfi %es
25017+ jmp csum_partial_copy_generic
25018+#endif
25019+
25020+ENTRY(csum_partial_copy_generic_from_user)
25021+
25022+#ifdef CONFIG_PAX_MEMORY_UDEREF
25023+ pushl_cfi %gs
25024+ popl_cfi %ds
25025+#endif
25026+
25027+ENTRY(csum_partial_copy_generic)
25028 subl $4,%esp
25029 CFI_ADJUST_CFA_OFFSET 4
25030 pushl_cfi %edi
25031@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
25032 jmp 4f
25033 SRC(1: movw (%esi), %bx )
25034 addl $2, %esi
25035-DST( movw %bx, (%edi) )
25036+DST( movw %bx, %es:(%edi) )
25037 addl $2, %edi
25038 addw %bx, %ax
25039 adcl $0, %eax
25040@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
25041 SRC(1: movl (%esi), %ebx )
25042 SRC( movl 4(%esi), %edx )
25043 adcl %ebx, %eax
25044-DST( movl %ebx, (%edi) )
25045+DST( movl %ebx, %es:(%edi) )
25046 adcl %edx, %eax
25047-DST( movl %edx, 4(%edi) )
25048+DST( movl %edx, %es:4(%edi) )
25049
25050 SRC( movl 8(%esi), %ebx )
25051 SRC( movl 12(%esi), %edx )
25052 adcl %ebx, %eax
25053-DST( movl %ebx, 8(%edi) )
25054+DST( movl %ebx, %es:8(%edi) )
25055 adcl %edx, %eax
25056-DST( movl %edx, 12(%edi) )
25057+DST( movl %edx, %es:12(%edi) )
25058
25059 SRC( movl 16(%esi), %ebx )
25060 SRC( movl 20(%esi), %edx )
25061 adcl %ebx, %eax
25062-DST( movl %ebx, 16(%edi) )
25063+DST( movl %ebx, %es:16(%edi) )
25064 adcl %edx, %eax
25065-DST( movl %edx, 20(%edi) )
25066+DST( movl %edx, %es:20(%edi) )
25067
25068 SRC( movl 24(%esi), %ebx )
25069 SRC( movl 28(%esi), %edx )
25070 adcl %ebx, %eax
25071-DST( movl %ebx, 24(%edi) )
25072+DST( movl %ebx, %es:24(%edi) )
25073 adcl %edx, %eax
25074-DST( movl %edx, 28(%edi) )
25075+DST( movl %edx, %es:28(%edi) )
25076
25077 lea 32(%esi), %esi
25078 lea 32(%edi), %edi
25079@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
25080 shrl $2, %edx # This clears CF
25081 SRC(3: movl (%esi), %ebx )
25082 adcl %ebx, %eax
25083-DST( movl %ebx, (%edi) )
25084+DST( movl %ebx, %es:(%edi) )
25085 lea 4(%esi), %esi
25086 lea 4(%edi), %edi
25087 dec %edx
25088@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
25089 jb 5f
25090 SRC( movw (%esi), %cx )
25091 leal 2(%esi), %esi
25092-DST( movw %cx, (%edi) )
25093+DST( movw %cx, %es:(%edi) )
25094 leal 2(%edi), %edi
25095 je 6f
25096 shll $16,%ecx
25097 SRC(5: movb (%esi), %cl )
25098-DST( movb %cl, (%edi) )
25099+DST( movb %cl, %es:(%edi) )
25100 6: addl %ecx, %eax
25101 adcl $0, %eax
25102 7:
25103@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
25104
25105 6001:
25106 movl ARGBASE+20(%esp), %ebx # src_err_ptr
25107- movl $-EFAULT, (%ebx)
25108+ movl $-EFAULT, %ss:(%ebx)
25109
25110 # zero the complete destination - computing the rest
25111 # is too much work
25112@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
25113
25114 6002:
25115 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
25116- movl $-EFAULT,(%ebx)
25117+ movl $-EFAULT,%ss:(%ebx)
25118 jmp 5000b
25119
25120 .previous
25121
25122+ pushl_cfi %ss
25123+ popl_cfi %ds
25124+ pushl_cfi %ss
25125+ popl_cfi %es
25126 popl_cfi %ebx
25127 CFI_RESTORE ebx
25128 popl_cfi %esi
25129@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
25130 popl_cfi %ecx # equivalent to addl $4,%esp
25131 ret
25132 CFI_ENDPROC
25133-ENDPROC(csum_partial_copy_generic)
25134+ENDPROC(csum_partial_copy_generic_to_user)
25135
25136 #else
25137
25138 /* Version for PentiumII/PPro */
25139
25140 #define ROUND1(x) \
25141+ nop; nop; nop; \
25142 SRC(movl x(%esi), %ebx ) ; \
25143 addl %ebx, %eax ; \
25144- DST(movl %ebx, x(%edi) ) ;
25145+ DST(movl %ebx, %es:x(%edi)) ;
25146
25147 #define ROUND(x) \
25148+ nop; nop; nop; \
25149 SRC(movl x(%esi), %ebx ) ; \
25150 adcl %ebx, %eax ; \
25151- DST(movl %ebx, x(%edi) ) ;
25152+ DST(movl %ebx, %es:x(%edi)) ;
25153
25154 #define ARGBASE 12
25155-
25156-ENTRY(csum_partial_copy_generic)
25157+
25158+ENTRY(csum_partial_copy_generic_to_user)
25159 CFI_STARTPROC
25160+
25161+#ifdef CONFIG_PAX_MEMORY_UDEREF
25162+ pushl_cfi %gs
25163+ popl_cfi %es
25164+ jmp csum_partial_copy_generic
25165+#endif
25166+
25167+ENTRY(csum_partial_copy_generic_from_user)
25168+
25169+#ifdef CONFIG_PAX_MEMORY_UDEREF
25170+ pushl_cfi %gs
25171+ popl_cfi %ds
25172+#endif
25173+
25174+ENTRY(csum_partial_copy_generic)
25175 pushl_cfi %ebx
25176 CFI_REL_OFFSET ebx, 0
25177 pushl_cfi %edi
25178@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
25179 subl %ebx, %edi
25180 lea -1(%esi),%edx
25181 andl $-32,%edx
25182- lea 3f(%ebx,%ebx), %ebx
25183+ lea 3f(%ebx,%ebx,2), %ebx
25184 testl %esi, %esi
25185 jmp *%ebx
25186 1: addl $64,%esi
25187@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
25188 jb 5f
25189 SRC( movw (%esi), %dx )
25190 leal 2(%esi), %esi
25191-DST( movw %dx, (%edi) )
25192+DST( movw %dx, %es:(%edi) )
25193 leal 2(%edi), %edi
25194 je 6f
25195 shll $16,%edx
25196 5:
25197 SRC( movb (%esi), %dl )
25198-DST( movb %dl, (%edi) )
25199+DST( movb %dl, %es:(%edi) )
25200 6: addl %edx, %eax
25201 adcl $0, %eax
25202 7:
25203 .section .fixup, "ax"
25204 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
25205- movl $-EFAULT, (%ebx)
25206+ movl $-EFAULT, %ss:(%ebx)
25207 # zero the complete destination (computing the rest is too much work)
25208 movl ARGBASE+8(%esp),%edi # dst
25209 movl ARGBASE+12(%esp),%ecx # len
25210@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
25211 rep; stosb
25212 jmp 7b
25213 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
25214- movl $-EFAULT, (%ebx)
25215+ movl $-EFAULT, %ss:(%ebx)
25216 jmp 7b
25217 .previous
25218
25219+#ifdef CONFIG_PAX_MEMORY_UDEREF
25220+ pushl_cfi %ss
25221+ popl_cfi %ds
25222+ pushl_cfi %ss
25223+ popl_cfi %es
25224+#endif
25225+
25226 popl_cfi %esi
25227 CFI_RESTORE esi
25228 popl_cfi %edi
25229@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
25230 CFI_RESTORE ebx
25231 ret
25232 CFI_ENDPROC
25233-ENDPROC(csum_partial_copy_generic)
25234+ENDPROC(csum_partial_copy_generic_to_user)
25235
25236 #undef ROUND
25237 #undef ROUND1
25238diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
25239index f2145cf..cea889d 100644
25240--- a/arch/x86/lib/clear_page_64.S
25241+++ b/arch/x86/lib/clear_page_64.S
25242@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
25243 movl $4096/8,%ecx
25244 xorl %eax,%eax
25245 rep stosq
25246+ pax_force_retaddr
25247 ret
25248 CFI_ENDPROC
25249 ENDPROC(clear_page_c)
25250@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
25251 movl $4096,%ecx
25252 xorl %eax,%eax
25253 rep stosb
25254+ pax_force_retaddr
25255 ret
25256 CFI_ENDPROC
25257 ENDPROC(clear_page_c_e)
25258@@ -43,6 +45,7 @@ ENTRY(clear_page)
25259 leaq 64(%rdi),%rdi
25260 jnz .Lloop
25261 nop
25262+ pax_force_retaddr
25263 ret
25264 CFI_ENDPROC
25265 .Lclear_page_end:
25266@@ -58,7 +61,7 @@ ENDPROC(clear_page)
25267
25268 #include <asm/cpufeature.h>
25269
25270- .section .altinstr_replacement,"ax"
25271+ .section .altinstr_replacement,"a"
25272 1: .byte 0xeb /* jmp <disp8> */
25273 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
25274 2: .byte 0xeb /* jmp <disp8> */
25275diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
25276index 1e572c5..2a162cd 100644
25277--- a/arch/x86/lib/cmpxchg16b_emu.S
25278+++ b/arch/x86/lib/cmpxchg16b_emu.S
25279@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
25280
25281 popf
25282 mov $1, %al
25283+ pax_force_retaddr
25284 ret
25285
25286 not_same:
25287 popf
25288 xor %al,%al
25289+ pax_force_retaddr
25290 ret
25291
25292 CFI_ENDPROC
25293diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
25294index 176cca6..1166c50 100644
25295--- a/arch/x86/lib/copy_page_64.S
25296+++ b/arch/x86/lib/copy_page_64.S
25297@@ -9,6 +9,7 @@ copy_page_rep:
25298 CFI_STARTPROC
25299 movl $4096/8, %ecx
25300 rep movsq
25301+ pax_force_retaddr
25302 ret
25303 CFI_ENDPROC
25304 ENDPROC(copy_page_rep)
25305@@ -20,12 +21,14 @@ ENDPROC(copy_page_rep)
25306
25307 ENTRY(copy_page)
25308 CFI_STARTPROC
25309- subq $2*8, %rsp
25310- CFI_ADJUST_CFA_OFFSET 2*8
25311+ subq $3*8, %rsp
25312+ CFI_ADJUST_CFA_OFFSET 3*8
25313 movq %rbx, (%rsp)
25314 CFI_REL_OFFSET rbx, 0
25315 movq %r12, 1*8(%rsp)
25316 CFI_REL_OFFSET r12, 1*8
25317+ movq %r13, 2*8(%rsp)
25318+ CFI_REL_OFFSET r13, 2*8
25319
25320 movl $(4096/64)-5, %ecx
25321 .p2align 4
25322@@ -36,7 +39,7 @@ ENTRY(copy_page)
25323 movq 0x8*2(%rsi), %rdx
25324 movq 0x8*3(%rsi), %r8
25325 movq 0x8*4(%rsi), %r9
25326- movq 0x8*5(%rsi), %r10
25327+ movq 0x8*5(%rsi), %r13
25328 movq 0x8*6(%rsi), %r11
25329 movq 0x8*7(%rsi), %r12
25330
25331@@ -47,7 +50,7 @@ ENTRY(copy_page)
25332 movq %rdx, 0x8*2(%rdi)
25333 movq %r8, 0x8*3(%rdi)
25334 movq %r9, 0x8*4(%rdi)
25335- movq %r10, 0x8*5(%rdi)
25336+ movq %r13, 0x8*5(%rdi)
25337 movq %r11, 0x8*6(%rdi)
25338 movq %r12, 0x8*7(%rdi)
25339
25340@@ -66,7 +69,7 @@ ENTRY(copy_page)
25341 movq 0x8*2(%rsi), %rdx
25342 movq 0x8*3(%rsi), %r8
25343 movq 0x8*4(%rsi), %r9
25344- movq 0x8*5(%rsi), %r10
25345+ movq 0x8*5(%rsi), %r13
25346 movq 0x8*6(%rsi), %r11
25347 movq 0x8*7(%rsi), %r12
25348
25349@@ -75,7 +78,7 @@ ENTRY(copy_page)
25350 movq %rdx, 0x8*2(%rdi)
25351 movq %r8, 0x8*3(%rdi)
25352 movq %r9, 0x8*4(%rdi)
25353- movq %r10, 0x8*5(%rdi)
25354+ movq %r13, 0x8*5(%rdi)
25355 movq %r11, 0x8*6(%rdi)
25356 movq %r12, 0x8*7(%rdi)
25357
25358@@ -87,8 +90,11 @@ ENTRY(copy_page)
25359 CFI_RESTORE rbx
25360 movq 1*8(%rsp), %r12
25361 CFI_RESTORE r12
25362- addq $2*8, %rsp
25363- CFI_ADJUST_CFA_OFFSET -2*8
25364+ movq 2*8(%rsp), %r13
25365+ CFI_RESTORE r13
25366+ addq $3*8, %rsp
25367+ CFI_ADJUST_CFA_OFFSET -3*8
25368+ pax_force_retaddr
25369 ret
25370 .Lcopy_page_end:
25371 CFI_ENDPROC
25372@@ -99,7 +105,7 @@ ENDPROC(copy_page)
25373
25374 #include <asm/cpufeature.h>
25375
25376- .section .altinstr_replacement,"ax"
25377+ .section .altinstr_replacement,"a"
25378 1: .byte 0xeb /* jmp <disp8> */
25379 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
25380 2:
25381diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
25382index a30ca15..d25fab6 100644
25383--- a/arch/x86/lib/copy_user_64.S
25384+++ b/arch/x86/lib/copy_user_64.S
25385@@ -18,6 +18,7 @@
25386 #include <asm/alternative-asm.h>
25387 #include <asm/asm.h>
25388 #include <asm/smap.h>
25389+#include <asm/pgtable.h>
25390
25391 /*
25392 * By placing feature2 after feature1 in altinstructions section, we logically
25393@@ -31,7 +32,7 @@
25394 .byte 0xe9 /* 32bit jump */
25395 .long \orig-1f /* by default jump to orig */
25396 1:
25397- .section .altinstr_replacement,"ax"
25398+ .section .altinstr_replacement,"a"
25399 2: .byte 0xe9 /* near jump with 32bit immediate */
25400 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
25401 3: .byte 0xe9 /* near jump with 32bit immediate */
25402@@ -70,47 +71,20 @@
25403 #endif
25404 .endm
25405
25406-/* Standard copy_to_user with segment limit checking */
25407-ENTRY(_copy_to_user)
25408- CFI_STARTPROC
25409- GET_THREAD_INFO(%rax)
25410- movq %rdi,%rcx
25411- addq %rdx,%rcx
25412- jc bad_to_user
25413- cmpq TI_addr_limit(%rax),%rcx
25414- ja bad_to_user
25415- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
25416- copy_user_generic_unrolled,copy_user_generic_string, \
25417- copy_user_enhanced_fast_string
25418- CFI_ENDPROC
25419-ENDPROC(_copy_to_user)
25420-
25421-/* Standard copy_from_user with segment limit checking */
25422-ENTRY(_copy_from_user)
25423- CFI_STARTPROC
25424- GET_THREAD_INFO(%rax)
25425- movq %rsi,%rcx
25426- addq %rdx,%rcx
25427- jc bad_from_user
25428- cmpq TI_addr_limit(%rax),%rcx
25429- ja bad_from_user
25430- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
25431- copy_user_generic_unrolled,copy_user_generic_string, \
25432- copy_user_enhanced_fast_string
25433- CFI_ENDPROC
25434-ENDPROC(_copy_from_user)
25435-
25436 .section .fixup,"ax"
25437 /* must zero dest */
25438 ENTRY(bad_from_user)
25439 bad_from_user:
25440 CFI_STARTPROC
25441+ testl %edx,%edx
25442+ js bad_to_user
25443 movl %edx,%ecx
25444 xorl %eax,%eax
25445 rep
25446 stosb
25447 bad_to_user:
25448 movl %edx,%eax
25449+ pax_force_retaddr
25450 ret
25451 CFI_ENDPROC
25452 ENDPROC(bad_from_user)
25453@@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
25454 jz 17f
25455 1: movq (%rsi),%r8
25456 2: movq 1*8(%rsi),%r9
25457-3: movq 2*8(%rsi),%r10
25458+3: movq 2*8(%rsi),%rax
25459 4: movq 3*8(%rsi),%r11
25460 5: movq %r8,(%rdi)
25461 6: movq %r9,1*8(%rdi)
25462-7: movq %r10,2*8(%rdi)
25463+7: movq %rax,2*8(%rdi)
25464 8: movq %r11,3*8(%rdi)
25465 9: movq 4*8(%rsi),%r8
25466 10: movq 5*8(%rsi),%r9
25467-11: movq 6*8(%rsi),%r10
25468+11: movq 6*8(%rsi),%rax
25469 12: movq 7*8(%rsi),%r11
25470 13: movq %r8,4*8(%rdi)
25471 14: movq %r9,5*8(%rdi)
25472-15: movq %r10,6*8(%rdi)
25473+15: movq %rax,6*8(%rdi)
25474 16: movq %r11,7*8(%rdi)
25475 leaq 64(%rsi),%rsi
25476 leaq 64(%rdi),%rdi
25477@@ -180,6 +154,7 @@ ENTRY(copy_user_generic_unrolled)
25478 jnz 21b
25479 23: xor %eax,%eax
25480 ASM_CLAC
25481+ pax_force_retaddr
25482 ret
25483
25484 .section .fixup,"ax"
25485@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
25486 movsb
25487 4: xorl %eax,%eax
25488 ASM_CLAC
25489+ pax_force_retaddr
25490 ret
25491
25492 .section .fixup,"ax"
25493@@ -286,6 +262,7 @@ ENTRY(copy_user_enhanced_fast_string)
25494 movsb
25495 2: xorl %eax,%eax
25496 ASM_CLAC
25497+ pax_force_retaddr
25498 ret
25499
25500 .section .fixup,"ax"
25501diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
25502index 6a4f43c..f5f9e26 100644
25503--- a/arch/x86/lib/copy_user_nocache_64.S
25504+++ b/arch/x86/lib/copy_user_nocache_64.S
25505@@ -8,6 +8,7 @@
25506
25507 #include <linux/linkage.h>
25508 #include <asm/dwarf2.h>
25509+#include <asm/alternative-asm.h>
25510
25511 #define FIX_ALIGNMENT 1
25512
25513@@ -16,6 +17,7 @@
25514 #include <asm/thread_info.h>
25515 #include <asm/asm.h>
25516 #include <asm/smap.h>
25517+#include <asm/pgtable.h>
25518
25519 .macro ALIGN_DESTINATION
25520 #ifdef FIX_ALIGNMENT
25521@@ -49,6 +51,15 @@
25522 */
25523 ENTRY(__copy_user_nocache)
25524 CFI_STARTPROC
25525+
25526+#ifdef CONFIG_PAX_MEMORY_UDEREF
25527+ mov $PAX_USER_SHADOW_BASE,%rcx
25528+ cmp %rcx,%rsi
25529+ jae 1f
25530+ add %rcx,%rsi
25531+1:
25532+#endif
25533+
25534 ASM_STAC
25535 cmpl $8,%edx
25536 jb 20f /* less then 8 bytes, go to byte copy loop */
25537@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
25538 jz 17f
25539 1: movq (%rsi),%r8
25540 2: movq 1*8(%rsi),%r9
25541-3: movq 2*8(%rsi),%r10
25542+3: movq 2*8(%rsi),%rax
25543 4: movq 3*8(%rsi),%r11
25544 5: movnti %r8,(%rdi)
25545 6: movnti %r9,1*8(%rdi)
25546-7: movnti %r10,2*8(%rdi)
25547+7: movnti %rax,2*8(%rdi)
25548 8: movnti %r11,3*8(%rdi)
25549 9: movq 4*8(%rsi),%r8
25550 10: movq 5*8(%rsi),%r9
25551-11: movq 6*8(%rsi),%r10
25552+11: movq 6*8(%rsi),%rax
25553 12: movq 7*8(%rsi),%r11
25554 13: movnti %r8,4*8(%rdi)
25555 14: movnti %r9,5*8(%rdi)
25556-15: movnti %r10,6*8(%rdi)
25557+15: movnti %rax,6*8(%rdi)
25558 16: movnti %r11,7*8(%rdi)
25559 leaq 64(%rsi),%rsi
25560 leaq 64(%rdi),%rdi
25561@@ -99,6 +110,7 @@ ENTRY(__copy_user_nocache)
25562 23: xorl %eax,%eax
25563 ASM_CLAC
25564 sfence
25565+ pax_force_retaddr
25566 ret
25567
25568 .section .fixup,"ax"
25569diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
25570index 2419d5f..953ee51 100644
25571--- a/arch/x86/lib/csum-copy_64.S
25572+++ b/arch/x86/lib/csum-copy_64.S
25573@@ -9,6 +9,7 @@
25574 #include <asm/dwarf2.h>
25575 #include <asm/errno.h>
25576 #include <asm/asm.h>
25577+#include <asm/alternative-asm.h>
25578
25579 /*
25580 * Checksum copy with exception handling.
25581@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
25582 CFI_RESTORE rbp
25583 addq $7*8, %rsp
25584 CFI_ADJUST_CFA_OFFSET -7*8
25585+ pax_force_retaddr 0, 1
25586 ret
25587 CFI_RESTORE_STATE
25588
25589diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
25590index 25b7ae8..169fafc 100644
25591--- a/arch/x86/lib/csum-wrappers_64.c
25592+++ b/arch/x86/lib/csum-wrappers_64.c
25593@@ -52,7 +52,7 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
25594 len -= 2;
25595 }
25596 }
25597- isum = csum_partial_copy_generic((__force const void *)src,
25598+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
25599 dst, len, isum, errp, NULL);
25600 if (unlikely(*errp))
25601 goto out_err;
25602@@ -105,7 +105,7 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
25603 }
25604
25605 *errp = 0;
25606- return csum_partial_copy_generic(src, (void __force *)dst,
25607+ return csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
25608 len, isum, NULL, errp);
25609 }
25610 EXPORT_SYMBOL(csum_partial_copy_to_user);
25611diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
25612index 156b9c8..b144132 100644
25613--- a/arch/x86/lib/getuser.S
25614+++ b/arch/x86/lib/getuser.S
25615@@ -34,17 +34,40 @@
25616 #include <asm/thread_info.h>
25617 #include <asm/asm.h>
25618 #include <asm/smap.h>
25619+#include <asm/segment.h>
25620+#include <asm/pgtable.h>
25621+#include <asm/alternative-asm.h>
25622+
25623+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
25624+#define __copyuser_seg gs;
25625+#else
25626+#define __copyuser_seg
25627+#endif
25628
25629 .text
25630 ENTRY(__get_user_1)
25631 CFI_STARTPROC
25632+
25633+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
25634 GET_THREAD_INFO(%_ASM_DX)
25635 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
25636 jae bad_get_user
25637 ASM_STAC
25638-1: movzb (%_ASM_AX),%edx
25639+
25640+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25641+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
25642+ cmp %_ASM_DX,%_ASM_AX
25643+ jae 1234f
25644+ add %_ASM_DX,%_ASM_AX
25645+1234:
25646+#endif
25647+
25648+#endif
25649+
25650+1: __copyuser_seg movzb (%_ASM_AX),%edx
25651 xor %eax,%eax
25652 ASM_CLAC
25653+ pax_force_retaddr
25654 ret
25655 CFI_ENDPROC
25656 ENDPROC(__get_user_1)
25657@@ -52,14 +75,28 @@ ENDPROC(__get_user_1)
25658 ENTRY(__get_user_2)
25659 CFI_STARTPROC
25660 add $1,%_ASM_AX
25661+
25662+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
25663 jc bad_get_user
25664 GET_THREAD_INFO(%_ASM_DX)
25665 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
25666 jae bad_get_user
25667 ASM_STAC
25668-2: movzwl -1(%_ASM_AX),%edx
25669+
25670+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25671+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
25672+ cmp %_ASM_DX,%_ASM_AX
25673+ jae 1234f
25674+ add %_ASM_DX,%_ASM_AX
25675+1234:
25676+#endif
25677+
25678+#endif
25679+
25680+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
25681 xor %eax,%eax
25682 ASM_CLAC
25683+ pax_force_retaddr
25684 ret
25685 CFI_ENDPROC
25686 ENDPROC(__get_user_2)
25687@@ -67,14 +104,28 @@ ENDPROC(__get_user_2)
25688 ENTRY(__get_user_4)
25689 CFI_STARTPROC
25690 add $3,%_ASM_AX
25691+
25692+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
25693 jc bad_get_user
25694 GET_THREAD_INFO(%_ASM_DX)
25695 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
25696 jae bad_get_user
25697 ASM_STAC
25698-3: mov -3(%_ASM_AX),%edx
25699+
25700+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25701+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
25702+ cmp %_ASM_DX,%_ASM_AX
25703+ jae 1234f
25704+ add %_ASM_DX,%_ASM_AX
25705+1234:
25706+#endif
25707+
25708+#endif
25709+
25710+3: __copyuser_seg mov -3(%_ASM_AX),%edx
25711 xor %eax,%eax
25712 ASM_CLAC
25713+ pax_force_retaddr
25714 ret
25715 CFI_ENDPROC
25716 ENDPROC(__get_user_4)
25717@@ -87,10 +138,20 @@ ENTRY(__get_user_8)
25718 GET_THREAD_INFO(%_ASM_DX)
25719 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
25720 jae bad_get_user
25721+
25722+#ifdef CONFIG_PAX_MEMORY_UDEREF
25723+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
25724+ cmp %_ASM_DX,%_ASM_AX
25725+ jae 1234f
25726+ add %_ASM_DX,%_ASM_AX
25727+1234:
25728+#endif
25729+
25730 ASM_STAC
25731 4: movq -7(%_ASM_AX),%_ASM_DX
25732 xor %eax,%eax
25733 ASM_CLAC
25734+ pax_force_retaddr
25735 ret
25736 CFI_ENDPROC
25737 ENDPROC(__get_user_8)
25738@@ -101,6 +162,7 @@ bad_get_user:
25739 xor %edx,%edx
25740 mov $(-EFAULT),%_ASM_AX
25741 ASM_CLAC
25742+ pax_force_retaddr
25743 ret
25744 CFI_ENDPROC
25745 END(bad_get_user)
25746diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
25747index 54fcffe..7be149e 100644
25748--- a/arch/x86/lib/insn.c
25749+++ b/arch/x86/lib/insn.c
25750@@ -20,8 +20,10 @@
25751
25752 #ifdef __KERNEL__
25753 #include <linux/string.h>
25754+#include <asm/pgtable_types.h>
25755 #else
25756 #include <string.h>
25757+#define ktla_ktva(addr) addr
25758 #endif
25759 #include <asm/inat.h>
25760 #include <asm/insn.h>
25761@@ -53,8 +55,8 @@
25762 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
25763 {
25764 memset(insn, 0, sizeof(*insn));
25765- insn->kaddr = kaddr;
25766- insn->next_byte = kaddr;
25767+ insn->kaddr = ktla_ktva(kaddr);
25768+ insn->next_byte = ktla_ktva(kaddr);
25769 insn->x86_64 = x86_64 ? 1 : 0;
25770 insn->opnd_bytes = 4;
25771 if (x86_64)
25772diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
25773index 05a95e7..326f2fa 100644
25774--- a/arch/x86/lib/iomap_copy_64.S
25775+++ b/arch/x86/lib/iomap_copy_64.S
25776@@ -17,6 +17,7 @@
25777
25778 #include <linux/linkage.h>
25779 #include <asm/dwarf2.h>
25780+#include <asm/alternative-asm.h>
25781
25782 /*
25783 * override generic version in lib/iomap_copy.c
25784@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
25785 CFI_STARTPROC
25786 movl %edx,%ecx
25787 rep movsd
25788+ pax_force_retaddr
25789 ret
25790 CFI_ENDPROC
25791 ENDPROC(__iowrite32_copy)
25792diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
25793index 1c273be..da9cc0e 100644
25794--- a/arch/x86/lib/memcpy_64.S
25795+++ b/arch/x86/lib/memcpy_64.S
25796@@ -33,6 +33,7 @@
25797 rep movsq
25798 movl %edx, %ecx
25799 rep movsb
25800+ pax_force_retaddr
25801 ret
25802 .Lmemcpy_e:
25803 .previous
25804@@ -49,6 +50,7 @@
25805 movq %rdi, %rax
25806 movq %rdx, %rcx
25807 rep movsb
25808+ pax_force_retaddr
25809 ret
25810 .Lmemcpy_e_e:
25811 .previous
25812@@ -76,13 +78,13 @@ ENTRY(memcpy)
25813 */
25814 movq 0*8(%rsi), %r8
25815 movq 1*8(%rsi), %r9
25816- movq 2*8(%rsi), %r10
25817+ movq 2*8(%rsi), %rcx
25818 movq 3*8(%rsi), %r11
25819 leaq 4*8(%rsi), %rsi
25820
25821 movq %r8, 0*8(%rdi)
25822 movq %r9, 1*8(%rdi)
25823- movq %r10, 2*8(%rdi)
25824+ movq %rcx, 2*8(%rdi)
25825 movq %r11, 3*8(%rdi)
25826 leaq 4*8(%rdi), %rdi
25827 jae .Lcopy_forward_loop
25828@@ -105,12 +107,12 @@ ENTRY(memcpy)
25829 subq $0x20, %rdx
25830 movq -1*8(%rsi), %r8
25831 movq -2*8(%rsi), %r9
25832- movq -3*8(%rsi), %r10
25833+ movq -3*8(%rsi), %rcx
25834 movq -4*8(%rsi), %r11
25835 leaq -4*8(%rsi), %rsi
25836 movq %r8, -1*8(%rdi)
25837 movq %r9, -2*8(%rdi)
25838- movq %r10, -3*8(%rdi)
25839+ movq %rcx, -3*8(%rdi)
25840 movq %r11, -4*8(%rdi)
25841 leaq -4*8(%rdi), %rdi
25842 jae .Lcopy_backward_loop
25843@@ -130,12 +132,13 @@ ENTRY(memcpy)
25844 */
25845 movq 0*8(%rsi), %r8
25846 movq 1*8(%rsi), %r9
25847- movq -2*8(%rsi, %rdx), %r10
25848+ movq -2*8(%rsi, %rdx), %rcx
25849 movq -1*8(%rsi, %rdx), %r11
25850 movq %r8, 0*8(%rdi)
25851 movq %r9, 1*8(%rdi)
25852- movq %r10, -2*8(%rdi, %rdx)
25853+ movq %rcx, -2*8(%rdi, %rdx)
25854 movq %r11, -1*8(%rdi, %rdx)
25855+ pax_force_retaddr
25856 retq
25857 .p2align 4
25858 .Lless_16bytes:
25859@@ -148,6 +151,7 @@ ENTRY(memcpy)
25860 movq -1*8(%rsi, %rdx), %r9
25861 movq %r8, 0*8(%rdi)
25862 movq %r9, -1*8(%rdi, %rdx)
25863+ pax_force_retaddr
25864 retq
25865 .p2align 4
25866 .Lless_8bytes:
25867@@ -161,6 +165,7 @@ ENTRY(memcpy)
25868 movl -4(%rsi, %rdx), %r8d
25869 movl %ecx, (%rdi)
25870 movl %r8d, -4(%rdi, %rdx)
25871+ pax_force_retaddr
25872 retq
25873 .p2align 4
25874 .Lless_3bytes:
25875@@ -179,6 +184,7 @@ ENTRY(memcpy)
25876 movb %cl, (%rdi)
25877
25878 .Lend:
25879+ pax_force_retaddr
25880 retq
25881 CFI_ENDPROC
25882 ENDPROC(memcpy)
25883diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
25884index ee16461..c39c199 100644
25885--- a/arch/x86/lib/memmove_64.S
25886+++ b/arch/x86/lib/memmove_64.S
25887@@ -61,13 +61,13 @@ ENTRY(memmove)
25888 5:
25889 sub $0x20, %rdx
25890 movq 0*8(%rsi), %r11
25891- movq 1*8(%rsi), %r10
25892+ movq 1*8(%rsi), %rcx
25893 movq 2*8(%rsi), %r9
25894 movq 3*8(%rsi), %r8
25895 leaq 4*8(%rsi), %rsi
25896
25897 movq %r11, 0*8(%rdi)
25898- movq %r10, 1*8(%rdi)
25899+ movq %rcx, 1*8(%rdi)
25900 movq %r9, 2*8(%rdi)
25901 movq %r8, 3*8(%rdi)
25902 leaq 4*8(%rdi), %rdi
25903@@ -81,10 +81,10 @@ ENTRY(memmove)
25904 4:
25905 movq %rdx, %rcx
25906 movq -8(%rsi, %rdx), %r11
25907- lea -8(%rdi, %rdx), %r10
25908+ lea -8(%rdi, %rdx), %r9
25909 shrq $3, %rcx
25910 rep movsq
25911- movq %r11, (%r10)
25912+ movq %r11, (%r9)
25913 jmp 13f
25914 .Lmemmove_end_forward:
25915
25916@@ -95,14 +95,14 @@ ENTRY(memmove)
25917 7:
25918 movq %rdx, %rcx
25919 movq (%rsi), %r11
25920- movq %rdi, %r10
25921+ movq %rdi, %r9
25922 leaq -8(%rsi, %rdx), %rsi
25923 leaq -8(%rdi, %rdx), %rdi
25924 shrq $3, %rcx
25925 std
25926 rep movsq
25927 cld
25928- movq %r11, (%r10)
25929+ movq %r11, (%r9)
25930 jmp 13f
25931
25932 /*
25933@@ -127,13 +127,13 @@ ENTRY(memmove)
25934 8:
25935 subq $0x20, %rdx
25936 movq -1*8(%rsi), %r11
25937- movq -2*8(%rsi), %r10
25938+ movq -2*8(%rsi), %rcx
25939 movq -3*8(%rsi), %r9
25940 movq -4*8(%rsi), %r8
25941 leaq -4*8(%rsi), %rsi
25942
25943 movq %r11, -1*8(%rdi)
25944- movq %r10, -2*8(%rdi)
25945+ movq %rcx, -2*8(%rdi)
25946 movq %r9, -3*8(%rdi)
25947 movq %r8, -4*8(%rdi)
25948 leaq -4*8(%rdi), %rdi
25949@@ -151,11 +151,11 @@ ENTRY(memmove)
25950 * Move data from 16 bytes to 31 bytes.
25951 */
25952 movq 0*8(%rsi), %r11
25953- movq 1*8(%rsi), %r10
25954+ movq 1*8(%rsi), %rcx
25955 movq -2*8(%rsi, %rdx), %r9
25956 movq -1*8(%rsi, %rdx), %r8
25957 movq %r11, 0*8(%rdi)
25958- movq %r10, 1*8(%rdi)
25959+ movq %rcx, 1*8(%rdi)
25960 movq %r9, -2*8(%rdi, %rdx)
25961 movq %r8, -1*8(%rdi, %rdx)
25962 jmp 13f
25963@@ -167,9 +167,9 @@ ENTRY(memmove)
25964 * Move data from 8 bytes to 15 bytes.
25965 */
25966 movq 0*8(%rsi), %r11
25967- movq -1*8(%rsi, %rdx), %r10
25968+ movq -1*8(%rsi, %rdx), %r9
25969 movq %r11, 0*8(%rdi)
25970- movq %r10, -1*8(%rdi, %rdx)
25971+ movq %r9, -1*8(%rdi, %rdx)
25972 jmp 13f
25973 10:
25974 cmpq $4, %rdx
25975@@ -178,9 +178,9 @@ ENTRY(memmove)
25976 * Move data from 4 bytes to 7 bytes.
25977 */
25978 movl (%rsi), %r11d
25979- movl -4(%rsi, %rdx), %r10d
25980+ movl -4(%rsi, %rdx), %r9d
25981 movl %r11d, (%rdi)
25982- movl %r10d, -4(%rdi, %rdx)
25983+ movl %r9d, -4(%rdi, %rdx)
25984 jmp 13f
25985 11:
25986 cmp $2, %rdx
25987@@ -189,9 +189,9 @@ ENTRY(memmove)
25988 * Move data from 2 bytes to 3 bytes.
25989 */
25990 movw (%rsi), %r11w
25991- movw -2(%rsi, %rdx), %r10w
25992+ movw -2(%rsi, %rdx), %r9w
25993 movw %r11w, (%rdi)
25994- movw %r10w, -2(%rdi, %rdx)
25995+ movw %r9w, -2(%rdi, %rdx)
25996 jmp 13f
25997 12:
25998 cmp $1, %rdx
25999@@ -202,6 +202,7 @@ ENTRY(memmove)
26000 movb (%rsi), %r11b
26001 movb %r11b, (%rdi)
26002 13:
26003+ pax_force_retaddr
26004 retq
26005 CFI_ENDPROC
26006
26007@@ -210,6 +211,7 @@ ENTRY(memmove)
26008 /* Forward moving data. */
26009 movq %rdx, %rcx
26010 rep movsb
26011+ pax_force_retaddr
26012 retq
26013 .Lmemmove_end_forward_efs:
26014 .previous
26015diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
26016index 2dcb380..963660a 100644
26017--- a/arch/x86/lib/memset_64.S
26018+++ b/arch/x86/lib/memset_64.S
26019@@ -30,6 +30,7 @@
26020 movl %edx,%ecx
26021 rep stosb
26022 movq %r9,%rax
26023+ pax_force_retaddr
26024 ret
26025 .Lmemset_e:
26026 .previous
26027@@ -52,6 +53,7 @@
26028 movq %rdx,%rcx
26029 rep stosb
26030 movq %r9,%rax
26031+ pax_force_retaddr
26032 ret
26033 .Lmemset_e_e:
26034 .previous
26035@@ -59,7 +61,7 @@
26036 ENTRY(memset)
26037 ENTRY(__memset)
26038 CFI_STARTPROC
26039- movq %rdi,%r10
26040+ movq %rdi,%r11
26041
26042 /* expand byte value */
26043 movzbl %sil,%ecx
26044@@ -117,7 +119,8 @@ ENTRY(__memset)
26045 jnz .Lloop_1
26046
26047 .Lende:
26048- movq %r10,%rax
26049+ movq %r11,%rax
26050+ pax_force_retaddr
26051 ret
26052
26053 CFI_RESTORE_STATE
26054diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
26055index c9f2d9b..e7fd2c0 100644
26056--- a/arch/x86/lib/mmx_32.c
26057+++ b/arch/x86/lib/mmx_32.c
26058@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
26059 {
26060 void *p;
26061 int i;
26062+ unsigned long cr0;
26063
26064 if (unlikely(in_interrupt()))
26065 return __memcpy(to, from, len);
26066@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
26067 kernel_fpu_begin();
26068
26069 __asm__ __volatile__ (
26070- "1: prefetch (%0)\n" /* This set is 28 bytes */
26071- " prefetch 64(%0)\n"
26072- " prefetch 128(%0)\n"
26073- " prefetch 192(%0)\n"
26074- " prefetch 256(%0)\n"
26075+ "1: prefetch (%1)\n" /* This set is 28 bytes */
26076+ " prefetch 64(%1)\n"
26077+ " prefetch 128(%1)\n"
26078+ " prefetch 192(%1)\n"
26079+ " prefetch 256(%1)\n"
26080 "2: \n"
26081 ".section .fixup, \"ax\"\n"
26082- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
26083+ "3: \n"
26084+
26085+#ifdef CONFIG_PAX_KERNEXEC
26086+ " movl %%cr0, %0\n"
26087+ " movl %0, %%eax\n"
26088+ " andl $0xFFFEFFFF, %%eax\n"
26089+ " movl %%eax, %%cr0\n"
26090+#endif
26091+
26092+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
26093+
26094+#ifdef CONFIG_PAX_KERNEXEC
26095+ " movl %0, %%cr0\n"
26096+#endif
26097+
26098 " jmp 2b\n"
26099 ".previous\n"
26100 _ASM_EXTABLE(1b, 3b)
26101- : : "r" (from));
26102+ : "=&r" (cr0) : "r" (from) : "ax");
26103
26104 for ( ; i > 5; i--) {
26105 __asm__ __volatile__ (
26106- "1: prefetch 320(%0)\n"
26107- "2: movq (%0), %%mm0\n"
26108- " movq 8(%0), %%mm1\n"
26109- " movq 16(%0), %%mm2\n"
26110- " movq 24(%0), %%mm3\n"
26111- " movq %%mm0, (%1)\n"
26112- " movq %%mm1, 8(%1)\n"
26113- " movq %%mm2, 16(%1)\n"
26114- " movq %%mm3, 24(%1)\n"
26115- " movq 32(%0), %%mm0\n"
26116- " movq 40(%0), %%mm1\n"
26117- " movq 48(%0), %%mm2\n"
26118- " movq 56(%0), %%mm3\n"
26119- " movq %%mm0, 32(%1)\n"
26120- " movq %%mm1, 40(%1)\n"
26121- " movq %%mm2, 48(%1)\n"
26122- " movq %%mm3, 56(%1)\n"
26123+ "1: prefetch 320(%1)\n"
26124+ "2: movq (%1), %%mm0\n"
26125+ " movq 8(%1), %%mm1\n"
26126+ " movq 16(%1), %%mm2\n"
26127+ " movq 24(%1), %%mm3\n"
26128+ " movq %%mm0, (%2)\n"
26129+ " movq %%mm1, 8(%2)\n"
26130+ " movq %%mm2, 16(%2)\n"
26131+ " movq %%mm3, 24(%2)\n"
26132+ " movq 32(%1), %%mm0\n"
26133+ " movq 40(%1), %%mm1\n"
26134+ " movq 48(%1), %%mm2\n"
26135+ " movq 56(%1), %%mm3\n"
26136+ " movq %%mm0, 32(%2)\n"
26137+ " movq %%mm1, 40(%2)\n"
26138+ " movq %%mm2, 48(%2)\n"
26139+ " movq %%mm3, 56(%2)\n"
26140 ".section .fixup, \"ax\"\n"
26141- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
26142+ "3:\n"
26143+
26144+#ifdef CONFIG_PAX_KERNEXEC
26145+ " movl %%cr0, %0\n"
26146+ " movl %0, %%eax\n"
26147+ " andl $0xFFFEFFFF, %%eax\n"
26148+ " movl %%eax, %%cr0\n"
26149+#endif
26150+
26151+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
26152+
26153+#ifdef CONFIG_PAX_KERNEXEC
26154+ " movl %0, %%cr0\n"
26155+#endif
26156+
26157 " jmp 2b\n"
26158 ".previous\n"
26159 _ASM_EXTABLE(1b, 3b)
26160- : : "r" (from), "r" (to) : "memory");
26161+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
26162
26163 from += 64;
26164 to += 64;
26165@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
26166 static void fast_copy_page(void *to, void *from)
26167 {
26168 int i;
26169+ unsigned long cr0;
26170
26171 kernel_fpu_begin();
26172
26173@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
26174 * but that is for later. -AV
26175 */
26176 __asm__ __volatile__(
26177- "1: prefetch (%0)\n"
26178- " prefetch 64(%0)\n"
26179- " prefetch 128(%0)\n"
26180- " prefetch 192(%0)\n"
26181- " prefetch 256(%0)\n"
26182+ "1: prefetch (%1)\n"
26183+ " prefetch 64(%1)\n"
26184+ " prefetch 128(%1)\n"
26185+ " prefetch 192(%1)\n"
26186+ " prefetch 256(%1)\n"
26187 "2: \n"
26188 ".section .fixup, \"ax\"\n"
26189- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
26190+ "3: \n"
26191+
26192+#ifdef CONFIG_PAX_KERNEXEC
26193+ " movl %%cr0, %0\n"
26194+ " movl %0, %%eax\n"
26195+ " andl $0xFFFEFFFF, %%eax\n"
26196+ " movl %%eax, %%cr0\n"
26197+#endif
26198+
26199+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
26200+
26201+#ifdef CONFIG_PAX_KERNEXEC
26202+ " movl %0, %%cr0\n"
26203+#endif
26204+
26205 " jmp 2b\n"
26206 ".previous\n"
26207- _ASM_EXTABLE(1b, 3b) : : "r" (from));
26208+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
26209
26210 for (i = 0; i < (4096-320)/64; i++) {
26211 __asm__ __volatile__ (
26212- "1: prefetch 320(%0)\n"
26213- "2: movq (%0), %%mm0\n"
26214- " movntq %%mm0, (%1)\n"
26215- " movq 8(%0), %%mm1\n"
26216- " movntq %%mm1, 8(%1)\n"
26217- " movq 16(%0), %%mm2\n"
26218- " movntq %%mm2, 16(%1)\n"
26219- " movq 24(%0), %%mm3\n"
26220- " movntq %%mm3, 24(%1)\n"
26221- " movq 32(%0), %%mm4\n"
26222- " movntq %%mm4, 32(%1)\n"
26223- " movq 40(%0), %%mm5\n"
26224- " movntq %%mm5, 40(%1)\n"
26225- " movq 48(%0), %%mm6\n"
26226- " movntq %%mm6, 48(%1)\n"
26227- " movq 56(%0), %%mm7\n"
26228- " movntq %%mm7, 56(%1)\n"
26229+ "1: prefetch 320(%1)\n"
26230+ "2: movq (%1), %%mm0\n"
26231+ " movntq %%mm0, (%2)\n"
26232+ " movq 8(%1), %%mm1\n"
26233+ " movntq %%mm1, 8(%2)\n"
26234+ " movq 16(%1), %%mm2\n"
26235+ " movntq %%mm2, 16(%2)\n"
26236+ " movq 24(%1), %%mm3\n"
26237+ " movntq %%mm3, 24(%2)\n"
26238+ " movq 32(%1), %%mm4\n"
26239+ " movntq %%mm4, 32(%2)\n"
26240+ " movq 40(%1), %%mm5\n"
26241+ " movntq %%mm5, 40(%2)\n"
26242+ " movq 48(%1), %%mm6\n"
26243+ " movntq %%mm6, 48(%2)\n"
26244+ " movq 56(%1), %%mm7\n"
26245+ " movntq %%mm7, 56(%2)\n"
26246 ".section .fixup, \"ax\"\n"
26247- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
26248+ "3:\n"
26249+
26250+#ifdef CONFIG_PAX_KERNEXEC
26251+ " movl %%cr0, %0\n"
26252+ " movl %0, %%eax\n"
26253+ " andl $0xFFFEFFFF, %%eax\n"
26254+ " movl %%eax, %%cr0\n"
26255+#endif
26256+
26257+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
26258+
26259+#ifdef CONFIG_PAX_KERNEXEC
26260+ " movl %0, %%cr0\n"
26261+#endif
26262+
26263 " jmp 2b\n"
26264 ".previous\n"
26265- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
26266+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
26267
26268 from += 64;
26269 to += 64;
26270@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
26271 static void fast_copy_page(void *to, void *from)
26272 {
26273 int i;
26274+ unsigned long cr0;
26275
26276 kernel_fpu_begin();
26277
26278 __asm__ __volatile__ (
26279- "1: prefetch (%0)\n"
26280- " prefetch 64(%0)\n"
26281- " prefetch 128(%0)\n"
26282- " prefetch 192(%0)\n"
26283- " prefetch 256(%0)\n"
26284+ "1: prefetch (%1)\n"
26285+ " prefetch 64(%1)\n"
26286+ " prefetch 128(%1)\n"
26287+ " prefetch 192(%1)\n"
26288+ " prefetch 256(%1)\n"
26289 "2: \n"
26290 ".section .fixup, \"ax\"\n"
26291- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
26292+ "3: \n"
26293+
26294+#ifdef CONFIG_PAX_KERNEXEC
26295+ " movl %%cr0, %0\n"
26296+ " movl %0, %%eax\n"
26297+ " andl $0xFFFEFFFF, %%eax\n"
26298+ " movl %%eax, %%cr0\n"
26299+#endif
26300+
26301+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
26302+
26303+#ifdef CONFIG_PAX_KERNEXEC
26304+ " movl %0, %%cr0\n"
26305+#endif
26306+
26307 " jmp 2b\n"
26308 ".previous\n"
26309- _ASM_EXTABLE(1b, 3b) : : "r" (from));
26310+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
26311
26312 for (i = 0; i < 4096/64; i++) {
26313 __asm__ __volatile__ (
26314- "1: prefetch 320(%0)\n"
26315- "2: movq (%0), %%mm0\n"
26316- " movq 8(%0), %%mm1\n"
26317- " movq 16(%0), %%mm2\n"
26318- " movq 24(%0), %%mm3\n"
26319- " movq %%mm0, (%1)\n"
26320- " movq %%mm1, 8(%1)\n"
26321- " movq %%mm2, 16(%1)\n"
26322- " movq %%mm3, 24(%1)\n"
26323- " movq 32(%0), %%mm0\n"
26324- " movq 40(%0), %%mm1\n"
26325- " movq 48(%0), %%mm2\n"
26326- " movq 56(%0), %%mm3\n"
26327- " movq %%mm0, 32(%1)\n"
26328- " movq %%mm1, 40(%1)\n"
26329- " movq %%mm2, 48(%1)\n"
26330- " movq %%mm3, 56(%1)\n"
26331+ "1: prefetch 320(%1)\n"
26332+ "2: movq (%1), %%mm0\n"
26333+ " movq 8(%1), %%mm1\n"
26334+ " movq 16(%1), %%mm2\n"
26335+ " movq 24(%1), %%mm3\n"
26336+ " movq %%mm0, (%2)\n"
26337+ " movq %%mm1, 8(%2)\n"
26338+ " movq %%mm2, 16(%2)\n"
26339+ " movq %%mm3, 24(%2)\n"
26340+ " movq 32(%1), %%mm0\n"
26341+ " movq 40(%1), %%mm1\n"
26342+ " movq 48(%1), %%mm2\n"
26343+ " movq 56(%1), %%mm3\n"
26344+ " movq %%mm0, 32(%2)\n"
26345+ " movq %%mm1, 40(%2)\n"
26346+ " movq %%mm2, 48(%2)\n"
26347+ " movq %%mm3, 56(%2)\n"
26348 ".section .fixup, \"ax\"\n"
26349- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
26350+ "3:\n"
26351+
26352+#ifdef CONFIG_PAX_KERNEXEC
26353+ " movl %%cr0, %0\n"
26354+ " movl %0, %%eax\n"
26355+ " andl $0xFFFEFFFF, %%eax\n"
26356+ " movl %%eax, %%cr0\n"
26357+#endif
26358+
26359+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
26360+
26361+#ifdef CONFIG_PAX_KERNEXEC
26362+ " movl %0, %%cr0\n"
26363+#endif
26364+
26365 " jmp 2b\n"
26366 ".previous\n"
26367 _ASM_EXTABLE(1b, 3b)
26368- : : "r" (from), "r" (to) : "memory");
26369+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
26370
26371 from += 64;
26372 to += 64;
26373diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
26374index f6d13ee..aca5f0b 100644
26375--- a/arch/x86/lib/msr-reg.S
26376+++ b/arch/x86/lib/msr-reg.S
26377@@ -3,6 +3,7 @@
26378 #include <asm/dwarf2.h>
26379 #include <asm/asm.h>
26380 #include <asm/msr.h>
26381+#include <asm/alternative-asm.h>
26382
26383 #ifdef CONFIG_X86_64
26384 /*
26385@@ -16,7 +17,7 @@ ENTRY(\op\()_safe_regs)
26386 CFI_STARTPROC
26387 pushq_cfi %rbx
26388 pushq_cfi %rbp
26389- movq %rdi, %r10 /* Save pointer */
26390+ movq %rdi, %r9 /* Save pointer */
26391 xorl %r11d, %r11d /* Return value */
26392 movl (%rdi), %eax
26393 movl 4(%rdi), %ecx
26394@@ -27,16 +28,17 @@ ENTRY(\op\()_safe_regs)
26395 movl 28(%rdi), %edi
26396 CFI_REMEMBER_STATE
26397 1: \op
26398-2: movl %eax, (%r10)
26399+2: movl %eax, (%r9)
26400 movl %r11d, %eax /* Return value */
26401- movl %ecx, 4(%r10)
26402- movl %edx, 8(%r10)
26403- movl %ebx, 12(%r10)
26404- movl %ebp, 20(%r10)
26405- movl %esi, 24(%r10)
26406- movl %edi, 28(%r10)
26407+ movl %ecx, 4(%r9)
26408+ movl %edx, 8(%r9)
26409+ movl %ebx, 12(%r9)
26410+ movl %ebp, 20(%r9)
26411+ movl %esi, 24(%r9)
26412+ movl %edi, 28(%r9)
26413 popq_cfi %rbp
26414 popq_cfi %rbx
26415+ pax_force_retaddr
26416 ret
26417 3:
26418 CFI_RESTORE_STATE
26419diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
26420index fc6ba17..04471c5 100644
26421--- a/arch/x86/lib/putuser.S
26422+++ b/arch/x86/lib/putuser.S
26423@@ -16,7 +16,9 @@
26424 #include <asm/errno.h>
26425 #include <asm/asm.h>
26426 #include <asm/smap.h>
26427-
26428+#include <asm/segment.h>
26429+#include <asm/pgtable.h>
26430+#include <asm/alternative-asm.h>
26431
26432 /*
26433 * __put_user_X
26434@@ -30,57 +32,125 @@
26435 * as they get called from within inline assembly.
26436 */
26437
26438-#define ENTER CFI_STARTPROC ; \
26439- GET_THREAD_INFO(%_ASM_BX)
26440-#define EXIT ASM_CLAC ; \
26441- ret ; \
26442+#define ENTER CFI_STARTPROC
26443+#define EXIT ASM_CLAC ; \
26444+ pax_force_retaddr ; \
26445+ ret ; \
26446 CFI_ENDPROC
26447
26448+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26449+#define _DEST %_ASM_CX,%_ASM_BX
26450+#else
26451+#define _DEST %_ASM_CX
26452+#endif
26453+
26454+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
26455+#define __copyuser_seg gs;
26456+#else
26457+#define __copyuser_seg
26458+#endif
26459+
26460 .text
26461 ENTRY(__put_user_1)
26462 ENTER
26463+
26464+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
26465+ GET_THREAD_INFO(%_ASM_BX)
26466 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
26467 jae bad_put_user
26468 ASM_STAC
26469-1: movb %al,(%_ASM_CX)
26470+
26471+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26472+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
26473+ cmp %_ASM_BX,%_ASM_CX
26474+ jb 1234f
26475+ xor %ebx,%ebx
26476+1234:
26477+#endif
26478+
26479+#endif
26480+
26481+1: __copyuser_seg movb %al,(_DEST)
26482 xor %eax,%eax
26483 EXIT
26484 ENDPROC(__put_user_1)
26485
26486 ENTRY(__put_user_2)
26487 ENTER
26488+
26489+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
26490+ GET_THREAD_INFO(%_ASM_BX)
26491 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
26492 sub $1,%_ASM_BX
26493 cmp %_ASM_BX,%_ASM_CX
26494 jae bad_put_user
26495 ASM_STAC
26496-2: movw %ax,(%_ASM_CX)
26497+
26498+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26499+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
26500+ cmp %_ASM_BX,%_ASM_CX
26501+ jb 1234f
26502+ xor %ebx,%ebx
26503+1234:
26504+#endif
26505+
26506+#endif
26507+
26508+2: __copyuser_seg movw %ax,(_DEST)
26509 xor %eax,%eax
26510 EXIT
26511 ENDPROC(__put_user_2)
26512
26513 ENTRY(__put_user_4)
26514 ENTER
26515+
26516+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
26517+ GET_THREAD_INFO(%_ASM_BX)
26518 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
26519 sub $3,%_ASM_BX
26520 cmp %_ASM_BX,%_ASM_CX
26521 jae bad_put_user
26522 ASM_STAC
26523-3: movl %eax,(%_ASM_CX)
26524+
26525+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26526+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
26527+ cmp %_ASM_BX,%_ASM_CX
26528+ jb 1234f
26529+ xor %ebx,%ebx
26530+1234:
26531+#endif
26532+
26533+#endif
26534+
26535+3: __copyuser_seg movl %eax,(_DEST)
26536 xor %eax,%eax
26537 EXIT
26538 ENDPROC(__put_user_4)
26539
26540 ENTRY(__put_user_8)
26541 ENTER
26542+
26543+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
26544+ GET_THREAD_INFO(%_ASM_BX)
26545 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
26546 sub $7,%_ASM_BX
26547 cmp %_ASM_BX,%_ASM_CX
26548 jae bad_put_user
26549 ASM_STAC
26550-4: mov %_ASM_AX,(%_ASM_CX)
26551+
26552+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26553+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
26554+ cmp %_ASM_BX,%_ASM_CX
26555+ jb 1234f
26556+ xor %ebx,%ebx
26557+1234:
26558+#endif
26559+
26560+#endif
26561+
26562+4: __copyuser_seg mov %_ASM_AX,(_DEST)
26563 #ifdef CONFIG_X86_32
26564-5: movl %edx,4(%_ASM_CX)
26565+5: __copyuser_seg movl %edx,4(_DEST)
26566 #endif
26567 xor %eax,%eax
26568 EXIT
26569diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
26570index 1cad221..de671ee 100644
26571--- a/arch/x86/lib/rwlock.S
26572+++ b/arch/x86/lib/rwlock.S
26573@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
26574 FRAME
26575 0: LOCK_PREFIX
26576 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
26577+
26578+#ifdef CONFIG_PAX_REFCOUNT
26579+ jno 1234f
26580+ LOCK_PREFIX
26581+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
26582+ int $4
26583+1234:
26584+ _ASM_EXTABLE(1234b, 1234b)
26585+#endif
26586+
26587 1: rep; nop
26588 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
26589 jne 1b
26590 LOCK_PREFIX
26591 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
26592+
26593+#ifdef CONFIG_PAX_REFCOUNT
26594+ jno 1234f
26595+ LOCK_PREFIX
26596+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
26597+ int $4
26598+1234:
26599+ _ASM_EXTABLE(1234b, 1234b)
26600+#endif
26601+
26602 jnz 0b
26603 ENDFRAME
26604+ pax_force_retaddr
26605 ret
26606 CFI_ENDPROC
26607 END(__write_lock_failed)
26608@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
26609 FRAME
26610 0: LOCK_PREFIX
26611 READ_LOCK_SIZE(inc) (%__lock_ptr)
26612+
26613+#ifdef CONFIG_PAX_REFCOUNT
26614+ jno 1234f
26615+ LOCK_PREFIX
26616+ READ_LOCK_SIZE(dec) (%__lock_ptr)
26617+ int $4
26618+1234:
26619+ _ASM_EXTABLE(1234b, 1234b)
26620+#endif
26621+
26622 1: rep; nop
26623 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
26624 js 1b
26625 LOCK_PREFIX
26626 READ_LOCK_SIZE(dec) (%__lock_ptr)
26627+
26628+#ifdef CONFIG_PAX_REFCOUNT
26629+ jno 1234f
26630+ LOCK_PREFIX
26631+ READ_LOCK_SIZE(inc) (%__lock_ptr)
26632+ int $4
26633+1234:
26634+ _ASM_EXTABLE(1234b, 1234b)
26635+#endif
26636+
26637 js 0b
26638 ENDFRAME
26639+ pax_force_retaddr
26640 ret
26641 CFI_ENDPROC
26642 END(__read_lock_failed)
26643diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
26644index 5dff5f0..cadebf4 100644
26645--- a/arch/x86/lib/rwsem.S
26646+++ b/arch/x86/lib/rwsem.S
26647@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
26648 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
26649 CFI_RESTORE __ASM_REG(dx)
26650 restore_common_regs
26651+ pax_force_retaddr
26652 ret
26653 CFI_ENDPROC
26654 ENDPROC(call_rwsem_down_read_failed)
26655@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
26656 movq %rax,%rdi
26657 call rwsem_down_write_failed
26658 restore_common_regs
26659+ pax_force_retaddr
26660 ret
26661 CFI_ENDPROC
26662 ENDPROC(call_rwsem_down_write_failed)
26663@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
26664 movq %rax,%rdi
26665 call rwsem_wake
26666 restore_common_regs
26667-1: ret
26668+1: pax_force_retaddr
26669+ ret
26670 CFI_ENDPROC
26671 ENDPROC(call_rwsem_wake)
26672
26673@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
26674 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
26675 CFI_RESTORE __ASM_REG(dx)
26676 restore_common_regs
26677+ pax_force_retaddr
26678 ret
26679 CFI_ENDPROC
26680 ENDPROC(call_rwsem_downgrade_wake)
26681diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
26682index a63efd6..ccecad8 100644
26683--- a/arch/x86/lib/thunk_64.S
26684+++ b/arch/x86/lib/thunk_64.S
26685@@ -8,6 +8,7 @@
26686 #include <linux/linkage.h>
26687 #include <asm/dwarf2.h>
26688 #include <asm/calling.h>
26689+#include <asm/alternative-asm.h>
26690
26691 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
26692 .macro THUNK name, func, put_ret_addr_in_rdi=0
26693@@ -41,5 +42,6 @@
26694 SAVE_ARGS
26695 restore:
26696 RESTORE_ARGS
26697+ pax_force_retaddr
26698 ret
26699 CFI_ENDPROC
26700diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
26701index f0312d7..9c39d63 100644
26702--- a/arch/x86/lib/usercopy_32.c
26703+++ b/arch/x86/lib/usercopy_32.c
26704@@ -42,11 +42,13 @@ do { \
26705 int __d0; \
26706 might_fault(); \
26707 __asm__ __volatile__( \
26708+ __COPYUSER_SET_ES \
26709 ASM_STAC "\n" \
26710 "0: rep; stosl\n" \
26711 " movl %2,%0\n" \
26712 "1: rep; stosb\n" \
26713 "2: " ASM_CLAC "\n" \
26714+ __COPYUSER_RESTORE_ES \
26715 ".section .fixup,\"ax\"\n" \
26716 "3: lea 0(%2,%0,4),%0\n" \
26717 " jmp 2b\n" \
26718@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
26719
26720 #ifdef CONFIG_X86_INTEL_USERCOPY
26721 static unsigned long
26722-__copy_user_intel(void __user *to, const void *from, unsigned long size)
26723+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
26724 {
26725 int d0, d1;
26726 __asm__ __volatile__(
26727@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
26728 " .align 2,0x90\n"
26729 "3: movl 0(%4), %%eax\n"
26730 "4: movl 4(%4), %%edx\n"
26731- "5: movl %%eax, 0(%3)\n"
26732- "6: movl %%edx, 4(%3)\n"
26733+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
26734+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
26735 "7: movl 8(%4), %%eax\n"
26736 "8: movl 12(%4),%%edx\n"
26737- "9: movl %%eax, 8(%3)\n"
26738- "10: movl %%edx, 12(%3)\n"
26739+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
26740+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
26741 "11: movl 16(%4), %%eax\n"
26742 "12: movl 20(%4), %%edx\n"
26743- "13: movl %%eax, 16(%3)\n"
26744- "14: movl %%edx, 20(%3)\n"
26745+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
26746+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
26747 "15: movl 24(%4), %%eax\n"
26748 "16: movl 28(%4), %%edx\n"
26749- "17: movl %%eax, 24(%3)\n"
26750- "18: movl %%edx, 28(%3)\n"
26751+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
26752+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
26753 "19: movl 32(%4), %%eax\n"
26754 "20: movl 36(%4), %%edx\n"
26755- "21: movl %%eax, 32(%3)\n"
26756- "22: movl %%edx, 36(%3)\n"
26757+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
26758+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
26759 "23: movl 40(%4), %%eax\n"
26760 "24: movl 44(%4), %%edx\n"
26761- "25: movl %%eax, 40(%3)\n"
26762- "26: movl %%edx, 44(%3)\n"
26763+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
26764+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
26765 "27: movl 48(%4), %%eax\n"
26766 "28: movl 52(%4), %%edx\n"
26767- "29: movl %%eax, 48(%3)\n"
26768- "30: movl %%edx, 52(%3)\n"
26769+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
26770+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
26771 "31: movl 56(%4), %%eax\n"
26772 "32: movl 60(%4), %%edx\n"
26773- "33: movl %%eax, 56(%3)\n"
26774- "34: movl %%edx, 60(%3)\n"
26775+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
26776+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
26777 " addl $-64, %0\n"
26778 " addl $64, %4\n"
26779 " addl $64, %3\n"
26780@@ -149,10 +151,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
26781 " shrl $2, %0\n"
26782 " andl $3, %%eax\n"
26783 " cld\n"
26784+ __COPYUSER_SET_ES
26785 "99: rep; movsl\n"
26786 "36: movl %%eax, %0\n"
26787 "37: rep; movsb\n"
26788 "100:\n"
26789+ __COPYUSER_RESTORE_ES
26790 ".section .fixup,\"ax\"\n"
26791 "101: lea 0(%%eax,%0,4),%0\n"
26792 " jmp 100b\n"
26793@@ -202,46 +206,150 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
26794 }
26795
26796 static unsigned long
26797+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
26798+{
26799+ int d0, d1;
26800+ __asm__ __volatile__(
26801+ " .align 2,0x90\n"
26802+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
26803+ " cmpl $67, %0\n"
26804+ " jbe 3f\n"
26805+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
26806+ " .align 2,0x90\n"
26807+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
26808+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
26809+ "5: movl %%eax, 0(%3)\n"
26810+ "6: movl %%edx, 4(%3)\n"
26811+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
26812+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
26813+ "9: movl %%eax, 8(%3)\n"
26814+ "10: movl %%edx, 12(%3)\n"
26815+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
26816+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
26817+ "13: movl %%eax, 16(%3)\n"
26818+ "14: movl %%edx, 20(%3)\n"
26819+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
26820+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
26821+ "17: movl %%eax, 24(%3)\n"
26822+ "18: movl %%edx, 28(%3)\n"
26823+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
26824+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
26825+ "21: movl %%eax, 32(%3)\n"
26826+ "22: movl %%edx, 36(%3)\n"
26827+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
26828+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
26829+ "25: movl %%eax, 40(%3)\n"
26830+ "26: movl %%edx, 44(%3)\n"
26831+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
26832+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
26833+ "29: movl %%eax, 48(%3)\n"
26834+ "30: movl %%edx, 52(%3)\n"
26835+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
26836+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
26837+ "33: movl %%eax, 56(%3)\n"
26838+ "34: movl %%edx, 60(%3)\n"
26839+ " addl $-64, %0\n"
26840+ " addl $64, %4\n"
26841+ " addl $64, %3\n"
26842+ " cmpl $63, %0\n"
26843+ " ja 1b\n"
26844+ "35: movl %0, %%eax\n"
26845+ " shrl $2, %0\n"
26846+ " andl $3, %%eax\n"
26847+ " cld\n"
26848+ "99: rep; "__copyuser_seg" movsl\n"
26849+ "36: movl %%eax, %0\n"
26850+ "37: rep; "__copyuser_seg" movsb\n"
26851+ "100:\n"
26852+ ".section .fixup,\"ax\"\n"
26853+ "101: lea 0(%%eax,%0,4),%0\n"
26854+ " jmp 100b\n"
26855+ ".previous\n"
26856+ _ASM_EXTABLE(1b,100b)
26857+ _ASM_EXTABLE(2b,100b)
26858+ _ASM_EXTABLE(3b,100b)
26859+ _ASM_EXTABLE(4b,100b)
26860+ _ASM_EXTABLE(5b,100b)
26861+ _ASM_EXTABLE(6b,100b)
26862+ _ASM_EXTABLE(7b,100b)
26863+ _ASM_EXTABLE(8b,100b)
26864+ _ASM_EXTABLE(9b,100b)
26865+ _ASM_EXTABLE(10b,100b)
26866+ _ASM_EXTABLE(11b,100b)
26867+ _ASM_EXTABLE(12b,100b)
26868+ _ASM_EXTABLE(13b,100b)
26869+ _ASM_EXTABLE(14b,100b)
26870+ _ASM_EXTABLE(15b,100b)
26871+ _ASM_EXTABLE(16b,100b)
26872+ _ASM_EXTABLE(17b,100b)
26873+ _ASM_EXTABLE(18b,100b)
26874+ _ASM_EXTABLE(19b,100b)
26875+ _ASM_EXTABLE(20b,100b)
26876+ _ASM_EXTABLE(21b,100b)
26877+ _ASM_EXTABLE(22b,100b)
26878+ _ASM_EXTABLE(23b,100b)
26879+ _ASM_EXTABLE(24b,100b)
26880+ _ASM_EXTABLE(25b,100b)
26881+ _ASM_EXTABLE(26b,100b)
26882+ _ASM_EXTABLE(27b,100b)
26883+ _ASM_EXTABLE(28b,100b)
26884+ _ASM_EXTABLE(29b,100b)
26885+ _ASM_EXTABLE(30b,100b)
26886+ _ASM_EXTABLE(31b,100b)
26887+ _ASM_EXTABLE(32b,100b)
26888+ _ASM_EXTABLE(33b,100b)
26889+ _ASM_EXTABLE(34b,100b)
26890+ _ASM_EXTABLE(35b,100b)
26891+ _ASM_EXTABLE(36b,100b)
26892+ _ASM_EXTABLE(37b,100b)
26893+ _ASM_EXTABLE(99b,101b)
26894+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
26895+ : "1"(to), "2"(from), "0"(size)
26896+ : "eax", "edx", "memory");
26897+ return size;
26898+}
26899+
26900+static unsigned long __size_overflow(3)
26901 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
26902 {
26903 int d0, d1;
26904 __asm__ __volatile__(
26905 " .align 2,0x90\n"
26906- "0: movl 32(%4), %%eax\n"
26907+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
26908 " cmpl $67, %0\n"
26909 " jbe 2f\n"
26910- "1: movl 64(%4), %%eax\n"
26911+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
26912 " .align 2,0x90\n"
26913- "2: movl 0(%4), %%eax\n"
26914- "21: movl 4(%4), %%edx\n"
26915+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
26916+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
26917 " movl %%eax, 0(%3)\n"
26918 " movl %%edx, 4(%3)\n"
26919- "3: movl 8(%4), %%eax\n"
26920- "31: movl 12(%4),%%edx\n"
26921+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
26922+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
26923 " movl %%eax, 8(%3)\n"
26924 " movl %%edx, 12(%3)\n"
26925- "4: movl 16(%4), %%eax\n"
26926- "41: movl 20(%4), %%edx\n"
26927+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
26928+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
26929 " movl %%eax, 16(%3)\n"
26930 " movl %%edx, 20(%3)\n"
26931- "10: movl 24(%4), %%eax\n"
26932- "51: movl 28(%4), %%edx\n"
26933+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
26934+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
26935 " movl %%eax, 24(%3)\n"
26936 " movl %%edx, 28(%3)\n"
26937- "11: movl 32(%4), %%eax\n"
26938- "61: movl 36(%4), %%edx\n"
26939+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
26940+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
26941 " movl %%eax, 32(%3)\n"
26942 " movl %%edx, 36(%3)\n"
26943- "12: movl 40(%4), %%eax\n"
26944- "71: movl 44(%4), %%edx\n"
26945+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
26946+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
26947 " movl %%eax, 40(%3)\n"
26948 " movl %%edx, 44(%3)\n"
26949- "13: movl 48(%4), %%eax\n"
26950- "81: movl 52(%4), %%edx\n"
26951+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
26952+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
26953 " movl %%eax, 48(%3)\n"
26954 " movl %%edx, 52(%3)\n"
26955- "14: movl 56(%4), %%eax\n"
26956- "91: movl 60(%4), %%edx\n"
26957+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
26958+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
26959 " movl %%eax, 56(%3)\n"
26960 " movl %%edx, 60(%3)\n"
26961 " addl $-64, %0\n"
26962@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
26963 " shrl $2, %0\n"
26964 " andl $3, %%eax\n"
26965 " cld\n"
26966- "6: rep; movsl\n"
26967+ "6: rep; "__copyuser_seg" movsl\n"
26968 " movl %%eax,%0\n"
26969- "7: rep; movsb\n"
26970+ "7: rep; "__copyuser_seg" movsb\n"
26971 "8:\n"
26972 ".section .fixup,\"ax\"\n"
26973 "9: lea 0(%%eax,%0,4),%0\n"
26974@@ -298,48 +406,48 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
26975 * hyoshiok@miraclelinux.com
26976 */
26977
26978-static unsigned long __copy_user_zeroing_intel_nocache(void *to,
26979+static unsigned long __size_overflow(3) __copy_user_zeroing_intel_nocache(void *to,
26980 const void __user *from, unsigned long size)
26981 {
26982 int d0, d1;
26983
26984 __asm__ __volatile__(
26985 " .align 2,0x90\n"
26986- "0: movl 32(%4), %%eax\n"
26987+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
26988 " cmpl $67, %0\n"
26989 " jbe 2f\n"
26990- "1: movl 64(%4), %%eax\n"
26991+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
26992 " .align 2,0x90\n"
26993- "2: movl 0(%4), %%eax\n"
26994- "21: movl 4(%4), %%edx\n"
26995+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
26996+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
26997 " movnti %%eax, 0(%3)\n"
26998 " movnti %%edx, 4(%3)\n"
26999- "3: movl 8(%4), %%eax\n"
27000- "31: movl 12(%4),%%edx\n"
27001+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
27002+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
27003 " movnti %%eax, 8(%3)\n"
27004 " movnti %%edx, 12(%3)\n"
27005- "4: movl 16(%4), %%eax\n"
27006- "41: movl 20(%4), %%edx\n"
27007+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
27008+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
27009 " movnti %%eax, 16(%3)\n"
27010 " movnti %%edx, 20(%3)\n"
27011- "10: movl 24(%4), %%eax\n"
27012- "51: movl 28(%4), %%edx\n"
27013+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
27014+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
27015 " movnti %%eax, 24(%3)\n"
27016 " movnti %%edx, 28(%3)\n"
27017- "11: movl 32(%4), %%eax\n"
27018- "61: movl 36(%4), %%edx\n"
27019+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
27020+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
27021 " movnti %%eax, 32(%3)\n"
27022 " movnti %%edx, 36(%3)\n"
27023- "12: movl 40(%4), %%eax\n"
27024- "71: movl 44(%4), %%edx\n"
27025+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
27026+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
27027 " movnti %%eax, 40(%3)\n"
27028 " movnti %%edx, 44(%3)\n"
27029- "13: movl 48(%4), %%eax\n"
27030- "81: movl 52(%4), %%edx\n"
27031+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
27032+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
27033 " movnti %%eax, 48(%3)\n"
27034 " movnti %%edx, 52(%3)\n"
27035- "14: movl 56(%4), %%eax\n"
27036- "91: movl 60(%4), %%edx\n"
27037+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
27038+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
27039 " movnti %%eax, 56(%3)\n"
27040 " movnti %%edx, 60(%3)\n"
27041 " addl $-64, %0\n"
27042@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
27043 " shrl $2, %0\n"
27044 " andl $3, %%eax\n"
27045 " cld\n"
27046- "6: rep; movsl\n"
27047+ "6: rep; "__copyuser_seg" movsl\n"
27048 " movl %%eax,%0\n"
27049- "7: rep; movsb\n"
27050+ "7: rep; "__copyuser_seg" movsb\n"
27051 "8:\n"
27052 ".section .fixup,\"ax\"\n"
27053 "9: lea 0(%%eax,%0,4),%0\n"
27054@@ -392,48 +500,48 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
27055 return size;
27056 }
27057
27058-static unsigned long __copy_user_intel_nocache(void *to,
27059+static unsigned long __size_overflow(3) __copy_user_intel_nocache(void *to,
27060 const void __user *from, unsigned long size)
27061 {
27062 int d0, d1;
27063
27064 __asm__ __volatile__(
27065 " .align 2,0x90\n"
27066- "0: movl 32(%4), %%eax\n"
27067+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
27068 " cmpl $67, %0\n"
27069 " jbe 2f\n"
27070- "1: movl 64(%4), %%eax\n"
27071+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
27072 " .align 2,0x90\n"
27073- "2: movl 0(%4), %%eax\n"
27074- "21: movl 4(%4), %%edx\n"
27075+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
27076+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
27077 " movnti %%eax, 0(%3)\n"
27078 " movnti %%edx, 4(%3)\n"
27079- "3: movl 8(%4), %%eax\n"
27080- "31: movl 12(%4),%%edx\n"
27081+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
27082+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
27083 " movnti %%eax, 8(%3)\n"
27084 " movnti %%edx, 12(%3)\n"
27085- "4: movl 16(%4), %%eax\n"
27086- "41: movl 20(%4), %%edx\n"
27087+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
27088+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
27089 " movnti %%eax, 16(%3)\n"
27090 " movnti %%edx, 20(%3)\n"
27091- "10: movl 24(%4), %%eax\n"
27092- "51: movl 28(%4), %%edx\n"
27093+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
27094+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
27095 " movnti %%eax, 24(%3)\n"
27096 " movnti %%edx, 28(%3)\n"
27097- "11: movl 32(%4), %%eax\n"
27098- "61: movl 36(%4), %%edx\n"
27099+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
27100+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
27101 " movnti %%eax, 32(%3)\n"
27102 " movnti %%edx, 36(%3)\n"
27103- "12: movl 40(%4), %%eax\n"
27104- "71: movl 44(%4), %%edx\n"
27105+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
27106+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
27107 " movnti %%eax, 40(%3)\n"
27108 " movnti %%edx, 44(%3)\n"
27109- "13: movl 48(%4), %%eax\n"
27110- "81: movl 52(%4), %%edx\n"
27111+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
27112+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
27113 " movnti %%eax, 48(%3)\n"
27114 " movnti %%edx, 52(%3)\n"
27115- "14: movl 56(%4), %%eax\n"
27116- "91: movl 60(%4), %%edx\n"
27117+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
27118+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
27119 " movnti %%eax, 56(%3)\n"
27120 " movnti %%edx, 60(%3)\n"
27121 " addl $-64, %0\n"
27122@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
27123 " shrl $2, %0\n"
27124 " andl $3, %%eax\n"
27125 " cld\n"
27126- "6: rep; movsl\n"
27127+ "6: rep; "__copyuser_seg" movsl\n"
27128 " movl %%eax,%0\n"
27129- "7: rep; movsb\n"
27130+ "7: rep; "__copyuser_seg" movsb\n"
27131 "8:\n"
27132 ".section .fixup,\"ax\"\n"
27133 "9: lea 0(%%eax,%0,4),%0\n"
27134@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
27135 */
27136 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
27137 unsigned long size);
27138-unsigned long __copy_user_intel(void __user *to, const void *from,
27139+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
27140+ unsigned long size);
27141+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
27142 unsigned long size);
27143 unsigned long __copy_user_zeroing_intel_nocache(void *to,
27144 const void __user *from, unsigned long size);
27145 #endif /* CONFIG_X86_INTEL_USERCOPY */
27146
27147 /* Generic arbitrary sized copy. */
27148-#define __copy_user(to, from, size) \
27149+#define __copy_user(to, from, size, prefix, set, restore) \
27150 do { \
27151 int __d0, __d1, __d2; \
27152 __asm__ __volatile__( \
27153+ set \
27154 " cmp $7,%0\n" \
27155 " jbe 1f\n" \
27156 " movl %1,%0\n" \
27157 " negl %0\n" \
27158 " andl $7,%0\n" \
27159 " subl %0,%3\n" \
27160- "4: rep; movsb\n" \
27161+ "4: rep; "prefix"movsb\n" \
27162 " movl %3,%0\n" \
27163 " shrl $2,%0\n" \
27164 " andl $3,%3\n" \
27165 " .align 2,0x90\n" \
27166- "0: rep; movsl\n" \
27167+ "0: rep; "prefix"movsl\n" \
27168 " movl %3,%0\n" \
27169- "1: rep; movsb\n" \
27170+ "1: rep; "prefix"movsb\n" \
27171 "2:\n" \
27172+ restore \
27173 ".section .fixup,\"ax\"\n" \
27174 "5: addl %3,%0\n" \
27175 " jmp 2b\n" \
27176@@ -538,14 +650,14 @@ do { \
27177 " negl %0\n" \
27178 " andl $7,%0\n" \
27179 " subl %0,%3\n" \
27180- "4: rep; movsb\n" \
27181+ "4: rep; "__copyuser_seg"movsb\n" \
27182 " movl %3,%0\n" \
27183 " shrl $2,%0\n" \
27184 " andl $3,%3\n" \
27185 " .align 2,0x90\n" \
27186- "0: rep; movsl\n" \
27187+ "0: rep; "__copyuser_seg"movsl\n" \
27188 " movl %3,%0\n" \
27189- "1: rep; movsb\n" \
27190+ "1: rep; "__copyuser_seg"movsb\n" \
27191 "2:\n" \
27192 ".section .fixup,\"ax\"\n" \
27193 "5: addl %3,%0\n" \
27194@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
27195 {
27196 stac();
27197 if (movsl_is_ok(to, from, n))
27198- __copy_user(to, from, n);
27199+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
27200 else
27201- n = __copy_user_intel(to, from, n);
27202+ n = __generic_copy_to_user_intel(to, from, n);
27203 clac();
27204 return n;
27205 }
27206@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
27207 {
27208 stac();
27209 if (movsl_is_ok(to, from, n))
27210- __copy_user(to, from, n);
27211+ __copy_user(to, from, n, __copyuser_seg, "", "");
27212 else
27213- n = __copy_user_intel((void __user *)to,
27214- (const void *)from, n);
27215+ n = __generic_copy_from_user_intel(to, from, n);
27216 clac();
27217 return n;
27218 }
27219@@ -632,66 +743,51 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
27220 if (n > 64 && cpu_has_xmm2)
27221 n = __copy_user_intel_nocache(to, from, n);
27222 else
27223- __copy_user(to, from, n);
27224+ __copy_user(to, from, n, __copyuser_seg, "", "");
27225 #else
27226- __copy_user(to, from, n);
27227+ __copy_user(to, from, n, __copyuser_seg, "", "");
27228 #endif
27229 clac();
27230 return n;
27231 }
27232 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
27233
27234-/**
27235- * copy_to_user: - Copy a block of data into user space.
27236- * @to: Destination address, in user space.
27237- * @from: Source address, in kernel space.
27238- * @n: Number of bytes to copy.
27239- *
27240- * Context: User context only. This function may sleep.
27241- *
27242- * Copy data from kernel space to user space.
27243- *
27244- * Returns number of bytes that could not be copied.
27245- * On success, this will be zero.
27246- */
27247-unsigned long
27248-copy_to_user(void __user *to, const void *from, unsigned long n)
27249-{
27250- if (access_ok(VERIFY_WRITE, to, n))
27251- n = __copy_to_user(to, from, n);
27252- return n;
27253-}
27254-EXPORT_SYMBOL(copy_to_user);
27255-
27256-/**
27257- * copy_from_user: - Copy a block of data from user space.
27258- * @to: Destination address, in kernel space.
27259- * @from: Source address, in user space.
27260- * @n: Number of bytes to copy.
27261- *
27262- * Context: User context only. This function may sleep.
27263- *
27264- * Copy data from user space to kernel space.
27265- *
27266- * Returns number of bytes that could not be copied.
27267- * On success, this will be zero.
27268- *
27269- * If some data could not be copied, this function will pad the copied
27270- * data to the requested size using zero bytes.
27271- */
27272-unsigned long
27273-_copy_from_user(void *to, const void __user *from, unsigned long n)
27274-{
27275- if (access_ok(VERIFY_READ, from, n))
27276- n = __copy_from_user(to, from, n);
27277- else
27278- memset(to, 0, n);
27279- return n;
27280-}
27281-EXPORT_SYMBOL(_copy_from_user);
27282-
27283 void copy_from_user_overflow(void)
27284 {
27285 WARN(1, "Buffer overflow detected!\n");
27286 }
27287 EXPORT_SYMBOL(copy_from_user_overflow);
27288+
27289+void copy_to_user_overflow(void)
27290+{
27291+ WARN(1, "Buffer overflow detected!\n");
27292+}
27293+EXPORT_SYMBOL(copy_to_user_overflow);
27294+
27295+#ifdef CONFIG_PAX_MEMORY_UDEREF
27296+void __set_fs(mm_segment_t x)
27297+{
27298+ switch (x.seg) {
27299+ case 0:
27300+ loadsegment(gs, 0);
27301+ break;
27302+ case TASK_SIZE_MAX:
27303+ loadsegment(gs, __USER_DS);
27304+ break;
27305+ case -1UL:
27306+ loadsegment(gs, __KERNEL_DS);
27307+ break;
27308+ default:
27309+ BUG();
27310+ }
27311+ return;
27312+}
27313+EXPORT_SYMBOL(__set_fs);
27314+
27315+void set_fs(mm_segment_t x)
27316+{
27317+ current_thread_info()->addr_limit = x;
27318+ __set_fs(x);
27319+}
27320+EXPORT_SYMBOL(set_fs);
27321+#endif
27322diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
27323index 906fea3..ee8a097 100644
27324--- a/arch/x86/lib/usercopy_64.c
27325+++ b/arch/x86/lib/usercopy_64.c
27326@@ -39,7 +39,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
27327 _ASM_EXTABLE(0b,3b)
27328 _ASM_EXTABLE(1b,2b)
27329 : [size8] "=&c"(size), [dst] "=&D" (__d0)
27330- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
27331+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
27332 [zero] "r" (0UL), [eight] "r" (8UL));
27333 clac();
27334 return size;
27335@@ -54,12 +54,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
27336 }
27337 EXPORT_SYMBOL(clear_user);
27338
27339-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
27340+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
27341 {
27342- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
27343- return copy_user_generic((__force void *)to, (__force void *)from, len);
27344- }
27345- return len;
27346+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
27347+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
27348+ return len;
27349 }
27350 EXPORT_SYMBOL(copy_in_user);
27351
27352@@ -69,7 +68,7 @@ EXPORT_SYMBOL(copy_in_user);
27353 * it is not necessary to optimize tail handling.
27354 */
27355 unsigned long
27356-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
27357+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
27358 {
27359 char c;
27360 unsigned zero_len;
27361@@ -87,3 +86,15 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
27362 clac();
27363 return len;
27364 }
27365+
27366+void copy_from_user_overflow(void)
27367+{
27368+ WARN(1, "Buffer overflow detected!\n");
27369+}
27370+EXPORT_SYMBOL(copy_from_user_overflow);
27371+
27372+void copy_to_user_overflow(void)
27373+{
27374+ WARN(1, "Buffer overflow detected!\n");
27375+}
27376+EXPORT_SYMBOL(copy_to_user_overflow);
27377diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
27378index 903ec1e..c4166b2 100644
27379--- a/arch/x86/mm/extable.c
27380+++ b/arch/x86/mm/extable.c
27381@@ -6,12 +6,24 @@
27382 static inline unsigned long
27383 ex_insn_addr(const struct exception_table_entry *x)
27384 {
27385- return (unsigned long)&x->insn + x->insn;
27386+ unsigned long reloc = 0;
27387+
27388+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27389+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
27390+#endif
27391+
27392+ return (unsigned long)&x->insn + x->insn + reloc;
27393 }
27394 static inline unsigned long
27395 ex_fixup_addr(const struct exception_table_entry *x)
27396 {
27397- return (unsigned long)&x->fixup + x->fixup;
27398+ unsigned long reloc = 0;
27399+
27400+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27401+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
27402+#endif
27403+
27404+ return (unsigned long)&x->fixup + x->fixup + reloc;
27405 }
27406
27407 int fixup_exception(struct pt_regs *regs)
27408@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
27409 unsigned long new_ip;
27410
27411 #ifdef CONFIG_PNPBIOS
27412- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
27413+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
27414 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
27415 extern u32 pnp_bios_is_utter_crap;
27416 pnp_bios_is_utter_crap = 1;
27417@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
27418 i += 4;
27419 p->fixup -= i;
27420 i += 4;
27421+
27422+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27423+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
27424+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
27425+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
27426+#endif
27427+
27428 }
27429 }
27430
27431diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
27432index fb674fd..223a693 100644
27433--- a/arch/x86/mm/fault.c
27434+++ b/arch/x86/mm/fault.c
27435@@ -13,12 +13,19 @@
27436 #include <linux/perf_event.h> /* perf_sw_event */
27437 #include <linux/hugetlb.h> /* hstate_index_to_shift */
27438 #include <linux/prefetch.h> /* prefetchw */
27439+#include <linux/unistd.h>
27440+#include <linux/compiler.h>
27441
27442 #include <asm/traps.h> /* dotraplinkage, ... */
27443 #include <asm/pgalloc.h> /* pgd_*(), ... */
27444 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
27445 #include <asm/fixmap.h> /* VSYSCALL_START */
27446 #include <asm/context_tracking.h> /* exception_enter(), ... */
27447+#include <asm/tlbflush.h>
27448+
27449+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
27450+#include <asm/stacktrace.h>
27451+#endif
27452
27453 /*
27454 * Page fault error code bits:
27455@@ -56,7 +63,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
27456 int ret = 0;
27457
27458 /* kprobe_running() needs smp_processor_id() */
27459- if (kprobes_built_in() && !user_mode_vm(regs)) {
27460+ if (kprobes_built_in() && !user_mode(regs)) {
27461 preempt_disable();
27462 if (kprobe_running() && kprobe_fault_handler(regs, 14))
27463 ret = 1;
27464@@ -117,7 +124,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
27465 return !instr_lo || (instr_lo>>1) == 1;
27466 case 0x00:
27467 /* Prefetch instruction is 0x0F0D or 0x0F18 */
27468- if (probe_kernel_address(instr, opcode))
27469+ if (user_mode(regs)) {
27470+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
27471+ return 0;
27472+ } else if (probe_kernel_address(instr, opcode))
27473 return 0;
27474
27475 *prefetch = (instr_lo == 0xF) &&
27476@@ -151,7 +161,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
27477 while (instr < max_instr) {
27478 unsigned char opcode;
27479
27480- if (probe_kernel_address(instr, opcode))
27481+ if (user_mode(regs)) {
27482+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
27483+ break;
27484+ } else if (probe_kernel_address(instr, opcode))
27485 break;
27486
27487 instr++;
27488@@ -182,6 +195,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
27489 force_sig_info(si_signo, &info, tsk);
27490 }
27491
27492+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
27493+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
27494+#endif
27495+
27496+#ifdef CONFIG_PAX_EMUTRAMP
27497+static int pax_handle_fetch_fault(struct pt_regs *regs);
27498+#endif
27499+
27500+#ifdef CONFIG_PAX_PAGEEXEC
27501+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
27502+{
27503+ pgd_t *pgd;
27504+ pud_t *pud;
27505+ pmd_t *pmd;
27506+
27507+ pgd = pgd_offset(mm, address);
27508+ if (!pgd_present(*pgd))
27509+ return NULL;
27510+ pud = pud_offset(pgd, address);
27511+ if (!pud_present(*pud))
27512+ return NULL;
27513+ pmd = pmd_offset(pud, address);
27514+ if (!pmd_present(*pmd))
27515+ return NULL;
27516+ return pmd;
27517+}
27518+#endif
27519+
27520 DEFINE_SPINLOCK(pgd_lock);
27521 LIST_HEAD(pgd_list);
27522
27523@@ -232,10 +273,22 @@ void vmalloc_sync_all(void)
27524 for (address = VMALLOC_START & PMD_MASK;
27525 address >= TASK_SIZE && address < FIXADDR_TOP;
27526 address += PMD_SIZE) {
27527+
27528+#ifdef CONFIG_PAX_PER_CPU_PGD
27529+ unsigned long cpu;
27530+#else
27531 struct page *page;
27532+#endif
27533
27534 spin_lock(&pgd_lock);
27535+
27536+#ifdef CONFIG_PAX_PER_CPU_PGD
27537+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
27538+ pgd_t *pgd = get_cpu_pgd(cpu);
27539+ pmd_t *ret;
27540+#else
27541 list_for_each_entry(page, &pgd_list, lru) {
27542+ pgd_t *pgd;
27543 spinlock_t *pgt_lock;
27544 pmd_t *ret;
27545
27546@@ -243,8 +296,14 @@ void vmalloc_sync_all(void)
27547 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
27548
27549 spin_lock(pgt_lock);
27550- ret = vmalloc_sync_one(page_address(page), address);
27551+ pgd = page_address(page);
27552+#endif
27553+
27554+ ret = vmalloc_sync_one(pgd, address);
27555+
27556+#ifndef CONFIG_PAX_PER_CPU_PGD
27557 spin_unlock(pgt_lock);
27558+#endif
27559
27560 if (!ret)
27561 break;
27562@@ -278,6 +337,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
27563 * an interrupt in the middle of a task switch..
27564 */
27565 pgd_paddr = read_cr3();
27566+
27567+#ifdef CONFIG_PAX_PER_CPU_PGD
27568+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
27569+#endif
27570+
27571 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
27572 if (!pmd_k)
27573 return -1;
27574@@ -373,7 +437,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
27575 * happen within a race in page table update. In the later
27576 * case just flush:
27577 */
27578+
27579+#ifdef CONFIG_PAX_PER_CPU_PGD
27580+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
27581+ pgd = pgd_offset_cpu(smp_processor_id(), address);
27582+#else
27583 pgd = pgd_offset(current->active_mm, address);
27584+#endif
27585+
27586 pgd_ref = pgd_offset_k(address);
27587 if (pgd_none(*pgd_ref))
27588 return -1;
27589@@ -541,7 +612,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
27590 static int is_errata100(struct pt_regs *regs, unsigned long address)
27591 {
27592 #ifdef CONFIG_X86_64
27593- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
27594+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
27595 return 1;
27596 #endif
27597 return 0;
27598@@ -568,7 +639,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
27599 }
27600
27601 static const char nx_warning[] = KERN_CRIT
27602-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
27603+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
27604
27605 static void
27606 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
27607@@ -577,15 +648,27 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
27608 if (!oops_may_print())
27609 return;
27610
27611- if (error_code & PF_INSTR) {
27612+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
27613 unsigned int level;
27614
27615 pte_t *pte = lookup_address(address, &level);
27616
27617 if (pte && pte_present(*pte) && !pte_exec(*pte))
27618- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
27619+ printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
27620 }
27621
27622+#ifdef CONFIG_PAX_KERNEXEC
27623+ if (init_mm.start_code <= address && address < init_mm.end_code) {
27624+ if (current->signal->curr_ip)
27625+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
27626+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
27627+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
27628+ else
27629+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
27630+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
27631+ }
27632+#endif
27633+
27634 printk(KERN_ALERT "BUG: unable to handle kernel ");
27635 if (address < PAGE_SIZE)
27636 printk(KERN_CONT "NULL pointer dereference");
27637@@ -748,6 +831,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
27638 return;
27639 }
27640 #endif
27641+
27642+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
27643+ if (pax_is_fetch_fault(regs, error_code, address)) {
27644+
27645+#ifdef CONFIG_PAX_EMUTRAMP
27646+ switch (pax_handle_fetch_fault(regs)) {
27647+ case 2:
27648+ return;
27649+ }
27650+#endif
27651+
27652+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
27653+ do_group_exit(SIGKILL);
27654+ }
27655+#endif
27656+
27657 /* Kernel addresses are always protection faults: */
27658 if (address >= TASK_SIZE)
27659 error_code |= PF_PROT;
27660@@ -833,7 +932,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
27661 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
27662 printk(KERN_ERR
27663 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
27664- tsk->comm, tsk->pid, address);
27665+ tsk->comm, task_pid_nr(tsk), address);
27666 code = BUS_MCEERR_AR;
27667 }
27668 #endif
27669@@ -896,6 +995,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
27670 return 1;
27671 }
27672
27673+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
27674+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
27675+{
27676+ pte_t *pte;
27677+ pmd_t *pmd;
27678+ spinlock_t *ptl;
27679+ unsigned char pte_mask;
27680+
27681+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
27682+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
27683+ return 0;
27684+
27685+ /* PaX: it's our fault, let's handle it if we can */
27686+
27687+ /* PaX: take a look at read faults before acquiring any locks */
27688+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
27689+ /* instruction fetch attempt from a protected page in user mode */
27690+ up_read(&mm->mmap_sem);
27691+
27692+#ifdef CONFIG_PAX_EMUTRAMP
27693+ switch (pax_handle_fetch_fault(regs)) {
27694+ case 2:
27695+ return 1;
27696+ }
27697+#endif
27698+
27699+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
27700+ do_group_exit(SIGKILL);
27701+ }
27702+
27703+ pmd = pax_get_pmd(mm, address);
27704+ if (unlikely(!pmd))
27705+ return 0;
27706+
27707+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
27708+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
27709+ pte_unmap_unlock(pte, ptl);
27710+ return 0;
27711+ }
27712+
27713+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
27714+ /* write attempt to a protected page in user mode */
27715+ pte_unmap_unlock(pte, ptl);
27716+ return 0;
27717+ }
27718+
27719+#ifdef CONFIG_SMP
27720+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
27721+#else
27722+ if (likely(address > get_limit(regs->cs)))
27723+#endif
27724+ {
27725+ set_pte(pte, pte_mkread(*pte));
27726+ __flush_tlb_one(address);
27727+ pte_unmap_unlock(pte, ptl);
27728+ up_read(&mm->mmap_sem);
27729+ return 1;
27730+ }
27731+
27732+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
27733+
27734+ /*
27735+ * PaX: fill DTLB with user rights and retry
27736+ */
27737+ __asm__ __volatile__ (
27738+ "orb %2,(%1)\n"
27739+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
27740+/*
27741+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
27742+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
27743+ * page fault when examined during a TLB load attempt. this is true not only
27744+ * for PTEs holding a non-present entry but also present entries that will
27745+ * raise a page fault (such as those set up by PaX, or the copy-on-write
27746+ * mechanism). in effect it means that we do *not* need to flush the TLBs
27747+ * for our target pages since their PTEs are simply not in the TLBs at all.
27748+
27749+ * the best thing in omitting it is that we gain around 15-20% speed in the
27750+ * fast path of the page fault handler and can get rid of tracing since we
27751+ * can no longer flush unintended entries.
27752+ */
27753+ "invlpg (%0)\n"
27754+#endif
27755+ __copyuser_seg"testb $0,(%0)\n"
27756+ "xorb %3,(%1)\n"
27757+ :
27758+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
27759+ : "memory", "cc");
27760+ pte_unmap_unlock(pte, ptl);
27761+ up_read(&mm->mmap_sem);
27762+ return 1;
27763+}
27764+#endif
27765+
27766 /*
27767 * Handle a spurious fault caused by a stale TLB entry.
27768 *
27769@@ -968,6 +1160,9 @@ int show_unhandled_signals = 1;
27770 static inline int
27771 access_error(unsigned long error_code, struct vm_area_struct *vma)
27772 {
27773+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
27774+ return 1;
27775+
27776 if (error_code & PF_WRITE) {
27777 /* write, present and write, not present: */
27778 if (unlikely(!(vma->vm_flags & VM_WRITE)))
27779@@ -996,7 +1191,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
27780 if (error_code & PF_USER)
27781 return false;
27782
27783- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
27784+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
27785 return false;
27786
27787 return true;
27788@@ -1012,18 +1207,33 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
27789 {
27790 struct vm_area_struct *vma;
27791 struct task_struct *tsk;
27792- unsigned long address;
27793 struct mm_struct *mm;
27794 int fault;
27795 int write = error_code & PF_WRITE;
27796 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
27797 (write ? FAULT_FLAG_WRITE : 0);
27798
27799- tsk = current;
27800- mm = tsk->mm;
27801-
27802 /* Get the faulting address: */
27803- address = read_cr2();
27804+ unsigned long address = read_cr2();
27805+
27806+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
27807+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
27808+ if (!search_exception_tables(regs->ip)) {
27809+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
27810+ bad_area_nosemaphore(regs, error_code, address);
27811+ return;
27812+ }
27813+ if (address < PAX_USER_SHADOW_BASE) {
27814+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
27815+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
27816+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
27817+ } else
27818+ address -= PAX_USER_SHADOW_BASE;
27819+ }
27820+#endif
27821+
27822+ tsk = current;
27823+ mm = tsk->mm;
27824
27825 /*
27826 * Detect and handle instructions that would cause a page fault for
27827@@ -1084,7 +1294,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
27828 * User-mode registers count as a user access even for any
27829 * potential system fault or CPU buglet:
27830 */
27831- if (user_mode_vm(regs)) {
27832+ if (user_mode(regs)) {
27833 local_irq_enable();
27834 error_code |= PF_USER;
27835 } else {
27836@@ -1146,6 +1356,11 @@ retry:
27837 might_sleep();
27838 }
27839
27840+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
27841+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
27842+ return;
27843+#endif
27844+
27845 vma = find_vma(mm, address);
27846 if (unlikely(!vma)) {
27847 bad_area(regs, error_code, address);
27848@@ -1157,18 +1372,24 @@ retry:
27849 bad_area(regs, error_code, address);
27850 return;
27851 }
27852- if (error_code & PF_USER) {
27853- /*
27854- * Accessing the stack below %sp is always a bug.
27855- * The large cushion allows instructions like enter
27856- * and pusha to work. ("enter $65535, $31" pushes
27857- * 32 pointers and then decrements %sp by 65535.)
27858- */
27859- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
27860- bad_area(regs, error_code, address);
27861- return;
27862- }
27863+ /*
27864+ * Accessing the stack below %sp is always a bug.
27865+ * The large cushion allows instructions like enter
27866+ * and pusha to work. ("enter $65535, $31" pushes
27867+ * 32 pointers and then decrements %sp by 65535.)
27868+ */
27869+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
27870+ bad_area(regs, error_code, address);
27871+ return;
27872 }
27873+
27874+#ifdef CONFIG_PAX_SEGMEXEC
27875+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
27876+ bad_area(regs, error_code, address);
27877+ return;
27878+ }
27879+#endif
27880+
27881 if (unlikely(expand_stack(vma, address))) {
27882 bad_area(regs, error_code, address);
27883 return;
27884@@ -1232,3 +1453,292 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
27885 __do_page_fault(regs, error_code);
27886 exception_exit(regs);
27887 }
27888+
27889+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
27890+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
27891+{
27892+ struct mm_struct *mm = current->mm;
27893+ unsigned long ip = regs->ip;
27894+
27895+ if (v8086_mode(regs))
27896+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
27897+
27898+#ifdef CONFIG_PAX_PAGEEXEC
27899+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
27900+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
27901+ return true;
27902+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
27903+ return true;
27904+ return false;
27905+ }
27906+#endif
27907+
27908+#ifdef CONFIG_PAX_SEGMEXEC
27909+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
27910+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
27911+ return true;
27912+ return false;
27913+ }
27914+#endif
27915+
27916+ return false;
27917+}
27918+#endif
27919+
27920+#ifdef CONFIG_PAX_EMUTRAMP
27921+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
27922+{
27923+ int err;
27924+
27925+ do { /* PaX: libffi trampoline emulation */
27926+ unsigned char mov, jmp;
27927+ unsigned int addr1, addr2;
27928+
27929+#ifdef CONFIG_X86_64
27930+ if ((regs->ip + 9) >> 32)
27931+ break;
27932+#endif
27933+
27934+ err = get_user(mov, (unsigned char __user *)regs->ip);
27935+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
27936+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
27937+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
27938+
27939+ if (err)
27940+ break;
27941+
27942+ if (mov == 0xB8 && jmp == 0xE9) {
27943+ regs->ax = addr1;
27944+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
27945+ return 2;
27946+ }
27947+ } while (0);
27948+
27949+ do { /* PaX: gcc trampoline emulation #1 */
27950+ unsigned char mov1, mov2;
27951+ unsigned short jmp;
27952+ unsigned int addr1, addr2;
27953+
27954+#ifdef CONFIG_X86_64
27955+ if ((regs->ip + 11) >> 32)
27956+ break;
27957+#endif
27958+
27959+ err = get_user(mov1, (unsigned char __user *)regs->ip);
27960+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
27961+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
27962+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
27963+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
27964+
27965+ if (err)
27966+ break;
27967+
27968+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
27969+ regs->cx = addr1;
27970+ regs->ax = addr2;
27971+ regs->ip = addr2;
27972+ return 2;
27973+ }
27974+ } while (0);
27975+
27976+ do { /* PaX: gcc trampoline emulation #2 */
27977+ unsigned char mov, jmp;
27978+ unsigned int addr1, addr2;
27979+
27980+#ifdef CONFIG_X86_64
27981+ if ((regs->ip + 9) >> 32)
27982+ break;
27983+#endif
27984+
27985+ err = get_user(mov, (unsigned char __user *)regs->ip);
27986+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
27987+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
27988+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
27989+
27990+ if (err)
27991+ break;
27992+
27993+ if (mov == 0xB9 && jmp == 0xE9) {
27994+ regs->cx = addr1;
27995+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
27996+ return 2;
27997+ }
27998+ } while (0);
27999+
28000+ return 1; /* PaX in action */
28001+}
28002+
28003+#ifdef CONFIG_X86_64
28004+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
28005+{
28006+ int err;
28007+
28008+ do { /* PaX: libffi trampoline emulation */
28009+ unsigned short mov1, mov2, jmp1;
28010+ unsigned char stcclc, jmp2;
28011+ unsigned long addr1, addr2;
28012+
28013+ err = get_user(mov1, (unsigned short __user *)regs->ip);
28014+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
28015+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
28016+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
28017+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
28018+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
28019+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
28020+
28021+ if (err)
28022+ break;
28023+
28024+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
28025+ regs->r11 = addr1;
28026+ regs->r10 = addr2;
28027+ if (stcclc == 0xF8)
28028+ regs->flags &= ~X86_EFLAGS_CF;
28029+ else
28030+ regs->flags |= X86_EFLAGS_CF;
28031+ regs->ip = addr1;
28032+ return 2;
28033+ }
28034+ } while (0);
28035+
28036+ do { /* PaX: gcc trampoline emulation #1 */
28037+ unsigned short mov1, mov2, jmp1;
28038+ unsigned char jmp2;
28039+ unsigned int addr1;
28040+ unsigned long addr2;
28041+
28042+ err = get_user(mov1, (unsigned short __user *)regs->ip);
28043+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
28044+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
28045+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
28046+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
28047+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
28048+
28049+ if (err)
28050+ break;
28051+
28052+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
28053+ regs->r11 = addr1;
28054+ regs->r10 = addr2;
28055+ regs->ip = addr1;
28056+ return 2;
28057+ }
28058+ } while (0);
28059+
28060+ do { /* PaX: gcc trampoline emulation #2 */
28061+ unsigned short mov1, mov2, jmp1;
28062+ unsigned char jmp2;
28063+ unsigned long addr1, addr2;
28064+
28065+ err = get_user(mov1, (unsigned short __user *)regs->ip);
28066+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
28067+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
28068+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
28069+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
28070+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
28071+
28072+ if (err)
28073+ break;
28074+
28075+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
28076+ regs->r11 = addr1;
28077+ regs->r10 = addr2;
28078+ regs->ip = addr1;
28079+ return 2;
28080+ }
28081+ } while (0);
28082+
28083+ return 1; /* PaX in action */
28084+}
28085+#endif
28086+
28087+/*
28088+ * PaX: decide what to do with offenders (regs->ip = fault address)
28089+ *
28090+ * returns 1 when task should be killed
28091+ * 2 when gcc trampoline was detected
28092+ */
28093+static int pax_handle_fetch_fault(struct pt_regs *regs)
28094+{
28095+ if (v8086_mode(regs))
28096+ return 1;
28097+
28098+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
28099+ return 1;
28100+
28101+#ifdef CONFIG_X86_32
28102+ return pax_handle_fetch_fault_32(regs);
28103+#else
28104+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
28105+ return pax_handle_fetch_fault_32(regs);
28106+ else
28107+ return pax_handle_fetch_fault_64(regs);
28108+#endif
28109+}
28110+#endif
28111+
28112+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
28113+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
28114+{
28115+ long i;
28116+
28117+ printk(KERN_ERR "PAX: bytes at PC: ");
28118+ for (i = 0; i < 20; i++) {
28119+ unsigned char c;
28120+ if (get_user(c, (unsigned char __force_user *)pc+i))
28121+ printk(KERN_CONT "?? ");
28122+ else
28123+ printk(KERN_CONT "%02x ", c);
28124+ }
28125+ printk("\n");
28126+
28127+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
28128+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
28129+ unsigned long c;
28130+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
28131+#ifdef CONFIG_X86_32
28132+ printk(KERN_CONT "???????? ");
28133+#else
28134+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
28135+ printk(KERN_CONT "???????? ???????? ");
28136+ else
28137+ printk(KERN_CONT "???????????????? ");
28138+#endif
28139+ } else {
28140+#ifdef CONFIG_X86_64
28141+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
28142+ printk(KERN_CONT "%08x ", (unsigned int)c);
28143+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
28144+ } else
28145+#endif
28146+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
28147+ }
28148+ }
28149+ printk("\n");
28150+}
28151+#endif
28152+
28153+/**
28154+ * probe_kernel_write(): safely attempt to write to a location
28155+ * @dst: address to write to
28156+ * @src: pointer to the data that shall be written
28157+ * @size: size of the data chunk
28158+ *
28159+ * Safely write to address @dst from the buffer at @src. If a kernel fault
28160+ * happens, handle that and return -EFAULT.
28161+ */
28162+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
28163+{
28164+ long ret;
28165+ mm_segment_t old_fs = get_fs();
28166+
28167+ set_fs(KERNEL_DS);
28168+ pagefault_disable();
28169+ pax_open_kernel();
28170+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
28171+ pax_close_kernel();
28172+ pagefault_enable();
28173+ set_fs(old_fs);
28174+
28175+ return ret ? -EFAULT : 0;
28176+}
28177diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
28178index dd74e46..7d26398 100644
28179--- a/arch/x86/mm/gup.c
28180+++ b/arch/x86/mm/gup.c
28181@@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
28182 addr = start;
28183 len = (unsigned long) nr_pages << PAGE_SHIFT;
28184 end = start + len;
28185- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
28186+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
28187 (void __user *)start, len)))
28188 return 0;
28189
28190diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
28191index 6f31ee5..8ee4164 100644
28192--- a/arch/x86/mm/highmem_32.c
28193+++ b/arch/x86/mm/highmem_32.c
28194@@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
28195 idx = type + KM_TYPE_NR*smp_processor_id();
28196 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
28197 BUG_ON(!pte_none(*(kmap_pte-idx)));
28198+
28199+ pax_open_kernel();
28200 set_pte(kmap_pte-idx, mk_pte(page, prot));
28201+ pax_close_kernel();
28202+
28203 arch_flush_lazy_mmu_mode();
28204
28205 return (void *)vaddr;
28206diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
28207index ae1aa71..56316db 100644
28208--- a/arch/x86/mm/hugetlbpage.c
28209+++ b/arch/x86/mm/hugetlbpage.c
28210@@ -279,6 +279,12 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
28211 info.flags = 0;
28212 info.length = len;
28213 info.low_limit = TASK_UNMAPPED_BASE;
28214+
28215+#ifdef CONFIG_PAX_RANDMMAP
28216+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
28217+ info.low_limit += current->mm->delta_mmap;
28218+#endif
28219+
28220 info.high_limit = TASK_SIZE;
28221 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
28222 info.align_offset = 0;
28223@@ -311,6 +317,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
28224 VM_BUG_ON(addr != -ENOMEM);
28225 info.flags = 0;
28226 info.low_limit = TASK_UNMAPPED_BASE;
28227+
28228+#ifdef CONFIG_PAX_RANDMMAP
28229+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
28230+ info.low_limit += current->mm->delta_mmap;
28231+#endif
28232+
28233 info.high_limit = TASK_SIZE;
28234 addr = vm_unmapped_area(&info);
28235 }
28236@@ -325,10 +337,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
28237 struct hstate *h = hstate_file(file);
28238 struct mm_struct *mm = current->mm;
28239 struct vm_area_struct *vma;
28240+ unsigned long pax_task_size = TASK_SIZE;
28241+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
28242
28243 if (len & ~huge_page_mask(h))
28244 return -EINVAL;
28245- if (len > TASK_SIZE)
28246+
28247+#ifdef CONFIG_PAX_SEGMEXEC
28248+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
28249+ pax_task_size = SEGMEXEC_TASK_SIZE;
28250+#endif
28251+
28252+ pax_task_size -= PAGE_SIZE;
28253+
28254+ if (len > pax_task_size)
28255 return -ENOMEM;
28256
28257 if (flags & MAP_FIXED) {
28258@@ -337,11 +359,14 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
28259 return addr;
28260 }
28261
28262+#ifdef CONFIG_PAX_RANDMMAP
28263+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
28264+#endif
28265+
28266 if (addr) {
28267 addr = ALIGN(addr, huge_page_size(h));
28268 vma = find_vma(mm, addr);
28269- if (TASK_SIZE - len >= addr &&
28270- (!vma || addr + len <= vma->vm_start))
28271+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
28272 return addr;
28273 }
28274 if (mm->get_unmapped_area == arch_get_unmapped_area)
28275diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
28276index d7aea41..0fc945b 100644
28277--- a/arch/x86/mm/init.c
28278+++ b/arch/x86/mm/init.c
28279@@ -4,6 +4,7 @@
28280 #include <linux/swap.h>
28281 #include <linux/memblock.h>
28282 #include <linux/bootmem.h> /* for max_low_pfn */
28283+#include <linux/tboot.h>
28284
28285 #include <asm/cacheflush.h>
28286 #include <asm/e820.h>
28287@@ -16,6 +17,8 @@
28288 #include <asm/tlb.h>
28289 #include <asm/proto.h>
28290 #include <asm/dma.h> /* for MAX_DMA_PFN */
28291+#include <asm/desc.h>
28292+#include <asm/bios_ebda.h>
28293
28294 unsigned long __initdata pgt_buf_start;
28295 unsigned long __meminitdata pgt_buf_end;
28296@@ -44,7 +47,7 @@ static void __init find_early_table_space(struct map_range *mr, int nr_range)
28297 {
28298 int i;
28299 unsigned long puds = 0, pmds = 0, ptes = 0, tables;
28300- unsigned long start = 0, good_end;
28301+ unsigned long start = 0x100000, good_end;
28302 phys_addr_t base;
28303
28304 for (i = 0; i < nr_range; i++) {
28305@@ -321,10 +324,40 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
28306 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
28307 * mmio resources as well as potential bios/acpi data regions.
28308 */
28309+
28310+#ifdef CONFIG_GRKERNSEC_KMEM
28311+static unsigned int ebda_start __read_only;
28312+static unsigned int ebda_end __read_only;
28313+#endif
28314+
28315 int devmem_is_allowed(unsigned long pagenr)
28316 {
28317- if (pagenr < 256)
28318+#ifdef CONFIG_GRKERNSEC_KMEM
28319+ /* allow BDA */
28320+ if (!pagenr)
28321 return 1;
28322+ /* allow EBDA */
28323+ if (pagenr >= ebda_start && pagenr < ebda_end)
28324+ return 1;
28325+ /* if tboot is in use, allow access to its hardcoded serial log range */
28326+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
28327+ return 1;
28328+#else
28329+ if (!pagenr)
28330+ return 1;
28331+#ifdef CONFIG_VM86
28332+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
28333+ return 1;
28334+#endif
28335+#endif
28336+
28337+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
28338+ return 1;
28339+#ifdef CONFIG_GRKERNSEC_KMEM
28340+ /* throw out everything else below 1MB */
28341+ if (pagenr <= 256)
28342+ return 0;
28343+#endif
28344 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
28345 return 0;
28346 if (!page_is_ram(pagenr))
28347@@ -381,8 +414,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
28348 #endif
28349 }
28350
28351+#ifdef CONFIG_GRKERNSEC_KMEM
28352+static inline void gr_init_ebda(void)
28353+{
28354+ unsigned int ebda_addr;
28355+ unsigned int ebda_size = 0;
28356+
28357+ ebda_addr = get_bios_ebda();
28358+ if (ebda_addr) {
28359+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
28360+ ebda_size <<= 10;
28361+ }
28362+ if (ebda_addr && ebda_size) {
28363+ ebda_start = ebda_addr >> PAGE_SHIFT;
28364+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
28365+ } else {
28366+ ebda_start = 0x9f000 >> PAGE_SHIFT;
28367+ ebda_end = 0xa0000 >> PAGE_SHIFT;
28368+ }
28369+}
28370+#else
28371+static inline void gr_init_ebda(void) { }
28372+#endif
28373+
28374 void free_initmem(void)
28375 {
28376+#ifdef CONFIG_PAX_KERNEXEC
28377+#ifdef CONFIG_X86_32
28378+ /* PaX: limit KERNEL_CS to actual size */
28379+ unsigned long addr, limit;
28380+ struct desc_struct d;
28381+ int cpu;
28382+#else
28383+ pgd_t *pgd;
28384+ pud_t *pud;
28385+ pmd_t *pmd;
28386+ unsigned long addr, end;
28387+#endif
28388+#endif
28389+
28390+ gr_init_ebda();
28391+
28392+#ifdef CONFIG_PAX_KERNEXEC
28393+#ifdef CONFIG_X86_32
28394+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
28395+ limit = (limit - 1UL) >> PAGE_SHIFT;
28396+
28397+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
28398+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
28399+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
28400+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
28401+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
28402+ }
28403+
28404+ /* PaX: make KERNEL_CS read-only */
28405+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
28406+ if (!paravirt_enabled())
28407+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
28408+/*
28409+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
28410+ pgd = pgd_offset_k(addr);
28411+ pud = pud_offset(pgd, addr);
28412+ pmd = pmd_offset(pud, addr);
28413+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
28414+ }
28415+*/
28416+#ifdef CONFIG_X86_PAE
28417+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
28418+/*
28419+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
28420+ pgd = pgd_offset_k(addr);
28421+ pud = pud_offset(pgd, addr);
28422+ pmd = pmd_offset(pud, addr);
28423+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
28424+ }
28425+*/
28426+#endif
28427+
28428+#ifdef CONFIG_MODULES
28429+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
28430+#endif
28431+
28432+#else
28433+ /* PaX: make kernel code/rodata read-only, rest non-executable */
28434+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
28435+ pgd = pgd_offset_k(addr);
28436+ pud = pud_offset(pgd, addr);
28437+ pmd = pmd_offset(pud, addr);
28438+ if (!pmd_present(*pmd))
28439+ continue;
28440+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
28441+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
28442+ else
28443+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
28444+ }
28445+
28446+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
28447+ end = addr + KERNEL_IMAGE_SIZE;
28448+ for (; addr < end; addr += PMD_SIZE) {
28449+ pgd = pgd_offset_k(addr);
28450+ pud = pud_offset(pgd, addr);
28451+ pmd = pmd_offset(pud, addr);
28452+ if (!pmd_present(*pmd))
28453+ continue;
28454+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
28455+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
28456+ }
28457+#endif
28458+
28459+ flush_tlb_all();
28460+#endif
28461+
28462 free_init_pages("unused kernel memory",
28463 (unsigned long)(&__init_begin),
28464 (unsigned long)(&__init_end));
28465diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
28466index 745d66b..56bf568 100644
28467--- a/arch/x86/mm/init_32.c
28468+++ b/arch/x86/mm/init_32.c
28469@@ -73,36 +73,6 @@ static __init void *alloc_low_page(void)
28470 }
28471
28472 /*
28473- * Creates a middle page table and puts a pointer to it in the
28474- * given global directory entry. This only returns the gd entry
28475- * in non-PAE compilation mode, since the middle layer is folded.
28476- */
28477-static pmd_t * __init one_md_table_init(pgd_t *pgd)
28478-{
28479- pud_t *pud;
28480- pmd_t *pmd_table;
28481-
28482-#ifdef CONFIG_X86_PAE
28483- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
28484- if (after_bootmem)
28485- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
28486- else
28487- pmd_table = (pmd_t *)alloc_low_page();
28488- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
28489- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
28490- pud = pud_offset(pgd, 0);
28491- BUG_ON(pmd_table != pmd_offset(pud, 0));
28492-
28493- return pmd_table;
28494- }
28495-#endif
28496- pud = pud_offset(pgd, 0);
28497- pmd_table = pmd_offset(pud, 0);
28498-
28499- return pmd_table;
28500-}
28501-
28502-/*
28503 * Create a page table and place a pointer to it in a middle page
28504 * directory entry:
28505 */
28506@@ -122,13 +92,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
28507 page_table = (pte_t *)alloc_low_page();
28508
28509 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
28510+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
28511+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
28512+#else
28513 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
28514+#endif
28515 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
28516 }
28517
28518 return pte_offset_kernel(pmd, 0);
28519 }
28520
28521+static pmd_t * __init one_md_table_init(pgd_t *pgd)
28522+{
28523+ pud_t *pud;
28524+ pmd_t *pmd_table;
28525+
28526+ pud = pud_offset(pgd, 0);
28527+ pmd_table = pmd_offset(pud, 0);
28528+
28529+ return pmd_table;
28530+}
28531+
28532 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
28533 {
28534 int pgd_idx = pgd_index(vaddr);
28535@@ -202,6 +187,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
28536 int pgd_idx, pmd_idx;
28537 unsigned long vaddr;
28538 pgd_t *pgd;
28539+ pud_t *pud;
28540 pmd_t *pmd;
28541 pte_t *pte = NULL;
28542
28543@@ -211,8 +197,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
28544 pgd = pgd_base + pgd_idx;
28545
28546 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
28547- pmd = one_md_table_init(pgd);
28548- pmd = pmd + pmd_index(vaddr);
28549+ pud = pud_offset(pgd, vaddr);
28550+ pmd = pmd_offset(pud, vaddr);
28551+
28552+#ifdef CONFIG_X86_PAE
28553+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
28554+#endif
28555+
28556 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
28557 pmd++, pmd_idx++) {
28558 pte = page_table_kmap_check(one_page_table_init(pmd),
28559@@ -224,11 +215,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
28560 }
28561 }
28562
28563-static inline int is_kernel_text(unsigned long addr)
28564+static inline int is_kernel_text(unsigned long start, unsigned long end)
28565 {
28566- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
28567- return 1;
28568- return 0;
28569+ if ((start > ktla_ktva((unsigned long)_etext) ||
28570+ end <= ktla_ktva((unsigned long)_stext)) &&
28571+ (start > ktla_ktva((unsigned long)_einittext) ||
28572+ end <= ktla_ktva((unsigned long)_sinittext)) &&
28573+
28574+#ifdef CONFIG_ACPI_SLEEP
28575+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
28576+#endif
28577+
28578+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
28579+ return 0;
28580+ return 1;
28581 }
28582
28583 /*
28584@@ -245,9 +245,10 @@ kernel_physical_mapping_init(unsigned long start,
28585 unsigned long last_map_addr = end;
28586 unsigned long start_pfn, end_pfn;
28587 pgd_t *pgd_base = swapper_pg_dir;
28588- int pgd_idx, pmd_idx, pte_ofs;
28589+ unsigned int pgd_idx, pmd_idx, pte_ofs;
28590 unsigned long pfn;
28591 pgd_t *pgd;
28592+ pud_t *pud;
28593 pmd_t *pmd;
28594 pte_t *pte;
28595 unsigned pages_2m, pages_4k;
28596@@ -280,8 +281,13 @@ repeat:
28597 pfn = start_pfn;
28598 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
28599 pgd = pgd_base + pgd_idx;
28600- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
28601- pmd = one_md_table_init(pgd);
28602+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
28603+ pud = pud_offset(pgd, 0);
28604+ pmd = pmd_offset(pud, 0);
28605+
28606+#ifdef CONFIG_X86_PAE
28607+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
28608+#endif
28609
28610 if (pfn >= end_pfn)
28611 continue;
28612@@ -293,14 +299,13 @@ repeat:
28613 #endif
28614 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
28615 pmd++, pmd_idx++) {
28616- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
28617+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
28618
28619 /*
28620 * Map with big pages if possible, otherwise
28621 * create normal page tables:
28622 */
28623 if (use_pse) {
28624- unsigned int addr2;
28625 pgprot_t prot = PAGE_KERNEL_LARGE;
28626 /*
28627 * first pass will use the same initial
28628@@ -310,11 +315,7 @@ repeat:
28629 __pgprot(PTE_IDENT_ATTR |
28630 _PAGE_PSE);
28631
28632- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
28633- PAGE_OFFSET + PAGE_SIZE-1;
28634-
28635- if (is_kernel_text(addr) ||
28636- is_kernel_text(addr2))
28637+ if (is_kernel_text(address, address + PMD_SIZE))
28638 prot = PAGE_KERNEL_LARGE_EXEC;
28639
28640 pages_2m++;
28641@@ -331,7 +332,7 @@ repeat:
28642 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
28643 pte += pte_ofs;
28644 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
28645- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
28646+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
28647 pgprot_t prot = PAGE_KERNEL;
28648 /*
28649 * first pass will use the same initial
28650@@ -339,7 +340,7 @@ repeat:
28651 */
28652 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
28653
28654- if (is_kernel_text(addr))
28655+ if (is_kernel_text(address, address + PAGE_SIZE))
28656 prot = PAGE_KERNEL_EXEC;
28657
28658 pages_4k++;
28659@@ -465,7 +466,7 @@ void __init native_pagetable_init(void)
28660
28661 pud = pud_offset(pgd, va);
28662 pmd = pmd_offset(pud, va);
28663- if (!pmd_present(*pmd))
28664+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
28665 break;
28666
28667 pte = pte_offset_kernel(pmd, va);
28668@@ -514,12 +515,10 @@ void __init early_ioremap_page_table_range_init(void)
28669
28670 static void __init pagetable_init(void)
28671 {
28672- pgd_t *pgd_base = swapper_pg_dir;
28673-
28674- permanent_kmaps_init(pgd_base);
28675+ permanent_kmaps_init(swapper_pg_dir);
28676 }
28677
28678-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
28679+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
28680 EXPORT_SYMBOL_GPL(__supported_pte_mask);
28681
28682 /* user-defined highmem size */
28683@@ -728,6 +727,12 @@ void __init mem_init(void)
28684
28685 pci_iommu_alloc();
28686
28687+#ifdef CONFIG_PAX_PER_CPU_PGD
28688+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
28689+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
28690+ KERNEL_PGD_PTRS);
28691+#endif
28692+
28693 #ifdef CONFIG_FLATMEM
28694 BUG_ON(!mem_map);
28695 #endif
28696@@ -754,7 +759,7 @@ void __init mem_init(void)
28697 reservedpages++;
28698
28699 codesize = (unsigned long) &_etext - (unsigned long) &_text;
28700- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
28701+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
28702 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
28703
28704 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
28705@@ -795,10 +800,10 @@ void __init mem_init(void)
28706 ((unsigned long)&__init_end -
28707 (unsigned long)&__init_begin) >> 10,
28708
28709- (unsigned long)&_etext, (unsigned long)&_edata,
28710- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
28711+ (unsigned long)&_sdata, (unsigned long)&_edata,
28712+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
28713
28714- (unsigned long)&_text, (unsigned long)&_etext,
28715+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
28716 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
28717
28718 /*
28719@@ -876,6 +881,7 @@ void set_kernel_text_rw(void)
28720 if (!kernel_set_to_readonly)
28721 return;
28722
28723+ start = ktla_ktva(start);
28724 pr_debug("Set kernel text: %lx - %lx for read write\n",
28725 start, start+size);
28726
28727@@ -890,6 +896,7 @@ void set_kernel_text_ro(void)
28728 if (!kernel_set_to_readonly)
28729 return;
28730
28731+ start = ktla_ktva(start);
28732 pr_debug("Set kernel text: %lx - %lx for read only\n",
28733 start, start+size);
28734
28735@@ -918,6 +925,7 @@ void mark_rodata_ro(void)
28736 unsigned long start = PFN_ALIGN(_text);
28737 unsigned long size = PFN_ALIGN(_etext) - start;
28738
28739+ start = ktla_ktva(start);
28740 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
28741 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
28742 size >> 10);
28743diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
28744index 75c9a6a..498d677 100644
28745--- a/arch/x86/mm/init_64.c
28746+++ b/arch/x86/mm/init_64.c
28747@@ -74,7 +74,7 @@ early_param("gbpages", parse_direct_gbpages_on);
28748 * around without checking the pgd every time.
28749 */
28750
28751-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
28752+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
28753 EXPORT_SYMBOL_GPL(__supported_pte_mask);
28754
28755 int force_personality32;
28756@@ -107,12 +107,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
28757
28758 for (address = start; address <= end; address += PGDIR_SIZE) {
28759 const pgd_t *pgd_ref = pgd_offset_k(address);
28760+
28761+#ifdef CONFIG_PAX_PER_CPU_PGD
28762+ unsigned long cpu;
28763+#else
28764 struct page *page;
28765+#endif
28766
28767 if (pgd_none(*pgd_ref))
28768 continue;
28769
28770 spin_lock(&pgd_lock);
28771+
28772+#ifdef CONFIG_PAX_PER_CPU_PGD
28773+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
28774+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
28775+#else
28776 list_for_each_entry(page, &pgd_list, lru) {
28777 pgd_t *pgd;
28778 spinlock_t *pgt_lock;
28779@@ -121,6 +131,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
28780 /* the pgt_lock only for Xen */
28781 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
28782 spin_lock(pgt_lock);
28783+#endif
28784
28785 if (pgd_none(*pgd))
28786 set_pgd(pgd, *pgd_ref);
28787@@ -128,7 +139,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
28788 BUG_ON(pgd_page_vaddr(*pgd)
28789 != pgd_page_vaddr(*pgd_ref));
28790
28791+#ifndef CONFIG_PAX_PER_CPU_PGD
28792 spin_unlock(pgt_lock);
28793+#endif
28794+
28795 }
28796 spin_unlock(&pgd_lock);
28797 }
28798@@ -161,7 +175,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
28799 {
28800 if (pgd_none(*pgd)) {
28801 pud_t *pud = (pud_t *)spp_getpage();
28802- pgd_populate(&init_mm, pgd, pud);
28803+ pgd_populate_kernel(&init_mm, pgd, pud);
28804 if (pud != pud_offset(pgd, 0))
28805 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
28806 pud, pud_offset(pgd, 0));
28807@@ -173,7 +187,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
28808 {
28809 if (pud_none(*pud)) {
28810 pmd_t *pmd = (pmd_t *) spp_getpage();
28811- pud_populate(&init_mm, pud, pmd);
28812+ pud_populate_kernel(&init_mm, pud, pmd);
28813 if (pmd != pmd_offset(pud, 0))
28814 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
28815 pmd, pmd_offset(pud, 0));
28816@@ -202,7 +216,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
28817 pmd = fill_pmd(pud, vaddr);
28818 pte = fill_pte(pmd, vaddr);
28819
28820+ pax_open_kernel();
28821 set_pte(pte, new_pte);
28822+ pax_close_kernel();
28823
28824 /*
28825 * It's enough to flush this one mapping.
28826@@ -261,14 +277,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
28827 pgd = pgd_offset_k((unsigned long)__va(phys));
28828 if (pgd_none(*pgd)) {
28829 pud = (pud_t *) spp_getpage();
28830- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
28831- _PAGE_USER));
28832+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
28833 }
28834 pud = pud_offset(pgd, (unsigned long)__va(phys));
28835 if (pud_none(*pud)) {
28836 pmd = (pmd_t *) spp_getpage();
28837- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
28838- _PAGE_USER));
28839+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
28840 }
28841 pmd = pmd_offset(pud, phys);
28842 BUG_ON(!pmd_none(*pmd));
28843@@ -329,7 +343,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
28844 if (pfn >= pgt_buf_top)
28845 panic("alloc_low_page: ran out of memory");
28846
28847- adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
28848+ adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
28849 clear_page(adr);
28850 *phys = pfn * PAGE_SIZE;
28851 return adr;
28852@@ -345,7 +359,7 @@ static __ref void *map_low_page(void *virt)
28853
28854 phys = __pa(virt);
28855 left = phys & (PAGE_SIZE - 1);
28856- adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
28857+ adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
28858 adr = (void *)(((unsigned long)adr) | left);
28859
28860 return adr;
28861@@ -553,7 +567,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
28862 unmap_low_page(pmd);
28863
28864 spin_lock(&init_mm.page_table_lock);
28865- pud_populate(&init_mm, pud, __va(pmd_phys));
28866+ pud_populate_kernel(&init_mm, pud, __va(pmd_phys));
28867 spin_unlock(&init_mm.page_table_lock);
28868 }
28869 __flush_tlb_all();
28870@@ -599,7 +613,7 @@ kernel_physical_mapping_init(unsigned long start,
28871 unmap_low_page(pud);
28872
28873 spin_lock(&init_mm.page_table_lock);
28874- pgd_populate(&init_mm, pgd, __va(pud_phys));
28875+ pgd_populate_kernel(&init_mm, pgd, __va(pud_phys));
28876 spin_unlock(&init_mm.page_table_lock);
28877 pgd_changed = true;
28878 }
28879@@ -693,6 +707,12 @@ void __init mem_init(void)
28880
28881 pci_iommu_alloc();
28882
28883+#ifdef CONFIG_PAX_PER_CPU_PGD
28884+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
28885+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
28886+ KERNEL_PGD_PTRS);
28887+#endif
28888+
28889 /* clear_bss() already clear the empty_zero_page */
28890
28891 reservedpages = 0;
28892@@ -856,8 +876,8 @@ int kern_addr_valid(unsigned long addr)
28893 static struct vm_area_struct gate_vma = {
28894 .vm_start = VSYSCALL_START,
28895 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
28896- .vm_page_prot = PAGE_READONLY_EXEC,
28897- .vm_flags = VM_READ | VM_EXEC
28898+ .vm_page_prot = PAGE_READONLY,
28899+ .vm_flags = VM_READ
28900 };
28901
28902 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
28903@@ -891,7 +911,7 @@ int in_gate_area_no_mm(unsigned long addr)
28904
28905 const char *arch_vma_name(struct vm_area_struct *vma)
28906 {
28907- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
28908+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
28909 return "[vdso]";
28910 if (vma == &gate_vma)
28911 return "[vsyscall]";
28912diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
28913index 7b179b4..6bd1777 100644
28914--- a/arch/x86/mm/iomap_32.c
28915+++ b/arch/x86/mm/iomap_32.c
28916@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
28917 type = kmap_atomic_idx_push();
28918 idx = type + KM_TYPE_NR * smp_processor_id();
28919 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
28920+
28921+ pax_open_kernel();
28922 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
28923+ pax_close_kernel();
28924+
28925 arch_flush_lazy_mmu_mode();
28926
28927 return (void *)vaddr;
28928diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
28929index 78fe3f1..73b95e2 100644
28930--- a/arch/x86/mm/ioremap.c
28931+++ b/arch/x86/mm/ioremap.c
28932@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
28933 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
28934 int is_ram = page_is_ram(pfn);
28935
28936- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
28937+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
28938 return NULL;
28939 WARN_ON_ONCE(is_ram);
28940 }
28941@@ -256,7 +256,7 @@ EXPORT_SYMBOL(ioremap_prot);
28942 *
28943 * Caller must ensure there is only one unmapping for the same pointer.
28944 */
28945-void iounmap(volatile void __iomem *addr)
28946+void iounmap(const volatile void __iomem *addr)
28947 {
28948 struct vm_struct *p, *o;
28949
28950@@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
28951
28952 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
28953 if (page_is_ram(start >> PAGE_SHIFT))
28954+#ifdef CONFIG_HIGHMEM
28955+ if ((start >> PAGE_SHIFT) < max_low_pfn)
28956+#endif
28957 return __va(phys);
28958
28959 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
28960@@ -327,6 +330,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
28961 void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
28962 {
28963 if (page_is_ram(phys >> PAGE_SHIFT))
28964+#ifdef CONFIG_HIGHMEM
28965+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
28966+#endif
28967 return;
28968
28969 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
28970@@ -344,7 +350,7 @@ static int __init early_ioremap_debug_setup(char *str)
28971 early_param("early_ioremap_debug", early_ioremap_debug_setup);
28972
28973 static __initdata int after_paging_init;
28974-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
28975+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
28976
28977 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
28978 {
28979@@ -381,8 +387,7 @@ void __init early_ioremap_init(void)
28980 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
28981
28982 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
28983- memset(bm_pte, 0, sizeof(bm_pte));
28984- pmd_populate_kernel(&init_mm, pmd, bm_pte);
28985+ pmd_populate_user(&init_mm, pmd, bm_pte);
28986
28987 /*
28988 * The boot-ioremap range spans multiple pmds, for which
28989diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
28990index d87dd6d..bf3fa66 100644
28991--- a/arch/x86/mm/kmemcheck/kmemcheck.c
28992+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
28993@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
28994 * memory (e.g. tracked pages)? For now, we need this to avoid
28995 * invoking kmemcheck for PnP BIOS calls.
28996 */
28997- if (regs->flags & X86_VM_MASK)
28998+ if (v8086_mode(regs))
28999 return false;
29000- if (regs->cs != __KERNEL_CS)
29001+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
29002 return false;
29003
29004 pte = kmemcheck_pte_lookup(address);
29005diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
29006index 845df68..1d8d29f 100644
29007--- a/arch/x86/mm/mmap.c
29008+++ b/arch/x86/mm/mmap.c
29009@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
29010 * Leave an at least ~128 MB hole with possible stack randomization.
29011 */
29012 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
29013-#define MAX_GAP (TASK_SIZE/6*5)
29014+#define MAX_GAP (pax_task_size/6*5)
29015
29016 static int mmap_is_legacy(void)
29017 {
29018@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
29019 return rnd << PAGE_SHIFT;
29020 }
29021
29022-static unsigned long mmap_base(void)
29023+static unsigned long mmap_base(struct mm_struct *mm)
29024 {
29025 unsigned long gap = rlimit(RLIMIT_STACK);
29026+ unsigned long pax_task_size = TASK_SIZE;
29027+
29028+#ifdef CONFIG_PAX_SEGMEXEC
29029+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
29030+ pax_task_size = SEGMEXEC_TASK_SIZE;
29031+#endif
29032
29033 if (gap < MIN_GAP)
29034 gap = MIN_GAP;
29035 else if (gap > MAX_GAP)
29036 gap = MAX_GAP;
29037
29038- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
29039+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
29040 }
29041
29042 /*
29043 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
29044 * does, but not when emulating X86_32
29045 */
29046-static unsigned long mmap_legacy_base(void)
29047+static unsigned long mmap_legacy_base(struct mm_struct *mm)
29048 {
29049- if (mmap_is_ia32())
29050+ if (mmap_is_ia32()) {
29051+
29052+#ifdef CONFIG_PAX_SEGMEXEC
29053+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
29054+ return SEGMEXEC_TASK_UNMAPPED_BASE;
29055+ else
29056+#endif
29057+
29058 return TASK_UNMAPPED_BASE;
29059- else
29060+ } else
29061 return TASK_UNMAPPED_BASE + mmap_rnd();
29062 }
29063
29064@@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
29065 void arch_pick_mmap_layout(struct mm_struct *mm)
29066 {
29067 if (mmap_is_legacy()) {
29068- mm->mmap_base = mmap_legacy_base();
29069+ mm->mmap_base = mmap_legacy_base(mm);
29070+
29071+#ifdef CONFIG_PAX_RANDMMAP
29072+ if (mm->pax_flags & MF_PAX_RANDMMAP)
29073+ mm->mmap_base += mm->delta_mmap;
29074+#endif
29075+
29076 mm->get_unmapped_area = arch_get_unmapped_area;
29077 mm->unmap_area = arch_unmap_area;
29078 } else {
29079- mm->mmap_base = mmap_base();
29080+ mm->mmap_base = mmap_base(mm);
29081+
29082+#ifdef CONFIG_PAX_RANDMMAP
29083+ if (mm->pax_flags & MF_PAX_RANDMMAP)
29084+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
29085+#endif
29086+
29087 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
29088 mm->unmap_area = arch_unmap_area_topdown;
29089 }
29090diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
29091index dc0b727..f612039 100644
29092--- a/arch/x86/mm/mmio-mod.c
29093+++ b/arch/x86/mm/mmio-mod.c
29094@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
29095 break;
29096 default:
29097 {
29098- unsigned char *ip = (unsigned char *)instptr;
29099+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
29100 my_trace->opcode = MMIO_UNKNOWN_OP;
29101 my_trace->width = 0;
29102 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
29103@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
29104 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
29105 void __iomem *addr)
29106 {
29107- static atomic_t next_id;
29108+ static atomic_unchecked_t next_id;
29109 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
29110 /* These are page-unaligned. */
29111 struct mmiotrace_map map = {
29112@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
29113 .private = trace
29114 },
29115 .phys = offset,
29116- .id = atomic_inc_return(&next_id)
29117+ .id = atomic_inc_return_unchecked(&next_id)
29118 };
29119 map.map_id = trace->id;
29120
29121@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
29122 ioremap_trace_core(offset, size, addr);
29123 }
29124
29125-static void iounmap_trace_core(volatile void __iomem *addr)
29126+static void iounmap_trace_core(const volatile void __iomem *addr)
29127 {
29128 struct mmiotrace_map map = {
29129 .phys = 0,
29130@@ -328,7 +328,7 @@ not_enabled:
29131 }
29132 }
29133
29134-void mmiotrace_iounmap(volatile void __iomem *addr)
29135+void mmiotrace_iounmap(const volatile void __iomem *addr)
29136 {
29137 might_sleep();
29138 if (is_enabled()) /* recheck and proper locking in *_core() */
29139diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
29140index 8504f36..5fc68f2 100644
29141--- a/arch/x86/mm/numa.c
29142+++ b/arch/x86/mm/numa.c
29143@@ -478,7 +478,7 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
29144 return true;
29145 }
29146
29147-static int __init numa_register_memblks(struct numa_meminfo *mi)
29148+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
29149 {
29150 unsigned long uninitialized_var(pfn_align);
29151 int i, nid;
29152diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
29153index b008656..773eac2 100644
29154--- a/arch/x86/mm/pageattr-test.c
29155+++ b/arch/x86/mm/pageattr-test.c
29156@@ -36,7 +36,7 @@ enum {
29157
29158 static int pte_testbit(pte_t pte)
29159 {
29160- return pte_flags(pte) & _PAGE_UNUSED1;
29161+ return pte_flags(pte) & _PAGE_CPA_TEST;
29162 }
29163
29164 struct split_state {
29165diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
29166index a718e0d..77419bc 100644
29167--- a/arch/x86/mm/pageattr.c
29168+++ b/arch/x86/mm/pageattr.c
29169@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
29170 */
29171 #ifdef CONFIG_PCI_BIOS
29172 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
29173- pgprot_val(forbidden) |= _PAGE_NX;
29174+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
29175 #endif
29176
29177 /*
29178@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
29179 * Does not cover __inittext since that is gone later on. On
29180 * 64bit we do not enforce !NX on the low mapping
29181 */
29182- if (within(address, (unsigned long)_text, (unsigned long)_etext))
29183- pgprot_val(forbidden) |= _PAGE_NX;
29184+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
29185+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
29186
29187+#ifdef CONFIG_DEBUG_RODATA
29188 /*
29189 * The .rodata section needs to be read-only. Using the pfn
29190 * catches all aliases.
29191@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
29192 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
29193 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
29194 pgprot_val(forbidden) |= _PAGE_RW;
29195+#endif
29196
29197 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
29198 /*
29199@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
29200 }
29201 #endif
29202
29203+#ifdef CONFIG_PAX_KERNEXEC
29204+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
29205+ pgprot_val(forbidden) |= _PAGE_RW;
29206+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
29207+ }
29208+#endif
29209+
29210 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
29211
29212 return prot;
29213@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
29214 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
29215 {
29216 /* change init_mm */
29217+ pax_open_kernel();
29218 set_pte_atomic(kpte, pte);
29219+
29220 #ifdef CONFIG_X86_32
29221 if (!SHARED_KERNEL_PMD) {
29222+
29223+#ifdef CONFIG_PAX_PER_CPU_PGD
29224+ unsigned long cpu;
29225+#else
29226 struct page *page;
29227+#endif
29228
29229+#ifdef CONFIG_PAX_PER_CPU_PGD
29230+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
29231+ pgd_t *pgd = get_cpu_pgd(cpu);
29232+#else
29233 list_for_each_entry(page, &pgd_list, lru) {
29234- pgd_t *pgd;
29235+ pgd_t *pgd = (pgd_t *)page_address(page);
29236+#endif
29237+
29238 pud_t *pud;
29239 pmd_t *pmd;
29240
29241- pgd = (pgd_t *)page_address(page) + pgd_index(address);
29242+ pgd += pgd_index(address);
29243 pud = pud_offset(pgd, address);
29244 pmd = pmd_offset(pud, address);
29245 set_pte_atomic((pte_t *)pmd, pte);
29246 }
29247 }
29248 #endif
29249+ pax_close_kernel();
29250 }
29251
29252 static int
29253diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
29254index 0eb572e..92f5c1e 100644
29255--- a/arch/x86/mm/pat.c
29256+++ b/arch/x86/mm/pat.c
29257@@ -376,7 +376,7 @@ int free_memtype(u64 start, u64 end)
29258
29259 if (!entry) {
29260 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
29261- current->comm, current->pid, start, end - 1);
29262+ current->comm, task_pid_nr(current), start, end - 1);
29263 return -EINVAL;
29264 }
29265
29266@@ -506,8 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29267
29268 while (cursor < to) {
29269 if (!devmem_is_allowed(pfn)) {
29270- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
29271- current->comm, from, to - 1);
29272+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
29273+ current->comm, from, to - 1, cursor);
29274 return 0;
29275 }
29276 cursor += PAGE_SIZE;
29277@@ -570,7 +570,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
29278 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
29279 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
29280 "for [mem %#010Lx-%#010Lx]\n",
29281- current->comm, current->pid,
29282+ current->comm, task_pid_nr(current),
29283 cattr_name(flags),
29284 base, (unsigned long long)(base + size-1));
29285 return -EINVAL;
29286@@ -605,7 +605,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
29287 flags = lookup_memtype(paddr);
29288 if (want_flags != flags) {
29289 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
29290- current->comm, current->pid,
29291+ current->comm, task_pid_nr(current),
29292 cattr_name(want_flags),
29293 (unsigned long long)paddr,
29294 (unsigned long long)(paddr + size - 1),
29295@@ -627,7 +627,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
29296 free_memtype(paddr, paddr + size);
29297 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
29298 " for [mem %#010Lx-%#010Lx], got %s\n",
29299- current->comm, current->pid,
29300+ current->comm, task_pid_nr(current),
29301 cattr_name(want_flags),
29302 (unsigned long long)paddr,
29303 (unsigned long long)(paddr + size - 1),
29304diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
29305index 9f0614d..92ae64a 100644
29306--- a/arch/x86/mm/pf_in.c
29307+++ b/arch/x86/mm/pf_in.c
29308@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
29309 int i;
29310 enum reason_type rv = OTHERS;
29311
29312- p = (unsigned char *)ins_addr;
29313+ p = (unsigned char *)ktla_ktva(ins_addr);
29314 p += skip_prefix(p, &prf);
29315 p += get_opcode(p, &opcode);
29316
29317@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
29318 struct prefix_bits prf;
29319 int i;
29320
29321- p = (unsigned char *)ins_addr;
29322+ p = (unsigned char *)ktla_ktva(ins_addr);
29323 p += skip_prefix(p, &prf);
29324 p += get_opcode(p, &opcode);
29325
29326@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
29327 struct prefix_bits prf;
29328 int i;
29329
29330- p = (unsigned char *)ins_addr;
29331+ p = (unsigned char *)ktla_ktva(ins_addr);
29332 p += skip_prefix(p, &prf);
29333 p += get_opcode(p, &opcode);
29334
29335@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
29336 struct prefix_bits prf;
29337 int i;
29338
29339- p = (unsigned char *)ins_addr;
29340+ p = (unsigned char *)ktla_ktva(ins_addr);
29341 p += skip_prefix(p, &prf);
29342 p += get_opcode(p, &opcode);
29343 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
29344@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
29345 struct prefix_bits prf;
29346 int i;
29347
29348- p = (unsigned char *)ins_addr;
29349+ p = (unsigned char *)ktla_ktva(ins_addr);
29350 p += skip_prefix(p, &prf);
29351 p += get_opcode(p, &opcode);
29352 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
29353diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
29354index e27fbf8..8b56dc9 100644
29355--- a/arch/x86/mm/pgtable.c
29356+++ b/arch/x86/mm/pgtable.c
29357@@ -84,10 +84,64 @@ static inline void pgd_list_del(pgd_t *pgd)
29358 list_del(&page->lru);
29359 }
29360
29361-#define UNSHARED_PTRS_PER_PGD \
29362- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
29363+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29364+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
29365
29366+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
29367+{
29368+ unsigned int count = USER_PGD_PTRS;
29369
29370+ while (count--)
29371+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
29372+}
29373+#endif
29374+
29375+#ifdef CONFIG_PAX_PER_CPU_PGD
29376+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
29377+{
29378+ unsigned int count = USER_PGD_PTRS;
29379+
29380+ while (count--) {
29381+ pgd_t pgd;
29382+
29383+#ifdef CONFIG_X86_64
29384+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
29385+#else
29386+ pgd = *src++;
29387+#endif
29388+
29389+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29390+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
29391+#endif
29392+
29393+ *dst++ = pgd;
29394+ }
29395+
29396+}
29397+#endif
29398+
29399+#ifdef CONFIG_X86_64
29400+#define pxd_t pud_t
29401+#define pyd_t pgd_t
29402+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
29403+#define pxd_free(mm, pud) pud_free((mm), (pud))
29404+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
29405+#define pyd_offset(mm, address) pgd_offset((mm), (address))
29406+#define PYD_SIZE PGDIR_SIZE
29407+#else
29408+#define pxd_t pmd_t
29409+#define pyd_t pud_t
29410+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
29411+#define pxd_free(mm, pud) pmd_free((mm), (pud))
29412+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
29413+#define pyd_offset(mm, address) pud_offset((mm), (address))
29414+#define PYD_SIZE PUD_SIZE
29415+#endif
29416+
29417+#ifdef CONFIG_PAX_PER_CPU_PGD
29418+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
29419+static inline void pgd_dtor(pgd_t *pgd) {}
29420+#else
29421 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
29422 {
29423 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
29424@@ -128,6 +182,7 @@ static void pgd_dtor(pgd_t *pgd)
29425 pgd_list_del(pgd);
29426 spin_unlock(&pgd_lock);
29427 }
29428+#endif
29429
29430 /*
29431 * List of all pgd's needed for non-PAE so it can invalidate entries
29432@@ -140,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
29433 * -- nyc
29434 */
29435
29436-#ifdef CONFIG_X86_PAE
29437+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
29438 /*
29439 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
29440 * updating the top-level pagetable entries to guarantee the
29441@@ -152,7 +207,7 @@ static void pgd_dtor(pgd_t *pgd)
29442 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
29443 * and initialize the kernel pmds here.
29444 */
29445-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
29446+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
29447
29448 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
29449 {
29450@@ -170,36 +225,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
29451 */
29452 flush_tlb_mm(mm);
29453 }
29454+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
29455+#define PREALLOCATED_PXDS USER_PGD_PTRS
29456 #else /* !CONFIG_X86_PAE */
29457
29458 /* No need to prepopulate any pagetable entries in non-PAE modes. */
29459-#define PREALLOCATED_PMDS 0
29460+#define PREALLOCATED_PXDS 0
29461
29462 #endif /* CONFIG_X86_PAE */
29463
29464-static void free_pmds(pmd_t *pmds[])
29465+static void free_pxds(pxd_t *pxds[])
29466 {
29467 int i;
29468
29469- for(i = 0; i < PREALLOCATED_PMDS; i++)
29470- if (pmds[i])
29471- free_page((unsigned long)pmds[i]);
29472+ for(i = 0; i < PREALLOCATED_PXDS; i++)
29473+ if (pxds[i])
29474+ free_page((unsigned long)pxds[i]);
29475 }
29476
29477-static int preallocate_pmds(pmd_t *pmds[])
29478+static int preallocate_pxds(pxd_t *pxds[])
29479 {
29480 int i;
29481 bool failed = false;
29482
29483- for(i = 0; i < PREALLOCATED_PMDS; i++) {
29484- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
29485- if (pmd == NULL)
29486+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
29487+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
29488+ if (pxd == NULL)
29489 failed = true;
29490- pmds[i] = pmd;
29491+ pxds[i] = pxd;
29492 }
29493
29494 if (failed) {
29495- free_pmds(pmds);
29496+ free_pxds(pxds);
29497 return -ENOMEM;
29498 }
29499
29500@@ -212,51 +269,55 @@ static int preallocate_pmds(pmd_t *pmds[])
29501 * preallocate which never got a corresponding vma will need to be
29502 * freed manually.
29503 */
29504-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
29505+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
29506 {
29507 int i;
29508
29509- for(i = 0; i < PREALLOCATED_PMDS; i++) {
29510+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
29511 pgd_t pgd = pgdp[i];
29512
29513 if (pgd_val(pgd) != 0) {
29514- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
29515+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
29516
29517- pgdp[i] = native_make_pgd(0);
29518+ set_pgd(pgdp + i, native_make_pgd(0));
29519
29520- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
29521- pmd_free(mm, pmd);
29522+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
29523+ pxd_free(mm, pxd);
29524 }
29525 }
29526 }
29527
29528-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
29529+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
29530 {
29531- pud_t *pud;
29532+ pyd_t *pyd;
29533 unsigned long addr;
29534 int i;
29535
29536- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
29537+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
29538 return;
29539
29540- pud = pud_offset(pgd, 0);
29541+#ifdef CONFIG_X86_64
29542+ pyd = pyd_offset(mm, 0L);
29543+#else
29544+ pyd = pyd_offset(pgd, 0L);
29545+#endif
29546
29547- for (addr = i = 0; i < PREALLOCATED_PMDS;
29548- i++, pud++, addr += PUD_SIZE) {
29549- pmd_t *pmd = pmds[i];
29550+ for (addr = i = 0; i < PREALLOCATED_PXDS;
29551+ i++, pyd++, addr += PYD_SIZE) {
29552+ pxd_t *pxd = pxds[i];
29553
29554 if (i >= KERNEL_PGD_BOUNDARY)
29555- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
29556- sizeof(pmd_t) * PTRS_PER_PMD);
29557+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
29558+ sizeof(pxd_t) * PTRS_PER_PMD);
29559
29560- pud_populate(mm, pud, pmd);
29561+ pyd_populate(mm, pyd, pxd);
29562 }
29563 }
29564
29565 pgd_t *pgd_alloc(struct mm_struct *mm)
29566 {
29567 pgd_t *pgd;
29568- pmd_t *pmds[PREALLOCATED_PMDS];
29569+ pxd_t *pxds[PREALLOCATED_PXDS];
29570
29571 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
29572
29573@@ -265,11 +326,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
29574
29575 mm->pgd = pgd;
29576
29577- if (preallocate_pmds(pmds) != 0)
29578+ if (preallocate_pxds(pxds) != 0)
29579 goto out_free_pgd;
29580
29581 if (paravirt_pgd_alloc(mm) != 0)
29582- goto out_free_pmds;
29583+ goto out_free_pxds;
29584
29585 /*
29586 * Make sure that pre-populating the pmds is atomic with
29587@@ -279,14 +340,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
29588 spin_lock(&pgd_lock);
29589
29590 pgd_ctor(mm, pgd);
29591- pgd_prepopulate_pmd(mm, pgd, pmds);
29592+ pgd_prepopulate_pxd(mm, pgd, pxds);
29593
29594 spin_unlock(&pgd_lock);
29595
29596 return pgd;
29597
29598-out_free_pmds:
29599- free_pmds(pmds);
29600+out_free_pxds:
29601+ free_pxds(pxds);
29602 out_free_pgd:
29603 free_page((unsigned long)pgd);
29604 out:
29605@@ -295,7 +356,7 @@ out:
29606
29607 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
29608 {
29609- pgd_mop_up_pmds(mm, pgd);
29610+ pgd_mop_up_pxds(mm, pgd);
29611 pgd_dtor(pgd);
29612 paravirt_pgd_free(mm, pgd);
29613 free_page((unsigned long)pgd);
29614diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
29615index a69bcb8..19068ab 100644
29616--- a/arch/x86/mm/pgtable_32.c
29617+++ b/arch/x86/mm/pgtable_32.c
29618@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
29619 return;
29620 }
29621 pte = pte_offset_kernel(pmd, vaddr);
29622+
29623+ pax_open_kernel();
29624 if (pte_val(pteval))
29625 set_pte_at(&init_mm, vaddr, pte, pteval);
29626 else
29627 pte_clear(&init_mm, vaddr, pte);
29628+ pax_close_kernel();
29629
29630 /*
29631 * It's enough to flush this one mapping.
29632diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
29633index d2e2735..5c6586f 100644
29634--- a/arch/x86/mm/physaddr.c
29635+++ b/arch/x86/mm/physaddr.c
29636@@ -8,7 +8,7 @@
29637
29638 #ifdef CONFIG_X86_64
29639
29640-unsigned long __phys_addr(unsigned long x)
29641+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
29642 {
29643 if (x >= __START_KERNEL_map) {
29644 x -= __START_KERNEL_map;
29645@@ -45,7 +45,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
29646 #else
29647
29648 #ifdef CONFIG_DEBUG_VIRTUAL
29649-unsigned long __phys_addr(unsigned long x)
29650+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
29651 {
29652 /* VMALLOC_* aren't constants */
29653 VIRTUAL_BUG_ON(x < PAGE_OFFSET);
29654diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
29655index 410531d..0f16030 100644
29656--- a/arch/x86/mm/setup_nx.c
29657+++ b/arch/x86/mm/setup_nx.c
29658@@ -5,8 +5,10 @@
29659 #include <asm/pgtable.h>
29660 #include <asm/proto.h>
29661
29662+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
29663 static int disable_nx __cpuinitdata;
29664
29665+#ifndef CONFIG_PAX_PAGEEXEC
29666 /*
29667 * noexec = on|off
29668 *
29669@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
29670 return 0;
29671 }
29672 early_param("noexec", noexec_setup);
29673+#endif
29674+
29675+#endif
29676
29677 void __cpuinit x86_configure_nx(void)
29678 {
29679+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
29680 if (cpu_has_nx && !disable_nx)
29681 __supported_pte_mask |= _PAGE_NX;
29682 else
29683+#endif
29684 __supported_pte_mask &= ~_PAGE_NX;
29685 }
29686
29687diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
29688index 13a6b29..c2fff23 100644
29689--- a/arch/x86/mm/tlb.c
29690+++ b/arch/x86/mm/tlb.c
29691@@ -48,7 +48,11 @@ void leave_mm(int cpu)
29692 BUG();
29693 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
29694 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
29695+
29696+#ifndef CONFIG_PAX_PER_CPU_PGD
29697 load_cr3(swapper_pg_dir);
29698+#endif
29699+
29700 }
29701 }
29702 EXPORT_SYMBOL_GPL(leave_mm);
29703diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
29704index 877b9a1..a8ecf42 100644
29705--- a/arch/x86/net/bpf_jit.S
29706+++ b/arch/x86/net/bpf_jit.S
29707@@ -9,6 +9,7 @@
29708 */
29709 #include <linux/linkage.h>
29710 #include <asm/dwarf2.h>
29711+#include <asm/alternative-asm.h>
29712
29713 /*
29714 * Calling convention :
29715@@ -35,6 +36,7 @@ sk_load_word_positive_offset:
29716 jle bpf_slow_path_word
29717 mov (SKBDATA,%rsi),%eax
29718 bswap %eax /* ntohl() */
29719+ pax_force_retaddr
29720 ret
29721
29722 sk_load_half:
29723@@ -52,6 +54,7 @@ sk_load_half_positive_offset:
29724 jle bpf_slow_path_half
29725 movzwl (SKBDATA,%rsi),%eax
29726 rol $8,%ax # ntohs()
29727+ pax_force_retaddr
29728 ret
29729
29730 sk_load_byte:
29731@@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
29732 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
29733 jle bpf_slow_path_byte
29734 movzbl (SKBDATA,%rsi),%eax
29735+ pax_force_retaddr
29736 ret
29737
29738 /**
29739@@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
29740 movzbl (SKBDATA,%rsi),%ebx
29741 and $15,%bl
29742 shl $2,%bl
29743+ pax_force_retaddr
29744 ret
29745
29746 /* rsi contains offset and can be scratched */
29747@@ -109,6 +114,7 @@ bpf_slow_path_word:
29748 js bpf_error
29749 mov -12(%rbp),%eax
29750 bswap %eax
29751+ pax_force_retaddr
29752 ret
29753
29754 bpf_slow_path_half:
29755@@ -117,12 +123,14 @@ bpf_slow_path_half:
29756 mov -12(%rbp),%ax
29757 rol $8,%ax
29758 movzwl %ax,%eax
29759+ pax_force_retaddr
29760 ret
29761
29762 bpf_slow_path_byte:
29763 bpf_slow_path_common(1)
29764 js bpf_error
29765 movzbl -12(%rbp),%eax
29766+ pax_force_retaddr
29767 ret
29768
29769 bpf_slow_path_byte_msh:
29770@@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
29771 and $15,%al
29772 shl $2,%al
29773 xchg %eax,%ebx
29774+ pax_force_retaddr
29775 ret
29776
29777 #define sk_negative_common(SIZE) \
29778@@ -157,6 +166,7 @@ sk_load_word_negative_offset:
29779 sk_negative_common(4)
29780 mov (%rax), %eax
29781 bswap %eax
29782+ pax_force_retaddr
29783 ret
29784
29785 bpf_slow_path_half_neg:
29786@@ -168,6 +178,7 @@ sk_load_half_negative_offset:
29787 mov (%rax),%ax
29788 rol $8,%ax
29789 movzwl %ax,%eax
29790+ pax_force_retaddr
29791 ret
29792
29793 bpf_slow_path_byte_neg:
29794@@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
29795 .globl sk_load_byte_negative_offset
29796 sk_negative_common(1)
29797 movzbl (%rax), %eax
29798+ pax_force_retaddr
29799 ret
29800
29801 bpf_slow_path_byte_msh_neg:
29802@@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
29803 and $15,%al
29804 shl $2,%al
29805 xchg %eax,%ebx
29806+ pax_force_retaddr
29807 ret
29808
29809 bpf_error:
29810@@ -197,4 +210,5 @@ bpf_error:
29811 xor %eax,%eax
29812 mov -8(%rbp),%rbx
29813 leaveq
29814+ pax_force_retaddr
29815 ret
29816diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
29817index d11a470..3f9adff3 100644
29818--- a/arch/x86/net/bpf_jit_comp.c
29819+++ b/arch/x86/net/bpf_jit_comp.c
29820@@ -12,6 +12,7 @@
29821 #include <linux/netdevice.h>
29822 #include <linux/filter.h>
29823 #include <linux/if_vlan.h>
29824+#include <linux/random.h>
29825
29826 /*
29827 * Conventions :
29828@@ -49,13 +50,87 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
29829 return ptr + len;
29830 }
29831
29832+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29833+#define MAX_INSTR_CODE_SIZE 96
29834+#else
29835+#define MAX_INSTR_CODE_SIZE 64
29836+#endif
29837+
29838 #define EMIT(bytes, len) do { prog = emit_code(prog, bytes, len); } while (0)
29839
29840 #define EMIT1(b1) EMIT(b1, 1)
29841 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
29842 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
29843 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
29844+
29845+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29846+/* original constant will appear in ecx */
29847+#define DILUTE_CONST_SEQUENCE(_off, _key) \
29848+do { \
29849+ /* mov ecx, randkey */ \
29850+ EMIT1(0xb9); \
29851+ EMIT(_key, 4); \
29852+ /* xor ecx, randkey ^ off */ \
29853+ EMIT2(0x81, 0xf1); \
29854+ EMIT((_key) ^ (_off), 4); \
29855+} while (0)
29856+
29857+#define EMIT1_off32(b1, _off) \
29858+do { \
29859+ switch (b1) { \
29860+ case 0x05: /* add eax, imm32 */ \
29861+ case 0x2d: /* sub eax, imm32 */ \
29862+ case 0x25: /* and eax, imm32 */ \
29863+ case 0x0d: /* or eax, imm32 */ \
29864+ case 0xb8: /* mov eax, imm32 */ \
29865+ case 0x3d: /* cmp eax, imm32 */ \
29866+ case 0xa9: /* test eax, imm32 */ \
29867+ DILUTE_CONST_SEQUENCE(_off, randkey); \
29868+ EMIT2((b1) - 4, 0xc8); /* convert imm instruction to eax, ecx */\
29869+ break; \
29870+ case 0xbb: /* mov ebx, imm32 */ \
29871+ DILUTE_CONST_SEQUENCE(_off, randkey); \
29872+ /* mov ebx, ecx */ \
29873+ EMIT2(0x89, 0xcb); \
29874+ break; \
29875+ case 0xbe: /* mov esi, imm32 */ \
29876+ DILUTE_CONST_SEQUENCE(_off, randkey); \
29877+ /* mov esi, ecx */ \
29878+ EMIT2(0x89, 0xce); \
29879+ break; \
29880+ case 0xe9: /* jmp rel imm32 */ \
29881+ EMIT1(b1); \
29882+ EMIT(_off, 4); \
29883+ /* prevent fall-through, we're not called if off = 0 */ \
29884+ EMIT(0xcccccccc, 4); \
29885+ EMIT(0xcccccccc, 4); \
29886+ break; \
29887+ default: \
29888+ EMIT1(b1); \
29889+ EMIT(_off, 4); \
29890+ } \
29891+} while (0)
29892+
29893+#define EMIT2_off32(b1, b2, _off) \
29894+do { \
29895+ if ((b1) == 0x8d && (b2) == 0xb3) { /* lea esi, [rbx+imm32] */ \
29896+ EMIT2(0x8d, 0xb3); /* lea esi, [rbx+randkey] */ \
29897+ EMIT(randkey, 4); \
29898+ EMIT2(0x8d, 0xb6); /* lea esi, [esi+off-randkey] */ \
29899+ EMIT((_off) - randkey, 4); \
29900+ } else if ((b1) == 0x69 && (b2) == 0xc0) { /* imul eax, imm32 */\
29901+ DILUTE_CONST_SEQUENCE(_off, randkey); \
29902+ /* imul eax, ecx */ \
29903+ EMIT3(0x0f, 0xaf, 0xc1); \
29904+ } else { \
29905+ EMIT2(b1, b2); \
29906+ EMIT(_off, 4); \
29907+ } \
29908+} while (0)
29909+#else
29910 #define EMIT1_off32(b1, off) do { EMIT1(b1); EMIT(off, 4);} while (0)
29911+#define EMIT2_off32(b1, b2, off) do { EMIT2(b1, b2); EMIT(off, 4);} while (0)
29912+#endif
29913
29914 #define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
29915 #define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
29916@@ -90,6 +165,24 @@ do { \
29917 #define X86_JBE 0x76
29918 #define X86_JA 0x77
29919
29920+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29921+#define APPEND_FLOW_VERIFY() \
29922+do { \
29923+ /* mov ecx, randkey */ \
29924+ EMIT1(0xb9); \
29925+ EMIT(randkey, 4); \
29926+ /* cmp ecx, randkey */ \
29927+ EMIT2(0x81, 0xf9); \
29928+ EMIT(randkey, 4); \
29929+ /* jz after 8 int 3s */ \
29930+ EMIT2(0x74, 0x08); \
29931+ EMIT(0xcccccccc, 4); \
29932+ EMIT(0xcccccccc, 4); \
29933+} while (0)
29934+#else
29935+#define APPEND_FLOW_VERIFY() do { } while (0)
29936+#endif
29937+
29938 #define EMIT_COND_JMP(op, offset) \
29939 do { \
29940 if (is_near(offset)) \
29941@@ -97,6 +190,7 @@ do { \
29942 else { \
29943 EMIT2(0x0f, op + 0x10); \
29944 EMIT(offset, 4); /* jxx .+off32 */ \
29945+ APPEND_FLOW_VERIFY(); \
29946 } \
29947 } while (0)
29948
29949@@ -121,12 +215,17 @@ static inline void bpf_flush_icache(void *start, void *end)
29950 set_fs(old_fs);
29951 }
29952
29953+struct bpf_jit_work {
29954+ struct work_struct work;
29955+ void *image;
29956+};
29957+
29958 #define CHOOSE_LOAD_FUNC(K, func) \
29959 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
29960
29961 void bpf_jit_compile(struct sk_filter *fp)
29962 {
29963- u8 temp[64];
29964+ u8 temp[MAX_INSTR_CODE_SIZE];
29965 u8 *prog;
29966 unsigned int proglen, oldproglen = 0;
29967 int ilen, i;
29968@@ -139,6 +238,9 @@ void bpf_jit_compile(struct sk_filter *fp)
29969 unsigned int *addrs;
29970 const struct sock_filter *filter = fp->insns;
29971 int flen = fp->len;
29972+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29973+ unsigned int randkey;
29974+#endif
29975
29976 if (!bpf_jit_enable)
29977 return;
29978@@ -147,11 +249,19 @@ void bpf_jit_compile(struct sk_filter *fp)
29979 if (addrs == NULL)
29980 return;
29981
29982+ fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
29983+ if (!fp->work)
29984+ goto out;
29985+
29986+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29987+ randkey = get_random_int();
29988+#endif
29989+
29990 /* Before first pass, make a rough estimation of addrs[]
29991- * each bpf instruction is translated to less than 64 bytes
29992+ * each bpf instruction is translated to less than MAX_INSTR_CODE_SIZE bytes
29993 */
29994 for (proglen = 0, i = 0; i < flen; i++) {
29995- proglen += 64;
29996+ proglen += MAX_INSTR_CODE_SIZE;
29997 addrs[i] = proglen;
29998 }
29999 cleanup_addr = proglen; /* epilogue address */
30000@@ -261,10 +371,8 @@ void bpf_jit_compile(struct sk_filter *fp)
30001 case BPF_S_ALU_MUL_K: /* A *= K */
30002 if (is_imm8(K))
30003 EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
30004- else {
30005- EMIT2(0x69, 0xc0); /* imul imm32,%eax */
30006- EMIT(K, 4);
30007- }
30008+ else
30009+ EMIT2_off32(0x69, 0xc0, K); /* imul imm32,%eax */
30010 break;
30011 case BPF_S_ALU_DIV_X: /* A /= X; */
30012 seen |= SEEN_XREG;
30013@@ -304,13 +412,23 @@ void bpf_jit_compile(struct sk_filter *fp)
30014 break;
30015 case BPF_S_ALU_MOD_K: /* A %= K; */
30016 EMIT2(0x31, 0xd2); /* xor %edx,%edx */
30017+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
30018+ DILUTE_CONST_SEQUENCE(K, randkey);
30019+#else
30020 EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
30021+#endif
30022 EMIT2(0xf7, 0xf1); /* div %ecx */
30023 EMIT2(0x89, 0xd0); /* mov %edx,%eax */
30024 break;
30025 case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
30026+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
30027+ DILUTE_CONST_SEQUENCE(K, randkey);
30028+ // imul rax, rcx
30029+ EMIT4(0x48, 0x0f, 0xaf, 0xc1);
30030+#else
30031 EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
30032 EMIT(K, 4);
30033+#endif
30034 EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */
30035 break;
30036 case BPF_S_ALU_AND_X:
30037@@ -564,8 +682,7 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
30038 if (is_imm8(K)) {
30039 EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
30040 } else {
30041- EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
30042- EMIT(K, 4);
30043+ EMIT2_off32(0x8d, 0xb3, K); /* lea imm32(%rbx),%esi */
30044 }
30045 } else {
30046 EMIT2(0x89,0xde); /* mov %ebx,%esi */
30047@@ -648,17 +765,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
30048 break;
30049 default:
30050 /* hmm, too complex filter, give up with jit compiler */
30051- goto out;
30052+ goto error;
30053 }
30054 ilen = prog - temp;
30055 if (image) {
30056 if (unlikely(proglen + ilen > oldproglen)) {
30057 pr_err("bpb_jit_compile fatal error\n");
30058- kfree(addrs);
30059- module_free(NULL, image);
30060- return;
30061+ module_free_exec(NULL, image);
30062+ goto error;
30063 }
30064+ pax_open_kernel();
30065 memcpy(image + proglen, temp, ilen);
30066+ pax_close_kernel();
30067 }
30068 proglen += ilen;
30069 addrs[i] = proglen;
30070@@ -679,11 +797,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
30071 break;
30072 }
30073 if (proglen == oldproglen) {
30074- image = module_alloc(max_t(unsigned int,
30075- proglen,
30076- sizeof(struct work_struct)));
30077+ image = module_alloc_exec(proglen);
30078 if (!image)
30079- goto out;
30080+ goto error;
30081 }
30082 oldproglen = proglen;
30083 }
30084@@ -699,7 +815,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
30085 bpf_flush_icache(image, image + proglen);
30086
30087 fp->bpf_func = (void *)image;
30088- }
30089+ } else
30090+error:
30091+ kfree(fp->work);
30092+
30093 out:
30094 kfree(addrs);
30095 return;
30096@@ -707,18 +826,20 @@ out:
30097
30098 static void jit_free_defer(struct work_struct *arg)
30099 {
30100- module_free(NULL, arg);
30101+ module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
30102+ kfree(arg);
30103 }
30104
30105 /* run from softirq, we must use a work_struct to call
30106- * module_free() from process context
30107+ * module_free_exec() from process context
30108 */
30109 void bpf_jit_free(struct sk_filter *fp)
30110 {
30111 if (fp->bpf_func != sk_run_filter) {
30112- struct work_struct *work = (struct work_struct *)fp->bpf_func;
30113+ struct work_struct *work = &fp->work->work;
30114
30115 INIT_WORK(work, jit_free_defer);
30116+ fp->work->image = fp->bpf_func;
30117 schedule_work(work);
30118 }
30119 }
30120diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
30121index d6aa6e8..266395a 100644
30122--- a/arch/x86/oprofile/backtrace.c
30123+++ b/arch/x86/oprofile/backtrace.c
30124@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
30125 struct stack_frame_ia32 *fp;
30126 unsigned long bytes;
30127
30128- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
30129+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
30130 if (bytes != sizeof(bufhead))
30131 return NULL;
30132
30133- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
30134+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
30135
30136 oprofile_add_trace(bufhead[0].return_address);
30137
30138@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
30139 struct stack_frame bufhead[2];
30140 unsigned long bytes;
30141
30142- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
30143+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
30144 if (bytes != sizeof(bufhead))
30145 return NULL;
30146
30147@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
30148 {
30149 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
30150
30151- if (!user_mode_vm(regs)) {
30152+ if (!user_mode(regs)) {
30153 unsigned long stack = kernel_stack_pointer(regs);
30154 if (depth)
30155 dump_trace(NULL, regs, (unsigned long *)stack, 0,
30156diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
30157index 48768df..ba9143c 100644
30158--- a/arch/x86/oprofile/nmi_int.c
30159+++ b/arch/x86/oprofile/nmi_int.c
30160@@ -23,6 +23,7 @@
30161 #include <asm/nmi.h>
30162 #include <asm/msr.h>
30163 #include <asm/apic.h>
30164+#include <asm/pgtable.h>
30165
30166 #include "op_counter.h"
30167 #include "op_x86_model.h"
30168@@ -774,8 +775,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
30169 if (ret)
30170 return ret;
30171
30172- if (!model->num_virt_counters)
30173- model->num_virt_counters = model->num_counters;
30174+ if (!model->num_virt_counters) {
30175+ pax_open_kernel();
30176+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
30177+ pax_close_kernel();
30178+ }
30179
30180 mux_init(ops);
30181
30182diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
30183index b2b9443..be58856 100644
30184--- a/arch/x86/oprofile/op_model_amd.c
30185+++ b/arch/x86/oprofile/op_model_amd.c
30186@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
30187 num_counters = AMD64_NUM_COUNTERS;
30188 }
30189
30190- op_amd_spec.num_counters = num_counters;
30191- op_amd_spec.num_controls = num_counters;
30192- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
30193+ pax_open_kernel();
30194+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
30195+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
30196+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
30197+ pax_close_kernel();
30198
30199 return 0;
30200 }
30201diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
30202index d90528e..0127e2b 100644
30203--- a/arch/x86/oprofile/op_model_ppro.c
30204+++ b/arch/x86/oprofile/op_model_ppro.c
30205@@ -19,6 +19,7 @@
30206 #include <asm/msr.h>
30207 #include <asm/apic.h>
30208 #include <asm/nmi.h>
30209+#include <asm/pgtable.h>
30210
30211 #include "op_x86_model.h"
30212 #include "op_counter.h"
30213@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
30214
30215 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
30216
30217- op_arch_perfmon_spec.num_counters = num_counters;
30218- op_arch_perfmon_spec.num_controls = num_counters;
30219+ pax_open_kernel();
30220+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
30221+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
30222+ pax_close_kernel();
30223 }
30224
30225 static int arch_perfmon_init(struct oprofile_operations *ignore)
30226diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
30227index 71e8a67..6a313bb 100644
30228--- a/arch/x86/oprofile/op_x86_model.h
30229+++ b/arch/x86/oprofile/op_x86_model.h
30230@@ -52,7 +52,7 @@ struct op_x86_model_spec {
30231 void (*switch_ctrl)(struct op_x86_model_spec const *model,
30232 struct op_msrs const * const msrs);
30233 #endif
30234-};
30235+} __do_const;
30236
30237 struct op_counter_config;
30238
30239diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
30240index e9e6ed5..e47ae67 100644
30241--- a/arch/x86/pci/amd_bus.c
30242+++ b/arch/x86/pci/amd_bus.c
30243@@ -337,7 +337,7 @@ static int __cpuinit amd_cpu_notify(struct notifier_block *self,
30244 return NOTIFY_OK;
30245 }
30246
30247-static struct notifier_block __cpuinitdata amd_cpu_notifier = {
30248+static struct notifier_block amd_cpu_notifier = {
30249 .notifier_call = amd_cpu_notify,
30250 };
30251
30252diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
30253index 372e9b8..e775a6c 100644
30254--- a/arch/x86/pci/irq.c
30255+++ b/arch/x86/pci/irq.c
30256@@ -50,7 +50,7 @@ struct irq_router {
30257 struct irq_router_handler {
30258 u16 vendor;
30259 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
30260-};
30261+} __do_const;
30262
30263 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
30264 void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL;
30265@@ -794,7 +794,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
30266 return 0;
30267 }
30268
30269-static __initdata struct irq_router_handler pirq_routers[] = {
30270+static __initconst const struct irq_router_handler pirq_routers[] = {
30271 { PCI_VENDOR_ID_INTEL, intel_router_probe },
30272 { PCI_VENDOR_ID_AL, ali_router_probe },
30273 { PCI_VENDOR_ID_ITE, ite_router_probe },
30274@@ -821,7 +821,7 @@ static struct pci_dev *pirq_router_dev;
30275 static void __init pirq_find_router(struct irq_router *r)
30276 {
30277 struct irq_routing_table *rt = pirq_table;
30278- struct irq_router_handler *h;
30279+ const struct irq_router_handler *h;
30280
30281 #ifdef CONFIG_PCI_BIOS
30282 if (!rt->signature) {
30283@@ -1094,7 +1094,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
30284 return 0;
30285 }
30286
30287-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
30288+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
30289 {
30290 .callback = fix_broken_hp_bios_irq9,
30291 .ident = "HP Pavilion N5400 Series Laptop",
30292diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
30293index 6eb18c4..20d83de 100644
30294--- a/arch/x86/pci/mrst.c
30295+++ b/arch/x86/pci/mrst.c
30296@@ -238,7 +238,9 @@ int __init pci_mrst_init(void)
30297 printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n");
30298 pci_mmcfg_late_init();
30299 pcibios_enable_irq = mrst_pci_irq_enable;
30300- pci_root_ops = pci_mrst_ops;
30301+ pax_open_kernel();
30302+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
30303+ pax_close_kernel();
30304 pci_soc_mode = 1;
30305 /* Continue with standard init */
30306 return 1;
30307diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
30308index c77b24a..c979855 100644
30309--- a/arch/x86/pci/pcbios.c
30310+++ b/arch/x86/pci/pcbios.c
30311@@ -79,7 +79,7 @@ union bios32 {
30312 static struct {
30313 unsigned long address;
30314 unsigned short segment;
30315-} bios32_indirect = { 0, __KERNEL_CS };
30316+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
30317
30318 /*
30319 * Returns the entry point for the given service, NULL on error
30320@@ -92,37 +92,80 @@ static unsigned long bios32_service(unsigned long service)
30321 unsigned long length; /* %ecx */
30322 unsigned long entry; /* %edx */
30323 unsigned long flags;
30324+ struct desc_struct d, *gdt;
30325
30326 local_irq_save(flags);
30327- __asm__("lcall *(%%edi); cld"
30328+
30329+ gdt = get_cpu_gdt_table(smp_processor_id());
30330+
30331+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
30332+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
30333+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
30334+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
30335+
30336+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
30337 : "=a" (return_code),
30338 "=b" (address),
30339 "=c" (length),
30340 "=d" (entry)
30341 : "0" (service),
30342 "1" (0),
30343- "D" (&bios32_indirect));
30344+ "D" (&bios32_indirect),
30345+ "r"(__PCIBIOS_DS)
30346+ : "memory");
30347+
30348+ pax_open_kernel();
30349+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
30350+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
30351+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
30352+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
30353+ pax_close_kernel();
30354+
30355 local_irq_restore(flags);
30356
30357 switch (return_code) {
30358- case 0:
30359- return address + entry;
30360- case 0x80: /* Not present */
30361- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
30362- return 0;
30363- default: /* Shouldn't happen */
30364- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
30365- service, return_code);
30366+ case 0: {
30367+ int cpu;
30368+ unsigned char flags;
30369+
30370+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
30371+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
30372+ printk(KERN_WARNING "bios32_service: not valid\n");
30373 return 0;
30374+ }
30375+ address = address + PAGE_OFFSET;
30376+ length += 16UL; /* some BIOSs underreport this... */
30377+ flags = 4;
30378+ if (length >= 64*1024*1024) {
30379+ length >>= PAGE_SHIFT;
30380+ flags |= 8;
30381+ }
30382+
30383+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
30384+ gdt = get_cpu_gdt_table(cpu);
30385+ pack_descriptor(&d, address, length, 0x9b, flags);
30386+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
30387+ pack_descriptor(&d, address, length, 0x93, flags);
30388+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
30389+ }
30390+ return entry;
30391+ }
30392+ case 0x80: /* Not present */
30393+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
30394+ return 0;
30395+ default: /* Shouldn't happen */
30396+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
30397+ service, return_code);
30398+ return 0;
30399 }
30400 }
30401
30402 static struct {
30403 unsigned long address;
30404 unsigned short segment;
30405-} pci_indirect = { 0, __KERNEL_CS };
30406+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
30407
30408-static int pci_bios_present;
30409+static int pci_bios_present __read_only;
30410
30411 static int check_pcibios(void)
30412 {
30413@@ -131,11 +174,13 @@ static int check_pcibios(void)
30414 unsigned long flags, pcibios_entry;
30415
30416 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
30417- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
30418+ pci_indirect.address = pcibios_entry;
30419
30420 local_irq_save(flags);
30421- __asm__(
30422- "lcall *(%%edi); cld\n\t"
30423+ __asm__("movw %w6, %%ds\n\t"
30424+ "lcall *%%ss:(%%edi); cld\n\t"
30425+ "push %%ss\n\t"
30426+ "pop %%ds\n\t"
30427 "jc 1f\n\t"
30428 "xor %%ah, %%ah\n"
30429 "1:"
30430@@ -144,7 +189,8 @@ static int check_pcibios(void)
30431 "=b" (ebx),
30432 "=c" (ecx)
30433 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
30434- "D" (&pci_indirect)
30435+ "D" (&pci_indirect),
30436+ "r" (__PCIBIOS_DS)
30437 : "memory");
30438 local_irq_restore(flags);
30439
30440@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30441
30442 switch (len) {
30443 case 1:
30444- __asm__("lcall *(%%esi); cld\n\t"
30445+ __asm__("movw %w6, %%ds\n\t"
30446+ "lcall *%%ss:(%%esi); cld\n\t"
30447+ "push %%ss\n\t"
30448+ "pop %%ds\n\t"
30449 "jc 1f\n\t"
30450 "xor %%ah, %%ah\n"
30451 "1:"
30452@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30453 : "1" (PCIBIOS_READ_CONFIG_BYTE),
30454 "b" (bx),
30455 "D" ((long)reg),
30456- "S" (&pci_indirect));
30457+ "S" (&pci_indirect),
30458+ "r" (__PCIBIOS_DS));
30459 /*
30460 * Zero-extend the result beyond 8 bits, do not trust the
30461 * BIOS having done it:
30462@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30463 *value &= 0xff;
30464 break;
30465 case 2:
30466- __asm__("lcall *(%%esi); cld\n\t"
30467+ __asm__("movw %w6, %%ds\n\t"
30468+ "lcall *%%ss:(%%esi); cld\n\t"
30469+ "push %%ss\n\t"
30470+ "pop %%ds\n\t"
30471 "jc 1f\n\t"
30472 "xor %%ah, %%ah\n"
30473 "1:"
30474@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30475 : "1" (PCIBIOS_READ_CONFIG_WORD),
30476 "b" (bx),
30477 "D" ((long)reg),
30478- "S" (&pci_indirect));
30479+ "S" (&pci_indirect),
30480+ "r" (__PCIBIOS_DS));
30481 /*
30482 * Zero-extend the result beyond 16 bits, do not trust the
30483 * BIOS having done it:
30484@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30485 *value &= 0xffff;
30486 break;
30487 case 4:
30488- __asm__("lcall *(%%esi); cld\n\t"
30489+ __asm__("movw %w6, %%ds\n\t"
30490+ "lcall *%%ss:(%%esi); cld\n\t"
30491+ "push %%ss\n\t"
30492+ "pop %%ds\n\t"
30493 "jc 1f\n\t"
30494 "xor %%ah, %%ah\n"
30495 "1:"
30496@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30497 : "1" (PCIBIOS_READ_CONFIG_DWORD),
30498 "b" (bx),
30499 "D" ((long)reg),
30500- "S" (&pci_indirect));
30501+ "S" (&pci_indirect),
30502+ "r" (__PCIBIOS_DS));
30503 break;
30504 }
30505
30506@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
30507
30508 switch (len) {
30509 case 1:
30510- __asm__("lcall *(%%esi); cld\n\t"
30511+ __asm__("movw %w6, %%ds\n\t"
30512+ "lcall *%%ss:(%%esi); cld\n\t"
30513+ "push %%ss\n\t"
30514+ "pop %%ds\n\t"
30515 "jc 1f\n\t"
30516 "xor %%ah, %%ah\n"
30517 "1:"
30518@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
30519 "c" (value),
30520 "b" (bx),
30521 "D" ((long)reg),
30522- "S" (&pci_indirect));
30523+ "S" (&pci_indirect),
30524+ "r" (__PCIBIOS_DS));
30525 break;
30526 case 2:
30527- __asm__("lcall *(%%esi); cld\n\t"
30528+ __asm__("movw %w6, %%ds\n\t"
30529+ "lcall *%%ss:(%%esi); cld\n\t"
30530+ "push %%ss\n\t"
30531+ "pop %%ds\n\t"
30532 "jc 1f\n\t"
30533 "xor %%ah, %%ah\n"
30534 "1:"
30535@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
30536 "c" (value),
30537 "b" (bx),
30538 "D" ((long)reg),
30539- "S" (&pci_indirect));
30540+ "S" (&pci_indirect),
30541+ "r" (__PCIBIOS_DS));
30542 break;
30543 case 4:
30544- __asm__("lcall *(%%esi); cld\n\t"
30545+ __asm__("movw %w6, %%ds\n\t"
30546+ "lcall *%%ss:(%%esi); cld\n\t"
30547+ "push %%ss\n\t"
30548+ "pop %%ds\n\t"
30549 "jc 1f\n\t"
30550 "xor %%ah, %%ah\n"
30551 "1:"
30552@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
30553 "c" (value),
30554 "b" (bx),
30555 "D" ((long)reg),
30556- "S" (&pci_indirect));
30557+ "S" (&pci_indirect),
30558+ "r" (__PCIBIOS_DS));
30559 break;
30560 }
30561
30562@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
30563
30564 DBG("PCI: Fetching IRQ routing table... ");
30565 __asm__("push %%es\n\t"
30566+ "movw %w8, %%ds\n\t"
30567 "push %%ds\n\t"
30568 "pop %%es\n\t"
30569- "lcall *(%%esi); cld\n\t"
30570+ "lcall *%%ss:(%%esi); cld\n\t"
30571 "pop %%es\n\t"
30572+ "push %%ss\n\t"
30573+ "pop %%ds\n"
30574 "jc 1f\n\t"
30575 "xor %%ah, %%ah\n"
30576 "1:"
30577@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
30578 "1" (0),
30579 "D" ((long) &opt),
30580 "S" (&pci_indirect),
30581- "m" (opt)
30582+ "m" (opt),
30583+ "r" (__PCIBIOS_DS)
30584 : "memory");
30585 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
30586 if (ret & 0xff00)
30587@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
30588 {
30589 int ret;
30590
30591- __asm__("lcall *(%%esi); cld\n\t"
30592+ __asm__("movw %w5, %%ds\n\t"
30593+ "lcall *%%ss:(%%esi); cld\n\t"
30594+ "push %%ss\n\t"
30595+ "pop %%ds\n"
30596 "jc 1f\n\t"
30597 "xor %%ah, %%ah\n"
30598 "1:"
30599@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
30600 : "0" (PCIBIOS_SET_PCI_HW_INT),
30601 "b" ((dev->bus->number << 8) | dev->devfn),
30602 "c" ((irq << 8) | (pin + 10)),
30603- "S" (&pci_indirect));
30604+ "S" (&pci_indirect),
30605+ "r" (__PCIBIOS_DS));
30606 return !(ret & 0xff00);
30607 }
30608 EXPORT_SYMBOL(pcibios_set_irq_routing);
30609diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
30610index 40e4469..1ab536e 100644
30611--- a/arch/x86/platform/efi/efi_32.c
30612+++ b/arch/x86/platform/efi/efi_32.c
30613@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
30614 {
30615 struct desc_ptr gdt_descr;
30616
30617+#ifdef CONFIG_PAX_KERNEXEC
30618+ struct desc_struct d;
30619+#endif
30620+
30621 local_irq_save(efi_rt_eflags);
30622
30623 load_cr3(initial_page_table);
30624 __flush_tlb_all();
30625
30626+#ifdef CONFIG_PAX_KERNEXEC
30627+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
30628+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
30629+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
30630+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
30631+#endif
30632+
30633 gdt_descr.address = __pa(get_cpu_gdt_table(0));
30634 gdt_descr.size = GDT_SIZE - 1;
30635 load_gdt(&gdt_descr);
30636@@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
30637 {
30638 struct desc_ptr gdt_descr;
30639
30640+#ifdef CONFIG_PAX_KERNEXEC
30641+ struct desc_struct d;
30642+
30643+ memset(&d, 0, sizeof d);
30644+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
30645+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
30646+#endif
30647+
30648 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
30649 gdt_descr.size = GDT_SIZE - 1;
30650 load_gdt(&gdt_descr);
30651diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
30652index fbe66e6..eae5e38 100644
30653--- a/arch/x86/platform/efi/efi_stub_32.S
30654+++ b/arch/x86/platform/efi/efi_stub_32.S
30655@@ -6,7 +6,9 @@
30656 */
30657
30658 #include <linux/linkage.h>
30659+#include <linux/init.h>
30660 #include <asm/page_types.h>
30661+#include <asm/segment.h>
30662
30663 /*
30664 * efi_call_phys(void *, ...) is a function with variable parameters.
30665@@ -20,7 +22,7 @@
30666 * service functions will comply with gcc calling convention, too.
30667 */
30668
30669-.text
30670+__INIT
30671 ENTRY(efi_call_phys)
30672 /*
30673 * 0. The function can only be called in Linux kernel. So CS has been
30674@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
30675 * The mapping of lower virtual memory has been created in prelog and
30676 * epilog.
30677 */
30678- movl $1f, %edx
30679- subl $__PAGE_OFFSET, %edx
30680- jmp *%edx
30681+#ifdef CONFIG_PAX_KERNEXEC
30682+ movl $(__KERNEXEC_EFI_DS), %edx
30683+ mov %edx, %ds
30684+ mov %edx, %es
30685+ mov %edx, %ss
30686+ addl $2f,(1f)
30687+ ljmp *(1f)
30688+
30689+__INITDATA
30690+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
30691+.previous
30692+
30693+2:
30694+ subl $2b,(1b)
30695+#else
30696+ jmp 1f-__PAGE_OFFSET
30697 1:
30698+#endif
30699
30700 /*
30701 * 2. Now on the top of stack is the return
30702@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
30703 * parameter 2, ..., param n. To make things easy, we save the return
30704 * address of efi_call_phys in a global variable.
30705 */
30706- popl %edx
30707- movl %edx, saved_return_addr
30708- /* get the function pointer into ECX*/
30709- popl %ecx
30710- movl %ecx, efi_rt_function_ptr
30711- movl $2f, %edx
30712- subl $__PAGE_OFFSET, %edx
30713- pushl %edx
30714+ popl (saved_return_addr)
30715+ popl (efi_rt_function_ptr)
30716
30717 /*
30718 * 3. Clear PG bit in %CR0.
30719@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
30720 /*
30721 * 5. Call the physical function.
30722 */
30723- jmp *%ecx
30724+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
30725
30726-2:
30727 /*
30728 * 6. After EFI runtime service returns, control will return to
30729 * following instruction. We'd better readjust stack pointer first.
30730@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
30731 movl %cr0, %edx
30732 orl $0x80000000, %edx
30733 movl %edx, %cr0
30734- jmp 1f
30735-1:
30736+
30737 /*
30738 * 8. Now restore the virtual mode from flat mode by
30739 * adding EIP with PAGE_OFFSET.
30740 */
30741- movl $1f, %edx
30742- jmp *%edx
30743+#ifdef CONFIG_PAX_KERNEXEC
30744+ movl $(__KERNEL_DS), %edx
30745+ mov %edx, %ds
30746+ mov %edx, %es
30747+ mov %edx, %ss
30748+ ljmp $(__KERNEL_CS),$1f
30749+#else
30750+ jmp 1f+__PAGE_OFFSET
30751+#endif
30752 1:
30753
30754 /*
30755 * 9. Balance the stack. And because EAX contain the return value,
30756 * we'd better not clobber it.
30757 */
30758- leal efi_rt_function_ptr, %edx
30759- movl (%edx), %ecx
30760- pushl %ecx
30761+ pushl (efi_rt_function_ptr)
30762
30763 /*
30764- * 10. Push the saved return address onto the stack and return.
30765+ * 10. Return to the saved return address.
30766 */
30767- leal saved_return_addr, %edx
30768- movl (%edx), %ecx
30769- pushl %ecx
30770- ret
30771+ jmpl *(saved_return_addr)
30772 ENDPROC(efi_call_phys)
30773 .previous
30774
30775-.data
30776+__INITDATA
30777 saved_return_addr:
30778 .long 0
30779 efi_rt_function_ptr:
30780diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
30781index 4c07cca..2c8427d 100644
30782--- a/arch/x86/platform/efi/efi_stub_64.S
30783+++ b/arch/x86/platform/efi/efi_stub_64.S
30784@@ -7,6 +7,7 @@
30785 */
30786
30787 #include <linux/linkage.h>
30788+#include <asm/alternative-asm.h>
30789
30790 #define SAVE_XMM \
30791 mov %rsp, %rax; \
30792@@ -40,6 +41,7 @@ ENTRY(efi_call0)
30793 call *%rdi
30794 addq $32, %rsp
30795 RESTORE_XMM
30796+ pax_force_retaddr 0, 1
30797 ret
30798 ENDPROC(efi_call0)
30799
30800@@ -50,6 +52,7 @@ ENTRY(efi_call1)
30801 call *%rdi
30802 addq $32, %rsp
30803 RESTORE_XMM
30804+ pax_force_retaddr 0, 1
30805 ret
30806 ENDPROC(efi_call1)
30807
30808@@ -60,6 +63,7 @@ ENTRY(efi_call2)
30809 call *%rdi
30810 addq $32, %rsp
30811 RESTORE_XMM
30812+ pax_force_retaddr 0, 1
30813 ret
30814 ENDPROC(efi_call2)
30815
30816@@ -71,6 +75,7 @@ ENTRY(efi_call3)
30817 call *%rdi
30818 addq $32, %rsp
30819 RESTORE_XMM
30820+ pax_force_retaddr 0, 1
30821 ret
30822 ENDPROC(efi_call3)
30823
30824@@ -83,6 +88,7 @@ ENTRY(efi_call4)
30825 call *%rdi
30826 addq $32, %rsp
30827 RESTORE_XMM
30828+ pax_force_retaddr 0, 1
30829 ret
30830 ENDPROC(efi_call4)
30831
30832@@ -96,6 +102,7 @@ ENTRY(efi_call5)
30833 call *%rdi
30834 addq $48, %rsp
30835 RESTORE_XMM
30836+ pax_force_retaddr 0, 1
30837 ret
30838 ENDPROC(efi_call5)
30839
30840@@ -112,5 +119,6 @@ ENTRY(efi_call6)
30841 call *%rdi
30842 addq $48, %rsp
30843 RESTORE_XMM
30844+ pax_force_retaddr 0, 1
30845 ret
30846 ENDPROC(efi_call6)
30847diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
30848index e31bcd8..f12dc46 100644
30849--- a/arch/x86/platform/mrst/mrst.c
30850+++ b/arch/x86/platform/mrst/mrst.c
30851@@ -78,13 +78,15 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
30852 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
30853 int sfi_mrtc_num;
30854
30855-static void mrst_power_off(void)
30856+static __noreturn void mrst_power_off(void)
30857 {
30858+ BUG();
30859 }
30860
30861-static void mrst_reboot(void)
30862+static __noreturn void mrst_reboot(void)
30863 {
30864 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
30865+ BUG();
30866 }
30867
30868 /* parse all the mtimer info to a static mtimer array */
30869diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
30870index d6ee929..3637cb5 100644
30871--- a/arch/x86/platform/olpc/olpc_dt.c
30872+++ b/arch/x86/platform/olpc/olpc_dt.c
30873@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
30874 return res;
30875 }
30876
30877-static struct of_pdt_ops prom_olpc_ops __initdata = {
30878+static struct of_pdt_ops prom_olpc_ops __initconst = {
30879 .nextprop = olpc_dt_nextprop,
30880 .getproplen = olpc_dt_getproplen,
30881 .getproperty = olpc_dt_getproperty,
30882diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
30883index 3c68768..07e82b8 100644
30884--- a/arch/x86/power/cpu.c
30885+++ b/arch/x86/power/cpu.c
30886@@ -134,7 +134,7 @@ static void do_fpu_end(void)
30887 static void fix_processor_context(void)
30888 {
30889 int cpu = smp_processor_id();
30890- struct tss_struct *t = &per_cpu(init_tss, cpu);
30891+ struct tss_struct *t = init_tss + cpu;
30892
30893 set_tss_desc(cpu, t); /*
30894 * This just modifies memory; should not be
30895@@ -144,8 +144,6 @@ static void fix_processor_context(void)
30896 */
30897
30898 #ifdef CONFIG_X86_64
30899- get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
30900-
30901 syscall_init(); /* This sets MSR_*STAR and related */
30902 #endif
30903 load_TR_desc(); /* This does ltr */
30904diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
30905index cbca565..bae7133 100644
30906--- a/arch/x86/realmode/init.c
30907+++ b/arch/x86/realmode/init.c
30908@@ -62,7 +62,13 @@ void __init setup_real_mode(void)
30909 __va(real_mode_header->trampoline_header);
30910
30911 #ifdef CONFIG_X86_32
30912- trampoline_header->start = __pa(startup_32_smp);
30913+ trampoline_header->start = __pa(ktla_ktva(startup_32_smp));
30914+
30915+#ifdef CONFIG_PAX_KERNEXEC
30916+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
30917+#endif
30918+
30919+ trampoline_header->boot_cs = __BOOT_CS;
30920 trampoline_header->gdt_limit = __BOOT_DS + 7;
30921 trampoline_header->gdt_base = __pa(boot_gdt);
30922 #else
30923diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
30924index 8869287..d577672 100644
30925--- a/arch/x86/realmode/rm/Makefile
30926+++ b/arch/x86/realmode/rm/Makefile
30927@@ -78,5 +78,8 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
30928 $(call cc-option, -fno-unit-at-a-time)) \
30929 $(call cc-option, -fno-stack-protector) \
30930 $(call cc-option, -mpreferred-stack-boundary=2)
30931+ifdef CONSTIFY_PLUGIN
30932+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
30933+endif
30934 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
30935 GCOV_PROFILE := n
30936diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
30937index a28221d..93c40f1 100644
30938--- a/arch/x86/realmode/rm/header.S
30939+++ b/arch/x86/realmode/rm/header.S
30940@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
30941 #endif
30942 /* APM/BIOS reboot */
30943 .long pa_machine_real_restart_asm
30944-#ifdef CONFIG_X86_64
30945+#ifdef CONFIG_X86_32
30946+ .long __KERNEL_CS
30947+#else
30948 .long __KERNEL32_CS
30949 #endif
30950 END(real_mode_header)
30951diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
30952index c1b2791..f9e31c7 100644
30953--- a/arch/x86/realmode/rm/trampoline_32.S
30954+++ b/arch/x86/realmode/rm/trampoline_32.S
30955@@ -25,6 +25,12 @@
30956 #include <asm/page_types.h>
30957 #include "realmode.h"
30958
30959+#ifdef CONFIG_PAX_KERNEXEC
30960+#define ta(X) (X)
30961+#else
30962+#define ta(X) (pa_ ## X)
30963+#endif
30964+
30965 .text
30966 .code16
30967
30968@@ -39,8 +45,6 @@ ENTRY(trampoline_start)
30969
30970 cli # We should be safe anyway
30971
30972- movl tr_start, %eax # where we need to go
30973-
30974 movl $0xA5A5A5A5, trampoline_status
30975 # write marker for master knows we're running
30976
30977@@ -56,7 +60,7 @@ ENTRY(trampoline_start)
30978 movw $1, %dx # protected mode (PE) bit
30979 lmsw %dx # into protected mode
30980
30981- ljmpl $__BOOT_CS, $pa_startup_32
30982+ ljmpl *(trampoline_header)
30983
30984 .section ".text32","ax"
30985 .code32
30986@@ -67,7 +71,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
30987 .balign 8
30988 GLOBAL(trampoline_header)
30989 tr_start: .space 4
30990- tr_gdt_pad: .space 2
30991+ tr_boot_cs: .space 2
30992 tr_gdt: .space 6
30993 END(trampoline_header)
30994
30995diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
30996index bb360dc..3e5945f 100644
30997--- a/arch/x86/realmode/rm/trampoline_64.S
30998+++ b/arch/x86/realmode/rm/trampoline_64.S
30999@@ -107,7 +107,7 @@ ENTRY(startup_32)
31000 wrmsr
31001
31002 # Enable paging and in turn activate Long Mode
31003- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
31004+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
31005 movl %eax, %cr0
31006
31007 /*
31008diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
31009index 79d67bd..c7e1b90 100644
31010--- a/arch/x86/tools/relocs.c
31011+++ b/arch/x86/tools/relocs.c
31012@@ -12,10 +12,13 @@
31013 #include <regex.h>
31014 #include <tools/le_byteshift.h>
31015
31016+#include "../../../include/generated/autoconf.h"
31017+
31018 static void die(char *fmt, ...);
31019
31020 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
31021 static Elf32_Ehdr ehdr;
31022+static Elf32_Phdr *phdr;
31023 static unsigned long reloc_count, reloc_idx;
31024 static unsigned long *relocs;
31025 static unsigned long reloc16_count, reloc16_idx;
31026@@ -330,9 +333,39 @@ static void read_ehdr(FILE *fp)
31027 }
31028 }
31029
31030+static void read_phdrs(FILE *fp)
31031+{
31032+ unsigned int i;
31033+
31034+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
31035+ if (!phdr) {
31036+ die("Unable to allocate %d program headers\n",
31037+ ehdr.e_phnum);
31038+ }
31039+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
31040+ die("Seek to %d failed: %s\n",
31041+ ehdr.e_phoff, strerror(errno));
31042+ }
31043+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
31044+ die("Cannot read ELF program headers: %s\n",
31045+ strerror(errno));
31046+ }
31047+ for(i = 0; i < ehdr.e_phnum; i++) {
31048+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
31049+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
31050+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
31051+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
31052+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
31053+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
31054+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
31055+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
31056+ }
31057+
31058+}
31059+
31060 static void read_shdrs(FILE *fp)
31061 {
31062- int i;
31063+ unsigned int i;
31064 Elf32_Shdr shdr;
31065
31066 secs = calloc(ehdr.e_shnum, sizeof(struct section));
31067@@ -367,7 +400,7 @@ static void read_shdrs(FILE *fp)
31068
31069 static void read_strtabs(FILE *fp)
31070 {
31071- int i;
31072+ unsigned int i;
31073 for (i = 0; i < ehdr.e_shnum; i++) {
31074 struct section *sec = &secs[i];
31075 if (sec->shdr.sh_type != SHT_STRTAB) {
31076@@ -392,7 +425,7 @@ static void read_strtabs(FILE *fp)
31077
31078 static void read_symtabs(FILE *fp)
31079 {
31080- int i,j;
31081+ unsigned int i,j;
31082 for (i = 0; i < ehdr.e_shnum; i++) {
31083 struct section *sec = &secs[i];
31084 if (sec->shdr.sh_type != SHT_SYMTAB) {
31085@@ -423,9 +456,11 @@ static void read_symtabs(FILE *fp)
31086 }
31087
31088
31089-static void read_relocs(FILE *fp)
31090+static void read_relocs(FILE *fp, int use_real_mode)
31091 {
31092- int i,j;
31093+ unsigned int i,j;
31094+ uint32_t base;
31095+
31096 for (i = 0; i < ehdr.e_shnum; i++) {
31097 struct section *sec = &secs[i];
31098 if (sec->shdr.sh_type != SHT_REL) {
31099@@ -445,9 +480,22 @@ static void read_relocs(FILE *fp)
31100 die("Cannot read symbol table: %s\n",
31101 strerror(errno));
31102 }
31103+ base = 0;
31104+
31105+#ifdef CONFIG_X86_32
31106+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
31107+ if (phdr[j].p_type != PT_LOAD )
31108+ continue;
31109+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
31110+ continue;
31111+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
31112+ break;
31113+ }
31114+#endif
31115+
31116 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
31117 Elf32_Rel *rel = &sec->reltab[j];
31118- rel->r_offset = elf32_to_cpu(rel->r_offset);
31119+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
31120 rel->r_info = elf32_to_cpu(rel->r_info);
31121 }
31122 }
31123@@ -456,13 +504,13 @@ static void read_relocs(FILE *fp)
31124
31125 static void print_absolute_symbols(void)
31126 {
31127- int i;
31128+ unsigned int i;
31129 printf("Absolute symbols\n");
31130 printf(" Num: Value Size Type Bind Visibility Name\n");
31131 for (i = 0; i < ehdr.e_shnum; i++) {
31132 struct section *sec = &secs[i];
31133 char *sym_strtab;
31134- int j;
31135+ unsigned int j;
31136
31137 if (sec->shdr.sh_type != SHT_SYMTAB) {
31138 continue;
31139@@ -489,14 +537,14 @@ static void print_absolute_symbols(void)
31140
31141 static void print_absolute_relocs(void)
31142 {
31143- int i, printed = 0;
31144+ unsigned int i, printed = 0;
31145
31146 for (i = 0; i < ehdr.e_shnum; i++) {
31147 struct section *sec = &secs[i];
31148 struct section *sec_applies, *sec_symtab;
31149 char *sym_strtab;
31150 Elf32_Sym *sh_symtab;
31151- int j;
31152+ unsigned int j;
31153 if (sec->shdr.sh_type != SHT_REL) {
31154 continue;
31155 }
31156@@ -558,13 +606,13 @@ static void print_absolute_relocs(void)
31157 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
31158 int use_real_mode)
31159 {
31160- int i;
31161+ unsigned int i;
31162 /* Walk through the relocations */
31163 for (i = 0; i < ehdr.e_shnum; i++) {
31164 char *sym_strtab;
31165 Elf32_Sym *sh_symtab;
31166 struct section *sec_applies, *sec_symtab;
31167- int j;
31168+ unsigned int j;
31169 struct section *sec = &secs[i];
31170
31171 if (sec->shdr.sh_type != SHT_REL) {
31172@@ -588,6 +636,24 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
31173 sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
31174 r_type = ELF32_R_TYPE(rel->r_info);
31175
31176+ if (!use_real_mode) {
31177+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
31178+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
31179+ continue;
31180+
31181+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
31182+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
31183+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
31184+ continue;
31185+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
31186+ continue;
31187+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
31188+ continue;
31189+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
31190+ continue;
31191+#endif
31192+ }
31193+
31194 shn_abs = sym->st_shndx == SHN_ABS;
31195
31196 switch (r_type) {
31197@@ -681,7 +747,7 @@ static int write32(unsigned int v, FILE *f)
31198
31199 static void emit_relocs(int as_text, int use_real_mode)
31200 {
31201- int i;
31202+ unsigned int i;
31203 /* Count how many relocations I have and allocate space for them. */
31204 reloc_count = 0;
31205 walk_relocs(count_reloc, use_real_mode);
31206@@ -808,10 +874,11 @@ int main(int argc, char **argv)
31207 fname, strerror(errno));
31208 }
31209 read_ehdr(fp);
31210+ read_phdrs(fp);
31211 read_shdrs(fp);
31212 read_strtabs(fp);
31213 read_symtabs(fp);
31214- read_relocs(fp);
31215+ read_relocs(fp, use_real_mode);
31216 if (show_absolute_syms) {
31217 print_absolute_symbols();
31218 goto out;
31219diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
31220index fd14be1..e3c79c0 100644
31221--- a/arch/x86/vdso/Makefile
31222+++ b/arch/x86/vdso/Makefile
31223@@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO $@
31224 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
31225 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
31226
31227-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
31228+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
31229 GCOV_PROFILE := n
31230
31231 #
31232diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
31233index 0faad64..39ef157 100644
31234--- a/arch/x86/vdso/vdso32-setup.c
31235+++ b/arch/x86/vdso/vdso32-setup.c
31236@@ -25,6 +25,7 @@
31237 #include <asm/tlbflush.h>
31238 #include <asm/vdso.h>
31239 #include <asm/proto.h>
31240+#include <asm/mman.h>
31241
31242 enum {
31243 VDSO_DISABLED = 0,
31244@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
31245 void enable_sep_cpu(void)
31246 {
31247 int cpu = get_cpu();
31248- struct tss_struct *tss = &per_cpu(init_tss, cpu);
31249+ struct tss_struct *tss = init_tss + cpu;
31250
31251 if (!boot_cpu_has(X86_FEATURE_SEP)) {
31252 put_cpu();
31253@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
31254 gate_vma.vm_start = FIXADDR_USER_START;
31255 gate_vma.vm_end = FIXADDR_USER_END;
31256 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
31257- gate_vma.vm_page_prot = __P101;
31258+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
31259
31260 return 0;
31261 }
31262@@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
31263 if (compat)
31264 addr = VDSO_HIGH_BASE;
31265 else {
31266- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
31267+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
31268 if (IS_ERR_VALUE(addr)) {
31269 ret = addr;
31270 goto up_fail;
31271 }
31272 }
31273
31274- current->mm->context.vdso = (void *)addr;
31275+ current->mm->context.vdso = addr;
31276
31277 if (compat_uses_vma || !compat) {
31278 /*
31279@@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
31280 }
31281
31282 current_thread_info()->sysenter_return =
31283- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
31284+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
31285
31286 up_fail:
31287 if (ret)
31288- current->mm->context.vdso = NULL;
31289+ current->mm->context.vdso = 0;
31290
31291 up_write(&mm->mmap_sem);
31292
31293@@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init);
31294
31295 const char *arch_vma_name(struct vm_area_struct *vma)
31296 {
31297- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
31298+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
31299 return "[vdso]";
31300+
31301+#ifdef CONFIG_PAX_SEGMEXEC
31302+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
31303+ return "[vdso]";
31304+#endif
31305+
31306 return NULL;
31307 }
31308
31309@@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
31310 * Check to see if the corresponding task was created in compat vdso
31311 * mode.
31312 */
31313- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
31314+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
31315 return &gate_vma;
31316 return NULL;
31317 }
31318diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
31319index 431e875..cbb23f3 100644
31320--- a/arch/x86/vdso/vma.c
31321+++ b/arch/x86/vdso/vma.c
31322@@ -16,8 +16,6 @@
31323 #include <asm/vdso.h>
31324 #include <asm/page.h>
31325
31326-unsigned int __read_mostly vdso_enabled = 1;
31327-
31328 extern char vdso_start[], vdso_end[];
31329 extern unsigned short vdso_sync_cpuid;
31330
31331@@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
31332 * unaligned here as a result of stack start randomization.
31333 */
31334 addr = PAGE_ALIGN(addr);
31335- addr = align_vdso_addr(addr);
31336
31337 return addr;
31338 }
31339@@ -154,30 +151,31 @@ static int setup_additional_pages(struct linux_binprm *bprm,
31340 unsigned size)
31341 {
31342 struct mm_struct *mm = current->mm;
31343- unsigned long addr;
31344+ unsigned long addr = 0;
31345 int ret;
31346
31347- if (!vdso_enabled)
31348- return 0;
31349-
31350 down_write(&mm->mmap_sem);
31351+
31352+#ifdef CONFIG_PAX_RANDMMAP
31353+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
31354+#endif
31355+
31356 addr = vdso_addr(mm->start_stack, size);
31357+ addr = align_vdso_addr(addr);
31358 addr = get_unmapped_area(NULL, addr, size, 0, 0);
31359 if (IS_ERR_VALUE(addr)) {
31360 ret = addr;
31361 goto up_fail;
31362 }
31363
31364- current->mm->context.vdso = (void *)addr;
31365+ mm->context.vdso = addr;
31366
31367 ret = install_special_mapping(mm, addr, size,
31368 VM_READ|VM_EXEC|
31369 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
31370 pages);
31371- if (ret) {
31372- current->mm->context.vdso = NULL;
31373- goto up_fail;
31374- }
31375+ if (ret)
31376+ mm->context.vdso = 0;
31377
31378 up_fail:
31379 up_write(&mm->mmap_sem);
31380@@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
31381 vdsox32_size);
31382 }
31383 #endif
31384-
31385-static __init int vdso_setup(char *s)
31386-{
31387- vdso_enabled = simple_strtoul(s, NULL, 0);
31388- return 0;
31389-}
31390-__setup("vdso=", vdso_setup);
31391diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
31392index 2262003..f229ced 100644
31393--- a/arch/x86/xen/enlighten.c
31394+++ b/arch/x86/xen/enlighten.c
31395@@ -100,8 +100,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
31396
31397 struct shared_info xen_dummy_shared_info;
31398
31399-void *xen_initial_gdt;
31400-
31401 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
31402 __read_mostly int xen_have_vector_callback;
31403 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
31404@@ -496,8 +494,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
31405 {
31406 unsigned long va = dtr->address;
31407 unsigned int size = dtr->size + 1;
31408- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
31409- unsigned long frames[pages];
31410+ unsigned long frames[65536 / PAGE_SIZE];
31411 int f;
31412
31413 /*
31414@@ -545,8 +542,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
31415 {
31416 unsigned long va = dtr->address;
31417 unsigned int size = dtr->size + 1;
31418- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
31419- unsigned long frames[pages];
31420+ unsigned long frames[65536 / PAGE_SIZE];
31421 int f;
31422
31423 /*
31424@@ -939,7 +935,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
31425 return 0;
31426 }
31427
31428-static void set_xen_basic_apic_ops(void)
31429+static void __init set_xen_basic_apic_ops(void)
31430 {
31431 apic->read = xen_apic_read;
31432 apic->write = xen_apic_write;
31433@@ -1245,30 +1241,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
31434 #endif
31435 };
31436
31437-static void xen_reboot(int reason)
31438+static __noreturn void xen_reboot(int reason)
31439 {
31440 struct sched_shutdown r = { .reason = reason };
31441
31442- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
31443- BUG();
31444+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
31445+ BUG();
31446 }
31447
31448-static void xen_restart(char *msg)
31449+static __noreturn void xen_restart(char *msg)
31450 {
31451 xen_reboot(SHUTDOWN_reboot);
31452 }
31453
31454-static void xen_emergency_restart(void)
31455+static __noreturn void xen_emergency_restart(void)
31456 {
31457 xen_reboot(SHUTDOWN_reboot);
31458 }
31459
31460-static void xen_machine_halt(void)
31461+static __noreturn void xen_machine_halt(void)
31462 {
31463 xen_reboot(SHUTDOWN_poweroff);
31464 }
31465
31466-static void xen_machine_power_off(void)
31467+static __noreturn void xen_machine_power_off(void)
31468 {
31469 if (pm_power_off)
31470 pm_power_off();
31471@@ -1370,7 +1366,17 @@ asmlinkage void __init xen_start_kernel(void)
31472 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
31473
31474 /* Work out if we support NX */
31475- x86_configure_nx();
31476+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
31477+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
31478+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
31479+ unsigned l, h;
31480+
31481+ __supported_pte_mask |= _PAGE_NX;
31482+ rdmsr(MSR_EFER, l, h);
31483+ l |= EFER_NX;
31484+ wrmsr(MSR_EFER, l, h);
31485+ }
31486+#endif
31487
31488 xen_setup_features();
31489
31490@@ -1399,14 +1405,7 @@ asmlinkage void __init xen_start_kernel(void)
31491 pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
31492 }
31493
31494- machine_ops = xen_machine_ops;
31495-
31496- /*
31497- * The only reliable way to retain the initial address of the
31498- * percpu gdt_page is to remember it here, so we can go and
31499- * mark it RW later, when the initial percpu area is freed.
31500- */
31501- xen_initial_gdt = &per_cpu(gdt_page, 0);
31502+ memcpy((void *)&machine_ops, &xen_machine_ops, sizeof machine_ops);
31503
31504 xen_smp_init();
31505
31506@@ -1598,7 +1597,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
31507 return NOTIFY_OK;
31508 }
31509
31510-static struct notifier_block xen_hvm_cpu_notifier __cpuinitdata = {
31511+static struct notifier_block xen_hvm_cpu_notifier = {
31512 .notifier_call = xen_hvm_cpu_notify,
31513 };
31514
31515diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
31516index 01de35c..0bda07b 100644
31517--- a/arch/x86/xen/mmu.c
31518+++ b/arch/x86/xen/mmu.c
31519@@ -1881,6 +1881,9 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
31520 /* L3_k[510] -> level2_kernel_pgt
31521 * L3_i[511] -> level2_fixmap_pgt */
31522 convert_pfn_mfn(level3_kernel_pgt);
31523+ convert_pfn_mfn(level3_vmalloc_start_pgt);
31524+ convert_pfn_mfn(level3_vmalloc_end_pgt);
31525+ convert_pfn_mfn(level3_vmemmap_pgt);
31526
31527 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
31528 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
31529@@ -1910,8 +1913,12 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
31530 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
31531 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
31532 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
31533+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
31534+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
31535+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
31536 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
31537 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
31538+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
31539 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
31540 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
31541
31542@@ -2097,6 +2104,7 @@ static void __init xen_post_allocator_init(void)
31543 pv_mmu_ops.set_pud = xen_set_pud;
31544 #if PAGETABLE_LEVELS == 4
31545 pv_mmu_ops.set_pgd = xen_set_pgd;
31546+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
31547 #endif
31548
31549 /* This will work as long as patching hasn't happened yet
31550@@ -2178,6 +2186,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
31551 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
31552 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
31553 .set_pgd = xen_set_pgd_hyper,
31554+ .set_pgd_batched = xen_set_pgd_hyper,
31555
31556 .alloc_pud = xen_alloc_pmd_init,
31557 .release_pud = xen_release_pmd_init,
31558diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
31559index 34bc4ce..c34aa24 100644
31560--- a/arch/x86/xen/smp.c
31561+++ b/arch/x86/xen/smp.c
31562@@ -229,11 +229,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
31563 {
31564 BUG_ON(smp_processor_id() != 0);
31565 native_smp_prepare_boot_cpu();
31566-
31567- /* We've switched to the "real" per-cpu gdt, so make sure the
31568- old memory can be recycled */
31569- make_lowmem_page_readwrite(xen_initial_gdt);
31570-
31571 xen_filter_cpu_maps();
31572 xen_setup_vcpu_info_placement();
31573 }
31574@@ -300,12 +295,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
31575 gdt = get_cpu_gdt_table(cpu);
31576
31577 ctxt->flags = VGCF_IN_KERNEL;
31578- ctxt->user_regs.ds = __USER_DS;
31579- ctxt->user_regs.es = __USER_DS;
31580+ ctxt->user_regs.ds = __KERNEL_DS;
31581+ ctxt->user_regs.es = __KERNEL_DS;
31582 ctxt->user_regs.ss = __KERNEL_DS;
31583 #ifdef CONFIG_X86_32
31584 ctxt->user_regs.fs = __KERNEL_PERCPU;
31585- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
31586+ savesegment(gs, ctxt->user_regs.gs);
31587 #else
31588 ctxt->gs_base_kernel = per_cpu_offset(cpu);
31589 #endif
31590@@ -355,13 +350,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle)
31591 int rc;
31592
31593 per_cpu(current_task, cpu) = idle;
31594+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
31595 #ifdef CONFIG_X86_32
31596 irq_ctx_init(cpu);
31597 #else
31598 clear_tsk_thread_flag(idle, TIF_FORK);
31599- per_cpu(kernel_stack, cpu) =
31600- (unsigned long)task_stack_page(idle) -
31601- KERNEL_STACK_OFFSET + THREAD_SIZE;
31602+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
31603 #endif
31604 xen_setup_runstate_info(cpu);
31605 xen_setup_timer(cpu);
31606@@ -630,7 +624,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
31607
31608 void __init xen_smp_init(void)
31609 {
31610- smp_ops = xen_smp_ops;
31611+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
31612 xen_fill_possible_map();
31613 xen_init_spinlocks();
31614 }
31615diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
31616index 33ca6e4..0ded929 100644
31617--- a/arch/x86/xen/xen-asm_32.S
31618+++ b/arch/x86/xen/xen-asm_32.S
31619@@ -84,14 +84,14 @@ ENTRY(xen_iret)
31620 ESP_OFFSET=4 # bytes pushed onto stack
31621
31622 /*
31623- * Store vcpu_info pointer for easy access. Do it this way to
31624- * avoid having to reload %fs
31625+ * Store vcpu_info pointer for easy access.
31626 */
31627 #ifdef CONFIG_SMP
31628- GET_THREAD_INFO(%eax)
31629- movl %ss:TI_cpu(%eax), %eax
31630- movl %ss:__per_cpu_offset(,%eax,4), %eax
31631- mov %ss:xen_vcpu(%eax), %eax
31632+ push %fs
31633+ mov $(__KERNEL_PERCPU), %eax
31634+ mov %eax, %fs
31635+ mov PER_CPU_VAR(xen_vcpu), %eax
31636+ pop %fs
31637 #else
31638 movl %ss:xen_vcpu, %eax
31639 #endif
31640diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
31641index 7faed58..ba4427c 100644
31642--- a/arch/x86/xen/xen-head.S
31643+++ b/arch/x86/xen/xen-head.S
31644@@ -19,6 +19,17 @@ ENTRY(startup_xen)
31645 #ifdef CONFIG_X86_32
31646 mov %esi,xen_start_info
31647 mov $init_thread_union+THREAD_SIZE,%esp
31648+#ifdef CONFIG_SMP
31649+ movl $cpu_gdt_table,%edi
31650+ movl $__per_cpu_load,%eax
31651+ movw %ax,__KERNEL_PERCPU + 2(%edi)
31652+ rorl $16,%eax
31653+ movb %al,__KERNEL_PERCPU + 4(%edi)
31654+ movb %ah,__KERNEL_PERCPU + 7(%edi)
31655+ movl $__per_cpu_end - 1,%eax
31656+ subl $__per_cpu_start,%eax
31657+ movw %ax,__KERNEL_PERCPU + 0(%edi)
31658+#endif
31659 #else
31660 mov %rsi,xen_start_info
31661 mov $init_thread_union+THREAD_SIZE,%rsp
31662diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
31663index a95b417..b6dbd0b 100644
31664--- a/arch/x86/xen/xen-ops.h
31665+++ b/arch/x86/xen/xen-ops.h
31666@@ -10,8 +10,6 @@
31667 extern const char xen_hypervisor_callback[];
31668 extern const char xen_failsafe_callback[];
31669
31670-extern void *xen_initial_gdt;
31671-
31672 struct trap_info;
31673 void xen_copy_trap_info(struct trap_info *traps);
31674
31675diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
31676index 525bd3d..ef888b1 100644
31677--- a/arch/xtensa/variants/dc232b/include/variant/core.h
31678+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
31679@@ -119,9 +119,9 @@
31680 ----------------------------------------------------------------------*/
31681
31682 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
31683-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
31684 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
31685 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
31686+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
31687
31688 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
31689 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
31690diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
31691index 2f33760..835e50a 100644
31692--- a/arch/xtensa/variants/fsf/include/variant/core.h
31693+++ b/arch/xtensa/variants/fsf/include/variant/core.h
31694@@ -11,6 +11,7 @@
31695 #ifndef _XTENSA_CORE_H
31696 #define _XTENSA_CORE_H
31697
31698+#include <linux/const.h>
31699
31700 /****************************************************************************
31701 Parameters Useful for Any Code, USER or PRIVILEGED
31702@@ -112,9 +113,9 @@
31703 ----------------------------------------------------------------------*/
31704
31705 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
31706-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
31707 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
31708 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
31709+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
31710
31711 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
31712 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
31713diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
31714index af00795..2bb8105 100644
31715--- a/arch/xtensa/variants/s6000/include/variant/core.h
31716+++ b/arch/xtensa/variants/s6000/include/variant/core.h
31717@@ -11,6 +11,7 @@
31718 #ifndef _XTENSA_CORE_CONFIGURATION_H
31719 #define _XTENSA_CORE_CONFIGURATION_H
31720
31721+#include <linux/const.h>
31722
31723 /****************************************************************************
31724 Parameters Useful for Any Code, USER or PRIVILEGED
31725@@ -118,9 +119,9 @@
31726 ----------------------------------------------------------------------*/
31727
31728 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
31729-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
31730 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
31731 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
31732+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
31733
31734 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
31735 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
31736diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
31737index 58916af..eb9dbcf6 100644
31738--- a/block/blk-iopoll.c
31739+++ b/block/blk-iopoll.c
31740@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
31741 }
31742 EXPORT_SYMBOL(blk_iopoll_complete);
31743
31744-static void blk_iopoll_softirq(struct softirq_action *h)
31745+static void blk_iopoll_softirq(void)
31746 {
31747 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
31748 int rearm = 0, budget = blk_iopoll_budget;
31749@@ -209,7 +209,7 @@ static int __cpuinit blk_iopoll_cpu_notify(struct notifier_block *self,
31750 return NOTIFY_OK;
31751 }
31752
31753-static struct notifier_block __cpuinitdata blk_iopoll_cpu_notifier = {
31754+static struct notifier_block blk_iopoll_cpu_notifier = {
31755 .notifier_call = blk_iopoll_cpu_notify,
31756 };
31757
31758diff --git a/block/blk-map.c b/block/blk-map.c
31759index 623e1cd..ca1e109 100644
31760--- a/block/blk-map.c
31761+++ b/block/blk-map.c
31762@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
31763 if (!len || !kbuf)
31764 return -EINVAL;
31765
31766- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
31767+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
31768 if (do_copy)
31769 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
31770 else
31771diff --git a/block/blk-softirq.c b/block/blk-softirq.c
31772index 467c8de..f3628c5 100644
31773--- a/block/blk-softirq.c
31774+++ b/block/blk-softirq.c
31775@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
31776 * Softirq action handler - move entries to local list and loop over them
31777 * while passing them to the queue registered handler.
31778 */
31779-static void blk_done_softirq(struct softirq_action *h)
31780+static void blk_done_softirq(void)
31781 {
31782 struct list_head *cpu_list, local_list;
31783
31784@@ -98,7 +98,7 @@ static int __cpuinit blk_cpu_notify(struct notifier_block *self,
31785 return NOTIFY_OK;
31786 }
31787
31788-static struct notifier_block __cpuinitdata blk_cpu_notifier = {
31789+static struct notifier_block blk_cpu_notifier = {
31790 .notifier_call = blk_cpu_notify,
31791 };
31792
31793diff --git a/block/bsg.c b/block/bsg.c
31794index ff64ae3..593560c 100644
31795--- a/block/bsg.c
31796+++ b/block/bsg.c
31797@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
31798 struct sg_io_v4 *hdr, struct bsg_device *bd,
31799 fmode_t has_write_perm)
31800 {
31801+ unsigned char tmpcmd[sizeof(rq->__cmd)];
31802+ unsigned char *cmdptr;
31803+
31804 if (hdr->request_len > BLK_MAX_CDB) {
31805 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
31806 if (!rq->cmd)
31807 return -ENOMEM;
31808- }
31809+ cmdptr = rq->cmd;
31810+ } else
31811+ cmdptr = tmpcmd;
31812
31813- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
31814+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
31815 hdr->request_len))
31816 return -EFAULT;
31817
31818+ if (cmdptr != rq->cmd)
31819+ memcpy(rq->cmd, cmdptr, hdr->request_len);
31820+
31821 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
31822 if (blk_verify_command(rq->cmd, has_write_perm))
31823 return -EPERM;
31824diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
31825index 7c668c8..db3521c 100644
31826--- a/block/compat_ioctl.c
31827+++ b/block/compat_ioctl.c
31828@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
31829 err |= __get_user(f->spec1, &uf->spec1);
31830 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
31831 err |= __get_user(name, &uf->name);
31832- f->name = compat_ptr(name);
31833+ f->name = (void __force_kernel *)compat_ptr(name);
31834 if (err) {
31835 err = -EFAULT;
31836 goto out;
31837diff --git a/block/partitions/efi.c b/block/partitions/efi.c
31838index b62fb88..bdab4c4 100644
31839--- a/block/partitions/efi.c
31840+++ b/block/partitions/efi.c
31841@@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
31842 if (!gpt)
31843 return NULL;
31844
31845+ if (!le32_to_cpu(gpt->num_partition_entries))
31846+ return NULL;
31847+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
31848+ if (!pte)
31849+ return NULL;
31850+
31851 count = le32_to_cpu(gpt->num_partition_entries) *
31852 le32_to_cpu(gpt->sizeof_partition_entry);
31853- if (!count)
31854- return NULL;
31855- pte = kzalloc(count, GFP_KERNEL);
31856- if (!pte)
31857- return NULL;
31858-
31859 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
31860 (u8 *) pte,
31861 count) < count) {
31862diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
31863index 9a87daa..fb17486 100644
31864--- a/block/scsi_ioctl.c
31865+++ b/block/scsi_ioctl.c
31866@@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
31867 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
31868 struct sg_io_hdr *hdr, fmode_t mode)
31869 {
31870- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
31871+ unsigned char tmpcmd[sizeof(rq->__cmd)];
31872+ unsigned char *cmdptr;
31873+
31874+ if (rq->cmd != rq->__cmd)
31875+ cmdptr = rq->cmd;
31876+ else
31877+ cmdptr = tmpcmd;
31878+
31879+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
31880 return -EFAULT;
31881+
31882+ if (cmdptr != rq->cmd)
31883+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
31884+
31885 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
31886 return -EPERM;
31887
31888@@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
31889 int err;
31890 unsigned int in_len, out_len, bytes, opcode, cmdlen;
31891 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
31892+ unsigned char tmpcmd[sizeof(rq->__cmd)];
31893+ unsigned char *cmdptr;
31894
31895 if (!sic)
31896 return -EINVAL;
31897@@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
31898 */
31899 err = -EFAULT;
31900 rq->cmd_len = cmdlen;
31901- if (copy_from_user(rq->cmd, sic->data, cmdlen))
31902+
31903+ if (rq->cmd != rq->__cmd)
31904+ cmdptr = rq->cmd;
31905+ else
31906+ cmdptr = tmpcmd;
31907+
31908+ if (copy_from_user(cmdptr, sic->data, cmdlen))
31909 goto error;
31910
31911+ if (rq->cmd != cmdptr)
31912+ memcpy(rq->cmd, cmdptr, cmdlen);
31913+
31914 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
31915 goto error;
31916
31917diff --git a/crypto/cryptd.c b/crypto/cryptd.c
31918index 7bdd61b..afec999 100644
31919--- a/crypto/cryptd.c
31920+++ b/crypto/cryptd.c
31921@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
31922
31923 struct cryptd_blkcipher_request_ctx {
31924 crypto_completion_t complete;
31925-};
31926+} __no_const;
31927
31928 struct cryptd_hash_ctx {
31929 struct crypto_shash *child;
31930@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
31931
31932 struct cryptd_aead_request_ctx {
31933 crypto_completion_t complete;
31934-};
31935+} __no_const;
31936
31937 static void cryptd_queue_worker(struct work_struct *work);
31938
31939diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
31940index f6d9baf..dfd511f 100644
31941--- a/crypto/crypto_user.c
31942+++ b/crypto/crypto_user.c
31943@@ -30,6 +30,8 @@
31944
31945 #include "internal.h"
31946
31947+#define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x))
31948+
31949 static DEFINE_MUTEX(crypto_cfg_mutex);
31950
31951 /* The crypto netlink socket */
31952@@ -196,7 +198,10 @@ static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
31953 struct crypto_dump_info info;
31954 int err;
31955
31956- if (!p->cru_driver_name)
31957+ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
31958+ return -EINVAL;
31959+
31960+ if (!p->cru_driver_name[0])
31961 return -EINVAL;
31962
31963 alg = crypto_alg_match(p, 1);
31964@@ -260,6 +265,9 @@ static int crypto_update_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
31965 struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];
31966 LIST_HEAD(list);
31967
31968+ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
31969+ return -EINVAL;
31970+
31971 if (priority && !strlen(p->cru_driver_name))
31972 return -EINVAL;
31973
31974@@ -287,6 +295,9 @@ static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
31975 struct crypto_alg *alg;
31976 struct crypto_user_alg *p = nlmsg_data(nlh);
31977
31978+ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
31979+ return -EINVAL;
31980+
31981 alg = crypto_alg_match(p, 1);
31982 if (!alg)
31983 return -ENOENT;
31984@@ -368,6 +379,9 @@ static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
31985 struct crypto_user_alg *p = nlmsg_data(nlh);
31986 struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];
31987
31988+ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
31989+ return -EINVAL;
31990+
31991 if (strlen(p->cru_driver_name))
31992 exact = 1;
31993
31994diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
31995index f220d64..d359ad6 100644
31996--- a/drivers/acpi/apei/apei-internal.h
31997+++ b/drivers/acpi/apei/apei-internal.h
31998@@ -20,7 +20,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
31999 struct apei_exec_ins_type {
32000 u32 flags;
32001 apei_exec_ins_func_t run;
32002-};
32003+} __do_const;
32004
32005 struct apei_exec_context {
32006 u32 ip;
32007diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
32008index e6defd8..c26a225 100644
32009--- a/drivers/acpi/apei/cper.c
32010+++ b/drivers/acpi/apei/cper.c
32011@@ -38,12 +38,12 @@
32012 */
32013 u64 cper_next_record_id(void)
32014 {
32015- static atomic64_t seq;
32016+ static atomic64_unchecked_t seq;
32017
32018- if (!atomic64_read(&seq))
32019- atomic64_set(&seq, ((u64)get_seconds()) << 32);
32020+ if (!atomic64_read_unchecked(&seq))
32021+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
32022
32023- return atomic64_inc_return(&seq);
32024+ return atomic64_inc_return_unchecked(&seq);
32025 }
32026 EXPORT_SYMBOL_GPL(cper_next_record_id);
32027
32028diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
32029index be60399..778b33e8 100644
32030--- a/drivers/acpi/bgrt.c
32031+++ b/drivers/acpi/bgrt.c
32032@@ -87,8 +87,10 @@ static int __init bgrt_init(void)
32033 return -ENODEV;
32034
32035 sysfs_bin_attr_init(&image_attr);
32036- image_attr.private = bgrt_image;
32037- image_attr.size = bgrt_image_size;
32038+ pax_open_kernel();
32039+ *(void **)&image_attr.private = bgrt_image;
32040+ *(size_t *)&image_attr.size = bgrt_image_size;
32041+ pax_close_kernel();
32042
32043 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
32044 if (!bgrt_kobj)
32045diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
32046index cb96296..b81293b 100644
32047--- a/drivers/acpi/blacklist.c
32048+++ b/drivers/acpi/blacklist.c
32049@@ -52,7 +52,7 @@ struct acpi_blacklist_item {
32050 u32 is_critical_error;
32051 };
32052
32053-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
32054+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
32055
32056 /*
32057 * POLICY: If *anything* doesn't work, put it on the blacklist.
32058@@ -193,7 +193,7 @@ static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
32059 return 0;
32060 }
32061
32062-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
32063+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
32064 {
32065 .callback = dmi_disable_osi_vista,
32066 .ident = "Fujitsu Siemens",
32067diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
32068index 7586544..636a2f0 100644
32069--- a/drivers/acpi/ec_sys.c
32070+++ b/drivers/acpi/ec_sys.c
32071@@ -12,6 +12,7 @@
32072 #include <linux/acpi.h>
32073 #include <linux/debugfs.h>
32074 #include <linux/module.h>
32075+#include <linux/uaccess.h>
32076 #include "internal.h"
32077
32078 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
32079@@ -34,7 +35,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
32080 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
32081 */
32082 unsigned int size = EC_SPACE_SIZE;
32083- u8 *data = (u8 *) buf;
32084+ u8 data;
32085 loff_t init_off = *off;
32086 int err = 0;
32087
32088@@ -47,9 +48,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
32089 size = count;
32090
32091 while (size) {
32092- err = ec_read(*off, &data[*off - init_off]);
32093+ err = ec_read(*off, &data);
32094 if (err)
32095 return err;
32096+ if (put_user(data, &buf[*off - init_off]))
32097+ return -EFAULT;
32098 *off += 1;
32099 size--;
32100 }
32101@@ -65,7 +68,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
32102
32103 unsigned int size = count;
32104 loff_t init_off = *off;
32105- u8 *data = (u8 *) buf;
32106 int err = 0;
32107
32108 if (*off >= EC_SPACE_SIZE)
32109@@ -76,7 +78,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
32110 }
32111
32112 while (size) {
32113- u8 byte_write = data[*off - init_off];
32114+ u8 byte_write;
32115+ if (get_user(byte_write, &buf[*off - init_off]))
32116+ return -EFAULT;
32117 err = ec_write(*off, byte_write);
32118 if (err)
32119 return err;
32120diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
32121index e83311b..142b5cc 100644
32122--- a/drivers/acpi/processor_driver.c
32123+++ b/drivers/acpi/processor_driver.c
32124@@ -558,7 +558,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
32125 return 0;
32126 #endif
32127
32128- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
32129+ BUG_ON(pr->id >= nr_cpu_ids);
32130
32131 /*
32132 * Buggy BIOS check
32133diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
32134index ed9a1cc..f4a354c 100644
32135--- a/drivers/acpi/processor_idle.c
32136+++ b/drivers/acpi/processor_idle.c
32137@@ -1005,7 +1005,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
32138 {
32139 int i, count = CPUIDLE_DRIVER_STATE_START;
32140 struct acpi_processor_cx *cx;
32141- struct cpuidle_state *state;
32142+ cpuidle_state_no_const *state;
32143 struct cpuidle_driver *drv = &acpi_idle_driver;
32144
32145 if (!pr->flags.power_setup_done)
32146diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
32147index ea61ca9..3fdd70d 100644
32148--- a/drivers/acpi/sysfs.c
32149+++ b/drivers/acpi/sysfs.c
32150@@ -420,11 +420,11 @@ static u32 num_counters;
32151 static struct attribute **all_attrs;
32152 static u32 acpi_gpe_count;
32153
32154-static struct attribute_group interrupt_stats_attr_group = {
32155+static attribute_group_no_const interrupt_stats_attr_group = {
32156 .name = "interrupts",
32157 };
32158
32159-static struct kobj_attribute *counter_attrs;
32160+static kobj_attribute_no_const *counter_attrs;
32161
32162 static void delete_gpe_attr_array(void)
32163 {
32164diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
32165index 6cd7805..07facb3 100644
32166--- a/drivers/ata/libahci.c
32167+++ b/drivers/ata/libahci.c
32168@@ -1230,7 +1230,7 @@ int ahci_kick_engine(struct ata_port *ap)
32169 }
32170 EXPORT_SYMBOL_GPL(ahci_kick_engine);
32171
32172-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
32173+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
32174 struct ata_taskfile *tf, int is_cmd, u16 flags,
32175 unsigned long timeout_msec)
32176 {
32177diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
32178index 46cd3f4..0871ad0 100644
32179--- a/drivers/ata/libata-core.c
32180+++ b/drivers/ata/libata-core.c
32181@@ -4780,7 +4780,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
32182 struct ata_port *ap;
32183 unsigned int tag;
32184
32185- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
32186+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
32187 ap = qc->ap;
32188
32189 qc->flags = 0;
32190@@ -4796,7 +4796,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
32191 struct ata_port *ap;
32192 struct ata_link *link;
32193
32194- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
32195+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
32196 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
32197 ap = qc->ap;
32198 link = qc->dev->link;
32199@@ -5892,6 +5892,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
32200 return;
32201
32202 spin_lock(&lock);
32203+ pax_open_kernel();
32204
32205 for (cur = ops->inherits; cur; cur = cur->inherits) {
32206 void **inherit = (void **)cur;
32207@@ -5905,8 +5906,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
32208 if (IS_ERR(*pp))
32209 *pp = NULL;
32210
32211- ops->inherits = NULL;
32212+ *(struct ata_port_operations **)&ops->inherits = NULL;
32213
32214+ pax_close_kernel();
32215 spin_unlock(&lock);
32216 }
32217
32218diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
32219index 405022d..fb70e53 100644
32220--- a/drivers/ata/pata_arasan_cf.c
32221+++ b/drivers/ata/pata_arasan_cf.c
32222@@ -864,7 +864,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
32223 /* Handle platform specific quirks */
32224 if (pdata->quirk) {
32225 if (pdata->quirk & CF_BROKEN_PIO) {
32226- ap->ops->set_piomode = NULL;
32227+ pax_open_kernel();
32228+ *(void **)&ap->ops->set_piomode = NULL;
32229+ pax_close_kernel();
32230 ap->pio_mask = 0;
32231 }
32232 if (pdata->quirk & CF_BROKEN_MWDMA)
32233diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
32234index f9b983a..887b9d8 100644
32235--- a/drivers/atm/adummy.c
32236+++ b/drivers/atm/adummy.c
32237@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
32238 vcc->pop(vcc, skb);
32239 else
32240 dev_kfree_skb_any(skb);
32241- atomic_inc(&vcc->stats->tx);
32242+ atomic_inc_unchecked(&vcc->stats->tx);
32243
32244 return 0;
32245 }
32246diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
32247index 77a7480..05cde58 100644
32248--- a/drivers/atm/ambassador.c
32249+++ b/drivers/atm/ambassador.c
32250@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
32251 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
32252
32253 // VC layer stats
32254- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
32255+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
32256
32257 // free the descriptor
32258 kfree (tx_descr);
32259@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
32260 dump_skb ("<<<", vc, skb);
32261
32262 // VC layer stats
32263- atomic_inc(&atm_vcc->stats->rx);
32264+ atomic_inc_unchecked(&atm_vcc->stats->rx);
32265 __net_timestamp(skb);
32266 // end of our responsibility
32267 atm_vcc->push (atm_vcc, skb);
32268@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
32269 } else {
32270 PRINTK (KERN_INFO, "dropped over-size frame");
32271 // should we count this?
32272- atomic_inc(&atm_vcc->stats->rx_drop);
32273+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
32274 }
32275
32276 } else {
32277@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
32278 }
32279
32280 if (check_area (skb->data, skb->len)) {
32281- atomic_inc(&atm_vcc->stats->tx_err);
32282+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
32283 return -ENOMEM; // ?
32284 }
32285
32286diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
32287index b22d71c..d6e1049 100644
32288--- a/drivers/atm/atmtcp.c
32289+++ b/drivers/atm/atmtcp.c
32290@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
32291 if (vcc->pop) vcc->pop(vcc,skb);
32292 else dev_kfree_skb(skb);
32293 if (dev_data) return 0;
32294- atomic_inc(&vcc->stats->tx_err);
32295+ atomic_inc_unchecked(&vcc->stats->tx_err);
32296 return -ENOLINK;
32297 }
32298 size = skb->len+sizeof(struct atmtcp_hdr);
32299@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
32300 if (!new_skb) {
32301 if (vcc->pop) vcc->pop(vcc,skb);
32302 else dev_kfree_skb(skb);
32303- atomic_inc(&vcc->stats->tx_err);
32304+ atomic_inc_unchecked(&vcc->stats->tx_err);
32305 return -ENOBUFS;
32306 }
32307 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
32308@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
32309 if (vcc->pop) vcc->pop(vcc,skb);
32310 else dev_kfree_skb(skb);
32311 out_vcc->push(out_vcc,new_skb);
32312- atomic_inc(&vcc->stats->tx);
32313- atomic_inc(&out_vcc->stats->rx);
32314+ atomic_inc_unchecked(&vcc->stats->tx);
32315+ atomic_inc_unchecked(&out_vcc->stats->rx);
32316 return 0;
32317 }
32318
32319@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
32320 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
32321 read_unlock(&vcc_sklist_lock);
32322 if (!out_vcc) {
32323- atomic_inc(&vcc->stats->tx_err);
32324+ atomic_inc_unchecked(&vcc->stats->tx_err);
32325 goto done;
32326 }
32327 skb_pull(skb,sizeof(struct atmtcp_hdr));
32328@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
32329 __net_timestamp(new_skb);
32330 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
32331 out_vcc->push(out_vcc,new_skb);
32332- atomic_inc(&vcc->stats->tx);
32333- atomic_inc(&out_vcc->stats->rx);
32334+ atomic_inc_unchecked(&vcc->stats->tx);
32335+ atomic_inc_unchecked(&out_vcc->stats->rx);
32336 done:
32337 if (vcc->pop) vcc->pop(vcc,skb);
32338 else dev_kfree_skb(skb);
32339diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
32340index c1eb6fa..4c71be9 100644
32341--- a/drivers/atm/eni.c
32342+++ b/drivers/atm/eni.c
32343@@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
32344 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
32345 vcc->dev->number);
32346 length = 0;
32347- atomic_inc(&vcc->stats->rx_err);
32348+ atomic_inc_unchecked(&vcc->stats->rx_err);
32349 }
32350 else {
32351 length = ATM_CELL_SIZE-1; /* no HEC */
32352@@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
32353 size);
32354 }
32355 eff = length = 0;
32356- atomic_inc(&vcc->stats->rx_err);
32357+ atomic_inc_unchecked(&vcc->stats->rx_err);
32358 }
32359 else {
32360 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
32361@@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
32362 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
32363 vcc->dev->number,vcc->vci,length,size << 2,descr);
32364 length = eff = 0;
32365- atomic_inc(&vcc->stats->rx_err);
32366+ atomic_inc_unchecked(&vcc->stats->rx_err);
32367 }
32368 }
32369 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
32370@@ -767,7 +767,7 @@ rx_dequeued++;
32371 vcc->push(vcc,skb);
32372 pushed++;
32373 }
32374- atomic_inc(&vcc->stats->rx);
32375+ atomic_inc_unchecked(&vcc->stats->rx);
32376 }
32377 wake_up(&eni_dev->rx_wait);
32378 }
32379@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
32380 PCI_DMA_TODEVICE);
32381 if (vcc->pop) vcc->pop(vcc,skb);
32382 else dev_kfree_skb_irq(skb);
32383- atomic_inc(&vcc->stats->tx);
32384+ atomic_inc_unchecked(&vcc->stats->tx);
32385 wake_up(&eni_dev->tx_wait);
32386 dma_complete++;
32387 }
32388diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
32389index b41c948..a002b17 100644
32390--- a/drivers/atm/firestream.c
32391+++ b/drivers/atm/firestream.c
32392@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
32393 }
32394 }
32395
32396- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
32397+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
32398
32399 fs_dprintk (FS_DEBUG_TXMEM, "i");
32400 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
32401@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
32402 #endif
32403 skb_put (skb, qe->p1 & 0xffff);
32404 ATM_SKB(skb)->vcc = atm_vcc;
32405- atomic_inc(&atm_vcc->stats->rx);
32406+ atomic_inc_unchecked(&atm_vcc->stats->rx);
32407 __net_timestamp(skb);
32408 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
32409 atm_vcc->push (atm_vcc, skb);
32410@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
32411 kfree (pe);
32412 }
32413 if (atm_vcc)
32414- atomic_inc(&atm_vcc->stats->rx_drop);
32415+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
32416 break;
32417 case 0x1f: /* Reassembly abort: no buffers. */
32418 /* Silently increment error counter. */
32419 if (atm_vcc)
32420- atomic_inc(&atm_vcc->stats->rx_drop);
32421+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
32422 break;
32423 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
32424 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
32425diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
32426index 204814e..cede831 100644
32427--- a/drivers/atm/fore200e.c
32428+++ b/drivers/atm/fore200e.c
32429@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
32430 #endif
32431 /* check error condition */
32432 if (*entry->status & STATUS_ERROR)
32433- atomic_inc(&vcc->stats->tx_err);
32434+ atomic_inc_unchecked(&vcc->stats->tx_err);
32435 else
32436- atomic_inc(&vcc->stats->tx);
32437+ atomic_inc_unchecked(&vcc->stats->tx);
32438 }
32439 }
32440
32441@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
32442 if (skb == NULL) {
32443 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
32444
32445- atomic_inc(&vcc->stats->rx_drop);
32446+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32447 return -ENOMEM;
32448 }
32449
32450@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
32451
32452 dev_kfree_skb_any(skb);
32453
32454- atomic_inc(&vcc->stats->rx_drop);
32455+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32456 return -ENOMEM;
32457 }
32458
32459 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
32460
32461 vcc->push(vcc, skb);
32462- atomic_inc(&vcc->stats->rx);
32463+ atomic_inc_unchecked(&vcc->stats->rx);
32464
32465 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
32466
32467@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
32468 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
32469 fore200e->atm_dev->number,
32470 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
32471- atomic_inc(&vcc->stats->rx_err);
32472+ atomic_inc_unchecked(&vcc->stats->rx_err);
32473 }
32474 }
32475
32476@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
32477 goto retry_here;
32478 }
32479
32480- atomic_inc(&vcc->stats->tx_err);
32481+ atomic_inc_unchecked(&vcc->stats->tx_err);
32482
32483 fore200e->tx_sat++;
32484 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
32485diff --git a/drivers/atm/he.c b/drivers/atm/he.c
32486index 72b6960..cf9167a 100644
32487--- a/drivers/atm/he.c
32488+++ b/drivers/atm/he.c
32489@@ -1699,7 +1699,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
32490
32491 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
32492 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
32493- atomic_inc(&vcc->stats->rx_drop);
32494+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32495 goto return_host_buffers;
32496 }
32497
32498@@ -1726,7 +1726,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
32499 RBRQ_LEN_ERR(he_dev->rbrq_head)
32500 ? "LEN_ERR" : "",
32501 vcc->vpi, vcc->vci);
32502- atomic_inc(&vcc->stats->rx_err);
32503+ atomic_inc_unchecked(&vcc->stats->rx_err);
32504 goto return_host_buffers;
32505 }
32506
32507@@ -1778,7 +1778,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
32508 vcc->push(vcc, skb);
32509 spin_lock(&he_dev->global_lock);
32510
32511- atomic_inc(&vcc->stats->rx);
32512+ atomic_inc_unchecked(&vcc->stats->rx);
32513
32514 return_host_buffers:
32515 ++pdus_assembled;
32516@@ -2104,7 +2104,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
32517 tpd->vcc->pop(tpd->vcc, tpd->skb);
32518 else
32519 dev_kfree_skb_any(tpd->skb);
32520- atomic_inc(&tpd->vcc->stats->tx_err);
32521+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
32522 }
32523 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
32524 return;
32525@@ -2516,7 +2516,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
32526 vcc->pop(vcc, skb);
32527 else
32528 dev_kfree_skb_any(skb);
32529- atomic_inc(&vcc->stats->tx_err);
32530+ atomic_inc_unchecked(&vcc->stats->tx_err);
32531 return -EINVAL;
32532 }
32533
32534@@ -2527,7 +2527,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
32535 vcc->pop(vcc, skb);
32536 else
32537 dev_kfree_skb_any(skb);
32538- atomic_inc(&vcc->stats->tx_err);
32539+ atomic_inc_unchecked(&vcc->stats->tx_err);
32540 return -EINVAL;
32541 }
32542 #endif
32543@@ -2539,7 +2539,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
32544 vcc->pop(vcc, skb);
32545 else
32546 dev_kfree_skb_any(skb);
32547- atomic_inc(&vcc->stats->tx_err);
32548+ atomic_inc_unchecked(&vcc->stats->tx_err);
32549 spin_unlock_irqrestore(&he_dev->global_lock, flags);
32550 return -ENOMEM;
32551 }
32552@@ -2581,7 +2581,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
32553 vcc->pop(vcc, skb);
32554 else
32555 dev_kfree_skb_any(skb);
32556- atomic_inc(&vcc->stats->tx_err);
32557+ atomic_inc_unchecked(&vcc->stats->tx_err);
32558 spin_unlock_irqrestore(&he_dev->global_lock, flags);
32559 return -ENOMEM;
32560 }
32561@@ -2612,7 +2612,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
32562 __enqueue_tpd(he_dev, tpd, cid);
32563 spin_unlock_irqrestore(&he_dev->global_lock, flags);
32564
32565- atomic_inc(&vcc->stats->tx);
32566+ atomic_inc_unchecked(&vcc->stats->tx);
32567
32568 return 0;
32569 }
32570diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
32571index 1dc0519..1aadaf7 100644
32572--- a/drivers/atm/horizon.c
32573+++ b/drivers/atm/horizon.c
32574@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
32575 {
32576 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
32577 // VC layer stats
32578- atomic_inc(&vcc->stats->rx);
32579+ atomic_inc_unchecked(&vcc->stats->rx);
32580 __net_timestamp(skb);
32581 // end of our responsibility
32582 vcc->push (vcc, skb);
32583@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
32584 dev->tx_iovec = NULL;
32585
32586 // VC layer stats
32587- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
32588+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
32589
32590 // free the skb
32591 hrz_kfree_skb (skb);
32592diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
32593index 272f009..a18ba55 100644
32594--- a/drivers/atm/idt77252.c
32595+++ b/drivers/atm/idt77252.c
32596@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
32597 else
32598 dev_kfree_skb(skb);
32599
32600- atomic_inc(&vcc->stats->tx);
32601+ atomic_inc_unchecked(&vcc->stats->tx);
32602 }
32603
32604 atomic_dec(&scq->used);
32605@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32606 if ((sb = dev_alloc_skb(64)) == NULL) {
32607 printk("%s: Can't allocate buffers for aal0.\n",
32608 card->name);
32609- atomic_add(i, &vcc->stats->rx_drop);
32610+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
32611 break;
32612 }
32613 if (!atm_charge(vcc, sb->truesize)) {
32614 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
32615 card->name);
32616- atomic_add(i - 1, &vcc->stats->rx_drop);
32617+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
32618 dev_kfree_skb(sb);
32619 break;
32620 }
32621@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32622 ATM_SKB(sb)->vcc = vcc;
32623 __net_timestamp(sb);
32624 vcc->push(vcc, sb);
32625- atomic_inc(&vcc->stats->rx);
32626+ atomic_inc_unchecked(&vcc->stats->rx);
32627
32628 cell += ATM_CELL_PAYLOAD;
32629 }
32630@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32631 "(CDC: %08x)\n",
32632 card->name, len, rpp->len, readl(SAR_REG_CDC));
32633 recycle_rx_pool_skb(card, rpp);
32634- atomic_inc(&vcc->stats->rx_err);
32635+ atomic_inc_unchecked(&vcc->stats->rx_err);
32636 return;
32637 }
32638 if (stat & SAR_RSQE_CRC) {
32639 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
32640 recycle_rx_pool_skb(card, rpp);
32641- atomic_inc(&vcc->stats->rx_err);
32642+ atomic_inc_unchecked(&vcc->stats->rx_err);
32643 return;
32644 }
32645 if (skb_queue_len(&rpp->queue) > 1) {
32646@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32647 RXPRINTK("%s: Can't alloc RX skb.\n",
32648 card->name);
32649 recycle_rx_pool_skb(card, rpp);
32650- atomic_inc(&vcc->stats->rx_err);
32651+ atomic_inc_unchecked(&vcc->stats->rx_err);
32652 return;
32653 }
32654 if (!atm_charge(vcc, skb->truesize)) {
32655@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32656 __net_timestamp(skb);
32657
32658 vcc->push(vcc, skb);
32659- atomic_inc(&vcc->stats->rx);
32660+ atomic_inc_unchecked(&vcc->stats->rx);
32661
32662 return;
32663 }
32664@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32665 __net_timestamp(skb);
32666
32667 vcc->push(vcc, skb);
32668- atomic_inc(&vcc->stats->rx);
32669+ atomic_inc_unchecked(&vcc->stats->rx);
32670
32671 if (skb->truesize > SAR_FB_SIZE_3)
32672 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
32673@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
32674 if (vcc->qos.aal != ATM_AAL0) {
32675 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
32676 card->name, vpi, vci);
32677- atomic_inc(&vcc->stats->rx_drop);
32678+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32679 goto drop;
32680 }
32681
32682 if ((sb = dev_alloc_skb(64)) == NULL) {
32683 printk("%s: Can't allocate buffers for AAL0.\n",
32684 card->name);
32685- atomic_inc(&vcc->stats->rx_err);
32686+ atomic_inc_unchecked(&vcc->stats->rx_err);
32687 goto drop;
32688 }
32689
32690@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
32691 ATM_SKB(sb)->vcc = vcc;
32692 __net_timestamp(sb);
32693 vcc->push(vcc, sb);
32694- atomic_inc(&vcc->stats->rx);
32695+ atomic_inc_unchecked(&vcc->stats->rx);
32696
32697 drop:
32698 skb_pull(queue, 64);
32699@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
32700
32701 if (vc == NULL) {
32702 printk("%s: NULL connection in send().\n", card->name);
32703- atomic_inc(&vcc->stats->tx_err);
32704+ atomic_inc_unchecked(&vcc->stats->tx_err);
32705 dev_kfree_skb(skb);
32706 return -EINVAL;
32707 }
32708 if (!test_bit(VCF_TX, &vc->flags)) {
32709 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
32710- atomic_inc(&vcc->stats->tx_err);
32711+ atomic_inc_unchecked(&vcc->stats->tx_err);
32712 dev_kfree_skb(skb);
32713 return -EINVAL;
32714 }
32715@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
32716 break;
32717 default:
32718 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
32719- atomic_inc(&vcc->stats->tx_err);
32720+ atomic_inc_unchecked(&vcc->stats->tx_err);
32721 dev_kfree_skb(skb);
32722 return -EINVAL;
32723 }
32724
32725 if (skb_shinfo(skb)->nr_frags != 0) {
32726 printk("%s: No scatter-gather yet.\n", card->name);
32727- atomic_inc(&vcc->stats->tx_err);
32728+ atomic_inc_unchecked(&vcc->stats->tx_err);
32729 dev_kfree_skb(skb);
32730 return -EINVAL;
32731 }
32732@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
32733
32734 err = queue_skb(card, vc, skb, oam);
32735 if (err) {
32736- atomic_inc(&vcc->stats->tx_err);
32737+ atomic_inc_unchecked(&vcc->stats->tx_err);
32738 dev_kfree_skb(skb);
32739 return err;
32740 }
32741@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
32742 skb = dev_alloc_skb(64);
32743 if (!skb) {
32744 printk("%s: Out of memory in send_oam().\n", card->name);
32745- atomic_inc(&vcc->stats->tx_err);
32746+ atomic_inc_unchecked(&vcc->stats->tx_err);
32747 return -ENOMEM;
32748 }
32749 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
32750diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
32751index 4217f29..88f547a 100644
32752--- a/drivers/atm/iphase.c
32753+++ b/drivers/atm/iphase.c
32754@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
32755 status = (u_short) (buf_desc_ptr->desc_mode);
32756 if (status & (RX_CER | RX_PTE | RX_OFL))
32757 {
32758- atomic_inc(&vcc->stats->rx_err);
32759+ atomic_inc_unchecked(&vcc->stats->rx_err);
32760 IF_ERR(printk("IA: bad packet, dropping it");)
32761 if (status & RX_CER) {
32762 IF_ERR(printk(" cause: packet CRC error\n");)
32763@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
32764 len = dma_addr - buf_addr;
32765 if (len > iadev->rx_buf_sz) {
32766 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
32767- atomic_inc(&vcc->stats->rx_err);
32768+ atomic_inc_unchecked(&vcc->stats->rx_err);
32769 goto out_free_desc;
32770 }
32771
32772@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
32773 ia_vcc = INPH_IA_VCC(vcc);
32774 if (ia_vcc == NULL)
32775 {
32776- atomic_inc(&vcc->stats->rx_err);
32777+ atomic_inc_unchecked(&vcc->stats->rx_err);
32778 atm_return(vcc, skb->truesize);
32779 dev_kfree_skb_any(skb);
32780 goto INCR_DLE;
32781@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
32782 if ((length > iadev->rx_buf_sz) || (length >
32783 (skb->len - sizeof(struct cpcs_trailer))))
32784 {
32785- atomic_inc(&vcc->stats->rx_err);
32786+ atomic_inc_unchecked(&vcc->stats->rx_err);
32787 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
32788 length, skb->len);)
32789 atm_return(vcc, skb->truesize);
32790@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
32791
32792 IF_RX(printk("rx_dle_intr: skb push");)
32793 vcc->push(vcc,skb);
32794- atomic_inc(&vcc->stats->rx);
32795+ atomic_inc_unchecked(&vcc->stats->rx);
32796 iadev->rx_pkt_cnt++;
32797 }
32798 INCR_DLE:
32799@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
32800 {
32801 struct k_sonet_stats *stats;
32802 stats = &PRIV(_ia_dev[board])->sonet_stats;
32803- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
32804- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
32805- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
32806- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
32807- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
32808- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
32809- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
32810- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
32811- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
32812+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
32813+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
32814+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
32815+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
32816+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
32817+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
32818+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
32819+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
32820+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
32821 }
32822 ia_cmds.status = 0;
32823 break;
32824@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
32825 if ((desc == 0) || (desc > iadev->num_tx_desc))
32826 {
32827 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
32828- atomic_inc(&vcc->stats->tx);
32829+ atomic_inc_unchecked(&vcc->stats->tx);
32830 if (vcc->pop)
32831 vcc->pop(vcc, skb);
32832 else
32833@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
32834 ATM_DESC(skb) = vcc->vci;
32835 skb_queue_tail(&iadev->tx_dma_q, skb);
32836
32837- atomic_inc(&vcc->stats->tx);
32838+ atomic_inc_unchecked(&vcc->stats->tx);
32839 iadev->tx_pkt_cnt++;
32840 /* Increment transaction counter */
32841 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
32842
32843 #if 0
32844 /* add flow control logic */
32845- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
32846+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
32847 if (iavcc->vc_desc_cnt > 10) {
32848 vcc->tx_quota = vcc->tx_quota * 3 / 4;
32849 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
32850diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
32851index fa7d701..1e404c7 100644
32852--- a/drivers/atm/lanai.c
32853+++ b/drivers/atm/lanai.c
32854@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
32855 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
32856 lanai_endtx(lanai, lvcc);
32857 lanai_free_skb(lvcc->tx.atmvcc, skb);
32858- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
32859+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
32860 }
32861
32862 /* Try to fill the buffer - don't call unless there is backlog */
32863@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
32864 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
32865 __net_timestamp(skb);
32866 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
32867- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
32868+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
32869 out:
32870 lvcc->rx.buf.ptr = end;
32871 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
32872@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
32873 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
32874 "vcc %d\n", lanai->number, (unsigned int) s, vci);
32875 lanai->stats.service_rxnotaal5++;
32876- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
32877+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
32878 return 0;
32879 }
32880 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
32881@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
32882 int bytes;
32883 read_unlock(&vcc_sklist_lock);
32884 DPRINTK("got trashed rx pdu on vci %d\n", vci);
32885- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
32886+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
32887 lvcc->stats.x.aal5.service_trash++;
32888 bytes = (SERVICE_GET_END(s) * 16) -
32889 (((unsigned long) lvcc->rx.buf.ptr) -
32890@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
32891 }
32892 if (s & SERVICE_STREAM) {
32893 read_unlock(&vcc_sklist_lock);
32894- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
32895+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
32896 lvcc->stats.x.aal5.service_stream++;
32897 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
32898 "PDU on VCI %d!\n", lanai->number, vci);
32899@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
32900 return 0;
32901 }
32902 DPRINTK("got rx crc error on vci %d\n", vci);
32903- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
32904+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
32905 lvcc->stats.x.aal5.service_rxcrc++;
32906 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
32907 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
32908diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
32909index ed1d2b7..8cffc1f 100644
32910--- a/drivers/atm/nicstar.c
32911+++ b/drivers/atm/nicstar.c
32912@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
32913 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
32914 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
32915 card->index);
32916- atomic_inc(&vcc->stats->tx_err);
32917+ atomic_inc_unchecked(&vcc->stats->tx_err);
32918 dev_kfree_skb_any(skb);
32919 return -EINVAL;
32920 }
32921@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
32922 if (!vc->tx) {
32923 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
32924 card->index);
32925- atomic_inc(&vcc->stats->tx_err);
32926+ atomic_inc_unchecked(&vcc->stats->tx_err);
32927 dev_kfree_skb_any(skb);
32928 return -EINVAL;
32929 }
32930@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
32931 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
32932 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
32933 card->index);
32934- atomic_inc(&vcc->stats->tx_err);
32935+ atomic_inc_unchecked(&vcc->stats->tx_err);
32936 dev_kfree_skb_any(skb);
32937 return -EINVAL;
32938 }
32939
32940 if (skb_shinfo(skb)->nr_frags != 0) {
32941 printk("nicstar%d: No scatter-gather yet.\n", card->index);
32942- atomic_inc(&vcc->stats->tx_err);
32943+ atomic_inc_unchecked(&vcc->stats->tx_err);
32944 dev_kfree_skb_any(skb);
32945 return -EINVAL;
32946 }
32947@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
32948 }
32949
32950 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
32951- atomic_inc(&vcc->stats->tx_err);
32952+ atomic_inc_unchecked(&vcc->stats->tx_err);
32953 dev_kfree_skb_any(skb);
32954 return -EIO;
32955 }
32956- atomic_inc(&vcc->stats->tx);
32957+ atomic_inc_unchecked(&vcc->stats->tx);
32958
32959 return 0;
32960 }
32961@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32962 printk
32963 ("nicstar%d: Can't allocate buffers for aal0.\n",
32964 card->index);
32965- atomic_add(i, &vcc->stats->rx_drop);
32966+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
32967 break;
32968 }
32969 if (!atm_charge(vcc, sb->truesize)) {
32970 RXPRINTK
32971 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
32972 card->index);
32973- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
32974+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
32975 dev_kfree_skb_any(sb);
32976 break;
32977 }
32978@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32979 ATM_SKB(sb)->vcc = vcc;
32980 __net_timestamp(sb);
32981 vcc->push(vcc, sb);
32982- atomic_inc(&vcc->stats->rx);
32983+ atomic_inc_unchecked(&vcc->stats->rx);
32984 cell += ATM_CELL_PAYLOAD;
32985 }
32986
32987@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32988 if (iovb == NULL) {
32989 printk("nicstar%d: Out of iovec buffers.\n",
32990 card->index);
32991- atomic_inc(&vcc->stats->rx_drop);
32992+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32993 recycle_rx_buf(card, skb);
32994 return;
32995 }
32996@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32997 small or large buffer itself. */
32998 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
32999 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
33000- atomic_inc(&vcc->stats->rx_err);
33001+ atomic_inc_unchecked(&vcc->stats->rx_err);
33002 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
33003 NS_MAX_IOVECS);
33004 NS_PRV_IOVCNT(iovb) = 0;
33005@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33006 ("nicstar%d: Expected a small buffer, and this is not one.\n",
33007 card->index);
33008 which_list(card, skb);
33009- atomic_inc(&vcc->stats->rx_err);
33010+ atomic_inc_unchecked(&vcc->stats->rx_err);
33011 recycle_rx_buf(card, skb);
33012 vc->rx_iov = NULL;
33013 recycle_iov_buf(card, iovb);
33014@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33015 ("nicstar%d: Expected a large buffer, and this is not one.\n",
33016 card->index);
33017 which_list(card, skb);
33018- atomic_inc(&vcc->stats->rx_err);
33019+ atomic_inc_unchecked(&vcc->stats->rx_err);
33020 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
33021 NS_PRV_IOVCNT(iovb));
33022 vc->rx_iov = NULL;
33023@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33024 printk(" - PDU size mismatch.\n");
33025 else
33026 printk(".\n");
33027- atomic_inc(&vcc->stats->rx_err);
33028+ atomic_inc_unchecked(&vcc->stats->rx_err);
33029 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
33030 NS_PRV_IOVCNT(iovb));
33031 vc->rx_iov = NULL;
33032@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33033 /* skb points to a small buffer */
33034 if (!atm_charge(vcc, skb->truesize)) {
33035 push_rxbufs(card, skb);
33036- atomic_inc(&vcc->stats->rx_drop);
33037+ atomic_inc_unchecked(&vcc->stats->rx_drop);
33038 } else {
33039 skb_put(skb, len);
33040 dequeue_sm_buf(card, skb);
33041@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33042 ATM_SKB(skb)->vcc = vcc;
33043 __net_timestamp(skb);
33044 vcc->push(vcc, skb);
33045- atomic_inc(&vcc->stats->rx);
33046+ atomic_inc_unchecked(&vcc->stats->rx);
33047 }
33048 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
33049 struct sk_buff *sb;
33050@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33051 if (len <= NS_SMBUFSIZE) {
33052 if (!atm_charge(vcc, sb->truesize)) {
33053 push_rxbufs(card, sb);
33054- atomic_inc(&vcc->stats->rx_drop);
33055+ atomic_inc_unchecked(&vcc->stats->rx_drop);
33056 } else {
33057 skb_put(sb, len);
33058 dequeue_sm_buf(card, sb);
33059@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33060 ATM_SKB(sb)->vcc = vcc;
33061 __net_timestamp(sb);
33062 vcc->push(vcc, sb);
33063- atomic_inc(&vcc->stats->rx);
33064+ atomic_inc_unchecked(&vcc->stats->rx);
33065 }
33066
33067 push_rxbufs(card, skb);
33068@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33069
33070 if (!atm_charge(vcc, skb->truesize)) {
33071 push_rxbufs(card, skb);
33072- atomic_inc(&vcc->stats->rx_drop);
33073+ atomic_inc_unchecked(&vcc->stats->rx_drop);
33074 } else {
33075 dequeue_lg_buf(card, skb);
33076 #ifdef NS_USE_DESTRUCTORS
33077@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33078 ATM_SKB(skb)->vcc = vcc;
33079 __net_timestamp(skb);
33080 vcc->push(vcc, skb);
33081- atomic_inc(&vcc->stats->rx);
33082+ atomic_inc_unchecked(&vcc->stats->rx);
33083 }
33084
33085 push_rxbufs(card, sb);
33086@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33087 printk
33088 ("nicstar%d: Out of huge buffers.\n",
33089 card->index);
33090- atomic_inc(&vcc->stats->rx_drop);
33091+ atomic_inc_unchecked(&vcc->stats->rx_drop);
33092 recycle_iovec_rx_bufs(card,
33093 (struct iovec *)
33094 iovb->data,
33095@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33096 card->hbpool.count++;
33097 } else
33098 dev_kfree_skb_any(hb);
33099- atomic_inc(&vcc->stats->rx_drop);
33100+ atomic_inc_unchecked(&vcc->stats->rx_drop);
33101 } else {
33102 /* Copy the small buffer to the huge buffer */
33103 sb = (struct sk_buff *)iov->iov_base;
33104@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
33105 #endif /* NS_USE_DESTRUCTORS */
33106 __net_timestamp(hb);
33107 vcc->push(vcc, hb);
33108- atomic_inc(&vcc->stats->rx);
33109+ atomic_inc_unchecked(&vcc->stats->rx);
33110 }
33111 }
33112
33113diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
33114index 0474a89..06ea4a1 100644
33115--- a/drivers/atm/solos-pci.c
33116+++ b/drivers/atm/solos-pci.c
33117@@ -838,7 +838,7 @@ void solos_bh(unsigned long card_arg)
33118 }
33119 atm_charge(vcc, skb->truesize);
33120 vcc->push(vcc, skb);
33121- atomic_inc(&vcc->stats->rx);
33122+ atomic_inc_unchecked(&vcc->stats->rx);
33123 break;
33124
33125 case PKT_STATUS:
33126@@ -1117,7 +1117,7 @@ static uint32_t fpga_tx(struct solos_card *card)
33127 vcc = SKB_CB(oldskb)->vcc;
33128
33129 if (vcc) {
33130- atomic_inc(&vcc->stats->tx);
33131+ atomic_inc_unchecked(&vcc->stats->tx);
33132 solos_pop(vcc, oldskb);
33133 } else {
33134 dev_kfree_skb_irq(oldskb);
33135diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
33136index 0215934..ce9f5b1 100644
33137--- a/drivers/atm/suni.c
33138+++ b/drivers/atm/suni.c
33139@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
33140
33141
33142 #define ADD_LIMITED(s,v) \
33143- atomic_add((v),&stats->s); \
33144- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
33145+ atomic_add_unchecked((v),&stats->s); \
33146+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
33147
33148
33149 static void suni_hz(unsigned long from_timer)
33150diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
33151index 5120a96..e2572bd 100644
33152--- a/drivers/atm/uPD98402.c
33153+++ b/drivers/atm/uPD98402.c
33154@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
33155 struct sonet_stats tmp;
33156 int error = 0;
33157
33158- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
33159+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
33160 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
33161 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
33162 if (zero && !error) {
33163@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
33164
33165
33166 #define ADD_LIMITED(s,v) \
33167- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
33168- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
33169- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
33170+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
33171+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
33172+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
33173
33174
33175 static void stat_event(struct atm_dev *dev)
33176@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
33177 if (reason & uPD98402_INT_PFM) stat_event(dev);
33178 if (reason & uPD98402_INT_PCO) {
33179 (void) GET(PCOCR); /* clear interrupt cause */
33180- atomic_add(GET(HECCT),
33181+ atomic_add_unchecked(GET(HECCT),
33182 &PRIV(dev)->sonet_stats.uncorr_hcs);
33183 }
33184 if ((reason & uPD98402_INT_RFO) &&
33185@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
33186 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
33187 uPD98402_INT_LOS),PIMR); /* enable them */
33188 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
33189- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
33190- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
33191- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
33192+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
33193+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
33194+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
33195 return 0;
33196 }
33197
33198diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
33199index 969c3c2..9b72956 100644
33200--- a/drivers/atm/zatm.c
33201+++ b/drivers/atm/zatm.c
33202@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
33203 }
33204 if (!size) {
33205 dev_kfree_skb_irq(skb);
33206- if (vcc) atomic_inc(&vcc->stats->rx_err);
33207+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
33208 continue;
33209 }
33210 if (!atm_charge(vcc,skb->truesize)) {
33211@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
33212 skb->len = size;
33213 ATM_SKB(skb)->vcc = vcc;
33214 vcc->push(vcc,skb);
33215- atomic_inc(&vcc->stats->rx);
33216+ atomic_inc_unchecked(&vcc->stats->rx);
33217 }
33218 zout(pos & 0xffff,MTA(mbx));
33219 #if 0 /* probably a stupid idea */
33220@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
33221 skb_queue_head(&zatm_vcc->backlog,skb);
33222 break;
33223 }
33224- atomic_inc(&vcc->stats->tx);
33225+ atomic_inc_unchecked(&vcc->stats->tx);
33226 wake_up(&zatm_vcc->tx_wait);
33227 }
33228
33229diff --git a/drivers/base/bus.c b/drivers/base/bus.c
33230index 6856303..0602d70 100644
33231--- a/drivers/base/bus.c
33232+++ b/drivers/base/bus.c
33233@@ -1163,7 +1163,7 @@ int subsys_interface_register(struct subsys_interface *sif)
33234 return -EINVAL;
33235
33236 mutex_lock(&subsys->p->mutex);
33237- list_add_tail(&sif->node, &subsys->p->interfaces);
33238+ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
33239 if (sif->add_dev) {
33240 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
33241 while ((dev = subsys_dev_iter_next(&iter)))
33242@@ -1188,7 +1188,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
33243 subsys = sif->subsys;
33244
33245 mutex_lock(&subsys->p->mutex);
33246- list_del_init(&sif->node);
33247+ pax_list_del_init((struct list_head *)&sif->node);
33248 if (sif->remove_dev) {
33249 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
33250 while ((dev = subsys_dev_iter_next(&iter)))
33251diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
33252index 17cf7ca..7e553e1 100644
33253--- a/drivers/base/devtmpfs.c
33254+++ b/drivers/base/devtmpfs.c
33255@@ -347,7 +347,7 @@ int devtmpfs_mount(const char *mntdir)
33256 if (!thread)
33257 return 0;
33258
33259- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
33260+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
33261 if (err)
33262 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
33263 else
33264diff --git a/drivers/base/node.c b/drivers/base/node.c
33265index fac124a..66bd4ab 100644
33266--- a/drivers/base/node.c
33267+++ b/drivers/base/node.c
33268@@ -625,7 +625,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
33269 struct node_attr {
33270 struct device_attribute attr;
33271 enum node_states state;
33272-};
33273+} __do_const;
33274
33275 static ssize_t show_node_state(struct device *dev,
33276 struct device_attribute *attr, char *buf)
33277diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
33278index acc3a8d..981c236 100644
33279--- a/drivers/base/power/domain.c
33280+++ b/drivers/base/power/domain.c
33281@@ -1851,7 +1851,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
33282 {
33283 struct cpuidle_driver *cpuidle_drv;
33284 struct gpd_cpu_data *cpu_data;
33285- struct cpuidle_state *idle_state;
33286+ cpuidle_state_no_const *idle_state;
33287 int ret = 0;
33288
33289 if (IS_ERR_OR_NULL(genpd) || state < 0)
33290@@ -1919,7 +1919,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
33291 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
33292 {
33293 struct gpd_cpu_data *cpu_data;
33294- struct cpuidle_state *idle_state;
33295+ cpuidle_state_no_const *idle_state;
33296 int ret = 0;
33297
33298 if (IS_ERR_OR_NULL(genpd))
33299diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
33300index e6ee5e8..98ad7fc 100644
33301--- a/drivers/base/power/wakeup.c
33302+++ b/drivers/base/power/wakeup.c
33303@@ -29,14 +29,14 @@ bool events_check_enabled __read_mostly;
33304 * They need to be modified together atomically, so it's better to use one
33305 * atomic variable to hold them both.
33306 */
33307-static atomic_t combined_event_count = ATOMIC_INIT(0);
33308+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
33309
33310 #define IN_PROGRESS_BITS (sizeof(int) * 4)
33311 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
33312
33313 static void split_counters(unsigned int *cnt, unsigned int *inpr)
33314 {
33315- unsigned int comb = atomic_read(&combined_event_count);
33316+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
33317
33318 *cnt = (comb >> IN_PROGRESS_BITS);
33319 *inpr = comb & MAX_IN_PROGRESS;
33320@@ -389,7 +389,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
33321 ws->start_prevent_time = ws->last_time;
33322
33323 /* Increment the counter of events in progress. */
33324- cec = atomic_inc_return(&combined_event_count);
33325+ cec = atomic_inc_return_unchecked(&combined_event_count);
33326
33327 trace_wakeup_source_activate(ws->name, cec);
33328 }
33329@@ -515,7 +515,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
33330 * Increment the counter of registered wakeup events and decrement the
33331 * couter of wakeup events in progress simultaneously.
33332 */
33333- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
33334+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
33335 trace_wakeup_source_deactivate(ws->name, cec);
33336
33337 split_counters(&cnt, &inpr);
33338diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
33339index e8d11b6..7b1b36f 100644
33340--- a/drivers/base/syscore.c
33341+++ b/drivers/base/syscore.c
33342@@ -21,7 +21,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
33343 void register_syscore_ops(struct syscore_ops *ops)
33344 {
33345 mutex_lock(&syscore_ops_lock);
33346- list_add_tail(&ops->node, &syscore_ops_list);
33347+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
33348 mutex_unlock(&syscore_ops_lock);
33349 }
33350 EXPORT_SYMBOL_GPL(register_syscore_ops);
33351@@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
33352 void unregister_syscore_ops(struct syscore_ops *ops)
33353 {
33354 mutex_lock(&syscore_ops_lock);
33355- list_del(&ops->node);
33356+ pax_list_del((struct list_head *)&ops->node);
33357 mutex_unlock(&syscore_ops_lock);
33358 }
33359 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
33360diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
33361index ade58bc..867143d 100644
33362--- a/drivers/block/cciss.c
33363+++ b/drivers/block/cciss.c
33364@@ -1196,6 +1196,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
33365 int err;
33366 u32 cp;
33367
33368+ memset(&arg64, 0, sizeof(arg64));
33369+
33370 err = 0;
33371 err |=
33372 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
33373@@ -3005,7 +3007,7 @@ static void start_io(ctlr_info_t *h)
33374 while (!list_empty(&h->reqQ)) {
33375 c = list_entry(h->reqQ.next, CommandList_struct, list);
33376 /* can't do anything if fifo is full */
33377- if ((h->access.fifo_full(h))) {
33378+ if ((h->access->fifo_full(h))) {
33379 dev_warn(&h->pdev->dev, "fifo full\n");
33380 break;
33381 }
33382@@ -3015,7 +3017,7 @@ static void start_io(ctlr_info_t *h)
33383 h->Qdepth--;
33384
33385 /* Tell the controller execute command */
33386- h->access.submit_command(h, c);
33387+ h->access->submit_command(h, c);
33388
33389 /* Put job onto the completed Q */
33390 addQ(&h->cmpQ, c);
33391@@ -3441,17 +3443,17 @@ startio:
33392
33393 static inline unsigned long get_next_completion(ctlr_info_t *h)
33394 {
33395- return h->access.command_completed(h);
33396+ return h->access->command_completed(h);
33397 }
33398
33399 static inline int interrupt_pending(ctlr_info_t *h)
33400 {
33401- return h->access.intr_pending(h);
33402+ return h->access->intr_pending(h);
33403 }
33404
33405 static inline long interrupt_not_for_us(ctlr_info_t *h)
33406 {
33407- return ((h->access.intr_pending(h) == 0) ||
33408+ return ((h->access->intr_pending(h) == 0) ||
33409 (h->interrupts_enabled == 0));
33410 }
33411
33412@@ -3484,7 +3486,7 @@ static inline u32 next_command(ctlr_info_t *h)
33413 u32 a;
33414
33415 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
33416- return h->access.command_completed(h);
33417+ return h->access->command_completed(h);
33418
33419 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
33420 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
33421@@ -4041,7 +4043,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
33422 trans_support & CFGTBL_Trans_use_short_tags);
33423
33424 /* Change the access methods to the performant access methods */
33425- h->access = SA5_performant_access;
33426+ h->access = &SA5_performant_access;
33427 h->transMethod = CFGTBL_Trans_Performant;
33428
33429 return;
33430@@ -4310,7 +4312,7 @@ static int cciss_pci_init(ctlr_info_t *h)
33431 if (prod_index < 0)
33432 return -ENODEV;
33433 h->product_name = products[prod_index].product_name;
33434- h->access = *(products[prod_index].access);
33435+ h->access = products[prod_index].access;
33436
33437 if (cciss_board_disabled(h)) {
33438 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
33439@@ -5032,7 +5034,7 @@ reinit_after_soft_reset:
33440 }
33441
33442 /* make sure the board interrupts are off */
33443- h->access.set_intr_mask(h, CCISS_INTR_OFF);
33444+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
33445 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
33446 if (rc)
33447 goto clean2;
33448@@ -5082,7 +5084,7 @@ reinit_after_soft_reset:
33449 * fake ones to scoop up any residual completions.
33450 */
33451 spin_lock_irqsave(&h->lock, flags);
33452- h->access.set_intr_mask(h, CCISS_INTR_OFF);
33453+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
33454 spin_unlock_irqrestore(&h->lock, flags);
33455 free_irq(h->intr[h->intr_mode], h);
33456 rc = cciss_request_irq(h, cciss_msix_discard_completions,
33457@@ -5102,9 +5104,9 @@ reinit_after_soft_reset:
33458 dev_info(&h->pdev->dev, "Board READY.\n");
33459 dev_info(&h->pdev->dev,
33460 "Waiting for stale completions to drain.\n");
33461- h->access.set_intr_mask(h, CCISS_INTR_ON);
33462+ h->access->set_intr_mask(h, CCISS_INTR_ON);
33463 msleep(10000);
33464- h->access.set_intr_mask(h, CCISS_INTR_OFF);
33465+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
33466
33467 rc = controller_reset_failed(h->cfgtable);
33468 if (rc)
33469@@ -5127,7 +5129,7 @@ reinit_after_soft_reset:
33470 cciss_scsi_setup(h);
33471
33472 /* Turn the interrupts on so we can service requests */
33473- h->access.set_intr_mask(h, CCISS_INTR_ON);
33474+ h->access->set_intr_mask(h, CCISS_INTR_ON);
33475
33476 /* Get the firmware version */
33477 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
33478@@ -5199,7 +5201,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
33479 kfree(flush_buf);
33480 if (return_code != IO_OK)
33481 dev_warn(&h->pdev->dev, "Error flushing cache\n");
33482- h->access.set_intr_mask(h, CCISS_INTR_OFF);
33483+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
33484 free_irq(h->intr[h->intr_mode], h);
33485 }
33486
33487diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
33488index 7fda30e..eb5dfe0 100644
33489--- a/drivers/block/cciss.h
33490+++ b/drivers/block/cciss.h
33491@@ -101,7 +101,7 @@ struct ctlr_info
33492 /* information about each logical volume */
33493 drive_info_struct *drv[CISS_MAX_LUN];
33494
33495- struct access_method access;
33496+ struct access_method *access;
33497
33498 /* queue and queue Info */
33499 struct list_head reqQ;
33500diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
33501index 3f08713..56a586a 100644
33502--- a/drivers/block/cpqarray.c
33503+++ b/drivers/block/cpqarray.c
33504@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
33505 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
33506 goto Enomem4;
33507 }
33508- hba[i]->access.set_intr_mask(hba[i], 0);
33509+ hba[i]->access->set_intr_mask(hba[i], 0);
33510 if (request_irq(hba[i]->intr, do_ida_intr,
33511 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
33512 {
33513@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
33514 add_timer(&hba[i]->timer);
33515
33516 /* Enable IRQ now that spinlock and rate limit timer are set up */
33517- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
33518+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
33519
33520 for(j=0; j<NWD; j++) {
33521 struct gendisk *disk = ida_gendisk[i][j];
33522@@ -694,7 +694,7 @@ DBGINFO(
33523 for(i=0; i<NR_PRODUCTS; i++) {
33524 if (board_id == products[i].board_id) {
33525 c->product_name = products[i].product_name;
33526- c->access = *(products[i].access);
33527+ c->access = products[i].access;
33528 break;
33529 }
33530 }
33531@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
33532 hba[ctlr]->intr = intr;
33533 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
33534 hba[ctlr]->product_name = products[j].product_name;
33535- hba[ctlr]->access = *(products[j].access);
33536+ hba[ctlr]->access = products[j].access;
33537 hba[ctlr]->ctlr = ctlr;
33538 hba[ctlr]->board_id = board_id;
33539 hba[ctlr]->pci_dev = NULL; /* not PCI */
33540@@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
33541
33542 while((c = h->reqQ) != NULL) {
33543 /* Can't do anything if we're busy */
33544- if (h->access.fifo_full(h) == 0)
33545+ if (h->access->fifo_full(h) == 0)
33546 return;
33547
33548 /* Get the first entry from the request Q */
33549@@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
33550 h->Qdepth--;
33551
33552 /* Tell the controller to do our bidding */
33553- h->access.submit_command(h, c);
33554+ h->access->submit_command(h, c);
33555
33556 /* Get onto the completion Q */
33557 addQ(&h->cmpQ, c);
33558@@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
33559 unsigned long flags;
33560 __u32 a,a1;
33561
33562- istat = h->access.intr_pending(h);
33563+ istat = h->access->intr_pending(h);
33564 /* Is this interrupt for us? */
33565 if (istat == 0)
33566 return IRQ_NONE;
33567@@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
33568 */
33569 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
33570 if (istat & FIFO_NOT_EMPTY) {
33571- while((a = h->access.command_completed(h))) {
33572+ while((a = h->access->command_completed(h))) {
33573 a1 = a; a &= ~3;
33574 if ((c = h->cmpQ) == NULL)
33575 {
33576@@ -1449,11 +1449,11 @@ static int sendcmd(
33577 /*
33578 * Disable interrupt
33579 */
33580- info_p->access.set_intr_mask(info_p, 0);
33581+ info_p->access->set_intr_mask(info_p, 0);
33582 /* Make sure there is room in the command FIFO */
33583 /* Actually it should be completely empty at this time. */
33584 for (i = 200000; i > 0; i--) {
33585- temp = info_p->access.fifo_full(info_p);
33586+ temp = info_p->access->fifo_full(info_p);
33587 if (temp != 0) {
33588 break;
33589 }
33590@@ -1466,7 +1466,7 @@ DBG(
33591 /*
33592 * Send the cmd
33593 */
33594- info_p->access.submit_command(info_p, c);
33595+ info_p->access->submit_command(info_p, c);
33596 complete = pollcomplete(ctlr);
33597
33598 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
33599@@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
33600 * we check the new geometry. Then turn interrupts back on when
33601 * we're done.
33602 */
33603- host->access.set_intr_mask(host, 0);
33604+ host->access->set_intr_mask(host, 0);
33605 getgeometry(ctlr);
33606- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
33607+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
33608
33609 for(i=0; i<NWD; i++) {
33610 struct gendisk *disk = ida_gendisk[ctlr][i];
33611@@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
33612 /* Wait (up to 2 seconds) for a command to complete */
33613
33614 for (i = 200000; i > 0; i--) {
33615- done = hba[ctlr]->access.command_completed(hba[ctlr]);
33616+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
33617 if (done == 0) {
33618 udelay(10); /* a short fixed delay */
33619 } else
33620diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
33621index be73e9d..7fbf140 100644
33622--- a/drivers/block/cpqarray.h
33623+++ b/drivers/block/cpqarray.h
33624@@ -99,7 +99,7 @@ struct ctlr_info {
33625 drv_info_t drv[NWD];
33626 struct proc_dir_entry *proc;
33627
33628- struct access_method access;
33629+ struct access_method *access;
33630
33631 cmdlist_t *reqQ;
33632 cmdlist_t *cmpQ;
33633diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
33634index 6b51afa..17e1191 100644
33635--- a/drivers/block/drbd/drbd_int.h
33636+++ b/drivers/block/drbd/drbd_int.h
33637@@ -582,7 +582,7 @@ struct drbd_epoch {
33638 struct drbd_tconn *tconn;
33639 struct list_head list;
33640 unsigned int barrier_nr;
33641- atomic_t epoch_size; /* increased on every request added. */
33642+ atomic_unchecked_t epoch_size; /* increased on every request added. */
33643 atomic_t active; /* increased on every req. added, and dec on every finished. */
33644 unsigned long flags;
33645 };
33646@@ -1011,7 +1011,7 @@ struct drbd_conf {
33647 int al_tr_cycle;
33648 int al_tr_pos; /* position of the next transaction in the journal */
33649 wait_queue_head_t seq_wait;
33650- atomic_t packet_seq;
33651+ atomic_unchecked_t packet_seq;
33652 unsigned int peer_seq;
33653 spinlock_t peer_seq_lock;
33654 unsigned int minor;
33655@@ -1527,7 +1527,7 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
33656 char __user *uoptval;
33657 int err;
33658
33659- uoptval = (char __user __force *)optval;
33660+ uoptval = (char __force_user *)optval;
33661
33662 set_fs(KERNEL_DS);
33663 if (level == SOL_SOCKET)
33664diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
33665index 8c13eeb..217adee 100644
33666--- a/drivers/block/drbd/drbd_main.c
33667+++ b/drivers/block/drbd/drbd_main.c
33668@@ -1317,7 +1317,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
33669 p->sector = sector;
33670 p->block_id = block_id;
33671 p->blksize = blksize;
33672- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
33673+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
33674 return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
33675 }
33676
33677@@ -1619,7 +1619,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
33678 return -EIO;
33679 p->sector = cpu_to_be64(req->i.sector);
33680 p->block_id = (unsigned long)req;
33681- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
33682+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
33683 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
33684 if (mdev->state.conn >= C_SYNC_SOURCE &&
33685 mdev->state.conn <= C_PAUSED_SYNC_T)
33686@@ -2574,8 +2574,8 @@ void conn_destroy(struct kref *kref)
33687 {
33688 struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);
33689
33690- if (atomic_read(&tconn->current_epoch->epoch_size) != 0)
33691- conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
33692+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size) != 0)
33693+ conn_err(tconn, "epoch_size:%d\n", atomic_read_unchecked(&tconn->current_epoch->epoch_size));
33694 kfree(tconn->current_epoch);
33695
33696 idr_destroy(&tconn->volumes);
33697diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
33698index a9eccfc..f5efe87 100644
33699--- a/drivers/block/drbd/drbd_receiver.c
33700+++ b/drivers/block/drbd/drbd_receiver.c
33701@@ -833,7 +833,7 @@ int drbd_connected(struct drbd_conf *mdev)
33702 {
33703 int err;
33704
33705- atomic_set(&mdev->packet_seq, 0);
33706+ atomic_set_unchecked(&mdev->packet_seq, 0);
33707 mdev->peer_seq = 0;
33708
33709 mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
33710@@ -1191,7 +1191,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
33711 do {
33712 next_epoch = NULL;
33713
33714- epoch_size = atomic_read(&epoch->epoch_size);
33715+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
33716
33717 switch (ev & ~EV_CLEANUP) {
33718 case EV_PUT:
33719@@ -1231,7 +1231,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
33720 rv = FE_DESTROYED;
33721 } else {
33722 epoch->flags = 0;
33723- atomic_set(&epoch->epoch_size, 0);
33724+ atomic_set_unchecked(&epoch->epoch_size, 0);
33725 /* atomic_set(&epoch->active, 0); is already zero */
33726 if (rv == FE_STILL_LIVE)
33727 rv = FE_RECYCLED;
33728@@ -1449,7 +1449,7 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
33729 conn_wait_active_ee_empty(tconn);
33730 drbd_flush(tconn);
33731
33732- if (atomic_read(&tconn->current_epoch->epoch_size)) {
33733+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
33734 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
33735 if (epoch)
33736 break;
33737@@ -1462,11 +1462,11 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
33738 }
33739
33740 epoch->flags = 0;
33741- atomic_set(&epoch->epoch_size, 0);
33742+ atomic_set_unchecked(&epoch->epoch_size, 0);
33743 atomic_set(&epoch->active, 0);
33744
33745 spin_lock(&tconn->epoch_lock);
33746- if (atomic_read(&tconn->current_epoch->epoch_size)) {
33747+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
33748 list_add(&epoch->list, &tconn->current_epoch->list);
33749 tconn->current_epoch = epoch;
33750 tconn->epochs++;
33751@@ -2170,7 +2170,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
33752
33753 err = wait_for_and_update_peer_seq(mdev, peer_seq);
33754 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
33755- atomic_inc(&tconn->current_epoch->epoch_size);
33756+ atomic_inc_unchecked(&tconn->current_epoch->epoch_size);
33757 err2 = drbd_drain_block(mdev, pi->size);
33758 if (!err)
33759 err = err2;
33760@@ -2204,7 +2204,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
33761
33762 spin_lock(&tconn->epoch_lock);
33763 peer_req->epoch = tconn->current_epoch;
33764- atomic_inc(&peer_req->epoch->epoch_size);
33765+ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
33766 atomic_inc(&peer_req->epoch->active);
33767 spin_unlock(&tconn->epoch_lock);
33768
33769@@ -4346,7 +4346,7 @@ struct data_cmd {
33770 int expect_payload;
33771 size_t pkt_size;
33772 int (*fn)(struct drbd_tconn *, struct packet_info *);
33773-};
33774+} __do_const;
33775
33776 static struct data_cmd drbd_cmd_handler[] = {
33777 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
33778@@ -4466,7 +4466,7 @@ static void conn_disconnect(struct drbd_tconn *tconn)
33779 if (!list_empty(&tconn->current_epoch->list))
33780 conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
33781 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
33782- atomic_set(&tconn->current_epoch->epoch_size, 0);
33783+ atomic_set_unchecked(&tconn->current_epoch->epoch_size, 0);
33784 tconn->send.seen_any_write_yet = false;
33785
33786 conn_info(tconn, "Connection closed\n");
33787@@ -5222,7 +5222,7 @@ static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
33788 struct asender_cmd {
33789 size_t pkt_size;
33790 int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
33791-};
33792+} __do_const;
33793
33794 static struct asender_cmd asender_tbl[] = {
33795 [P_PING] = { 0, got_Ping },
33796diff --git a/drivers/block/loop.c b/drivers/block/loop.c
33797index f74f2c0..bb668af 100644
33798--- a/drivers/block/loop.c
33799+++ b/drivers/block/loop.c
33800@@ -226,7 +226,7 @@ static int __do_lo_send_write(struct file *file,
33801 mm_segment_t old_fs = get_fs();
33802
33803 set_fs(get_ds());
33804- bw = file->f_op->write(file, buf, len, &pos);
33805+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
33806 set_fs(old_fs);
33807 if (likely(bw == len))
33808 return 0;
33809diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
33810index d620b44..587561e 100644
33811--- a/drivers/cdrom/cdrom.c
33812+++ b/drivers/cdrom/cdrom.c
33813@@ -416,7 +416,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
33814 ENSURE(reset, CDC_RESET);
33815 ENSURE(generic_packet, CDC_GENERIC_PACKET);
33816 cdi->mc_flags = 0;
33817- cdo->n_minors = 0;
33818 cdi->options = CDO_USE_FFLAGS;
33819
33820 if (autoclose==1 && CDROM_CAN(CDC_CLOSE_TRAY))
33821@@ -436,8 +435,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
33822 else
33823 cdi->cdda_method = CDDA_OLD;
33824
33825- if (!cdo->generic_packet)
33826- cdo->generic_packet = cdrom_dummy_generic_packet;
33827+ if (!cdo->generic_packet) {
33828+ pax_open_kernel();
33829+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
33830+ pax_close_kernel();
33831+ }
33832
33833 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
33834 mutex_lock(&cdrom_mutex);
33835@@ -458,7 +460,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
33836 if (cdi->exit)
33837 cdi->exit(cdi);
33838
33839- cdi->ops->n_minors--;
33840 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
33841 }
33842
33843diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
33844index d59cdcb..11afddf 100644
33845--- a/drivers/cdrom/gdrom.c
33846+++ b/drivers/cdrom/gdrom.c
33847@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
33848 .audio_ioctl = gdrom_audio_ioctl,
33849 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
33850 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
33851- .n_minors = 1,
33852 };
33853
33854 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
33855diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
33856index 72bedad..8181ce1 100644
33857--- a/drivers/char/Kconfig
33858+++ b/drivers/char/Kconfig
33859@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
33860
33861 config DEVKMEM
33862 bool "/dev/kmem virtual device support"
33863- default y
33864+ default n
33865+ depends on !GRKERNSEC_KMEM
33866 help
33867 Say Y here if you want to support the /dev/kmem device. The
33868 /dev/kmem device is rarely used, but can be used for certain
33869@@ -581,6 +582,7 @@ config DEVPORT
33870 bool
33871 depends on !M68K
33872 depends on ISA || PCI
33873+ depends on !GRKERNSEC_KMEM
33874 default y
33875
33876 source "drivers/s390/char/Kconfig"
33877diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
33878index 2e04433..22afc64 100644
33879--- a/drivers/char/agp/frontend.c
33880+++ b/drivers/char/agp/frontend.c
33881@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
33882 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
33883 return -EFAULT;
33884
33885- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
33886+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
33887 return -EFAULT;
33888
33889 client = agp_find_client_by_pid(reserve.pid);
33890diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
33891index 21cb980..f15107c 100644
33892--- a/drivers/char/genrtc.c
33893+++ b/drivers/char/genrtc.c
33894@@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct file *file,
33895 switch (cmd) {
33896
33897 case RTC_PLL_GET:
33898+ memset(&pll, 0, sizeof(pll));
33899 if (get_rtc_pll(&pll))
33900 return -EINVAL;
33901 else
33902diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
33903index fe6d4be..89f32100 100644
33904--- a/drivers/char/hpet.c
33905+++ b/drivers/char/hpet.c
33906@@ -571,7 +571,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
33907 }
33908
33909 static int
33910-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
33911+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
33912 struct hpet_info *info)
33913 {
33914 struct hpet_timer __iomem *timer;
33915diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
33916index 053201b0..8335cce 100644
33917--- a/drivers/char/ipmi/ipmi_msghandler.c
33918+++ b/drivers/char/ipmi/ipmi_msghandler.c
33919@@ -420,7 +420,7 @@ struct ipmi_smi {
33920 struct proc_dir_entry *proc_dir;
33921 char proc_dir_name[10];
33922
33923- atomic_t stats[IPMI_NUM_STATS];
33924+ atomic_unchecked_t stats[IPMI_NUM_STATS];
33925
33926 /*
33927 * run_to_completion duplicate of smb_info, smi_info
33928@@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
33929
33930
33931 #define ipmi_inc_stat(intf, stat) \
33932- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
33933+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
33934 #define ipmi_get_stat(intf, stat) \
33935- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
33936+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
33937
33938 static int is_lan_addr(struct ipmi_addr *addr)
33939 {
33940@@ -2884,7 +2884,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
33941 INIT_LIST_HEAD(&intf->cmd_rcvrs);
33942 init_waitqueue_head(&intf->waitq);
33943 for (i = 0; i < IPMI_NUM_STATS; i++)
33944- atomic_set(&intf->stats[i], 0);
33945+ atomic_set_unchecked(&intf->stats[i], 0);
33946
33947 intf->proc_dir = NULL;
33948
33949diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
33950index 1c7fdcd..4899100 100644
33951--- a/drivers/char/ipmi/ipmi_si_intf.c
33952+++ b/drivers/char/ipmi/ipmi_si_intf.c
33953@@ -275,7 +275,7 @@ struct smi_info {
33954 unsigned char slave_addr;
33955
33956 /* Counters and things for the proc filesystem. */
33957- atomic_t stats[SI_NUM_STATS];
33958+ atomic_unchecked_t stats[SI_NUM_STATS];
33959
33960 struct task_struct *thread;
33961
33962@@ -284,9 +284,9 @@ struct smi_info {
33963 };
33964
33965 #define smi_inc_stat(smi, stat) \
33966- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
33967+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
33968 #define smi_get_stat(smi, stat) \
33969- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
33970+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
33971
33972 #define SI_MAX_PARMS 4
33973
33974@@ -3225,7 +3225,7 @@ static int try_smi_init(struct smi_info *new_smi)
33975 atomic_set(&new_smi->req_events, 0);
33976 new_smi->run_to_completion = 0;
33977 for (i = 0; i < SI_NUM_STATS; i++)
33978- atomic_set(&new_smi->stats[i], 0);
33979+ atomic_set_unchecked(&new_smi->stats[i], 0);
33980
33981 new_smi->interrupt_disabled = 1;
33982 atomic_set(&new_smi->stop_operation, 0);
33983diff --git a/drivers/char/mem.c b/drivers/char/mem.c
33984index c6fa3bc..4ca3e42 100644
33985--- a/drivers/char/mem.c
33986+++ b/drivers/char/mem.c
33987@@ -18,6 +18,7 @@
33988 #include <linux/raw.h>
33989 #include <linux/tty.h>
33990 #include <linux/capability.h>
33991+#include <linux/security.h>
33992 #include <linux/ptrace.h>
33993 #include <linux/device.h>
33994 #include <linux/highmem.h>
33995@@ -37,6 +38,10 @@
33996
33997 #define DEVPORT_MINOR 4
33998
33999+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
34000+extern const struct file_operations grsec_fops;
34001+#endif
34002+
34003 static inline unsigned long size_inside_page(unsigned long start,
34004 unsigned long size)
34005 {
34006@@ -68,9 +73,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
34007
34008 while (cursor < to) {
34009 if (!devmem_is_allowed(pfn)) {
34010+#ifdef CONFIG_GRKERNSEC_KMEM
34011+ gr_handle_mem_readwrite(from, to);
34012+#else
34013 printk(KERN_INFO
34014 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
34015 current->comm, from, to);
34016+#endif
34017 return 0;
34018 }
34019 cursor += PAGE_SIZE;
34020@@ -78,6 +87,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
34021 }
34022 return 1;
34023 }
34024+#elif defined(CONFIG_GRKERNSEC_KMEM)
34025+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
34026+{
34027+ return 0;
34028+}
34029 #else
34030 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
34031 {
34032@@ -120,6 +134,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
34033
34034 while (count > 0) {
34035 unsigned long remaining;
34036+ char *temp;
34037
34038 sz = size_inside_page(p, count);
34039
34040@@ -135,7 +150,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
34041 if (!ptr)
34042 return -EFAULT;
34043
34044- remaining = copy_to_user(buf, ptr, sz);
34045+#ifdef CONFIG_PAX_USERCOPY
34046+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
34047+ if (!temp) {
34048+ unxlate_dev_mem_ptr(p, ptr);
34049+ return -ENOMEM;
34050+ }
34051+ memcpy(temp, ptr, sz);
34052+#else
34053+ temp = ptr;
34054+#endif
34055+
34056+ remaining = copy_to_user(buf, temp, sz);
34057+
34058+#ifdef CONFIG_PAX_USERCOPY
34059+ kfree(temp);
34060+#endif
34061+
34062 unxlate_dev_mem_ptr(p, ptr);
34063 if (remaining)
34064 return -EFAULT;
34065@@ -398,9 +429,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
34066 size_t count, loff_t *ppos)
34067 {
34068 unsigned long p = *ppos;
34069- ssize_t low_count, read, sz;
34070+ ssize_t low_count, read, sz, err = 0;
34071 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
34072- int err = 0;
34073
34074 read = 0;
34075 if (p < (unsigned long) high_memory) {
34076@@ -422,6 +452,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
34077 }
34078 #endif
34079 while (low_count > 0) {
34080+ char *temp;
34081+
34082 sz = size_inside_page(p, low_count);
34083
34084 /*
34085@@ -431,7 +463,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
34086 */
34087 kbuf = xlate_dev_kmem_ptr((char *)p);
34088
34089- if (copy_to_user(buf, kbuf, sz))
34090+#ifdef CONFIG_PAX_USERCOPY
34091+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
34092+ if (!temp)
34093+ return -ENOMEM;
34094+ memcpy(temp, kbuf, sz);
34095+#else
34096+ temp = kbuf;
34097+#endif
34098+
34099+ err = copy_to_user(buf, temp, sz);
34100+
34101+#ifdef CONFIG_PAX_USERCOPY
34102+ kfree(temp);
34103+#endif
34104+
34105+ if (err)
34106 return -EFAULT;
34107 buf += sz;
34108 p += sz;
34109@@ -833,6 +880,9 @@ static const struct memdev {
34110 #ifdef CONFIG_CRASH_DUMP
34111 [12] = { "oldmem", 0, &oldmem_fops, NULL },
34112 #endif
34113+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
34114+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
34115+#endif
34116 };
34117
34118 static int memory_open(struct inode *inode, struct file *filp)
34119diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
34120index 9df78e2..01ba9ae 100644
34121--- a/drivers/char/nvram.c
34122+++ b/drivers/char/nvram.c
34123@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
34124
34125 spin_unlock_irq(&rtc_lock);
34126
34127- if (copy_to_user(buf, contents, tmp - contents))
34128+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
34129 return -EFAULT;
34130
34131 *ppos = i;
34132diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
34133index b66eaa0..2619d1b 100644
34134--- a/drivers/char/pcmcia/synclink_cs.c
34135+++ b/drivers/char/pcmcia/synclink_cs.c
34136@@ -2348,9 +2348,9 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
34137
34138 if (debug_level >= DEBUG_LEVEL_INFO)
34139 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
34140- __FILE__,__LINE__, info->device_name, port->count);
34141+ __FILE__,__LINE__, info->device_name, atomic_read(&port->count));
34142
34143- WARN_ON(!port->count);
34144+ WARN_ON(!atomic_read(&port->count));
34145
34146 if (tty_port_close_start(port, tty, filp) == 0)
34147 goto cleanup;
34148@@ -2368,7 +2368,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
34149 cleanup:
34150 if (debug_level >= DEBUG_LEVEL_INFO)
34151 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__,__LINE__,
34152- tty->driver->name, port->count);
34153+ tty->driver->name, atomic_read(&port->count));
34154 }
34155
34156 /* Wait until the transmitter is empty.
34157@@ -2510,7 +2510,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
34158
34159 if (debug_level >= DEBUG_LEVEL_INFO)
34160 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
34161- __FILE__,__LINE__,tty->driver->name, port->count);
34162+ __FILE__,__LINE__,tty->driver->name, atomic_read(&port->count));
34163
34164 /* If port is closing, signal caller to try again */
34165 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
34166@@ -2530,11 +2530,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
34167 goto cleanup;
34168 }
34169 spin_lock(&port->lock);
34170- port->count++;
34171+ atomic_inc(&port->count);
34172 spin_unlock(&port->lock);
34173 spin_unlock_irqrestore(&info->netlock, flags);
34174
34175- if (port->count == 1) {
34176+ if (atomic_read(&port->count) == 1) {
34177 /* 1st open on this device, init hardware */
34178 retval = startup(info, tty);
34179 if (retval < 0)
34180@@ -3889,7 +3889,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
34181 unsigned short new_crctype;
34182
34183 /* return error if TTY interface open */
34184- if (info->port.count)
34185+ if (atomic_read(&info->port.count))
34186 return -EBUSY;
34187
34188 switch (encoding)
34189@@ -3992,7 +3992,7 @@ static int hdlcdev_open(struct net_device *dev)
34190
34191 /* arbitrate between network and tty opens */
34192 spin_lock_irqsave(&info->netlock, flags);
34193- if (info->port.count != 0 || info->netcount != 0) {
34194+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
34195 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
34196 spin_unlock_irqrestore(&info->netlock, flags);
34197 return -EBUSY;
34198@@ -4081,7 +4081,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
34199 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
34200
34201 /* return error if TTY interface open */
34202- if (info->port.count)
34203+ if (atomic_read(&info->port.count))
34204 return -EBUSY;
34205
34206 if (cmd != SIOCWANDEV)
34207diff --git a/drivers/char/random.c b/drivers/char/random.c
34208index 57d4b15..253207b 100644
34209--- a/drivers/char/random.c
34210+++ b/drivers/char/random.c
34211@@ -272,8 +272,13 @@
34212 /*
34213 * Configuration information
34214 */
34215+#ifdef CONFIG_GRKERNSEC_RANDNET
34216+#define INPUT_POOL_WORDS 512
34217+#define OUTPUT_POOL_WORDS 128
34218+#else
34219 #define INPUT_POOL_WORDS 128
34220 #define OUTPUT_POOL_WORDS 32
34221+#endif
34222 #define SEC_XFER_SIZE 512
34223 #define EXTRACT_SIZE 10
34224
34225@@ -313,10 +318,17 @@ static struct poolinfo {
34226 int poolwords;
34227 int tap1, tap2, tap3, tap4, tap5;
34228 } poolinfo_table[] = {
34229+#ifdef CONFIG_GRKERNSEC_RANDNET
34230+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
34231+ { 512, 411, 308, 208, 104, 1 },
34232+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
34233+ { 128, 103, 76, 51, 25, 1 },
34234+#else
34235 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
34236 { 128, 103, 76, 51, 25, 1 },
34237 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
34238 { 32, 26, 20, 14, 7, 1 },
34239+#endif
34240 #if 0
34241 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
34242 { 2048, 1638, 1231, 819, 411, 1 },
34243@@ -524,8 +536,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
34244 input_rotate += i ? 7 : 14;
34245 }
34246
34247- ACCESS_ONCE(r->input_rotate) = input_rotate;
34248- ACCESS_ONCE(r->add_ptr) = i;
34249+ ACCESS_ONCE_RW(r->input_rotate) = input_rotate;
34250+ ACCESS_ONCE_RW(r->add_ptr) = i;
34251 smp_wmb();
34252
34253 if (out)
34254@@ -1024,7 +1036,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
34255
34256 extract_buf(r, tmp);
34257 i = min_t(int, nbytes, EXTRACT_SIZE);
34258- if (copy_to_user(buf, tmp, i)) {
34259+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
34260 ret = -EFAULT;
34261 break;
34262 }
34263@@ -1360,7 +1372,7 @@ EXPORT_SYMBOL(generate_random_uuid);
34264 #include <linux/sysctl.h>
34265
34266 static int min_read_thresh = 8, min_write_thresh;
34267-static int max_read_thresh = INPUT_POOL_WORDS * 32;
34268+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
34269 static int max_write_thresh = INPUT_POOL_WORDS * 32;
34270 static char sysctl_bootid[16];
34271
34272@@ -1376,7 +1388,7 @@ static char sysctl_bootid[16];
34273 static int proc_do_uuid(ctl_table *table, int write,
34274 void __user *buffer, size_t *lenp, loff_t *ppos)
34275 {
34276- ctl_table fake_table;
34277+ ctl_table_no_const fake_table;
34278 unsigned char buf[64], tmp_uuid[16], *uuid;
34279
34280 uuid = table->data;
34281diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
34282index d780295..b29f3a8 100644
34283--- a/drivers/char/sonypi.c
34284+++ b/drivers/char/sonypi.c
34285@@ -54,6 +54,7 @@
34286
34287 #include <asm/uaccess.h>
34288 #include <asm/io.h>
34289+#include <asm/local.h>
34290
34291 #include <linux/sonypi.h>
34292
34293@@ -490,7 +491,7 @@ static struct sonypi_device {
34294 spinlock_t fifo_lock;
34295 wait_queue_head_t fifo_proc_list;
34296 struct fasync_struct *fifo_async;
34297- int open_count;
34298+ local_t open_count;
34299 int model;
34300 struct input_dev *input_jog_dev;
34301 struct input_dev *input_key_dev;
34302@@ -897,7 +898,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
34303 static int sonypi_misc_release(struct inode *inode, struct file *file)
34304 {
34305 mutex_lock(&sonypi_device.lock);
34306- sonypi_device.open_count--;
34307+ local_dec(&sonypi_device.open_count);
34308 mutex_unlock(&sonypi_device.lock);
34309 return 0;
34310 }
34311@@ -906,9 +907,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
34312 {
34313 mutex_lock(&sonypi_device.lock);
34314 /* Flush input queue on first open */
34315- if (!sonypi_device.open_count)
34316+ if (!local_read(&sonypi_device.open_count))
34317 kfifo_reset(&sonypi_device.fifo);
34318- sonypi_device.open_count++;
34319+ local_inc(&sonypi_device.open_count);
34320 mutex_unlock(&sonypi_device.lock);
34321
34322 return 0;
34323diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
34324index 93211df..c7805f7 100644
34325--- a/drivers/char/tpm/tpm.c
34326+++ b/drivers/char/tpm/tpm.c
34327@@ -410,7 +410,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
34328 chip->vendor.req_complete_val)
34329 goto out_recv;
34330
34331- if ((status == chip->vendor.req_canceled)) {
34332+ if (status == chip->vendor.req_canceled) {
34333 dev_err(chip->dev, "Operation Canceled\n");
34334 rc = -ECANCELED;
34335 goto out;
34336diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
34337index 56051d0..11cf3b7 100644
34338--- a/drivers/char/tpm/tpm_acpi.c
34339+++ b/drivers/char/tpm/tpm_acpi.c
34340@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
34341 virt = acpi_os_map_memory(start, len);
34342 if (!virt) {
34343 kfree(log->bios_event_log);
34344+ log->bios_event_log = NULL;
34345 printk("%s: ERROR - Unable to map memory\n", __func__);
34346 return -EIO;
34347 }
34348
34349- memcpy_fromio(log->bios_event_log, virt, len);
34350+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
34351
34352 acpi_os_unmap_memory(virt, len);
34353 return 0;
34354diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
34355index 84ddc55..1d32f1e 100644
34356--- a/drivers/char/tpm/tpm_eventlog.c
34357+++ b/drivers/char/tpm/tpm_eventlog.c
34358@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
34359 event = addr;
34360
34361 if ((event->event_type == 0 && event->event_size == 0) ||
34362- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
34363+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
34364 return NULL;
34365
34366 return addr;
34367@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
34368 return NULL;
34369
34370 if ((event->event_type == 0 && event->event_size == 0) ||
34371- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
34372+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
34373 return NULL;
34374
34375 (*pos)++;
34376@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
34377 int i;
34378
34379 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
34380- seq_putc(m, data[i]);
34381+ if (!seq_putc(m, data[i]))
34382+ return -EFAULT;
34383
34384 return 0;
34385 }
34386diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
34387index a4b7aa0..2faa0bc 100644
34388--- a/drivers/char/virtio_console.c
34389+++ b/drivers/char/virtio_console.c
34390@@ -685,7 +685,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
34391 if (to_user) {
34392 ssize_t ret;
34393
34394- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
34395+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
34396 if (ret)
34397 return -EFAULT;
34398 } else {
34399@@ -784,7 +784,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
34400 if (!port_has_data(port) && !port->host_connected)
34401 return 0;
34402
34403- return fill_readbuf(port, ubuf, count, true);
34404+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
34405 }
34406
34407 static int wait_port_writable(struct port *port, bool nonblock)
34408diff --git a/drivers/clocksource/arm_generic.c b/drivers/clocksource/arm_generic.c
34409index 8ae1a61..9c00613 100644
34410--- a/drivers/clocksource/arm_generic.c
34411+++ b/drivers/clocksource/arm_generic.c
34412@@ -181,7 +181,7 @@ static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
34413 return NOTIFY_OK;
34414 }
34415
34416-static struct notifier_block __cpuinitdata arch_timer_cpu_nb = {
34417+static struct notifier_block arch_timer_cpu_nb = {
34418 .notifier_call = arch_timer_cpu_notify,
34419 };
34420
34421diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
34422index 7b0d49d..134fac9 100644
34423--- a/drivers/cpufreq/acpi-cpufreq.c
34424+++ b/drivers/cpufreq/acpi-cpufreq.c
34425@@ -172,7 +172,7 @@ static ssize_t show_global_boost(struct kobject *kobj,
34426 return sprintf(buf, "%u\n", boost_enabled);
34427 }
34428
34429-static struct global_attr global_boost = __ATTR(boost, 0644,
34430+static global_attr_no_const global_boost = __ATTR(boost, 0644,
34431 show_global_boost,
34432 store_global_boost);
34433
34434@@ -712,8 +712,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
34435 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
34436 per_cpu(acfreq_data, cpu) = data;
34437
34438- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
34439- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
34440+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
34441+ pax_open_kernel();
34442+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
34443+ pax_close_kernel();
34444+ }
34445
34446 result = acpi_processor_register_performance(data->acpi_data, cpu);
34447 if (result)
34448@@ -835,7 +838,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
34449 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
34450 break;
34451 case ACPI_ADR_SPACE_FIXED_HARDWARE:
34452- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
34453+ pax_open_kernel();
34454+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
34455+ pax_close_kernel();
34456 policy->cur = get_cur_freq_on_cpu(cpu);
34457 break;
34458 default:
34459@@ -846,8 +851,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
34460 acpi_processor_notify_smm(THIS_MODULE);
34461
34462 /* Check for APERF/MPERF support in hardware */
34463- if (boot_cpu_has(X86_FEATURE_APERFMPERF))
34464- acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
34465+ if (boot_cpu_has(X86_FEATURE_APERFMPERF)) {
34466+ pax_open_kernel();
34467+ *(void **)&acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
34468+ pax_close_kernel();
34469+ }
34470
34471 pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
34472 for (i = 0; i < perf->state_count; i++)
34473diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
34474index 1f93dbd..305cef1 100644
34475--- a/drivers/cpufreq/cpufreq.c
34476+++ b/drivers/cpufreq/cpufreq.c
34477@@ -1843,7 +1843,7 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
34478 return NOTIFY_OK;
34479 }
34480
34481-static struct notifier_block __refdata cpufreq_cpu_notifier = {
34482+static struct notifier_block cpufreq_cpu_notifier = {
34483 .notifier_call = cpufreq_cpu_callback,
34484 };
34485
34486@@ -1875,8 +1875,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
34487
34488 pr_debug("trying to register driver %s\n", driver_data->name);
34489
34490- if (driver_data->setpolicy)
34491- driver_data->flags |= CPUFREQ_CONST_LOOPS;
34492+ if (driver_data->setpolicy) {
34493+ pax_open_kernel();
34494+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
34495+ pax_close_kernel();
34496+ }
34497
34498 spin_lock_irqsave(&cpufreq_driver_lock, flags);
34499 if (cpufreq_driver) {
34500diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
34501index 6c5f1d3..c7e2f35e 100644
34502--- a/drivers/cpufreq/cpufreq_governor.c
34503+++ b/drivers/cpufreq/cpufreq_governor.c
34504@@ -243,7 +243,7 @@ int cpufreq_governor_dbs(struct dbs_data *dbs_data,
34505 * governor, thus we are bound to jiffes/HZ
34506 */
34507 if (dbs_data->governor == GOV_CONSERVATIVE) {
34508- struct cs_ops *ops = dbs_data->gov_ops;
34509+ const struct cs_ops *ops = dbs_data->gov_ops;
34510
34511 cpufreq_register_notifier(ops->notifier_block,
34512 CPUFREQ_TRANSITION_NOTIFIER);
34513@@ -251,7 +251,7 @@ int cpufreq_governor_dbs(struct dbs_data *dbs_data,
34514 dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
34515 jiffies_to_usecs(10);
34516 } else {
34517- struct od_ops *ops = dbs_data->gov_ops;
34518+ const struct od_ops *ops = dbs_data->gov_ops;
34519
34520 od_tuners->io_is_busy = ops->io_busy();
34521 }
34522@@ -268,7 +268,7 @@ second_time:
34523 cs_dbs_info->enable = 1;
34524 cs_dbs_info->requested_freq = policy->cur;
34525 } else {
34526- struct od_ops *ops = dbs_data->gov_ops;
34527+ const struct od_ops *ops = dbs_data->gov_ops;
34528 od_dbs_info->rate_mult = 1;
34529 od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
34530 ops->powersave_bias_init_cpu(cpu);
34531@@ -289,7 +289,7 @@ second_time:
34532 mutex_destroy(&cpu_cdbs->timer_mutex);
34533 dbs_data->enable--;
34534 if (!dbs_data->enable) {
34535- struct cs_ops *ops = dbs_data->gov_ops;
34536+ const struct cs_ops *ops = dbs_data->gov_ops;
34537
34538 sysfs_remove_group(cpufreq_global_kobject,
34539 dbs_data->attr_group);
34540diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
34541index f661654..6c8e638 100644
34542--- a/drivers/cpufreq/cpufreq_governor.h
34543+++ b/drivers/cpufreq/cpufreq_governor.h
34544@@ -142,7 +142,7 @@ struct dbs_data {
34545 void (*gov_check_cpu)(int cpu, unsigned int load);
34546
34547 /* Governor specific ops, see below */
34548- void *gov_ops;
34549+ const void *gov_ops;
34550 };
34551
34552 /* Governor specific ops, will be passed to dbs_data->gov_ops */
34553diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
34554index 9d7732b..0b1a793 100644
34555--- a/drivers/cpufreq/cpufreq_stats.c
34556+++ b/drivers/cpufreq/cpufreq_stats.c
34557@@ -340,7 +340,7 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
34558 }
34559
34560 /* priority=1 so this will get called before cpufreq_remove_dev */
34561-static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
34562+static struct notifier_block cpufreq_stat_cpu_notifier = {
34563 .notifier_call = cpufreq_stat_cpu_callback,
34564 .priority = 1,
34565 };
34566diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
34567index 827629c9..0bc6a03 100644
34568--- a/drivers/cpufreq/p4-clockmod.c
34569+++ b/drivers/cpufreq/p4-clockmod.c
34570@@ -167,10 +167,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
34571 case 0x0F: /* Core Duo */
34572 case 0x16: /* Celeron Core */
34573 case 0x1C: /* Atom */
34574- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34575+ pax_open_kernel();
34576+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34577+ pax_close_kernel();
34578 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
34579 case 0x0D: /* Pentium M (Dothan) */
34580- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34581+ pax_open_kernel();
34582+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34583+ pax_close_kernel();
34584 /* fall through */
34585 case 0x09: /* Pentium M (Banias) */
34586 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
34587@@ -182,7 +186,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
34588
34589 /* on P-4s, the TSC runs with constant frequency independent whether
34590 * throttling is active or not. */
34591- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34592+ pax_open_kernel();
34593+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34594+ pax_close_kernel();
34595
34596 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
34597 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
34598diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
34599index 3a953d5..f5993f6 100644
34600--- a/drivers/cpufreq/speedstep-centrino.c
34601+++ b/drivers/cpufreq/speedstep-centrino.c
34602@@ -353,8 +353,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
34603 !cpu_has(cpu, X86_FEATURE_EST))
34604 return -ENODEV;
34605
34606- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
34607- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
34608+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
34609+ pax_open_kernel();
34610+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
34611+ pax_close_kernel();
34612+ }
34613
34614 if (policy->cpu != 0)
34615 return -ENODEV;
34616diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
34617index e1f6860..f8de20b 100644
34618--- a/drivers/cpuidle/cpuidle.c
34619+++ b/drivers/cpuidle/cpuidle.c
34620@@ -279,7 +279,7 @@ static int poll_idle(struct cpuidle_device *dev,
34621
34622 static void poll_idle_init(struct cpuidle_driver *drv)
34623 {
34624- struct cpuidle_state *state = &drv->states[0];
34625+ cpuidle_state_no_const *state = &drv->states[0];
34626
34627 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
34628 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
34629diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
34630index ea2f8e7..70ac501 100644
34631--- a/drivers/cpuidle/governor.c
34632+++ b/drivers/cpuidle/governor.c
34633@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
34634 mutex_lock(&cpuidle_lock);
34635 if (__cpuidle_find_governor(gov->name) == NULL) {
34636 ret = 0;
34637- list_add_tail(&gov->governor_list, &cpuidle_governors);
34638+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
34639 if (!cpuidle_curr_governor ||
34640 cpuidle_curr_governor->rating < gov->rating)
34641 cpuidle_switch_governor(gov);
34642@@ -135,7 +135,7 @@ void cpuidle_unregister_governor(struct cpuidle_governor *gov)
34643 new_gov = cpuidle_replace_governor(gov->rating);
34644 cpuidle_switch_governor(new_gov);
34645 }
34646- list_del(&gov->governor_list);
34647+ pax_list_del((struct list_head *)&gov->governor_list);
34648 mutex_unlock(&cpuidle_lock);
34649 }
34650
34651diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
34652index 428754a..8bdf9cc 100644
34653--- a/drivers/cpuidle/sysfs.c
34654+++ b/drivers/cpuidle/sysfs.c
34655@@ -131,7 +131,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
34656 NULL
34657 };
34658
34659-static struct attribute_group cpuidle_attr_group = {
34660+static attribute_group_no_const cpuidle_attr_group = {
34661 .attrs = cpuidle_default_attrs,
34662 .name = "cpuidle",
34663 };
34664diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
34665index 3b36797..289c16a 100644
34666--- a/drivers/devfreq/devfreq.c
34667+++ b/drivers/devfreq/devfreq.c
34668@@ -588,7 +588,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
34669 goto err_out;
34670 }
34671
34672- list_add(&governor->node, &devfreq_governor_list);
34673+ pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
34674
34675 list_for_each_entry(devfreq, &devfreq_list, node) {
34676 int ret = 0;
34677@@ -676,7 +676,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
34678 }
34679 }
34680
34681- list_del(&governor->node);
34682+ pax_list_del((struct list_head *)&governor->node);
34683 err_out:
34684 mutex_unlock(&devfreq_list_lock);
34685
34686diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c
34687index b70709b..1d8d02a 100644
34688--- a/drivers/dma/sh/shdma.c
34689+++ b/drivers/dma/sh/shdma.c
34690@@ -476,7 +476,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
34691 return ret;
34692 }
34693
34694-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
34695+static struct notifier_block sh_dmae_nmi_notifier = {
34696 .notifier_call = sh_dmae_nmi_handler,
34697
34698 /* Run before NMI debug handler and KGDB */
34699diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
34700index 0ca1ca7..6e6f454 100644
34701--- a/drivers/edac/edac_mc_sysfs.c
34702+++ b/drivers/edac/edac_mc_sysfs.c
34703@@ -148,7 +148,7 @@ static const char *edac_caps[] = {
34704 struct dev_ch_attribute {
34705 struct device_attribute attr;
34706 int channel;
34707-};
34708+} __do_const;
34709
34710 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
34711 struct dev_ch_attribute dev_attr_legacy_##_name = \
34712diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
34713index 0056c4d..23b54d9 100644
34714--- a/drivers/edac/edac_pci_sysfs.c
34715+++ b/drivers/edac/edac_pci_sysfs.c
34716@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
34717 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
34718 static int edac_pci_poll_msec = 1000; /* one second workq period */
34719
34720-static atomic_t pci_parity_count = ATOMIC_INIT(0);
34721-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
34722+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
34723+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
34724
34725 static struct kobject *edac_pci_top_main_kobj;
34726 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
34727@@ -235,7 +235,7 @@ struct edac_pci_dev_attribute {
34728 void *value;
34729 ssize_t(*show) (void *, char *);
34730 ssize_t(*store) (void *, const char *, size_t);
34731-};
34732+} __do_const;
34733
34734 /* Set of show/store abstract level functions for PCI Parity object */
34735 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
34736@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34737 edac_printk(KERN_CRIT, EDAC_PCI,
34738 "Signaled System Error on %s\n",
34739 pci_name(dev));
34740- atomic_inc(&pci_nonparity_count);
34741+ atomic_inc_unchecked(&pci_nonparity_count);
34742 }
34743
34744 if (status & (PCI_STATUS_PARITY)) {
34745@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34746 "Master Data Parity Error on %s\n",
34747 pci_name(dev));
34748
34749- atomic_inc(&pci_parity_count);
34750+ atomic_inc_unchecked(&pci_parity_count);
34751 }
34752
34753 if (status & (PCI_STATUS_DETECTED_PARITY)) {
34754@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34755 "Detected Parity Error on %s\n",
34756 pci_name(dev));
34757
34758- atomic_inc(&pci_parity_count);
34759+ atomic_inc_unchecked(&pci_parity_count);
34760 }
34761 }
34762
34763@@ -618,7 +618,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34764 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
34765 "Signaled System Error on %s\n",
34766 pci_name(dev));
34767- atomic_inc(&pci_nonparity_count);
34768+ atomic_inc_unchecked(&pci_nonparity_count);
34769 }
34770
34771 if (status & (PCI_STATUS_PARITY)) {
34772@@ -626,7 +626,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34773 "Master Data Parity Error on "
34774 "%s\n", pci_name(dev));
34775
34776- atomic_inc(&pci_parity_count);
34777+ atomic_inc_unchecked(&pci_parity_count);
34778 }
34779
34780 if (status & (PCI_STATUS_DETECTED_PARITY)) {
34781@@ -634,7 +634,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34782 "Detected Parity Error on %s\n",
34783 pci_name(dev));
34784
34785- atomic_inc(&pci_parity_count);
34786+ atomic_inc_unchecked(&pci_parity_count);
34787 }
34788 }
34789 }
34790@@ -672,7 +672,7 @@ void edac_pci_do_parity_check(void)
34791 if (!check_pci_errors)
34792 return;
34793
34794- before_count = atomic_read(&pci_parity_count);
34795+ before_count = atomic_read_unchecked(&pci_parity_count);
34796
34797 /* scan all PCI devices looking for a Parity Error on devices and
34798 * bridges.
34799@@ -684,7 +684,7 @@ void edac_pci_do_parity_check(void)
34800 /* Only if operator has selected panic on PCI Error */
34801 if (edac_pci_get_panic_on_pe()) {
34802 /* If the count is different 'after' from 'before' */
34803- if (before_count != atomic_read(&pci_parity_count))
34804+ if (before_count != atomic_read_unchecked(&pci_parity_count))
34805 panic("EDAC: PCI Parity Error");
34806 }
34807 }
34808diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
34809index 6796799..99e8377 100644
34810--- a/drivers/edac/mce_amd.h
34811+++ b/drivers/edac/mce_amd.h
34812@@ -78,7 +78,7 @@ extern const char * const ii_msgs[];
34813 struct amd_decoder_ops {
34814 bool (*mc0_mce)(u16, u8);
34815 bool (*mc1_mce)(u16, u8);
34816-};
34817+} __no_const;
34818
34819 void amd_report_gart_errors(bool);
34820 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
34821diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
34822index 57ea7f4..789e3c3 100644
34823--- a/drivers/firewire/core-card.c
34824+++ b/drivers/firewire/core-card.c
34825@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
34826
34827 void fw_core_remove_card(struct fw_card *card)
34828 {
34829- struct fw_card_driver dummy_driver = dummy_driver_template;
34830+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
34831
34832 card->driver->update_phy_reg(card, 4,
34833 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
34834diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
34835index f8d2287..5aaf4db 100644
34836--- a/drivers/firewire/core-cdev.c
34837+++ b/drivers/firewire/core-cdev.c
34838@@ -1365,8 +1365,7 @@ static int init_iso_resource(struct client *client,
34839 int ret;
34840
34841 if ((request->channels == 0 && request->bandwidth == 0) ||
34842- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
34843- request->bandwidth < 0)
34844+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
34845 return -EINVAL;
34846
34847 r = kmalloc(sizeof(*r), GFP_KERNEL);
34848diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
34849index af3e8aa..eb2f227 100644
34850--- a/drivers/firewire/core-device.c
34851+++ b/drivers/firewire/core-device.c
34852@@ -232,7 +232,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
34853 struct config_rom_attribute {
34854 struct device_attribute attr;
34855 u32 key;
34856-};
34857+} __do_const;
34858
34859 static ssize_t show_immediate(struct device *dev,
34860 struct device_attribute *dattr, char *buf)
34861diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
34862index 28a94c7..58da63a 100644
34863--- a/drivers/firewire/core-transaction.c
34864+++ b/drivers/firewire/core-transaction.c
34865@@ -38,6 +38,7 @@
34866 #include <linux/timer.h>
34867 #include <linux/types.h>
34868 #include <linux/workqueue.h>
34869+#include <linux/sched.h>
34870
34871 #include <asm/byteorder.h>
34872
34873diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
34874index 515a42c..5ecf3ba 100644
34875--- a/drivers/firewire/core.h
34876+++ b/drivers/firewire/core.h
34877@@ -111,6 +111,7 @@ struct fw_card_driver {
34878
34879 int (*stop_iso)(struct fw_iso_context *ctx);
34880 };
34881+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
34882
34883 void fw_card_initialize(struct fw_card *card,
34884 const struct fw_card_driver *driver, struct device *device);
34885diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
34886index 94a58a0..f5eba42 100644
34887--- a/drivers/firmware/dmi-id.c
34888+++ b/drivers/firmware/dmi-id.c
34889@@ -16,7 +16,7 @@
34890 struct dmi_device_attribute{
34891 struct device_attribute dev_attr;
34892 int field;
34893-};
34894+} __do_const;
34895 #define to_dmi_dev_attr(_dev_attr) \
34896 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
34897
34898diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
34899index 4cd392d..4b629e1 100644
34900--- a/drivers/firmware/dmi_scan.c
34901+++ b/drivers/firmware/dmi_scan.c
34902@@ -490,11 +490,6 @@ void __init dmi_scan_machine(void)
34903 }
34904 }
34905 else {
34906- /*
34907- * no iounmap() for that ioremap(); it would be a no-op, but
34908- * it's so early in setup that sucker gets confused into doing
34909- * what it shouldn't if we actually call it.
34910- */
34911 p = dmi_ioremap(0xF0000, 0x10000);
34912 if (p == NULL)
34913 goto error;
34914@@ -769,7 +764,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
34915 if (buf == NULL)
34916 return -1;
34917
34918- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
34919+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
34920
34921 iounmap(buf);
34922 return 0;
34923diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
34924index b07cb37..2a51037 100644
34925--- a/drivers/firmware/efivars.c
34926+++ b/drivers/firmware/efivars.c
34927@@ -138,7 +138,7 @@ struct efivar_attribute {
34928 };
34929
34930 static struct efivars __efivars;
34931-static struct efivar_operations ops;
34932+static efivar_operations_no_const ops __read_only;
34933
34934 #define PSTORE_EFI_ATTRIBUTES \
34935 (EFI_VARIABLE_NON_VOLATILE | \
34936@@ -1834,7 +1834,7 @@ efivar_create_sysfs_entry(struct efivars *efivars,
34937 static int
34938 create_efivars_bin_attributes(struct efivars *efivars)
34939 {
34940- struct bin_attribute *attr;
34941+ bin_attribute_no_const *attr;
34942 int error;
34943
34944 /* new_var */
34945diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
34946index 2a90ba6..07f3733 100644
34947--- a/drivers/firmware/google/memconsole.c
34948+++ b/drivers/firmware/google/memconsole.c
34949@@ -147,7 +147,9 @@ static int __init memconsole_init(void)
34950 if (!found_memconsole())
34951 return -ENODEV;
34952
34953- memconsole_bin_attr.size = memconsole_length;
34954+ pax_open_kernel();
34955+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
34956+ pax_close_kernel();
34957
34958 ret = sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
34959
34960diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
34961index 6f2306d..af9476a 100644
34962--- a/drivers/gpio/gpio-ich.c
34963+++ b/drivers/gpio/gpio-ich.c
34964@@ -69,7 +69,7 @@ struct ichx_desc {
34965 /* Some chipsets have quirks, let these use their own request/get */
34966 int (*request)(struct gpio_chip *chip, unsigned offset);
34967 int (*get)(struct gpio_chip *chip, unsigned offset);
34968-};
34969+} __do_const;
34970
34971 static struct {
34972 spinlock_t lock;
34973diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
34974index 9902732..64b62dd 100644
34975--- a/drivers/gpio/gpio-vr41xx.c
34976+++ b/drivers/gpio/gpio-vr41xx.c
34977@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
34978 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
34979 maskl, pendl, maskh, pendh);
34980
34981- atomic_inc(&irq_err_count);
34982+ atomic_inc_unchecked(&irq_err_count);
34983
34984 return -EINVAL;
34985 }
34986diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
34987index 7b2d378..cc947ea 100644
34988--- a/drivers/gpu/drm/drm_crtc_helper.c
34989+++ b/drivers/gpu/drm/drm_crtc_helper.c
34990@@ -319,7 +319,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
34991 struct drm_crtc *tmp;
34992 int crtc_mask = 1;
34993
34994- WARN(!crtc, "checking null crtc?\n");
34995+ BUG_ON(!crtc);
34996
34997 dev = crtc->dev;
34998
34999diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
35000index be174ca..7f38143 100644
35001--- a/drivers/gpu/drm/drm_drv.c
35002+++ b/drivers/gpu/drm/drm_drv.c
35003@@ -307,7 +307,7 @@ module_exit(drm_core_exit);
35004 /**
35005 * Copy and IOCTL return string to user space
35006 */
35007-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
35008+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
35009 {
35010 int len;
35011
35012@@ -377,7 +377,7 @@ long drm_ioctl(struct file *filp,
35013 struct drm_file *file_priv = filp->private_data;
35014 struct drm_device *dev;
35015 struct drm_ioctl_desc *ioctl;
35016- drm_ioctl_t *func;
35017+ drm_ioctl_no_const_t func;
35018 unsigned int nr = DRM_IOCTL_NR(cmd);
35019 int retcode = -EINVAL;
35020 char stack_kdata[128];
35021@@ -390,7 +390,7 @@ long drm_ioctl(struct file *filp,
35022 return -ENODEV;
35023
35024 atomic_inc(&dev->ioctl_count);
35025- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
35026+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
35027 ++file_priv->ioctl_count;
35028
35029 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
35030diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
35031index 133b413..fd68225 100644
35032--- a/drivers/gpu/drm/drm_fops.c
35033+++ b/drivers/gpu/drm/drm_fops.c
35034@@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
35035 }
35036
35037 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
35038- atomic_set(&dev->counts[i], 0);
35039+ atomic_set_unchecked(&dev->counts[i], 0);
35040
35041 dev->sigdata.lock = NULL;
35042
35043@@ -134,7 +134,7 @@ int drm_open(struct inode *inode, struct file *filp)
35044 if (drm_device_is_unplugged(dev))
35045 return -ENODEV;
35046
35047- if (!dev->open_count++)
35048+ if (local_inc_return(&dev->open_count) == 1)
35049 need_setup = 1;
35050 mutex_lock(&dev->struct_mutex);
35051 old_mapping = dev->dev_mapping;
35052@@ -149,7 +149,7 @@ int drm_open(struct inode *inode, struct file *filp)
35053 retcode = drm_open_helper(inode, filp, dev);
35054 if (retcode)
35055 goto err_undo;
35056- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
35057+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
35058 if (need_setup) {
35059 retcode = drm_setup(dev);
35060 if (retcode)
35061@@ -164,7 +164,7 @@ err_undo:
35062 iput(container_of(dev->dev_mapping, struct inode, i_data));
35063 dev->dev_mapping = old_mapping;
35064 mutex_unlock(&dev->struct_mutex);
35065- dev->open_count--;
35066+ local_dec(&dev->open_count);
35067 return retcode;
35068 }
35069 EXPORT_SYMBOL(drm_open);
35070@@ -438,7 +438,7 @@ int drm_release(struct inode *inode, struct file *filp)
35071
35072 mutex_lock(&drm_global_mutex);
35073
35074- DRM_DEBUG("open_count = %d\n", dev->open_count);
35075+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
35076
35077 if (dev->driver->preclose)
35078 dev->driver->preclose(dev, file_priv);
35079@@ -447,10 +447,10 @@ int drm_release(struct inode *inode, struct file *filp)
35080 * Begin inline drm_release
35081 */
35082
35083- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
35084+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
35085 task_pid_nr(current),
35086 (long)old_encode_dev(file_priv->minor->device),
35087- dev->open_count);
35088+ local_read(&dev->open_count));
35089
35090 /* Release any auth tokens that might point to this file_priv,
35091 (do that under the drm_global_mutex) */
35092@@ -547,8 +547,8 @@ int drm_release(struct inode *inode, struct file *filp)
35093 * End inline drm_release
35094 */
35095
35096- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
35097- if (!--dev->open_count) {
35098+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
35099+ if (local_dec_and_test(&dev->open_count)) {
35100 if (atomic_read(&dev->ioctl_count)) {
35101 DRM_ERROR("Device busy: %d\n",
35102 atomic_read(&dev->ioctl_count));
35103diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
35104index f731116..629842c 100644
35105--- a/drivers/gpu/drm/drm_global.c
35106+++ b/drivers/gpu/drm/drm_global.c
35107@@ -36,7 +36,7 @@
35108 struct drm_global_item {
35109 struct mutex mutex;
35110 void *object;
35111- int refcount;
35112+ atomic_t refcount;
35113 };
35114
35115 static struct drm_global_item glob[DRM_GLOBAL_NUM];
35116@@ -49,7 +49,7 @@ void drm_global_init(void)
35117 struct drm_global_item *item = &glob[i];
35118 mutex_init(&item->mutex);
35119 item->object = NULL;
35120- item->refcount = 0;
35121+ atomic_set(&item->refcount, 0);
35122 }
35123 }
35124
35125@@ -59,7 +59,7 @@ void drm_global_release(void)
35126 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
35127 struct drm_global_item *item = &glob[i];
35128 BUG_ON(item->object != NULL);
35129- BUG_ON(item->refcount != 0);
35130+ BUG_ON(atomic_read(&item->refcount) != 0);
35131 }
35132 }
35133
35134@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
35135 void *object;
35136
35137 mutex_lock(&item->mutex);
35138- if (item->refcount == 0) {
35139+ if (atomic_read(&item->refcount) == 0) {
35140 item->object = kzalloc(ref->size, GFP_KERNEL);
35141 if (unlikely(item->object == NULL)) {
35142 ret = -ENOMEM;
35143@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
35144 goto out_err;
35145
35146 }
35147- ++item->refcount;
35148+ atomic_inc(&item->refcount);
35149 ref->object = item->object;
35150 object = item->object;
35151 mutex_unlock(&item->mutex);
35152@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
35153 struct drm_global_item *item = &glob[ref->global_type];
35154
35155 mutex_lock(&item->mutex);
35156- BUG_ON(item->refcount == 0);
35157+ BUG_ON(atomic_read(&item->refcount) == 0);
35158 BUG_ON(ref->object != item->object);
35159- if (--item->refcount == 0) {
35160+ if (atomic_dec_and_test(&item->refcount)) {
35161 ref->release(ref);
35162 item->object = NULL;
35163 }
35164diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
35165index d4b20ce..77a8d41 100644
35166--- a/drivers/gpu/drm/drm_info.c
35167+++ b/drivers/gpu/drm/drm_info.c
35168@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
35169 struct drm_local_map *map;
35170 struct drm_map_list *r_list;
35171
35172- /* Hardcoded from _DRM_FRAME_BUFFER,
35173- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
35174- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
35175- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
35176+ static const char * const types[] = {
35177+ [_DRM_FRAME_BUFFER] = "FB",
35178+ [_DRM_REGISTERS] = "REG",
35179+ [_DRM_SHM] = "SHM",
35180+ [_DRM_AGP] = "AGP",
35181+ [_DRM_SCATTER_GATHER] = "SG",
35182+ [_DRM_CONSISTENT] = "PCI",
35183+ [_DRM_GEM] = "GEM" };
35184 const char *type;
35185 int i;
35186
35187@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
35188 map = r_list->map;
35189 if (!map)
35190 continue;
35191- if (map->type < 0 || map->type > 5)
35192+ if (map->type >= ARRAY_SIZE(types))
35193 type = "??";
35194 else
35195 type = types[map->type];
35196@@ -253,7 +257,11 @@ int drm_vma_info(struct seq_file *m, void *data)
35197 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
35198 vma->vm_flags & VM_LOCKED ? 'l' : '-',
35199 vma->vm_flags & VM_IO ? 'i' : '-',
35200+#ifdef CONFIG_GRKERNSEC_HIDESYM
35201+ 0);
35202+#else
35203 vma->vm_pgoff);
35204+#endif
35205
35206 #if defined(__i386__)
35207 pgprot = pgprot_val(vma->vm_page_prot);
35208diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
35209index 2f4c434..dd12cd2 100644
35210--- a/drivers/gpu/drm/drm_ioc32.c
35211+++ b/drivers/gpu/drm/drm_ioc32.c
35212@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
35213 request = compat_alloc_user_space(nbytes);
35214 if (!access_ok(VERIFY_WRITE, request, nbytes))
35215 return -EFAULT;
35216- list = (struct drm_buf_desc *) (request + 1);
35217+ list = (struct drm_buf_desc __user *) (request + 1);
35218
35219 if (__put_user(count, &request->count)
35220 || __put_user(list, &request->list))
35221@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
35222 request = compat_alloc_user_space(nbytes);
35223 if (!access_ok(VERIFY_WRITE, request, nbytes))
35224 return -EFAULT;
35225- list = (struct drm_buf_pub *) (request + 1);
35226+ list = (struct drm_buf_pub __user *) (request + 1);
35227
35228 if (__put_user(count, &request->count)
35229 || __put_user(list, &request->list))
35230@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
35231 return 0;
35232 }
35233
35234-drm_ioctl_compat_t *drm_compat_ioctls[] = {
35235+drm_ioctl_compat_t drm_compat_ioctls[] = {
35236 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
35237 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
35238 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
35239@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
35240 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35241 {
35242 unsigned int nr = DRM_IOCTL_NR(cmd);
35243- drm_ioctl_compat_t *fn;
35244 int ret;
35245
35246 /* Assume that ioctls without an explicit compat routine will just
35247@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35248 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
35249 return drm_ioctl(filp, cmd, arg);
35250
35251- fn = drm_compat_ioctls[nr];
35252-
35253- if (fn != NULL)
35254- ret = (*fn) (filp, cmd, arg);
35255+ if (drm_compat_ioctls[nr] != NULL)
35256+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
35257 else
35258 ret = drm_ioctl(filp, cmd, arg);
35259
35260diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
35261index e77bd8b..1571b85 100644
35262--- a/drivers/gpu/drm/drm_ioctl.c
35263+++ b/drivers/gpu/drm/drm_ioctl.c
35264@@ -252,7 +252,7 @@ int drm_getstats(struct drm_device *dev, void *data,
35265 stats->data[i].value =
35266 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
35267 else
35268- stats->data[i].value = atomic_read(&dev->counts[i]);
35269+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
35270 stats->data[i].type = dev->types[i];
35271 }
35272
35273diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
35274index d752c96..fe08455 100644
35275--- a/drivers/gpu/drm/drm_lock.c
35276+++ b/drivers/gpu/drm/drm_lock.c
35277@@ -86,7 +86,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
35278 if (drm_lock_take(&master->lock, lock->context)) {
35279 master->lock.file_priv = file_priv;
35280 master->lock.lock_time = jiffies;
35281- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
35282+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
35283 break; /* Got lock */
35284 }
35285
35286@@ -157,7 +157,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
35287 return -EINVAL;
35288 }
35289
35290- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
35291+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
35292
35293 if (drm_lock_free(&master->lock, lock->context)) {
35294 /* FIXME: Should really bail out here. */
35295diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
35296index 200e104..59facda 100644
35297--- a/drivers/gpu/drm/drm_stub.c
35298+++ b/drivers/gpu/drm/drm_stub.c
35299@@ -516,7 +516,7 @@ void drm_unplug_dev(struct drm_device *dev)
35300
35301 drm_device_set_unplugged(dev);
35302
35303- if (dev->open_count == 0) {
35304+ if (local_read(&dev->open_count) == 0) {
35305 drm_put_dev(dev);
35306 }
35307 mutex_unlock(&drm_global_mutex);
35308diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
35309index 004ecdf..db1f6e0 100644
35310--- a/drivers/gpu/drm/i810/i810_dma.c
35311+++ b/drivers/gpu/drm/i810/i810_dma.c
35312@@ -945,8 +945,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
35313 dma->buflist[vertex->idx],
35314 vertex->discard, vertex->used);
35315
35316- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
35317- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
35318+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
35319+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
35320 sarea_priv->last_enqueue = dev_priv->counter - 1;
35321 sarea_priv->last_dispatch = (int)hw_status[5];
35322
35323@@ -1106,8 +1106,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
35324 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
35325 mc->last_render);
35326
35327- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
35328- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
35329+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
35330+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
35331 sarea_priv->last_enqueue = dev_priv->counter - 1;
35332 sarea_priv->last_dispatch = (int)hw_status[5];
35333
35334diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
35335index 6e0acad..93c8289 100644
35336--- a/drivers/gpu/drm/i810/i810_drv.h
35337+++ b/drivers/gpu/drm/i810/i810_drv.h
35338@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
35339 int page_flipping;
35340
35341 wait_queue_head_t irq_queue;
35342- atomic_t irq_received;
35343- atomic_t irq_emitted;
35344+ atomic_unchecked_t irq_received;
35345+ atomic_unchecked_t irq_emitted;
35346
35347 int front_offset;
35348 } drm_i810_private_t;
35349diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
35350index 261efc8e..27af8a5 100644
35351--- a/drivers/gpu/drm/i915/i915_debugfs.c
35352+++ b/drivers/gpu/drm/i915/i915_debugfs.c
35353@@ -496,7 +496,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
35354 I915_READ(GTIMR));
35355 }
35356 seq_printf(m, "Interrupts received: %d\n",
35357- atomic_read(&dev_priv->irq_received));
35358+ atomic_read_unchecked(&dev_priv->irq_received));
35359 for_each_ring(ring, dev_priv, i) {
35360 if (IS_GEN6(dev) || IS_GEN7(dev)) {
35361 seq_printf(m,
35362diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
35363index 99daa89..84ebd44 100644
35364--- a/drivers/gpu/drm/i915/i915_dma.c
35365+++ b/drivers/gpu/drm/i915/i915_dma.c
35366@@ -1253,7 +1253,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
35367 bool can_switch;
35368
35369 spin_lock(&dev->count_lock);
35370- can_switch = (dev->open_count == 0);
35371+ can_switch = (local_read(&dev->open_count) == 0);
35372 spin_unlock(&dev->count_lock);
35373 return can_switch;
35374 }
35375diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
35376index 7339a4b..445aaba 100644
35377--- a/drivers/gpu/drm/i915/i915_drv.h
35378+++ b/drivers/gpu/drm/i915/i915_drv.h
35379@@ -656,7 +656,7 @@ typedef struct drm_i915_private {
35380 drm_dma_handle_t *status_page_dmah;
35381 struct resource mch_res;
35382
35383- atomic_t irq_received;
35384+ atomic_unchecked_t irq_received;
35385
35386 /* protects the irq masks */
35387 spinlock_t irq_lock;
35388@@ -1102,7 +1102,7 @@ struct drm_i915_gem_object {
35389 * will be page flipped away on the next vblank. When it
35390 * reaches 0, dev_priv->pending_flip_queue will be woken up.
35391 */
35392- atomic_t pending_flip;
35393+ atomic_unchecked_t pending_flip;
35394 };
35395 #define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
35396
35397@@ -1633,7 +1633,7 @@ extern struct i2c_adapter *intel_gmbus_get_adapter(
35398 struct drm_i915_private *dev_priv, unsigned port);
35399 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
35400 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
35401-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
35402+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
35403 {
35404 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
35405 }
35406diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
35407index 7adf5a7..e24fb51 100644
35408--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
35409+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
35410@@ -672,7 +672,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
35411 i915_gem_clflush_object(obj);
35412
35413 if (obj->base.pending_write_domain)
35414- flips |= atomic_read(&obj->pending_flip);
35415+ flips |= atomic_read_unchecked(&obj->pending_flip);
35416
35417 flush_domains |= obj->base.write_domain;
35418 }
35419@@ -703,9 +703,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
35420
35421 static int
35422 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
35423- int count)
35424+ unsigned int count)
35425 {
35426- int i;
35427+ unsigned int i;
35428 int relocs_total = 0;
35429 int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
35430
35431@@ -1202,7 +1202,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
35432 return -ENOMEM;
35433 }
35434 ret = copy_from_user(exec2_list,
35435- (struct drm_i915_relocation_entry __user *)
35436+ (struct drm_i915_gem_exec_object2 __user *)
35437 (uintptr_t) args->buffers_ptr,
35438 sizeof(*exec2_list) * args->buffer_count);
35439 if (ret != 0) {
35440diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
35441index 3c59584..500f2e9 100644
35442--- a/drivers/gpu/drm/i915/i915_ioc32.c
35443+++ b/drivers/gpu/drm/i915/i915_ioc32.c
35444@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
35445 (unsigned long)request);
35446 }
35447
35448-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
35449+static drm_ioctl_compat_t i915_compat_ioctls[] = {
35450 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
35451 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
35452 [DRM_I915_GETPARAM] = compat_i915_getparam,
35453@@ -202,18 +202,15 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
35454 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35455 {
35456 unsigned int nr = DRM_IOCTL_NR(cmd);
35457- drm_ioctl_compat_t *fn = NULL;
35458 int ret;
35459
35460 if (nr < DRM_COMMAND_BASE)
35461 return drm_compat_ioctl(filp, cmd, arg);
35462
35463- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
35464- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
35465-
35466- if (fn != NULL)
35467+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls)) {
35468+ drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
35469 ret = (*fn) (filp, cmd, arg);
35470- else
35471+ } else
35472 ret = drm_ioctl(filp, cmd, arg);
35473
35474 return ret;
35475diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
35476index fe84338..a863190 100644
35477--- a/drivers/gpu/drm/i915/i915_irq.c
35478+++ b/drivers/gpu/drm/i915/i915_irq.c
35479@@ -535,7 +535,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
35480 u32 pipe_stats[I915_MAX_PIPES];
35481 bool blc_event;
35482
35483- atomic_inc(&dev_priv->irq_received);
35484+ atomic_inc_unchecked(&dev_priv->irq_received);
35485
35486 while (true) {
35487 iir = I915_READ(VLV_IIR);
35488@@ -688,7 +688,7 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
35489 irqreturn_t ret = IRQ_NONE;
35490 int i;
35491
35492- atomic_inc(&dev_priv->irq_received);
35493+ atomic_inc_unchecked(&dev_priv->irq_received);
35494
35495 /* disable master interrupt before clearing iir */
35496 de_ier = I915_READ(DEIER);
35497@@ -760,7 +760,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
35498 int ret = IRQ_NONE;
35499 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
35500
35501- atomic_inc(&dev_priv->irq_received);
35502+ atomic_inc_unchecked(&dev_priv->irq_received);
35503
35504 /* disable master interrupt before clearing iir */
35505 de_ier = I915_READ(DEIER);
35506@@ -1787,7 +1787,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
35507 {
35508 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
35509
35510- atomic_set(&dev_priv->irq_received, 0);
35511+ atomic_set_unchecked(&dev_priv->irq_received, 0);
35512
35513 I915_WRITE(HWSTAM, 0xeffe);
35514
35515@@ -1813,7 +1813,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
35516 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
35517 int pipe;
35518
35519- atomic_set(&dev_priv->irq_received, 0);
35520+ atomic_set_unchecked(&dev_priv->irq_received, 0);
35521
35522 /* VLV magic */
35523 I915_WRITE(VLV_IMR, 0);
35524@@ -2108,7 +2108,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
35525 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
35526 int pipe;
35527
35528- atomic_set(&dev_priv->irq_received, 0);
35529+ atomic_set_unchecked(&dev_priv->irq_received, 0);
35530
35531 for_each_pipe(pipe)
35532 I915_WRITE(PIPESTAT(pipe), 0);
35533@@ -2159,7 +2159,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
35534 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
35535 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
35536
35537- atomic_inc(&dev_priv->irq_received);
35538+ atomic_inc_unchecked(&dev_priv->irq_received);
35539
35540 iir = I915_READ16(IIR);
35541 if (iir == 0)
35542@@ -2244,7 +2244,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
35543 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
35544 int pipe;
35545
35546- atomic_set(&dev_priv->irq_received, 0);
35547+ atomic_set_unchecked(&dev_priv->irq_received, 0);
35548
35549 if (I915_HAS_HOTPLUG(dev)) {
35550 I915_WRITE(PORT_HOTPLUG_EN, 0);
35551@@ -2339,7 +2339,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
35552 };
35553 int pipe, ret = IRQ_NONE;
35554
35555- atomic_inc(&dev_priv->irq_received);
35556+ atomic_inc_unchecked(&dev_priv->irq_received);
35557
35558 iir = I915_READ(IIR);
35559 do {
35560@@ -2465,7 +2465,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
35561 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
35562 int pipe;
35563
35564- atomic_set(&dev_priv->irq_received, 0);
35565+ atomic_set_unchecked(&dev_priv->irq_received, 0);
35566
35567 I915_WRITE(PORT_HOTPLUG_EN, 0);
35568 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
35569@@ -2572,7 +2572,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
35570 int irq_received;
35571 int ret = IRQ_NONE, pipe;
35572
35573- atomic_inc(&dev_priv->irq_received);
35574+ atomic_inc_unchecked(&dev_priv->irq_received);
35575
35576 iir = I915_READ(IIR);
35577
35578diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
35579index e6e4df7..6a9a1bd 100644
35580--- a/drivers/gpu/drm/i915/intel_display.c
35581+++ b/drivers/gpu/drm/i915/intel_display.c
35582@@ -2255,7 +2255,7 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
35583
35584 wait_event(dev_priv->pending_flip_queue,
35585 atomic_read(&dev_priv->mm.wedged) ||
35586- atomic_read(&obj->pending_flip) == 0);
35587+ atomic_read_unchecked(&obj->pending_flip) == 0);
35588
35589 /* Big Hammer, we also need to ensure that any pending
35590 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
35591@@ -7122,8 +7122,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
35592
35593 obj = work->old_fb_obj;
35594
35595- atomic_clear_mask(1 << intel_crtc->plane,
35596- &obj->pending_flip.counter);
35597+ atomic_clear_mask_unchecked(1 << intel_crtc->plane, &obj->pending_flip);
35598 wake_up(&dev_priv->pending_flip_queue);
35599
35600 queue_work(dev_priv->wq, &work->work);
35601@@ -7486,7 +7485,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
35602 /* Block clients from rendering to the new back buffer until
35603 * the flip occurs and the object is no longer visible.
35604 */
35605- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
35606+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
35607 atomic_inc(&intel_crtc->unpin_work_count);
35608
35609 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
35610@@ -7504,7 +7503,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
35611 cleanup_pending:
35612 atomic_dec(&intel_crtc->unpin_work_count);
35613 crtc->fb = old_fb;
35614- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
35615+ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
35616 drm_gem_object_unreference(&work->old_fb_obj->base);
35617 drm_gem_object_unreference(&obj->base);
35618 mutex_unlock(&dev->struct_mutex);
35619@@ -8846,13 +8845,13 @@ struct intel_quirk {
35620 int subsystem_vendor;
35621 int subsystem_device;
35622 void (*hook)(struct drm_device *dev);
35623-};
35624+} __do_const;
35625
35626 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
35627 struct intel_dmi_quirk {
35628 void (*hook)(struct drm_device *dev);
35629 const struct dmi_system_id (*dmi_id_list)[];
35630-};
35631+} __do_const;
35632
35633 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
35634 {
35635@@ -8860,18 +8859,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
35636 return 1;
35637 }
35638
35639+static const struct dmi_system_id intel_dmi_quirks_table[] = {
35640+ {
35641+ .callback = intel_dmi_reverse_brightness,
35642+ .ident = "NCR Corporation",
35643+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
35644+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
35645+ },
35646+ },
35647+ { } /* terminating entry */
35648+};
35649+
35650 static const struct intel_dmi_quirk intel_dmi_quirks[] = {
35651 {
35652- .dmi_id_list = &(const struct dmi_system_id[]) {
35653- {
35654- .callback = intel_dmi_reverse_brightness,
35655- .ident = "NCR Corporation",
35656- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
35657- DMI_MATCH(DMI_PRODUCT_NAME, ""),
35658- },
35659- },
35660- { } /* terminating entry */
35661- },
35662+ .dmi_id_list = &intel_dmi_quirks_table,
35663 .hook = quirk_invert_brightness,
35664 },
35665 };
35666diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
35667index 54558a0..2d97005 100644
35668--- a/drivers/gpu/drm/mga/mga_drv.h
35669+++ b/drivers/gpu/drm/mga/mga_drv.h
35670@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
35671 u32 clear_cmd;
35672 u32 maccess;
35673
35674- atomic_t vbl_received; /**< Number of vblanks received. */
35675+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
35676 wait_queue_head_t fence_queue;
35677- atomic_t last_fence_retired;
35678+ atomic_unchecked_t last_fence_retired;
35679 u32 next_fence_to_post;
35680
35681 unsigned int fb_cpp;
35682diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
35683index 709e90d..89a1c0d 100644
35684--- a/drivers/gpu/drm/mga/mga_ioc32.c
35685+++ b/drivers/gpu/drm/mga/mga_ioc32.c
35686@@ -189,7 +189,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
35687 return 0;
35688 }
35689
35690-drm_ioctl_compat_t *mga_compat_ioctls[] = {
35691+drm_ioctl_compat_t mga_compat_ioctls[] = {
35692 [DRM_MGA_INIT] = compat_mga_init,
35693 [DRM_MGA_GETPARAM] = compat_mga_getparam,
35694 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
35695@@ -207,18 +207,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
35696 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35697 {
35698 unsigned int nr = DRM_IOCTL_NR(cmd);
35699- drm_ioctl_compat_t *fn = NULL;
35700 int ret;
35701
35702 if (nr < DRM_COMMAND_BASE)
35703 return drm_compat_ioctl(filp, cmd, arg);
35704
35705- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
35706- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
35707-
35708- if (fn != NULL)
35709+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) {
35710+ drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
35711 ret = (*fn) (filp, cmd, arg);
35712- else
35713+ } else
35714 ret = drm_ioctl(filp, cmd, arg);
35715
35716 return ret;
35717diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
35718index 598c281..60d590e 100644
35719--- a/drivers/gpu/drm/mga/mga_irq.c
35720+++ b/drivers/gpu/drm/mga/mga_irq.c
35721@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
35722 if (crtc != 0)
35723 return 0;
35724
35725- return atomic_read(&dev_priv->vbl_received);
35726+ return atomic_read_unchecked(&dev_priv->vbl_received);
35727 }
35728
35729
35730@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
35731 /* VBLANK interrupt */
35732 if (status & MGA_VLINEPEN) {
35733 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
35734- atomic_inc(&dev_priv->vbl_received);
35735+ atomic_inc_unchecked(&dev_priv->vbl_received);
35736 drm_handle_vblank(dev, 0);
35737 handled = 1;
35738 }
35739@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
35740 if ((prim_start & ~0x03) != (prim_end & ~0x03))
35741 MGA_WRITE(MGA_PRIMEND, prim_end);
35742
35743- atomic_inc(&dev_priv->last_fence_retired);
35744+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
35745 DRM_WAKEUP(&dev_priv->fence_queue);
35746 handled = 1;
35747 }
35748@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
35749 * using fences.
35750 */
35751 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
35752- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
35753+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
35754 - *sequence) <= (1 << 23)));
35755
35756 *sequence = cur_fence;
35757diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
35758index 865eddf..62c4cc3 100644
35759--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
35760+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
35761@@ -1015,7 +1015,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
35762 struct bit_table {
35763 const char id;
35764 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
35765-};
35766+} __no_const;
35767
35768 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
35769
35770diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
35771index aa89eb9..d45d38b 100644
35772--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
35773+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
35774@@ -80,7 +80,7 @@ struct nouveau_drm {
35775 struct drm_global_reference mem_global_ref;
35776 struct ttm_bo_global_ref bo_global_ref;
35777 struct ttm_bo_device bdev;
35778- atomic_t validate_sequence;
35779+ atomic_unchecked_t validate_sequence;
35780 int (*move)(struct nouveau_channel *,
35781 struct ttm_buffer_object *,
35782 struct ttm_mem_reg *, struct ttm_mem_reg *);
35783diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
35784index cdb83ac..27f0a16 100644
35785--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
35786+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
35787@@ -43,7 +43,7 @@ struct nouveau_fence_priv {
35788 int (*sync)(struct nouveau_fence *, struct nouveau_channel *,
35789 struct nouveau_channel *);
35790 u32 (*read)(struct nouveau_channel *);
35791-};
35792+} __no_const;
35793
35794 #define nouveau_fence(drm) ((struct nouveau_fence_priv *)(drm)->fence)
35795
35796diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
35797index 8bf695c..9fbc90a 100644
35798--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
35799+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
35800@@ -321,7 +321,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
35801 int trycnt = 0;
35802 int ret, i;
35803
35804- sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
35805+ sequence = atomic_add_return_unchecked(1, &drm->ttm.validate_sequence);
35806 retry:
35807 if (++trycnt > 100000) {
35808 NV_ERROR(drm, "%s failed and gave up.\n", __func__);
35809diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
35810index 08214bc..9208577 100644
35811--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
35812+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
35813@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
35814 unsigned long arg)
35815 {
35816 unsigned int nr = DRM_IOCTL_NR(cmd);
35817- drm_ioctl_compat_t *fn = NULL;
35818+ drm_ioctl_compat_t fn = NULL;
35819 int ret;
35820
35821 if (nr < DRM_COMMAND_BASE)
35822diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
35823index 25d3495..d81aaf6 100644
35824--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
35825+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
35826@@ -62,7 +62,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
35827 bool can_switch;
35828
35829 spin_lock(&dev->count_lock);
35830- can_switch = (dev->open_count == 0);
35831+ can_switch = (local_read(&dev->open_count) == 0);
35832 spin_unlock(&dev->count_lock);
35833 return can_switch;
35834 }
35835diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
35836index d4660cf..70dbe65 100644
35837--- a/drivers/gpu/drm/r128/r128_cce.c
35838+++ b/drivers/gpu/drm/r128/r128_cce.c
35839@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
35840
35841 /* GH: Simple idle check.
35842 */
35843- atomic_set(&dev_priv->idle_count, 0);
35844+ atomic_set_unchecked(&dev_priv->idle_count, 0);
35845
35846 /* We don't support anything other than bus-mastering ring mode,
35847 * but the ring can be in either AGP or PCI space for the ring
35848diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
35849index 930c71b..499aded 100644
35850--- a/drivers/gpu/drm/r128/r128_drv.h
35851+++ b/drivers/gpu/drm/r128/r128_drv.h
35852@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
35853 int is_pci;
35854 unsigned long cce_buffers_offset;
35855
35856- atomic_t idle_count;
35857+ atomic_unchecked_t idle_count;
35858
35859 int page_flipping;
35860 int current_page;
35861 u32 crtc_offset;
35862 u32 crtc_offset_cntl;
35863
35864- atomic_t vbl_received;
35865+ atomic_unchecked_t vbl_received;
35866
35867 u32 color_fmt;
35868 unsigned int front_offset;
35869diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
35870index a954c54..9cc595c 100644
35871--- a/drivers/gpu/drm/r128/r128_ioc32.c
35872+++ b/drivers/gpu/drm/r128/r128_ioc32.c
35873@@ -177,7 +177,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
35874 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
35875 }
35876
35877-drm_ioctl_compat_t *r128_compat_ioctls[] = {
35878+drm_ioctl_compat_t r128_compat_ioctls[] = {
35879 [DRM_R128_INIT] = compat_r128_init,
35880 [DRM_R128_DEPTH] = compat_r128_depth,
35881 [DRM_R128_STIPPLE] = compat_r128_stipple,
35882@@ -196,18 +196,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
35883 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35884 {
35885 unsigned int nr = DRM_IOCTL_NR(cmd);
35886- drm_ioctl_compat_t *fn = NULL;
35887 int ret;
35888
35889 if (nr < DRM_COMMAND_BASE)
35890 return drm_compat_ioctl(filp, cmd, arg);
35891
35892- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls))
35893- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
35894-
35895- if (fn != NULL)
35896+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls)) {
35897+ drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
35898 ret = (*fn) (filp, cmd, arg);
35899- else
35900+ } else
35901 ret = drm_ioctl(filp, cmd, arg);
35902
35903 return ret;
35904diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
35905index 2ea4f09..d391371 100644
35906--- a/drivers/gpu/drm/r128/r128_irq.c
35907+++ b/drivers/gpu/drm/r128/r128_irq.c
35908@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
35909 if (crtc != 0)
35910 return 0;
35911
35912- return atomic_read(&dev_priv->vbl_received);
35913+ return atomic_read_unchecked(&dev_priv->vbl_received);
35914 }
35915
35916 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
35917@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
35918 /* VBLANK interrupt */
35919 if (status & R128_CRTC_VBLANK_INT) {
35920 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
35921- atomic_inc(&dev_priv->vbl_received);
35922+ atomic_inc_unchecked(&dev_priv->vbl_received);
35923 drm_handle_vblank(dev, 0);
35924 return IRQ_HANDLED;
35925 }
35926diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
35927index 19bb7e6..de7e2a2 100644
35928--- a/drivers/gpu/drm/r128/r128_state.c
35929+++ b/drivers/gpu/drm/r128/r128_state.c
35930@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
35931
35932 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
35933 {
35934- if (atomic_read(&dev_priv->idle_count) == 0)
35935+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
35936 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
35937 else
35938- atomic_set(&dev_priv->idle_count, 0);
35939+ atomic_set_unchecked(&dev_priv->idle_count, 0);
35940 }
35941
35942 #endif
35943diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
35944index 5a82b6b..9e69c73 100644
35945--- a/drivers/gpu/drm/radeon/mkregtable.c
35946+++ b/drivers/gpu/drm/radeon/mkregtable.c
35947@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
35948 regex_t mask_rex;
35949 regmatch_t match[4];
35950 char buf[1024];
35951- size_t end;
35952+ long end;
35953 int len;
35954 int done = 0;
35955 int r;
35956 unsigned o;
35957 struct offset *offset;
35958 char last_reg_s[10];
35959- int last_reg;
35960+ unsigned long last_reg;
35961
35962 if (regcomp
35963 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
35964diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
35965index 0d6562b..a154330 100644
35966--- a/drivers/gpu/drm/radeon/radeon_device.c
35967+++ b/drivers/gpu/drm/radeon/radeon_device.c
35968@@ -969,7 +969,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
35969 bool can_switch;
35970
35971 spin_lock(&dev->count_lock);
35972- can_switch = (dev->open_count == 0);
35973+ can_switch = (local_read(&dev->open_count) == 0);
35974 spin_unlock(&dev->count_lock);
35975 return can_switch;
35976 }
35977diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
35978index e7fdf16..f4f6490 100644
35979--- a/drivers/gpu/drm/radeon/radeon_drv.h
35980+++ b/drivers/gpu/drm/radeon/radeon_drv.h
35981@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
35982
35983 /* SW interrupt */
35984 wait_queue_head_t swi_queue;
35985- atomic_t swi_emitted;
35986+ atomic_unchecked_t swi_emitted;
35987 int vblank_crtc;
35988 uint32_t irq_enable_reg;
35989 uint32_t r500_disp_irq_reg;
35990diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
35991index c180df8..5fd8186 100644
35992--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
35993+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
35994@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
35995 request = compat_alloc_user_space(sizeof(*request));
35996 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
35997 || __put_user(req32.param, &request->param)
35998- || __put_user((void __user *)(unsigned long)req32.value,
35999+ || __put_user((unsigned long)req32.value,
36000 &request->value))
36001 return -EFAULT;
36002
36003@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
36004 #define compat_radeon_cp_setparam NULL
36005 #endif /* X86_64 || IA64 */
36006
36007-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
36008+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
36009 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
36010 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
36011 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
36012@@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
36013 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
36014 {
36015 unsigned int nr = DRM_IOCTL_NR(cmd);
36016- drm_ioctl_compat_t *fn = NULL;
36017 int ret;
36018
36019 if (nr < DRM_COMMAND_BASE)
36020 return drm_compat_ioctl(filp, cmd, arg);
36021
36022- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
36023- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
36024-
36025- if (fn != NULL)
36026+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls)) {
36027+ drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
36028 ret = (*fn) (filp, cmd, arg);
36029- else
36030+ } else
36031 ret = drm_ioctl(filp, cmd, arg);
36032
36033 return ret;
36034diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
36035index e771033..a0bc6b3 100644
36036--- a/drivers/gpu/drm/radeon/radeon_irq.c
36037+++ b/drivers/gpu/drm/radeon/radeon_irq.c
36038@@ -224,8 +224,8 @@ static int radeon_emit_irq(struct drm_device * dev)
36039 unsigned int ret;
36040 RING_LOCALS;
36041
36042- atomic_inc(&dev_priv->swi_emitted);
36043- ret = atomic_read(&dev_priv->swi_emitted);
36044+ atomic_inc_unchecked(&dev_priv->swi_emitted);
36045+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
36046
36047 BEGIN_RING(4);
36048 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
36049@@ -351,7 +351,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
36050 drm_radeon_private_t *dev_priv =
36051 (drm_radeon_private_t *) dev->dev_private;
36052
36053- atomic_set(&dev_priv->swi_emitted, 0);
36054+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
36055 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
36056
36057 dev->max_vblank_count = 0x001fffff;
36058diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
36059index 8e9057b..af6dacb 100644
36060--- a/drivers/gpu/drm/radeon/radeon_state.c
36061+++ b/drivers/gpu/drm/radeon/radeon_state.c
36062@@ -2166,7 +2166,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
36063 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
36064 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
36065
36066- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
36067+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
36068 sarea_priv->nbox * sizeof(depth_boxes[0])))
36069 return -EFAULT;
36070
36071@@ -3029,7 +3029,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
36072 {
36073 drm_radeon_private_t *dev_priv = dev->dev_private;
36074 drm_radeon_getparam_t *param = data;
36075- int value;
36076+ int value = 0;
36077
36078 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
36079
36080diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
36081index 93f760e..8088227 100644
36082--- a/drivers/gpu/drm/radeon/radeon_ttm.c
36083+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
36084@@ -782,7 +782,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
36085 man->size = size >> PAGE_SHIFT;
36086 }
36087
36088-static struct vm_operations_struct radeon_ttm_vm_ops;
36089+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
36090 static const struct vm_operations_struct *ttm_vm_ops = NULL;
36091
36092 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
36093@@ -823,8 +823,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
36094 }
36095 if (unlikely(ttm_vm_ops == NULL)) {
36096 ttm_vm_ops = vma->vm_ops;
36097+ pax_open_kernel();
36098 radeon_ttm_vm_ops = *ttm_vm_ops;
36099 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
36100+ pax_close_kernel();
36101 }
36102 vma->vm_ops = &radeon_ttm_vm_ops;
36103 return 0;
36104@@ -862,28 +864,33 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
36105 sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
36106 else
36107 sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
36108- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
36109- radeon_mem_types_list[i].show = &radeon_mm_dump_table;
36110- radeon_mem_types_list[i].driver_features = 0;
36111+ pax_open_kernel();
36112+ *(const char **)&radeon_mem_types_list[i].name = radeon_mem_types_names[i];
36113+ *(void **)&radeon_mem_types_list[i].show = &radeon_mm_dump_table;
36114+ *(u32 *)&radeon_mem_types_list[i].driver_features = 0;
36115 if (i == 0)
36116- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
36117+ *(void **)&radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
36118 else
36119- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
36120-
36121+ *(void **)&radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
36122+ pax_close_kernel();
36123 }
36124 /* Add ttm page pool to debugfs */
36125 sprintf(radeon_mem_types_names[i], "ttm_page_pool");
36126- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
36127- radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
36128- radeon_mem_types_list[i].driver_features = 0;
36129- radeon_mem_types_list[i++].data = NULL;
36130+ pax_open_kernel();
36131+ *(const char **)&radeon_mem_types_list[i].name = radeon_mem_types_names[i];
36132+ *(void **)&radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
36133+ *(u32 *)&radeon_mem_types_list[i].driver_features = 0;
36134+ *(void **)&radeon_mem_types_list[i++].data = NULL;
36135+ pax_close_kernel();
36136 #ifdef CONFIG_SWIOTLB
36137 if (swiotlb_nr_tbl()) {
36138 sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
36139- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
36140- radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
36141- radeon_mem_types_list[i].driver_features = 0;
36142- radeon_mem_types_list[i++].data = NULL;
36143+ pax_open_kernel();
36144+ *(const char **)&radeon_mem_types_list[i].name = radeon_mem_types_names[i];
36145+ *(void **)&radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
36146+ *(u32 *)&radeon_mem_types_list[i].driver_features = 0;
36147+ *(void **)&radeon_mem_types_list[i++].data = NULL;
36148+ pax_close_kernel();
36149 }
36150 #endif
36151 return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);
36152diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
36153index 5706d2a..17aedaa 100644
36154--- a/drivers/gpu/drm/radeon/rs690.c
36155+++ b/drivers/gpu/drm/radeon/rs690.c
36156@@ -304,9 +304,11 @@ static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
36157 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
36158 rdev->pm.sideport_bandwidth.full)
36159 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
36160- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
36161+ read_delay_latency.full = dfixed_const(800 * 1000);
36162 read_delay_latency.full = dfixed_div(read_delay_latency,
36163 rdev->pm.igp_sideport_mclk);
36164+ a.full = dfixed_const(370);
36165+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
36166 } else {
36167 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
36168 rdev->pm.k8_bandwidth.full)
36169diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
36170index bd2a3b4..122d9ad 100644
36171--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
36172+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
36173@@ -394,9 +394,9 @@ static int ttm_pool_get_num_unused_pages(void)
36174 static int ttm_pool_mm_shrink(struct shrinker *shrink,
36175 struct shrink_control *sc)
36176 {
36177- static atomic_t start_pool = ATOMIC_INIT(0);
36178+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
36179 unsigned i;
36180- unsigned pool_offset = atomic_add_return(1, &start_pool);
36181+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
36182 struct ttm_page_pool *pool;
36183 int shrink_pages = sc->nr_to_scan;
36184
36185diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
36186index 1eb060c..188b1fc 100644
36187--- a/drivers/gpu/drm/udl/udl_fb.c
36188+++ b/drivers/gpu/drm/udl/udl_fb.c
36189@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
36190 fb_deferred_io_cleanup(info);
36191 kfree(info->fbdefio);
36192 info->fbdefio = NULL;
36193- info->fbops->fb_mmap = udl_fb_mmap;
36194 }
36195
36196 pr_warn("released /dev/fb%d user=%d count=%d\n",
36197diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
36198index 893a650..6190d3b 100644
36199--- a/drivers/gpu/drm/via/via_drv.h
36200+++ b/drivers/gpu/drm/via/via_drv.h
36201@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
36202 typedef uint32_t maskarray_t[5];
36203
36204 typedef struct drm_via_irq {
36205- atomic_t irq_received;
36206+ atomic_unchecked_t irq_received;
36207 uint32_t pending_mask;
36208 uint32_t enable_mask;
36209 wait_queue_head_t irq_queue;
36210@@ -75,7 +75,7 @@ typedef struct drm_via_private {
36211 struct timeval last_vblank;
36212 int last_vblank_valid;
36213 unsigned usec_per_vblank;
36214- atomic_t vbl_received;
36215+ atomic_unchecked_t vbl_received;
36216 drm_via_state_t hc_state;
36217 char pci_buf[VIA_PCI_BUF_SIZE];
36218 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
36219diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
36220index ac98964..5dbf512 100644
36221--- a/drivers/gpu/drm/via/via_irq.c
36222+++ b/drivers/gpu/drm/via/via_irq.c
36223@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
36224 if (crtc != 0)
36225 return 0;
36226
36227- return atomic_read(&dev_priv->vbl_received);
36228+ return atomic_read_unchecked(&dev_priv->vbl_received);
36229 }
36230
36231 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
36232@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
36233
36234 status = VIA_READ(VIA_REG_INTERRUPT);
36235 if (status & VIA_IRQ_VBLANK_PENDING) {
36236- atomic_inc(&dev_priv->vbl_received);
36237- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
36238+ atomic_inc_unchecked(&dev_priv->vbl_received);
36239+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
36240 do_gettimeofday(&cur_vblank);
36241 if (dev_priv->last_vblank_valid) {
36242 dev_priv->usec_per_vblank =
36243@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
36244 dev_priv->last_vblank = cur_vblank;
36245 dev_priv->last_vblank_valid = 1;
36246 }
36247- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
36248+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
36249 DRM_DEBUG("US per vblank is: %u\n",
36250 dev_priv->usec_per_vblank);
36251 }
36252@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
36253
36254 for (i = 0; i < dev_priv->num_irqs; ++i) {
36255 if (status & cur_irq->pending_mask) {
36256- atomic_inc(&cur_irq->irq_received);
36257+ atomic_inc_unchecked(&cur_irq->irq_received);
36258 DRM_WAKEUP(&cur_irq->irq_queue);
36259 handled = 1;
36260 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
36261@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
36262 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
36263 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
36264 masks[irq][4]));
36265- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
36266+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
36267 } else {
36268 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
36269 (((cur_irq_sequence =
36270- atomic_read(&cur_irq->irq_received)) -
36271+ atomic_read_unchecked(&cur_irq->irq_received)) -
36272 *sequence) <= (1 << 23)));
36273 }
36274 *sequence = cur_irq_sequence;
36275@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
36276 }
36277
36278 for (i = 0; i < dev_priv->num_irqs; ++i) {
36279- atomic_set(&cur_irq->irq_received, 0);
36280+ atomic_set_unchecked(&cur_irq->irq_received, 0);
36281 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
36282 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
36283 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
36284@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
36285 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
36286 case VIA_IRQ_RELATIVE:
36287 irqwait->request.sequence +=
36288- atomic_read(&cur_irq->irq_received);
36289+ atomic_read_unchecked(&cur_irq->irq_received);
36290 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
36291 case VIA_IRQ_ABSOLUTE:
36292 break;
36293diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
36294index 13aeda7..4a952d1 100644
36295--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
36296+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
36297@@ -290,7 +290,7 @@ struct vmw_private {
36298 * Fencing and IRQs.
36299 */
36300
36301- atomic_t marker_seq;
36302+ atomic_unchecked_t marker_seq;
36303 wait_queue_head_t fence_queue;
36304 wait_queue_head_t fifo_queue;
36305 int fence_queue_waiters; /* Protected by hw_mutex */
36306diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
36307index 3eb1486..0a47ee9 100644
36308--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
36309+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
36310@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
36311 (unsigned int) min,
36312 (unsigned int) fifo->capabilities);
36313
36314- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
36315+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
36316 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
36317 vmw_marker_queue_init(&fifo->marker_queue);
36318 return vmw_fifo_send_fence(dev_priv, &dummy);
36319@@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
36320 if (reserveable)
36321 iowrite32(bytes, fifo_mem +
36322 SVGA_FIFO_RESERVED);
36323- return fifo_mem + (next_cmd >> 2);
36324+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
36325 } else {
36326 need_bounce = true;
36327 }
36328@@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
36329
36330 fm = vmw_fifo_reserve(dev_priv, bytes);
36331 if (unlikely(fm == NULL)) {
36332- *seqno = atomic_read(&dev_priv->marker_seq);
36333+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
36334 ret = -ENOMEM;
36335 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
36336 false, 3*HZ);
36337@@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
36338 }
36339
36340 do {
36341- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
36342+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
36343 } while (*seqno == 0);
36344
36345 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
36346diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
36347index 4640adb..e1384ed 100644
36348--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
36349+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
36350@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
36351 * emitted. Then the fence is stale and signaled.
36352 */
36353
36354- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
36355+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
36356 > VMW_FENCE_WRAP);
36357
36358 return ret;
36359@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
36360
36361 if (fifo_idle)
36362 down_read(&fifo_state->rwsem);
36363- signal_seq = atomic_read(&dev_priv->marker_seq);
36364+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
36365 ret = 0;
36366
36367 for (;;) {
36368diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
36369index 8a8725c..afed796 100644
36370--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
36371+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
36372@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
36373 while (!vmw_lag_lt(queue, us)) {
36374 spin_lock(&queue->lock);
36375 if (list_empty(&queue->head))
36376- seqno = atomic_read(&dev_priv->marker_seq);
36377+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
36378 else {
36379 marker = list_first_entry(&queue->head,
36380 struct vmw_marker, head);
36381diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
36382index ceb3040..6160c5c 100644
36383--- a/drivers/hid/hid-core.c
36384+++ b/drivers/hid/hid-core.c
36385@@ -2242,7 +2242,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
36386
36387 int hid_add_device(struct hid_device *hdev)
36388 {
36389- static atomic_t id = ATOMIC_INIT(0);
36390+ static atomic_unchecked_t id = ATOMIC_INIT(0);
36391 int ret;
36392
36393 if (WARN_ON(hdev->status & HID_STAT_ADDED))
36394@@ -2276,7 +2276,7 @@ int hid_add_device(struct hid_device *hdev)
36395 /* XXX hack, any other cleaner solution after the driver core
36396 * is converted to allow more than 20 bytes as the device name? */
36397 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
36398- hdev->vendor, hdev->product, atomic_inc_return(&id));
36399+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
36400
36401 hid_debug_register(hdev, dev_name(&hdev->dev));
36402 ret = device_add(&hdev->dev);
36403diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
36404index eec3291..8ed706b 100644
36405--- a/drivers/hid/hid-wiimote-debug.c
36406+++ b/drivers/hid/hid-wiimote-debug.c
36407@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
36408 else if (size == 0)
36409 return -EIO;
36410
36411- if (copy_to_user(u, buf, size))
36412+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
36413 return -EFAULT;
36414
36415 *off += size;
36416diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
36417index 773a2f2..7ce08bc 100644
36418--- a/drivers/hv/channel.c
36419+++ b/drivers/hv/channel.c
36420@@ -394,8 +394,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
36421 int ret = 0;
36422 int t;
36423
36424- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
36425- atomic_inc(&vmbus_connection.next_gpadl_handle);
36426+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
36427+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
36428
36429 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
36430 if (ret)
36431diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
36432index 3648f8f..30ef30d 100644
36433--- a/drivers/hv/hv.c
36434+++ b/drivers/hv/hv.c
36435@@ -111,7 +111,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
36436 u64 output_address = (output) ? virt_to_phys(output) : 0;
36437 u32 output_address_hi = output_address >> 32;
36438 u32 output_address_lo = output_address & 0xFFFFFFFF;
36439- void *hypercall_page = hv_context.hypercall_page;
36440+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
36441
36442 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
36443 "=a"(hv_status_lo) : "d" (control_hi),
36444diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
36445index d8d1fad..b91caf7 100644
36446--- a/drivers/hv/hyperv_vmbus.h
36447+++ b/drivers/hv/hyperv_vmbus.h
36448@@ -594,7 +594,7 @@ enum vmbus_connect_state {
36449 struct vmbus_connection {
36450 enum vmbus_connect_state conn_state;
36451
36452- atomic_t next_gpadl_handle;
36453+ atomic_unchecked_t next_gpadl_handle;
36454
36455 /*
36456 * Represents channel interrupts. Each bit position represents a
36457diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
36458index 8e1a9ec..4687821 100644
36459--- a/drivers/hv/vmbus_drv.c
36460+++ b/drivers/hv/vmbus_drv.c
36461@@ -629,10 +629,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
36462 {
36463 int ret = 0;
36464
36465- static atomic_t device_num = ATOMIC_INIT(0);
36466+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
36467
36468 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
36469- atomic_inc_return(&device_num));
36470+ atomic_inc_return_unchecked(&device_num));
36471
36472 child_device_obj->device.bus = &hv_bus;
36473 child_device_obj->device.parent = &hv_acpi_dev->dev;
36474diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
36475index 1672e2a..4a6297c 100644
36476--- a/drivers/hwmon/acpi_power_meter.c
36477+++ b/drivers/hwmon/acpi_power_meter.c
36478@@ -117,7 +117,7 @@ struct sensor_template {
36479 struct device_attribute *devattr,
36480 const char *buf, size_t count);
36481 int index;
36482-};
36483+} __do_const;
36484
36485 /* Averaging interval */
36486 static int update_avg_interval(struct acpi_power_meter_resource *resource)
36487@@ -629,7 +629,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
36488 struct sensor_template *attrs)
36489 {
36490 struct device *dev = &resource->acpi_dev->dev;
36491- struct sensor_device_attribute *sensors =
36492+ sensor_device_attribute_no_const *sensors =
36493 &resource->sensors[resource->num_sensors];
36494 int res = 0;
36495
36496diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
36497index b41baff..4953e4d 100644
36498--- a/drivers/hwmon/applesmc.c
36499+++ b/drivers/hwmon/applesmc.c
36500@@ -1084,7 +1084,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
36501 {
36502 struct applesmc_node_group *grp;
36503 struct applesmc_dev_attr *node;
36504- struct attribute *attr;
36505+ attribute_no_const *attr;
36506 int ret, i;
36507
36508 for (grp = groups; grp->format; grp++) {
36509diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
36510index 56dbcfb..9874bf1 100644
36511--- a/drivers/hwmon/asus_atk0110.c
36512+++ b/drivers/hwmon/asus_atk0110.c
36513@@ -152,10 +152,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
36514 struct atk_sensor_data {
36515 struct list_head list;
36516 struct atk_data *data;
36517- struct device_attribute label_attr;
36518- struct device_attribute input_attr;
36519- struct device_attribute limit1_attr;
36520- struct device_attribute limit2_attr;
36521+ device_attribute_no_const label_attr;
36522+ device_attribute_no_const input_attr;
36523+ device_attribute_no_const limit1_attr;
36524+ device_attribute_no_const limit2_attr;
36525 char label_attr_name[ATTR_NAME_SIZE];
36526 char input_attr_name[ATTR_NAME_SIZE];
36527 char limit1_attr_name[ATTR_NAME_SIZE];
36528@@ -275,7 +275,7 @@ static ssize_t atk_name_show(struct device *dev,
36529 static struct device_attribute atk_name_attr =
36530 __ATTR(name, 0444, atk_name_show, NULL);
36531
36532-static void atk_init_attribute(struct device_attribute *attr, char *name,
36533+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
36534 sysfs_show_func show)
36535 {
36536 sysfs_attr_init(&attr->attr);
36537diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
36538index d64923d..72591e8 100644
36539--- a/drivers/hwmon/coretemp.c
36540+++ b/drivers/hwmon/coretemp.c
36541@@ -790,7 +790,7 @@ static int __cpuinit coretemp_cpu_callback(struct notifier_block *nfb,
36542 return NOTIFY_OK;
36543 }
36544
36545-static struct notifier_block coretemp_cpu_notifier __refdata = {
36546+static struct notifier_block coretemp_cpu_notifier = {
36547 .notifier_call = coretemp_cpu_callback,
36548 };
36549
36550diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
36551index a14f634..2916ee2 100644
36552--- a/drivers/hwmon/ibmaem.c
36553+++ b/drivers/hwmon/ibmaem.c
36554@@ -925,7 +925,7 @@ static int aem_register_sensors(struct aem_data *data,
36555 struct aem_rw_sensor_template *rw)
36556 {
36557 struct device *dev = &data->pdev->dev;
36558- struct sensor_device_attribute *sensors = data->sensors;
36559+ sensor_device_attribute_no_const *sensors = data->sensors;
36560 int err;
36561
36562 /* Set up read-only sensors */
36563diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
36564index 7d19b1b..8fdaaac 100644
36565--- a/drivers/hwmon/pmbus/pmbus_core.c
36566+++ b/drivers/hwmon/pmbus/pmbus_core.c
36567@@ -811,7 +811,7 @@ static ssize_t pmbus_show_label(struct device *dev,
36568
36569 #define PMBUS_ADD_ATTR(data, _name, _idx, _mode, _type, _show, _set) \
36570 do { \
36571- struct sensor_device_attribute *a \
36572+ sensor_device_attribute_no_const *a \
36573 = &data->_type##s[data->num_##_type##s].attribute; \
36574 BUG_ON(data->num_attributes >= data->max_attributes); \
36575 sysfs_attr_init(&a->dev_attr.attr); \
36576diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
36577index 8047fed..1e956f0 100644
36578--- a/drivers/hwmon/sht15.c
36579+++ b/drivers/hwmon/sht15.c
36580@@ -169,7 +169,7 @@ struct sht15_data {
36581 int supply_uV;
36582 bool supply_uV_valid;
36583 struct work_struct update_supply_work;
36584- atomic_t interrupt_handled;
36585+ atomic_unchecked_t interrupt_handled;
36586 };
36587
36588 /**
36589@@ -512,13 +512,13 @@ static int sht15_measurement(struct sht15_data *data,
36590 return ret;
36591
36592 gpio_direction_input(data->pdata->gpio_data);
36593- atomic_set(&data->interrupt_handled, 0);
36594+ atomic_set_unchecked(&data->interrupt_handled, 0);
36595
36596 enable_irq(gpio_to_irq(data->pdata->gpio_data));
36597 if (gpio_get_value(data->pdata->gpio_data) == 0) {
36598 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
36599 /* Only relevant if the interrupt hasn't occurred. */
36600- if (!atomic_read(&data->interrupt_handled))
36601+ if (!atomic_read_unchecked(&data->interrupt_handled))
36602 schedule_work(&data->read_work);
36603 }
36604 ret = wait_event_timeout(data->wait_queue,
36605@@ -785,7 +785,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
36606
36607 /* First disable the interrupt */
36608 disable_irq_nosync(irq);
36609- atomic_inc(&data->interrupt_handled);
36610+ atomic_inc_unchecked(&data->interrupt_handled);
36611 /* Then schedule a reading work struct */
36612 if (data->state != SHT15_READING_NOTHING)
36613 schedule_work(&data->read_work);
36614@@ -807,11 +807,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
36615 * If not, then start the interrupt again - care here as could
36616 * have gone low in meantime so verify it hasn't!
36617 */
36618- atomic_set(&data->interrupt_handled, 0);
36619+ atomic_set_unchecked(&data->interrupt_handled, 0);
36620 enable_irq(gpio_to_irq(data->pdata->gpio_data));
36621 /* If still not occurred or another handler was scheduled */
36622 if (gpio_get_value(data->pdata->gpio_data)
36623- || atomic_read(&data->interrupt_handled))
36624+ || atomic_read_unchecked(&data->interrupt_handled))
36625 return;
36626 }
36627
36628diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
36629index 76f157b..9c0db1b 100644
36630--- a/drivers/hwmon/via-cputemp.c
36631+++ b/drivers/hwmon/via-cputemp.c
36632@@ -296,7 +296,7 @@ static int __cpuinit via_cputemp_cpu_callback(struct notifier_block *nfb,
36633 return NOTIFY_OK;
36634 }
36635
36636-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
36637+static struct notifier_block via_cputemp_cpu_notifier = {
36638 .notifier_call = via_cputemp_cpu_callback,
36639 };
36640
36641diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
36642index 378fcb5..5e91fa8 100644
36643--- a/drivers/i2c/busses/i2c-amd756-s4882.c
36644+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
36645@@ -43,7 +43,7 @@
36646 extern struct i2c_adapter amd756_smbus;
36647
36648 static struct i2c_adapter *s4882_adapter;
36649-static struct i2c_algorithm *s4882_algo;
36650+static i2c_algorithm_no_const *s4882_algo;
36651
36652 /* Wrapper access functions for multiplexed SMBus */
36653 static DEFINE_MUTEX(amd756_lock);
36654diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
36655index 29015eb..af2d8e9 100644
36656--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
36657+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
36658@@ -41,7 +41,7 @@
36659 extern struct i2c_adapter *nforce2_smbus;
36660
36661 static struct i2c_adapter *s4985_adapter;
36662-static struct i2c_algorithm *s4985_algo;
36663+static i2c_algorithm_no_const *s4985_algo;
36664
36665 /* Wrapper access functions for multiplexed SMBus */
36666 static DEFINE_MUTEX(nforce2_lock);
36667diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
36668index 8126824..55a2798 100644
36669--- a/drivers/ide/ide-cd.c
36670+++ b/drivers/ide/ide-cd.c
36671@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
36672 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
36673 if ((unsigned long)buf & alignment
36674 || blk_rq_bytes(rq) & q->dma_pad_mask
36675- || object_is_on_stack(buf))
36676+ || object_starts_on_stack(buf))
36677 drive->dma = 0;
36678 }
36679 }
36680diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
36681index 8848f16..f8e6dd8 100644
36682--- a/drivers/iio/industrialio-core.c
36683+++ b/drivers/iio/industrialio-core.c
36684@@ -506,7 +506,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
36685 }
36686
36687 static
36688-int __iio_device_attr_init(struct device_attribute *dev_attr,
36689+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
36690 const char *postfix,
36691 struct iio_chan_spec const *chan,
36692 ssize_t (*readfunc)(struct device *dev,
36693diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
36694index 394fea2..c833880 100644
36695--- a/drivers/infiniband/core/cm.c
36696+++ b/drivers/infiniband/core/cm.c
36697@@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
36698
36699 struct cm_counter_group {
36700 struct kobject obj;
36701- atomic_long_t counter[CM_ATTR_COUNT];
36702+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
36703 };
36704
36705 struct cm_counter_attribute {
36706@@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
36707 struct ib_mad_send_buf *msg = NULL;
36708 int ret;
36709
36710- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36711+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36712 counter[CM_REQ_COUNTER]);
36713
36714 /* Quick state check to discard duplicate REQs. */
36715@@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
36716 if (!cm_id_priv)
36717 return;
36718
36719- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36720+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36721 counter[CM_REP_COUNTER]);
36722 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
36723 if (ret)
36724@@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
36725 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
36726 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
36727 spin_unlock_irq(&cm_id_priv->lock);
36728- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36729+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36730 counter[CM_RTU_COUNTER]);
36731 goto out;
36732 }
36733@@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
36734 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
36735 dreq_msg->local_comm_id);
36736 if (!cm_id_priv) {
36737- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36738+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36739 counter[CM_DREQ_COUNTER]);
36740 cm_issue_drep(work->port, work->mad_recv_wc);
36741 return -EINVAL;
36742@@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
36743 case IB_CM_MRA_REP_RCVD:
36744 break;
36745 case IB_CM_TIMEWAIT:
36746- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36747+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36748 counter[CM_DREQ_COUNTER]);
36749 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
36750 goto unlock;
36751@@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
36752 cm_free_msg(msg);
36753 goto deref;
36754 case IB_CM_DREQ_RCVD:
36755- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36756+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36757 counter[CM_DREQ_COUNTER]);
36758 goto unlock;
36759 default:
36760@@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
36761 ib_modify_mad(cm_id_priv->av.port->mad_agent,
36762 cm_id_priv->msg, timeout)) {
36763 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
36764- atomic_long_inc(&work->port->
36765+ atomic_long_inc_unchecked(&work->port->
36766 counter_group[CM_RECV_DUPLICATES].
36767 counter[CM_MRA_COUNTER]);
36768 goto out;
36769@@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
36770 break;
36771 case IB_CM_MRA_REQ_RCVD:
36772 case IB_CM_MRA_REP_RCVD:
36773- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36774+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36775 counter[CM_MRA_COUNTER]);
36776 /* fall through */
36777 default:
36778@@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
36779 case IB_CM_LAP_IDLE:
36780 break;
36781 case IB_CM_MRA_LAP_SENT:
36782- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36783+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36784 counter[CM_LAP_COUNTER]);
36785 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
36786 goto unlock;
36787@@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
36788 cm_free_msg(msg);
36789 goto deref;
36790 case IB_CM_LAP_RCVD:
36791- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36792+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36793 counter[CM_LAP_COUNTER]);
36794 goto unlock;
36795 default:
36796@@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
36797 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
36798 if (cur_cm_id_priv) {
36799 spin_unlock_irq(&cm.lock);
36800- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36801+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36802 counter[CM_SIDR_REQ_COUNTER]);
36803 goto out; /* Duplicate message. */
36804 }
36805@@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
36806 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
36807 msg->retries = 1;
36808
36809- atomic_long_add(1 + msg->retries,
36810+ atomic_long_add_unchecked(1 + msg->retries,
36811 &port->counter_group[CM_XMIT].counter[attr_index]);
36812 if (msg->retries)
36813- atomic_long_add(msg->retries,
36814+ atomic_long_add_unchecked(msg->retries,
36815 &port->counter_group[CM_XMIT_RETRIES].
36816 counter[attr_index]);
36817
36818@@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
36819 }
36820
36821 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
36822- atomic_long_inc(&port->counter_group[CM_RECV].
36823+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
36824 counter[attr_id - CM_ATTR_ID_OFFSET]);
36825
36826 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
36827@@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
36828 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
36829
36830 return sprintf(buf, "%ld\n",
36831- atomic_long_read(&group->counter[cm_attr->index]));
36832+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
36833 }
36834
36835 static const struct sysfs_ops cm_counter_ops = {
36836diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
36837index 176c8f9..2627b62 100644
36838--- a/drivers/infiniband/core/fmr_pool.c
36839+++ b/drivers/infiniband/core/fmr_pool.c
36840@@ -98,8 +98,8 @@ struct ib_fmr_pool {
36841
36842 struct task_struct *thread;
36843
36844- atomic_t req_ser;
36845- atomic_t flush_ser;
36846+ atomic_unchecked_t req_ser;
36847+ atomic_unchecked_t flush_ser;
36848
36849 wait_queue_head_t force_wait;
36850 };
36851@@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
36852 struct ib_fmr_pool *pool = pool_ptr;
36853
36854 do {
36855- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
36856+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
36857 ib_fmr_batch_release(pool);
36858
36859- atomic_inc(&pool->flush_ser);
36860+ atomic_inc_unchecked(&pool->flush_ser);
36861 wake_up_interruptible(&pool->force_wait);
36862
36863 if (pool->flush_function)
36864@@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
36865 }
36866
36867 set_current_state(TASK_INTERRUPTIBLE);
36868- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
36869+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
36870 !kthread_should_stop())
36871 schedule();
36872 __set_current_state(TASK_RUNNING);
36873@@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
36874 pool->dirty_watermark = params->dirty_watermark;
36875 pool->dirty_len = 0;
36876 spin_lock_init(&pool->pool_lock);
36877- atomic_set(&pool->req_ser, 0);
36878- atomic_set(&pool->flush_ser, 0);
36879+ atomic_set_unchecked(&pool->req_ser, 0);
36880+ atomic_set_unchecked(&pool->flush_ser, 0);
36881 init_waitqueue_head(&pool->force_wait);
36882
36883 pool->thread = kthread_run(ib_fmr_cleanup_thread,
36884@@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
36885 }
36886 spin_unlock_irq(&pool->pool_lock);
36887
36888- serial = atomic_inc_return(&pool->req_ser);
36889+ serial = atomic_inc_return_unchecked(&pool->req_ser);
36890 wake_up_process(pool->thread);
36891
36892 if (wait_event_interruptible(pool->force_wait,
36893- atomic_read(&pool->flush_ser) - serial >= 0))
36894+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
36895 return -EINTR;
36896
36897 return 0;
36898@@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
36899 } else {
36900 list_add_tail(&fmr->list, &pool->dirty_list);
36901 if (++pool->dirty_len >= pool->dirty_watermark) {
36902- atomic_inc(&pool->req_ser);
36903+ atomic_inc_unchecked(&pool->req_ser);
36904 wake_up_process(pool->thread);
36905 }
36906 }
36907diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
36908index afd8179..598063f 100644
36909--- a/drivers/infiniband/hw/cxgb4/mem.c
36910+++ b/drivers/infiniband/hw/cxgb4/mem.c
36911@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
36912 int err;
36913 struct fw_ri_tpte tpt;
36914 u32 stag_idx;
36915- static atomic_t key;
36916+ static atomic_unchecked_t key;
36917
36918 if (c4iw_fatal_error(rdev))
36919 return -EIO;
36920@@ -139,7 +139,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
36921 if (rdev->stats.stag.cur > rdev->stats.stag.max)
36922 rdev->stats.stag.max = rdev->stats.stag.cur;
36923 mutex_unlock(&rdev->stats.lock);
36924- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
36925+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
36926 }
36927 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
36928 __func__, stag_state, type, pdid, stag_idx);
36929diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
36930index 79b3dbc..96e5fcc 100644
36931--- a/drivers/infiniband/hw/ipath/ipath_rc.c
36932+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
36933@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
36934 struct ib_atomic_eth *ateth;
36935 struct ipath_ack_entry *e;
36936 u64 vaddr;
36937- atomic64_t *maddr;
36938+ atomic64_unchecked_t *maddr;
36939 u64 sdata;
36940 u32 rkey;
36941 u8 next;
36942@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
36943 IB_ACCESS_REMOTE_ATOMIC)))
36944 goto nack_acc_unlck;
36945 /* Perform atomic OP and save result. */
36946- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
36947+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
36948 sdata = be64_to_cpu(ateth->swap_data);
36949 e = &qp->s_ack_queue[qp->r_head_ack_queue];
36950 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
36951- (u64) atomic64_add_return(sdata, maddr) - sdata :
36952+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
36953 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
36954 be64_to_cpu(ateth->compare_data),
36955 sdata);
36956diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
36957index 1f95bba..9530f87 100644
36958--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
36959+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
36960@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
36961 unsigned long flags;
36962 struct ib_wc wc;
36963 u64 sdata;
36964- atomic64_t *maddr;
36965+ atomic64_unchecked_t *maddr;
36966 enum ib_wc_status send_status;
36967
36968 /*
36969@@ -382,11 +382,11 @@ again:
36970 IB_ACCESS_REMOTE_ATOMIC)))
36971 goto acc_err;
36972 /* Perform atomic OP and save result. */
36973- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
36974+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
36975 sdata = wqe->wr.wr.atomic.compare_add;
36976 *(u64 *) sqp->s_sge.sge.vaddr =
36977 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
36978- (u64) atomic64_add_return(sdata, maddr) - sdata :
36979+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
36980 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
36981 sdata, wqe->wr.wr.atomic.swap);
36982 goto send_comp;
36983diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
36984index 9d3e5c1..d9afe4a 100644
36985--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
36986+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
36987@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
36988 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
36989 }
36990
36991-int mthca_QUERY_FW(struct mthca_dev *dev)
36992+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
36993 {
36994 struct mthca_mailbox *mailbox;
36995 u32 *outbox;
36996diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
36997index ed9a989..e0c5871 100644
36998--- a/drivers/infiniband/hw/mthca/mthca_mr.c
36999+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
37000@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
37001 return key;
37002 }
37003
37004-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
37005+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
37006 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
37007 {
37008 struct mthca_mailbox *mailbox;
37009diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
37010index 5b152a3..c1f3e83 100644
37011--- a/drivers/infiniband/hw/nes/nes.c
37012+++ b/drivers/infiniband/hw/nes/nes.c
37013@@ -98,7 +98,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
37014 LIST_HEAD(nes_adapter_list);
37015 static LIST_HEAD(nes_dev_list);
37016
37017-atomic_t qps_destroyed;
37018+atomic_unchecked_t qps_destroyed;
37019
37020 static unsigned int ee_flsh_adapter;
37021 static unsigned int sysfs_nonidx_addr;
37022@@ -267,7 +267,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
37023 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
37024 struct nes_adapter *nesadapter = nesdev->nesadapter;
37025
37026- atomic_inc(&qps_destroyed);
37027+ atomic_inc_unchecked(&qps_destroyed);
37028
37029 /* Free the control structures */
37030
37031diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
37032index 33cc589..3bd6538 100644
37033--- a/drivers/infiniband/hw/nes/nes.h
37034+++ b/drivers/infiniband/hw/nes/nes.h
37035@@ -177,17 +177,17 @@ extern unsigned int nes_debug_level;
37036 extern unsigned int wqm_quanta;
37037 extern struct list_head nes_adapter_list;
37038
37039-extern atomic_t cm_connects;
37040-extern atomic_t cm_accepts;
37041-extern atomic_t cm_disconnects;
37042-extern atomic_t cm_closes;
37043-extern atomic_t cm_connecteds;
37044-extern atomic_t cm_connect_reqs;
37045-extern atomic_t cm_rejects;
37046-extern atomic_t mod_qp_timouts;
37047-extern atomic_t qps_created;
37048-extern atomic_t qps_destroyed;
37049-extern atomic_t sw_qps_destroyed;
37050+extern atomic_unchecked_t cm_connects;
37051+extern atomic_unchecked_t cm_accepts;
37052+extern atomic_unchecked_t cm_disconnects;
37053+extern atomic_unchecked_t cm_closes;
37054+extern atomic_unchecked_t cm_connecteds;
37055+extern atomic_unchecked_t cm_connect_reqs;
37056+extern atomic_unchecked_t cm_rejects;
37057+extern atomic_unchecked_t mod_qp_timouts;
37058+extern atomic_unchecked_t qps_created;
37059+extern atomic_unchecked_t qps_destroyed;
37060+extern atomic_unchecked_t sw_qps_destroyed;
37061 extern u32 mh_detected;
37062 extern u32 mh_pauses_sent;
37063 extern u32 cm_packets_sent;
37064@@ -196,16 +196,16 @@ extern u32 cm_packets_created;
37065 extern u32 cm_packets_received;
37066 extern u32 cm_packets_dropped;
37067 extern u32 cm_packets_retrans;
37068-extern atomic_t cm_listens_created;
37069-extern atomic_t cm_listens_destroyed;
37070+extern atomic_unchecked_t cm_listens_created;
37071+extern atomic_unchecked_t cm_listens_destroyed;
37072 extern u32 cm_backlog_drops;
37073-extern atomic_t cm_loopbacks;
37074-extern atomic_t cm_nodes_created;
37075-extern atomic_t cm_nodes_destroyed;
37076-extern atomic_t cm_accel_dropped_pkts;
37077-extern atomic_t cm_resets_recvd;
37078-extern atomic_t pau_qps_created;
37079-extern atomic_t pau_qps_destroyed;
37080+extern atomic_unchecked_t cm_loopbacks;
37081+extern atomic_unchecked_t cm_nodes_created;
37082+extern atomic_unchecked_t cm_nodes_destroyed;
37083+extern atomic_unchecked_t cm_accel_dropped_pkts;
37084+extern atomic_unchecked_t cm_resets_recvd;
37085+extern atomic_unchecked_t pau_qps_created;
37086+extern atomic_unchecked_t pau_qps_destroyed;
37087
37088 extern u32 int_mod_timer_init;
37089 extern u32 int_mod_cq_depth_256;
37090diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
37091index 22ea67e..dcbe3bc 100644
37092--- a/drivers/infiniband/hw/nes/nes_cm.c
37093+++ b/drivers/infiniband/hw/nes/nes_cm.c
37094@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
37095 u32 cm_packets_retrans;
37096 u32 cm_packets_created;
37097 u32 cm_packets_received;
37098-atomic_t cm_listens_created;
37099-atomic_t cm_listens_destroyed;
37100+atomic_unchecked_t cm_listens_created;
37101+atomic_unchecked_t cm_listens_destroyed;
37102 u32 cm_backlog_drops;
37103-atomic_t cm_loopbacks;
37104-atomic_t cm_nodes_created;
37105-atomic_t cm_nodes_destroyed;
37106-atomic_t cm_accel_dropped_pkts;
37107-atomic_t cm_resets_recvd;
37108+atomic_unchecked_t cm_loopbacks;
37109+atomic_unchecked_t cm_nodes_created;
37110+atomic_unchecked_t cm_nodes_destroyed;
37111+atomic_unchecked_t cm_accel_dropped_pkts;
37112+atomic_unchecked_t cm_resets_recvd;
37113
37114 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
37115 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
37116@@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
37117
37118 static struct nes_cm_core *g_cm_core;
37119
37120-atomic_t cm_connects;
37121-atomic_t cm_accepts;
37122-atomic_t cm_disconnects;
37123-atomic_t cm_closes;
37124-atomic_t cm_connecteds;
37125-atomic_t cm_connect_reqs;
37126-atomic_t cm_rejects;
37127+atomic_unchecked_t cm_connects;
37128+atomic_unchecked_t cm_accepts;
37129+atomic_unchecked_t cm_disconnects;
37130+atomic_unchecked_t cm_closes;
37131+atomic_unchecked_t cm_connecteds;
37132+atomic_unchecked_t cm_connect_reqs;
37133+atomic_unchecked_t cm_rejects;
37134
37135 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
37136 {
37137@@ -1272,7 +1272,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
37138 kfree(listener);
37139 listener = NULL;
37140 ret = 0;
37141- atomic_inc(&cm_listens_destroyed);
37142+ atomic_inc_unchecked(&cm_listens_destroyed);
37143 } else {
37144 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
37145 }
37146@@ -1466,7 +1466,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
37147 cm_node->rem_mac);
37148
37149 add_hte_node(cm_core, cm_node);
37150- atomic_inc(&cm_nodes_created);
37151+ atomic_inc_unchecked(&cm_nodes_created);
37152
37153 return cm_node;
37154 }
37155@@ -1524,7 +1524,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
37156 }
37157
37158 atomic_dec(&cm_core->node_cnt);
37159- atomic_inc(&cm_nodes_destroyed);
37160+ atomic_inc_unchecked(&cm_nodes_destroyed);
37161 nesqp = cm_node->nesqp;
37162 if (nesqp) {
37163 nesqp->cm_node = NULL;
37164@@ -1588,7 +1588,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
37165
37166 static void drop_packet(struct sk_buff *skb)
37167 {
37168- atomic_inc(&cm_accel_dropped_pkts);
37169+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
37170 dev_kfree_skb_any(skb);
37171 }
37172
37173@@ -1651,7 +1651,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
37174 {
37175
37176 int reset = 0; /* whether to send reset in case of err.. */
37177- atomic_inc(&cm_resets_recvd);
37178+ atomic_inc_unchecked(&cm_resets_recvd);
37179 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
37180 " refcnt=%d\n", cm_node, cm_node->state,
37181 atomic_read(&cm_node->ref_count));
37182@@ -2292,7 +2292,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
37183 rem_ref_cm_node(cm_node->cm_core, cm_node);
37184 return NULL;
37185 }
37186- atomic_inc(&cm_loopbacks);
37187+ atomic_inc_unchecked(&cm_loopbacks);
37188 loopbackremotenode->loopbackpartner = cm_node;
37189 loopbackremotenode->tcp_cntxt.rcv_wscale =
37190 NES_CM_DEFAULT_RCV_WND_SCALE;
37191@@ -2567,7 +2567,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
37192 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
37193 else {
37194 rem_ref_cm_node(cm_core, cm_node);
37195- atomic_inc(&cm_accel_dropped_pkts);
37196+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
37197 dev_kfree_skb_any(skb);
37198 }
37199 break;
37200@@ -2875,7 +2875,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
37201
37202 if ((cm_id) && (cm_id->event_handler)) {
37203 if (issue_disconn) {
37204- atomic_inc(&cm_disconnects);
37205+ atomic_inc_unchecked(&cm_disconnects);
37206 cm_event.event = IW_CM_EVENT_DISCONNECT;
37207 cm_event.status = disconn_status;
37208 cm_event.local_addr = cm_id->local_addr;
37209@@ -2897,7 +2897,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
37210 }
37211
37212 if (issue_close) {
37213- atomic_inc(&cm_closes);
37214+ atomic_inc_unchecked(&cm_closes);
37215 nes_disconnect(nesqp, 1);
37216
37217 cm_id->provider_data = nesqp;
37218@@ -3033,7 +3033,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
37219
37220 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
37221 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
37222- atomic_inc(&cm_accepts);
37223+ atomic_inc_unchecked(&cm_accepts);
37224
37225 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
37226 netdev_refcnt_read(nesvnic->netdev));
37227@@ -3228,7 +3228,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
37228 struct nes_cm_core *cm_core;
37229 u8 *start_buff;
37230
37231- atomic_inc(&cm_rejects);
37232+ atomic_inc_unchecked(&cm_rejects);
37233 cm_node = (struct nes_cm_node *)cm_id->provider_data;
37234 loopback = cm_node->loopbackpartner;
37235 cm_core = cm_node->cm_core;
37236@@ -3288,7 +3288,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
37237 ntohl(cm_id->local_addr.sin_addr.s_addr),
37238 ntohs(cm_id->local_addr.sin_port));
37239
37240- atomic_inc(&cm_connects);
37241+ atomic_inc_unchecked(&cm_connects);
37242 nesqp->active_conn = 1;
37243
37244 /* cache the cm_id in the qp */
37245@@ -3398,7 +3398,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
37246 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
37247 return err;
37248 }
37249- atomic_inc(&cm_listens_created);
37250+ atomic_inc_unchecked(&cm_listens_created);
37251 }
37252
37253 cm_id->add_ref(cm_id);
37254@@ -3499,7 +3499,7 @@ static void cm_event_connected(struct nes_cm_event *event)
37255
37256 if (nesqp->destroyed)
37257 return;
37258- atomic_inc(&cm_connecteds);
37259+ atomic_inc_unchecked(&cm_connecteds);
37260 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
37261 " local port 0x%04X. jiffies = %lu.\n",
37262 nesqp->hwqp.qp_id,
37263@@ -3679,7 +3679,7 @@ static void cm_event_reset(struct nes_cm_event *event)
37264
37265 cm_id->add_ref(cm_id);
37266 ret = cm_id->event_handler(cm_id, &cm_event);
37267- atomic_inc(&cm_closes);
37268+ atomic_inc_unchecked(&cm_closes);
37269 cm_event.event = IW_CM_EVENT_CLOSE;
37270 cm_event.status = 0;
37271 cm_event.provider_data = cm_id->provider_data;
37272@@ -3715,7 +3715,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
37273 return;
37274 cm_id = cm_node->cm_id;
37275
37276- atomic_inc(&cm_connect_reqs);
37277+ atomic_inc_unchecked(&cm_connect_reqs);
37278 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
37279 cm_node, cm_id, jiffies);
37280
37281@@ -3755,7 +3755,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
37282 return;
37283 cm_id = cm_node->cm_id;
37284
37285- atomic_inc(&cm_connect_reqs);
37286+ atomic_inc_unchecked(&cm_connect_reqs);
37287 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
37288 cm_node, cm_id, jiffies);
37289
37290diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
37291index 4166452..fc952c3 100644
37292--- a/drivers/infiniband/hw/nes/nes_mgt.c
37293+++ b/drivers/infiniband/hw/nes/nes_mgt.c
37294@@ -40,8 +40,8 @@
37295 #include "nes.h"
37296 #include "nes_mgt.h"
37297
37298-atomic_t pau_qps_created;
37299-atomic_t pau_qps_destroyed;
37300+atomic_unchecked_t pau_qps_created;
37301+atomic_unchecked_t pau_qps_destroyed;
37302
37303 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
37304 {
37305@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
37306 {
37307 struct sk_buff *skb;
37308 unsigned long flags;
37309- atomic_inc(&pau_qps_destroyed);
37310+ atomic_inc_unchecked(&pau_qps_destroyed);
37311
37312 /* Free packets that have not yet been forwarded */
37313 /* Lock is acquired by skb_dequeue when removing the skb */
37314@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
37315 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
37316 skb_queue_head_init(&nesqp->pau_list);
37317 spin_lock_init(&nesqp->pau_lock);
37318- atomic_inc(&pau_qps_created);
37319+ atomic_inc_unchecked(&pau_qps_created);
37320 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
37321 }
37322
37323diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
37324index 9542e16..a008c40 100644
37325--- a/drivers/infiniband/hw/nes/nes_nic.c
37326+++ b/drivers/infiniband/hw/nes/nes_nic.c
37327@@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
37328 target_stat_values[++index] = mh_detected;
37329 target_stat_values[++index] = mh_pauses_sent;
37330 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
37331- target_stat_values[++index] = atomic_read(&cm_connects);
37332- target_stat_values[++index] = atomic_read(&cm_accepts);
37333- target_stat_values[++index] = atomic_read(&cm_disconnects);
37334- target_stat_values[++index] = atomic_read(&cm_connecteds);
37335- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
37336- target_stat_values[++index] = atomic_read(&cm_rejects);
37337- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
37338- target_stat_values[++index] = atomic_read(&qps_created);
37339- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
37340- target_stat_values[++index] = atomic_read(&qps_destroyed);
37341- target_stat_values[++index] = atomic_read(&cm_closes);
37342+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
37343+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
37344+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
37345+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
37346+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
37347+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
37348+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
37349+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
37350+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
37351+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
37352+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
37353 target_stat_values[++index] = cm_packets_sent;
37354 target_stat_values[++index] = cm_packets_bounced;
37355 target_stat_values[++index] = cm_packets_created;
37356 target_stat_values[++index] = cm_packets_received;
37357 target_stat_values[++index] = cm_packets_dropped;
37358 target_stat_values[++index] = cm_packets_retrans;
37359- target_stat_values[++index] = atomic_read(&cm_listens_created);
37360- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
37361+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
37362+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
37363 target_stat_values[++index] = cm_backlog_drops;
37364- target_stat_values[++index] = atomic_read(&cm_loopbacks);
37365- target_stat_values[++index] = atomic_read(&cm_nodes_created);
37366- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
37367- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
37368- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
37369+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
37370+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
37371+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
37372+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
37373+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
37374 target_stat_values[++index] = nesadapter->free_4kpbl;
37375 target_stat_values[++index] = nesadapter->free_256pbl;
37376 target_stat_values[++index] = int_mod_timer_init;
37377 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
37378 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
37379 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
37380- target_stat_values[++index] = atomic_read(&pau_qps_created);
37381- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
37382+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
37383+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
37384 }
37385
37386 /**
37387diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
37388index 07e4fba..685f041 100644
37389--- a/drivers/infiniband/hw/nes/nes_verbs.c
37390+++ b/drivers/infiniband/hw/nes/nes_verbs.c
37391@@ -46,9 +46,9 @@
37392
37393 #include <rdma/ib_umem.h>
37394
37395-atomic_t mod_qp_timouts;
37396-atomic_t qps_created;
37397-atomic_t sw_qps_destroyed;
37398+atomic_unchecked_t mod_qp_timouts;
37399+atomic_unchecked_t qps_created;
37400+atomic_unchecked_t sw_qps_destroyed;
37401
37402 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
37403
37404@@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
37405 if (init_attr->create_flags)
37406 return ERR_PTR(-EINVAL);
37407
37408- atomic_inc(&qps_created);
37409+ atomic_inc_unchecked(&qps_created);
37410 switch (init_attr->qp_type) {
37411 case IB_QPT_RC:
37412 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
37413@@ -1462,7 +1462,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
37414 struct iw_cm_event cm_event;
37415 int ret = 0;
37416
37417- atomic_inc(&sw_qps_destroyed);
37418+ atomic_inc_unchecked(&sw_qps_destroyed);
37419 nesqp->destroyed = 1;
37420
37421 /* Blow away the connection if it exists. */
37422diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
37423index 4d11575..3e890e5 100644
37424--- a/drivers/infiniband/hw/qib/qib.h
37425+++ b/drivers/infiniband/hw/qib/qib.h
37426@@ -51,6 +51,7 @@
37427 #include <linux/completion.h>
37428 #include <linux/kref.h>
37429 #include <linux/sched.h>
37430+#include <linux/slab.h>
37431
37432 #include "qib_common.h"
37433 #include "qib_verbs.h"
37434diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
37435index da739d9..da1c7f4 100644
37436--- a/drivers/input/gameport/gameport.c
37437+++ b/drivers/input/gameport/gameport.c
37438@@ -487,14 +487,14 @@ EXPORT_SYMBOL(gameport_set_phys);
37439 */
37440 static void gameport_init_port(struct gameport *gameport)
37441 {
37442- static atomic_t gameport_no = ATOMIC_INIT(0);
37443+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
37444
37445 __module_get(THIS_MODULE);
37446
37447 mutex_init(&gameport->drv_mutex);
37448 device_initialize(&gameport->dev);
37449 dev_set_name(&gameport->dev, "gameport%lu",
37450- (unsigned long)atomic_inc_return(&gameport_no) - 1);
37451+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
37452 gameport->dev.bus = &gameport_bus;
37453 gameport->dev.release = gameport_release_port;
37454 if (gameport->parent)
37455diff --git a/drivers/input/input.c b/drivers/input/input.c
37456index c044699..174d71a 100644
37457--- a/drivers/input/input.c
37458+++ b/drivers/input/input.c
37459@@ -2019,7 +2019,7 @@ static void devm_input_device_unregister(struct device *dev, void *res)
37460 */
37461 int input_register_device(struct input_dev *dev)
37462 {
37463- static atomic_t input_no = ATOMIC_INIT(0);
37464+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
37465 struct input_devres *devres = NULL;
37466 struct input_handler *handler;
37467 unsigned int packet_size;
37468@@ -2074,7 +2074,7 @@ int input_register_device(struct input_dev *dev)
37469 dev->setkeycode = input_default_setkeycode;
37470
37471 dev_set_name(&dev->dev, "input%ld",
37472- (unsigned long) atomic_inc_return(&input_no) - 1);
37473+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
37474
37475 error = device_add(&dev->dev);
37476 if (error)
37477diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
37478index 04c69af..5f92d00 100644
37479--- a/drivers/input/joystick/sidewinder.c
37480+++ b/drivers/input/joystick/sidewinder.c
37481@@ -30,6 +30,7 @@
37482 #include <linux/kernel.h>
37483 #include <linux/module.h>
37484 #include <linux/slab.h>
37485+#include <linux/sched.h>
37486 #include <linux/init.h>
37487 #include <linux/input.h>
37488 #include <linux/gameport.h>
37489diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
37490index d6cbfe9..6225402 100644
37491--- a/drivers/input/joystick/xpad.c
37492+++ b/drivers/input/joystick/xpad.c
37493@@ -735,7 +735,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
37494
37495 static int xpad_led_probe(struct usb_xpad *xpad)
37496 {
37497- static atomic_t led_seq = ATOMIC_INIT(0);
37498+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
37499 long led_no;
37500 struct xpad_led *led;
37501 struct led_classdev *led_cdev;
37502@@ -748,7 +748,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
37503 if (!led)
37504 return -ENOMEM;
37505
37506- led_no = (long)atomic_inc_return(&led_seq) - 1;
37507+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
37508
37509 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
37510 led->xpad = xpad;
37511diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
37512index fe1df23..5b710f3 100644
37513--- a/drivers/input/mouse/psmouse.h
37514+++ b/drivers/input/mouse/psmouse.h
37515@@ -115,7 +115,7 @@ struct psmouse_attribute {
37516 ssize_t (*set)(struct psmouse *psmouse, void *data,
37517 const char *buf, size_t count);
37518 bool protect;
37519-};
37520+} __do_const;
37521 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
37522
37523 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
37524diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
37525index 4c842c3..590b0bf 100644
37526--- a/drivers/input/mousedev.c
37527+++ b/drivers/input/mousedev.c
37528@@ -738,7 +738,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
37529
37530 spin_unlock_irq(&client->packet_lock);
37531
37532- if (copy_to_user(buffer, data, count))
37533+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
37534 return -EFAULT;
37535
37536 return count;
37537diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
37538index 25fc597..558bf3b 100644
37539--- a/drivers/input/serio/serio.c
37540+++ b/drivers/input/serio/serio.c
37541@@ -496,7 +496,7 @@ static void serio_release_port(struct device *dev)
37542 */
37543 static void serio_init_port(struct serio *serio)
37544 {
37545- static atomic_t serio_no = ATOMIC_INIT(0);
37546+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
37547
37548 __module_get(THIS_MODULE);
37549
37550@@ -507,7 +507,7 @@ static void serio_init_port(struct serio *serio)
37551 mutex_init(&serio->drv_mutex);
37552 device_initialize(&serio->dev);
37553 dev_set_name(&serio->dev, "serio%ld",
37554- (long)atomic_inc_return(&serio_no) - 1);
37555+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
37556 serio->dev.bus = &serio_bus;
37557 serio->dev.release = serio_release_port;
37558 serio->dev.groups = serio_device_attr_groups;
37559diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
37560index ddbdaca..be18a78 100644
37561--- a/drivers/iommu/iommu.c
37562+++ b/drivers/iommu/iommu.c
37563@@ -554,7 +554,7 @@ static struct notifier_block iommu_bus_nb = {
37564 static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
37565 {
37566 bus_register_notifier(bus, &iommu_bus_nb);
37567- bus_for_each_dev(bus, NULL, ops, add_iommu_group);
37568+ bus_for_each_dev(bus, NULL, (void *)ops, add_iommu_group);
37569 }
37570
37571 /**
37572diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
37573index 89562a8..218999b 100644
37574--- a/drivers/isdn/capi/capi.c
37575+++ b/drivers/isdn/capi/capi.c
37576@@ -81,8 +81,8 @@ struct capiminor {
37577
37578 struct capi20_appl *ap;
37579 u32 ncci;
37580- atomic_t datahandle;
37581- atomic_t msgid;
37582+ atomic_unchecked_t datahandle;
37583+ atomic_unchecked_t msgid;
37584
37585 struct tty_port port;
37586 int ttyinstop;
37587@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
37588 capimsg_setu16(s, 2, mp->ap->applid);
37589 capimsg_setu8 (s, 4, CAPI_DATA_B3);
37590 capimsg_setu8 (s, 5, CAPI_RESP);
37591- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
37592+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
37593 capimsg_setu32(s, 8, mp->ncci);
37594 capimsg_setu16(s, 12, datahandle);
37595 }
37596@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
37597 mp->outbytes -= len;
37598 spin_unlock_bh(&mp->outlock);
37599
37600- datahandle = atomic_inc_return(&mp->datahandle);
37601+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
37602 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
37603 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
37604 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
37605 capimsg_setu16(skb->data, 2, mp->ap->applid);
37606 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
37607 capimsg_setu8 (skb->data, 5, CAPI_REQ);
37608- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
37609+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
37610 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
37611 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
37612 capimsg_setu16(skb->data, 16, len); /* Data length */
37613diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
37614index 67abf3f..076b3a6 100644
37615--- a/drivers/isdn/gigaset/interface.c
37616+++ b/drivers/isdn/gigaset/interface.c
37617@@ -160,9 +160,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
37618 }
37619 tty->driver_data = cs;
37620
37621- ++cs->port.count;
37622+ atomic_inc(&cs->port.count);
37623
37624- if (cs->port.count == 1) {
37625+ if (atomic_read(&cs->port.count) == 1) {
37626 tty_port_tty_set(&cs->port, tty);
37627 tty->low_latency = 1;
37628 }
37629@@ -186,9 +186,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
37630
37631 if (!cs->connected)
37632 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
37633- else if (!cs->port.count)
37634+ else if (!atomic_read(&cs->port.count))
37635 dev_warn(cs->dev, "%s: device not opened\n", __func__);
37636- else if (!--cs->port.count)
37637+ else if (!atomic_dec_return(&cs->port.count))
37638 tty_port_tty_set(&cs->port, NULL);
37639
37640 mutex_unlock(&cs->mutex);
37641diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
37642index 821f7ac..28d4030 100644
37643--- a/drivers/isdn/hardware/avm/b1.c
37644+++ b/drivers/isdn/hardware/avm/b1.c
37645@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
37646 }
37647 if (left) {
37648 if (t4file->user) {
37649- if (copy_from_user(buf, dp, left))
37650+ if (left > sizeof buf || copy_from_user(buf, dp, left))
37651 return -EFAULT;
37652 } else {
37653 memcpy(buf, dp, left);
37654@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
37655 }
37656 if (left) {
37657 if (config->user) {
37658- if (copy_from_user(buf, dp, left))
37659+ if (left > sizeof buf || copy_from_user(buf, dp, left))
37660 return -EFAULT;
37661 } else {
37662 memcpy(buf, dp, left);
37663diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
37664index e09dc8a..15e2efb 100644
37665--- a/drivers/isdn/i4l/isdn_tty.c
37666+++ b/drivers/isdn/i4l/isdn_tty.c
37667@@ -1513,9 +1513,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
37668
37669 #ifdef ISDN_DEBUG_MODEM_OPEN
37670 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
37671- port->count);
37672+ atomic_read(&port->count));
37673 #endif
37674- port->count++;
37675+ atomic_inc(&port->count);
37676 port->tty = tty;
37677 /*
37678 * Start up serial port
37679@@ -1559,7 +1559,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
37680 #endif
37681 return;
37682 }
37683- if ((tty->count == 1) && (port->count != 1)) {
37684+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
37685 /*
37686 * Uh, oh. tty->count is 1, which means that the tty
37687 * structure will be freed. Info->count should always
37688@@ -1568,15 +1568,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
37689 * serial port won't be shutdown.
37690 */
37691 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
37692- "info->count is %d\n", port->count);
37693- port->count = 1;
37694+ "info->count is %d\n", atomic_read(&port->count));
37695+ atomic_set(&port->count, 1);
37696 }
37697- if (--port->count < 0) {
37698+ if (atomic_dec_return(&port->count) < 0) {
37699 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
37700- info->line, port->count);
37701- port->count = 0;
37702+ info->line, atomic_read(&port->count));
37703+ atomic_set(&port->count, 0);
37704 }
37705- if (port->count) {
37706+ if (atomic_read(&port->count)) {
37707 #ifdef ISDN_DEBUG_MODEM_OPEN
37708 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
37709 #endif
37710@@ -1630,7 +1630,7 @@ isdn_tty_hangup(struct tty_struct *tty)
37711 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
37712 return;
37713 isdn_tty_shutdown(info);
37714- port->count = 0;
37715+ atomic_set(&port->count, 0);
37716 port->flags &= ~ASYNC_NORMAL_ACTIVE;
37717 port->tty = NULL;
37718 wake_up_interruptible(&port->open_wait);
37719@@ -1975,7 +1975,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
37720 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
37721 modem_info *info = &dev->mdm.info[i];
37722
37723- if (info->port.count == 0)
37724+ if (atomic_read(&info->port.count) == 0)
37725 continue;
37726 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
37727 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
37728diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
37729index e74df7c..03a03ba 100644
37730--- a/drivers/isdn/icn/icn.c
37731+++ b/drivers/isdn/icn/icn.c
37732@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
37733 if (count > len)
37734 count = len;
37735 if (user) {
37736- if (copy_from_user(msg, buf, count))
37737+ if (count > sizeof msg || copy_from_user(msg, buf, count))
37738 return -EFAULT;
37739 } else
37740 memcpy(msg, buf, count);
37741diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
37742index 6a8405d..0bd1c7e 100644
37743--- a/drivers/leds/leds-clevo-mail.c
37744+++ b/drivers/leds/leds-clevo-mail.c
37745@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
37746 * detected as working, but in reality it is not) as low as
37747 * possible.
37748 */
37749-static struct dmi_system_id __initdata clevo_mail_led_dmi_table[] = {
37750+static const struct dmi_system_id __initconst clevo_mail_led_dmi_table[] = {
37751 {
37752 .callback = clevo_mail_led_dmi_callback,
37753 .ident = "Clevo D410J",
37754diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
37755index ec9b287..65c9bf4 100644
37756--- a/drivers/leds/leds-ss4200.c
37757+++ b/drivers/leds/leds-ss4200.c
37758@@ -92,7 +92,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
37759 * detected as working, but in reality it is not) as low as
37760 * possible.
37761 */
37762-static struct dmi_system_id __initdata nas_led_whitelist[] = {
37763+static const struct dmi_system_id __initconst nas_led_whitelist[] = {
37764 {
37765 .callback = ss4200_led_dmi_callback,
37766 .ident = "Intel SS4200-E",
37767diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
37768index a5ebc00..982886f 100644
37769--- a/drivers/lguest/core.c
37770+++ b/drivers/lguest/core.c
37771@@ -92,9 +92,17 @@ static __init int map_switcher(void)
37772 * it's worked so far. The end address needs +1 because __get_vm_area
37773 * allocates an extra guard page, so we need space for that.
37774 */
37775+
37776+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
37777+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
37778+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
37779+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
37780+#else
37781 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
37782 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
37783 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
37784+#endif
37785+
37786 if (!switcher_vma) {
37787 err = -ENOMEM;
37788 printk("lguest: could not map switcher pages high\n");
37789@@ -119,7 +127,7 @@ static __init int map_switcher(void)
37790 * Now the Switcher is mapped at the right address, we can't fail!
37791 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
37792 */
37793- memcpy(switcher_vma->addr, start_switcher_text,
37794+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
37795 end_switcher_text - start_switcher_text);
37796
37797 printk(KERN_INFO "lguest: mapped switcher at %p\n",
37798diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
37799index 3b62be16..e33134a 100644
37800--- a/drivers/lguest/page_tables.c
37801+++ b/drivers/lguest/page_tables.c
37802@@ -532,7 +532,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
37803 /*:*/
37804
37805 #ifdef CONFIG_X86_PAE
37806-static void release_pmd(pmd_t *spmd)
37807+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
37808 {
37809 /* If the entry's not present, there's nothing to release. */
37810 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
37811diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
37812index 4af12e1..0e89afe 100644
37813--- a/drivers/lguest/x86/core.c
37814+++ b/drivers/lguest/x86/core.c
37815@@ -59,7 +59,7 @@ static struct {
37816 /* Offset from where switcher.S was compiled to where we've copied it */
37817 static unsigned long switcher_offset(void)
37818 {
37819- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
37820+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
37821 }
37822
37823 /* This cpu's struct lguest_pages. */
37824@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
37825 * These copies are pretty cheap, so we do them unconditionally: */
37826 /* Save the current Host top-level page directory.
37827 */
37828+
37829+#ifdef CONFIG_PAX_PER_CPU_PGD
37830+ pages->state.host_cr3 = read_cr3();
37831+#else
37832 pages->state.host_cr3 = __pa(current->mm->pgd);
37833+#endif
37834+
37835 /*
37836 * Set up the Guest's page tables to see this CPU's pages (and no
37837 * other CPU's pages).
37838@@ -476,7 +482,7 @@ void __init lguest_arch_host_init(void)
37839 * compiled-in switcher code and the high-mapped copy we just made.
37840 */
37841 for (i = 0; i < IDT_ENTRIES; i++)
37842- default_idt_entries[i] += switcher_offset();
37843+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
37844
37845 /*
37846 * Set up the Switcher's per-cpu areas.
37847@@ -559,7 +565,7 @@ void __init lguest_arch_host_init(void)
37848 * it will be undisturbed when we switch. To change %cs and jump we
37849 * need this structure to feed to Intel's "lcall" instruction.
37850 */
37851- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
37852+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
37853 lguest_entry.segment = LGUEST_CS;
37854
37855 /*
37856diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
37857index 40634b0..4f5855e 100644
37858--- a/drivers/lguest/x86/switcher_32.S
37859+++ b/drivers/lguest/x86/switcher_32.S
37860@@ -87,6 +87,7 @@
37861 #include <asm/page.h>
37862 #include <asm/segment.h>
37863 #include <asm/lguest.h>
37864+#include <asm/processor-flags.h>
37865
37866 // We mark the start of the code to copy
37867 // It's placed in .text tho it's never run here
37868@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
37869 // Changes type when we load it: damn Intel!
37870 // For after we switch over our page tables
37871 // That entry will be read-only: we'd crash.
37872+
37873+#ifdef CONFIG_PAX_KERNEXEC
37874+ mov %cr0, %edx
37875+ xor $X86_CR0_WP, %edx
37876+ mov %edx, %cr0
37877+#endif
37878+
37879 movl $(GDT_ENTRY_TSS*8), %edx
37880 ltr %dx
37881
37882@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
37883 // Let's clear it again for our return.
37884 // The GDT descriptor of the Host
37885 // Points to the table after two "size" bytes
37886- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
37887+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
37888 // Clear "used" from type field (byte 5, bit 2)
37889- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
37890+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
37891+
37892+#ifdef CONFIG_PAX_KERNEXEC
37893+ mov %cr0, %eax
37894+ xor $X86_CR0_WP, %eax
37895+ mov %eax, %cr0
37896+#endif
37897
37898 // Once our page table's switched, the Guest is live!
37899 // The Host fades as we run this final step.
37900@@ -295,13 +309,12 @@ deliver_to_host:
37901 // I consulted gcc, and it gave
37902 // These instructions, which I gladly credit:
37903 leal (%edx,%ebx,8), %eax
37904- movzwl (%eax),%edx
37905- movl 4(%eax), %eax
37906- xorw %ax, %ax
37907- orl %eax, %edx
37908+ movl 4(%eax), %edx
37909+ movw (%eax), %dx
37910 // Now the address of the handler's in %edx
37911 // We call it now: its "iret" drops us home.
37912- jmp *%edx
37913+ ljmp $__KERNEL_CS, $1f
37914+1: jmp *%edx
37915
37916 // Every interrupt can come to us here
37917 // But we must truly tell each apart.
37918diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
37919index 7155945..4bcc562 100644
37920--- a/drivers/md/bitmap.c
37921+++ b/drivers/md/bitmap.c
37922@@ -1779,7 +1779,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
37923 chunk_kb ? "KB" : "B");
37924 if (bitmap->storage.file) {
37925 seq_printf(seq, ", file: ");
37926- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
37927+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
37928 }
37929
37930 seq_printf(seq, "\n");
37931diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
37932index eee353d..74504c4 100644
37933--- a/drivers/md/dm-ioctl.c
37934+++ b/drivers/md/dm-ioctl.c
37935@@ -1632,7 +1632,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
37936 cmd == DM_LIST_VERSIONS_CMD)
37937 return 0;
37938
37939- if ((cmd == DM_DEV_CREATE_CMD)) {
37940+ if (cmd == DM_DEV_CREATE_CMD) {
37941 if (!*param->name) {
37942 DMWARN("name not supplied when creating device");
37943 return -EINVAL;
37944diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
37945index 7f24190..0e18099 100644
37946--- a/drivers/md/dm-raid1.c
37947+++ b/drivers/md/dm-raid1.c
37948@@ -40,7 +40,7 @@ enum dm_raid1_error {
37949
37950 struct mirror {
37951 struct mirror_set *ms;
37952- atomic_t error_count;
37953+ atomic_unchecked_t error_count;
37954 unsigned long error_type;
37955 struct dm_dev *dev;
37956 sector_t offset;
37957@@ -183,7 +183,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
37958 struct mirror *m;
37959
37960 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
37961- if (!atomic_read(&m->error_count))
37962+ if (!atomic_read_unchecked(&m->error_count))
37963 return m;
37964
37965 return NULL;
37966@@ -215,7 +215,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
37967 * simple way to tell if a device has encountered
37968 * errors.
37969 */
37970- atomic_inc(&m->error_count);
37971+ atomic_inc_unchecked(&m->error_count);
37972
37973 if (test_and_set_bit(error_type, &m->error_type))
37974 return;
37975@@ -406,7 +406,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
37976 struct mirror *m = get_default_mirror(ms);
37977
37978 do {
37979- if (likely(!atomic_read(&m->error_count)))
37980+ if (likely(!atomic_read_unchecked(&m->error_count)))
37981 return m;
37982
37983 if (m-- == ms->mirror)
37984@@ -420,7 +420,7 @@ static int default_ok(struct mirror *m)
37985 {
37986 struct mirror *default_mirror = get_default_mirror(m->ms);
37987
37988- return !atomic_read(&default_mirror->error_count);
37989+ return !atomic_read_unchecked(&default_mirror->error_count);
37990 }
37991
37992 static int mirror_available(struct mirror_set *ms, struct bio *bio)
37993@@ -557,7 +557,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
37994 */
37995 if (likely(region_in_sync(ms, region, 1)))
37996 m = choose_mirror(ms, bio->bi_sector);
37997- else if (m && atomic_read(&m->error_count))
37998+ else if (m && atomic_read_unchecked(&m->error_count))
37999 m = NULL;
38000
38001 if (likely(m))
38002@@ -924,7 +924,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
38003 }
38004
38005 ms->mirror[mirror].ms = ms;
38006- atomic_set(&(ms->mirror[mirror].error_count), 0);
38007+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
38008 ms->mirror[mirror].error_type = 0;
38009 ms->mirror[mirror].offset = offset;
38010
38011@@ -1337,7 +1337,7 @@ static void mirror_resume(struct dm_target *ti)
38012 */
38013 static char device_status_char(struct mirror *m)
38014 {
38015- if (!atomic_read(&(m->error_count)))
38016+ if (!atomic_read_unchecked(&(m->error_count)))
38017 return 'A';
38018
38019 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
38020diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
38021index aaecefa..23b3026 100644
38022--- a/drivers/md/dm-stripe.c
38023+++ b/drivers/md/dm-stripe.c
38024@@ -20,7 +20,7 @@ struct stripe {
38025 struct dm_dev *dev;
38026 sector_t physical_start;
38027
38028- atomic_t error_count;
38029+ atomic_unchecked_t error_count;
38030 };
38031
38032 struct stripe_c {
38033@@ -184,7 +184,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
38034 kfree(sc);
38035 return r;
38036 }
38037- atomic_set(&(sc->stripe[i].error_count), 0);
38038+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
38039 }
38040
38041 ti->private = sc;
38042@@ -325,7 +325,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
38043 DMEMIT("%d ", sc->stripes);
38044 for (i = 0; i < sc->stripes; i++) {
38045 DMEMIT("%s ", sc->stripe[i].dev->name);
38046- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
38047+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
38048 'D' : 'A';
38049 }
38050 buffer[i] = '\0';
38051@@ -370,8 +370,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
38052 */
38053 for (i = 0; i < sc->stripes; i++)
38054 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
38055- atomic_inc(&(sc->stripe[i].error_count));
38056- if (atomic_read(&(sc->stripe[i].error_count)) <
38057+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
38058+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
38059 DM_IO_ERROR_THRESHOLD)
38060 schedule_work(&sc->trigger_event);
38061 }
38062diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
38063index daf25d0..d74f49f 100644
38064--- a/drivers/md/dm-table.c
38065+++ b/drivers/md/dm-table.c
38066@@ -390,7 +390,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
38067 if (!dev_size)
38068 return 0;
38069
38070- if ((start >= dev_size) || (start + len > dev_size)) {
38071+ if ((start >= dev_size) || (len > dev_size - start)) {
38072 DMWARN("%s: %s too small for target: "
38073 "start=%llu, len=%llu, dev_size=%llu",
38074 dm_device_name(ti->table->md), bdevname(bdev, b),
38075diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
38076index 4d6e853..a234157 100644
38077--- a/drivers/md/dm-thin-metadata.c
38078+++ b/drivers/md/dm-thin-metadata.c
38079@@ -397,7 +397,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
38080 {
38081 pmd->info.tm = pmd->tm;
38082 pmd->info.levels = 2;
38083- pmd->info.value_type.context = pmd->data_sm;
38084+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
38085 pmd->info.value_type.size = sizeof(__le64);
38086 pmd->info.value_type.inc = data_block_inc;
38087 pmd->info.value_type.dec = data_block_dec;
38088@@ -416,7 +416,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
38089
38090 pmd->bl_info.tm = pmd->tm;
38091 pmd->bl_info.levels = 1;
38092- pmd->bl_info.value_type.context = pmd->data_sm;
38093+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
38094 pmd->bl_info.value_type.size = sizeof(__le64);
38095 pmd->bl_info.value_type.inc = data_block_inc;
38096 pmd->bl_info.value_type.dec = data_block_dec;
38097diff --git a/drivers/md/dm.c b/drivers/md/dm.c
38098index 0d8f086..f5a91d5 100644
38099--- a/drivers/md/dm.c
38100+++ b/drivers/md/dm.c
38101@@ -170,9 +170,9 @@ struct mapped_device {
38102 /*
38103 * Event handling.
38104 */
38105- atomic_t event_nr;
38106+ atomic_unchecked_t event_nr;
38107 wait_queue_head_t eventq;
38108- atomic_t uevent_seq;
38109+ atomic_unchecked_t uevent_seq;
38110 struct list_head uevent_list;
38111 spinlock_t uevent_lock; /* Protect access to uevent_list */
38112
38113@@ -1872,8 +1872,8 @@ static struct mapped_device *alloc_dev(int minor)
38114 rwlock_init(&md->map_lock);
38115 atomic_set(&md->holders, 1);
38116 atomic_set(&md->open_count, 0);
38117- atomic_set(&md->event_nr, 0);
38118- atomic_set(&md->uevent_seq, 0);
38119+ atomic_set_unchecked(&md->event_nr, 0);
38120+ atomic_set_unchecked(&md->uevent_seq, 0);
38121 INIT_LIST_HEAD(&md->uevent_list);
38122 spin_lock_init(&md->uevent_lock);
38123
38124@@ -2026,7 +2026,7 @@ static void event_callback(void *context)
38125
38126 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
38127
38128- atomic_inc(&md->event_nr);
38129+ atomic_inc_unchecked(&md->event_nr);
38130 wake_up(&md->eventq);
38131 }
38132
38133@@ -2683,18 +2683,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
38134
38135 uint32_t dm_next_uevent_seq(struct mapped_device *md)
38136 {
38137- return atomic_add_return(1, &md->uevent_seq);
38138+ return atomic_add_return_unchecked(1, &md->uevent_seq);
38139 }
38140
38141 uint32_t dm_get_event_nr(struct mapped_device *md)
38142 {
38143- return atomic_read(&md->event_nr);
38144+ return atomic_read_unchecked(&md->event_nr);
38145 }
38146
38147 int dm_wait_event(struct mapped_device *md, int event_nr)
38148 {
38149 return wait_event_interruptible(md->eventq,
38150- (event_nr != atomic_read(&md->event_nr)));
38151+ (event_nr != atomic_read_unchecked(&md->event_nr)));
38152 }
38153
38154 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
38155diff --git a/drivers/md/md.c b/drivers/md/md.c
38156index f363135..9b38815 100644
38157--- a/drivers/md/md.c
38158+++ b/drivers/md/md.c
38159@@ -240,10 +240,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
38160 * start build, activate spare
38161 */
38162 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
38163-static atomic_t md_event_count;
38164+static atomic_unchecked_t md_event_count;
38165 void md_new_event(struct mddev *mddev)
38166 {
38167- atomic_inc(&md_event_count);
38168+ atomic_inc_unchecked(&md_event_count);
38169 wake_up(&md_event_waiters);
38170 }
38171 EXPORT_SYMBOL_GPL(md_new_event);
38172@@ -253,7 +253,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
38173 */
38174 static void md_new_event_inintr(struct mddev *mddev)
38175 {
38176- atomic_inc(&md_event_count);
38177+ atomic_inc_unchecked(&md_event_count);
38178 wake_up(&md_event_waiters);
38179 }
38180
38181@@ -1507,7 +1507,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
38182 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
38183 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
38184 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
38185- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
38186+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
38187
38188 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
38189 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
38190@@ -1751,7 +1751,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
38191 else
38192 sb->resync_offset = cpu_to_le64(0);
38193
38194- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
38195+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
38196
38197 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
38198 sb->size = cpu_to_le64(mddev->dev_sectors);
38199@@ -2751,7 +2751,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
38200 static ssize_t
38201 errors_show(struct md_rdev *rdev, char *page)
38202 {
38203- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
38204+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
38205 }
38206
38207 static ssize_t
38208@@ -2760,7 +2760,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
38209 char *e;
38210 unsigned long n = simple_strtoul(buf, &e, 10);
38211 if (*buf && (*e == 0 || *e == '\n')) {
38212- atomic_set(&rdev->corrected_errors, n);
38213+ atomic_set_unchecked(&rdev->corrected_errors, n);
38214 return len;
38215 }
38216 return -EINVAL;
38217@@ -3210,8 +3210,8 @@ int md_rdev_init(struct md_rdev *rdev)
38218 rdev->sb_loaded = 0;
38219 rdev->bb_page = NULL;
38220 atomic_set(&rdev->nr_pending, 0);
38221- atomic_set(&rdev->read_errors, 0);
38222- atomic_set(&rdev->corrected_errors, 0);
38223+ atomic_set_unchecked(&rdev->read_errors, 0);
38224+ atomic_set_unchecked(&rdev->corrected_errors, 0);
38225
38226 INIT_LIST_HEAD(&rdev->same_set);
38227 init_waitqueue_head(&rdev->blocked_wait);
38228@@ -6987,7 +6987,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
38229
38230 spin_unlock(&pers_lock);
38231 seq_printf(seq, "\n");
38232- seq->poll_event = atomic_read(&md_event_count);
38233+ seq->poll_event = atomic_read_unchecked(&md_event_count);
38234 return 0;
38235 }
38236 if (v == (void*)2) {
38237@@ -7090,7 +7090,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
38238 return error;
38239
38240 seq = file->private_data;
38241- seq->poll_event = atomic_read(&md_event_count);
38242+ seq->poll_event = atomic_read_unchecked(&md_event_count);
38243 return error;
38244 }
38245
38246@@ -7104,7 +7104,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
38247 /* always allow read */
38248 mask = POLLIN | POLLRDNORM;
38249
38250- if (seq->poll_event != atomic_read(&md_event_count))
38251+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
38252 mask |= POLLERR | POLLPRI;
38253 return mask;
38254 }
38255@@ -7148,7 +7148,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
38256 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
38257 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
38258 (int)part_stat_read(&disk->part0, sectors[1]) -
38259- atomic_read(&disk->sync_io);
38260+ atomic_read_unchecked(&disk->sync_io);
38261 /* sync IO will cause sync_io to increase before the disk_stats
38262 * as sync_io is counted when a request starts, and
38263 * disk_stats is counted when it completes.
38264diff --git a/drivers/md/md.h b/drivers/md/md.h
38265index eca59c3..7c42285 100644
38266--- a/drivers/md/md.h
38267+++ b/drivers/md/md.h
38268@@ -94,13 +94,13 @@ struct md_rdev {
38269 * only maintained for arrays that
38270 * support hot removal
38271 */
38272- atomic_t read_errors; /* number of consecutive read errors that
38273+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
38274 * we have tried to ignore.
38275 */
38276 struct timespec last_read_error; /* monotonic time since our
38277 * last read error
38278 */
38279- atomic_t corrected_errors; /* number of corrected read errors,
38280+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
38281 * for reporting to userspace and storing
38282 * in superblock.
38283 */
38284@@ -434,7 +434,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
38285
38286 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
38287 {
38288- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
38289+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
38290 }
38291
38292 struct md_personality
38293diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
38294index 1cbfc6b..56e1dbb 100644
38295--- a/drivers/md/persistent-data/dm-space-map.h
38296+++ b/drivers/md/persistent-data/dm-space-map.h
38297@@ -60,6 +60,7 @@ struct dm_space_map {
38298 int (*root_size)(struct dm_space_map *sm, size_t *result);
38299 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
38300 };
38301+typedef struct dm_space_map __no_const dm_space_map_no_const;
38302
38303 /*----------------------------------------------------------------*/
38304
38305diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
38306index 75b1f89..00ba344 100644
38307--- a/drivers/md/raid1.c
38308+++ b/drivers/md/raid1.c
38309@@ -1819,7 +1819,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
38310 if (r1_sync_page_io(rdev, sect, s,
38311 bio->bi_io_vec[idx].bv_page,
38312 READ) != 0)
38313- atomic_add(s, &rdev->corrected_errors);
38314+ atomic_add_unchecked(s, &rdev->corrected_errors);
38315 }
38316 sectors -= s;
38317 sect += s;
38318@@ -2041,7 +2041,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
38319 test_bit(In_sync, &rdev->flags)) {
38320 if (r1_sync_page_io(rdev, sect, s,
38321 conf->tmppage, READ)) {
38322- atomic_add(s, &rdev->corrected_errors);
38323+ atomic_add_unchecked(s, &rdev->corrected_errors);
38324 printk(KERN_INFO
38325 "md/raid1:%s: read error corrected "
38326 "(%d sectors at %llu on %s)\n",
38327diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
38328index 8d925dc..11d674f 100644
38329--- a/drivers/md/raid10.c
38330+++ b/drivers/md/raid10.c
38331@@ -1878,7 +1878,7 @@ static void end_sync_read(struct bio *bio, int error)
38332 /* The write handler will notice the lack of
38333 * R10BIO_Uptodate and record any errors etc
38334 */
38335- atomic_add(r10_bio->sectors,
38336+ atomic_add_unchecked(r10_bio->sectors,
38337 &conf->mirrors[d].rdev->corrected_errors);
38338
38339 /* for reconstruct, we always reschedule after a read.
38340@@ -2227,7 +2227,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
38341 {
38342 struct timespec cur_time_mon;
38343 unsigned long hours_since_last;
38344- unsigned int read_errors = atomic_read(&rdev->read_errors);
38345+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
38346
38347 ktime_get_ts(&cur_time_mon);
38348
38349@@ -2249,9 +2249,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
38350 * overflowing the shift of read_errors by hours_since_last.
38351 */
38352 if (hours_since_last >= 8 * sizeof(read_errors))
38353- atomic_set(&rdev->read_errors, 0);
38354+ atomic_set_unchecked(&rdev->read_errors, 0);
38355 else
38356- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
38357+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
38358 }
38359
38360 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
38361@@ -2305,8 +2305,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
38362 return;
38363
38364 check_decay_read_errors(mddev, rdev);
38365- atomic_inc(&rdev->read_errors);
38366- if (atomic_read(&rdev->read_errors) > max_read_errors) {
38367+ atomic_inc_unchecked(&rdev->read_errors);
38368+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
38369 char b[BDEVNAME_SIZE];
38370 bdevname(rdev->bdev, b);
38371
38372@@ -2314,7 +2314,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
38373 "md/raid10:%s: %s: Raid device exceeded "
38374 "read_error threshold [cur %d:max %d]\n",
38375 mdname(mddev), b,
38376- atomic_read(&rdev->read_errors), max_read_errors);
38377+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
38378 printk(KERN_NOTICE
38379 "md/raid10:%s: %s: Failing raid device\n",
38380 mdname(mddev), b);
38381@@ -2469,7 +2469,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
38382 sect +
38383 choose_data_offset(r10_bio, rdev)),
38384 bdevname(rdev->bdev, b));
38385- atomic_add(s, &rdev->corrected_errors);
38386+ atomic_add_unchecked(s, &rdev->corrected_errors);
38387 }
38388
38389 rdev_dec_pending(rdev, mddev);
38390diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
38391index 94ce78e..df99e24 100644
38392--- a/drivers/md/raid5.c
38393+++ b/drivers/md/raid5.c
38394@@ -1800,21 +1800,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
38395 mdname(conf->mddev), STRIPE_SECTORS,
38396 (unsigned long long)s,
38397 bdevname(rdev->bdev, b));
38398- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
38399+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
38400 clear_bit(R5_ReadError, &sh->dev[i].flags);
38401 clear_bit(R5_ReWrite, &sh->dev[i].flags);
38402 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
38403 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
38404
38405- if (atomic_read(&rdev->read_errors))
38406- atomic_set(&rdev->read_errors, 0);
38407+ if (atomic_read_unchecked(&rdev->read_errors))
38408+ atomic_set_unchecked(&rdev->read_errors, 0);
38409 } else {
38410 const char *bdn = bdevname(rdev->bdev, b);
38411 int retry = 0;
38412 int set_bad = 0;
38413
38414 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
38415- atomic_inc(&rdev->read_errors);
38416+ atomic_inc_unchecked(&rdev->read_errors);
38417 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
38418 printk_ratelimited(
38419 KERN_WARNING
38420@@ -1842,7 +1842,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
38421 mdname(conf->mddev),
38422 (unsigned long long)s,
38423 bdn);
38424- } else if (atomic_read(&rdev->read_errors)
38425+ } else if (atomic_read_unchecked(&rdev->read_errors)
38426 > conf->max_nr_stripes)
38427 printk(KERN_WARNING
38428 "md/raid:%s: Too many read errors, failing device %s.\n",
38429diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
38430index d33101a..6b13069 100644
38431--- a/drivers/media/dvb-core/dvbdev.c
38432+++ b/drivers/media/dvb-core/dvbdev.c
38433@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
38434 const struct dvb_device *template, void *priv, int type)
38435 {
38436 struct dvb_device *dvbdev;
38437- struct file_operations *dvbdevfops;
38438+ file_operations_no_const *dvbdevfops;
38439 struct device *clsdev;
38440 int minor;
38441 int id;
38442diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
38443index 404f63a..4796533 100644
38444--- a/drivers/media/dvb-frontends/dib3000.h
38445+++ b/drivers/media/dvb-frontends/dib3000.h
38446@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
38447 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
38448 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
38449 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
38450-};
38451+} __no_const;
38452
38453 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
38454 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
38455diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
38456index bc78354..42c9459 100644
38457--- a/drivers/media/pci/cx88/cx88-video.c
38458+++ b/drivers/media/pci/cx88/cx88-video.c
38459@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
38460
38461 /* ------------------------------------------------------------------ */
38462
38463-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
38464-static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
38465-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
38466+static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
38467+static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
38468+static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
38469
38470 module_param_array(video_nr, int, NULL, 0444);
38471 module_param_array(vbi_nr, int, NULL, 0444);
38472diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
38473index 8e9a668..78d6310 100644
38474--- a/drivers/media/platform/omap/omap_vout.c
38475+++ b/drivers/media/platform/omap/omap_vout.c
38476@@ -63,7 +63,6 @@ enum omap_vout_channels {
38477 OMAP_VIDEO2,
38478 };
38479
38480-static struct videobuf_queue_ops video_vbq_ops;
38481 /* Variables configurable through module params*/
38482 static u32 video1_numbuffers = 3;
38483 static u32 video2_numbuffers = 3;
38484@@ -1012,6 +1011,12 @@ static int omap_vout_open(struct file *file)
38485 {
38486 struct videobuf_queue *q;
38487 struct omap_vout_device *vout = NULL;
38488+ static struct videobuf_queue_ops video_vbq_ops = {
38489+ .buf_setup = omap_vout_buffer_setup,
38490+ .buf_prepare = omap_vout_buffer_prepare,
38491+ .buf_release = omap_vout_buffer_release,
38492+ .buf_queue = omap_vout_buffer_queue,
38493+ };
38494
38495 vout = video_drvdata(file);
38496 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
38497@@ -1029,10 +1034,6 @@ static int omap_vout_open(struct file *file)
38498 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
38499
38500 q = &vout->vbq;
38501- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
38502- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
38503- video_vbq_ops.buf_release = omap_vout_buffer_release;
38504- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
38505 spin_lock_init(&vout->vbq_lock);
38506
38507 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
38508diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
38509index b671e20..34088b7 100644
38510--- a/drivers/media/platform/s5p-tv/mixer.h
38511+++ b/drivers/media/platform/s5p-tv/mixer.h
38512@@ -155,7 +155,7 @@ struct mxr_layer {
38513 /** layer index (unique identifier) */
38514 int idx;
38515 /** callbacks for layer methods */
38516- struct mxr_layer_ops ops;
38517+ struct mxr_layer_ops *ops;
38518 /** format array */
38519 const struct mxr_format **fmt_array;
38520 /** size of format array */
38521diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
38522index b93a21f..2535195 100644
38523--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
38524+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
38525@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
38526 {
38527 struct mxr_layer *layer;
38528 int ret;
38529- struct mxr_layer_ops ops = {
38530+ static struct mxr_layer_ops ops = {
38531 .release = mxr_graph_layer_release,
38532 .buffer_set = mxr_graph_buffer_set,
38533 .stream_set = mxr_graph_stream_set,
38534diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
38535index 3b1670a..595c939 100644
38536--- a/drivers/media/platform/s5p-tv/mixer_reg.c
38537+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
38538@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
38539 layer->update_buf = next;
38540 }
38541
38542- layer->ops.buffer_set(layer, layer->update_buf);
38543+ layer->ops->buffer_set(layer, layer->update_buf);
38544
38545 if (done && done != layer->shadow_buf)
38546 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
38547diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
38548index 1f3b743..e839271 100644
38549--- a/drivers/media/platform/s5p-tv/mixer_video.c
38550+++ b/drivers/media/platform/s5p-tv/mixer_video.c
38551@@ -208,7 +208,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
38552 layer->geo.src.height = layer->geo.src.full_height;
38553
38554 mxr_geometry_dump(mdev, &layer->geo);
38555- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
38556+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
38557 mxr_geometry_dump(mdev, &layer->geo);
38558 }
38559
38560@@ -226,7 +226,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
38561 layer->geo.dst.full_width = mbus_fmt.width;
38562 layer->geo.dst.full_height = mbus_fmt.height;
38563 layer->geo.dst.field = mbus_fmt.field;
38564- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
38565+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
38566
38567 mxr_geometry_dump(mdev, &layer->geo);
38568 }
38569@@ -332,7 +332,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
38570 /* set source size to highest accepted value */
38571 geo->src.full_width = max(geo->dst.full_width, pix->width);
38572 geo->src.full_height = max(geo->dst.full_height, pix->height);
38573- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
38574+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
38575 mxr_geometry_dump(mdev, &layer->geo);
38576 /* set cropping to total visible screen */
38577 geo->src.width = pix->width;
38578@@ -340,12 +340,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
38579 geo->src.x_offset = 0;
38580 geo->src.y_offset = 0;
38581 /* assure consistency of geometry */
38582- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
38583+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
38584 mxr_geometry_dump(mdev, &layer->geo);
38585 /* set full size to lowest possible value */
38586 geo->src.full_width = 0;
38587 geo->src.full_height = 0;
38588- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
38589+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
38590 mxr_geometry_dump(mdev, &layer->geo);
38591
38592 /* returning results */
38593@@ -472,7 +472,7 @@ static int mxr_s_selection(struct file *file, void *fh,
38594 target->width = s->r.width;
38595 target->height = s->r.height;
38596
38597- layer->ops.fix_geometry(layer, stage, s->flags);
38598+ layer->ops->fix_geometry(layer, stage, s->flags);
38599
38600 /* retrieve update selection rectangle */
38601 res.left = target->x_offset;
38602@@ -937,13 +937,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
38603 mxr_output_get(mdev);
38604
38605 mxr_layer_update_output(layer);
38606- layer->ops.format_set(layer);
38607+ layer->ops->format_set(layer);
38608 /* enabling layer in hardware */
38609 spin_lock_irqsave(&layer->enq_slock, flags);
38610 layer->state = MXR_LAYER_STREAMING;
38611 spin_unlock_irqrestore(&layer->enq_slock, flags);
38612
38613- layer->ops.stream_set(layer, MXR_ENABLE);
38614+ layer->ops->stream_set(layer, MXR_ENABLE);
38615 mxr_streamer_get(mdev);
38616
38617 return 0;
38618@@ -1013,7 +1013,7 @@ static int stop_streaming(struct vb2_queue *vq)
38619 spin_unlock_irqrestore(&layer->enq_slock, flags);
38620
38621 /* disabling layer in hardware */
38622- layer->ops.stream_set(layer, MXR_DISABLE);
38623+ layer->ops->stream_set(layer, MXR_DISABLE);
38624 /* remove one streamer */
38625 mxr_streamer_put(mdev);
38626 /* allow changes in output configuration */
38627@@ -1052,8 +1052,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
38628
38629 void mxr_layer_release(struct mxr_layer *layer)
38630 {
38631- if (layer->ops.release)
38632- layer->ops.release(layer);
38633+ if (layer->ops->release)
38634+ layer->ops->release(layer);
38635 }
38636
38637 void mxr_base_layer_release(struct mxr_layer *layer)
38638@@ -1079,7 +1079,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
38639
38640 layer->mdev = mdev;
38641 layer->idx = idx;
38642- layer->ops = *ops;
38643+ layer->ops = ops;
38644
38645 spin_lock_init(&layer->enq_slock);
38646 INIT_LIST_HEAD(&layer->enq_list);
38647diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
38648index 3d13a63..da31bf1 100644
38649--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
38650+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
38651@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
38652 {
38653 struct mxr_layer *layer;
38654 int ret;
38655- struct mxr_layer_ops ops = {
38656+ static struct mxr_layer_ops ops = {
38657 .release = mxr_vp_layer_release,
38658 .buffer_set = mxr_vp_buffer_set,
38659 .stream_set = mxr_vp_stream_set,
38660diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
38661index 643d80a..56bb96b 100644
38662--- a/drivers/media/radio/radio-cadet.c
38663+++ b/drivers/media/radio/radio-cadet.c
38664@@ -302,6 +302,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
38665 unsigned char readbuf[RDS_BUFFER];
38666 int i = 0;
38667
38668+ if (count > RDS_BUFFER)
38669+ return -EFAULT;
38670 mutex_lock(&dev->lock);
38671 if (dev->rdsstat == 0)
38672 cadet_start_rds(dev);
38673@@ -317,7 +319,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
38674 while (i < count && dev->rdsin != dev->rdsout)
38675 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
38676
38677- if (i && copy_to_user(data, readbuf, i))
38678+ if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
38679 i = -EFAULT;
38680 unlock:
38681 mutex_unlock(&dev->lock);
38682diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
38683index 3940bb0..fb3952a 100644
38684--- a/drivers/media/usb/dvb-usb/cxusb.c
38685+++ b/drivers/media/usb/dvb-usb/cxusb.c
38686@@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
38687
38688 struct dib0700_adapter_state {
38689 int (*set_param_save) (struct dvb_frontend *);
38690-};
38691+} __no_const;
38692
38693 static int dib7070_set_param_override(struct dvb_frontend *fe)
38694 {
38695diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
38696index 9382895..ac8093c 100644
38697--- a/drivers/media/usb/dvb-usb/dw2102.c
38698+++ b/drivers/media/usb/dvb-usb/dw2102.c
38699@@ -95,7 +95,7 @@ struct su3000_state {
38700
38701 struct s6x0_state {
38702 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
38703-};
38704+} __no_const;
38705
38706 /* debug */
38707 static int dvb_usb_dw2102_debug;
38708diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
38709index aa6e7c7..4cd8061 100644
38710--- a/drivers/media/v4l2-core/v4l2-ioctl.c
38711+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
38712@@ -1923,7 +1923,8 @@ struct v4l2_ioctl_info {
38713 struct file *file, void *fh, void *p);
38714 } u;
38715 void (*debug)(const void *arg, bool write_only);
38716-};
38717+} __do_const;
38718+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
38719
38720 /* This control needs a priority check */
38721 #define INFO_FL_PRIO (1 << 0)
38722@@ -2108,7 +2109,7 @@ static long __video_do_ioctl(struct file *file,
38723 struct video_device *vfd = video_devdata(file);
38724 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
38725 bool write_only = false;
38726- struct v4l2_ioctl_info default_info;
38727+ v4l2_ioctl_info_no_const default_info;
38728 const struct v4l2_ioctl_info *info;
38729 void *fh = file->private_data;
38730 struct v4l2_fh *vfh = NULL;
38731diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c
38732index 29b2172..a7c5b31 100644
38733--- a/drivers/memstick/host/r592.c
38734+++ b/drivers/memstick/host/r592.c
38735@@ -454,7 +454,7 @@ static int r592_transfer_fifo_pio(struct r592_device *dev)
38736 /* Executes one TPC (data is read/written from small or large fifo) */
38737 static void r592_execute_tpc(struct r592_device *dev)
38738 {
38739- bool is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS;
38740+ bool is_write;
38741 int len, error;
38742 u32 status, reg;
38743
38744@@ -463,6 +463,7 @@ static void r592_execute_tpc(struct r592_device *dev)
38745 return;
38746 }
38747
38748+ is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS;
38749 len = dev->req->long_data ?
38750 dev->req->sg.length : dev->req->data_len;
38751
38752diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
38753index fb69baa..3aeea2e 100644
38754--- a/drivers/message/fusion/mptbase.c
38755+++ b/drivers/message/fusion/mptbase.c
38756@@ -6755,8 +6755,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
38757 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
38758 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
38759
38760+#ifdef CONFIG_GRKERNSEC_HIDESYM
38761+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
38762+#else
38763 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
38764 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
38765+#endif
38766+
38767 /*
38768 * Rounding UP to nearest 4-kB boundary here...
38769 */
38770@@ -6769,7 +6774,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
38771 ioc->facts.GlobalCredits);
38772
38773 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
38774+#ifdef CONFIG_GRKERNSEC_HIDESYM
38775+ NULL, NULL);
38776+#else
38777 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
38778+#endif
38779 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
38780 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
38781 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
38782diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
38783index fa43c39..daeb158 100644
38784--- a/drivers/message/fusion/mptsas.c
38785+++ b/drivers/message/fusion/mptsas.c
38786@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
38787 return 0;
38788 }
38789
38790+static inline void
38791+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
38792+{
38793+ if (phy_info->port_details) {
38794+ phy_info->port_details->rphy = rphy;
38795+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
38796+ ioc->name, rphy));
38797+ }
38798+
38799+ if (rphy) {
38800+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
38801+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
38802+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
38803+ ioc->name, rphy, rphy->dev.release));
38804+ }
38805+}
38806+
38807 /* no mutex */
38808 static void
38809 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
38810@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
38811 return NULL;
38812 }
38813
38814-static inline void
38815-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
38816-{
38817- if (phy_info->port_details) {
38818- phy_info->port_details->rphy = rphy;
38819- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
38820- ioc->name, rphy));
38821- }
38822-
38823- if (rphy) {
38824- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
38825- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
38826- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
38827- ioc->name, rphy, rphy->dev.release));
38828- }
38829-}
38830-
38831 static inline struct sas_port *
38832 mptsas_get_port(struct mptsas_phyinfo *phy_info)
38833 {
38834diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
38835index 164afa7..b6b2e74 100644
38836--- a/drivers/message/fusion/mptscsih.c
38837+++ b/drivers/message/fusion/mptscsih.c
38838@@ -1271,15 +1271,16 @@ mptscsih_info(struct Scsi_Host *SChost)
38839
38840 h = shost_priv(SChost);
38841
38842- if (h) {
38843- if (h->info_kbuf == NULL)
38844- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
38845- return h->info_kbuf;
38846- h->info_kbuf[0] = '\0';
38847+ if (!h)
38848+ return NULL;
38849
38850- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
38851- h->info_kbuf[size-1] = '\0';
38852- }
38853+ if (h->info_kbuf == NULL)
38854+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
38855+ return h->info_kbuf;
38856+ h->info_kbuf[0] = '\0';
38857+
38858+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
38859+ h->info_kbuf[size-1] = '\0';
38860
38861 return h->info_kbuf;
38862 }
38863diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
38864index 8001aa6..b137580 100644
38865--- a/drivers/message/i2o/i2o_proc.c
38866+++ b/drivers/message/i2o/i2o_proc.c
38867@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
38868 "Array Controller Device"
38869 };
38870
38871-static char *chtostr(char *tmp, u8 *chars, int n)
38872-{
38873- tmp[0] = 0;
38874- return strncat(tmp, (char *)chars, n);
38875-}
38876-
38877 static int i2o_report_query_status(struct seq_file *seq, int block_status,
38878 char *group)
38879 {
38880@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
38881 } *result;
38882
38883 i2o_exec_execute_ddm_table ddm_table;
38884- char tmp[28 + 1];
38885
38886 result = kmalloc(sizeof(*result), GFP_KERNEL);
38887 if (!result)
38888@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
38889
38890 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
38891 seq_printf(seq, "%-#8x", ddm_table.module_id);
38892- seq_printf(seq, "%-29s",
38893- chtostr(tmp, ddm_table.module_name_version, 28));
38894+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
38895 seq_printf(seq, "%9d ", ddm_table.data_size);
38896 seq_printf(seq, "%8d", ddm_table.code_size);
38897
38898@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
38899
38900 i2o_driver_result_table *result;
38901 i2o_driver_store_table *dst;
38902- char tmp[28 + 1];
38903
38904 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
38905 if (result == NULL)
38906@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
38907
38908 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
38909 seq_printf(seq, "%-#8x", dst->module_id);
38910- seq_printf(seq, "%-29s",
38911- chtostr(tmp, dst->module_name_version, 28));
38912- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
38913+ seq_printf(seq, "%-.28s", dst->module_name_version);
38914+ seq_printf(seq, "%-.8s", dst->date);
38915 seq_printf(seq, "%8d ", dst->module_size);
38916 seq_printf(seq, "%8d ", dst->mpb_size);
38917 seq_printf(seq, "0x%04x", dst->module_flags);
38918@@ -1250,7 +1240,6 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
38919 // == (allow) 512d bytes (max)
38920 static u16 *work16 = (u16 *) work32;
38921 int token;
38922- char tmp[16 + 1];
38923
38924 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
38925
38926@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
38927 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
38928 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
38929 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
38930- seq_printf(seq, "Vendor info : %s\n",
38931- chtostr(tmp, (u8 *) (work32 + 2), 16));
38932- seq_printf(seq, "Product info : %s\n",
38933- chtostr(tmp, (u8 *) (work32 + 6), 16));
38934- seq_printf(seq, "Description : %s\n",
38935- chtostr(tmp, (u8 *) (work32 + 10), 16));
38936- seq_printf(seq, "Product rev. : %s\n",
38937- chtostr(tmp, (u8 *) (work32 + 14), 8));
38938+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
38939+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
38940+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
38941+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
38942
38943 seq_printf(seq, "Serial number : ");
38944 print_serial_number(seq, (u8 *) (work32 + 16),
38945@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
38946 u8 pad[256]; // allow up to 256 byte (max) serial number
38947 } result;
38948
38949- char tmp[24 + 1];
38950-
38951 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
38952
38953 if (token < 0) {
38954@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
38955 }
38956
38957 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
38958- seq_printf(seq, "Module name : %s\n",
38959- chtostr(tmp, result.module_name, 24));
38960- seq_printf(seq, "Module revision : %s\n",
38961- chtostr(tmp, result.module_rev, 8));
38962+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
38963+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
38964
38965 seq_printf(seq, "Serial number : ");
38966 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
38967@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
38968 u8 instance_number[4];
38969 } result;
38970
38971- char tmp[64 + 1];
38972-
38973 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
38974
38975 if (token < 0) {
38976@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
38977 return 0;
38978 }
38979
38980- seq_printf(seq, "Device name : %s\n",
38981- chtostr(tmp, result.device_name, 64));
38982- seq_printf(seq, "Service name : %s\n",
38983- chtostr(tmp, result.service_name, 64));
38984- seq_printf(seq, "Physical name : %s\n",
38985- chtostr(tmp, result.physical_location, 64));
38986- seq_printf(seq, "Instance number : %s\n",
38987- chtostr(tmp, result.instance_number, 4));
38988+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
38989+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
38990+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
38991+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
38992
38993 return 0;
38994 }
38995diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
38996index a8c08f3..155fe3d 100644
38997--- a/drivers/message/i2o/iop.c
38998+++ b/drivers/message/i2o/iop.c
38999@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
39000
39001 spin_lock_irqsave(&c->context_list_lock, flags);
39002
39003- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
39004- atomic_inc(&c->context_list_counter);
39005+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
39006+ atomic_inc_unchecked(&c->context_list_counter);
39007
39008- entry->context = atomic_read(&c->context_list_counter);
39009+ entry->context = atomic_read_unchecked(&c->context_list_counter);
39010
39011 list_add(&entry->list, &c->context_list);
39012
39013@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
39014
39015 #if BITS_PER_LONG == 64
39016 spin_lock_init(&c->context_list_lock);
39017- atomic_set(&c->context_list_counter, 0);
39018+ atomic_set_unchecked(&c->context_list_counter, 0);
39019 INIT_LIST_HEAD(&c->context_list);
39020 #endif
39021
39022diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
39023index 45ece11..8efa218 100644
39024--- a/drivers/mfd/janz-cmodio.c
39025+++ b/drivers/mfd/janz-cmodio.c
39026@@ -13,6 +13,7 @@
39027
39028 #include <linux/kernel.h>
39029 #include <linux/module.h>
39030+#include <linux/slab.h>
39031 #include <linux/init.h>
39032 #include <linux/pci.h>
39033 #include <linux/interrupt.h>
39034diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
39035index a5f9888..1c0ed56 100644
39036--- a/drivers/mfd/twl4030-irq.c
39037+++ b/drivers/mfd/twl4030-irq.c
39038@@ -35,6 +35,7 @@
39039 #include <linux/of.h>
39040 #include <linux/irqdomain.h>
39041 #include <linux/i2c/twl.h>
39042+#include <asm/pgtable.h>
39043
39044 #include "twl-core.h"
39045
39046@@ -728,10 +729,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
39047 * Install an irq handler for each of the SIH modules;
39048 * clone dummy irq_chip since PIH can't *do* anything
39049 */
39050- twl4030_irq_chip = dummy_irq_chip;
39051- twl4030_irq_chip.name = "twl4030";
39052+ pax_open_kernel();
39053+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
39054+ *(const char **)&twl4030_irq_chip.name = "twl4030";
39055
39056- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
39057+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
39058+ pax_close_kernel();
39059
39060 for (i = irq_base; i < irq_end; i++) {
39061 irq_set_chip_and_handler(i, &twl4030_irq_chip,
39062diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
39063index 277a8db..0e0b754 100644
39064--- a/drivers/mfd/twl6030-irq.c
39065+++ b/drivers/mfd/twl6030-irq.c
39066@@ -387,10 +387,12 @@ int twl6030_init_irq(struct device *dev, int irq_num)
39067 * install an irq handler for each of the modules;
39068 * clone dummy irq_chip since PIH can't *do* anything
39069 */
39070- twl6030_irq_chip = dummy_irq_chip;
39071- twl6030_irq_chip.name = "twl6030";
39072- twl6030_irq_chip.irq_set_type = NULL;
39073- twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;
39074+ pax_open_kernel();
39075+ memcpy((void *)&twl6030_irq_chip, &dummy_irq_chip, sizeof twl6030_irq_chip);
39076+ *(const char **)&twl6030_irq_chip.name = "twl6030";
39077+ *(void **)&twl6030_irq_chip.irq_set_type = NULL;
39078+ *(void **)&twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;
39079+ pax_close_kernel();
39080
39081 for (i = irq_base; i < irq_end; i++) {
39082 irq_set_chip_and_handler(i, &twl6030_irq_chip,
39083diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
39084index f428d86..274c368 100644
39085--- a/drivers/misc/c2port/core.c
39086+++ b/drivers/misc/c2port/core.c
39087@@ -924,7 +924,9 @@ struct c2port_device *c2port_device_register(char *name,
39088 mutex_init(&c2dev->mutex);
39089
39090 /* Create binary file */
39091- c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
39092+ pax_open_kernel();
39093+ *(size_t *)&c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
39094+ pax_close_kernel();
39095 ret = device_create_bin_file(c2dev->dev, &c2port_bin_attrs);
39096 if (unlikely(ret))
39097 goto error_device_create_bin_file;
39098diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
39099index 3aa9a96..59cf685 100644
39100--- a/drivers/misc/kgdbts.c
39101+++ b/drivers/misc/kgdbts.c
39102@@ -832,7 +832,7 @@ static void run_plant_and_detach_test(int is_early)
39103 char before[BREAK_INSTR_SIZE];
39104 char after[BREAK_INSTR_SIZE];
39105
39106- probe_kernel_read(before, (char *)kgdbts_break_test,
39107+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
39108 BREAK_INSTR_SIZE);
39109 init_simple_test();
39110 ts.tst = plant_and_detach_test;
39111@@ -840,7 +840,7 @@ static void run_plant_and_detach_test(int is_early)
39112 /* Activate test with initial breakpoint */
39113 if (!is_early)
39114 kgdb_breakpoint();
39115- probe_kernel_read(after, (char *)kgdbts_break_test,
39116+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
39117 BREAK_INSTR_SIZE);
39118 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
39119 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
39120diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
39121index 4a87e5c..76bdf5c 100644
39122--- a/drivers/misc/lis3lv02d/lis3lv02d.c
39123+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
39124@@ -498,7 +498,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
39125 * the lid is closed. This leads to interrupts as soon as a little move
39126 * is done.
39127 */
39128- atomic_inc(&lis3->count);
39129+ atomic_inc_unchecked(&lis3->count);
39130
39131 wake_up_interruptible(&lis3->misc_wait);
39132 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
39133@@ -584,7 +584,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
39134 if (lis3->pm_dev)
39135 pm_runtime_get_sync(lis3->pm_dev);
39136
39137- atomic_set(&lis3->count, 0);
39138+ atomic_set_unchecked(&lis3->count, 0);
39139 return 0;
39140 }
39141
39142@@ -617,7 +617,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
39143 add_wait_queue(&lis3->misc_wait, &wait);
39144 while (true) {
39145 set_current_state(TASK_INTERRUPTIBLE);
39146- data = atomic_xchg(&lis3->count, 0);
39147+ data = atomic_xchg_unchecked(&lis3->count, 0);
39148 if (data)
39149 break;
39150
39151@@ -658,7 +658,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
39152 struct lis3lv02d, miscdev);
39153
39154 poll_wait(file, &lis3->misc_wait, wait);
39155- if (atomic_read(&lis3->count))
39156+ if (atomic_read_unchecked(&lis3->count))
39157 return POLLIN | POLLRDNORM;
39158 return 0;
39159 }
39160diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
39161index c439c82..1f20f57 100644
39162--- a/drivers/misc/lis3lv02d/lis3lv02d.h
39163+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
39164@@ -297,7 +297,7 @@ struct lis3lv02d {
39165 struct input_polled_dev *idev; /* input device */
39166 struct platform_device *pdev; /* platform device */
39167 struct regulator_bulk_data regulators[2];
39168- atomic_t count; /* interrupt count after last read */
39169+ atomic_unchecked_t count; /* interrupt count after last read */
39170 union axis_conversion ac; /* hw -> logical axis */
39171 int mapped_btns[3];
39172
39173diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
39174index 2f30bad..c4c13d0 100644
39175--- a/drivers/misc/sgi-gru/gruhandles.c
39176+++ b/drivers/misc/sgi-gru/gruhandles.c
39177@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
39178 unsigned long nsec;
39179
39180 nsec = CLKS2NSEC(clks);
39181- atomic_long_inc(&mcs_op_statistics[op].count);
39182- atomic_long_add(nsec, &mcs_op_statistics[op].total);
39183+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
39184+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
39185 if (mcs_op_statistics[op].max < nsec)
39186 mcs_op_statistics[op].max = nsec;
39187 }
39188diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
39189index 950dbe9..eeef0f8 100644
39190--- a/drivers/misc/sgi-gru/gruprocfs.c
39191+++ b/drivers/misc/sgi-gru/gruprocfs.c
39192@@ -32,9 +32,9 @@
39193
39194 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
39195
39196-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
39197+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
39198 {
39199- unsigned long val = atomic_long_read(v);
39200+ unsigned long val = atomic_long_read_unchecked(v);
39201
39202 seq_printf(s, "%16lu %s\n", val, id);
39203 }
39204@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
39205
39206 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
39207 for (op = 0; op < mcsop_last; op++) {
39208- count = atomic_long_read(&mcs_op_statistics[op].count);
39209- total = atomic_long_read(&mcs_op_statistics[op].total);
39210+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
39211+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
39212 max = mcs_op_statistics[op].max;
39213 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
39214 count ? total / count : 0, max);
39215diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
39216index 5c3ce24..4915ccb 100644
39217--- a/drivers/misc/sgi-gru/grutables.h
39218+++ b/drivers/misc/sgi-gru/grutables.h
39219@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
39220 * GRU statistics.
39221 */
39222 struct gru_stats_s {
39223- atomic_long_t vdata_alloc;
39224- atomic_long_t vdata_free;
39225- atomic_long_t gts_alloc;
39226- atomic_long_t gts_free;
39227- atomic_long_t gms_alloc;
39228- atomic_long_t gms_free;
39229- atomic_long_t gts_double_allocate;
39230- atomic_long_t assign_context;
39231- atomic_long_t assign_context_failed;
39232- atomic_long_t free_context;
39233- atomic_long_t load_user_context;
39234- atomic_long_t load_kernel_context;
39235- atomic_long_t lock_kernel_context;
39236- atomic_long_t unlock_kernel_context;
39237- atomic_long_t steal_user_context;
39238- atomic_long_t steal_kernel_context;
39239- atomic_long_t steal_context_failed;
39240- atomic_long_t nopfn;
39241- atomic_long_t asid_new;
39242- atomic_long_t asid_next;
39243- atomic_long_t asid_wrap;
39244- atomic_long_t asid_reuse;
39245- atomic_long_t intr;
39246- atomic_long_t intr_cbr;
39247- atomic_long_t intr_tfh;
39248- atomic_long_t intr_spurious;
39249- atomic_long_t intr_mm_lock_failed;
39250- atomic_long_t call_os;
39251- atomic_long_t call_os_wait_queue;
39252- atomic_long_t user_flush_tlb;
39253- atomic_long_t user_unload_context;
39254- atomic_long_t user_exception;
39255- atomic_long_t set_context_option;
39256- atomic_long_t check_context_retarget_intr;
39257- atomic_long_t check_context_unload;
39258- atomic_long_t tlb_dropin;
39259- atomic_long_t tlb_preload_page;
39260- atomic_long_t tlb_dropin_fail_no_asid;
39261- atomic_long_t tlb_dropin_fail_upm;
39262- atomic_long_t tlb_dropin_fail_invalid;
39263- atomic_long_t tlb_dropin_fail_range_active;
39264- atomic_long_t tlb_dropin_fail_idle;
39265- atomic_long_t tlb_dropin_fail_fmm;
39266- atomic_long_t tlb_dropin_fail_no_exception;
39267- atomic_long_t tfh_stale_on_fault;
39268- atomic_long_t mmu_invalidate_range;
39269- atomic_long_t mmu_invalidate_page;
39270- atomic_long_t flush_tlb;
39271- atomic_long_t flush_tlb_gru;
39272- atomic_long_t flush_tlb_gru_tgh;
39273- atomic_long_t flush_tlb_gru_zero_asid;
39274+ atomic_long_unchecked_t vdata_alloc;
39275+ atomic_long_unchecked_t vdata_free;
39276+ atomic_long_unchecked_t gts_alloc;
39277+ atomic_long_unchecked_t gts_free;
39278+ atomic_long_unchecked_t gms_alloc;
39279+ atomic_long_unchecked_t gms_free;
39280+ atomic_long_unchecked_t gts_double_allocate;
39281+ atomic_long_unchecked_t assign_context;
39282+ atomic_long_unchecked_t assign_context_failed;
39283+ atomic_long_unchecked_t free_context;
39284+ atomic_long_unchecked_t load_user_context;
39285+ atomic_long_unchecked_t load_kernel_context;
39286+ atomic_long_unchecked_t lock_kernel_context;
39287+ atomic_long_unchecked_t unlock_kernel_context;
39288+ atomic_long_unchecked_t steal_user_context;
39289+ atomic_long_unchecked_t steal_kernel_context;
39290+ atomic_long_unchecked_t steal_context_failed;
39291+ atomic_long_unchecked_t nopfn;
39292+ atomic_long_unchecked_t asid_new;
39293+ atomic_long_unchecked_t asid_next;
39294+ atomic_long_unchecked_t asid_wrap;
39295+ atomic_long_unchecked_t asid_reuse;
39296+ atomic_long_unchecked_t intr;
39297+ atomic_long_unchecked_t intr_cbr;
39298+ atomic_long_unchecked_t intr_tfh;
39299+ atomic_long_unchecked_t intr_spurious;
39300+ atomic_long_unchecked_t intr_mm_lock_failed;
39301+ atomic_long_unchecked_t call_os;
39302+ atomic_long_unchecked_t call_os_wait_queue;
39303+ atomic_long_unchecked_t user_flush_tlb;
39304+ atomic_long_unchecked_t user_unload_context;
39305+ atomic_long_unchecked_t user_exception;
39306+ atomic_long_unchecked_t set_context_option;
39307+ atomic_long_unchecked_t check_context_retarget_intr;
39308+ atomic_long_unchecked_t check_context_unload;
39309+ atomic_long_unchecked_t tlb_dropin;
39310+ atomic_long_unchecked_t tlb_preload_page;
39311+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
39312+ atomic_long_unchecked_t tlb_dropin_fail_upm;
39313+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
39314+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
39315+ atomic_long_unchecked_t tlb_dropin_fail_idle;
39316+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
39317+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
39318+ atomic_long_unchecked_t tfh_stale_on_fault;
39319+ atomic_long_unchecked_t mmu_invalidate_range;
39320+ atomic_long_unchecked_t mmu_invalidate_page;
39321+ atomic_long_unchecked_t flush_tlb;
39322+ atomic_long_unchecked_t flush_tlb_gru;
39323+ atomic_long_unchecked_t flush_tlb_gru_tgh;
39324+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
39325
39326- atomic_long_t copy_gpa;
39327- atomic_long_t read_gpa;
39328+ atomic_long_unchecked_t copy_gpa;
39329+ atomic_long_unchecked_t read_gpa;
39330
39331- atomic_long_t mesq_receive;
39332- atomic_long_t mesq_receive_none;
39333- atomic_long_t mesq_send;
39334- atomic_long_t mesq_send_failed;
39335- atomic_long_t mesq_noop;
39336- atomic_long_t mesq_send_unexpected_error;
39337- atomic_long_t mesq_send_lb_overflow;
39338- atomic_long_t mesq_send_qlimit_reached;
39339- atomic_long_t mesq_send_amo_nacked;
39340- atomic_long_t mesq_send_put_nacked;
39341- atomic_long_t mesq_page_overflow;
39342- atomic_long_t mesq_qf_locked;
39343- atomic_long_t mesq_qf_noop_not_full;
39344- atomic_long_t mesq_qf_switch_head_failed;
39345- atomic_long_t mesq_qf_unexpected_error;
39346- atomic_long_t mesq_noop_unexpected_error;
39347- atomic_long_t mesq_noop_lb_overflow;
39348- atomic_long_t mesq_noop_qlimit_reached;
39349- atomic_long_t mesq_noop_amo_nacked;
39350- atomic_long_t mesq_noop_put_nacked;
39351- atomic_long_t mesq_noop_page_overflow;
39352+ atomic_long_unchecked_t mesq_receive;
39353+ atomic_long_unchecked_t mesq_receive_none;
39354+ atomic_long_unchecked_t mesq_send;
39355+ atomic_long_unchecked_t mesq_send_failed;
39356+ atomic_long_unchecked_t mesq_noop;
39357+ atomic_long_unchecked_t mesq_send_unexpected_error;
39358+ atomic_long_unchecked_t mesq_send_lb_overflow;
39359+ atomic_long_unchecked_t mesq_send_qlimit_reached;
39360+ atomic_long_unchecked_t mesq_send_amo_nacked;
39361+ atomic_long_unchecked_t mesq_send_put_nacked;
39362+ atomic_long_unchecked_t mesq_page_overflow;
39363+ atomic_long_unchecked_t mesq_qf_locked;
39364+ atomic_long_unchecked_t mesq_qf_noop_not_full;
39365+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
39366+ atomic_long_unchecked_t mesq_qf_unexpected_error;
39367+ atomic_long_unchecked_t mesq_noop_unexpected_error;
39368+ atomic_long_unchecked_t mesq_noop_lb_overflow;
39369+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
39370+ atomic_long_unchecked_t mesq_noop_amo_nacked;
39371+ atomic_long_unchecked_t mesq_noop_put_nacked;
39372+ atomic_long_unchecked_t mesq_noop_page_overflow;
39373
39374 };
39375
39376@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
39377 tghop_invalidate, mcsop_last};
39378
39379 struct mcs_op_statistic {
39380- atomic_long_t count;
39381- atomic_long_t total;
39382+ atomic_long_unchecked_t count;
39383+ atomic_long_unchecked_t total;
39384 unsigned long max;
39385 };
39386
39387@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
39388
39389 #define STAT(id) do { \
39390 if (gru_options & OPT_STATS) \
39391- atomic_long_inc(&gru_stats.id); \
39392+ atomic_long_inc_unchecked(&gru_stats.id); \
39393 } while (0)
39394
39395 #ifdef CONFIG_SGI_GRU_DEBUG
39396diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
39397index c862cd4..0d176fe 100644
39398--- a/drivers/misc/sgi-xp/xp.h
39399+++ b/drivers/misc/sgi-xp/xp.h
39400@@ -288,7 +288,7 @@ struct xpc_interface {
39401 xpc_notify_func, void *);
39402 void (*received) (short, int, void *);
39403 enum xp_retval (*partid_to_nasids) (short, void *);
39404-};
39405+} __no_const;
39406
39407 extern struct xpc_interface xpc_interface;
39408
39409diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
39410index b94d5f7..7f494c5 100644
39411--- a/drivers/misc/sgi-xp/xpc.h
39412+++ b/drivers/misc/sgi-xp/xpc.h
39413@@ -835,6 +835,7 @@ struct xpc_arch_operations {
39414 void (*received_payload) (struct xpc_channel *, void *);
39415 void (*notify_senders_of_disconnect) (struct xpc_channel *);
39416 };
39417+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
39418
39419 /* struct xpc_partition act_state values (for XPC HB) */
39420
39421@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
39422 /* found in xpc_main.c */
39423 extern struct device *xpc_part;
39424 extern struct device *xpc_chan;
39425-extern struct xpc_arch_operations xpc_arch_ops;
39426+extern xpc_arch_operations_no_const xpc_arch_ops;
39427 extern int xpc_disengage_timelimit;
39428 extern int xpc_disengage_timedout;
39429 extern int xpc_activate_IRQ_rcvd;
39430diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
39431index d971817..33bdca5 100644
39432--- a/drivers/misc/sgi-xp/xpc_main.c
39433+++ b/drivers/misc/sgi-xp/xpc_main.c
39434@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
39435 .notifier_call = xpc_system_die,
39436 };
39437
39438-struct xpc_arch_operations xpc_arch_ops;
39439+xpc_arch_operations_no_const xpc_arch_ops;
39440
39441 /*
39442 * Timer function to enforce the timelimit on the partition disengage.
39443@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
39444
39445 if (((die_args->trapnr == X86_TRAP_MF) ||
39446 (die_args->trapnr == X86_TRAP_XF)) &&
39447- !user_mode_vm(die_args->regs))
39448+ !user_mode(die_args->regs))
39449 xpc_die_deactivate();
39450
39451 break;
39452diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
39453index 6d8f701..35b6369 100644
39454--- a/drivers/mmc/core/mmc_ops.c
39455+++ b/drivers/mmc/core/mmc_ops.c
39456@@ -247,7 +247,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
39457 void *data_buf;
39458 int is_on_stack;
39459
39460- is_on_stack = object_is_on_stack(buf);
39461+ is_on_stack = object_starts_on_stack(buf);
39462 if (is_on_stack) {
39463 /*
39464 * dma onto stack is unsafe/nonportable, but callers to this
39465diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
39466index 53b8fd9..615b462 100644
39467--- a/drivers/mmc/host/dw_mmc.h
39468+++ b/drivers/mmc/host/dw_mmc.h
39469@@ -205,5 +205,5 @@ struct dw_mci_drv_data {
39470 int (*parse_dt)(struct dw_mci *host);
39471 int (*setup_bus)(struct dw_mci *host,
39472 struct device_node *slot_np, u8 bus_width);
39473-};
39474+} __do_const;
39475 #endif /* _DW_MMC_H_ */
39476diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
39477index 82a8de1..3c56ccb 100644
39478--- a/drivers/mmc/host/sdhci-s3c.c
39479+++ b/drivers/mmc/host/sdhci-s3c.c
39480@@ -721,9 +721,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
39481 * we can use overriding functions instead of default.
39482 */
39483 if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
39484- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
39485- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
39486- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
39487+ pax_open_kernel();
39488+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
39489+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
39490+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
39491+ pax_close_kernel();
39492 }
39493
39494 /* It supports additional host capabilities if needed */
39495diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
39496index a4eb8b5..8c0628f 100644
39497--- a/drivers/mtd/devices/doc2000.c
39498+++ b/drivers/mtd/devices/doc2000.c
39499@@ -753,7 +753,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
39500
39501 /* The ECC will not be calculated correctly if less than 512 is written */
39502 /* DBB-
39503- if (len != 0x200 && eccbuf)
39504+ if (len != 0x200)
39505 printk(KERN_WARNING
39506 "ECC needs a full sector write (adr: %lx size %lx)\n",
39507 (long) to, (long) len);
39508diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
39509index 0c8bb6b..6f35deb 100644
39510--- a/drivers/mtd/nand/denali.c
39511+++ b/drivers/mtd/nand/denali.c
39512@@ -24,6 +24,7 @@
39513 #include <linux/slab.h>
39514 #include <linux/mtd/mtd.h>
39515 #include <linux/module.h>
39516+#include <linux/slab.h>
39517
39518 #include "denali.h"
39519
39520diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
39521index 51b9d6a..52af9a7 100644
39522--- a/drivers/mtd/nftlmount.c
39523+++ b/drivers/mtd/nftlmount.c
39524@@ -24,6 +24,7 @@
39525 #include <asm/errno.h>
39526 #include <linux/delay.h>
39527 #include <linux/slab.h>
39528+#include <linux/sched.h>
39529 #include <linux/mtd/mtd.h>
39530 #include <linux/mtd/nand.h>
39531 #include <linux/mtd/nftl.h>
39532diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
39533index 8dd6ba5..419cc1d 100644
39534--- a/drivers/mtd/sm_ftl.c
39535+++ b/drivers/mtd/sm_ftl.c
39536@@ -56,7 +56,7 @@ ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
39537 #define SM_CIS_VENDOR_OFFSET 0x59
39538 struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
39539 {
39540- struct attribute_group *attr_group;
39541+ attribute_group_no_const *attr_group;
39542 struct attribute **attributes;
39543 struct sm_sysfs_attribute *vendor_attribute;
39544
39545diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
39546index 27cdf1f..8c37357 100644
39547--- a/drivers/net/bonding/bond_main.c
39548+++ b/drivers/net/bonding/bond_main.c
39549@@ -4859,7 +4859,7 @@ static unsigned int bond_get_num_tx_queues(void)
39550 return tx_queues;
39551 }
39552
39553-static struct rtnl_link_ops bond_link_ops __read_mostly = {
39554+static struct rtnl_link_ops bond_link_ops = {
39555 .kind = "bond",
39556 .priv_size = sizeof(struct bonding),
39557 .setup = bond_setup,
39558@@ -4975,8 +4975,8 @@ static void __exit bonding_exit(void)
39559
39560 bond_destroy_debugfs();
39561
39562- rtnl_link_unregister(&bond_link_ops);
39563 unregister_pernet_subsys(&bond_net_ops);
39564+ rtnl_link_unregister(&bond_link_ops);
39565
39566 #ifdef CONFIG_NET_POLL_CONTROLLER
39567 /*
39568diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
39569index 70dba5d..11a0919 100644
39570--- a/drivers/net/ethernet/8390/ax88796.c
39571+++ b/drivers/net/ethernet/8390/ax88796.c
39572@@ -872,9 +872,11 @@ static int ax_probe(struct platform_device *pdev)
39573 if (ax->plat->reg_offsets)
39574 ei_local->reg_offset = ax->plat->reg_offsets;
39575 else {
39576+ resource_size_t _mem_size = mem_size;
39577+ do_div(_mem_size, 0x18);
39578 ei_local->reg_offset = ax->reg_offsets;
39579 for (ret = 0; ret < 0x18; ret++)
39580- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
39581+ ax->reg_offsets[ret] = _mem_size * ret;
39582 }
39583
39584 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
39585diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
39586index 0991534..8098e92 100644
39587--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
39588+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
39589@@ -1094,7 +1094,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
39590 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
39591 {
39592 /* RX_MODE controlling object */
39593- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
39594+ bnx2x_init_rx_mode_obj(bp);
39595
39596 /* multicast configuration controlling object */
39597 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
39598diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
39599index 10bc093..a2fb42a 100644
39600--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
39601+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
39602@@ -2136,12 +2136,12 @@ static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
39603 break;
39604 default:
39605 BNX2X_ERR("Non valid capability ID\n");
39606- rval = -EINVAL;
39607+ rval = 1;
39608 break;
39609 }
39610 } else {
39611 DP(BNX2X_MSG_DCB, "DCB disabled\n");
39612- rval = -EINVAL;
39613+ rval = 1;
39614 }
39615
39616 DP(BNX2X_MSG_DCB, "capid %d:%x\n", capid, *cap);
39617@@ -2167,12 +2167,12 @@ static int bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num)
39618 break;
39619 default:
39620 BNX2X_ERR("Non valid TC-ID\n");
39621- rval = -EINVAL;
39622+ rval = 1;
39623 break;
39624 }
39625 } else {
39626 DP(BNX2X_MSG_DCB, "DCB disabled\n");
39627- rval = -EINVAL;
39628+ rval = 1;
39629 }
39630
39631 return rval;
39632@@ -2185,7 +2185,7 @@ static int bnx2x_dcbnl_set_numtcs(struct net_device *netdev, int tcid, u8 num)
39633 return -EINVAL;
39634 }
39635
39636-static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev)
39637+static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev)
39638 {
39639 struct bnx2x *bp = netdev_priv(netdev);
39640 DP(BNX2X_MSG_DCB, "state = %d\n", bp->dcbx_local_feat.pfc.enabled);
39641@@ -2387,12 +2387,12 @@ static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid,
39642 break;
39643 default:
39644 BNX2X_ERR("Non valid featrue-ID\n");
39645- rval = -EINVAL;
39646+ rval = 1;
39647 break;
39648 }
39649 } else {
39650 DP(BNX2X_MSG_DCB, "DCB disabled\n");
39651- rval = -EINVAL;
39652+ rval = 1;
39653 }
39654
39655 return rval;
39656@@ -2428,12 +2428,12 @@ static u8 bnx2x_dcbnl_set_featcfg(struct net_device *netdev, int featid,
39657 break;
39658 default:
39659 BNX2X_ERR("Non valid featrue-ID\n");
39660- rval = -EINVAL;
39661+ rval = 1;
39662 break;
39663 }
39664 } else {
39665 DP(BNX2X_MSG_DCB, "dcbnl call not valid\n");
39666- rval = -EINVAL;
39667+ rval = 1;
39668 }
39669
39670 return rval;
39671diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
39672index 09b625e..15b16fe 100644
39673--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
39674+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
39675@@ -2375,15 +2375,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
39676 return rc;
39677 }
39678
39679-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
39680- struct bnx2x_rx_mode_obj *o)
39681+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
39682 {
39683 if (CHIP_IS_E1x(bp)) {
39684- o->wait_comp = bnx2x_empty_rx_mode_wait;
39685- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
39686+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
39687+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
39688 } else {
39689- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
39690- o->config_rx_mode = bnx2x_set_rx_mode_e2;
39691+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
39692+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
39693 }
39694 }
39695
39696diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
39697index adbd91b..58ec94a 100644
39698--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
39699+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
39700@@ -1293,8 +1293,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
39701
39702 /********************* RX MODE ****************/
39703
39704-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
39705- struct bnx2x_rx_mode_obj *o);
39706+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
39707
39708 /**
39709 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
39710diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
39711index d330e81..ce1fb9a 100644
39712--- a/drivers/net/ethernet/broadcom/tg3.h
39713+++ b/drivers/net/ethernet/broadcom/tg3.h
39714@@ -146,6 +146,7 @@
39715 #define CHIPREV_ID_5750_A0 0x4000
39716 #define CHIPREV_ID_5750_A1 0x4001
39717 #define CHIPREV_ID_5750_A3 0x4003
39718+#define CHIPREV_ID_5750_C1 0x4201
39719 #define CHIPREV_ID_5750_C2 0x4202
39720 #define CHIPREV_ID_5752_A0_HW 0x5000
39721 #define CHIPREV_ID_5752_A0 0x6000
39722diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
39723index 8cffcdf..aadf043 100644
39724--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
39725+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
39726@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
39727 */
39728 struct l2t_skb_cb {
39729 arp_failure_handler_func arp_failure_handler;
39730-};
39731+} __no_const;
39732
39733 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
39734
39735diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
39736index 4c83003..2a2a5b9 100644
39737--- a/drivers/net/ethernet/dec/tulip/de4x5.c
39738+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
39739@@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39740 for (i=0; i<ETH_ALEN; i++) {
39741 tmp.addr[i] = dev->dev_addr[i];
39742 }
39743- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
39744+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
39745 break;
39746
39747 case DE4X5_SET_HWADDR: /* Set the hardware address */
39748@@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39749 spin_lock_irqsave(&lp->lock, flags);
39750 memcpy(&statbuf, &lp->pktStats, ioc->len);
39751 spin_unlock_irqrestore(&lp->lock, flags);
39752- if (copy_to_user(ioc->data, &statbuf, ioc->len))
39753+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
39754 return -EFAULT;
39755 break;
39756 }
39757diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
39758index 4d6f3c5..6169e60 100644
39759--- a/drivers/net/ethernet/emulex/benet/be_main.c
39760+++ b/drivers/net/ethernet/emulex/benet/be_main.c
39761@@ -455,7 +455,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
39762
39763 if (wrapped)
39764 newacc += 65536;
39765- ACCESS_ONCE(*acc) = newacc;
39766+ ACCESS_ONCE_RW(*acc) = newacc;
39767 }
39768
39769 void be_parse_stats(struct be_adapter *adapter)
39770diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
39771index 74d749e..eefb1bd 100644
39772--- a/drivers/net/ethernet/faraday/ftgmac100.c
39773+++ b/drivers/net/ethernet/faraday/ftgmac100.c
39774@@ -31,6 +31,8 @@
39775 #include <linux/netdevice.h>
39776 #include <linux/phy.h>
39777 #include <linux/platform_device.h>
39778+#include <linux/interrupt.h>
39779+#include <linux/irqreturn.h>
39780 #include <net/ip.h>
39781
39782 #include "ftgmac100.h"
39783diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
39784index b901a01..1ff32ee 100644
39785--- a/drivers/net/ethernet/faraday/ftmac100.c
39786+++ b/drivers/net/ethernet/faraday/ftmac100.c
39787@@ -31,6 +31,8 @@
39788 #include <linux/module.h>
39789 #include <linux/netdevice.h>
39790 #include <linux/platform_device.h>
39791+#include <linux/interrupt.h>
39792+#include <linux/irqreturn.h>
39793
39794 #include "ftmac100.h"
39795
39796diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
39797index bb9256a..56d8752 100644
39798--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
39799+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
39800@@ -806,7 +806,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
39801 }
39802
39803 /* update the base incval used to calculate frequency adjustment */
39804- ACCESS_ONCE(adapter->base_incval) = incval;
39805+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
39806 smp_mb();
39807
39808 /* need lock to prevent incorrect read while modifying cyclecounter */
39809diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
39810index c124e67..db9b897 100644
39811--- a/drivers/net/ethernet/lantiq_etop.c
39812+++ b/drivers/net/ethernet/lantiq_etop.c
39813@@ -769,7 +769,7 @@ ltq_etop_probe(struct platform_device *pdev)
39814 return 0;
39815
39816 err_free:
39817- kfree(dev);
39818+ free_netdev(dev);
39819 err_out:
39820 return err;
39821 }
39822diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
39823index fbe5363..266b4e3 100644
39824--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
39825+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
39826@@ -3461,7 +3461,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
39827 struct __vxge_hw_fifo *fifo;
39828 struct vxge_hw_fifo_config *config;
39829 u32 txdl_size, txdl_per_memblock;
39830- struct vxge_hw_mempool_cbs fifo_mp_callback;
39831+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
39832+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
39833+ };
39834+
39835 struct __vxge_hw_virtualpath *vpath;
39836
39837 if ((vp == NULL) || (attr == NULL)) {
39838@@ -3544,8 +3547,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
39839 goto exit;
39840 }
39841
39842- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
39843-
39844 fifo->mempool =
39845 __vxge_hw_mempool_create(vpath->hldev,
39846 fifo->config->memblock_size,
39847diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
39848index 998974f..ecd26db 100644
39849--- a/drivers/net/ethernet/realtek/r8169.c
39850+++ b/drivers/net/ethernet/realtek/r8169.c
39851@@ -741,22 +741,22 @@ struct rtl8169_private {
39852 struct mdio_ops {
39853 void (*write)(struct rtl8169_private *, int, int);
39854 int (*read)(struct rtl8169_private *, int);
39855- } mdio_ops;
39856+ } __no_const mdio_ops;
39857
39858 struct pll_power_ops {
39859 void (*down)(struct rtl8169_private *);
39860 void (*up)(struct rtl8169_private *);
39861- } pll_power_ops;
39862+ } __no_const pll_power_ops;
39863
39864 struct jumbo_ops {
39865 void (*enable)(struct rtl8169_private *);
39866 void (*disable)(struct rtl8169_private *);
39867- } jumbo_ops;
39868+ } __no_const jumbo_ops;
39869
39870 struct csi_ops {
39871 void (*write)(struct rtl8169_private *, int, int);
39872 u32 (*read)(struct rtl8169_private *, int);
39873- } csi_ops;
39874+ } __no_const csi_ops;
39875
39876 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
39877 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
39878diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
39879index 3f93624..cf01144 100644
39880--- a/drivers/net/ethernet/sfc/ptp.c
39881+++ b/drivers/net/ethernet/sfc/ptp.c
39882@@ -553,7 +553,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
39883 (u32)((u64)ptp->start.dma_addr >> 32));
39884
39885 /* Clear flag that signals MC ready */
39886- ACCESS_ONCE(*start) = 0;
39887+ ACCESS_ONCE_RW(*start) = 0;
39888 efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
39889 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
39890
39891diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
39892index 0c74a70..3bc6f68 100644
39893--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
39894+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
39895@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
39896
39897 writel(value, ioaddr + MMC_CNTRL);
39898
39899- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
39900- MMC_CNTRL, value);
39901+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
39902+// MMC_CNTRL, value);
39903 }
39904
39905 /* To mask all all interrupts.*/
39906diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
39907index e6fe0d8..2b7d752 100644
39908--- a/drivers/net/hyperv/hyperv_net.h
39909+++ b/drivers/net/hyperv/hyperv_net.h
39910@@ -101,7 +101,7 @@ struct rndis_device {
39911
39912 enum rndis_device_state state;
39913 bool link_state;
39914- atomic_t new_req_id;
39915+ atomic_unchecked_t new_req_id;
39916
39917 spinlock_t request_lock;
39918 struct list_head req_list;
39919diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
39920index 2b657d4..9903bc0 100644
39921--- a/drivers/net/hyperv/rndis_filter.c
39922+++ b/drivers/net/hyperv/rndis_filter.c
39923@@ -107,7 +107,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
39924 * template
39925 */
39926 set = &rndis_msg->msg.set_req;
39927- set->req_id = atomic_inc_return(&dev->new_req_id);
39928+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
39929
39930 /* Add to the request list */
39931 spin_lock_irqsave(&dev->request_lock, flags);
39932@@ -758,7 +758,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
39933
39934 /* Setup the rndis set */
39935 halt = &request->request_msg.msg.halt_req;
39936- halt->req_id = atomic_inc_return(&dev->new_req_id);
39937+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
39938
39939 /* Ignore return since this msg is optional. */
39940 rndis_filter_send_request(dev, request);
39941diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
39942index 1e9cb0b..7839125 100644
39943--- a/drivers/net/ieee802154/fakehard.c
39944+++ b/drivers/net/ieee802154/fakehard.c
39945@@ -386,7 +386,7 @@ static int ieee802154fake_probe(struct platform_device *pdev)
39946 phy->transmit_power = 0xbf;
39947
39948 dev->netdev_ops = &fake_ops;
39949- dev->ml_priv = &fake_mlme;
39950+ dev->ml_priv = (void *)&fake_mlme;
39951
39952 priv = netdev_priv(dev);
39953 priv->phy = phy;
39954diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
39955index e5cb723..1fc0461 100644
39956--- a/drivers/net/macvlan.c
39957+++ b/drivers/net/macvlan.c
39958@@ -852,13 +852,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
39959 int macvlan_link_register(struct rtnl_link_ops *ops)
39960 {
39961 /* common fields */
39962- ops->priv_size = sizeof(struct macvlan_dev);
39963- ops->validate = macvlan_validate;
39964- ops->maxtype = IFLA_MACVLAN_MAX;
39965- ops->policy = macvlan_policy;
39966- ops->changelink = macvlan_changelink;
39967- ops->get_size = macvlan_get_size;
39968- ops->fill_info = macvlan_fill_info;
39969+ pax_open_kernel();
39970+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
39971+ *(void **)&ops->validate = macvlan_validate;
39972+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
39973+ *(const void **)&ops->policy = macvlan_policy;
39974+ *(void **)&ops->changelink = macvlan_changelink;
39975+ *(void **)&ops->get_size = macvlan_get_size;
39976+ *(void **)&ops->fill_info = macvlan_fill_info;
39977+ pax_close_kernel();
39978
39979 return rtnl_link_register(ops);
39980 };
39981@@ -914,7 +916,7 @@ static int macvlan_device_event(struct notifier_block *unused,
39982 return NOTIFY_DONE;
39983 }
39984
39985-static struct notifier_block macvlan_notifier_block __read_mostly = {
39986+static struct notifier_block macvlan_notifier_block = {
39987 .notifier_call = macvlan_device_event,
39988 };
39989
39990diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
39991index 0f0f9ce..0ca5819 100644
39992--- a/drivers/net/macvtap.c
39993+++ b/drivers/net/macvtap.c
39994@@ -1100,7 +1100,7 @@ static int macvtap_device_event(struct notifier_block *unused,
39995 return NOTIFY_DONE;
39996 }
39997
39998-static struct notifier_block macvtap_notifier_block __read_mostly = {
39999+static struct notifier_block macvtap_notifier_block = {
40000 .notifier_call = macvtap_device_event,
40001 };
40002
40003diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
40004index daec9b0..6428fcb 100644
40005--- a/drivers/net/phy/mdio-bitbang.c
40006+++ b/drivers/net/phy/mdio-bitbang.c
40007@@ -234,6 +234,7 @@ void free_mdio_bitbang(struct mii_bus *bus)
40008 struct mdiobb_ctrl *ctrl = bus->priv;
40009
40010 module_put(ctrl->ops->owner);
40011+ mdiobus_unregister(bus);
40012 mdiobus_free(bus);
40013 }
40014 EXPORT_SYMBOL(free_mdio_bitbang);
40015diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
40016index 508570e..f706dc7 100644
40017--- a/drivers/net/ppp/ppp_generic.c
40018+++ b/drivers/net/ppp/ppp_generic.c
40019@@ -999,7 +999,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
40020 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
40021 struct ppp_stats stats;
40022 struct ppp_comp_stats cstats;
40023- char *vers;
40024
40025 switch (cmd) {
40026 case SIOCGPPPSTATS:
40027@@ -1021,8 +1020,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
40028 break;
40029
40030 case SIOCGPPPVER:
40031- vers = PPP_VERSION;
40032- if (copy_to_user(addr, vers, strlen(vers) + 1))
40033+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
40034 break;
40035 err = 0;
40036 break;
40037diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
40038index 8efe47a..a8075c5 100644
40039--- a/drivers/net/team/team.c
40040+++ b/drivers/net/team/team.c
40041@@ -2603,7 +2603,7 @@ static int team_device_event(struct notifier_block *unused,
40042 return NOTIFY_DONE;
40043 }
40044
40045-static struct notifier_block team_notifier_block __read_mostly = {
40046+static struct notifier_block team_notifier_block = {
40047 .notifier_call = team_device_event,
40048 };
40049
40050diff --git a/drivers/net/tun.c b/drivers/net/tun.c
40051index cb95fe5..a5bdab5 100644
40052--- a/drivers/net/tun.c
40053+++ b/drivers/net/tun.c
40054@@ -1838,7 +1838,7 @@ unlock:
40055 }
40056
40057 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
40058- unsigned long arg, int ifreq_len)
40059+ unsigned long arg, size_t ifreq_len)
40060 {
40061 struct tun_file *tfile = file->private_data;
40062 struct tun_struct *tun;
40063@@ -1850,6 +1850,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
40064 int vnet_hdr_sz;
40065 int ret;
40066
40067+ if (ifreq_len > sizeof ifr)
40068+ return -EFAULT;
40069+
40070 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
40071 if (copy_from_user(&ifr, argp, ifreq_len))
40072 return -EFAULT;
40073diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
40074index cd8ccb2..cff5144 100644
40075--- a/drivers/net/usb/hso.c
40076+++ b/drivers/net/usb/hso.c
40077@@ -71,7 +71,7 @@
40078 #include <asm/byteorder.h>
40079 #include <linux/serial_core.h>
40080 #include <linux/serial.h>
40081-
40082+#include <asm/local.h>
40083
40084 #define MOD_AUTHOR "Option Wireless"
40085 #define MOD_DESCRIPTION "USB High Speed Option driver"
40086@@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
40087 struct urb *urb;
40088
40089 urb = serial->rx_urb[0];
40090- if (serial->port.count > 0) {
40091+ if (atomic_read(&serial->port.count) > 0) {
40092 count = put_rxbuf_data(urb, serial);
40093 if (count == -1)
40094 return;
40095@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
40096 DUMP1(urb->transfer_buffer, urb->actual_length);
40097
40098 /* Anyone listening? */
40099- if (serial->port.count == 0)
40100+ if (atomic_read(&serial->port.count) == 0)
40101 return;
40102
40103 if (status == 0) {
40104@@ -1298,8 +1298,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
40105 tty_port_tty_set(&serial->port, tty);
40106
40107 /* check for port already opened, if not set the termios */
40108- serial->port.count++;
40109- if (serial->port.count == 1) {
40110+ if (atomic_inc_return(&serial->port.count) == 1) {
40111 serial->rx_state = RX_IDLE;
40112 /* Force default termio settings */
40113 _hso_serial_set_termios(tty, NULL);
40114@@ -1311,7 +1310,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
40115 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
40116 if (result) {
40117 hso_stop_serial_device(serial->parent);
40118- serial->port.count--;
40119+ atomic_dec(&serial->port.count);
40120 kref_put(&serial->parent->ref, hso_serial_ref_free);
40121 }
40122 } else {
40123@@ -1348,10 +1347,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
40124
40125 /* reset the rts and dtr */
40126 /* do the actual close */
40127- serial->port.count--;
40128+ atomic_dec(&serial->port.count);
40129
40130- if (serial->port.count <= 0) {
40131- serial->port.count = 0;
40132+ if (atomic_read(&serial->port.count) <= 0) {
40133+ atomic_set(&serial->port.count, 0);
40134 tty_port_tty_set(&serial->port, NULL);
40135 if (!usb_gone)
40136 hso_stop_serial_device(serial->parent);
40137@@ -1427,7 +1426,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
40138
40139 /* the actual setup */
40140 spin_lock_irqsave(&serial->serial_lock, flags);
40141- if (serial->port.count)
40142+ if (atomic_read(&serial->port.count))
40143 _hso_serial_set_termios(tty, old);
40144 else
40145 tty->termios = *old;
40146@@ -1886,7 +1885,7 @@ static void intr_callback(struct urb *urb)
40147 D1("Pending read interrupt on port %d\n", i);
40148 spin_lock(&serial->serial_lock);
40149 if (serial->rx_state == RX_IDLE &&
40150- serial->port.count > 0) {
40151+ atomic_read(&serial->port.count) > 0) {
40152 /* Setup and send a ctrl req read on
40153 * port i */
40154 if (!serial->rx_urb_filled[0]) {
40155@@ -3079,7 +3078,7 @@ static int hso_resume(struct usb_interface *iface)
40156 /* Start all serial ports */
40157 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
40158 if (serial_table[i] && (serial_table[i]->interface == iface)) {
40159- if (dev2ser(serial_table[i])->port.count) {
40160+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
40161 result =
40162 hso_start_serial_device(serial_table[i], GFP_NOIO);
40163 hso_kick_transmit(dev2ser(serial_table[i]));
40164diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
40165index 6993bfa..9053a34 100644
40166--- a/drivers/net/vxlan.c
40167+++ b/drivers/net/vxlan.c
40168@@ -1428,7 +1428,7 @@ nla_put_failure:
40169 return -EMSGSIZE;
40170 }
40171
40172-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
40173+static struct rtnl_link_ops vxlan_link_ops = {
40174 .kind = "vxlan",
40175 .maxtype = IFLA_VXLAN_MAX,
40176 .policy = vxlan_policy,
40177diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
40178index 77fa428..996b355 100644
40179--- a/drivers/net/wireless/at76c50x-usb.c
40180+++ b/drivers/net/wireless/at76c50x-usb.c
40181@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
40182 }
40183
40184 /* Convert timeout from the DFU status to jiffies */
40185-static inline unsigned long at76_get_timeout(struct dfu_status *s)
40186+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
40187 {
40188 return msecs_to_jiffies((s->poll_timeout[2] << 16)
40189 | (s->poll_timeout[1] << 8)
40190diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
40191index 8d78253..bebbb68 100644
40192--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
40193+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
40194@@ -184,8 +184,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40195 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
40196 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
40197
40198- ACCESS_ONCE(ads->ds_link) = i->link;
40199- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
40200+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
40201+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
40202
40203 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
40204 ctl6 = SM(i->keytype, AR_EncrType);
40205@@ -199,26 +199,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40206
40207 if ((i->is_first || i->is_last) &&
40208 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
40209- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
40210+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
40211 | set11nTries(i->rates, 1)
40212 | set11nTries(i->rates, 2)
40213 | set11nTries(i->rates, 3)
40214 | (i->dur_update ? AR_DurUpdateEna : 0)
40215 | SM(0, AR_BurstDur);
40216
40217- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
40218+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
40219 | set11nRate(i->rates, 1)
40220 | set11nRate(i->rates, 2)
40221 | set11nRate(i->rates, 3);
40222 } else {
40223- ACCESS_ONCE(ads->ds_ctl2) = 0;
40224- ACCESS_ONCE(ads->ds_ctl3) = 0;
40225+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
40226+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
40227 }
40228
40229 if (!i->is_first) {
40230- ACCESS_ONCE(ads->ds_ctl0) = 0;
40231- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
40232- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
40233+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
40234+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
40235+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
40236 return;
40237 }
40238
40239@@ -243,7 +243,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40240 break;
40241 }
40242
40243- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
40244+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
40245 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
40246 | SM(i->txpower, AR_XmitPower)
40247 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
40248@@ -253,19 +253,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40249 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
40250 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
40251
40252- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
40253- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
40254+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
40255+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
40256
40257 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
40258 return;
40259
40260- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
40261+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
40262 | set11nPktDurRTSCTS(i->rates, 1);
40263
40264- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
40265+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
40266 | set11nPktDurRTSCTS(i->rates, 3);
40267
40268- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
40269+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
40270 | set11nRateFlags(i->rates, 1)
40271 | set11nRateFlags(i->rates, 2)
40272 | set11nRateFlags(i->rates, 3)
40273diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
40274index 301bf72..3f5654f 100644
40275--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
40276+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
40277@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40278 (i->qcu << AR_TxQcuNum_S) | desc_len;
40279
40280 checksum += val;
40281- ACCESS_ONCE(ads->info) = val;
40282+ ACCESS_ONCE_RW(ads->info) = val;
40283
40284 checksum += i->link;
40285- ACCESS_ONCE(ads->link) = i->link;
40286+ ACCESS_ONCE_RW(ads->link) = i->link;
40287
40288 checksum += i->buf_addr[0];
40289- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
40290+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
40291 checksum += i->buf_addr[1];
40292- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
40293+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
40294 checksum += i->buf_addr[2];
40295- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
40296+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
40297 checksum += i->buf_addr[3];
40298- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
40299+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
40300
40301 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
40302- ACCESS_ONCE(ads->ctl3) = val;
40303+ ACCESS_ONCE_RW(ads->ctl3) = val;
40304 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
40305- ACCESS_ONCE(ads->ctl5) = val;
40306+ ACCESS_ONCE_RW(ads->ctl5) = val;
40307 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
40308- ACCESS_ONCE(ads->ctl7) = val;
40309+ ACCESS_ONCE_RW(ads->ctl7) = val;
40310 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
40311- ACCESS_ONCE(ads->ctl9) = val;
40312+ ACCESS_ONCE_RW(ads->ctl9) = val;
40313
40314 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
40315- ACCESS_ONCE(ads->ctl10) = checksum;
40316+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
40317
40318 if (i->is_first || i->is_last) {
40319- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
40320+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
40321 | set11nTries(i->rates, 1)
40322 | set11nTries(i->rates, 2)
40323 | set11nTries(i->rates, 3)
40324 | (i->dur_update ? AR_DurUpdateEna : 0)
40325 | SM(0, AR_BurstDur);
40326
40327- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
40328+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
40329 | set11nRate(i->rates, 1)
40330 | set11nRate(i->rates, 2)
40331 | set11nRate(i->rates, 3);
40332 } else {
40333- ACCESS_ONCE(ads->ctl13) = 0;
40334- ACCESS_ONCE(ads->ctl14) = 0;
40335+ ACCESS_ONCE_RW(ads->ctl13) = 0;
40336+ ACCESS_ONCE_RW(ads->ctl14) = 0;
40337 }
40338
40339 ads->ctl20 = 0;
40340@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40341
40342 ctl17 = SM(i->keytype, AR_EncrType);
40343 if (!i->is_first) {
40344- ACCESS_ONCE(ads->ctl11) = 0;
40345- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
40346- ACCESS_ONCE(ads->ctl15) = 0;
40347- ACCESS_ONCE(ads->ctl16) = 0;
40348- ACCESS_ONCE(ads->ctl17) = ctl17;
40349- ACCESS_ONCE(ads->ctl18) = 0;
40350- ACCESS_ONCE(ads->ctl19) = 0;
40351+ ACCESS_ONCE_RW(ads->ctl11) = 0;
40352+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
40353+ ACCESS_ONCE_RW(ads->ctl15) = 0;
40354+ ACCESS_ONCE_RW(ads->ctl16) = 0;
40355+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
40356+ ACCESS_ONCE_RW(ads->ctl18) = 0;
40357+ ACCESS_ONCE_RW(ads->ctl19) = 0;
40358 return;
40359 }
40360
40361- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
40362+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
40363 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
40364 | SM(i->txpower, AR_XmitPower)
40365 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
40366@@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40367 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
40368 ctl12 |= SM(val, AR_PAPRDChainMask);
40369
40370- ACCESS_ONCE(ads->ctl12) = ctl12;
40371- ACCESS_ONCE(ads->ctl17) = ctl17;
40372+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
40373+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
40374
40375- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
40376+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
40377 | set11nPktDurRTSCTS(i->rates, 1);
40378
40379- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
40380+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
40381 | set11nPktDurRTSCTS(i->rates, 3);
40382
40383- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
40384+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
40385 | set11nRateFlags(i->rates, 1)
40386 | set11nRateFlags(i->rates, 2)
40387 | set11nRateFlags(i->rates, 3)
40388 | SM(i->rtscts_rate, AR_RTSCTSRate);
40389
40390- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
40391+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
40392 }
40393
40394 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
40395diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
40396index 9d26fc5..60d9f14 100644
40397--- a/drivers/net/wireless/ath/ath9k/hw.h
40398+++ b/drivers/net/wireless/ath/ath9k/hw.h
40399@@ -658,7 +658,7 @@ struct ath_hw_private_ops {
40400
40401 /* ANI */
40402 void (*ani_cache_ini_regs)(struct ath_hw *ah);
40403-};
40404+} __no_const;
40405
40406 /**
40407 * struct ath_hw_ops - callbacks used by hardware code and driver code
40408@@ -688,7 +688,7 @@ struct ath_hw_ops {
40409 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
40410 struct ath_hw_antcomb_conf *antconf);
40411 void (*antctrl_shared_chain_lnadiv)(struct ath_hw *hw, bool enable);
40412-};
40413+} __no_const;
40414
40415 struct ath_nf_limits {
40416 s16 max;
40417diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
40418index 3726cd6..b655808 100644
40419--- a/drivers/net/wireless/iwlegacy/3945-mac.c
40420+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
40421@@ -3615,7 +3615,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
40422 */
40423 if (il3945_mod_params.disable_hw_scan) {
40424 D_INFO("Disabling hw_scan\n");
40425- il3945_mac_ops.hw_scan = NULL;
40426+ pax_open_kernel();
40427+ *(void **)&il3945_mac_ops.hw_scan = NULL;
40428+ pax_close_kernel();
40429 }
40430
40431 D_INFO("*** LOAD DRIVER ***\n");
40432diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
40433index 5b9533e..7733880 100644
40434--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
40435+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
40436@@ -203,7 +203,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
40437 {
40438 struct iwl_priv *priv = file->private_data;
40439 char buf[64];
40440- int buf_size;
40441+ size_t buf_size;
40442 u32 offset, len;
40443
40444 memset(buf, 0, sizeof(buf));
40445@@ -473,7 +473,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
40446 struct iwl_priv *priv = file->private_data;
40447
40448 char buf[8];
40449- int buf_size;
40450+ size_t buf_size;
40451 u32 reset_flag;
40452
40453 memset(buf, 0, sizeof(buf));
40454@@ -554,7 +554,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
40455 {
40456 struct iwl_priv *priv = file->private_data;
40457 char buf[8];
40458- int buf_size;
40459+ size_t buf_size;
40460 int ht40;
40461
40462 memset(buf, 0, sizeof(buf));
40463@@ -606,7 +606,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
40464 {
40465 struct iwl_priv *priv = file->private_data;
40466 char buf[8];
40467- int buf_size;
40468+ size_t buf_size;
40469 int value;
40470
40471 memset(buf, 0, sizeof(buf));
40472@@ -1871,7 +1871,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
40473 {
40474 struct iwl_priv *priv = file->private_data;
40475 char buf[8];
40476- int buf_size;
40477+ size_t buf_size;
40478 int clear;
40479
40480 memset(buf, 0, sizeof(buf));
40481@@ -1916,7 +1916,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
40482 {
40483 struct iwl_priv *priv = file->private_data;
40484 char buf[8];
40485- int buf_size;
40486+ size_t buf_size;
40487 int trace;
40488
40489 memset(buf, 0, sizeof(buf));
40490@@ -1987,7 +1987,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
40491 {
40492 struct iwl_priv *priv = file->private_data;
40493 char buf[8];
40494- int buf_size;
40495+ size_t buf_size;
40496 int missed;
40497
40498 memset(buf, 0, sizeof(buf));
40499@@ -2028,7 +2028,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
40500
40501 struct iwl_priv *priv = file->private_data;
40502 char buf[8];
40503- int buf_size;
40504+ size_t buf_size;
40505 int plcp;
40506
40507 memset(buf, 0, sizeof(buf));
40508@@ -2088,7 +2088,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
40509
40510 struct iwl_priv *priv = file->private_data;
40511 char buf[8];
40512- int buf_size;
40513+ size_t buf_size;
40514 int flush;
40515
40516 memset(buf, 0, sizeof(buf));
40517@@ -2178,7 +2178,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
40518
40519 struct iwl_priv *priv = file->private_data;
40520 char buf[8];
40521- int buf_size;
40522+ size_t buf_size;
40523 int rts;
40524
40525 if (!priv->cfg->ht_params)
40526@@ -2220,7 +2220,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
40527 {
40528 struct iwl_priv *priv = file->private_data;
40529 char buf[8];
40530- int buf_size;
40531+ size_t buf_size;
40532
40533 memset(buf, 0, sizeof(buf));
40534 buf_size = min(count, sizeof(buf) - 1);
40535@@ -2256,7 +2256,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
40536 struct iwl_priv *priv = file->private_data;
40537 u32 event_log_flag;
40538 char buf[8];
40539- int buf_size;
40540+ size_t buf_size;
40541
40542 /* check that the interface is up */
40543 if (!iwl_is_ready(priv))
40544@@ -2310,7 +2310,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
40545 struct iwl_priv *priv = file->private_data;
40546 char buf[8];
40547 u32 calib_disabled;
40548- int buf_size;
40549+ size_t buf_size;
40550
40551 memset(buf, 0, sizeof(buf));
40552 buf_size = min(count, sizeof(buf) - 1);
40553diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
40554index 35708b9..31f7754 100644
40555--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
40556+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
40557@@ -1100,7 +1100,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
40558 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
40559
40560 char buf[8];
40561- int buf_size;
40562+ size_t buf_size;
40563 u32 reset_flag;
40564
40565 memset(buf, 0, sizeof(buf));
40566@@ -1121,7 +1121,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
40567 {
40568 struct iwl_trans *trans = file->private_data;
40569 char buf[8];
40570- int buf_size;
40571+ size_t buf_size;
40572 int csr;
40573
40574 memset(buf, 0, sizeof(buf));
40575diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
40576index ff90855..e46d223 100644
40577--- a/drivers/net/wireless/mac80211_hwsim.c
40578+++ b/drivers/net/wireless/mac80211_hwsim.c
40579@@ -2062,25 +2062,19 @@ static int __init init_mac80211_hwsim(void)
40580
40581 if (channels > 1) {
40582 hwsim_if_comb.num_different_channels = channels;
40583- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
40584- mac80211_hwsim_ops.cancel_hw_scan =
40585- mac80211_hwsim_cancel_hw_scan;
40586- mac80211_hwsim_ops.sw_scan_start = NULL;
40587- mac80211_hwsim_ops.sw_scan_complete = NULL;
40588- mac80211_hwsim_ops.remain_on_channel =
40589- mac80211_hwsim_roc;
40590- mac80211_hwsim_ops.cancel_remain_on_channel =
40591- mac80211_hwsim_croc;
40592- mac80211_hwsim_ops.add_chanctx =
40593- mac80211_hwsim_add_chanctx;
40594- mac80211_hwsim_ops.remove_chanctx =
40595- mac80211_hwsim_remove_chanctx;
40596- mac80211_hwsim_ops.change_chanctx =
40597- mac80211_hwsim_change_chanctx;
40598- mac80211_hwsim_ops.assign_vif_chanctx =
40599- mac80211_hwsim_assign_vif_chanctx;
40600- mac80211_hwsim_ops.unassign_vif_chanctx =
40601- mac80211_hwsim_unassign_vif_chanctx;
40602+ pax_open_kernel();
40603+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
40604+ *(void **)&mac80211_hwsim_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
40605+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
40606+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
40607+ *(void **)&mac80211_hwsim_ops.remain_on_channel = mac80211_hwsim_roc;
40608+ *(void **)&mac80211_hwsim_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
40609+ *(void **)&mac80211_hwsim_ops.add_chanctx = mac80211_hwsim_add_chanctx;
40610+ *(void **)&mac80211_hwsim_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
40611+ *(void **)&mac80211_hwsim_ops.change_chanctx = mac80211_hwsim_change_chanctx;
40612+ *(void **)&mac80211_hwsim_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
40613+ *(void **)&mac80211_hwsim_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
40614+ pax_close_kernel();
40615 }
40616
40617 spin_lock_init(&hwsim_radio_lock);
40618diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
40619index cdb11b3..3eca710 100644
40620--- a/drivers/net/wireless/mwifiex/cfg80211.c
40621+++ b/drivers/net/wireless/mwifiex/cfg80211.c
40622@@ -1846,7 +1846,8 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
40623 }
40624 }
40625
40626- for (i = 0; i < request->n_channels; i++) {
40627+ for (i = 0; i < min_t(u32, request->n_channels,
40628+ MWIFIEX_USER_SCAN_CHAN_MAX); i++) {
40629 chan = request->channels[i];
40630 priv->user_scan_cfg->chan_list[i].chan_number = chan->hw_value;
40631 priv->user_scan_cfg->chan_list[i].radio_type = chan->band;
40632diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
40633index abe1d03..fb02c22 100644
40634--- a/drivers/net/wireless/rndis_wlan.c
40635+++ b/drivers/net/wireless/rndis_wlan.c
40636@@ -1238,7 +1238,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
40637
40638 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
40639
40640- if (rts_threshold < 0 || rts_threshold > 2347)
40641+ if (rts_threshold > 2347)
40642 rts_threshold = 2347;
40643
40644 tmp = cpu_to_le32(rts_threshold);
40645diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
40646index 0751b35..246ba3e 100644
40647--- a/drivers/net/wireless/rt2x00/rt2x00.h
40648+++ b/drivers/net/wireless/rt2x00/rt2x00.h
40649@@ -398,7 +398,7 @@ struct rt2x00_intf {
40650 * for hardware which doesn't support hardware
40651 * sequence counting.
40652 */
40653- atomic_t seqno;
40654+ atomic_unchecked_t seqno;
40655 };
40656
40657 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
40658diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
40659index e488b94..14b6a0c 100644
40660--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
40661+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
40662@@ -240,9 +240,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
40663 * sequence counter given by mac80211.
40664 */
40665 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
40666- seqno = atomic_add_return(0x10, &intf->seqno);
40667+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
40668 else
40669- seqno = atomic_read(&intf->seqno);
40670+ seqno = atomic_read_unchecked(&intf->seqno);
40671
40672 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
40673 hdr->seq_ctrl |= cpu_to_le16(seqno);
40674diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
40675index e57ee48..541cf6c 100644
40676--- a/drivers/net/wireless/ti/wl1251/sdio.c
40677+++ b/drivers/net/wireless/ti/wl1251/sdio.c
40678@@ -269,13 +269,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
40679
40680 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
40681
40682- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
40683- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
40684+ pax_open_kernel();
40685+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
40686+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
40687+ pax_close_kernel();
40688
40689 wl1251_info("using dedicated interrupt line");
40690 } else {
40691- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
40692- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
40693+ pax_open_kernel();
40694+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
40695+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
40696+ pax_close_kernel();
40697
40698 wl1251_info("using SDIO interrupt");
40699 }
40700diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
40701index e5f5f8f..fdf15b7 100644
40702--- a/drivers/net/wireless/ti/wl12xx/main.c
40703+++ b/drivers/net/wireless/ti/wl12xx/main.c
40704@@ -644,7 +644,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
40705 sizeof(wl->conf.mem));
40706
40707 /* read data preparation is only needed by wl127x */
40708- wl->ops->prepare_read = wl127x_prepare_read;
40709+ pax_open_kernel();
40710+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
40711+ pax_close_kernel();
40712
40713 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER, WL127X_IFTYPE_VER,
40714 WL127X_MAJOR_VER, WL127X_SUBTYPE_VER,
40715@@ -665,7 +667,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
40716 sizeof(wl->conf.mem));
40717
40718 /* read data preparation is only needed by wl127x */
40719- wl->ops->prepare_read = wl127x_prepare_read;
40720+ pax_open_kernel();
40721+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
40722+ pax_close_kernel();
40723
40724 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER, WL127X_IFTYPE_VER,
40725 WL127X_MAJOR_VER, WL127X_SUBTYPE_VER,
40726diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
40727index 8d8c1f8..e754844 100644
40728--- a/drivers/net/wireless/ti/wl18xx/main.c
40729+++ b/drivers/net/wireless/ti/wl18xx/main.c
40730@@ -1489,8 +1489,10 @@ static int wl18xx_setup(struct wl1271 *wl)
40731 }
40732
40733 if (!checksum_param) {
40734- wl18xx_ops.set_rx_csum = NULL;
40735- wl18xx_ops.init_vif = NULL;
40736+ pax_open_kernel();
40737+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
40738+ *(void **)&wl18xx_ops.init_vif = NULL;
40739+ pax_close_kernel();
40740 }
40741
40742 /* Enable 11a Band only if we have 5G antennas */
40743diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
40744index ef2b171..bb513a6 100644
40745--- a/drivers/net/wireless/zd1211rw/zd_usb.c
40746+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
40747@@ -387,7 +387,7 @@ static inline void handle_regs_int(struct urb *urb)
40748 {
40749 struct zd_usb *usb = urb->context;
40750 struct zd_usb_interrupt *intr = &usb->intr;
40751- int len;
40752+ unsigned int len;
40753 u16 int_num;
40754
40755 ZD_ASSERT(in_interrupt());
40756diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
40757index d93b2b6..ae50401 100644
40758--- a/drivers/oprofile/buffer_sync.c
40759+++ b/drivers/oprofile/buffer_sync.c
40760@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
40761 if (cookie == NO_COOKIE)
40762 offset = pc;
40763 if (cookie == INVALID_COOKIE) {
40764- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
40765+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
40766 offset = pc;
40767 }
40768 if (cookie != last_cookie) {
40769@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
40770 /* add userspace sample */
40771
40772 if (!mm) {
40773- atomic_inc(&oprofile_stats.sample_lost_no_mm);
40774+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
40775 return 0;
40776 }
40777
40778 cookie = lookup_dcookie(mm, s->eip, &offset);
40779
40780 if (cookie == INVALID_COOKIE) {
40781- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
40782+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
40783 return 0;
40784 }
40785
40786@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
40787 /* ignore backtraces if failed to add a sample */
40788 if (state == sb_bt_start) {
40789 state = sb_bt_ignore;
40790- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
40791+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
40792 }
40793 }
40794 release_mm(mm);
40795diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
40796index c0cc4e7..44d4e54 100644
40797--- a/drivers/oprofile/event_buffer.c
40798+++ b/drivers/oprofile/event_buffer.c
40799@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
40800 }
40801
40802 if (buffer_pos == buffer_size) {
40803- atomic_inc(&oprofile_stats.event_lost_overflow);
40804+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
40805 return;
40806 }
40807
40808diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
40809index ed2c3ec..deda85a 100644
40810--- a/drivers/oprofile/oprof.c
40811+++ b/drivers/oprofile/oprof.c
40812@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
40813 if (oprofile_ops.switch_events())
40814 return;
40815
40816- atomic_inc(&oprofile_stats.multiplex_counter);
40817+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
40818 start_switch_worker();
40819 }
40820
40821diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
40822index 84a208d..d61b0a1 100644
40823--- a/drivers/oprofile/oprofile_files.c
40824+++ b/drivers/oprofile/oprofile_files.c
40825@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
40826
40827 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
40828
40829-static ssize_t timeout_read(struct file *file, char __user *buf,
40830+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
40831 size_t count, loff_t *offset)
40832 {
40833 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
40834diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
40835index 917d28e..d62d981 100644
40836--- a/drivers/oprofile/oprofile_stats.c
40837+++ b/drivers/oprofile/oprofile_stats.c
40838@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
40839 cpu_buf->sample_invalid_eip = 0;
40840 }
40841
40842- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
40843- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
40844- atomic_set(&oprofile_stats.event_lost_overflow, 0);
40845- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
40846- atomic_set(&oprofile_stats.multiplex_counter, 0);
40847+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
40848+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
40849+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
40850+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
40851+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
40852 }
40853
40854
40855diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
40856index 38b6fc0..b5cbfce 100644
40857--- a/drivers/oprofile/oprofile_stats.h
40858+++ b/drivers/oprofile/oprofile_stats.h
40859@@ -13,11 +13,11 @@
40860 #include <linux/atomic.h>
40861
40862 struct oprofile_stat_struct {
40863- atomic_t sample_lost_no_mm;
40864- atomic_t sample_lost_no_mapping;
40865- atomic_t bt_lost_no_mapping;
40866- atomic_t event_lost_overflow;
40867- atomic_t multiplex_counter;
40868+ atomic_unchecked_t sample_lost_no_mm;
40869+ atomic_unchecked_t sample_lost_no_mapping;
40870+ atomic_unchecked_t bt_lost_no_mapping;
40871+ atomic_unchecked_t event_lost_overflow;
40872+ atomic_unchecked_t multiplex_counter;
40873 };
40874
40875 extern struct oprofile_stat_struct oprofile_stats;
40876diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
40877index 849357c..b83c1e0 100644
40878--- a/drivers/oprofile/oprofilefs.c
40879+++ b/drivers/oprofile/oprofilefs.c
40880@@ -185,7 +185,7 @@ static const struct file_operations atomic_ro_fops = {
40881
40882
40883 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
40884- char const *name, atomic_t *val)
40885+ char const *name, atomic_unchecked_t *val)
40886 {
40887 return __oprofilefs_create_file(sb, root, name,
40888 &atomic_ro_fops, 0444, val);
40889diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
40890index 93404f7..4a313d8 100644
40891--- a/drivers/oprofile/timer_int.c
40892+++ b/drivers/oprofile/timer_int.c
40893@@ -93,7 +93,7 @@ static int __cpuinit oprofile_cpu_notify(struct notifier_block *self,
40894 return NOTIFY_OK;
40895 }
40896
40897-static struct notifier_block __refdata oprofile_cpu_notifier = {
40898+static struct notifier_block oprofile_cpu_notifier = {
40899 .notifier_call = oprofile_cpu_notify,
40900 };
40901
40902diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
40903index 3f56bc0..707d642 100644
40904--- a/drivers/parport/procfs.c
40905+++ b/drivers/parport/procfs.c
40906@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
40907
40908 *ppos += len;
40909
40910- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
40911+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
40912 }
40913
40914 #ifdef CONFIG_PARPORT_1284
40915@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
40916
40917 *ppos += len;
40918
40919- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
40920+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
40921 }
40922 #endif /* IEEE1284.3 support. */
40923
40924diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
40925index c35e8ad..fc33beb 100644
40926--- a/drivers/pci/hotplug/acpiphp_ibm.c
40927+++ b/drivers/pci/hotplug/acpiphp_ibm.c
40928@@ -464,7 +464,9 @@ static int __init ibm_acpiphp_init(void)
40929 goto init_cleanup;
40930 }
40931
40932- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
40933+ pax_open_kernel();
40934+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
40935+ pax_close_kernel();
40936 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
40937
40938 return retval;
40939diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
40940index a6a71c4..c91097b 100644
40941--- a/drivers/pci/hotplug/cpcihp_generic.c
40942+++ b/drivers/pci/hotplug/cpcihp_generic.c
40943@@ -73,7 +73,6 @@ static u16 port;
40944 static unsigned int enum_bit;
40945 static u8 enum_mask;
40946
40947-static struct cpci_hp_controller_ops generic_hpc_ops;
40948 static struct cpci_hp_controller generic_hpc;
40949
40950 static int __init validate_parameters(void)
40951@@ -139,6 +138,10 @@ static int query_enum(void)
40952 return ((value & enum_mask) == enum_mask);
40953 }
40954
40955+static struct cpci_hp_controller_ops generic_hpc_ops = {
40956+ .query_enum = query_enum,
40957+};
40958+
40959 static int __init cpcihp_generic_init(void)
40960 {
40961 int status;
40962@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
40963 pci_dev_put(dev);
40964
40965 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
40966- generic_hpc_ops.query_enum = query_enum;
40967 generic_hpc.ops = &generic_hpc_ops;
40968
40969 status = cpci_hp_register_controller(&generic_hpc);
40970diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
40971index 449b4bb..257e2e8 100644
40972--- a/drivers/pci/hotplug/cpcihp_zt5550.c
40973+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
40974@@ -59,7 +59,6 @@
40975 /* local variables */
40976 static bool debug;
40977 static bool poll;
40978-static struct cpci_hp_controller_ops zt5550_hpc_ops;
40979 static struct cpci_hp_controller zt5550_hpc;
40980
40981 /* Primary cPCI bus bridge device */
40982@@ -205,6 +204,10 @@ static int zt5550_hc_disable_irq(void)
40983 return 0;
40984 }
40985
40986+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
40987+ .query_enum = zt5550_hc_query_enum,
40988+};
40989+
40990 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
40991 {
40992 int status;
40993@@ -216,16 +219,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
40994 dbg("returned from zt5550_hc_config");
40995
40996 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
40997- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
40998 zt5550_hpc.ops = &zt5550_hpc_ops;
40999 if(!poll) {
41000 zt5550_hpc.irq = hc_dev->irq;
41001 zt5550_hpc.irq_flags = IRQF_SHARED;
41002 zt5550_hpc.dev_id = hc_dev;
41003
41004- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
41005- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
41006- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
41007+ pax_open_kernel();
41008+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
41009+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
41010+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
41011+ pax_open_kernel();
41012 } else {
41013 info("using ENUM# polling mode");
41014 }
41015diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
41016index 76ba8a1..20ca857 100644
41017--- a/drivers/pci/hotplug/cpqphp_nvram.c
41018+++ b/drivers/pci/hotplug/cpqphp_nvram.c
41019@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
41020
41021 void compaq_nvram_init (void __iomem *rom_start)
41022 {
41023+
41024+#ifndef CONFIG_PAX_KERNEXEC
41025 if (rom_start) {
41026 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
41027 }
41028+#endif
41029+
41030 dbg("int15 entry = %p\n", compaq_int15_entry_point);
41031
41032 /* initialize our int15 lock */
41033diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
41034index 202f4a9..8ee47d0 100644
41035--- a/drivers/pci/hotplug/pci_hotplug_core.c
41036+++ b/drivers/pci/hotplug/pci_hotplug_core.c
41037@@ -448,8 +448,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
41038 return -EINVAL;
41039 }
41040
41041- slot->ops->owner = owner;
41042- slot->ops->mod_name = mod_name;
41043+ pax_open_kernel();
41044+ *(struct module **)&slot->ops->owner = owner;
41045+ *(const char **)&slot->ops->mod_name = mod_name;
41046+ pax_close_kernel();
41047
41048 mutex_lock(&pci_hp_mutex);
41049 /*
41050diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
41051index 939bd1d..a1459c9 100644
41052--- a/drivers/pci/hotplug/pciehp_core.c
41053+++ b/drivers/pci/hotplug/pciehp_core.c
41054@@ -91,7 +91,7 @@ static int init_slot(struct controller *ctrl)
41055 struct slot *slot = ctrl->slot;
41056 struct hotplug_slot *hotplug = NULL;
41057 struct hotplug_slot_info *info = NULL;
41058- struct hotplug_slot_ops *ops = NULL;
41059+ hotplug_slot_ops_no_const *ops = NULL;
41060 char name[SLOT_NAME_SIZE];
41061 int retval = -ENOMEM;
41062
41063diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
41064index 9c6e9bb..2916736 100644
41065--- a/drivers/pci/pci-sysfs.c
41066+++ b/drivers/pci/pci-sysfs.c
41067@@ -1071,7 +1071,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
41068 {
41069 /* allocate attribute structure, piggyback attribute name */
41070 int name_len = write_combine ? 13 : 10;
41071- struct bin_attribute *res_attr;
41072+ bin_attribute_no_const *res_attr;
41073 int retval;
41074
41075 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
41076@@ -1256,7 +1256,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
41077 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
41078 {
41079 int retval;
41080- struct bin_attribute *attr;
41081+ bin_attribute_no_const *attr;
41082
41083 /* If the device has VPD, try to expose it in sysfs. */
41084 if (dev->vpd) {
41085@@ -1303,7 +1303,7 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
41086 {
41087 int retval;
41088 int rom_size = 0;
41089- struct bin_attribute *attr;
41090+ bin_attribute_no_const *attr;
41091
41092 if (!sysfs_initialized)
41093 return -EACCES;
41094diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
41095index e851829..a1a7196 100644
41096--- a/drivers/pci/pci.h
41097+++ b/drivers/pci/pci.h
41098@@ -98,7 +98,7 @@ struct pci_vpd_ops {
41099 struct pci_vpd {
41100 unsigned int len;
41101 const struct pci_vpd_ops *ops;
41102- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
41103+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
41104 };
41105
41106 extern int pci_vpd_pci22_init(struct pci_dev *dev);
41107diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
41108index 8474b6a..ee81993 100644
41109--- a/drivers/pci/pcie/aspm.c
41110+++ b/drivers/pci/pcie/aspm.c
41111@@ -27,9 +27,9 @@
41112 #define MODULE_PARAM_PREFIX "pcie_aspm."
41113
41114 /* Note: those are not register definitions */
41115-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
41116-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
41117-#define ASPM_STATE_L1 (4) /* L1 state */
41118+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
41119+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
41120+#define ASPM_STATE_L1 (4U) /* L1 state */
41121 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
41122 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
41123
41124diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
41125index 6186f03..1a78714 100644
41126--- a/drivers/pci/probe.c
41127+++ b/drivers/pci/probe.c
41128@@ -173,7 +173,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
41129 struct pci_bus_region region;
41130 bool bar_too_big = false, bar_disabled = false;
41131
41132- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
41133+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
41134
41135 /* No printks while decoding is disabled! */
41136 if (!dev->mmio_always_on) {
41137diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
41138index 9b8505c..f00870a 100644
41139--- a/drivers/pci/proc.c
41140+++ b/drivers/pci/proc.c
41141@@ -465,7 +465,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
41142 static int __init pci_proc_init(void)
41143 {
41144 struct pci_dev *dev = NULL;
41145+
41146+#ifdef CONFIG_GRKERNSEC_PROC_ADD
41147+#ifdef CONFIG_GRKERNSEC_PROC_USER
41148+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
41149+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
41150+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
41151+#endif
41152+#else
41153 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
41154+#endif
41155 proc_create("devices", 0, proc_bus_pci_dir,
41156 &proc_bus_pci_dev_operations);
41157 proc_initialized = 1;
41158diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
41159index 2111dbb..79e434b 100644
41160--- a/drivers/platform/x86/msi-laptop.c
41161+++ b/drivers/platform/x86/msi-laptop.c
41162@@ -820,12 +820,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
41163 int result;
41164
41165 /* allow userland write sysfs file */
41166- dev_attr_bluetooth.store = store_bluetooth;
41167- dev_attr_wlan.store = store_wlan;
41168- dev_attr_threeg.store = store_threeg;
41169- dev_attr_bluetooth.attr.mode |= S_IWUSR;
41170- dev_attr_wlan.attr.mode |= S_IWUSR;
41171- dev_attr_threeg.attr.mode |= S_IWUSR;
41172+ pax_open_kernel();
41173+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
41174+ *(void **)&dev_attr_wlan.store = store_wlan;
41175+ *(void **)&dev_attr_threeg.store = store_threeg;
41176+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
41177+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
41178+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
41179+ pax_close_kernel();
41180
41181 /* disable hardware control by fn key */
41182 result = ec_read(MSI_STANDARD_EC_SCM_LOAD_ADDRESS, &data);
41183diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
41184index 0fe987f..6f3d5c3 100644
41185--- a/drivers/platform/x86/sony-laptop.c
41186+++ b/drivers/platform/x86/sony-laptop.c
41187@@ -2356,7 +2356,7 @@ static void sony_nc_lid_resume_cleanup(struct platform_device *pd)
41188 }
41189
41190 /* High speed charging function */
41191-static struct device_attribute *hsc_handle;
41192+static device_attribute_no_const *hsc_handle;
41193
41194 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
41195 struct device_attribute *attr,
41196diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
41197index f946ca7..f25c833 100644
41198--- a/drivers/platform/x86/thinkpad_acpi.c
41199+++ b/drivers/platform/x86/thinkpad_acpi.c
41200@@ -2097,7 +2097,7 @@ static int hotkey_mask_get(void)
41201 return 0;
41202 }
41203
41204-void static hotkey_mask_warn_incomplete_mask(void)
41205+static void hotkey_mask_warn_incomplete_mask(void)
41206 {
41207 /* log only what the user can fix... */
41208 const u32 wantedmask = hotkey_driver_mask &
41209@@ -2328,11 +2328,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
41210 }
41211 }
41212
41213-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
41214- struct tp_nvram_state *newn,
41215- const u32 event_mask)
41216-{
41217-
41218 #define TPACPI_COMPARE_KEY(__scancode, __member) \
41219 do { \
41220 if ((event_mask & (1 << __scancode)) && \
41221@@ -2346,36 +2341,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
41222 tpacpi_hotkey_send_key(__scancode); \
41223 } while (0)
41224
41225- void issue_volchange(const unsigned int oldvol,
41226- const unsigned int newvol)
41227- {
41228- unsigned int i = oldvol;
41229+static void issue_volchange(const unsigned int oldvol,
41230+ const unsigned int newvol,
41231+ const u32 event_mask)
41232+{
41233+ unsigned int i = oldvol;
41234
41235- while (i > newvol) {
41236- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
41237- i--;
41238- }
41239- while (i < newvol) {
41240- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
41241- i++;
41242- }
41243+ while (i > newvol) {
41244+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
41245+ i--;
41246 }
41247+ while (i < newvol) {
41248+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
41249+ i++;
41250+ }
41251+}
41252
41253- void issue_brightnesschange(const unsigned int oldbrt,
41254- const unsigned int newbrt)
41255- {
41256- unsigned int i = oldbrt;
41257+static void issue_brightnesschange(const unsigned int oldbrt,
41258+ const unsigned int newbrt,
41259+ const u32 event_mask)
41260+{
41261+ unsigned int i = oldbrt;
41262
41263- while (i > newbrt) {
41264- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
41265- i--;
41266- }
41267- while (i < newbrt) {
41268- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
41269- i++;
41270- }
41271+ while (i > newbrt) {
41272+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
41273+ i--;
41274+ }
41275+ while (i < newbrt) {
41276+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
41277+ i++;
41278 }
41279+}
41280
41281+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
41282+ struct tp_nvram_state *newn,
41283+ const u32 event_mask)
41284+{
41285 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
41286 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
41287 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
41288@@ -2409,7 +2410,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
41289 oldn->volume_level != newn->volume_level) {
41290 /* recently muted, or repeated mute keypress, or
41291 * multiple presses ending in mute */
41292- issue_volchange(oldn->volume_level, newn->volume_level);
41293+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
41294 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
41295 }
41296 } else {
41297@@ -2419,7 +2420,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
41298 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
41299 }
41300 if (oldn->volume_level != newn->volume_level) {
41301- issue_volchange(oldn->volume_level, newn->volume_level);
41302+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
41303 } else if (oldn->volume_toggle != newn->volume_toggle) {
41304 /* repeated vol up/down keypress at end of scale ? */
41305 if (newn->volume_level == 0)
41306@@ -2432,7 +2433,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
41307 /* handle brightness */
41308 if (oldn->brightness_level != newn->brightness_level) {
41309 issue_brightnesschange(oldn->brightness_level,
41310- newn->brightness_level);
41311+ newn->brightness_level,
41312+ event_mask);
41313 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
41314 /* repeated key presses that didn't change state */
41315 if (newn->brightness_level == 0)
41316@@ -2441,10 +2443,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
41317 && !tp_features.bright_unkfw)
41318 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
41319 }
41320+}
41321
41322 #undef TPACPI_COMPARE_KEY
41323 #undef TPACPI_MAY_SEND_KEY
41324-}
41325
41326 /*
41327 * Polling driver
41328diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
41329index 769d265..a3a05ca 100644
41330--- a/drivers/pnp/pnpbios/bioscalls.c
41331+++ b/drivers/pnp/pnpbios/bioscalls.c
41332@@ -58,7 +58,7 @@ do { \
41333 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
41334 } while(0)
41335
41336-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
41337+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
41338 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
41339
41340 /*
41341@@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
41342
41343 cpu = get_cpu();
41344 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
41345+
41346+ pax_open_kernel();
41347 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
41348+ pax_close_kernel();
41349
41350 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
41351 spin_lock_irqsave(&pnp_bios_lock, flags);
41352@@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
41353 :"memory");
41354 spin_unlock_irqrestore(&pnp_bios_lock, flags);
41355
41356+ pax_open_kernel();
41357 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
41358+ pax_close_kernel();
41359+
41360 put_cpu();
41361
41362 /* If we get here and this is set then the PnP BIOS faulted on us. */
41363@@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
41364 return status;
41365 }
41366
41367-void pnpbios_calls_init(union pnp_bios_install_struct *header)
41368+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
41369 {
41370 int i;
41371
41372@@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
41373 pnp_bios_callpoint.offset = header->fields.pm16offset;
41374 pnp_bios_callpoint.segment = PNP_CS16;
41375
41376+ pax_open_kernel();
41377+
41378 for_each_possible_cpu(i) {
41379 struct desc_struct *gdt = get_cpu_gdt_table(i);
41380 if (!gdt)
41381@@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
41382 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
41383 (unsigned long)__va(header->fields.pm16dseg));
41384 }
41385+
41386+ pax_close_kernel();
41387 }
41388diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
41389index 3e6db1c..1fbbdae 100644
41390--- a/drivers/pnp/resource.c
41391+++ b/drivers/pnp/resource.c
41392@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
41393 return 1;
41394
41395 /* check if the resource is valid */
41396- if (*irq < 0 || *irq > 15)
41397+ if (*irq > 15)
41398 return 0;
41399
41400 /* check if the resource is reserved */
41401@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
41402 return 1;
41403
41404 /* check if the resource is valid */
41405- if (*dma < 0 || *dma == 4 || *dma > 7)
41406+ if (*dma == 4 || *dma > 7)
41407 return 0;
41408
41409 /* check if the resource is reserved */
41410diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
41411index 7df7c5f..bd48c47 100644
41412--- a/drivers/power/pda_power.c
41413+++ b/drivers/power/pda_power.c
41414@@ -37,7 +37,11 @@ static int polling;
41415
41416 #ifdef CONFIG_USB_OTG_UTILS
41417 static struct usb_phy *transceiver;
41418-static struct notifier_block otg_nb;
41419+static int otg_handle_notification(struct notifier_block *nb,
41420+ unsigned long event, void *unused);
41421+static struct notifier_block otg_nb = {
41422+ .notifier_call = otg_handle_notification
41423+};
41424 #endif
41425
41426 static struct regulator *ac_draw;
41427@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
41428
41429 #ifdef CONFIG_USB_OTG_UTILS
41430 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
41431- otg_nb.notifier_call = otg_handle_notification;
41432 ret = usb_register_notifier(transceiver, &otg_nb);
41433 if (ret) {
41434 dev_err(dev, "failure to register otg notifier\n");
41435diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
41436index cc439fd..8fa30df 100644
41437--- a/drivers/power/power_supply.h
41438+++ b/drivers/power/power_supply.h
41439@@ -16,12 +16,12 @@ struct power_supply;
41440
41441 #ifdef CONFIG_SYSFS
41442
41443-extern void power_supply_init_attrs(struct device_type *dev_type);
41444+extern void power_supply_init_attrs(void);
41445 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
41446
41447 #else
41448
41449-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
41450+static inline void power_supply_init_attrs(void) {}
41451 #define power_supply_uevent NULL
41452
41453 #endif /* CONFIG_SYSFS */
41454diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
41455index 8a7cfb3..72e6e9b 100644
41456--- a/drivers/power/power_supply_core.c
41457+++ b/drivers/power/power_supply_core.c
41458@@ -24,7 +24,10 @@
41459 struct class *power_supply_class;
41460 EXPORT_SYMBOL_GPL(power_supply_class);
41461
41462-static struct device_type power_supply_dev_type;
41463+extern const struct attribute_group *power_supply_attr_groups[];
41464+static struct device_type power_supply_dev_type = {
41465+ .groups = power_supply_attr_groups,
41466+};
41467
41468 static int __power_supply_changed_work(struct device *dev, void *data)
41469 {
41470@@ -393,7 +396,7 @@ static int __init power_supply_class_init(void)
41471 return PTR_ERR(power_supply_class);
41472
41473 power_supply_class->dev_uevent = power_supply_uevent;
41474- power_supply_init_attrs(&power_supply_dev_type);
41475+ power_supply_init_attrs();
41476
41477 return 0;
41478 }
41479diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
41480index 40fa3b7..d9c2e0e 100644
41481--- a/drivers/power/power_supply_sysfs.c
41482+++ b/drivers/power/power_supply_sysfs.c
41483@@ -229,17 +229,15 @@ static struct attribute_group power_supply_attr_group = {
41484 .is_visible = power_supply_attr_is_visible,
41485 };
41486
41487-static const struct attribute_group *power_supply_attr_groups[] = {
41488+const struct attribute_group *power_supply_attr_groups[] = {
41489 &power_supply_attr_group,
41490 NULL,
41491 };
41492
41493-void power_supply_init_attrs(struct device_type *dev_type)
41494+void power_supply_init_attrs(void)
41495 {
41496 int i;
41497
41498- dev_type->groups = power_supply_attr_groups;
41499-
41500 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
41501 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
41502 }
41503diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
41504index 4d7c635..9860196 100644
41505--- a/drivers/regulator/max8660.c
41506+++ b/drivers/regulator/max8660.c
41507@@ -333,8 +333,10 @@ static int max8660_probe(struct i2c_client *client,
41508 max8660->shadow_regs[MAX8660_OVER1] = 5;
41509 } else {
41510 /* Otherwise devices can be toggled via software */
41511- max8660_dcdc_ops.enable = max8660_dcdc_enable;
41512- max8660_dcdc_ops.disable = max8660_dcdc_disable;
41513+ pax_open_kernel();
41514+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
41515+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
41516+ pax_close_kernel();
41517 }
41518
41519 /*
41520diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
41521index 9a8ea91..c483dd9 100644
41522--- a/drivers/regulator/max8973-regulator.c
41523+++ b/drivers/regulator/max8973-regulator.c
41524@@ -401,9 +401,11 @@ static int max8973_probe(struct i2c_client *client,
41525 if (!pdata->enable_ext_control) {
41526 max->desc.enable_reg = MAX8973_VOUT;
41527 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
41528- max8973_dcdc_ops.enable = regulator_enable_regmap;
41529- max8973_dcdc_ops.disable = regulator_disable_regmap;
41530- max8973_dcdc_ops.is_enabled = regulator_is_enabled_regmap;
41531+ pax_open_kernel();
41532+ *(void **)&max8973_dcdc_ops.enable = regulator_enable_regmap;
41533+ *(void **)&max8973_dcdc_ops.disable = regulator_disable_regmap;
41534+ *(void **)&max8973_dcdc_ops.is_enabled = regulator_is_enabled_regmap;
41535+ pax_close_kernel();
41536 }
41537
41538 max->enable_external_control = pdata->enable_ext_control;
41539diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
41540index 0d84b1f..c2da6ac 100644
41541--- a/drivers/regulator/mc13892-regulator.c
41542+++ b/drivers/regulator/mc13892-regulator.c
41543@@ -540,10 +540,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
41544 }
41545 mc13xxx_unlock(mc13892);
41546
41547- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
41548+ pax_open_kernel();
41549+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
41550 = mc13892_vcam_set_mode;
41551- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
41552+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
41553 = mc13892_vcam_get_mode;
41554+ pax_close_kernel();
41555
41556 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
41557 ARRAY_SIZE(mc13892_regulators));
41558diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
41559index 16630aa..6afc992 100644
41560--- a/drivers/rtc/rtc-cmos.c
41561+++ b/drivers/rtc/rtc-cmos.c
41562@@ -724,7 +724,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
41563 hpet_rtc_timer_init();
41564
41565 /* export at least the first block of NVRAM */
41566- nvram.size = address_space - NVRAM_OFFSET;
41567+ pax_open_kernel();
41568+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
41569+ pax_close_kernel();
41570 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
41571 if (retval < 0) {
41572 dev_dbg(dev, "can't create nvram file? %d\n", retval);
41573diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
41574index 9a86b4b..3a383dc 100644
41575--- a/drivers/rtc/rtc-dev.c
41576+++ b/drivers/rtc/rtc-dev.c
41577@@ -14,6 +14,7 @@
41578 #include <linux/module.h>
41579 #include <linux/rtc.h>
41580 #include <linux/sched.h>
41581+#include <linux/grsecurity.h>
41582 #include "rtc-core.h"
41583
41584 static dev_t rtc_devt;
41585@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
41586 if (copy_from_user(&tm, uarg, sizeof(tm)))
41587 return -EFAULT;
41588
41589+ gr_log_timechange();
41590+
41591 return rtc_set_time(rtc, &tm);
41592
41593 case RTC_PIE_ON:
41594diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
41595index e0d0ba4..3c65868 100644
41596--- a/drivers/rtc/rtc-ds1307.c
41597+++ b/drivers/rtc/rtc-ds1307.c
41598@@ -106,7 +106,7 @@ struct ds1307 {
41599 u8 offset; /* register's offset */
41600 u8 regs[11];
41601 u16 nvram_offset;
41602- struct bin_attribute *nvram;
41603+ bin_attribute_no_const *nvram;
41604 enum ds_type type;
41605 unsigned long flags;
41606 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
41607diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
41608index 130f29a..6179d03 100644
41609--- a/drivers/rtc/rtc-m48t59.c
41610+++ b/drivers/rtc/rtc-m48t59.c
41611@@ -482,7 +482,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
41612 goto out;
41613 }
41614
41615- m48t59_nvram_attr.size = pdata->offset;
41616+ pax_open_kernel();
41617+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
41618+ pax_close_kernel();
41619
41620 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
41621 if (ret) {
41622diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
41623index e693af6..2e525b6 100644
41624--- a/drivers/scsi/bfa/bfa_fcpim.h
41625+++ b/drivers/scsi/bfa/bfa_fcpim.h
41626@@ -36,7 +36,7 @@ struct bfa_iotag_s {
41627
41628 struct bfa_itn_s {
41629 bfa_isr_func_t isr;
41630-};
41631+} __no_const;
41632
41633 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
41634 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
41635diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
41636index 23a90e7..9cf04ee 100644
41637--- a/drivers/scsi/bfa/bfa_ioc.h
41638+++ b/drivers/scsi/bfa/bfa_ioc.h
41639@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
41640 bfa_ioc_disable_cbfn_t disable_cbfn;
41641 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
41642 bfa_ioc_reset_cbfn_t reset_cbfn;
41643-};
41644+} __no_const;
41645
41646 /*
41647 * IOC event notification mechanism.
41648@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
41649 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
41650 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
41651 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
41652-};
41653+} __no_const;
41654
41655 /*
41656 * Queue element to wait for room in request queue. FIFO order is
41657diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
41658index 593085a..47aa999 100644
41659--- a/drivers/scsi/hosts.c
41660+++ b/drivers/scsi/hosts.c
41661@@ -42,7 +42,7 @@
41662 #include "scsi_logging.h"
41663
41664
41665-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
41666+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
41667
41668
41669 static void scsi_host_cls_release(struct device *dev)
41670@@ -361,7 +361,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
41671 * subtract one because we increment first then return, but we need to
41672 * know what the next host number was before increment
41673 */
41674- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
41675+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
41676 shost->dma_channel = 0xff;
41677
41678 /* These three are default values which can be overridden */
41679diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
41680index 4f33806..afd6f60 100644
41681--- a/drivers/scsi/hpsa.c
41682+++ b/drivers/scsi/hpsa.c
41683@@ -554,7 +554,7 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
41684 unsigned long flags;
41685
41686 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
41687- return h->access.command_completed(h, q);
41688+ return h->access->command_completed(h, q);
41689
41690 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
41691 a = rq->head[rq->current_entry];
41692@@ -3374,7 +3374,7 @@ static void start_io(struct ctlr_info *h)
41693 while (!list_empty(&h->reqQ)) {
41694 c = list_entry(h->reqQ.next, struct CommandList, list);
41695 /* can't do anything if fifo is full */
41696- if ((h->access.fifo_full(h))) {
41697+ if ((h->access->fifo_full(h))) {
41698 dev_warn(&h->pdev->dev, "fifo full\n");
41699 break;
41700 }
41701@@ -3396,7 +3396,7 @@ static void start_io(struct ctlr_info *h)
41702
41703 /* Tell the controller execute command */
41704 spin_unlock_irqrestore(&h->lock, flags);
41705- h->access.submit_command(h, c);
41706+ h->access->submit_command(h, c);
41707 spin_lock_irqsave(&h->lock, flags);
41708 }
41709 spin_unlock_irqrestore(&h->lock, flags);
41710@@ -3404,17 +3404,17 @@ static void start_io(struct ctlr_info *h)
41711
41712 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
41713 {
41714- return h->access.command_completed(h, q);
41715+ return h->access->command_completed(h, q);
41716 }
41717
41718 static inline bool interrupt_pending(struct ctlr_info *h)
41719 {
41720- return h->access.intr_pending(h);
41721+ return h->access->intr_pending(h);
41722 }
41723
41724 static inline long interrupt_not_for_us(struct ctlr_info *h)
41725 {
41726- return (h->access.intr_pending(h) == 0) ||
41727+ return (h->access->intr_pending(h) == 0) ||
41728 (h->interrupts_enabled == 0);
41729 }
41730
41731@@ -4316,7 +4316,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
41732 if (prod_index < 0)
41733 return -ENODEV;
41734 h->product_name = products[prod_index].product_name;
41735- h->access = *(products[prod_index].access);
41736+ h->access = products[prod_index].access;
41737
41738 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
41739 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
41740@@ -4598,7 +4598,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
41741
41742 assert_spin_locked(&lockup_detector_lock);
41743 remove_ctlr_from_lockup_detector_list(h);
41744- h->access.set_intr_mask(h, HPSA_INTR_OFF);
41745+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
41746 spin_lock_irqsave(&h->lock, flags);
41747 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
41748 spin_unlock_irqrestore(&h->lock, flags);
41749@@ -4775,7 +4775,7 @@ reinit_after_soft_reset:
41750 }
41751
41752 /* make sure the board interrupts are off */
41753- h->access.set_intr_mask(h, HPSA_INTR_OFF);
41754+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
41755
41756 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
41757 goto clean2;
41758@@ -4809,7 +4809,7 @@ reinit_after_soft_reset:
41759 * fake ones to scoop up any residual completions.
41760 */
41761 spin_lock_irqsave(&h->lock, flags);
41762- h->access.set_intr_mask(h, HPSA_INTR_OFF);
41763+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
41764 spin_unlock_irqrestore(&h->lock, flags);
41765 free_irqs(h);
41766 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
41767@@ -4828,9 +4828,9 @@ reinit_after_soft_reset:
41768 dev_info(&h->pdev->dev, "Board READY.\n");
41769 dev_info(&h->pdev->dev,
41770 "Waiting for stale completions to drain.\n");
41771- h->access.set_intr_mask(h, HPSA_INTR_ON);
41772+ h->access->set_intr_mask(h, HPSA_INTR_ON);
41773 msleep(10000);
41774- h->access.set_intr_mask(h, HPSA_INTR_OFF);
41775+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
41776
41777 rc = controller_reset_failed(h->cfgtable);
41778 if (rc)
41779@@ -4851,7 +4851,7 @@ reinit_after_soft_reset:
41780 }
41781
41782 /* Turn the interrupts on so we can service requests */
41783- h->access.set_intr_mask(h, HPSA_INTR_ON);
41784+ h->access->set_intr_mask(h, HPSA_INTR_ON);
41785
41786 hpsa_hba_inquiry(h);
41787 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
41788@@ -4903,7 +4903,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
41789 * To write all data in the battery backed cache to disks
41790 */
41791 hpsa_flush_cache(h);
41792- h->access.set_intr_mask(h, HPSA_INTR_OFF);
41793+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
41794 hpsa_free_irqs_and_disable_msix(h);
41795 }
41796
41797@@ -5071,7 +5071,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
41798 return;
41799 }
41800 /* Change the access methods to the performant access methods */
41801- h->access = SA5_performant_access;
41802+ h->access = &SA5_performant_access;
41803 h->transMethod = CFGTBL_Trans_Performant;
41804 }
41805
41806diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
41807index 9816479..c5d4e97 100644
41808--- a/drivers/scsi/hpsa.h
41809+++ b/drivers/scsi/hpsa.h
41810@@ -79,7 +79,7 @@ struct ctlr_info {
41811 unsigned int msix_vector;
41812 unsigned int msi_vector;
41813 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
41814- struct access_method access;
41815+ struct access_method *access;
41816
41817 /* queue and queue Info */
41818 struct list_head reqQ;
41819diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
41820index c772d8d..35c362c 100644
41821--- a/drivers/scsi/libfc/fc_exch.c
41822+++ b/drivers/scsi/libfc/fc_exch.c
41823@@ -100,12 +100,12 @@ struct fc_exch_mgr {
41824 u16 pool_max_index;
41825
41826 struct {
41827- atomic_t no_free_exch;
41828- atomic_t no_free_exch_xid;
41829- atomic_t xid_not_found;
41830- atomic_t xid_busy;
41831- atomic_t seq_not_found;
41832- atomic_t non_bls_resp;
41833+ atomic_unchecked_t no_free_exch;
41834+ atomic_unchecked_t no_free_exch_xid;
41835+ atomic_unchecked_t xid_not_found;
41836+ atomic_unchecked_t xid_busy;
41837+ atomic_unchecked_t seq_not_found;
41838+ atomic_unchecked_t non_bls_resp;
41839 } stats;
41840 };
41841
41842@@ -725,7 +725,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
41843 /* allocate memory for exchange */
41844 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
41845 if (!ep) {
41846- atomic_inc(&mp->stats.no_free_exch);
41847+ atomic_inc_unchecked(&mp->stats.no_free_exch);
41848 goto out;
41849 }
41850 memset(ep, 0, sizeof(*ep));
41851@@ -786,7 +786,7 @@ out:
41852 return ep;
41853 err:
41854 spin_unlock_bh(&pool->lock);
41855- atomic_inc(&mp->stats.no_free_exch_xid);
41856+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
41857 mempool_free(ep, mp->ep_pool);
41858 return NULL;
41859 }
41860@@ -929,7 +929,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41861 xid = ntohs(fh->fh_ox_id); /* we originated exch */
41862 ep = fc_exch_find(mp, xid);
41863 if (!ep) {
41864- atomic_inc(&mp->stats.xid_not_found);
41865+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41866 reject = FC_RJT_OX_ID;
41867 goto out;
41868 }
41869@@ -959,7 +959,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41870 ep = fc_exch_find(mp, xid);
41871 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
41872 if (ep) {
41873- atomic_inc(&mp->stats.xid_busy);
41874+ atomic_inc_unchecked(&mp->stats.xid_busy);
41875 reject = FC_RJT_RX_ID;
41876 goto rel;
41877 }
41878@@ -970,7 +970,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41879 }
41880 xid = ep->xid; /* get our XID */
41881 } else if (!ep) {
41882- atomic_inc(&mp->stats.xid_not_found);
41883+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41884 reject = FC_RJT_RX_ID; /* XID not found */
41885 goto out;
41886 }
41887@@ -987,7 +987,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41888 } else {
41889 sp = &ep->seq;
41890 if (sp->id != fh->fh_seq_id) {
41891- atomic_inc(&mp->stats.seq_not_found);
41892+ atomic_inc_unchecked(&mp->stats.seq_not_found);
41893 if (f_ctl & FC_FC_END_SEQ) {
41894 /*
41895 * Update sequence_id based on incoming last
41896@@ -1437,22 +1437,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41897
41898 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
41899 if (!ep) {
41900- atomic_inc(&mp->stats.xid_not_found);
41901+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41902 goto out;
41903 }
41904 if (ep->esb_stat & ESB_ST_COMPLETE) {
41905- atomic_inc(&mp->stats.xid_not_found);
41906+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41907 goto rel;
41908 }
41909 if (ep->rxid == FC_XID_UNKNOWN)
41910 ep->rxid = ntohs(fh->fh_rx_id);
41911 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
41912- atomic_inc(&mp->stats.xid_not_found);
41913+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41914 goto rel;
41915 }
41916 if (ep->did != ntoh24(fh->fh_s_id) &&
41917 ep->did != FC_FID_FLOGI) {
41918- atomic_inc(&mp->stats.xid_not_found);
41919+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41920 goto rel;
41921 }
41922 sof = fr_sof(fp);
41923@@ -1461,7 +1461,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41924 sp->ssb_stat |= SSB_ST_RESP;
41925 sp->id = fh->fh_seq_id;
41926 } else if (sp->id != fh->fh_seq_id) {
41927- atomic_inc(&mp->stats.seq_not_found);
41928+ atomic_inc_unchecked(&mp->stats.seq_not_found);
41929 goto rel;
41930 }
41931
41932@@ -1525,9 +1525,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41933 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
41934
41935 if (!sp)
41936- atomic_inc(&mp->stats.xid_not_found);
41937+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41938 else
41939- atomic_inc(&mp->stats.non_bls_resp);
41940+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
41941
41942 fc_frame_free(fp);
41943 }
41944@@ -2174,13 +2174,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
41945
41946 list_for_each_entry(ema, &lport->ema_list, ema_list) {
41947 mp = ema->mp;
41948- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
41949+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
41950 st->fc_no_free_exch_xid +=
41951- atomic_read(&mp->stats.no_free_exch_xid);
41952- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
41953- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
41954- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
41955- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
41956+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
41957+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
41958+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
41959+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
41960+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
41961 }
41962 }
41963 EXPORT_SYMBOL(fc_exch_update_stats);
41964diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
41965index bdb81cd..d3c7c2c 100644
41966--- a/drivers/scsi/libsas/sas_ata.c
41967+++ b/drivers/scsi/libsas/sas_ata.c
41968@@ -554,7 +554,7 @@ static struct ata_port_operations sas_sata_ops = {
41969 .postreset = ata_std_postreset,
41970 .error_handler = ata_std_error_handler,
41971 .post_internal_cmd = sas_ata_post_internal,
41972- .qc_defer = ata_std_qc_defer,
41973+ .qc_defer = ata_std_qc_defer,
41974 .qc_prep = ata_noop_qc_prep,
41975 .qc_issue = sas_ata_qc_issue,
41976 .qc_fill_rtf = sas_ata_qc_fill_rtf,
41977diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
41978index df4c13a..a51e90c 100644
41979--- a/drivers/scsi/lpfc/lpfc.h
41980+++ b/drivers/scsi/lpfc/lpfc.h
41981@@ -424,7 +424,7 @@ struct lpfc_vport {
41982 struct dentry *debug_nodelist;
41983 struct dentry *vport_debugfs_root;
41984 struct lpfc_debugfs_trc *disc_trc;
41985- atomic_t disc_trc_cnt;
41986+ atomic_unchecked_t disc_trc_cnt;
41987 #endif
41988 uint8_t stat_data_enabled;
41989 uint8_t stat_data_blocked;
41990@@ -842,8 +842,8 @@ struct lpfc_hba {
41991 struct timer_list fabric_block_timer;
41992 unsigned long bit_flags;
41993 #define FABRIC_COMANDS_BLOCKED 0
41994- atomic_t num_rsrc_err;
41995- atomic_t num_cmd_success;
41996+ atomic_unchecked_t num_rsrc_err;
41997+ atomic_unchecked_t num_cmd_success;
41998 unsigned long last_rsrc_error_time;
41999 unsigned long last_ramp_down_time;
42000 unsigned long last_ramp_up_time;
42001@@ -879,7 +879,7 @@ struct lpfc_hba {
42002
42003 struct dentry *debug_slow_ring_trc;
42004 struct lpfc_debugfs_trc *slow_ring_trc;
42005- atomic_t slow_ring_trc_cnt;
42006+ atomic_unchecked_t slow_ring_trc_cnt;
42007 /* iDiag debugfs sub-directory */
42008 struct dentry *idiag_root;
42009 struct dentry *idiag_pci_cfg;
42010diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
42011index f63f5ff..de29189 100644
42012--- a/drivers/scsi/lpfc/lpfc_debugfs.c
42013+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
42014@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
42015
42016 #include <linux/debugfs.h>
42017
42018-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
42019+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
42020 static unsigned long lpfc_debugfs_start_time = 0L;
42021
42022 /* iDiag */
42023@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
42024 lpfc_debugfs_enable = 0;
42025
42026 len = 0;
42027- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
42028+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
42029 (lpfc_debugfs_max_disc_trc - 1);
42030 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
42031 dtp = vport->disc_trc + i;
42032@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
42033 lpfc_debugfs_enable = 0;
42034
42035 len = 0;
42036- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
42037+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
42038 (lpfc_debugfs_max_slow_ring_trc - 1);
42039 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
42040 dtp = phba->slow_ring_trc + i;
42041@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
42042 !vport || !vport->disc_trc)
42043 return;
42044
42045- index = atomic_inc_return(&vport->disc_trc_cnt) &
42046+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
42047 (lpfc_debugfs_max_disc_trc - 1);
42048 dtp = vport->disc_trc + index;
42049 dtp->fmt = fmt;
42050 dtp->data1 = data1;
42051 dtp->data2 = data2;
42052 dtp->data3 = data3;
42053- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
42054+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
42055 dtp->jif = jiffies;
42056 #endif
42057 return;
42058@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
42059 !phba || !phba->slow_ring_trc)
42060 return;
42061
42062- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
42063+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
42064 (lpfc_debugfs_max_slow_ring_trc - 1);
42065 dtp = phba->slow_ring_trc + index;
42066 dtp->fmt = fmt;
42067 dtp->data1 = data1;
42068 dtp->data2 = data2;
42069 dtp->data3 = data3;
42070- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
42071+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
42072 dtp->jif = jiffies;
42073 #endif
42074 return;
42075@@ -4182,7 +4182,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
42076 "slow_ring buffer\n");
42077 goto debug_failed;
42078 }
42079- atomic_set(&phba->slow_ring_trc_cnt, 0);
42080+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
42081 memset(phba->slow_ring_trc, 0,
42082 (sizeof(struct lpfc_debugfs_trc) *
42083 lpfc_debugfs_max_slow_ring_trc));
42084@@ -4228,7 +4228,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
42085 "buffer\n");
42086 goto debug_failed;
42087 }
42088- atomic_set(&vport->disc_trc_cnt, 0);
42089+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
42090
42091 snprintf(name, sizeof(name), "discovery_trace");
42092 vport->debug_disc_trc =
42093diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
42094index 89ad558..76956c4 100644
42095--- a/drivers/scsi/lpfc/lpfc_init.c
42096+++ b/drivers/scsi/lpfc/lpfc_init.c
42097@@ -10618,8 +10618,10 @@ lpfc_init(void)
42098 "misc_register returned with status %d", error);
42099
42100 if (lpfc_enable_npiv) {
42101- lpfc_transport_functions.vport_create = lpfc_vport_create;
42102- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
42103+ pax_open_kernel();
42104+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
42105+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
42106+ pax_close_kernel();
42107 }
42108 lpfc_transport_template =
42109 fc_attach_transport(&lpfc_transport_functions);
42110diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
42111index 60e5a17..ff7a793 100644
42112--- a/drivers/scsi/lpfc/lpfc_scsi.c
42113+++ b/drivers/scsi/lpfc/lpfc_scsi.c
42114@@ -305,7 +305,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
42115 uint32_t evt_posted;
42116
42117 spin_lock_irqsave(&phba->hbalock, flags);
42118- atomic_inc(&phba->num_rsrc_err);
42119+ atomic_inc_unchecked(&phba->num_rsrc_err);
42120 phba->last_rsrc_error_time = jiffies;
42121
42122 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
42123@@ -346,7 +346,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
42124 unsigned long flags;
42125 struct lpfc_hba *phba = vport->phba;
42126 uint32_t evt_posted;
42127- atomic_inc(&phba->num_cmd_success);
42128+ atomic_inc_unchecked(&phba->num_cmd_success);
42129
42130 if (vport->cfg_lun_queue_depth <= queue_depth)
42131 return;
42132@@ -390,8 +390,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
42133 unsigned long num_rsrc_err, num_cmd_success;
42134 int i;
42135
42136- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
42137- num_cmd_success = atomic_read(&phba->num_cmd_success);
42138+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
42139+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
42140
42141 /*
42142 * The error and success command counters are global per
42143@@ -419,8 +419,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
42144 }
42145 }
42146 lpfc_destroy_vport_work_array(phba, vports);
42147- atomic_set(&phba->num_rsrc_err, 0);
42148- atomic_set(&phba->num_cmd_success, 0);
42149+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
42150+ atomic_set_unchecked(&phba->num_cmd_success, 0);
42151 }
42152
42153 /**
42154@@ -454,8 +454,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
42155 }
42156 }
42157 lpfc_destroy_vport_work_array(phba, vports);
42158- atomic_set(&phba->num_rsrc_err, 0);
42159- atomic_set(&phba->num_cmd_success, 0);
42160+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
42161+ atomic_set_unchecked(&phba->num_cmd_success, 0);
42162 }
42163
42164 /**
42165diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
42166index b46f5e9..c4c4ccb 100644
42167--- a/drivers/scsi/pmcraid.c
42168+++ b/drivers/scsi/pmcraid.c
42169@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
42170 res->scsi_dev = scsi_dev;
42171 scsi_dev->hostdata = res;
42172 res->change_detected = 0;
42173- atomic_set(&res->read_failures, 0);
42174- atomic_set(&res->write_failures, 0);
42175+ atomic_set_unchecked(&res->read_failures, 0);
42176+ atomic_set_unchecked(&res->write_failures, 0);
42177 rc = 0;
42178 }
42179 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
42180@@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
42181
42182 /* If this was a SCSI read/write command keep count of errors */
42183 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
42184- atomic_inc(&res->read_failures);
42185+ atomic_inc_unchecked(&res->read_failures);
42186 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
42187- atomic_inc(&res->write_failures);
42188+ atomic_inc_unchecked(&res->write_failures);
42189
42190 if (!RES_IS_GSCSI(res->cfg_entry) &&
42191 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
42192@@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
42193 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
42194 * hrrq_id assigned here in queuecommand
42195 */
42196- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
42197+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
42198 pinstance->num_hrrq;
42199 cmd->cmd_done = pmcraid_io_done;
42200
42201@@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
42202 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
42203 * hrrq_id assigned here in queuecommand
42204 */
42205- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
42206+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
42207 pinstance->num_hrrq;
42208
42209 if (request_size) {
42210@@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
42211
42212 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
42213 /* add resources only after host is added into system */
42214- if (!atomic_read(&pinstance->expose_resources))
42215+ if (!atomic_read_unchecked(&pinstance->expose_resources))
42216 return;
42217
42218 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
42219@@ -5324,8 +5324,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
42220 init_waitqueue_head(&pinstance->reset_wait_q);
42221
42222 atomic_set(&pinstance->outstanding_cmds, 0);
42223- atomic_set(&pinstance->last_message_id, 0);
42224- atomic_set(&pinstance->expose_resources, 0);
42225+ atomic_set_unchecked(&pinstance->last_message_id, 0);
42226+ atomic_set_unchecked(&pinstance->expose_resources, 0);
42227
42228 INIT_LIST_HEAD(&pinstance->free_res_q);
42229 INIT_LIST_HEAD(&pinstance->used_res_q);
42230@@ -6038,7 +6038,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
42231 /* Schedule worker thread to handle CCN and take care of adding and
42232 * removing devices to OS
42233 */
42234- atomic_set(&pinstance->expose_resources, 1);
42235+ atomic_set_unchecked(&pinstance->expose_resources, 1);
42236 schedule_work(&pinstance->worker_q);
42237 return rc;
42238
42239diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
42240index e1d150f..6c6df44 100644
42241--- a/drivers/scsi/pmcraid.h
42242+++ b/drivers/scsi/pmcraid.h
42243@@ -748,7 +748,7 @@ struct pmcraid_instance {
42244 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
42245
42246 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
42247- atomic_t last_message_id;
42248+ atomic_unchecked_t last_message_id;
42249
42250 /* configuration table */
42251 struct pmcraid_config_table *cfg_table;
42252@@ -777,7 +777,7 @@ struct pmcraid_instance {
42253 atomic_t outstanding_cmds;
42254
42255 /* should add/delete resources to mid-layer now ?*/
42256- atomic_t expose_resources;
42257+ atomic_unchecked_t expose_resources;
42258
42259
42260
42261@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
42262 struct pmcraid_config_table_entry_ext cfg_entry_ext;
42263 };
42264 struct scsi_device *scsi_dev; /* Link scsi_device structure */
42265- atomic_t read_failures; /* count of failed READ commands */
42266- atomic_t write_failures; /* count of failed WRITE commands */
42267+ atomic_unchecked_t read_failures; /* count of failed READ commands */
42268+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
42269
42270 /* To indicate add/delete/modify during CCN */
42271 u8 change_detected;
42272diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
42273index 83d7984..a27d947 100644
42274--- a/drivers/scsi/qla2xxx/qla_attr.c
42275+++ b/drivers/scsi/qla2xxx/qla_attr.c
42276@@ -1969,7 +1969,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
42277 return 0;
42278 }
42279
42280-struct fc_function_template qla2xxx_transport_functions = {
42281+fc_function_template_no_const qla2xxx_transport_functions = {
42282
42283 .show_host_node_name = 1,
42284 .show_host_port_name = 1,
42285@@ -2016,7 +2016,7 @@ struct fc_function_template qla2xxx_transport_functions = {
42286 .bsg_timeout = qla24xx_bsg_timeout,
42287 };
42288
42289-struct fc_function_template qla2xxx_transport_vport_functions = {
42290+fc_function_template_no_const qla2xxx_transport_vport_functions = {
42291
42292 .show_host_node_name = 1,
42293 .show_host_port_name = 1,
42294diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
42295index 2411d1a..4673766 100644
42296--- a/drivers/scsi/qla2xxx/qla_gbl.h
42297+++ b/drivers/scsi/qla2xxx/qla_gbl.h
42298@@ -515,8 +515,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
42299 struct device_attribute;
42300 extern struct device_attribute *qla2x00_host_attrs[];
42301 struct fc_function_template;
42302-extern struct fc_function_template qla2xxx_transport_functions;
42303-extern struct fc_function_template qla2xxx_transport_vport_functions;
42304+extern fc_function_template_no_const qla2xxx_transport_functions;
42305+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
42306 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
42307 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
42308 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
42309diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
42310index 10d23f8..a7d5d4c 100644
42311--- a/drivers/scsi/qla2xxx/qla_os.c
42312+++ b/drivers/scsi/qla2xxx/qla_os.c
42313@@ -1472,8 +1472,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
42314 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
42315 /* Ok, a 64bit DMA mask is applicable. */
42316 ha->flags.enable_64bit_addressing = 1;
42317- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
42318- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
42319+ pax_open_kernel();
42320+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
42321+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
42322+ pax_close_kernel();
42323 return;
42324 }
42325 }
42326diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
42327index 329d553..f20d31d 100644
42328--- a/drivers/scsi/qla4xxx/ql4_def.h
42329+++ b/drivers/scsi/qla4xxx/ql4_def.h
42330@@ -273,7 +273,7 @@ struct ddb_entry {
42331 * (4000 only) */
42332 atomic_t relogin_timer; /* Max Time to wait for
42333 * relogin to complete */
42334- atomic_t relogin_retry_count; /* Num of times relogin has been
42335+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
42336 * retried */
42337 uint32_t default_time2wait; /* Default Min time between
42338 * relogins (+aens) */
42339diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
42340index 4cec123..7c1329f 100644
42341--- a/drivers/scsi/qla4xxx/ql4_os.c
42342+++ b/drivers/scsi/qla4xxx/ql4_os.c
42343@@ -2621,12 +2621,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
42344 */
42345 if (!iscsi_is_session_online(cls_sess)) {
42346 /* Reset retry relogin timer */
42347- atomic_inc(&ddb_entry->relogin_retry_count);
42348+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
42349 DEBUG2(ql4_printk(KERN_INFO, ha,
42350 "%s: index[%d] relogin timed out-retrying"
42351 " relogin (%d), retry (%d)\n", __func__,
42352 ddb_entry->fw_ddb_index,
42353- atomic_read(&ddb_entry->relogin_retry_count),
42354+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
42355 ddb_entry->default_time2wait + 4));
42356 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
42357 atomic_set(&ddb_entry->retry_relogin_timer,
42358@@ -4738,7 +4738,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
42359
42360 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
42361 atomic_set(&ddb_entry->relogin_timer, 0);
42362- atomic_set(&ddb_entry->relogin_retry_count, 0);
42363+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
42364 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
42365 ddb_entry->default_relogin_timeout =
42366 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
42367diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
42368index 2c0d0ec..4e8681a 100644
42369--- a/drivers/scsi/scsi.c
42370+++ b/drivers/scsi/scsi.c
42371@@ -661,7 +661,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
42372 unsigned long timeout;
42373 int rtn = 0;
42374
42375- atomic_inc(&cmd->device->iorequest_cnt);
42376+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
42377
42378 /* check if the device is still usable */
42379 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
42380diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
42381index f1bf5af..f67e943 100644
42382--- a/drivers/scsi/scsi_lib.c
42383+++ b/drivers/scsi/scsi_lib.c
42384@@ -1454,7 +1454,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
42385 shost = sdev->host;
42386 scsi_init_cmd_errh(cmd);
42387 cmd->result = DID_NO_CONNECT << 16;
42388- atomic_inc(&cmd->device->iorequest_cnt);
42389+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
42390
42391 /*
42392 * SCSI request completion path will do scsi_device_unbusy(),
42393@@ -1480,9 +1480,9 @@ static void scsi_softirq_done(struct request *rq)
42394
42395 INIT_LIST_HEAD(&cmd->eh_entry);
42396
42397- atomic_inc(&cmd->device->iodone_cnt);
42398+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
42399 if (cmd->result)
42400- atomic_inc(&cmd->device->ioerr_cnt);
42401+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
42402
42403 disposition = scsi_decide_disposition(cmd);
42404 if (disposition != SUCCESS &&
42405diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
42406index 931a7d9..0c2a754 100644
42407--- a/drivers/scsi/scsi_sysfs.c
42408+++ b/drivers/scsi/scsi_sysfs.c
42409@@ -658,7 +658,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
42410 char *buf) \
42411 { \
42412 struct scsi_device *sdev = to_scsi_device(dev); \
42413- unsigned long long count = atomic_read(&sdev->field); \
42414+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
42415 return snprintf(buf, 20, "0x%llx\n", count); \
42416 } \
42417 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
42418diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
42419index 84a1fdf..693b0d6 100644
42420--- a/drivers/scsi/scsi_tgt_lib.c
42421+++ b/drivers/scsi/scsi_tgt_lib.c
42422@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
42423 int err;
42424
42425 dprintk("%lx %u\n", uaddr, len);
42426- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
42427+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
42428 if (err) {
42429 /*
42430 * TODO: need to fixup sg_tablesize, max_segment_size,
42431diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
42432index e894ca7..de9d7660 100644
42433--- a/drivers/scsi/scsi_transport_fc.c
42434+++ b/drivers/scsi/scsi_transport_fc.c
42435@@ -498,7 +498,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
42436 * Netlink Infrastructure
42437 */
42438
42439-static atomic_t fc_event_seq;
42440+static atomic_unchecked_t fc_event_seq;
42441
42442 /**
42443 * fc_get_event_number - Obtain the next sequential FC event number
42444@@ -511,7 +511,7 @@ static atomic_t fc_event_seq;
42445 u32
42446 fc_get_event_number(void)
42447 {
42448- return atomic_add_return(1, &fc_event_seq);
42449+ return atomic_add_return_unchecked(1, &fc_event_seq);
42450 }
42451 EXPORT_SYMBOL(fc_get_event_number);
42452
42453@@ -659,7 +659,7 @@ static __init int fc_transport_init(void)
42454 {
42455 int error;
42456
42457- atomic_set(&fc_event_seq, 0);
42458+ atomic_set_unchecked(&fc_event_seq, 0);
42459
42460 error = transport_class_register(&fc_host_class);
42461 if (error)
42462@@ -849,7 +849,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
42463 char *cp;
42464
42465 *val = simple_strtoul(buf, &cp, 0);
42466- if ((*cp && (*cp != '\n')) || (*val < 0))
42467+ if (*cp && (*cp != '\n'))
42468 return -EINVAL;
42469 /*
42470 * Check for overflow; dev_loss_tmo is u32
42471diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
42472index 31969f2..2b348f0 100644
42473--- a/drivers/scsi/scsi_transport_iscsi.c
42474+++ b/drivers/scsi/scsi_transport_iscsi.c
42475@@ -79,7 +79,7 @@ struct iscsi_internal {
42476 struct transport_container session_cont;
42477 };
42478
42479-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
42480+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
42481 static struct workqueue_struct *iscsi_eh_timer_workq;
42482
42483 static DEFINE_IDA(iscsi_sess_ida);
42484@@ -1064,7 +1064,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
42485 int err;
42486
42487 ihost = shost->shost_data;
42488- session->sid = atomic_add_return(1, &iscsi_session_nr);
42489+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
42490
42491 if (target_id == ISCSI_MAX_TARGET) {
42492 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
42493@@ -2943,7 +2943,7 @@ static __init int iscsi_transport_init(void)
42494 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
42495 ISCSI_TRANSPORT_VERSION);
42496
42497- atomic_set(&iscsi_session_nr, 0);
42498+ atomic_set_unchecked(&iscsi_session_nr, 0);
42499
42500 err = class_register(&iscsi_transport_class);
42501 if (err)
42502diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
42503index f379c7f..e8fc69c 100644
42504--- a/drivers/scsi/scsi_transport_srp.c
42505+++ b/drivers/scsi/scsi_transport_srp.c
42506@@ -33,7 +33,7 @@
42507 #include "scsi_transport_srp_internal.h"
42508
42509 struct srp_host_attrs {
42510- atomic_t next_port_id;
42511+ atomic_unchecked_t next_port_id;
42512 };
42513 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
42514
42515@@ -61,7 +61,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
42516 struct Scsi_Host *shost = dev_to_shost(dev);
42517 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
42518
42519- atomic_set(&srp_host->next_port_id, 0);
42520+ atomic_set_unchecked(&srp_host->next_port_id, 0);
42521 return 0;
42522 }
42523
42524@@ -210,7 +210,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
42525 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
42526 rport->roles = ids->roles;
42527
42528- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
42529+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
42530 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
42531
42532 transport_setup_device(&rport->dev);
42533diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
42534index 7992635..609faf8 100644
42535--- a/drivers/scsi/sd.c
42536+++ b/drivers/scsi/sd.c
42537@@ -2909,7 +2909,7 @@ static int sd_probe(struct device *dev)
42538 sdkp->disk = gd;
42539 sdkp->index = index;
42540 atomic_set(&sdkp->openers, 0);
42541- atomic_set(&sdkp->device->ioerr_cnt, 0);
42542+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
42543
42544 if (!sdp->request_queue->rq_timeout) {
42545 if (sdp->type != TYPE_MOD)
42546diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
42547index be2c9a6..275525c 100644
42548--- a/drivers/scsi/sg.c
42549+++ b/drivers/scsi/sg.c
42550@@ -1101,7 +1101,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
42551 sdp->disk->disk_name,
42552 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
42553 NULL,
42554- (char *)arg);
42555+ (char __user *)arg);
42556 case BLKTRACESTART:
42557 return blk_trace_startstop(sdp->device->request_queue, 1);
42558 case BLKTRACESTOP:
42559diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
42560index 19ee901..6e8c2ef 100644
42561--- a/drivers/spi/spi.c
42562+++ b/drivers/spi/spi.c
42563@@ -1616,7 +1616,7 @@ int spi_bus_unlock(struct spi_master *master)
42564 EXPORT_SYMBOL_GPL(spi_bus_unlock);
42565
42566 /* portable code must never pass more than 32 bytes */
42567-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
42568+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
42569
42570 static u8 *buf;
42571
42572diff --git a/drivers/staging/iio/iio_hwmon.c b/drivers/staging/iio/iio_hwmon.c
42573index c7a5f97..71ecd35 100644
42574--- a/drivers/staging/iio/iio_hwmon.c
42575+++ b/drivers/staging/iio/iio_hwmon.c
42576@@ -72,7 +72,7 @@ static void iio_hwmon_free_attrs(struct iio_hwmon_state *st)
42577 static int iio_hwmon_probe(struct platform_device *pdev)
42578 {
42579 struct iio_hwmon_state *st;
42580- struct sensor_device_attribute *a;
42581+ sensor_device_attribute_no_const *a;
42582 int ret, i;
42583 int in_i = 1, temp_i = 1, curr_i = 1;
42584 enum iio_chan_type type;
42585diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
42586index 34afc16..ffe44dd 100644
42587--- a/drivers/staging/octeon/ethernet-rx.c
42588+++ b/drivers/staging/octeon/ethernet-rx.c
42589@@ -421,11 +421,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
42590 /* Increment RX stats for virtual ports */
42591 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
42592 #ifdef CONFIG_64BIT
42593- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
42594- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
42595+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
42596+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
42597 #else
42598- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
42599- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
42600+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
42601+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
42602 #endif
42603 }
42604 netif_receive_skb(skb);
42605@@ -437,9 +437,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
42606 dev->name);
42607 */
42608 #ifdef CONFIG_64BIT
42609- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
42610+ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
42611 #else
42612- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
42613+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
42614 #endif
42615 dev_kfree_skb_irq(skb);
42616 }
42617diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
42618index ef32dc1..a159d68 100644
42619--- a/drivers/staging/octeon/ethernet.c
42620+++ b/drivers/staging/octeon/ethernet.c
42621@@ -252,11 +252,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
42622 * since the RX tasklet also increments it.
42623 */
42624 #ifdef CONFIG_64BIT
42625- atomic64_add(rx_status.dropped_packets,
42626- (atomic64_t *)&priv->stats.rx_dropped);
42627+ atomic64_add_unchecked(rx_status.dropped_packets,
42628+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
42629 #else
42630- atomic_add(rx_status.dropped_packets,
42631- (atomic_t *)&priv->stats.rx_dropped);
42632+ atomic_add_unchecked(rx_status.dropped_packets,
42633+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
42634 #endif
42635 }
42636
42637diff --git a/drivers/staging/ramster/tmem.c b/drivers/staging/ramster/tmem.c
42638index a2b7e03..aaf3630 100644
42639--- a/drivers/staging/ramster/tmem.c
42640+++ b/drivers/staging/ramster/tmem.c
42641@@ -50,25 +50,25 @@
42642 * A tmem host implementation must use this function to register callbacks
42643 * for memory allocation.
42644 */
42645-static struct tmem_hostops tmem_hostops;
42646+static struct tmem_hostops *tmem_hostops;
42647
42648 static void tmem_objnode_tree_init(void);
42649
42650 void tmem_register_hostops(struct tmem_hostops *m)
42651 {
42652 tmem_objnode_tree_init();
42653- tmem_hostops = *m;
42654+ tmem_hostops = m;
42655 }
42656
42657 /*
42658 * A tmem host implementation must use this function to register
42659 * callbacks for a page-accessible memory (PAM) implementation.
42660 */
42661-static struct tmem_pamops tmem_pamops;
42662+static struct tmem_pamops *tmem_pamops;
42663
42664 void tmem_register_pamops(struct tmem_pamops *m)
42665 {
42666- tmem_pamops = *m;
42667+ tmem_pamops = m;
42668 }
42669
42670 /*
42671@@ -174,7 +174,7 @@ static void tmem_obj_init(struct tmem_obj *obj, struct tmem_hashbucket *hb,
42672 obj->pampd_count = 0;
42673 #ifdef CONFIG_RAMSTER
42674 if (tmem_pamops.new_obj != NULL)
42675- (*tmem_pamops.new_obj)(obj);
42676+ (tmem_pamops->new_obj)(obj);
42677 #endif
42678 SET_SENTINEL(obj, OBJ);
42679
42680@@ -210,7 +210,7 @@ static void tmem_pool_flush(struct tmem_pool *pool, bool destroy)
42681 rbnode = rb_next(rbnode);
42682 tmem_pampd_destroy_all_in_obj(obj, true);
42683 tmem_obj_free(obj, hb);
42684- (*tmem_hostops.obj_free)(obj, pool);
42685+ (tmem_hostops->obj_free)(obj, pool);
42686 }
42687 spin_unlock(&hb->lock);
42688 }
42689@@ -261,7 +261,7 @@ static struct tmem_objnode *tmem_objnode_alloc(struct tmem_obj *obj)
42690 ASSERT_SENTINEL(obj, OBJ);
42691 BUG_ON(obj->pool == NULL);
42692 ASSERT_SENTINEL(obj->pool, POOL);
42693- objnode = (*tmem_hostops.objnode_alloc)(obj->pool);
42694+ objnode = (tmem_hostops->objnode_alloc)(obj->pool);
42695 if (unlikely(objnode == NULL))
42696 goto out;
42697 objnode->obj = obj;
42698@@ -290,7 +290,7 @@ static void tmem_objnode_free(struct tmem_objnode *objnode)
42699 ASSERT_SENTINEL(pool, POOL);
42700 objnode->obj->objnode_count--;
42701 objnode->obj = NULL;
42702- (*tmem_hostops.objnode_free)(objnode, pool);
42703+ (tmem_hostops->objnode_free)(objnode, pool);
42704 }
42705
42706 /*
42707@@ -348,7 +348,7 @@ static void *tmem_pampd_replace_in_obj(struct tmem_obj *obj, uint32_t index,
42708 void *old_pampd = *(void **)slot;
42709 *(void **)slot = new_pampd;
42710 if (!no_free)
42711- (*tmem_pamops.free)(old_pampd, obj->pool,
42712+ (tmem_pamops->free)(old_pampd, obj->pool,
42713 NULL, 0, false);
42714 ret = new_pampd;
42715 }
42716@@ -505,7 +505,7 @@ static void tmem_objnode_node_destroy(struct tmem_obj *obj,
42717 if (objnode->slots[i]) {
42718 if (ht == 1) {
42719 obj->pampd_count--;
42720- (*tmem_pamops.free)(objnode->slots[i],
42721+ (tmem_pamops->free)(objnode->slots[i],
42722 obj->pool, NULL, 0, true);
42723 objnode->slots[i] = NULL;
42724 continue;
42725@@ -524,7 +524,7 @@ static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj,
42726 return;
42727 if (obj->objnode_tree_height == 0) {
42728 obj->pampd_count--;
42729- (*tmem_pamops.free)(obj->objnode_tree_root,
42730+ (tmem_pamops->free)(obj->objnode_tree_root,
42731 obj->pool, NULL, 0, true);
42732 } else {
42733 tmem_objnode_node_destroy(obj, obj->objnode_tree_root,
42734@@ -535,7 +535,7 @@ static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj,
42735 obj->objnode_tree_root = NULL;
42736 #ifdef CONFIG_RAMSTER
42737 if (tmem_pamops.free_obj != NULL)
42738- (*tmem_pamops.free_obj)(obj->pool, obj, pool_destroy);
42739+ (tmem_pamops->free_obj)(obj->pool, obj, pool_destroy);
42740 #endif
42741 }
42742
42743@@ -574,7 +574,7 @@ int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
42744 /* if found, is a dup put, flush the old one */
42745 pampd_del = tmem_pampd_delete_from_obj(obj, index);
42746 BUG_ON(pampd_del != pampd);
42747- (*tmem_pamops.free)(pampd, pool, oidp, index, true);
42748+ (tmem_pamops->free)(pampd, pool, oidp, index, true);
42749 if (obj->pampd_count == 0) {
42750 objnew = obj;
42751 objfound = NULL;
42752@@ -582,7 +582,7 @@ int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
42753 pampd = NULL;
42754 }
42755 } else {
42756- obj = objnew = (*tmem_hostops.obj_alloc)(pool);
42757+ obj = objnew = (tmem_hostops->obj_alloc)(pool);
42758 if (unlikely(obj == NULL)) {
42759 ret = -ENOMEM;
42760 goto out;
42761@@ -597,16 +597,16 @@ int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
42762 if (unlikely(ret == -ENOMEM))
42763 /* may have partially built objnode tree ("stump") */
42764 goto delete_and_free;
42765- (*tmem_pamops.create_finish)(pampd, is_ephemeral(pool));
42766+ (tmem_pamops->create_finish)(pampd, is_ephemeral(pool));
42767 goto out;
42768
42769 delete_and_free:
42770 (void)tmem_pampd_delete_from_obj(obj, index);
42771 if (pampd)
42772- (*tmem_pamops.free)(pampd, pool, NULL, 0, true);
42773+ (tmem_pamops->free)(pampd, pool, NULL, 0, true);
42774 if (objnew) {
42775 tmem_obj_free(objnew, hb);
42776- (*tmem_hostops.obj_free)(objnew, pool);
42777+ (tmem_hostops->obj_free)(objnew, pool);
42778 }
42779 out:
42780 spin_unlock(&hb->lock);
42781@@ -651,7 +651,7 @@ void tmem_localify_finish(struct tmem_obj *obj, uint32_t index,
42782 if (pampd != NULL) {
42783 BUG_ON(obj == NULL);
42784 (void)tmem_pampd_replace_in_obj(obj, index, pampd, 1);
42785- (*tmem_pamops.create_finish)(pampd, is_ephemeral(obj->pool));
42786+ (tmem_pamops->create_finish)(pampd, is_ephemeral(obj->pool));
42787 } else if (delete) {
42788 BUG_ON(obj == NULL);
42789 (void)tmem_pampd_delete_from_obj(obj, index);
42790@@ -671,7 +671,7 @@ static int tmem_repatriate(void **ppampd, struct tmem_hashbucket *hb,
42791 int ret = 0;
42792
42793 if (!is_ephemeral(pool))
42794- new_pampd = (*tmem_pamops.repatriate_preload)(
42795+ new_pampd = (tmem_pamops->repatriate_preload)(
42796 old_pampd, pool, oidp, index, &intransit);
42797 if (intransit)
42798 ret = -EAGAIN;
42799@@ -680,7 +680,7 @@ static int tmem_repatriate(void **ppampd, struct tmem_hashbucket *hb,
42800 /* must release the hb->lock else repatriate can't sleep */
42801 spin_unlock(&hb->lock);
42802 if (!intransit)
42803- ret = (*tmem_pamops.repatriate)(old_pampd, new_pampd, pool,
42804+ ret = (tmem_pamops->repatriate)(old_pampd, new_pampd, pool,
42805 oidp, index, free, data);
42806 if (ret == -EAGAIN) {
42807 /* rare I think, but should cond_resched()??? */
42808@@ -714,7 +714,7 @@ int tmem_replace(struct tmem_pool *pool, struct tmem_oid *oidp,
42809 new_pampd = tmem_pampd_replace_in_obj(obj, index, new_pampd, 0);
42810 /* if we bug here, pamops wasn't properly set up for ramster */
42811 BUG_ON(tmem_pamops.replace_in_obj == NULL);
42812- ret = (*tmem_pamops.replace_in_obj)(new_pampd, obj);
42813+ ret = (tmem_pamops->replace_in_obj)(new_pampd, obj);
42814 out:
42815 spin_unlock(&hb->lock);
42816 return ret;
42817@@ -776,15 +776,15 @@ int tmem_get(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
42818 if (free) {
42819 if (obj->pampd_count == 0) {
42820 tmem_obj_free(obj, hb);
42821- (*tmem_hostops.obj_free)(obj, pool);
42822+ (tmem_hostops->obj_free)(obj, pool);
42823 obj = NULL;
42824 }
42825 }
42826 if (free)
42827- ret = (*tmem_pamops.get_data_and_free)(
42828+ ret = (tmem_pamops->get_data_and_free)(
42829 data, sizep, raw, pampd, pool, oidp, index);
42830 else
42831- ret = (*tmem_pamops.get_data)(
42832+ ret = (tmem_pamops->get_data)(
42833 data, sizep, raw, pampd, pool, oidp, index);
42834 if (ret < 0)
42835 goto out;
42836@@ -816,10 +816,10 @@ int tmem_flush_page(struct tmem_pool *pool,
42837 pampd = tmem_pampd_delete_from_obj(obj, index);
42838 if (pampd == NULL)
42839 goto out;
42840- (*tmem_pamops.free)(pampd, pool, oidp, index, true);
42841+ (tmem_pamops->free)(pampd, pool, oidp, index, true);
42842 if (obj->pampd_count == 0) {
42843 tmem_obj_free(obj, hb);
42844- (*tmem_hostops.obj_free)(obj, pool);
42845+ (tmem_hostops->obj_free)(obj, pool);
42846 }
42847 ret = 0;
42848
42849@@ -844,7 +844,7 @@ int tmem_flush_object(struct tmem_pool *pool, struct tmem_oid *oidp)
42850 goto out;
42851 tmem_pampd_destroy_all_in_obj(obj, false);
42852 tmem_obj_free(obj, hb);
42853- (*tmem_hostops.obj_free)(obj, pool);
42854+ (tmem_hostops->obj_free)(obj, pool);
42855 ret = 0;
42856
42857 out:
42858diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
42859index dc23395..cf7e9b1 100644
42860--- a/drivers/staging/rtl8712/rtl871x_io.h
42861+++ b/drivers/staging/rtl8712/rtl871x_io.h
42862@@ -108,7 +108,7 @@ struct _io_ops {
42863 u8 *pmem);
42864 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
42865 u8 *pmem);
42866-};
42867+} __no_const;
42868
42869 struct io_req {
42870 struct list_head list;
42871diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
42872index 1f5088b..0e59820 100644
42873--- a/drivers/staging/sbe-2t3e3/netdev.c
42874+++ b/drivers/staging/sbe-2t3e3/netdev.c
42875@@ -51,7 +51,7 @@ static int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
42876 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
42877
42878 if (rlen)
42879- if (copy_to_user(data, &resp, rlen))
42880+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
42881 return -EFAULT;
42882
42883 return 0;
42884diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
42885index 5dddc4d..34fcb2f 100644
42886--- a/drivers/staging/usbip/vhci.h
42887+++ b/drivers/staging/usbip/vhci.h
42888@@ -83,7 +83,7 @@ struct vhci_hcd {
42889 unsigned resuming:1;
42890 unsigned long re_timeout;
42891
42892- atomic_t seqnum;
42893+ atomic_unchecked_t seqnum;
42894
42895 /*
42896 * NOTE:
42897diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
42898index c3aa219..bf8b3de 100644
42899--- a/drivers/staging/usbip/vhci_hcd.c
42900+++ b/drivers/staging/usbip/vhci_hcd.c
42901@@ -451,7 +451,7 @@ static void vhci_tx_urb(struct urb *urb)
42902 return;
42903 }
42904
42905- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
42906+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
42907 if (priv->seqnum == 0xffff)
42908 dev_info(&urb->dev->dev, "seqnum max\n");
42909
42910@@ -703,7 +703,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
42911 return -ENOMEM;
42912 }
42913
42914- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
42915+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
42916 if (unlink->seqnum == 0xffff)
42917 pr_info("seqnum max\n");
42918
42919@@ -907,7 +907,7 @@ static int vhci_start(struct usb_hcd *hcd)
42920 vdev->rhport = rhport;
42921 }
42922
42923- atomic_set(&vhci->seqnum, 0);
42924+ atomic_set_unchecked(&vhci->seqnum, 0);
42925 spin_lock_init(&vhci->lock);
42926
42927 hcd->power_budget = 0; /* no limit */
42928diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
42929index ba5f1c0..11d8122 100644
42930--- a/drivers/staging/usbip/vhci_rx.c
42931+++ b/drivers/staging/usbip/vhci_rx.c
42932@@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
42933 if (!urb) {
42934 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
42935 pr_info("max seqnum %d\n",
42936- atomic_read(&the_controller->seqnum));
42937+ atomic_read_unchecked(&the_controller->seqnum));
42938 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
42939 return;
42940 }
42941diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
42942index 5f13890..36a044b 100644
42943--- a/drivers/staging/vt6655/hostap.c
42944+++ b/drivers/staging/vt6655/hostap.c
42945@@ -73,14 +73,13 @@ static int msglevel =MSG_LEVEL_INFO;
42946 *
42947 */
42948
42949+static net_device_ops_no_const apdev_netdev_ops;
42950+
42951 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42952 {
42953 PSDevice apdev_priv;
42954 struct net_device *dev = pDevice->dev;
42955 int ret;
42956- const struct net_device_ops apdev_netdev_ops = {
42957- .ndo_start_xmit = pDevice->tx_80211,
42958- };
42959
42960 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
42961
42962@@ -92,6 +91,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42963 *apdev_priv = *pDevice;
42964 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
42965
42966+ /* only half broken now */
42967+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
42968 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
42969
42970 pDevice->apdev->type = ARPHRD_IEEE80211;
42971diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
42972index 26a7d0e..897b083 100644
42973--- a/drivers/staging/vt6656/hostap.c
42974+++ b/drivers/staging/vt6656/hostap.c
42975@@ -60,14 +60,13 @@ static int msglevel =MSG_LEVEL_INFO;
42976 *
42977 */
42978
42979+static net_device_ops_no_const apdev_netdev_ops;
42980+
42981 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42982 {
42983 PSDevice apdev_priv;
42984 struct net_device *dev = pDevice->dev;
42985 int ret;
42986- const struct net_device_ops apdev_netdev_ops = {
42987- .ndo_start_xmit = pDevice->tx_80211,
42988- };
42989
42990 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
42991
42992@@ -79,6 +78,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42993 *apdev_priv = *pDevice;
42994 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
42995
42996+ /* only half broken now */
42997+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
42998 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
42999
43000 pDevice->apdev->type = ARPHRD_IEEE80211;
43001diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
43002index 56c8e60..1920c63 100644
43003--- a/drivers/staging/zcache/tmem.c
43004+++ b/drivers/staging/zcache/tmem.c
43005@@ -39,7 +39,7 @@
43006 * A tmem host implementation must use this function to register callbacks
43007 * for memory allocation.
43008 */
43009-static struct tmem_hostops tmem_hostops;
43010+static tmem_hostops_no_const tmem_hostops;
43011
43012 static void tmem_objnode_tree_init(void);
43013
43014@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
43015 * A tmem host implementation must use this function to register
43016 * callbacks for a page-accessible memory (PAM) implementation
43017 */
43018-static struct tmem_pamops tmem_pamops;
43019+static tmem_pamops_no_const tmem_pamops;
43020
43021 void tmem_register_pamops(struct tmem_pamops *m)
43022 {
43023diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
43024index 0d4aa82..f7832d4 100644
43025--- a/drivers/staging/zcache/tmem.h
43026+++ b/drivers/staging/zcache/tmem.h
43027@@ -180,6 +180,7 @@ struct tmem_pamops {
43028 void (*new_obj)(struct tmem_obj *);
43029 int (*replace_in_obj)(void *, struct tmem_obj *);
43030 };
43031+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
43032 extern void tmem_register_pamops(struct tmem_pamops *m);
43033
43034 /* memory allocation methods provided by the host implementation */
43035@@ -189,6 +190,7 @@ struct tmem_hostops {
43036 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
43037 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
43038 };
43039+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
43040 extern void tmem_register_hostops(struct tmem_hostops *m);
43041
43042 /* core tmem accessor functions */
43043diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
43044index 96f4981..4daaa7e 100644
43045--- a/drivers/target/target_core_device.c
43046+++ b/drivers/target/target_core_device.c
43047@@ -1370,7 +1370,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
43048 spin_lock_init(&dev->se_port_lock);
43049 spin_lock_init(&dev->se_tmr_lock);
43050 spin_lock_init(&dev->qf_cmd_lock);
43051- atomic_set(&dev->dev_ordered_id, 0);
43052+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
43053 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
43054 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
43055 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
43056diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
43057index fcf880f..a4d1e8f 100644
43058--- a/drivers/target/target_core_transport.c
43059+++ b/drivers/target/target_core_transport.c
43060@@ -1077,7 +1077,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
43061 * Used to determine when ORDERED commands should go from
43062 * Dormant to Active status.
43063 */
43064- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
43065+ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
43066 smp_mb__after_atomic_inc();
43067 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
43068 cmd->se_ordered_id, cmd->sam_task_attr,
43069diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
43070index b09c8d1f..c4225c0 100644
43071--- a/drivers/tty/cyclades.c
43072+++ b/drivers/tty/cyclades.c
43073@@ -1589,10 +1589,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
43074 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
43075 info->port.count);
43076 #endif
43077- info->port.count++;
43078+ atomic_inc(&info->port.count);
43079 #ifdef CY_DEBUG_COUNT
43080 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
43081- current->pid, info->port.count);
43082+ current->pid, atomic_read(&info->port.count));
43083 #endif
43084
43085 /*
43086@@ -3991,7 +3991,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
43087 for (j = 0; j < cy_card[i].nports; j++) {
43088 info = &cy_card[i].ports[j];
43089
43090- if (info->port.count) {
43091+ if (atomic_read(&info->port.count)) {
43092 /* XXX is the ldisc num worth this? */
43093 struct tty_struct *tty;
43094 struct tty_ldisc *ld;
43095diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
43096index 13ee53b..418d164 100644
43097--- a/drivers/tty/hvc/hvc_console.c
43098+++ b/drivers/tty/hvc/hvc_console.c
43099@@ -338,7 +338,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
43100
43101 spin_lock_irqsave(&hp->port.lock, flags);
43102 /* Check and then increment for fast path open. */
43103- if (hp->port.count++ > 0) {
43104+ if (atomic_inc_return(&hp->port.count) > 1) {
43105 spin_unlock_irqrestore(&hp->port.lock, flags);
43106 hvc_kick();
43107 return 0;
43108@@ -388,7 +388,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
43109
43110 spin_lock_irqsave(&hp->port.lock, flags);
43111
43112- if (--hp->port.count == 0) {
43113+ if (atomic_dec_return(&hp->port.count) == 0) {
43114 spin_unlock_irqrestore(&hp->port.lock, flags);
43115 /* We are done with the tty pointer now. */
43116 tty_port_tty_set(&hp->port, NULL);
43117@@ -406,9 +406,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
43118 */
43119 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
43120 } else {
43121- if (hp->port.count < 0)
43122+ if (atomic_read(&hp->port.count) < 0)
43123 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
43124- hp->vtermno, hp->port.count);
43125+ hp->vtermno, atomic_read(&hp->port.count));
43126 spin_unlock_irqrestore(&hp->port.lock, flags);
43127 }
43128 }
43129@@ -438,12 +438,12 @@ static void hvc_hangup(struct tty_struct *tty)
43130 * open->hangup case this can be called after the final close so prevent
43131 * that from happening for now.
43132 */
43133- if (hp->port.count <= 0) {
43134+ if (atomic_read(&hp->port.count) <= 0) {
43135 spin_unlock_irqrestore(&hp->port.lock, flags);
43136 return;
43137 }
43138
43139- hp->port.count = 0;
43140+ atomic_set(&hp->port.count, 0);
43141 spin_unlock_irqrestore(&hp->port.lock, flags);
43142 tty_port_tty_set(&hp->port, NULL);
43143
43144@@ -491,7 +491,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
43145 return -EPIPE;
43146
43147 /* FIXME what's this (unprotected) check for? */
43148- if (hp->port.count <= 0)
43149+ if (atomic_read(&hp->port.count) <= 0)
43150 return -EIO;
43151
43152 spin_lock_irqsave(&hp->lock, flags);
43153diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
43154index 8776357..b2d4afd 100644
43155--- a/drivers/tty/hvc/hvcs.c
43156+++ b/drivers/tty/hvc/hvcs.c
43157@@ -83,6 +83,7 @@
43158 #include <asm/hvcserver.h>
43159 #include <asm/uaccess.h>
43160 #include <asm/vio.h>
43161+#include <asm/local.h>
43162
43163 /*
43164 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
43165@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
43166
43167 spin_lock_irqsave(&hvcsd->lock, flags);
43168
43169- if (hvcsd->port.count > 0) {
43170+ if (atomic_read(&hvcsd->port.count) > 0) {
43171 spin_unlock_irqrestore(&hvcsd->lock, flags);
43172 printk(KERN_INFO "HVCS: vterm state unchanged. "
43173 "The hvcs device node is still in use.\n");
43174@@ -1132,7 +1133,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
43175 }
43176 }
43177
43178- hvcsd->port.count = 0;
43179+ atomic_set(&hvcsd->port.count, 0);
43180 hvcsd->port.tty = tty;
43181 tty->driver_data = hvcsd;
43182
43183@@ -1185,7 +1186,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
43184 unsigned long flags;
43185
43186 spin_lock_irqsave(&hvcsd->lock, flags);
43187- hvcsd->port.count++;
43188+ atomic_inc(&hvcsd->port.count);
43189 hvcsd->todo_mask |= HVCS_SCHED_READ;
43190 spin_unlock_irqrestore(&hvcsd->lock, flags);
43191
43192@@ -1221,7 +1222,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
43193 hvcsd = tty->driver_data;
43194
43195 spin_lock_irqsave(&hvcsd->lock, flags);
43196- if (--hvcsd->port.count == 0) {
43197+ if (atomic_dec_and_test(&hvcsd->port.count)) {
43198
43199 vio_disable_interrupts(hvcsd->vdev);
43200
43201@@ -1246,10 +1247,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
43202
43203 free_irq(irq, hvcsd);
43204 return;
43205- } else if (hvcsd->port.count < 0) {
43206+ } else if (atomic_read(&hvcsd->port.count) < 0) {
43207 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
43208 " is missmanaged.\n",
43209- hvcsd->vdev->unit_address, hvcsd->port.count);
43210+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
43211 }
43212
43213 spin_unlock_irqrestore(&hvcsd->lock, flags);
43214@@ -1271,7 +1272,7 @@ static void hvcs_hangup(struct tty_struct * tty)
43215
43216 spin_lock_irqsave(&hvcsd->lock, flags);
43217 /* Preserve this so that we know how many kref refs to put */
43218- temp_open_count = hvcsd->port.count;
43219+ temp_open_count = atomic_read(&hvcsd->port.count);
43220
43221 /*
43222 * Don't kref put inside the spinlock because the destruction
43223@@ -1286,7 +1287,7 @@ static void hvcs_hangup(struct tty_struct * tty)
43224 tty->driver_data = NULL;
43225 hvcsd->port.tty = NULL;
43226
43227- hvcsd->port.count = 0;
43228+ atomic_set(&hvcsd->port.count, 0);
43229
43230 /* This will drop any buffered data on the floor which is OK in a hangup
43231 * scenario. */
43232@@ -1357,7 +1358,7 @@ static int hvcs_write(struct tty_struct *tty,
43233 * the middle of a write operation? This is a crummy place to do this
43234 * but we want to keep it all in the spinlock.
43235 */
43236- if (hvcsd->port.count <= 0) {
43237+ if (atomic_read(&hvcsd->port.count) <= 0) {
43238 spin_unlock_irqrestore(&hvcsd->lock, flags);
43239 return -ENODEV;
43240 }
43241@@ -1431,7 +1432,7 @@ static int hvcs_write_room(struct tty_struct *tty)
43242 {
43243 struct hvcs_struct *hvcsd = tty->driver_data;
43244
43245- if (!hvcsd || hvcsd->port.count <= 0)
43246+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
43247 return 0;
43248
43249 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
43250diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
43251index 2cde13d..645d78f 100644
43252--- a/drivers/tty/ipwireless/tty.c
43253+++ b/drivers/tty/ipwireless/tty.c
43254@@ -29,6 +29,7 @@
43255 #include <linux/tty_driver.h>
43256 #include <linux/tty_flip.h>
43257 #include <linux/uaccess.h>
43258+#include <asm/local.h>
43259
43260 #include "tty.h"
43261 #include "network.h"
43262@@ -99,10 +100,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
43263 mutex_unlock(&tty->ipw_tty_mutex);
43264 return -ENODEV;
43265 }
43266- if (tty->port.count == 0)
43267+ if (atomic_read(&tty->port.count) == 0)
43268 tty->tx_bytes_queued = 0;
43269
43270- tty->port.count++;
43271+ atomic_inc(&tty->port.count);
43272
43273 tty->port.tty = linux_tty;
43274 linux_tty->driver_data = tty;
43275@@ -118,9 +119,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
43276
43277 static void do_ipw_close(struct ipw_tty *tty)
43278 {
43279- tty->port.count--;
43280-
43281- if (tty->port.count == 0) {
43282+ if (atomic_dec_return(&tty->port.count) == 0) {
43283 struct tty_struct *linux_tty = tty->port.tty;
43284
43285 if (linux_tty != NULL) {
43286@@ -141,7 +140,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
43287 return;
43288
43289 mutex_lock(&tty->ipw_tty_mutex);
43290- if (tty->port.count == 0) {
43291+ if (atomic_read(&tty->port.count) == 0) {
43292 mutex_unlock(&tty->ipw_tty_mutex);
43293 return;
43294 }
43295@@ -170,7 +169,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
43296 return;
43297 }
43298
43299- if (!tty->port.count) {
43300+ if (!atomic_read(&tty->port.count)) {
43301 mutex_unlock(&tty->ipw_tty_mutex);
43302 return;
43303 }
43304@@ -212,7 +211,7 @@ static int ipw_write(struct tty_struct *linux_tty,
43305 return -ENODEV;
43306
43307 mutex_lock(&tty->ipw_tty_mutex);
43308- if (!tty->port.count) {
43309+ if (!atomic_read(&tty->port.count)) {
43310 mutex_unlock(&tty->ipw_tty_mutex);
43311 return -EINVAL;
43312 }
43313@@ -252,7 +251,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
43314 if (!tty)
43315 return -ENODEV;
43316
43317- if (!tty->port.count)
43318+ if (!atomic_read(&tty->port.count))
43319 return -EINVAL;
43320
43321 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
43322@@ -294,7 +293,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
43323 if (!tty)
43324 return 0;
43325
43326- if (!tty->port.count)
43327+ if (!atomic_read(&tty->port.count))
43328 return 0;
43329
43330 return tty->tx_bytes_queued;
43331@@ -375,7 +374,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
43332 if (!tty)
43333 return -ENODEV;
43334
43335- if (!tty->port.count)
43336+ if (!atomic_read(&tty->port.count))
43337 return -EINVAL;
43338
43339 return get_control_lines(tty);
43340@@ -391,7 +390,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
43341 if (!tty)
43342 return -ENODEV;
43343
43344- if (!tty->port.count)
43345+ if (!atomic_read(&tty->port.count))
43346 return -EINVAL;
43347
43348 return set_control_lines(tty, set, clear);
43349@@ -405,7 +404,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
43350 if (!tty)
43351 return -ENODEV;
43352
43353- if (!tty->port.count)
43354+ if (!atomic_read(&tty->port.count))
43355 return -EINVAL;
43356
43357 /* FIXME: Exactly how is the tty object locked here .. */
43358@@ -561,7 +560,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
43359 * are gone */
43360 mutex_lock(&ttyj->ipw_tty_mutex);
43361 }
43362- while (ttyj->port.count)
43363+ while (atomic_read(&ttyj->port.count))
43364 do_ipw_close(ttyj);
43365 ipwireless_disassociate_network_ttys(network,
43366 ttyj->channel_idx);
43367diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
43368index f9d2850..b006f04 100644
43369--- a/drivers/tty/moxa.c
43370+++ b/drivers/tty/moxa.c
43371@@ -1193,7 +1193,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
43372 }
43373
43374 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
43375- ch->port.count++;
43376+ atomic_inc(&ch->port.count);
43377 tty->driver_data = ch;
43378 tty_port_tty_set(&ch->port, tty);
43379 mutex_lock(&ch->port.mutex);
43380diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
43381index bfd6771..e0d93c4 100644
43382--- a/drivers/tty/n_gsm.c
43383+++ b/drivers/tty/n_gsm.c
43384@@ -1636,7 +1636,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
43385 spin_lock_init(&dlci->lock);
43386 mutex_init(&dlci->mutex);
43387 dlci->fifo = &dlci->_fifo;
43388- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
43389+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
43390 kfree(dlci);
43391 return NULL;
43392 }
43393@@ -2936,7 +2936,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
43394 struct gsm_dlci *dlci = tty->driver_data;
43395 struct tty_port *port = &dlci->port;
43396
43397- port->count++;
43398+ atomic_inc(&port->count);
43399 dlci_get(dlci);
43400 dlci_get(dlci->gsm->dlci[0]);
43401 mux_get(dlci->gsm);
43402diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
43403index 19083ef..6e34e97 100644
43404--- a/drivers/tty/n_tty.c
43405+++ b/drivers/tty/n_tty.c
43406@@ -2196,6 +2196,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
43407 {
43408 *ops = tty_ldisc_N_TTY;
43409 ops->owner = NULL;
43410- ops->refcount = ops->flags = 0;
43411+ atomic_set(&ops->refcount, 0);
43412+ ops->flags = 0;
43413 }
43414 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
43415diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
43416index ac35c90..c47deac 100644
43417--- a/drivers/tty/pty.c
43418+++ b/drivers/tty/pty.c
43419@@ -790,8 +790,10 @@ static void __init unix98_pty_init(void)
43420 panic("Couldn't register Unix98 pts driver");
43421
43422 /* Now create the /dev/ptmx special device */
43423+ pax_open_kernel();
43424 tty_default_fops(&ptmx_fops);
43425- ptmx_fops.open = ptmx_open;
43426+ *(void **)&ptmx_fops.open = ptmx_open;
43427+ pax_close_kernel();
43428
43429 cdev_init(&ptmx_cdev, &ptmx_fops);
43430 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
43431diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
43432index e42009a..566a036 100644
43433--- a/drivers/tty/rocket.c
43434+++ b/drivers/tty/rocket.c
43435@@ -925,7 +925,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
43436 tty->driver_data = info;
43437 tty_port_tty_set(port, tty);
43438
43439- if (port->count++ == 0) {
43440+ if (atomic_inc_return(&port->count) == 1) {
43441 atomic_inc(&rp_num_ports_open);
43442
43443 #ifdef ROCKET_DEBUG_OPEN
43444@@ -934,7 +934,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
43445 #endif
43446 }
43447 #ifdef ROCKET_DEBUG_OPEN
43448- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
43449+ printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic-read(&info->port.count));
43450 #endif
43451
43452 /*
43453@@ -1529,7 +1529,7 @@ static void rp_hangup(struct tty_struct *tty)
43454 spin_unlock_irqrestore(&info->port.lock, flags);
43455 return;
43456 }
43457- if (info->port.count)
43458+ if (atomic_read(&info->port.count))
43459 atomic_dec(&rp_num_ports_open);
43460 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
43461 spin_unlock_irqrestore(&info->port.lock, flags);
43462diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
43463index 1002054..dd644a8 100644
43464--- a/drivers/tty/serial/kgdboc.c
43465+++ b/drivers/tty/serial/kgdboc.c
43466@@ -24,8 +24,9 @@
43467 #define MAX_CONFIG_LEN 40
43468
43469 static struct kgdb_io kgdboc_io_ops;
43470+static struct kgdb_io kgdboc_io_ops_console;
43471
43472-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
43473+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
43474 static int configured = -1;
43475
43476 static char config[MAX_CONFIG_LEN];
43477@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
43478 kgdboc_unregister_kbd();
43479 if (configured == 1)
43480 kgdb_unregister_io_module(&kgdboc_io_ops);
43481+ else if (configured == 2)
43482+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
43483 }
43484
43485 static int configure_kgdboc(void)
43486@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
43487 int err;
43488 char *cptr = config;
43489 struct console *cons;
43490+ int is_console = 0;
43491
43492 err = kgdboc_option_setup(config);
43493 if (err || !strlen(config) || isspace(config[0]))
43494 goto noconfig;
43495
43496 err = -ENODEV;
43497- kgdboc_io_ops.is_console = 0;
43498 kgdb_tty_driver = NULL;
43499
43500 kgdboc_use_kms = 0;
43501@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
43502 int idx;
43503 if (cons->device && cons->device(cons, &idx) == p &&
43504 idx == tty_line) {
43505- kgdboc_io_ops.is_console = 1;
43506+ is_console = 1;
43507 break;
43508 }
43509 cons = cons->next;
43510@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
43511 kgdb_tty_line = tty_line;
43512
43513 do_register:
43514- err = kgdb_register_io_module(&kgdboc_io_ops);
43515+ if (is_console) {
43516+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
43517+ configured = 2;
43518+ } else {
43519+ err = kgdb_register_io_module(&kgdboc_io_ops);
43520+ configured = 1;
43521+ }
43522 if (err)
43523 goto noconfig;
43524
43525@@ -205,8 +214,6 @@ do_register:
43526 if (err)
43527 goto nmi_con_failed;
43528
43529- configured = 1;
43530-
43531 return 0;
43532
43533 nmi_con_failed:
43534@@ -223,7 +230,7 @@ noconfig:
43535 static int __init init_kgdboc(void)
43536 {
43537 /* Already configured? */
43538- if (configured == 1)
43539+ if (configured >= 1)
43540 return 0;
43541
43542 return configure_kgdboc();
43543@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
43544 if (config[len - 1] == '\n')
43545 config[len - 1] = '\0';
43546
43547- if (configured == 1)
43548+ if (configured >= 1)
43549 cleanup_kgdboc();
43550
43551 /* Go and configure with the new params. */
43552@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
43553 .post_exception = kgdboc_post_exp_handler,
43554 };
43555
43556+static struct kgdb_io kgdboc_io_ops_console = {
43557+ .name = "kgdboc",
43558+ .read_char = kgdboc_get_char,
43559+ .write_char = kgdboc_put_char,
43560+ .pre_exception = kgdboc_pre_exp_handler,
43561+ .post_exception = kgdboc_post_exp_handler,
43562+ .is_console = 1
43563+};
43564+
43565 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
43566 /* This is only available if kgdboc is a built in for early debugging */
43567 static int __init kgdboc_early_init(char *opt)
43568diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
43569index e514b3a..c73d614 100644
43570--- a/drivers/tty/serial/samsung.c
43571+++ b/drivers/tty/serial/samsung.c
43572@@ -453,11 +453,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
43573 }
43574 }
43575
43576+static int s3c64xx_serial_startup(struct uart_port *port);
43577 static int s3c24xx_serial_startup(struct uart_port *port)
43578 {
43579 struct s3c24xx_uart_port *ourport = to_ourport(port);
43580 int ret;
43581
43582+ /* Startup sequence is different for s3c64xx and higher SoC's */
43583+ if (s3c24xx_serial_has_interrupt_mask(port))
43584+ return s3c64xx_serial_startup(port);
43585+
43586 dbg("s3c24xx_serial_startup: port=%p (%08lx,%p)\n",
43587 port->mapbase, port->membase);
43588
43589@@ -1122,10 +1127,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
43590 /* setup info for port */
43591 port->dev = &platdev->dev;
43592
43593- /* Startup sequence is different for s3c64xx and higher SoC's */
43594- if (s3c24xx_serial_has_interrupt_mask(port))
43595- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
43596-
43597 port->uartclk = 1;
43598
43599 if (cfg->uart_flags & UPF_CONS_FLOW) {
43600diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
43601index 2c7230a..2104f16 100644
43602--- a/drivers/tty/serial/serial_core.c
43603+++ b/drivers/tty/serial/serial_core.c
43604@@ -1455,7 +1455,7 @@ static void uart_hangup(struct tty_struct *tty)
43605 uart_flush_buffer(tty);
43606 uart_shutdown(tty, state);
43607 spin_lock_irqsave(&port->lock, flags);
43608- port->count = 0;
43609+ atomic_set(&port->count, 0);
43610 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
43611 spin_unlock_irqrestore(&port->lock, flags);
43612 tty_port_tty_set(port, NULL);
43613@@ -1551,7 +1551,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
43614 goto end;
43615 }
43616
43617- port->count++;
43618+ atomic_inc(&port->count);
43619 if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
43620 retval = -ENXIO;
43621 goto err_dec_count;
43622@@ -1578,7 +1578,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
43623 /*
43624 * Make sure the device is in D0 state.
43625 */
43626- if (port->count == 1)
43627+ if (atomic_read(&port->count) == 1)
43628 uart_change_pm(state, 0);
43629
43630 /*
43631@@ -1596,7 +1596,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
43632 end:
43633 return retval;
43634 err_dec_count:
43635- port->count--;
43636+ atomic_inc(&port->count);
43637 mutex_unlock(&port->mutex);
43638 goto end;
43639 }
43640diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
43641index 9e071f6..f30ae69 100644
43642--- a/drivers/tty/synclink.c
43643+++ b/drivers/tty/synclink.c
43644@@ -3095,7 +3095,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
43645
43646 if (debug_level >= DEBUG_LEVEL_INFO)
43647 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
43648- __FILE__,__LINE__, info->device_name, info->port.count);
43649+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
43650
43651 if (tty_port_close_start(&info->port, tty, filp) == 0)
43652 goto cleanup;
43653@@ -3113,7 +3113,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
43654 cleanup:
43655 if (debug_level >= DEBUG_LEVEL_INFO)
43656 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
43657- tty->driver->name, info->port.count);
43658+ tty->driver->name, atomic_read(&info->port.count));
43659
43660 } /* end of mgsl_close() */
43661
43662@@ -3212,8 +3212,8 @@ static void mgsl_hangup(struct tty_struct *tty)
43663
43664 mgsl_flush_buffer(tty);
43665 shutdown(info);
43666-
43667- info->port.count = 0;
43668+
43669+ atomic_set(&info->port.count, 0);
43670 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
43671 info->port.tty = NULL;
43672
43673@@ -3302,12 +3302,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
43674
43675 if (debug_level >= DEBUG_LEVEL_INFO)
43676 printk("%s(%d):block_til_ready before block on %s count=%d\n",
43677- __FILE__,__LINE__, tty->driver->name, port->count );
43678+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
43679
43680 spin_lock_irqsave(&info->irq_spinlock, flags);
43681 if (!tty_hung_up_p(filp)) {
43682 extra_count = true;
43683- port->count--;
43684+ atomic_dec(&port->count);
43685 }
43686 spin_unlock_irqrestore(&info->irq_spinlock, flags);
43687 port->blocked_open++;
43688@@ -3336,7 +3336,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
43689
43690 if (debug_level >= DEBUG_LEVEL_INFO)
43691 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
43692- __FILE__,__LINE__, tty->driver->name, port->count );
43693+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
43694
43695 tty_unlock(tty);
43696 schedule();
43697@@ -3348,12 +3348,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
43698
43699 /* FIXME: Racy on hangup during close wait */
43700 if (extra_count)
43701- port->count++;
43702+ atomic_inc(&port->count);
43703 port->blocked_open--;
43704
43705 if (debug_level >= DEBUG_LEVEL_INFO)
43706 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
43707- __FILE__,__LINE__, tty->driver->name, port->count );
43708+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
43709
43710 if (!retval)
43711 port->flags |= ASYNC_NORMAL_ACTIVE;
43712@@ -3405,7 +3405,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
43713
43714 if (debug_level >= DEBUG_LEVEL_INFO)
43715 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
43716- __FILE__,__LINE__,tty->driver->name, info->port.count);
43717+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
43718
43719 /* If port is closing, signal caller to try again */
43720 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
43721@@ -3424,10 +3424,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
43722 spin_unlock_irqrestore(&info->netlock, flags);
43723 goto cleanup;
43724 }
43725- info->port.count++;
43726+ atomic_inc(&info->port.count);
43727 spin_unlock_irqrestore(&info->netlock, flags);
43728
43729- if (info->port.count == 1) {
43730+ if (atomic_read(&info->port.count) == 1) {
43731 /* 1st open on this device, init hardware */
43732 retval = startup(info);
43733 if (retval < 0)
43734@@ -3451,8 +3451,8 @@ cleanup:
43735 if (retval) {
43736 if (tty->count == 1)
43737 info->port.tty = NULL; /* tty layer will release tty struct */
43738- if(info->port.count)
43739- info->port.count--;
43740+ if (atomic_read(&info->port.count))
43741+ atomic_dec(&info->port.count);
43742 }
43743
43744 return retval;
43745@@ -7662,7 +7662,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
43746 unsigned short new_crctype;
43747
43748 /* return error if TTY interface open */
43749- if (info->port.count)
43750+ if (atomic_read(&info->port.count))
43751 return -EBUSY;
43752
43753 switch (encoding)
43754@@ -7757,7 +7757,7 @@ static int hdlcdev_open(struct net_device *dev)
43755
43756 /* arbitrate between network and tty opens */
43757 spin_lock_irqsave(&info->netlock, flags);
43758- if (info->port.count != 0 || info->netcount != 0) {
43759+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
43760 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
43761 spin_unlock_irqrestore(&info->netlock, flags);
43762 return -EBUSY;
43763@@ -7843,7 +7843,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
43764 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
43765
43766 /* return error if TTY interface open */
43767- if (info->port.count)
43768+ if (atomic_read(&info->port.count))
43769 return -EBUSY;
43770
43771 if (cmd != SIOCWANDEV)
43772diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
43773index aba1e59..877ac33 100644
43774--- a/drivers/tty/synclink_gt.c
43775+++ b/drivers/tty/synclink_gt.c
43776@@ -671,7 +671,7 @@ static int open(struct tty_struct *tty, struct file *filp)
43777 tty->driver_data = info;
43778 info->port.tty = tty;
43779
43780- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
43781+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
43782
43783 /* If port is closing, signal caller to try again */
43784 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
43785@@ -692,10 +692,10 @@ static int open(struct tty_struct *tty, struct file *filp)
43786 mutex_unlock(&info->port.mutex);
43787 goto cleanup;
43788 }
43789- info->port.count++;
43790+ atomic_inc(&info->port.count);
43791 spin_unlock_irqrestore(&info->netlock, flags);
43792
43793- if (info->port.count == 1) {
43794+ if (atomic_read(&info->port.count) == 1) {
43795 /* 1st open on this device, init hardware */
43796 retval = startup(info);
43797 if (retval < 0) {
43798@@ -716,8 +716,8 @@ cleanup:
43799 if (retval) {
43800 if (tty->count == 1)
43801 info->port.tty = NULL; /* tty layer will release tty struct */
43802- if(info->port.count)
43803- info->port.count--;
43804+ if(atomic_read(&info->port.count))
43805+ atomic_dec(&info->port.count);
43806 }
43807
43808 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
43809@@ -730,7 +730,7 @@ static void close(struct tty_struct *tty, struct file *filp)
43810
43811 if (sanity_check(info, tty->name, "close"))
43812 return;
43813- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
43814+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
43815
43816 if (tty_port_close_start(&info->port, tty, filp) == 0)
43817 goto cleanup;
43818@@ -747,7 +747,7 @@ static void close(struct tty_struct *tty, struct file *filp)
43819 tty_port_close_end(&info->port, tty);
43820 info->port.tty = NULL;
43821 cleanup:
43822- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
43823+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
43824 }
43825
43826 static void hangup(struct tty_struct *tty)
43827@@ -765,7 +765,7 @@ static void hangup(struct tty_struct *tty)
43828 shutdown(info);
43829
43830 spin_lock_irqsave(&info->port.lock, flags);
43831- info->port.count = 0;
43832+ atomic_set(&info->port.count, 0);
43833 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
43834 info->port.tty = NULL;
43835 spin_unlock_irqrestore(&info->port.lock, flags);
43836@@ -1450,7 +1450,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
43837 unsigned short new_crctype;
43838
43839 /* return error if TTY interface open */
43840- if (info->port.count)
43841+ if (atomic_read(&info->port.count))
43842 return -EBUSY;
43843
43844 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
43845@@ -1545,7 +1545,7 @@ static int hdlcdev_open(struct net_device *dev)
43846
43847 /* arbitrate between network and tty opens */
43848 spin_lock_irqsave(&info->netlock, flags);
43849- if (info->port.count != 0 || info->netcount != 0) {
43850+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
43851 DBGINFO(("%s hdlc_open busy\n", dev->name));
43852 spin_unlock_irqrestore(&info->netlock, flags);
43853 return -EBUSY;
43854@@ -1630,7 +1630,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
43855 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
43856
43857 /* return error if TTY interface open */
43858- if (info->port.count)
43859+ if (atomic_read(&info->port.count))
43860 return -EBUSY;
43861
43862 if (cmd != SIOCWANDEV)
43863@@ -2419,7 +2419,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
43864 if (port == NULL)
43865 continue;
43866 spin_lock(&port->lock);
43867- if ((port->port.count || port->netcount) &&
43868+ if ((atomic_read(&port->port.count) || port->netcount) &&
43869 port->pending_bh && !port->bh_running &&
43870 !port->bh_requested) {
43871 DBGISR(("%s bh queued\n", port->device_name));
43872@@ -3308,7 +3308,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
43873 spin_lock_irqsave(&info->lock, flags);
43874 if (!tty_hung_up_p(filp)) {
43875 extra_count = true;
43876- port->count--;
43877+ atomic_dec(&port->count);
43878 }
43879 spin_unlock_irqrestore(&info->lock, flags);
43880 port->blocked_open++;
43881@@ -3345,7 +3345,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
43882 remove_wait_queue(&port->open_wait, &wait);
43883
43884 if (extra_count)
43885- port->count++;
43886+ atomic_inc(&port->count);
43887 port->blocked_open--;
43888
43889 if (!retval)
43890diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
43891index fd43fb6..34704ad 100644
43892--- a/drivers/tty/synclinkmp.c
43893+++ b/drivers/tty/synclinkmp.c
43894@@ -751,7 +751,7 @@ static int open(struct tty_struct *tty, struct file *filp)
43895
43896 if (debug_level >= DEBUG_LEVEL_INFO)
43897 printk("%s(%d):%s open(), old ref count = %d\n",
43898- __FILE__,__LINE__,tty->driver->name, info->port.count);
43899+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
43900
43901 /* If port is closing, signal caller to try again */
43902 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
43903@@ -770,10 +770,10 @@ static int open(struct tty_struct *tty, struct file *filp)
43904 spin_unlock_irqrestore(&info->netlock, flags);
43905 goto cleanup;
43906 }
43907- info->port.count++;
43908+ atomic_inc(&info->port.count);
43909 spin_unlock_irqrestore(&info->netlock, flags);
43910
43911- if (info->port.count == 1) {
43912+ if (atomic_read(&info->port.count) == 1) {
43913 /* 1st open on this device, init hardware */
43914 retval = startup(info);
43915 if (retval < 0)
43916@@ -797,8 +797,8 @@ cleanup:
43917 if (retval) {
43918 if (tty->count == 1)
43919 info->port.tty = NULL; /* tty layer will release tty struct */
43920- if(info->port.count)
43921- info->port.count--;
43922+ if(atomic_read(&info->port.count))
43923+ atomic_dec(&info->port.count);
43924 }
43925
43926 return retval;
43927@@ -816,7 +816,7 @@ static void close(struct tty_struct *tty, struct file *filp)
43928
43929 if (debug_level >= DEBUG_LEVEL_INFO)
43930 printk("%s(%d):%s close() entry, count=%d\n",
43931- __FILE__,__LINE__, info->device_name, info->port.count);
43932+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
43933
43934 if (tty_port_close_start(&info->port, tty, filp) == 0)
43935 goto cleanup;
43936@@ -835,7 +835,7 @@ static void close(struct tty_struct *tty, struct file *filp)
43937 cleanup:
43938 if (debug_level >= DEBUG_LEVEL_INFO)
43939 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
43940- tty->driver->name, info->port.count);
43941+ tty->driver->name, atomic_read(&info->port.count));
43942 }
43943
43944 /* Called by tty_hangup() when a hangup is signaled.
43945@@ -858,7 +858,7 @@ static void hangup(struct tty_struct *tty)
43946 shutdown(info);
43947
43948 spin_lock_irqsave(&info->port.lock, flags);
43949- info->port.count = 0;
43950+ atomic_set(&info->port.count, 0);
43951 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
43952 info->port.tty = NULL;
43953 spin_unlock_irqrestore(&info->port.lock, flags);
43954@@ -1566,7 +1566,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
43955 unsigned short new_crctype;
43956
43957 /* return error if TTY interface open */
43958- if (info->port.count)
43959+ if (atomic_read(&info->port.count))
43960 return -EBUSY;
43961
43962 switch (encoding)
43963@@ -1661,7 +1661,7 @@ static int hdlcdev_open(struct net_device *dev)
43964
43965 /* arbitrate between network and tty opens */
43966 spin_lock_irqsave(&info->netlock, flags);
43967- if (info->port.count != 0 || info->netcount != 0) {
43968+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
43969 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
43970 spin_unlock_irqrestore(&info->netlock, flags);
43971 return -EBUSY;
43972@@ -1747,7 +1747,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
43973 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
43974
43975 /* return error if TTY interface open */
43976- if (info->port.count)
43977+ if (atomic_read(&info->port.count))
43978 return -EBUSY;
43979
43980 if (cmd != SIOCWANDEV)
43981@@ -2632,7 +2632,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
43982 * do not request bottom half processing if the
43983 * device is not open in a normal mode.
43984 */
43985- if ( port && (port->port.count || port->netcount) &&
43986+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
43987 port->pending_bh && !port->bh_running &&
43988 !port->bh_requested ) {
43989 if ( debug_level >= DEBUG_LEVEL_ISR )
43990@@ -3330,12 +3330,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
43991
43992 if (debug_level >= DEBUG_LEVEL_INFO)
43993 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
43994- __FILE__,__LINE__, tty->driver->name, port->count );
43995+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
43996
43997 spin_lock_irqsave(&info->lock, flags);
43998 if (!tty_hung_up_p(filp)) {
43999 extra_count = true;
44000- port->count--;
44001+ atomic_dec(&port->count);
44002 }
44003 spin_unlock_irqrestore(&info->lock, flags);
44004 port->blocked_open++;
44005@@ -3364,7 +3364,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
44006
44007 if (debug_level >= DEBUG_LEVEL_INFO)
44008 printk("%s(%d):%s block_til_ready() count=%d\n",
44009- __FILE__,__LINE__, tty->driver->name, port->count );
44010+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
44011
44012 tty_unlock(tty);
44013 schedule();
44014@@ -3375,12 +3375,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
44015 remove_wait_queue(&port->open_wait, &wait);
44016
44017 if (extra_count)
44018- port->count++;
44019+ atomic_inc(&port->count);
44020 port->blocked_open--;
44021
44022 if (debug_level >= DEBUG_LEVEL_INFO)
44023 printk("%s(%d):%s block_til_ready() after, count=%d\n",
44024- __FILE__,__LINE__, tty->driver->name, port->count );
44025+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
44026
44027 if (!retval)
44028 port->flags |= ASYNC_NORMAL_ACTIVE;
44029diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
44030index b3c4a25..723916f 100644
44031--- a/drivers/tty/sysrq.c
44032+++ b/drivers/tty/sysrq.c
44033@@ -867,7 +867,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
44034 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
44035 size_t count, loff_t *ppos)
44036 {
44037- if (count) {
44038+ if (count && capable(CAP_SYS_ADMIN)) {
44039 char c;
44040
44041 if (get_user(c, buf))
44042diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
44043index da9fde8..c07975f 100644
44044--- a/drivers/tty/tty_io.c
44045+++ b/drivers/tty/tty_io.c
44046@@ -3391,7 +3391,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
44047
44048 void tty_default_fops(struct file_operations *fops)
44049 {
44050- *fops = tty_fops;
44051+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
44052 }
44053
44054 /*
44055diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
44056index c578229..45aa9ee 100644
44057--- a/drivers/tty/tty_ldisc.c
44058+++ b/drivers/tty/tty_ldisc.c
44059@@ -56,7 +56,7 @@ static void put_ldisc(struct tty_ldisc *ld)
44060 if (atomic_dec_and_test(&ld->users)) {
44061 struct tty_ldisc_ops *ldo = ld->ops;
44062
44063- ldo->refcount--;
44064+ atomic_dec(&ldo->refcount);
44065 module_put(ldo->owner);
44066 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
44067
44068@@ -91,7 +91,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
44069 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
44070 tty_ldiscs[disc] = new_ldisc;
44071 new_ldisc->num = disc;
44072- new_ldisc->refcount = 0;
44073+ atomic_set(&new_ldisc->refcount, 0);
44074 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
44075
44076 return ret;
44077@@ -119,7 +119,7 @@ int tty_unregister_ldisc(int disc)
44078 return -EINVAL;
44079
44080 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
44081- if (tty_ldiscs[disc]->refcount)
44082+ if (atomic_read(&tty_ldiscs[disc]->refcount))
44083 ret = -EBUSY;
44084 else
44085 tty_ldiscs[disc] = NULL;
44086@@ -140,7 +140,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
44087 if (ldops) {
44088 ret = ERR_PTR(-EAGAIN);
44089 if (try_module_get(ldops->owner)) {
44090- ldops->refcount++;
44091+ atomic_inc(&ldops->refcount);
44092 ret = ldops;
44093 }
44094 }
44095@@ -153,7 +153,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
44096 unsigned long flags;
44097
44098 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
44099- ldops->refcount--;
44100+ atomic_dec(&ldops->refcount);
44101 module_put(ldops->owner);
44102 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
44103 }
44104diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
44105index b7ff59d..7c6105e 100644
44106--- a/drivers/tty/tty_port.c
44107+++ b/drivers/tty/tty_port.c
44108@@ -218,7 +218,7 @@ void tty_port_hangup(struct tty_port *port)
44109 unsigned long flags;
44110
44111 spin_lock_irqsave(&port->lock, flags);
44112- port->count = 0;
44113+ atomic_set(&port->count, 0);
44114 port->flags &= ~ASYNC_NORMAL_ACTIVE;
44115 if (port->tty) {
44116 set_bit(TTY_IO_ERROR, &port->tty->flags);
44117@@ -344,7 +344,7 @@ int tty_port_block_til_ready(struct tty_port *port,
44118 /* The port lock protects the port counts */
44119 spin_lock_irqsave(&port->lock, flags);
44120 if (!tty_hung_up_p(filp))
44121- port->count--;
44122+ atomic_dec(&port->count);
44123 port->blocked_open++;
44124 spin_unlock_irqrestore(&port->lock, flags);
44125
44126@@ -386,7 +386,7 @@ int tty_port_block_til_ready(struct tty_port *port,
44127 we must not mess that up further */
44128 spin_lock_irqsave(&port->lock, flags);
44129 if (!tty_hung_up_p(filp))
44130- port->count++;
44131+ atomic_inc(&port->count);
44132 port->blocked_open--;
44133 if (retval == 0)
44134 port->flags |= ASYNC_NORMAL_ACTIVE;
44135@@ -406,19 +406,19 @@ int tty_port_close_start(struct tty_port *port,
44136 return 0;
44137 }
44138
44139- if (tty->count == 1 && port->count != 1) {
44140+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
44141 printk(KERN_WARNING
44142 "tty_port_close_start: tty->count = 1 port count = %d.\n",
44143- port->count);
44144- port->count = 1;
44145+ atomic_read(&port->count));
44146+ atomic_set(&port->count, 1);
44147 }
44148- if (--port->count < 0) {
44149+ if (atomic_dec_return(&port->count) < 0) {
44150 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
44151- port->count);
44152- port->count = 0;
44153+ atomic_read(&port->count));
44154+ atomic_set(&port->count, 0);
44155 }
44156
44157- if (port->count) {
44158+ if (atomic_read(&port->count)) {
44159 spin_unlock_irqrestore(&port->lock, flags);
44160 if (port->ops->drop)
44161 port->ops->drop(port);
44162@@ -516,7 +516,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
44163 {
44164 spin_lock_irq(&port->lock);
44165 if (!tty_hung_up_p(filp))
44166- ++port->count;
44167+ atomic_inc(&port->count);
44168 spin_unlock_irq(&port->lock);
44169 tty_port_tty_set(port, tty);
44170
44171diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
44172index 681765b..d3ccdf2 100644
44173--- a/drivers/tty/vt/keyboard.c
44174+++ b/drivers/tty/vt/keyboard.c
44175@@ -660,6 +660,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
44176 kbd->kbdmode == VC_OFF) &&
44177 value != KVAL(K_SAK))
44178 return; /* SAK is allowed even in raw mode */
44179+
44180+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
44181+ {
44182+ void *func = fn_handler[value];
44183+ if (func == fn_show_state || func == fn_show_ptregs ||
44184+ func == fn_show_mem)
44185+ return;
44186+ }
44187+#endif
44188+
44189 fn_handler[value](vc);
44190 }
44191
44192@@ -1808,9 +1818,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
44193 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
44194 return -EFAULT;
44195
44196- if (!capable(CAP_SYS_TTY_CONFIG))
44197- perm = 0;
44198-
44199 switch (cmd) {
44200 case KDGKBENT:
44201 /* Ensure another thread doesn't free it under us */
44202@@ -1825,6 +1832,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
44203 spin_unlock_irqrestore(&kbd_event_lock, flags);
44204 return put_user(val, &user_kbe->kb_value);
44205 case KDSKBENT:
44206+ if (!capable(CAP_SYS_TTY_CONFIG))
44207+ perm = 0;
44208+
44209 if (!perm)
44210 return -EPERM;
44211 if (!i && v == K_NOSUCHMAP) {
44212@@ -1915,9 +1925,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
44213 int i, j, k;
44214 int ret;
44215
44216- if (!capable(CAP_SYS_TTY_CONFIG))
44217- perm = 0;
44218-
44219 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
44220 if (!kbs) {
44221 ret = -ENOMEM;
44222@@ -1951,6 +1958,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
44223 kfree(kbs);
44224 return ((p && *p) ? -EOVERFLOW : 0);
44225 case KDSKBSENT:
44226+ if (!capable(CAP_SYS_TTY_CONFIG))
44227+ perm = 0;
44228+
44229 if (!perm) {
44230 ret = -EPERM;
44231 goto reterr;
44232diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
44233index 5110f36..8dc0a74 100644
44234--- a/drivers/uio/uio.c
44235+++ b/drivers/uio/uio.c
44236@@ -25,6 +25,7 @@
44237 #include <linux/kobject.h>
44238 #include <linux/cdev.h>
44239 #include <linux/uio_driver.h>
44240+#include <asm/local.h>
44241
44242 #define UIO_MAX_DEVICES (1U << MINORBITS)
44243
44244@@ -32,10 +33,10 @@ struct uio_device {
44245 struct module *owner;
44246 struct device *dev;
44247 int minor;
44248- atomic_t event;
44249+ atomic_unchecked_t event;
44250 struct fasync_struct *async_queue;
44251 wait_queue_head_t wait;
44252- int vma_count;
44253+ local_t vma_count;
44254 struct uio_info *info;
44255 struct kobject *map_dir;
44256 struct kobject *portio_dir;
44257@@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
44258 struct device_attribute *attr, char *buf)
44259 {
44260 struct uio_device *idev = dev_get_drvdata(dev);
44261- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
44262+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
44263 }
44264
44265 static struct device_attribute uio_class_attributes[] = {
44266@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
44267 {
44268 struct uio_device *idev = info->uio_dev;
44269
44270- atomic_inc(&idev->event);
44271+ atomic_inc_unchecked(&idev->event);
44272 wake_up_interruptible(&idev->wait);
44273 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
44274 }
44275@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
44276 }
44277
44278 listener->dev = idev;
44279- listener->event_count = atomic_read(&idev->event);
44280+ listener->event_count = atomic_read_unchecked(&idev->event);
44281 filep->private_data = listener;
44282
44283 if (idev->info->open) {
44284@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
44285 return -EIO;
44286
44287 poll_wait(filep, &idev->wait, wait);
44288- if (listener->event_count != atomic_read(&idev->event))
44289+ if (listener->event_count != atomic_read_unchecked(&idev->event))
44290 return POLLIN | POLLRDNORM;
44291 return 0;
44292 }
44293@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
44294 do {
44295 set_current_state(TASK_INTERRUPTIBLE);
44296
44297- event_count = atomic_read(&idev->event);
44298+ event_count = atomic_read_unchecked(&idev->event);
44299 if (event_count != listener->event_count) {
44300 if (copy_to_user(buf, &event_count, count))
44301 retval = -EFAULT;
44302@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
44303 static void uio_vma_open(struct vm_area_struct *vma)
44304 {
44305 struct uio_device *idev = vma->vm_private_data;
44306- idev->vma_count++;
44307+ local_inc(&idev->vma_count);
44308 }
44309
44310 static void uio_vma_close(struct vm_area_struct *vma)
44311 {
44312 struct uio_device *idev = vma->vm_private_data;
44313- idev->vma_count--;
44314+ local_dec(&idev->vma_count);
44315 }
44316
44317 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
44318@@ -819,7 +820,7 @@ int __uio_register_device(struct module *owner,
44319 idev->owner = owner;
44320 idev->info = info;
44321 init_waitqueue_head(&idev->wait);
44322- atomic_set(&idev->event, 0);
44323+ atomic_set_unchecked(&idev->event, 0);
44324
44325 ret = uio_get_minor(idev);
44326 if (ret)
44327diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
44328index b7eb86a..36d28af 100644
44329--- a/drivers/usb/atm/cxacru.c
44330+++ b/drivers/usb/atm/cxacru.c
44331@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
44332 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
44333 if (ret < 2)
44334 return -EINVAL;
44335- if (index < 0 || index > 0x7f)
44336+ if (index > 0x7f)
44337 return -EINVAL;
44338 pos += tmp;
44339
44340diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
44341index 35f10bf..6a38a0b 100644
44342--- a/drivers/usb/atm/usbatm.c
44343+++ b/drivers/usb/atm/usbatm.c
44344@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
44345 if (printk_ratelimit())
44346 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
44347 __func__, vpi, vci);
44348- atomic_inc(&vcc->stats->rx_err);
44349+ atomic_inc_unchecked(&vcc->stats->rx_err);
44350 return;
44351 }
44352
44353@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
44354 if (length > ATM_MAX_AAL5_PDU) {
44355 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
44356 __func__, length, vcc);
44357- atomic_inc(&vcc->stats->rx_err);
44358+ atomic_inc_unchecked(&vcc->stats->rx_err);
44359 goto out;
44360 }
44361
44362@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
44363 if (sarb->len < pdu_length) {
44364 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
44365 __func__, pdu_length, sarb->len, vcc);
44366- atomic_inc(&vcc->stats->rx_err);
44367+ atomic_inc_unchecked(&vcc->stats->rx_err);
44368 goto out;
44369 }
44370
44371 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
44372 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
44373 __func__, vcc);
44374- atomic_inc(&vcc->stats->rx_err);
44375+ atomic_inc_unchecked(&vcc->stats->rx_err);
44376 goto out;
44377 }
44378
44379@@ -389,7 +389,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
44380 if (printk_ratelimit())
44381 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
44382 __func__, length);
44383- atomic_inc(&vcc->stats->rx_drop);
44384+ atomic_inc_unchecked(&vcc->stats->rx_drop);
44385 goto out;
44386 }
44387
44388@@ -417,7 +417,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
44389
44390 vcc->push(vcc, skb);
44391
44392- atomic_inc(&vcc->stats->rx);
44393+ atomic_inc_unchecked(&vcc->stats->rx);
44394 out:
44395 skb_trim(sarb, 0);
44396 }
44397@@ -623,7 +623,7 @@ static void usbatm_tx_process(unsigned long data)
44398 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
44399
44400 usbatm_pop(vcc, skb);
44401- atomic_inc(&vcc->stats->tx);
44402+ atomic_inc_unchecked(&vcc->stats->tx);
44403
44404 skb = skb_dequeue(&instance->sndqueue);
44405 }
44406@@ -779,11 +779,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
44407 if (!left--)
44408 return sprintf(page,
44409 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
44410- atomic_read(&atm_dev->stats.aal5.tx),
44411- atomic_read(&atm_dev->stats.aal5.tx_err),
44412- atomic_read(&atm_dev->stats.aal5.rx),
44413- atomic_read(&atm_dev->stats.aal5.rx_err),
44414- atomic_read(&atm_dev->stats.aal5.rx_drop));
44415+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
44416+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
44417+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
44418+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
44419+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
44420
44421 if (!left--) {
44422 if (instance->disconnected)
44423diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
44424index cbacea9..246cccd 100644
44425--- a/drivers/usb/core/devices.c
44426+++ b/drivers/usb/core/devices.c
44427@@ -126,7 +126,7 @@ static const char format_endpt[] =
44428 * time it gets called.
44429 */
44430 static struct device_connect_event {
44431- atomic_t count;
44432+ atomic_unchecked_t count;
44433 wait_queue_head_t wait;
44434 } device_event = {
44435 .count = ATOMIC_INIT(1),
44436@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
44437
44438 void usbfs_conn_disc_event(void)
44439 {
44440- atomic_add(2, &device_event.count);
44441+ atomic_add_unchecked(2, &device_event.count);
44442 wake_up(&device_event.wait);
44443 }
44444
44445@@ -645,7 +645,7 @@ static unsigned int usb_device_poll(struct file *file,
44446
44447 poll_wait(file, &device_event.wait, wait);
44448
44449- event_count = atomic_read(&device_event.count);
44450+ event_count = atomic_read_unchecked(&device_event.count);
44451 if (file->f_version != event_count) {
44452 file->f_version = event_count;
44453 return POLLIN | POLLRDNORM;
44454diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
44455index 8e64adf..9a33a3c 100644
44456--- a/drivers/usb/core/hcd.c
44457+++ b/drivers/usb/core/hcd.c
44458@@ -1522,7 +1522,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
44459 */
44460 usb_get_urb(urb);
44461 atomic_inc(&urb->use_count);
44462- atomic_inc(&urb->dev->urbnum);
44463+ atomic_inc_unchecked(&urb->dev->urbnum);
44464 usbmon_urb_submit(&hcd->self, urb);
44465
44466 /* NOTE requirements on root-hub callers (usbfs and the hub
44467@@ -1549,7 +1549,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
44468 urb->hcpriv = NULL;
44469 INIT_LIST_HEAD(&urb->urb_list);
44470 atomic_dec(&urb->use_count);
44471- atomic_dec(&urb->dev->urbnum);
44472+ atomic_dec_unchecked(&urb->dev->urbnum);
44473 if (atomic_read(&urb->reject))
44474 wake_up(&usb_kill_urb_queue);
44475 usb_put_urb(urb);
44476diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
44477index 131f736..99004c3 100644
44478--- a/drivers/usb/core/message.c
44479+++ b/drivers/usb/core/message.c
44480@@ -129,7 +129,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
44481 * method can wait for it to complete. Since you don't have a handle on the
44482 * URB used, you can't cancel the request.
44483 */
44484-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
44485+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
44486 __u8 requesttype, __u16 value, __u16 index, void *data,
44487 __u16 size, int timeout)
44488 {
44489diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
44490index 818e4a0..0fc9589 100644
44491--- a/drivers/usb/core/sysfs.c
44492+++ b/drivers/usb/core/sysfs.c
44493@@ -226,7 +226,7 @@ show_urbnum(struct device *dev, struct device_attribute *attr, char *buf)
44494 struct usb_device *udev;
44495
44496 udev = to_usb_device(dev);
44497- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
44498+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
44499 }
44500 static DEVICE_ATTR(urbnum, S_IRUGO, show_urbnum, NULL);
44501
44502diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
44503index f81b925..78d22ec 100644
44504--- a/drivers/usb/core/usb.c
44505+++ b/drivers/usb/core/usb.c
44506@@ -388,7 +388,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
44507 set_dev_node(&dev->dev, dev_to_node(bus->controller));
44508 dev->state = USB_STATE_ATTACHED;
44509 dev->lpm_disable_count = 1;
44510- atomic_set(&dev->urbnum, 0);
44511+ atomic_set_unchecked(&dev->urbnum, 0);
44512
44513 INIT_LIST_HEAD(&dev->ep0.urb_list);
44514 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
44515diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
44516index 5e29dde..eca992f 100644
44517--- a/drivers/usb/early/ehci-dbgp.c
44518+++ b/drivers/usb/early/ehci-dbgp.c
44519@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
44520
44521 #ifdef CONFIG_KGDB
44522 static struct kgdb_io kgdbdbgp_io_ops;
44523-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
44524+static struct kgdb_io kgdbdbgp_io_ops_console;
44525+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
44526 #else
44527 #define dbgp_kgdb_mode (0)
44528 #endif
44529@@ -1047,6 +1048,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
44530 .write_char = kgdbdbgp_write_char,
44531 };
44532
44533+static struct kgdb_io kgdbdbgp_io_ops_console = {
44534+ .name = "kgdbdbgp",
44535+ .read_char = kgdbdbgp_read_char,
44536+ .write_char = kgdbdbgp_write_char,
44537+ .is_console = 1
44538+};
44539+
44540 static int kgdbdbgp_wait_time;
44541
44542 static int __init kgdbdbgp_parse_config(char *str)
44543@@ -1062,8 +1070,10 @@ static int __init kgdbdbgp_parse_config(char *str)
44544 ptr++;
44545 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
44546 }
44547- kgdb_register_io_module(&kgdbdbgp_io_ops);
44548- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
44549+ if (early_dbgp_console.index != -1)
44550+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
44551+ else
44552+ kgdb_register_io_module(&kgdbdbgp_io_ops);
44553
44554 return 0;
44555 }
44556diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
44557index 598dcc1..032dd4f 100644
44558--- a/drivers/usb/gadget/u_serial.c
44559+++ b/drivers/usb/gadget/u_serial.c
44560@@ -735,9 +735,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
44561 spin_lock_irq(&port->port_lock);
44562
44563 /* already open? Great. */
44564- if (port->port.count) {
44565+ if (atomic_read(&port->port.count)) {
44566 status = 0;
44567- port->port.count++;
44568+ atomic_inc(&port->port.count);
44569
44570 /* currently opening/closing? wait ... */
44571 } else if (port->openclose) {
44572@@ -796,7 +796,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
44573 tty->driver_data = port;
44574 port->port.tty = tty;
44575
44576- port->port.count = 1;
44577+ atomic_set(&port->port.count, 1);
44578 port->openclose = false;
44579
44580 /* if connected, start the I/O stream */
44581@@ -838,11 +838,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
44582
44583 spin_lock_irq(&port->port_lock);
44584
44585- if (port->port.count != 1) {
44586- if (port->port.count == 0)
44587+ if (atomic_read(&port->port.count) != 1) {
44588+ if (atomic_read(&port->port.count) == 0)
44589 WARN_ON(1);
44590 else
44591- --port->port.count;
44592+ atomic_dec(&port->port.count);
44593 goto exit;
44594 }
44595
44596@@ -852,7 +852,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
44597 * and sleep if necessary
44598 */
44599 port->openclose = true;
44600- port->port.count = 0;
44601+ atomic_set(&port->port.count, 0);
44602
44603 gser = port->port_usb;
44604 if (gser && gser->disconnect)
44605@@ -1159,7 +1159,7 @@ static int gs_closed(struct gs_port *port)
44606 int cond;
44607
44608 spin_lock_irq(&port->port_lock);
44609- cond = (port->port.count == 0) && !port->openclose;
44610+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
44611 spin_unlock_irq(&port->port_lock);
44612 return cond;
44613 }
44614@@ -1273,7 +1273,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
44615 /* if it's already open, start I/O ... and notify the serial
44616 * protocol about open/close status (connect/disconnect).
44617 */
44618- if (port->port.count) {
44619+ if (atomic_read(&port->port.count)) {
44620 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
44621 gs_start_io(port);
44622 if (gser->connect)
44623@@ -1320,7 +1320,7 @@ void gserial_disconnect(struct gserial *gser)
44624
44625 port->port_usb = NULL;
44626 gser->ioport = NULL;
44627- if (port->port.count > 0 || port->openclose) {
44628+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
44629 wake_up_interruptible(&port->drain_wait);
44630 if (port->port.tty)
44631 tty_hangup(port->port.tty);
44632@@ -1336,7 +1336,7 @@ void gserial_disconnect(struct gserial *gser)
44633
44634 /* finally, free any unused/unusable I/O buffers */
44635 spin_lock_irqsave(&port->port_lock, flags);
44636- if (port->port.count == 0 && !port->openclose)
44637+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
44638 gs_buf_free(&port->port_write_buf);
44639 gs_free_requests(gser->out, &port->read_pool, NULL);
44640 gs_free_requests(gser->out, &port->read_queue, NULL);
44641diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
44642index 5f3bcd3..bfca43f 100644
44643--- a/drivers/usb/serial/console.c
44644+++ b/drivers/usb/serial/console.c
44645@@ -124,7 +124,7 @@ static int usb_console_setup(struct console *co, char *options)
44646
44647 info->port = port;
44648
44649- ++port->port.count;
44650+ atomic_inc(&port->port.count);
44651 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
44652 if (serial->type->set_termios) {
44653 /*
44654@@ -174,7 +174,7 @@ static int usb_console_setup(struct console *co, char *options)
44655 }
44656 /* Now that any required fake tty operations are completed restore
44657 * the tty port count */
44658- --port->port.count;
44659+ atomic_dec(&port->port.count);
44660 /* The console is special in terms of closing the device so
44661 * indicate this port is now acting as a system console. */
44662 port->port.console = 1;
44663@@ -187,7 +187,7 @@ static int usb_console_setup(struct console *co, char *options)
44664 free_tty:
44665 kfree(tty);
44666 reset_open_count:
44667- port->port.count = 0;
44668+ atomic_set(&port->port.count, 0);
44669 usb_autopm_put_interface(serial->interface);
44670 error_get_interface:
44671 usb_serial_put(serial);
44672diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
44673index 6c3586a..a94e621 100644
44674--- a/drivers/usb/storage/realtek_cr.c
44675+++ b/drivers/usb/storage/realtek_cr.c
44676@@ -429,7 +429,7 @@ static int rts51x_read_status(struct us_data *us,
44677
44678 buf = kmalloc(len, GFP_NOIO);
44679 if (buf == NULL)
44680- return USB_STOR_TRANSPORT_ERROR;
44681+ return -ENOMEM;
44682
44683 US_DEBUGP("%s, lun = %d\n", __func__, lun);
44684
44685diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
44686index 75f70f0..d467e1a 100644
44687--- a/drivers/usb/storage/usb.h
44688+++ b/drivers/usb/storage/usb.h
44689@@ -63,7 +63,7 @@ struct us_unusual_dev {
44690 __u8 useProtocol;
44691 __u8 useTransport;
44692 int (*initFunction)(struct us_data *);
44693-};
44694+} __do_const;
44695
44696
44697 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
44698diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
44699index d6bea3e..60b250e 100644
44700--- a/drivers/usb/wusbcore/wa-hc.h
44701+++ b/drivers/usb/wusbcore/wa-hc.h
44702@@ -192,7 +192,7 @@ struct wahc {
44703 struct list_head xfer_delayed_list;
44704 spinlock_t xfer_list_lock;
44705 struct work_struct xfer_work;
44706- atomic_t xfer_id_count;
44707+ atomic_unchecked_t xfer_id_count;
44708 };
44709
44710
44711@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
44712 INIT_LIST_HEAD(&wa->xfer_delayed_list);
44713 spin_lock_init(&wa->xfer_list_lock);
44714 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
44715- atomic_set(&wa->xfer_id_count, 1);
44716+ atomic_set_unchecked(&wa->xfer_id_count, 1);
44717 }
44718
44719 /**
44720diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
44721index 57c01ab..8a05959 100644
44722--- a/drivers/usb/wusbcore/wa-xfer.c
44723+++ b/drivers/usb/wusbcore/wa-xfer.c
44724@@ -296,7 +296,7 @@ out:
44725 */
44726 static void wa_xfer_id_init(struct wa_xfer *xfer)
44727 {
44728- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
44729+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
44730 }
44731
44732 /*
44733diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
44734index 8c55011..eed4ae1a 100644
44735--- a/drivers/video/aty/aty128fb.c
44736+++ b/drivers/video/aty/aty128fb.c
44737@@ -149,7 +149,7 @@ enum {
44738 };
44739
44740 /* Must match above enum */
44741-static char * const r128_family[] = {
44742+static const char * const r128_family[] = {
44743 "AGP",
44744 "PCI",
44745 "PRO AGP",
44746diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
44747index 4f27fdc..d3537e6 100644
44748--- a/drivers/video/aty/atyfb_base.c
44749+++ b/drivers/video/aty/atyfb_base.c
44750@@ -1325,10 +1325,14 @@ static int atyfb_set_par(struct fb_info *info)
44751 par->accel_flags = var->accel_flags; /* hack */
44752
44753 if (var->accel_flags) {
44754- info->fbops->fb_sync = atyfb_sync;
44755+ pax_open_kernel();
44756+ *(void **)&info->fbops->fb_sync = atyfb_sync;
44757+ pax_close_kernel();
44758 info->flags &= ~FBINFO_HWACCEL_DISABLED;
44759 } else {
44760- info->fbops->fb_sync = NULL;
44761+ pax_open_kernel();
44762+ *(void **)&info->fbops->fb_sync = NULL;
44763+ pax_close_kernel();
44764 info->flags |= FBINFO_HWACCEL_DISABLED;
44765 }
44766
44767diff --git a/drivers/video/aty/mach64_cursor.c b/drivers/video/aty/mach64_cursor.c
44768index 95ec042..e6affdd 100644
44769--- a/drivers/video/aty/mach64_cursor.c
44770+++ b/drivers/video/aty/mach64_cursor.c
44771@@ -7,6 +7,7 @@
44772 #include <linux/string.h>
44773
44774 #include <asm/io.h>
44775+#include <asm/pgtable.h>
44776
44777 #ifdef __sparc__
44778 #include <asm/fbio.h>
44779@@ -208,7 +209,9 @@ int aty_init_cursor(struct fb_info *info)
44780 info->sprite.buf_align = 16; /* and 64 lines tall. */
44781 info->sprite.flags = FB_PIXMAP_IO;
44782
44783- info->fbops->fb_cursor = atyfb_cursor;
44784+ pax_open_kernel();
44785+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
44786+ pax_close_kernel();
44787
44788 return 0;
44789 }
44790diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
44791index 6c5ed6b..b727c88 100644
44792--- a/drivers/video/backlight/kb3886_bl.c
44793+++ b/drivers/video/backlight/kb3886_bl.c
44794@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
44795 static unsigned long kb3886bl_flags;
44796 #define KB3886BL_SUSPENDED 0x01
44797
44798-static struct dmi_system_id __initdata kb3886bl_device_table[] = {
44799+static const struct dmi_system_id __initconst kb3886bl_device_table[] = {
44800 {
44801 .ident = "Sahara Touch-iT",
44802 .matches = {
44803diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
44804index 88cad6b..dd746c7 100644
44805--- a/drivers/video/fb_defio.c
44806+++ b/drivers/video/fb_defio.c
44807@@ -206,7 +206,9 @@ void fb_deferred_io_init(struct fb_info *info)
44808
44809 BUG_ON(!fbdefio);
44810 mutex_init(&fbdefio->lock);
44811- info->fbops->fb_mmap = fb_deferred_io_mmap;
44812+ pax_open_kernel();
44813+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
44814+ pax_close_kernel();
44815 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
44816 INIT_LIST_HEAD(&fbdefio->pagelist);
44817 if (fbdefio->delay == 0) /* set a default of 1 s */
44818@@ -237,7 +239,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
44819 page->mapping = NULL;
44820 }
44821
44822- info->fbops->fb_mmap = NULL;
44823+ *(void **)&info->fbops->fb_mmap = NULL;
44824 mutex_destroy(&fbdefio->lock);
44825 }
44826 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
44827diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
44828index 5c3960d..15cf8fc 100644
44829--- a/drivers/video/fbcmap.c
44830+++ b/drivers/video/fbcmap.c
44831@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
44832 rc = -ENODEV;
44833 goto out;
44834 }
44835- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
44836- !info->fbops->fb_setcmap)) {
44837+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
44838 rc = -EINVAL;
44839 goto out1;
44840 }
44841diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
44842index dc61c12..e29796e 100644
44843--- a/drivers/video/fbmem.c
44844+++ b/drivers/video/fbmem.c
44845@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
44846 image->dx += image->width + 8;
44847 }
44848 } else if (rotate == FB_ROTATE_UD) {
44849- for (x = 0; x < num && image->dx >= 0; x++) {
44850+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
44851 info->fbops->fb_imageblit(info, image);
44852 image->dx -= image->width + 8;
44853 }
44854@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
44855 image->dy += image->height + 8;
44856 }
44857 } else if (rotate == FB_ROTATE_CCW) {
44858- for (x = 0; x < num && image->dy >= 0; x++) {
44859+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
44860 info->fbops->fb_imageblit(info, image);
44861 image->dy -= image->height + 8;
44862 }
44863@@ -1166,7 +1166,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
44864 return -EFAULT;
44865 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
44866 return -EINVAL;
44867- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
44868+ if (con2fb.framebuffer >= FB_MAX)
44869 return -EINVAL;
44870 if (!registered_fb[con2fb.framebuffer])
44871 request_module("fb%d", con2fb.framebuffer);
44872diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
44873index 7672d2e..b56437f 100644
44874--- a/drivers/video/i810/i810_accel.c
44875+++ b/drivers/video/i810/i810_accel.c
44876@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
44877 }
44878 }
44879 printk("ringbuffer lockup!!!\n");
44880+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
44881 i810_report_error(mmio);
44882 par->dev_flags |= LOCKUP;
44883 info->pixmap.scan_align = 1;
44884diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
44885index 3c14e43..eafa544 100644
44886--- a/drivers/video/logo/logo_linux_clut224.ppm
44887+++ b/drivers/video/logo/logo_linux_clut224.ppm
44888@@ -1,1604 +1,1123 @@
44889 P3
44890-# Standard 224-color Linux logo
44891 80 80
44892 255
44893- 0 0 0 0 0 0 0 0 0 0 0 0
44894- 0 0 0 0 0 0 0 0 0 0 0 0
44895- 0 0 0 0 0 0 0 0 0 0 0 0
44896- 0 0 0 0 0 0 0 0 0 0 0 0
44897- 0 0 0 0 0 0 0 0 0 0 0 0
44898- 0 0 0 0 0 0 0 0 0 0 0 0
44899- 0 0 0 0 0 0 0 0 0 0 0 0
44900- 0 0 0 0 0 0 0 0 0 0 0 0
44901- 0 0 0 0 0 0 0 0 0 0 0 0
44902- 6 6 6 6 6 6 10 10 10 10 10 10
44903- 10 10 10 6 6 6 6 6 6 6 6 6
44904- 0 0 0 0 0 0 0 0 0 0 0 0
44905- 0 0 0 0 0 0 0 0 0 0 0 0
44906- 0 0 0 0 0 0 0 0 0 0 0 0
44907- 0 0 0 0 0 0 0 0 0 0 0 0
44908- 0 0 0 0 0 0 0 0 0 0 0 0
44909- 0 0 0 0 0 0 0 0 0 0 0 0
44910- 0 0 0 0 0 0 0 0 0 0 0 0
44911- 0 0 0 0 0 0 0 0 0 0 0 0
44912- 0 0 0 0 0 0 0 0 0 0 0 0
44913- 0 0 0 0 0 0 0 0 0 0 0 0
44914- 0 0 0 0 0 0 0 0 0 0 0 0
44915- 0 0 0 0 0 0 0 0 0 0 0 0
44916- 0 0 0 0 0 0 0 0 0 0 0 0
44917- 0 0 0 0 0 0 0 0 0 0 0 0
44918- 0 0 0 0 0 0 0 0 0 0 0 0
44919- 0 0 0 0 0 0 0 0 0 0 0 0
44920- 0 0 0 0 0 0 0 0 0 0 0 0
44921- 0 0 0 6 6 6 10 10 10 14 14 14
44922- 22 22 22 26 26 26 30 30 30 34 34 34
44923- 30 30 30 30 30 30 26 26 26 18 18 18
44924- 14 14 14 10 10 10 6 6 6 0 0 0
44925- 0 0 0 0 0 0 0 0 0 0 0 0
44926- 0 0 0 0 0 0 0 0 0 0 0 0
44927- 0 0 0 0 0 0 0 0 0 0 0 0
44928- 0 0 0 0 0 0 0 0 0 0 0 0
44929- 0 0 0 0 0 0 0 0 0 0 0 0
44930- 0 0 0 0 0 0 0 0 0 0 0 0
44931- 0 0 0 0 0 0 0 0 0 0 0 0
44932- 0 0 0 0 0 0 0 0 0 0 0 0
44933- 0 0 0 0 0 0 0 0 0 0 0 0
44934- 0 0 0 0 0 1 0 0 1 0 0 0
44935- 0 0 0 0 0 0 0 0 0 0 0 0
44936- 0 0 0 0 0 0 0 0 0 0 0 0
44937- 0 0 0 0 0 0 0 0 0 0 0 0
44938- 0 0 0 0 0 0 0 0 0 0 0 0
44939- 0 0 0 0 0 0 0 0 0 0 0 0
44940- 0 0 0 0 0 0 0 0 0 0 0 0
44941- 6 6 6 14 14 14 26 26 26 42 42 42
44942- 54 54 54 66 66 66 78 78 78 78 78 78
44943- 78 78 78 74 74 74 66 66 66 54 54 54
44944- 42 42 42 26 26 26 18 18 18 10 10 10
44945- 6 6 6 0 0 0 0 0 0 0 0 0
44946- 0 0 0 0 0 0 0 0 0 0 0 0
44947- 0 0 0 0 0 0 0 0 0 0 0 0
44948- 0 0 0 0 0 0 0 0 0 0 0 0
44949- 0 0 0 0 0 0 0 0 0 0 0 0
44950- 0 0 0 0 0 0 0 0 0 0 0 0
44951- 0 0 0 0 0 0 0 0 0 0 0 0
44952- 0 0 0 0 0 0 0 0 0 0 0 0
44953- 0 0 0 0 0 0 0 0 0 0 0 0
44954- 0 0 1 0 0 0 0 0 0 0 0 0
44955- 0 0 0 0 0 0 0 0 0 0 0 0
44956- 0 0 0 0 0 0 0 0 0 0 0 0
44957- 0 0 0 0 0 0 0 0 0 0 0 0
44958- 0 0 0 0 0 0 0 0 0 0 0 0
44959- 0 0 0 0 0 0 0 0 0 0 0 0
44960- 0 0 0 0 0 0 0 0 0 10 10 10
44961- 22 22 22 42 42 42 66 66 66 86 86 86
44962- 66 66 66 38 38 38 38 38 38 22 22 22
44963- 26 26 26 34 34 34 54 54 54 66 66 66
44964- 86 86 86 70 70 70 46 46 46 26 26 26
44965- 14 14 14 6 6 6 0 0 0 0 0 0
44966- 0 0 0 0 0 0 0 0 0 0 0 0
44967- 0 0 0 0 0 0 0 0 0 0 0 0
44968- 0 0 0 0 0 0 0 0 0 0 0 0
44969- 0 0 0 0 0 0 0 0 0 0 0 0
44970- 0 0 0 0 0 0 0 0 0 0 0 0
44971- 0 0 0 0 0 0 0 0 0 0 0 0
44972- 0 0 0 0 0 0 0 0 0 0 0 0
44973- 0 0 0 0 0 0 0 0 0 0 0 0
44974- 0 0 1 0 0 1 0 0 1 0 0 0
44975- 0 0 0 0 0 0 0 0 0 0 0 0
44976- 0 0 0 0 0 0 0 0 0 0 0 0
44977- 0 0 0 0 0 0 0 0 0 0 0 0
44978- 0 0 0 0 0 0 0 0 0 0 0 0
44979- 0 0 0 0 0 0 0 0 0 0 0 0
44980- 0 0 0 0 0 0 10 10 10 26 26 26
44981- 50 50 50 82 82 82 58 58 58 6 6 6
44982- 2 2 6 2 2 6 2 2 6 2 2 6
44983- 2 2 6 2 2 6 2 2 6 2 2 6
44984- 6 6 6 54 54 54 86 86 86 66 66 66
44985- 38 38 38 18 18 18 6 6 6 0 0 0
44986- 0 0 0 0 0 0 0 0 0 0 0 0
44987- 0 0 0 0 0 0 0 0 0 0 0 0
44988- 0 0 0 0 0 0 0 0 0 0 0 0
44989- 0 0 0 0 0 0 0 0 0 0 0 0
44990- 0 0 0 0 0 0 0 0 0 0 0 0
44991- 0 0 0 0 0 0 0 0 0 0 0 0
44992- 0 0 0 0 0 0 0 0 0 0 0 0
44993- 0 0 0 0 0 0 0 0 0 0 0 0
44994- 0 0 0 0 0 0 0 0 0 0 0 0
44995- 0 0 0 0 0 0 0 0 0 0 0 0
44996- 0 0 0 0 0 0 0 0 0 0 0 0
44997- 0 0 0 0 0 0 0 0 0 0 0 0
44998- 0 0 0 0 0 0 0 0 0 0 0 0
44999- 0 0 0 0 0 0 0 0 0 0 0 0
45000- 0 0 0 6 6 6 22 22 22 50 50 50
45001- 78 78 78 34 34 34 2 2 6 2 2 6
45002- 2 2 6 2 2 6 2 2 6 2 2 6
45003- 2 2 6 2 2 6 2 2 6 2 2 6
45004- 2 2 6 2 2 6 6 6 6 70 70 70
45005- 78 78 78 46 46 46 22 22 22 6 6 6
45006- 0 0 0 0 0 0 0 0 0 0 0 0
45007- 0 0 0 0 0 0 0 0 0 0 0 0
45008- 0 0 0 0 0 0 0 0 0 0 0 0
45009- 0 0 0 0 0 0 0 0 0 0 0 0
45010- 0 0 0 0 0 0 0 0 0 0 0 0
45011- 0 0 0 0 0 0 0 0 0 0 0 0
45012- 0 0 0 0 0 0 0 0 0 0 0 0
45013- 0 0 0 0 0 0 0 0 0 0 0 0
45014- 0 0 1 0 0 1 0 0 1 0 0 0
45015- 0 0 0 0 0 0 0 0 0 0 0 0
45016- 0 0 0 0 0 0 0 0 0 0 0 0
45017- 0 0 0 0 0 0 0 0 0 0 0 0
45018- 0 0 0 0 0 0 0 0 0 0 0 0
45019- 0 0 0 0 0 0 0 0 0 0 0 0
45020- 6 6 6 18 18 18 42 42 42 82 82 82
45021- 26 26 26 2 2 6 2 2 6 2 2 6
45022- 2 2 6 2 2 6 2 2 6 2 2 6
45023- 2 2 6 2 2 6 2 2 6 14 14 14
45024- 46 46 46 34 34 34 6 6 6 2 2 6
45025- 42 42 42 78 78 78 42 42 42 18 18 18
45026- 6 6 6 0 0 0 0 0 0 0 0 0
45027- 0 0 0 0 0 0 0 0 0 0 0 0
45028- 0 0 0 0 0 0 0 0 0 0 0 0
45029- 0 0 0 0 0 0 0 0 0 0 0 0
45030- 0 0 0 0 0 0 0 0 0 0 0 0
45031- 0 0 0 0 0 0 0 0 0 0 0 0
45032- 0 0 0 0 0 0 0 0 0 0 0 0
45033- 0 0 0 0 0 0 0 0 0 0 0 0
45034- 0 0 1 0 0 0 0 0 1 0 0 0
45035- 0 0 0 0 0 0 0 0 0 0 0 0
45036- 0 0 0 0 0 0 0 0 0 0 0 0
45037- 0 0 0 0 0 0 0 0 0 0 0 0
45038- 0 0 0 0 0 0 0 0 0 0 0 0
45039- 0 0 0 0 0 0 0 0 0 0 0 0
45040- 10 10 10 30 30 30 66 66 66 58 58 58
45041- 2 2 6 2 2 6 2 2 6 2 2 6
45042- 2 2 6 2 2 6 2 2 6 2 2 6
45043- 2 2 6 2 2 6 2 2 6 26 26 26
45044- 86 86 86 101 101 101 46 46 46 10 10 10
45045- 2 2 6 58 58 58 70 70 70 34 34 34
45046- 10 10 10 0 0 0 0 0 0 0 0 0
45047- 0 0 0 0 0 0 0 0 0 0 0 0
45048- 0 0 0 0 0 0 0 0 0 0 0 0
45049- 0 0 0 0 0 0 0 0 0 0 0 0
45050- 0 0 0 0 0 0 0 0 0 0 0 0
45051- 0 0 0 0 0 0 0 0 0 0 0 0
45052- 0 0 0 0 0 0 0 0 0 0 0 0
45053- 0 0 0 0 0 0 0 0 0 0 0 0
45054- 0 0 1 0 0 1 0 0 1 0 0 0
45055- 0 0 0 0 0 0 0 0 0 0 0 0
45056- 0 0 0 0 0 0 0 0 0 0 0 0
45057- 0 0 0 0 0 0 0 0 0 0 0 0
45058- 0 0 0 0 0 0 0 0 0 0 0 0
45059- 0 0 0 0 0 0 0 0 0 0 0 0
45060- 14 14 14 42 42 42 86 86 86 10 10 10
45061- 2 2 6 2 2 6 2 2 6 2 2 6
45062- 2 2 6 2 2 6 2 2 6 2 2 6
45063- 2 2 6 2 2 6 2 2 6 30 30 30
45064- 94 94 94 94 94 94 58 58 58 26 26 26
45065- 2 2 6 6 6 6 78 78 78 54 54 54
45066- 22 22 22 6 6 6 0 0 0 0 0 0
45067- 0 0 0 0 0 0 0 0 0 0 0 0
45068- 0 0 0 0 0 0 0 0 0 0 0 0
45069- 0 0 0 0 0 0 0 0 0 0 0 0
45070- 0 0 0 0 0 0 0 0 0 0 0 0
45071- 0 0 0 0 0 0 0 0 0 0 0 0
45072- 0 0 0 0 0 0 0 0 0 0 0 0
45073- 0 0 0 0 0 0 0 0 0 0 0 0
45074- 0 0 0 0 0 0 0 0 0 0 0 0
45075- 0 0 0 0 0 0 0 0 0 0 0 0
45076- 0 0 0 0 0 0 0 0 0 0 0 0
45077- 0 0 0 0 0 0 0 0 0 0 0 0
45078- 0 0 0 0 0 0 0 0 0 0 0 0
45079- 0 0 0 0 0 0 0 0 0 6 6 6
45080- 22 22 22 62 62 62 62 62 62 2 2 6
45081- 2 2 6 2 2 6 2 2 6 2 2 6
45082- 2 2 6 2 2 6 2 2 6 2 2 6
45083- 2 2 6 2 2 6 2 2 6 26 26 26
45084- 54 54 54 38 38 38 18 18 18 10 10 10
45085- 2 2 6 2 2 6 34 34 34 82 82 82
45086- 38 38 38 14 14 14 0 0 0 0 0 0
45087- 0 0 0 0 0 0 0 0 0 0 0 0
45088- 0 0 0 0 0 0 0 0 0 0 0 0
45089- 0 0 0 0 0 0 0 0 0 0 0 0
45090- 0 0 0 0 0 0 0 0 0 0 0 0
45091- 0 0 0 0 0 0 0 0 0 0 0 0
45092- 0 0 0 0 0 0 0 0 0 0 0 0
45093- 0 0 0 0 0 0 0 0 0 0 0 0
45094- 0 0 0 0 0 1 0 0 1 0 0 0
45095- 0 0 0 0 0 0 0 0 0 0 0 0
45096- 0 0 0 0 0 0 0 0 0 0 0 0
45097- 0 0 0 0 0 0 0 0 0 0 0 0
45098- 0 0 0 0 0 0 0 0 0 0 0 0
45099- 0 0 0 0 0 0 0 0 0 6 6 6
45100- 30 30 30 78 78 78 30 30 30 2 2 6
45101- 2 2 6 2 2 6 2 2 6 2 2 6
45102- 2 2 6 2 2 6 2 2 6 2 2 6
45103- 2 2 6 2 2 6 2 2 6 10 10 10
45104- 10 10 10 2 2 6 2 2 6 2 2 6
45105- 2 2 6 2 2 6 2 2 6 78 78 78
45106- 50 50 50 18 18 18 6 6 6 0 0 0
45107- 0 0 0 0 0 0 0 0 0 0 0 0
45108- 0 0 0 0 0 0 0 0 0 0 0 0
45109- 0 0 0 0 0 0 0 0 0 0 0 0
45110- 0 0 0 0 0 0 0 0 0 0 0 0
45111- 0 0 0 0 0 0 0 0 0 0 0 0
45112- 0 0 0 0 0 0 0 0 0 0 0 0
45113- 0 0 0 0 0 0 0 0 0 0 0 0
45114- 0 0 1 0 0 0 0 0 0 0 0 0
45115- 0 0 0 0 0 0 0 0 0 0 0 0
45116- 0 0 0 0 0 0 0 0 0 0 0 0
45117- 0 0 0 0 0 0 0 0 0 0 0 0
45118- 0 0 0 0 0 0 0 0 0 0 0 0
45119- 0 0 0 0 0 0 0 0 0 10 10 10
45120- 38 38 38 86 86 86 14 14 14 2 2 6
45121- 2 2 6 2 2 6 2 2 6 2 2 6
45122- 2 2 6 2 2 6 2 2 6 2 2 6
45123- 2 2 6 2 2 6 2 2 6 2 2 6
45124- 2 2 6 2 2 6 2 2 6 2 2 6
45125- 2 2 6 2 2 6 2 2 6 54 54 54
45126- 66 66 66 26 26 26 6 6 6 0 0 0
45127- 0 0 0 0 0 0 0 0 0 0 0 0
45128- 0 0 0 0 0 0 0 0 0 0 0 0
45129- 0 0 0 0 0 0 0 0 0 0 0 0
45130- 0 0 0 0 0 0 0 0 0 0 0 0
45131- 0 0 0 0 0 0 0 0 0 0 0 0
45132- 0 0 0 0 0 0 0 0 0 0 0 0
45133- 0 0 0 0 0 0 0 0 0 0 0 0
45134- 0 0 0 0 0 1 0 0 1 0 0 0
45135- 0 0 0 0 0 0 0 0 0 0 0 0
45136- 0 0 0 0 0 0 0 0 0 0 0 0
45137- 0 0 0 0 0 0 0 0 0 0 0 0
45138- 0 0 0 0 0 0 0 0 0 0 0 0
45139- 0 0 0 0 0 0 0 0 0 14 14 14
45140- 42 42 42 82 82 82 2 2 6 2 2 6
45141- 2 2 6 6 6 6 10 10 10 2 2 6
45142- 2 2 6 2 2 6 2 2 6 2 2 6
45143- 2 2 6 2 2 6 2 2 6 6 6 6
45144- 14 14 14 10 10 10 2 2 6 2 2 6
45145- 2 2 6 2 2 6 2 2 6 18 18 18
45146- 82 82 82 34 34 34 10 10 10 0 0 0
45147- 0 0 0 0 0 0 0 0 0 0 0 0
45148- 0 0 0 0 0 0 0 0 0 0 0 0
45149- 0 0 0 0 0 0 0 0 0 0 0 0
45150- 0 0 0 0 0 0 0 0 0 0 0 0
45151- 0 0 0 0 0 0 0 0 0 0 0 0
45152- 0 0 0 0 0 0 0 0 0 0 0 0
45153- 0 0 0 0 0 0 0 0 0 0 0 0
45154- 0 0 1 0 0 0 0 0 0 0 0 0
45155- 0 0 0 0 0 0 0 0 0 0 0 0
45156- 0 0 0 0 0 0 0 0 0 0 0 0
45157- 0 0 0 0 0 0 0 0 0 0 0 0
45158- 0 0 0 0 0 0 0 0 0 0 0 0
45159- 0 0 0 0 0 0 0 0 0 14 14 14
45160- 46 46 46 86 86 86 2 2 6 2 2 6
45161- 6 6 6 6 6 6 22 22 22 34 34 34
45162- 6 6 6 2 2 6 2 2 6 2 2 6
45163- 2 2 6 2 2 6 18 18 18 34 34 34
45164- 10 10 10 50 50 50 22 22 22 2 2 6
45165- 2 2 6 2 2 6 2 2 6 10 10 10
45166- 86 86 86 42 42 42 14 14 14 0 0 0
45167- 0 0 0 0 0 0 0 0 0 0 0 0
45168- 0 0 0 0 0 0 0 0 0 0 0 0
45169- 0 0 0 0 0 0 0 0 0 0 0 0
45170- 0 0 0 0 0 0 0 0 0 0 0 0
45171- 0 0 0 0 0 0 0 0 0 0 0 0
45172- 0 0 0 0 0 0 0 0 0 0 0 0
45173- 0 0 0 0 0 0 0 0 0 0 0 0
45174- 0 0 1 0 0 1 0 0 1 0 0 0
45175- 0 0 0 0 0 0 0 0 0 0 0 0
45176- 0 0 0 0 0 0 0 0 0 0 0 0
45177- 0 0 0 0 0 0 0 0 0 0 0 0
45178- 0 0 0 0 0 0 0 0 0 0 0 0
45179- 0 0 0 0 0 0 0 0 0 14 14 14
45180- 46 46 46 86 86 86 2 2 6 2 2 6
45181- 38 38 38 116 116 116 94 94 94 22 22 22
45182- 22 22 22 2 2 6 2 2 6 2 2 6
45183- 14 14 14 86 86 86 138 138 138 162 162 162
45184-154 154 154 38 38 38 26 26 26 6 6 6
45185- 2 2 6 2 2 6 2 2 6 2 2 6
45186- 86 86 86 46 46 46 14 14 14 0 0 0
45187- 0 0 0 0 0 0 0 0 0 0 0 0
45188- 0 0 0 0 0 0 0 0 0 0 0 0
45189- 0 0 0 0 0 0 0 0 0 0 0 0
45190- 0 0 0 0 0 0 0 0 0 0 0 0
45191- 0 0 0 0 0 0 0 0 0 0 0 0
45192- 0 0 0 0 0 0 0 0 0 0 0 0
45193- 0 0 0 0 0 0 0 0 0 0 0 0
45194- 0 0 0 0 0 0 0 0 0 0 0 0
45195- 0 0 0 0 0 0 0 0 0 0 0 0
45196- 0 0 0 0 0 0 0 0 0 0 0 0
45197- 0 0 0 0 0 0 0 0 0 0 0 0
45198- 0 0 0 0 0 0 0 0 0 0 0 0
45199- 0 0 0 0 0 0 0 0 0 14 14 14
45200- 46 46 46 86 86 86 2 2 6 14 14 14
45201-134 134 134 198 198 198 195 195 195 116 116 116
45202- 10 10 10 2 2 6 2 2 6 6 6 6
45203-101 98 89 187 187 187 210 210 210 218 218 218
45204-214 214 214 134 134 134 14 14 14 6 6 6
45205- 2 2 6 2 2 6 2 2 6 2 2 6
45206- 86 86 86 50 50 50 18 18 18 6 6 6
45207- 0 0 0 0 0 0 0 0 0 0 0 0
45208- 0 0 0 0 0 0 0 0 0 0 0 0
45209- 0 0 0 0 0 0 0 0 0 0 0 0
45210- 0 0 0 0 0 0 0 0 0 0 0 0
45211- 0 0 0 0 0 0 0 0 0 0 0 0
45212- 0 0 0 0 0 0 0 0 0 0 0 0
45213- 0 0 0 0 0 0 0 0 1 0 0 0
45214- 0 0 1 0 0 1 0 0 1 0 0 0
45215- 0 0 0 0 0 0 0 0 0 0 0 0
45216- 0 0 0 0 0 0 0 0 0 0 0 0
45217- 0 0 0 0 0 0 0 0 0 0 0 0
45218- 0 0 0 0 0 0 0 0 0 0 0 0
45219- 0 0 0 0 0 0 0 0 0 14 14 14
45220- 46 46 46 86 86 86 2 2 6 54 54 54
45221-218 218 218 195 195 195 226 226 226 246 246 246
45222- 58 58 58 2 2 6 2 2 6 30 30 30
45223-210 210 210 253 253 253 174 174 174 123 123 123
45224-221 221 221 234 234 234 74 74 74 2 2 6
45225- 2 2 6 2 2 6 2 2 6 2 2 6
45226- 70 70 70 58 58 58 22 22 22 6 6 6
45227- 0 0 0 0 0 0 0 0 0 0 0 0
45228- 0 0 0 0 0 0 0 0 0 0 0 0
45229- 0 0 0 0 0 0 0 0 0 0 0 0
45230- 0 0 0 0 0 0 0 0 0 0 0 0
45231- 0 0 0 0 0 0 0 0 0 0 0 0
45232- 0 0 0 0 0 0 0 0 0 0 0 0
45233- 0 0 0 0 0 0 0 0 0 0 0 0
45234- 0 0 0 0 0 0 0 0 0 0 0 0
45235- 0 0 0 0 0 0 0 0 0 0 0 0
45236- 0 0 0 0 0 0 0 0 0 0 0 0
45237- 0 0 0 0 0 0 0 0 0 0 0 0
45238- 0 0 0 0 0 0 0 0 0 0 0 0
45239- 0 0 0 0 0 0 0 0 0 14 14 14
45240- 46 46 46 82 82 82 2 2 6 106 106 106
45241-170 170 170 26 26 26 86 86 86 226 226 226
45242-123 123 123 10 10 10 14 14 14 46 46 46
45243-231 231 231 190 190 190 6 6 6 70 70 70
45244- 90 90 90 238 238 238 158 158 158 2 2 6
45245- 2 2 6 2 2 6 2 2 6 2 2 6
45246- 70 70 70 58 58 58 22 22 22 6 6 6
45247- 0 0 0 0 0 0 0 0 0 0 0 0
45248- 0 0 0 0 0 0 0 0 0 0 0 0
45249- 0 0 0 0 0 0 0 0 0 0 0 0
45250- 0 0 0 0 0 0 0 0 0 0 0 0
45251- 0 0 0 0 0 0 0 0 0 0 0 0
45252- 0 0 0 0 0 0 0 0 0 0 0 0
45253- 0 0 0 0 0 0 0 0 1 0 0 0
45254- 0 0 1 0 0 1 0 0 1 0 0 0
45255- 0 0 0 0 0 0 0 0 0 0 0 0
45256- 0 0 0 0 0 0 0 0 0 0 0 0
45257- 0 0 0 0 0 0 0 0 0 0 0 0
45258- 0 0 0 0 0 0 0 0 0 0 0 0
45259- 0 0 0 0 0 0 0 0 0 14 14 14
45260- 42 42 42 86 86 86 6 6 6 116 116 116
45261-106 106 106 6 6 6 70 70 70 149 149 149
45262-128 128 128 18 18 18 38 38 38 54 54 54
45263-221 221 221 106 106 106 2 2 6 14 14 14
45264- 46 46 46 190 190 190 198 198 198 2 2 6
45265- 2 2 6 2 2 6 2 2 6 2 2 6
45266- 74 74 74 62 62 62 22 22 22 6 6 6
45267- 0 0 0 0 0 0 0 0 0 0 0 0
45268- 0 0 0 0 0 0 0 0 0 0 0 0
45269- 0 0 0 0 0 0 0 0 0 0 0 0
45270- 0 0 0 0 0 0 0 0 0 0 0 0
45271- 0 0 0 0 0 0 0 0 0 0 0 0
45272- 0 0 0 0 0 0 0 0 0 0 0 0
45273- 0 0 0 0 0 0 0 0 1 0 0 0
45274- 0 0 1 0 0 0 0 0 1 0 0 0
45275- 0 0 0 0 0 0 0 0 0 0 0 0
45276- 0 0 0 0 0 0 0 0 0 0 0 0
45277- 0 0 0 0 0 0 0 0 0 0 0 0
45278- 0 0 0 0 0 0 0 0 0 0 0 0
45279- 0 0 0 0 0 0 0 0 0 14 14 14
45280- 42 42 42 94 94 94 14 14 14 101 101 101
45281-128 128 128 2 2 6 18 18 18 116 116 116
45282-118 98 46 121 92 8 121 92 8 98 78 10
45283-162 162 162 106 106 106 2 2 6 2 2 6
45284- 2 2 6 195 195 195 195 195 195 6 6 6
45285- 2 2 6 2 2 6 2 2 6 2 2 6
45286- 74 74 74 62 62 62 22 22 22 6 6 6
45287- 0 0 0 0 0 0 0 0 0 0 0 0
45288- 0 0 0 0 0 0 0 0 0 0 0 0
45289- 0 0 0 0 0 0 0 0 0 0 0 0
45290- 0 0 0 0 0 0 0 0 0 0 0 0
45291- 0 0 0 0 0 0 0 0 0 0 0 0
45292- 0 0 0 0 0 0 0 0 0 0 0 0
45293- 0 0 0 0 0 0 0 0 1 0 0 1
45294- 0 0 1 0 0 0 0 0 1 0 0 0
45295- 0 0 0 0 0 0 0 0 0 0 0 0
45296- 0 0 0 0 0 0 0 0 0 0 0 0
45297- 0 0 0 0 0 0 0 0 0 0 0 0
45298- 0 0 0 0 0 0 0 0 0 0 0 0
45299- 0 0 0 0 0 0 0 0 0 10 10 10
45300- 38 38 38 90 90 90 14 14 14 58 58 58
45301-210 210 210 26 26 26 54 38 6 154 114 10
45302-226 170 11 236 186 11 225 175 15 184 144 12
45303-215 174 15 175 146 61 37 26 9 2 2 6
45304- 70 70 70 246 246 246 138 138 138 2 2 6
45305- 2 2 6 2 2 6 2 2 6 2 2 6
45306- 70 70 70 66 66 66 26 26 26 6 6 6
45307- 0 0 0 0 0 0 0 0 0 0 0 0
45308- 0 0 0 0 0 0 0 0 0 0 0 0
45309- 0 0 0 0 0 0 0 0 0 0 0 0
45310- 0 0 0 0 0 0 0 0 0 0 0 0
45311- 0 0 0 0 0 0 0 0 0 0 0 0
45312- 0 0 0 0 0 0 0 0 0 0 0 0
45313- 0 0 0 0 0 0 0 0 0 0 0 0
45314- 0 0 0 0 0 0 0 0 0 0 0 0
45315- 0 0 0 0 0 0 0 0 0 0 0 0
45316- 0 0 0 0 0 0 0 0 0 0 0 0
45317- 0 0 0 0 0 0 0 0 0 0 0 0
45318- 0 0 0 0 0 0 0 0 0 0 0 0
45319- 0 0 0 0 0 0 0 0 0 10 10 10
45320- 38 38 38 86 86 86 14 14 14 10 10 10
45321-195 195 195 188 164 115 192 133 9 225 175 15
45322-239 182 13 234 190 10 232 195 16 232 200 30
45323-245 207 45 241 208 19 232 195 16 184 144 12
45324-218 194 134 211 206 186 42 42 42 2 2 6
45325- 2 2 6 2 2 6 2 2 6 2 2 6
45326- 50 50 50 74 74 74 30 30 30 6 6 6
45327- 0 0 0 0 0 0 0 0 0 0 0 0
45328- 0 0 0 0 0 0 0 0 0 0 0 0
45329- 0 0 0 0 0 0 0 0 0 0 0 0
45330- 0 0 0 0 0 0 0 0 0 0 0 0
45331- 0 0 0 0 0 0 0 0 0 0 0 0
45332- 0 0 0 0 0 0 0 0 0 0 0 0
45333- 0 0 0 0 0 0 0 0 0 0 0 0
45334- 0 0 0 0 0 0 0 0 0 0 0 0
45335- 0 0 0 0 0 0 0 0 0 0 0 0
45336- 0 0 0 0 0 0 0 0 0 0 0 0
45337- 0 0 0 0 0 0 0 0 0 0 0 0
45338- 0 0 0 0 0 0 0 0 0 0 0 0
45339- 0 0 0 0 0 0 0 0 0 10 10 10
45340- 34 34 34 86 86 86 14 14 14 2 2 6
45341-121 87 25 192 133 9 219 162 10 239 182 13
45342-236 186 11 232 195 16 241 208 19 244 214 54
45343-246 218 60 246 218 38 246 215 20 241 208 19
45344-241 208 19 226 184 13 121 87 25 2 2 6
45345- 2 2 6 2 2 6 2 2 6 2 2 6
45346- 50 50 50 82 82 82 34 34 34 10 10 10
45347- 0 0 0 0 0 0 0 0 0 0 0 0
45348- 0 0 0 0 0 0 0 0 0 0 0 0
45349- 0 0 0 0 0 0 0 0 0 0 0 0
45350- 0 0 0 0 0 0 0 0 0 0 0 0
45351- 0 0 0 0 0 0 0 0 0 0 0 0
45352- 0 0 0 0 0 0 0 0 0 0 0 0
45353- 0 0 0 0 0 0 0 0 0 0 0 0
45354- 0 0 0 0 0 0 0 0 0 0 0 0
45355- 0 0 0 0 0 0 0 0 0 0 0 0
45356- 0 0 0 0 0 0 0 0 0 0 0 0
45357- 0 0 0 0 0 0 0 0 0 0 0 0
45358- 0 0 0 0 0 0 0 0 0 0 0 0
45359- 0 0 0 0 0 0 0 0 0 10 10 10
45360- 34 34 34 82 82 82 30 30 30 61 42 6
45361-180 123 7 206 145 10 230 174 11 239 182 13
45362-234 190 10 238 202 15 241 208 19 246 218 74
45363-246 218 38 246 215 20 246 215 20 246 215 20
45364-226 184 13 215 174 15 184 144 12 6 6 6
45365- 2 2 6 2 2 6 2 2 6 2 2 6
45366- 26 26 26 94 94 94 42 42 42 14 14 14
45367- 0 0 0 0 0 0 0 0 0 0 0 0
45368- 0 0 0 0 0 0 0 0 0 0 0 0
45369- 0 0 0 0 0 0 0 0 0 0 0 0
45370- 0 0 0 0 0 0 0 0 0 0 0 0
45371- 0 0 0 0 0 0 0 0 0 0 0 0
45372- 0 0 0 0 0 0 0 0 0 0 0 0
45373- 0 0 0 0 0 0 0 0 0 0 0 0
45374- 0 0 0 0 0 0 0 0 0 0 0 0
45375- 0 0 0 0 0 0 0 0 0 0 0 0
45376- 0 0 0 0 0 0 0 0 0 0 0 0
45377- 0 0 0 0 0 0 0 0 0 0 0 0
45378- 0 0 0 0 0 0 0 0 0 0 0 0
45379- 0 0 0 0 0 0 0 0 0 10 10 10
45380- 30 30 30 78 78 78 50 50 50 104 69 6
45381-192 133 9 216 158 10 236 178 12 236 186 11
45382-232 195 16 241 208 19 244 214 54 245 215 43
45383-246 215 20 246 215 20 241 208 19 198 155 10
45384-200 144 11 216 158 10 156 118 10 2 2 6
45385- 2 2 6 2 2 6 2 2 6 2 2 6
45386- 6 6 6 90 90 90 54 54 54 18 18 18
45387- 6 6 6 0 0 0 0 0 0 0 0 0
45388- 0 0 0 0 0 0 0 0 0 0 0 0
45389- 0 0 0 0 0 0 0 0 0 0 0 0
45390- 0 0 0 0 0 0 0 0 0 0 0 0
45391- 0 0 0 0 0 0 0 0 0 0 0 0
45392- 0 0 0 0 0 0 0 0 0 0 0 0
45393- 0 0 0 0 0 0 0 0 0 0 0 0
45394- 0 0 0 0 0 0 0 0 0 0 0 0
45395- 0 0 0 0 0 0 0 0 0 0 0 0
45396- 0 0 0 0 0 0 0 0 0 0 0 0
45397- 0 0 0 0 0 0 0 0 0 0 0 0
45398- 0 0 0 0 0 0 0 0 0 0 0 0
45399- 0 0 0 0 0 0 0 0 0 10 10 10
45400- 30 30 30 78 78 78 46 46 46 22 22 22
45401-137 92 6 210 162 10 239 182 13 238 190 10
45402-238 202 15 241 208 19 246 215 20 246 215 20
45403-241 208 19 203 166 17 185 133 11 210 150 10
45404-216 158 10 210 150 10 102 78 10 2 2 6
45405- 6 6 6 54 54 54 14 14 14 2 2 6
45406- 2 2 6 62 62 62 74 74 74 30 30 30
45407- 10 10 10 0 0 0 0 0 0 0 0 0
45408- 0 0 0 0 0 0 0 0 0 0 0 0
45409- 0 0 0 0 0 0 0 0 0 0 0 0
45410- 0 0 0 0 0 0 0 0 0 0 0 0
45411- 0 0 0 0 0 0 0 0 0 0 0 0
45412- 0 0 0 0 0 0 0 0 0 0 0 0
45413- 0 0 0 0 0 0 0 0 0 0 0 0
45414- 0 0 0 0 0 0 0 0 0 0 0 0
45415- 0 0 0 0 0 0 0 0 0 0 0 0
45416- 0 0 0 0 0 0 0 0 0 0 0 0
45417- 0 0 0 0 0 0 0 0 0 0 0 0
45418- 0 0 0 0 0 0 0 0 0 0 0 0
45419- 0 0 0 0 0 0 0 0 0 10 10 10
45420- 34 34 34 78 78 78 50 50 50 6 6 6
45421- 94 70 30 139 102 15 190 146 13 226 184 13
45422-232 200 30 232 195 16 215 174 15 190 146 13
45423-168 122 10 192 133 9 210 150 10 213 154 11
45424-202 150 34 182 157 106 101 98 89 2 2 6
45425- 2 2 6 78 78 78 116 116 116 58 58 58
45426- 2 2 6 22 22 22 90 90 90 46 46 46
45427- 18 18 18 6 6 6 0 0 0 0 0 0
45428- 0 0 0 0 0 0 0 0 0 0 0 0
45429- 0 0 0 0 0 0 0 0 0 0 0 0
45430- 0 0 0 0 0 0 0 0 0 0 0 0
45431- 0 0 0 0 0 0 0 0 0 0 0 0
45432- 0 0 0 0 0 0 0 0 0 0 0 0
45433- 0 0 0 0 0 0 0 0 0 0 0 0
45434- 0 0 0 0 0 0 0 0 0 0 0 0
45435- 0 0 0 0 0 0 0 0 0 0 0 0
45436- 0 0 0 0 0 0 0 0 0 0 0 0
45437- 0 0 0 0 0 0 0 0 0 0 0 0
45438- 0 0 0 0 0 0 0 0 0 0 0 0
45439- 0 0 0 0 0 0 0 0 0 10 10 10
45440- 38 38 38 86 86 86 50 50 50 6 6 6
45441-128 128 128 174 154 114 156 107 11 168 122 10
45442-198 155 10 184 144 12 197 138 11 200 144 11
45443-206 145 10 206 145 10 197 138 11 188 164 115
45444-195 195 195 198 198 198 174 174 174 14 14 14
45445- 2 2 6 22 22 22 116 116 116 116 116 116
45446- 22 22 22 2 2 6 74 74 74 70 70 70
45447- 30 30 30 10 10 10 0 0 0 0 0 0
45448- 0 0 0 0 0 0 0 0 0 0 0 0
45449- 0 0 0 0 0 0 0 0 0 0 0 0
45450- 0 0 0 0 0 0 0 0 0 0 0 0
45451- 0 0 0 0 0 0 0 0 0 0 0 0
45452- 0 0 0 0 0 0 0 0 0 0 0 0
45453- 0 0 0 0 0 0 0 0 0 0 0 0
45454- 0 0 0 0 0 0 0 0 0 0 0 0
45455- 0 0 0 0 0 0 0 0 0 0 0 0
45456- 0 0 0 0 0 0 0 0 0 0 0 0
45457- 0 0 0 0 0 0 0 0 0 0 0 0
45458- 0 0 0 0 0 0 0 0 0 0 0 0
45459- 0 0 0 0 0 0 6 6 6 18 18 18
45460- 50 50 50 101 101 101 26 26 26 10 10 10
45461-138 138 138 190 190 190 174 154 114 156 107 11
45462-197 138 11 200 144 11 197 138 11 192 133 9
45463-180 123 7 190 142 34 190 178 144 187 187 187
45464-202 202 202 221 221 221 214 214 214 66 66 66
45465- 2 2 6 2 2 6 50 50 50 62 62 62
45466- 6 6 6 2 2 6 10 10 10 90 90 90
45467- 50 50 50 18 18 18 6 6 6 0 0 0
45468- 0 0 0 0 0 0 0 0 0 0 0 0
45469- 0 0 0 0 0 0 0 0 0 0 0 0
45470- 0 0 0 0 0 0 0 0 0 0 0 0
45471- 0 0 0 0 0 0 0 0 0 0 0 0
45472- 0 0 0 0 0 0 0 0 0 0 0 0
45473- 0 0 0 0 0 0 0 0 0 0 0 0
45474- 0 0 0 0 0 0 0 0 0 0 0 0
45475- 0 0 0 0 0 0 0 0 0 0 0 0
45476- 0 0 0 0 0 0 0 0 0 0 0 0
45477- 0 0 0 0 0 0 0 0 0 0 0 0
45478- 0 0 0 0 0 0 0 0 0 0 0 0
45479- 0 0 0 0 0 0 10 10 10 34 34 34
45480- 74 74 74 74 74 74 2 2 6 6 6 6
45481-144 144 144 198 198 198 190 190 190 178 166 146
45482-154 121 60 156 107 11 156 107 11 168 124 44
45483-174 154 114 187 187 187 190 190 190 210 210 210
45484-246 246 246 253 253 253 253 253 253 182 182 182
45485- 6 6 6 2 2 6 2 2 6 2 2 6
45486- 2 2 6 2 2 6 2 2 6 62 62 62
45487- 74 74 74 34 34 34 14 14 14 0 0 0
45488- 0 0 0 0 0 0 0 0 0 0 0 0
45489- 0 0 0 0 0 0 0 0 0 0 0 0
45490- 0 0 0 0 0 0 0 0 0 0 0 0
45491- 0 0 0 0 0 0 0 0 0 0 0 0
45492- 0 0 0 0 0 0 0 0 0 0 0 0
45493- 0 0 0 0 0 0 0 0 0 0 0 0
45494- 0 0 0 0 0 0 0 0 0 0 0 0
45495- 0 0 0 0 0 0 0 0 0 0 0 0
45496- 0 0 0 0 0 0 0 0 0 0 0 0
45497- 0 0 0 0 0 0 0 0 0 0 0 0
45498- 0 0 0 0 0 0 0 0 0 0 0 0
45499- 0 0 0 10 10 10 22 22 22 54 54 54
45500- 94 94 94 18 18 18 2 2 6 46 46 46
45501-234 234 234 221 221 221 190 190 190 190 190 190
45502-190 190 190 187 187 187 187 187 187 190 190 190
45503-190 190 190 195 195 195 214 214 214 242 242 242
45504-253 253 253 253 253 253 253 253 253 253 253 253
45505- 82 82 82 2 2 6 2 2 6 2 2 6
45506- 2 2 6 2 2 6 2 2 6 14 14 14
45507- 86 86 86 54 54 54 22 22 22 6 6 6
45508- 0 0 0 0 0 0 0 0 0 0 0 0
45509- 0 0 0 0 0 0 0 0 0 0 0 0
45510- 0 0 0 0 0 0 0 0 0 0 0 0
45511- 0 0 0 0 0 0 0 0 0 0 0 0
45512- 0 0 0 0 0 0 0 0 0 0 0 0
45513- 0 0 0 0 0 0 0 0 0 0 0 0
45514- 0 0 0 0 0 0 0 0 0 0 0 0
45515- 0 0 0 0 0 0 0 0 0 0 0 0
45516- 0 0 0 0 0 0 0 0 0 0 0 0
45517- 0 0 0 0 0 0 0 0 0 0 0 0
45518- 0 0 0 0 0 0 0 0 0 0 0 0
45519- 6 6 6 18 18 18 46 46 46 90 90 90
45520- 46 46 46 18 18 18 6 6 6 182 182 182
45521-253 253 253 246 246 246 206 206 206 190 190 190
45522-190 190 190 190 190 190 190 190 190 190 190 190
45523-206 206 206 231 231 231 250 250 250 253 253 253
45524-253 253 253 253 253 253 253 253 253 253 253 253
45525-202 202 202 14 14 14 2 2 6 2 2 6
45526- 2 2 6 2 2 6 2 2 6 2 2 6
45527- 42 42 42 86 86 86 42 42 42 18 18 18
45528- 6 6 6 0 0 0 0 0 0 0 0 0
45529- 0 0 0 0 0 0 0 0 0 0 0 0
45530- 0 0 0 0 0 0 0 0 0 0 0 0
45531- 0 0 0 0 0 0 0 0 0 0 0 0
45532- 0 0 0 0 0 0 0 0 0 0 0 0
45533- 0 0 0 0 0 0 0 0 0 0 0 0
45534- 0 0 0 0 0 0 0 0 0 0 0 0
45535- 0 0 0 0 0 0 0 0 0 0 0 0
45536- 0 0 0 0 0 0 0 0 0 0 0 0
45537- 0 0 0 0 0 0 0 0 0 0 0 0
45538- 0 0 0 0 0 0 0 0 0 6 6 6
45539- 14 14 14 38 38 38 74 74 74 66 66 66
45540- 2 2 6 6 6 6 90 90 90 250 250 250
45541-253 253 253 253 253 253 238 238 238 198 198 198
45542-190 190 190 190 190 190 195 195 195 221 221 221
45543-246 246 246 253 253 253 253 253 253 253 253 253
45544-253 253 253 253 253 253 253 253 253 253 253 253
45545-253 253 253 82 82 82 2 2 6 2 2 6
45546- 2 2 6 2 2 6 2 2 6 2 2 6
45547- 2 2 6 78 78 78 70 70 70 34 34 34
45548- 14 14 14 6 6 6 0 0 0 0 0 0
45549- 0 0 0 0 0 0 0 0 0 0 0 0
45550- 0 0 0 0 0 0 0 0 0 0 0 0
45551- 0 0 0 0 0 0 0 0 0 0 0 0
45552- 0 0 0 0 0 0 0 0 0 0 0 0
45553- 0 0 0 0 0 0 0 0 0 0 0 0
45554- 0 0 0 0 0 0 0 0 0 0 0 0
45555- 0 0 0 0 0 0 0 0 0 0 0 0
45556- 0 0 0 0 0 0 0 0 0 0 0 0
45557- 0 0 0 0 0 0 0 0 0 0 0 0
45558- 0 0 0 0 0 0 0 0 0 14 14 14
45559- 34 34 34 66 66 66 78 78 78 6 6 6
45560- 2 2 6 18 18 18 218 218 218 253 253 253
45561-253 253 253 253 253 253 253 253 253 246 246 246
45562-226 226 226 231 231 231 246 246 246 253 253 253
45563-253 253 253 253 253 253 253 253 253 253 253 253
45564-253 253 253 253 253 253 253 253 253 253 253 253
45565-253 253 253 178 178 178 2 2 6 2 2 6
45566- 2 2 6 2 2 6 2 2 6 2 2 6
45567- 2 2 6 18 18 18 90 90 90 62 62 62
45568- 30 30 30 10 10 10 0 0 0 0 0 0
45569- 0 0 0 0 0 0 0 0 0 0 0 0
45570- 0 0 0 0 0 0 0 0 0 0 0 0
45571- 0 0 0 0 0 0 0 0 0 0 0 0
45572- 0 0 0 0 0 0 0 0 0 0 0 0
45573- 0 0 0 0 0 0 0 0 0 0 0 0
45574- 0 0 0 0 0 0 0 0 0 0 0 0
45575- 0 0 0 0 0 0 0 0 0 0 0 0
45576- 0 0 0 0 0 0 0 0 0 0 0 0
45577- 0 0 0 0 0 0 0 0 0 0 0 0
45578- 0 0 0 0 0 0 10 10 10 26 26 26
45579- 58 58 58 90 90 90 18 18 18 2 2 6
45580- 2 2 6 110 110 110 253 253 253 253 253 253
45581-253 253 253 253 253 253 253 253 253 253 253 253
45582-250 250 250 253 253 253 253 253 253 253 253 253
45583-253 253 253 253 253 253 253 253 253 253 253 253
45584-253 253 253 253 253 253 253 253 253 253 253 253
45585-253 253 253 231 231 231 18 18 18 2 2 6
45586- 2 2 6 2 2 6 2 2 6 2 2 6
45587- 2 2 6 2 2 6 18 18 18 94 94 94
45588- 54 54 54 26 26 26 10 10 10 0 0 0
45589- 0 0 0 0 0 0 0 0 0 0 0 0
45590- 0 0 0 0 0 0 0 0 0 0 0 0
45591- 0 0 0 0 0 0 0 0 0 0 0 0
45592- 0 0 0 0 0 0 0 0 0 0 0 0
45593- 0 0 0 0 0 0 0 0 0 0 0 0
45594- 0 0 0 0 0 0 0 0 0 0 0 0
45595- 0 0 0 0 0 0 0 0 0 0 0 0
45596- 0 0 0 0 0 0 0 0 0 0 0 0
45597- 0 0 0 0 0 0 0 0 0 0 0 0
45598- 0 0 0 6 6 6 22 22 22 50 50 50
45599- 90 90 90 26 26 26 2 2 6 2 2 6
45600- 14 14 14 195 195 195 250 250 250 253 253 253
45601-253 253 253 253 253 253 253 253 253 253 253 253
45602-253 253 253 253 253 253 253 253 253 253 253 253
45603-253 253 253 253 253 253 253 253 253 253 253 253
45604-253 253 253 253 253 253 253 253 253 253 253 253
45605-250 250 250 242 242 242 54 54 54 2 2 6
45606- 2 2 6 2 2 6 2 2 6 2 2 6
45607- 2 2 6 2 2 6 2 2 6 38 38 38
45608- 86 86 86 50 50 50 22 22 22 6 6 6
45609- 0 0 0 0 0 0 0 0 0 0 0 0
45610- 0 0 0 0 0 0 0 0 0 0 0 0
45611- 0 0 0 0 0 0 0 0 0 0 0 0
45612- 0 0 0 0 0 0 0 0 0 0 0 0
45613- 0 0 0 0 0 0 0 0 0 0 0 0
45614- 0 0 0 0 0 0 0 0 0 0 0 0
45615- 0 0 0 0 0 0 0 0 0 0 0 0
45616- 0 0 0 0 0 0 0 0 0 0 0 0
45617- 0 0 0 0 0 0 0 0 0 0 0 0
45618- 6 6 6 14 14 14 38 38 38 82 82 82
45619- 34 34 34 2 2 6 2 2 6 2 2 6
45620- 42 42 42 195 195 195 246 246 246 253 253 253
45621-253 253 253 253 253 253 253 253 253 250 250 250
45622-242 242 242 242 242 242 250 250 250 253 253 253
45623-253 253 253 253 253 253 253 253 253 253 253 253
45624-253 253 253 250 250 250 246 246 246 238 238 238
45625-226 226 226 231 231 231 101 101 101 6 6 6
45626- 2 2 6 2 2 6 2 2 6 2 2 6
45627- 2 2 6 2 2 6 2 2 6 2 2 6
45628- 38 38 38 82 82 82 42 42 42 14 14 14
45629- 6 6 6 0 0 0 0 0 0 0 0 0
45630- 0 0 0 0 0 0 0 0 0 0 0 0
45631- 0 0 0 0 0 0 0 0 0 0 0 0
45632- 0 0 0 0 0 0 0 0 0 0 0 0
45633- 0 0 0 0 0 0 0 0 0 0 0 0
45634- 0 0 0 0 0 0 0 0 0 0 0 0
45635- 0 0 0 0 0 0 0 0 0 0 0 0
45636- 0 0 0 0 0 0 0 0 0 0 0 0
45637- 0 0 0 0 0 0 0 0 0 0 0 0
45638- 10 10 10 26 26 26 62 62 62 66 66 66
45639- 2 2 6 2 2 6 2 2 6 6 6 6
45640- 70 70 70 170 170 170 206 206 206 234 234 234
45641-246 246 246 250 250 250 250 250 250 238 238 238
45642-226 226 226 231 231 231 238 238 238 250 250 250
45643-250 250 250 250 250 250 246 246 246 231 231 231
45644-214 214 214 206 206 206 202 202 202 202 202 202
45645-198 198 198 202 202 202 182 182 182 18 18 18
45646- 2 2 6 2 2 6 2 2 6 2 2 6
45647- 2 2 6 2 2 6 2 2 6 2 2 6
45648- 2 2 6 62 62 62 66 66 66 30 30 30
45649- 10 10 10 0 0 0 0 0 0 0 0 0
45650- 0 0 0 0 0 0 0 0 0 0 0 0
45651- 0 0 0 0 0 0 0 0 0 0 0 0
45652- 0 0 0 0 0 0 0 0 0 0 0 0
45653- 0 0 0 0 0 0 0 0 0 0 0 0
45654- 0 0 0 0 0 0 0 0 0 0 0 0
45655- 0 0 0 0 0 0 0 0 0 0 0 0
45656- 0 0 0 0 0 0 0 0 0 0 0 0
45657- 0 0 0 0 0 0 0 0 0 0 0 0
45658- 14 14 14 42 42 42 82 82 82 18 18 18
45659- 2 2 6 2 2 6 2 2 6 10 10 10
45660- 94 94 94 182 182 182 218 218 218 242 242 242
45661-250 250 250 253 253 253 253 253 253 250 250 250
45662-234 234 234 253 253 253 253 253 253 253 253 253
45663-253 253 253 253 253 253 253 253 253 246 246 246
45664-238 238 238 226 226 226 210 210 210 202 202 202
45665-195 195 195 195 195 195 210 210 210 158 158 158
45666- 6 6 6 14 14 14 50 50 50 14 14 14
45667- 2 2 6 2 2 6 2 2 6 2 2 6
45668- 2 2 6 6 6 6 86 86 86 46 46 46
45669- 18 18 18 6 6 6 0 0 0 0 0 0
45670- 0 0 0 0 0 0 0 0 0 0 0 0
45671- 0 0 0 0 0 0 0 0 0 0 0 0
45672- 0 0 0 0 0 0 0 0 0 0 0 0
45673- 0 0 0 0 0 0 0 0 0 0 0 0
45674- 0 0 0 0 0 0 0 0 0 0 0 0
45675- 0 0 0 0 0 0 0 0 0 0 0 0
45676- 0 0 0 0 0 0 0 0 0 0 0 0
45677- 0 0 0 0 0 0 0 0 0 6 6 6
45678- 22 22 22 54 54 54 70 70 70 2 2 6
45679- 2 2 6 10 10 10 2 2 6 22 22 22
45680-166 166 166 231 231 231 250 250 250 253 253 253
45681-253 253 253 253 253 253 253 253 253 250 250 250
45682-242 242 242 253 253 253 253 253 253 253 253 253
45683-253 253 253 253 253 253 253 253 253 253 253 253
45684-253 253 253 253 253 253 253 253 253 246 246 246
45685-231 231 231 206 206 206 198 198 198 226 226 226
45686- 94 94 94 2 2 6 6 6 6 38 38 38
45687- 30 30 30 2 2 6 2 2 6 2 2 6
45688- 2 2 6 2 2 6 62 62 62 66 66 66
45689- 26 26 26 10 10 10 0 0 0 0 0 0
45690- 0 0 0 0 0 0 0 0 0 0 0 0
45691- 0 0 0 0 0 0 0 0 0 0 0 0
45692- 0 0 0 0 0 0 0 0 0 0 0 0
45693- 0 0 0 0 0 0 0 0 0 0 0 0
45694- 0 0 0 0 0 0 0 0 0 0 0 0
45695- 0 0 0 0 0 0 0 0 0 0 0 0
45696- 0 0 0 0 0 0 0 0 0 0 0 0
45697- 0 0 0 0 0 0 0 0 0 10 10 10
45698- 30 30 30 74 74 74 50 50 50 2 2 6
45699- 26 26 26 26 26 26 2 2 6 106 106 106
45700-238 238 238 253 253 253 253 253 253 253 253 253
45701-253 253 253 253 253 253 253 253 253 253 253 253
45702-253 253 253 253 253 253 253 253 253 253 253 253
45703-253 253 253 253 253 253 253 253 253 253 253 253
45704-253 253 253 253 253 253 253 253 253 253 253 253
45705-253 253 253 246 246 246 218 218 218 202 202 202
45706-210 210 210 14 14 14 2 2 6 2 2 6
45707- 30 30 30 22 22 22 2 2 6 2 2 6
45708- 2 2 6 2 2 6 18 18 18 86 86 86
45709- 42 42 42 14 14 14 0 0 0 0 0 0
45710- 0 0 0 0 0 0 0 0 0 0 0 0
45711- 0 0 0 0 0 0 0 0 0 0 0 0
45712- 0 0 0 0 0 0 0 0 0 0 0 0
45713- 0 0 0 0 0 0 0 0 0 0 0 0
45714- 0 0 0 0 0 0 0 0 0 0 0 0
45715- 0 0 0 0 0 0 0 0 0 0 0 0
45716- 0 0 0 0 0 0 0 0 0 0 0 0
45717- 0 0 0 0 0 0 0 0 0 14 14 14
45718- 42 42 42 90 90 90 22 22 22 2 2 6
45719- 42 42 42 2 2 6 18 18 18 218 218 218
45720-253 253 253 253 253 253 253 253 253 253 253 253
45721-253 253 253 253 253 253 253 253 253 253 253 253
45722-253 253 253 253 253 253 253 253 253 253 253 253
45723-253 253 253 253 253 253 253 253 253 253 253 253
45724-253 253 253 253 253 253 253 253 253 253 253 253
45725-253 253 253 253 253 253 250 250 250 221 221 221
45726-218 218 218 101 101 101 2 2 6 14 14 14
45727- 18 18 18 38 38 38 10 10 10 2 2 6
45728- 2 2 6 2 2 6 2 2 6 78 78 78
45729- 58 58 58 22 22 22 6 6 6 0 0 0
45730- 0 0 0 0 0 0 0 0 0 0 0 0
45731- 0 0 0 0 0 0 0 0 0 0 0 0
45732- 0 0 0 0 0 0 0 0 0 0 0 0
45733- 0 0 0 0 0 0 0 0 0 0 0 0
45734- 0 0 0 0 0 0 0 0 0 0 0 0
45735- 0 0 0 0 0 0 0 0 0 0 0 0
45736- 0 0 0 0 0 0 0 0 0 0 0 0
45737- 0 0 0 0 0 0 6 6 6 18 18 18
45738- 54 54 54 82 82 82 2 2 6 26 26 26
45739- 22 22 22 2 2 6 123 123 123 253 253 253
45740-253 253 253 253 253 253 253 253 253 253 253 253
45741-253 253 253 253 253 253 253 253 253 253 253 253
45742-253 253 253 253 253 253 253 253 253 253 253 253
45743-253 253 253 253 253 253 253 253 253 253 253 253
45744-253 253 253 253 253 253 253 253 253 253 253 253
45745-253 253 253 253 253 253 253 253 253 250 250 250
45746-238 238 238 198 198 198 6 6 6 38 38 38
45747- 58 58 58 26 26 26 38 38 38 2 2 6
45748- 2 2 6 2 2 6 2 2 6 46 46 46
45749- 78 78 78 30 30 30 10 10 10 0 0 0
45750- 0 0 0 0 0 0 0 0 0 0 0 0
45751- 0 0 0 0 0 0 0 0 0 0 0 0
45752- 0 0 0 0 0 0 0 0 0 0 0 0
45753- 0 0 0 0 0 0 0 0 0 0 0 0
45754- 0 0 0 0 0 0 0 0 0 0 0 0
45755- 0 0 0 0 0 0 0 0 0 0 0 0
45756- 0 0 0 0 0 0 0 0 0 0 0 0
45757- 0 0 0 0 0 0 10 10 10 30 30 30
45758- 74 74 74 58 58 58 2 2 6 42 42 42
45759- 2 2 6 22 22 22 231 231 231 253 253 253
45760-253 253 253 253 253 253 253 253 253 253 253 253
45761-253 253 253 253 253 253 253 253 253 250 250 250
45762-253 253 253 253 253 253 253 253 253 253 253 253
45763-253 253 253 253 253 253 253 253 253 253 253 253
45764-253 253 253 253 253 253 253 253 253 253 253 253
45765-253 253 253 253 253 253 253 253 253 253 253 253
45766-253 253 253 246 246 246 46 46 46 38 38 38
45767- 42 42 42 14 14 14 38 38 38 14 14 14
45768- 2 2 6 2 2 6 2 2 6 6 6 6
45769- 86 86 86 46 46 46 14 14 14 0 0 0
45770- 0 0 0 0 0 0 0 0 0 0 0 0
45771- 0 0 0 0 0 0 0 0 0 0 0 0
45772- 0 0 0 0 0 0 0 0 0 0 0 0
45773- 0 0 0 0 0 0 0 0 0 0 0 0
45774- 0 0 0 0 0 0 0 0 0 0 0 0
45775- 0 0 0 0 0 0 0 0 0 0 0 0
45776- 0 0 0 0 0 0 0 0 0 0 0 0
45777- 0 0 0 6 6 6 14 14 14 42 42 42
45778- 90 90 90 18 18 18 18 18 18 26 26 26
45779- 2 2 6 116 116 116 253 253 253 253 253 253
45780-253 253 253 253 253 253 253 253 253 253 253 253
45781-253 253 253 253 253 253 250 250 250 238 238 238
45782-253 253 253 253 253 253 253 253 253 253 253 253
45783-253 253 253 253 253 253 253 253 253 253 253 253
45784-253 253 253 253 253 253 253 253 253 253 253 253
45785-253 253 253 253 253 253 253 253 253 253 253 253
45786-253 253 253 253 253 253 94 94 94 6 6 6
45787- 2 2 6 2 2 6 10 10 10 34 34 34
45788- 2 2 6 2 2 6 2 2 6 2 2 6
45789- 74 74 74 58 58 58 22 22 22 6 6 6
45790- 0 0 0 0 0 0 0 0 0 0 0 0
45791- 0 0 0 0 0 0 0 0 0 0 0 0
45792- 0 0 0 0 0 0 0 0 0 0 0 0
45793- 0 0 0 0 0 0 0 0 0 0 0 0
45794- 0 0 0 0 0 0 0 0 0 0 0 0
45795- 0 0 0 0 0 0 0 0 0 0 0 0
45796- 0 0 0 0 0 0 0 0 0 0 0 0
45797- 0 0 0 10 10 10 26 26 26 66 66 66
45798- 82 82 82 2 2 6 38 38 38 6 6 6
45799- 14 14 14 210 210 210 253 253 253 253 253 253
45800-253 253 253 253 253 253 253 253 253 253 253 253
45801-253 253 253 253 253 253 246 246 246 242 242 242
45802-253 253 253 253 253 253 253 253 253 253 253 253
45803-253 253 253 253 253 253 253 253 253 253 253 253
45804-253 253 253 253 253 253 253 253 253 253 253 253
45805-253 253 253 253 253 253 253 253 253 253 253 253
45806-253 253 253 253 253 253 144 144 144 2 2 6
45807- 2 2 6 2 2 6 2 2 6 46 46 46
45808- 2 2 6 2 2 6 2 2 6 2 2 6
45809- 42 42 42 74 74 74 30 30 30 10 10 10
45810- 0 0 0 0 0 0 0 0 0 0 0 0
45811- 0 0 0 0 0 0 0 0 0 0 0 0
45812- 0 0 0 0 0 0 0 0 0 0 0 0
45813- 0 0 0 0 0 0 0 0 0 0 0 0
45814- 0 0 0 0 0 0 0 0 0 0 0 0
45815- 0 0 0 0 0 0 0 0 0 0 0 0
45816- 0 0 0 0 0 0 0 0 0 0 0 0
45817- 6 6 6 14 14 14 42 42 42 90 90 90
45818- 26 26 26 6 6 6 42 42 42 2 2 6
45819- 74 74 74 250 250 250 253 253 253 253 253 253
45820-253 253 253 253 253 253 253 253 253 253 253 253
45821-253 253 253 253 253 253 242 242 242 242 242 242
45822-253 253 253 253 253 253 253 253 253 253 253 253
45823-253 253 253 253 253 253 253 253 253 253 253 253
45824-253 253 253 253 253 253 253 253 253 253 253 253
45825-253 253 253 253 253 253 253 253 253 253 253 253
45826-253 253 253 253 253 253 182 182 182 2 2 6
45827- 2 2 6 2 2 6 2 2 6 46 46 46
45828- 2 2 6 2 2 6 2 2 6 2 2 6
45829- 10 10 10 86 86 86 38 38 38 10 10 10
45830- 0 0 0 0 0 0 0 0 0 0 0 0
45831- 0 0 0 0 0 0 0 0 0 0 0 0
45832- 0 0 0 0 0 0 0 0 0 0 0 0
45833- 0 0 0 0 0 0 0 0 0 0 0 0
45834- 0 0 0 0 0 0 0 0 0 0 0 0
45835- 0 0 0 0 0 0 0 0 0 0 0 0
45836- 0 0 0 0 0 0 0 0 0 0 0 0
45837- 10 10 10 26 26 26 66 66 66 82 82 82
45838- 2 2 6 22 22 22 18 18 18 2 2 6
45839-149 149 149 253 253 253 253 253 253 253 253 253
45840-253 253 253 253 253 253 253 253 253 253 253 253
45841-253 253 253 253 253 253 234 234 234 242 242 242
45842-253 253 253 253 253 253 253 253 253 253 253 253
45843-253 253 253 253 253 253 253 253 253 253 253 253
45844-253 253 253 253 253 253 253 253 253 253 253 253
45845-253 253 253 253 253 253 253 253 253 253 253 253
45846-253 253 253 253 253 253 206 206 206 2 2 6
45847- 2 2 6 2 2 6 2 2 6 38 38 38
45848- 2 2 6 2 2 6 2 2 6 2 2 6
45849- 6 6 6 86 86 86 46 46 46 14 14 14
45850- 0 0 0 0 0 0 0 0 0 0 0 0
45851- 0 0 0 0 0 0 0 0 0 0 0 0
45852- 0 0 0 0 0 0 0 0 0 0 0 0
45853- 0 0 0 0 0 0 0 0 0 0 0 0
45854- 0 0 0 0 0 0 0 0 0 0 0 0
45855- 0 0 0 0 0 0 0 0 0 0 0 0
45856- 0 0 0 0 0 0 0 0 0 6 6 6
45857- 18 18 18 46 46 46 86 86 86 18 18 18
45858- 2 2 6 34 34 34 10 10 10 6 6 6
45859-210 210 210 253 253 253 253 253 253 253 253 253
45860-253 253 253 253 253 253 253 253 253 253 253 253
45861-253 253 253 253 253 253 234 234 234 242 242 242
45862-253 253 253 253 253 253 253 253 253 253 253 253
45863-253 253 253 253 253 253 253 253 253 253 253 253
45864-253 253 253 253 253 253 253 253 253 253 253 253
45865-253 253 253 253 253 253 253 253 253 253 253 253
45866-253 253 253 253 253 253 221 221 221 6 6 6
45867- 2 2 6 2 2 6 6 6 6 30 30 30
45868- 2 2 6 2 2 6 2 2 6 2 2 6
45869- 2 2 6 82 82 82 54 54 54 18 18 18
45870- 6 6 6 0 0 0 0 0 0 0 0 0
45871- 0 0 0 0 0 0 0 0 0 0 0 0
45872- 0 0 0 0 0 0 0 0 0 0 0 0
45873- 0 0 0 0 0 0 0 0 0 0 0 0
45874- 0 0 0 0 0 0 0 0 0 0 0 0
45875- 0 0 0 0 0 0 0 0 0 0 0 0
45876- 0 0 0 0 0 0 0 0 0 10 10 10
45877- 26 26 26 66 66 66 62 62 62 2 2 6
45878- 2 2 6 38 38 38 10 10 10 26 26 26
45879-238 238 238 253 253 253 253 253 253 253 253 253
45880-253 253 253 253 253 253 253 253 253 253 253 253
45881-253 253 253 253 253 253 231 231 231 238 238 238
45882-253 253 253 253 253 253 253 253 253 253 253 253
45883-253 253 253 253 253 253 253 253 253 253 253 253
45884-253 253 253 253 253 253 253 253 253 253 253 253
45885-253 253 253 253 253 253 253 253 253 253 253 253
45886-253 253 253 253 253 253 231 231 231 6 6 6
45887- 2 2 6 2 2 6 10 10 10 30 30 30
45888- 2 2 6 2 2 6 2 2 6 2 2 6
45889- 2 2 6 66 66 66 58 58 58 22 22 22
45890- 6 6 6 0 0 0 0 0 0 0 0 0
45891- 0 0 0 0 0 0 0 0 0 0 0 0
45892- 0 0 0 0 0 0 0 0 0 0 0 0
45893- 0 0 0 0 0 0 0 0 0 0 0 0
45894- 0 0 0 0 0 0 0 0 0 0 0 0
45895- 0 0 0 0 0 0 0 0 0 0 0 0
45896- 0 0 0 0 0 0 0 0 0 10 10 10
45897- 38 38 38 78 78 78 6 6 6 2 2 6
45898- 2 2 6 46 46 46 14 14 14 42 42 42
45899-246 246 246 253 253 253 253 253 253 253 253 253
45900-253 253 253 253 253 253 253 253 253 253 253 253
45901-253 253 253 253 253 253 231 231 231 242 242 242
45902-253 253 253 253 253 253 253 253 253 253 253 253
45903-253 253 253 253 253 253 253 253 253 253 253 253
45904-253 253 253 253 253 253 253 253 253 253 253 253
45905-253 253 253 253 253 253 253 253 253 253 253 253
45906-253 253 253 253 253 253 234 234 234 10 10 10
45907- 2 2 6 2 2 6 22 22 22 14 14 14
45908- 2 2 6 2 2 6 2 2 6 2 2 6
45909- 2 2 6 66 66 66 62 62 62 22 22 22
45910- 6 6 6 0 0 0 0 0 0 0 0 0
45911- 0 0 0 0 0 0 0 0 0 0 0 0
45912- 0 0 0 0 0 0 0 0 0 0 0 0
45913- 0 0 0 0 0 0 0 0 0 0 0 0
45914- 0 0 0 0 0 0 0 0 0 0 0 0
45915- 0 0 0 0 0 0 0 0 0 0 0 0
45916- 0 0 0 0 0 0 6 6 6 18 18 18
45917- 50 50 50 74 74 74 2 2 6 2 2 6
45918- 14 14 14 70 70 70 34 34 34 62 62 62
45919-250 250 250 253 253 253 253 253 253 253 253 253
45920-253 253 253 253 253 253 253 253 253 253 253 253
45921-253 253 253 253 253 253 231 231 231 246 246 246
45922-253 253 253 253 253 253 253 253 253 253 253 253
45923-253 253 253 253 253 253 253 253 253 253 253 253
45924-253 253 253 253 253 253 253 253 253 253 253 253
45925-253 253 253 253 253 253 253 253 253 253 253 253
45926-253 253 253 253 253 253 234 234 234 14 14 14
45927- 2 2 6 2 2 6 30 30 30 2 2 6
45928- 2 2 6 2 2 6 2 2 6 2 2 6
45929- 2 2 6 66 66 66 62 62 62 22 22 22
45930- 6 6 6 0 0 0 0 0 0 0 0 0
45931- 0 0 0 0 0 0 0 0 0 0 0 0
45932- 0 0 0 0 0 0 0 0 0 0 0 0
45933- 0 0 0 0 0 0 0 0 0 0 0 0
45934- 0 0 0 0 0 0 0 0 0 0 0 0
45935- 0 0 0 0 0 0 0 0 0 0 0 0
45936- 0 0 0 0 0 0 6 6 6 18 18 18
45937- 54 54 54 62 62 62 2 2 6 2 2 6
45938- 2 2 6 30 30 30 46 46 46 70 70 70
45939-250 250 250 253 253 253 253 253 253 253 253 253
45940-253 253 253 253 253 253 253 253 253 253 253 253
45941-253 253 253 253 253 253 231 231 231 246 246 246
45942-253 253 253 253 253 253 253 253 253 253 253 253
45943-253 253 253 253 253 253 253 253 253 253 253 253
45944-253 253 253 253 253 253 253 253 253 253 253 253
45945-253 253 253 253 253 253 253 253 253 253 253 253
45946-253 253 253 253 253 253 226 226 226 10 10 10
45947- 2 2 6 6 6 6 30 30 30 2 2 6
45948- 2 2 6 2 2 6 2 2 6 2 2 6
45949- 2 2 6 66 66 66 58 58 58 22 22 22
45950- 6 6 6 0 0 0 0 0 0 0 0 0
45951- 0 0 0 0 0 0 0 0 0 0 0 0
45952- 0 0 0 0 0 0 0 0 0 0 0 0
45953- 0 0 0 0 0 0 0 0 0 0 0 0
45954- 0 0 0 0 0 0 0 0 0 0 0 0
45955- 0 0 0 0 0 0 0 0 0 0 0 0
45956- 0 0 0 0 0 0 6 6 6 22 22 22
45957- 58 58 58 62 62 62 2 2 6 2 2 6
45958- 2 2 6 2 2 6 30 30 30 78 78 78
45959-250 250 250 253 253 253 253 253 253 253 253 253
45960-253 253 253 253 253 253 253 253 253 253 253 253
45961-253 253 253 253 253 253 231 231 231 246 246 246
45962-253 253 253 253 253 253 253 253 253 253 253 253
45963-253 253 253 253 253 253 253 253 253 253 253 253
45964-253 253 253 253 253 253 253 253 253 253 253 253
45965-253 253 253 253 253 253 253 253 253 253 253 253
45966-253 253 253 253 253 253 206 206 206 2 2 6
45967- 22 22 22 34 34 34 18 14 6 22 22 22
45968- 26 26 26 18 18 18 6 6 6 2 2 6
45969- 2 2 6 82 82 82 54 54 54 18 18 18
45970- 6 6 6 0 0 0 0 0 0 0 0 0
45971- 0 0 0 0 0 0 0 0 0 0 0 0
45972- 0 0 0 0 0 0 0 0 0 0 0 0
45973- 0 0 0 0 0 0 0 0 0 0 0 0
45974- 0 0 0 0 0 0 0 0 0 0 0 0
45975- 0 0 0 0 0 0 0 0 0 0 0 0
45976- 0 0 0 0 0 0 6 6 6 26 26 26
45977- 62 62 62 106 106 106 74 54 14 185 133 11
45978-210 162 10 121 92 8 6 6 6 62 62 62
45979-238 238 238 253 253 253 253 253 253 253 253 253
45980-253 253 253 253 253 253 253 253 253 253 253 253
45981-253 253 253 253 253 253 231 231 231 246 246 246
45982-253 253 253 253 253 253 253 253 253 253 253 253
45983-253 253 253 253 253 253 253 253 253 253 253 253
45984-253 253 253 253 253 253 253 253 253 253 253 253
45985-253 253 253 253 253 253 253 253 253 253 253 253
45986-253 253 253 253 253 253 158 158 158 18 18 18
45987- 14 14 14 2 2 6 2 2 6 2 2 6
45988- 6 6 6 18 18 18 66 66 66 38 38 38
45989- 6 6 6 94 94 94 50 50 50 18 18 18
45990- 6 6 6 0 0 0 0 0 0 0 0 0
45991- 0 0 0 0 0 0 0 0 0 0 0 0
45992- 0 0 0 0 0 0 0 0 0 0 0 0
45993- 0 0 0 0 0 0 0 0 0 0 0 0
45994- 0 0 0 0 0 0 0 0 0 0 0 0
45995- 0 0 0 0 0 0 0 0 0 6 6 6
45996- 10 10 10 10 10 10 18 18 18 38 38 38
45997- 78 78 78 142 134 106 216 158 10 242 186 14
45998-246 190 14 246 190 14 156 118 10 10 10 10
45999- 90 90 90 238 238 238 253 253 253 253 253 253
46000-253 253 253 253 253 253 253 253 253 253 253 253
46001-253 253 253 253 253 253 231 231 231 250 250 250
46002-253 253 253 253 253 253 253 253 253 253 253 253
46003-253 253 253 253 253 253 253 253 253 253 253 253
46004-253 253 253 253 253 253 253 253 253 253 253 253
46005-253 253 253 253 253 253 253 253 253 246 230 190
46006-238 204 91 238 204 91 181 142 44 37 26 9
46007- 2 2 6 2 2 6 2 2 6 2 2 6
46008- 2 2 6 2 2 6 38 38 38 46 46 46
46009- 26 26 26 106 106 106 54 54 54 18 18 18
46010- 6 6 6 0 0 0 0 0 0 0 0 0
46011- 0 0 0 0 0 0 0 0 0 0 0 0
46012- 0 0 0 0 0 0 0 0 0 0 0 0
46013- 0 0 0 0 0 0 0 0 0 0 0 0
46014- 0 0 0 0 0 0 0 0 0 0 0 0
46015- 0 0 0 6 6 6 14 14 14 22 22 22
46016- 30 30 30 38 38 38 50 50 50 70 70 70
46017-106 106 106 190 142 34 226 170 11 242 186 14
46018-246 190 14 246 190 14 246 190 14 154 114 10
46019- 6 6 6 74 74 74 226 226 226 253 253 253
46020-253 253 253 253 253 253 253 253 253 253 253 253
46021-253 253 253 253 253 253 231 231 231 250 250 250
46022-253 253 253 253 253 253 253 253 253 253 253 253
46023-253 253 253 253 253 253 253 253 253 253 253 253
46024-253 253 253 253 253 253 253 253 253 253 253 253
46025-253 253 253 253 253 253 253 253 253 228 184 62
46026-241 196 14 241 208 19 232 195 16 38 30 10
46027- 2 2 6 2 2 6 2 2 6 2 2 6
46028- 2 2 6 6 6 6 30 30 30 26 26 26
46029-203 166 17 154 142 90 66 66 66 26 26 26
46030- 6 6 6 0 0 0 0 0 0 0 0 0
46031- 0 0 0 0 0 0 0 0 0 0 0 0
46032- 0 0 0 0 0 0 0 0 0 0 0 0
46033- 0 0 0 0 0 0 0 0 0 0 0 0
46034- 0 0 0 0 0 0 0 0 0 0 0 0
46035- 6 6 6 18 18 18 38 38 38 58 58 58
46036- 78 78 78 86 86 86 101 101 101 123 123 123
46037-175 146 61 210 150 10 234 174 13 246 186 14
46038-246 190 14 246 190 14 246 190 14 238 190 10
46039-102 78 10 2 2 6 46 46 46 198 198 198
46040-253 253 253 253 253 253 253 253 253 253 253 253
46041-253 253 253 253 253 253 234 234 234 242 242 242
46042-253 253 253 253 253 253 253 253 253 253 253 253
46043-253 253 253 253 253 253 253 253 253 253 253 253
46044-253 253 253 253 253 253 253 253 253 253 253 253
46045-253 253 253 253 253 253 253 253 253 224 178 62
46046-242 186 14 241 196 14 210 166 10 22 18 6
46047- 2 2 6 2 2 6 2 2 6 2 2 6
46048- 2 2 6 2 2 6 6 6 6 121 92 8
46049-238 202 15 232 195 16 82 82 82 34 34 34
46050- 10 10 10 0 0 0 0 0 0 0 0 0
46051- 0 0 0 0 0 0 0 0 0 0 0 0
46052- 0 0 0 0 0 0 0 0 0 0 0 0
46053- 0 0 0 0 0 0 0 0 0 0 0 0
46054- 0 0 0 0 0 0 0 0 0 0 0 0
46055- 14 14 14 38 38 38 70 70 70 154 122 46
46056-190 142 34 200 144 11 197 138 11 197 138 11
46057-213 154 11 226 170 11 242 186 14 246 190 14
46058-246 190 14 246 190 14 246 190 14 246 190 14
46059-225 175 15 46 32 6 2 2 6 22 22 22
46060-158 158 158 250 250 250 253 253 253 253 253 253
46061-253 253 253 253 253 253 253 253 253 253 253 253
46062-253 253 253 253 253 253 253 253 253 253 253 253
46063-253 253 253 253 253 253 253 253 253 253 253 253
46064-253 253 253 253 253 253 253 253 253 253 253 253
46065-253 253 253 250 250 250 242 242 242 224 178 62
46066-239 182 13 236 186 11 213 154 11 46 32 6
46067- 2 2 6 2 2 6 2 2 6 2 2 6
46068- 2 2 6 2 2 6 61 42 6 225 175 15
46069-238 190 10 236 186 11 112 100 78 42 42 42
46070- 14 14 14 0 0 0 0 0 0 0 0 0
46071- 0 0 0 0 0 0 0 0 0 0 0 0
46072- 0 0 0 0 0 0 0 0 0 0 0 0
46073- 0 0 0 0 0 0 0 0 0 0 0 0
46074- 0 0 0 0 0 0 0 0 0 6 6 6
46075- 22 22 22 54 54 54 154 122 46 213 154 11
46076-226 170 11 230 174 11 226 170 11 226 170 11
46077-236 178 12 242 186 14 246 190 14 246 190 14
46078-246 190 14 246 190 14 246 190 14 246 190 14
46079-241 196 14 184 144 12 10 10 10 2 2 6
46080- 6 6 6 116 116 116 242 242 242 253 253 253
46081-253 253 253 253 253 253 253 253 253 253 253 253
46082-253 253 253 253 253 253 253 253 253 253 253 253
46083-253 253 253 253 253 253 253 253 253 253 253 253
46084-253 253 253 253 253 253 253 253 253 253 253 253
46085-253 253 253 231 231 231 198 198 198 214 170 54
46086-236 178 12 236 178 12 210 150 10 137 92 6
46087- 18 14 6 2 2 6 2 2 6 2 2 6
46088- 6 6 6 70 47 6 200 144 11 236 178 12
46089-239 182 13 239 182 13 124 112 88 58 58 58
46090- 22 22 22 6 6 6 0 0 0 0 0 0
46091- 0 0 0 0 0 0 0 0 0 0 0 0
46092- 0 0 0 0 0 0 0 0 0 0 0 0
46093- 0 0 0 0 0 0 0 0 0 0 0 0
46094- 0 0 0 0 0 0 0 0 0 10 10 10
46095- 30 30 30 70 70 70 180 133 36 226 170 11
46096-239 182 13 242 186 14 242 186 14 246 186 14
46097-246 190 14 246 190 14 246 190 14 246 190 14
46098-246 190 14 246 190 14 246 190 14 246 190 14
46099-246 190 14 232 195 16 98 70 6 2 2 6
46100- 2 2 6 2 2 6 66 66 66 221 221 221
46101-253 253 253 253 253 253 253 253 253 253 253 253
46102-253 253 253 253 253 253 253 253 253 253 253 253
46103-253 253 253 253 253 253 253 253 253 253 253 253
46104-253 253 253 253 253 253 253 253 253 253 253 253
46105-253 253 253 206 206 206 198 198 198 214 166 58
46106-230 174 11 230 174 11 216 158 10 192 133 9
46107-163 110 8 116 81 8 102 78 10 116 81 8
46108-167 114 7 197 138 11 226 170 11 239 182 13
46109-242 186 14 242 186 14 162 146 94 78 78 78
46110- 34 34 34 14 14 14 6 6 6 0 0 0
46111- 0 0 0 0 0 0 0 0 0 0 0 0
46112- 0 0 0 0 0 0 0 0 0 0 0 0
46113- 0 0 0 0 0 0 0 0 0 0 0 0
46114- 0 0 0 0 0 0 0 0 0 6 6 6
46115- 30 30 30 78 78 78 190 142 34 226 170 11
46116-239 182 13 246 190 14 246 190 14 246 190 14
46117-246 190 14 246 190 14 246 190 14 246 190 14
46118-246 190 14 246 190 14 246 190 14 246 190 14
46119-246 190 14 241 196 14 203 166 17 22 18 6
46120- 2 2 6 2 2 6 2 2 6 38 38 38
46121-218 218 218 253 253 253 253 253 253 253 253 253
46122-253 253 253 253 253 253 253 253 253 253 253 253
46123-253 253 253 253 253 253 253 253 253 253 253 253
46124-253 253 253 253 253 253 253 253 253 253 253 253
46125-250 250 250 206 206 206 198 198 198 202 162 69
46126-226 170 11 236 178 12 224 166 10 210 150 10
46127-200 144 11 197 138 11 192 133 9 197 138 11
46128-210 150 10 226 170 11 242 186 14 246 190 14
46129-246 190 14 246 186 14 225 175 15 124 112 88
46130- 62 62 62 30 30 30 14 14 14 6 6 6
46131- 0 0 0 0 0 0 0 0 0 0 0 0
46132- 0 0 0 0 0 0 0 0 0 0 0 0
46133- 0 0 0 0 0 0 0 0 0 0 0 0
46134- 0 0 0 0 0 0 0 0 0 10 10 10
46135- 30 30 30 78 78 78 174 135 50 224 166 10
46136-239 182 13 246 190 14 246 190 14 246 190 14
46137-246 190 14 246 190 14 246 190 14 246 190 14
46138-246 190 14 246 190 14 246 190 14 246 190 14
46139-246 190 14 246 190 14 241 196 14 139 102 15
46140- 2 2 6 2 2 6 2 2 6 2 2 6
46141- 78 78 78 250 250 250 253 253 253 253 253 253
46142-253 253 253 253 253 253 253 253 253 253 253 253
46143-253 253 253 253 253 253 253 253 253 253 253 253
46144-253 253 253 253 253 253 253 253 253 253 253 253
46145-250 250 250 214 214 214 198 198 198 190 150 46
46146-219 162 10 236 178 12 234 174 13 224 166 10
46147-216 158 10 213 154 11 213 154 11 216 158 10
46148-226 170 11 239 182 13 246 190 14 246 190 14
46149-246 190 14 246 190 14 242 186 14 206 162 42
46150-101 101 101 58 58 58 30 30 30 14 14 14
46151- 6 6 6 0 0 0 0 0 0 0 0 0
46152- 0 0 0 0 0 0 0 0 0 0 0 0
46153- 0 0 0 0 0 0 0 0 0 0 0 0
46154- 0 0 0 0 0 0 0 0 0 10 10 10
46155- 30 30 30 74 74 74 174 135 50 216 158 10
46156-236 178 12 246 190 14 246 190 14 246 190 14
46157-246 190 14 246 190 14 246 190 14 246 190 14
46158-246 190 14 246 190 14 246 190 14 246 190 14
46159-246 190 14 246 190 14 241 196 14 226 184 13
46160- 61 42 6 2 2 6 2 2 6 2 2 6
46161- 22 22 22 238 238 238 253 253 253 253 253 253
46162-253 253 253 253 253 253 253 253 253 253 253 253
46163-253 253 253 253 253 253 253 253 253 253 253 253
46164-253 253 253 253 253 253 253 253 253 253 253 253
46165-253 253 253 226 226 226 187 187 187 180 133 36
46166-216 158 10 236 178 12 239 182 13 236 178 12
46167-230 174 11 226 170 11 226 170 11 230 174 11
46168-236 178 12 242 186 14 246 190 14 246 190 14
46169-246 190 14 246 190 14 246 186 14 239 182 13
46170-206 162 42 106 106 106 66 66 66 34 34 34
46171- 14 14 14 6 6 6 0 0 0 0 0 0
46172- 0 0 0 0 0 0 0 0 0 0 0 0
46173- 0 0 0 0 0 0 0 0 0 0 0 0
46174- 0 0 0 0 0 0 0 0 0 6 6 6
46175- 26 26 26 70 70 70 163 133 67 213 154 11
46176-236 178 12 246 190 14 246 190 14 246 190 14
46177-246 190 14 246 190 14 246 190 14 246 190 14
46178-246 190 14 246 190 14 246 190 14 246 190 14
46179-246 190 14 246 190 14 246 190 14 241 196 14
46180-190 146 13 18 14 6 2 2 6 2 2 6
46181- 46 46 46 246 246 246 253 253 253 253 253 253
46182-253 253 253 253 253 253 253 253 253 253 253 253
46183-253 253 253 253 253 253 253 253 253 253 253 253
46184-253 253 253 253 253 253 253 253 253 253 253 253
46185-253 253 253 221 221 221 86 86 86 156 107 11
46186-216 158 10 236 178 12 242 186 14 246 186 14
46187-242 186 14 239 182 13 239 182 13 242 186 14
46188-242 186 14 246 186 14 246 190 14 246 190 14
46189-246 190 14 246 190 14 246 190 14 246 190 14
46190-242 186 14 225 175 15 142 122 72 66 66 66
46191- 30 30 30 10 10 10 0 0 0 0 0 0
46192- 0 0 0 0 0 0 0 0 0 0 0 0
46193- 0 0 0 0 0 0 0 0 0 0 0 0
46194- 0 0 0 0 0 0 0 0 0 6 6 6
46195- 26 26 26 70 70 70 163 133 67 210 150 10
46196-236 178 12 246 190 14 246 190 14 246 190 14
46197-246 190 14 246 190 14 246 190 14 246 190 14
46198-246 190 14 246 190 14 246 190 14 246 190 14
46199-246 190 14 246 190 14 246 190 14 246 190 14
46200-232 195 16 121 92 8 34 34 34 106 106 106
46201-221 221 221 253 253 253 253 253 253 253 253 253
46202-253 253 253 253 253 253 253 253 253 253 253 253
46203-253 253 253 253 253 253 253 253 253 253 253 253
46204-253 253 253 253 253 253 253 253 253 253 253 253
46205-242 242 242 82 82 82 18 14 6 163 110 8
46206-216 158 10 236 178 12 242 186 14 246 190 14
46207-246 190 14 246 190 14 246 190 14 246 190 14
46208-246 190 14 246 190 14 246 190 14 246 190 14
46209-246 190 14 246 190 14 246 190 14 246 190 14
46210-246 190 14 246 190 14 242 186 14 163 133 67
46211- 46 46 46 18 18 18 6 6 6 0 0 0
46212- 0 0 0 0 0 0 0 0 0 0 0 0
46213- 0 0 0 0 0 0 0 0 0 0 0 0
46214- 0 0 0 0 0 0 0 0 0 10 10 10
46215- 30 30 30 78 78 78 163 133 67 210 150 10
46216-236 178 12 246 186 14 246 190 14 246 190 14
46217-246 190 14 246 190 14 246 190 14 246 190 14
46218-246 190 14 246 190 14 246 190 14 246 190 14
46219-246 190 14 246 190 14 246 190 14 246 190 14
46220-241 196 14 215 174 15 190 178 144 253 253 253
46221-253 253 253 253 253 253 253 253 253 253 253 253
46222-253 253 253 253 253 253 253 253 253 253 253 253
46223-253 253 253 253 253 253 253 253 253 253 253 253
46224-253 253 253 253 253 253 253 253 253 218 218 218
46225- 58 58 58 2 2 6 22 18 6 167 114 7
46226-216 158 10 236 178 12 246 186 14 246 190 14
46227-246 190 14 246 190 14 246 190 14 246 190 14
46228-246 190 14 246 190 14 246 190 14 246 190 14
46229-246 190 14 246 190 14 246 190 14 246 190 14
46230-246 190 14 246 186 14 242 186 14 190 150 46
46231- 54 54 54 22 22 22 6 6 6 0 0 0
46232- 0 0 0 0 0 0 0 0 0 0 0 0
46233- 0 0 0 0 0 0 0 0 0 0 0 0
46234- 0 0 0 0 0 0 0 0 0 14 14 14
46235- 38 38 38 86 86 86 180 133 36 213 154 11
46236-236 178 12 246 186 14 246 190 14 246 190 14
46237-246 190 14 246 190 14 246 190 14 246 190 14
46238-246 190 14 246 190 14 246 190 14 246 190 14
46239-246 190 14 246 190 14 246 190 14 246 190 14
46240-246 190 14 232 195 16 190 146 13 214 214 214
46241-253 253 253 253 253 253 253 253 253 253 253 253
46242-253 253 253 253 253 253 253 253 253 253 253 253
46243-253 253 253 253 253 253 253 253 253 253 253 253
46244-253 253 253 250 250 250 170 170 170 26 26 26
46245- 2 2 6 2 2 6 37 26 9 163 110 8
46246-219 162 10 239 182 13 246 186 14 246 190 14
46247-246 190 14 246 190 14 246 190 14 246 190 14
46248-246 190 14 246 190 14 246 190 14 246 190 14
46249-246 190 14 246 190 14 246 190 14 246 190 14
46250-246 186 14 236 178 12 224 166 10 142 122 72
46251- 46 46 46 18 18 18 6 6 6 0 0 0
46252- 0 0 0 0 0 0 0 0 0 0 0 0
46253- 0 0 0 0 0 0 0 0 0 0 0 0
46254- 0 0 0 0 0 0 6 6 6 18 18 18
46255- 50 50 50 109 106 95 192 133 9 224 166 10
46256-242 186 14 246 190 14 246 190 14 246 190 14
46257-246 190 14 246 190 14 246 190 14 246 190 14
46258-246 190 14 246 190 14 246 190 14 246 190 14
46259-246 190 14 246 190 14 246 190 14 246 190 14
46260-242 186 14 226 184 13 210 162 10 142 110 46
46261-226 226 226 253 253 253 253 253 253 253 253 253
46262-253 253 253 253 253 253 253 253 253 253 253 253
46263-253 253 253 253 253 253 253 253 253 253 253 253
46264-198 198 198 66 66 66 2 2 6 2 2 6
46265- 2 2 6 2 2 6 50 34 6 156 107 11
46266-219 162 10 239 182 13 246 186 14 246 190 14
46267-246 190 14 246 190 14 246 190 14 246 190 14
46268-246 190 14 246 190 14 246 190 14 246 190 14
46269-246 190 14 246 190 14 246 190 14 242 186 14
46270-234 174 13 213 154 11 154 122 46 66 66 66
46271- 30 30 30 10 10 10 0 0 0 0 0 0
46272- 0 0 0 0 0 0 0 0 0 0 0 0
46273- 0 0 0 0 0 0 0 0 0 0 0 0
46274- 0 0 0 0 0 0 6 6 6 22 22 22
46275- 58 58 58 154 121 60 206 145 10 234 174 13
46276-242 186 14 246 186 14 246 190 14 246 190 14
46277-246 190 14 246 190 14 246 190 14 246 190 14
46278-246 190 14 246 190 14 246 190 14 246 190 14
46279-246 190 14 246 190 14 246 190 14 246 190 14
46280-246 186 14 236 178 12 210 162 10 163 110 8
46281- 61 42 6 138 138 138 218 218 218 250 250 250
46282-253 253 253 253 253 253 253 253 253 250 250 250
46283-242 242 242 210 210 210 144 144 144 66 66 66
46284- 6 6 6 2 2 6 2 2 6 2 2 6
46285- 2 2 6 2 2 6 61 42 6 163 110 8
46286-216 158 10 236 178 12 246 190 14 246 190 14
46287-246 190 14 246 190 14 246 190 14 246 190 14
46288-246 190 14 246 190 14 246 190 14 246 190 14
46289-246 190 14 239 182 13 230 174 11 216 158 10
46290-190 142 34 124 112 88 70 70 70 38 38 38
46291- 18 18 18 6 6 6 0 0 0 0 0 0
46292- 0 0 0 0 0 0 0 0 0 0 0 0
46293- 0 0 0 0 0 0 0 0 0 0 0 0
46294- 0 0 0 0 0 0 6 6 6 22 22 22
46295- 62 62 62 168 124 44 206 145 10 224 166 10
46296-236 178 12 239 182 13 242 186 14 242 186 14
46297-246 186 14 246 190 14 246 190 14 246 190 14
46298-246 190 14 246 190 14 246 190 14 246 190 14
46299-246 190 14 246 190 14 246 190 14 246 190 14
46300-246 190 14 236 178 12 216 158 10 175 118 6
46301- 80 54 7 2 2 6 6 6 6 30 30 30
46302- 54 54 54 62 62 62 50 50 50 38 38 38
46303- 14 14 14 2 2 6 2 2 6 2 2 6
46304- 2 2 6 2 2 6 2 2 6 2 2 6
46305- 2 2 6 6 6 6 80 54 7 167 114 7
46306-213 154 11 236 178 12 246 190 14 246 190 14
46307-246 190 14 246 190 14 246 190 14 246 190 14
46308-246 190 14 242 186 14 239 182 13 239 182 13
46309-230 174 11 210 150 10 174 135 50 124 112 88
46310- 82 82 82 54 54 54 34 34 34 18 18 18
46311- 6 6 6 0 0 0 0 0 0 0 0 0
46312- 0 0 0 0 0 0 0 0 0 0 0 0
46313- 0 0 0 0 0 0 0 0 0 0 0 0
46314- 0 0 0 0 0 0 6 6 6 18 18 18
46315- 50 50 50 158 118 36 192 133 9 200 144 11
46316-216 158 10 219 162 10 224 166 10 226 170 11
46317-230 174 11 236 178 12 239 182 13 239 182 13
46318-242 186 14 246 186 14 246 190 14 246 190 14
46319-246 190 14 246 190 14 246 190 14 246 190 14
46320-246 186 14 230 174 11 210 150 10 163 110 8
46321-104 69 6 10 10 10 2 2 6 2 2 6
46322- 2 2 6 2 2 6 2 2 6 2 2 6
46323- 2 2 6 2 2 6 2 2 6 2 2 6
46324- 2 2 6 2 2 6 2 2 6 2 2 6
46325- 2 2 6 6 6 6 91 60 6 167 114 7
46326-206 145 10 230 174 11 242 186 14 246 190 14
46327-246 190 14 246 190 14 246 186 14 242 186 14
46328-239 182 13 230 174 11 224 166 10 213 154 11
46329-180 133 36 124 112 88 86 86 86 58 58 58
46330- 38 38 38 22 22 22 10 10 10 6 6 6
46331- 0 0 0 0 0 0 0 0 0 0 0 0
46332- 0 0 0 0 0 0 0 0 0 0 0 0
46333- 0 0 0 0 0 0 0 0 0 0 0 0
46334- 0 0 0 0 0 0 0 0 0 14 14 14
46335- 34 34 34 70 70 70 138 110 50 158 118 36
46336-167 114 7 180 123 7 192 133 9 197 138 11
46337-200 144 11 206 145 10 213 154 11 219 162 10
46338-224 166 10 230 174 11 239 182 13 242 186 14
46339-246 186 14 246 186 14 246 186 14 246 186 14
46340-239 182 13 216 158 10 185 133 11 152 99 6
46341-104 69 6 18 14 6 2 2 6 2 2 6
46342- 2 2 6 2 2 6 2 2 6 2 2 6
46343- 2 2 6 2 2 6 2 2 6 2 2 6
46344- 2 2 6 2 2 6 2 2 6 2 2 6
46345- 2 2 6 6 6 6 80 54 7 152 99 6
46346-192 133 9 219 162 10 236 178 12 239 182 13
46347-246 186 14 242 186 14 239 182 13 236 178 12
46348-224 166 10 206 145 10 192 133 9 154 121 60
46349- 94 94 94 62 62 62 42 42 42 22 22 22
46350- 14 14 14 6 6 6 0 0 0 0 0 0
46351- 0 0 0 0 0 0 0 0 0 0 0 0
46352- 0 0 0 0 0 0 0 0 0 0 0 0
46353- 0 0 0 0 0 0 0 0 0 0 0 0
46354- 0 0 0 0 0 0 0 0 0 6 6 6
46355- 18 18 18 34 34 34 58 58 58 78 78 78
46356-101 98 89 124 112 88 142 110 46 156 107 11
46357-163 110 8 167 114 7 175 118 6 180 123 7
46358-185 133 11 197 138 11 210 150 10 219 162 10
46359-226 170 11 236 178 12 236 178 12 234 174 13
46360-219 162 10 197 138 11 163 110 8 130 83 6
46361- 91 60 6 10 10 10 2 2 6 2 2 6
46362- 18 18 18 38 38 38 38 38 38 38 38 38
46363- 38 38 38 38 38 38 38 38 38 38 38 38
46364- 38 38 38 38 38 38 26 26 26 2 2 6
46365- 2 2 6 6 6 6 70 47 6 137 92 6
46366-175 118 6 200 144 11 219 162 10 230 174 11
46367-234 174 13 230 174 11 219 162 10 210 150 10
46368-192 133 9 163 110 8 124 112 88 82 82 82
46369- 50 50 50 30 30 30 14 14 14 6 6 6
46370- 0 0 0 0 0 0 0 0 0 0 0 0
46371- 0 0 0 0 0 0 0 0 0 0 0 0
46372- 0 0 0 0 0 0 0 0 0 0 0 0
46373- 0 0 0 0 0 0 0 0 0 0 0 0
46374- 0 0 0 0 0 0 0 0 0 0 0 0
46375- 6 6 6 14 14 14 22 22 22 34 34 34
46376- 42 42 42 58 58 58 74 74 74 86 86 86
46377-101 98 89 122 102 70 130 98 46 121 87 25
46378-137 92 6 152 99 6 163 110 8 180 123 7
46379-185 133 11 197 138 11 206 145 10 200 144 11
46380-180 123 7 156 107 11 130 83 6 104 69 6
46381- 50 34 6 54 54 54 110 110 110 101 98 89
46382- 86 86 86 82 82 82 78 78 78 78 78 78
46383- 78 78 78 78 78 78 78 78 78 78 78 78
46384- 78 78 78 82 82 82 86 86 86 94 94 94
46385-106 106 106 101 101 101 86 66 34 124 80 6
46386-156 107 11 180 123 7 192 133 9 200 144 11
46387-206 145 10 200 144 11 192 133 9 175 118 6
46388-139 102 15 109 106 95 70 70 70 42 42 42
46389- 22 22 22 10 10 10 0 0 0 0 0 0
46390- 0 0 0 0 0 0 0 0 0 0 0 0
46391- 0 0 0 0 0 0 0 0 0 0 0 0
46392- 0 0 0 0 0 0 0 0 0 0 0 0
46393- 0 0 0 0 0 0 0 0 0 0 0 0
46394- 0 0 0 0 0 0 0 0 0 0 0 0
46395- 0 0 0 0 0 0 6 6 6 10 10 10
46396- 14 14 14 22 22 22 30 30 30 38 38 38
46397- 50 50 50 62 62 62 74 74 74 90 90 90
46398-101 98 89 112 100 78 121 87 25 124 80 6
46399-137 92 6 152 99 6 152 99 6 152 99 6
46400-138 86 6 124 80 6 98 70 6 86 66 30
46401-101 98 89 82 82 82 58 58 58 46 46 46
46402- 38 38 38 34 34 34 34 34 34 34 34 34
46403- 34 34 34 34 34 34 34 34 34 34 34 34
46404- 34 34 34 34 34 34 38 38 38 42 42 42
46405- 54 54 54 82 82 82 94 86 76 91 60 6
46406-134 86 6 156 107 11 167 114 7 175 118 6
46407-175 118 6 167 114 7 152 99 6 121 87 25
46408-101 98 89 62 62 62 34 34 34 18 18 18
46409- 6 6 6 0 0 0 0 0 0 0 0 0
46410- 0 0 0 0 0 0 0 0 0 0 0 0
46411- 0 0 0 0 0 0 0 0 0 0 0 0
46412- 0 0 0 0 0 0 0 0 0 0 0 0
46413- 0 0 0 0 0 0 0 0 0 0 0 0
46414- 0 0 0 0 0 0 0 0 0 0 0 0
46415- 0 0 0 0 0 0 0 0 0 0 0 0
46416- 0 0 0 6 6 6 6 6 6 10 10 10
46417- 18 18 18 22 22 22 30 30 30 42 42 42
46418- 50 50 50 66 66 66 86 86 86 101 98 89
46419-106 86 58 98 70 6 104 69 6 104 69 6
46420-104 69 6 91 60 6 82 62 34 90 90 90
46421- 62 62 62 38 38 38 22 22 22 14 14 14
46422- 10 10 10 10 10 10 10 10 10 10 10 10
46423- 10 10 10 10 10 10 6 6 6 10 10 10
46424- 10 10 10 10 10 10 10 10 10 14 14 14
46425- 22 22 22 42 42 42 70 70 70 89 81 66
46426- 80 54 7 104 69 6 124 80 6 137 92 6
46427-134 86 6 116 81 8 100 82 52 86 86 86
46428- 58 58 58 30 30 30 14 14 14 6 6 6
46429- 0 0 0 0 0 0 0 0 0 0 0 0
46430- 0 0 0 0 0 0 0 0 0 0 0 0
46431- 0 0 0 0 0 0 0 0 0 0 0 0
46432- 0 0 0 0 0 0 0 0 0 0 0 0
46433- 0 0 0 0 0 0 0 0 0 0 0 0
46434- 0 0 0 0 0 0 0 0 0 0 0 0
46435- 0 0 0 0 0 0 0 0 0 0 0 0
46436- 0 0 0 0 0 0 0 0 0 0 0 0
46437- 0 0 0 6 6 6 10 10 10 14 14 14
46438- 18 18 18 26 26 26 38 38 38 54 54 54
46439- 70 70 70 86 86 86 94 86 76 89 81 66
46440- 89 81 66 86 86 86 74 74 74 50 50 50
46441- 30 30 30 14 14 14 6 6 6 0 0 0
46442- 0 0 0 0 0 0 0 0 0 0 0 0
46443- 0 0 0 0 0 0 0 0 0 0 0 0
46444- 0 0 0 0 0 0 0 0 0 0 0 0
46445- 6 6 6 18 18 18 34 34 34 58 58 58
46446- 82 82 82 89 81 66 89 81 66 89 81 66
46447- 94 86 66 94 86 76 74 74 74 50 50 50
46448- 26 26 26 14 14 14 6 6 6 0 0 0
46449- 0 0 0 0 0 0 0 0 0 0 0 0
46450- 0 0 0 0 0 0 0 0 0 0 0 0
46451- 0 0 0 0 0 0 0 0 0 0 0 0
46452- 0 0 0 0 0 0 0 0 0 0 0 0
46453- 0 0 0 0 0 0 0 0 0 0 0 0
46454- 0 0 0 0 0 0 0 0 0 0 0 0
46455- 0 0 0 0 0 0 0 0 0 0 0 0
46456- 0 0 0 0 0 0 0 0 0 0 0 0
46457- 0 0 0 0 0 0 0 0 0 0 0 0
46458- 6 6 6 6 6 6 14 14 14 18 18 18
46459- 30 30 30 38 38 38 46 46 46 54 54 54
46460- 50 50 50 42 42 42 30 30 30 18 18 18
46461- 10 10 10 0 0 0 0 0 0 0 0 0
46462- 0 0 0 0 0 0 0 0 0 0 0 0
46463- 0 0 0 0 0 0 0 0 0 0 0 0
46464- 0 0 0 0 0 0 0 0 0 0 0 0
46465- 0 0 0 6 6 6 14 14 14 26 26 26
46466- 38 38 38 50 50 50 58 58 58 58 58 58
46467- 54 54 54 42 42 42 30 30 30 18 18 18
46468- 10 10 10 0 0 0 0 0 0 0 0 0
46469- 0 0 0 0 0 0 0 0 0 0 0 0
46470- 0 0 0 0 0 0 0 0 0 0 0 0
46471- 0 0 0 0 0 0 0 0 0 0 0 0
46472- 0 0 0 0 0 0 0 0 0 0 0 0
46473- 0 0 0 0 0 0 0 0 0 0 0 0
46474- 0 0 0 0 0 0 0 0 0 0 0 0
46475- 0 0 0 0 0 0 0 0 0 0 0 0
46476- 0 0 0 0 0 0 0 0 0 0 0 0
46477- 0 0 0 0 0 0 0 0 0 0 0 0
46478- 0 0 0 0 0 0 0 0 0 6 6 6
46479- 6 6 6 10 10 10 14 14 14 18 18 18
46480- 18 18 18 14 14 14 10 10 10 6 6 6
46481- 0 0 0 0 0 0 0 0 0 0 0 0
46482- 0 0 0 0 0 0 0 0 0 0 0 0
46483- 0 0 0 0 0 0 0 0 0 0 0 0
46484- 0 0 0 0 0 0 0 0 0 0 0 0
46485- 0 0 0 0 0 0 0 0 0 6 6 6
46486- 14 14 14 18 18 18 22 22 22 22 22 22
46487- 18 18 18 14 14 14 10 10 10 6 6 6
46488- 0 0 0 0 0 0 0 0 0 0 0 0
46489- 0 0 0 0 0 0 0 0 0 0 0 0
46490- 0 0 0 0 0 0 0 0 0 0 0 0
46491- 0 0 0 0 0 0 0 0 0 0 0 0
46492- 0 0 0 0 0 0 0 0 0 0 0 0
46493+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46494+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46495+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46496+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46497+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46498+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46499+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46500+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46501+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46502+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46503+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46504+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46505+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46506+4 4 4 4 4 4
46507+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46508+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46509+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46510+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46511+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46512+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46513+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46514+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46515+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46516+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46517+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46518+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46519+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46520+4 4 4 4 4 4
46521+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46522+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46523+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46524+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46525+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46526+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46527+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46528+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46529+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46530+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46531+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46532+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46533+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46534+4 4 4 4 4 4
46535+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46536+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46537+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46538+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46539+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46540+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46541+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46542+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46543+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46544+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46545+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46546+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46547+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46548+4 4 4 4 4 4
46549+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46550+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46551+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46552+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46553+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46554+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46555+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46556+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46557+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46558+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46559+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46560+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46561+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46562+4 4 4 4 4 4
46563+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46564+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46565+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46566+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46567+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46568+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46569+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46570+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46571+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46572+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46573+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46574+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46575+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46576+4 4 4 4 4 4
46577+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46578+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46579+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46580+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46581+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
46582+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
46583+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46584+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46585+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46586+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
46587+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
46588+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
46589+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46590+4 4 4 4 4 4
46591+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46592+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46593+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46594+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46595+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
46596+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
46597+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46598+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46599+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46600+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
46601+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
46602+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
46603+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46604+4 4 4 4 4 4
46605+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46606+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46607+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46608+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46609+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
46610+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
46611+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
46612+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46613+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46614+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
46615+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
46616+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
46617+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
46618+4 4 4 4 4 4
46619+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46620+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46621+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46622+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
46623+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
46624+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
46625+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
46626+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46627+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
46628+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
46629+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
46630+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
46631+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
46632+4 4 4 4 4 4
46633+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46634+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46635+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46636+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
46637+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
46638+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
46639+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
46640+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
46641+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
46642+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
46643+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
46644+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
46645+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
46646+4 4 4 4 4 4
46647+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46648+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46649+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
46650+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
46651+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
46652+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
46653+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
46654+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
46655+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
46656+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
46657+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
46658+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
46659+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
46660+4 4 4 4 4 4
46661+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46662+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46663+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
46664+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
46665+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
46666+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
46667+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
46668+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
46669+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
46670+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
46671+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
46672+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
46673+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
46674+4 4 4 4 4 4
46675+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46676+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46677+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
46678+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
46679+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
46680+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
46681+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
46682+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
46683+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
46684+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
46685+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
46686+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
46687+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
46688+4 4 4 4 4 4
46689+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46690+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46691+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
46692+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
46693+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
46694+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
46695+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
46696+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
46697+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
46698+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
46699+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
46700+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
46701+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
46702+4 4 4 4 4 4
46703+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46704+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46705+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
46706+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
46707+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
46708+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
46709+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
46710+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
46711+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
46712+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
46713+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
46714+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
46715+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
46716+4 4 4 4 4 4
46717+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46718+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
46719+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
46720+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
46721+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
46722+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
46723+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
46724+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
46725+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
46726+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
46727+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
46728+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
46729+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
46730+4 4 4 4 4 4
46731+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46732+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
46733+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
46734+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
46735+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
46736+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
46737+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
46738+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
46739+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
46740+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
46741+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
46742+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
46743+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
46744+0 0 0 4 4 4
46745+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
46746+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
46747+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
46748+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
46749+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
46750+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
46751+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
46752+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
46753+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
46754+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
46755+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
46756+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
46757+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
46758+2 0 0 0 0 0
46759+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
46760+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
46761+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
46762+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
46763+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
46764+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
46765+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
46766+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
46767+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
46768+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
46769+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
46770+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
46771+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
46772+37 38 37 0 0 0
46773+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
46774+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
46775+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
46776+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
46777+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
46778+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
46779+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
46780+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
46781+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
46782+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
46783+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
46784+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
46785+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
46786+85 115 134 4 0 0
46787+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
46788+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
46789+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
46790+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
46791+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
46792+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
46793+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
46794+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
46795+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
46796+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
46797+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
46798+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
46799+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
46800+60 73 81 4 0 0
46801+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
46802+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
46803+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
46804+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
46805+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
46806+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
46807+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
46808+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
46809+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
46810+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
46811+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
46812+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
46813+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
46814+16 19 21 4 0 0
46815+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
46816+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
46817+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
46818+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
46819+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
46820+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
46821+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
46822+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
46823+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
46824+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
46825+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
46826+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
46827+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
46828+4 0 0 4 3 3
46829+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
46830+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
46831+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
46832+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
46833+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
46834+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
46835+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
46836+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
46837+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
46838+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
46839+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
46840+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
46841+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
46842+3 2 2 4 4 4
46843+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
46844+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
46845+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
46846+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
46847+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
46848+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
46849+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
46850+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
46851+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
46852+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
46853+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
46854+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
46855+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
46856+4 4 4 4 4 4
46857+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
46858+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
46859+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
46860+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
46861+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
46862+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
46863+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
46864+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
46865+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
46866+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
46867+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
46868+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
46869+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
46870+4 4 4 4 4 4
46871+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
46872+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
46873+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
46874+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
46875+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
46876+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
46877+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
46878+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
46879+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
46880+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
46881+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
46882+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
46883+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
46884+5 5 5 5 5 5
46885+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
46886+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
46887+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
46888+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
46889+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
46890+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
46891+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
46892+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
46893+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
46894+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
46895+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
46896+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
46897+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
46898+5 5 5 4 4 4
46899+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
46900+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
46901+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
46902+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
46903+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
46904+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
46905+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
46906+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
46907+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
46908+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
46909+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
46910+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
46911+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46912+4 4 4 4 4 4
46913+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
46914+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
46915+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
46916+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
46917+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
46918+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
46919+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
46920+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
46921+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
46922+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
46923+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
46924+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
46925+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46926+4 4 4 4 4 4
46927+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
46928+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
46929+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
46930+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
46931+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
46932+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
46933+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
46934+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
46935+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
46936+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
46937+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
46938+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46939+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46940+4 4 4 4 4 4
46941+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
46942+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
46943+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
46944+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
46945+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
46946+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
46947+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
46948+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
46949+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
46950+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
46951+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
46952+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46953+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46954+4 4 4 4 4 4
46955+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
46956+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
46957+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
46958+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
46959+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
46960+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
46961+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
46962+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
46963+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
46964+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
46965+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46966+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46967+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46968+4 4 4 4 4 4
46969+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
46970+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
46971+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
46972+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
46973+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
46974+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
46975+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
46976+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
46977+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
46978+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
46979+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
46980+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46981+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46982+4 4 4 4 4 4
46983+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
46984+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
46985+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
46986+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
46987+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
46988+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
46989+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
46990+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
46991+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
46992+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
46993+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
46994+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46995+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46996+4 4 4 4 4 4
46997+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
46998+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
46999+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
47000+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
47001+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
47002+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
47003+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
47004+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
47005+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
47006+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
47007+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47008+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47009+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47010+4 4 4 4 4 4
47011+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
47012+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
47013+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
47014+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
47015+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
47016+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
47017+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
47018+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
47019+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
47020+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
47021+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47022+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47023+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47024+4 4 4 4 4 4
47025+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
47026+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
47027+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
47028+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
47029+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
47030+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
47031+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
47032+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
47033+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
47034+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
47035+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47036+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47037+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47038+4 4 4 4 4 4
47039+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
47040+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
47041+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
47042+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
47043+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
47044+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
47045+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
47046+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
47047+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
47048+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47049+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47050+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47051+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47052+4 4 4 4 4 4
47053+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
47054+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
47055+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
47056+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
47057+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
47058+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
47059+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
47060+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
47061+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
47062+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47063+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47064+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47065+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47066+4 4 4 4 4 4
47067+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
47068+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
47069+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
47070+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
47071+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
47072+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
47073+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
47074+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
47075+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
47076+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47077+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47078+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47079+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47080+4 4 4 4 4 4
47081+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
47082+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
47083+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
47084+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
47085+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
47086+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
47087+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
47088+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
47089+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
47090+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47091+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47092+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47093+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47094+4 4 4 4 4 4
47095+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
47096+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
47097+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
47098+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
47099+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
47100+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
47101+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
47102+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
47103+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
47104+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47105+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47106+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47107+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47108+4 4 4 4 4 4
47109+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
47110+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
47111+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
47112+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
47113+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
47114+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
47115+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
47116+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
47117+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
47118+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47119+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47120+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47121+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47122+4 4 4 4 4 4
47123+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
47124+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
47125+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
47126+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
47127+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
47128+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
47129+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
47130+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
47131+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
47132+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47133+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47134+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47135+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47136+4 4 4 4 4 4
47137+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
47138+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
47139+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
47140+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
47141+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
47142+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
47143+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
47144+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
47145+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
47146+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47147+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47148+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47149+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47150+4 4 4 4 4 4
47151+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
47152+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
47153+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
47154+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
47155+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
47156+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
47157+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
47158+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
47159+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
47160+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47161+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47162+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47163+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47164+4 4 4 4 4 4
47165+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
47166+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
47167+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
47168+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
47169+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
47170+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
47171+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
47172+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
47173+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
47174+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47175+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47176+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47177+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47178+4 4 4 4 4 4
47179+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
47180+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
47181+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
47182+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
47183+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
47184+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
47185+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
47186+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
47187+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
47188+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47189+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47190+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47191+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47192+4 4 4 4 4 4
47193+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
47194+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
47195+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
47196+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
47197+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
47198+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
47199+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
47200+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
47201+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
47202+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47203+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47204+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47205+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47206+4 4 4 4 4 4
47207+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
47208+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
47209+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
47210+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
47211+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
47212+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
47213+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
47214+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
47215+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
47216+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47217+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47218+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47219+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47220+4 4 4 4 4 4
47221+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
47222+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
47223+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
47224+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
47225+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
47226+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
47227+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
47228+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
47229+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
47230+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47231+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47232+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47233+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47234+4 4 4 4 4 4
47235+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
47236+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
47237+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
47238+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
47239+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
47240+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
47241+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
47242+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
47243+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
47244+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47245+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47246+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47247+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47248+4 4 4 4 4 4
47249+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
47250+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
47251+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
47252+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
47253+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
47254+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
47255+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
47256+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
47257+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
47258+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
47259+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47260+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47261+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47262+4 4 4 4 4 4
47263+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
47264+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
47265+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
47266+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
47267+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
47268+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
47269+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
47270+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
47271+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
47272+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
47273+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47274+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47275+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47276+4 4 4 4 4 4
47277+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
47278+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
47279+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
47280+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
47281+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
47282+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
47283+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47284+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
47285+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
47286+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
47287+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
47288+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47289+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47290+4 4 4 4 4 4
47291+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
47292+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
47293+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
47294+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
47295+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
47296+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
47297+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
47298+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
47299+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
47300+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
47301+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47302+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47303+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47304+4 4 4 4 4 4
47305+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
47306+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
47307+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
47308+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
47309+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
47310+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
47311+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
47312+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
47313+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
47314+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
47315+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47316+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47317+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47318+4 4 4 4 4 4
47319+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
47320+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
47321+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
47322+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
47323+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
47324+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
47325+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
47326+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
47327+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
47328+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
47329+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47330+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47331+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47332+4 4 4 4 4 4
47333+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
47334+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
47335+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
47336+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
47337+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
47338+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
47339+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
47340+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
47341+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
47342+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
47343+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47344+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47345+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47346+4 4 4 4 4 4
47347+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
47348+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
47349+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
47350+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
47351+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
47352+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
47353+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
47354+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
47355+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
47356+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
47357+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47358+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47359+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47360+4 4 4 4 4 4
47361+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
47362+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
47363+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
47364+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
47365+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
47366+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
47367+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
47368+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
47369+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
47370+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47371+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47372+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47373+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47374+4 4 4 4 4 4
47375+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
47376+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
47377+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
47378+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
47379+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
47380+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
47381+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
47382+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
47383+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
47384+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47385+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47386+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47387+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47388+4 4 4 4 4 4
47389+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
47390+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
47391+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
47392+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
47393+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
47394+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
47395+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
47396+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
47397+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47398+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47399+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47400+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47401+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47402+4 4 4 4 4 4
47403+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
47404+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
47405+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
47406+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
47407+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
47408+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
47409+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
47410+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
47411+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47412+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47413+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47414+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47415+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47416+4 4 4 4 4 4
47417+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
47418+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
47419+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
47420+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
47421+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
47422+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
47423+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
47424+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
47425+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47426+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47427+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47428+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47429+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47430+4 4 4 4 4 4
47431+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
47432+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
47433+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
47434+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
47435+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
47436+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
47437+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
47438+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
47439+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47440+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47441+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47442+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47443+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47444+4 4 4 4 4 4
47445+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47446+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
47447+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
47448+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
47449+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
47450+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
47451+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
47452+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
47453+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47454+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47455+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47456+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47457+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47458+4 4 4 4 4 4
47459+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47460+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
47461+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
47462+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
47463+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
47464+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
47465+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
47466+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
47467+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47468+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47469+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47470+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47471+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47472+4 4 4 4 4 4
47473+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47474+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47475+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
47476+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
47477+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
47478+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
47479+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
47480+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
47481+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47482+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47483+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47484+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47485+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47486+4 4 4 4 4 4
47487+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47488+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47489+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
47490+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
47491+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
47492+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
47493+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
47494+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47495+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47496+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47497+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47498+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47499+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47500+4 4 4 4 4 4
47501+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47502+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47503+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47504+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
47505+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
47506+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
47507+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
47508+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47509+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47510+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47511+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47512+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47513+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47514+4 4 4 4 4 4
47515+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47516+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47517+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47518+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
47519+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
47520+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
47521+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
47522+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47523+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47524+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47525+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47526+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47527+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47528+4 4 4 4 4 4
47529+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47530+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47531+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47532+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
47533+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
47534+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
47535+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
47536+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47537+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47538+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47539+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47540+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47541+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47542+4 4 4 4 4 4
47543+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47544+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47545+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47546+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
47547+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
47548+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
47549+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47550+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47551+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47552+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47553+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47554+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47555+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47556+4 4 4 4 4 4
47557+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47558+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47559+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47560+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47561+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
47562+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
47563+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
47564+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47565+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47566+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47567+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47568+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47569+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47570+4 4 4 4 4 4
47571+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47572+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47573+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47574+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47575+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
47576+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
47577+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47578+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47579+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47580+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47581+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47582+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47583+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47584+4 4 4 4 4 4
47585+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47586+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47587+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47588+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47589+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
47590+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
47591+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47592+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47593+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47594+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47595+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47596+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47597+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47598+4 4 4 4 4 4
47599+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47600+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47601+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47602+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47603+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
47604+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
47605+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47606+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47607+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47608+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47609+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47610+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47611+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47612+4 4 4 4 4 4
47613diff --git a/drivers/video/mb862xx/mb862xxfb_accel.c b/drivers/video/mb862xx/mb862xxfb_accel.c
47614index fe92eed..106e085 100644
47615--- a/drivers/video/mb862xx/mb862xxfb_accel.c
47616+++ b/drivers/video/mb862xx/mb862xxfb_accel.c
47617@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
47618 struct mb862xxfb_par *par = info->par;
47619
47620 if (info->var.bits_per_pixel == 32) {
47621- info->fbops->fb_fillrect = cfb_fillrect;
47622- info->fbops->fb_copyarea = cfb_copyarea;
47623- info->fbops->fb_imageblit = cfb_imageblit;
47624+ pax_open_kernel();
47625+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
47626+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
47627+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
47628+ pax_close_kernel();
47629 } else {
47630 outreg(disp, GC_L0EM, 3);
47631- info->fbops->fb_fillrect = mb86290fb_fillrect;
47632- info->fbops->fb_copyarea = mb86290fb_copyarea;
47633- info->fbops->fb_imageblit = mb86290fb_imageblit;
47634+ pax_open_kernel();
47635+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
47636+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
47637+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
47638+ pax_close_kernel();
47639 }
47640 outreg(draw, GDC_REG_DRAW_BASE, 0);
47641 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
47642diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
47643index ff22871..b129bed 100644
47644--- a/drivers/video/nvidia/nvidia.c
47645+++ b/drivers/video/nvidia/nvidia.c
47646@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
47647 info->fix.line_length = (info->var.xres_virtual *
47648 info->var.bits_per_pixel) >> 3;
47649 if (info->var.accel_flags) {
47650- info->fbops->fb_imageblit = nvidiafb_imageblit;
47651- info->fbops->fb_fillrect = nvidiafb_fillrect;
47652- info->fbops->fb_copyarea = nvidiafb_copyarea;
47653- info->fbops->fb_sync = nvidiafb_sync;
47654+ pax_open_kernel();
47655+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
47656+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
47657+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
47658+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
47659+ pax_close_kernel();
47660 info->pixmap.scan_align = 4;
47661 info->flags &= ~FBINFO_HWACCEL_DISABLED;
47662 info->flags |= FBINFO_READS_FAST;
47663 NVResetGraphics(info);
47664 } else {
47665- info->fbops->fb_imageblit = cfb_imageblit;
47666- info->fbops->fb_fillrect = cfb_fillrect;
47667- info->fbops->fb_copyarea = cfb_copyarea;
47668- info->fbops->fb_sync = NULL;
47669+ pax_open_kernel();
47670+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
47671+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
47672+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
47673+ *(void **)&info->fbops->fb_sync = NULL;
47674+ pax_close_kernel();
47675 info->pixmap.scan_align = 1;
47676 info->flags |= FBINFO_HWACCEL_DISABLED;
47677 info->flags &= ~FBINFO_READS_FAST;
47678@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
47679 info->pixmap.size = 8 * 1024;
47680 info->pixmap.flags = FB_PIXMAP_SYSTEM;
47681
47682- if (!hwcur)
47683- info->fbops->fb_cursor = NULL;
47684+ if (!hwcur) {
47685+ pax_open_kernel();
47686+ *(void **)&info->fbops->fb_cursor = NULL;
47687+ pax_close_kernel();
47688+ }
47689
47690 info->var.accel_flags = (!noaccel);
47691
47692diff --git a/drivers/video/s1d13xxxfb.c b/drivers/video/s1d13xxxfb.c
47693index 76d9053..dec2bfd 100644
47694--- a/drivers/video/s1d13xxxfb.c
47695+++ b/drivers/video/s1d13xxxfb.c
47696@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
47697
47698 switch(prod_id) {
47699 case S1D13506_PROD_ID: /* activate acceleration */
47700- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
47701- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
47702+ pax_open_kernel();
47703+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
47704+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
47705+ pax_close_kernel();
47706 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
47707 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
47708 break;
47709diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
47710index 97bd662..39fab85 100644
47711--- a/drivers/video/smscufx.c
47712+++ b/drivers/video/smscufx.c
47713@@ -1171,7 +1171,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
47714 fb_deferred_io_cleanup(info);
47715 kfree(info->fbdefio);
47716 info->fbdefio = NULL;
47717- info->fbops->fb_mmap = ufx_ops_mmap;
47718+ pax_open_kernel();
47719+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
47720+ pax_close_kernel();
47721 }
47722
47723 pr_debug("released /dev/fb%d user=%d count=%d",
47724diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
47725index 86d449e..8e04dc5 100644
47726--- a/drivers/video/udlfb.c
47727+++ b/drivers/video/udlfb.c
47728@@ -619,11 +619,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
47729 dlfb_urb_completion(urb);
47730
47731 error:
47732- atomic_add(bytes_sent, &dev->bytes_sent);
47733- atomic_add(bytes_identical, &dev->bytes_identical);
47734- atomic_add(width*height*2, &dev->bytes_rendered);
47735+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
47736+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
47737+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
47738 end_cycles = get_cycles();
47739- atomic_add(((unsigned int) ((end_cycles - start_cycles)
47740+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
47741 >> 10)), /* Kcycles */
47742 &dev->cpu_kcycles_used);
47743
47744@@ -744,11 +744,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
47745 dlfb_urb_completion(urb);
47746
47747 error:
47748- atomic_add(bytes_sent, &dev->bytes_sent);
47749- atomic_add(bytes_identical, &dev->bytes_identical);
47750- atomic_add(bytes_rendered, &dev->bytes_rendered);
47751+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
47752+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
47753+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
47754 end_cycles = get_cycles();
47755- atomic_add(((unsigned int) ((end_cycles - start_cycles)
47756+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
47757 >> 10)), /* Kcycles */
47758 &dev->cpu_kcycles_used);
47759 }
47760@@ -989,7 +989,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
47761 fb_deferred_io_cleanup(info);
47762 kfree(info->fbdefio);
47763 info->fbdefio = NULL;
47764- info->fbops->fb_mmap = dlfb_ops_mmap;
47765+ pax_open_kernel();
47766+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
47767+ pax_close_kernel();
47768 }
47769
47770 pr_warn("released /dev/fb%d user=%d count=%d\n",
47771@@ -1372,7 +1374,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
47772 struct fb_info *fb_info = dev_get_drvdata(fbdev);
47773 struct dlfb_data *dev = fb_info->par;
47774 return snprintf(buf, PAGE_SIZE, "%u\n",
47775- atomic_read(&dev->bytes_rendered));
47776+ atomic_read_unchecked(&dev->bytes_rendered));
47777 }
47778
47779 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
47780@@ -1380,7 +1382,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
47781 struct fb_info *fb_info = dev_get_drvdata(fbdev);
47782 struct dlfb_data *dev = fb_info->par;
47783 return snprintf(buf, PAGE_SIZE, "%u\n",
47784- atomic_read(&dev->bytes_identical));
47785+ atomic_read_unchecked(&dev->bytes_identical));
47786 }
47787
47788 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
47789@@ -1388,7 +1390,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
47790 struct fb_info *fb_info = dev_get_drvdata(fbdev);
47791 struct dlfb_data *dev = fb_info->par;
47792 return snprintf(buf, PAGE_SIZE, "%u\n",
47793- atomic_read(&dev->bytes_sent));
47794+ atomic_read_unchecked(&dev->bytes_sent));
47795 }
47796
47797 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
47798@@ -1396,7 +1398,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
47799 struct fb_info *fb_info = dev_get_drvdata(fbdev);
47800 struct dlfb_data *dev = fb_info->par;
47801 return snprintf(buf, PAGE_SIZE, "%u\n",
47802- atomic_read(&dev->cpu_kcycles_used));
47803+ atomic_read_unchecked(&dev->cpu_kcycles_used));
47804 }
47805
47806 static ssize_t edid_show(
47807@@ -1456,10 +1458,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
47808 struct fb_info *fb_info = dev_get_drvdata(fbdev);
47809 struct dlfb_data *dev = fb_info->par;
47810
47811- atomic_set(&dev->bytes_rendered, 0);
47812- atomic_set(&dev->bytes_identical, 0);
47813- atomic_set(&dev->bytes_sent, 0);
47814- atomic_set(&dev->cpu_kcycles_used, 0);
47815+ atomic_set_unchecked(&dev->bytes_rendered, 0);
47816+ atomic_set_unchecked(&dev->bytes_identical, 0);
47817+ atomic_set_unchecked(&dev->bytes_sent, 0);
47818+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
47819
47820 return count;
47821 }
47822diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
47823index b75db01..ad2f34a 100644
47824--- a/drivers/video/uvesafb.c
47825+++ b/drivers/video/uvesafb.c
47826@@ -19,6 +19,7 @@
47827 #include <linux/io.h>
47828 #include <linux/mutex.h>
47829 #include <linux/slab.h>
47830+#include <linux/moduleloader.h>
47831 #include <video/edid.h>
47832 #include <video/uvesafb.h>
47833 #ifdef CONFIG_X86
47834@@ -569,10 +570,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
47835 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
47836 par->pmi_setpal = par->ypan = 0;
47837 } else {
47838+
47839+#ifdef CONFIG_PAX_KERNEXEC
47840+#ifdef CONFIG_MODULES
47841+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
47842+#endif
47843+ if (!par->pmi_code) {
47844+ par->pmi_setpal = par->ypan = 0;
47845+ return 0;
47846+ }
47847+#endif
47848+
47849 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
47850 + task->t.regs.edi);
47851+
47852+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
47853+ pax_open_kernel();
47854+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
47855+ pax_close_kernel();
47856+
47857+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
47858+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
47859+#else
47860 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
47861 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
47862+#endif
47863+
47864 printk(KERN_INFO "uvesafb: protected mode interface info at "
47865 "%04x:%04x\n",
47866 (u16)task->t.regs.es, (u16)task->t.regs.edi);
47867@@ -817,13 +840,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
47868 par->ypan = ypan;
47869
47870 if (par->pmi_setpal || par->ypan) {
47871+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
47872 if (__supported_pte_mask & _PAGE_NX) {
47873 par->pmi_setpal = par->ypan = 0;
47874 printk(KERN_WARNING "uvesafb: NX protection is actively."
47875 "We have better not to use the PMI.\n");
47876- } else {
47877+ } else
47878+#endif
47879 uvesafb_vbe_getpmi(task, par);
47880- }
47881 }
47882 #else
47883 /* The protected mode interface is not available on non-x86. */
47884@@ -1457,8 +1481,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
47885 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
47886
47887 /* Disable blanking if the user requested so. */
47888- if (!blank)
47889- info->fbops->fb_blank = NULL;
47890+ if (!blank) {
47891+ pax_open_kernel();
47892+ *(void **)&info->fbops->fb_blank = NULL;
47893+ pax_close_kernel();
47894+ }
47895
47896 /*
47897 * Find out how much IO memory is required for the mode with
47898@@ -1534,8 +1561,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
47899 info->flags = FBINFO_FLAG_DEFAULT |
47900 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
47901
47902- if (!par->ypan)
47903- info->fbops->fb_pan_display = NULL;
47904+ if (!par->ypan) {
47905+ pax_open_kernel();
47906+ *(void **)&info->fbops->fb_pan_display = NULL;
47907+ pax_close_kernel();
47908+ }
47909 }
47910
47911 static void uvesafb_init_mtrr(struct fb_info *info)
47912@@ -1836,6 +1866,11 @@ out:
47913 if (par->vbe_modes)
47914 kfree(par->vbe_modes);
47915
47916+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
47917+ if (par->pmi_code)
47918+ module_free_exec(NULL, par->pmi_code);
47919+#endif
47920+
47921 framebuffer_release(info);
47922 return err;
47923 }
47924@@ -1862,6 +1897,12 @@ static int uvesafb_remove(struct platform_device *dev)
47925 kfree(par->vbe_state_orig);
47926 if (par->vbe_state_saved)
47927 kfree(par->vbe_state_saved);
47928+
47929+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
47930+ if (par->pmi_code)
47931+ module_free_exec(NULL, par->pmi_code);
47932+#endif
47933+
47934 }
47935
47936 framebuffer_release(info);
47937diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
47938index 501b340..d80aa17 100644
47939--- a/drivers/video/vesafb.c
47940+++ b/drivers/video/vesafb.c
47941@@ -9,6 +9,7 @@
47942 */
47943
47944 #include <linux/module.h>
47945+#include <linux/moduleloader.h>
47946 #include <linux/kernel.h>
47947 #include <linux/errno.h>
47948 #include <linux/string.h>
47949@@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
47950 static int vram_total __initdata; /* Set total amount of memory */
47951 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
47952 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
47953-static void (*pmi_start)(void) __read_mostly;
47954-static void (*pmi_pal) (void) __read_mostly;
47955+static void (*pmi_start)(void) __read_only;
47956+static void (*pmi_pal) (void) __read_only;
47957 static int depth __read_mostly;
47958 static int vga_compat __read_mostly;
47959 /* --------------------------------------------------------------------- */
47960@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
47961 unsigned int size_vmode;
47962 unsigned int size_remap;
47963 unsigned int size_total;
47964+ void *pmi_code = NULL;
47965
47966 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
47967 return -ENODEV;
47968@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
47969 size_remap = size_total;
47970 vesafb_fix.smem_len = size_remap;
47971
47972-#ifndef __i386__
47973- screen_info.vesapm_seg = 0;
47974-#endif
47975-
47976 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
47977 printk(KERN_WARNING
47978 "vesafb: cannot reserve video memory at 0x%lx\n",
47979@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
47980 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
47981 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
47982
47983+#ifdef __i386__
47984+
47985+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
47986+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
47987+ if (!pmi_code)
47988+#elif !defined(CONFIG_PAX_KERNEXEC)
47989+ if (0)
47990+#endif
47991+
47992+#endif
47993+ screen_info.vesapm_seg = 0;
47994+
47995 if (screen_info.vesapm_seg) {
47996- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
47997- screen_info.vesapm_seg,screen_info.vesapm_off);
47998+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
47999+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
48000 }
48001
48002 if (screen_info.vesapm_seg < 0xc000)
48003@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
48004
48005 if (ypan || pmi_setpal) {
48006 unsigned short *pmi_base;
48007+
48008 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
48009- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
48010- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
48011+
48012+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
48013+ pax_open_kernel();
48014+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
48015+#else
48016+ pmi_code = pmi_base;
48017+#endif
48018+
48019+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
48020+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
48021+
48022+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
48023+ pmi_start = ktva_ktla(pmi_start);
48024+ pmi_pal = ktva_ktla(pmi_pal);
48025+ pax_close_kernel();
48026+#endif
48027+
48028 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
48029 if (pmi_base[3]) {
48030 printk(KERN_INFO "vesafb: pmi: ports = ");
48031@@ -472,8 +498,11 @@ static int __init vesafb_probe(struct platform_device *dev)
48032 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
48033 (ypan ? FBINFO_HWACCEL_YPAN : 0);
48034
48035- if (!ypan)
48036- info->fbops->fb_pan_display = NULL;
48037+ if (!ypan) {
48038+ pax_open_kernel();
48039+ *(void **)&info->fbops->fb_pan_display = NULL;
48040+ pax_close_kernel();
48041+ }
48042
48043 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
48044 err = -ENOMEM;
48045@@ -488,6 +517,11 @@ static int __init vesafb_probe(struct platform_device *dev)
48046 info->node, info->fix.id);
48047 return 0;
48048 err:
48049+
48050+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
48051+ module_free_exec(NULL, pmi_code);
48052+#endif
48053+
48054 if (info->screen_base)
48055 iounmap(info->screen_base);
48056 framebuffer_release(info);
48057diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
48058index 88714ae..16c2e11 100644
48059--- a/drivers/video/via/via_clock.h
48060+++ b/drivers/video/via/via_clock.h
48061@@ -56,7 +56,7 @@ struct via_clock {
48062
48063 void (*set_engine_pll_state)(u8 state);
48064 void (*set_engine_pll)(struct via_pll_config config);
48065-};
48066+} __no_const;
48067
48068
48069 static inline u32 get_pll_internal_frequency(u32 ref_freq,
48070diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
48071index fef20db..d28b1ab 100644
48072--- a/drivers/xen/xenfs/xenstored.c
48073+++ b/drivers/xen/xenfs/xenstored.c
48074@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
48075 static int xsd_kva_open(struct inode *inode, struct file *file)
48076 {
48077 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
48078+#ifdef CONFIG_GRKERNSEC_HIDESYM
48079+ NULL);
48080+#else
48081 xen_store_interface);
48082+#endif
48083+
48084 if (!file->private_data)
48085 return -ENOMEM;
48086 return 0;
48087diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
48088index 890bed5..17ae73e 100644
48089--- a/fs/9p/vfs_inode.c
48090+++ b/fs/9p/vfs_inode.c
48091@@ -1329,7 +1329,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
48092 void
48093 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
48094 {
48095- char *s = nd_get_link(nd);
48096+ const char *s = nd_get_link(nd);
48097
48098 p9_debug(P9_DEBUG_VFS, " %s %s\n",
48099 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
48100diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
48101index 0efd152..b5802ad 100644
48102--- a/fs/Kconfig.binfmt
48103+++ b/fs/Kconfig.binfmt
48104@@ -89,7 +89,7 @@ config HAVE_AOUT
48105
48106 config BINFMT_AOUT
48107 tristate "Kernel support for a.out and ECOFF binaries"
48108- depends on HAVE_AOUT
48109+ depends on HAVE_AOUT && BROKEN
48110 ---help---
48111 A.out (Assembler.OUTput) is a set of formats for libraries and
48112 executables used in the earliest versions of UNIX. Linux used
48113diff --git a/fs/aio.c b/fs/aio.c
48114index 71f613c..9d01f1f 100644
48115--- a/fs/aio.c
48116+++ b/fs/aio.c
48117@@ -111,7 +111,7 @@ static int aio_setup_ring(struct kioctx *ctx)
48118 size += sizeof(struct io_event) * nr_events;
48119 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
48120
48121- if (nr_pages < 0)
48122+ if (nr_pages <= 0)
48123 return -EINVAL;
48124
48125 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
48126@@ -1373,18 +1373,19 @@ static ssize_t aio_fsync(struct kiocb *iocb)
48127 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
48128 {
48129 ssize_t ret;
48130+ struct iovec iovstack;
48131
48132 #ifdef CONFIG_COMPAT
48133 if (compat)
48134 ret = compat_rw_copy_check_uvector(type,
48135 (struct compat_iovec __user *)kiocb->ki_buf,
48136- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
48137+ kiocb->ki_nbytes, 1, &iovstack,
48138 &kiocb->ki_iovec);
48139 else
48140 #endif
48141 ret = rw_copy_check_uvector(type,
48142 (struct iovec __user *)kiocb->ki_buf,
48143- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
48144+ kiocb->ki_nbytes, 1, &iovstack,
48145 &kiocb->ki_iovec);
48146 if (ret < 0)
48147 goto out;
48148@@ -1393,6 +1394,10 @@ static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
48149 if (ret < 0)
48150 goto out;
48151
48152+ if (kiocb->ki_iovec == &iovstack) {
48153+ kiocb->ki_inline_vec = iovstack;
48154+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
48155+ }
48156 kiocb->ki_nr_segs = kiocb->ki_nbytes;
48157 kiocb->ki_cur_seg = 0;
48158 /* ki_nbytes/left now reflect bytes instead of segs */
48159diff --git a/fs/attr.c b/fs/attr.c
48160index 1449adb..a2038c2 100644
48161--- a/fs/attr.c
48162+++ b/fs/attr.c
48163@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
48164 unsigned long limit;
48165
48166 limit = rlimit(RLIMIT_FSIZE);
48167+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
48168 if (limit != RLIM_INFINITY && offset > limit)
48169 goto out_sig;
48170 if (offset > inode->i_sb->s_maxbytes)
48171diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
48172index 03bc1d3..6205356 100644
48173--- a/fs/autofs4/waitq.c
48174+++ b/fs/autofs4/waitq.c
48175@@ -61,7 +61,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
48176 {
48177 unsigned long sigpipe, flags;
48178 mm_segment_t fs;
48179- const char *data = (const char *)addr;
48180+ const char __user *data = (const char __force_user *)addr;
48181 ssize_t wr = 0;
48182
48183 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
48184@@ -348,6 +348,10 @@ static int validate_request(struct autofs_wait_queue **wait,
48185 return 1;
48186 }
48187
48188+#ifdef CONFIG_GRKERNSEC_HIDESYM
48189+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
48190+#endif
48191+
48192 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
48193 enum autofs_notify notify)
48194 {
48195@@ -381,7 +385,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
48196
48197 /* If this is a direct mount request create a dummy name */
48198 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
48199+#ifdef CONFIG_GRKERNSEC_HIDESYM
48200+ /* this name does get written to userland via autofs4_write() */
48201+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
48202+#else
48203 qstr.len = sprintf(name, "%p", dentry);
48204+#endif
48205 else {
48206 qstr.len = autofs4_getpath(sbi, dentry, &name);
48207 if (!qstr.len) {
48208diff --git a/fs/befs/endian.h b/fs/befs/endian.h
48209index 2722387..c8dd2a7 100644
48210--- a/fs/befs/endian.h
48211+++ b/fs/befs/endian.h
48212@@ -11,7 +11,7 @@
48213
48214 #include <asm/byteorder.h>
48215
48216-static inline u64
48217+static inline u64 __intentional_overflow(-1)
48218 fs64_to_cpu(const struct super_block *sb, fs64 n)
48219 {
48220 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
48221@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
48222 return (__force fs64)cpu_to_be64(n);
48223 }
48224
48225-static inline u32
48226+static inline u32 __intentional_overflow(-1)
48227 fs32_to_cpu(const struct super_block *sb, fs32 n)
48228 {
48229 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
48230diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
48231index 2b3bda8..6a2d4be 100644
48232--- a/fs/befs/linuxvfs.c
48233+++ b/fs/befs/linuxvfs.c
48234@@ -510,7 +510,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
48235 {
48236 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
48237 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
48238- char *link = nd_get_link(nd);
48239+ const char *link = nd_get_link(nd);
48240 if (!IS_ERR(link))
48241 kfree(link);
48242 }
48243diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
48244index 6043567..16a9239 100644
48245--- a/fs/binfmt_aout.c
48246+++ b/fs/binfmt_aout.c
48247@@ -16,6 +16,7 @@
48248 #include <linux/string.h>
48249 #include <linux/fs.h>
48250 #include <linux/file.h>
48251+#include <linux/security.h>
48252 #include <linux/stat.h>
48253 #include <linux/fcntl.h>
48254 #include <linux/ptrace.h>
48255@@ -59,6 +60,8 @@ static int aout_core_dump(struct coredump_params *cprm)
48256 #endif
48257 # define START_STACK(u) ((void __user *)u.start_stack)
48258
48259+ memset(&dump, 0, sizeof(dump));
48260+
48261 fs = get_fs();
48262 set_fs(KERNEL_DS);
48263 has_dumped = 1;
48264@@ -70,10 +73,12 @@ static int aout_core_dump(struct coredump_params *cprm)
48265
48266 /* If the size of the dump file exceeds the rlimit, then see what would happen
48267 if we wrote the stack, but not the data area. */
48268+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
48269 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
48270 dump.u_dsize = 0;
48271
48272 /* Make sure we have enough room to write the stack and data areas. */
48273+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
48274 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
48275 dump.u_ssize = 0;
48276
48277@@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
48278 rlim = rlimit(RLIMIT_DATA);
48279 if (rlim >= RLIM_INFINITY)
48280 rlim = ~0;
48281+
48282+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
48283 if (ex.a_data + ex.a_bss > rlim)
48284 return -ENOMEM;
48285
48286@@ -268,6 +275,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
48287
48288 install_exec_creds(bprm);
48289
48290+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
48291+ current->mm->pax_flags = 0UL;
48292+#endif
48293+
48294+#ifdef CONFIG_PAX_PAGEEXEC
48295+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
48296+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
48297+
48298+#ifdef CONFIG_PAX_EMUTRAMP
48299+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
48300+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
48301+#endif
48302+
48303+#ifdef CONFIG_PAX_MPROTECT
48304+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
48305+ current->mm->pax_flags |= MF_PAX_MPROTECT;
48306+#endif
48307+
48308+ }
48309+#endif
48310+
48311 if (N_MAGIC(ex) == OMAGIC) {
48312 unsigned long text_addr, map_size;
48313 loff_t pos;
48314@@ -333,7 +361,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
48315 }
48316
48317 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
48318- PROT_READ | PROT_WRITE | PROT_EXEC,
48319+ PROT_READ | PROT_WRITE,
48320 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
48321 fd_offset + ex.a_text);
48322 if (error != N_DATADDR(ex)) {
48323diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
48324index 0c42cdb..b62581e9 100644
48325--- a/fs/binfmt_elf.c
48326+++ b/fs/binfmt_elf.c
48327@@ -33,6 +33,7 @@
48328 #include <linux/elf.h>
48329 #include <linux/utsname.h>
48330 #include <linux/coredump.h>
48331+#include <linux/xattr.h>
48332 #include <asm/uaccess.h>
48333 #include <asm/param.h>
48334 #include <asm/page.h>
48335@@ -59,6 +60,10 @@ static int elf_core_dump(struct coredump_params *cprm);
48336 #define elf_core_dump NULL
48337 #endif
48338
48339+#ifdef CONFIG_PAX_MPROTECT
48340+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
48341+#endif
48342+
48343 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
48344 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
48345 #else
48346@@ -78,6 +83,11 @@ static struct linux_binfmt elf_format = {
48347 .load_binary = load_elf_binary,
48348 .load_shlib = load_elf_library,
48349 .core_dump = elf_core_dump,
48350+
48351+#ifdef CONFIG_PAX_MPROTECT
48352+ .handle_mprotect= elf_handle_mprotect,
48353+#endif
48354+
48355 .min_coredump = ELF_EXEC_PAGESIZE,
48356 };
48357
48358@@ -85,6 +95,8 @@ static struct linux_binfmt elf_format = {
48359
48360 static int set_brk(unsigned long start, unsigned long end)
48361 {
48362+ unsigned long e = end;
48363+
48364 start = ELF_PAGEALIGN(start);
48365 end = ELF_PAGEALIGN(end);
48366 if (end > start) {
48367@@ -93,7 +105,7 @@ static int set_brk(unsigned long start, unsigned long end)
48368 if (BAD_ADDR(addr))
48369 return addr;
48370 }
48371- current->mm->start_brk = current->mm->brk = end;
48372+ current->mm->start_brk = current->mm->brk = e;
48373 return 0;
48374 }
48375
48376@@ -154,12 +166,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
48377 elf_addr_t __user *u_rand_bytes;
48378 const char *k_platform = ELF_PLATFORM;
48379 const char *k_base_platform = ELF_BASE_PLATFORM;
48380- unsigned char k_rand_bytes[16];
48381+ u32 k_rand_bytes[4];
48382 int items;
48383 elf_addr_t *elf_info;
48384 int ei_index = 0;
48385 const struct cred *cred = current_cred();
48386 struct vm_area_struct *vma;
48387+ unsigned long saved_auxv[AT_VECTOR_SIZE];
48388
48389 /*
48390 * In some cases (e.g. Hyper-Threading), we want to avoid L1
48391@@ -201,8 +214,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
48392 * Generate 16 random bytes for userspace PRNG seeding.
48393 */
48394 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
48395- u_rand_bytes = (elf_addr_t __user *)
48396- STACK_ALLOC(p, sizeof(k_rand_bytes));
48397+ srandom32(k_rand_bytes[0] ^ random32());
48398+ srandom32(k_rand_bytes[1] ^ random32());
48399+ srandom32(k_rand_bytes[2] ^ random32());
48400+ srandom32(k_rand_bytes[3] ^ random32());
48401+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
48402+ u_rand_bytes = (elf_addr_t __user *) p;
48403 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
48404 return -EFAULT;
48405
48406@@ -314,9 +331,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
48407 return -EFAULT;
48408 current->mm->env_end = p;
48409
48410+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
48411+
48412 /* Put the elf_info on the stack in the right place. */
48413 sp = (elf_addr_t __user *)envp + 1;
48414- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
48415+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
48416 return -EFAULT;
48417 return 0;
48418 }
48419@@ -380,15 +399,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
48420 an ELF header */
48421
48422 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
48423- struct file *interpreter, unsigned long *interp_map_addr,
48424- unsigned long no_base)
48425+ struct file *interpreter, unsigned long no_base)
48426 {
48427 struct elf_phdr *elf_phdata;
48428 struct elf_phdr *eppnt;
48429- unsigned long load_addr = 0;
48430+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
48431 int load_addr_set = 0;
48432 unsigned long last_bss = 0, elf_bss = 0;
48433- unsigned long error = ~0UL;
48434+ unsigned long error = -EINVAL;
48435 unsigned long total_size;
48436 int retval, i, size;
48437
48438@@ -434,6 +452,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
48439 goto out_close;
48440 }
48441
48442+#ifdef CONFIG_PAX_SEGMEXEC
48443+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
48444+ pax_task_size = SEGMEXEC_TASK_SIZE;
48445+#endif
48446+
48447 eppnt = elf_phdata;
48448 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
48449 if (eppnt->p_type == PT_LOAD) {
48450@@ -457,8 +480,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
48451 map_addr = elf_map(interpreter, load_addr + vaddr,
48452 eppnt, elf_prot, elf_type, total_size);
48453 total_size = 0;
48454- if (!*interp_map_addr)
48455- *interp_map_addr = map_addr;
48456 error = map_addr;
48457 if (BAD_ADDR(map_addr))
48458 goto out_close;
48459@@ -477,8 +498,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
48460 k = load_addr + eppnt->p_vaddr;
48461 if (BAD_ADDR(k) ||
48462 eppnt->p_filesz > eppnt->p_memsz ||
48463- eppnt->p_memsz > TASK_SIZE ||
48464- TASK_SIZE - eppnt->p_memsz < k) {
48465+ eppnt->p_memsz > pax_task_size ||
48466+ pax_task_size - eppnt->p_memsz < k) {
48467 error = -ENOMEM;
48468 goto out_close;
48469 }
48470@@ -530,6 +551,315 @@ out:
48471 return error;
48472 }
48473
48474+#ifdef CONFIG_PAX_PT_PAX_FLAGS
48475+#ifdef CONFIG_PAX_SOFTMODE
48476+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
48477+{
48478+ unsigned long pax_flags = 0UL;
48479+
48480+#ifdef CONFIG_PAX_PAGEEXEC
48481+ if (elf_phdata->p_flags & PF_PAGEEXEC)
48482+ pax_flags |= MF_PAX_PAGEEXEC;
48483+#endif
48484+
48485+#ifdef CONFIG_PAX_SEGMEXEC
48486+ if (elf_phdata->p_flags & PF_SEGMEXEC)
48487+ pax_flags |= MF_PAX_SEGMEXEC;
48488+#endif
48489+
48490+#ifdef CONFIG_PAX_EMUTRAMP
48491+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
48492+ pax_flags |= MF_PAX_EMUTRAMP;
48493+#endif
48494+
48495+#ifdef CONFIG_PAX_MPROTECT
48496+ if (elf_phdata->p_flags & PF_MPROTECT)
48497+ pax_flags |= MF_PAX_MPROTECT;
48498+#endif
48499+
48500+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
48501+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
48502+ pax_flags |= MF_PAX_RANDMMAP;
48503+#endif
48504+
48505+ return pax_flags;
48506+}
48507+#endif
48508+
48509+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
48510+{
48511+ unsigned long pax_flags = 0UL;
48512+
48513+#ifdef CONFIG_PAX_PAGEEXEC
48514+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
48515+ pax_flags |= MF_PAX_PAGEEXEC;
48516+#endif
48517+
48518+#ifdef CONFIG_PAX_SEGMEXEC
48519+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
48520+ pax_flags |= MF_PAX_SEGMEXEC;
48521+#endif
48522+
48523+#ifdef CONFIG_PAX_EMUTRAMP
48524+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
48525+ pax_flags |= MF_PAX_EMUTRAMP;
48526+#endif
48527+
48528+#ifdef CONFIG_PAX_MPROTECT
48529+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
48530+ pax_flags |= MF_PAX_MPROTECT;
48531+#endif
48532+
48533+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
48534+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
48535+ pax_flags |= MF_PAX_RANDMMAP;
48536+#endif
48537+
48538+ return pax_flags;
48539+}
48540+#endif
48541+
48542+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
48543+#ifdef CONFIG_PAX_SOFTMODE
48544+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
48545+{
48546+ unsigned long pax_flags = 0UL;
48547+
48548+#ifdef CONFIG_PAX_PAGEEXEC
48549+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
48550+ pax_flags |= MF_PAX_PAGEEXEC;
48551+#endif
48552+
48553+#ifdef CONFIG_PAX_SEGMEXEC
48554+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
48555+ pax_flags |= MF_PAX_SEGMEXEC;
48556+#endif
48557+
48558+#ifdef CONFIG_PAX_EMUTRAMP
48559+ if (pax_flags_softmode & MF_PAX_EMUTRAMP)
48560+ pax_flags |= MF_PAX_EMUTRAMP;
48561+#endif
48562+
48563+#ifdef CONFIG_PAX_MPROTECT
48564+ if (pax_flags_softmode & MF_PAX_MPROTECT)
48565+ pax_flags |= MF_PAX_MPROTECT;
48566+#endif
48567+
48568+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
48569+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
48570+ pax_flags |= MF_PAX_RANDMMAP;
48571+#endif
48572+
48573+ return pax_flags;
48574+}
48575+#endif
48576+
48577+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
48578+{
48579+ unsigned long pax_flags = 0UL;
48580+
48581+#ifdef CONFIG_PAX_PAGEEXEC
48582+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
48583+ pax_flags |= MF_PAX_PAGEEXEC;
48584+#endif
48585+
48586+#ifdef CONFIG_PAX_SEGMEXEC
48587+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
48588+ pax_flags |= MF_PAX_SEGMEXEC;
48589+#endif
48590+
48591+#ifdef CONFIG_PAX_EMUTRAMP
48592+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
48593+ pax_flags |= MF_PAX_EMUTRAMP;
48594+#endif
48595+
48596+#ifdef CONFIG_PAX_MPROTECT
48597+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
48598+ pax_flags |= MF_PAX_MPROTECT;
48599+#endif
48600+
48601+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
48602+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
48603+ pax_flags |= MF_PAX_RANDMMAP;
48604+#endif
48605+
48606+ return pax_flags;
48607+}
48608+#endif
48609+
48610+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
48611+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
48612+{
48613+ unsigned long pax_flags = 0UL;
48614+
48615+#ifdef CONFIG_PAX_EI_PAX
48616+
48617+#ifdef CONFIG_PAX_PAGEEXEC
48618+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
48619+ pax_flags |= MF_PAX_PAGEEXEC;
48620+#endif
48621+
48622+#ifdef CONFIG_PAX_SEGMEXEC
48623+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
48624+ pax_flags |= MF_PAX_SEGMEXEC;
48625+#endif
48626+
48627+#ifdef CONFIG_PAX_EMUTRAMP
48628+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
48629+ pax_flags |= MF_PAX_EMUTRAMP;
48630+#endif
48631+
48632+#ifdef CONFIG_PAX_MPROTECT
48633+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
48634+ pax_flags |= MF_PAX_MPROTECT;
48635+#endif
48636+
48637+#ifdef CONFIG_PAX_ASLR
48638+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
48639+ pax_flags |= MF_PAX_RANDMMAP;
48640+#endif
48641+
48642+#else
48643+
48644+#ifdef CONFIG_PAX_PAGEEXEC
48645+ pax_flags |= MF_PAX_PAGEEXEC;
48646+#endif
48647+
48648+#ifdef CONFIG_PAX_SEGMEXEC
48649+ pax_flags |= MF_PAX_SEGMEXEC;
48650+#endif
48651+
48652+#ifdef CONFIG_PAX_MPROTECT
48653+ pax_flags |= MF_PAX_MPROTECT;
48654+#endif
48655+
48656+#ifdef CONFIG_PAX_RANDMMAP
48657+ if (randomize_va_space)
48658+ pax_flags |= MF_PAX_RANDMMAP;
48659+#endif
48660+
48661+#endif
48662+
48663+ return pax_flags;
48664+}
48665+
48666+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
48667+{
48668+
48669+#ifdef CONFIG_PAX_PT_PAX_FLAGS
48670+ unsigned long i;
48671+
48672+ for (i = 0UL; i < elf_ex->e_phnum; i++)
48673+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
48674+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
48675+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
48676+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
48677+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
48678+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
48679+ return ~0UL;
48680+
48681+#ifdef CONFIG_PAX_SOFTMODE
48682+ if (pax_softmode)
48683+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
48684+ else
48685+#endif
48686+
48687+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
48688+ break;
48689+ }
48690+#endif
48691+
48692+ return ~0UL;
48693+}
48694+
48695+static unsigned long pax_parse_xattr_pax(struct file * const file)
48696+{
48697+
48698+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
48699+ ssize_t xattr_size, i;
48700+ unsigned char xattr_value[5];
48701+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
48702+
48703+ xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
48704+ if (xattr_size <= 0 || xattr_size > 5)
48705+ return ~0UL;
48706+
48707+ for (i = 0; i < xattr_size; i++)
48708+ switch (xattr_value[i]) {
48709+ default:
48710+ return ~0UL;
48711+
48712+#define parse_flag(option1, option2, flag) \
48713+ case option1: \
48714+ if (pax_flags_hardmode & MF_PAX_##flag) \
48715+ return ~0UL; \
48716+ pax_flags_hardmode |= MF_PAX_##flag; \
48717+ break; \
48718+ case option2: \
48719+ if (pax_flags_softmode & MF_PAX_##flag) \
48720+ return ~0UL; \
48721+ pax_flags_softmode |= MF_PAX_##flag; \
48722+ break;
48723+
48724+ parse_flag('p', 'P', PAGEEXEC);
48725+ parse_flag('e', 'E', EMUTRAMP);
48726+ parse_flag('m', 'M', MPROTECT);
48727+ parse_flag('r', 'R', RANDMMAP);
48728+ parse_flag('s', 'S', SEGMEXEC);
48729+
48730+#undef parse_flag
48731+ }
48732+
48733+ if (pax_flags_hardmode & pax_flags_softmode)
48734+ return ~0UL;
48735+
48736+#ifdef CONFIG_PAX_SOFTMODE
48737+ if (pax_softmode)
48738+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
48739+ else
48740+#endif
48741+
48742+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
48743+#else
48744+ return ~0UL;
48745+#endif
48746+
48747+}
48748+
48749+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
48750+{
48751+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
48752+
48753+ pax_flags = pax_parse_ei_pax(elf_ex);
48754+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
48755+ xattr_pax_flags = pax_parse_xattr_pax(file);
48756+
48757+ if (pt_pax_flags == ~0UL)
48758+ pt_pax_flags = xattr_pax_flags;
48759+ else if (xattr_pax_flags == ~0UL)
48760+ xattr_pax_flags = pt_pax_flags;
48761+ if (pt_pax_flags != xattr_pax_flags)
48762+ return -EINVAL;
48763+ if (pt_pax_flags != ~0UL)
48764+ pax_flags = pt_pax_flags;
48765+
48766+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
48767+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
48768+ if ((__supported_pte_mask & _PAGE_NX))
48769+ pax_flags &= ~MF_PAX_SEGMEXEC;
48770+ else
48771+ pax_flags &= ~MF_PAX_PAGEEXEC;
48772+ }
48773+#endif
48774+
48775+ if (0 > pax_check_flags(&pax_flags))
48776+ return -EINVAL;
48777+
48778+ current->mm->pax_flags = pax_flags;
48779+ return 0;
48780+}
48781+#endif
48782+
48783 /*
48784 * These are the functions used to load ELF style executables and shared
48785 * libraries. There is no binary dependent code anywhere else.
48786@@ -546,6 +876,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
48787 {
48788 unsigned int random_variable = 0;
48789
48790+#ifdef CONFIG_PAX_RANDUSTACK
48791+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
48792+ return stack_top - current->mm->delta_stack;
48793+#endif
48794+
48795 if ((current->flags & PF_RANDOMIZE) &&
48796 !(current->personality & ADDR_NO_RANDOMIZE)) {
48797 random_variable = get_random_int() & STACK_RND_MASK;
48798@@ -564,7 +899,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
48799 unsigned long load_addr = 0, load_bias = 0;
48800 int load_addr_set = 0;
48801 char * elf_interpreter = NULL;
48802- unsigned long error;
48803+ unsigned long error = 0;
48804 struct elf_phdr *elf_ppnt, *elf_phdata;
48805 unsigned long elf_bss, elf_brk;
48806 int retval, i;
48807@@ -574,12 +909,12 @@ static int load_elf_binary(struct linux_binprm *bprm)
48808 unsigned long start_code, end_code, start_data, end_data;
48809 unsigned long reloc_func_desc __maybe_unused = 0;
48810 int executable_stack = EXSTACK_DEFAULT;
48811- unsigned long def_flags = 0;
48812 struct pt_regs *regs = current_pt_regs();
48813 struct {
48814 struct elfhdr elf_ex;
48815 struct elfhdr interp_elf_ex;
48816 } *loc;
48817+ unsigned long pax_task_size = TASK_SIZE;
48818
48819 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
48820 if (!loc) {
48821@@ -715,11 +1050,81 @@ static int load_elf_binary(struct linux_binprm *bprm)
48822 goto out_free_dentry;
48823
48824 /* OK, This is the point of no return */
48825- current->mm->def_flags = def_flags;
48826+
48827+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
48828+ current->mm->pax_flags = 0UL;
48829+#endif
48830+
48831+#ifdef CONFIG_PAX_DLRESOLVE
48832+ current->mm->call_dl_resolve = 0UL;
48833+#endif
48834+
48835+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
48836+ current->mm->call_syscall = 0UL;
48837+#endif
48838+
48839+#ifdef CONFIG_PAX_ASLR
48840+ current->mm->delta_mmap = 0UL;
48841+ current->mm->delta_stack = 0UL;
48842+#endif
48843+
48844+ current->mm->def_flags = 0;
48845+
48846+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
48847+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
48848+ send_sig(SIGKILL, current, 0);
48849+ goto out_free_dentry;
48850+ }
48851+#endif
48852+
48853+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
48854+ pax_set_initial_flags(bprm);
48855+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
48856+ if (pax_set_initial_flags_func)
48857+ (pax_set_initial_flags_func)(bprm);
48858+#endif
48859+
48860+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
48861+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
48862+ current->mm->context.user_cs_limit = PAGE_SIZE;
48863+ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
48864+ }
48865+#endif
48866+
48867+#ifdef CONFIG_PAX_SEGMEXEC
48868+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
48869+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
48870+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
48871+ pax_task_size = SEGMEXEC_TASK_SIZE;
48872+ current->mm->def_flags |= VM_NOHUGEPAGE;
48873+ }
48874+#endif
48875+
48876+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
48877+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
48878+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
48879+ put_cpu();
48880+ }
48881+#endif
48882
48883 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
48884 may depend on the personality. */
48885 SET_PERSONALITY(loc->elf_ex);
48886+
48887+#ifdef CONFIG_PAX_ASLR
48888+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
48889+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
48890+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
48891+ }
48892+#endif
48893+
48894+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
48895+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
48896+ executable_stack = EXSTACK_DISABLE_X;
48897+ current->personality &= ~READ_IMPLIES_EXEC;
48898+ } else
48899+#endif
48900+
48901 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
48902 current->personality |= READ_IMPLIES_EXEC;
48903
48904@@ -810,6 +1215,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
48905 #else
48906 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
48907 #endif
48908+
48909+#ifdef CONFIG_PAX_RANDMMAP
48910+ /* PaX: randomize base address at the default exe base if requested */
48911+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
48912+#ifdef CONFIG_SPARC64
48913+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
48914+#else
48915+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
48916+#endif
48917+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
48918+ elf_flags |= MAP_FIXED;
48919+ }
48920+#endif
48921+
48922 }
48923
48924 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
48925@@ -842,9 +1261,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
48926 * allowed task size. Note that p_filesz must always be
48927 * <= p_memsz so it is only necessary to check p_memsz.
48928 */
48929- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
48930- elf_ppnt->p_memsz > TASK_SIZE ||
48931- TASK_SIZE - elf_ppnt->p_memsz < k) {
48932+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
48933+ elf_ppnt->p_memsz > pax_task_size ||
48934+ pax_task_size - elf_ppnt->p_memsz < k) {
48935 /* set_brk can never work. Avoid overflows. */
48936 send_sig(SIGKILL, current, 0);
48937 retval = -EINVAL;
48938@@ -883,17 +1302,44 @@ static int load_elf_binary(struct linux_binprm *bprm)
48939 goto out_free_dentry;
48940 }
48941 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
48942- send_sig(SIGSEGV, current, 0);
48943- retval = -EFAULT; /* Nobody gets to see this, but.. */
48944- goto out_free_dentry;
48945+ /*
48946+ * This bss-zeroing can fail if the ELF
48947+ * file specifies odd protections. So
48948+ * we don't check the return value
48949+ */
48950 }
48951
48952+#ifdef CONFIG_PAX_RANDMMAP
48953+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
48954+ unsigned long start, size, flags, vm_flags;
48955+
48956+ start = ELF_PAGEALIGN(elf_brk);
48957+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
48958+ flags = MAP_FIXED | MAP_PRIVATE;
48959+ vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
48960+
48961+ down_write(&current->mm->mmap_sem);
48962+ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
48963+ retval = -ENOMEM;
48964+ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
48965+// if (current->personality & ADDR_NO_RANDOMIZE)
48966+// vm_flags |= VM_READ | VM_MAYREAD;
48967+ start = mmap_region(NULL, start, PAGE_ALIGN(size), flags, vm_flags, 0);
48968+ retval = IS_ERR_VALUE(start) ? start : 0;
48969+ }
48970+ up_write(&current->mm->mmap_sem);
48971+ if (retval == 0)
48972+ retval = set_brk(start + size, start + size + PAGE_SIZE);
48973+ if (retval < 0) {
48974+ send_sig(SIGKILL, current, 0);
48975+ goto out_free_dentry;
48976+ }
48977+ }
48978+#endif
48979+
48980 if (elf_interpreter) {
48981- unsigned long interp_map_addr = 0;
48982-
48983 elf_entry = load_elf_interp(&loc->interp_elf_ex,
48984 interpreter,
48985- &interp_map_addr,
48986 load_bias);
48987 if (!IS_ERR((void *)elf_entry)) {
48988 /*
48989@@ -1115,7 +1561,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
48990 * Decide what to dump of a segment, part, all or none.
48991 */
48992 static unsigned long vma_dump_size(struct vm_area_struct *vma,
48993- unsigned long mm_flags)
48994+ unsigned long mm_flags, long signr)
48995 {
48996 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
48997
48998@@ -1152,7 +1598,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
48999 if (vma->vm_file == NULL)
49000 return 0;
49001
49002- if (FILTER(MAPPED_PRIVATE))
49003+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
49004 goto whole;
49005
49006 /*
49007@@ -1374,9 +1820,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
49008 {
49009 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
49010 int i = 0;
49011- do
49012+ do {
49013 i += 2;
49014- while (auxv[i - 2] != AT_NULL);
49015+ } while (auxv[i - 2] != AT_NULL);
49016 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
49017 }
49018
49019@@ -2006,14 +2452,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
49020 }
49021
49022 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
49023- unsigned long mm_flags)
49024+ struct coredump_params *cprm)
49025 {
49026 struct vm_area_struct *vma;
49027 size_t size = 0;
49028
49029 for (vma = first_vma(current, gate_vma); vma != NULL;
49030 vma = next_vma(vma, gate_vma))
49031- size += vma_dump_size(vma, mm_flags);
49032+ size += vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
49033 return size;
49034 }
49035
49036@@ -2107,7 +2553,7 @@ static int elf_core_dump(struct coredump_params *cprm)
49037
49038 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
49039
49040- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
49041+ offset += elf_core_vma_data_size(gate_vma, cprm);
49042 offset += elf_core_extra_data_size();
49043 e_shoff = offset;
49044
49045@@ -2121,10 +2567,12 @@ static int elf_core_dump(struct coredump_params *cprm)
49046 offset = dataoff;
49047
49048 size += sizeof(*elf);
49049+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
49050 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
49051 goto end_coredump;
49052
49053 size += sizeof(*phdr4note);
49054+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
49055 if (size > cprm->limit
49056 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
49057 goto end_coredump;
49058@@ -2138,7 +2586,7 @@ static int elf_core_dump(struct coredump_params *cprm)
49059 phdr.p_offset = offset;
49060 phdr.p_vaddr = vma->vm_start;
49061 phdr.p_paddr = 0;
49062- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
49063+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
49064 phdr.p_memsz = vma->vm_end - vma->vm_start;
49065 offset += phdr.p_filesz;
49066 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
49067@@ -2149,6 +2597,7 @@ static int elf_core_dump(struct coredump_params *cprm)
49068 phdr.p_align = ELF_EXEC_PAGESIZE;
49069
49070 size += sizeof(phdr);
49071+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
49072 if (size > cprm->limit
49073 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
49074 goto end_coredump;
49075@@ -2173,7 +2622,7 @@ static int elf_core_dump(struct coredump_params *cprm)
49076 unsigned long addr;
49077 unsigned long end;
49078
49079- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
49080+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
49081
49082 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
49083 struct page *page;
49084@@ -2182,6 +2631,7 @@ static int elf_core_dump(struct coredump_params *cprm)
49085 page = get_dump_page(addr);
49086 if (page) {
49087 void *kaddr = kmap(page);
49088+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
49089 stop = ((size += PAGE_SIZE) > cprm->limit) ||
49090 !dump_write(cprm->file, kaddr,
49091 PAGE_SIZE);
49092@@ -2199,6 +2649,7 @@ static int elf_core_dump(struct coredump_params *cprm)
49093
49094 if (e_phnum == PN_XNUM) {
49095 size += sizeof(*shdr4extnum);
49096+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
49097 if (size > cprm->limit
49098 || !dump_write(cprm->file, shdr4extnum,
49099 sizeof(*shdr4extnum)))
49100@@ -2219,6 +2670,97 @@ out:
49101
49102 #endif /* CONFIG_ELF_CORE */
49103
49104+#ifdef CONFIG_PAX_MPROTECT
49105+/* PaX: non-PIC ELF libraries need relocations on their executable segments
49106+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
49107+ * we'll remove VM_MAYWRITE for good on RELRO segments.
49108+ *
49109+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
49110+ * basis because we want to allow the common case and not the special ones.
49111+ */
49112+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
49113+{
49114+ struct elfhdr elf_h;
49115+ struct elf_phdr elf_p;
49116+ unsigned long i;
49117+ unsigned long oldflags;
49118+ bool is_textrel_rw, is_textrel_rx, is_relro;
49119+
49120+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
49121+ return;
49122+
49123+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
49124+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
49125+
49126+#ifdef CONFIG_PAX_ELFRELOCS
49127+ /* possible TEXTREL */
49128+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
49129+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
49130+#else
49131+ is_textrel_rw = false;
49132+ is_textrel_rx = false;
49133+#endif
49134+
49135+ /* possible RELRO */
49136+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
49137+
49138+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
49139+ return;
49140+
49141+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
49142+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
49143+
49144+#ifdef CONFIG_PAX_ETEXECRELOCS
49145+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
49146+#else
49147+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
49148+#endif
49149+
49150+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
49151+ !elf_check_arch(&elf_h) ||
49152+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
49153+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
49154+ return;
49155+
49156+ for (i = 0UL; i < elf_h.e_phnum; i++) {
49157+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
49158+ return;
49159+ switch (elf_p.p_type) {
49160+ case PT_DYNAMIC:
49161+ if (!is_textrel_rw && !is_textrel_rx)
49162+ continue;
49163+ i = 0UL;
49164+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
49165+ elf_dyn dyn;
49166+
49167+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
49168+ return;
49169+ if (dyn.d_tag == DT_NULL)
49170+ return;
49171+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
49172+ gr_log_textrel(vma);
49173+ if (is_textrel_rw)
49174+ vma->vm_flags |= VM_MAYWRITE;
49175+ else
49176+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
49177+ vma->vm_flags &= ~VM_MAYWRITE;
49178+ return;
49179+ }
49180+ i++;
49181+ }
49182+ return;
49183+
49184+ case PT_GNU_RELRO:
49185+ if (!is_relro)
49186+ continue;
49187+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
49188+ vma->vm_flags &= ~VM_MAYWRITE;
49189+ return;
49190+ }
49191+ }
49192+}
49193+#endif
49194+
49195 static int __init init_elf_binfmt(void)
49196 {
49197 register_binfmt(&elf_format);
49198diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
49199index b563719..3868998 100644
49200--- a/fs/binfmt_flat.c
49201+++ b/fs/binfmt_flat.c
49202@@ -562,7 +562,9 @@ static int load_flat_file(struct linux_binprm * bprm,
49203 realdatastart = (unsigned long) -ENOMEM;
49204 printk("Unable to allocate RAM for process data, errno %d\n",
49205 (int)-realdatastart);
49206+ down_write(&current->mm->mmap_sem);
49207 vm_munmap(textpos, text_len);
49208+ up_write(&current->mm->mmap_sem);
49209 ret = realdatastart;
49210 goto err;
49211 }
49212@@ -586,8 +588,10 @@ static int load_flat_file(struct linux_binprm * bprm,
49213 }
49214 if (IS_ERR_VALUE(result)) {
49215 printk("Unable to read data+bss, errno %d\n", (int)-result);
49216+ down_write(&current->mm->mmap_sem);
49217 vm_munmap(textpos, text_len);
49218 vm_munmap(realdatastart, len);
49219+ up_write(&current->mm->mmap_sem);
49220 ret = result;
49221 goto err;
49222 }
49223@@ -654,8 +658,10 @@ static int load_flat_file(struct linux_binprm * bprm,
49224 }
49225 if (IS_ERR_VALUE(result)) {
49226 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
49227+ down_write(&current->mm->mmap_sem);
49228 vm_munmap(textpos, text_len + data_len + extra +
49229 MAX_SHARED_LIBS * sizeof(unsigned long));
49230+ up_write(&current->mm->mmap_sem);
49231 ret = result;
49232 goto err;
49233 }
49234diff --git a/fs/bio.c b/fs/bio.c
49235index b96fc6c..431d628 100644
49236--- a/fs/bio.c
49237+++ b/fs/bio.c
49238@@ -818,7 +818,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
49239 /*
49240 * Overflow, abort
49241 */
49242- if (end < start)
49243+ if (end < start || end - start > INT_MAX - nr_pages)
49244 return ERR_PTR(-EINVAL);
49245
49246 nr_pages += end - start;
49247@@ -952,7 +952,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
49248 /*
49249 * Overflow, abort
49250 */
49251- if (end < start)
49252+ if (end < start || end - start > INT_MAX - nr_pages)
49253 return ERR_PTR(-EINVAL);
49254
49255 nr_pages += end - start;
49256@@ -1214,7 +1214,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
49257 const int read = bio_data_dir(bio) == READ;
49258 struct bio_map_data *bmd = bio->bi_private;
49259 int i;
49260- char *p = bmd->sgvecs[0].iov_base;
49261+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
49262
49263 __bio_for_each_segment(bvec, bio, i, 0) {
49264 char *addr = page_address(bvec->bv_page);
49265diff --git a/fs/block_dev.c b/fs/block_dev.c
49266index 883dc49..f27794a 100644
49267--- a/fs/block_dev.c
49268+++ b/fs/block_dev.c
49269@@ -652,7 +652,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
49270 else if (bdev->bd_contains == bdev)
49271 return true; /* is a whole device which isn't held */
49272
49273- else if (whole->bd_holder == bd_may_claim)
49274+ else if (whole->bd_holder == (void *)bd_may_claim)
49275 return true; /* is a partition of a device that is being partitioned */
49276 else if (whole->bd_holder != NULL)
49277 return false; /* is a partition of a held device */
49278diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
49279index ce1c169..1ef484f 100644
49280--- a/fs/btrfs/ctree.c
49281+++ b/fs/btrfs/ctree.c
49282@@ -1036,9 +1036,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
49283 free_extent_buffer(buf);
49284 add_root_to_dirty_list(root);
49285 } else {
49286- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
49287- parent_start = parent->start;
49288- else
49289+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
49290+ if (parent)
49291+ parent_start = parent->start;
49292+ else
49293+ parent_start = 0;
49294+ } else
49295 parent_start = 0;
49296
49297 WARN_ON(trans->transid != btrfs_header_generation(parent));
49298diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
49299index 7c4e6cc..27bd5c2 100644
49300--- a/fs/btrfs/inode.c
49301+++ b/fs/btrfs/inode.c
49302@@ -7314,7 +7314,7 @@ fail:
49303 return -ENOMEM;
49304 }
49305
49306-static int btrfs_getattr(struct vfsmount *mnt,
49307+int btrfs_getattr(struct vfsmount *mnt,
49308 struct dentry *dentry, struct kstat *stat)
49309 {
49310 struct inode *inode = dentry->d_inode;
49311@@ -7328,6 +7328,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
49312 return 0;
49313 }
49314
49315+EXPORT_SYMBOL(btrfs_getattr);
49316+
49317+dev_t get_btrfs_dev_from_inode(struct inode *inode)
49318+{
49319+ return BTRFS_I(inode)->root->anon_dev;
49320+}
49321+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
49322+
49323 /*
49324 * If a file is moved, it will inherit the cow and compression flags of the new
49325 * directory.
49326diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
49327index 338f259..b657640 100644
49328--- a/fs/btrfs/ioctl.c
49329+++ b/fs/btrfs/ioctl.c
49330@@ -3033,9 +3033,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
49331 for (i = 0; i < num_types; i++) {
49332 struct btrfs_space_info *tmp;
49333
49334+ /* Don't copy in more than we allocated */
49335 if (!slot_count)
49336 break;
49337
49338+ slot_count--;
49339+
49340 info = NULL;
49341 rcu_read_lock();
49342 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
49343@@ -3057,10 +3060,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
49344 memcpy(dest, &space, sizeof(space));
49345 dest++;
49346 space_args.total_spaces++;
49347- slot_count--;
49348 }
49349- if (!slot_count)
49350- break;
49351 }
49352 up_read(&info->groups_sem);
49353 }
49354diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
49355index 300e09a..9fe4539 100644
49356--- a/fs/btrfs/relocation.c
49357+++ b/fs/btrfs/relocation.c
49358@@ -1269,7 +1269,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
49359 }
49360 spin_unlock(&rc->reloc_root_tree.lock);
49361
49362- BUG_ON((struct btrfs_root *)node->data != root);
49363+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
49364
49365 if (!del) {
49366 spin_lock(&rc->reloc_root_tree.lock);
49367diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
49368index d8982e9..29a85fa 100644
49369--- a/fs/btrfs/super.c
49370+++ b/fs/btrfs/super.c
49371@@ -267,7 +267,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
49372 function, line, errstr);
49373 return;
49374 }
49375- ACCESS_ONCE(trans->transaction->aborted) = errno;
49376+ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
49377 __btrfs_std_error(root->fs_info, function, line, errno, NULL);
49378 }
49379 /*
49380diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
49381index 622f469..e8d2d55 100644
49382--- a/fs/cachefiles/bind.c
49383+++ b/fs/cachefiles/bind.c
49384@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
49385 args);
49386
49387 /* start by checking things over */
49388- ASSERT(cache->fstop_percent >= 0 &&
49389- cache->fstop_percent < cache->fcull_percent &&
49390+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
49391 cache->fcull_percent < cache->frun_percent &&
49392 cache->frun_percent < 100);
49393
49394- ASSERT(cache->bstop_percent >= 0 &&
49395- cache->bstop_percent < cache->bcull_percent &&
49396+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
49397 cache->bcull_percent < cache->brun_percent &&
49398 cache->brun_percent < 100);
49399
49400diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
49401index 0a1467b..6a53245 100644
49402--- a/fs/cachefiles/daemon.c
49403+++ b/fs/cachefiles/daemon.c
49404@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
49405 if (n > buflen)
49406 return -EMSGSIZE;
49407
49408- if (copy_to_user(_buffer, buffer, n) != 0)
49409+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
49410 return -EFAULT;
49411
49412 return n;
49413@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
49414 if (test_bit(CACHEFILES_DEAD, &cache->flags))
49415 return -EIO;
49416
49417- if (datalen < 0 || datalen > PAGE_SIZE - 1)
49418+ if (datalen > PAGE_SIZE - 1)
49419 return -EOPNOTSUPP;
49420
49421 /* drag the command string into the kernel so we can parse it */
49422@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
49423 if (args[0] != '%' || args[1] != '\0')
49424 return -EINVAL;
49425
49426- if (fstop < 0 || fstop >= cache->fcull_percent)
49427+ if (fstop >= cache->fcull_percent)
49428 return cachefiles_daemon_range_error(cache, args);
49429
49430 cache->fstop_percent = fstop;
49431@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
49432 if (args[0] != '%' || args[1] != '\0')
49433 return -EINVAL;
49434
49435- if (bstop < 0 || bstop >= cache->bcull_percent)
49436+ if (bstop >= cache->bcull_percent)
49437 return cachefiles_daemon_range_error(cache, args);
49438
49439 cache->bstop_percent = bstop;
49440diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
49441index 4938251..7e01445 100644
49442--- a/fs/cachefiles/internal.h
49443+++ b/fs/cachefiles/internal.h
49444@@ -59,7 +59,7 @@ struct cachefiles_cache {
49445 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
49446 struct rb_root active_nodes; /* active nodes (can't be culled) */
49447 rwlock_t active_lock; /* lock for active_nodes */
49448- atomic_t gravecounter; /* graveyard uniquifier */
49449+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
49450 unsigned frun_percent; /* when to stop culling (% files) */
49451 unsigned fcull_percent; /* when to start culling (% files) */
49452 unsigned fstop_percent; /* when to stop allocating (% files) */
49453@@ -171,19 +171,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
49454 * proc.c
49455 */
49456 #ifdef CONFIG_CACHEFILES_HISTOGRAM
49457-extern atomic_t cachefiles_lookup_histogram[HZ];
49458-extern atomic_t cachefiles_mkdir_histogram[HZ];
49459-extern atomic_t cachefiles_create_histogram[HZ];
49460+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
49461+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
49462+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
49463
49464 extern int __init cachefiles_proc_init(void);
49465 extern void cachefiles_proc_cleanup(void);
49466 static inline
49467-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
49468+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
49469 {
49470 unsigned long jif = jiffies - start_jif;
49471 if (jif >= HZ)
49472 jif = HZ - 1;
49473- atomic_inc(&histogram[jif]);
49474+ atomic_inc_unchecked(&histogram[jif]);
49475 }
49476
49477 #else
49478diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
49479index 8c01c5fc..15f982e 100644
49480--- a/fs/cachefiles/namei.c
49481+++ b/fs/cachefiles/namei.c
49482@@ -317,7 +317,7 @@ try_again:
49483 /* first step is to make up a grave dentry in the graveyard */
49484 sprintf(nbuffer, "%08x%08x",
49485 (uint32_t) get_seconds(),
49486- (uint32_t) atomic_inc_return(&cache->gravecounter));
49487+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
49488
49489 /* do the multiway lock magic */
49490 trap = lock_rename(cache->graveyard, dir);
49491diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
49492index eccd339..4c1d995 100644
49493--- a/fs/cachefiles/proc.c
49494+++ b/fs/cachefiles/proc.c
49495@@ -14,9 +14,9 @@
49496 #include <linux/seq_file.h>
49497 #include "internal.h"
49498
49499-atomic_t cachefiles_lookup_histogram[HZ];
49500-atomic_t cachefiles_mkdir_histogram[HZ];
49501-atomic_t cachefiles_create_histogram[HZ];
49502+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
49503+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
49504+atomic_unchecked_t cachefiles_create_histogram[HZ];
49505
49506 /*
49507 * display the latency histogram
49508@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
49509 return 0;
49510 default:
49511 index = (unsigned long) v - 3;
49512- x = atomic_read(&cachefiles_lookup_histogram[index]);
49513- y = atomic_read(&cachefiles_mkdir_histogram[index]);
49514- z = atomic_read(&cachefiles_create_histogram[index]);
49515+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
49516+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
49517+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
49518 if (x == 0 && y == 0 && z == 0)
49519 return 0;
49520
49521diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
49522index 4809922..aab2c39 100644
49523--- a/fs/cachefiles/rdwr.c
49524+++ b/fs/cachefiles/rdwr.c
49525@@ -965,7 +965,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
49526 old_fs = get_fs();
49527 set_fs(KERNEL_DS);
49528 ret = file->f_op->write(
49529- file, (const void __user *) data, len, &pos);
49530+ file, (const void __force_user *) data, len, &pos);
49531 set_fs(old_fs);
49532 kunmap(page);
49533 if (ret != len)
49534diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
49535index 8c1aabe..bbf856a 100644
49536--- a/fs/ceph/dir.c
49537+++ b/fs/ceph/dir.c
49538@@ -243,7 +243,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
49539 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
49540 struct ceph_mds_client *mdsc = fsc->mdsc;
49541 unsigned frag = fpos_frag(filp->f_pos);
49542- int off = fpos_off(filp->f_pos);
49543+ unsigned int off = fpos_off(filp->f_pos);
49544 int err;
49545 u32 ftype;
49546 struct ceph_mds_reply_info_parsed *rinfo;
49547diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
49548index d9ea6ed..1e6c8ac 100644
49549--- a/fs/cifs/cifs_debug.c
49550+++ b/fs/cifs/cifs_debug.c
49551@@ -267,8 +267,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
49552
49553 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
49554 #ifdef CONFIG_CIFS_STATS2
49555- atomic_set(&totBufAllocCount, 0);
49556- atomic_set(&totSmBufAllocCount, 0);
49557+ atomic_set_unchecked(&totBufAllocCount, 0);
49558+ atomic_set_unchecked(&totSmBufAllocCount, 0);
49559 #endif /* CONFIG_CIFS_STATS2 */
49560 spin_lock(&cifs_tcp_ses_lock);
49561 list_for_each(tmp1, &cifs_tcp_ses_list) {
49562@@ -281,7 +281,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
49563 tcon = list_entry(tmp3,
49564 struct cifs_tcon,
49565 tcon_list);
49566- atomic_set(&tcon->num_smbs_sent, 0);
49567+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
49568 if (server->ops->clear_stats)
49569 server->ops->clear_stats(tcon);
49570 }
49571@@ -313,8 +313,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
49572 smBufAllocCount.counter, cifs_min_small);
49573 #ifdef CONFIG_CIFS_STATS2
49574 seq_printf(m, "Total Large %d Small %d Allocations\n",
49575- atomic_read(&totBufAllocCount),
49576- atomic_read(&totSmBufAllocCount));
49577+ atomic_read_unchecked(&totBufAllocCount),
49578+ atomic_read_unchecked(&totSmBufAllocCount));
49579 #endif /* CONFIG_CIFS_STATS2 */
49580
49581 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
49582@@ -343,7 +343,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
49583 if (tcon->need_reconnect)
49584 seq_puts(m, "\tDISCONNECTED ");
49585 seq_printf(m, "\nSMBs: %d",
49586- atomic_read(&tcon->num_smbs_sent));
49587+ atomic_read_unchecked(&tcon->num_smbs_sent));
49588 if (server->ops->print_stats)
49589 server->ops->print_stats(m, tcon);
49590 }
49591diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
49592index b9db388..9a73d6d 100644
49593--- a/fs/cifs/cifsfs.c
49594+++ b/fs/cifs/cifsfs.c
49595@@ -1026,7 +1026,7 @@ cifs_init_request_bufs(void)
49596 /* cERROR(1, "CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize); */
49597 cifs_req_cachep = kmem_cache_create("cifs_request",
49598 CIFSMaxBufSize + max_hdr_size, 0,
49599- SLAB_HWCACHE_ALIGN, NULL);
49600+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
49601 if (cifs_req_cachep == NULL)
49602 return -ENOMEM;
49603
49604@@ -1053,7 +1053,7 @@ cifs_init_request_bufs(void)
49605 efficient to alloc 1 per page off the slab compared to 17K (5page)
49606 alloc of large cifs buffers even when page debugging is on */
49607 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
49608- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
49609+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
49610 NULL);
49611 if (cifs_sm_req_cachep == NULL) {
49612 mempool_destroy(cifs_req_poolp);
49613@@ -1138,8 +1138,8 @@ init_cifs(void)
49614 atomic_set(&bufAllocCount, 0);
49615 atomic_set(&smBufAllocCount, 0);
49616 #ifdef CONFIG_CIFS_STATS2
49617- atomic_set(&totBufAllocCount, 0);
49618- atomic_set(&totSmBufAllocCount, 0);
49619+ atomic_set_unchecked(&totBufAllocCount, 0);
49620+ atomic_set_unchecked(&totSmBufAllocCount, 0);
49621 #endif /* CONFIG_CIFS_STATS2 */
49622
49623 atomic_set(&midCount, 0);
49624diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
49625index e6899ce..d6b2920 100644
49626--- a/fs/cifs/cifsglob.h
49627+++ b/fs/cifs/cifsglob.h
49628@@ -751,35 +751,35 @@ struct cifs_tcon {
49629 __u16 Flags; /* optional support bits */
49630 enum statusEnum tidStatus;
49631 #ifdef CONFIG_CIFS_STATS
49632- atomic_t num_smbs_sent;
49633+ atomic_unchecked_t num_smbs_sent;
49634 union {
49635 struct {
49636- atomic_t num_writes;
49637- atomic_t num_reads;
49638- atomic_t num_flushes;
49639- atomic_t num_oplock_brks;
49640- atomic_t num_opens;
49641- atomic_t num_closes;
49642- atomic_t num_deletes;
49643- atomic_t num_mkdirs;
49644- atomic_t num_posixopens;
49645- atomic_t num_posixmkdirs;
49646- atomic_t num_rmdirs;
49647- atomic_t num_renames;
49648- atomic_t num_t2renames;
49649- atomic_t num_ffirst;
49650- atomic_t num_fnext;
49651- atomic_t num_fclose;
49652- atomic_t num_hardlinks;
49653- atomic_t num_symlinks;
49654- atomic_t num_locks;
49655- atomic_t num_acl_get;
49656- atomic_t num_acl_set;
49657+ atomic_unchecked_t num_writes;
49658+ atomic_unchecked_t num_reads;
49659+ atomic_unchecked_t num_flushes;
49660+ atomic_unchecked_t num_oplock_brks;
49661+ atomic_unchecked_t num_opens;
49662+ atomic_unchecked_t num_closes;
49663+ atomic_unchecked_t num_deletes;
49664+ atomic_unchecked_t num_mkdirs;
49665+ atomic_unchecked_t num_posixopens;
49666+ atomic_unchecked_t num_posixmkdirs;
49667+ atomic_unchecked_t num_rmdirs;
49668+ atomic_unchecked_t num_renames;
49669+ atomic_unchecked_t num_t2renames;
49670+ atomic_unchecked_t num_ffirst;
49671+ atomic_unchecked_t num_fnext;
49672+ atomic_unchecked_t num_fclose;
49673+ atomic_unchecked_t num_hardlinks;
49674+ atomic_unchecked_t num_symlinks;
49675+ atomic_unchecked_t num_locks;
49676+ atomic_unchecked_t num_acl_get;
49677+ atomic_unchecked_t num_acl_set;
49678 } cifs_stats;
49679 #ifdef CONFIG_CIFS_SMB2
49680 struct {
49681- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
49682- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
49683+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
49684+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
49685 } smb2_stats;
49686 #endif /* CONFIG_CIFS_SMB2 */
49687 } stats;
49688@@ -1080,7 +1080,7 @@ convert_delimiter(char *path, char delim)
49689 }
49690
49691 #ifdef CONFIG_CIFS_STATS
49692-#define cifs_stats_inc atomic_inc
49693+#define cifs_stats_inc atomic_inc_unchecked
49694
49695 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
49696 unsigned int bytes)
49697@@ -1445,8 +1445,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
49698 /* Various Debug counters */
49699 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
49700 #ifdef CONFIG_CIFS_STATS2
49701-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
49702-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
49703+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
49704+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
49705 #endif
49706 GLOBAL_EXTERN atomic_t smBufAllocCount;
49707 GLOBAL_EXTERN atomic_t midCount;
49708diff --git a/fs/cifs/link.c b/fs/cifs/link.c
49709index 51dc2fb..1e12a33 100644
49710--- a/fs/cifs/link.c
49711+++ b/fs/cifs/link.c
49712@@ -616,7 +616,7 @@ symlink_exit:
49713
49714 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
49715 {
49716- char *p = nd_get_link(nd);
49717+ const char *p = nd_get_link(nd);
49718 if (!IS_ERR(p))
49719 kfree(p);
49720 }
49721diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
49722index 3a00c0d..42d901c 100644
49723--- a/fs/cifs/misc.c
49724+++ b/fs/cifs/misc.c
49725@@ -169,7 +169,7 @@ cifs_buf_get(void)
49726 memset(ret_buf, 0, buf_size + 3);
49727 atomic_inc(&bufAllocCount);
49728 #ifdef CONFIG_CIFS_STATS2
49729- atomic_inc(&totBufAllocCount);
49730+ atomic_inc_unchecked(&totBufAllocCount);
49731 #endif /* CONFIG_CIFS_STATS2 */
49732 }
49733
49734@@ -204,7 +204,7 @@ cifs_small_buf_get(void)
49735 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
49736 atomic_inc(&smBufAllocCount);
49737 #ifdef CONFIG_CIFS_STATS2
49738- atomic_inc(&totSmBufAllocCount);
49739+ atomic_inc_unchecked(&totSmBufAllocCount);
49740 #endif /* CONFIG_CIFS_STATS2 */
49741
49742 }
49743diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
49744index 47bc5a8..10decbe 100644
49745--- a/fs/cifs/smb1ops.c
49746+++ b/fs/cifs/smb1ops.c
49747@@ -586,27 +586,27 @@ static void
49748 cifs_clear_stats(struct cifs_tcon *tcon)
49749 {
49750 #ifdef CONFIG_CIFS_STATS
49751- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
49752- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
49753- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
49754- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
49755- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
49756- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
49757- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
49758- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
49759- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
49760- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
49761- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
49762- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
49763- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
49764- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
49765- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
49766- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
49767- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
49768- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
49769- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
49770- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
49771- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
49772+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
49773+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
49774+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
49775+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
49776+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
49777+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
49778+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
49779+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
49780+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
49781+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
49782+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
49783+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
49784+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
49785+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
49786+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
49787+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
49788+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
49789+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
49790+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
49791+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
49792+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
49793 #endif
49794 }
49795
49796@@ -615,36 +615,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
49797 {
49798 #ifdef CONFIG_CIFS_STATS
49799 seq_printf(m, " Oplocks breaks: %d",
49800- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
49801+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
49802 seq_printf(m, "\nReads: %d Bytes: %llu",
49803- atomic_read(&tcon->stats.cifs_stats.num_reads),
49804+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
49805 (long long)(tcon->bytes_read));
49806 seq_printf(m, "\nWrites: %d Bytes: %llu",
49807- atomic_read(&tcon->stats.cifs_stats.num_writes),
49808+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
49809 (long long)(tcon->bytes_written));
49810 seq_printf(m, "\nFlushes: %d",
49811- atomic_read(&tcon->stats.cifs_stats.num_flushes));
49812+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
49813 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
49814- atomic_read(&tcon->stats.cifs_stats.num_locks),
49815- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
49816- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
49817+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
49818+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
49819+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
49820 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
49821- atomic_read(&tcon->stats.cifs_stats.num_opens),
49822- atomic_read(&tcon->stats.cifs_stats.num_closes),
49823- atomic_read(&tcon->stats.cifs_stats.num_deletes));
49824+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
49825+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
49826+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
49827 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
49828- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
49829- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
49830+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
49831+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
49832 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
49833- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
49834- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
49835+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
49836+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
49837 seq_printf(m, "\nRenames: %d T2 Renames %d",
49838- atomic_read(&tcon->stats.cifs_stats.num_renames),
49839- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
49840+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
49841+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
49842 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
49843- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
49844- atomic_read(&tcon->stats.cifs_stats.num_fnext),
49845- atomic_read(&tcon->stats.cifs_stats.num_fclose));
49846+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
49847+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
49848+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
49849 #endif
49850 }
49851
49852diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
49853index bceffe7..cd1ae59 100644
49854--- a/fs/cifs/smb2ops.c
49855+++ b/fs/cifs/smb2ops.c
49856@@ -274,8 +274,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
49857 #ifdef CONFIG_CIFS_STATS
49858 int i;
49859 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
49860- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
49861- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
49862+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
49863+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
49864 }
49865 #endif
49866 }
49867@@ -284,66 +284,66 @@ static void
49868 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
49869 {
49870 #ifdef CONFIG_CIFS_STATS
49871- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
49872- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
49873+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
49874+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
49875 seq_printf(m, "\nNegotiates: %d sent %d failed",
49876- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
49877- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
49878+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
49879+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
49880 seq_printf(m, "\nSessionSetups: %d sent %d failed",
49881- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
49882- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
49883+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
49884+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
49885 #define SMB2LOGOFF 0x0002 /* trivial request/resp */
49886 seq_printf(m, "\nLogoffs: %d sent %d failed",
49887- atomic_read(&sent[SMB2_LOGOFF_HE]),
49888- atomic_read(&failed[SMB2_LOGOFF_HE]));
49889+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
49890+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
49891 seq_printf(m, "\nTreeConnects: %d sent %d failed",
49892- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
49893- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
49894+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
49895+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
49896 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
49897- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
49898- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
49899+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
49900+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
49901 seq_printf(m, "\nCreates: %d sent %d failed",
49902- atomic_read(&sent[SMB2_CREATE_HE]),
49903- atomic_read(&failed[SMB2_CREATE_HE]));
49904+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
49905+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
49906 seq_printf(m, "\nCloses: %d sent %d failed",
49907- atomic_read(&sent[SMB2_CLOSE_HE]),
49908- atomic_read(&failed[SMB2_CLOSE_HE]));
49909+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
49910+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
49911 seq_printf(m, "\nFlushes: %d sent %d failed",
49912- atomic_read(&sent[SMB2_FLUSH_HE]),
49913- atomic_read(&failed[SMB2_FLUSH_HE]));
49914+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
49915+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
49916 seq_printf(m, "\nReads: %d sent %d failed",
49917- atomic_read(&sent[SMB2_READ_HE]),
49918- atomic_read(&failed[SMB2_READ_HE]));
49919+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
49920+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
49921 seq_printf(m, "\nWrites: %d sent %d failed",
49922- atomic_read(&sent[SMB2_WRITE_HE]),
49923- atomic_read(&failed[SMB2_WRITE_HE]));
49924+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
49925+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
49926 seq_printf(m, "\nLocks: %d sent %d failed",
49927- atomic_read(&sent[SMB2_LOCK_HE]),
49928- atomic_read(&failed[SMB2_LOCK_HE]));
49929+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
49930+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
49931 seq_printf(m, "\nIOCTLs: %d sent %d failed",
49932- atomic_read(&sent[SMB2_IOCTL_HE]),
49933- atomic_read(&failed[SMB2_IOCTL_HE]));
49934+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
49935+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
49936 seq_printf(m, "\nCancels: %d sent %d failed",
49937- atomic_read(&sent[SMB2_CANCEL_HE]),
49938- atomic_read(&failed[SMB2_CANCEL_HE]));
49939+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
49940+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
49941 seq_printf(m, "\nEchos: %d sent %d failed",
49942- atomic_read(&sent[SMB2_ECHO_HE]),
49943- atomic_read(&failed[SMB2_ECHO_HE]));
49944+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
49945+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
49946 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
49947- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
49948- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
49949+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
49950+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
49951 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
49952- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
49953- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
49954+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
49955+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
49956 seq_printf(m, "\nQueryInfos: %d sent %d failed",
49957- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
49958- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
49959+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
49960+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
49961 seq_printf(m, "\nSetInfos: %d sent %d failed",
49962- atomic_read(&sent[SMB2_SET_INFO_HE]),
49963- atomic_read(&failed[SMB2_SET_INFO_HE]));
49964+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
49965+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
49966 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
49967- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
49968- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
49969+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
49970+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
49971 #endif
49972 }
49973
49974diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
49975index 41d9d07..dbb4772 100644
49976--- a/fs/cifs/smb2pdu.c
49977+++ b/fs/cifs/smb2pdu.c
49978@@ -1761,8 +1761,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
49979 default:
49980 cERROR(1, "info level %u isn't supported",
49981 srch_inf->info_level);
49982- rc = -EINVAL;
49983- goto qdir_exit;
49984+ return -EINVAL;
49985 }
49986
49987 req->FileIndex = cpu_to_le32(index);
49988diff --git a/fs/coda/cache.c b/fs/coda/cache.c
49989index 958ae0e..505c9d0 100644
49990--- a/fs/coda/cache.c
49991+++ b/fs/coda/cache.c
49992@@ -24,7 +24,7 @@
49993 #include "coda_linux.h"
49994 #include "coda_cache.h"
49995
49996-static atomic_t permission_epoch = ATOMIC_INIT(0);
49997+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
49998
49999 /* replace or extend an acl cache hit */
50000 void coda_cache_enter(struct inode *inode, int mask)
50001@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
50002 struct coda_inode_info *cii = ITOC(inode);
50003
50004 spin_lock(&cii->c_lock);
50005- cii->c_cached_epoch = atomic_read(&permission_epoch);
50006+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
50007 if (cii->c_uid != current_fsuid()) {
50008 cii->c_uid = current_fsuid();
50009 cii->c_cached_perm = mask;
50010@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
50011 {
50012 struct coda_inode_info *cii = ITOC(inode);
50013 spin_lock(&cii->c_lock);
50014- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
50015+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
50016 spin_unlock(&cii->c_lock);
50017 }
50018
50019 /* remove all acl caches */
50020 void coda_cache_clear_all(struct super_block *sb)
50021 {
50022- atomic_inc(&permission_epoch);
50023+ atomic_inc_unchecked(&permission_epoch);
50024 }
50025
50026
50027@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
50028 spin_lock(&cii->c_lock);
50029 hit = (mask & cii->c_cached_perm) == mask &&
50030 cii->c_uid == current_fsuid() &&
50031- cii->c_cached_epoch == atomic_read(&permission_epoch);
50032+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
50033 spin_unlock(&cii->c_lock);
50034
50035 return hit;
50036diff --git a/fs/compat.c b/fs/compat.c
50037index a06dcbc..dacb6d3 100644
50038--- a/fs/compat.c
50039+++ b/fs/compat.c
50040@@ -54,7 +54,7 @@
50041 #include <asm/ioctls.h>
50042 #include "internal.h"
50043
50044-int compat_log = 1;
50045+int compat_log = 0;
50046
50047 int compat_printk(const char *fmt, ...)
50048 {
50049@@ -490,7 +490,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
50050
50051 set_fs(KERNEL_DS);
50052 /* The __user pointer cast is valid because of the set_fs() */
50053- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
50054+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
50055 set_fs(oldfs);
50056 /* truncating is ok because it's a user address */
50057 if (!ret)
50058@@ -548,7 +548,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
50059 goto out;
50060
50061 ret = -EINVAL;
50062- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
50063+ if (nr_segs > UIO_MAXIOV)
50064 goto out;
50065 if (nr_segs > fast_segs) {
50066 ret = -ENOMEM;
50067@@ -835,6 +835,7 @@ struct compat_old_linux_dirent {
50068
50069 struct compat_readdir_callback {
50070 struct compat_old_linux_dirent __user *dirent;
50071+ struct file * file;
50072 int result;
50073 };
50074
50075@@ -852,6 +853,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
50076 buf->result = -EOVERFLOW;
50077 return -EOVERFLOW;
50078 }
50079+
50080+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
50081+ return 0;
50082+
50083 buf->result++;
50084 dirent = buf->dirent;
50085 if (!access_ok(VERIFY_WRITE, dirent,
50086@@ -882,6 +887,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
50087
50088 buf.result = 0;
50089 buf.dirent = dirent;
50090+ buf.file = f.file;
50091
50092 error = vfs_readdir(f.file, compat_fillonedir, &buf);
50093 if (buf.result)
50094@@ -901,6 +907,7 @@ struct compat_linux_dirent {
50095 struct compat_getdents_callback {
50096 struct compat_linux_dirent __user *current_dir;
50097 struct compat_linux_dirent __user *previous;
50098+ struct file * file;
50099 int count;
50100 int error;
50101 };
50102@@ -922,6 +929,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
50103 buf->error = -EOVERFLOW;
50104 return -EOVERFLOW;
50105 }
50106+
50107+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
50108+ return 0;
50109+
50110 dirent = buf->previous;
50111 if (dirent) {
50112 if (__put_user(offset, &dirent->d_off))
50113@@ -967,6 +978,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
50114 buf.previous = NULL;
50115 buf.count = count;
50116 buf.error = 0;
50117+ buf.file = f.file;
50118
50119 error = vfs_readdir(f.file, compat_filldir, &buf);
50120 if (error >= 0)
50121@@ -987,6 +999,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
50122 struct compat_getdents_callback64 {
50123 struct linux_dirent64 __user *current_dir;
50124 struct linux_dirent64 __user *previous;
50125+ struct file * file;
50126 int count;
50127 int error;
50128 };
50129@@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
50130 buf->error = -EINVAL; /* only used if we fail.. */
50131 if (reclen > buf->count)
50132 return -EINVAL;
50133+
50134+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
50135+ return 0;
50136+
50137 dirent = buf->previous;
50138
50139 if (dirent) {
50140@@ -1052,13 +1069,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
50141 buf.previous = NULL;
50142 buf.count = count;
50143 buf.error = 0;
50144+ buf.file = f.file;
50145
50146 error = vfs_readdir(f.file, compat_filldir64, &buf);
50147 if (error >= 0)
50148 error = buf.error;
50149 lastdirent = buf.previous;
50150 if (lastdirent) {
50151- typeof(lastdirent->d_off) d_off = f.file->f_pos;
50152+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
50153 if (__put_user_unaligned(d_off, &lastdirent->d_off))
50154 error = -EFAULT;
50155 else
50156diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
50157index a81147e..20bf2b5 100644
50158--- a/fs/compat_binfmt_elf.c
50159+++ b/fs/compat_binfmt_elf.c
50160@@ -30,11 +30,13 @@
50161 #undef elf_phdr
50162 #undef elf_shdr
50163 #undef elf_note
50164+#undef elf_dyn
50165 #undef elf_addr_t
50166 #define elfhdr elf32_hdr
50167 #define elf_phdr elf32_phdr
50168 #define elf_shdr elf32_shdr
50169 #define elf_note elf32_note
50170+#define elf_dyn Elf32_Dyn
50171 #define elf_addr_t Elf32_Addr
50172
50173 /*
50174diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
50175index e2f57a0..3c78771 100644
50176--- a/fs/compat_ioctl.c
50177+++ b/fs/compat_ioctl.c
50178@@ -623,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
50179 return -EFAULT;
50180 if (__get_user(udata, &ss32->iomem_base))
50181 return -EFAULT;
50182- ss.iomem_base = compat_ptr(udata);
50183+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
50184 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
50185 __get_user(ss.port_high, &ss32->port_high))
50186 return -EFAULT;
50187@@ -798,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
50188 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
50189 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
50190 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
50191- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
50192+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
50193 return -EFAULT;
50194
50195 return ioctl_preallocate(file, p);
50196@@ -1620,8 +1620,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
50197 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
50198 {
50199 unsigned int a, b;
50200- a = *(unsigned int *)p;
50201- b = *(unsigned int *)q;
50202+ a = *(const unsigned int *)p;
50203+ b = *(const unsigned int *)q;
50204 if (a > b)
50205 return 1;
50206 if (a < b)
50207diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
50208index 712b10f..c33c4ca 100644
50209--- a/fs/configfs/dir.c
50210+++ b/fs/configfs/dir.c
50211@@ -1037,10 +1037,11 @@ static int configfs_dump(struct configfs_dirent *sd, int level)
50212 static int configfs_depend_prep(struct dentry *origin,
50213 struct config_item *target)
50214 {
50215- struct configfs_dirent *child_sd, *sd = origin->d_fsdata;
50216+ struct configfs_dirent *child_sd, *sd;
50217 int ret = 0;
50218
50219- BUG_ON(!origin || !sd);
50220+ BUG_ON(!origin || !origin->d_fsdata);
50221+ sd = origin->d_fsdata;
50222
50223 if (sd->s_element == target) /* Boo-yah */
50224 goto out;
50225@@ -1564,7 +1565,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
50226 }
50227 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
50228 struct configfs_dirent *next;
50229- const char * name;
50230+ const unsigned char * name;
50231+ char d_name[sizeof(next->s_dentry->d_iname)];
50232 int len;
50233 struct inode *inode = NULL;
50234
50235@@ -1574,7 +1576,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
50236 continue;
50237
50238 name = configfs_get_name(next);
50239- len = strlen(name);
50240+ if (next->s_dentry && name == next->s_dentry->d_iname) {
50241+ len = next->s_dentry->d_name.len;
50242+ memcpy(d_name, name, len);
50243+ name = d_name;
50244+ } else
50245+ len = strlen(name);
50246
50247 /*
50248 * We'll have a dentry and an inode for
50249diff --git a/fs/coredump.c b/fs/coredump.c
50250index 1774932..5812106 100644
50251--- a/fs/coredump.c
50252+++ b/fs/coredump.c
50253@@ -52,7 +52,7 @@ struct core_name {
50254 char *corename;
50255 int used, size;
50256 };
50257-static atomic_t call_count = ATOMIC_INIT(1);
50258+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
50259
50260 /* The maximal length of core_pattern is also specified in sysctl.c */
50261
50262@@ -60,7 +60,7 @@ static int expand_corename(struct core_name *cn)
50263 {
50264 char *old_corename = cn->corename;
50265
50266- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
50267+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
50268 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
50269
50270 if (!cn->corename) {
50271@@ -157,7 +157,7 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm)
50272 int pid_in_pattern = 0;
50273 int err = 0;
50274
50275- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
50276+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
50277 cn->corename = kmalloc(cn->size, GFP_KERNEL);
50278 cn->used = 0;
50279
50280@@ -414,17 +414,17 @@ static void wait_for_dump_helpers(struct file *file)
50281 pipe = file->f_path.dentry->d_inode->i_pipe;
50282
50283 pipe_lock(pipe);
50284- pipe->readers++;
50285- pipe->writers--;
50286+ atomic_inc(&pipe->readers);
50287+ atomic_dec(&pipe->writers);
50288
50289- while ((pipe->readers > 1) && (!signal_pending(current))) {
50290+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
50291 wake_up_interruptible_sync(&pipe->wait);
50292 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
50293 pipe_wait(pipe);
50294 }
50295
50296- pipe->readers--;
50297- pipe->writers++;
50298+ atomic_dec(&pipe->readers);
50299+ atomic_inc(&pipe->writers);
50300 pipe_unlock(pipe);
50301
50302 }
50303@@ -471,7 +471,8 @@ void do_coredump(siginfo_t *siginfo)
50304 int ispipe;
50305 struct files_struct *displaced;
50306 bool need_nonrelative = false;
50307- static atomic_t core_dump_count = ATOMIC_INIT(0);
50308+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
50309+ long signr = siginfo->si_signo;
50310 struct coredump_params cprm = {
50311 .siginfo = siginfo,
50312 .regs = signal_pt_regs(),
50313@@ -484,7 +485,10 @@ void do_coredump(siginfo_t *siginfo)
50314 .mm_flags = mm->flags,
50315 };
50316
50317- audit_core_dumps(siginfo->si_signo);
50318+ audit_core_dumps(signr);
50319+
50320+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
50321+ gr_handle_brute_attach(cprm.mm_flags);
50322
50323 binfmt = mm->binfmt;
50324 if (!binfmt || !binfmt->core_dump)
50325@@ -508,7 +512,7 @@ void do_coredump(siginfo_t *siginfo)
50326 need_nonrelative = true;
50327 }
50328
50329- retval = coredump_wait(siginfo->si_signo, &core_state);
50330+ retval = coredump_wait(signr, &core_state);
50331 if (retval < 0)
50332 goto fail_creds;
50333
50334@@ -556,7 +560,7 @@ void do_coredump(siginfo_t *siginfo)
50335 }
50336 cprm.limit = RLIM_INFINITY;
50337
50338- dump_count = atomic_inc_return(&core_dump_count);
50339+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
50340 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
50341 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
50342 task_tgid_vnr(current), current->comm);
50343@@ -583,6 +587,8 @@ void do_coredump(siginfo_t *siginfo)
50344 } else {
50345 struct inode *inode;
50346
50347+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
50348+
50349 if (cprm.limit < binfmt->min_coredump)
50350 goto fail_unlock;
50351
50352@@ -640,7 +646,7 @@ close_fail:
50353 filp_close(cprm.file, NULL);
50354 fail_dropcount:
50355 if (ispipe)
50356- atomic_dec(&core_dump_count);
50357+ atomic_dec_unchecked(&core_dump_count);
50358 fail_unlock:
50359 kfree(cn.corename);
50360 fail_corename:
50361@@ -659,7 +665,7 @@ fail:
50362 */
50363 int dump_write(struct file *file, const void *addr, int nr)
50364 {
50365- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
50366+ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
50367 }
50368 EXPORT_SYMBOL(dump_write);
50369
50370diff --git a/fs/dcache.c b/fs/dcache.c
50371index c3bbf85..5b71101 100644
50372--- a/fs/dcache.c
50373+++ b/fs/dcache.c
50374@@ -3139,7 +3139,7 @@ void __init vfs_caches_init(unsigned long mempages)
50375 mempages -= reserve;
50376
50377 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
50378- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
50379+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
50380
50381 dcache_init();
50382 inode_init();
50383diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
50384index a5f12b7..4ee8a6f 100644
50385--- a/fs/debugfs/inode.c
50386+++ b/fs/debugfs/inode.c
50387@@ -415,7 +415,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
50388 */
50389 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
50390 {
50391+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
50392+ return __create_file(name, S_IFDIR | S_IRWXU,
50393+#else
50394 return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
50395+#endif
50396 parent, NULL, NULL);
50397 }
50398 EXPORT_SYMBOL_GPL(debugfs_create_dir);
50399diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
50400index cc7709e..7e7211f 100644
50401--- a/fs/ecryptfs/inode.c
50402+++ b/fs/ecryptfs/inode.c
50403@@ -674,7 +674,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
50404 old_fs = get_fs();
50405 set_fs(get_ds());
50406 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
50407- (char __user *)lower_buf,
50408+ (char __force_user *)lower_buf,
50409 PATH_MAX);
50410 set_fs(old_fs);
50411 if (rc < 0)
50412@@ -706,7 +706,7 @@ out:
50413 static void
50414 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
50415 {
50416- char *buf = nd_get_link(nd);
50417+ const char *buf = nd_get_link(nd);
50418 if (!IS_ERR(buf)) {
50419 /* Free the char* */
50420 kfree(buf);
50421diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
50422index 412e6ed..4292d22 100644
50423--- a/fs/ecryptfs/miscdev.c
50424+++ b/fs/ecryptfs/miscdev.c
50425@@ -315,7 +315,7 @@ check_list:
50426 goto out_unlock_msg_ctx;
50427 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
50428 if (msg_ctx->msg) {
50429- if (copy_to_user(&buf[i], packet_length, packet_length_size))
50430+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
50431 goto out_unlock_msg_ctx;
50432 i += packet_length_size;
50433 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
50434diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
50435index b2a34a1..162fa69 100644
50436--- a/fs/ecryptfs/read_write.c
50437+++ b/fs/ecryptfs/read_write.c
50438@@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
50439 return -EIO;
50440 fs_save = get_fs();
50441 set_fs(get_ds());
50442- rc = vfs_write(lower_file, data, size, &offset);
50443+ rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
50444 set_fs(fs_save);
50445 mark_inode_dirty_sync(ecryptfs_inode);
50446 return rc;
50447@@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
50448 return -EIO;
50449 fs_save = get_fs();
50450 set_fs(get_ds());
50451- rc = vfs_read(lower_file, data, size, &offset);
50452+ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
50453 set_fs(fs_save);
50454 return rc;
50455 }
50456diff --git a/fs/exec.c b/fs/exec.c
50457index 20df02c..1b1d946 100644
50458--- a/fs/exec.c
50459+++ b/fs/exec.c
50460@@ -55,6 +55,17 @@
50461 #include <linux/pipe_fs_i.h>
50462 #include <linux/oom.h>
50463 #include <linux/compat.h>
50464+#include <linux/random.h>
50465+#include <linux/seq_file.h>
50466+#include <linux/coredump.h>
50467+#include <linux/mman.h>
50468+
50469+#ifdef CONFIG_PAX_REFCOUNT
50470+#include <linux/kallsyms.h>
50471+#include <linux/kdebug.h>
50472+#endif
50473+
50474+#include <trace/events/fs.h>
50475
50476 #include <asm/uaccess.h>
50477 #include <asm/mmu_context.h>
50478@@ -66,6 +77,18 @@
50479
50480 #include <trace/events/sched.h>
50481
50482+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
50483+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
50484+{
50485+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
50486+}
50487+#endif
50488+
50489+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
50490+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
50491+EXPORT_SYMBOL(pax_set_initial_flags_func);
50492+#endif
50493+
50494 int suid_dumpable = 0;
50495
50496 static LIST_HEAD(formats);
50497@@ -75,8 +98,8 @@ void __register_binfmt(struct linux_binfmt * fmt, int insert)
50498 {
50499 BUG_ON(!fmt);
50500 write_lock(&binfmt_lock);
50501- insert ? list_add(&fmt->lh, &formats) :
50502- list_add_tail(&fmt->lh, &formats);
50503+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
50504+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
50505 write_unlock(&binfmt_lock);
50506 }
50507
50508@@ -85,7 +108,7 @@ EXPORT_SYMBOL(__register_binfmt);
50509 void unregister_binfmt(struct linux_binfmt * fmt)
50510 {
50511 write_lock(&binfmt_lock);
50512- list_del(&fmt->lh);
50513+ pax_list_del((struct list_head *)&fmt->lh);
50514 write_unlock(&binfmt_lock);
50515 }
50516
50517@@ -180,18 +203,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
50518 int write)
50519 {
50520 struct page *page;
50521- int ret;
50522
50523-#ifdef CONFIG_STACK_GROWSUP
50524- if (write) {
50525- ret = expand_downwards(bprm->vma, pos);
50526- if (ret < 0)
50527- return NULL;
50528- }
50529-#endif
50530- ret = get_user_pages(current, bprm->mm, pos,
50531- 1, write, 1, &page, NULL);
50532- if (ret <= 0)
50533+ if (0 > expand_downwards(bprm->vma, pos))
50534+ return NULL;
50535+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
50536 return NULL;
50537
50538 if (write) {
50539@@ -207,6 +222,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
50540 if (size <= ARG_MAX)
50541 return page;
50542
50543+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50544+ // only allow 512KB for argv+env on suid/sgid binaries
50545+ // to prevent easy ASLR exhaustion
50546+ if (((!uid_eq(bprm->cred->euid, current_euid())) ||
50547+ (!gid_eq(bprm->cred->egid, current_egid()))) &&
50548+ (size > (512 * 1024))) {
50549+ put_page(page);
50550+ return NULL;
50551+ }
50552+#endif
50553+
50554 /*
50555 * Limit to 1/4-th the stack size for the argv+env strings.
50556 * This ensures that:
50557@@ -266,6 +292,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
50558 vma->vm_end = STACK_TOP_MAX;
50559 vma->vm_start = vma->vm_end - PAGE_SIZE;
50560 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
50561+
50562+#ifdef CONFIG_PAX_SEGMEXEC
50563+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
50564+#endif
50565+
50566 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
50567 INIT_LIST_HEAD(&vma->anon_vma_chain);
50568
50569@@ -276,6 +307,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
50570 mm->stack_vm = mm->total_vm = 1;
50571 up_write(&mm->mmap_sem);
50572 bprm->p = vma->vm_end - sizeof(void *);
50573+
50574+#ifdef CONFIG_PAX_RANDUSTACK
50575+ if (randomize_va_space)
50576+ bprm->p ^= random32() & ~PAGE_MASK;
50577+#endif
50578+
50579 return 0;
50580 err:
50581 up_write(&mm->mmap_sem);
50582@@ -396,7 +433,7 @@ struct user_arg_ptr {
50583 } ptr;
50584 };
50585
50586-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
50587+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
50588 {
50589 const char __user *native;
50590
50591@@ -405,14 +442,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
50592 compat_uptr_t compat;
50593
50594 if (get_user(compat, argv.ptr.compat + nr))
50595- return ERR_PTR(-EFAULT);
50596+ return (const char __force_user *)ERR_PTR(-EFAULT);
50597
50598 return compat_ptr(compat);
50599 }
50600 #endif
50601
50602 if (get_user(native, argv.ptr.native + nr))
50603- return ERR_PTR(-EFAULT);
50604+ return (const char __force_user *)ERR_PTR(-EFAULT);
50605
50606 return native;
50607 }
50608@@ -431,7 +468,7 @@ static int count(struct user_arg_ptr argv, int max)
50609 if (!p)
50610 break;
50611
50612- if (IS_ERR(p))
50613+ if (IS_ERR((const char __force_kernel *)p))
50614 return -EFAULT;
50615
50616 if (i >= max)
50617@@ -466,7 +503,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
50618
50619 ret = -EFAULT;
50620 str = get_user_arg_ptr(argv, argc);
50621- if (IS_ERR(str))
50622+ if (IS_ERR((const char __force_kernel *)str))
50623 goto out;
50624
50625 len = strnlen_user(str, MAX_ARG_STRLEN);
50626@@ -548,7 +585,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
50627 int r;
50628 mm_segment_t oldfs = get_fs();
50629 struct user_arg_ptr argv = {
50630- .ptr.native = (const char __user *const __user *)__argv,
50631+ .ptr.native = (const char __force_user *const __force_user *)__argv,
50632 };
50633
50634 set_fs(KERNEL_DS);
50635@@ -583,7 +620,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
50636 unsigned long new_end = old_end - shift;
50637 struct mmu_gather tlb;
50638
50639- BUG_ON(new_start > new_end);
50640+ if (new_start >= new_end || new_start < mmap_min_addr)
50641+ return -ENOMEM;
50642
50643 /*
50644 * ensure there are no vmas between where we want to go
50645@@ -592,6 +630,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
50646 if (vma != find_vma(mm, new_start))
50647 return -EFAULT;
50648
50649+#ifdef CONFIG_PAX_SEGMEXEC
50650+ BUG_ON(pax_find_mirror_vma(vma));
50651+#endif
50652+
50653 /*
50654 * cover the whole range: [new_start, old_end)
50655 */
50656@@ -672,10 +714,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
50657 stack_top = arch_align_stack(stack_top);
50658 stack_top = PAGE_ALIGN(stack_top);
50659
50660- if (unlikely(stack_top < mmap_min_addr) ||
50661- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
50662- return -ENOMEM;
50663-
50664 stack_shift = vma->vm_end - stack_top;
50665
50666 bprm->p -= stack_shift;
50667@@ -687,8 +725,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
50668 bprm->exec -= stack_shift;
50669
50670 down_write(&mm->mmap_sem);
50671+
50672+ /* Move stack pages down in memory. */
50673+ if (stack_shift) {
50674+ ret = shift_arg_pages(vma, stack_shift);
50675+ if (ret)
50676+ goto out_unlock;
50677+ }
50678+
50679 vm_flags = VM_STACK_FLAGS;
50680
50681+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
50682+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
50683+ vm_flags &= ~VM_EXEC;
50684+
50685+#ifdef CONFIG_PAX_MPROTECT
50686+ if (mm->pax_flags & MF_PAX_MPROTECT)
50687+ vm_flags &= ~VM_MAYEXEC;
50688+#endif
50689+
50690+ }
50691+#endif
50692+
50693 /*
50694 * Adjust stack execute permissions; explicitly enable for
50695 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
50696@@ -707,13 +765,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
50697 goto out_unlock;
50698 BUG_ON(prev != vma);
50699
50700- /* Move stack pages down in memory. */
50701- if (stack_shift) {
50702- ret = shift_arg_pages(vma, stack_shift);
50703- if (ret)
50704- goto out_unlock;
50705- }
50706-
50707 /* mprotect_fixup is overkill to remove the temporary stack flags */
50708 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
50709
50710@@ -737,6 +788,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
50711 #endif
50712 current->mm->start_stack = bprm->p;
50713 ret = expand_stack(vma, stack_base);
50714+
50715+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
50716+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
50717+ unsigned long size, flags, vm_flags;
50718+
50719+ size = STACK_TOP - vma->vm_end;
50720+ flags = MAP_FIXED | MAP_PRIVATE;
50721+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
50722+
50723+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, flags, vm_flags, 0);
50724+
50725+#ifdef CONFIG_X86
50726+ if (!ret) {
50727+ size = mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
50728+ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), flags, vm_flags, 0);
50729+ }
50730+#endif
50731+
50732+ }
50733+#endif
50734+
50735 if (ret)
50736 ret = -EFAULT;
50737
50738@@ -772,6 +844,8 @@ struct file *open_exec(const char *name)
50739
50740 fsnotify_open(file);
50741
50742+ trace_open_exec(name);
50743+
50744 err = deny_write_access(file);
50745 if (err)
50746 goto exit;
50747@@ -795,7 +869,7 @@ int kernel_read(struct file *file, loff_t offset,
50748 old_fs = get_fs();
50749 set_fs(get_ds());
50750 /* The cast to a user pointer is valid due to the set_fs() */
50751- result = vfs_read(file, (void __user *)addr, count, &pos);
50752+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
50753 set_fs(old_fs);
50754 return result;
50755 }
50756@@ -1247,7 +1321,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
50757 }
50758 rcu_read_unlock();
50759
50760- if (p->fs->users > n_fs) {
50761+ if (atomic_read(&p->fs->users) > n_fs) {
50762 bprm->unsafe |= LSM_UNSAFE_SHARE;
50763 } else {
50764 res = -EAGAIN;
50765@@ -1447,6 +1521,31 @@ int search_binary_handler(struct linux_binprm *bprm)
50766
50767 EXPORT_SYMBOL(search_binary_handler);
50768
50769+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50770+static DEFINE_PER_CPU(u64, exec_counter);
50771+static int __init init_exec_counters(void)
50772+{
50773+ unsigned int cpu;
50774+
50775+ for_each_possible_cpu(cpu) {
50776+ per_cpu(exec_counter, cpu) = (u64)cpu;
50777+ }
50778+
50779+ return 0;
50780+}
50781+early_initcall(init_exec_counters);
50782+static inline void increment_exec_counter(void)
50783+{
50784+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
50785+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
50786+}
50787+#else
50788+static inline void increment_exec_counter(void) {}
50789+#endif
50790+
50791+extern void gr_handle_exec_args(struct linux_binprm *bprm,
50792+ struct user_arg_ptr argv);
50793+
50794 /*
50795 * sys_execve() executes a new program.
50796 */
50797@@ -1454,6 +1553,11 @@ static int do_execve_common(const char *filename,
50798 struct user_arg_ptr argv,
50799 struct user_arg_ptr envp)
50800 {
50801+#ifdef CONFIG_GRKERNSEC
50802+ struct file *old_exec_file;
50803+ struct acl_subject_label *old_acl;
50804+ struct rlimit old_rlim[RLIM_NLIMITS];
50805+#endif
50806 struct linux_binprm *bprm;
50807 struct file *file;
50808 struct files_struct *displaced;
50809@@ -1461,6 +1565,8 @@ static int do_execve_common(const char *filename,
50810 int retval;
50811 const struct cred *cred = current_cred();
50812
50813+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
50814+
50815 /*
50816 * We move the actual failure in case of RLIMIT_NPROC excess from
50817 * set*uid() to execve() because too many poorly written programs
50818@@ -1501,12 +1607,27 @@ static int do_execve_common(const char *filename,
50819 if (IS_ERR(file))
50820 goto out_unmark;
50821
50822+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
50823+ retval = -EPERM;
50824+ goto out_file;
50825+ }
50826+
50827 sched_exec();
50828
50829 bprm->file = file;
50830 bprm->filename = filename;
50831 bprm->interp = filename;
50832
50833+ if (gr_process_user_ban()) {
50834+ retval = -EPERM;
50835+ goto out_file;
50836+ }
50837+
50838+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
50839+ retval = -EACCES;
50840+ goto out_file;
50841+ }
50842+
50843 retval = bprm_mm_init(bprm);
50844 if (retval)
50845 goto out_file;
50846@@ -1523,24 +1644,65 @@ static int do_execve_common(const char *filename,
50847 if (retval < 0)
50848 goto out;
50849
50850+#ifdef CONFIG_GRKERNSEC
50851+ old_acl = current->acl;
50852+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
50853+ old_exec_file = current->exec_file;
50854+ get_file(file);
50855+ current->exec_file = file;
50856+#endif
50857+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50858+ /* limit suid stack to 8MB
50859+ * we saved the old limits above and will restore them if this exec fails
50860+ */
50861+ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
50862+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
50863+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
50864+#endif
50865+
50866+ if (!gr_tpe_allow(file)) {
50867+ retval = -EACCES;
50868+ goto out_fail;
50869+ }
50870+
50871+ if (gr_check_crash_exec(file)) {
50872+ retval = -EACCES;
50873+ goto out_fail;
50874+ }
50875+
50876+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
50877+ bprm->unsafe);
50878+ if (retval < 0)
50879+ goto out_fail;
50880+
50881 retval = copy_strings_kernel(1, &bprm->filename, bprm);
50882 if (retval < 0)
50883- goto out;
50884+ goto out_fail;
50885
50886 bprm->exec = bprm->p;
50887 retval = copy_strings(bprm->envc, envp, bprm);
50888 if (retval < 0)
50889- goto out;
50890+ goto out_fail;
50891
50892 retval = copy_strings(bprm->argc, argv, bprm);
50893 if (retval < 0)
50894- goto out;
50895+ goto out_fail;
50896+
50897+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
50898+
50899+ gr_handle_exec_args(bprm, argv);
50900
50901 retval = search_binary_handler(bprm);
50902 if (retval < 0)
50903- goto out;
50904+ goto out_fail;
50905+#ifdef CONFIG_GRKERNSEC
50906+ if (old_exec_file)
50907+ fput(old_exec_file);
50908+#endif
50909
50910 /* execve succeeded */
50911+
50912+ increment_exec_counter();
50913 current->fs->in_exec = 0;
50914 current->in_execve = 0;
50915 acct_update_integrals(current);
50916@@ -1549,6 +1711,14 @@ static int do_execve_common(const char *filename,
50917 put_files_struct(displaced);
50918 return retval;
50919
50920+out_fail:
50921+#ifdef CONFIG_GRKERNSEC
50922+ current->acl = old_acl;
50923+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
50924+ fput(current->exec_file);
50925+ current->exec_file = old_exec_file;
50926+#endif
50927+
50928 out:
50929 if (bprm->mm) {
50930 acct_arg_size(bprm, 0);
50931@@ -1697,3 +1867,253 @@ asmlinkage long compat_sys_execve(const char __user * filename,
50932 return error;
50933 }
50934 #endif
50935+
50936+int pax_check_flags(unsigned long *flags)
50937+{
50938+ int retval = 0;
50939+
50940+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
50941+ if (*flags & MF_PAX_SEGMEXEC)
50942+ {
50943+ *flags &= ~MF_PAX_SEGMEXEC;
50944+ retval = -EINVAL;
50945+ }
50946+#endif
50947+
50948+ if ((*flags & MF_PAX_PAGEEXEC)
50949+
50950+#ifdef CONFIG_PAX_PAGEEXEC
50951+ && (*flags & MF_PAX_SEGMEXEC)
50952+#endif
50953+
50954+ )
50955+ {
50956+ *flags &= ~MF_PAX_PAGEEXEC;
50957+ retval = -EINVAL;
50958+ }
50959+
50960+ if ((*flags & MF_PAX_MPROTECT)
50961+
50962+#ifdef CONFIG_PAX_MPROTECT
50963+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
50964+#endif
50965+
50966+ )
50967+ {
50968+ *flags &= ~MF_PAX_MPROTECT;
50969+ retval = -EINVAL;
50970+ }
50971+
50972+ if ((*flags & MF_PAX_EMUTRAMP)
50973+
50974+#ifdef CONFIG_PAX_EMUTRAMP
50975+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
50976+#endif
50977+
50978+ )
50979+ {
50980+ *flags &= ~MF_PAX_EMUTRAMP;
50981+ retval = -EINVAL;
50982+ }
50983+
50984+ return retval;
50985+}
50986+
50987+EXPORT_SYMBOL(pax_check_flags);
50988+
50989+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
50990+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
50991+{
50992+ struct task_struct *tsk = current;
50993+ struct mm_struct *mm = current->mm;
50994+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
50995+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
50996+ char *path_exec = NULL;
50997+ char *path_fault = NULL;
50998+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
50999+ siginfo_t info = { };
51000+
51001+ if (buffer_exec && buffer_fault) {
51002+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
51003+
51004+ down_read(&mm->mmap_sem);
51005+ vma = mm->mmap;
51006+ while (vma && (!vma_exec || !vma_fault)) {
51007+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
51008+ vma_exec = vma;
51009+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
51010+ vma_fault = vma;
51011+ vma = vma->vm_next;
51012+ }
51013+ if (vma_exec) {
51014+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
51015+ if (IS_ERR(path_exec))
51016+ path_exec = "<path too long>";
51017+ else {
51018+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
51019+ if (path_exec) {
51020+ *path_exec = 0;
51021+ path_exec = buffer_exec;
51022+ } else
51023+ path_exec = "<path too long>";
51024+ }
51025+ }
51026+ if (vma_fault) {
51027+ start = vma_fault->vm_start;
51028+ end = vma_fault->vm_end;
51029+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
51030+ if (vma_fault->vm_file) {
51031+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
51032+ if (IS_ERR(path_fault))
51033+ path_fault = "<path too long>";
51034+ else {
51035+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
51036+ if (path_fault) {
51037+ *path_fault = 0;
51038+ path_fault = buffer_fault;
51039+ } else
51040+ path_fault = "<path too long>";
51041+ }
51042+ } else
51043+ path_fault = "<anonymous mapping>";
51044+ }
51045+ up_read(&mm->mmap_sem);
51046+ }
51047+ if (tsk->signal->curr_ip)
51048+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
51049+ else
51050+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
51051+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
51052+ from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
51053+ free_page((unsigned long)buffer_exec);
51054+ free_page((unsigned long)buffer_fault);
51055+ pax_report_insns(regs, pc, sp);
51056+ info.si_signo = SIGKILL;
51057+ info.si_errno = 0;
51058+ info.si_code = SI_KERNEL;
51059+ info.si_pid = 0;
51060+ info.si_uid = 0;
51061+ do_coredump(&info);
51062+}
51063+#endif
51064+
51065+#ifdef CONFIG_PAX_REFCOUNT
51066+void pax_report_refcount_overflow(struct pt_regs *regs)
51067+{
51068+ if (current->signal->curr_ip)
51069+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
51070+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
51071+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
51072+ else
51073+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
51074+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
51075+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
51076+ show_regs(regs);
51077+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
51078+}
51079+#endif
51080+
51081+#ifdef CONFIG_PAX_USERCOPY
51082+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
51083+static noinline int check_stack_object(const void *obj, unsigned long len)
51084+{
51085+ const void * const stack = task_stack_page(current);
51086+ const void * const stackend = stack + THREAD_SIZE;
51087+
51088+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
51089+ const void *frame = NULL;
51090+ const void *oldframe;
51091+#endif
51092+
51093+ if (obj + len < obj)
51094+ return -1;
51095+
51096+ if (obj + len <= stack || stackend <= obj)
51097+ return 0;
51098+
51099+ if (obj < stack || stackend < obj + len)
51100+ return -1;
51101+
51102+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
51103+ oldframe = __builtin_frame_address(1);
51104+ if (oldframe)
51105+ frame = __builtin_frame_address(2);
51106+ /*
51107+ low ----------------------------------------------> high
51108+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
51109+ ^----------------^
51110+ allow copies only within here
51111+ */
51112+ while (stack <= frame && frame < stackend) {
51113+ /* if obj + len extends past the last frame, this
51114+ check won't pass and the next frame will be 0,
51115+ causing us to bail out and correctly report
51116+ the copy as invalid
51117+ */
51118+ if (obj + len <= frame)
51119+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
51120+ oldframe = frame;
51121+ frame = *(const void * const *)frame;
51122+ }
51123+ return -1;
51124+#else
51125+ return 1;
51126+#endif
51127+}
51128+
51129+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
51130+{
51131+ if (current->signal->curr_ip)
51132+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
51133+ &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
51134+ else
51135+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
51136+ to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
51137+ dump_stack();
51138+ gr_handle_kernel_exploit();
51139+ do_group_exit(SIGKILL);
51140+}
51141+#endif
51142+
51143+void __check_object_size(const void *ptr, unsigned long n, bool to_user)
51144+{
51145+
51146+#ifdef CONFIG_PAX_USERCOPY
51147+ const char *type;
51148+
51149+ if (!n)
51150+ return;
51151+
51152+ type = check_heap_object(ptr, n);
51153+ if (!type) {
51154+ if (check_stack_object(ptr, n) != -1)
51155+ return;
51156+ type = "<process stack>";
51157+ }
51158+
51159+ pax_report_usercopy(ptr, n, to_user, type);
51160+#endif
51161+
51162+}
51163+EXPORT_SYMBOL(__check_object_size);
51164+
51165+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
51166+void pax_track_stack(void)
51167+{
51168+ unsigned long sp = (unsigned long)&sp;
51169+ if (sp < current_thread_info()->lowest_stack &&
51170+ sp > (unsigned long)task_stack_page(current))
51171+ current_thread_info()->lowest_stack = sp;
51172+}
51173+EXPORT_SYMBOL(pax_track_stack);
51174+#endif
51175+
51176+#ifdef CONFIG_PAX_SIZE_OVERFLOW
51177+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
51178+{
51179+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
51180+ dump_stack();
51181+ do_group_exit(SIGKILL);
51182+}
51183+EXPORT_SYMBOL(report_size_overflow);
51184+#endif
51185diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
51186index 2616d0e..2ffdec9 100644
51187--- a/fs/ext2/balloc.c
51188+++ b/fs/ext2/balloc.c
51189@@ -1190,10 +1190,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
51190
51191 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
51192 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
51193- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
51194+ if (free_blocks < root_blocks + 1 &&
51195 !uid_eq(sbi->s_resuid, current_fsuid()) &&
51196 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
51197- !in_group_p (sbi->s_resgid))) {
51198+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
51199 return 0;
51200 }
51201 return 1;
51202diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
51203index 22548f5..41521d8 100644
51204--- a/fs/ext3/balloc.c
51205+++ b/fs/ext3/balloc.c
51206@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
51207
51208 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
51209 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
51210- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
51211+ if (free_blocks < root_blocks + 1 &&
51212 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
51213 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
51214- !in_group_p (sbi->s_resgid))) {
51215+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
51216 return 0;
51217 }
51218 return 1;
51219diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
51220index 92e68b3..115d987 100644
51221--- a/fs/ext4/balloc.c
51222+++ b/fs/ext4/balloc.c
51223@@ -505,8 +505,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
51224 /* Hm, nope. Are (enough) root reserved clusters available? */
51225 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
51226 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
51227- capable(CAP_SYS_RESOURCE) ||
51228- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
51229+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
51230+ capable_nolog(CAP_SYS_RESOURCE)) {
51231
51232 if (free_clusters >= (nclusters + dirty_clusters))
51233 return 1;
51234diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
51235index bbcd6a0..2824592 100644
51236--- a/fs/ext4/ext4.h
51237+++ b/fs/ext4/ext4.h
51238@@ -1265,19 +1265,19 @@ struct ext4_sb_info {
51239 unsigned long s_mb_last_start;
51240
51241 /* stats for buddy allocator */
51242- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
51243- atomic_t s_bal_success; /* we found long enough chunks */
51244- atomic_t s_bal_allocated; /* in blocks */
51245- atomic_t s_bal_ex_scanned; /* total extents scanned */
51246- atomic_t s_bal_goals; /* goal hits */
51247- atomic_t s_bal_breaks; /* too long searches */
51248- atomic_t s_bal_2orders; /* 2^order hits */
51249+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
51250+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
51251+ atomic_unchecked_t s_bal_allocated; /* in blocks */
51252+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
51253+ atomic_unchecked_t s_bal_goals; /* goal hits */
51254+ atomic_unchecked_t s_bal_breaks; /* too long searches */
51255+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
51256 spinlock_t s_bal_lock;
51257 unsigned long s_mb_buddies_generated;
51258 unsigned long long s_mb_generation_time;
51259- atomic_t s_mb_lost_chunks;
51260- atomic_t s_mb_preallocated;
51261- atomic_t s_mb_discarded;
51262+ atomic_unchecked_t s_mb_lost_chunks;
51263+ atomic_unchecked_t s_mb_preallocated;
51264+ atomic_unchecked_t s_mb_discarded;
51265 atomic_t s_lock_busy;
51266
51267 /* locality groups */
51268diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
51269index 82f8c2d..ce7c889 100644
51270--- a/fs/ext4/mballoc.c
51271+++ b/fs/ext4/mballoc.c
51272@@ -1747,7 +1747,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
51273 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
51274
51275 if (EXT4_SB(sb)->s_mb_stats)
51276- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
51277+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
51278
51279 break;
51280 }
51281@@ -2044,7 +2044,7 @@ repeat:
51282 ac->ac_status = AC_STATUS_CONTINUE;
51283 ac->ac_flags |= EXT4_MB_HINT_FIRST;
51284 cr = 3;
51285- atomic_inc(&sbi->s_mb_lost_chunks);
51286+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
51287 goto repeat;
51288 }
51289 }
51290@@ -2552,25 +2552,25 @@ int ext4_mb_release(struct super_block *sb)
51291 if (sbi->s_mb_stats) {
51292 ext4_msg(sb, KERN_INFO,
51293 "mballoc: %u blocks %u reqs (%u success)",
51294- atomic_read(&sbi->s_bal_allocated),
51295- atomic_read(&sbi->s_bal_reqs),
51296- atomic_read(&sbi->s_bal_success));
51297+ atomic_read_unchecked(&sbi->s_bal_allocated),
51298+ atomic_read_unchecked(&sbi->s_bal_reqs),
51299+ atomic_read_unchecked(&sbi->s_bal_success));
51300 ext4_msg(sb, KERN_INFO,
51301 "mballoc: %u extents scanned, %u goal hits, "
51302 "%u 2^N hits, %u breaks, %u lost",
51303- atomic_read(&sbi->s_bal_ex_scanned),
51304- atomic_read(&sbi->s_bal_goals),
51305- atomic_read(&sbi->s_bal_2orders),
51306- atomic_read(&sbi->s_bal_breaks),
51307- atomic_read(&sbi->s_mb_lost_chunks));
51308+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
51309+ atomic_read_unchecked(&sbi->s_bal_goals),
51310+ atomic_read_unchecked(&sbi->s_bal_2orders),
51311+ atomic_read_unchecked(&sbi->s_bal_breaks),
51312+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
51313 ext4_msg(sb, KERN_INFO,
51314 "mballoc: %lu generated and it took %Lu",
51315 sbi->s_mb_buddies_generated,
51316 sbi->s_mb_generation_time);
51317 ext4_msg(sb, KERN_INFO,
51318 "mballoc: %u preallocated, %u discarded",
51319- atomic_read(&sbi->s_mb_preallocated),
51320- atomic_read(&sbi->s_mb_discarded));
51321+ atomic_read_unchecked(&sbi->s_mb_preallocated),
51322+ atomic_read_unchecked(&sbi->s_mb_discarded));
51323 }
51324
51325 free_percpu(sbi->s_locality_groups);
51326@@ -3060,16 +3060,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
51327 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
51328
51329 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
51330- atomic_inc(&sbi->s_bal_reqs);
51331- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
51332+ atomic_inc_unchecked(&sbi->s_bal_reqs);
51333+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
51334 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
51335- atomic_inc(&sbi->s_bal_success);
51336- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
51337+ atomic_inc_unchecked(&sbi->s_bal_success);
51338+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
51339 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
51340 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
51341- atomic_inc(&sbi->s_bal_goals);
51342+ atomic_inc_unchecked(&sbi->s_bal_goals);
51343 if (ac->ac_found > sbi->s_mb_max_to_scan)
51344- atomic_inc(&sbi->s_bal_breaks);
51345+ atomic_inc_unchecked(&sbi->s_bal_breaks);
51346 }
51347
51348 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
51349@@ -3469,7 +3469,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
51350 trace_ext4_mb_new_inode_pa(ac, pa);
51351
51352 ext4_mb_use_inode_pa(ac, pa);
51353- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
51354+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
51355
51356 ei = EXT4_I(ac->ac_inode);
51357 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
51358@@ -3529,7 +3529,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
51359 trace_ext4_mb_new_group_pa(ac, pa);
51360
51361 ext4_mb_use_group_pa(ac, pa);
51362- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
51363+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
51364
51365 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
51366 lg = ac->ac_lg;
51367@@ -3618,7 +3618,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
51368 * from the bitmap and continue.
51369 */
51370 }
51371- atomic_add(free, &sbi->s_mb_discarded);
51372+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
51373
51374 return err;
51375 }
51376@@ -3636,7 +3636,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
51377 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
51378 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
51379 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
51380- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
51381+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
51382 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
51383
51384 return 0;
51385diff --git a/fs/ext4/super.c b/fs/ext4/super.c
51386index 24c767d..893aa55 100644
51387--- a/fs/ext4/super.c
51388+++ b/fs/ext4/super.c
51389@@ -2429,7 +2429,7 @@ struct ext4_attr {
51390 ssize_t (*store)(struct ext4_attr *, struct ext4_sb_info *,
51391 const char *, size_t);
51392 int offset;
51393-};
51394+} __do_const;
51395
51396 static int parse_strtoul(const char *buf,
51397 unsigned long max, unsigned long *value)
51398diff --git a/fs/fcntl.c b/fs/fcntl.c
51399index 71a600a..20d87b1 100644
51400--- a/fs/fcntl.c
51401+++ b/fs/fcntl.c
51402@@ -107,6 +107,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
51403 if (err)
51404 return err;
51405
51406+ if (gr_handle_chroot_fowner(pid, type))
51407+ return -ENOENT;
51408+ if (gr_check_protected_task_fowner(pid, type))
51409+ return -EACCES;
51410+
51411 f_modown(filp, pid, type, force);
51412 return 0;
51413 }
51414diff --git a/fs/fhandle.c b/fs/fhandle.c
51415index 999ff5c..41f4109 100644
51416--- a/fs/fhandle.c
51417+++ b/fs/fhandle.c
51418@@ -67,8 +67,7 @@ static long do_sys_name_to_handle(struct path *path,
51419 } else
51420 retval = 0;
51421 /* copy the mount id */
51422- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
51423- sizeof(*mnt_id)) ||
51424+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
51425 copy_to_user(ufh, handle,
51426 sizeof(struct file_handle) + handle_bytes))
51427 retval = -EFAULT;
51428diff --git a/fs/fifo.c b/fs/fifo.c
51429index cf6f434..3d7942c 100644
51430--- a/fs/fifo.c
51431+++ b/fs/fifo.c
51432@@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
51433 */
51434 filp->f_op = &read_pipefifo_fops;
51435 pipe->r_counter++;
51436- if (pipe->readers++ == 0)
51437+ if (atomic_inc_return(&pipe->readers) == 1)
51438 wake_up_partner(inode);
51439
51440- if (!pipe->writers) {
51441+ if (!atomic_read(&pipe->writers)) {
51442 if ((filp->f_flags & O_NONBLOCK)) {
51443 /* suppress POLLHUP until we have
51444 * seen a writer */
51445@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
51446 * errno=ENXIO when there is no process reading the FIFO.
51447 */
51448 ret = -ENXIO;
51449- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
51450+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
51451 goto err;
51452
51453 filp->f_op = &write_pipefifo_fops;
51454 pipe->w_counter++;
51455- if (!pipe->writers++)
51456+ if (atomic_inc_return(&pipe->writers) == 1)
51457 wake_up_partner(inode);
51458
51459- if (!pipe->readers) {
51460+ if (!atomic_read(&pipe->readers)) {
51461 if (wait_for_partner(inode, &pipe->r_counter))
51462 goto err_wr;
51463 }
51464@@ -104,11 +104,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
51465 */
51466 filp->f_op = &rdwr_pipefifo_fops;
51467
51468- pipe->readers++;
51469- pipe->writers++;
51470+ atomic_inc(&pipe->readers);
51471+ atomic_inc(&pipe->writers);
51472 pipe->r_counter++;
51473 pipe->w_counter++;
51474- if (pipe->readers == 1 || pipe->writers == 1)
51475+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
51476 wake_up_partner(inode);
51477 break;
51478
51479@@ -122,19 +122,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
51480 return 0;
51481
51482 err_rd:
51483- if (!--pipe->readers)
51484+ if (atomic_dec_and_test(&pipe->readers))
51485 wake_up_interruptible(&pipe->wait);
51486 ret = -ERESTARTSYS;
51487 goto err;
51488
51489 err_wr:
51490- if (!--pipe->writers)
51491+ if (atomic_dec_and_test(&pipe->writers))
51492 wake_up_interruptible(&pipe->wait);
51493 ret = -ERESTARTSYS;
51494 goto err;
51495
51496 err:
51497- if (!pipe->readers && !pipe->writers)
51498+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
51499 free_pipe_info(inode);
51500
51501 err_nocleanup:
51502diff --git a/fs/file.c b/fs/file.c
51503index 2b3570b..c57924b 100644
51504--- a/fs/file.c
51505+++ b/fs/file.c
51506@@ -16,6 +16,7 @@
51507 #include <linux/slab.h>
51508 #include <linux/vmalloc.h>
51509 #include <linux/file.h>
51510+#include <linux/security.h>
51511 #include <linux/fdtable.h>
51512 #include <linux/bitops.h>
51513 #include <linux/interrupt.h>
51514@@ -892,6 +893,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
51515 if (!file)
51516 return __close_fd(files, fd);
51517
51518+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
51519 if (fd >= rlimit(RLIMIT_NOFILE))
51520 return -EBADF;
51521
51522@@ -918,6 +920,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
51523 if (unlikely(oldfd == newfd))
51524 return -EINVAL;
51525
51526+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
51527 if (newfd >= rlimit(RLIMIT_NOFILE))
51528 return -EBADF;
51529
51530@@ -973,6 +976,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
51531 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
51532 {
51533 int err;
51534+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
51535 if (from >= rlimit(RLIMIT_NOFILE))
51536 return -EINVAL;
51537 err = alloc_fd(from, flags);
51538diff --git a/fs/filesystems.c b/fs/filesystems.c
51539index da165f6..3671bdb 100644
51540--- a/fs/filesystems.c
51541+++ b/fs/filesystems.c
51542@@ -273,7 +273,12 @@ struct file_system_type *get_fs_type(const char *name)
51543 int len = dot ? dot - name : strlen(name);
51544
51545 fs = __get_fs_type(name, len);
51546+
51547+#ifdef CONFIG_GRKERNSEC_MODHARDEN
51548+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
51549+#else
51550 if (!fs && (request_module("%.*s", len, name) == 0))
51551+#endif
51552 fs = __get_fs_type(name, len);
51553
51554 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
51555diff --git a/fs/fs_struct.c b/fs/fs_struct.c
51556index fe6ca58..65318cf 100644
51557--- a/fs/fs_struct.c
51558+++ b/fs/fs_struct.c
51559@@ -4,6 +4,7 @@
51560 #include <linux/path.h>
51561 #include <linux/slab.h>
51562 #include <linux/fs_struct.h>
51563+#include <linux/grsecurity.h>
51564 #include "internal.h"
51565
51566 /*
51567@@ -19,6 +20,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
51568 write_seqcount_begin(&fs->seq);
51569 old_root = fs->root;
51570 fs->root = *path;
51571+ gr_set_chroot_entries(current, path);
51572 write_seqcount_end(&fs->seq);
51573 spin_unlock(&fs->lock);
51574 if (old_root.dentry)
51575@@ -53,6 +55,21 @@ static inline int replace_path(struct path *p, const struct path *old, const str
51576 return 1;
51577 }
51578
51579+static inline int replace_root_path(struct task_struct *task, struct path *p, const struct path *old, struct path *new)
51580+{
51581+ if (likely(p->dentry != old->dentry || p->mnt != old->mnt))
51582+ return 0;
51583+ *p = *new;
51584+
51585+ /* This function is only called from pivot_root(). Leave our
51586+ gr_chroot_dentry and is_chrooted flags as-is, so that a
51587+ pivoted root isn't treated as a chroot
51588+ */
51589+ //gr_set_chroot_entries(task, new);
51590+
51591+ return 1;
51592+}
51593+
51594 void chroot_fs_refs(struct path *old_root, struct path *new_root)
51595 {
51596 struct task_struct *g, *p;
51597@@ -67,7 +84,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
51598 int hits = 0;
51599 spin_lock(&fs->lock);
51600 write_seqcount_begin(&fs->seq);
51601- hits += replace_path(&fs->root, old_root, new_root);
51602+ hits += replace_root_path(p, &fs->root, old_root, new_root);
51603 hits += replace_path(&fs->pwd, old_root, new_root);
51604 write_seqcount_end(&fs->seq);
51605 while (hits--) {
51606@@ -99,7 +116,8 @@ void exit_fs(struct task_struct *tsk)
51607 task_lock(tsk);
51608 spin_lock(&fs->lock);
51609 tsk->fs = NULL;
51610- kill = !--fs->users;
51611+ gr_clear_chroot_entries(tsk);
51612+ kill = !atomic_dec_return(&fs->users);
51613 spin_unlock(&fs->lock);
51614 task_unlock(tsk);
51615 if (kill)
51616@@ -112,7 +130,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
51617 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
51618 /* We don't need to lock fs - think why ;-) */
51619 if (fs) {
51620- fs->users = 1;
51621+ atomic_set(&fs->users, 1);
51622 fs->in_exec = 0;
51623 spin_lock_init(&fs->lock);
51624 seqcount_init(&fs->seq);
51625@@ -121,6 +139,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
51626 spin_lock(&old->lock);
51627 fs->root = old->root;
51628 path_get(&fs->root);
51629+ /* instead of calling gr_set_chroot_entries here,
51630+ we call it from every caller of this function
51631+ */
51632 fs->pwd = old->pwd;
51633 path_get(&fs->pwd);
51634 spin_unlock(&old->lock);
51635@@ -139,8 +160,9 @@ int unshare_fs_struct(void)
51636
51637 task_lock(current);
51638 spin_lock(&fs->lock);
51639- kill = !--fs->users;
51640+ kill = !atomic_dec_return(&fs->users);
51641 current->fs = new_fs;
51642+ gr_set_chroot_entries(current, &new_fs->root);
51643 spin_unlock(&fs->lock);
51644 task_unlock(current);
51645
51646@@ -153,13 +175,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
51647
51648 int current_umask(void)
51649 {
51650- return current->fs->umask;
51651+ return current->fs->umask | gr_acl_umask();
51652 }
51653 EXPORT_SYMBOL(current_umask);
51654
51655 /* to be mentioned only in INIT_TASK */
51656 struct fs_struct init_fs = {
51657- .users = 1,
51658+ .users = ATOMIC_INIT(1),
51659 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
51660 .seq = SEQCNT_ZERO,
51661 .umask = 0022,
51662diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
51663index 8dcb114..b1072e2 100644
51664--- a/fs/fscache/cookie.c
51665+++ b/fs/fscache/cookie.c
51666@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
51667 parent ? (char *) parent->def->name : "<no-parent>",
51668 def->name, netfs_data);
51669
51670- fscache_stat(&fscache_n_acquires);
51671+ fscache_stat_unchecked(&fscache_n_acquires);
51672
51673 /* if there's no parent cookie, then we don't create one here either */
51674 if (!parent) {
51675- fscache_stat(&fscache_n_acquires_null);
51676+ fscache_stat_unchecked(&fscache_n_acquires_null);
51677 _leave(" [no parent]");
51678 return NULL;
51679 }
51680@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
51681 /* allocate and initialise a cookie */
51682 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
51683 if (!cookie) {
51684- fscache_stat(&fscache_n_acquires_oom);
51685+ fscache_stat_unchecked(&fscache_n_acquires_oom);
51686 _leave(" [ENOMEM]");
51687 return NULL;
51688 }
51689@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
51690
51691 switch (cookie->def->type) {
51692 case FSCACHE_COOKIE_TYPE_INDEX:
51693- fscache_stat(&fscache_n_cookie_index);
51694+ fscache_stat_unchecked(&fscache_n_cookie_index);
51695 break;
51696 case FSCACHE_COOKIE_TYPE_DATAFILE:
51697- fscache_stat(&fscache_n_cookie_data);
51698+ fscache_stat_unchecked(&fscache_n_cookie_data);
51699 break;
51700 default:
51701- fscache_stat(&fscache_n_cookie_special);
51702+ fscache_stat_unchecked(&fscache_n_cookie_special);
51703 break;
51704 }
51705
51706@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
51707 if (fscache_acquire_non_index_cookie(cookie) < 0) {
51708 atomic_dec(&parent->n_children);
51709 __fscache_cookie_put(cookie);
51710- fscache_stat(&fscache_n_acquires_nobufs);
51711+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
51712 _leave(" = NULL");
51713 return NULL;
51714 }
51715 }
51716
51717- fscache_stat(&fscache_n_acquires_ok);
51718+ fscache_stat_unchecked(&fscache_n_acquires_ok);
51719 _leave(" = %p", cookie);
51720 return cookie;
51721 }
51722@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
51723 cache = fscache_select_cache_for_object(cookie->parent);
51724 if (!cache) {
51725 up_read(&fscache_addremove_sem);
51726- fscache_stat(&fscache_n_acquires_no_cache);
51727+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
51728 _leave(" = -ENOMEDIUM [no cache]");
51729 return -ENOMEDIUM;
51730 }
51731@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
51732 object = cache->ops->alloc_object(cache, cookie);
51733 fscache_stat_d(&fscache_n_cop_alloc_object);
51734 if (IS_ERR(object)) {
51735- fscache_stat(&fscache_n_object_no_alloc);
51736+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
51737 ret = PTR_ERR(object);
51738 goto error;
51739 }
51740
51741- fscache_stat(&fscache_n_object_alloc);
51742+ fscache_stat_unchecked(&fscache_n_object_alloc);
51743
51744 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
51745
51746@@ -378,7 +378,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
51747
51748 _enter("{%s}", cookie->def->name);
51749
51750- fscache_stat(&fscache_n_invalidates);
51751+ fscache_stat_unchecked(&fscache_n_invalidates);
51752
51753 /* Only permit invalidation of data files. Invalidating an index will
51754 * require the caller to release all its attachments to the tree rooted
51755@@ -437,10 +437,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
51756 struct fscache_object *object;
51757 struct hlist_node *_p;
51758
51759- fscache_stat(&fscache_n_updates);
51760+ fscache_stat_unchecked(&fscache_n_updates);
51761
51762 if (!cookie) {
51763- fscache_stat(&fscache_n_updates_null);
51764+ fscache_stat_unchecked(&fscache_n_updates_null);
51765 _leave(" [no cookie]");
51766 return;
51767 }
51768@@ -474,12 +474,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
51769 struct fscache_object *object;
51770 unsigned long event;
51771
51772- fscache_stat(&fscache_n_relinquishes);
51773+ fscache_stat_unchecked(&fscache_n_relinquishes);
51774 if (retire)
51775- fscache_stat(&fscache_n_relinquishes_retire);
51776+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
51777
51778 if (!cookie) {
51779- fscache_stat(&fscache_n_relinquishes_null);
51780+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
51781 _leave(" [no cookie]");
51782 return;
51783 }
51784@@ -495,7 +495,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
51785
51786 /* wait for the cookie to finish being instantiated (or to fail) */
51787 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
51788- fscache_stat(&fscache_n_relinquishes_waitcrt);
51789+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
51790 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
51791 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
51792 }
51793diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
51794index ee38fef..0a326d4 100644
51795--- a/fs/fscache/internal.h
51796+++ b/fs/fscache/internal.h
51797@@ -148,101 +148,101 @@ extern void fscache_proc_cleanup(void);
51798 * stats.c
51799 */
51800 #ifdef CONFIG_FSCACHE_STATS
51801-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
51802-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
51803+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
51804+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
51805
51806-extern atomic_t fscache_n_op_pend;
51807-extern atomic_t fscache_n_op_run;
51808-extern atomic_t fscache_n_op_enqueue;
51809-extern atomic_t fscache_n_op_deferred_release;
51810-extern atomic_t fscache_n_op_release;
51811-extern atomic_t fscache_n_op_gc;
51812-extern atomic_t fscache_n_op_cancelled;
51813-extern atomic_t fscache_n_op_rejected;
51814+extern atomic_unchecked_t fscache_n_op_pend;
51815+extern atomic_unchecked_t fscache_n_op_run;
51816+extern atomic_unchecked_t fscache_n_op_enqueue;
51817+extern atomic_unchecked_t fscache_n_op_deferred_release;
51818+extern atomic_unchecked_t fscache_n_op_release;
51819+extern atomic_unchecked_t fscache_n_op_gc;
51820+extern atomic_unchecked_t fscache_n_op_cancelled;
51821+extern atomic_unchecked_t fscache_n_op_rejected;
51822
51823-extern atomic_t fscache_n_attr_changed;
51824-extern atomic_t fscache_n_attr_changed_ok;
51825-extern atomic_t fscache_n_attr_changed_nobufs;
51826-extern atomic_t fscache_n_attr_changed_nomem;
51827-extern atomic_t fscache_n_attr_changed_calls;
51828+extern atomic_unchecked_t fscache_n_attr_changed;
51829+extern atomic_unchecked_t fscache_n_attr_changed_ok;
51830+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
51831+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
51832+extern atomic_unchecked_t fscache_n_attr_changed_calls;
51833
51834-extern atomic_t fscache_n_allocs;
51835-extern atomic_t fscache_n_allocs_ok;
51836-extern atomic_t fscache_n_allocs_wait;
51837-extern atomic_t fscache_n_allocs_nobufs;
51838-extern atomic_t fscache_n_allocs_intr;
51839-extern atomic_t fscache_n_allocs_object_dead;
51840-extern atomic_t fscache_n_alloc_ops;
51841-extern atomic_t fscache_n_alloc_op_waits;
51842+extern atomic_unchecked_t fscache_n_allocs;
51843+extern atomic_unchecked_t fscache_n_allocs_ok;
51844+extern atomic_unchecked_t fscache_n_allocs_wait;
51845+extern atomic_unchecked_t fscache_n_allocs_nobufs;
51846+extern atomic_unchecked_t fscache_n_allocs_intr;
51847+extern atomic_unchecked_t fscache_n_allocs_object_dead;
51848+extern atomic_unchecked_t fscache_n_alloc_ops;
51849+extern atomic_unchecked_t fscache_n_alloc_op_waits;
51850
51851-extern atomic_t fscache_n_retrievals;
51852-extern atomic_t fscache_n_retrievals_ok;
51853-extern atomic_t fscache_n_retrievals_wait;
51854-extern atomic_t fscache_n_retrievals_nodata;
51855-extern atomic_t fscache_n_retrievals_nobufs;
51856-extern atomic_t fscache_n_retrievals_intr;
51857-extern atomic_t fscache_n_retrievals_nomem;
51858-extern atomic_t fscache_n_retrievals_object_dead;
51859-extern atomic_t fscache_n_retrieval_ops;
51860-extern atomic_t fscache_n_retrieval_op_waits;
51861+extern atomic_unchecked_t fscache_n_retrievals;
51862+extern atomic_unchecked_t fscache_n_retrievals_ok;
51863+extern atomic_unchecked_t fscache_n_retrievals_wait;
51864+extern atomic_unchecked_t fscache_n_retrievals_nodata;
51865+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
51866+extern atomic_unchecked_t fscache_n_retrievals_intr;
51867+extern atomic_unchecked_t fscache_n_retrievals_nomem;
51868+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
51869+extern atomic_unchecked_t fscache_n_retrieval_ops;
51870+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
51871
51872-extern atomic_t fscache_n_stores;
51873-extern atomic_t fscache_n_stores_ok;
51874-extern atomic_t fscache_n_stores_again;
51875-extern atomic_t fscache_n_stores_nobufs;
51876-extern atomic_t fscache_n_stores_oom;
51877-extern atomic_t fscache_n_store_ops;
51878-extern atomic_t fscache_n_store_calls;
51879-extern atomic_t fscache_n_store_pages;
51880-extern atomic_t fscache_n_store_radix_deletes;
51881-extern atomic_t fscache_n_store_pages_over_limit;
51882+extern atomic_unchecked_t fscache_n_stores;
51883+extern atomic_unchecked_t fscache_n_stores_ok;
51884+extern atomic_unchecked_t fscache_n_stores_again;
51885+extern atomic_unchecked_t fscache_n_stores_nobufs;
51886+extern atomic_unchecked_t fscache_n_stores_oom;
51887+extern atomic_unchecked_t fscache_n_store_ops;
51888+extern atomic_unchecked_t fscache_n_store_calls;
51889+extern atomic_unchecked_t fscache_n_store_pages;
51890+extern atomic_unchecked_t fscache_n_store_radix_deletes;
51891+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
51892
51893-extern atomic_t fscache_n_store_vmscan_not_storing;
51894-extern atomic_t fscache_n_store_vmscan_gone;
51895-extern atomic_t fscache_n_store_vmscan_busy;
51896-extern atomic_t fscache_n_store_vmscan_cancelled;
51897-extern atomic_t fscache_n_store_vmscan_wait;
51898+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
51899+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
51900+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
51901+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
51902+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
51903
51904-extern atomic_t fscache_n_marks;
51905-extern atomic_t fscache_n_uncaches;
51906+extern atomic_unchecked_t fscache_n_marks;
51907+extern atomic_unchecked_t fscache_n_uncaches;
51908
51909-extern atomic_t fscache_n_acquires;
51910-extern atomic_t fscache_n_acquires_null;
51911-extern atomic_t fscache_n_acquires_no_cache;
51912-extern atomic_t fscache_n_acquires_ok;
51913-extern atomic_t fscache_n_acquires_nobufs;
51914-extern atomic_t fscache_n_acquires_oom;
51915+extern atomic_unchecked_t fscache_n_acquires;
51916+extern atomic_unchecked_t fscache_n_acquires_null;
51917+extern atomic_unchecked_t fscache_n_acquires_no_cache;
51918+extern atomic_unchecked_t fscache_n_acquires_ok;
51919+extern atomic_unchecked_t fscache_n_acquires_nobufs;
51920+extern atomic_unchecked_t fscache_n_acquires_oom;
51921
51922-extern atomic_t fscache_n_invalidates;
51923-extern atomic_t fscache_n_invalidates_run;
51924+extern atomic_unchecked_t fscache_n_invalidates;
51925+extern atomic_unchecked_t fscache_n_invalidates_run;
51926
51927-extern atomic_t fscache_n_updates;
51928-extern atomic_t fscache_n_updates_null;
51929-extern atomic_t fscache_n_updates_run;
51930+extern atomic_unchecked_t fscache_n_updates;
51931+extern atomic_unchecked_t fscache_n_updates_null;
51932+extern atomic_unchecked_t fscache_n_updates_run;
51933
51934-extern atomic_t fscache_n_relinquishes;
51935-extern atomic_t fscache_n_relinquishes_null;
51936-extern atomic_t fscache_n_relinquishes_waitcrt;
51937-extern atomic_t fscache_n_relinquishes_retire;
51938+extern atomic_unchecked_t fscache_n_relinquishes;
51939+extern atomic_unchecked_t fscache_n_relinquishes_null;
51940+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
51941+extern atomic_unchecked_t fscache_n_relinquishes_retire;
51942
51943-extern atomic_t fscache_n_cookie_index;
51944-extern atomic_t fscache_n_cookie_data;
51945-extern atomic_t fscache_n_cookie_special;
51946+extern atomic_unchecked_t fscache_n_cookie_index;
51947+extern atomic_unchecked_t fscache_n_cookie_data;
51948+extern atomic_unchecked_t fscache_n_cookie_special;
51949
51950-extern atomic_t fscache_n_object_alloc;
51951-extern atomic_t fscache_n_object_no_alloc;
51952-extern atomic_t fscache_n_object_lookups;
51953-extern atomic_t fscache_n_object_lookups_negative;
51954-extern atomic_t fscache_n_object_lookups_positive;
51955-extern atomic_t fscache_n_object_lookups_timed_out;
51956-extern atomic_t fscache_n_object_created;
51957-extern atomic_t fscache_n_object_avail;
51958-extern atomic_t fscache_n_object_dead;
51959+extern atomic_unchecked_t fscache_n_object_alloc;
51960+extern atomic_unchecked_t fscache_n_object_no_alloc;
51961+extern atomic_unchecked_t fscache_n_object_lookups;
51962+extern atomic_unchecked_t fscache_n_object_lookups_negative;
51963+extern atomic_unchecked_t fscache_n_object_lookups_positive;
51964+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
51965+extern atomic_unchecked_t fscache_n_object_created;
51966+extern atomic_unchecked_t fscache_n_object_avail;
51967+extern atomic_unchecked_t fscache_n_object_dead;
51968
51969-extern atomic_t fscache_n_checkaux_none;
51970-extern atomic_t fscache_n_checkaux_okay;
51971-extern atomic_t fscache_n_checkaux_update;
51972-extern atomic_t fscache_n_checkaux_obsolete;
51973+extern atomic_unchecked_t fscache_n_checkaux_none;
51974+extern atomic_unchecked_t fscache_n_checkaux_okay;
51975+extern atomic_unchecked_t fscache_n_checkaux_update;
51976+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
51977
51978 extern atomic_t fscache_n_cop_alloc_object;
51979 extern atomic_t fscache_n_cop_lookup_object;
51980@@ -267,6 +267,11 @@ static inline void fscache_stat(atomic_t *stat)
51981 atomic_inc(stat);
51982 }
51983
51984+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
51985+{
51986+ atomic_inc_unchecked(stat);
51987+}
51988+
51989 static inline void fscache_stat_d(atomic_t *stat)
51990 {
51991 atomic_dec(stat);
51992@@ -279,6 +284,7 @@ extern const struct file_operations fscache_stats_fops;
51993
51994 #define __fscache_stat(stat) (NULL)
51995 #define fscache_stat(stat) do {} while (0)
51996+#define fscache_stat_unchecked(stat) do {} while (0)
51997 #define fscache_stat_d(stat) do {} while (0)
51998 #endif
51999
52000diff --git a/fs/fscache/object.c b/fs/fscache/object.c
52001index 50d41c1..10ee117 100644
52002--- a/fs/fscache/object.c
52003+++ b/fs/fscache/object.c
52004@@ -143,7 +143,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
52005 /* Invalidate an object on disk */
52006 case FSCACHE_OBJECT_INVALIDATING:
52007 clear_bit(FSCACHE_OBJECT_EV_INVALIDATE, &object->events);
52008- fscache_stat(&fscache_n_invalidates_run);
52009+ fscache_stat_unchecked(&fscache_n_invalidates_run);
52010 fscache_stat(&fscache_n_cop_invalidate_object);
52011 fscache_invalidate_object(object);
52012 fscache_stat_d(&fscache_n_cop_invalidate_object);
52013@@ -153,7 +153,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
52014 /* update the object metadata on disk */
52015 case FSCACHE_OBJECT_UPDATING:
52016 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
52017- fscache_stat(&fscache_n_updates_run);
52018+ fscache_stat_unchecked(&fscache_n_updates_run);
52019 fscache_stat(&fscache_n_cop_update_object);
52020 object->cache->ops->update_object(object);
52021 fscache_stat_d(&fscache_n_cop_update_object);
52022@@ -242,7 +242,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
52023 spin_lock(&object->lock);
52024 object->state = FSCACHE_OBJECT_DEAD;
52025 spin_unlock(&object->lock);
52026- fscache_stat(&fscache_n_object_dead);
52027+ fscache_stat_unchecked(&fscache_n_object_dead);
52028 goto terminal_transit;
52029
52030 /* handle the parent cache of this object being withdrawn from
52031@@ -257,7 +257,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
52032 spin_lock(&object->lock);
52033 object->state = FSCACHE_OBJECT_DEAD;
52034 spin_unlock(&object->lock);
52035- fscache_stat(&fscache_n_object_dead);
52036+ fscache_stat_unchecked(&fscache_n_object_dead);
52037 goto terminal_transit;
52038
52039 /* complain about the object being woken up once it is
52040@@ -495,7 +495,7 @@ static void fscache_lookup_object(struct fscache_object *object)
52041 parent->cookie->def->name, cookie->def->name,
52042 object->cache->tag->name);
52043
52044- fscache_stat(&fscache_n_object_lookups);
52045+ fscache_stat_unchecked(&fscache_n_object_lookups);
52046 fscache_stat(&fscache_n_cop_lookup_object);
52047 ret = object->cache->ops->lookup_object(object);
52048 fscache_stat_d(&fscache_n_cop_lookup_object);
52049@@ -506,7 +506,7 @@ static void fscache_lookup_object(struct fscache_object *object)
52050 if (ret == -ETIMEDOUT) {
52051 /* probably stuck behind another object, so move this one to
52052 * the back of the queue */
52053- fscache_stat(&fscache_n_object_lookups_timed_out);
52054+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
52055 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
52056 }
52057
52058@@ -529,7 +529,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
52059
52060 spin_lock(&object->lock);
52061 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
52062- fscache_stat(&fscache_n_object_lookups_negative);
52063+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
52064
52065 /* transit here to allow write requests to begin stacking up
52066 * and read requests to begin returning ENODATA */
52067@@ -575,7 +575,7 @@ void fscache_obtained_object(struct fscache_object *object)
52068 * result, in which case there may be data available */
52069 spin_lock(&object->lock);
52070 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
52071- fscache_stat(&fscache_n_object_lookups_positive);
52072+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
52073
52074 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
52075
52076@@ -589,7 +589,7 @@ void fscache_obtained_object(struct fscache_object *object)
52077 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
52078 } else {
52079 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
52080- fscache_stat(&fscache_n_object_created);
52081+ fscache_stat_unchecked(&fscache_n_object_created);
52082
52083 object->state = FSCACHE_OBJECT_AVAILABLE;
52084 spin_unlock(&object->lock);
52085@@ -634,7 +634,7 @@ static void fscache_object_available(struct fscache_object *object)
52086 fscache_enqueue_dependents(object);
52087
52088 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
52089- fscache_stat(&fscache_n_object_avail);
52090+ fscache_stat_unchecked(&fscache_n_object_avail);
52091
52092 _leave("");
52093 }
52094@@ -894,7 +894,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
52095 enum fscache_checkaux result;
52096
52097 if (!object->cookie->def->check_aux) {
52098- fscache_stat(&fscache_n_checkaux_none);
52099+ fscache_stat_unchecked(&fscache_n_checkaux_none);
52100 return FSCACHE_CHECKAUX_OKAY;
52101 }
52102
52103@@ -903,17 +903,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
52104 switch (result) {
52105 /* entry okay as is */
52106 case FSCACHE_CHECKAUX_OKAY:
52107- fscache_stat(&fscache_n_checkaux_okay);
52108+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
52109 break;
52110
52111 /* entry requires update */
52112 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
52113- fscache_stat(&fscache_n_checkaux_update);
52114+ fscache_stat_unchecked(&fscache_n_checkaux_update);
52115 break;
52116
52117 /* entry requires deletion */
52118 case FSCACHE_CHECKAUX_OBSOLETE:
52119- fscache_stat(&fscache_n_checkaux_obsolete);
52120+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
52121 break;
52122
52123 default:
52124diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
52125index 762a9ec..2023284 100644
52126--- a/fs/fscache/operation.c
52127+++ b/fs/fscache/operation.c
52128@@ -17,7 +17,7 @@
52129 #include <linux/slab.h>
52130 #include "internal.h"
52131
52132-atomic_t fscache_op_debug_id;
52133+atomic_unchecked_t fscache_op_debug_id;
52134 EXPORT_SYMBOL(fscache_op_debug_id);
52135
52136 /**
52137@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
52138 ASSERTCMP(atomic_read(&op->usage), >, 0);
52139 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
52140
52141- fscache_stat(&fscache_n_op_enqueue);
52142+ fscache_stat_unchecked(&fscache_n_op_enqueue);
52143 switch (op->flags & FSCACHE_OP_TYPE) {
52144 case FSCACHE_OP_ASYNC:
52145 _debug("queue async");
52146@@ -73,7 +73,7 @@ static void fscache_run_op(struct fscache_object *object,
52147 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
52148 if (op->processor)
52149 fscache_enqueue_operation(op);
52150- fscache_stat(&fscache_n_op_run);
52151+ fscache_stat_unchecked(&fscache_n_op_run);
52152 }
52153
52154 /*
52155@@ -105,11 +105,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
52156 if (object->n_in_progress > 0) {
52157 atomic_inc(&op->usage);
52158 list_add_tail(&op->pend_link, &object->pending_ops);
52159- fscache_stat(&fscache_n_op_pend);
52160+ fscache_stat_unchecked(&fscache_n_op_pend);
52161 } else if (!list_empty(&object->pending_ops)) {
52162 atomic_inc(&op->usage);
52163 list_add_tail(&op->pend_link, &object->pending_ops);
52164- fscache_stat(&fscache_n_op_pend);
52165+ fscache_stat_unchecked(&fscache_n_op_pend);
52166 fscache_start_operations(object);
52167 } else {
52168 ASSERTCMP(object->n_in_progress, ==, 0);
52169@@ -125,7 +125,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
52170 object->n_exclusive++; /* reads and writes must wait */
52171 atomic_inc(&op->usage);
52172 list_add_tail(&op->pend_link, &object->pending_ops);
52173- fscache_stat(&fscache_n_op_pend);
52174+ fscache_stat_unchecked(&fscache_n_op_pend);
52175 ret = 0;
52176 } else {
52177 /* If we're in any other state, there must have been an I/O
52178@@ -215,11 +215,11 @@ int fscache_submit_op(struct fscache_object *object,
52179 if (object->n_exclusive > 0) {
52180 atomic_inc(&op->usage);
52181 list_add_tail(&op->pend_link, &object->pending_ops);
52182- fscache_stat(&fscache_n_op_pend);
52183+ fscache_stat_unchecked(&fscache_n_op_pend);
52184 } else if (!list_empty(&object->pending_ops)) {
52185 atomic_inc(&op->usage);
52186 list_add_tail(&op->pend_link, &object->pending_ops);
52187- fscache_stat(&fscache_n_op_pend);
52188+ fscache_stat_unchecked(&fscache_n_op_pend);
52189 fscache_start_operations(object);
52190 } else {
52191 ASSERTCMP(object->n_exclusive, ==, 0);
52192@@ -231,12 +231,12 @@ int fscache_submit_op(struct fscache_object *object,
52193 object->n_ops++;
52194 atomic_inc(&op->usage);
52195 list_add_tail(&op->pend_link, &object->pending_ops);
52196- fscache_stat(&fscache_n_op_pend);
52197+ fscache_stat_unchecked(&fscache_n_op_pend);
52198 ret = 0;
52199 } else if (object->state == FSCACHE_OBJECT_DYING ||
52200 object->state == FSCACHE_OBJECT_LC_DYING ||
52201 object->state == FSCACHE_OBJECT_WITHDRAWING) {
52202- fscache_stat(&fscache_n_op_rejected);
52203+ fscache_stat_unchecked(&fscache_n_op_rejected);
52204 op->state = FSCACHE_OP_ST_CANCELLED;
52205 ret = -ENOBUFS;
52206 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
52207@@ -315,7 +315,7 @@ int fscache_cancel_op(struct fscache_operation *op,
52208 ret = -EBUSY;
52209 if (op->state == FSCACHE_OP_ST_PENDING) {
52210 ASSERT(!list_empty(&op->pend_link));
52211- fscache_stat(&fscache_n_op_cancelled);
52212+ fscache_stat_unchecked(&fscache_n_op_cancelled);
52213 list_del_init(&op->pend_link);
52214 if (do_cancel)
52215 do_cancel(op);
52216@@ -347,7 +347,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
52217 while (!list_empty(&object->pending_ops)) {
52218 op = list_entry(object->pending_ops.next,
52219 struct fscache_operation, pend_link);
52220- fscache_stat(&fscache_n_op_cancelled);
52221+ fscache_stat_unchecked(&fscache_n_op_cancelled);
52222 list_del_init(&op->pend_link);
52223
52224 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
52225@@ -419,7 +419,7 @@ void fscache_put_operation(struct fscache_operation *op)
52226 op->state, ==, FSCACHE_OP_ST_CANCELLED);
52227 op->state = FSCACHE_OP_ST_DEAD;
52228
52229- fscache_stat(&fscache_n_op_release);
52230+ fscache_stat_unchecked(&fscache_n_op_release);
52231
52232 if (op->release) {
52233 op->release(op);
52234@@ -442,7 +442,7 @@ void fscache_put_operation(struct fscache_operation *op)
52235 * lock, and defer it otherwise */
52236 if (!spin_trylock(&object->lock)) {
52237 _debug("defer put");
52238- fscache_stat(&fscache_n_op_deferred_release);
52239+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
52240
52241 cache = object->cache;
52242 spin_lock(&cache->op_gc_list_lock);
52243@@ -495,7 +495,7 @@ void fscache_operation_gc(struct work_struct *work)
52244
52245 _debug("GC DEFERRED REL OBJ%x OP%x",
52246 object->debug_id, op->debug_id);
52247- fscache_stat(&fscache_n_op_gc);
52248+ fscache_stat_unchecked(&fscache_n_op_gc);
52249
52250 ASSERTCMP(atomic_read(&op->usage), ==, 0);
52251 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
52252diff --git a/fs/fscache/page.c b/fs/fscache/page.c
52253index ff000e5..c44ec6d 100644
52254--- a/fs/fscache/page.c
52255+++ b/fs/fscache/page.c
52256@@ -61,7 +61,7 @@ try_again:
52257 val = radix_tree_lookup(&cookie->stores, page->index);
52258 if (!val) {
52259 rcu_read_unlock();
52260- fscache_stat(&fscache_n_store_vmscan_not_storing);
52261+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
52262 __fscache_uncache_page(cookie, page);
52263 return true;
52264 }
52265@@ -91,11 +91,11 @@ try_again:
52266 spin_unlock(&cookie->stores_lock);
52267
52268 if (xpage) {
52269- fscache_stat(&fscache_n_store_vmscan_cancelled);
52270- fscache_stat(&fscache_n_store_radix_deletes);
52271+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
52272+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
52273 ASSERTCMP(xpage, ==, page);
52274 } else {
52275- fscache_stat(&fscache_n_store_vmscan_gone);
52276+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
52277 }
52278
52279 wake_up_bit(&cookie->flags, 0);
52280@@ -110,11 +110,11 @@ page_busy:
52281 * sleeping on memory allocation, so we may need to impose a timeout
52282 * too. */
52283 if (!(gfp & __GFP_WAIT)) {
52284- fscache_stat(&fscache_n_store_vmscan_busy);
52285+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
52286 return false;
52287 }
52288
52289- fscache_stat(&fscache_n_store_vmscan_wait);
52290+ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
52291 __fscache_wait_on_page_write(cookie, page);
52292 gfp &= ~__GFP_WAIT;
52293 goto try_again;
52294@@ -140,7 +140,7 @@ static void fscache_end_page_write(struct fscache_object *object,
52295 FSCACHE_COOKIE_STORING_TAG);
52296 if (!radix_tree_tag_get(&cookie->stores, page->index,
52297 FSCACHE_COOKIE_PENDING_TAG)) {
52298- fscache_stat(&fscache_n_store_radix_deletes);
52299+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
52300 xpage = radix_tree_delete(&cookie->stores, page->index);
52301 }
52302 spin_unlock(&cookie->stores_lock);
52303@@ -161,7 +161,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
52304
52305 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
52306
52307- fscache_stat(&fscache_n_attr_changed_calls);
52308+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
52309
52310 if (fscache_object_is_active(object)) {
52311 fscache_stat(&fscache_n_cop_attr_changed);
52312@@ -187,11 +187,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
52313
52314 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
52315
52316- fscache_stat(&fscache_n_attr_changed);
52317+ fscache_stat_unchecked(&fscache_n_attr_changed);
52318
52319 op = kzalloc(sizeof(*op), GFP_KERNEL);
52320 if (!op) {
52321- fscache_stat(&fscache_n_attr_changed_nomem);
52322+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
52323 _leave(" = -ENOMEM");
52324 return -ENOMEM;
52325 }
52326@@ -209,7 +209,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
52327 if (fscache_submit_exclusive_op(object, op) < 0)
52328 goto nobufs;
52329 spin_unlock(&cookie->lock);
52330- fscache_stat(&fscache_n_attr_changed_ok);
52331+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
52332 fscache_put_operation(op);
52333 _leave(" = 0");
52334 return 0;
52335@@ -217,7 +217,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
52336 nobufs:
52337 spin_unlock(&cookie->lock);
52338 kfree(op);
52339- fscache_stat(&fscache_n_attr_changed_nobufs);
52340+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
52341 _leave(" = %d", -ENOBUFS);
52342 return -ENOBUFS;
52343 }
52344@@ -255,7 +255,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
52345 /* allocate a retrieval operation and attempt to submit it */
52346 op = kzalloc(sizeof(*op), GFP_NOIO);
52347 if (!op) {
52348- fscache_stat(&fscache_n_retrievals_nomem);
52349+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
52350 return NULL;
52351 }
52352
52353@@ -283,13 +283,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
52354 return 0;
52355 }
52356
52357- fscache_stat(&fscache_n_retrievals_wait);
52358+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
52359
52360 jif = jiffies;
52361 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
52362 fscache_wait_bit_interruptible,
52363 TASK_INTERRUPTIBLE) != 0) {
52364- fscache_stat(&fscache_n_retrievals_intr);
52365+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
52366 _leave(" = -ERESTARTSYS");
52367 return -ERESTARTSYS;
52368 }
52369@@ -318,8 +318,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
52370 */
52371 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
52372 struct fscache_retrieval *op,
52373- atomic_t *stat_op_waits,
52374- atomic_t *stat_object_dead)
52375+ atomic_unchecked_t *stat_op_waits,
52376+ atomic_unchecked_t *stat_object_dead)
52377 {
52378 int ret;
52379
52380@@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
52381 goto check_if_dead;
52382
52383 _debug(">>> WT");
52384- fscache_stat(stat_op_waits);
52385+ fscache_stat_unchecked(stat_op_waits);
52386 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
52387 fscache_wait_bit_interruptible,
52388 TASK_INTERRUPTIBLE) != 0) {
52389@@ -344,14 +344,14 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
52390
52391 check_if_dead:
52392 if (op->op.state == FSCACHE_OP_ST_CANCELLED) {
52393- fscache_stat(stat_object_dead);
52394+ fscache_stat_unchecked(stat_object_dead);
52395 _leave(" = -ENOBUFS [cancelled]");
52396 return -ENOBUFS;
52397 }
52398 if (unlikely(fscache_object_is_dead(object))) {
52399 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->op.state);
52400 fscache_cancel_op(&op->op, fscache_do_cancel_retrieval);
52401- fscache_stat(stat_object_dead);
52402+ fscache_stat_unchecked(stat_object_dead);
52403 return -ENOBUFS;
52404 }
52405 return 0;
52406@@ -378,7 +378,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
52407
52408 _enter("%p,%p,,,", cookie, page);
52409
52410- fscache_stat(&fscache_n_retrievals);
52411+ fscache_stat_unchecked(&fscache_n_retrievals);
52412
52413 if (hlist_empty(&cookie->backing_objects))
52414 goto nobufs;
52415@@ -417,7 +417,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
52416 goto nobufs_unlock_dec;
52417 spin_unlock(&cookie->lock);
52418
52419- fscache_stat(&fscache_n_retrieval_ops);
52420+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
52421
52422 /* pin the netfs read context in case we need to do the actual netfs
52423 * read because we've encountered a cache read failure */
52424@@ -447,15 +447,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
52425
52426 error:
52427 if (ret == -ENOMEM)
52428- fscache_stat(&fscache_n_retrievals_nomem);
52429+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
52430 else if (ret == -ERESTARTSYS)
52431- fscache_stat(&fscache_n_retrievals_intr);
52432+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
52433 else if (ret == -ENODATA)
52434- fscache_stat(&fscache_n_retrievals_nodata);
52435+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
52436 else if (ret < 0)
52437- fscache_stat(&fscache_n_retrievals_nobufs);
52438+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
52439 else
52440- fscache_stat(&fscache_n_retrievals_ok);
52441+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
52442
52443 fscache_put_retrieval(op);
52444 _leave(" = %d", ret);
52445@@ -467,7 +467,7 @@ nobufs_unlock:
52446 spin_unlock(&cookie->lock);
52447 kfree(op);
52448 nobufs:
52449- fscache_stat(&fscache_n_retrievals_nobufs);
52450+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
52451 _leave(" = -ENOBUFS");
52452 return -ENOBUFS;
52453 }
52454@@ -505,7 +505,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
52455
52456 _enter("%p,,%d,,,", cookie, *nr_pages);
52457
52458- fscache_stat(&fscache_n_retrievals);
52459+ fscache_stat_unchecked(&fscache_n_retrievals);
52460
52461 if (hlist_empty(&cookie->backing_objects))
52462 goto nobufs;
52463@@ -541,7 +541,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
52464 goto nobufs_unlock_dec;
52465 spin_unlock(&cookie->lock);
52466
52467- fscache_stat(&fscache_n_retrieval_ops);
52468+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
52469
52470 /* pin the netfs read context in case we need to do the actual netfs
52471 * read because we've encountered a cache read failure */
52472@@ -571,15 +571,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
52473
52474 error:
52475 if (ret == -ENOMEM)
52476- fscache_stat(&fscache_n_retrievals_nomem);
52477+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
52478 else if (ret == -ERESTARTSYS)
52479- fscache_stat(&fscache_n_retrievals_intr);
52480+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
52481 else if (ret == -ENODATA)
52482- fscache_stat(&fscache_n_retrievals_nodata);
52483+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
52484 else if (ret < 0)
52485- fscache_stat(&fscache_n_retrievals_nobufs);
52486+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
52487 else
52488- fscache_stat(&fscache_n_retrievals_ok);
52489+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
52490
52491 fscache_put_retrieval(op);
52492 _leave(" = %d", ret);
52493@@ -591,7 +591,7 @@ nobufs_unlock:
52494 spin_unlock(&cookie->lock);
52495 kfree(op);
52496 nobufs:
52497- fscache_stat(&fscache_n_retrievals_nobufs);
52498+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
52499 _leave(" = -ENOBUFS");
52500 return -ENOBUFS;
52501 }
52502@@ -615,7 +615,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
52503
52504 _enter("%p,%p,,,", cookie, page);
52505
52506- fscache_stat(&fscache_n_allocs);
52507+ fscache_stat_unchecked(&fscache_n_allocs);
52508
52509 if (hlist_empty(&cookie->backing_objects))
52510 goto nobufs;
52511@@ -647,7 +647,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
52512 goto nobufs_unlock;
52513 spin_unlock(&cookie->lock);
52514
52515- fscache_stat(&fscache_n_alloc_ops);
52516+ fscache_stat_unchecked(&fscache_n_alloc_ops);
52517
52518 ret = fscache_wait_for_retrieval_activation(
52519 object, op,
52520@@ -663,11 +663,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
52521
52522 error:
52523 if (ret == -ERESTARTSYS)
52524- fscache_stat(&fscache_n_allocs_intr);
52525+ fscache_stat_unchecked(&fscache_n_allocs_intr);
52526 else if (ret < 0)
52527- fscache_stat(&fscache_n_allocs_nobufs);
52528+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
52529 else
52530- fscache_stat(&fscache_n_allocs_ok);
52531+ fscache_stat_unchecked(&fscache_n_allocs_ok);
52532
52533 fscache_put_retrieval(op);
52534 _leave(" = %d", ret);
52535@@ -677,7 +677,7 @@ nobufs_unlock:
52536 spin_unlock(&cookie->lock);
52537 kfree(op);
52538 nobufs:
52539- fscache_stat(&fscache_n_allocs_nobufs);
52540+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
52541 _leave(" = -ENOBUFS");
52542 return -ENOBUFS;
52543 }
52544@@ -736,7 +736,7 @@ static void fscache_write_op(struct fscache_operation *_op)
52545
52546 spin_lock(&cookie->stores_lock);
52547
52548- fscache_stat(&fscache_n_store_calls);
52549+ fscache_stat_unchecked(&fscache_n_store_calls);
52550
52551 /* find a page to store */
52552 page = NULL;
52553@@ -747,7 +747,7 @@ static void fscache_write_op(struct fscache_operation *_op)
52554 page = results[0];
52555 _debug("gang %d [%lx]", n, page->index);
52556 if (page->index > op->store_limit) {
52557- fscache_stat(&fscache_n_store_pages_over_limit);
52558+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
52559 goto superseded;
52560 }
52561
52562@@ -759,7 +759,7 @@ static void fscache_write_op(struct fscache_operation *_op)
52563 spin_unlock(&cookie->stores_lock);
52564 spin_unlock(&object->lock);
52565
52566- fscache_stat(&fscache_n_store_pages);
52567+ fscache_stat_unchecked(&fscache_n_store_pages);
52568 fscache_stat(&fscache_n_cop_write_page);
52569 ret = object->cache->ops->write_page(op, page);
52570 fscache_stat_d(&fscache_n_cop_write_page);
52571@@ -860,7 +860,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
52572 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
52573 ASSERT(PageFsCache(page));
52574
52575- fscache_stat(&fscache_n_stores);
52576+ fscache_stat_unchecked(&fscache_n_stores);
52577
52578 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
52579 _leave(" = -ENOBUFS [invalidating]");
52580@@ -916,7 +916,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
52581 spin_unlock(&cookie->stores_lock);
52582 spin_unlock(&object->lock);
52583
52584- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
52585+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
52586 op->store_limit = object->store_limit;
52587
52588 if (fscache_submit_op(object, &op->op) < 0)
52589@@ -924,8 +924,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
52590
52591 spin_unlock(&cookie->lock);
52592 radix_tree_preload_end();
52593- fscache_stat(&fscache_n_store_ops);
52594- fscache_stat(&fscache_n_stores_ok);
52595+ fscache_stat_unchecked(&fscache_n_store_ops);
52596+ fscache_stat_unchecked(&fscache_n_stores_ok);
52597
52598 /* the work queue now carries its own ref on the object */
52599 fscache_put_operation(&op->op);
52600@@ -933,14 +933,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
52601 return 0;
52602
52603 already_queued:
52604- fscache_stat(&fscache_n_stores_again);
52605+ fscache_stat_unchecked(&fscache_n_stores_again);
52606 already_pending:
52607 spin_unlock(&cookie->stores_lock);
52608 spin_unlock(&object->lock);
52609 spin_unlock(&cookie->lock);
52610 radix_tree_preload_end();
52611 kfree(op);
52612- fscache_stat(&fscache_n_stores_ok);
52613+ fscache_stat_unchecked(&fscache_n_stores_ok);
52614 _leave(" = 0");
52615 return 0;
52616
52617@@ -959,14 +959,14 @@ nobufs:
52618 spin_unlock(&cookie->lock);
52619 radix_tree_preload_end();
52620 kfree(op);
52621- fscache_stat(&fscache_n_stores_nobufs);
52622+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
52623 _leave(" = -ENOBUFS");
52624 return -ENOBUFS;
52625
52626 nomem_free:
52627 kfree(op);
52628 nomem:
52629- fscache_stat(&fscache_n_stores_oom);
52630+ fscache_stat_unchecked(&fscache_n_stores_oom);
52631 _leave(" = -ENOMEM");
52632 return -ENOMEM;
52633 }
52634@@ -984,7 +984,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
52635 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
52636 ASSERTCMP(page, !=, NULL);
52637
52638- fscache_stat(&fscache_n_uncaches);
52639+ fscache_stat_unchecked(&fscache_n_uncaches);
52640
52641 /* cache withdrawal may beat us to it */
52642 if (!PageFsCache(page))
52643@@ -1035,7 +1035,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
52644 struct fscache_cookie *cookie = op->op.object->cookie;
52645
52646 #ifdef CONFIG_FSCACHE_STATS
52647- atomic_inc(&fscache_n_marks);
52648+ atomic_inc_unchecked(&fscache_n_marks);
52649 #endif
52650
52651 _debug("- mark %p{%lx}", page, page->index);
52652diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
52653index 8179e8b..5072cc7 100644
52654--- a/fs/fscache/stats.c
52655+++ b/fs/fscache/stats.c
52656@@ -18,99 +18,99 @@
52657 /*
52658 * operation counters
52659 */
52660-atomic_t fscache_n_op_pend;
52661-atomic_t fscache_n_op_run;
52662-atomic_t fscache_n_op_enqueue;
52663-atomic_t fscache_n_op_requeue;
52664-atomic_t fscache_n_op_deferred_release;
52665-atomic_t fscache_n_op_release;
52666-atomic_t fscache_n_op_gc;
52667-atomic_t fscache_n_op_cancelled;
52668-atomic_t fscache_n_op_rejected;
52669+atomic_unchecked_t fscache_n_op_pend;
52670+atomic_unchecked_t fscache_n_op_run;
52671+atomic_unchecked_t fscache_n_op_enqueue;
52672+atomic_unchecked_t fscache_n_op_requeue;
52673+atomic_unchecked_t fscache_n_op_deferred_release;
52674+atomic_unchecked_t fscache_n_op_release;
52675+atomic_unchecked_t fscache_n_op_gc;
52676+atomic_unchecked_t fscache_n_op_cancelled;
52677+atomic_unchecked_t fscache_n_op_rejected;
52678
52679-atomic_t fscache_n_attr_changed;
52680-atomic_t fscache_n_attr_changed_ok;
52681-atomic_t fscache_n_attr_changed_nobufs;
52682-atomic_t fscache_n_attr_changed_nomem;
52683-atomic_t fscache_n_attr_changed_calls;
52684+atomic_unchecked_t fscache_n_attr_changed;
52685+atomic_unchecked_t fscache_n_attr_changed_ok;
52686+atomic_unchecked_t fscache_n_attr_changed_nobufs;
52687+atomic_unchecked_t fscache_n_attr_changed_nomem;
52688+atomic_unchecked_t fscache_n_attr_changed_calls;
52689
52690-atomic_t fscache_n_allocs;
52691-atomic_t fscache_n_allocs_ok;
52692-atomic_t fscache_n_allocs_wait;
52693-atomic_t fscache_n_allocs_nobufs;
52694-atomic_t fscache_n_allocs_intr;
52695-atomic_t fscache_n_allocs_object_dead;
52696-atomic_t fscache_n_alloc_ops;
52697-atomic_t fscache_n_alloc_op_waits;
52698+atomic_unchecked_t fscache_n_allocs;
52699+atomic_unchecked_t fscache_n_allocs_ok;
52700+atomic_unchecked_t fscache_n_allocs_wait;
52701+atomic_unchecked_t fscache_n_allocs_nobufs;
52702+atomic_unchecked_t fscache_n_allocs_intr;
52703+atomic_unchecked_t fscache_n_allocs_object_dead;
52704+atomic_unchecked_t fscache_n_alloc_ops;
52705+atomic_unchecked_t fscache_n_alloc_op_waits;
52706
52707-atomic_t fscache_n_retrievals;
52708-atomic_t fscache_n_retrievals_ok;
52709-atomic_t fscache_n_retrievals_wait;
52710-atomic_t fscache_n_retrievals_nodata;
52711-atomic_t fscache_n_retrievals_nobufs;
52712-atomic_t fscache_n_retrievals_intr;
52713-atomic_t fscache_n_retrievals_nomem;
52714-atomic_t fscache_n_retrievals_object_dead;
52715-atomic_t fscache_n_retrieval_ops;
52716-atomic_t fscache_n_retrieval_op_waits;
52717+atomic_unchecked_t fscache_n_retrievals;
52718+atomic_unchecked_t fscache_n_retrievals_ok;
52719+atomic_unchecked_t fscache_n_retrievals_wait;
52720+atomic_unchecked_t fscache_n_retrievals_nodata;
52721+atomic_unchecked_t fscache_n_retrievals_nobufs;
52722+atomic_unchecked_t fscache_n_retrievals_intr;
52723+atomic_unchecked_t fscache_n_retrievals_nomem;
52724+atomic_unchecked_t fscache_n_retrievals_object_dead;
52725+atomic_unchecked_t fscache_n_retrieval_ops;
52726+atomic_unchecked_t fscache_n_retrieval_op_waits;
52727
52728-atomic_t fscache_n_stores;
52729-atomic_t fscache_n_stores_ok;
52730-atomic_t fscache_n_stores_again;
52731-atomic_t fscache_n_stores_nobufs;
52732-atomic_t fscache_n_stores_oom;
52733-atomic_t fscache_n_store_ops;
52734-atomic_t fscache_n_store_calls;
52735-atomic_t fscache_n_store_pages;
52736-atomic_t fscache_n_store_radix_deletes;
52737-atomic_t fscache_n_store_pages_over_limit;
52738+atomic_unchecked_t fscache_n_stores;
52739+atomic_unchecked_t fscache_n_stores_ok;
52740+atomic_unchecked_t fscache_n_stores_again;
52741+atomic_unchecked_t fscache_n_stores_nobufs;
52742+atomic_unchecked_t fscache_n_stores_oom;
52743+atomic_unchecked_t fscache_n_store_ops;
52744+atomic_unchecked_t fscache_n_store_calls;
52745+atomic_unchecked_t fscache_n_store_pages;
52746+atomic_unchecked_t fscache_n_store_radix_deletes;
52747+atomic_unchecked_t fscache_n_store_pages_over_limit;
52748
52749-atomic_t fscache_n_store_vmscan_not_storing;
52750-atomic_t fscache_n_store_vmscan_gone;
52751-atomic_t fscache_n_store_vmscan_busy;
52752-atomic_t fscache_n_store_vmscan_cancelled;
52753-atomic_t fscache_n_store_vmscan_wait;
52754+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
52755+atomic_unchecked_t fscache_n_store_vmscan_gone;
52756+atomic_unchecked_t fscache_n_store_vmscan_busy;
52757+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
52758+atomic_unchecked_t fscache_n_store_vmscan_wait;
52759
52760-atomic_t fscache_n_marks;
52761-atomic_t fscache_n_uncaches;
52762+atomic_unchecked_t fscache_n_marks;
52763+atomic_unchecked_t fscache_n_uncaches;
52764
52765-atomic_t fscache_n_acquires;
52766-atomic_t fscache_n_acquires_null;
52767-atomic_t fscache_n_acquires_no_cache;
52768-atomic_t fscache_n_acquires_ok;
52769-atomic_t fscache_n_acquires_nobufs;
52770-atomic_t fscache_n_acquires_oom;
52771+atomic_unchecked_t fscache_n_acquires;
52772+atomic_unchecked_t fscache_n_acquires_null;
52773+atomic_unchecked_t fscache_n_acquires_no_cache;
52774+atomic_unchecked_t fscache_n_acquires_ok;
52775+atomic_unchecked_t fscache_n_acquires_nobufs;
52776+atomic_unchecked_t fscache_n_acquires_oom;
52777
52778-atomic_t fscache_n_invalidates;
52779-atomic_t fscache_n_invalidates_run;
52780+atomic_unchecked_t fscache_n_invalidates;
52781+atomic_unchecked_t fscache_n_invalidates_run;
52782
52783-atomic_t fscache_n_updates;
52784-atomic_t fscache_n_updates_null;
52785-atomic_t fscache_n_updates_run;
52786+atomic_unchecked_t fscache_n_updates;
52787+atomic_unchecked_t fscache_n_updates_null;
52788+atomic_unchecked_t fscache_n_updates_run;
52789
52790-atomic_t fscache_n_relinquishes;
52791-atomic_t fscache_n_relinquishes_null;
52792-atomic_t fscache_n_relinquishes_waitcrt;
52793-atomic_t fscache_n_relinquishes_retire;
52794+atomic_unchecked_t fscache_n_relinquishes;
52795+atomic_unchecked_t fscache_n_relinquishes_null;
52796+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
52797+atomic_unchecked_t fscache_n_relinquishes_retire;
52798
52799-atomic_t fscache_n_cookie_index;
52800-atomic_t fscache_n_cookie_data;
52801-atomic_t fscache_n_cookie_special;
52802+atomic_unchecked_t fscache_n_cookie_index;
52803+atomic_unchecked_t fscache_n_cookie_data;
52804+atomic_unchecked_t fscache_n_cookie_special;
52805
52806-atomic_t fscache_n_object_alloc;
52807-atomic_t fscache_n_object_no_alloc;
52808-atomic_t fscache_n_object_lookups;
52809-atomic_t fscache_n_object_lookups_negative;
52810-atomic_t fscache_n_object_lookups_positive;
52811-atomic_t fscache_n_object_lookups_timed_out;
52812-atomic_t fscache_n_object_created;
52813-atomic_t fscache_n_object_avail;
52814-atomic_t fscache_n_object_dead;
52815+atomic_unchecked_t fscache_n_object_alloc;
52816+atomic_unchecked_t fscache_n_object_no_alloc;
52817+atomic_unchecked_t fscache_n_object_lookups;
52818+atomic_unchecked_t fscache_n_object_lookups_negative;
52819+atomic_unchecked_t fscache_n_object_lookups_positive;
52820+atomic_unchecked_t fscache_n_object_lookups_timed_out;
52821+atomic_unchecked_t fscache_n_object_created;
52822+atomic_unchecked_t fscache_n_object_avail;
52823+atomic_unchecked_t fscache_n_object_dead;
52824
52825-atomic_t fscache_n_checkaux_none;
52826-atomic_t fscache_n_checkaux_okay;
52827-atomic_t fscache_n_checkaux_update;
52828-atomic_t fscache_n_checkaux_obsolete;
52829+atomic_unchecked_t fscache_n_checkaux_none;
52830+atomic_unchecked_t fscache_n_checkaux_okay;
52831+atomic_unchecked_t fscache_n_checkaux_update;
52832+atomic_unchecked_t fscache_n_checkaux_obsolete;
52833
52834 atomic_t fscache_n_cop_alloc_object;
52835 atomic_t fscache_n_cop_lookup_object;
52836@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
52837 seq_puts(m, "FS-Cache statistics\n");
52838
52839 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
52840- atomic_read(&fscache_n_cookie_index),
52841- atomic_read(&fscache_n_cookie_data),
52842- atomic_read(&fscache_n_cookie_special));
52843+ atomic_read_unchecked(&fscache_n_cookie_index),
52844+ atomic_read_unchecked(&fscache_n_cookie_data),
52845+ atomic_read_unchecked(&fscache_n_cookie_special));
52846
52847 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
52848- atomic_read(&fscache_n_object_alloc),
52849- atomic_read(&fscache_n_object_no_alloc),
52850- atomic_read(&fscache_n_object_avail),
52851- atomic_read(&fscache_n_object_dead));
52852+ atomic_read_unchecked(&fscache_n_object_alloc),
52853+ atomic_read_unchecked(&fscache_n_object_no_alloc),
52854+ atomic_read_unchecked(&fscache_n_object_avail),
52855+ atomic_read_unchecked(&fscache_n_object_dead));
52856 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
52857- atomic_read(&fscache_n_checkaux_none),
52858- atomic_read(&fscache_n_checkaux_okay),
52859- atomic_read(&fscache_n_checkaux_update),
52860- atomic_read(&fscache_n_checkaux_obsolete));
52861+ atomic_read_unchecked(&fscache_n_checkaux_none),
52862+ atomic_read_unchecked(&fscache_n_checkaux_okay),
52863+ atomic_read_unchecked(&fscache_n_checkaux_update),
52864+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
52865
52866 seq_printf(m, "Pages : mrk=%u unc=%u\n",
52867- atomic_read(&fscache_n_marks),
52868- atomic_read(&fscache_n_uncaches));
52869+ atomic_read_unchecked(&fscache_n_marks),
52870+ atomic_read_unchecked(&fscache_n_uncaches));
52871
52872 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
52873 " oom=%u\n",
52874- atomic_read(&fscache_n_acquires),
52875- atomic_read(&fscache_n_acquires_null),
52876- atomic_read(&fscache_n_acquires_no_cache),
52877- atomic_read(&fscache_n_acquires_ok),
52878- atomic_read(&fscache_n_acquires_nobufs),
52879- atomic_read(&fscache_n_acquires_oom));
52880+ atomic_read_unchecked(&fscache_n_acquires),
52881+ atomic_read_unchecked(&fscache_n_acquires_null),
52882+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
52883+ atomic_read_unchecked(&fscache_n_acquires_ok),
52884+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
52885+ atomic_read_unchecked(&fscache_n_acquires_oom));
52886
52887 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
52888- atomic_read(&fscache_n_object_lookups),
52889- atomic_read(&fscache_n_object_lookups_negative),
52890- atomic_read(&fscache_n_object_lookups_positive),
52891- atomic_read(&fscache_n_object_created),
52892- atomic_read(&fscache_n_object_lookups_timed_out));
52893+ atomic_read_unchecked(&fscache_n_object_lookups),
52894+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
52895+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
52896+ atomic_read_unchecked(&fscache_n_object_created),
52897+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
52898
52899 seq_printf(m, "Invals : n=%u run=%u\n",
52900- atomic_read(&fscache_n_invalidates),
52901- atomic_read(&fscache_n_invalidates_run));
52902+ atomic_read_unchecked(&fscache_n_invalidates),
52903+ atomic_read_unchecked(&fscache_n_invalidates_run));
52904
52905 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
52906- atomic_read(&fscache_n_updates),
52907- atomic_read(&fscache_n_updates_null),
52908- atomic_read(&fscache_n_updates_run));
52909+ atomic_read_unchecked(&fscache_n_updates),
52910+ atomic_read_unchecked(&fscache_n_updates_null),
52911+ atomic_read_unchecked(&fscache_n_updates_run));
52912
52913 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
52914- atomic_read(&fscache_n_relinquishes),
52915- atomic_read(&fscache_n_relinquishes_null),
52916- atomic_read(&fscache_n_relinquishes_waitcrt),
52917- atomic_read(&fscache_n_relinquishes_retire));
52918+ atomic_read_unchecked(&fscache_n_relinquishes),
52919+ atomic_read_unchecked(&fscache_n_relinquishes_null),
52920+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
52921+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
52922
52923 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
52924- atomic_read(&fscache_n_attr_changed),
52925- atomic_read(&fscache_n_attr_changed_ok),
52926- atomic_read(&fscache_n_attr_changed_nobufs),
52927- atomic_read(&fscache_n_attr_changed_nomem),
52928- atomic_read(&fscache_n_attr_changed_calls));
52929+ atomic_read_unchecked(&fscache_n_attr_changed),
52930+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
52931+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
52932+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
52933+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
52934
52935 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
52936- atomic_read(&fscache_n_allocs),
52937- atomic_read(&fscache_n_allocs_ok),
52938- atomic_read(&fscache_n_allocs_wait),
52939- atomic_read(&fscache_n_allocs_nobufs),
52940- atomic_read(&fscache_n_allocs_intr));
52941+ atomic_read_unchecked(&fscache_n_allocs),
52942+ atomic_read_unchecked(&fscache_n_allocs_ok),
52943+ atomic_read_unchecked(&fscache_n_allocs_wait),
52944+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
52945+ atomic_read_unchecked(&fscache_n_allocs_intr));
52946 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
52947- atomic_read(&fscache_n_alloc_ops),
52948- atomic_read(&fscache_n_alloc_op_waits),
52949- atomic_read(&fscache_n_allocs_object_dead));
52950+ atomic_read_unchecked(&fscache_n_alloc_ops),
52951+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
52952+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
52953
52954 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
52955 " int=%u oom=%u\n",
52956- atomic_read(&fscache_n_retrievals),
52957- atomic_read(&fscache_n_retrievals_ok),
52958- atomic_read(&fscache_n_retrievals_wait),
52959- atomic_read(&fscache_n_retrievals_nodata),
52960- atomic_read(&fscache_n_retrievals_nobufs),
52961- atomic_read(&fscache_n_retrievals_intr),
52962- atomic_read(&fscache_n_retrievals_nomem));
52963+ atomic_read_unchecked(&fscache_n_retrievals),
52964+ atomic_read_unchecked(&fscache_n_retrievals_ok),
52965+ atomic_read_unchecked(&fscache_n_retrievals_wait),
52966+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
52967+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
52968+ atomic_read_unchecked(&fscache_n_retrievals_intr),
52969+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
52970 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
52971- atomic_read(&fscache_n_retrieval_ops),
52972- atomic_read(&fscache_n_retrieval_op_waits),
52973- atomic_read(&fscache_n_retrievals_object_dead));
52974+ atomic_read_unchecked(&fscache_n_retrieval_ops),
52975+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
52976+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
52977
52978 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
52979- atomic_read(&fscache_n_stores),
52980- atomic_read(&fscache_n_stores_ok),
52981- atomic_read(&fscache_n_stores_again),
52982- atomic_read(&fscache_n_stores_nobufs),
52983- atomic_read(&fscache_n_stores_oom));
52984+ atomic_read_unchecked(&fscache_n_stores),
52985+ atomic_read_unchecked(&fscache_n_stores_ok),
52986+ atomic_read_unchecked(&fscache_n_stores_again),
52987+ atomic_read_unchecked(&fscache_n_stores_nobufs),
52988+ atomic_read_unchecked(&fscache_n_stores_oom));
52989 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
52990- atomic_read(&fscache_n_store_ops),
52991- atomic_read(&fscache_n_store_calls),
52992- atomic_read(&fscache_n_store_pages),
52993- atomic_read(&fscache_n_store_radix_deletes),
52994- atomic_read(&fscache_n_store_pages_over_limit));
52995+ atomic_read_unchecked(&fscache_n_store_ops),
52996+ atomic_read_unchecked(&fscache_n_store_calls),
52997+ atomic_read_unchecked(&fscache_n_store_pages),
52998+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
52999+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
53000
53001 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
53002- atomic_read(&fscache_n_store_vmscan_not_storing),
53003- atomic_read(&fscache_n_store_vmscan_gone),
53004- atomic_read(&fscache_n_store_vmscan_busy),
53005- atomic_read(&fscache_n_store_vmscan_cancelled),
53006- atomic_read(&fscache_n_store_vmscan_wait));
53007+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
53008+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
53009+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
53010+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
53011+ atomic_read_unchecked(&fscache_n_store_vmscan_wait));
53012
53013 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
53014- atomic_read(&fscache_n_op_pend),
53015- atomic_read(&fscache_n_op_run),
53016- atomic_read(&fscache_n_op_enqueue),
53017- atomic_read(&fscache_n_op_cancelled),
53018- atomic_read(&fscache_n_op_rejected));
53019+ atomic_read_unchecked(&fscache_n_op_pend),
53020+ atomic_read_unchecked(&fscache_n_op_run),
53021+ atomic_read_unchecked(&fscache_n_op_enqueue),
53022+ atomic_read_unchecked(&fscache_n_op_cancelled),
53023+ atomic_read_unchecked(&fscache_n_op_rejected));
53024 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
53025- atomic_read(&fscache_n_op_deferred_release),
53026- atomic_read(&fscache_n_op_release),
53027- atomic_read(&fscache_n_op_gc));
53028+ atomic_read_unchecked(&fscache_n_op_deferred_release),
53029+ atomic_read_unchecked(&fscache_n_op_release),
53030+ atomic_read_unchecked(&fscache_n_op_gc));
53031
53032 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
53033 atomic_read(&fscache_n_cop_alloc_object),
53034diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
53035index e397b67..b0d8709 100644
53036--- a/fs/fuse/cuse.c
53037+++ b/fs/fuse/cuse.c
53038@@ -593,10 +593,12 @@ static int __init cuse_init(void)
53039 INIT_LIST_HEAD(&cuse_conntbl[i]);
53040
53041 /* inherit and extend fuse_dev_operations */
53042- cuse_channel_fops = fuse_dev_operations;
53043- cuse_channel_fops.owner = THIS_MODULE;
53044- cuse_channel_fops.open = cuse_channel_open;
53045- cuse_channel_fops.release = cuse_channel_release;
53046+ pax_open_kernel();
53047+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
53048+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
53049+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
53050+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
53051+ pax_close_kernel();
53052
53053 cuse_class = class_create(THIS_MODULE, "cuse");
53054 if (IS_ERR(cuse_class))
53055diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
53056index e83351a..41e3c9c 100644
53057--- a/fs/fuse/dev.c
53058+++ b/fs/fuse/dev.c
53059@@ -1236,7 +1236,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
53060 ret = 0;
53061 pipe_lock(pipe);
53062
53063- if (!pipe->readers) {
53064+ if (!atomic_read(&pipe->readers)) {
53065 send_sig(SIGPIPE, current, 0);
53066 if (!ret)
53067 ret = -EPIPE;
53068diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
53069index 315e1f8..91f890c 100644
53070--- a/fs/fuse/dir.c
53071+++ b/fs/fuse/dir.c
53072@@ -1233,7 +1233,7 @@ static char *read_link(struct dentry *dentry)
53073 return link;
53074 }
53075
53076-static void free_link(char *link)
53077+static void free_link(const char *link)
53078 {
53079 if (!IS_ERR(link))
53080 free_page((unsigned long) link);
53081diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
53082index 2b6f569..fcb4d1f 100644
53083--- a/fs/gfs2/inode.c
53084+++ b/fs/gfs2/inode.c
53085@@ -1499,7 +1499,7 @@ out:
53086
53087 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
53088 {
53089- char *s = nd_get_link(nd);
53090+ const char *s = nd_get_link(nd);
53091 if (!IS_ERR(s))
53092 kfree(s);
53093 }
53094diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
53095index 78bde32..767e906 100644
53096--- a/fs/hugetlbfs/inode.c
53097+++ b/fs/hugetlbfs/inode.c
53098@@ -152,6 +152,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
53099 struct mm_struct *mm = current->mm;
53100 struct vm_area_struct *vma;
53101 struct hstate *h = hstate_file(file);
53102+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
53103 struct vm_unmapped_area_info info;
53104
53105 if (len & ~huge_page_mask(h))
53106@@ -165,17 +166,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
53107 return addr;
53108 }
53109
53110+#ifdef CONFIG_PAX_RANDMMAP
53111+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
53112+#endif
53113+
53114 if (addr) {
53115 addr = ALIGN(addr, huge_page_size(h));
53116 vma = find_vma(mm, addr);
53117- if (TASK_SIZE - len >= addr &&
53118- (!vma || addr + len <= vma->vm_start))
53119+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
53120 return addr;
53121 }
53122
53123 info.flags = 0;
53124 info.length = len;
53125 info.low_limit = TASK_UNMAPPED_BASE;
53126+
53127+#ifdef CONFIG_PAX_RANDMMAP
53128+ if (mm->pax_flags & MF_PAX_RANDMMAP)
53129+ info.low_limit += mm->delta_mmap;
53130+#endif
53131+
53132 info.high_limit = TASK_SIZE;
53133 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
53134 info.align_offset = 0;
53135@@ -897,7 +907,7 @@ static struct file_system_type hugetlbfs_fs_type = {
53136 .kill_sb = kill_litter_super,
53137 };
53138
53139-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
53140+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
53141
53142 static int can_do_hugetlb_shm(void)
53143 {
53144diff --git a/fs/inode.c b/fs/inode.c
53145index 14084b7..29af1d9 100644
53146--- a/fs/inode.c
53147+++ b/fs/inode.c
53148@@ -880,8 +880,8 @@ unsigned int get_next_ino(void)
53149
53150 #ifdef CONFIG_SMP
53151 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
53152- static atomic_t shared_last_ino;
53153- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
53154+ static atomic_unchecked_t shared_last_ino;
53155+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
53156
53157 res = next - LAST_INO_BATCH;
53158 }
53159diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
53160index 4a6cf28..d3a29d3 100644
53161--- a/fs/jffs2/erase.c
53162+++ b/fs/jffs2/erase.c
53163@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
53164 struct jffs2_unknown_node marker = {
53165 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
53166 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
53167- .totlen = cpu_to_je32(c->cleanmarker_size)
53168+ .totlen = cpu_to_je32(c->cleanmarker_size),
53169+ .hdr_crc = cpu_to_je32(0)
53170 };
53171
53172 jffs2_prealloc_raw_node_refs(c, jeb, 1);
53173diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
53174index a6597d6..41b30ec 100644
53175--- a/fs/jffs2/wbuf.c
53176+++ b/fs/jffs2/wbuf.c
53177@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
53178 {
53179 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
53180 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
53181- .totlen = constant_cpu_to_je32(8)
53182+ .totlen = constant_cpu_to_je32(8),
53183+ .hdr_crc = constant_cpu_to_je32(0)
53184 };
53185
53186 /*
53187diff --git a/fs/jfs/super.c b/fs/jfs/super.c
53188index 1a543be..a4e1363 100644
53189--- a/fs/jfs/super.c
53190+++ b/fs/jfs/super.c
53191@@ -225,7 +225,7 @@ static const match_table_t tokens = {
53192 static int parse_options(char *options, struct super_block *sb, s64 *newLVSize,
53193 int *flag)
53194 {
53195- void *nls_map = (void *)-1; /* -1: no change; NULL: none */
53196+ const void *nls_map = (const void *)-1; /* -1: no change; NULL: none */
53197 char *p;
53198 struct jfs_sb_info *sbi = JFS_SBI(sb);
53199
53200@@ -253,7 +253,7 @@ static int parse_options(char *options, struct super_block *sb, s64 *newLVSize,
53201 /* Don't do anything ;-) */
53202 break;
53203 case Opt_iocharset:
53204- if (nls_map && nls_map != (void *) -1)
53205+ if (nls_map && nls_map != (const void *) -1)
53206 unload_nls(nls_map);
53207 if (!strcmp(args[0].from, "none"))
53208 nls_map = NULL;
53209@@ -855,7 +855,7 @@ static int __init init_jfs_fs(void)
53210
53211 jfs_inode_cachep =
53212 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
53213- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
53214+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
53215 init_once);
53216 if (jfs_inode_cachep == NULL)
53217 return -ENOMEM;
53218diff --git a/fs/libfs.c b/fs/libfs.c
53219index 916da8c..1588998 100644
53220--- a/fs/libfs.c
53221+++ b/fs/libfs.c
53222@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
53223
53224 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
53225 struct dentry *next;
53226+ char d_name[sizeof(next->d_iname)];
53227+ const unsigned char *name;
53228+
53229 next = list_entry(p, struct dentry, d_u.d_child);
53230 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
53231 if (!simple_positive(next)) {
53232@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
53233
53234 spin_unlock(&next->d_lock);
53235 spin_unlock(&dentry->d_lock);
53236- if (filldir(dirent, next->d_name.name,
53237+ name = next->d_name.name;
53238+ if (name == next->d_iname) {
53239+ memcpy(d_name, name, next->d_name.len);
53240+ name = d_name;
53241+ }
53242+ if (filldir(dirent, name,
53243 next->d_name.len, filp->f_pos,
53244 next->d_inode->i_ino,
53245 dt_type(next->d_inode)) < 0)
53246diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
53247index 52e5120..808936e 100644
53248--- a/fs/lockd/clntproc.c
53249+++ b/fs/lockd/clntproc.c
53250@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
53251 /*
53252 * Cookie counter for NLM requests
53253 */
53254-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
53255+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
53256
53257 void nlmclnt_next_cookie(struct nlm_cookie *c)
53258 {
53259- u32 cookie = atomic_inc_return(&nlm_cookie);
53260+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
53261
53262 memcpy(c->data, &cookie, 4);
53263 c->len=4;
53264diff --git a/fs/locks.c b/fs/locks.c
53265index a94e331..060bce3 100644
53266--- a/fs/locks.c
53267+++ b/fs/locks.c
53268@@ -2064,16 +2064,16 @@ void locks_remove_flock(struct file *filp)
53269 return;
53270
53271 if (filp->f_op && filp->f_op->flock) {
53272- struct file_lock fl = {
53273+ struct file_lock flock = {
53274 .fl_pid = current->tgid,
53275 .fl_file = filp,
53276 .fl_flags = FL_FLOCK,
53277 .fl_type = F_UNLCK,
53278 .fl_end = OFFSET_MAX,
53279 };
53280- filp->f_op->flock(filp, F_SETLKW, &fl);
53281- if (fl.fl_ops && fl.fl_ops->fl_release_private)
53282- fl.fl_ops->fl_release_private(&fl);
53283+ filp->f_op->flock(filp, F_SETLKW, &flock);
53284+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
53285+ flock.fl_ops->fl_release_private(&flock);
53286 }
53287
53288 lock_flocks();
53289diff --git a/fs/namei.c b/fs/namei.c
53290index ec97aef..e67718d 100644
53291--- a/fs/namei.c
53292+++ b/fs/namei.c
53293@@ -319,16 +319,32 @@ int generic_permission(struct inode *inode, int mask)
53294 if (ret != -EACCES)
53295 return ret;
53296
53297+#ifdef CONFIG_GRKERNSEC
53298+ /* we'll block if we have to log due to a denied capability use */
53299+ if (mask & MAY_NOT_BLOCK)
53300+ return -ECHILD;
53301+#endif
53302+
53303 if (S_ISDIR(inode->i_mode)) {
53304 /* DACs are overridable for directories */
53305- if (inode_capable(inode, CAP_DAC_OVERRIDE))
53306- return 0;
53307 if (!(mask & MAY_WRITE))
53308- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
53309+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
53310+ inode_capable(inode, CAP_DAC_READ_SEARCH))
53311 return 0;
53312+ if (inode_capable(inode, CAP_DAC_OVERRIDE))
53313+ return 0;
53314 return -EACCES;
53315 }
53316 /*
53317+ * Searching includes executable on directories, else just read.
53318+ */
53319+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
53320+ if (mask == MAY_READ)
53321+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
53322+ inode_capable(inode, CAP_DAC_READ_SEARCH))
53323+ return 0;
53324+
53325+ /*
53326 * Read/write DACs are always overridable.
53327 * Executable DACs are overridable when there is
53328 * at least one exec bit set.
53329@@ -337,14 +353,6 @@ int generic_permission(struct inode *inode, int mask)
53330 if (inode_capable(inode, CAP_DAC_OVERRIDE))
53331 return 0;
53332
53333- /*
53334- * Searching includes executable on directories, else just read.
53335- */
53336- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
53337- if (mask == MAY_READ)
53338- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
53339- return 0;
53340-
53341 return -EACCES;
53342 }
53343
53344@@ -824,7 +832,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
53345 {
53346 struct dentry *dentry = link->dentry;
53347 int error;
53348- char *s;
53349+ const char *s;
53350
53351 BUG_ON(nd->flags & LOOKUP_RCU);
53352
53353@@ -845,6 +853,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
53354 if (error)
53355 goto out_put_nd_path;
53356
53357+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
53358+ dentry->d_inode, dentry, nd->path.mnt)) {
53359+ error = -EACCES;
53360+ goto out_put_nd_path;
53361+ }
53362+
53363 nd->last_type = LAST_BIND;
53364 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
53365 error = PTR_ERR(*p);
53366@@ -1594,6 +1608,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
53367 break;
53368 res = walk_component(nd, path, &nd->last,
53369 nd->last_type, LOOKUP_FOLLOW);
53370+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
53371+ res = -EACCES;
53372 put_link(nd, &link, cookie);
53373 } while (res > 0);
53374
53375@@ -1692,7 +1708,7 @@ EXPORT_SYMBOL(full_name_hash);
53376 static inline unsigned long hash_name(const char *name, unsigned int *hashp)
53377 {
53378 unsigned long a, b, adata, bdata, mask, hash, len;
53379- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
53380+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
53381
53382 hash = a = 0;
53383 len = -sizeof(unsigned long);
53384@@ -1977,6 +1993,8 @@ static int path_lookupat(int dfd, const char *name,
53385 if (err)
53386 break;
53387 err = lookup_last(nd, &path);
53388+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
53389+ err = -EACCES;
53390 put_link(nd, &link, cookie);
53391 }
53392 }
53393@@ -1984,6 +2002,13 @@ static int path_lookupat(int dfd, const char *name,
53394 if (!err)
53395 err = complete_walk(nd);
53396
53397+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
53398+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
53399+ path_put(&nd->path);
53400+ err = -ENOENT;
53401+ }
53402+ }
53403+
53404 if (!err && nd->flags & LOOKUP_DIRECTORY) {
53405 if (!nd->inode->i_op->lookup) {
53406 path_put(&nd->path);
53407@@ -2011,8 +2036,15 @@ static int filename_lookup(int dfd, struct filename *name,
53408 retval = path_lookupat(dfd, name->name,
53409 flags | LOOKUP_REVAL, nd);
53410
53411- if (likely(!retval))
53412+ if (likely(!retval)) {
53413 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
53414+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
53415+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
53416+ path_put(&nd->path);
53417+ return -ENOENT;
53418+ }
53419+ }
53420+ }
53421 return retval;
53422 }
53423
53424@@ -2390,6 +2422,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
53425 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
53426 return -EPERM;
53427
53428+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
53429+ return -EPERM;
53430+ if (gr_handle_rawio(inode))
53431+ return -EPERM;
53432+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
53433+ return -EACCES;
53434+
53435 return 0;
53436 }
53437
53438@@ -2611,7 +2650,7 @@ looked_up:
53439 * cleared otherwise prior to returning.
53440 */
53441 static int lookup_open(struct nameidata *nd, struct path *path,
53442- struct file *file,
53443+ struct path *link, struct file *file,
53444 const struct open_flags *op,
53445 bool got_write, int *opened)
53446 {
53447@@ -2646,6 +2685,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
53448 /* Negative dentry, just create the file */
53449 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
53450 umode_t mode = op->mode;
53451+
53452+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
53453+ error = -EACCES;
53454+ goto out_dput;
53455+ }
53456+
53457+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
53458+ error = -EACCES;
53459+ goto out_dput;
53460+ }
53461+
53462 if (!IS_POSIXACL(dir->d_inode))
53463 mode &= ~current_umask();
53464 /*
53465@@ -2667,6 +2717,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
53466 nd->flags & LOOKUP_EXCL);
53467 if (error)
53468 goto out_dput;
53469+ else
53470+ gr_handle_create(dentry, nd->path.mnt);
53471 }
53472 out_no_open:
53473 path->dentry = dentry;
53474@@ -2681,7 +2733,7 @@ out_dput:
53475 /*
53476 * Handle the last step of open()
53477 */
53478-static int do_last(struct nameidata *nd, struct path *path,
53479+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
53480 struct file *file, const struct open_flags *op,
53481 int *opened, struct filename *name)
53482 {
53483@@ -2710,16 +2762,32 @@ static int do_last(struct nameidata *nd, struct path *path,
53484 error = complete_walk(nd);
53485 if (error)
53486 return error;
53487+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
53488+ error = -ENOENT;
53489+ goto out;
53490+ }
53491 audit_inode(name, nd->path.dentry, 0);
53492 if (open_flag & O_CREAT) {
53493 error = -EISDIR;
53494 goto out;
53495 }
53496+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
53497+ error = -EACCES;
53498+ goto out;
53499+ }
53500 goto finish_open;
53501 case LAST_BIND:
53502 error = complete_walk(nd);
53503 if (error)
53504 return error;
53505+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
53506+ error = -ENOENT;
53507+ goto out;
53508+ }
53509+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
53510+ error = -EACCES;
53511+ goto out;
53512+ }
53513 audit_inode(name, dir, 0);
53514 goto finish_open;
53515 }
53516@@ -2768,7 +2836,7 @@ retry_lookup:
53517 */
53518 }
53519 mutex_lock(&dir->d_inode->i_mutex);
53520- error = lookup_open(nd, path, file, op, got_write, opened);
53521+ error = lookup_open(nd, path, link, file, op, got_write, opened);
53522 mutex_unlock(&dir->d_inode->i_mutex);
53523
53524 if (error <= 0) {
53525@@ -2792,11 +2860,28 @@ retry_lookup:
53526 goto finish_open_created;
53527 }
53528
53529+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
53530+ error = -ENOENT;
53531+ goto exit_dput;
53532+ }
53533+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
53534+ error = -EACCES;
53535+ goto exit_dput;
53536+ }
53537+
53538 /*
53539 * create/update audit record if it already exists.
53540 */
53541- if (path->dentry->d_inode)
53542+ if (path->dentry->d_inode) {
53543+ /* only check if O_CREAT is specified, all other checks need to go
53544+ into may_open */
53545+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
53546+ error = -EACCES;
53547+ goto exit_dput;
53548+ }
53549+
53550 audit_inode(name, path->dentry, 0);
53551+ }
53552
53553 /*
53554 * If atomic_open() acquired write access it is dropped now due to
53555@@ -2837,6 +2922,11 @@ finish_lookup:
53556 }
53557 }
53558 BUG_ON(inode != path->dentry->d_inode);
53559+ /* if we're resolving a symlink to another symlink */
53560+ if (link && gr_handle_symlink_owner(link, inode)) {
53561+ error = -EACCES;
53562+ goto out;
53563+ }
53564 return 1;
53565 }
53566
53567@@ -2846,7 +2936,6 @@ finish_lookup:
53568 save_parent.dentry = nd->path.dentry;
53569 save_parent.mnt = mntget(path->mnt);
53570 nd->path.dentry = path->dentry;
53571-
53572 }
53573 nd->inode = inode;
53574 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
53575@@ -2855,6 +2944,16 @@ finish_lookup:
53576 path_put(&save_parent);
53577 return error;
53578 }
53579+
53580+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
53581+ error = -ENOENT;
53582+ goto out;
53583+ }
53584+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
53585+ error = -EACCES;
53586+ goto out;
53587+ }
53588+
53589 error = -EISDIR;
53590 if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode))
53591 goto out;
53592@@ -2953,7 +3052,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
53593 if (unlikely(error))
53594 goto out;
53595
53596- error = do_last(nd, &path, file, op, &opened, pathname);
53597+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
53598 while (unlikely(error > 0)) { /* trailing symlink */
53599 struct path link = path;
53600 void *cookie;
53601@@ -2971,7 +3070,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
53602 error = follow_link(&link, nd, &cookie);
53603 if (unlikely(error))
53604 break;
53605- error = do_last(nd, &path, file, op, &opened, pathname);
53606+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
53607 put_link(nd, &link, cookie);
53608 }
53609 out:
53610@@ -3071,8 +3170,12 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
53611 goto unlock;
53612
53613 error = -EEXIST;
53614- if (dentry->d_inode)
53615+ if (dentry->d_inode) {
53616+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
53617+ error = -ENOENT;
53618+ }
53619 goto fail;
53620+ }
53621 /*
53622 * Special case - lookup gave negative, but... we had foo/bar/
53623 * From the vfs_mknod() POV we just have a negative dentry -
53624@@ -3124,6 +3227,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
53625 }
53626 EXPORT_SYMBOL(user_path_create);
53627
53628+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
53629+{
53630+ struct filename *tmp = getname(pathname);
53631+ struct dentry *res;
53632+ if (IS_ERR(tmp))
53633+ return ERR_CAST(tmp);
53634+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
53635+ if (IS_ERR(res))
53636+ putname(tmp);
53637+ else
53638+ *to = tmp;
53639+ return res;
53640+}
53641+
53642 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
53643 {
53644 int error = may_create(dir, dentry);
53645@@ -3186,6 +3303,17 @@ retry:
53646
53647 if (!IS_POSIXACL(path.dentry->d_inode))
53648 mode &= ~current_umask();
53649+
53650+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
53651+ error = -EPERM;
53652+ goto out;
53653+ }
53654+
53655+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
53656+ error = -EACCES;
53657+ goto out;
53658+ }
53659+
53660 error = security_path_mknod(&path, dentry, mode, dev);
53661 if (error)
53662 goto out;
53663@@ -3202,6 +3330,8 @@ retry:
53664 break;
53665 }
53666 out:
53667+ if (!error)
53668+ gr_handle_create(dentry, path.mnt);
53669 done_path_create(&path, dentry);
53670 if (retry_estale(error, lookup_flags)) {
53671 lookup_flags |= LOOKUP_REVAL;
53672@@ -3254,9 +3384,16 @@ retry:
53673
53674 if (!IS_POSIXACL(path.dentry->d_inode))
53675 mode &= ~current_umask();
53676+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
53677+ error = -EACCES;
53678+ goto out;
53679+ }
53680 error = security_path_mkdir(&path, dentry, mode);
53681 if (!error)
53682 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
53683+ if (!error)
53684+ gr_handle_create(dentry, path.mnt);
53685+out:
53686 done_path_create(&path, dentry);
53687 if (retry_estale(error, lookup_flags)) {
53688 lookup_flags |= LOOKUP_REVAL;
53689@@ -3337,6 +3474,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
53690 struct filename *name;
53691 struct dentry *dentry;
53692 struct nameidata nd;
53693+ ino_t saved_ino = 0;
53694+ dev_t saved_dev = 0;
53695 unsigned int lookup_flags = 0;
53696 retry:
53697 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
53698@@ -3369,10 +3508,21 @@ retry:
53699 error = -ENOENT;
53700 goto exit3;
53701 }
53702+
53703+ saved_ino = dentry->d_inode->i_ino;
53704+ saved_dev = gr_get_dev_from_dentry(dentry);
53705+
53706+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
53707+ error = -EACCES;
53708+ goto exit3;
53709+ }
53710+
53711 error = security_path_rmdir(&nd.path, dentry);
53712 if (error)
53713 goto exit3;
53714 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
53715+ if (!error && (saved_dev || saved_ino))
53716+ gr_handle_delete(saved_ino, saved_dev);
53717 exit3:
53718 dput(dentry);
53719 exit2:
53720@@ -3438,6 +3588,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
53721 struct dentry *dentry;
53722 struct nameidata nd;
53723 struct inode *inode = NULL;
53724+ ino_t saved_ino = 0;
53725+ dev_t saved_dev = 0;
53726 unsigned int lookup_flags = 0;
53727 retry:
53728 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
53729@@ -3464,10 +3616,22 @@ retry:
53730 if (!inode)
53731 goto slashes;
53732 ihold(inode);
53733+
53734+ if (inode->i_nlink <= 1) {
53735+ saved_ino = inode->i_ino;
53736+ saved_dev = gr_get_dev_from_dentry(dentry);
53737+ }
53738+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
53739+ error = -EACCES;
53740+ goto exit2;
53741+ }
53742+
53743 error = security_path_unlink(&nd.path, dentry);
53744 if (error)
53745 goto exit2;
53746 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
53747+ if (!error && (saved_ino || saved_dev))
53748+ gr_handle_delete(saved_ino, saved_dev);
53749 exit2:
53750 dput(dentry);
53751 }
53752@@ -3545,9 +3709,17 @@ retry:
53753 if (IS_ERR(dentry))
53754 goto out_putname;
53755
53756+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
53757+ error = -EACCES;
53758+ goto out;
53759+ }
53760+
53761 error = security_path_symlink(&path, dentry, from->name);
53762 if (!error)
53763 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
53764+ if (!error)
53765+ gr_handle_create(dentry, path.mnt);
53766+out:
53767 done_path_create(&path, dentry);
53768 if (retry_estale(error, lookup_flags)) {
53769 lookup_flags |= LOOKUP_REVAL;
53770@@ -3621,6 +3793,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
53771 {
53772 struct dentry *new_dentry;
53773 struct path old_path, new_path;
53774+ struct filename *to = NULL;
53775 int how = 0;
53776 int error;
53777
53778@@ -3644,7 +3817,7 @@ retry:
53779 if (error)
53780 return error;
53781
53782- new_dentry = user_path_create(newdfd, newname, &new_path,
53783+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
53784 (how & LOOKUP_REVAL));
53785 error = PTR_ERR(new_dentry);
53786 if (IS_ERR(new_dentry))
53787@@ -3656,11 +3829,28 @@ retry:
53788 error = may_linkat(&old_path);
53789 if (unlikely(error))
53790 goto out_dput;
53791+
53792+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
53793+ old_path.dentry->d_inode,
53794+ old_path.dentry->d_inode->i_mode, to)) {
53795+ error = -EACCES;
53796+ goto out_dput;
53797+ }
53798+
53799+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
53800+ old_path.dentry, old_path.mnt, to)) {
53801+ error = -EACCES;
53802+ goto out_dput;
53803+ }
53804+
53805 error = security_path_link(old_path.dentry, &new_path, new_dentry);
53806 if (error)
53807 goto out_dput;
53808 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
53809+ if (!error)
53810+ gr_handle_create(new_dentry, new_path.mnt);
53811 out_dput:
53812+ putname(to);
53813 done_path_create(&new_path, new_dentry);
53814 if (retry_estale(error, how)) {
53815 how |= LOOKUP_REVAL;
53816@@ -3906,12 +4096,21 @@ retry:
53817 if (new_dentry == trap)
53818 goto exit5;
53819
53820+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
53821+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
53822+ to);
53823+ if (error)
53824+ goto exit5;
53825+
53826 error = security_path_rename(&oldnd.path, old_dentry,
53827 &newnd.path, new_dentry);
53828 if (error)
53829 goto exit5;
53830 error = vfs_rename(old_dir->d_inode, old_dentry,
53831 new_dir->d_inode, new_dentry);
53832+ if (!error)
53833+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
53834+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
53835 exit5:
53836 dput(new_dentry);
53837 exit4:
53838@@ -3943,6 +4142,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
53839
53840 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
53841 {
53842+ char tmpbuf[64];
53843+ const char *newlink;
53844 int len;
53845
53846 len = PTR_ERR(link);
53847@@ -3952,7 +4153,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
53848 len = strlen(link);
53849 if (len > (unsigned) buflen)
53850 len = buflen;
53851- if (copy_to_user(buffer, link, len))
53852+
53853+ if (len < sizeof(tmpbuf)) {
53854+ memcpy(tmpbuf, link, len);
53855+ newlink = tmpbuf;
53856+ } else
53857+ newlink = link;
53858+
53859+ if (copy_to_user(buffer, newlink, len))
53860 len = -EFAULT;
53861 out:
53862 return len;
53863diff --git a/fs/namespace.c b/fs/namespace.c
53864index 5dd7709..0002ebe 100644
53865--- a/fs/namespace.c
53866+++ b/fs/namespace.c
53867@@ -1219,6 +1219,9 @@ static int do_umount(struct mount *mnt, int flags)
53868 if (!(sb->s_flags & MS_RDONLY))
53869 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
53870 up_write(&sb->s_umount);
53871+
53872+ gr_log_remount(mnt->mnt_devname, retval);
53873+
53874 return retval;
53875 }
53876
53877@@ -1238,6 +1241,9 @@ static int do_umount(struct mount *mnt, int flags)
53878 br_write_unlock(&vfsmount_lock);
53879 up_write(&namespace_sem);
53880 release_mounts(&umount_list);
53881+
53882+ gr_log_unmount(mnt->mnt_devname, retval);
53883+
53884 return retval;
53885 }
53886
53887@@ -2294,6 +2300,16 @@ long do_mount(const char *dev_name, const char *dir_name,
53888 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
53889 MS_STRICTATIME);
53890
53891+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
53892+ retval = -EPERM;
53893+ goto dput_out;
53894+ }
53895+
53896+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
53897+ retval = -EPERM;
53898+ goto dput_out;
53899+ }
53900+
53901 if (flags & MS_REMOUNT)
53902 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
53903 data_page);
53904@@ -2308,6 +2324,9 @@ long do_mount(const char *dev_name, const char *dir_name,
53905 dev_name, data_page);
53906 dput_out:
53907 path_put(&path);
53908+
53909+ gr_log_mount(dev_name, dir_name, retval);
53910+
53911 return retval;
53912 }
53913
53914@@ -2594,6 +2613,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
53915 if (error)
53916 goto out2;
53917
53918+ if (gr_handle_chroot_pivot()) {
53919+ error = -EPERM;
53920+ goto out2;
53921+ }
53922+
53923 get_fs_root(current->fs, &root);
53924 error = lock_mount(&old);
53925 if (error)
53926@@ -2842,7 +2866,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
53927 !nsown_capable(CAP_SYS_ADMIN))
53928 return -EPERM;
53929
53930- if (fs->users != 1)
53931+ if (atomic_read(&fs->users) != 1)
53932 return -EINVAL;
53933
53934 get_mnt_ns(mnt_ns);
53935diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
53936index 59461c9..b17c57e 100644
53937--- a/fs/nfs/callback_xdr.c
53938+++ b/fs/nfs/callback_xdr.c
53939@@ -51,7 +51,7 @@ struct callback_op {
53940 callback_decode_arg_t decode_args;
53941 callback_encode_res_t encode_res;
53942 long res_maxsize;
53943-};
53944+} __do_const;
53945
53946 static struct callback_op callback_ops[];
53947
53948diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
53949index ebeb94c..ff35337 100644
53950--- a/fs/nfs/inode.c
53951+++ b/fs/nfs/inode.c
53952@@ -1042,16 +1042,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
53953 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
53954 }
53955
53956-static atomic_long_t nfs_attr_generation_counter;
53957+static atomic_long_unchecked_t nfs_attr_generation_counter;
53958
53959 static unsigned long nfs_read_attr_generation_counter(void)
53960 {
53961- return atomic_long_read(&nfs_attr_generation_counter);
53962+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
53963 }
53964
53965 unsigned long nfs_inc_attr_generation_counter(void)
53966 {
53967- return atomic_long_inc_return(&nfs_attr_generation_counter);
53968+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
53969 }
53970
53971 void nfs_fattr_init(struct nfs_fattr *fattr)
53972diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
53973index 9d1c5db..1e13db8 100644
53974--- a/fs/nfsd/nfs4proc.c
53975+++ b/fs/nfsd/nfs4proc.c
53976@@ -1097,7 +1097,7 @@ struct nfsd4_operation {
53977 nfsd4op_rsize op_rsize_bop;
53978 stateid_getter op_get_currentstateid;
53979 stateid_setter op_set_currentstateid;
53980-};
53981+} __do_const;
53982
53983 static struct nfsd4_operation nfsd4_ops[];
53984
53985diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
53986index d1dd710..32ac0e8 100644
53987--- a/fs/nfsd/nfs4xdr.c
53988+++ b/fs/nfsd/nfs4xdr.c
53989@@ -1456,7 +1456,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
53990
53991 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
53992
53993-static nfsd4_dec nfsd4_dec_ops[] = {
53994+static const nfsd4_dec nfsd4_dec_ops[] = {
53995 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
53996 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
53997 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
53998@@ -1496,7 +1496,7 @@ static nfsd4_dec nfsd4_dec_ops[] = {
53999 [OP_RELEASE_LOCKOWNER] = (nfsd4_dec)nfsd4_decode_release_lockowner,
54000 };
54001
54002-static nfsd4_dec nfsd41_dec_ops[] = {
54003+static const nfsd4_dec nfsd41_dec_ops[] = {
54004 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
54005 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
54006 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
54007@@ -1558,7 +1558,7 @@ static nfsd4_dec nfsd41_dec_ops[] = {
54008 };
54009
54010 struct nfsd4_minorversion_ops {
54011- nfsd4_dec *decoders;
54012+ const nfsd4_dec *decoders;
54013 int nops;
54014 };
54015
54016diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
54017index 69c6413..c0408d2 100644
54018--- a/fs/nfsd/vfs.c
54019+++ b/fs/nfsd/vfs.c
54020@@ -939,7 +939,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
54021 } else {
54022 oldfs = get_fs();
54023 set_fs(KERNEL_DS);
54024- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
54025+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
54026 set_fs(oldfs);
54027 }
54028
54029@@ -1026,7 +1026,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
54030
54031 /* Write the data. */
54032 oldfs = get_fs(); set_fs(KERNEL_DS);
54033- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
54034+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
54035 set_fs(oldfs);
54036 if (host_err < 0)
54037 goto out_nfserr;
54038@@ -1572,7 +1572,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
54039 */
54040
54041 oldfs = get_fs(); set_fs(KERNEL_DS);
54042- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
54043+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
54044 set_fs(oldfs);
54045
54046 if (host_err < 0)
54047diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
54048index fea6bd5..8ee9d81 100644
54049--- a/fs/nls/nls_base.c
54050+++ b/fs/nls/nls_base.c
54051@@ -234,20 +234,22 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
54052
54053 int register_nls(struct nls_table * nls)
54054 {
54055- struct nls_table ** tmp = &tables;
54056+ struct nls_table *tmp = tables;
54057
54058 if (nls->next)
54059 return -EBUSY;
54060
54061 spin_lock(&nls_lock);
54062- while (*tmp) {
54063- if (nls == *tmp) {
54064+ while (tmp) {
54065+ if (nls == tmp) {
54066 spin_unlock(&nls_lock);
54067 return -EBUSY;
54068 }
54069- tmp = &(*tmp)->next;
54070+ tmp = tmp->next;
54071 }
54072- nls->next = tables;
54073+ pax_open_kernel();
54074+ *(struct nls_table **)&nls->next = tables;
54075+ pax_close_kernel();
54076 tables = nls;
54077 spin_unlock(&nls_lock);
54078 return 0;
54079@@ -255,12 +257,14 @@ int register_nls(struct nls_table * nls)
54080
54081 int unregister_nls(struct nls_table * nls)
54082 {
54083- struct nls_table ** tmp = &tables;
54084+ struct nls_table * const * tmp = &tables;
54085
54086 spin_lock(&nls_lock);
54087 while (*tmp) {
54088 if (nls == *tmp) {
54089- *tmp = nls->next;
54090+ pax_open_kernel();
54091+ *(struct nls_table **)tmp = nls->next;
54092+ pax_close_kernel();
54093 spin_unlock(&nls_lock);
54094 return 0;
54095 }
54096diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
54097index 7424929..35f6be5 100644
54098--- a/fs/nls/nls_euc-jp.c
54099+++ b/fs/nls/nls_euc-jp.c
54100@@ -561,8 +561,10 @@ static int __init init_nls_euc_jp(void)
54101 p_nls = load_nls("cp932");
54102
54103 if (p_nls) {
54104- table.charset2upper = p_nls->charset2upper;
54105- table.charset2lower = p_nls->charset2lower;
54106+ pax_open_kernel();
54107+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
54108+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
54109+ pax_close_kernel();
54110 return register_nls(&table);
54111 }
54112
54113diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
54114index e7bc1d7..06bd4bb 100644
54115--- a/fs/nls/nls_koi8-ru.c
54116+++ b/fs/nls/nls_koi8-ru.c
54117@@ -63,8 +63,10 @@ static int __init init_nls_koi8_ru(void)
54118 p_nls = load_nls("koi8-u");
54119
54120 if (p_nls) {
54121- table.charset2upper = p_nls->charset2upper;
54122- table.charset2lower = p_nls->charset2lower;
54123+ pax_open_kernel();
54124+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
54125+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
54126+ pax_close_kernel();
54127 return register_nls(&table);
54128 }
54129
54130diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
54131index 9ff4a5e..deb1f0f 100644
54132--- a/fs/notify/fanotify/fanotify_user.c
54133+++ b/fs/notify/fanotify/fanotify_user.c
54134@@ -251,8 +251,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
54135
54136 fd = fanotify_event_metadata.fd;
54137 ret = -EFAULT;
54138- if (copy_to_user(buf, &fanotify_event_metadata,
54139- fanotify_event_metadata.event_len))
54140+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
54141+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
54142 goto out_close_fd;
54143
54144 ret = prepare_for_access_response(group, event, fd);
54145diff --git a/fs/notify/notification.c b/fs/notify/notification.c
54146index 7b51b05..5ea5ef6 100644
54147--- a/fs/notify/notification.c
54148+++ b/fs/notify/notification.c
54149@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
54150 * get set to 0 so it will never get 'freed'
54151 */
54152 static struct fsnotify_event *q_overflow_event;
54153-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
54154+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
54155
54156 /**
54157 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
54158@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
54159 */
54160 u32 fsnotify_get_cookie(void)
54161 {
54162- return atomic_inc_return(&fsnotify_sync_cookie);
54163+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
54164 }
54165 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
54166
54167diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
54168index 99e3610..02c1068 100644
54169--- a/fs/ntfs/dir.c
54170+++ b/fs/ntfs/dir.c
54171@@ -1329,7 +1329,7 @@ find_next_index_buffer:
54172 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
54173 ~(s64)(ndir->itype.index.block_size - 1)));
54174 /* Bounds checks. */
54175- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
54176+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
54177 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
54178 "inode 0x%lx or driver bug.", vdir->i_ino);
54179 goto err_out;
54180diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
54181index 5b2d4f0..c6de396 100644
54182--- a/fs/ntfs/file.c
54183+++ b/fs/ntfs/file.c
54184@@ -2242,6 +2242,6 @@ const struct inode_operations ntfs_file_inode_ops = {
54185 #endif /* NTFS_RW */
54186 };
54187
54188-const struct file_operations ntfs_empty_file_ops = {};
54189+const struct file_operations ntfs_empty_file_ops __read_only;
54190
54191-const struct inode_operations ntfs_empty_inode_ops = {};
54192+const struct inode_operations ntfs_empty_inode_ops __read_only;
54193diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
54194index a9f78c7..ed8a381 100644
54195--- a/fs/ocfs2/localalloc.c
54196+++ b/fs/ocfs2/localalloc.c
54197@@ -1279,7 +1279,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
54198 goto bail;
54199 }
54200
54201- atomic_inc(&osb->alloc_stats.moves);
54202+ atomic_inc_unchecked(&osb->alloc_stats.moves);
54203
54204 bail:
54205 if (handle)
54206diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
54207index d355e6e..578d905 100644
54208--- a/fs/ocfs2/ocfs2.h
54209+++ b/fs/ocfs2/ocfs2.h
54210@@ -235,11 +235,11 @@ enum ocfs2_vol_state
54211
54212 struct ocfs2_alloc_stats
54213 {
54214- atomic_t moves;
54215- atomic_t local_data;
54216- atomic_t bitmap_data;
54217- atomic_t bg_allocs;
54218- atomic_t bg_extends;
54219+ atomic_unchecked_t moves;
54220+ atomic_unchecked_t local_data;
54221+ atomic_unchecked_t bitmap_data;
54222+ atomic_unchecked_t bg_allocs;
54223+ atomic_unchecked_t bg_extends;
54224 };
54225
54226 enum ocfs2_local_alloc_state
54227diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
54228index b7e74b5..19c6536 100644
54229--- a/fs/ocfs2/suballoc.c
54230+++ b/fs/ocfs2/suballoc.c
54231@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
54232 mlog_errno(status);
54233 goto bail;
54234 }
54235- atomic_inc(&osb->alloc_stats.bg_extends);
54236+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
54237
54238 /* You should never ask for this much metadata */
54239 BUG_ON(bits_wanted >
54240@@ -2007,7 +2007,7 @@ int ocfs2_claim_metadata(handle_t *handle,
54241 mlog_errno(status);
54242 goto bail;
54243 }
54244- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
54245+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
54246
54247 *suballoc_loc = res.sr_bg_blkno;
54248 *suballoc_bit_start = res.sr_bit_offset;
54249@@ -2171,7 +2171,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
54250 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
54251 res->sr_bits);
54252
54253- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
54254+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
54255
54256 BUG_ON(res->sr_bits != 1);
54257
54258@@ -2213,7 +2213,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
54259 mlog_errno(status);
54260 goto bail;
54261 }
54262- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
54263+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
54264
54265 BUG_ON(res.sr_bits != 1);
54266
54267@@ -2317,7 +2317,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
54268 cluster_start,
54269 num_clusters);
54270 if (!status)
54271- atomic_inc(&osb->alloc_stats.local_data);
54272+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
54273 } else {
54274 if (min_clusters > (osb->bitmap_cpg - 1)) {
54275 /* The only paths asking for contiguousness
54276@@ -2343,7 +2343,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
54277 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
54278 res.sr_bg_blkno,
54279 res.sr_bit_offset);
54280- atomic_inc(&osb->alloc_stats.bitmap_data);
54281+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
54282 *num_clusters = res.sr_bits;
54283 }
54284 }
54285diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
54286index 0e91ec2..f4b3fc6 100644
54287--- a/fs/ocfs2/super.c
54288+++ b/fs/ocfs2/super.c
54289@@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
54290 "%10s => GlobalAllocs: %d LocalAllocs: %d "
54291 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
54292 "Stats",
54293- atomic_read(&osb->alloc_stats.bitmap_data),
54294- atomic_read(&osb->alloc_stats.local_data),
54295- atomic_read(&osb->alloc_stats.bg_allocs),
54296- atomic_read(&osb->alloc_stats.moves),
54297- atomic_read(&osb->alloc_stats.bg_extends));
54298+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
54299+ atomic_read_unchecked(&osb->alloc_stats.local_data),
54300+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
54301+ atomic_read_unchecked(&osb->alloc_stats.moves),
54302+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
54303
54304 out += snprintf(buf + out, len - out,
54305 "%10s => State: %u Descriptor: %llu Size: %u bits "
54306@@ -2121,11 +2121,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
54307 spin_lock_init(&osb->osb_xattr_lock);
54308 ocfs2_init_steal_slots(osb);
54309
54310- atomic_set(&osb->alloc_stats.moves, 0);
54311- atomic_set(&osb->alloc_stats.local_data, 0);
54312- atomic_set(&osb->alloc_stats.bitmap_data, 0);
54313- atomic_set(&osb->alloc_stats.bg_allocs, 0);
54314- atomic_set(&osb->alloc_stats.bg_extends, 0);
54315+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
54316+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
54317+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
54318+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
54319+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
54320
54321 /* Copy the blockcheck stats from the superblock probe */
54322 osb->osb_ecc_stats = *stats;
54323diff --git a/fs/open.c b/fs/open.c
54324index 9b33c0c..2ffcca2 100644
54325--- a/fs/open.c
54326+++ b/fs/open.c
54327@@ -31,6 +31,8 @@
54328 #include <linux/ima.h>
54329 #include <linux/dnotify.h>
54330
54331+#define CREATE_TRACE_POINTS
54332+#include <trace/events/fs.h>
54333 #include "internal.h"
54334
54335 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
54336@@ -101,6 +103,8 @@ long vfs_truncate(struct path *path, loff_t length)
54337 error = locks_verify_truncate(inode, NULL, length);
54338 if (!error)
54339 error = security_path_truncate(path);
54340+ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
54341+ error = -EACCES;
54342 if (!error)
54343 error = do_truncate(path->dentry, length, 0, NULL);
54344
54345@@ -178,6 +182,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
54346 error = locks_verify_truncate(inode, f.file, length);
54347 if (!error)
54348 error = security_path_truncate(&f.file->f_path);
54349+ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
54350+ error = -EACCES;
54351 if (!error)
54352 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
54353 sb_end_write(inode->i_sb);
54354@@ -373,6 +379,9 @@ retry:
54355 if (__mnt_is_readonly(path.mnt))
54356 res = -EROFS;
54357
54358+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
54359+ res = -EACCES;
54360+
54361 out_path_release:
54362 path_put(&path);
54363 if (retry_estale(res, lookup_flags)) {
54364@@ -404,6 +413,8 @@ retry:
54365 if (error)
54366 goto dput_and_out;
54367
54368+ gr_log_chdir(path.dentry, path.mnt);
54369+
54370 set_fs_pwd(current->fs, &path);
54371
54372 dput_and_out:
54373@@ -433,6 +444,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
54374 goto out_putf;
54375
54376 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
54377+
54378+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
54379+ error = -EPERM;
54380+
54381+ if (!error)
54382+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
54383+
54384 if (!error)
54385 set_fs_pwd(current->fs, &f.file->f_path);
54386 out_putf:
54387@@ -462,7 +480,13 @@ retry:
54388 if (error)
54389 goto dput_and_out;
54390
54391+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
54392+ goto dput_and_out;
54393+
54394 set_fs_root(current->fs, &path);
54395+
54396+ gr_handle_chroot_chdir(&path);
54397+
54398 error = 0;
54399 dput_and_out:
54400 path_put(&path);
54401@@ -484,6 +508,16 @@ static int chmod_common(struct path *path, umode_t mode)
54402 if (error)
54403 return error;
54404 mutex_lock(&inode->i_mutex);
54405+
54406+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
54407+ error = -EACCES;
54408+ goto out_unlock;
54409+ }
54410+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
54411+ error = -EACCES;
54412+ goto out_unlock;
54413+ }
54414+
54415 error = security_path_chmod(path, mode);
54416 if (error)
54417 goto out_unlock;
54418@@ -544,6 +578,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
54419 uid = make_kuid(current_user_ns(), user);
54420 gid = make_kgid(current_user_ns(), group);
54421
54422+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
54423+ return -EACCES;
54424+
54425 newattrs.ia_valid = ATTR_CTIME;
54426 if (user != (uid_t) -1) {
54427 if (!uid_valid(uid))
54428@@ -960,6 +997,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
54429 } else {
54430 fsnotify_open(f);
54431 fd_install(fd, f);
54432+ trace_do_sys_open(tmp->name, flags, mode);
54433 }
54434 }
54435 putname(tmp);
54436diff --git a/fs/pipe.c b/fs/pipe.c
54437index 8e2e73f..1ef1048 100644
54438--- a/fs/pipe.c
54439+++ b/fs/pipe.c
54440@@ -438,9 +438,9 @@ redo:
54441 }
54442 if (bufs) /* More to do? */
54443 continue;
54444- if (!pipe->writers)
54445+ if (!atomic_read(&pipe->writers))
54446 break;
54447- if (!pipe->waiting_writers) {
54448+ if (!atomic_read(&pipe->waiting_writers)) {
54449 /* syscall merging: Usually we must not sleep
54450 * if O_NONBLOCK is set, or if we got some data.
54451 * But if a writer sleeps in kernel space, then
54452@@ -504,7 +504,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
54453 mutex_lock(&inode->i_mutex);
54454 pipe = inode->i_pipe;
54455
54456- if (!pipe->readers) {
54457+ if (!atomic_read(&pipe->readers)) {
54458 send_sig(SIGPIPE, current, 0);
54459 ret = -EPIPE;
54460 goto out;
54461@@ -553,7 +553,7 @@ redo1:
54462 for (;;) {
54463 int bufs;
54464
54465- if (!pipe->readers) {
54466+ if (!atomic_read(&pipe->readers)) {
54467 send_sig(SIGPIPE, current, 0);
54468 if (!ret)
54469 ret = -EPIPE;
54470@@ -644,9 +644,9 @@ redo2:
54471 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
54472 do_wakeup = 0;
54473 }
54474- pipe->waiting_writers++;
54475+ atomic_inc(&pipe->waiting_writers);
54476 pipe_wait(pipe);
54477- pipe->waiting_writers--;
54478+ atomic_dec(&pipe->waiting_writers);
54479 }
54480 out:
54481 mutex_unlock(&inode->i_mutex);
54482@@ -716,7 +716,7 @@ pipe_poll(struct file *filp, poll_table *wait)
54483 mask = 0;
54484 if (filp->f_mode & FMODE_READ) {
54485 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
54486- if (!pipe->writers && filp->f_version != pipe->w_counter)
54487+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
54488 mask |= POLLHUP;
54489 }
54490
54491@@ -726,7 +726,7 @@ pipe_poll(struct file *filp, poll_table *wait)
54492 * Most Unices do not set POLLERR for FIFOs but on Linux they
54493 * behave exactly like pipes for poll().
54494 */
54495- if (!pipe->readers)
54496+ if (!atomic_read(&pipe->readers))
54497 mask |= POLLERR;
54498 }
54499
54500@@ -740,10 +740,10 @@ pipe_release(struct inode *inode, int decr, int decw)
54501
54502 mutex_lock(&inode->i_mutex);
54503 pipe = inode->i_pipe;
54504- pipe->readers -= decr;
54505- pipe->writers -= decw;
54506+ atomic_sub(decr, &pipe->readers);
54507+ atomic_sub(decw, &pipe->writers);
54508
54509- if (!pipe->readers && !pipe->writers) {
54510+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
54511 free_pipe_info(inode);
54512 } else {
54513 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
54514@@ -833,7 +833,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
54515
54516 if (inode->i_pipe) {
54517 ret = 0;
54518- inode->i_pipe->readers++;
54519+ atomic_inc(&inode->i_pipe->readers);
54520 }
54521
54522 mutex_unlock(&inode->i_mutex);
54523@@ -850,7 +850,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
54524
54525 if (inode->i_pipe) {
54526 ret = 0;
54527- inode->i_pipe->writers++;
54528+ atomic_inc(&inode->i_pipe->writers);
54529 }
54530
54531 mutex_unlock(&inode->i_mutex);
54532@@ -871,9 +871,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
54533 if (inode->i_pipe) {
54534 ret = 0;
54535 if (filp->f_mode & FMODE_READ)
54536- inode->i_pipe->readers++;
54537+ atomic_inc(&inode->i_pipe->readers);
54538 if (filp->f_mode & FMODE_WRITE)
54539- inode->i_pipe->writers++;
54540+ atomic_inc(&inode->i_pipe->writers);
54541 }
54542
54543 mutex_unlock(&inode->i_mutex);
54544@@ -965,7 +965,7 @@ void free_pipe_info(struct inode *inode)
54545 inode->i_pipe = NULL;
54546 }
54547
54548-static struct vfsmount *pipe_mnt __read_mostly;
54549+struct vfsmount *pipe_mnt __read_mostly;
54550
54551 /*
54552 * pipefs_dname() is called from d_path().
54553@@ -995,7 +995,8 @@ static struct inode * get_pipe_inode(void)
54554 goto fail_iput;
54555 inode->i_pipe = pipe;
54556
54557- pipe->readers = pipe->writers = 1;
54558+ atomic_set(&pipe->readers, 1);
54559+ atomic_set(&pipe->writers, 1);
54560 inode->i_fop = &rdwr_pipefifo_fops;
54561
54562 /*
54563diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
54564index 15af622..0e9f4467 100644
54565--- a/fs/proc/Kconfig
54566+++ b/fs/proc/Kconfig
54567@@ -30,12 +30,12 @@ config PROC_FS
54568
54569 config PROC_KCORE
54570 bool "/proc/kcore support" if !ARM
54571- depends on PROC_FS && MMU
54572+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
54573
54574 config PROC_VMCORE
54575 bool "/proc/vmcore support"
54576- depends on PROC_FS && CRASH_DUMP
54577- default y
54578+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
54579+ default n
54580 help
54581 Exports the dump image of crashed kernel in ELF format.
54582
54583@@ -59,8 +59,8 @@ config PROC_SYSCTL
54584 limited in memory.
54585
54586 config PROC_PAGE_MONITOR
54587- default y
54588- depends on PROC_FS && MMU
54589+ default n
54590+ depends on PROC_FS && MMU && !GRKERNSEC
54591 bool "Enable /proc page monitoring" if EXPERT
54592 help
54593 Various /proc files exist to monitor process memory utilization:
54594diff --git a/fs/proc/array.c b/fs/proc/array.c
54595index 6a91e6f..e54dbc14 100644
54596--- a/fs/proc/array.c
54597+++ b/fs/proc/array.c
54598@@ -60,6 +60,7 @@
54599 #include <linux/tty.h>
54600 #include <linux/string.h>
54601 #include <linux/mman.h>
54602+#include <linux/grsecurity.h>
54603 #include <linux/proc_fs.h>
54604 #include <linux/ioport.h>
54605 #include <linux/uaccess.h>
54606@@ -362,6 +363,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
54607 seq_putc(m, '\n');
54608 }
54609
54610+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
54611+static inline void task_pax(struct seq_file *m, struct task_struct *p)
54612+{
54613+ if (p->mm)
54614+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
54615+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
54616+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
54617+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
54618+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
54619+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
54620+ else
54621+ seq_printf(m, "PaX:\t-----\n");
54622+}
54623+#endif
54624+
54625 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
54626 struct pid *pid, struct task_struct *task)
54627 {
54628@@ -380,9 +396,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
54629 task_cpus_allowed(m, task);
54630 cpuset_task_status_allowed(m, task);
54631 task_context_switch_counts(m, task);
54632+
54633+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
54634+ task_pax(m, task);
54635+#endif
54636+
54637+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
54638+ task_grsec_rbac(m, task);
54639+#endif
54640+
54641 return 0;
54642 }
54643
54644+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54645+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
54646+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
54647+ _mm->pax_flags & MF_PAX_SEGMEXEC))
54648+#endif
54649+
54650 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
54651 struct pid *pid, struct task_struct *task, int whole)
54652 {
54653@@ -404,6 +435,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
54654 char tcomm[sizeof(task->comm)];
54655 unsigned long flags;
54656
54657+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54658+ if (current->exec_id != m->exec_id) {
54659+ gr_log_badprocpid("stat");
54660+ return 0;
54661+ }
54662+#endif
54663+
54664 state = *get_task_state(task);
54665 vsize = eip = esp = 0;
54666 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
54667@@ -475,6 +513,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
54668 gtime = task->gtime;
54669 }
54670
54671+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54672+ if (PAX_RAND_FLAGS(mm)) {
54673+ eip = 0;
54674+ esp = 0;
54675+ wchan = 0;
54676+ }
54677+#endif
54678+#ifdef CONFIG_GRKERNSEC_HIDESYM
54679+ wchan = 0;
54680+ eip =0;
54681+ esp =0;
54682+#endif
54683+
54684 /* scale priority and nice values from timeslices to -20..20 */
54685 /* to make it look like a "normal" Unix priority/nice value */
54686 priority = task_prio(task);
54687@@ -511,9 +562,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
54688 seq_put_decimal_ull(m, ' ', vsize);
54689 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
54690 seq_put_decimal_ull(m, ' ', rsslim);
54691+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54692+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
54693+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
54694+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
54695+#else
54696 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
54697 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
54698 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
54699+#endif
54700 seq_put_decimal_ull(m, ' ', esp);
54701 seq_put_decimal_ull(m, ' ', eip);
54702 /* The signal information here is obsolete.
54703@@ -535,7 +592,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
54704 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
54705 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
54706
54707- if (mm && permitted) {
54708+ if (mm && permitted
54709+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54710+ && !PAX_RAND_FLAGS(mm)
54711+#endif
54712+ ) {
54713 seq_put_decimal_ull(m, ' ', mm->start_data);
54714 seq_put_decimal_ull(m, ' ', mm->end_data);
54715 seq_put_decimal_ull(m, ' ', mm->start_brk);
54716@@ -573,8 +634,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
54717 struct pid *pid, struct task_struct *task)
54718 {
54719 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
54720- struct mm_struct *mm = get_task_mm(task);
54721+ struct mm_struct *mm;
54722
54723+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54724+ if (current->exec_id != m->exec_id) {
54725+ gr_log_badprocpid("statm");
54726+ return 0;
54727+ }
54728+#endif
54729+ mm = get_task_mm(task);
54730 if (mm) {
54731 size = task_statm(mm, &shared, &text, &data, &resident);
54732 mmput(mm);
54733@@ -597,6 +665,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
54734 return 0;
54735 }
54736
54737+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
54738+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
54739+{
54740+ return sprintf(buffer, "%pI4\n", &task->signal->curr_ip);
54741+}
54742+#endif
54743+
54744 #ifdef CONFIG_CHECKPOINT_RESTORE
54745 static struct pid *
54746 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
54747diff --git a/fs/proc/base.c b/fs/proc/base.c
54748index 9b43ff77..0fa9564 100644
54749--- a/fs/proc/base.c
54750+++ b/fs/proc/base.c
54751@@ -111,6 +111,14 @@ struct pid_entry {
54752 union proc_op op;
54753 };
54754
54755+struct getdents_callback {
54756+ struct linux_dirent __user * current_dir;
54757+ struct linux_dirent __user * previous;
54758+ struct file * file;
54759+ int count;
54760+ int error;
54761+};
54762+
54763 #define NOD(NAME, MODE, IOP, FOP, OP) { \
54764 .name = (NAME), \
54765 .len = sizeof(NAME) - 1, \
54766@@ -208,6 +216,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
54767 if (!mm->arg_end)
54768 goto out_mm; /* Shh! No looking before we're done */
54769
54770+ if (gr_acl_handle_procpidmem(task))
54771+ goto out_mm;
54772+
54773 len = mm->arg_end - mm->arg_start;
54774
54775 if (len > PAGE_SIZE)
54776@@ -235,12 +246,28 @@ out:
54777 return res;
54778 }
54779
54780+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54781+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
54782+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
54783+ _mm->pax_flags & MF_PAX_SEGMEXEC))
54784+#endif
54785+
54786 static int proc_pid_auxv(struct task_struct *task, char *buffer)
54787 {
54788 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
54789 int res = PTR_ERR(mm);
54790 if (mm && !IS_ERR(mm)) {
54791 unsigned int nwords = 0;
54792+
54793+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54794+ /* allow if we're currently ptracing this task */
54795+ if (PAX_RAND_FLAGS(mm) &&
54796+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
54797+ mmput(mm);
54798+ return 0;
54799+ }
54800+#endif
54801+
54802 do {
54803 nwords += 2;
54804 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
54805@@ -254,7 +281,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
54806 }
54807
54808
54809-#ifdef CONFIG_KALLSYMS
54810+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
54811 /*
54812 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
54813 * Returns the resolved symbol. If that fails, simply return the address.
54814@@ -293,7 +320,7 @@ static void unlock_trace(struct task_struct *task)
54815 mutex_unlock(&task->signal->cred_guard_mutex);
54816 }
54817
54818-#ifdef CONFIG_STACKTRACE
54819+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
54820
54821 #define MAX_STACK_TRACE_DEPTH 64
54822
54823@@ -485,7 +512,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
54824 return count;
54825 }
54826
54827-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
54828+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
54829 static int proc_pid_syscall(struct task_struct *task, char *buffer)
54830 {
54831 long nr;
54832@@ -514,7 +541,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
54833 /************************************************************************/
54834
54835 /* permission checks */
54836-static int proc_fd_access_allowed(struct inode *inode)
54837+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
54838 {
54839 struct task_struct *task;
54840 int allowed = 0;
54841@@ -524,7 +551,10 @@ static int proc_fd_access_allowed(struct inode *inode)
54842 */
54843 task = get_proc_task(inode);
54844 if (task) {
54845- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
54846+ if (log)
54847+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
54848+ else
54849+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
54850 put_task_struct(task);
54851 }
54852 return allowed;
54853@@ -555,10 +585,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
54854 struct task_struct *task,
54855 int hide_pid_min)
54856 {
54857+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
54858+ return false;
54859+
54860+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54861+ rcu_read_lock();
54862+ {
54863+ const struct cred *tmpcred = current_cred();
54864+ const struct cred *cred = __task_cred(task);
54865+
54866+ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
54867+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
54868+ || in_group_p(grsec_proc_gid)
54869+#endif
54870+ ) {
54871+ rcu_read_unlock();
54872+ return true;
54873+ }
54874+ }
54875+ rcu_read_unlock();
54876+
54877+ if (!pid->hide_pid)
54878+ return false;
54879+#endif
54880+
54881 if (pid->hide_pid < hide_pid_min)
54882 return true;
54883 if (in_group_p(pid->pid_gid))
54884 return true;
54885+
54886 return ptrace_may_access(task, PTRACE_MODE_READ);
54887 }
54888
54889@@ -576,7 +631,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
54890 put_task_struct(task);
54891
54892 if (!has_perms) {
54893+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54894+ {
54895+#else
54896 if (pid->hide_pid == 2) {
54897+#endif
54898 /*
54899 * Let's make getdents(), stat(), and open()
54900 * consistent with each other. If a process
54901@@ -674,6 +733,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
54902 if (!task)
54903 return -ESRCH;
54904
54905+ if (gr_acl_handle_procpidmem(task)) {
54906+ put_task_struct(task);
54907+ return -EPERM;
54908+ }
54909+
54910 mm = mm_access(task, mode);
54911 put_task_struct(task);
54912
54913@@ -689,6 +753,10 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
54914
54915 file->private_data = mm;
54916
54917+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54918+ file->f_version = current->exec_id;
54919+#endif
54920+
54921 return 0;
54922 }
54923
54924@@ -710,6 +778,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
54925 ssize_t copied;
54926 char *page;
54927
54928+#ifdef CONFIG_GRKERNSEC
54929+ if (write)
54930+ return -EPERM;
54931+#endif
54932+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54933+ if (file->f_version != current->exec_id) {
54934+ gr_log_badprocpid("mem");
54935+ return 0;
54936+ }
54937+#endif
54938+
54939 if (!mm)
54940 return 0;
54941
54942@@ -722,7 +801,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
54943 goto free;
54944
54945 while (count > 0) {
54946- int this_len = min_t(int, count, PAGE_SIZE);
54947+ ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
54948
54949 if (write && copy_from_user(page, buf, this_len)) {
54950 copied = -EFAULT;
54951@@ -814,6 +893,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
54952 if (!mm)
54953 return 0;
54954
54955+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54956+ if (file->f_version != current->exec_id) {
54957+ gr_log_badprocpid("environ");
54958+ return 0;
54959+ }
54960+#endif
54961+
54962 page = (char *)__get_free_page(GFP_TEMPORARY);
54963 if (!page)
54964 return -ENOMEM;
54965@@ -823,7 +909,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
54966 goto free;
54967 while (count > 0) {
54968 size_t this_len, max_len;
54969- int retval;
54970+ ssize_t retval;
54971
54972 if (src >= (mm->env_end - mm->env_start))
54973 break;
54974@@ -1429,7 +1515,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
54975 int error = -EACCES;
54976
54977 /* Are we allowed to snoop on the tasks file descriptors? */
54978- if (!proc_fd_access_allowed(inode))
54979+ if (!proc_fd_access_allowed(inode, 0))
54980 goto out;
54981
54982 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
54983@@ -1473,8 +1559,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
54984 struct path path;
54985
54986 /* Are we allowed to snoop on the tasks file descriptors? */
54987- if (!proc_fd_access_allowed(inode))
54988- goto out;
54989+ /* logging this is needed for learning on chromium to work properly,
54990+ but we don't want to flood the logs from 'ps' which does a readlink
54991+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
54992+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
54993+ */
54994+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
54995+ if (!proc_fd_access_allowed(inode,0))
54996+ goto out;
54997+ } else {
54998+ if (!proc_fd_access_allowed(inode,1))
54999+ goto out;
55000+ }
55001
55002 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
55003 if (error)
55004@@ -1524,7 +1620,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
55005 rcu_read_lock();
55006 cred = __task_cred(task);
55007 inode->i_uid = cred->euid;
55008+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
55009+ inode->i_gid = grsec_proc_gid;
55010+#else
55011 inode->i_gid = cred->egid;
55012+#endif
55013 rcu_read_unlock();
55014 }
55015 security_task_to_inode(task, inode);
55016@@ -1560,10 +1660,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
55017 return -ENOENT;
55018 }
55019 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
55020+#ifdef CONFIG_GRKERNSEC_PROC_USER
55021+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
55022+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55023+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
55024+#endif
55025 task_dumpable(task)) {
55026 cred = __task_cred(task);
55027 stat->uid = cred->euid;
55028+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
55029+ stat->gid = grsec_proc_gid;
55030+#else
55031 stat->gid = cred->egid;
55032+#endif
55033 }
55034 }
55035 rcu_read_unlock();
55036@@ -1601,11 +1710,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
55037
55038 if (task) {
55039 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
55040+#ifdef CONFIG_GRKERNSEC_PROC_USER
55041+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
55042+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55043+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
55044+#endif
55045 task_dumpable(task)) {
55046 rcu_read_lock();
55047 cred = __task_cred(task);
55048 inode->i_uid = cred->euid;
55049+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
55050+ inode->i_gid = grsec_proc_gid;
55051+#else
55052 inode->i_gid = cred->egid;
55053+#endif
55054 rcu_read_unlock();
55055 } else {
55056 inode->i_uid = GLOBAL_ROOT_UID;
55057@@ -2058,6 +2176,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
55058 if (!task)
55059 goto out_no_task;
55060
55061+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
55062+ goto out;
55063+
55064 /*
55065 * Yes, it does not scale. And it should not. Don't add
55066 * new entries into /proc/<tgid>/ without very good reasons.
55067@@ -2102,6 +2223,9 @@ static int proc_pident_readdir(struct file *filp,
55068 if (!task)
55069 goto out_no_task;
55070
55071+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
55072+ goto out;
55073+
55074 ret = 0;
55075 i = filp->f_pos;
55076 switch (i) {
55077@@ -2515,7 +2639,7 @@ static const struct pid_entry tgid_base_stuff[] = {
55078 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
55079 #endif
55080 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
55081-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
55082+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
55083 INF("syscall", S_IRUGO, proc_pid_syscall),
55084 #endif
55085 INF("cmdline", S_IRUGO, proc_pid_cmdline),
55086@@ -2540,10 +2664,10 @@ static const struct pid_entry tgid_base_stuff[] = {
55087 #ifdef CONFIG_SECURITY
55088 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
55089 #endif
55090-#ifdef CONFIG_KALLSYMS
55091+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
55092 INF("wchan", S_IRUGO, proc_pid_wchan),
55093 #endif
55094-#ifdef CONFIG_STACKTRACE
55095+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
55096 ONE("stack", S_IRUGO, proc_pid_stack),
55097 #endif
55098 #ifdef CONFIG_SCHEDSTATS
55099@@ -2577,6 +2701,9 @@ static const struct pid_entry tgid_base_stuff[] = {
55100 #ifdef CONFIG_HARDWALL
55101 INF("hardwall", S_IRUGO, proc_pid_hardwall),
55102 #endif
55103+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
55104+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
55105+#endif
55106 #ifdef CONFIG_USER_NS
55107 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
55108 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
55109@@ -2705,7 +2832,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
55110 if (!inode)
55111 goto out;
55112
55113+#ifdef CONFIG_GRKERNSEC_PROC_USER
55114+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
55115+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55116+ inode->i_gid = grsec_proc_gid;
55117+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
55118+#else
55119 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
55120+#endif
55121 inode->i_op = &proc_tgid_base_inode_operations;
55122 inode->i_fop = &proc_tgid_base_operations;
55123 inode->i_flags|=S_IMMUTABLE;
55124@@ -2743,7 +2877,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
55125 if (!task)
55126 goto out;
55127
55128+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
55129+ goto out_put_task;
55130+
55131 result = proc_pid_instantiate(dir, dentry, task, NULL);
55132+out_put_task:
55133 put_task_struct(task);
55134 out:
55135 return result;
55136@@ -2806,6 +2944,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
55137 static int fake_filldir(void *buf, const char *name, int namelen,
55138 loff_t offset, u64 ino, unsigned d_type)
55139 {
55140+ struct getdents_callback * __buf = (struct getdents_callback *) buf;
55141+ __buf->error = -EINVAL;
55142 return 0;
55143 }
55144
55145@@ -2857,7 +2997,7 @@ static const struct pid_entry tid_base_stuff[] = {
55146 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
55147 #endif
55148 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
55149-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
55150+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
55151 INF("syscall", S_IRUGO, proc_pid_syscall),
55152 #endif
55153 INF("cmdline", S_IRUGO, proc_pid_cmdline),
55154@@ -2884,10 +3024,10 @@ static const struct pid_entry tid_base_stuff[] = {
55155 #ifdef CONFIG_SECURITY
55156 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
55157 #endif
55158-#ifdef CONFIG_KALLSYMS
55159+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
55160 INF("wchan", S_IRUGO, proc_pid_wchan),
55161 #endif
55162-#ifdef CONFIG_STACKTRACE
55163+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
55164 ONE("stack", S_IRUGO, proc_pid_stack),
55165 #endif
55166 #ifdef CONFIG_SCHEDSTATS
55167diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
55168index 82676e3..5f8518a 100644
55169--- a/fs/proc/cmdline.c
55170+++ b/fs/proc/cmdline.c
55171@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
55172
55173 static int __init proc_cmdline_init(void)
55174 {
55175+#ifdef CONFIG_GRKERNSEC_PROC_ADD
55176+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
55177+#else
55178 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
55179+#endif
55180 return 0;
55181 }
55182 module_init(proc_cmdline_init);
55183diff --git a/fs/proc/devices.c b/fs/proc/devices.c
55184index b143471..bb105e5 100644
55185--- a/fs/proc/devices.c
55186+++ b/fs/proc/devices.c
55187@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
55188
55189 static int __init proc_devices_init(void)
55190 {
55191+#ifdef CONFIG_GRKERNSEC_PROC_ADD
55192+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
55193+#else
55194 proc_create("devices", 0, NULL, &proc_devinfo_operations);
55195+#endif
55196 return 0;
55197 }
55198 module_init(proc_devices_init);
55199diff --git a/fs/proc/fd.c b/fs/proc/fd.c
55200index d7a4a28..0201742 100644
55201--- a/fs/proc/fd.c
55202+++ b/fs/proc/fd.c
55203@@ -25,7 +25,8 @@ static int seq_show(struct seq_file *m, void *v)
55204 if (!task)
55205 return -ENOENT;
55206
55207- files = get_files_struct(task);
55208+ if (!gr_acl_handle_procpidmem(task))
55209+ files = get_files_struct(task);
55210 put_task_struct(task);
55211
55212 if (files) {
55213@@ -302,11 +303,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
55214 */
55215 int proc_fd_permission(struct inode *inode, int mask)
55216 {
55217+ struct task_struct *task;
55218 int rv = generic_permission(inode, mask);
55219- if (rv == 0)
55220- return 0;
55221+
55222 if (task_pid(current) == proc_pid(inode))
55223 rv = 0;
55224+
55225+ task = get_proc_task(inode);
55226+ if (task == NULL)
55227+ return rv;
55228+
55229+ if (gr_acl_handle_procpidmem(task))
55230+ rv = -EACCES;
55231+
55232+ put_task_struct(task);
55233+
55234 return rv;
55235 }
55236
55237diff --git a/fs/proc/inode.c b/fs/proc/inode.c
55238index 0ac1e1b..0497e58 100644
55239--- a/fs/proc/inode.c
55240+++ b/fs/proc/inode.c
55241@@ -21,11 +21,17 @@
55242 #include <linux/seq_file.h>
55243 #include <linux/slab.h>
55244 #include <linux/mount.h>
55245+#include <linux/grsecurity.h>
55246
55247 #include <asm/uaccess.h>
55248
55249 #include "internal.h"
55250
55251+#ifdef CONFIG_PROC_SYSCTL
55252+extern const struct inode_operations proc_sys_inode_operations;
55253+extern const struct inode_operations proc_sys_dir_operations;
55254+#endif
55255+
55256 static void proc_evict_inode(struct inode *inode)
55257 {
55258 struct proc_dir_entry *de;
55259@@ -53,6 +59,13 @@ static void proc_evict_inode(struct inode *inode)
55260 ns = PROC_I(inode)->ns;
55261 if (ns_ops && ns)
55262 ns_ops->put(ns);
55263+
55264+#ifdef CONFIG_PROC_SYSCTL
55265+ if (inode->i_op == &proc_sys_inode_operations ||
55266+ inode->i_op == &proc_sys_dir_operations)
55267+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
55268+#endif
55269+
55270 }
55271
55272 static struct kmem_cache * proc_inode_cachep;
55273@@ -455,7 +468,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
55274 if (de->mode) {
55275 inode->i_mode = de->mode;
55276 inode->i_uid = de->uid;
55277+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
55278+ inode->i_gid = grsec_proc_gid;
55279+#else
55280 inode->i_gid = de->gid;
55281+#endif
55282 }
55283 if (de->size)
55284 inode->i_size = de->size;
55285diff --git a/fs/proc/internal.h b/fs/proc/internal.h
55286index 252544c..04395b9 100644
55287--- a/fs/proc/internal.h
55288+++ b/fs/proc/internal.h
55289@@ -55,6 +55,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
55290 struct pid *pid, struct task_struct *task);
55291 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
55292 struct pid *pid, struct task_struct *task);
55293+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
55294+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
55295+#endif
55296 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
55297
55298 extern const struct file_operations proc_tid_children_operations;
55299diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
55300index e96d4f1..8b116ed 100644
55301--- a/fs/proc/kcore.c
55302+++ b/fs/proc/kcore.c
55303@@ -480,9 +480,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
55304 * the addresses in the elf_phdr on our list.
55305 */
55306 start = kc_offset_to_vaddr(*fpos - elf_buflen);
55307- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
55308+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
55309+ if (tsz > buflen)
55310 tsz = buflen;
55311-
55312+
55313 while (buflen) {
55314 struct kcore_list *m;
55315
55316@@ -511,20 +512,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
55317 kfree(elf_buf);
55318 } else {
55319 if (kern_addr_valid(start)) {
55320- unsigned long n;
55321+ char *elf_buf;
55322+ mm_segment_t oldfs;
55323
55324- n = copy_to_user(buffer, (char *)start, tsz);
55325- /*
55326- * We cannot distinguish between fault on source
55327- * and fault on destination. When this happens
55328- * we clear too and hope it will trigger the
55329- * EFAULT again.
55330- */
55331- if (n) {
55332- if (clear_user(buffer + tsz - n,
55333- n))
55334+ elf_buf = kmalloc(tsz, GFP_KERNEL);
55335+ if (!elf_buf)
55336+ return -ENOMEM;
55337+ oldfs = get_fs();
55338+ set_fs(KERNEL_DS);
55339+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
55340+ set_fs(oldfs);
55341+ if (copy_to_user(buffer, elf_buf, tsz)) {
55342+ kfree(elf_buf);
55343 return -EFAULT;
55344+ }
55345 }
55346+ set_fs(oldfs);
55347+ kfree(elf_buf);
55348 } else {
55349 if (clear_user(buffer, tsz))
55350 return -EFAULT;
55351@@ -544,6 +548,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
55352
55353 static int open_kcore(struct inode *inode, struct file *filp)
55354 {
55355+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
55356+ return -EPERM;
55357+#endif
55358 if (!capable(CAP_SYS_RAWIO))
55359 return -EPERM;
55360 if (kcore_need_update)
55361diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
55362index 80e4645..53e5fcf 100644
55363--- a/fs/proc/meminfo.c
55364+++ b/fs/proc/meminfo.c
55365@@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
55366 vmi.used >> 10,
55367 vmi.largest_chunk >> 10
55368 #ifdef CONFIG_MEMORY_FAILURE
55369- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
55370+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
55371 #endif
55372 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
55373 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
55374diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
55375index b1822dd..df622cb 100644
55376--- a/fs/proc/nommu.c
55377+++ b/fs/proc/nommu.c
55378@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
55379 if (len < 1)
55380 len = 1;
55381 seq_printf(m, "%*c", len, ' ');
55382- seq_path(m, &file->f_path, "");
55383+ seq_path(m, &file->f_path, "\n\\");
55384 }
55385
55386 seq_putc(m, '\n');
55387diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
55388index fe72cd0..21b52ff 100644
55389--- a/fs/proc/proc_net.c
55390+++ b/fs/proc/proc_net.c
55391@@ -23,6 +23,7 @@
55392 #include <linux/nsproxy.h>
55393 #include <net/net_namespace.h>
55394 #include <linux/seq_file.h>
55395+#include <linux/grsecurity.h>
55396
55397 #include "internal.h"
55398
55399@@ -105,6 +106,17 @@ static struct net *get_proc_task_net(struct inode *dir)
55400 struct task_struct *task;
55401 struct nsproxy *ns;
55402 struct net *net = NULL;
55403+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55404+ const struct cred *cred = current_cred();
55405+#endif
55406+
55407+#ifdef CONFIG_GRKERNSEC_PROC_USER
55408+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
55409+ return net;
55410+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55411+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
55412+ return net;
55413+#endif
55414
55415 rcu_read_lock();
55416 task = pid_task(proc_pid(dir), PIDTYPE_PID);
55417diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
55418index 1827d88..43b0279 100644
55419--- a/fs/proc/proc_sysctl.c
55420+++ b/fs/proc/proc_sysctl.c
55421@@ -12,11 +12,15 @@
55422 #include <linux/module.h>
55423 #include "internal.h"
55424
55425+extern int gr_handle_chroot_sysctl(const int op);
55426+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
55427+ const int op);
55428+
55429 static const struct dentry_operations proc_sys_dentry_operations;
55430 static const struct file_operations proc_sys_file_operations;
55431-static const struct inode_operations proc_sys_inode_operations;
55432+const struct inode_operations proc_sys_inode_operations;
55433 static const struct file_operations proc_sys_dir_file_operations;
55434-static const struct inode_operations proc_sys_dir_operations;
55435+const struct inode_operations proc_sys_dir_operations;
55436
55437 void proc_sys_poll_notify(struct ctl_table_poll *poll)
55438 {
55439@@ -466,6 +470,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
55440
55441 err = NULL;
55442 d_set_d_op(dentry, &proc_sys_dentry_operations);
55443+
55444+ gr_handle_proc_create(dentry, inode);
55445+
55446 d_add(dentry, inode);
55447
55448 out:
55449@@ -481,6 +488,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
55450 struct inode *inode = filp->f_path.dentry->d_inode;
55451 struct ctl_table_header *head = grab_header(inode);
55452 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
55453+ int op = write ? MAY_WRITE : MAY_READ;
55454 ssize_t error;
55455 size_t res;
55456
55457@@ -492,7 +500,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
55458 * and won't be until we finish.
55459 */
55460 error = -EPERM;
55461- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
55462+ if (sysctl_perm(head, table, op))
55463 goto out;
55464
55465 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
55466@@ -500,6 +508,22 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
55467 if (!table->proc_handler)
55468 goto out;
55469
55470+#ifdef CONFIG_GRKERNSEC
55471+ error = -EPERM;
55472+ if (gr_handle_chroot_sysctl(op))
55473+ goto out;
55474+ dget(filp->f_path.dentry);
55475+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
55476+ dput(filp->f_path.dentry);
55477+ goto out;
55478+ }
55479+ dput(filp->f_path.dentry);
55480+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
55481+ goto out;
55482+ if (write && !capable(CAP_SYS_ADMIN))
55483+ goto out;
55484+#endif
55485+
55486 /* careful: calling conventions are nasty here */
55487 res = count;
55488 error = table->proc_handler(table, write, buf, &res, ppos);
55489@@ -597,6 +621,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
55490 return -ENOMEM;
55491 } else {
55492 d_set_d_op(child, &proc_sys_dentry_operations);
55493+
55494+ gr_handle_proc_create(child, inode);
55495+
55496 d_add(child, inode);
55497 }
55498 } else {
55499@@ -640,6 +667,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
55500 if ((*pos)++ < file->f_pos)
55501 return 0;
55502
55503+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
55504+ return 0;
55505+
55506 if (unlikely(S_ISLNK(table->mode)))
55507 res = proc_sys_link_fill_cache(file, dirent, filldir, head, table);
55508 else
55509@@ -750,6 +780,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
55510 if (IS_ERR(head))
55511 return PTR_ERR(head);
55512
55513+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
55514+ return -ENOENT;
55515+
55516 generic_fillattr(inode, stat);
55517 if (table)
55518 stat->mode = (stat->mode & S_IFMT) | table->mode;
55519@@ -772,13 +805,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
55520 .llseek = generic_file_llseek,
55521 };
55522
55523-static const struct inode_operations proc_sys_inode_operations = {
55524+const struct inode_operations proc_sys_inode_operations = {
55525 .permission = proc_sys_permission,
55526 .setattr = proc_sys_setattr,
55527 .getattr = proc_sys_getattr,
55528 };
55529
55530-static const struct inode_operations proc_sys_dir_operations = {
55531+const struct inode_operations proc_sys_dir_operations = {
55532 .lookup = proc_sys_lookup,
55533 .permission = proc_sys_permission,
55534 .setattr = proc_sys_setattr,
55535@@ -854,7 +887,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
55536 static struct ctl_dir *new_dir(struct ctl_table_set *set,
55537 const char *name, int namelen)
55538 {
55539- struct ctl_table *table;
55540+ ctl_table_no_const *table;
55541 struct ctl_dir *new;
55542 struct ctl_node *node;
55543 char *new_name;
55544@@ -866,7 +899,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
55545 return NULL;
55546
55547 node = (struct ctl_node *)(new + 1);
55548- table = (struct ctl_table *)(node + 1);
55549+ table = (ctl_table_no_const *)(node + 1);
55550 new_name = (char *)(table + 2);
55551 memcpy(new_name, name, namelen);
55552 new_name[namelen] = '\0';
55553@@ -1035,7 +1068,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
55554 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
55555 struct ctl_table_root *link_root)
55556 {
55557- struct ctl_table *link_table, *entry, *link;
55558+ ctl_table_no_const *link_table, *link;
55559+ struct ctl_table *entry;
55560 struct ctl_table_header *links;
55561 struct ctl_node *node;
55562 char *link_name;
55563@@ -1058,7 +1092,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
55564 return NULL;
55565
55566 node = (struct ctl_node *)(links + 1);
55567- link_table = (struct ctl_table *)(node + nr_entries);
55568+ link_table = (ctl_table_no_const *)(node + nr_entries);
55569 link_name = (char *)&link_table[nr_entries + 1];
55570
55571 for (link = link_table, entry = table; entry->procname; link++, entry++) {
55572@@ -1306,8 +1340,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
55573 struct ctl_table_header ***subheader, struct ctl_table_set *set,
55574 struct ctl_table *table)
55575 {
55576- struct ctl_table *ctl_table_arg = NULL;
55577- struct ctl_table *entry, *files;
55578+ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
55579+ struct ctl_table *entry;
55580 int nr_files = 0;
55581 int nr_dirs = 0;
55582 int err = -ENOMEM;
55583@@ -1319,10 +1353,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
55584 nr_files++;
55585 }
55586
55587- files = table;
55588 /* If there are mixed files and directories we need a new table */
55589 if (nr_dirs && nr_files) {
55590- struct ctl_table *new;
55591+ ctl_table_no_const *new;
55592 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
55593 GFP_KERNEL);
55594 if (!files)
55595@@ -1340,7 +1373,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
55596 /* Register everything except a directory full of subdirectories */
55597 if (nr_files || !nr_dirs) {
55598 struct ctl_table_header *header;
55599- header = __register_sysctl_table(set, path, files);
55600+ header = __register_sysctl_table(set, path, files ? files : table);
55601 if (!header) {
55602 kfree(ctl_table_arg);
55603 goto out;
55604diff --git a/fs/proc/root.c b/fs/proc/root.c
55605index 9c7fab1..ed1c8e0 100644
55606--- a/fs/proc/root.c
55607+++ b/fs/proc/root.c
55608@@ -180,7 +180,15 @@ void __init proc_root_init(void)
55609 #ifdef CONFIG_PROC_DEVICETREE
55610 proc_device_tree_init();
55611 #endif
55612+#ifdef CONFIG_GRKERNSEC_PROC_ADD
55613+#ifdef CONFIG_GRKERNSEC_PROC_USER
55614+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
55615+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55616+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
55617+#endif
55618+#else
55619 proc_mkdir("bus", NULL);
55620+#endif
55621 proc_sys_init();
55622 }
55623
55624diff --git a/fs/proc/self.c b/fs/proc/self.c
55625index aa5cc3b..c91a5d0 100644
55626--- a/fs/proc/self.c
55627+++ b/fs/proc/self.c
55628@@ -37,7 +37,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
55629 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
55630 void *cookie)
55631 {
55632- char *s = nd_get_link(nd);
55633+ const char *s = nd_get_link(nd);
55634 if (!IS_ERR(s))
55635 kfree(s);
55636 }
55637diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
55638index ca5ce7f..02c1cf0 100644
55639--- a/fs/proc/task_mmu.c
55640+++ b/fs/proc/task_mmu.c
55641@@ -11,12 +11,19 @@
55642 #include <linux/rmap.h>
55643 #include <linux/swap.h>
55644 #include <linux/swapops.h>
55645+#include <linux/grsecurity.h>
55646
55647 #include <asm/elf.h>
55648 #include <asm/uaccess.h>
55649 #include <asm/tlbflush.h>
55650 #include "internal.h"
55651
55652+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55653+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
55654+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
55655+ _mm->pax_flags & MF_PAX_SEGMEXEC))
55656+#endif
55657+
55658 void task_mem(struct seq_file *m, struct mm_struct *mm)
55659 {
55660 unsigned long data, text, lib, swap;
55661@@ -52,8 +59,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
55662 "VmExe:\t%8lu kB\n"
55663 "VmLib:\t%8lu kB\n"
55664 "VmPTE:\t%8lu kB\n"
55665- "VmSwap:\t%8lu kB\n",
55666- hiwater_vm << (PAGE_SHIFT-10),
55667+ "VmSwap:\t%8lu kB\n"
55668+
55669+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
55670+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
55671+#endif
55672+
55673+ ,hiwater_vm << (PAGE_SHIFT-10),
55674 total_vm << (PAGE_SHIFT-10),
55675 mm->locked_vm << (PAGE_SHIFT-10),
55676 mm->pinned_vm << (PAGE_SHIFT-10),
55677@@ -62,7 +74,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
55678 data << (PAGE_SHIFT-10),
55679 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
55680 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
55681- swap << (PAGE_SHIFT-10));
55682+ swap << (PAGE_SHIFT-10)
55683+
55684+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
55685+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55686+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
55687+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
55688+#else
55689+ , mm->context.user_cs_base
55690+ , mm->context.user_cs_limit
55691+#endif
55692+#endif
55693+
55694+ );
55695 }
55696
55697 unsigned long task_vsize(struct mm_struct *mm)
55698@@ -277,13 +301,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
55699 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
55700 }
55701
55702- /* We don't show the stack guard page in /proc/maps */
55703+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55704+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
55705+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
55706+#else
55707 start = vma->vm_start;
55708- if (stack_guard_page_start(vma, start))
55709- start += PAGE_SIZE;
55710 end = vma->vm_end;
55711- if (stack_guard_page_end(vma, end))
55712- end -= PAGE_SIZE;
55713+#endif
55714
55715 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
55716 start,
55717@@ -292,7 +316,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
55718 flags & VM_WRITE ? 'w' : '-',
55719 flags & VM_EXEC ? 'x' : '-',
55720 flags & VM_MAYSHARE ? 's' : 'p',
55721+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55722+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
55723+#else
55724 pgoff,
55725+#endif
55726 MAJOR(dev), MINOR(dev), ino, &len);
55727
55728 /*
55729@@ -301,7 +329,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
55730 */
55731 if (file) {
55732 pad_len_spaces(m, len);
55733- seq_path(m, &file->f_path, "\n");
55734+ seq_path(m, &file->f_path, "\n\\");
55735 goto done;
55736 }
55737
55738@@ -327,8 +355,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
55739 * Thread stack in /proc/PID/task/TID/maps or
55740 * the main process stack.
55741 */
55742- if (!is_pid || (vma->vm_start <= mm->start_stack &&
55743- vma->vm_end >= mm->start_stack)) {
55744+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
55745+ (vma->vm_start <= mm->start_stack &&
55746+ vma->vm_end >= mm->start_stack)) {
55747 name = "[stack]";
55748 } else {
55749 /* Thread stack in /proc/PID/maps */
55750@@ -352,6 +381,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
55751 struct proc_maps_private *priv = m->private;
55752 struct task_struct *task = priv->task;
55753
55754+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55755+ if (current->exec_id != m->exec_id) {
55756+ gr_log_badprocpid("maps");
55757+ return 0;
55758+ }
55759+#endif
55760+
55761 show_map_vma(m, vma, is_pid);
55762
55763 if (m->count < m->size) /* vma is copied successfully */
55764@@ -589,12 +625,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
55765 .private = &mss,
55766 };
55767
55768+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55769+ if (current->exec_id != m->exec_id) {
55770+ gr_log_badprocpid("smaps");
55771+ return 0;
55772+ }
55773+#endif
55774 memset(&mss, 0, sizeof mss);
55775- mss.vma = vma;
55776- /* mmap_sem is held in m_start */
55777- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
55778- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
55779-
55780+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55781+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
55782+#endif
55783+ mss.vma = vma;
55784+ /* mmap_sem is held in m_start */
55785+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
55786+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
55787+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55788+ }
55789+#endif
55790 show_map_vma(m, vma, is_pid);
55791
55792 seq_printf(m,
55793@@ -612,7 +659,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
55794 "KernelPageSize: %8lu kB\n"
55795 "MMUPageSize: %8lu kB\n"
55796 "Locked: %8lu kB\n",
55797+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55798+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
55799+#else
55800 (vma->vm_end - vma->vm_start) >> 10,
55801+#endif
55802 mss.resident >> 10,
55803 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
55804 mss.shared_clean >> 10,
55805@@ -1264,6 +1315,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
55806 int n;
55807 char buffer[50];
55808
55809+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55810+ if (current->exec_id != m->exec_id) {
55811+ gr_log_badprocpid("numa_maps");
55812+ return 0;
55813+ }
55814+#endif
55815+
55816 if (!mm)
55817 return 0;
55818
55819@@ -1281,11 +1339,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
55820 mpol_to_str(buffer, sizeof(buffer), pol);
55821 mpol_cond_put(pol);
55822
55823+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55824+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
55825+#else
55826 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
55827+#endif
55828
55829 if (file) {
55830 seq_printf(m, " file=");
55831- seq_path(m, &file->f_path, "\n\t= ");
55832+ seq_path(m, &file->f_path, "\n\t\\= ");
55833 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
55834 seq_printf(m, " heap");
55835 } else {
55836diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
55837index 1ccfa53..0848f95 100644
55838--- a/fs/proc/task_nommu.c
55839+++ b/fs/proc/task_nommu.c
55840@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
55841 else
55842 bytes += kobjsize(mm);
55843
55844- if (current->fs && current->fs->users > 1)
55845+ if (current->fs && atomic_read(&current->fs->users) > 1)
55846 sbytes += kobjsize(current->fs);
55847 else
55848 bytes += kobjsize(current->fs);
55849@@ -168,7 +168,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
55850
55851 if (file) {
55852 pad_len_spaces(m, len);
55853- seq_path(m, &file->f_path, "");
55854+ seq_path(m, &file->f_path, "\n\\");
55855 } else if (mm) {
55856 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
55857
55858diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
55859index b00fcc9..e0c6381 100644
55860--- a/fs/qnx6/qnx6.h
55861+++ b/fs/qnx6/qnx6.h
55862@@ -74,7 +74,7 @@ enum {
55863 BYTESEX_BE,
55864 };
55865
55866-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
55867+static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
55868 {
55869 if (sbi->s_bytesex == BYTESEX_LE)
55870 return le64_to_cpu((__force __le64)n);
55871@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
55872 return (__force __fs64)cpu_to_be64(n);
55873 }
55874
55875-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
55876+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
55877 {
55878 if (sbi->s_bytesex == BYTESEX_LE)
55879 return le32_to_cpu((__force __le32)n);
55880diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
55881index 16e8abb..2dcf914 100644
55882--- a/fs/quota/netlink.c
55883+++ b/fs/quota/netlink.c
55884@@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
55885 void quota_send_warning(struct kqid qid, dev_t dev,
55886 const char warntype)
55887 {
55888- static atomic_t seq;
55889+ static atomic_unchecked_t seq;
55890 struct sk_buff *skb;
55891 void *msg_head;
55892 int ret;
55893@@ -49,7 +49,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
55894 "VFS: Not enough memory to send quota warning.\n");
55895 return;
55896 }
55897- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
55898+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
55899 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
55900 if (!msg_head) {
55901 printk(KERN_ERR
55902diff --git a/fs/readdir.c b/fs/readdir.c
55903index 5e69ef5..e5d9099 100644
55904--- a/fs/readdir.c
55905+++ b/fs/readdir.c
55906@@ -17,6 +17,7 @@
55907 #include <linux/security.h>
55908 #include <linux/syscalls.h>
55909 #include <linux/unistd.h>
55910+#include <linux/namei.h>
55911
55912 #include <asm/uaccess.h>
55913
55914@@ -67,6 +68,7 @@ struct old_linux_dirent {
55915
55916 struct readdir_callback {
55917 struct old_linux_dirent __user * dirent;
55918+ struct file * file;
55919 int result;
55920 };
55921
55922@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
55923 buf->result = -EOVERFLOW;
55924 return -EOVERFLOW;
55925 }
55926+
55927+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
55928+ return 0;
55929+
55930 buf->result++;
55931 dirent = buf->dirent;
55932 if (!access_ok(VERIFY_WRITE, dirent,
55933@@ -114,6 +120,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
55934
55935 buf.result = 0;
55936 buf.dirent = dirent;
55937+ buf.file = f.file;
55938
55939 error = vfs_readdir(f.file, fillonedir, &buf);
55940 if (buf.result)
55941@@ -139,6 +146,7 @@ struct linux_dirent {
55942 struct getdents_callback {
55943 struct linux_dirent __user * current_dir;
55944 struct linux_dirent __user * previous;
55945+ struct file * file;
55946 int count;
55947 int error;
55948 };
55949@@ -160,6 +168,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
55950 buf->error = -EOVERFLOW;
55951 return -EOVERFLOW;
55952 }
55953+
55954+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
55955+ return 0;
55956+
55957 dirent = buf->previous;
55958 if (dirent) {
55959 if (__put_user(offset, &dirent->d_off))
55960@@ -205,6 +217,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
55961 buf.previous = NULL;
55962 buf.count = count;
55963 buf.error = 0;
55964+ buf.file = f.file;
55965
55966 error = vfs_readdir(f.file, filldir, &buf);
55967 if (error >= 0)
55968@@ -223,6 +236,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
55969 struct getdents_callback64 {
55970 struct linux_dirent64 __user * current_dir;
55971 struct linux_dirent64 __user * previous;
55972+ struct file *file;
55973 int count;
55974 int error;
55975 };
55976@@ -238,6 +252,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
55977 buf->error = -EINVAL; /* only used if we fail.. */
55978 if (reclen > buf->count)
55979 return -EINVAL;
55980+
55981+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
55982+ return 0;
55983+
55984 dirent = buf->previous;
55985 if (dirent) {
55986 if (__put_user(offset, &dirent->d_off))
55987@@ -283,6 +301,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
55988
55989 buf.current_dir = dirent;
55990 buf.previous = NULL;
55991+ buf.file = f.file;
55992 buf.count = count;
55993 buf.error = 0;
55994
55995@@ -291,7 +310,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
55996 error = buf.error;
55997 lastdirent = buf.previous;
55998 if (lastdirent) {
55999- typeof(lastdirent->d_off) d_off = f.file->f_pos;
56000+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
56001 if (__put_user(d_off, &lastdirent->d_off))
56002 error = -EFAULT;
56003 else
56004diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
56005index 2b7882b..1c5ef48 100644
56006--- a/fs/reiserfs/do_balan.c
56007+++ b/fs/reiserfs/do_balan.c
56008@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
56009 return;
56010 }
56011
56012- atomic_inc(&(fs_generation(tb->tb_sb)));
56013+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
56014 do_balance_starts(tb);
56015
56016 /* balance leaf returns 0 except if combining L R and S into
56017diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
56018index e60e870..f40ac16 100644
56019--- a/fs/reiserfs/procfs.c
56020+++ b/fs/reiserfs/procfs.c
56021@@ -112,7 +112,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
56022 "SMALL_TAILS " : "NO_TAILS ",
56023 replay_only(sb) ? "REPLAY_ONLY " : "",
56024 convert_reiserfs(sb) ? "CONV " : "",
56025- atomic_read(&r->s_generation_counter),
56026+ atomic_read_unchecked(&r->s_generation_counter),
56027 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
56028 SF(s_do_balance), SF(s_unneeded_left_neighbor),
56029 SF(s_good_search_by_key_reada), SF(s_bmaps),
56030diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
56031index 157e474..65a6114 100644
56032--- a/fs/reiserfs/reiserfs.h
56033+++ b/fs/reiserfs/reiserfs.h
56034@@ -453,7 +453,7 @@ struct reiserfs_sb_info {
56035 /* Comment? -Hans */
56036 wait_queue_head_t s_wait;
56037 /* To be obsoleted soon by per buffer seals.. -Hans */
56038- atomic_t s_generation_counter; // increased by one every time the
56039+ atomic_unchecked_t s_generation_counter; // increased by one every time the
56040 // tree gets re-balanced
56041 unsigned long s_properties; /* File system properties. Currently holds
56042 on-disk FS format */
56043@@ -1978,7 +1978,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
56044 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
56045
56046 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
56047-#define get_generation(s) atomic_read (&fs_generation(s))
56048+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
56049 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
56050 #define __fs_changed(gen,s) (gen != get_generation (s))
56051 #define fs_changed(gen,s) \
56052diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
56053index c196369..4cce1d9 100644
56054--- a/fs/reiserfs/xattr.c
56055+++ b/fs/reiserfs/xattr.c
56056@@ -187,8 +187,8 @@ fill_with_dentries(void *buf, const char *name, int namelen, loff_t offset,
56057 if (dbuf->count == ARRAY_SIZE(dbuf->dentries))
56058 return -ENOSPC;
56059
56060- if (name[0] == '.' && (name[1] == '\0' ||
56061- (name[1] == '.' && name[2] == '\0')))
56062+ if (name[0] == '.' && (namelen < 2 ||
56063+ (namelen == 2 && name[1] == '.')))
56064 return 0;
56065
56066 dentry = lookup_one_len(name, dbuf->xadir, namelen);
56067diff --git a/fs/select.c b/fs/select.c
56068index 2ef72d9..f213b17 100644
56069--- a/fs/select.c
56070+++ b/fs/select.c
56071@@ -20,6 +20,7 @@
56072 #include <linux/export.h>
56073 #include <linux/slab.h>
56074 #include <linux/poll.h>
56075+#include <linux/security.h>
56076 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
56077 #include <linux/file.h>
56078 #include <linux/fdtable.h>
56079@@ -826,6 +827,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
56080 struct poll_list *walk = head;
56081 unsigned long todo = nfds;
56082
56083+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
56084 if (nfds > rlimit(RLIMIT_NOFILE))
56085 return -EINVAL;
56086
56087diff --git a/fs/seq_file.c b/fs/seq_file.c
56088index f2bc3df..239d4f6 100644
56089--- a/fs/seq_file.c
56090+++ b/fs/seq_file.c
56091@@ -10,6 +10,7 @@
56092 #include <linux/seq_file.h>
56093 #include <linux/slab.h>
56094 #include <linux/cred.h>
56095+#include <linux/sched.h>
56096
56097 #include <asm/uaccess.h>
56098 #include <asm/page.h>
56099@@ -60,6 +61,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
56100 #ifdef CONFIG_USER_NS
56101 p->user_ns = file->f_cred->user_ns;
56102 #endif
56103+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56104+ p->exec_id = current->exec_id;
56105+#endif
56106
56107 /*
56108 * Wrappers around seq_open(e.g. swaps_open) need to be
56109@@ -96,7 +100,7 @@ static int traverse(struct seq_file *m, loff_t offset)
56110 return 0;
56111 }
56112 if (!m->buf) {
56113- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
56114+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
56115 if (!m->buf)
56116 return -ENOMEM;
56117 }
56118@@ -136,7 +140,7 @@ static int traverse(struct seq_file *m, loff_t offset)
56119 Eoverflow:
56120 m->op->stop(m, p);
56121 kfree(m->buf);
56122- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
56123+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
56124 return !m->buf ? -ENOMEM : -EAGAIN;
56125 }
56126
56127@@ -191,7 +195,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
56128
56129 /* grab buffer if we didn't have one */
56130 if (!m->buf) {
56131- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
56132+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
56133 if (!m->buf)
56134 goto Enomem;
56135 }
56136@@ -232,7 +236,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
56137 goto Fill;
56138 m->op->stop(m, p);
56139 kfree(m->buf);
56140- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
56141+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
56142 if (!m->buf)
56143 goto Enomem;
56144 m->count = 0;
56145@@ -581,7 +585,7 @@ static void single_stop(struct seq_file *p, void *v)
56146 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
56147 void *data)
56148 {
56149- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
56150+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
56151 int res = -ENOMEM;
56152
56153 if (op) {
56154diff --git a/fs/splice.c b/fs/splice.c
56155index 6909d89..5b2e8f9 100644
56156--- a/fs/splice.c
56157+++ b/fs/splice.c
56158@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
56159 pipe_lock(pipe);
56160
56161 for (;;) {
56162- if (!pipe->readers) {
56163+ if (!atomic_read(&pipe->readers)) {
56164 send_sig(SIGPIPE, current, 0);
56165 if (!ret)
56166 ret = -EPIPE;
56167@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
56168 do_wakeup = 0;
56169 }
56170
56171- pipe->waiting_writers++;
56172+ atomic_inc(&pipe->waiting_writers);
56173 pipe_wait(pipe);
56174- pipe->waiting_writers--;
56175+ atomic_dec(&pipe->waiting_writers);
56176 }
56177
56178 pipe_unlock(pipe);
56179@@ -563,7 +563,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
56180 old_fs = get_fs();
56181 set_fs(get_ds());
56182 /* The cast to a user pointer is valid due to the set_fs() */
56183- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
56184+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
56185 set_fs(old_fs);
56186
56187 return res;
56188@@ -578,7 +578,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
56189 old_fs = get_fs();
56190 set_fs(get_ds());
56191 /* The cast to a user pointer is valid due to the set_fs() */
56192- res = vfs_write(file, (const char __user *)buf, count, &pos);
56193+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
56194 set_fs(old_fs);
56195
56196 return res;
56197@@ -630,7 +630,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
56198 goto err;
56199
56200 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
56201- vec[i].iov_base = (void __user *) page_address(page);
56202+ vec[i].iov_base = (void __force_user *) page_address(page);
56203 vec[i].iov_len = this_len;
56204 spd.pages[i] = page;
56205 spd.nr_pages++;
56206@@ -851,10 +851,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
56207 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
56208 {
56209 while (!pipe->nrbufs) {
56210- if (!pipe->writers)
56211+ if (!atomic_read(&pipe->writers))
56212 return 0;
56213
56214- if (!pipe->waiting_writers && sd->num_spliced)
56215+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
56216 return 0;
56217
56218 if (sd->flags & SPLICE_F_NONBLOCK)
56219@@ -1189,7 +1189,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
56220 * out of the pipe right after the splice_to_pipe(). So set
56221 * PIPE_READERS appropriately.
56222 */
56223- pipe->readers = 1;
56224+ atomic_set(&pipe->readers, 1);
56225
56226 current->splice_pipe = pipe;
56227 }
56228@@ -1738,9 +1738,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
56229 ret = -ERESTARTSYS;
56230 break;
56231 }
56232- if (!pipe->writers)
56233+ if (!atomic_read(&pipe->writers))
56234 break;
56235- if (!pipe->waiting_writers) {
56236+ if (!atomic_read(&pipe->waiting_writers)) {
56237 if (flags & SPLICE_F_NONBLOCK) {
56238 ret = -EAGAIN;
56239 break;
56240@@ -1772,7 +1772,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
56241 pipe_lock(pipe);
56242
56243 while (pipe->nrbufs >= pipe->buffers) {
56244- if (!pipe->readers) {
56245+ if (!atomic_read(&pipe->readers)) {
56246 send_sig(SIGPIPE, current, 0);
56247 ret = -EPIPE;
56248 break;
56249@@ -1785,9 +1785,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
56250 ret = -ERESTARTSYS;
56251 break;
56252 }
56253- pipe->waiting_writers++;
56254+ atomic_inc(&pipe->waiting_writers);
56255 pipe_wait(pipe);
56256- pipe->waiting_writers--;
56257+ atomic_dec(&pipe->waiting_writers);
56258 }
56259
56260 pipe_unlock(pipe);
56261@@ -1823,14 +1823,14 @@ retry:
56262 pipe_double_lock(ipipe, opipe);
56263
56264 do {
56265- if (!opipe->readers) {
56266+ if (!atomic_read(&opipe->readers)) {
56267 send_sig(SIGPIPE, current, 0);
56268 if (!ret)
56269 ret = -EPIPE;
56270 break;
56271 }
56272
56273- if (!ipipe->nrbufs && !ipipe->writers)
56274+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
56275 break;
56276
56277 /*
56278@@ -1927,7 +1927,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
56279 pipe_double_lock(ipipe, opipe);
56280
56281 do {
56282- if (!opipe->readers) {
56283+ if (!atomic_read(&opipe->readers)) {
56284 send_sig(SIGPIPE, current, 0);
56285 if (!ret)
56286 ret = -EPIPE;
56287@@ -1972,7 +1972,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
56288 * return EAGAIN if we have the potential of some data in the
56289 * future, otherwise just return 0
56290 */
56291- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
56292+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
56293 ret = -EAGAIN;
56294
56295 pipe_unlock(ipipe);
56296diff --git a/fs/stat.c b/fs/stat.c
56297index 14f4545..9b7f55b 100644
56298--- a/fs/stat.c
56299+++ b/fs/stat.c
56300@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
56301 stat->gid = inode->i_gid;
56302 stat->rdev = inode->i_rdev;
56303 stat->size = i_size_read(inode);
56304- stat->atime = inode->i_atime;
56305- stat->mtime = inode->i_mtime;
56306+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
56307+ stat->atime = inode->i_ctime;
56308+ stat->mtime = inode->i_ctime;
56309+ } else {
56310+ stat->atime = inode->i_atime;
56311+ stat->mtime = inode->i_mtime;
56312+ }
56313 stat->ctime = inode->i_ctime;
56314 stat->blksize = (1 << inode->i_blkbits);
56315 stat->blocks = inode->i_blocks;
56316@@ -46,8 +51,14 @@ int vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
56317 if (retval)
56318 return retval;
56319
56320- if (inode->i_op->getattr)
56321- return inode->i_op->getattr(mnt, dentry, stat);
56322+ if (inode->i_op->getattr) {
56323+ retval = inode->i_op->getattr(mnt, dentry, stat);
56324+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
56325+ stat->atime = stat->ctime;
56326+ stat->mtime = stat->ctime;
56327+ }
56328+ return retval;
56329+ }
56330
56331 generic_fillattr(inode, stat);
56332 return 0;
56333diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
56334index 614b2b5..4d321e6 100644
56335--- a/fs/sysfs/bin.c
56336+++ b/fs/sysfs/bin.c
56337@@ -235,13 +235,13 @@ static int bin_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
56338 return ret;
56339 }
56340
56341-static int bin_access(struct vm_area_struct *vma, unsigned long addr,
56342- void *buf, int len, int write)
56343+static ssize_t bin_access(struct vm_area_struct *vma, unsigned long addr,
56344+ void *buf, size_t len, int write)
56345 {
56346 struct file *file = vma->vm_file;
56347 struct bin_buffer *bb = file->private_data;
56348 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
56349- int ret;
56350+ ssize_t ret;
56351
56352 if (!bb->vm_ops)
56353 return -EINVAL;
56354diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
56355index 1f8c823..ed57cfe 100644
56356--- a/fs/sysfs/dir.c
56357+++ b/fs/sysfs/dir.c
56358@@ -40,7 +40,7 @@ static DEFINE_IDA(sysfs_ino_ida);
56359 *
56360 * Returns 31 bit hash of ns + name (so it fits in an off_t )
56361 */
56362-static unsigned int sysfs_name_hash(const void *ns, const char *name)
56363+static unsigned int sysfs_name_hash(const void *ns, const unsigned char *name)
56364 {
56365 unsigned long hash = init_name_hash();
56366 unsigned int len = strlen(name);
56367@@ -685,6 +685,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
56368 struct sysfs_dirent *sd;
56369 int rc;
56370
56371+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
56372+ const char *parent_name = parent_sd->s_name;
56373+
56374+ mode = S_IFDIR | S_IRWXU;
56375+
56376+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
56377+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
56378+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
56379+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
56380+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
56381+#endif
56382+
56383 /* allocate */
56384 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
56385 if (!sd)
56386diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
56387index 602f56d..6853db8 100644
56388--- a/fs/sysfs/file.c
56389+++ b/fs/sysfs/file.c
56390@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
56391
56392 struct sysfs_open_dirent {
56393 atomic_t refcnt;
56394- atomic_t event;
56395+ atomic_unchecked_t event;
56396 wait_queue_head_t poll;
56397 struct list_head buffers; /* goes through sysfs_buffer.list */
56398 };
56399@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
56400 if (!sysfs_get_active(attr_sd))
56401 return -ENODEV;
56402
56403- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
56404+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
56405 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
56406
56407 sysfs_put_active(attr_sd);
56408@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
56409 return -ENOMEM;
56410
56411 atomic_set(&new_od->refcnt, 0);
56412- atomic_set(&new_od->event, 1);
56413+ atomic_set_unchecked(&new_od->event, 1);
56414 init_waitqueue_head(&new_od->poll);
56415 INIT_LIST_HEAD(&new_od->buffers);
56416 goto retry;
56417@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
56418
56419 sysfs_put_active(attr_sd);
56420
56421- if (buffer->event != atomic_read(&od->event))
56422+ if (buffer->event != atomic_read_unchecked(&od->event))
56423 goto trigger;
56424
56425 return DEFAULT_POLLMASK;
56426@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
56427
56428 od = sd->s_attr.open;
56429 if (od) {
56430- atomic_inc(&od->event);
56431+ atomic_inc_unchecked(&od->event);
56432 wake_up_interruptible(&od->poll);
56433 }
56434
56435diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
56436index 3c9eb56..9dea5be 100644
56437--- a/fs/sysfs/symlink.c
56438+++ b/fs/sysfs/symlink.c
56439@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
56440
56441 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
56442 {
56443- char *page = nd_get_link(nd);
56444+ const char *page = nd_get_link(nd);
56445 if (!IS_ERR(page))
56446 free_page((unsigned long)page);
56447 }
56448diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
56449index 69d4889..a810bd4 100644
56450--- a/fs/sysv/sysv.h
56451+++ b/fs/sysv/sysv.h
56452@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
56453 #endif
56454 }
56455
56456-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
56457+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
56458 {
56459 if (sbi->s_bytesex == BYTESEX_PDP)
56460 return PDP_swab((__force __u32)n);
56461diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
56462index e18b988..f1d4ad0f 100644
56463--- a/fs/ubifs/io.c
56464+++ b/fs/ubifs/io.c
56465@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
56466 return err;
56467 }
56468
56469-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
56470+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
56471 {
56472 int err;
56473
56474diff --git a/fs/udf/misc.c b/fs/udf/misc.c
56475index c175b4d..8f36a16 100644
56476--- a/fs/udf/misc.c
56477+++ b/fs/udf/misc.c
56478@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
56479
56480 u8 udf_tag_checksum(const struct tag *t)
56481 {
56482- u8 *data = (u8 *)t;
56483+ const u8 *data = (const u8 *)t;
56484 u8 checksum = 0;
56485 int i;
56486 for (i = 0; i < sizeof(struct tag); ++i)
56487diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
56488index 8d974c4..b82f6ec 100644
56489--- a/fs/ufs/swab.h
56490+++ b/fs/ufs/swab.h
56491@@ -22,7 +22,7 @@ enum {
56492 BYTESEX_BE
56493 };
56494
56495-static inline u64
56496+static inline u64 __intentional_overflow(-1)
56497 fs64_to_cpu(struct super_block *sbp, __fs64 n)
56498 {
56499 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
56500@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
56501 return (__force __fs64)cpu_to_be64(n);
56502 }
56503
56504-static inline u32
56505+static inline u32 __intentional_overflow(-1)
56506 fs32_to_cpu(struct super_block *sbp, __fs32 n)
56507 {
56508 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
56509diff --git a/fs/utimes.c b/fs/utimes.c
56510index f4fb7ec..3fe03c0 100644
56511--- a/fs/utimes.c
56512+++ b/fs/utimes.c
56513@@ -1,6 +1,7 @@
56514 #include <linux/compiler.h>
56515 #include <linux/file.h>
56516 #include <linux/fs.h>
56517+#include <linux/security.h>
56518 #include <linux/linkage.h>
56519 #include <linux/mount.h>
56520 #include <linux/namei.h>
56521@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
56522 goto mnt_drop_write_and_out;
56523 }
56524 }
56525+
56526+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
56527+ error = -EACCES;
56528+ goto mnt_drop_write_and_out;
56529+ }
56530+
56531 mutex_lock(&inode->i_mutex);
56532 error = notify_change(path->dentry, &newattrs);
56533 mutex_unlock(&inode->i_mutex);
56534diff --git a/fs/xattr.c b/fs/xattr.c
56535index 3377dff..4feded6 100644
56536--- a/fs/xattr.c
56537+++ b/fs/xattr.c
56538@@ -319,7 +319,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
56539 * Extended attribute SET operations
56540 */
56541 static long
56542-setxattr(struct dentry *d, const char __user *name, const void __user *value,
56543+setxattr(struct path *path, const char __user *name, const void __user *value,
56544 size_t size, int flags)
56545 {
56546 int error;
56547@@ -355,7 +355,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
56548 posix_acl_fix_xattr_from_user(kvalue, size);
56549 }
56550
56551- error = vfs_setxattr(d, kname, kvalue, size, flags);
56552+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
56553+ error = -EACCES;
56554+ goto out;
56555+ }
56556+
56557+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
56558 out:
56559 if (vvalue)
56560 vfree(vvalue);
56561@@ -377,7 +382,7 @@ retry:
56562 return error;
56563 error = mnt_want_write(path.mnt);
56564 if (!error) {
56565- error = setxattr(path.dentry, name, value, size, flags);
56566+ error = setxattr(&path, name, value, size, flags);
56567 mnt_drop_write(path.mnt);
56568 }
56569 path_put(&path);
56570@@ -401,7 +406,7 @@ retry:
56571 return error;
56572 error = mnt_want_write(path.mnt);
56573 if (!error) {
56574- error = setxattr(path.dentry, name, value, size, flags);
56575+ error = setxattr(&path, name, value, size, flags);
56576 mnt_drop_write(path.mnt);
56577 }
56578 path_put(&path);
56579@@ -416,16 +421,14 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
56580 const void __user *,value, size_t, size, int, flags)
56581 {
56582 struct fd f = fdget(fd);
56583- struct dentry *dentry;
56584 int error = -EBADF;
56585
56586 if (!f.file)
56587 return error;
56588- dentry = f.file->f_path.dentry;
56589- audit_inode(NULL, dentry, 0);
56590+ audit_inode(NULL, f.file->f_path.dentry, 0);
56591 error = mnt_want_write_file(f.file);
56592 if (!error) {
56593- error = setxattr(dentry, name, value, size, flags);
56594+ error = setxattr(&f.file->f_path, name, value, size, flags);
56595 mnt_drop_write_file(f.file);
56596 }
56597 fdput(f);
56598diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
56599index 9fbea87..6b19972 100644
56600--- a/fs/xattr_acl.c
56601+++ b/fs/xattr_acl.c
56602@@ -76,8 +76,8 @@ struct posix_acl *
56603 posix_acl_from_xattr(struct user_namespace *user_ns,
56604 const void *value, size_t size)
56605 {
56606- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
56607- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
56608+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
56609+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
56610 int count;
56611 struct posix_acl *acl;
56612 struct posix_acl_entry *acl_e;
56613diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
56614index 572a858..12a9b0d 100644
56615--- a/fs/xfs/xfs_bmap.c
56616+++ b/fs/xfs/xfs_bmap.c
56617@@ -192,7 +192,7 @@ xfs_bmap_validate_ret(
56618 int nmap,
56619 int ret_nmap);
56620 #else
56621-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
56622+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
56623 #endif /* DEBUG */
56624
56625 STATIC int
56626diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
56627index 1b9fc3e..e1bdde0 100644
56628--- a/fs/xfs/xfs_dir2_sf.c
56629+++ b/fs/xfs/xfs_dir2_sf.c
56630@@ -851,7 +851,15 @@ xfs_dir2_sf_getdents(
56631 }
56632
56633 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
56634- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
56635+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
56636+ char name[sfep->namelen];
56637+ memcpy(name, sfep->name, sfep->namelen);
56638+ if (filldir(dirent, name, sfep->namelen,
56639+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
56640+ *offset = off & 0x7fffffff;
56641+ return 0;
56642+ }
56643+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
56644 off & 0x7fffffff, ino, DT_UNKNOWN)) {
56645 *offset = off & 0x7fffffff;
56646 return 0;
56647diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
56648index c1c3ef8..0952438 100644
56649--- a/fs/xfs/xfs_ioctl.c
56650+++ b/fs/xfs/xfs_ioctl.c
56651@@ -127,7 +127,7 @@ xfs_find_handle(
56652 }
56653
56654 error = -EFAULT;
56655- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
56656+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
56657 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
56658 goto out_put;
56659
56660diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
56661index d82efaa..0904a8e 100644
56662--- a/fs/xfs/xfs_iops.c
56663+++ b/fs/xfs/xfs_iops.c
56664@@ -395,7 +395,7 @@ xfs_vn_put_link(
56665 struct nameidata *nd,
56666 void *p)
56667 {
56668- char *s = nd_get_link(nd);
56669+ const char *s = nd_get_link(nd);
56670
56671 if (!IS_ERR(s))
56672 kfree(s);
56673diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
56674new file mode 100644
56675index 0000000..92247e4
56676--- /dev/null
56677+++ b/grsecurity/Kconfig
56678@@ -0,0 +1,1021 @@
56679+#
56680+# grecurity configuration
56681+#
56682+menu "Memory Protections"
56683+depends on GRKERNSEC
56684+
56685+config GRKERNSEC_KMEM
56686+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
56687+ default y if GRKERNSEC_CONFIG_AUTO
56688+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
56689+ help
56690+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
56691+ be written to or read from to modify or leak the contents of the running
56692+ kernel. /dev/port will also not be allowed to be opened and support
56693+ for /dev/cpu/*/msr will be removed. If you have module
56694+ support disabled, enabling this will close up five ways that are
56695+ currently used to insert malicious code into the running kernel.
56696+
56697+ Even with all these features enabled, we still highly recommend that
56698+ you use the RBAC system, as it is still possible for an attacker to
56699+ modify the running kernel through privileged I/O granted by ioperm/iopl.
56700+
56701+ If you are not using XFree86, you may be able to stop this additional
56702+ case by enabling the 'Disable privileged I/O' option. Though nothing
56703+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
56704+ but only to video memory, which is the only writing we allow in this
56705+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
56706+ not be allowed to mprotect it with PROT_WRITE later.
56707+ Enabling this feature will prevent the "cpupower" and "powertop" tools
56708+ from working.
56709+
56710+ It is highly recommended that you say Y here if you meet all the
56711+ conditions above.
56712+
56713+config GRKERNSEC_VM86
56714+ bool "Restrict VM86 mode"
56715+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
56716+ depends on X86_32
56717+
56718+ help
56719+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
56720+ make use of a special execution mode on 32bit x86 processors called
56721+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
56722+ video cards and will still work with this option enabled. The purpose
56723+ of the option is to prevent exploitation of emulation errors in
56724+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
56725+ Nearly all users should be able to enable this option.
56726+
56727+config GRKERNSEC_IO
56728+ bool "Disable privileged I/O"
56729+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
56730+ depends on X86
56731+ select RTC_CLASS
56732+ select RTC_INTF_DEV
56733+ select RTC_DRV_CMOS
56734+
56735+ help
56736+ If you say Y here, all ioperm and iopl calls will return an error.
56737+ Ioperm and iopl can be used to modify the running kernel.
56738+ Unfortunately, some programs need this access to operate properly,
56739+ the most notable of which are XFree86 and hwclock. hwclock can be
56740+ remedied by having RTC support in the kernel, so real-time
56741+ clock support is enabled if this option is enabled, to ensure
56742+ that hwclock operates correctly. XFree86 still will not
56743+ operate correctly with this option enabled, so DO NOT CHOOSE Y
56744+ IF YOU USE XFree86. If you use XFree86 and you still want to
56745+ protect your kernel against modification, use the RBAC system.
56746+
56747+config GRKERNSEC_JIT_HARDEN
56748+ bool "Harden BPF JIT against spray attacks"
56749+ default y if GRKERNSEC_CONFIG_AUTO
56750+ depends on BPF_JIT
56751+ help
56752+ If you say Y here, the native code generated by the kernel's Berkeley
56753+ Packet Filter (BPF) JIT engine will be hardened against JIT-spraying
56754+ attacks that attempt to fit attacker-beneficial instructions in
56755+ 32bit immediate fields of JIT-generated native instructions. The
56756+ attacker will generally aim to cause an unintended instruction sequence
56757+ of JIT-generated native code to execute by jumping into the middle of
56758+ a generated instruction. This feature effectively randomizes the 32bit
56759+ immediate constants present in the generated code to thwart such attacks.
56760+
56761+ If you're using KERNEXEC, it's recommended that you enable this option
56762+ to supplement the hardening of the kernel.
56763+
56764+config GRKERNSEC_RAND_THREADSTACK
56765+ bool "Insert random gaps between thread stacks"
56766+ default y if GRKERNSEC_CONFIG_AUTO
56767+ depends on PAX_RANDMMAP && !PPC
56768+ help
56769+ If you say Y here, a random-sized gap will be enforced between allocated
56770+ thread stacks. Glibc's NPTL and other threading libraries that
56771+ pass MAP_STACK to the kernel for thread stack allocation are supported.
56772+ The implementation currently provides 8 bits of entropy for the gap.
56773+
56774+ Many distributions do not compile threaded remote services with the
56775+ -fstack-check argument to GCC, causing the variable-sized stack-based
56776+ allocator, alloca(), to not probe the stack on allocation. This
56777+ permits an unbounded alloca() to skip over any guard page and potentially
56778+ modify another thread's stack reliably. An enforced random gap
56779+ reduces the reliability of such an attack and increases the chance
56780+ that such a read/write to another thread's stack instead lands in
56781+ an unmapped area, causing a crash and triggering grsecurity's
56782+ anti-bruteforcing logic.
56783+
56784+config GRKERNSEC_PROC_MEMMAP
56785+ bool "Harden ASLR against information leaks and entropy reduction"
56786+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
56787+ depends on PAX_NOEXEC || PAX_ASLR
56788+ help
56789+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
56790+ give no information about the addresses of its mappings if
56791+ PaX features that rely on random addresses are enabled on the task.
56792+ In addition to sanitizing this information and disabling other
56793+ dangerous sources of information, this option causes reads of sensitive
56794+ /proc/<pid> entries where the file descriptor was opened in a different
56795+ task than the one performing the read. Such attempts are logged.
56796+ This option also limits argv/env strings for suid/sgid binaries
56797+ to 512KB to prevent a complete exhaustion of the stack entropy provided
56798+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
56799+ binaries to prevent alternative mmap layouts from being abused.
56800+
56801+ If you use PaX it is essential that you say Y here as it closes up
56802+ several holes that make full ASLR useless locally.
56803+
56804+config GRKERNSEC_BRUTE
56805+ bool "Deter exploit bruteforcing"
56806+ default y if GRKERNSEC_CONFIG_AUTO
56807+ help
56808+ If you say Y here, attempts to bruteforce exploits against forking
56809+ daemons such as apache or sshd, as well as against suid/sgid binaries
56810+ will be deterred. When a child of a forking daemon is killed by PaX
56811+ or crashes due to an illegal instruction or other suspicious signal,
56812+ the parent process will be delayed 30 seconds upon every subsequent
56813+ fork until the administrator is able to assess the situation and
56814+ restart the daemon.
56815+ In the suid/sgid case, the attempt is logged, the user has all their
56816+ processes terminated, and they are prevented from executing any further
56817+ processes for 15 minutes.
56818+ It is recommended that you also enable signal logging in the auditing
56819+ section so that logs are generated when a process triggers a suspicious
56820+ signal.
56821+ If the sysctl option is enabled, a sysctl option with name
56822+ "deter_bruteforce" is created.
56823+
56824+
56825+config GRKERNSEC_MODHARDEN
56826+ bool "Harden module auto-loading"
56827+ default y if GRKERNSEC_CONFIG_AUTO
56828+ depends on MODULES
56829+ help
56830+ If you say Y here, module auto-loading in response to use of some
56831+ feature implemented by an unloaded module will be restricted to
56832+ root users. Enabling this option helps defend against attacks
56833+ by unprivileged users who abuse the auto-loading behavior to
56834+ cause a vulnerable module to load that is then exploited.
56835+
56836+ If this option prevents a legitimate use of auto-loading for a
56837+ non-root user, the administrator can execute modprobe manually
56838+ with the exact name of the module mentioned in the alert log.
56839+ Alternatively, the administrator can add the module to the list
56840+ of modules loaded at boot by modifying init scripts.
56841+
56842+ Modification of init scripts will most likely be needed on
56843+ Ubuntu servers with encrypted home directory support enabled,
56844+ as the first non-root user logging in will cause the ecb(aes),
56845+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
56846+
56847+config GRKERNSEC_HIDESYM
56848+ bool "Hide kernel symbols"
56849+ default y if GRKERNSEC_CONFIG_AUTO
56850+ select PAX_USERCOPY_SLABS
56851+ help
56852+ If you say Y here, getting information on loaded modules, and
56853+ displaying all kernel symbols through a syscall will be restricted
56854+ to users with CAP_SYS_MODULE. For software compatibility reasons,
56855+ /proc/kallsyms will be restricted to the root user. The RBAC
56856+ system can hide that entry even from root.
56857+
56858+ This option also prevents leaking of kernel addresses through
56859+ several /proc entries.
56860+
56861+ Note that this option is only effective provided the following
56862+ conditions are met:
56863+ 1) The kernel using grsecurity is not precompiled by some distribution
56864+ 2) You have also enabled GRKERNSEC_DMESG
56865+ 3) You are using the RBAC system and hiding other files such as your
56866+ kernel image and System.map. Alternatively, enabling this option
56867+ causes the permissions on /boot, /lib/modules, and the kernel
56868+ source directory to change at compile time to prevent
56869+ reading by non-root users.
56870+ If the above conditions are met, this option will aid in providing a
56871+ useful protection against local kernel exploitation of overflows
56872+ and arbitrary read/write vulnerabilities.
56873+
56874+config GRKERNSEC_KERN_LOCKOUT
56875+ bool "Active kernel exploit response"
56876+ default y if GRKERNSEC_CONFIG_AUTO
56877+ depends on X86 || ARM || PPC || SPARC
56878+ help
56879+ If you say Y here, when a PaX alert is triggered due to suspicious
56880+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
56881+ or an OOPS occurs due to bad memory accesses, instead of just
56882+ terminating the offending process (and potentially allowing
56883+ a subsequent exploit from the same user), we will take one of two
56884+ actions:
56885+ If the user was root, we will panic the system
56886+ If the user was non-root, we will log the attempt, terminate
56887+ all processes owned by the user, then prevent them from creating
56888+ any new processes until the system is restarted
56889+ This deters repeated kernel exploitation/bruteforcing attempts
56890+ and is useful for later forensics.
56891+
56892+endmenu
56893+menu "Role Based Access Control Options"
56894+depends on GRKERNSEC
56895+
56896+config GRKERNSEC_RBAC_DEBUG
56897+ bool
56898+
56899+config GRKERNSEC_NO_RBAC
56900+ bool "Disable RBAC system"
56901+ help
56902+ If you say Y here, the /dev/grsec device will be removed from the kernel,
56903+ preventing the RBAC system from being enabled. You should only say Y
56904+ here if you have no intention of using the RBAC system, so as to prevent
56905+ an attacker with root access from misusing the RBAC system to hide files
56906+ and processes when loadable module support and /dev/[k]mem have been
56907+ locked down.
56908+
56909+config GRKERNSEC_ACL_HIDEKERN
56910+ bool "Hide kernel processes"
56911+ help
56912+ If you say Y here, all kernel threads will be hidden to all
56913+ processes but those whose subject has the "view hidden processes"
56914+ flag.
56915+
56916+config GRKERNSEC_ACL_MAXTRIES
56917+ int "Maximum tries before password lockout"
56918+ default 3
56919+ help
56920+ This option enforces the maximum number of times a user can attempt
56921+ to authorize themselves with the grsecurity RBAC system before being
56922+ denied the ability to attempt authorization again for a specified time.
56923+ The lower the number, the harder it will be to brute-force a password.
56924+
56925+config GRKERNSEC_ACL_TIMEOUT
56926+ int "Time to wait after max password tries, in seconds"
56927+ default 30
56928+ help
56929+ This option specifies the time the user must wait after attempting to
56930+ authorize to the RBAC system with the maximum number of invalid
56931+ passwords. The higher the number, the harder it will be to brute-force
56932+ a password.
56933+
56934+endmenu
56935+menu "Filesystem Protections"
56936+depends on GRKERNSEC
56937+
56938+config GRKERNSEC_PROC
56939+ bool "Proc restrictions"
56940+ default y if GRKERNSEC_CONFIG_AUTO
56941+ help
56942+ If you say Y here, the permissions of the /proc filesystem
56943+ will be altered to enhance system security and privacy. You MUST
56944+ choose either a user only restriction or a user and group restriction.
56945+ Depending upon the option you choose, you can either restrict users to
56946+ see only the processes they themselves run, or choose a group that can
56947+ view all processes and files normally restricted to root if you choose
56948+ the "restrict to user only" option. NOTE: If you're running identd or
56949+ ntpd as a non-root user, you will have to run it as the group you
56950+ specify here.
56951+
56952+config GRKERNSEC_PROC_USER
56953+ bool "Restrict /proc to user only"
56954+ depends on GRKERNSEC_PROC
56955+ help
56956+ If you say Y here, non-root users will only be able to view their own
56957+ processes, and restricts them from viewing network-related information,
56958+ and viewing kernel symbol and module information.
56959+
56960+config GRKERNSEC_PROC_USERGROUP
56961+ bool "Allow special group"
56962+ default y if GRKERNSEC_CONFIG_AUTO
56963+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
56964+ help
56965+ If you say Y here, you will be able to select a group that will be
56966+ able to view all processes and network-related information. If you've
56967+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
56968+ remain hidden. This option is useful if you want to run identd as
56969+ a non-root user. The group you select may also be chosen at boot time
56970+ via "grsec_proc_gid=" on the kernel commandline.
56971+
56972+config GRKERNSEC_PROC_GID
56973+ int "GID for special group"
56974+ depends on GRKERNSEC_PROC_USERGROUP
56975+ default 1001
56976+
56977+config GRKERNSEC_PROC_ADD
56978+ bool "Additional restrictions"
56979+ default y if GRKERNSEC_CONFIG_AUTO
56980+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
56981+ help
56982+ If you say Y here, additional restrictions will be placed on
56983+ /proc that keep normal users from viewing device information and
56984+ slabinfo information that could be useful for exploits.
56985+
56986+config GRKERNSEC_LINK
56987+ bool "Linking restrictions"
56988+ default y if GRKERNSEC_CONFIG_AUTO
56989+ help
56990+ If you say Y here, /tmp race exploits will be prevented, since users
56991+ will no longer be able to follow symlinks owned by other users in
56992+ world-writable +t directories (e.g. /tmp), unless the owner of the
 56993+ symlink is the owner of the directory. Users will also not be
56994+ able to hardlink to files they do not own. If the sysctl option is
56995+ enabled, a sysctl option with name "linking_restrictions" is created.
56996+
56997+config GRKERNSEC_SYMLINKOWN
56998+ bool "Kernel-enforced SymlinksIfOwnerMatch"
56999+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
57000+ help
57001+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
57002+ that prevents it from being used as a security feature. As Apache
57003+ verifies the symlink by performing a stat() against the target of
57004+ the symlink before it is followed, an attacker can setup a symlink
57005+ to point to a same-owned file, then replace the symlink with one
57006+ that targets another user's file just after Apache "validates" the
57007+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
57008+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
57009+ will be in place for the group you specify. If the sysctl option
57010+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
57011+ created.
57012+
57013+config GRKERNSEC_SYMLINKOWN_GID
57014+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
57015+ depends on GRKERNSEC_SYMLINKOWN
57016+ default 1006
57017+ help
57018+ Setting this GID determines what group kernel-enforced
57019+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
57020+ is enabled, a sysctl option with name "symlinkown_gid" is created.
57021+
57022+config GRKERNSEC_FIFO
57023+ bool "FIFO restrictions"
57024+ default y if GRKERNSEC_CONFIG_AUTO
57025+ help
57026+ If you say Y here, users will not be able to write to FIFOs they don't
57027+ own in world-writable +t directories (e.g. /tmp), unless the owner of
57028+ the FIFO is the same owner of the directory it's held in. If the sysctl
57029+ option is enabled, a sysctl option with name "fifo_restrictions" is
57030+ created.
57031+
57032+config GRKERNSEC_SYSFS_RESTRICT
57033+ bool "Sysfs/debugfs restriction"
57034+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
57035+ depends on SYSFS
57036+ help
57037+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
57038+ any filesystem normally mounted under it (e.g. debugfs) will be
57039+ mostly accessible only by root. These filesystems generally provide access
57040+ to hardware and debug information that isn't appropriate for unprivileged
57041+ users of the system. Sysfs and debugfs have also become a large source
57042+ of new vulnerabilities, ranging from infoleaks to local compromise.
57043+ There has been very little oversight with an eye toward security involved
57044+ in adding new exporters of information to these filesystems, so their
57045+ use is discouraged.
57046+ For reasons of compatibility, a few directories have been whitelisted
57047+ for access by non-root users:
57048+ /sys/fs/selinux
57049+ /sys/fs/fuse
57050+ /sys/devices/system/cpu
57051+
57052+config GRKERNSEC_ROFS
57053+ bool "Runtime read-only mount protection"
57054+ help
57055+ If you say Y here, a sysctl option with name "romount_protect" will
57056+ be created. By setting this option to 1 at runtime, filesystems
57057+ will be protected in the following ways:
57058+ * No new writable mounts will be allowed
57059+ * Existing read-only mounts won't be able to be remounted read/write
57060+ * Write operations will be denied on all block devices
57061+ This option acts independently of grsec_lock: once it is set to 1,
57062+ it cannot be turned off. Therefore, please be mindful of the resulting
57063+ behavior if this option is enabled in an init script on a read-only
57064+ filesystem. This feature is mainly intended for secure embedded systems.
57065+
57066+config GRKERNSEC_DEVICE_SIDECHANNEL
57067+ bool "Eliminate stat/notify-based device sidechannels"
57068+ default y if GRKERNSEC_CONFIG_AUTO
57069+ help
57070+ If you say Y here, timing analyses on block or character
57071+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
57072+ will be thwarted for unprivileged users. If a process without
57073+ CAP_MKNOD stats such a device, the last access and last modify times
57074+ will match the device's create time. No access or modify events
57075+ will be triggered through inotify/dnotify/fanotify for such devices.
57076+ This feature will prevent attacks that may at a minimum
57077+ allow an attacker to determine the administrator's password length.
57078+
57079+config GRKERNSEC_CHROOT
57080+ bool "Chroot jail restrictions"
57081+ default y if GRKERNSEC_CONFIG_AUTO
57082+ help
57083+ If you say Y here, you will be able to choose several options that will
57084+ make breaking out of a chrooted jail much more difficult. If you
57085+ encounter no software incompatibilities with the following options, it
57086+ is recommended that you enable each one.
57087+
57088+config GRKERNSEC_CHROOT_MOUNT
57089+ bool "Deny mounts"
57090+ default y if GRKERNSEC_CONFIG_AUTO
57091+ depends on GRKERNSEC_CHROOT
57092+ help
57093+ If you say Y here, processes inside a chroot will not be able to
57094+ mount or remount filesystems. If the sysctl option is enabled, a
57095+ sysctl option with name "chroot_deny_mount" is created.
57096+
57097+config GRKERNSEC_CHROOT_DOUBLE
57098+ bool "Deny double-chroots"
57099+ default y if GRKERNSEC_CONFIG_AUTO
57100+ depends on GRKERNSEC_CHROOT
57101+ help
57102+ If you say Y here, processes inside a chroot will not be able to chroot
57103+ again outside the chroot. This is a widely used method of breaking
57104+ out of a chroot jail and should not be allowed. If the sysctl
57105+ option is enabled, a sysctl option with name
57106+ "chroot_deny_chroot" is created.
57107+
57108+config GRKERNSEC_CHROOT_PIVOT
57109+ bool "Deny pivot_root in chroot"
57110+ default y if GRKERNSEC_CONFIG_AUTO
57111+ depends on GRKERNSEC_CHROOT
57112+ help
57113+ If you say Y here, processes inside a chroot will not be able to use
57114+ a function called pivot_root() that was introduced in Linux 2.3.41. It
57115+ works similar to chroot in that it changes the root filesystem. This
57116+ function could be misused in a chrooted process to attempt to break out
57117+ of the chroot, and therefore should not be allowed. If the sysctl
57118+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
57119+ created.
57120+
57121+config GRKERNSEC_CHROOT_CHDIR
57122+ bool "Enforce chdir(\"/\") on all chroots"
57123+ default y if GRKERNSEC_CONFIG_AUTO
57124+ depends on GRKERNSEC_CHROOT
57125+ help
57126+ If you say Y here, the current working directory of all newly-chrooted
 57127+ applications will be set to the root directory of the chroot.
57128+ The man page on chroot(2) states:
57129+ Note that this call does not change the current working
57130+ directory, so that `.' can be outside the tree rooted at
57131+ `/'. In particular, the super-user can escape from a
57132+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
57133+
57134+ It is recommended that you say Y here, since it's not known to break
57135+ any software. If the sysctl option is enabled, a sysctl option with
57136+ name "chroot_enforce_chdir" is created.
57137+
57138+config GRKERNSEC_CHROOT_CHMOD
57139+ bool "Deny (f)chmod +s"
57140+ default y if GRKERNSEC_CONFIG_AUTO
57141+ depends on GRKERNSEC_CHROOT
57142+ help
57143+ If you say Y here, processes inside a chroot will not be able to chmod
57144+ or fchmod files to make them have suid or sgid bits. This protects
57145+ against another published method of breaking a chroot. If the sysctl
57146+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
57147+ created.
57148+
57149+config GRKERNSEC_CHROOT_FCHDIR
57150+ bool "Deny fchdir out of chroot"
57151+ default y if GRKERNSEC_CONFIG_AUTO
57152+ depends on GRKERNSEC_CHROOT
57153+ help
57154+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
57155+ to a file descriptor of the chrooting process that points to a directory
57156+ outside the filesystem will be stopped. If the sysctl option
57157+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
57158+
57159+config GRKERNSEC_CHROOT_MKNOD
57160+ bool "Deny mknod"
57161+ default y if GRKERNSEC_CONFIG_AUTO
57162+ depends on GRKERNSEC_CHROOT
57163+ help
57164+ If you say Y here, processes inside a chroot will not be allowed to
57165+ mknod. The problem with using mknod inside a chroot is that it
57166+ would allow an attacker to create a device entry that is the same
57167+ as one on the physical root of your system, which could range from
57168+ anything from the console device to a device for your harddrive (which
57169+ they could then use to wipe the drive or steal data). It is recommended
57170+ that you say Y here, unless you run into software incompatibilities.
57171+ If the sysctl option is enabled, a sysctl option with name
57172+ "chroot_deny_mknod" is created.
57173+
57174+config GRKERNSEC_CHROOT_SHMAT
57175+ bool "Deny shmat() out of chroot"
57176+ default y if GRKERNSEC_CONFIG_AUTO
57177+ depends on GRKERNSEC_CHROOT
57178+ help
57179+ If you say Y here, processes inside a chroot will not be able to attach
57180+ to shared memory segments that were created outside of the chroot jail.
57181+ It is recommended that you say Y here. If the sysctl option is enabled,
57182+ a sysctl option with name "chroot_deny_shmat" is created.
57183+
57184+config GRKERNSEC_CHROOT_UNIX
57185+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
57186+ default y if GRKERNSEC_CONFIG_AUTO
57187+ depends on GRKERNSEC_CHROOT
57188+ help
57189+ If you say Y here, processes inside a chroot will not be able to
57190+ connect to abstract (meaning not belonging to a filesystem) Unix
57191+ domain sockets that were bound outside of a chroot. It is recommended
57192+ that you say Y here. If the sysctl option is enabled, a sysctl option
57193+ with name "chroot_deny_unix" is created.
57194+
57195+config GRKERNSEC_CHROOT_FINDTASK
57196+ bool "Protect outside processes"
57197+ default y if GRKERNSEC_CONFIG_AUTO
57198+ depends on GRKERNSEC_CHROOT
57199+ help
57200+ If you say Y here, processes inside a chroot will not be able to
57201+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
57202+ getsid, or view any process outside of the chroot. If the sysctl
57203+ option is enabled, a sysctl option with name "chroot_findtask" is
57204+ created.
57205+
57206+config GRKERNSEC_CHROOT_NICE
57207+ bool "Restrict priority changes"
57208+ default y if GRKERNSEC_CONFIG_AUTO
57209+ depends on GRKERNSEC_CHROOT
57210+ help
57211+ If you say Y here, processes inside a chroot will not be able to raise
57212+ the priority of processes in the chroot, or alter the priority of
57213+ processes outside the chroot. This provides more security than simply
57214+ removing CAP_SYS_NICE from the process' capability set. If the
57215+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
57216+ is created.
57217+
57218+config GRKERNSEC_CHROOT_SYSCTL
57219+ bool "Deny sysctl writes"
57220+ default y if GRKERNSEC_CONFIG_AUTO
57221+ depends on GRKERNSEC_CHROOT
57222+ help
57223+ If you say Y here, an attacker in a chroot will not be able to
57224+ write to sysctl entries, either by sysctl(2) or through a /proc
57225+ interface. It is strongly recommended that you say Y here. If the
57226+ sysctl option is enabled, a sysctl option with name
57227+ "chroot_deny_sysctl" is created.
57228+
57229+config GRKERNSEC_CHROOT_CAPS
57230+ bool "Capability restrictions"
57231+ default y if GRKERNSEC_CONFIG_AUTO
57232+ depends on GRKERNSEC_CHROOT
57233+ help
57234+ If you say Y here, the capabilities on all processes within a
57235+ chroot jail will be lowered to stop module insertion, raw i/o,
57236+ system and net admin tasks, rebooting the system, modifying immutable
57237+ files, modifying IPC owned by another, and changing the system time.
57238+ This is left an option because it can break some apps. Disable this
57239+ if your chrooted apps are having problems performing those kinds of
57240+ tasks. If the sysctl option is enabled, a sysctl option with
57241+ name "chroot_caps" is created.
57242+
57243+endmenu
57244+menu "Kernel Auditing"
57245+depends on GRKERNSEC
57246+
57247+config GRKERNSEC_AUDIT_GROUP
57248+ bool "Single group for auditing"
57249+ help
57250+ If you say Y here, the exec and chdir logging features will only operate
57251+ on a group you specify. This option is recommended if you only want to
57252+ watch certain users instead of having a large amount of logs from the
57253+ entire system. If the sysctl option is enabled, a sysctl option with
57254+ name "audit_group" is created.
57255+
57256+config GRKERNSEC_AUDIT_GID
57257+ int "GID for auditing"
57258+ depends on GRKERNSEC_AUDIT_GROUP
57259+ default 1007
57260+
57261+config GRKERNSEC_EXECLOG
57262+ bool "Exec logging"
57263+ help
57264+ If you say Y here, all execve() calls will be logged (since the
57265+ other exec*() calls are frontends to execve(), all execution
57266+ will be logged). Useful for shell-servers that like to keep track
57267+ of their users. If the sysctl option is enabled, a sysctl option with
57268+ name "exec_logging" is created.
57269+ WARNING: This option when enabled will produce a LOT of logs, especially
57270+ on an active system.
57271+
57272+config GRKERNSEC_RESLOG
57273+ bool "Resource logging"
57274+ default y if GRKERNSEC_CONFIG_AUTO
57275+ help
57276+ If you say Y here, all attempts to overstep resource limits will
57277+ be logged with the resource name, the requested size, and the current
57278+ limit. It is highly recommended that you say Y here. If the sysctl
57279+ option is enabled, a sysctl option with name "resource_logging" is
57280+ created. If the RBAC system is enabled, the sysctl value is ignored.
57281+
57282+config GRKERNSEC_CHROOT_EXECLOG
57283+ bool "Log execs within chroot"
57284+ help
57285+ If you say Y here, all executions inside a chroot jail will be logged
57286+ to syslog. This can cause a large amount of logs if certain
57287+ applications (eg. djb's daemontools) are installed on the system, and
57288+ is therefore left as an option. If the sysctl option is enabled, a
57289+ sysctl option with name "chroot_execlog" is created.
57290+
57291+config GRKERNSEC_AUDIT_PTRACE
57292+ bool "Ptrace logging"
57293+ help
57294+ If you say Y here, all attempts to attach to a process via ptrace
57295+ will be logged. If the sysctl option is enabled, a sysctl option
57296+ with name "audit_ptrace" is created.
57297+
57298+config GRKERNSEC_AUDIT_CHDIR
57299+ bool "Chdir logging"
57300+ help
57301+ If you say Y here, all chdir() calls will be logged. If the sysctl
57302+ option is enabled, a sysctl option with name "audit_chdir" is created.
57303+
57304+config GRKERNSEC_AUDIT_MOUNT
57305+ bool "(Un)Mount logging"
57306+ help
57307+ If you say Y here, all mounts and unmounts will be logged. If the
57308+ sysctl option is enabled, a sysctl option with name "audit_mount" is
57309+ created.
57310+
57311+config GRKERNSEC_SIGNAL
57312+ bool "Signal logging"
57313+ default y if GRKERNSEC_CONFIG_AUTO
57314+ help
57315+ If you say Y here, certain important signals will be logged, such as
 57316+ SIGSEGV, which will as a result inform you of when an error in a program
57317+ occurred, which in some cases could mean a possible exploit attempt.
57318+ If the sysctl option is enabled, a sysctl option with name
57319+ "signal_logging" is created.
57320+
57321+config GRKERNSEC_FORKFAIL
57322+ bool "Fork failure logging"
57323+ help
57324+ If you say Y here, all failed fork() attempts will be logged.
57325+ This could suggest a fork bomb, or someone attempting to overstep
57326+ their process limit. If the sysctl option is enabled, a sysctl option
57327+ with name "forkfail_logging" is created.
57328+
57329+config GRKERNSEC_TIME
57330+ bool "Time change logging"
57331+ default y if GRKERNSEC_CONFIG_AUTO
57332+ help
57333+ If you say Y here, any changes of the system clock will be logged.
57334+ If the sysctl option is enabled, a sysctl option with name
57335+ "timechange_logging" is created.
57336+
57337+config GRKERNSEC_PROC_IPADDR
57338+ bool "/proc/<pid>/ipaddr support"
57339+ default y if GRKERNSEC_CONFIG_AUTO
57340+ help
57341+ If you say Y here, a new entry will be added to each /proc/<pid>
57342+ directory that contains the IP address of the person using the task.
57343+ The IP is carried across local TCP and AF_UNIX stream sockets.
57344+ This information can be useful for IDS/IPSes to perform remote response
57345+ to a local attack. The entry is readable by only the owner of the
57346+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
57347+ the RBAC system), and thus does not create privacy concerns.
57348+
57349+config GRKERNSEC_RWXMAP_LOG
57350+ bool 'Denied RWX mmap/mprotect logging'
57351+ default y if GRKERNSEC_CONFIG_AUTO
57352+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
57353+ help
57354+ If you say Y here, calls to mmap() and mprotect() with explicit
57355+ usage of PROT_WRITE and PROT_EXEC together will be logged when
57356+ denied by the PAX_MPROTECT feature. If the sysctl option is
57357+ enabled, a sysctl option with name "rwxmap_logging" is created.
57358+
57359+config GRKERNSEC_AUDIT_TEXTREL
57360+ bool 'ELF text relocations logging (READ HELP)'
57361+ depends on PAX_MPROTECT
57362+ help
57363+ If you say Y here, text relocations will be logged with the filename
57364+ of the offending library or binary. The purpose of the feature is
57365+ to help Linux distribution developers get rid of libraries and
57366+ binaries that need text relocations which hinder the future progress
57367+ of PaX. Only Linux distribution developers should say Y here, and
57368+ never on a production machine, as this option creates an information
57369+ leak that could aid an attacker in defeating the randomization of
57370+ a single memory region. If the sysctl option is enabled, a sysctl
57371+ option with name "audit_textrel" is created.
57372+
57373+endmenu
57374+
57375+menu "Executable Protections"
57376+depends on GRKERNSEC
57377+
57378+config GRKERNSEC_DMESG
57379+ bool "Dmesg(8) restriction"
57380+ default y if GRKERNSEC_CONFIG_AUTO
57381+ help
57382+ If you say Y here, non-root users will not be able to use dmesg(8)
57383+ to view the contents of the kernel's circular log buffer.
57384+ The kernel's log buffer often contains kernel addresses and other
57385+ identifying information useful to an attacker in fingerprinting a
57386+ system for a targeted exploit.
57387+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
57388+ created.
57389+
57390+config GRKERNSEC_HARDEN_PTRACE
57391+ bool "Deter ptrace-based process snooping"
57392+ default y if GRKERNSEC_CONFIG_AUTO
57393+ help
57394+ If you say Y here, TTY sniffers and other malicious monitoring
57395+ programs implemented through ptrace will be defeated. If you
57396+ have been using the RBAC system, this option has already been
57397+ enabled for several years for all users, with the ability to make
57398+ fine-grained exceptions.
57399+
57400+ This option only affects the ability of non-root users to ptrace
57401+ processes that are not a descendent of the ptracing process.
57402+ This means that strace ./binary and gdb ./binary will still work,
57403+ but attaching to arbitrary processes will not. If the sysctl
57404+ option is enabled, a sysctl option with name "harden_ptrace" is
57405+ created.
57406+
57407+config GRKERNSEC_PTRACE_READEXEC
57408+ bool "Require read access to ptrace sensitive binaries"
57409+ default y if GRKERNSEC_CONFIG_AUTO
57410+ help
57411+ If you say Y here, unprivileged users will not be able to ptrace unreadable
57412+ binaries. This option is useful in environments that
57413+ remove the read bits (e.g. file mode 4711) from suid binaries to
57414+ prevent infoleaking of their contents. This option adds
57415+ consistency to the use of that file mode, as the binary could normally
57416+ be read out when run without privileges while ptracing.
57417+
57418+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
57419+ is created.
57420+
57421+config GRKERNSEC_SETXID
57422+ bool "Enforce consistent multithreaded privileges"
57423+ default y if GRKERNSEC_CONFIG_AUTO
57424+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
57425+ help
57426+ If you say Y here, a change from a root uid to a non-root uid
57427+ in a multithreaded application will cause the resulting uids,
57428+ gids, supplementary groups, and capabilities in that thread
57429+ to be propagated to the other threads of the process. In most
57430+ cases this is unnecessary, as glibc will emulate this behavior
57431+ on behalf of the application. Other libcs do not act in the
57432+ same way, allowing the other threads of the process to continue
57433+ running with root privileges. If the sysctl option is enabled,
57434+ a sysctl option with name "consistent_setxid" is created.
57435+
57436+config GRKERNSEC_TPE
57437+ bool "Trusted Path Execution (TPE)"
57438+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
57439+ help
57440+ If you say Y here, you will be able to choose a gid to add to the
57441+ supplementary groups of users you want to mark as "untrusted."
57442+ These users will not be able to execute any files that are not in
57443+ root-owned directories writable only by root. If the sysctl option
57444+ is enabled, a sysctl option with name "tpe" is created.
57445+
57446+config GRKERNSEC_TPE_ALL
57447+ bool "Partially restrict all non-root users"
57448+ depends on GRKERNSEC_TPE
57449+ help
57450+ If you say Y here, all non-root users will be covered under
57451+ a weaker TPE restriction. This is separate from, and in addition to,
57452+ the main TPE options that you have selected elsewhere. Thus, if a
57453+ "trusted" GID is chosen, this restriction applies to even that GID.
57454+ Under this restriction, all non-root users will only be allowed to
57455+ execute files in directories they own that are not group or
57456+ world-writable, or in directories owned by root and writable only by
57457+ root. If the sysctl option is enabled, a sysctl option with name
57458+ "tpe_restrict_all" is created.
57459+
57460+config GRKERNSEC_TPE_INVERT
57461+ bool "Invert GID option"
57462+ depends on GRKERNSEC_TPE
57463+ help
57464+ If you say Y here, the group you specify in the TPE configuration will
57465+ decide what group TPE restrictions will be *disabled* for. This
57466+ option is useful if you want TPE restrictions to be applied to most
57467+ users on the system. If the sysctl option is enabled, a sysctl option
57468+ with name "tpe_invert" is created. Unlike other sysctl options, this
57469+ entry will default to on for backward-compatibility.
57470+
57471+config GRKERNSEC_TPE_GID
57472+ int
57473+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
57474+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
57475+
57476+config GRKERNSEC_TPE_UNTRUSTED_GID
57477+ int "GID for TPE-untrusted users"
57478+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
57479+ default 1005
57480+ help
57481+ Setting this GID determines what group TPE restrictions will be
57482+ *enabled* for. If the sysctl option is enabled, a sysctl option
57483+ with name "tpe_gid" is created.
57484+
57485+config GRKERNSEC_TPE_TRUSTED_GID
57486+ int "GID for TPE-trusted users"
57487+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
57488+ default 1005
57489+ help
57490+ Setting this GID determines what group TPE restrictions will be
57491+ *disabled* for. If the sysctl option is enabled, a sysctl option
57492+ with name "tpe_gid" is created.
57493+
57494+endmenu
57495+menu "Network Protections"
57496+depends on GRKERNSEC
57497+
57498+config GRKERNSEC_RANDNET
57499+ bool "Larger entropy pools"
57500+ default y if GRKERNSEC_CONFIG_AUTO
57501+ help
57502+ If you say Y here, the entropy pools used for many features of Linux
57503+ and grsecurity will be doubled in size. Since several grsecurity
57504+ features use additional randomness, it is recommended that you say Y
57505+ here. Saying Y here has a similar effect as modifying
57506+ /proc/sys/kernel/random/poolsize.
57507+
57508+config GRKERNSEC_BLACKHOLE
57509+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
57510+ default y if GRKERNSEC_CONFIG_AUTO
57511+ depends on NET
57512+ help
57513+ If you say Y here, neither TCP resets nor ICMP
57514+ destination-unreachable packets will be sent in response to packets
57515+ sent to ports for which no associated listening process exists.
57516+ This feature supports both IPV4 and IPV6 and exempts the
57517+ loopback interface from blackholing. Enabling this feature
57518+ makes a host more resilient to DoS attacks and reduces network
57519+ visibility against scanners.
57520+
57521+ The blackhole feature as-implemented is equivalent to the FreeBSD
57522+ blackhole feature, as it prevents RST responses to all packets, not
57523+ just SYNs. Under most application behavior this causes no
57524+ problems, but applications (like haproxy) may not close certain
57525+ connections in a way that cleanly terminates them on the remote
57526+ end, leaving the remote host in LAST_ACK state. Because of this
57527+ side-effect and to prevent intentional LAST_ACK DoSes, this
57528+ feature also adds automatic mitigation against such attacks.
57529+ The mitigation drastically reduces the amount of time a socket
57530+ can spend in LAST_ACK state. If you're using haproxy and not
57531+ all servers it connects to have this option enabled, consider
57532+ disabling this feature on the haproxy host.
57533+
57534+ If the sysctl option is enabled, two sysctl options with names
57535+ "ip_blackhole" and "lastack_retries" will be created.
57536+ While "ip_blackhole" takes the standard zero/non-zero on/off
57537+ toggle, "lastack_retries" uses the same kinds of values as
57538+ "tcp_retries1" and "tcp_retries2". The default value of 4
57539+ prevents a socket from lasting more than 45 seconds in LAST_ACK
57540+ state.
57541+
57542+config GRKERNSEC_NO_SIMULT_CONNECT
57543+ bool "Disable TCP Simultaneous Connect"
57544+ default y if GRKERNSEC_CONFIG_AUTO
57545+ depends on NET
57546+ help
57547+ If you say Y here, a feature by Willy Tarreau will be enabled that
57548+ removes a weakness in Linux's strict implementation of TCP that
57549+ allows two clients to connect to each other without either entering
57550+ a listening state. The weakness allows an attacker to easily prevent
57551+ a client from connecting to a known server provided the source port
57552+ for the connection is guessed correctly.
57553+
57554+ As the weakness could be used to prevent an antivirus or IPS from
57555+ fetching updates, or prevent an SSL gateway from fetching a CRL,
57556+ it should be eliminated by enabling this option. Though Linux is
57557+ one of few operating systems supporting simultaneous connect, it
57558+ has no legitimate use in practice and is rarely supported by firewalls.
57559+
57560+config GRKERNSEC_SOCKET
57561+ bool "Socket restrictions"
57562+ depends on NET
57563+ help
57564+ If you say Y here, you will be able to choose from several options.
57565+ If you assign a GID on your system and add it to the supplementary
57566+ groups of users you want to restrict socket access to, this patch
57567+ will perform up to three things, based on the option(s) you choose.
57568+
57569+config GRKERNSEC_SOCKET_ALL
57570+ bool "Deny any sockets to group"
57571+ depends on GRKERNSEC_SOCKET
57572+ help
57573+ If you say Y here, you will be able to choose a GID of whose users will
57574+ be unable to connect to other hosts from your machine or run server
57575+ applications from your machine. If the sysctl option is enabled, a
57576+ sysctl option with name "socket_all" is created.
57577+
57578+config GRKERNSEC_SOCKET_ALL_GID
57579+ int "GID to deny all sockets for"
57580+ depends on GRKERNSEC_SOCKET_ALL
57581+ default 1004
57582+ help
57583+ Here you can choose the GID to disable socket access for. Remember to
57584+ add the users you want socket access disabled for to the GID
57585+ specified here. If the sysctl option is enabled, a sysctl option
57586+ with name "socket_all_gid" is created.
57587+
57588+config GRKERNSEC_SOCKET_CLIENT
57589+ bool "Deny client sockets to group"
57590+ depends on GRKERNSEC_SOCKET
57591+ help
57592+ If you say Y here, you will be able to choose a GID of whose users will
57593+ be unable to connect to other hosts from your machine, but will be
57594+ able to run servers. If this option is enabled, all users in the group
57595+ you specify will have to use passive mode when initiating ftp transfers
57596+ from the shell on your machine. If the sysctl option is enabled, a
57597+ sysctl option with name "socket_client" is created.
57598+
57599+config GRKERNSEC_SOCKET_CLIENT_GID
57600+ int "GID to deny client sockets for"
57601+ depends on GRKERNSEC_SOCKET_CLIENT
57602+ default 1003
57603+ help
57604+ Here you can choose the GID to disable client socket access for.
57605+ Remember to add the users you want client socket access disabled for to
57606+ the GID specified here. If the sysctl option is enabled, a sysctl
57607+ option with name "socket_client_gid" is created.
57608+
57609+config GRKERNSEC_SOCKET_SERVER
57610+ bool "Deny server sockets to group"
57611+ depends on GRKERNSEC_SOCKET
57612+ help
57613+ If you say Y here, you will be able to choose a GID of whose users will
57614+ be unable to run server applications from your machine. If the sysctl
57615+ option is enabled, a sysctl option with name "socket_server" is created.
57616+
57617+config GRKERNSEC_SOCKET_SERVER_GID
57618+ int "GID to deny server sockets for"
57619+ depends on GRKERNSEC_SOCKET_SERVER
57620+ default 1002
57621+ help
57622+ Here you can choose the GID to disable server socket access for.
57623+ Remember to add the users you want server socket access disabled for to
57624+ the GID specified here. If the sysctl option is enabled, a sysctl
57625+ option with name "socket_server_gid" is created.
57626+
57627+endmenu
57628+menu "Sysctl Support"
57629+depends on GRKERNSEC && SYSCTL
57630+
57631+config GRKERNSEC_SYSCTL
57632+ bool "Sysctl support"
57633+ default y if GRKERNSEC_CONFIG_AUTO
57634+ help
57635+ If you say Y here, you will be able to change the options that
57636+ grsecurity runs with at bootup, without having to recompile your
57637+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
57638+ to enable (1) or disable (0) various features. All the sysctl entries
57639+ are mutable until the "grsec_lock" entry is set to a non-zero value.
57640+ All features enabled in the kernel configuration are disabled at boot
57641+ if you do not say Y to the "Turn on features by default" option.
57642+ All options should be set at startup, and the grsec_lock entry should
57643+ be set to a non-zero value after all the options are set.
57644+ *THIS IS EXTREMELY IMPORTANT*
57645+
57646+config GRKERNSEC_SYSCTL_DISTRO
57647+ bool "Extra sysctl support for distro makers (READ HELP)"
57648+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
57649+ help
57650+ If you say Y here, additional sysctl options will be created
57651+ for features that affect processes running as root. Therefore,
57652+ it is critical when using this option that the grsec_lock entry be
57653+ enabled after boot. Only distros with prebuilt kernel packages
57654+ with this option enabled that can ensure grsec_lock is enabled
57655+ after boot should use this option.
57656+ *Failure to set grsec_lock after boot makes all grsec features
57657+ this option covers useless*
57658+
57659+ Currently this option creates the following sysctl entries:
57660+ "Disable Privileged I/O": "disable_priv_io"
57661+
57662+config GRKERNSEC_SYSCTL_ON
57663+ bool "Turn on features by default"
57664+ default y if GRKERNSEC_CONFIG_AUTO
57665+ depends on GRKERNSEC_SYSCTL
57666+ help
57667+ If you say Y here, instead of having all features enabled in the
57668+ kernel configuration disabled at boot time, the features will be
57669+ enabled at boot time. It is recommended you say Y here unless
57670+ there is some reason you would want all sysctl-tunable features to
57671+ be disabled by default. As mentioned elsewhere, it is important
57672+ to enable the grsec_lock entry once you have finished modifying
57673+ the sysctl entries.
57674+
57675+endmenu
57676+menu "Logging Options"
57677+depends on GRKERNSEC
57678+
57679+config GRKERNSEC_FLOODTIME
57680+ int "Seconds in between log messages (minimum)"
57681+ default 10
57682+ help
57683+ This option allows you to enforce the number of seconds between
57684+ grsecurity log messages. The default should be suitable for most
57685+ people, however, if you choose to change it, choose a value small enough
57686+ to allow informative logs to be produced, but large enough to
57687+ prevent flooding.
57688+
57689+config GRKERNSEC_FLOODBURST
57690+ int "Number of messages in a burst (maximum)"
57691+ default 6
57692+ help
57693+ This option allows you to choose the maximum number of messages allowed
57694+ within the flood time interval you chose in a separate option. The
57695+ default should be suitable for most people, however if you find that
57696+ many of your logs are being interpreted as flooding, you may want to
57697+ raise this value.
57698+
57699+endmenu
57700diff --git a/grsecurity/Makefile b/grsecurity/Makefile
57701new file mode 100644
57702index 0000000..1b9afa9
57703--- /dev/null
57704+++ b/grsecurity/Makefile
57705@@ -0,0 +1,38 @@
57706+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
57707+# during 2001-2009 it has been completely redesigned by Brad Spengler
57708+# into an RBAC system
57709+#
57710+# All code in this directory and various hooks inserted throughout the kernel
57711+# are copyright Brad Spengler - Open Source Security, Inc., and released
57712+# under the GPL v2 or higher
57713+
57714+KBUILD_CFLAGS += -Werror
57715+
57716+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
57717+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
57718+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
57719+
57720+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
57721+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
57722+ gracl_learn.o grsec_log.o
57723+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
57724+
57725+ifdef CONFIG_NET
57726+obj-y += grsec_sock.o
57727+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
57728+endif
57729+
57730+ifndef CONFIG_GRKERNSEC
57731+obj-y += grsec_disabled.o
57732+endif
57733+
57734+ifdef CONFIG_GRKERNSEC_HIDESYM
57735+extra-y := grsec_hidesym.o
57736+$(obj)/grsec_hidesym.o:
57737+ @-chmod -f 500 /boot
57738+ @-chmod -f 500 /lib/modules
57739+ @-chmod -f 500 /lib64/modules
57740+ @-chmod -f 500 /lib32/modules
57741+ @-chmod -f 700 .
57742+ @echo ' grsec: protected kernel image paths'
57743+endif
57744diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
57745new file mode 100644
57746index 0000000..d0e7b38
57747--- /dev/null
57748+++ b/grsecurity/gracl.c
57749@@ -0,0 +1,4071 @@
57750+#include <linux/kernel.h>
57751+#include <linux/module.h>
57752+#include <linux/sched.h>
57753+#include <linux/mm.h>
57754+#include <linux/file.h>
57755+#include <linux/fs.h>
57756+#include <linux/namei.h>
57757+#include <linux/mount.h>
57758+#include <linux/tty.h>
57759+#include <linux/proc_fs.h>
57760+#include <linux/lglock.h>
57761+#include <linux/slab.h>
57762+#include <linux/vmalloc.h>
57763+#include <linux/types.h>
57764+#include <linux/sysctl.h>
57765+#include <linux/netdevice.h>
57766+#include <linux/ptrace.h>
57767+#include <linux/gracl.h>
57768+#include <linux/gralloc.h>
57769+#include <linux/security.h>
57770+#include <linux/grinternal.h>
57771+#include <linux/pid_namespace.h>
57772+#include <linux/stop_machine.h>
57773+#include <linux/fdtable.h>
57774+#include <linux/percpu.h>
57775+#include <linux/lglock.h>
57776+#include <linux/hugetlb.h>
57777+#include <linux/posix-timers.h>
57778+#include "../fs/mount.h"
57779+
57780+#include <asm/uaccess.h>
57781+#include <asm/errno.h>
57782+#include <asm/mman.h>
57783+
57784+extern struct lglock vfsmount_lock;
57785+
57786+static struct acl_role_db acl_role_set;
57787+static struct name_db name_set;
57788+static struct inodev_db inodev_set;
57789+
57790+/* for keeping track of userspace pointers used for subjects, so we
57791+ can share references in the kernel as well
57792+*/
57793+
57794+static struct path real_root;
57795+
57796+static struct acl_subj_map_db subj_map_set;
57797+
57798+static struct acl_role_label *default_role;
57799+
57800+static struct acl_role_label *role_list;
57801+
57802+static u16 acl_sp_role_value;
57803+
57804+extern char *gr_shared_page[4];
57805+static DEFINE_MUTEX(gr_dev_mutex);
57806+DEFINE_RWLOCK(gr_inode_lock);
57807+
57808+struct gr_arg *gr_usermode;
57809+
57810+static unsigned int gr_status __read_only = GR_STATUS_INIT;
57811+
57812+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
57813+extern void gr_clear_learn_entries(void);
57814+
57815+unsigned char *gr_system_salt;
57816+unsigned char *gr_system_sum;
57817+
57818+static struct sprole_pw **acl_special_roles = NULL;
57819+static __u16 num_sprole_pws = 0;
57820+
57821+static struct acl_role_label *kernel_role = NULL;
57822+
57823+static unsigned int gr_auth_attempts = 0;
57824+static unsigned long gr_auth_expires = 0UL;
57825+
57826+#ifdef CONFIG_NET
57827+extern struct vfsmount *sock_mnt;
57828+#endif
57829+
57830+extern struct vfsmount *pipe_mnt;
57831+extern struct vfsmount *shm_mnt;
57832+
57833+#ifdef CONFIG_HUGETLBFS
57834+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
57835+#endif
57836+
57837+static struct acl_object_label *fakefs_obj_rw;
57838+static struct acl_object_label *fakefs_obj_rwx;
57839+
57840+extern int gr_init_uidset(void);
57841+extern void gr_free_uidset(void);
57842+extern void gr_remove_uid(uid_t uid);
57843+extern int gr_find_uid(uid_t uid);
57844+
57845+__inline__ int
57846+gr_acl_is_enabled(void)
57847+{
57848+ return (gr_status & GR_READY);
57849+}
57850+
57851+#ifdef CONFIG_BTRFS_FS
57852+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
57853+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
57854+#endif
57855+
57856+static inline dev_t __get_dev(const struct dentry *dentry)
57857+{
57858+#ifdef CONFIG_BTRFS_FS
57859+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
57860+ return get_btrfs_dev_from_inode(dentry->d_inode);
57861+ else
57862+#endif
57863+ return dentry->d_inode->i_sb->s_dev;
57864+}
57865+
57866+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
57867+{
57868+ return __get_dev(dentry);
57869+}
57870+
57871+static char gr_task_roletype_to_char(struct task_struct *task)
57872+{
57873+ switch (task->role->roletype &
57874+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
57875+ GR_ROLE_SPECIAL)) {
57876+ case GR_ROLE_DEFAULT:
57877+ return 'D';
57878+ case GR_ROLE_USER:
57879+ return 'U';
57880+ case GR_ROLE_GROUP:
57881+ return 'G';
57882+ case GR_ROLE_SPECIAL:
57883+ return 'S';
57884+ }
57885+
57886+ return 'X';
57887+}
57888+
57889+char gr_roletype_to_char(void)
57890+{
57891+ return gr_task_roletype_to_char(current);
57892+}
57893+
57894+__inline__ int
57895+gr_acl_tpe_check(void)
57896+{
57897+ if (unlikely(!(gr_status & GR_READY)))
57898+ return 0;
57899+ if (current->role->roletype & GR_ROLE_TPE)
57900+ return 1;
57901+ else
57902+ return 0;
57903+}
57904+
57905+int
57906+gr_handle_rawio(const struct inode *inode)
57907+{
57908+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57909+ if (inode && S_ISBLK(inode->i_mode) &&
57910+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
57911+ !capable(CAP_SYS_RAWIO))
57912+ return 1;
57913+#endif
57914+ return 0;
57915+}
57916+
57917+static int
57918+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
57919+{
57920+ if (likely(lena != lenb))
57921+ return 0;
57922+
57923+ return !memcmp(a, b, lena);
57924+}
57925+
57926+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
57927+{
57928+ *buflen -= namelen;
57929+ if (*buflen < 0)
57930+ return -ENAMETOOLONG;
57931+ *buffer -= namelen;
57932+ memcpy(*buffer, str, namelen);
57933+ return 0;
57934+}
57935+
57936+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
57937+{
57938+ return prepend(buffer, buflen, name->name, name->len);
57939+}
57940+
57941+static int prepend_path(const struct path *path, struct path *root,
57942+ char **buffer, int *buflen)
57943+{
57944+ struct dentry *dentry = path->dentry;
57945+ struct vfsmount *vfsmnt = path->mnt;
57946+ struct mount *mnt = real_mount(vfsmnt);
57947+ bool slash = false;
57948+ int error = 0;
57949+
57950+ while (dentry != root->dentry || vfsmnt != root->mnt) {
57951+ struct dentry * parent;
57952+
57953+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
57954+ /* Global root? */
57955+ if (!mnt_has_parent(mnt)) {
57956+ goto out;
57957+ }
57958+ dentry = mnt->mnt_mountpoint;
57959+ mnt = mnt->mnt_parent;
57960+ vfsmnt = &mnt->mnt;
57961+ continue;
57962+ }
57963+ parent = dentry->d_parent;
57964+ prefetch(parent);
57965+ spin_lock(&dentry->d_lock);
57966+ error = prepend_name(buffer, buflen, &dentry->d_name);
57967+ spin_unlock(&dentry->d_lock);
57968+ if (!error)
57969+ error = prepend(buffer, buflen, "/", 1);
57970+ if (error)
57971+ break;
57972+
57973+ slash = true;
57974+ dentry = parent;
57975+ }
57976+
57977+out:
57978+ if (!error && !slash)
57979+ error = prepend(buffer, buflen, "/", 1);
57980+
57981+ return error;
57982+}
57983+
57984+/* this must be called with vfsmount_lock and rename_lock held */
57985+
57986+static char *__our_d_path(const struct path *path, struct path *root,
57987+ char *buf, int buflen)
57988+{
57989+ char *res = buf + buflen;
57990+ int error;
57991+
57992+ prepend(&res, &buflen, "\0", 1);
57993+ error = prepend_path(path, root, &res, &buflen);
57994+ if (error)
57995+ return ERR_PTR(error);
57996+
57997+ return res;
57998+}
57999+
58000+static char *
58001+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
58002+{
58003+ char *retval;
58004+
58005+ retval = __our_d_path(path, root, buf, buflen);
58006+ if (unlikely(IS_ERR(retval)))
58007+ retval = strcpy(buf, "<path too long>");
58008+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
58009+ retval[1] = '\0';
58010+
58011+ return retval;
58012+}
58013+
58014+static char *
58015+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
58016+ char *buf, int buflen)
58017+{
58018+ struct path path;
58019+ char *res;
58020+
58021+ path.dentry = (struct dentry *)dentry;
58022+ path.mnt = (struct vfsmount *)vfsmnt;
58023+
58024+ /* we can use real_root.dentry, real_root.mnt, because this is only called
58025+ by the RBAC system */
58026+ res = gen_full_path(&path, &real_root, buf, buflen);
58027+
58028+ return res;
58029+}
58030+
58031+static char *
58032+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
58033+ char *buf, int buflen)
58034+{
58035+ char *res;
58036+ struct path path;
58037+ struct path root;
58038+ struct task_struct *reaper = init_pid_ns.child_reaper;
58039+
58040+ path.dentry = (struct dentry *)dentry;
58041+ path.mnt = (struct vfsmount *)vfsmnt;
58042+
58043+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
58044+ get_fs_root(reaper->fs, &root);
58045+
58046+ br_read_lock(&vfsmount_lock);
58047+ write_seqlock(&rename_lock);
58048+ res = gen_full_path(&path, &root, buf, buflen);
58049+ write_sequnlock(&rename_lock);
58050+ br_read_unlock(&vfsmount_lock);
58051+
58052+ path_put(&root);
58053+ return res;
58054+}
58055+
58056+static char *
58057+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
58058+{
58059+ char *ret;
58060+ br_read_lock(&vfsmount_lock);
58061+ write_seqlock(&rename_lock);
58062+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
58063+ PAGE_SIZE);
58064+ write_sequnlock(&rename_lock);
58065+ br_read_unlock(&vfsmount_lock);
58066+ return ret;
58067+}
58068+
58069+static char *
58070+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
58071+{
58072+ char *ret;
58073+ char *buf;
58074+ int buflen;
58075+
58076+ br_read_lock(&vfsmount_lock);
58077+ write_seqlock(&rename_lock);
58078+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
58079+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
58080+ buflen = (int)(ret - buf);
58081+ if (buflen >= 5)
58082+ prepend(&ret, &buflen, "/proc", 5);
58083+ else
58084+ ret = strcpy(buf, "<path too long>");
58085+ write_sequnlock(&rename_lock);
58086+ br_read_unlock(&vfsmount_lock);
58087+ return ret;
58088+}
58089+
58090+char *
58091+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
58092+{
58093+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
58094+ PAGE_SIZE);
58095+}
58096+
58097+char *
58098+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
58099+{
58100+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
58101+ PAGE_SIZE);
58102+}
58103+
58104+char *
58105+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
58106+{
58107+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
58108+ PAGE_SIZE);
58109+}
58110+
58111+char *
58112+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
58113+{
58114+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
58115+ PAGE_SIZE);
58116+}
58117+
58118+char *
58119+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
58120+{
58121+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
58122+ PAGE_SIZE);
58123+}
58124+
58125+__inline__ __u32
58126+to_gr_audit(const __u32 reqmode)
58127+{
58128+ /* masks off auditable permission flags, then shifts them to create
58129+ auditing flags, and adds the special case of append auditing if
58130+ we're requesting write */
58131+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
58132+}
58133+
58134+struct acl_subject_label *
58135+lookup_subject_map(const struct acl_subject_label *userp)
58136+{
58137+ unsigned int index = gr_shash(userp, subj_map_set.s_size);
58138+ struct subject_map *match;
58139+
58140+ match = subj_map_set.s_hash[index];
58141+
58142+ while (match && match->user != userp)
58143+ match = match->next;
58144+
58145+ if (match != NULL)
58146+ return match->kernel;
58147+ else
58148+ return NULL;
58149+}
58150+
58151+static void
58152+insert_subj_map_entry(struct subject_map *subjmap)
58153+{
58154+ unsigned int index = gr_shash(subjmap->user, subj_map_set.s_size);
58155+ struct subject_map **curr;
58156+
58157+ subjmap->prev = NULL;
58158+
58159+ curr = &subj_map_set.s_hash[index];
58160+ if (*curr != NULL)
58161+ (*curr)->prev = subjmap;
58162+
58163+ subjmap->next = *curr;
58164+ *curr = subjmap;
58165+
58166+ return;
58167+}
58168+
58169+static struct acl_role_label *
58170+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
58171+ const gid_t gid)
58172+{
58173+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
58174+ struct acl_role_label *match;
58175+ struct role_allowed_ip *ipp;
58176+ unsigned int x;
58177+ u32 curr_ip = task->signal->curr_ip;
58178+
58179+ task->signal->saved_ip = curr_ip;
58180+
58181+ match = acl_role_set.r_hash[index];
58182+
58183+ while (match) {
58184+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
58185+ for (x = 0; x < match->domain_child_num; x++) {
58186+ if (match->domain_children[x] == uid)
58187+ goto found;
58188+ }
58189+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
58190+ break;
58191+ match = match->next;
58192+ }
58193+found:
58194+ if (match == NULL) {
58195+ try_group:
58196+ index = gr_rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
58197+ match = acl_role_set.r_hash[index];
58198+
58199+ while (match) {
58200+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
58201+ for (x = 0; x < match->domain_child_num; x++) {
58202+ if (match->domain_children[x] == gid)
58203+ goto found2;
58204+ }
58205+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
58206+ break;
58207+ match = match->next;
58208+ }
58209+found2:
58210+ if (match == NULL)
58211+ match = default_role;
58212+ if (match->allowed_ips == NULL)
58213+ return match;
58214+ else {
58215+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
58216+ if (likely
58217+ ((ntohl(curr_ip) & ipp->netmask) ==
58218+ (ntohl(ipp->addr) & ipp->netmask)))
58219+ return match;
58220+ }
58221+ match = default_role;
58222+ }
58223+ } else if (match->allowed_ips == NULL) {
58224+ return match;
58225+ } else {
58226+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
58227+ if (likely
58228+ ((ntohl(curr_ip) & ipp->netmask) ==
58229+ (ntohl(ipp->addr) & ipp->netmask)))
58230+ return match;
58231+ }
58232+ goto try_group;
58233+ }
58234+
58235+ return match;
58236+}
58237+
58238+struct acl_subject_label *
58239+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
58240+ const struct acl_role_label *role)
58241+{
58242+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
58243+ struct acl_subject_label *match;
58244+
58245+ match = role->subj_hash[index];
58246+
58247+ while (match && (match->inode != ino || match->device != dev ||
58248+ (match->mode & GR_DELETED))) {
58249+ match = match->next;
58250+ }
58251+
58252+ if (match && !(match->mode & GR_DELETED))
58253+ return match;
58254+ else
58255+ return NULL;
58256+}
58257+
58258+struct acl_subject_label *
58259+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
58260+ const struct acl_role_label *role)
58261+{
58262+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
58263+ struct acl_subject_label *match;
58264+
58265+ match = role->subj_hash[index];
58266+
58267+ while (match && (match->inode != ino || match->device != dev ||
58268+ !(match->mode & GR_DELETED))) {
58269+ match = match->next;
58270+ }
58271+
58272+ if (match && (match->mode & GR_DELETED))
58273+ return match;
58274+ else
58275+ return NULL;
58276+}
58277+
58278+static struct acl_object_label *
58279+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
58280+ const struct acl_subject_label *subj)
58281+{
58282+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
58283+ struct acl_object_label *match;
58284+
58285+ match = subj->obj_hash[index];
58286+
58287+ while (match && (match->inode != ino || match->device != dev ||
58288+ (match->mode & GR_DELETED))) {
58289+ match = match->next;
58290+ }
58291+
58292+ if (match && !(match->mode & GR_DELETED))
58293+ return match;
58294+ else
58295+ return NULL;
58296+}
58297+
58298+static struct acl_object_label *
58299+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
58300+ const struct acl_subject_label *subj)
58301+{
58302+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
58303+ struct acl_object_label *match;
58304+
58305+ match = subj->obj_hash[index];
58306+
58307+ while (match && (match->inode != ino || match->device != dev ||
58308+ !(match->mode & GR_DELETED))) {
58309+ match = match->next;
58310+ }
58311+
58312+ if (match && (match->mode & GR_DELETED))
58313+ return match;
58314+
58315+ match = subj->obj_hash[index];
58316+
58317+ while (match && (match->inode != ino || match->device != dev ||
58318+ (match->mode & GR_DELETED))) {
58319+ match = match->next;
58320+ }
58321+
58322+ if (match && !(match->mode & GR_DELETED))
58323+ return match;
58324+ else
58325+ return NULL;
58326+}
58327+
58328+static struct name_entry *
58329+lookup_name_entry(const char *name)
58330+{
58331+ unsigned int len = strlen(name);
58332+ unsigned int key = full_name_hash(name, len);
58333+ unsigned int index = key % name_set.n_size;
58334+ struct name_entry *match;
58335+
58336+ match = name_set.n_hash[index];
58337+
58338+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
58339+ match = match->next;
58340+
58341+ return match;
58342+}
58343+
58344+static struct name_entry *
58345+lookup_name_entry_create(const char *name)
58346+{
58347+ unsigned int len = strlen(name);
58348+ unsigned int key = full_name_hash(name, len);
58349+ unsigned int index = key % name_set.n_size;
58350+ struct name_entry *match;
58351+
58352+ match = name_set.n_hash[index];
58353+
58354+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
58355+ !match->deleted))
58356+ match = match->next;
58357+
58358+ if (match && match->deleted)
58359+ return match;
58360+
58361+ match = name_set.n_hash[index];
58362+
58363+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
58364+ match->deleted))
58365+ match = match->next;
58366+
58367+ if (match && !match->deleted)
58368+ return match;
58369+ else
58370+ return NULL;
58371+}
58372+
58373+static struct inodev_entry *
58374+lookup_inodev_entry(const ino_t ino, const dev_t dev)
58375+{
58376+ unsigned int index = gr_fhash(ino, dev, inodev_set.i_size);
58377+ struct inodev_entry *match;
58378+
58379+ match = inodev_set.i_hash[index];
58380+
58381+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
58382+ match = match->next;
58383+
58384+ return match;
58385+}
58386+
58387+static void
58388+insert_inodev_entry(struct inodev_entry *entry)
58389+{
58390+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
58391+ inodev_set.i_size);
58392+ struct inodev_entry **curr;
58393+
58394+ entry->prev = NULL;
58395+
58396+ curr = &inodev_set.i_hash[index];
58397+ if (*curr != NULL)
58398+ (*curr)->prev = entry;
58399+
58400+ entry->next = *curr;
58401+ *curr = entry;
58402+
58403+ return;
58404+}
58405+
58406+static void
58407+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
58408+{
58409+ unsigned int index =
58410+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
58411+ struct acl_role_label **curr;
58412+ struct acl_role_label *tmp, *tmp2;
58413+
58414+ curr = &acl_role_set.r_hash[index];
58415+
58416+ /* simple case, slot is empty, just set it to our role */
58417+ if (*curr == NULL) {
58418+ *curr = role;
58419+ } else {
58420+ /* example:
58421+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
58422+ 2 -> 3
58423+ */
58424+ /* first check to see if we can already be reached via this slot */
58425+ tmp = *curr;
58426+ while (tmp && tmp != role)
58427+ tmp = tmp->next;
58428+ if (tmp == role) {
58429+ /* we don't need to add ourselves to this slot's chain */
58430+ return;
58431+ }
58432+ /* we need to add ourselves to this chain, two cases */
58433+ if (role->next == NULL) {
58434+ /* simple case, append the current chain to our role */
58435+ role->next = *curr;
58436+ *curr = role;
58437+ } else {
58438+ /* 1 -> 2 -> 3 -> 4
58439+ 2 -> 3 -> 4
58440+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
58441+ */
58442+ /* trickier case: walk our role's chain until we find
58443+ the role for the start of the current slot's chain */
58444+ tmp = role;
58445+ tmp2 = *curr;
58446+ while (tmp->next && tmp->next != tmp2)
58447+ tmp = tmp->next;
58448+ if (tmp->next == tmp2) {
58449+ /* from example above, we found 3, so just
58450+ replace this slot's chain with ours */
58451+ *curr = role;
58452+ } else {
58453+ /* we didn't find a subset of our role's chain
58454+ in the current slot's chain, so append their
58455+ chain to ours, and set us as the first role in
58456+ the slot's chain
58457+
58458+ we could fold this case with the case above,
58459+ but making it explicit for clarity
58460+ */
58461+ tmp->next = tmp2;
58462+ *curr = role;
58463+ }
58464+ }
58465+ }
58466+
58467+ return;
58468+}
58469+
58470+static void
58471+insert_acl_role_label(struct acl_role_label *role)
58472+{
58473+ int i;
58474+
58475+ if (role_list == NULL) {
58476+ role_list = role;
58477+ role->prev = NULL;
58478+ } else {
58479+ role->prev = role_list;
58480+ role_list = role;
58481+ }
58482+
58483+ /* used for hash chains */
58484+ role->next = NULL;
58485+
58486+ if (role->roletype & GR_ROLE_DOMAIN) {
58487+ for (i = 0; i < role->domain_child_num; i++)
58488+ __insert_acl_role_label(role, role->domain_children[i]);
58489+ } else
58490+ __insert_acl_role_label(role, role->uidgid);
58491+}
58492+
58493+static int
58494+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
58495+{
58496+ struct name_entry **curr, *nentry;
58497+ struct inodev_entry *ientry;
58498+ unsigned int len = strlen(name);
58499+ unsigned int key = full_name_hash(name, len);
58500+ unsigned int index = key % name_set.n_size;
58501+
58502+ curr = &name_set.n_hash[index];
58503+
58504+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
58505+ curr = &((*curr)->next);
58506+
58507+ if (*curr != NULL)
58508+ return 1;
58509+
58510+ nentry = acl_alloc(sizeof (struct name_entry));
58511+ if (nentry == NULL)
58512+ return 0;
58513+ ientry = acl_alloc(sizeof (struct inodev_entry));
58514+ if (ientry == NULL)
58515+ return 0;
58516+ ientry->nentry = nentry;
58517+
58518+ nentry->key = key;
58519+ nentry->name = name;
58520+ nentry->inode = inode;
58521+ nentry->device = device;
58522+ nentry->len = len;
58523+ nentry->deleted = deleted;
58524+
58525+ nentry->prev = NULL;
58526+ curr = &name_set.n_hash[index];
58527+ if (*curr != NULL)
58528+ (*curr)->prev = nentry;
58529+ nentry->next = *curr;
58530+ *curr = nentry;
58531+
58532+ /* insert us into the table searchable by inode/dev */
58533+ insert_inodev_entry(ientry);
58534+
58535+ return 1;
58536+}
58537+
58538+static void
58539+insert_acl_obj_label(struct acl_object_label *obj,
58540+ struct acl_subject_label *subj)
58541+{
58542+ unsigned int index =
58543+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
58544+ struct acl_object_label **curr;
58545+
58546+
58547+ obj->prev = NULL;
58548+
58549+ curr = &subj->obj_hash[index];
58550+ if (*curr != NULL)
58551+ (*curr)->prev = obj;
58552+
58553+ obj->next = *curr;
58554+ *curr = obj;
58555+
58556+ return;
58557+}
58558+
58559+static void
58560+insert_acl_subj_label(struct acl_subject_label *obj,
58561+ struct acl_role_label *role)
58562+{
58563+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
58564+ struct acl_subject_label **curr;
58565+
58566+ obj->prev = NULL;
58567+
58568+ curr = &role->subj_hash[index];
58569+ if (*curr != NULL)
58570+ (*curr)->prev = obj;
58571+
58572+ obj->next = *curr;
58573+ *curr = obj;
58574+
58575+ return;
58576+}
58577+
58578+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
58579+
58580+static void *
58581+create_table(__u32 * len, int elementsize)
58582+{
58583+ unsigned int table_sizes[] = {
58584+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
58585+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
58586+ 4194301, 8388593, 16777213, 33554393, 67108859
58587+ };
58588+ void *newtable = NULL;
58589+ unsigned int pwr = 0;
58590+
58591+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
58592+ table_sizes[pwr] <= *len)
58593+ pwr++;
58594+
58595+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
58596+ return newtable;
58597+
58598+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
58599+ newtable =
58600+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
58601+ else
58602+ newtable = vmalloc(table_sizes[pwr] * elementsize);
58603+
58604+ *len = table_sizes[pwr];
58605+
58606+ return newtable;
58607+}
58608+
58609+static int
58610+init_variables(const struct gr_arg *arg)
58611+{
58612+ struct task_struct *reaper = init_pid_ns.child_reaper;
58613+ unsigned int stacksize;
58614+
58615+ subj_map_set.s_size = arg->role_db.num_subjects;
58616+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
58617+ name_set.n_size = arg->role_db.num_objects;
58618+ inodev_set.i_size = arg->role_db.num_objects;
58619+
58620+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
58621+ !name_set.n_size || !inodev_set.i_size)
58622+ return 1;
58623+
58624+ if (!gr_init_uidset())
58625+ return 1;
58626+
58627+ /* set up the stack that holds allocation info */
58628+
58629+ stacksize = arg->role_db.num_pointers + 5;
58630+
58631+ if (!acl_alloc_stack_init(stacksize))
58632+ return 1;
58633+
58634+ /* grab reference for the real root dentry and vfsmount */
58635+ get_fs_root(reaper->fs, &real_root);
58636+
58637+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58638+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
58639+#endif
58640+
58641+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
58642+ if (fakefs_obj_rw == NULL)
58643+ return 1;
58644+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
58645+
58646+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
58647+ if (fakefs_obj_rwx == NULL)
58648+ return 1;
58649+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
58650+
58651+ subj_map_set.s_hash =
58652+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
58653+ acl_role_set.r_hash =
58654+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
58655+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
58656+ inodev_set.i_hash =
58657+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
58658+
58659+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
58660+ !name_set.n_hash || !inodev_set.i_hash)
58661+ return 1;
58662+
58663+ memset(subj_map_set.s_hash, 0,
58664+ sizeof(struct subject_map *) * subj_map_set.s_size);
58665+ memset(acl_role_set.r_hash, 0,
58666+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
58667+ memset(name_set.n_hash, 0,
58668+ sizeof (struct name_entry *) * name_set.n_size);
58669+ memset(inodev_set.i_hash, 0,
58670+ sizeof (struct inodev_entry *) * inodev_set.i_size);
58671+
58672+ return 0;
58673+}
58674+
58675+/* free information not needed after startup
58676+ currently contains user->kernel pointer mappings for subjects
58677+*/
58678+
58679+static void
58680+free_init_variables(void)
58681+{
58682+ __u32 i;
58683+
58684+ if (subj_map_set.s_hash) {
58685+ for (i = 0; i < subj_map_set.s_size; i++) {
58686+ if (subj_map_set.s_hash[i]) {
58687+ kfree(subj_map_set.s_hash[i]);
58688+ subj_map_set.s_hash[i] = NULL;
58689+ }
58690+ }
58691+
58692+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
58693+ PAGE_SIZE)
58694+ kfree(subj_map_set.s_hash);
58695+ else
58696+ vfree(subj_map_set.s_hash);
58697+ }
58698+
58699+ return;
58700+}
58701+
58702+static void
58703+free_variables(void)
58704+{
58705+ struct acl_subject_label *s;
58706+ struct acl_role_label *r;
58707+ struct task_struct *task, *task2;
58708+ unsigned int x;
58709+
58710+ gr_clear_learn_entries();
58711+
58712+ read_lock(&tasklist_lock);
58713+ do_each_thread(task2, task) {
58714+ task->acl_sp_role = 0;
58715+ task->acl_role_id = 0;
58716+ task->acl = NULL;
58717+ task->role = NULL;
58718+ } while_each_thread(task2, task);
58719+ read_unlock(&tasklist_lock);
58720+
58721+ /* release the reference to the real root dentry and vfsmount */
58722+ path_put(&real_root);
58723+ memset(&real_root, 0, sizeof(real_root));
58724+
58725+ /* free all object hash tables */
58726+
58727+ FOR_EACH_ROLE_START(r)
58728+ if (r->subj_hash == NULL)
58729+ goto next_role;
58730+ FOR_EACH_SUBJECT_START(r, s, x)
58731+ if (s->obj_hash == NULL)
58732+ break;
58733+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
58734+ kfree(s->obj_hash);
58735+ else
58736+ vfree(s->obj_hash);
58737+ FOR_EACH_SUBJECT_END(s, x)
58738+ FOR_EACH_NESTED_SUBJECT_START(r, s)
58739+ if (s->obj_hash == NULL)
58740+ break;
58741+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
58742+ kfree(s->obj_hash);
58743+ else
58744+ vfree(s->obj_hash);
58745+ FOR_EACH_NESTED_SUBJECT_END(s)
58746+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
58747+ kfree(r->subj_hash);
58748+ else
58749+ vfree(r->subj_hash);
58750+ r->subj_hash = NULL;
58751+next_role:
58752+ FOR_EACH_ROLE_END(r)
58753+
58754+ acl_free_all();
58755+
58756+ if (acl_role_set.r_hash) {
58757+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
58758+ PAGE_SIZE)
58759+ kfree(acl_role_set.r_hash);
58760+ else
58761+ vfree(acl_role_set.r_hash);
58762+ }
58763+ if (name_set.n_hash) {
58764+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
58765+ PAGE_SIZE)
58766+ kfree(name_set.n_hash);
58767+ else
58768+ vfree(name_set.n_hash);
58769+ }
58770+
58771+ if (inodev_set.i_hash) {
58772+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
58773+ PAGE_SIZE)
58774+ kfree(inodev_set.i_hash);
58775+ else
58776+ vfree(inodev_set.i_hash);
58777+ }
58778+
58779+ gr_free_uidset();
58780+
58781+ memset(&name_set, 0, sizeof (struct name_db));
58782+ memset(&inodev_set, 0, sizeof (struct inodev_db));
58783+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
58784+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
58785+
58786+ default_role = NULL;
58787+ kernel_role = NULL;
58788+ role_list = NULL;
58789+
58790+ return;
58791+}
58792+
58793+static __u32
58794+count_user_objs(struct acl_object_label *userp)
58795+{
58796+ struct acl_object_label o_tmp;
58797+ __u32 num = 0;
58798+
58799+ while (userp) {
58800+ if (copy_from_user(&o_tmp, userp,
58801+ sizeof (struct acl_object_label)))
58802+ break;
58803+
58804+ userp = o_tmp.prev;
58805+ num++;
58806+ }
58807+
58808+ return num;
58809+}
58810+
58811+static struct acl_subject_label *
58812+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
58813+
58814+static int
58815+copy_user_glob(struct acl_object_label *obj)
58816+{
58817+ struct acl_object_label *g_tmp, **guser;
58818+ unsigned int len;
58819+ char *tmp;
58820+
58821+ if (obj->globbed == NULL)
58822+ return 0;
58823+
58824+ guser = &obj->globbed;
58825+ while (*guser) {
58826+ g_tmp = (struct acl_object_label *)
58827+ acl_alloc(sizeof (struct acl_object_label));
58828+ if (g_tmp == NULL)
58829+ return -ENOMEM;
58830+
58831+ if (copy_from_user(g_tmp, *guser,
58832+ sizeof (struct acl_object_label)))
58833+ return -EFAULT;
58834+
58835+ len = strnlen_user(g_tmp->filename, PATH_MAX);
58836+
58837+ if (!len || len >= PATH_MAX)
58838+ return -EINVAL;
58839+
58840+ if ((tmp = (char *) acl_alloc(len)) == NULL)
58841+ return -ENOMEM;
58842+
58843+ if (copy_from_user(tmp, g_tmp->filename, len))
58844+ return -EFAULT;
58845+ tmp[len-1] = '\0';
58846+ g_tmp->filename = tmp;
58847+
58848+ *guser = g_tmp;
58849+ guser = &(g_tmp->next);
58850+ }
58851+
58852+ return 0;
58853+}
58854+
58855+static int
58856+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
58857+ struct acl_role_label *role)
58858+{
58859+ struct acl_object_label *o_tmp;
58860+ unsigned int len;
58861+ int ret;
58862+ char *tmp;
58863+
58864+ while (userp) {
58865+ if ((o_tmp = (struct acl_object_label *)
58866+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
58867+ return -ENOMEM;
58868+
58869+ if (copy_from_user(o_tmp, userp,
58870+ sizeof (struct acl_object_label)))
58871+ return -EFAULT;
58872+
58873+ userp = o_tmp->prev;
58874+
58875+ len = strnlen_user(o_tmp->filename, PATH_MAX);
58876+
58877+ if (!len || len >= PATH_MAX)
58878+ return -EINVAL;
58879+
58880+ if ((tmp = (char *) acl_alloc(len)) == NULL)
58881+ return -ENOMEM;
58882+
58883+ if (copy_from_user(tmp, o_tmp->filename, len))
58884+ return -EFAULT;
58885+ tmp[len-1] = '\0';
58886+ o_tmp->filename = tmp;
58887+
58888+ insert_acl_obj_label(o_tmp, subj);
58889+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
58890+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
58891+ return -ENOMEM;
58892+
58893+ ret = copy_user_glob(o_tmp);
58894+ if (ret)
58895+ return ret;
58896+
58897+ if (o_tmp->nested) {
58898+ int already_copied;
58899+
58900+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
58901+ if (IS_ERR(o_tmp->nested))
58902+ return PTR_ERR(o_tmp->nested);
58903+
58904+ /* insert into nested subject list if we haven't copied this one yet
58905+ to prevent duplicate entries */
58906+ if (!already_copied) {
58907+ o_tmp->nested->next = role->hash->first;
58908+ role->hash->first = o_tmp->nested;
58909+ }
58910+ }
58911+ }
58912+
58913+ return 0;
58914+}
58915+
58916+static __u32
58917+count_user_subjs(struct acl_subject_label *userp)
58918+{
58919+ struct acl_subject_label s_tmp;
58920+ __u32 num = 0;
58921+
58922+ while (userp) {
58923+ if (copy_from_user(&s_tmp, userp,
58924+ sizeof (struct acl_subject_label)))
58925+ break;
58926+
58927+ userp = s_tmp.prev;
58928+ }
58929+
58930+ return num;
58931+}
58932+
58933+static int
58934+copy_user_allowedips(struct acl_role_label *rolep)
58935+{
58936+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
58937+
58938+ ruserip = rolep->allowed_ips;
58939+
58940+ while (ruserip) {
58941+ rlast = rtmp;
58942+
58943+ if ((rtmp = (struct role_allowed_ip *)
58944+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
58945+ return -ENOMEM;
58946+
58947+ if (copy_from_user(rtmp, ruserip,
58948+ sizeof (struct role_allowed_ip)))
58949+ return -EFAULT;
58950+
58951+ ruserip = rtmp->prev;
58952+
58953+ if (!rlast) {
58954+ rtmp->prev = NULL;
58955+ rolep->allowed_ips = rtmp;
58956+ } else {
58957+ rlast->next = rtmp;
58958+ rtmp->prev = rlast;
58959+ }
58960+
58961+ if (!ruserip)
58962+ rtmp->next = NULL;
58963+ }
58964+
58965+ return 0;
58966+}
58967+
58968+static int
58969+copy_user_transitions(struct acl_role_label *rolep)
58970+{
58971+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
58972+
58973+ unsigned int len;
58974+ char *tmp;
58975+
58976+ rusertp = rolep->transitions;
58977+
58978+ while (rusertp) {
58979+ rlast = rtmp;
58980+
58981+ if ((rtmp = (struct role_transition *)
58982+ acl_alloc(sizeof (struct role_transition))) == NULL)
58983+ return -ENOMEM;
58984+
58985+ if (copy_from_user(rtmp, rusertp,
58986+ sizeof (struct role_transition)))
58987+ return -EFAULT;
58988+
58989+ rusertp = rtmp->prev;
58990+
58991+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
58992+
58993+ if (!len || len >= GR_SPROLE_LEN)
58994+ return -EINVAL;
58995+
58996+ if ((tmp = (char *) acl_alloc(len)) == NULL)
58997+ return -ENOMEM;
58998+
58999+ if (copy_from_user(tmp, rtmp->rolename, len))
59000+ return -EFAULT;
59001+ tmp[len-1] = '\0';
59002+ rtmp->rolename = tmp;
59003+
59004+ if (!rlast) {
59005+ rtmp->prev = NULL;
59006+ rolep->transitions = rtmp;
59007+ } else {
59008+ rlast->next = rtmp;
59009+ rtmp->prev = rlast;
59010+ }
59011+
59012+ if (!rusertp)
59013+ rtmp->next = NULL;
59014+ }
59015+
59016+ return 0;
59017+}
59018+
59019+static struct acl_subject_label *
59020+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
59021+{
59022+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
59023+ unsigned int len;
59024+ char *tmp;
59025+ __u32 num_objs;
59026+ struct acl_ip_label **i_tmp, *i_utmp2;
59027+ struct gr_hash_struct ghash;
59028+ struct subject_map *subjmap;
59029+ unsigned int i_num;
59030+ int err;
59031+
59032+ if (already_copied != NULL)
59033+ *already_copied = 0;
59034+
59035+ s_tmp = lookup_subject_map(userp);
59036+
59037+ /* we've already copied this subject into the kernel, just return
59038+ the reference to it, and don't copy it over again
59039+ */
59040+ if (s_tmp) {
59041+ if (already_copied != NULL)
59042+ *already_copied = 1;
59043+ return(s_tmp);
59044+ }
59045+
59046+ if ((s_tmp = (struct acl_subject_label *)
59047+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
59048+ return ERR_PTR(-ENOMEM);
59049+
59050+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
59051+ if (subjmap == NULL)
59052+ return ERR_PTR(-ENOMEM);
59053+
59054+ subjmap->user = userp;
59055+ subjmap->kernel = s_tmp;
59056+ insert_subj_map_entry(subjmap);
59057+
59058+ if (copy_from_user(s_tmp, userp,
59059+ sizeof (struct acl_subject_label)))
59060+ return ERR_PTR(-EFAULT);
59061+
59062+ len = strnlen_user(s_tmp->filename, PATH_MAX);
59063+
59064+ if (!len || len >= PATH_MAX)
59065+ return ERR_PTR(-EINVAL);
59066+
59067+ if ((tmp = (char *) acl_alloc(len)) == NULL)
59068+ return ERR_PTR(-ENOMEM);
59069+
59070+ if (copy_from_user(tmp, s_tmp->filename, len))
59071+ return ERR_PTR(-EFAULT);
59072+ tmp[len-1] = '\0';
59073+ s_tmp->filename = tmp;
59074+
59075+ if (!strcmp(s_tmp->filename, "/"))
59076+ role->root_label = s_tmp;
59077+
59078+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
59079+ return ERR_PTR(-EFAULT);
59080+
59081+ /* copy user and group transition tables */
59082+
59083+ if (s_tmp->user_trans_num) {
59084+ uid_t *uidlist;
59085+
59086+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
59087+ if (uidlist == NULL)
59088+ return ERR_PTR(-ENOMEM);
59089+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
59090+ return ERR_PTR(-EFAULT);
59091+
59092+ s_tmp->user_transitions = uidlist;
59093+ }
59094+
59095+ if (s_tmp->group_trans_num) {
59096+ gid_t *gidlist;
59097+
59098+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
59099+ if (gidlist == NULL)
59100+ return ERR_PTR(-ENOMEM);
59101+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
59102+ return ERR_PTR(-EFAULT);
59103+
59104+ s_tmp->group_transitions = gidlist;
59105+ }
59106+
59107+ /* set up object hash table */
59108+ num_objs = count_user_objs(ghash.first);
59109+
59110+ s_tmp->obj_hash_size = num_objs;
59111+ s_tmp->obj_hash =
59112+ (struct acl_object_label **)
59113+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
59114+
59115+ if (!s_tmp->obj_hash)
59116+ return ERR_PTR(-ENOMEM);
59117+
59118+ memset(s_tmp->obj_hash, 0,
59119+ s_tmp->obj_hash_size *
59120+ sizeof (struct acl_object_label *));
59121+
59122+ /* add in objects */
59123+ err = copy_user_objs(ghash.first, s_tmp, role);
59124+
59125+ if (err)
59126+ return ERR_PTR(err);
59127+
59128+ /* set pointer for parent subject */
59129+ if (s_tmp->parent_subject) {
59130+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
59131+
59132+ if (IS_ERR(s_tmp2))
59133+ return s_tmp2;
59134+
59135+ s_tmp->parent_subject = s_tmp2;
59136+ }
59137+
59138+ /* add in ip acls */
59139+
59140+ if (!s_tmp->ip_num) {
59141+ s_tmp->ips = NULL;
59142+ goto insert;
59143+ }
59144+
59145+ i_tmp =
59146+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
59147+ sizeof (struct acl_ip_label *));
59148+
59149+ if (!i_tmp)
59150+ return ERR_PTR(-ENOMEM);
59151+
59152+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
59153+ *(i_tmp + i_num) =
59154+ (struct acl_ip_label *)
59155+ acl_alloc(sizeof (struct acl_ip_label));
59156+ if (!*(i_tmp + i_num))
59157+ return ERR_PTR(-ENOMEM);
59158+
59159+ if (copy_from_user
59160+ (&i_utmp2, s_tmp->ips + i_num,
59161+ sizeof (struct acl_ip_label *)))
59162+ return ERR_PTR(-EFAULT);
59163+
59164+ if (copy_from_user
59165+ (*(i_tmp + i_num), i_utmp2,
59166+ sizeof (struct acl_ip_label)))
59167+ return ERR_PTR(-EFAULT);
59168+
59169+ if ((*(i_tmp + i_num))->iface == NULL)
59170+ continue;
59171+
59172+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
59173+ if (!len || len >= IFNAMSIZ)
59174+ return ERR_PTR(-EINVAL);
59175+ tmp = acl_alloc(len);
59176+ if (tmp == NULL)
59177+ return ERR_PTR(-ENOMEM);
59178+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
59179+ return ERR_PTR(-EFAULT);
59180+ (*(i_tmp + i_num))->iface = tmp;
59181+ }
59182+
59183+ s_tmp->ips = i_tmp;
59184+
59185+insert:
59186+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
59187+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
59188+ return ERR_PTR(-ENOMEM);
59189+
59190+ return s_tmp;
59191+}
59192+
59193+static int
59194+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
59195+{
59196+ struct acl_subject_label s_pre;
59197+ struct acl_subject_label * ret;
59198+ int err;
59199+
59200+ while (userp) {
59201+ if (copy_from_user(&s_pre, userp,
59202+ sizeof (struct acl_subject_label)))
59203+ return -EFAULT;
59204+
59205+ ret = do_copy_user_subj(userp, role, NULL);
59206+
59207+ err = PTR_ERR(ret);
59208+ if (IS_ERR(ret))
59209+ return err;
59210+
59211+ insert_acl_subj_label(ret, role);
59212+
59213+ userp = s_pre.prev;
59214+ }
59215+
59216+ return 0;
59217+}
59218+
59219+static int
59220+copy_user_acl(struct gr_arg *arg)
59221+{
59222+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
59223+ struct acl_subject_label *subj_list;
59224+ struct sprole_pw *sptmp;
59225+ struct gr_hash_struct *ghash;
59226+ uid_t *domainlist;
59227+ unsigned int r_num;
59228+ unsigned int len;
59229+ char *tmp;
59230+ int err = 0;
59231+ __u16 i;
59232+ __u32 num_subjs;
59233+
59234+ /* we need a default and kernel role */
59235+ if (arg->role_db.num_roles < 2)
59236+ return -EINVAL;
59237+
59238+ /* copy special role authentication info from userspace */
59239+
59240+ num_sprole_pws = arg->num_sprole_pws;
59241+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
59242+
59243+ if (!acl_special_roles && num_sprole_pws)
59244+ return -ENOMEM;
59245+
59246+ for (i = 0; i < num_sprole_pws; i++) {
59247+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
59248+ if (!sptmp)
59249+ return -ENOMEM;
59250+ if (copy_from_user(sptmp, arg->sprole_pws + i,
59251+ sizeof (struct sprole_pw)))
59252+ return -EFAULT;
59253+
59254+ len = strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
59255+
59256+ if (!len || len >= GR_SPROLE_LEN)
59257+ return -EINVAL;
59258+
59259+ if ((tmp = (char *) acl_alloc(len)) == NULL)
59260+ return -ENOMEM;
59261+
59262+ if (copy_from_user(tmp, sptmp->rolename, len))
59263+ return -EFAULT;
59264+
59265+ tmp[len-1] = '\0';
59266+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59267+ printk(KERN_ALERT "Copying special role %s\n", tmp);
59268+#endif
59269+ sptmp->rolename = tmp;
59270+ acl_special_roles[i] = sptmp;
59271+ }
59272+
59273+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
59274+
59275+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
59276+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
59277+
59278+ if (!r_tmp)
59279+ return -ENOMEM;
59280+
59281+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
59282+ sizeof (struct acl_role_label *)))
59283+ return -EFAULT;
59284+
59285+ if (copy_from_user(r_tmp, r_utmp2,
59286+ sizeof (struct acl_role_label)))
59287+ return -EFAULT;
59288+
59289+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
59290+
59291+ if (!len || len >= PATH_MAX)
59292+ return -EINVAL;
59293+
59294+ if ((tmp = (char *) acl_alloc(len)) == NULL)
59295+ return -ENOMEM;
59296+
59297+ if (copy_from_user(tmp, r_tmp->rolename, len))
59298+ return -EFAULT;
59299+
59300+ tmp[len-1] = '\0';
59301+ r_tmp->rolename = tmp;
59302+
59303+ if (!strcmp(r_tmp->rolename, "default")
59304+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
59305+ default_role = r_tmp;
59306+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
59307+ kernel_role = r_tmp;
59308+ }
59309+
59310+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
59311+ return -ENOMEM;
59312+
59313+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct)))
59314+ return -EFAULT;
59315+
59316+ r_tmp->hash = ghash;
59317+
59318+ num_subjs = count_user_subjs(r_tmp->hash->first);
59319+
59320+ r_tmp->subj_hash_size = num_subjs;
59321+ r_tmp->subj_hash =
59322+ (struct acl_subject_label **)
59323+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
59324+
59325+ if (!r_tmp->subj_hash)
59326+ return -ENOMEM;
59327+
59328+ err = copy_user_allowedips(r_tmp);
59329+ if (err)
59330+ return err;
59331+
59332+ /* copy domain info */
59333+ if (r_tmp->domain_children != NULL) {
59334+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
59335+ if (domainlist == NULL)
59336+ return -ENOMEM;
59337+
59338+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
59339+ return -EFAULT;
59340+
59341+ r_tmp->domain_children = domainlist;
59342+ }
59343+
59344+ err = copy_user_transitions(r_tmp);
59345+ if (err)
59346+ return err;
59347+
59348+ memset(r_tmp->subj_hash, 0,
59349+ r_tmp->subj_hash_size *
59350+ sizeof (struct acl_subject_label *));
59351+
59352+ /* acquire the list of subjects, then NULL out
59353+ the list prior to parsing the subjects for this role,
59354+ as during this parsing the list is replaced with a list
59355+ of *nested* subjects for the role
59356+ */
59357+ subj_list = r_tmp->hash->first;
59358+
59359+ /* set nested subject list to null */
59360+ r_tmp->hash->first = NULL;
59361+
59362+ err = copy_user_subjs(subj_list, r_tmp);
59363+
59364+ if (err)
59365+ return err;
59366+
59367+ insert_acl_role_label(r_tmp);
59368+ }
59369+
59370+ if (default_role == NULL || kernel_role == NULL)
59371+ return -EINVAL;
59372+
59373+ return err;
59374+}
59375+
59376+static int
59377+gracl_init(struct gr_arg *args)
59378+{
59379+ int error = 0;
59380+
59381+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
59382+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
59383+
59384+ if (init_variables(args)) {
59385+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
59386+ error = -ENOMEM;
59387+ free_variables();
59388+ goto out;
59389+ }
59390+
59391+ error = copy_user_acl(args);
59392+ free_init_variables();
59393+ if (error) {
59394+ free_variables();
59395+ goto out;
59396+ }
59397+
59398+ if ((error = gr_set_acls(0))) {
59399+ free_variables();
59400+ goto out;
59401+ }
59402+
59403+ pax_open_kernel();
59404+ gr_status |= GR_READY;
59405+ pax_close_kernel();
59406+
59407+ out:
59408+ return error;
59409+}
59410+
59411+/* derived from glibc fnmatch() 0: match, 1: no match*/
59412+
59413+static int
59414+glob_match(const char *p, const char *n)
59415+{
59416+ char c;
59417+
59418+ while ((c = *p++) != '\0') {
59419+ switch (c) {
59420+ case '?':
59421+ if (*n == '\0')
59422+ return 1;
59423+ else if (*n == '/')
59424+ return 1;
59425+ break;
59426+ case '\\':
59427+ if (*n != c)
59428+ return 1;
59429+ break;
59430+ case '*':
59431+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
59432+ if (*n == '/')
59433+ return 1;
59434+ else if (c == '?') {
59435+ if (*n == '\0')
59436+ return 1;
59437+ else
59438+ ++n;
59439+ }
59440+ }
59441+ if (c == '\0') {
59442+ return 0;
59443+ } else {
59444+ const char *endp;
59445+
59446+ if ((endp = strchr(n, '/')) == NULL)
59447+ endp = n + strlen(n);
59448+
59449+ if (c == '[') {
59450+ for (--p; n < endp; ++n)
59451+ if (!glob_match(p, n))
59452+ return 0;
59453+ } else if (c == '/') {
59454+ while (*n != '\0' && *n != '/')
59455+ ++n;
59456+ if (*n == '/' && !glob_match(p, n + 1))
59457+ return 0;
59458+ } else {
59459+ for (--p; n < endp; ++n)
59460+ if (*n == c && !glob_match(p, n))
59461+ return 0;
59462+ }
59463+
59464+ return 1;
59465+ }
59466+ case '[':
59467+ {
59468+ int not;
59469+ char cold;
59470+
59471+ if (*n == '\0' || *n == '/')
59472+ return 1;
59473+
59474+ not = (*p == '!' || *p == '^');
59475+ if (not)
59476+ ++p;
59477+
59478+ c = *p++;
59479+ for (;;) {
59480+ unsigned char fn = (unsigned char)*n;
59481+
59482+ if (c == '\0')
59483+ return 1;
59484+ else {
59485+ if (c == fn)
59486+ goto matched;
59487+ cold = c;
59488+ c = *p++;
59489+
59490+ if (c == '-' && *p != ']') {
59491+ unsigned char cend = *p++;
59492+
59493+ if (cend == '\0')
59494+ return 1;
59495+
59496+ if (cold <= fn && fn <= cend)
59497+ goto matched;
59498+
59499+ c = *p++;
59500+ }
59501+ }
59502+
59503+ if (c == ']')
59504+ break;
59505+ }
59506+ if (!not)
59507+ return 1;
59508+ break;
59509+ matched:
59510+ while (c != ']') {
59511+ if (c == '\0')
59512+ return 1;
59513+
59514+ c = *p++;
59515+ }
59516+ if (not)
59517+ return 1;
59518+ }
59519+ break;
59520+ default:
59521+ if (c != *n)
59522+ return 1;
59523+ }
59524+
59525+ ++n;
59526+ }
59527+
59528+ if (*n == '\0')
59529+ return 0;
59530+
59531+ if (*n == '/')
59532+ return 0;
59533+
59534+ return 1;
59535+}
59536+
59537+static struct acl_object_label *
59538+chk_glob_label(struct acl_object_label *globbed,
59539+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
59540+{
59541+ struct acl_object_label *tmp;
59542+
59543+ if (*path == NULL)
59544+ *path = gr_to_filename_nolock(dentry, mnt);
59545+
59546+ tmp = globbed;
59547+
59548+ while (tmp) {
59549+ if (!glob_match(tmp->filename, *path))
59550+ return tmp;
59551+ tmp = tmp->next;
59552+ }
59553+
59554+ return NULL;
59555+}
59556+
59557+static struct acl_object_label *
59558+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
59559+ const ino_t curr_ino, const dev_t curr_dev,
59560+ const struct acl_subject_label *subj, char **path, const int checkglob)
59561+{
59562+ struct acl_subject_label *tmpsubj;
59563+ struct acl_object_label *retval;
59564+ struct acl_object_label *retval2;
59565+
59566+ tmpsubj = (struct acl_subject_label *) subj;
59567+ read_lock(&gr_inode_lock);
59568+ do {
59569+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
59570+ if (retval) {
59571+ if (checkglob && retval->globbed) {
59572+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
59573+ if (retval2)
59574+ retval = retval2;
59575+ }
59576+ break;
59577+ }
59578+ } while ((tmpsubj = tmpsubj->parent_subject));
59579+ read_unlock(&gr_inode_lock);
59580+
59581+ return retval;
59582+}
59583+
59584+static __inline__ struct acl_object_label *
59585+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
59586+ struct dentry *curr_dentry,
59587+ const struct acl_subject_label *subj, char **path, const int checkglob)
59588+{
59589+ int newglob = checkglob;
59590+ ino_t inode;
59591+ dev_t device;
59592+
59593+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
59594+ as we don't want a / * rule to match instead of the / object
59595+ don't do this for create lookups that call this function though, since they're looking up
59596+ on the parent and thus need globbing checks on all paths
59597+ */
59598+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
59599+ newglob = GR_NO_GLOB;
59600+
59601+ spin_lock(&curr_dentry->d_lock);
59602+ inode = curr_dentry->d_inode->i_ino;
59603+ device = __get_dev(curr_dentry);
59604+ spin_unlock(&curr_dentry->d_lock);
59605+
59606+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
59607+}
59608+
59609+#ifdef CONFIG_HUGETLBFS
59610+static inline bool
59611+is_hugetlbfs_mnt(const struct vfsmount *mnt)
59612+{
59613+ int i;
59614+ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
59615+ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
59616+ return true;
59617+ }
59618+
59619+ return false;
59620+}
59621+#endif
59622+
59623+static struct acl_object_label *
59624+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
59625+ const struct acl_subject_label *subj, char *path, const int checkglob)
59626+{
59627+ struct dentry *dentry = (struct dentry *) l_dentry;
59628+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
59629+ struct mount *real_mnt = real_mount(mnt);
59630+ struct acl_object_label *retval;
59631+ struct dentry *parent;
59632+
59633+ br_read_lock(&vfsmount_lock);
59634+ write_seqlock(&rename_lock);
59635+
59636+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
59637+#ifdef CONFIG_NET
59638+ mnt == sock_mnt ||
59639+#endif
59640+#ifdef CONFIG_HUGETLBFS
59641+ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
59642+#endif
59643+ /* ignore Eric Biederman */
59644+ IS_PRIVATE(l_dentry->d_inode))) {
59645+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
59646+ goto out;
59647+ }
59648+
59649+ for (;;) {
59650+ if (dentry == real_root.dentry && mnt == real_root.mnt)
59651+ break;
59652+
59653+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
59654+ if (!mnt_has_parent(real_mnt))
59655+ break;
59656+
59657+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
59658+ if (retval != NULL)
59659+ goto out;
59660+
59661+ dentry = real_mnt->mnt_mountpoint;
59662+ real_mnt = real_mnt->mnt_parent;
59663+ mnt = &real_mnt->mnt;
59664+ continue;
59665+ }
59666+
59667+ parent = dentry->d_parent;
59668+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
59669+ if (retval != NULL)
59670+ goto out;
59671+
59672+ dentry = parent;
59673+ }
59674+
59675+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
59676+
59677+ /* real_root is pinned so we don't have to hold a reference */
59678+ if (retval == NULL)
59679+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
59680+out:
59681+ write_sequnlock(&rename_lock);
59682+ br_read_unlock(&vfsmount_lock);
59683+
59684+ BUG_ON(retval == NULL);
59685+
59686+ return retval;
59687+}
59688+
59689+static __inline__ struct acl_object_label *
59690+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
59691+ const struct acl_subject_label *subj)
59692+{
59693+ char *path = NULL;
59694+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
59695+}
59696+
59697+static __inline__ struct acl_object_label *
59698+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
59699+ const struct acl_subject_label *subj)
59700+{
59701+ char *path = NULL;
59702+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
59703+}
59704+
59705+static __inline__ struct acl_object_label *
59706+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
59707+ const struct acl_subject_label *subj, char *path)
59708+{
59709+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
59710+}
59711+
59712+static struct acl_subject_label *
59713+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
59714+ const struct acl_role_label *role)
59715+{
59716+ struct dentry *dentry = (struct dentry *) l_dentry;
59717+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
59718+ struct mount *real_mnt = real_mount(mnt);
59719+ struct acl_subject_label *retval;
59720+ struct dentry *parent;
59721+
59722+ br_read_lock(&vfsmount_lock);
59723+ write_seqlock(&rename_lock);
59724+
59725+ for (;;) {
59726+ if (dentry == real_root.dentry && mnt == real_root.mnt)
59727+ break;
59728+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
59729+ if (!mnt_has_parent(real_mnt))
59730+ break;
59731+
59732+ spin_lock(&dentry->d_lock);
59733+ read_lock(&gr_inode_lock);
59734+ retval =
59735+ lookup_acl_subj_label(dentry->d_inode->i_ino,
59736+ __get_dev(dentry), role);
59737+ read_unlock(&gr_inode_lock);
59738+ spin_unlock(&dentry->d_lock);
59739+ if (retval != NULL)
59740+ goto out;
59741+
59742+ dentry = real_mnt->mnt_mountpoint;
59743+ real_mnt = real_mnt->mnt_parent;
59744+ mnt = &real_mnt->mnt;
59745+ continue;
59746+ }
59747+
59748+ spin_lock(&dentry->d_lock);
59749+ read_lock(&gr_inode_lock);
59750+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
59751+ __get_dev(dentry), role);
59752+ read_unlock(&gr_inode_lock);
59753+ parent = dentry->d_parent;
59754+ spin_unlock(&dentry->d_lock);
59755+
59756+ if (retval != NULL)
59757+ goto out;
59758+
59759+ dentry = parent;
59760+ }
59761+
59762+ spin_lock(&dentry->d_lock);
59763+ read_lock(&gr_inode_lock);
59764+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
59765+ __get_dev(dentry), role);
59766+ read_unlock(&gr_inode_lock);
59767+ spin_unlock(&dentry->d_lock);
59768+
59769+ if (unlikely(retval == NULL)) {
59770+ /* real_root is pinned, we don't need to hold a reference */
59771+ read_lock(&gr_inode_lock);
59772+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
59773+ __get_dev(real_root.dentry), role);
59774+ read_unlock(&gr_inode_lock);
59775+ }
59776+out:
59777+ write_sequnlock(&rename_lock);
59778+ br_read_unlock(&vfsmount_lock);
59779+
59780+ BUG_ON(retval == NULL);
59781+
59782+ return retval;
59783+}
59784+
59785+static void
59786+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
59787+{
59788+ struct task_struct *task = current;
59789+ const struct cred *cred = current_cred();
59790+
59791+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
59792+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
59793+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
59794+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
59795+
59796+ return;
59797+}
59798+
59799+static void
59800+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
59801+{
59802+ struct task_struct *task = current;
59803+ const struct cred *cred = current_cred();
59804+
59805+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
59806+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
59807+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
59808+ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
59809+
59810+ return;
59811+}
59812+
59813+static void
59814+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
59815+{
59816+ struct task_struct *task = current;
59817+ const struct cred *cred = current_cred();
59818+
59819+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
59820+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
59821+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
59822+ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
59823+
59824+ return;
59825+}
59826+
59827+__u32
59828+gr_search_file(const struct dentry * dentry, const __u32 mode,
59829+ const struct vfsmount * mnt)
59830+{
59831+ __u32 retval = mode;
59832+ struct acl_subject_label *curracl;
59833+ struct acl_object_label *currobj;
59834+
59835+ if (unlikely(!(gr_status & GR_READY)))
59836+ return (mode & ~GR_AUDITS);
59837+
59838+ curracl = current->acl;
59839+
59840+ currobj = chk_obj_label(dentry, mnt, curracl);
59841+ retval = currobj->mode & mode;
59842+
59843+ /* if we're opening a specified transfer file for writing
59844+ (e.g. /dev/initctl), then transfer our role to init
59845+ */
59846+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
59847+ current->role->roletype & GR_ROLE_PERSIST)) {
59848+ struct task_struct *task = init_pid_ns.child_reaper;
59849+
59850+ if (task->role != current->role) {
59851+ task->acl_sp_role = 0;
59852+ task->acl_role_id = current->acl_role_id;
59853+ task->role = current->role;
59854+ rcu_read_lock();
59855+ read_lock(&grsec_exec_file_lock);
59856+ gr_apply_subject_to_task(task);
59857+ read_unlock(&grsec_exec_file_lock);
59858+ rcu_read_unlock();
59859+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
59860+ }
59861+ }
59862+
59863+ if (unlikely
59864+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
59865+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
59866+ __u32 new_mode = mode;
59867+
59868+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
59869+
59870+ retval = new_mode;
59871+
59872+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
59873+ new_mode |= GR_INHERIT;
59874+
59875+ if (!(mode & GR_NOLEARN))
59876+ gr_log_learn(dentry, mnt, new_mode);
59877+ }
59878+
59879+ return retval;
59880+}
59881+
59882+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
59883+ const struct dentry *parent,
59884+ const struct vfsmount *mnt)
59885+{
59886+ struct name_entry *match;
59887+ struct acl_object_label *matchpo;
59888+ struct acl_subject_label *curracl;
59889+ char *path;
59890+
59891+ if (unlikely(!(gr_status & GR_READY)))
59892+ return NULL;
59893+
59894+ preempt_disable();
59895+ path = gr_to_filename_rbac(new_dentry, mnt);
59896+ match = lookup_name_entry_create(path);
59897+
59898+ curracl = current->acl;
59899+
59900+ if (match) {
59901+ read_lock(&gr_inode_lock);
59902+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
59903+ read_unlock(&gr_inode_lock);
59904+
59905+ if (matchpo) {
59906+ preempt_enable();
59907+ return matchpo;
59908+ }
59909+ }
59910+
59911+ // lookup parent
59912+
59913+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
59914+
59915+ preempt_enable();
59916+ return matchpo;
59917+}
59918+
59919+__u32
59920+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
59921+ const struct vfsmount * mnt, const __u32 mode)
59922+{
59923+ struct acl_object_label *matchpo;
59924+ __u32 retval;
59925+
59926+ if (unlikely(!(gr_status & GR_READY)))
59927+ return (mode & ~GR_AUDITS);
59928+
59929+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
59930+
59931+ retval = matchpo->mode & mode;
59932+
59933+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
59934+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
59935+ __u32 new_mode = mode;
59936+
59937+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
59938+
59939+ gr_log_learn(new_dentry, mnt, new_mode);
59940+ return new_mode;
59941+ }
59942+
59943+ return retval;
59944+}
59945+
59946+__u32
59947+gr_check_link(const struct dentry * new_dentry,
59948+ const struct dentry * parent_dentry,
59949+ const struct vfsmount * parent_mnt,
59950+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
59951+{
59952+ struct acl_object_label *obj;
59953+ __u32 oldmode, newmode;
59954+ __u32 needmode;
59955+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
59956+ GR_DELETE | GR_INHERIT;
59957+
59958+ if (unlikely(!(gr_status & GR_READY)))
59959+ return (GR_CREATE | GR_LINK);
59960+
59961+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
59962+ oldmode = obj->mode;
59963+
59964+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
59965+ newmode = obj->mode;
59966+
59967+ needmode = newmode & checkmodes;
59968+
59969+ // old name for hardlink must have at least the permissions of the new name
59970+ if ((oldmode & needmode) != needmode)
59971+ goto bad;
59972+
59973+ // if old name had restrictions/auditing, make sure the new name does as well
59974+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
59975+
59976+ // don't allow hardlinking of suid/sgid/fcapped files without permission
59977+ if (is_privileged_binary(old_dentry))
59978+ needmode |= GR_SETID;
59979+
59980+ if ((newmode & needmode) != needmode)
59981+ goto bad;
59982+
59983+ // enforce minimum permissions
59984+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
59985+ return newmode;
59986+bad:
59987+ needmode = oldmode;
59988+ if (is_privileged_binary(old_dentry))
59989+ needmode |= GR_SETID;
59990+
59991+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
59992+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
59993+ return (GR_CREATE | GR_LINK);
59994+ } else if (newmode & GR_SUPPRESS)
59995+ return GR_SUPPRESS;
59996+ else
59997+ return 0;
59998+}
59999+
60000+int
60001+gr_check_hidden_task(const struct task_struct *task)
60002+{
60003+ if (unlikely(!(gr_status & GR_READY)))
60004+ return 0;
60005+
60006+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
60007+ return 1;
60008+
60009+ return 0;
60010+}
60011+
60012+int
60013+gr_check_protected_task(const struct task_struct *task)
60014+{
60015+ if (unlikely(!(gr_status & GR_READY) || !task))
60016+ return 0;
60017+
60018+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
60019+ task->acl != current->acl)
60020+ return 1;
60021+
60022+ return 0;
60023+}
60024+
60025+int
60026+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
60027+{
60028+ struct task_struct *p;
60029+ int ret = 0;
60030+
60031+ if (unlikely(!(gr_status & GR_READY) || !pid))
60032+ return ret;
60033+
60034+ read_lock(&tasklist_lock);
60035+ do_each_pid_task(pid, type, p) {
60036+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
60037+ p->acl != current->acl) {
60038+ ret = 1;
60039+ goto out;
60040+ }
60041+ } while_each_pid_task(pid, type, p);
60042+out:
60043+ read_unlock(&tasklist_lock);
60044+
60045+ return ret;
60046+}
60047+
60048+void
60049+gr_copy_label(struct task_struct *tsk)
60050+{
60051+ tsk->signal->used_accept = 0;
60052+ tsk->acl_sp_role = 0;
60053+ tsk->acl_role_id = current->acl_role_id;
60054+ tsk->acl = current->acl;
60055+ tsk->role = current->role;
60056+ tsk->signal->curr_ip = current->signal->curr_ip;
60057+ tsk->signal->saved_ip = current->signal->saved_ip;
60058+ if (current->exec_file)
60059+ get_file(current->exec_file);
60060+ tsk->exec_file = current->exec_file;
60061+ tsk->is_writable = current->is_writable;
60062+ if (unlikely(current->signal->used_accept)) {
60063+ current->signal->curr_ip = 0;
60064+ current->signal->saved_ip = 0;
60065+ }
60066+
60067+ return;
60068+}
60069+
60070+static void
60071+gr_set_proc_res(struct task_struct *task)
60072+{
60073+ struct acl_subject_label *proc;
60074+ unsigned short i;
60075+
60076+ proc = task->acl;
60077+
60078+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
60079+ return;
60080+
60081+ for (i = 0; i < RLIM_NLIMITS; i++) {
60082+ if (!(proc->resmask & (1 << i)))
60083+ continue;
60084+
60085+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
60086+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
60087+
60088+ if (i == RLIMIT_CPU)
60089+ update_rlimit_cpu(task, proc->res[i].rlim_cur);
60090+ }
60091+
60092+ return;
60093+}
60094+
60095+extern int __gr_process_user_ban(struct user_struct *user);
60096+
60097+int
60098+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
60099+{
60100+ unsigned int i;
60101+ __u16 num;
60102+ uid_t *uidlist;
60103+ uid_t curuid;
60104+ int realok = 0;
60105+ int effectiveok = 0;
60106+ int fsok = 0;
60107+ uid_t globalreal, globaleffective, globalfs;
60108+
60109+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
60110+ struct user_struct *user;
60111+
60112+ if (!uid_valid(real))
60113+ goto skipit;
60114+
60115+ /* find user based on global namespace */
60116+
60117+ globalreal = GR_GLOBAL_UID(real);
60118+
60119+ user = find_user(make_kuid(&init_user_ns, globalreal));
60120+ if (user == NULL)
60121+ goto skipit;
60122+
60123+ if (__gr_process_user_ban(user)) {
60124+ /* for find_user */
60125+ free_uid(user);
60126+ return 1;
60127+ }
60128+
60129+ /* for find_user */
60130+ free_uid(user);
60131+
60132+skipit:
60133+#endif
60134+
60135+ if (unlikely(!(gr_status & GR_READY)))
60136+ return 0;
60137+
60138+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
60139+ gr_log_learn_uid_change(real, effective, fs);
60140+
60141+ num = current->acl->user_trans_num;
60142+ uidlist = current->acl->user_transitions;
60143+
60144+ if (uidlist == NULL)
60145+ return 0;
60146+
60147+ if (!uid_valid(real)) {
60148+ realok = 1;
60149+ globalreal = (uid_t)-1;
60150+ } else {
60151+ globalreal = GR_GLOBAL_UID(real);
60152+ }
60153+ if (!uid_valid(effective)) {
60154+ effectiveok = 1;
60155+ globaleffective = (uid_t)-1;
60156+ } else {
60157+ globaleffective = GR_GLOBAL_UID(effective);
60158+ }
60159+ if (!uid_valid(fs)) {
60160+ fsok = 1;
60161+ globalfs = (uid_t)-1;
60162+ } else {
60163+ globalfs = GR_GLOBAL_UID(fs);
60164+ }
60165+
60166+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
60167+ for (i = 0; i < num; i++) {
60168+ curuid = uidlist[i];
60169+ if (globalreal == curuid)
60170+ realok = 1;
60171+ if (globaleffective == curuid)
60172+ effectiveok = 1;
60173+ if (globalfs == curuid)
60174+ fsok = 1;
60175+ }
60176+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
60177+ for (i = 0; i < num; i++) {
60178+ curuid = uidlist[i];
60179+ if (globalreal == curuid)
60180+ break;
60181+ if (globaleffective == curuid)
60182+ break;
60183+ if (globalfs == curuid)
60184+ break;
60185+ }
60186+ /* not in deny list */
60187+ if (i == num) {
60188+ realok = 1;
60189+ effectiveok = 1;
60190+ fsok = 1;
60191+ }
60192+ }
60193+
60194+ if (realok && effectiveok && fsok)
60195+ return 0;
60196+ else {
60197+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
60198+ return 1;
60199+ }
60200+}
60201+
60202+int
60203+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
60204+{
60205+ unsigned int i;
60206+ __u16 num;
60207+ gid_t *gidlist;
60208+ gid_t curgid;
60209+ int realok = 0;
60210+ int effectiveok = 0;
60211+ int fsok = 0;
60212+ gid_t globalreal, globaleffective, globalfs;
60213+
60214+ if (unlikely(!(gr_status & GR_READY)))
60215+ return 0;
60216+
60217+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
60218+ gr_log_learn_gid_change(real, effective, fs);
60219+
60220+ num = current->acl->group_trans_num;
60221+ gidlist = current->acl->group_transitions;
60222+
60223+ if (gidlist == NULL)
60224+ return 0;
60225+
60226+ if (!gid_valid(real)) {
60227+ realok = 1;
60228+ globalreal = (gid_t)-1;
60229+ } else {
60230+ globalreal = GR_GLOBAL_GID(real);
60231+ }
60232+ if (!gid_valid(effective)) {
60233+ effectiveok = 1;
60234+ globaleffective = (gid_t)-1;
60235+ } else {
60236+ globaleffective = GR_GLOBAL_GID(effective);
60237+ }
60238+ if (!gid_valid(fs)) {
60239+ fsok = 1;
60240+ globalfs = (gid_t)-1;
60241+ } else {
60242+ globalfs = GR_GLOBAL_GID(fs);
60243+ }
60244+
60245+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
60246+ for (i = 0; i < num; i++) {
60247+ curgid = gidlist[i];
60248+ if (globalreal == curgid)
60249+ realok = 1;
60250+ if (globaleffective == curgid)
60251+ effectiveok = 1;
60252+ if (globalfs == curgid)
60253+ fsok = 1;
60254+ }
60255+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
60256+ for (i = 0; i < num; i++) {
60257+ curgid = gidlist[i];
60258+ if (globalreal == curgid)
60259+ break;
60260+ if (globaleffective == curgid)
60261+ break;
60262+ if (globalfs == curgid)
60263+ break;
60264+ }
60265+ /* not in deny list */
60266+ if (i == num) {
60267+ realok = 1;
60268+ effectiveok = 1;
60269+ fsok = 1;
60270+ }
60271+ }
60272+
60273+ if (realok && effectiveok && fsok)
60274+ return 0;
60275+ else {
60276+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
60277+ return 1;
60278+ }
60279+}
60280+
60281+extern int gr_acl_is_capable(const int cap);
60282+
60283+void
60284+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
60285+{
60286+ struct acl_role_label *role = task->role;
60287+ struct acl_subject_label *subj = NULL;
60288+ struct acl_object_label *obj;
60289+ struct file *filp;
60290+ uid_t uid;
60291+ gid_t gid;
60292+
60293+ if (unlikely(!(gr_status & GR_READY)))
60294+ return;
60295+
60296+ uid = GR_GLOBAL_UID(kuid);
60297+ gid = GR_GLOBAL_GID(kgid);
60298+
60299+ filp = task->exec_file;
60300+
60301+ /* kernel process, we'll give them the kernel role */
60302+ if (unlikely(!filp)) {
60303+ task->role = kernel_role;
60304+ task->acl = kernel_role->root_label;
60305+ return;
60306+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
60307+ role = lookup_acl_role_label(task, uid, gid);
60308+
60309+ /* don't change the role if we're not a privileged process */
60310+ if (role && task->role != role &&
60311+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
60312+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
60313+ return;
60314+
60315+ /* perform subject lookup in possibly new role
60316+ we can use this result below in the case where role == task->role
60317+ */
60318+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
60319+
60320+ /* if we changed uid/gid, but result in the same role
60321+ and are using inheritance, don't lose the inherited subject
60322+ if current subject is other than what normal lookup
60323+ would result in, we arrived via inheritance, don't
60324+ lose subject
60325+ */
60326+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
60327+ (subj == task->acl)))
60328+ task->acl = subj;
60329+
60330+ task->role = role;
60331+
60332+ task->is_writable = 0;
60333+
60334+ /* ignore additional mmap checks for processes that are writable
60335+ by the default ACL */
60336+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
60337+ if (unlikely(obj->mode & GR_WRITE))
60338+ task->is_writable = 1;
60339+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
60340+ if (unlikely(obj->mode & GR_WRITE))
60341+ task->is_writable = 1;
60342+
60343+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
60344+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
60345+#endif
60346+
60347+ gr_set_proc_res(task);
60348+
60349+ return;
60350+}
60351+
60352+int
60353+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
60354+ const int unsafe_flags)
60355+{
60356+ struct task_struct *task = current;
60357+ struct acl_subject_label *newacl;
60358+ struct acl_object_label *obj;
60359+ __u32 retmode;
60360+
60361+ if (unlikely(!(gr_status & GR_READY)))
60362+ return 0;
60363+
60364+ newacl = chk_subj_label(dentry, mnt, task->role);
60365+
60366+ /* special handling for if we did an strace -f -p <pid> from an admin role, where pid then
60367+ did an exec
60368+ */
60369+ rcu_read_lock();
60370+ read_lock(&tasklist_lock);
60371+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
60372+ (task->parent->acl->mode & GR_POVERRIDE))) {
60373+ read_unlock(&tasklist_lock);
60374+ rcu_read_unlock();
60375+ goto skip_check;
60376+ }
60377+ read_unlock(&tasklist_lock);
60378+ rcu_read_unlock();
60379+
60380+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
60381+ !(task->role->roletype & GR_ROLE_GOD) &&
60382+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
60383+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
60384+ if (unsafe_flags & LSM_UNSAFE_SHARE)
60385+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
60386+ else
60387+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
60388+ return -EACCES;
60389+ }
60390+
60391+skip_check:
60392+
60393+ obj = chk_obj_label(dentry, mnt, task->acl);
60394+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
60395+
60396+ if (!(task->acl->mode & GR_INHERITLEARN) &&
60397+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
60398+ if (obj->nested)
60399+ task->acl = obj->nested;
60400+ else
60401+ task->acl = newacl;
60402+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
60403+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
60404+
60405+ task->is_writable = 0;
60406+
60407+ /* ignore additional mmap checks for processes that are writable
60408+ by the default ACL */
60409+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
60410+ if (unlikely(obj->mode & GR_WRITE))
60411+ task->is_writable = 1;
60412+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
60413+ if (unlikely(obj->mode & GR_WRITE))
60414+ task->is_writable = 1;
60415+
60416+ gr_set_proc_res(task);
60417+
60418+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
60419+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
60420+#endif
60421+ return 0;
60422+}
60423+
60424+/* always called with valid inodev ptr */
60425+static void
60426+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
60427+{
60428+ struct acl_object_label *matchpo;
60429+ struct acl_subject_label *matchps;
60430+ struct acl_subject_label *subj;
60431+ struct acl_role_label *role;
60432+ unsigned int x;
60433+
60434+ FOR_EACH_ROLE_START(role)
60435+ FOR_EACH_SUBJECT_START(role, subj, x)
60436+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
60437+ matchpo->mode |= GR_DELETED;
60438+ FOR_EACH_SUBJECT_END(subj,x)
60439+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
60440+ /* nested subjects aren't in the role's subj_hash table */
60441+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
60442+ matchpo->mode |= GR_DELETED;
60443+ FOR_EACH_NESTED_SUBJECT_END(subj)
60444+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
60445+ matchps->mode |= GR_DELETED;
60446+ FOR_EACH_ROLE_END(role)
60447+
60448+ inodev->nentry->deleted = 1;
60449+
60450+ return;
60451+}
60452+
60453+void
60454+gr_handle_delete(const ino_t ino, const dev_t dev)
60455+{
60456+ struct inodev_entry *inodev;
60457+
60458+ if (unlikely(!(gr_status & GR_READY)))
60459+ return;
60460+
60461+ write_lock(&gr_inode_lock);
60462+ inodev = lookup_inodev_entry(ino, dev);
60463+ if (inodev != NULL)
60464+ do_handle_delete(inodev, ino, dev);
60465+ write_unlock(&gr_inode_lock);
60466+
60467+ return;
60468+}
60469+
60470+static void
60471+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
60472+ const ino_t newinode, const dev_t newdevice,
60473+ struct acl_subject_label *subj)
60474+{
60475+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
60476+ struct acl_object_label *match;
60477+
60478+ match = subj->obj_hash[index];
60479+
60480+ while (match && (match->inode != oldinode ||
60481+ match->device != olddevice ||
60482+ !(match->mode & GR_DELETED)))
60483+ match = match->next;
60484+
60485+ if (match && (match->inode == oldinode)
60486+ && (match->device == olddevice)
60487+ && (match->mode & GR_DELETED)) {
60488+ if (match->prev == NULL) {
60489+ subj->obj_hash[index] = match->next;
60490+ if (match->next != NULL)
60491+ match->next->prev = NULL;
60492+ } else {
60493+ match->prev->next = match->next;
60494+ if (match->next != NULL)
60495+ match->next->prev = match->prev;
60496+ }
60497+ match->prev = NULL;
60498+ match->next = NULL;
60499+ match->inode = newinode;
60500+ match->device = newdevice;
60501+ match->mode &= ~GR_DELETED;
60502+
60503+ insert_acl_obj_label(match, subj);
60504+ }
60505+
60506+ return;
60507+}
60508+
60509+static void
60510+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
60511+ const ino_t newinode, const dev_t newdevice,
60512+ struct acl_role_label *role)
60513+{
60514+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
60515+ struct acl_subject_label *match;
60516+
60517+ match = role->subj_hash[index];
60518+
60519+ while (match && (match->inode != oldinode ||
60520+ match->device != olddevice ||
60521+ !(match->mode & GR_DELETED)))
60522+ match = match->next;
60523+
60524+ if (match && (match->inode == oldinode)
60525+ && (match->device == olddevice)
60526+ && (match->mode & GR_DELETED)) {
60527+ if (match->prev == NULL) {
60528+ role->subj_hash[index] = match->next;
60529+ if (match->next != NULL)
60530+ match->next->prev = NULL;
60531+ } else {
60532+ match->prev->next = match->next;
60533+ if (match->next != NULL)
60534+ match->next->prev = match->prev;
60535+ }
60536+ match->prev = NULL;
60537+ match->next = NULL;
60538+ match->inode = newinode;
60539+ match->device = newdevice;
60540+ match->mode &= ~GR_DELETED;
60541+
60542+ insert_acl_subj_label(match, role);
60543+ }
60544+
60545+ return;
60546+}
60547+
60548+static void
60549+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
60550+ const ino_t newinode, const dev_t newdevice)
60551+{
60552+ unsigned int index = gr_fhash(oldinode, olddevice, inodev_set.i_size);
60553+ struct inodev_entry *match;
60554+
60555+ match = inodev_set.i_hash[index];
60556+
60557+ while (match && (match->nentry->inode != oldinode ||
60558+ match->nentry->device != olddevice || !match->nentry->deleted))
60559+ match = match->next;
60560+
60561+ if (match && (match->nentry->inode == oldinode)
60562+ && (match->nentry->device == olddevice) &&
60563+ match->nentry->deleted) {
60564+ if (match->prev == NULL) {
60565+ inodev_set.i_hash[index] = match->next;
60566+ if (match->next != NULL)
60567+ match->next->prev = NULL;
60568+ } else {
60569+ match->prev->next = match->next;
60570+ if (match->next != NULL)
60571+ match->next->prev = match->prev;
60572+ }
60573+ match->prev = NULL;
60574+ match->next = NULL;
60575+ match->nentry->inode = newinode;
60576+ match->nentry->device = newdevice;
60577+ match->nentry->deleted = 0;
60578+
60579+ insert_inodev_entry(match);
60580+ }
60581+
60582+ return;
60583+}
60584+
60585+static void
60586+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
60587+{
60588+ struct acl_subject_label *subj;
60589+ struct acl_role_label *role;
60590+ unsigned int x;
60591+
60592+ FOR_EACH_ROLE_START(role)
60593+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
60594+
60595+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
60596+ if ((subj->inode == ino) && (subj->device == dev)) {
60597+ subj->inode = ino;
60598+ subj->device = dev;
60599+ }
60600+ /* nested subjects aren't in the role's subj_hash table */
60601+ update_acl_obj_label(matchn->inode, matchn->device,
60602+ ino, dev, subj);
60603+ FOR_EACH_NESTED_SUBJECT_END(subj)
60604+ FOR_EACH_SUBJECT_START(role, subj, x)
60605+ update_acl_obj_label(matchn->inode, matchn->device,
60606+ ino, dev, subj);
60607+ FOR_EACH_SUBJECT_END(subj,x)
60608+ FOR_EACH_ROLE_END(role)
60609+
60610+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
60611+
60612+ return;
60613+}
60614+
60615+static void
60616+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
60617+ const struct vfsmount *mnt)
60618+{
60619+ ino_t ino = dentry->d_inode->i_ino;
60620+ dev_t dev = __get_dev(dentry);
60621+
60622+ __do_handle_create(matchn, ino, dev);
60623+
60624+ return;
60625+}
60626+
60627+void
60628+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
60629+{
60630+ struct name_entry *matchn;
60631+
60632+ if (unlikely(!(gr_status & GR_READY)))
60633+ return;
60634+
60635+ preempt_disable();
60636+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
60637+
60638+ if (unlikely((unsigned long)matchn)) {
60639+ write_lock(&gr_inode_lock);
60640+ do_handle_create(matchn, dentry, mnt);
60641+ write_unlock(&gr_inode_lock);
60642+ }
60643+ preempt_enable();
60644+
60645+ return;
60646+}
60647+
60648+void
60649+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
60650+{
60651+ struct name_entry *matchn;
60652+
60653+ if (unlikely(!(gr_status & GR_READY)))
60654+ return;
60655+
60656+ preempt_disable();
60657+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
60658+
60659+ if (unlikely((unsigned long)matchn)) {
60660+ write_lock(&gr_inode_lock);
60661+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
60662+ write_unlock(&gr_inode_lock);
60663+ }
60664+ preempt_enable();
60665+
60666+ return;
60667+}
60668+
60669+void
60670+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
60671+ struct dentry *old_dentry,
60672+ struct dentry *new_dentry,
60673+ struct vfsmount *mnt, const __u8 replace)
60674+{
60675+ struct name_entry *matchn;
60676+ struct inodev_entry *inodev;
60677+ struct inode *inode = new_dentry->d_inode;
60678+ ino_t old_ino = old_dentry->d_inode->i_ino;
60679+ dev_t old_dev = __get_dev(old_dentry);
60680+
60681+ /* vfs_rename swaps the name and parent link for old_dentry and
60682+ new_dentry
60683+ at this point, old_dentry has the new name, parent link, and inode
60684+ for the renamed file
60685+ if a file is being replaced by a rename, new_dentry has the inode
60686+ and name for the replaced file
60687+ */
60688+
60689+ if (unlikely(!(gr_status & GR_READY)))
60690+ return;
60691+
60692+ preempt_disable();
60693+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
60694+
60695+ /* we wouldn't have to check d_inode if it weren't for
60696+ NFS silly-renaming
60697+ */
60698+
60699+ write_lock(&gr_inode_lock);
60700+ if (unlikely(replace && inode)) {
60701+ ino_t new_ino = inode->i_ino;
60702+ dev_t new_dev = __get_dev(new_dentry);
60703+
60704+ inodev = lookup_inodev_entry(new_ino, new_dev);
60705+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
60706+ do_handle_delete(inodev, new_ino, new_dev);
60707+ }
60708+
60709+ inodev = lookup_inodev_entry(old_ino, old_dev);
60710+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
60711+ do_handle_delete(inodev, old_ino, old_dev);
60712+
60713+ if (unlikely((unsigned long)matchn))
60714+ do_handle_create(matchn, old_dentry, mnt);
60715+
60716+ write_unlock(&gr_inode_lock);
60717+ preempt_enable();
60718+
60719+ return;
60720+}
60721+
60722+static int
60723+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
60724+ unsigned char **sum)
60725+{
60726+ struct acl_role_label *r;
60727+ struct role_allowed_ip *ipp;
60728+ struct role_transition *trans;
60729+ unsigned int i;
60730+ int found = 0;
60731+ u32 curr_ip = current->signal->curr_ip;
60732+
60733+ current->signal->saved_ip = curr_ip;
60734+
60735+ /* check transition table */
60736+
60737+ for (trans = current->role->transitions; trans; trans = trans->next) {
60738+ if (!strcmp(rolename, trans->rolename)) {
60739+ found = 1;
60740+ break;
60741+ }
60742+ }
60743+
60744+ if (!found)
60745+ return 0;
60746+
60747+ /* handle special roles that do not require authentication
60748+ and check ip */
60749+
60750+ FOR_EACH_ROLE_START(r)
60751+ if (!strcmp(rolename, r->rolename) &&
60752+ (r->roletype & GR_ROLE_SPECIAL)) {
60753+ found = 0;
60754+ if (r->allowed_ips != NULL) {
60755+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
60756+ if ((ntohl(curr_ip) & ipp->netmask) ==
60757+ (ntohl(ipp->addr) & ipp->netmask))
60758+ found = 1;
60759+ }
60760+ } else
60761+ found = 2;
60762+ if (!found)
60763+ return 0;
60764+
60765+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
60766+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
60767+ *salt = NULL;
60768+ *sum = NULL;
60769+ return 1;
60770+ }
60771+ }
60772+ FOR_EACH_ROLE_END(r)
60773+
60774+ for (i = 0; i < num_sprole_pws; i++) {
60775+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
60776+ *salt = acl_special_roles[i]->salt;
60777+ *sum = acl_special_roles[i]->sum;
60778+ return 1;
60779+ }
60780+ }
60781+
60782+ return 0;
60783+}
60784+
60785+static void
60786+assign_special_role(char *rolename)
60787+{
60788+ struct acl_object_label *obj;
60789+ struct acl_role_label *r;
60790+ struct acl_role_label *assigned = NULL;
60791+ struct task_struct *tsk;
60792+ struct file *filp;
60793+
60794+ FOR_EACH_ROLE_START(r)
60795+ if (!strcmp(rolename, r->rolename) &&
60796+ (r->roletype & GR_ROLE_SPECIAL)) {
60797+ assigned = r;
60798+ break;
60799+ }
60800+ FOR_EACH_ROLE_END(r)
60801+
60802+ if (!assigned)
60803+ return;
60804+
60805+ read_lock(&tasklist_lock);
60806+ read_lock(&grsec_exec_file_lock);
60807+
60808+ tsk = current->real_parent;
60809+ if (tsk == NULL)
60810+ goto out_unlock;
60811+
60812+ filp = tsk->exec_file;
60813+ if (filp == NULL)
60814+ goto out_unlock;
60815+
60816+ tsk->is_writable = 0;
60817+
60818+ tsk->acl_sp_role = 1;
60819+ tsk->acl_role_id = ++acl_sp_role_value;
60820+ tsk->role = assigned;
60821+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
60822+
60823+ /* ignore additional mmap checks for processes that are writable
60824+ by the default ACL */
60825+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
60826+ if (unlikely(obj->mode & GR_WRITE))
60827+ tsk->is_writable = 1;
60828+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
60829+ if (unlikely(obj->mode & GR_WRITE))
60830+ tsk->is_writable = 1;
60831+
60832+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
60833+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
60834+#endif
60835+
60836+out_unlock:
60837+ read_unlock(&grsec_exec_file_lock);
60838+ read_unlock(&tasklist_lock);
60839+ return;
60840+}
60841+
60842+int gr_check_secure_terminal(struct task_struct *task)
60843+{
60844+ struct task_struct *p, *p2, *p3;
60845+ struct files_struct *files;
60846+ struct fdtable *fdt;
60847+ struct file *our_file = NULL, *file;
60848+ int i;
60849+
60850+ if (task->signal->tty == NULL)
60851+ return 1;
60852+
60853+ files = get_files_struct(task);
60854+ if (files != NULL) {
60855+ rcu_read_lock();
60856+ fdt = files_fdtable(files);
60857+ for (i=0; i < fdt->max_fds; i++) {
60858+ file = fcheck_files(files, i);
60859+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
60860+ get_file(file);
60861+ our_file = file;
60862+ }
60863+ }
60864+ rcu_read_unlock();
60865+ put_files_struct(files);
60866+ }
60867+
60868+ if (our_file == NULL)
60869+ return 1;
60870+
60871+ read_lock(&tasklist_lock);
60872+ do_each_thread(p2, p) {
60873+ files = get_files_struct(p);
60874+ if (files == NULL ||
60875+ (p->signal && p->signal->tty == task->signal->tty)) {
60876+ if (files != NULL)
60877+ put_files_struct(files);
60878+ continue;
60879+ }
60880+ rcu_read_lock();
60881+ fdt = files_fdtable(files);
60882+ for (i=0; i < fdt->max_fds; i++) {
60883+ file = fcheck_files(files, i);
60884+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
60885+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
60886+ p3 = task;
60887+ while (task_pid_nr(p3) > 0) {
60888+ if (p3 == p)
60889+ break;
60890+ p3 = p3->real_parent;
60891+ }
60892+ if (p3 == p)
60893+ break;
60894+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
60895+ gr_handle_alertkill(p);
60896+ rcu_read_unlock();
60897+ put_files_struct(files);
60898+ read_unlock(&tasklist_lock);
60899+ fput(our_file);
60900+ return 0;
60901+ }
60902+ }
60903+ rcu_read_unlock();
60904+ put_files_struct(files);
60905+ } while_each_thread(p2, p);
60906+ read_unlock(&tasklist_lock);
60907+
60908+ fput(our_file);
60909+ return 1;
60910+}
60911+
60912+static int gr_rbac_disable(void *unused)
60913+{
60914+ pax_open_kernel();
60915+ gr_status &= ~GR_READY;
60916+ pax_close_kernel();
60917+
60918+ return 0;
60919+}
60920+
60921+ssize_t
60922+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
60923+{
60924+ struct gr_arg_wrapper uwrap;
60925+ unsigned char *sprole_salt = NULL;
60926+ unsigned char *sprole_sum = NULL;
60927+ int error = sizeof (struct gr_arg_wrapper);
60928+ int error2 = 0;
60929+
60930+ mutex_lock(&gr_dev_mutex);
60931+
60932+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
60933+ error = -EPERM;
60934+ goto out;
60935+ }
60936+
60937+ if (count != sizeof (struct gr_arg_wrapper)) {
60938+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
60939+ error = -EINVAL;
60940+ goto out;
60941+ }
60942+
60943+
60944+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
60945+ gr_auth_expires = 0;
60946+ gr_auth_attempts = 0;
60947+ }
60948+
60949+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
60950+ error = -EFAULT;
60951+ goto out;
60952+ }
60953+
60954+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
60955+ error = -EINVAL;
60956+ goto out;
60957+ }
60958+
60959+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
60960+ error = -EFAULT;
60961+ goto out;
60962+ }
60963+
60964+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
60965+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
60966+ time_after(gr_auth_expires, get_seconds())) {
60967+ error = -EBUSY;
60968+ goto out;
60969+ }
60970+
60971+ /* if non-root trying to do anything other than use a special role,
60972+ do not attempt authentication, do not count towards authentication
60973+ locking
60974+ */
60975+
60976+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
60977+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
60978+ gr_is_global_nonroot(current_uid())) {
60979+ error = -EPERM;
60980+ goto out;
60981+ }
60982+
60983+ /* ensure pw and special role name are null terminated */
60984+
60985+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
60986+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
60987+
60988+ /* Okay.
60989+ * We have our enough of the argument structure..(we have yet
60990+ * to copy_from_user the tables themselves) . Copy the tables
60991+ * only if we need them, i.e. for loading operations. */
60992+
60993+ switch (gr_usermode->mode) {
60994+ case GR_STATUS:
60995+ if (gr_status & GR_READY) {
60996+ error = 1;
60997+ if (!gr_check_secure_terminal(current))
60998+ error = 3;
60999+ } else
61000+ error = 2;
61001+ goto out;
61002+ case GR_SHUTDOWN:
61003+ if ((gr_status & GR_READY)
61004+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
61005+ stop_machine(gr_rbac_disable, NULL, NULL);
61006+ free_variables();
61007+ memset(gr_usermode, 0, sizeof (struct gr_arg));
61008+ memset(gr_system_salt, 0, GR_SALT_LEN);
61009+ memset(gr_system_sum, 0, GR_SHA_LEN);
61010+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
61011+ } else if (gr_status & GR_READY) {
61012+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
61013+ error = -EPERM;
61014+ } else {
61015+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
61016+ error = -EAGAIN;
61017+ }
61018+ break;
61019+ case GR_ENABLE:
61020+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
61021+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
61022+ else {
61023+ if (gr_status & GR_READY)
61024+ error = -EAGAIN;
61025+ else
61026+ error = error2;
61027+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
61028+ }
61029+ break;
61030+ case GR_RELOAD:
61031+ if (!(gr_status & GR_READY)) {
61032+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
61033+ error = -EAGAIN;
61034+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
61035+ stop_machine(gr_rbac_disable, NULL, NULL);
61036+ free_variables();
61037+ error2 = gracl_init(gr_usermode);
61038+ if (!error2)
61039+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
61040+ else {
61041+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
61042+ error = error2;
61043+ }
61044+ } else {
61045+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
61046+ error = -EPERM;
61047+ }
61048+ break;
61049+ case GR_SEGVMOD:
61050+ if (unlikely(!(gr_status & GR_READY))) {
61051+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
61052+ error = -EAGAIN;
61053+ break;
61054+ }
61055+
61056+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
61057+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
61058+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
61059+ struct acl_subject_label *segvacl;
61060+ segvacl =
61061+ lookup_acl_subj_label(gr_usermode->segv_inode,
61062+ gr_usermode->segv_device,
61063+ current->role);
61064+ if (segvacl) {
61065+ segvacl->crashes = 0;
61066+ segvacl->expires = 0;
61067+ }
61068+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
61069+ gr_remove_uid(gr_usermode->segv_uid);
61070+ }
61071+ } else {
61072+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
61073+ error = -EPERM;
61074+ }
61075+ break;
61076+ case GR_SPROLE:
61077+ case GR_SPROLEPAM:
61078+ if (unlikely(!(gr_status & GR_READY))) {
61079+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
61080+ error = -EAGAIN;
61081+ break;
61082+ }
61083+
61084+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
61085+ current->role->expires = 0;
61086+ current->role->auth_attempts = 0;
61087+ }
61088+
61089+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
61090+ time_after(current->role->expires, get_seconds())) {
61091+ error = -EBUSY;
61092+ goto out;
61093+ }
61094+
61095+ if (lookup_special_role_auth
61096+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
61097+ && ((!sprole_salt && !sprole_sum)
61098+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
61099+ char *p = "";
61100+ assign_special_role(gr_usermode->sp_role);
61101+ read_lock(&tasklist_lock);
61102+ if (current->real_parent)
61103+ p = current->real_parent->role->rolename;
61104+ read_unlock(&tasklist_lock);
61105+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
61106+ p, acl_sp_role_value);
61107+ } else {
61108+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
61109+ error = -EPERM;
61110+ if(!(current->role->auth_attempts++))
61111+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
61112+
61113+ goto out;
61114+ }
61115+ break;
61116+ case GR_UNSPROLE:
61117+ if (unlikely(!(gr_status & GR_READY))) {
61118+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
61119+ error = -EAGAIN;
61120+ break;
61121+ }
61122+
61123+ if (current->role->roletype & GR_ROLE_SPECIAL) {
61124+ char *p = "";
61125+ int i = 0;
61126+
61127+ read_lock(&tasklist_lock);
61128+ if (current->real_parent) {
61129+ p = current->real_parent->role->rolename;
61130+ i = current->real_parent->acl_role_id;
61131+ }
61132+ read_unlock(&tasklist_lock);
61133+
61134+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
61135+ gr_set_acls(1);
61136+ } else {
61137+ error = -EPERM;
61138+ goto out;
61139+ }
61140+ break;
61141+ default:
61142+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
61143+ error = -EINVAL;
61144+ break;
61145+ }
61146+
61147+ if (error != -EPERM)
61148+ goto out;
61149+
61150+ if(!(gr_auth_attempts++))
61151+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
61152+
61153+ out:
61154+ mutex_unlock(&gr_dev_mutex);
61155+ return error;
61156+}
61157+
61158+/* must be called with
61159+ rcu_read_lock();
61160+ read_lock(&tasklist_lock);
61161+ read_lock(&grsec_exec_file_lock);
61162+*/
61163+int gr_apply_subject_to_task(struct task_struct *task)
61164+{
61165+ struct acl_object_label *obj;
61166+ char *tmpname;
61167+ struct acl_subject_label *tmpsubj;
61168+ struct file *filp;
61169+ struct name_entry *nmatch;
61170+
61171+ filp = task->exec_file;
61172+ if (filp == NULL)
61173+ return 0;
61174+
61175+ /* the following is to apply the correct subject
61176+ on binaries running when the RBAC system
61177+ is enabled, when the binaries have been
61178+ replaced or deleted since their execution
61179+ -----
61180+ when the RBAC system starts, the inode/dev
61181+ from exec_file will be one the RBAC system
61182+ is unaware of. It only knows the inode/dev
61183+ of the present file on disk, or the absence
61184+ of it.
61185+ */
61186+ preempt_disable();
61187+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
61188+
61189+ nmatch = lookup_name_entry(tmpname);
61190+ preempt_enable();
61191+ tmpsubj = NULL;
61192+ if (nmatch) {
61193+ if (nmatch->deleted)
61194+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
61195+ else
61196+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
61197+ if (tmpsubj != NULL)
61198+ task->acl = tmpsubj;
61199+ }
61200+ if (tmpsubj == NULL)
61201+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
61202+ task->role);
61203+ if (task->acl) {
61204+ task->is_writable = 0;
61205+ /* ignore additional mmap checks for processes that are writable
61206+ by the default ACL */
61207+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
61208+ if (unlikely(obj->mode & GR_WRITE))
61209+ task->is_writable = 1;
61210+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
61211+ if (unlikely(obj->mode & GR_WRITE))
61212+ task->is_writable = 1;
61213+
61214+ gr_set_proc_res(task);
61215+
61216+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
61217+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
61218+#endif
61219+ } else {
61220+ return 1;
61221+ }
61222+
61223+ return 0;
61224+}
61225+
61226+int
61227+gr_set_acls(const int type)
61228+{
61229+ struct task_struct *task, *task2;
61230+ struct acl_role_label *role = current->role;
61231+ __u16 acl_role_id = current->acl_role_id;
61232+ const struct cred *cred;
61233+ int ret;
61234+
61235+ rcu_read_lock();
61236+ read_lock(&tasklist_lock);
61237+ read_lock(&grsec_exec_file_lock);
61238+ do_each_thread(task2, task) {
61239+ /* check to see if we're called from the exit handler,
61240+ if so, only replace ACLs that have inherited the admin
61241+ ACL */
61242+
61243+ if (type && (task->role != role ||
61244+ task->acl_role_id != acl_role_id))
61245+ continue;
61246+
61247+ task->acl_role_id = 0;
61248+ task->acl_sp_role = 0;
61249+
61250+ if (task->exec_file) {
61251+ cred = __task_cred(task);
61252+ task->role = lookup_acl_role_label(task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
61253+ ret = gr_apply_subject_to_task(task);
61254+ if (ret) {
61255+ read_unlock(&grsec_exec_file_lock);
61256+ read_unlock(&tasklist_lock);
61257+ rcu_read_unlock();
61258+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
61259+ return ret;
61260+ }
61261+ } else {
61262+ // it's a kernel process
61263+ task->role = kernel_role;
61264+ task->acl = kernel_role->root_label;
61265+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
61266+ task->acl->mode &= ~GR_PROCFIND;
61267+#endif
61268+ }
61269+ } while_each_thread(task2, task);
61270+ read_unlock(&grsec_exec_file_lock);
61271+ read_unlock(&tasklist_lock);
61272+ rcu_read_unlock();
61273+
61274+ return 0;
61275+}
61276+
61277+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
61278+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
61279+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
61280+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
61281+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
61282+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
61283+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
61284+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
61285+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
61286+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
61287+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
61288+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
61289+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
61290+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
61291+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
61292+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
61293+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
61294+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
61295+};
61296+
61297+void
61298+gr_learn_resource(const struct task_struct *task,
61299+ const int res, const unsigned long wanted, const int gt)
61300+{
61301+ struct acl_subject_label *acl;
61302+ const struct cred *cred;
61303+
61304+ if (unlikely((gr_status & GR_READY) &&
61305+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
61306+ goto skip_reslog;
61307+
61308+ gr_log_resource(task, res, wanted, gt);
61309+skip_reslog:
61310+
61311+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
61312+ return;
61313+
61314+ acl = task->acl;
61315+
61316+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
61317+ !(acl->resmask & (1 << (unsigned short) res))))
61318+ return;
61319+
61320+ if (wanted >= acl->res[res].rlim_cur) {
61321+ unsigned long res_add;
61322+
61323+ res_add = wanted + res_learn_bumps[res];
61324+
61325+ acl->res[res].rlim_cur = res_add;
61326+
61327+ if (wanted > acl->res[res].rlim_max)
61328+ acl->res[res].rlim_max = res_add;
61329+
61330+ /* only log the subject filename, since resource logging is supported for
61331+ single-subject learning only */
61332+ rcu_read_lock();
61333+ cred = __task_cred(task);
61334+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
61335+ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
61336+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
61337+ "", (unsigned long) res, &task->signal->saved_ip);
61338+ rcu_read_unlock();
61339+ }
61340+
61341+ return;
61342+}
61343+EXPORT_SYMBOL(gr_learn_resource);
61344+#endif
61345+
61346+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
61347+void
61348+pax_set_initial_flags(struct linux_binprm *bprm)
61349+{
61350+ struct task_struct *task = current;
61351+ struct acl_subject_label *proc;
61352+ unsigned long flags;
61353+
61354+ if (unlikely(!(gr_status & GR_READY)))
61355+ return;
61356+
61357+ flags = pax_get_flags(task);
61358+
61359+ proc = task->acl;
61360+
61361+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
61362+ flags &= ~MF_PAX_PAGEEXEC;
61363+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
61364+ flags &= ~MF_PAX_SEGMEXEC;
61365+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
61366+ flags &= ~MF_PAX_RANDMMAP;
61367+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
61368+ flags &= ~MF_PAX_EMUTRAMP;
61369+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
61370+ flags &= ~MF_PAX_MPROTECT;
61371+
61372+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
61373+ flags |= MF_PAX_PAGEEXEC;
61374+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
61375+ flags |= MF_PAX_SEGMEXEC;
61376+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
61377+ flags |= MF_PAX_RANDMMAP;
61378+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
61379+ flags |= MF_PAX_EMUTRAMP;
61380+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
61381+ flags |= MF_PAX_MPROTECT;
61382+
61383+ pax_set_flags(task, flags);
61384+
61385+ return;
61386+}
61387+#endif
61388+
61389+int
61390+gr_handle_proc_ptrace(struct task_struct *task)
61391+{
61392+ struct file *filp;
61393+ struct task_struct *tmp = task;
61394+ struct task_struct *curtemp = current;
61395+ __u32 retmode;
61396+
61397+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
61398+ if (unlikely(!(gr_status & GR_READY)))
61399+ return 0;
61400+#endif
61401+
61402+ read_lock(&tasklist_lock);
61403+ read_lock(&grsec_exec_file_lock);
61404+ filp = task->exec_file;
61405+
61406+ while (task_pid_nr(tmp) > 0) {
61407+ if (tmp == curtemp)
61408+ break;
61409+ tmp = tmp->real_parent;
61410+ }
61411+
61412+ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
61413+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
61414+ read_unlock(&grsec_exec_file_lock);
61415+ read_unlock(&tasklist_lock);
61416+ return 1;
61417+ }
61418+
61419+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
61420+ if (!(gr_status & GR_READY)) {
61421+ read_unlock(&grsec_exec_file_lock);
61422+ read_unlock(&tasklist_lock);
61423+ return 0;
61424+ }
61425+#endif
61426+
61427+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
61428+ read_unlock(&grsec_exec_file_lock);
61429+ read_unlock(&tasklist_lock);
61430+
61431+ if (retmode & GR_NOPTRACE)
61432+ return 1;
61433+
61434+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
61435+ && (current->acl != task->acl || (current->acl != current->role->root_label
61436+ && task_pid_nr(current) != task_pid_nr(task))))
61437+ return 1;
61438+
61439+ return 0;
61440+}
61441+
61442+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
61443+{
61444+ if (unlikely(!(gr_status & GR_READY)))
61445+ return;
61446+
61447+ if (!(current->role->roletype & GR_ROLE_GOD))
61448+ return;
61449+
61450+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
61451+ p->role->rolename, gr_task_roletype_to_char(p),
61452+ p->acl->filename);
61453+}
61454+
61455+int
61456+gr_handle_ptrace(struct task_struct *task, const long request)
61457+{
61458+ struct task_struct *tmp = task;
61459+ struct task_struct *curtemp = current;
61460+ __u32 retmode;
61461+
61462+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
61463+ if (unlikely(!(gr_status & GR_READY)))
61464+ return 0;
61465+#endif
61466+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
61467+ read_lock(&tasklist_lock);
61468+ while (task_pid_nr(tmp) > 0) {
61469+ if (tmp == curtemp)
61470+ break;
61471+ tmp = tmp->real_parent;
61472+ }
61473+
61474+ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
61475+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
61476+ read_unlock(&tasklist_lock);
61477+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
61478+ return 1;
61479+ }
61480+ read_unlock(&tasklist_lock);
61481+ }
61482+
61483+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
61484+ if (!(gr_status & GR_READY))
61485+ return 0;
61486+#endif
61487+
61488+ read_lock(&grsec_exec_file_lock);
61489+ if (unlikely(!task->exec_file)) {
61490+ read_unlock(&grsec_exec_file_lock);
61491+ return 0;
61492+ }
61493+
61494+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
61495+ read_unlock(&grsec_exec_file_lock);
61496+
61497+ if (retmode & GR_NOPTRACE) {
61498+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
61499+ return 1;
61500+ }
61501+
61502+ if (retmode & GR_PTRACERD) {
61503+ switch (request) {
61504+ case PTRACE_SEIZE:
61505+ case PTRACE_POKETEXT:
61506+ case PTRACE_POKEDATA:
61507+ case PTRACE_POKEUSR:
61508+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
61509+ case PTRACE_SETREGS:
61510+ case PTRACE_SETFPREGS:
61511+#endif
61512+#ifdef CONFIG_X86
61513+ case PTRACE_SETFPXREGS:
61514+#endif
61515+#ifdef CONFIG_ALTIVEC
61516+ case PTRACE_SETVRREGS:
61517+#endif
61518+ return 1;
61519+ default:
61520+ return 0;
61521+ }
61522+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
61523+ !(current->role->roletype & GR_ROLE_GOD) &&
61524+ (current->acl != task->acl)) {
61525+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
61526+ return 1;
61527+ }
61528+
61529+ return 0;
61530+}
61531+
61532+static int is_writable_mmap(const struct file *filp)
61533+{
61534+ struct task_struct *task = current;
61535+ struct acl_object_label *obj, *obj2;
61536+
61537+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
61538+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
61539+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
61540+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
61541+ task->role->root_label);
61542+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
61543+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
61544+ return 1;
61545+ }
61546+ }
61547+ return 0;
61548+}
61549+
61550+int
61551+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
61552+{
61553+ __u32 mode;
61554+
61555+ if (unlikely(!file || !(prot & PROT_EXEC)))
61556+ return 1;
61557+
61558+ if (is_writable_mmap(file))
61559+ return 0;
61560+
61561+ mode =
61562+ gr_search_file(file->f_path.dentry,
61563+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
61564+ file->f_path.mnt);
61565+
61566+ if (!gr_tpe_allow(file))
61567+ return 0;
61568+
61569+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
61570+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
61571+ return 0;
61572+ } else if (unlikely(!(mode & GR_EXEC))) {
61573+ return 0;
61574+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
61575+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
61576+ return 1;
61577+ }
61578+
61579+ return 1;
61580+}
61581+
61582+int
61583+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
61584+{
61585+ __u32 mode;
61586+
61587+ if (unlikely(!file || !(prot & PROT_EXEC)))
61588+ return 1;
61589+
61590+ if (is_writable_mmap(file))
61591+ return 0;
61592+
61593+ mode =
61594+ gr_search_file(file->f_path.dentry,
61595+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
61596+ file->f_path.mnt);
61597+
61598+ if (!gr_tpe_allow(file))
61599+ return 0;
61600+
61601+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
61602+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
61603+ return 0;
61604+ } else if (unlikely(!(mode & GR_EXEC))) {
61605+ return 0;
61606+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
61607+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
61608+ return 1;
61609+ }
61610+
61611+ return 1;
61612+}
61613+
61614+void
61615+gr_acl_handle_psacct(struct task_struct *task, const long code)
61616+{
61617+ unsigned long runtime;
61618+ unsigned long cputime;
61619+ unsigned int wday, cday;
61620+ __u8 whr, chr;
61621+ __u8 wmin, cmin;
61622+ __u8 wsec, csec;
61623+ struct timespec timeval;
61624+
61625+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
61626+ !(task->acl->mode & GR_PROCACCT)))
61627+ return;
61628+
61629+ do_posix_clock_monotonic_gettime(&timeval);
61630+ runtime = timeval.tv_sec - task->start_time.tv_sec;
61631+ wday = runtime / (3600 * 24);
61632+ runtime -= wday * (3600 * 24);
61633+ whr = runtime / 3600;
61634+ runtime -= whr * 3600;
61635+ wmin = runtime / 60;
61636+ runtime -= wmin * 60;
61637+ wsec = runtime;
61638+
61639+ cputime = (task->utime + task->stime) / HZ;
61640+ cday = cputime / (3600 * 24);
61641+ cputime -= cday * (3600 * 24);
61642+ chr = cputime / 3600;
61643+ cputime -= chr * 3600;
61644+ cmin = cputime / 60;
61645+ cputime -= cmin * 60;
61646+ csec = cputime;
61647+
61648+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
61649+
61650+ return;
61651+}
61652+
61653+void gr_set_kernel_label(struct task_struct *task)
61654+{
61655+ if (gr_status & GR_READY) {
61656+ task->role = kernel_role;
61657+ task->acl = kernel_role->root_label;
61658+ }
61659+ return;
61660+}
61661+
61662+#ifdef CONFIG_TASKSTATS
61663+int gr_is_taskstats_denied(int pid)
61664+{
61665+ struct task_struct *task;
61666+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61667+ const struct cred *cred;
61668+#endif
61669+ int ret = 0;
61670+
61671+ /* restrict taskstats viewing to un-chrooted root users
61672+ who have the 'view' subject flag if the RBAC system is enabled
61673+ */
61674+
61675+ rcu_read_lock();
61676+ read_lock(&tasklist_lock);
61677+ task = find_task_by_vpid(pid);
61678+ if (task) {
61679+#ifdef CONFIG_GRKERNSEC_CHROOT
61680+ if (proc_is_chrooted(task))
61681+ ret = -EACCES;
61682+#endif
61683+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61684+ cred = __task_cred(task);
61685+#ifdef CONFIG_GRKERNSEC_PROC_USER
61686+ if (gr_is_global_nonroot(cred->uid))
61687+ ret = -EACCES;
61688+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61689+ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
61690+ ret = -EACCES;
61691+#endif
61692+#endif
61693+ if (gr_status & GR_READY) {
61694+ if (!(task->acl->mode & GR_VIEW))
61695+ ret = -EACCES;
61696+ }
61697+ } else
61698+ ret = -ENOENT;
61699+
61700+ read_unlock(&tasklist_lock);
61701+ rcu_read_unlock();
61702+
61703+ return ret;
61704+}
61705+#endif
61706+
61707+/* AUXV entries are filled via a descendant of search_binary_handler
61708+ after we've already applied the subject for the target
61709+*/
61710+int gr_acl_enable_at_secure(void)
61711+{
61712+ if (unlikely(!(gr_status & GR_READY)))
61713+ return 0;
61714+
61715+ if (current->acl->mode & GR_ATSECURE)
61716+ return 1;
61717+
61718+ return 0;
61719+}
61720+
61721+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
61722+{
61723+ struct task_struct *task = current;
61724+ struct dentry *dentry = file->f_path.dentry;
61725+ struct vfsmount *mnt = file->f_path.mnt;
61726+ struct acl_object_label *obj, *tmp;
61727+ struct acl_subject_label *subj;
61728+ unsigned int bufsize;
61729+ int is_not_root;
61730+ char *path;
61731+ dev_t dev = __get_dev(dentry);
61732+
61733+ if (unlikely(!(gr_status & GR_READY)))
61734+ return 1;
61735+
61736+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
61737+ return 1;
61738+
61739+ /* ignore Eric Biederman */
61740+ if (IS_PRIVATE(dentry->d_inode))
61741+ return 1;
61742+
61743+ subj = task->acl;
61744+ read_lock(&gr_inode_lock);
61745+ do {
61746+ obj = lookup_acl_obj_label(ino, dev, subj);
61747+ if (obj != NULL) {
61748+ read_unlock(&gr_inode_lock);
61749+ return (obj->mode & GR_FIND) ? 1 : 0;
61750+ }
61751+ } while ((subj = subj->parent_subject));
61752+ read_unlock(&gr_inode_lock);
61753+
61754+ /* this is purely an optimization since we're looking for an object
61755+ for the directory we're doing a readdir on
61756+ if it's possible for any globbed object to match the entry we're
61757+ filling into the directory, then the object we find here will be
61758+ an anchor point with attached globbed objects
61759+ */
61760+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
61761+ if (obj->globbed == NULL)
61762+ return (obj->mode & GR_FIND) ? 1 : 0;
61763+
61764+ is_not_root = ((obj->filename[0] == '/') &&
61765+ (obj->filename[1] == '\0')) ? 0 : 1;
61766+ bufsize = PAGE_SIZE - namelen - is_not_root;
61767+
61768+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
61769+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
61770+ return 1;
61771+
61772+ preempt_disable();
61773+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
61774+ bufsize);
61775+
61776+ bufsize = strlen(path);
61777+
61778+ /* if base is "/", don't append an additional slash */
61779+ if (is_not_root)
61780+ *(path + bufsize) = '/';
61781+ memcpy(path + bufsize + is_not_root, name, namelen);
61782+ *(path + bufsize + namelen + is_not_root) = '\0';
61783+
61784+ tmp = obj->globbed;
61785+ while (tmp) {
61786+ if (!glob_match(tmp->filename, path)) {
61787+ preempt_enable();
61788+ return (tmp->mode & GR_FIND) ? 1 : 0;
61789+ }
61790+ tmp = tmp->next;
61791+ }
61792+ preempt_enable();
61793+ return (obj->mode & GR_FIND) ? 1 : 0;
61794+}
61795+
61796+void gr_put_exec_file(struct task_struct *task)
61797+{
61798+ struct file *filp;
61799+
61800+ write_lock(&grsec_exec_file_lock);
61801+ filp = task->exec_file;
61802+ task->exec_file = NULL;
61803+ write_unlock(&grsec_exec_file_lock);
61804+
61805+ if (filp)
61806+ fput(filp);
61807+
61808+ return;
61809+}
61810+
61811+
61812+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
61813+EXPORT_SYMBOL(gr_acl_is_enabled);
61814+#endif
61815+EXPORT_SYMBOL(gr_set_kernel_label);
61816+#ifdef CONFIG_SECURITY
61817+EXPORT_SYMBOL(gr_check_user_change);
61818+EXPORT_SYMBOL(gr_check_group_change);
61819+#endif
61820+
61821diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
61822new file mode 100644
61823index 0000000..34fefda
61824--- /dev/null
61825+++ b/grsecurity/gracl_alloc.c
61826@@ -0,0 +1,105 @@
61827+#include <linux/kernel.h>
61828+#include <linux/mm.h>
61829+#include <linux/slab.h>
61830+#include <linux/vmalloc.h>
61831+#include <linux/gracl.h>
61832+#include <linux/grsecurity.h>
61833+
61834+static unsigned long alloc_stack_next = 1;
61835+static unsigned long alloc_stack_size = 1;
61836+static void **alloc_stack;
61837+
61838+static __inline__ int
61839+alloc_pop(void)
61840+{
61841+ if (alloc_stack_next == 1)
61842+ return 0;
61843+
61844+ kfree(alloc_stack[alloc_stack_next - 2]);
61845+
61846+ alloc_stack_next--;
61847+
61848+ return 1;
61849+}
61850+
61851+static __inline__ int
61852+alloc_push(void *buf)
61853+{
61854+ if (alloc_stack_next >= alloc_stack_size)
61855+ return 1;
61856+
61857+ alloc_stack[alloc_stack_next - 1] = buf;
61858+
61859+ alloc_stack_next++;
61860+
61861+ return 0;
61862+}
61863+
61864+void *
61865+acl_alloc(unsigned long len)
61866+{
61867+ void *ret = NULL;
61868+
61869+ if (!len || len > PAGE_SIZE)
61870+ goto out;
61871+
61872+ ret = kmalloc(len, GFP_KERNEL);
61873+
61874+ if (ret) {
61875+ if (alloc_push(ret)) {
61876+ kfree(ret);
61877+ ret = NULL;
61878+ }
61879+ }
61880+
61881+out:
61882+ return ret;
61883+}
61884+
61885+void *
61886+acl_alloc_num(unsigned long num, unsigned long len)
61887+{
61888+ if (!len || (num > (PAGE_SIZE / len)))
61889+ return NULL;
61890+
61891+ return acl_alloc(num * len);
61892+}
61893+
61894+void
61895+acl_free_all(void)
61896+{
61897+ if (gr_acl_is_enabled() || !alloc_stack)
61898+ return;
61899+
61900+ while (alloc_pop()) ;
61901+
61902+ if (alloc_stack) {
61903+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
61904+ kfree(alloc_stack);
61905+ else
61906+ vfree(alloc_stack);
61907+ }
61908+
61909+ alloc_stack = NULL;
61910+ alloc_stack_size = 1;
61911+ alloc_stack_next = 1;
61912+
61913+ return;
61914+}
61915+
61916+int
61917+acl_alloc_stack_init(unsigned long size)
61918+{
61919+ if ((size * sizeof (void *)) <= PAGE_SIZE)
61920+ alloc_stack =
61921+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
61922+ else
61923+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
61924+
61925+ alloc_stack_size = size;
61926+
61927+ if (!alloc_stack)
61928+ return 0;
61929+ else
61930+ return 1;
61931+}
61932diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
61933new file mode 100644
61934index 0000000..bdd51ea
61935--- /dev/null
61936+++ b/grsecurity/gracl_cap.c
61937@@ -0,0 +1,110 @@
61938+#include <linux/kernel.h>
61939+#include <linux/module.h>
61940+#include <linux/sched.h>
61941+#include <linux/gracl.h>
61942+#include <linux/grsecurity.h>
61943+#include <linux/grinternal.h>
61944+
61945+extern const char *captab_log[];
61946+extern int captab_log_entries;
61947+
61948+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
61949+{
61950+ struct acl_subject_label *curracl;
61951+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
61952+ kernel_cap_t cap_audit = __cap_empty_set;
61953+
61954+ if (!gr_acl_is_enabled())
61955+ return 1;
61956+
61957+ curracl = task->acl;
61958+
61959+ cap_drop = curracl->cap_lower;
61960+ cap_mask = curracl->cap_mask;
61961+ cap_audit = curracl->cap_invert_audit;
61962+
61963+ while ((curracl = curracl->parent_subject)) {
61964+ /* if the cap isn't specified in the current computed mask but is specified in the
61965+ current level subject, and is lowered in the current level subject, then add
61966+ it to the set of dropped capabilities
61967+ otherwise, add the current level subject's mask to the current computed mask
61968+ */
61969+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
61970+ cap_raise(cap_mask, cap);
61971+ if (cap_raised(curracl->cap_lower, cap))
61972+ cap_raise(cap_drop, cap);
61973+ if (cap_raised(curracl->cap_invert_audit, cap))
61974+ cap_raise(cap_audit, cap);
61975+ }
61976+ }
61977+
61978+ if (!cap_raised(cap_drop, cap)) {
61979+ if (cap_raised(cap_audit, cap))
61980+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
61981+ return 1;
61982+ }
61983+
61984+ curracl = task->acl;
61985+
61986+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
61987+ && cap_raised(cred->cap_effective, cap)) {
61988+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
61989+ task->role->roletype, GR_GLOBAL_UID(cred->uid),
61990+ GR_GLOBAL_GID(cred->gid), task->exec_file ?
61991+ gr_to_filename(task->exec_file->f_path.dentry,
61992+ task->exec_file->f_path.mnt) : curracl->filename,
61993+ curracl->filename, 0UL,
61994+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
61995+ return 1;
61996+ }
61997+
61998+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
61999+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
62000+
62001+ return 0;
62002+}
62003+
62004+int
62005+gr_acl_is_capable(const int cap)
62006+{
62007+ return gr_task_acl_is_capable(current, current_cred(), cap);
62008+}
62009+
62010+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
62011+{
62012+ struct acl_subject_label *curracl;
62013+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
62014+
62015+ if (!gr_acl_is_enabled())
62016+ return 1;
62017+
62018+ curracl = task->acl;
62019+
62020+ cap_drop = curracl->cap_lower;
62021+ cap_mask = curracl->cap_mask;
62022+
62023+ while ((curracl = curracl->parent_subject)) {
62024+ /* if the cap isn't specified in the current computed mask but is specified in the
62025+ current level subject, and is lowered in the current level subject, then add
62026+ it to the set of dropped capabilities
62027+ otherwise, add the current level subject's mask to the current computed mask
62028+ */
62029+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
62030+ cap_raise(cap_mask, cap);
62031+ if (cap_raised(curracl->cap_lower, cap))
62032+ cap_raise(cap_drop, cap);
62033+ }
62034+ }
62035+
62036+ if (!cap_raised(cap_drop, cap))
62037+ return 1;
62038+
62039+ return 0;
62040+}
62041+
62042+int
62043+gr_acl_is_capable_nolog(const int cap)
62044+{
62045+ return gr_task_acl_is_capable_nolog(current, cap);
62046+}
62047+
62048diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
62049new file mode 100644
62050index 0000000..a340c17
62051--- /dev/null
62052+++ b/grsecurity/gracl_fs.c
62053@@ -0,0 +1,431 @@
62054+#include <linux/kernel.h>
62055+#include <linux/sched.h>
62056+#include <linux/types.h>
62057+#include <linux/fs.h>
62058+#include <linux/file.h>
62059+#include <linux/stat.h>
62060+#include <linux/grsecurity.h>
62061+#include <linux/grinternal.h>
62062+#include <linux/gracl.h>
62063+
62064+umode_t
62065+gr_acl_umask(void)
62066+{
62067+ if (unlikely(!gr_acl_is_enabled()))
62068+ return 0;
62069+
62070+ return current->role->umask;
62071+}
62072+
62073+__u32
62074+gr_acl_handle_hidden_file(const struct dentry * dentry,
62075+ const struct vfsmount * mnt)
62076+{
62077+ __u32 mode;
62078+
62079+ if (unlikely(!dentry->d_inode))
62080+ return GR_FIND;
62081+
62082+ mode =
62083+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
62084+
62085+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
62086+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
62087+ return mode;
62088+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
62089+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
62090+ return 0;
62091+ } else if (unlikely(!(mode & GR_FIND)))
62092+ return 0;
62093+
62094+ return GR_FIND;
62095+}
62096+
62097+__u32
62098+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
62099+ int acc_mode)
62100+{
62101+ __u32 reqmode = GR_FIND;
62102+ __u32 mode;
62103+
62104+ if (unlikely(!dentry->d_inode))
62105+ return reqmode;
62106+
62107+ if (acc_mode & MAY_APPEND)
62108+ reqmode |= GR_APPEND;
62109+ else if (acc_mode & MAY_WRITE)
62110+ reqmode |= GR_WRITE;
62111+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
62112+ reqmode |= GR_READ;
62113+
62114+ mode =
62115+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
62116+ mnt);
62117+
62118+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
62119+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
62120+ reqmode & GR_READ ? " reading" : "",
62121+ reqmode & GR_WRITE ? " writing" : reqmode &
62122+ GR_APPEND ? " appending" : "");
62123+ return reqmode;
62124+ } else
62125+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
62126+ {
62127+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
62128+ reqmode & GR_READ ? " reading" : "",
62129+ reqmode & GR_WRITE ? " writing" : reqmode &
62130+ GR_APPEND ? " appending" : "");
62131+ return 0;
62132+ } else if (unlikely((mode & reqmode) != reqmode))
62133+ return 0;
62134+
62135+ return reqmode;
62136+}
62137+
62138+__u32
62139+gr_acl_handle_creat(const struct dentry * dentry,
62140+ const struct dentry * p_dentry,
62141+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
62142+ const int imode)
62143+{
62144+ __u32 reqmode = GR_WRITE | GR_CREATE;
62145+ __u32 mode;
62146+
62147+ if (acc_mode & MAY_APPEND)
62148+ reqmode |= GR_APPEND;
62149+ // if a directory was required or the directory already exists, then
62150+ // don't count this open as a read
62151+ if ((acc_mode & MAY_READ) &&
62152+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
62153+ reqmode |= GR_READ;
62154+ if ((open_flags & O_CREAT) &&
62155+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
62156+ reqmode |= GR_SETID;
62157+
62158+ mode =
62159+ gr_check_create(dentry, p_dentry, p_mnt,
62160+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
62161+
62162+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
62163+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
62164+ reqmode & GR_READ ? " reading" : "",
62165+ reqmode & GR_WRITE ? " writing" : reqmode &
62166+ GR_APPEND ? " appending" : "");
62167+ return reqmode;
62168+ } else
62169+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
62170+ {
62171+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
62172+ reqmode & GR_READ ? " reading" : "",
62173+ reqmode & GR_WRITE ? " writing" : reqmode &
62174+ GR_APPEND ? " appending" : "");
62175+ return 0;
62176+ } else if (unlikely((mode & reqmode) != reqmode))
62177+ return 0;
62178+
62179+ return reqmode;
62180+}
62181+
62182+__u32
62183+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
62184+ const int fmode)
62185+{
62186+ __u32 mode, reqmode = GR_FIND;
62187+
62188+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
62189+ reqmode |= GR_EXEC;
62190+ if (fmode & S_IWOTH)
62191+ reqmode |= GR_WRITE;
62192+ if (fmode & S_IROTH)
62193+ reqmode |= GR_READ;
62194+
62195+ mode =
62196+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
62197+ mnt);
62198+
62199+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
62200+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
62201+ reqmode & GR_READ ? " reading" : "",
62202+ reqmode & GR_WRITE ? " writing" : "",
62203+ reqmode & GR_EXEC ? " executing" : "");
62204+ return reqmode;
62205+ } else
62206+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
62207+ {
62208+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
62209+ reqmode & GR_READ ? " reading" : "",
62210+ reqmode & GR_WRITE ? " writing" : "",
62211+ reqmode & GR_EXEC ? " executing" : "");
62212+ return 0;
62213+ } else if (unlikely((mode & reqmode) != reqmode))
62214+ return 0;
62215+
62216+ return reqmode;
62217+}
62218+
62219+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
62220+{
62221+ __u32 mode;
62222+
62223+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
62224+
62225+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
62226+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
62227+ return mode;
62228+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
62229+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
62230+ return 0;
62231+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
62232+ return 0;
62233+
62234+ return (reqmode);
62235+}
62236+
62237+__u32
62238+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
62239+{
62240+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
62241+}
62242+
62243+__u32
62244+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
62245+{
62246+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
62247+}
62248+
62249+__u32
62250+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
62251+{
62252+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
62253+}
62254+
62255+__u32
62256+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
62257+{
62258+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
62259+}
62260+
62261+__u32
62262+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
62263+ umode_t *modeptr)
62264+{
62265+ umode_t mode;
62266+
62267+ *modeptr &= ~gr_acl_umask();
62268+ mode = *modeptr;
62269+
62270+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
62271+ return 1;
62272+
62273+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
62274+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
62275+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
62276+ GR_CHMOD_ACL_MSG);
62277+ } else {
62278+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
62279+ }
62280+}
62281+
62282+__u32
62283+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
62284+{
62285+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
62286+}
62287+
62288+__u32
62289+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
62290+{
62291+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
62292+}
62293+
62294+__u32
62295+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
62296+{
62297+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
62298+}
62299+
62300+__u32
62301+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
62302+{
62303+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
62304+ GR_UNIXCONNECT_ACL_MSG);
62305+}
62306+
62307+/* hardlinks require at minimum create and link permission,
62308+ any additional privilege required is based on the
62309+ privilege of the file being linked to
62310+*/
62311+__u32
62312+gr_acl_handle_link(const struct dentry * new_dentry,
62313+ const struct dentry * parent_dentry,
62314+ const struct vfsmount * parent_mnt,
62315+ const struct dentry * old_dentry,
62316+ const struct vfsmount * old_mnt, const struct filename *to)
62317+{
62318+ __u32 mode;
62319+ __u32 needmode = GR_CREATE | GR_LINK;
62320+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
62321+
62322+ mode =
62323+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
62324+ old_mnt);
62325+
62326+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
62327+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
62328+ return mode;
62329+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
62330+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
62331+ return 0;
62332+ } else if (unlikely((mode & needmode) != needmode))
62333+ return 0;
62334+
62335+ return 1;
62336+}
62337+
62338+__u32
62339+gr_acl_handle_symlink(const struct dentry * new_dentry,
62340+ const struct dentry * parent_dentry,
62341+ const struct vfsmount * parent_mnt, const struct filename *from)
62342+{
62343+ __u32 needmode = GR_WRITE | GR_CREATE;
62344+ __u32 mode;
62345+
62346+ mode =
62347+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
62348+ GR_CREATE | GR_AUDIT_CREATE |
62349+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
62350+
62351+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
62352+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
62353+ return mode;
62354+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
62355+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
62356+ return 0;
62357+ } else if (unlikely((mode & needmode) != needmode))
62358+ return 0;
62359+
62360+ return (GR_WRITE | GR_CREATE);
62361+}
62362+
62363+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
62364+{
62365+ __u32 mode;
62366+
62367+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
62368+
62369+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
62370+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
62371+ return mode;
62372+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
62373+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
62374+ return 0;
62375+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
62376+ return 0;
62377+
62378+ return (reqmode);
62379+}
62380+
62381+__u32
62382+gr_acl_handle_mknod(const struct dentry * new_dentry,
62383+ const struct dentry * parent_dentry,
62384+ const struct vfsmount * parent_mnt,
62385+ const int mode)
62386+{
62387+ __u32 reqmode = GR_WRITE | GR_CREATE;
62388+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
62389+ reqmode |= GR_SETID;
62390+
62391+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
62392+ reqmode, GR_MKNOD_ACL_MSG);
62393+}
62394+
62395+__u32
62396+gr_acl_handle_mkdir(const struct dentry *new_dentry,
62397+ const struct dentry *parent_dentry,
62398+ const struct vfsmount *parent_mnt)
62399+{
62400+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
62401+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
62402+}
62403+
62404+#define RENAME_CHECK_SUCCESS(old, new) \
62405+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
62406+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
62407+
62408+int
62409+gr_acl_handle_rename(struct dentry *new_dentry,
62410+ struct dentry *parent_dentry,
62411+ const struct vfsmount *parent_mnt,
62412+ struct dentry *old_dentry,
62413+ struct inode *old_parent_inode,
62414+ struct vfsmount *old_mnt, const struct filename *newname)
62415+{
62416+ __u32 comp1, comp2;
62417+ int error = 0;
62418+
62419+ if (unlikely(!gr_acl_is_enabled()))
62420+ return 0;
62421+
62422+ if (!new_dentry->d_inode) {
62423+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
62424+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
62425+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
62426+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
62427+ GR_DELETE | GR_AUDIT_DELETE |
62428+ GR_AUDIT_READ | GR_AUDIT_WRITE |
62429+ GR_SUPPRESS, old_mnt);
62430+ } else {
62431+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
62432+ GR_CREATE | GR_DELETE |
62433+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
62434+ GR_AUDIT_READ | GR_AUDIT_WRITE |
62435+ GR_SUPPRESS, parent_mnt);
62436+ comp2 =
62437+ gr_search_file(old_dentry,
62438+ GR_READ | GR_WRITE | GR_AUDIT_READ |
62439+ GR_DELETE | GR_AUDIT_DELETE |
62440+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
62441+ }
62442+
62443+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
62444+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
62445+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
62446+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
62447+ && !(comp2 & GR_SUPPRESS)) {
62448+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
62449+ error = -EACCES;
62450+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
62451+ error = -EACCES;
62452+
62453+ return error;
62454+}
62455+
62456+void
62457+gr_acl_handle_exit(void)
62458+{
62459+ u16 id;
62460+ char *rolename;
62461+
62462+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
62463+ !(current->role->roletype & GR_ROLE_PERSIST))) {
62464+ id = current->acl_role_id;
62465+ rolename = current->role->rolename;
62466+ gr_set_acls(1);
62467+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
62468+ }
62469+
62470+ gr_put_exec_file(current);
62471+ return;
62472+}
62473+
62474+int
62475+gr_acl_handle_procpidmem(const struct task_struct *task)
62476+{
62477+ if (unlikely(!gr_acl_is_enabled()))
62478+ return 0;
62479+
62480+ if (task != current && task->acl->mode & GR_PROTPROCFD)
62481+ return -EACCES;
62482+
62483+ return 0;
62484+}
62485diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
62486new file mode 100644
62487index 0000000..4699807
62488--- /dev/null
62489+++ b/grsecurity/gracl_ip.c
62490@@ -0,0 +1,384 @@
62491+#include <linux/kernel.h>
62492+#include <asm/uaccess.h>
62493+#include <asm/errno.h>
62494+#include <net/sock.h>
62495+#include <linux/file.h>
62496+#include <linux/fs.h>
62497+#include <linux/net.h>
62498+#include <linux/in.h>
62499+#include <linux/skbuff.h>
62500+#include <linux/ip.h>
62501+#include <linux/udp.h>
62502+#include <linux/types.h>
62503+#include <linux/sched.h>
62504+#include <linux/netdevice.h>
62505+#include <linux/inetdevice.h>
62506+#include <linux/gracl.h>
62507+#include <linux/grsecurity.h>
62508+#include <linux/grinternal.h>
62509+
62510+#define GR_BIND 0x01
62511+#define GR_CONNECT 0x02
62512+#define GR_INVERT 0x04
62513+#define GR_BINDOVERRIDE 0x08
62514+#define GR_CONNECTOVERRIDE 0x10
62515+#define GR_SOCK_FAMILY 0x20
62516+
62517+static const char * gr_protocols[IPPROTO_MAX] = {
62518+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
62519+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
62520+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
62521+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
62522+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
62523+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
62524+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
62525+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
62526+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
62527+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
62528+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
62529+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
62530+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
62531+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
62532+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
62533+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
62534+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
62535+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
62536+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
62537+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
62538+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
62539+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
62540+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
62541+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
62542+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
62543+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
62544+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
62545+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
62546+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
62547+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
62548+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
62549+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
62550+ };
62551+
62552+static const char * gr_socktypes[SOCK_MAX] = {
62553+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
62554+ "unknown:7", "unknown:8", "unknown:9", "packet"
62555+ };
62556+
62557+static const char * gr_sockfamilies[AF_MAX+1] = {
62558+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
62559+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
62560+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
62561+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
62562+ };
62563+
62564+const char *
62565+gr_proto_to_name(unsigned char proto)
62566+{
62567+ return gr_protocols[proto];
62568+}
62569+
62570+const char *
62571+gr_socktype_to_name(unsigned char type)
62572+{
62573+ return gr_socktypes[type];
62574+}
62575+
62576+const char *
62577+gr_sockfamily_to_name(unsigned char family)
62578+{
62579+ return gr_sockfamilies[family];
62580+}
62581+
62582+int
62583+gr_search_socket(const int domain, const int type, const int protocol)
62584+{
62585+ struct acl_subject_label *curr;
62586+ const struct cred *cred = current_cred();
62587+
62588+ if (unlikely(!gr_acl_is_enabled()))
62589+ goto exit;
62590+
62591+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
62592+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
62593+ goto exit; // let the kernel handle it
62594+
62595+ curr = current->acl;
62596+
62597+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
62598+ /* the family is allowed, if this is PF_INET allow it only if
62599+ the extra sock type/protocol checks pass */
62600+ if (domain == PF_INET)
62601+ goto inet_check;
62602+ goto exit;
62603+ } else {
62604+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
62605+ __u32 fakeip = 0;
62606+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
62607+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
62608+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
62609+ gr_to_filename(current->exec_file->f_path.dentry,
62610+ current->exec_file->f_path.mnt) :
62611+ curr->filename, curr->filename,
62612+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
62613+ &current->signal->saved_ip);
62614+ goto exit;
62615+ }
62616+ goto exit_fail;
62617+ }
62618+
62619+inet_check:
62620+ /* the rest of this checking is for IPv4 only */
62621+ if (!curr->ips)
62622+ goto exit;
62623+
62624+ if ((curr->ip_type & (1 << type)) &&
62625+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
62626+ goto exit;
62627+
62628+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
62629+ /* we don't place acls on raw sockets , and sometimes
62630+ dgram/ip sockets are opened for ioctl and not
62631+ bind/connect, so we'll fake a bind learn log */
62632+ if (type == SOCK_RAW || type == SOCK_PACKET) {
62633+ __u32 fakeip = 0;
62634+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
62635+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
62636+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
62637+ gr_to_filename(current->exec_file->f_path.dentry,
62638+ current->exec_file->f_path.mnt) :
62639+ curr->filename, curr->filename,
62640+ &fakeip, 0, type,
62641+ protocol, GR_CONNECT, &current->signal->saved_ip);
62642+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
62643+ __u32 fakeip = 0;
62644+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
62645+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
62646+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
62647+ gr_to_filename(current->exec_file->f_path.dentry,
62648+ current->exec_file->f_path.mnt) :
62649+ curr->filename, curr->filename,
62650+ &fakeip, 0, type,
62651+ protocol, GR_BIND, &current->signal->saved_ip);
62652+ }
62653+ /* we'll log when they use connect or bind */
62654+ goto exit;
62655+ }
62656+
62657+exit_fail:
62658+ if (domain == PF_INET)
62659+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
62660+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
62661+ else
62662+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
62663+ gr_socktype_to_name(type), protocol);
62664+
62665+ return 0;
62666+exit:
62667+ return 1;
62668+}
62669+
62670+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
62671+{
62672+ if ((ip->mode & mode) &&
62673+ (ip_port >= ip->low) &&
62674+ (ip_port <= ip->high) &&
62675+ ((ntohl(ip_addr) & our_netmask) ==
62676+ (ntohl(our_addr) & our_netmask))
62677+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
62678+ && (ip->type & (1 << type))) {
62679+ if (ip->mode & GR_INVERT)
62680+ return 2; // specifically denied
62681+ else
62682+ return 1; // allowed
62683+ }
62684+
62685+ return 0; // not specifically allowed, may continue parsing
62686+}
62687+
62688+static int
62689+gr_search_connectbind(const int full_mode, struct sock *sk,
62690+ struct sockaddr_in *addr, const int type)
62691+{
62692+ char iface[IFNAMSIZ] = {0};
62693+ struct acl_subject_label *curr;
62694+ struct acl_ip_label *ip;
62695+ struct inet_sock *isk;
62696+ struct net_device *dev;
62697+ struct in_device *idev;
62698+ unsigned long i;
62699+ int ret;
62700+ int mode = full_mode & (GR_BIND | GR_CONNECT);
62701+ __u32 ip_addr = 0;
62702+ __u32 our_addr;
62703+ __u32 our_netmask;
62704+ char *p;
62705+ __u16 ip_port = 0;
62706+ const struct cred *cred = current_cred();
62707+
62708+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
62709+ return 0;
62710+
62711+ curr = current->acl;
62712+ isk = inet_sk(sk);
62713+
62714+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
62715+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
62716+ addr->sin_addr.s_addr = curr->inaddr_any_override;
62717+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
62718+ struct sockaddr_in saddr;
62719+ int err;
62720+
62721+ saddr.sin_family = AF_INET;
62722+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
62723+ saddr.sin_port = isk->inet_sport;
62724+
62725+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
62726+ if (err)
62727+ return err;
62728+
62729+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
62730+ if (err)
62731+ return err;
62732+ }
62733+
62734+ if (!curr->ips)
62735+ return 0;
62736+
62737+ ip_addr = addr->sin_addr.s_addr;
62738+ ip_port = ntohs(addr->sin_port);
62739+
62740+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
62741+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
62742+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
62743+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
62744+ gr_to_filename(current->exec_file->f_path.dentry,
62745+ current->exec_file->f_path.mnt) :
62746+ curr->filename, curr->filename,
62747+ &ip_addr, ip_port, type,
62748+ sk->sk_protocol, mode, &current->signal->saved_ip);
62749+ return 0;
62750+ }
62751+
62752+ for (i = 0; i < curr->ip_num; i++) {
62753+ ip = *(curr->ips + i);
62754+ if (ip->iface != NULL) {
62755+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
62756+ p = strchr(iface, ':');
62757+ if (p != NULL)
62758+ *p = '\0';
62759+ dev = dev_get_by_name(sock_net(sk), iface);
62760+ if (dev == NULL)
62761+ continue;
62762+ idev = in_dev_get(dev);
62763+ if (idev == NULL) {
62764+ dev_put(dev);
62765+ continue;
62766+ }
62767+ rcu_read_lock();
62768+ for_ifa(idev) {
62769+ if (!strcmp(ip->iface, ifa->ifa_label)) {
62770+ our_addr = ifa->ifa_address;
62771+ our_netmask = 0xffffffff;
62772+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
62773+ if (ret == 1) {
62774+ rcu_read_unlock();
62775+ in_dev_put(idev);
62776+ dev_put(dev);
62777+ return 0;
62778+ } else if (ret == 2) {
62779+ rcu_read_unlock();
62780+ in_dev_put(idev);
62781+ dev_put(dev);
62782+ goto denied;
62783+ }
62784+ }
62785+ } endfor_ifa(idev);
62786+ rcu_read_unlock();
62787+ in_dev_put(idev);
62788+ dev_put(dev);
62789+ } else {
62790+ our_addr = ip->addr;
62791+ our_netmask = ip->netmask;
62792+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
62793+ if (ret == 1)
62794+ return 0;
62795+ else if (ret == 2)
62796+ goto denied;
62797+ }
62798+ }
62799+
62800+denied:
62801+ if (mode == GR_BIND)
62802+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
62803+ else if (mode == GR_CONNECT)
62804+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
62805+
62806+ return -EACCES;
62807+}
62808+
62809+int
62810+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
62811+{
62812+ /* always allow disconnection of dgram sockets with connect */
62813+ if (addr->sin_family == AF_UNSPEC)
62814+ return 0;
62815+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
62816+}
62817+
62818+int
62819+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
62820+{
62821+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
62822+}
62823+
62824+int gr_search_listen(struct socket *sock)
62825+{
62826+ struct sock *sk = sock->sk;
62827+ struct sockaddr_in addr;
62828+
62829+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
62830+ addr.sin_port = inet_sk(sk)->inet_sport;
62831+
62832+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
62833+}
62834+
62835+int gr_search_accept(struct socket *sock)
62836+{
62837+ struct sock *sk = sock->sk;
62838+ struct sockaddr_in addr;
62839+
62840+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
62841+ addr.sin_port = inet_sk(sk)->inet_sport;
62842+
62843+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
62844+}
62845+
62846+int
62847+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
62848+{
62849+ if (addr)
62850+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
62851+ else {
62852+ struct sockaddr_in sin;
62853+ const struct inet_sock *inet = inet_sk(sk);
62854+
62855+ sin.sin_addr.s_addr = inet->inet_daddr;
62856+ sin.sin_port = inet->inet_dport;
62857+
62858+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
62859+ }
62860+}
62861+
62862+int
62863+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
62864+{
62865+ struct sockaddr_in sin;
62866+
62867+ if (unlikely(skb->len < sizeof (struct udphdr)))
62868+ return 0; // skip this packet
62869+
62870+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
62871+ sin.sin_port = udp_hdr(skb)->source;
62872+
62873+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
62874+}
62875diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
62876new file mode 100644
62877index 0000000..25f54ef
62878--- /dev/null
62879+++ b/grsecurity/gracl_learn.c
62880@@ -0,0 +1,207 @@
62881+#include <linux/kernel.h>
62882+#include <linux/mm.h>
62883+#include <linux/sched.h>
62884+#include <linux/poll.h>
62885+#include <linux/string.h>
62886+#include <linux/file.h>
62887+#include <linux/types.h>
62888+#include <linux/vmalloc.h>
62889+#include <linux/grinternal.h>
62890+
62891+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
62892+ size_t count, loff_t *ppos);
62893+extern int gr_acl_is_enabled(void);
62894+
62895+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
62896+static int gr_learn_attached;
62897+
62898+/* use a 512k buffer */
62899+#define LEARN_BUFFER_SIZE (512 * 1024)
62900+
62901+static DEFINE_SPINLOCK(gr_learn_lock);
62902+static DEFINE_MUTEX(gr_learn_user_mutex);
62903+
62904+/* we need to maintain two buffers, so that the kernel context of grlearn
62905+ uses a semaphore around the userspace copying, and the other kernel contexts
62906+ use a spinlock when copying into the buffer, since they cannot sleep
62907+*/
62908+static char *learn_buffer;
62909+static char *learn_buffer_user;
62910+static int learn_buffer_len;
62911+static int learn_buffer_user_len;
62912+
62913+static ssize_t
62914+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
62915+{
62916+ DECLARE_WAITQUEUE(wait, current);
62917+ ssize_t retval = 0;
62918+
62919+ add_wait_queue(&learn_wait, &wait);
62920+ set_current_state(TASK_INTERRUPTIBLE);
62921+ do {
62922+ mutex_lock(&gr_learn_user_mutex);
62923+ spin_lock(&gr_learn_lock);
62924+ if (learn_buffer_len)
62925+ break;
62926+ spin_unlock(&gr_learn_lock);
62927+ mutex_unlock(&gr_learn_user_mutex);
62928+ if (file->f_flags & O_NONBLOCK) {
62929+ retval = -EAGAIN;
62930+ goto out;
62931+ }
62932+ if (signal_pending(current)) {
62933+ retval = -ERESTARTSYS;
62934+ goto out;
62935+ }
62936+
62937+ schedule();
62938+ } while (1);
62939+
62940+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
62941+ learn_buffer_user_len = learn_buffer_len;
62942+ retval = learn_buffer_len;
62943+ learn_buffer_len = 0;
62944+
62945+ spin_unlock(&gr_learn_lock);
62946+
62947+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
62948+ retval = -EFAULT;
62949+
62950+ mutex_unlock(&gr_learn_user_mutex);
62951+out:
62952+ set_current_state(TASK_RUNNING);
62953+ remove_wait_queue(&learn_wait, &wait);
62954+ return retval;
62955+}
62956+
62957+static unsigned int
62958+poll_learn(struct file * file, poll_table * wait)
62959+{
62960+ poll_wait(file, &learn_wait, wait);
62961+
62962+ if (learn_buffer_len)
62963+ return (POLLIN | POLLRDNORM);
62964+
62965+ return 0;
62966+}
62967+
62968+void
62969+gr_clear_learn_entries(void)
62970+{
62971+ char *tmp;
62972+
62973+ mutex_lock(&gr_learn_user_mutex);
62974+ spin_lock(&gr_learn_lock);
62975+ tmp = learn_buffer;
62976+ learn_buffer = NULL;
62977+ spin_unlock(&gr_learn_lock);
62978+ if (tmp)
62979+ vfree(tmp);
62980+ if (learn_buffer_user != NULL) {
62981+ vfree(learn_buffer_user);
62982+ learn_buffer_user = NULL;
62983+ }
62984+ learn_buffer_len = 0;
62985+ mutex_unlock(&gr_learn_user_mutex);
62986+
62987+ return;
62988+}
62989+
62990+void
62991+gr_add_learn_entry(const char *fmt, ...)
62992+{
62993+ va_list args;
62994+ unsigned int len;
62995+
62996+ if (!gr_learn_attached)
62997+ return;
62998+
62999+ spin_lock(&gr_learn_lock);
63000+
63001+ /* leave a gap at the end so we know when it's "full" but don't have to
63002+ compute the exact length of the string we're trying to append
63003+ */
63004+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
63005+ spin_unlock(&gr_learn_lock);
63006+ wake_up_interruptible(&learn_wait);
63007+ return;
63008+ }
63009+ if (learn_buffer == NULL) {
63010+ spin_unlock(&gr_learn_lock);
63011+ return;
63012+ }
63013+
63014+ va_start(args, fmt);
63015+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
63016+ va_end(args);
63017+
63018+ learn_buffer_len += len + 1;
63019+
63020+ spin_unlock(&gr_learn_lock);
63021+ wake_up_interruptible(&learn_wait);
63022+
63023+ return;
63024+}
63025+
63026+static int
63027+open_learn(struct inode *inode, struct file *file)
63028+{
63029+ if (file->f_mode & FMODE_READ && gr_learn_attached)
63030+ return -EBUSY;
63031+ if (file->f_mode & FMODE_READ) {
63032+ int retval = 0;
63033+ mutex_lock(&gr_learn_user_mutex);
63034+ if (learn_buffer == NULL)
63035+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
63036+ if (learn_buffer_user == NULL)
63037+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
63038+ if (learn_buffer == NULL) {
63039+ retval = -ENOMEM;
63040+ goto out_error;
63041+ }
63042+ if (learn_buffer_user == NULL) {
63043+ retval = -ENOMEM;
63044+ goto out_error;
63045+ }
63046+ learn_buffer_len = 0;
63047+ learn_buffer_user_len = 0;
63048+ gr_learn_attached = 1;
63049+out_error:
63050+ mutex_unlock(&gr_learn_user_mutex);
63051+ return retval;
63052+ }
63053+ return 0;
63054+}
63055+
63056+static int
63057+close_learn(struct inode *inode, struct file *file)
63058+{
63059+ if (file->f_mode & FMODE_READ) {
63060+ char *tmp = NULL;
63061+ mutex_lock(&gr_learn_user_mutex);
63062+ spin_lock(&gr_learn_lock);
63063+ tmp = learn_buffer;
63064+ learn_buffer = NULL;
63065+ spin_unlock(&gr_learn_lock);
63066+ if (tmp)
63067+ vfree(tmp);
63068+ if (learn_buffer_user != NULL) {
63069+ vfree(learn_buffer_user);
63070+ learn_buffer_user = NULL;
63071+ }
63072+ learn_buffer_len = 0;
63073+ learn_buffer_user_len = 0;
63074+ gr_learn_attached = 0;
63075+ mutex_unlock(&gr_learn_user_mutex);
63076+ }
63077+
63078+ return 0;
63079+}
63080+
63081+const struct file_operations grsec_fops = {
63082+ .read = read_learn,
63083+ .write = write_grsec_handler,
63084+ .open = open_learn,
63085+ .release = close_learn,
63086+ .poll = poll_learn,
63087+};
63088diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
63089new file mode 100644
63090index 0000000..39645c9
63091--- /dev/null
63092+++ b/grsecurity/gracl_res.c
63093@@ -0,0 +1,68 @@
63094+#include <linux/kernel.h>
63095+#include <linux/sched.h>
63096+#include <linux/gracl.h>
63097+#include <linux/grinternal.h>
63098+
63099+static const char *restab_log[] = {
63100+ [RLIMIT_CPU] = "RLIMIT_CPU",
63101+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
63102+ [RLIMIT_DATA] = "RLIMIT_DATA",
63103+ [RLIMIT_STACK] = "RLIMIT_STACK",
63104+ [RLIMIT_CORE] = "RLIMIT_CORE",
63105+ [RLIMIT_RSS] = "RLIMIT_RSS",
63106+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
63107+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
63108+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
63109+ [RLIMIT_AS] = "RLIMIT_AS",
63110+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
63111+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
63112+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
63113+ [RLIMIT_NICE] = "RLIMIT_NICE",
63114+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
63115+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
63116+ [GR_CRASH_RES] = "RLIMIT_CRASH"
63117+};
63118+
63119+void
63120+gr_log_resource(const struct task_struct *task,
63121+ const int res, const unsigned long wanted, const int gt)
63122+{
63123+ const struct cred *cred;
63124+ unsigned long rlim;
63125+
63126+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
63127+ return;
63128+
63129+ // not yet supported resource
63130+ if (unlikely(!restab_log[res]))
63131+ return;
63132+
63133+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
63134+ rlim = task_rlimit_max(task, res);
63135+ else
63136+ rlim = task_rlimit(task, res);
63137+
63138+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
63139+ return;
63140+
63141+ rcu_read_lock();
63142+ cred = __task_cred(task);
63143+
63144+ if (res == RLIMIT_NPROC &&
63145+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
63146+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
63147+ goto out_rcu_unlock;
63148+ else if (res == RLIMIT_MEMLOCK &&
63149+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
63150+ goto out_rcu_unlock;
63151+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
63152+ goto out_rcu_unlock;
63153+ rcu_read_unlock();
63154+
63155+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
63156+
63157+ return;
63158+out_rcu_unlock:
63159+ rcu_read_unlock();
63160+ return;
63161+}
63162diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
63163new file mode 100644
63164index 0000000..8c8fc9d
63165--- /dev/null
63166+++ b/grsecurity/gracl_segv.c
63167@@ -0,0 +1,303 @@
63168+#include <linux/kernel.h>
63169+#include <linux/mm.h>
63170+#include <asm/uaccess.h>
63171+#include <asm/errno.h>
63172+#include <asm/mman.h>
63173+#include <net/sock.h>
63174+#include <linux/file.h>
63175+#include <linux/fs.h>
63176+#include <linux/net.h>
63177+#include <linux/in.h>
63178+#include <linux/slab.h>
63179+#include <linux/types.h>
63180+#include <linux/sched.h>
63181+#include <linux/timer.h>
63182+#include <linux/gracl.h>
63183+#include <linux/grsecurity.h>
63184+#include <linux/grinternal.h>
63185+
63186+static struct crash_uid *uid_set;
63187+static unsigned short uid_used;
63188+static DEFINE_SPINLOCK(gr_uid_lock);
63189+extern rwlock_t gr_inode_lock;
63190+extern struct acl_subject_label *
63191+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
63192+ struct acl_role_label *role);
63193+
63194+#ifdef CONFIG_BTRFS_FS
63195+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
63196+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
63197+#endif
63198+
63199+static inline dev_t __get_dev(const struct dentry *dentry)
63200+{
63201+#ifdef CONFIG_BTRFS_FS
63202+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
63203+ return get_btrfs_dev_from_inode(dentry->d_inode);
63204+ else
63205+#endif
63206+ return dentry->d_inode->i_sb->s_dev;
63207+}
63208+
63209+int
63210+gr_init_uidset(void)
63211+{
63212+ uid_set =
63213+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
63214+ uid_used = 0;
63215+
63216+ return uid_set ? 1 : 0;
63217+}
63218+
63219+void
63220+gr_free_uidset(void)
63221+{
63222+ if (uid_set)
63223+ kfree(uid_set);
63224+
63225+ return;
63226+}
63227+
63228+int
63229+gr_find_uid(const uid_t uid)
63230+{
63231+ struct crash_uid *tmp = uid_set;
63232+ uid_t buid;
63233+ int low = 0, high = uid_used - 1, mid;
63234+
63235+ while (high >= low) {
63236+ mid = (low + high) >> 1;
63237+ buid = tmp[mid].uid;
63238+ if (buid == uid)
63239+ return mid;
63240+ if (buid > uid)
63241+ high = mid - 1;
63242+ if (buid < uid)
63243+ low = mid + 1;
63244+ }
63245+
63246+ return -1;
63247+}
63248+
63249+static __inline__ void
63250+gr_insertsort(void)
63251+{
63252+ unsigned short i, j;
63253+ struct crash_uid index;
63254+
63255+ for (i = 1; i < uid_used; i++) {
63256+ index = uid_set[i];
63257+ j = i;
63258+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
63259+ uid_set[j] = uid_set[j - 1];
63260+ j--;
63261+ }
63262+ uid_set[j] = index;
63263+ }
63264+
63265+ return;
63266+}
63267+
63268+static __inline__ void
63269+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
63270+{
63271+ int loc;
63272+ uid_t uid = GR_GLOBAL_UID(kuid);
63273+
63274+ if (uid_used == GR_UIDTABLE_MAX)
63275+ return;
63276+
63277+ loc = gr_find_uid(uid);
63278+
63279+ if (loc >= 0) {
63280+ uid_set[loc].expires = expires;
63281+ return;
63282+ }
63283+
63284+ uid_set[uid_used].uid = uid;
63285+ uid_set[uid_used].expires = expires;
63286+ uid_used++;
63287+
63288+ gr_insertsort();
63289+
63290+ return;
63291+}
63292+
63293+void
63294+gr_remove_uid(const unsigned short loc)
63295+{
63296+ unsigned short i;
63297+
63298+ for (i = loc + 1; i < uid_used; i++)
63299+ uid_set[i - 1] = uid_set[i];
63300+
63301+ uid_used--;
63302+
63303+ return;
63304+}
63305+
63306+int
63307+gr_check_crash_uid(const kuid_t kuid)
63308+{
63309+ int loc;
63310+ int ret = 0;
63311+ uid_t uid;
63312+
63313+ if (unlikely(!gr_acl_is_enabled()))
63314+ return 0;
63315+
63316+ uid = GR_GLOBAL_UID(kuid);
63317+
63318+ spin_lock(&gr_uid_lock);
63319+ loc = gr_find_uid(uid);
63320+
63321+ if (loc < 0)
63322+ goto out_unlock;
63323+
63324+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
63325+ gr_remove_uid(loc);
63326+ else
63327+ ret = 1;
63328+
63329+out_unlock:
63330+ spin_unlock(&gr_uid_lock);
63331+ return ret;
63332+}
63333+
63334+static __inline__ int
63335+proc_is_setxid(const struct cred *cred)
63336+{
63337+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
63338+ !uid_eq(cred->uid, cred->fsuid))
63339+ return 1;
63340+ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
63341+ !gid_eq(cred->gid, cred->fsgid))
63342+ return 1;
63343+
63344+ return 0;
63345+}
63346+
63347+extern int gr_fake_force_sig(int sig, struct task_struct *t);
63348+
63349+void
63350+gr_handle_crash(struct task_struct *task, const int sig)
63351+{
63352+ struct acl_subject_label *curr;
63353+ struct task_struct *tsk, *tsk2;
63354+ const struct cred *cred;
63355+ const struct cred *cred2;
63356+
63357+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
63358+ return;
63359+
63360+ if (unlikely(!gr_acl_is_enabled()))
63361+ return;
63362+
63363+ curr = task->acl;
63364+
63365+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
63366+ return;
63367+
63368+ if (time_before_eq(curr->expires, get_seconds())) {
63369+ curr->expires = 0;
63370+ curr->crashes = 0;
63371+ }
63372+
63373+ curr->crashes++;
63374+
63375+ if (!curr->expires)
63376+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
63377+
63378+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
63379+ time_after(curr->expires, get_seconds())) {
63380+ rcu_read_lock();
63381+ cred = __task_cred(task);
63382+ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
63383+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
63384+ spin_lock(&gr_uid_lock);
63385+ gr_insert_uid(cred->uid, curr->expires);
63386+ spin_unlock(&gr_uid_lock);
63387+ curr->expires = 0;
63388+ curr->crashes = 0;
63389+ read_lock(&tasklist_lock);
63390+ do_each_thread(tsk2, tsk) {
63391+ cred2 = __task_cred(tsk);
63392+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
63393+ gr_fake_force_sig(SIGKILL, tsk);
63394+ } while_each_thread(tsk2, tsk);
63395+ read_unlock(&tasklist_lock);
63396+ } else {
63397+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
63398+ read_lock(&tasklist_lock);
63399+ read_lock(&grsec_exec_file_lock);
63400+ do_each_thread(tsk2, tsk) {
63401+ if (likely(tsk != task)) {
63402+ // if this thread has the same subject as the one that triggered
63403+ // RES_CRASH and it's the same binary, kill it
63404+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
63405+ gr_fake_force_sig(SIGKILL, tsk);
63406+ }
63407+ } while_each_thread(tsk2, tsk);
63408+ read_unlock(&grsec_exec_file_lock);
63409+ read_unlock(&tasklist_lock);
63410+ }
63411+ rcu_read_unlock();
63412+ }
63413+
63414+ return;
63415+}
63416+
63417+int
63418+gr_check_crash_exec(const struct file *filp)
63419+{
63420+ struct acl_subject_label *curr;
63421+
63422+ if (unlikely(!gr_acl_is_enabled()))
63423+ return 0;
63424+
63425+ read_lock(&gr_inode_lock);
63426+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
63427+ __get_dev(filp->f_path.dentry),
63428+ current->role);
63429+ read_unlock(&gr_inode_lock);
63430+
63431+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
63432+ (!curr->crashes && !curr->expires))
63433+ return 0;
63434+
63435+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
63436+ time_after(curr->expires, get_seconds()))
63437+ return 1;
63438+ else if (time_before_eq(curr->expires, get_seconds())) {
63439+ curr->crashes = 0;
63440+ curr->expires = 0;
63441+ }
63442+
63443+ return 0;
63444+}
63445+
63446+void
63447+gr_handle_alertkill(struct task_struct *task)
63448+{
63449+ struct acl_subject_label *curracl;
63450+ __u32 curr_ip;
63451+ struct task_struct *p, *p2;
63452+
63453+ if (unlikely(!gr_acl_is_enabled()))
63454+ return;
63455+
63456+ curracl = task->acl;
63457+ curr_ip = task->signal->curr_ip;
63458+
63459+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
63460+ read_lock(&tasklist_lock);
63461+ do_each_thread(p2, p) {
63462+ if (p->signal->curr_ip == curr_ip)
63463+ gr_fake_force_sig(SIGKILL, p);
63464+ } while_each_thread(p2, p);
63465+ read_unlock(&tasklist_lock);
63466+ } else if (curracl->mode & GR_KILLPROC)
63467+ gr_fake_force_sig(SIGKILL, task);
63468+
63469+ return;
63470+}
63471diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
63472new file mode 100644
63473index 0000000..98011b0
63474--- /dev/null
63475+++ b/grsecurity/gracl_shm.c
63476@@ -0,0 +1,40 @@
63477+#include <linux/kernel.h>
63478+#include <linux/mm.h>
63479+#include <linux/sched.h>
63480+#include <linux/file.h>
63481+#include <linux/ipc.h>
63482+#include <linux/gracl.h>
63483+#include <linux/grsecurity.h>
63484+#include <linux/grinternal.h>
63485+
63486+int
63487+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
63488+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
63489+{
63490+ struct task_struct *task;
63491+
63492+ if (!gr_acl_is_enabled())
63493+ return 1;
63494+
63495+ rcu_read_lock();
63496+ read_lock(&tasklist_lock);
63497+
63498+ task = find_task_by_vpid(shm_cprid);
63499+
63500+ if (unlikely(!task))
63501+ task = find_task_by_vpid(shm_lapid);
63502+
63503+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
63504+ (task_pid_nr(task) == shm_lapid)) &&
63505+ (task->acl->mode & GR_PROTSHM) &&
63506+ (task->acl != current->acl))) {
63507+ read_unlock(&tasklist_lock);
63508+ rcu_read_unlock();
63509+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
63510+ return 0;
63511+ }
63512+ read_unlock(&tasklist_lock);
63513+ rcu_read_unlock();
63514+
63515+ return 1;
63516+}
63517diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
63518new file mode 100644
63519index 0000000..bc0be01
63520--- /dev/null
63521+++ b/grsecurity/grsec_chdir.c
63522@@ -0,0 +1,19 @@
63523+#include <linux/kernel.h>
63524+#include <linux/sched.h>
63525+#include <linux/fs.h>
63526+#include <linux/file.h>
63527+#include <linux/grsecurity.h>
63528+#include <linux/grinternal.h>
63529+
63530+void
63531+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
63532+{
63533+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
63534+ if ((grsec_enable_chdir && grsec_enable_group &&
63535+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
63536+ !grsec_enable_group)) {
63537+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
63538+ }
63539+#endif
63540+ return;
63541+}
63542diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
63543new file mode 100644
63544index 0000000..6d2de57
63545--- /dev/null
63546+++ b/grsecurity/grsec_chroot.c
63547@@ -0,0 +1,357 @@
63548+#include <linux/kernel.h>
63549+#include <linux/module.h>
63550+#include <linux/sched.h>
63551+#include <linux/file.h>
63552+#include <linux/fs.h>
63553+#include <linux/mount.h>
63554+#include <linux/types.h>
63555+#include "../fs/mount.h"
63556+#include <linux/grsecurity.h>
63557+#include <linux/grinternal.h>
63558+
63559+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
63560+{
63561+#ifdef CONFIG_GRKERNSEC
63562+ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
63563+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root)
63564+ task->gr_is_chrooted = 1;
63565+ else
63566+ task->gr_is_chrooted = 0;
63567+
63568+ task->gr_chroot_dentry = path->dentry;
63569+#endif
63570+ return;
63571+}
63572+
63573+void gr_clear_chroot_entries(struct task_struct *task)
63574+{
63575+#ifdef CONFIG_GRKERNSEC
63576+ task->gr_is_chrooted = 0;
63577+ task->gr_chroot_dentry = NULL;
63578+#endif
63579+ return;
63580+}
63581+
63582+int
63583+gr_handle_chroot_unix(const pid_t pid)
63584+{
63585+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
63586+ struct task_struct *p;
63587+
63588+ if (unlikely(!grsec_enable_chroot_unix))
63589+ return 1;
63590+
63591+ if (likely(!proc_is_chrooted(current)))
63592+ return 1;
63593+
63594+ rcu_read_lock();
63595+ read_lock(&tasklist_lock);
63596+ p = find_task_by_vpid_unrestricted(pid);
63597+ if (unlikely(p && !have_same_root(current, p))) {
63598+ read_unlock(&tasklist_lock);
63599+ rcu_read_unlock();
63600+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
63601+ return 0;
63602+ }
63603+ read_unlock(&tasklist_lock);
63604+ rcu_read_unlock();
63605+#endif
63606+ return 1;
63607+}
63608+
63609+int
63610+gr_handle_chroot_nice(void)
63611+{
63612+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
63613+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
63614+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
63615+ return -EPERM;
63616+ }
63617+#endif
63618+ return 0;
63619+}
63620+
63621+int
63622+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
63623+{
63624+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
63625+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
63626+ && proc_is_chrooted(current)) {
63627+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
63628+ return -EACCES;
63629+ }
63630+#endif
63631+ return 0;
63632+}
63633+
63634+int
63635+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
63636+{
63637+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
63638+ struct task_struct *p;
63639+ int ret = 0;
63640+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
63641+ return ret;
63642+
63643+ read_lock(&tasklist_lock);
63644+ do_each_pid_task(pid, type, p) {
63645+ if (!have_same_root(current, p)) {
63646+ ret = 1;
63647+ goto out;
63648+ }
63649+ } while_each_pid_task(pid, type, p);
63650+out:
63651+ read_unlock(&tasklist_lock);
63652+ return ret;
63653+#endif
63654+ return 0;
63655+}
63656+
63657+int
63658+gr_pid_is_chrooted(struct task_struct *p)
63659+{
63660+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
63661+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
63662+ return 0;
63663+
63664+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
63665+ !have_same_root(current, p)) {
63666+ return 1;
63667+ }
63668+#endif
63669+ return 0;
63670+}
63671+
63672+EXPORT_SYMBOL(gr_pid_is_chrooted);
63673+
63674+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
63675+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
63676+{
63677+ struct path path, currentroot;
63678+ int ret = 0;
63679+
63680+ path.dentry = (struct dentry *)u_dentry;
63681+ path.mnt = (struct vfsmount *)u_mnt;
63682+ get_fs_root(current->fs, &currentroot);
63683+ if (path_is_under(&path, &currentroot))
63684+ ret = 1;
63685+ path_put(&currentroot);
63686+
63687+ return ret;
63688+}
63689+#endif
63690+
63691+int
63692+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
63693+{
63694+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
63695+ if (!grsec_enable_chroot_fchdir)
63696+ return 1;
63697+
63698+ if (!proc_is_chrooted(current))
63699+ return 1;
63700+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
63701+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
63702+ return 0;
63703+ }
63704+#endif
63705+ return 1;
63706+}
63707+
63708+int
63709+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
63710+ const time_t shm_createtime)
63711+{
63712+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
63713+ struct task_struct *p;
63714+ time_t starttime;
63715+
63716+ if (unlikely(!grsec_enable_chroot_shmat))
63717+ return 1;
63718+
63719+ if (likely(!proc_is_chrooted(current)))
63720+ return 1;
63721+
63722+ rcu_read_lock();
63723+ read_lock(&tasklist_lock);
63724+
63725+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
63726+ starttime = p->start_time.tv_sec;
63727+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
63728+ if (have_same_root(current, p)) {
63729+ goto allow;
63730+ } else {
63731+ read_unlock(&tasklist_lock);
63732+ rcu_read_unlock();
63733+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
63734+ return 0;
63735+ }
63736+ }
63737+ /* creator exited, pid reuse, fall through to next check */
63738+ }
63739+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
63740+ if (unlikely(!have_same_root(current, p))) {
63741+ read_unlock(&tasklist_lock);
63742+ rcu_read_unlock();
63743+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
63744+ return 0;
63745+ }
63746+ }
63747+
63748+allow:
63749+ read_unlock(&tasklist_lock);
63750+ rcu_read_unlock();
63751+#endif
63752+ return 1;
63753+}
63754+
63755+void
63756+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
63757+{
63758+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
63759+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
63760+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
63761+#endif
63762+ return;
63763+}
63764+
63765+int
63766+gr_handle_chroot_mknod(const struct dentry *dentry,
63767+ const struct vfsmount *mnt, const int mode)
63768+{
63769+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
63770+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
63771+ proc_is_chrooted(current)) {
63772+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
63773+ return -EPERM;
63774+ }
63775+#endif
63776+ return 0;
63777+}
63778+
63779+int
63780+gr_handle_chroot_mount(const struct dentry *dentry,
63781+ const struct vfsmount *mnt, const char *dev_name)
63782+{
63783+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
63784+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
63785+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
63786+ return -EPERM;
63787+ }
63788+#endif
63789+ return 0;
63790+}
63791+
63792+int
63793+gr_handle_chroot_pivot(void)
63794+{
63795+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
63796+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
63797+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
63798+ return -EPERM;
63799+ }
63800+#endif
63801+ return 0;
63802+}
63803+
63804+int
63805+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
63806+{
63807+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
63808+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
63809+ !gr_is_outside_chroot(dentry, mnt)) {
63810+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
63811+ return -EPERM;
63812+ }
63813+#endif
63814+ return 0;
63815+}
63816+
63817+extern const char *captab_log[];
63818+extern int captab_log_entries;
63819+
63820+int
63821+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
63822+{
63823+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
63824+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
63825+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
63826+ if (cap_raised(chroot_caps, cap)) {
63827+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
63828+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
63829+ }
63830+ return 0;
63831+ }
63832+ }
63833+#endif
63834+ return 1;
63835+}
63836+
63837+int
63838+gr_chroot_is_capable(const int cap)
63839+{
63840+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
63841+ return gr_task_chroot_is_capable(current, current_cred(), cap);
63842+#endif
63843+ return 1;
63844+}
63845+
63846+int
63847+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
63848+{
63849+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
63850+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
63851+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
63852+ if (cap_raised(chroot_caps, cap)) {
63853+ return 0;
63854+ }
63855+ }
63856+#endif
63857+ return 1;
63858+}
63859+
63860+int
63861+gr_chroot_is_capable_nolog(const int cap)
63862+{
63863+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
63864+ return gr_task_chroot_is_capable_nolog(current, cap);
63865+#endif
63866+ return 1;
63867+}
63868+
63869+int
63870+gr_handle_chroot_sysctl(const int op)
63871+{
63872+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
63873+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
63874+ proc_is_chrooted(current))
63875+ return -EACCES;
63876+#endif
63877+ return 0;
63878+}
63879+
63880+void
63881+gr_handle_chroot_chdir(struct path *path)
63882+{
63883+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
63884+ if (grsec_enable_chroot_chdir)
63885+ set_fs_pwd(current->fs, path);
63886+#endif
63887+ return;
63888+}
63889+
63890+int
63891+gr_handle_chroot_chmod(const struct dentry *dentry,
63892+ const struct vfsmount *mnt, const int mode)
63893+{
63894+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
63895+ /* allow chmod +s on directories, but not files */
63896+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
63897+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
63898+ proc_is_chrooted(current)) {
63899+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
63900+ return -EPERM;
63901+ }
63902+#endif
63903+ return 0;
63904+}
63905diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
63906new file mode 100644
63907index 0000000..207d409
63908--- /dev/null
63909+++ b/grsecurity/grsec_disabled.c
63910@@ -0,0 +1,434 @@
63911+#include <linux/kernel.h>
63912+#include <linux/module.h>
63913+#include <linux/sched.h>
63914+#include <linux/file.h>
63915+#include <linux/fs.h>
63916+#include <linux/kdev_t.h>
63917+#include <linux/net.h>
63918+#include <linux/in.h>
63919+#include <linux/ip.h>
63920+#include <linux/skbuff.h>
63921+#include <linux/sysctl.h>
63922+
63923+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
63924+void
63925+pax_set_initial_flags(struct linux_binprm *bprm)
63926+{
63927+ return;
63928+}
63929+#endif
63930+
63931+#ifdef CONFIG_SYSCTL
63932+__u32
63933+gr_handle_sysctl(const struct ctl_table * table, const int op)
63934+{
63935+ return 0;
63936+}
63937+#endif
63938+
63939+#ifdef CONFIG_TASKSTATS
63940+int gr_is_taskstats_denied(int pid)
63941+{
63942+ return 0;
63943+}
63944+#endif
63945+
63946+int
63947+gr_acl_is_enabled(void)
63948+{
63949+ return 0;
63950+}
63951+
63952+void
63953+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
63954+{
63955+ return;
63956+}
63957+
63958+int
63959+gr_handle_rawio(const struct inode *inode)
63960+{
63961+ return 0;
63962+}
63963+
63964+void
63965+gr_acl_handle_psacct(struct task_struct *task, const long code)
63966+{
63967+ return;
63968+}
63969+
63970+int
63971+gr_handle_ptrace(struct task_struct *task, const long request)
63972+{
63973+ return 0;
63974+}
63975+
63976+int
63977+gr_handle_proc_ptrace(struct task_struct *task)
63978+{
63979+ return 0;
63980+}
63981+
63982+int
63983+gr_set_acls(const int type)
63984+{
63985+ return 0;
63986+}
63987+
63988+int
63989+gr_check_hidden_task(const struct task_struct *tsk)
63990+{
63991+ return 0;
63992+}
63993+
63994+int
63995+gr_check_protected_task(const struct task_struct *task)
63996+{
63997+ return 0;
63998+}
63999+
64000+int
64001+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
64002+{
64003+ return 0;
64004+}
64005+
64006+void
64007+gr_copy_label(struct task_struct *tsk)
64008+{
64009+ return;
64010+}
64011+
64012+void
64013+gr_set_pax_flags(struct task_struct *task)
64014+{
64015+ return;
64016+}
64017+
64018+int
64019+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
64020+ const int unsafe_share)
64021+{
64022+ return 0;
64023+}
64024+
64025+void
64026+gr_handle_delete(const ino_t ino, const dev_t dev)
64027+{
64028+ return;
64029+}
64030+
64031+void
64032+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
64033+{
64034+ return;
64035+}
64036+
64037+void
64038+gr_handle_crash(struct task_struct *task, const int sig)
64039+{
64040+ return;
64041+}
64042+
64043+int
64044+gr_check_crash_exec(const struct file *filp)
64045+{
64046+ return 0;
64047+}
64048+
64049+int
64050+gr_check_crash_uid(const kuid_t uid)
64051+{
64052+ return 0;
64053+}
64054+
64055+void
64056+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
64057+ struct dentry *old_dentry,
64058+ struct dentry *new_dentry,
64059+ struct vfsmount *mnt, const __u8 replace)
64060+{
64061+ return;
64062+}
64063+
64064+int
64065+gr_search_socket(const int family, const int type, const int protocol)
64066+{
64067+ return 1;
64068+}
64069+
64070+int
64071+gr_search_connectbind(const int mode, const struct socket *sock,
64072+ const struct sockaddr_in *addr)
64073+{
64074+ return 0;
64075+}
64076+
64077+void
64078+gr_handle_alertkill(struct task_struct *task)
64079+{
64080+ return;
64081+}
64082+
64083+__u32
64084+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
64085+{
64086+ return 1;
64087+}
64088+
64089+__u32
64090+gr_acl_handle_hidden_file(const struct dentry * dentry,
64091+ const struct vfsmount * mnt)
64092+{
64093+ return 1;
64094+}
64095+
64096+__u32
64097+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
64098+ int acc_mode)
64099+{
64100+ return 1;
64101+}
64102+
64103+__u32
64104+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
64105+{
64106+ return 1;
64107+}
64108+
64109+__u32
64110+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
64111+{
64112+ return 1;
64113+}
64114+
64115+int
64116+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
64117+ unsigned int *vm_flags)
64118+{
64119+ return 1;
64120+}
64121+
64122+__u32
64123+gr_acl_handle_truncate(const struct dentry * dentry,
64124+ const struct vfsmount * mnt)
64125+{
64126+ return 1;
64127+}
64128+
64129+__u32
64130+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
64131+{
64132+ return 1;
64133+}
64134+
64135+__u32
64136+gr_acl_handle_access(const struct dentry * dentry,
64137+ const struct vfsmount * mnt, const int fmode)
64138+{
64139+ return 1;
64140+}
64141+
64142+__u32
64143+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
64144+ umode_t *mode)
64145+{
64146+ return 1;
64147+}
64148+
64149+__u32
64150+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
64151+{
64152+ return 1;
64153+}
64154+
64155+__u32
64156+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
64157+{
64158+ return 1;
64159+}
64160+
64161+void
64162+grsecurity_init(void)
64163+{
64164+ return;
64165+}
64166+
64167+umode_t gr_acl_umask(void)
64168+{
64169+ return 0;
64170+}
64171+
64172+__u32
64173+gr_acl_handle_mknod(const struct dentry * new_dentry,
64174+ const struct dentry * parent_dentry,
64175+ const struct vfsmount * parent_mnt,
64176+ const int mode)
64177+{
64178+ return 1;
64179+}
64180+
64181+__u32
64182+gr_acl_handle_mkdir(const struct dentry * new_dentry,
64183+ const struct dentry * parent_dentry,
64184+ const struct vfsmount * parent_mnt)
64185+{
64186+ return 1;
64187+}
64188+
64189+__u32
64190+gr_acl_handle_symlink(const struct dentry * new_dentry,
64191+ const struct dentry * parent_dentry,
64192+ const struct vfsmount * parent_mnt, const struct filename *from)
64193+{
64194+ return 1;
64195+}
64196+
64197+__u32
64198+gr_acl_handle_link(const struct dentry * new_dentry,
64199+ const struct dentry * parent_dentry,
64200+ const struct vfsmount * parent_mnt,
64201+ const struct dentry * old_dentry,
64202+ const struct vfsmount * old_mnt, const struct filename *to)
64203+{
64204+ return 1;
64205+}
64206+
64207+int
64208+gr_acl_handle_rename(const struct dentry *new_dentry,
64209+ const struct dentry *parent_dentry,
64210+ const struct vfsmount *parent_mnt,
64211+ const struct dentry *old_dentry,
64212+ const struct inode *old_parent_inode,
64213+ const struct vfsmount *old_mnt, const struct filename *newname)
64214+{
64215+ return 0;
64216+}
64217+
64218+int
64219+gr_acl_handle_filldir(const struct file *file, const char *name,
64220+ const int namelen, const ino_t ino)
64221+{
64222+ return 1;
64223+}
64224+
64225+int
64226+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
64227+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
64228+{
64229+ return 1;
64230+}
64231+
64232+int
64233+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
64234+{
64235+ return 0;
64236+}
64237+
64238+int
64239+gr_search_accept(const struct socket *sock)
64240+{
64241+ return 0;
64242+}
64243+
64244+int
64245+gr_search_listen(const struct socket *sock)
64246+{
64247+ return 0;
64248+}
64249+
64250+int
64251+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
64252+{
64253+ return 0;
64254+}
64255+
64256+__u32
64257+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
64258+{
64259+ return 1;
64260+}
64261+
64262+__u32
64263+gr_acl_handle_creat(const struct dentry * dentry,
64264+ const struct dentry * p_dentry,
64265+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
64266+ const int imode)
64267+{
64268+ return 1;
64269+}
64270+
64271+void
64272+gr_acl_handle_exit(void)
64273+{
64274+ return;
64275+}
64276+
64277+int
64278+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
64279+{
64280+ return 1;
64281+}
64282+
64283+void
64284+gr_set_role_label(const kuid_t uid, const kgid_t gid)
64285+{
64286+ return;
64287+}
64288+
64289+int
64290+gr_acl_handle_procpidmem(const struct task_struct *task)
64291+{
64292+ return 0;
64293+}
64294+
64295+int
64296+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
64297+{
64298+ return 0;
64299+}
64300+
64301+int
64302+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
64303+{
64304+ return 0;
64305+}
64306+
64307+void
64308+gr_set_kernel_label(struct task_struct *task)
64309+{
64310+ return;
64311+}
64312+
64313+int
64314+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
64315+{
64316+ return 0;
64317+}
64318+
64319+int
64320+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
64321+{
64322+ return 0;
64323+}
64324+
64325+int gr_acl_enable_at_secure(void)
64326+{
64327+ return 0;
64328+}
64329+
64330+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
64331+{
64332+ return dentry->d_inode->i_sb->s_dev;
64333+}
64334+
64335+void gr_put_exec_file(struct task_struct *task)
64336+{
64337+ return;
64338+}
64339+
64340+EXPORT_SYMBOL(gr_set_kernel_label);
64341+#ifdef CONFIG_SECURITY
64342+EXPORT_SYMBOL(gr_check_user_change);
64343+EXPORT_SYMBOL(gr_check_group_change);
64344+#endif
64345diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
64346new file mode 100644
64347index 0000000..387032b
64348--- /dev/null
64349+++ b/grsecurity/grsec_exec.c
64350@@ -0,0 +1,187 @@
64351+#include <linux/kernel.h>
64352+#include <linux/sched.h>
64353+#include <linux/file.h>
64354+#include <linux/binfmts.h>
64355+#include <linux/fs.h>
64356+#include <linux/types.h>
64357+#include <linux/grdefs.h>
64358+#include <linux/grsecurity.h>
64359+#include <linux/grinternal.h>
64360+#include <linux/capability.h>
64361+#include <linux/module.h>
64362+#include <linux/compat.h>
64363+
64364+#include <asm/uaccess.h>
64365+
64366+#ifdef CONFIG_GRKERNSEC_EXECLOG
64367+static char gr_exec_arg_buf[132];
64368+static DEFINE_MUTEX(gr_exec_arg_mutex);
64369+#endif
64370+
64371+struct user_arg_ptr {
64372+#ifdef CONFIG_COMPAT
64373+ bool is_compat;
64374+#endif
64375+ union {
64376+ const char __user *const __user *native;
64377+#ifdef CONFIG_COMPAT
64378+ const compat_uptr_t __user *compat;
64379+#endif
64380+ } ptr;
64381+};
64382+
64383+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
64384+
64385+void
64386+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
64387+{
64388+#ifdef CONFIG_GRKERNSEC_EXECLOG
64389+ char *grarg = gr_exec_arg_buf;
64390+ unsigned int i, x, execlen = 0;
64391+ char c;
64392+
64393+ if (!((grsec_enable_execlog && grsec_enable_group &&
64394+ in_group_p(grsec_audit_gid))
64395+ || (grsec_enable_execlog && !grsec_enable_group)))
64396+ return;
64397+
64398+ mutex_lock(&gr_exec_arg_mutex);
64399+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
64400+
64401+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
64402+ const char __user *p;
64403+ unsigned int len;
64404+
64405+ p = get_user_arg_ptr(argv, i);
64406+ if (IS_ERR(p))
64407+ goto log;
64408+
64409+ len = strnlen_user(p, 128 - execlen);
64410+ if (len > 128 - execlen)
64411+ len = 128 - execlen;
64412+ else if (len > 0)
64413+ len--;
64414+ if (copy_from_user(grarg + execlen, p, len))
64415+ goto log;
64416+
64417+ /* rewrite unprintable characters */
64418+ for (x = 0; x < len; x++) {
64419+ c = *(grarg + execlen + x);
64420+ if (c < 32 || c > 126)
64421+ *(grarg + execlen + x) = ' ';
64422+ }
64423+
64424+ execlen += len;
64425+ *(grarg + execlen) = ' ';
64426+ *(grarg + execlen + 1) = '\0';
64427+ execlen++;
64428+ }
64429+
64430+ log:
64431+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
64432+ bprm->file->f_path.mnt, grarg);
64433+ mutex_unlock(&gr_exec_arg_mutex);
64434+#endif
64435+ return;
64436+}
64437+
64438+#ifdef CONFIG_GRKERNSEC
64439+extern int gr_acl_is_capable(const int cap);
64440+extern int gr_acl_is_capable_nolog(const int cap);
64441+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
64442+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
64443+extern int gr_chroot_is_capable(const int cap);
64444+extern int gr_chroot_is_capable_nolog(const int cap);
64445+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
64446+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
64447+#endif
64448+
64449+const char *captab_log[] = {
64450+ "CAP_CHOWN",
64451+ "CAP_DAC_OVERRIDE",
64452+ "CAP_DAC_READ_SEARCH",
64453+ "CAP_FOWNER",
64454+ "CAP_FSETID",
64455+ "CAP_KILL",
64456+ "CAP_SETGID",
64457+ "CAP_SETUID",
64458+ "CAP_SETPCAP",
64459+ "CAP_LINUX_IMMUTABLE",
64460+ "CAP_NET_BIND_SERVICE",
64461+ "CAP_NET_BROADCAST",
64462+ "CAP_NET_ADMIN",
64463+ "CAP_NET_RAW",
64464+ "CAP_IPC_LOCK",
64465+ "CAP_IPC_OWNER",
64466+ "CAP_SYS_MODULE",
64467+ "CAP_SYS_RAWIO",
64468+ "CAP_SYS_CHROOT",
64469+ "CAP_SYS_PTRACE",
64470+ "CAP_SYS_PACCT",
64471+ "CAP_SYS_ADMIN",
64472+ "CAP_SYS_BOOT",
64473+ "CAP_SYS_NICE",
64474+ "CAP_SYS_RESOURCE",
64475+ "CAP_SYS_TIME",
64476+ "CAP_SYS_TTY_CONFIG",
64477+ "CAP_MKNOD",
64478+ "CAP_LEASE",
64479+ "CAP_AUDIT_WRITE",
64480+ "CAP_AUDIT_CONTROL",
64481+ "CAP_SETFCAP",
64482+ "CAP_MAC_OVERRIDE",
64483+ "CAP_MAC_ADMIN",
64484+ "CAP_SYSLOG",
64485+ "CAP_WAKE_ALARM"
64486+};
64487+
64488+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
64489+
64490+int gr_is_capable(const int cap)
64491+{
64492+#ifdef CONFIG_GRKERNSEC
64493+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
64494+ return 1;
64495+ return 0;
64496+#else
64497+ return 1;
64498+#endif
64499+}
64500+
64501+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
64502+{
64503+#ifdef CONFIG_GRKERNSEC
64504+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
64505+ return 1;
64506+ return 0;
64507+#else
64508+ return 1;
64509+#endif
64510+}
64511+
64512+int gr_is_capable_nolog(const int cap)
64513+{
64514+#ifdef CONFIG_GRKERNSEC
64515+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
64516+ return 1;
64517+ return 0;
64518+#else
64519+ return 1;
64520+#endif
64521+}
64522+
64523+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
64524+{
64525+#ifdef CONFIG_GRKERNSEC
64526+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
64527+ return 1;
64528+ return 0;
64529+#else
64530+ return 1;
64531+#endif
64532+}
64533+
64534+EXPORT_SYMBOL(gr_is_capable);
64535+EXPORT_SYMBOL(gr_is_capable_nolog);
64536+EXPORT_SYMBOL(gr_task_is_capable);
64537+EXPORT_SYMBOL(gr_task_is_capable_nolog);
64538diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
64539new file mode 100644
64540index 0000000..06cc6ea
64541--- /dev/null
64542+++ b/grsecurity/grsec_fifo.c
64543@@ -0,0 +1,24 @@
64544+#include <linux/kernel.h>
64545+#include <linux/sched.h>
64546+#include <linux/fs.h>
64547+#include <linux/file.h>
64548+#include <linux/grinternal.h>
64549+
64550+int
64551+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
64552+ const struct dentry *dir, const int flag, const int acc_mode)
64553+{
64554+#ifdef CONFIG_GRKERNSEC_FIFO
64555+ const struct cred *cred = current_cred();
64556+
64557+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
64558+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
64559+ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
64560+ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
64561+ if (!inode_permission(dentry->d_inode, acc_mode))
64562+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
64563+ return -EACCES;
64564+ }
64565+#endif
64566+ return 0;
64567+}
64568diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
64569new file mode 100644
64570index 0000000..8ca18bf
64571--- /dev/null
64572+++ b/grsecurity/grsec_fork.c
64573@@ -0,0 +1,23 @@
64574+#include <linux/kernel.h>
64575+#include <linux/sched.h>
64576+#include <linux/grsecurity.h>
64577+#include <linux/grinternal.h>
64578+#include <linux/errno.h>
64579+
64580+void
64581+gr_log_forkfail(const int retval)
64582+{
64583+#ifdef CONFIG_GRKERNSEC_FORKFAIL
64584+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
64585+ switch (retval) {
64586+ case -EAGAIN:
64587+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
64588+ break;
64589+ case -ENOMEM:
64590+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
64591+ break;
64592+ }
64593+ }
64594+#endif
64595+ return;
64596+}
64597diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
64598new file mode 100644
64599index 0000000..a862e9f
64600--- /dev/null
64601+++ b/grsecurity/grsec_init.c
64602@@ -0,0 +1,283 @@
64603+#include <linux/kernel.h>
64604+#include <linux/sched.h>
64605+#include <linux/mm.h>
64606+#include <linux/gracl.h>
64607+#include <linux/slab.h>
64608+#include <linux/vmalloc.h>
64609+#include <linux/percpu.h>
64610+#include <linux/module.h>
64611+
64612+int grsec_enable_ptrace_readexec;
64613+int grsec_enable_setxid;
64614+int grsec_enable_symlinkown;
64615+kgid_t grsec_symlinkown_gid;
64616+int grsec_enable_brute;
64617+int grsec_enable_link;
64618+int grsec_enable_dmesg;
64619+int grsec_enable_harden_ptrace;
64620+int grsec_enable_fifo;
64621+int grsec_enable_execlog;
64622+int grsec_enable_signal;
64623+int grsec_enable_forkfail;
64624+int grsec_enable_audit_ptrace;
64625+int grsec_enable_time;
64626+int grsec_enable_audit_textrel;
64627+int grsec_enable_group;
64628+kgid_t grsec_audit_gid;
64629+int grsec_enable_chdir;
64630+int grsec_enable_mount;
64631+int grsec_enable_rofs;
64632+int grsec_enable_chroot_findtask;
64633+int grsec_enable_chroot_mount;
64634+int grsec_enable_chroot_shmat;
64635+int grsec_enable_chroot_fchdir;
64636+int grsec_enable_chroot_double;
64637+int grsec_enable_chroot_pivot;
64638+int grsec_enable_chroot_chdir;
64639+int grsec_enable_chroot_chmod;
64640+int grsec_enable_chroot_mknod;
64641+int grsec_enable_chroot_nice;
64642+int grsec_enable_chroot_execlog;
64643+int grsec_enable_chroot_caps;
64644+int grsec_enable_chroot_sysctl;
64645+int grsec_enable_chroot_unix;
64646+int grsec_enable_tpe;
64647+kgid_t grsec_tpe_gid;
64648+int grsec_enable_blackhole;
64649+#ifdef CONFIG_IPV6_MODULE
64650+EXPORT_SYMBOL(grsec_enable_blackhole);
64651+#endif
64652+int grsec_lastack_retries;
64653+int grsec_enable_tpe_all;
64654+int grsec_enable_tpe_invert;
64655+int grsec_enable_socket_all;
64656+kgid_t grsec_socket_all_gid;
64657+int grsec_enable_socket_client;
64658+kgid_t grsec_socket_client_gid;
64659+int grsec_enable_socket_server;
64660+kgid_t grsec_socket_server_gid;
64661+int grsec_resource_logging;
64662+int grsec_disable_privio;
64663+int grsec_enable_log_rwxmaps;
64664+int grsec_lock;
64665+
64666+DEFINE_SPINLOCK(grsec_alert_lock);
64667+unsigned long grsec_alert_wtime = 0;
64668+unsigned long grsec_alert_fyet = 0;
64669+
64670+DEFINE_SPINLOCK(grsec_audit_lock);
64671+
64672+DEFINE_RWLOCK(grsec_exec_file_lock);
64673+
64674+char *gr_shared_page[4];
64675+
64676+char *gr_alert_log_fmt;
64677+char *gr_audit_log_fmt;
64678+char *gr_alert_log_buf;
64679+char *gr_audit_log_buf;
64680+
64681+extern struct gr_arg *gr_usermode;
64682+extern unsigned char *gr_system_salt;
64683+extern unsigned char *gr_system_sum;
64684+
64685+void __init
64686+grsecurity_init(void)
64687+{
64688+ int j;
64689+ /* create the per-cpu shared pages */
64690+
64691+#ifdef CONFIG_X86
64692+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
64693+#endif
64694+
64695+ for (j = 0; j < 4; j++) {
64696+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
64697+ if (gr_shared_page[j] == NULL) {
64698+ panic("Unable to allocate grsecurity shared page");
64699+ return;
64700+ }
64701+ }
64702+
64703+ /* allocate log buffers */
64704+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
64705+ if (!gr_alert_log_fmt) {
64706+ panic("Unable to allocate grsecurity alert log format buffer");
64707+ return;
64708+ }
64709+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
64710+ if (!gr_audit_log_fmt) {
64711+ panic("Unable to allocate grsecurity audit log format buffer");
64712+ return;
64713+ }
64714+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
64715+ if (!gr_alert_log_buf) {
64716+ panic("Unable to allocate grsecurity alert log buffer");
64717+ return;
64718+ }
64719+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
64720+ if (!gr_audit_log_buf) {
64721+ panic("Unable to allocate grsecurity audit log buffer");
64722+ return;
64723+ }
64724+
64725+ /* allocate memory for authentication structure */
64726+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
64727+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
64728+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
64729+
64730+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
64731+ panic("Unable to allocate grsecurity authentication structure");
64732+ return;
64733+ }
64734+
64735+
64736+#ifdef CONFIG_GRKERNSEC_IO
64737+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
64738+ grsec_disable_privio = 1;
64739+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
64740+ grsec_disable_privio = 1;
64741+#else
64742+ grsec_disable_privio = 0;
64743+#endif
64744+#endif
64745+
64746+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
64747+ /* for backward compatibility, tpe_invert always defaults to on if
64748+ enabled in the kernel
64749+ */
64750+ grsec_enable_tpe_invert = 1;
64751+#endif
64752+
64753+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
64754+#ifndef CONFIG_GRKERNSEC_SYSCTL
64755+ grsec_lock = 1;
64756+#endif
64757+
64758+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
64759+ grsec_enable_audit_textrel = 1;
64760+#endif
64761+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
64762+ grsec_enable_log_rwxmaps = 1;
64763+#endif
64764+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
64765+ grsec_enable_group = 1;
64766+ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
64767+#endif
64768+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
64769+ grsec_enable_ptrace_readexec = 1;
64770+#endif
64771+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
64772+ grsec_enable_chdir = 1;
64773+#endif
64774+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
64775+ grsec_enable_harden_ptrace = 1;
64776+#endif
64777+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
64778+ grsec_enable_mount = 1;
64779+#endif
64780+#ifdef CONFIG_GRKERNSEC_LINK
64781+ grsec_enable_link = 1;
64782+#endif
64783+#ifdef CONFIG_GRKERNSEC_BRUTE
64784+ grsec_enable_brute = 1;
64785+#endif
64786+#ifdef CONFIG_GRKERNSEC_DMESG
64787+ grsec_enable_dmesg = 1;
64788+#endif
64789+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
64790+ grsec_enable_blackhole = 1;
64791+ grsec_lastack_retries = 4;
64792+#endif
64793+#ifdef CONFIG_GRKERNSEC_FIFO
64794+ grsec_enable_fifo = 1;
64795+#endif
64796+#ifdef CONFIG_GRKERNSEC_EXECLOG
64797+ grsec_enable_execlog = 1;
64798+#endif
64799+#ifdef CONFIG_GRKERNSEC_SETXID
64800+ grsec_enable_setxid = 1;
64801+#endif
64802+#ifdef CONFIG_GRKERNSEC_SIGNAL
64803+ grsec_enable_signal = 1;
64804+#endif
64805+#ifdef CONFIG_GRKERNSEC_FORKFAIL
64806+ grsec_enable_forkfail = 1;
64807+#endif
64808+#ifdef CONFIG_GRKERNSEC_TIME
64809+ grsec_enable_time = 1;
64810+#endif
64811+#ifdef CONFIG_GRKERNSEC_RESLOG
64812+ grsec_resource_logging = 1;
64813+#endif
64814+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
64815+ grsec_enable_chroot_findtask = 1;
64816+#endif
64817+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
64818+ grsec_enable_chroot_unix = 1;
64819+#endif
64820+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
64821+ grsec_enable_chroot_mount = 1;
64822+#endif
64823+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
64824+ grsec_enable_chroot_fchdir = 1;
64825+#endif
64826+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
64827+ grsec_enable_chroot_shmat = 1;
64828+#endif
64829+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
64830+ grsec_enable_audit_ptrace = 1;
64831+#endif
64832+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
64833+ grsec_enable_chroot_double = 1;
64834+#endif
64835+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
64836+ grsec_enable_chroot_pivot = 1;
64837+#endif
64838+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
64839+ grsec_enable_chroot_chdir = 1;
64840+#endif
64841+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
64842+ grsec_enable_chroot_chmod = 1;
64843+#endif
64844+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
64845+ grsec_enable_chroot_mknod = 1;
64846+#endif
64847+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
64848+ grsec_enable_chroot_nice = 1;
64849+#endif
64850+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
64851+ grsec_enable_chroot_execlog = 1;
64852+#endif
64853+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
64854+ grsec_enable_chroot_caps = 1;
64855+#endif
64856+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
64857+ grsec_enable_chroot_sysctl = 1;
64858+#endif
64859+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
64860+ grsec_enable_symlinkown = 1;
64861+ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
64862+#endif
64863+#ifdef CONFIG_GRKERNSEC_TPE
64864+ grsec_enable_tpe = 1;
64865+ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
64866+#ifdef CONFIG_GRKERNSEC_TPE_ALL
64867+ grsec_enable_tpe_all = 1;
64868+#endif
64869+#endif
64870+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
64871+ grsec_enable_socket_all = 1;
64872+ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
64873+#endif
64874+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
64875+ grsec_enable_socket_client = 1;
64876+ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
64877+#endif
64878+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
64879+ grsec_enable_socket_server = 1;
64880+ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
64881+#endif
64882+#endif
64883+
64884+ return;
64885+}
64886diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
64887new file mode 100644
64888index 0000000..5e05e20
64889--- /dev/null
64890+++ b/grsecurity/grsec_link.c
64891@@ -0,0 +1,58 @@
64892+#include <linux/kernel.h>
64893+#include <linux/sched.h>
64894+#include <linux/fs.h>
64895+#include <linux/file.h>
64896+#include <linux/grinternal.h>
64897+
64898+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
64899+{
64900+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
64901+ const struct inode *link_inode = link->dentry->d_inode;
64902+
64903+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
64904+ /* ignore root-owned links, e.g. /proc/self */
64905+ gr_is_global_nonroot(link_inode->i_uid) && target &&
64906+ !uid_eq(link_inode->i_uid, target->i_uid)) {
64907+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
64908+ return 1;
64909+ }
64910+#endif
64911+ return 0;
64912+}
64913+
64914+int
64915+gr_handle_follow_link(const struct inode *parent,
64916+ const struct inode *inode,
64917+ const struct dentry *dentry, const struct vfsmount *mnt)
64918+{
64919+#ifdef CONFIG_GRKERNSEC_LINK
64920+ const struct cred *cred = current_cred();
64921+
64922+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
64923+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
64924+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
64925+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
64926+ return -EACCES;
64927+ }
64928+#endif
64929+ return 0;
64930+}
64931+
64932+int
64933+gr_handle_hardlink(const struct dentry *dentry,
64934+ const struct vfsmount *mnt,
64935+ struct inode *inode, const int mode, const struct filename *to)
64936+{
64937+#ifdef CONFIG_GRKERNSEC_LINK
64938+ const struct cred *cred = current_cred();
64939+
64940+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
64941+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
64942+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
64943+ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
64944+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
64945+ return -EPERM;
64946+ }
64947+#endif
64948+ return 0;
64949+}
64950diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
64951new file mode 100644
64952index 0000000..7c06085
64953--- /dev/null
64954+++ b/grsecurity/grsec_log.c
64955@@ -0,0 +1,326 @@
64956+#include <linux/kernel.h>
64957+#include <linux/sched.h>
64958+#include <linux/file.h>
64959+#include <linux/tty.h>
64960+#include <linux/fs.h>
64961+#include <linux/grinternal.h>
64962+
64963+#ifdef CONFIG_TREE_PREEMPT_RCU
64964+#define DISABLE_PREEMPT() preempt_disable()
64965+#define ENABLE_PREEMPT() preempt_enable()
64966+#else
64967+#define DISABLE_PREEMPT()
64968+#define ENABLE_PREEMPT()
64969+#endif
64970+
64971+#define BEGIN_LOCKS(x) \
64972+ DISABLE_PREEMPT(); \
64973+ rcu_read_lock(); \
64974+ read_lock(&tasklist_lock); \
64975+ read_lock(&grsec_exec_file_lock); \
64976+ if (x != GR_DO_AUDIT) \
64977+ spin_lock(&grsec_alert_lock); \
64978+ else \
64979+ spin_lock(&grsec_audit_lock)
64980+
64981+#define END_LOCKS(x) \
64982+ if (x != GR_DO_AUDIT) \
64983+ spin_unlock(&grsec_alert_lock); \
64984+ else \
64985+ spin_unlock(&grsec_audit_lock); \
64986+ read_unlock(&grsec_exec_file_lock); \
64987+ read_unlock(&tasklist_lock); \
64988+ rcu_read_unlock(); \
64989+ ENABLE_PREEMPT(); \
64990+ if (x == GR_DONT_AUDIT) \
64991+ gr_handle_alertkill(current)
64992+
64993+enum {
64994+ FLOODING,
64995+ NO_FLOODING
64996+};
64997+
64998+extern char *gr_alert_log_fmt;
64999+extern char *gr_audit_log_fmt;
65000+extern char *gr_alert_log_buf;
65001+extern char *gr_audit_log_buf;
65002+
65003+static int gr_log_start(int audit)
65004+{
65005+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
65006+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
65007+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
65008+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
65009+ unsigned long curr_secs = get_seconds();
65010+
65011+ if (audit == GR_DO_AUDIT)
65012+ goto set_fmt;
65013+
65014+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
65015+ grsec_alert_wtime = curr_secs;
65016+ grsec_alert_fyet = 0;
65017+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
65018+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
65019+ grsec_alert_fyet++;
65020+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
65021+ grsec_alert_wtime = curr_secs;
65022+ grsec_alert_fyet++;
65023+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
65024+ return FLOODING;
65025+ }
65026+ else return FLOODING;
65027+
65028+set_fmt:
65029+#endif
65030+ memset(buf, 0, PAGE_SIZE);
65031+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
65032+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
65033+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
65034+ } else if (current->signal->curr_ip) {
65035+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
65036+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
65037+ } else if (gr_acl_is_enabled()) {
65038+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
65039+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
65040+ } else {
65041+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
65042+ strcpy(buf, fmt);
65043+ }
65044+
65045+ return NO_FLOODING;
65046+}
65047+
65048+static void gr_log_middle(int audit, const char *msg, va_list ap)
65049+ __attribute__ ((format (printf, 2, 0)));
65050+
65051+static void gr_log_middle(int audit, const char *msg, va_list ap)
65052+{
65053+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
65054+ unsigned int len = strlen(buf);
65055+
65056+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
65057+
65058+ return;
65059+}
65060+
65061+static void gr_log_middle_varargs(int audit, const char *msg, ...)
65062+ __attribute__ ((format (printf, 2, 3)));
65063+
65064+static void gr_log_middle_varargs(int audit, const char *msg, ...)
65065+{
65066+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
65067+ unsigned int len = strlen(buf);
65068+ va_list ap;
65069+
65070+ va_start(ap, msg);
65071+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
65072+ va_end(ap);
65073+
65074+ return;
65075+}
65076+
65077+static void gr_log_end(int audit, int append_default)
65078+{
65079+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
65080+ if (append_default) {
65081+ struct task_struct *task = current;
65082+ struct task_struct *parent = task->real_parent;
65083+ const struct cred *cred = __task_cred(task);
65084+ const struct cred *pcred = __task_cred(parent);
65085+ unsigned int len = strlen(buf);
65086+
65087+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
65088+ }
65089+
65090+ printk("%s\n", buf);
65091+
65092+ return;
65093+}
65094+
65095+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
65096+{
65097+ int logtype;
65098+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
65099+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
65100+ void *voidptr = NULL;
65101+ int num1 = 0, num2 = 0;
65102+ unsigned long ulong1 = 0, ulong2 = 0;
65103+ struct dentry *dentry = NULL;
65104+ struct vfsmount *mnt = NULL;
65105+ struct file *file = NULL;
65106+ struct task_struct *task = NULL;
65107+ const struct cred *cred, *pcred;
65108+ va_list ap;
65109+
65110+ BEGIN_LOCKS(audit);
65111+ logtype = gr_log_start(audit);
65112+ if (logtype == FLOODING) {
65113+ END_LOCKS(audit);
65114+ return;
65115+ }
65116+ va_start(ap, argtypes);
65117+ switch (argtypes) {
65118+ case GR_TTYSNIFF:
65119+ task = va_arg(ap, struct task_struct *);
65120+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
65121+ break;
65122+ case GR_SYSCTL_HIDDEN:
65123+ str1 = va_arg(ap, char *);
65124+ gr_log_middle_varargs(audit, msg, result, str1);
65125+ break;
65126+ case GR_RBAC:
65127+ dentry = va_arg(ap, struct dentry *);
65128+ mnt = va_arg(ap, struct vfsmount *);
65129+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
65130+ break;
65131+ case GR_RBAC_STR:
65132+ dentry = va_arg(ap, struct dentry *);
65133+ mnt = va_arg(ap, struct vfsmount *);
65134+ str1 = va_arg(ap, char *);
65135+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
65136+ break;
65137+ case GR_STR_RBAC:
65138+ str1 = va_arg(ap, char *);
65139+ dentry = va_arg(ap, struct dentry *);
65140+ mnt = va_arg(ap, struct vfsmount *);
65141+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
65142+ break;
65143+ case GR_RBAC_MODE2:
65144+ dentry = va_arg(ap, struct dentry *);
65145+ mnt = va_arg(ap, struct vfsmount *);
65146+ str1 = va_arg(ap, char *);
65147+ str2 = va_arg(ap, char *);
65148+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
65149+ break;
65150+ case GR_RBAC_MODE3:
65151+ dentry = va_arg(ap, struct dentry *);
65152+ mnt = va_arg(ap, struct vfsmount *);
65153+ str1 = va_arg(ap, char *);
65154+ str2 = va_arg(ap, char *);
65155+ str3 = va_arg(ap, char *);
65156+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
65157+ break;
65158+ case GR_FILENAME:
65159+ dentry = va_arg(ap, struct dentry *);
65160+ mnt = va_arg(ap, struct vfsmount *);
65161+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
65162+ break;
65163+ case GR_STR_FILENAME:
65164+ str1 = va_arg(ap, char *);
65165+ dentry = va_arg(ap, struct dentry *);
65166+ mnt = va_arg(ap, struct vfsmount *);
65167+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
65168+ break;
65169+ case GR_FILENAME_STR:
65170+ dentry = va_arg(ap, struct dentry *);
65171+ mnt = va_arg(ap, struct vfsmount *);
65172+ str1 = va_arg(ap, char *);
65173+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
65174+ break;
65175+ case GR_FILENAME_TWO_INT:
65176+ dentry = va_arg(ap, struct dentry *);
65177+ mnt = va_arg(ap, struct vfsmount *);
65178+ num1 = va_arg(ap, int);
65179+ num2 = va_arg(ap, int);
65180+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
65181+ break;
65182+ case GR_FILENAME_TWO_INT_STR:
65183+ dentry = va_arg(ap, struct dentry *);
65184+ mnt = va_arg(ap, struct vfsmount *);
65185+ num1 = va_arg(ap, int);
65186+ num2 = va_arg(ap, int);
65187+ str1 = va_arg(ap, char *);
65188+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
65189+ break;
65190+ case GR_TEXTREL:
65191+ file = va_arg(ap, struct file *);
65192+ ulong1 = va_arg(ap, unsigned long);
65193+ ulong2 = va_arg(ap, unsigned long);
65194+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
65195+ break;
65196+ case GR_PTRACE:
65197+ task = va_arg(ap, struct task_struct *);
65198+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
65199+ break;
65200+ case GR_RESOURCE:
65201+ task = va_arg(ap, struct task_struct *);
65202+ cred = __task_cred(task);
65203+ pcred = __task_cred(task->real_parent);
65204+ ulong1 = va_arg(ap, unsigned long);
65205+ str1 = va_arg(ap, char *);
65206+ ulong2 = va_arg(ap, unsigned long);
65207+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
65208+ break;
65209+ case GR_CAP:
65210+ task = va_arg(ap, struct task_struct *);
65211+ cred = __task_cred(task);
65212+ pcred = __task_cred(task->real_parent);
65213+ str1 = va_arg(ap, char *);
65214+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
65215+ break;
65216+ case GR_SIG:
65217+ str1 = va_arg(ap, char *);
65218+ voidptr = va_arg(ap, void *);
65219+ gr_log_middle_varargs(audit, msg, str1, voidptr);
65220+ break;
65221+ case GR_SIG2:
65222+ task = va_arg(ap, struct task_struct *);
65223+ cred = __task_cred(task);
65224+ pcred = __task_cred(task->real_parent);
65225+ num1 = va_arg(ap, int);
65226+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
65227+ break;
65228+ case GR_CRASH1:
65229+ task = va_arg(ap, struct task_struct *);
65230+ cred = __task_cred(task);
65231+ pcred = __task_cred(task->real_parent);
65232+ ulong1 = va_arg(ap, unsigned long);
65233+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
65234+ break;
65235+ case GR_CRASH2:
65236+ task = va_arg(ap, struct task_struct *);
65237+ cred = __task_cred(task);
65238+ pcred = __task_cred(task->real_parent);
65239+ ulong1 = va_arg(ap, unsigned long);
65240+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
65241+ break;
65242+ case GR_RWXMAP:
65243+ file = va_arg(ap, struct file *);
65244+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
65245+ break;
65246+ case GR_PSACCT:
65247+ {
65248+ unsigned int wday, cday;
65249+ __u8 whr, chr;
65250+ __u8 wmin, cmin;
65251+ __u8 wsec, csec;
65252+ char cur_tty[64] = { 0 };
65253+ char parent_tty[64] = { 0 };
65254+
65255+ task = va_arg(ap, struct task_struct *);
65256+ wday = va_arg(ap, unsigned int);
65257+ cday = va_arg(ap, unsigned int);
65258+ whr = va_arg(ap, int);
65259+ chr = va_arg(ap, int);
65260+ wmin = va_arg(ap, int);
65261+ cmin = va_arg(ap, int);
65262+ wsec = va_arg(ap, int);
65263+ csec = va_arg(ap, int);
65264+ ulong1 = va_arg(ap, unsigned long);
65265+ cred = __task_cred(task);
65266+ pcred = __task_cred(task->real_parent);
65267+
65268+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
65269+ }
65270+ break;
65271+ default:
65272+ gr_log_middle(audit, msg, ap);
65273+ }
65274+ va_end(ap);
65275+ // these don't need DEFAULTSECARGS printed on the end
65276+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
65277+ gr_log_end(audit, 0);
65278+ else
65279+ gr_log_end(audit, 1);
65280+ END_LOCKS(audit);
65281+}
65282diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
65283new file mode 100644
65284index 0000000..f536303
65285--- /dev/null
65286+++ b/grsecurity/grsec_mem.c
65287@@ -0,0 +1,40 @@
65288+#include <linux/kernel.h>
65289+#include <linux/sched.h>
65290+#include <linux/mm.h>
65291+#include <linux/mman.h>
65292+#include <linux/grinternal.h>
65293+
65294+void
65295+gr_handle_ioperm(void)
65296+{
65297+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
65298+ return;
65299+}
65300+
65301+void
65302+gr_handle_iopl(void)
65303+{
65304+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
65305+ return;
65306+}
65307+
65308+void
65309+gr_handle_mem_readwrite(u64 from, u64 to)
65310+{
65311+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
65312+ return;
65313+}
65314+
65315+void
65316+gr_handle_vm86(void)
65317+{
65318+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
65319+ return;
65320+}
65321+
65322+void
65323+gr_log_badprocpid(const char *entry)
65324+{
65325+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
65326+ return;
65327+}
65328diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
65329new file mode 100644
65330index 0000000..2131422
65331--- /dev/null
65332+++ b/grsecurity/grsec_mount.c
65333@@ -0,0 +1,62 @@
65334+#include <linux/kernel.h>
65335+#include <linux/sched.h>
65336+#include <linux/mount.h>
65337+#include <linux/grsecurity.h>
65338+#include <linux/grinternal.h>
65339+
65340+void
65341+gr_log_remount(const char *devname, const int retval)
65342+{
65343+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
65344+ if (grsec_enable_mount && (retval >= 0))
65345+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
65346+#endif
65347+ return;
65348+}
65349+
65350+void
65351+gr_log_unmount(const char *devname, const int retval)
65352+{
65353+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
65354+ if (grsec_enable_mount && (retval >= 0))
65355+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
65356+#endif
65357+ return;
65358+}
65359+
65360+void
65361+gr_log_mount(const char *from, const char *to, const int retval)
65362+{
65363+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
65364+ if (grsec_enable_mount && (retval >= 0))
65365+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
65366+#endif
65367+ return;
65368+}
65369+
65370+int
65371+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
65372+{
65373+#ifdef CONFIG_GRKERNSEC_ROFS
65374+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
65375+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
65376+ return -EPERM;
65377+ } else
65378+ return 0;
65379+#endif
65380+ return 0;
65381+}
65382+
65383+int
65384+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
65385+{
65386+#ifdef CONFIG_GRKERNSEC_ROFS
65387+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
65388+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
65389+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
65390+ return -EPERM;
65391+ } else
65392+ return 0;
65393+#endif
65394+ return 0;
65395+}
65396diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
65397new file mode 100644
65398index 0000000..a3b12a0
65399--- /dev/null
65400+++ b/grsecurity/grsec_pax.c
65401@@ -0,0 +1,36 @@
65402+#include <linux/kernel.h>
65403+#include <linux/sched.h>
65404+#include <linux/mm.h>
65405+#include <linux/file.h>
65406+#include <linux/grinternal.h>
65407+#include <linux/grsecurity.h>
65408+
65409+void
65410+gr_log_textrel(struct vm_area_struct * vma)
65411+{
65412+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
65413+ if (grsec_enable_audit_textrel)
65414+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
65415+#endif
65416+ return;
65417+}
65418+
65419+void
65420+gr_log_rwxmmap(struct file *file)
65421+{
65422+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
65423+ if (grsec_enable_log_rwxmaps)
65424+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
65425+#endif
65426+ return;
65427+}
65428+
65429+void
65430+gr_log_rwxmprotect(struct file *file)
65431+{
65432+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
65433+ if (grsec_enable_log_rwxmaps)
65434+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
65435+#endif
65436+ return;
65437+}
65438diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
65439new file mode 100644
65440index 0000000..f7f29aa
65441--- /dev/null
65442+++ b/grsecurity/grsec_ptrace.c
65443@@ -0,0 +1,30 @@
65444+#include <linux/kernel.h>
65445+#include <linux/sched.h>
65446+#include <linux/grinternal.h>
65447+#include <linux/security.h>
65448+
65449+void
65450+gr_audit_ptrace(struct task_struct *task)
65451+{
65452+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
65453+ if (grsec_enable_audit_ptrace)
65454+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
65455+#endif
65456+ return;
65457+}
65458+
65459+int
65460+gr_ptrace_readexec(struct file *file, int unsafe_flags)
65461+{
65462+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
65463+ const struct dentry *dentry = file->f_path.dentry;
65464+ const struct vfsmount *mnt = file->f_path.mnt;
65465+
65466+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
65467+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
65468+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
65469+ return -EACCES;
65470+ }
65471+#endif
65472+ return 0;
65473+}
65474diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
65475new file mode 100644
65476index 0000000..e09715a
65477--- /dev/null
65478+++ b/grsecurity/grsec_sig.c
65479@@ -0,0 +1,222 @@
65480+#include <linux/kernel.h>
65481+#include <linux/sched.h>
65482+#include <linux/delay.h>
65483+#include <linux/grsecurity.h>
65484+#include <linux/grinternal.h>
65485+#include <linux/hardirq.h>
65486+
65487+char *signames[] = {
65488+ [SIGSEGV] = "Segmentation fault",
65489+ [SIGILL] = "Illegal instruction",
65490+ [SIGABRT] = "Abort",
65491+ [SIGBUS] = "Invalid alignment/Bus error"
65492+};
65493+
65494+void
65495+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
65496+{
65497+#ifdef CONFIG_GRKERNSEC_SIGNAL
65498+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
65499+ (sig == SIGABRT) || (sig == SIGBUS))) {
65500+ if (task_pid_nr(t) == task_pid_nr(current)) {
65501+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
65502+ } else {
65503+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
65504+ }
65505+ }
65506+#endif
65507+ return;
65508+}
65509+
65510+int
65511+gr_handle_signal(const struct task_struct *p, const int sig)
65512+{
65513+#ifdef CONFIG_GRKERNSEC
65514+ /* ignore the 0 signal for protected task checks */
65515+ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
65516+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
65517+ return -EPERM;
65518+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
65519+ return -EPERM;
65520+ }
65521+#endif
65522+ return 0;
65523+}
65524+
65525+#ifdef CONFIG_GRKERNSEC
65526+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
65527+
65528+int gr_fake_force_sig(int sig, struct task_struct *t)
65529+{
65530+ unsigned long int flags;
65531+ int ret, blocked, ignored;
65532+ struct k_sigaction *action;
65533+
65534+ spin_lock_irqsave(&t->sighand->siglock, flags);
65535+ action = &t->sighand->action[sig-1];
65536+ ignored = action->sa.sa_handler == SIG_IGN;
65537+ blocked = sigismember(&t->blocked, sig);
65538+ if (blocked || ignored) {
65539+ action->sa.sa_handler = SIG_DFL;
65540+ if (blocked) {
65541+ sigdelset(&t->blocked, sig);
65542+ recalc_sigpending_and_wake(t);
65543+ }
65544+ }
65545+ if (action->sa.sa_handler == SIG_DFL)
65546+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
65547+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
65548+
65549+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
65550+
65551+ return ret;
65552+}
65553+#endif
65554+
65555+#ifdef CONFIG_GRKERNSEC_BRUTE
65556+#define GR_USER_BAN_TIME (15 * 60)
65557+#define GR_DAEMON_BRUTE_TIME (30 * 60)
65558+
65559+static int __get_dumpable(unsigned long mm_flags)
65560+{
65561+ int ret;
65562+
65563+ ret = mm_flags & MMF_DUMPABLE_MASK;
65564+ return (ret >= 2) ? 2 : ret;
65565+}
65566+#endif
65567+
65568+void gr_handle_brute_attach(unsigned long mm_flags)
65569+{
65570+#ifdef CONFIG_GRKERNSEC_BRUTE
65571+ struct task_struct *p = current;
65572+ kuid_t uid = GLOBAL_ROOT_UID;
65573+ int daemon = 0;
65574+
65575+ if (!grsec_enable_brute)
65576+ return;
65577+
65578+ rcu_read_lock();
65579+ read_lock(&tasklist_lock);
65580+ read_lock(&grsec_exec_file_lock);
65581+ if (p->real_parent && p->real_parent->exec_file == p->exec_file) {
65582+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
65583+ p->real_parent->brute = 1;
65584+ daemon = 1;
65585+ } else {
65586+ const struct cred *cred = __task_cred(p), *cred2;
65587+ struct task_struct *tsk, *tsk2;
65588+
65589+ if (!__get_dumpable(mm_flags) && gr_is_global_nonroot(cred->uid)) {
65590+ struct user_struct *user;
65591+
65592+ uid = cred->uid;
65593+
65594+ /* this is put upon execution past expiration */
65595+ user = find_user(uid);
65596+ if (user == NULL)
65597+ goto unlock;
65598+ user->banned = 1;
65599+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
65600+ if (user->ban_expires == ~0UL)
65601+ user->ban_expires--;
65602+
65603+ do_each_thread(tsk2, tsk) {
65604+ cred2 = __task_cred(tsk);
65605+ if (tsk != p && uid_eq(cred2->uid, uid))
65606+ gr_fake_force_sig(SIGKILL, tsk);
65607+ } while_each_thread(tsk2, tsk);
65608+ }
65609+ }
65610+unlock:
65611+ read_unlock(&grsec_exec_file_lock);
65612+ read_unlock(&tasklist_lock);
65613+ rcu_read_unlock();
65614+
65615+ if (gr_is_global_nonroot(uid))
65616+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n",
65617+ GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
65618+ else if (daemon)
65619+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
65620+
65621+#endif
65622+ return;
65623+}
65624+
65625+void gr_handle_brute_check(void)
65626+{
65627+#ifdef CONFIG_GRKERNSEC_BRUTE
65628+ struct task_struct *p = current;
65629+
65630+ if (unlikely(p->brute)) {
65631+ if (!grsec_enable_brute)
65632+ p->brute = 0;
65633+ else if (time_before(get_seconds(), p->brute_expires))
65634+ msleep(30 * 1000);
65635+ }
65636+#endif
65637+ return;
65638+}
65639+
65640+void gr_handle_kernel_exploit(void)
65641+{
65642+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
65643+ const struct cred *cred;
65644+ struct task_struct *tsk, *tsk2;
65645+ struct user_struct *user;
65646+ kuid_t uid;
65647+
65648+ if (in_irq() || in_serving_softirq() || in_nmi())
65649+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
65650+
65651+ uid = current_uid();
65652+
65653+ if (gr_is_global_root(uid))
65654+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
65655+ else {
65656+ /* kill all the processes of this user, hold a reference
65657+ to their creds struct, and prevent them from creating
65658+ another process until system reset
65659+ */
65660+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
65661+ GR_GLOBAL_UID(uid));
65662+ /* we intentionally leak this ref */
65663+ user = get_uid(current->cred->user);
65664+ if (user) {
65665+ user->banned = 1;
65666+ user->ban_expires = ~0UL;
65667+ }
65668+
65669+ read_lock(&tasklist_lock);
65670+ do_each_thread(tsk2, tsk) {
65671+ cred = __task_cred(tsk);
65672+ if (uid_eq(cred->uid, uid))
65673+ gr_fake_force_sig(SIGKILL, tsk);
65674+ } while_each_thread(tsk2, tsk);
65675+ read_unlock(&tasklist_lock);
65676+ }
65677+#endif
65678+}
65679+
65680+int __gr_process_user_ban(struct user_struct *user)
65681+{
65682+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
65683+ if (unlikely(user->banned)) {
65684+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
65685+ user->banned = 0;
65686+ user->ban_expires = 0;
65687+ free_uid(user);
65688+ } else
65689+ return -EPERM;
65690+ }
65691+#endif
65692+ return 0;
65693+}
65694+
65695+int gr_process_user_ban(void)
65696+{
65697+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
65698+ return __gr_process_user_ban(current->cred->user);
65699+#endif
65700+ return 0;
65701+}
65702diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
65703new file mode 100644
65704index 0000000..4030d57
65705--- /dev/null
65706+++ b/grsecurity/grsec_sock.c
65707@@ -0,0 +1,244 @@
65708+#include <linux/kernel.h>
65709+#include <linux/module.h>
65710+#include <linux/sched.h>
65711+#include <linux/file.h>
65712+#include <linux/net.h>
65713+#include <linux/in.h>
65714+#include <linux/ip.h>
65715+#include <net/sock.h>
65716+#include <net/inet_sock.h>
65717+#include <linux/grsecurity.h>
65718+#include <linux/grinternal.h>
65719+#include <linux/gracl.h>
65720+
65721+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
65722+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
65723+
65724+EXPORT_SYMBOL(gr_search_udp_recvmsg);
65725+EXPORT_SYMBOL(gr_search_udp_sendmsg);
65726+
65727+#ifdef CONFIG_UNIX_MODULE
65728+EXPORT_SYMBOL(gr_acl_handle_unix);
65729+EXPORT_SYMBOL(gr_acl_handle_mknod);
65730+EXPORT_SYMBOL(gr_handle_chroot_unix);
65731+EXPORT_SYMBOL(gr_handle_create);
65732+#endif
65733+
65734+#ifdef CONFIG_GRKERNSEC
65735+#define gr_conn_table_size 32749
65736+struct conn_table_entry {
65737+ struct conn_table_entry *next;
65738+ struct signal_struct *sig;
65739+};
65740+
65741+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
65742+DEFINE_SPINLOCK(gr_conn_table_lock);
65743+
65744+extern const char * gr_socktype_to_name(unsigned char type);
65745+extern const char * gr_proto_to_name(unsigned char proto);
65746+extern const char * gr_sockfamily_to_name(unsigned char family);
65747+
65748+static __inline__ int
65749+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
65750+{
65751+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
65752+}
65753+
65754+static __inline__ int
65755+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
65756+ __u16 sport, __u16 dport)
65757+{
65758+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
65759+ sig->gr_sport == sport && sig->gr_dport == dport))
65760+ return 1;
65761+ else
65762+ return 0;
65763+}
65764+
65765+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
65766+{
65767+ struct conn_table_entry **match;
65768+ unsigned int index;
65769+
65770+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
65771+ sig->gr_sport, sig->gr_dport,
65772+ gr_conn_table_size);
65773+
65774+ newent->sig = sig;
65775+
65776+ match = &gr_conn_table[index];
65777+ newent->next = *match;
65778+ *match = newent;
65779+
65780+ return;
65781+}
65782+
65783+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
65784+{
65785+ struct conn_table_entry *match, *last = NULL;
65786+ unsigned int index;
65787+
65788+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
65789+ sig->gr_sport, sig->gr_dport,
65790+ gr_conn_table_size);
65791+
65792+ match = gr_conn_table[index];
65793+ while (match && !conn_match(match->sig,
65794+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
65795+ sig->gr_dport)) {
65796+ last = match;
65797+ match = match->next;
65798+ }
65799+
65800+ if (match) {
65801+ if (last)
65802+ last->next = match->next;
65803+ else
65804+ gr_conn_table[index] = NULL;
65805+ kfree(match);
65806+ }
65807+
65808+ return;
65809+}
65810+
65811+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
65812+ __u16 sport, __u16 dport)
65813+{
65814+ struct conn_table_entry *match;
65815+ unsigned int index;
65816+
65817+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
65818+
65819+ match = gr_conn_table[index];
65820+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
65821+ match = match->next;
65822+
65823+ if (match)
65824+ return match->sig;
65825+ else
65826+ return NULL;
65827+}
65828+
65829+#endif
65830+
65831+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
65832+{
65833+#ifdef CONFIG_GRKERNSEC
65834+ struct signal_struct *sig = task->signal;
65835+ struct conn_table_entry *newent;
65836+
65837+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
65838+ if (newent == NULL)
65839+ return;
65840+ /* no bh lock needed since we are called with bh disabled */
65841+ spin_lock(&gr_conn_table_lock);
65842+ gr_del_task_from_ip_table_nolock(sig);
65843+ sig->gr_saddr = inet->inet_rcv_saddr;
65844+ sig->gr_daddr = inet->inet_daddr;
65845+ sig->gr_sport = inet->inet_sport;
65846+ sig->gr_dport = inet->inet_dport;
65847+ gr_add_to_task_ip_table_nolock(sig, newent);
65848+ spin_unlock(&gr_conn_table_lock);
65849+#endif
65850+ return;
65851+}
65852+
65853+void gr_del_task_from_ip_table(struct task_struct *task)
65854+{
65855+#ifdef CONFIG_GRKERNSEC
65856+ spin_lock_bh(&gr_conn_table_lock);
65857+ gr_del_task_from_ip_table_nolock(task->signal);
65858+ spin_unlock_bh(&gr_conn_table_lock);
65859+#endif
65860+ return;
65861+}
65862+
65863+void
65864+gr_attach_curr_ip(const struct sock *sk)
65865+{
65866+#ifdef CONFIG_GRKERNSEC
65867+ struct signal_struct *p, *set;
65868+ const struct inet_sock *inet = inet_sk(sk);
65869+
65870+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
65871+ return;
65872+
65873+ set = current->signal;
65874+
65875+ spin_lock_bh(&gr_conn_table_lock);
65876+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
65877+ inet->inet_dport, inet->inet_sport);
65878+ if (unlikely(p != NULL)) {
65879+ set->curr_ip = p->curr_ip;
65880+ set->used_accept = 1;
65881+ gr_del_task_from_ip_table_nolock(p);
65882+ spin_unlock_bh(&gr_conn_table_lock);
65883+ return;
65884+ }
65885+ spin_unlock_bh(&gr_conn_table_lock);
65886+
65887+ set->curr_ip = inet->inet_daddr;
65888+ set->used_accept = 1;
65889+#endif
65890+ return;
65891+}
65892+
65893+int
65894+gr_handle_sock_all(const int family, const int type, const int protocol)
65895+{
65896+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
65897+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
65898+ (family != AF_UNIX)) {
65899+ if (family == AF_INET)
65900+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
65901+ else
65902+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
65903+ return -EACCES;
65904+ }
65905+#endif
65906+ return 0;
65907+}
65908+
65909+int
65910+gr_handle_sock_server(const struct sockaddr *sck)
65911+{
65912+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
65913+ if (grsec_enable_socket_server &&
65914+ in_group_p(grsec_socket_server_gid) &&
65915+ sck && (sck->sa_family != AF_UNIX) &&
65916+ (sck->sa_family != AF_LOCAL)) {
65917+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
65918+ return -EACCES;
65919+ }
65920+#endif
65921+ return 0;
65922+}
65923+
65924+int
65925+gr_handle_sock_server_other(const struct sock *sck)
65926+{
65927+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
65928+ if (grsec_enable_socket_server &&
65929+ in_group_p(grsec_socket_server_gid) &&
65930+ sck && (sck->sk_family != AF_UNIX) &&
65931+ (sck->sk_family != AF_LOCAL)) {
65932+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
65933+ return -EACCES;
65934+ }
65935+#endif
65936+ return 0;
65937+}
65938+
65939+int
65940+gr_handle_sock_client(const struct sockaddr *sck)
65941+{
65942+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
65943+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
65944+ sck && (sck->sa_family != AF_UNIX) &&
65945+ (sck->sa_family != AF_LOCAL)) {
65946+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
65947+ return -EACCES;
65948+ }
65949+#endif
65950+ return 0;
65951+}
65952diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
65953new file mode 100644
65954index 0000000..f55ef0f
65955--- /dev/null
65956+++ b/grsecurity/grsec_sysctl.c
65957@@ -0,0 +1,469 @@
65958+#include <linux/kernel.h>
65959+#include <linux/sched.h>
65960+#include <linux/sysctl.h>
65961+#include <linux/grsecurity.h>
65962+#include <linux/grinternal.h>
65963+
65964+int
65965+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
65966+{
65967+#ifdef CONFIG_GRKERNSEC_SYSCTL
65968+ if (dirname == NULL || name == NULL)
65969+ return 0;
65970+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
65971+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
65972+ return -EACCES;
65973+ }
65974+#endif
65975+ return 0;
65976+}
65977+
65978+#ifdef CONFIG_GRKERNSEC_ROFS
65979+static int __maybe_unused one = 1;
65980+#endif
65981+
65982+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
65983+struct ctl_table grsecurity_table[] = {
65984+#ifdef CONFIG_GRKERNSEC_SYSCTL
65985+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
65986+#ifdef CONFIG_GRKERNSEC_IO
65987+ {
65988+ .procname = "disable_priv_io",
65989+ .data = &grsec_disable_privio,
65990+ .maxlen = sizeof(int),
65991+ .mode = 0600,
65992+ .proc_handler = &proc_dointvec,
65993+ },
65994+#endif
65995+#endif
65996+#ifdef CONFIG_GRKERNSEC_LINK
65997+ {
65998+ .procname = "linking_restrictions",
65999+ .data = &grsec_enable_link,
66000+ .maxlen = sizeof(int),
66001+ .mode = 0600,
66002+ .proc_handler = &proc_dointvec,
66003+ },
66004+#endif
66005+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
66006+ {
66007+ .procname = "enforce_symlinksifowner",
66008+ .data = &grsec_enable_symlinkown,
66009+ .maxlen = sizeof(int),
66010+ .mode = 0600,
66011+ .proc_handler = &proc_dointvec,
66012+ },
66013+ {
66014+ .procname = "symlinkown_gid",
66015+ .data = &grsec_symlinkown_gid,
66016+ .maxlen = sizeof(int),
66017+ .mode = 0600,
66018+ .proc_handler = &proc_dointvec,
66019+ },
66020+#endif
66021+#ifdef CONFIG_GRKERNSEC_BRUTE
66022+ {
66023+ .procname = "deter_bruteforce",
66024+ .data = &grsec_enable_brute,
66025+ .maxlen = sizeof(int),
66026+ .mode = 0600,
66027+ .proc_handler = &proc_dointvec,
66028+ },
66029+#endif
66030+#ifdef CONFIG_GRKERNSEC_FIFO
66031+ {
66032+ .procname = "fifo_restrictions",
66033+ .data = &grsec_enable_fifo,
66034+ .maxlen = sizeof(int),
66035+ .mode = 0600,
66036+ .proc_handler = &proc_dointvec,
66037+ },
66038+#endif
66039+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
66040+ {
66041+ .procname = "ptrace_readexec",
66042+ .data = &grsec_enable_ptrace_readexec,
66043+ .maxlen = sizeof(int),
66044+ .mode = 0600,
66045+ .proc_handler = &proc_dointvec,
66046+ },
66047+#endif
66048+#ifdef CONFIG_GRKERNSEC_SETXID
66049+ {
66050+ .procname = "consistent_setxid",
66051+ .data = &grsec_enable_setxid,
66052+ .maxlen = sizeof(int),
66053+ .mode = 0600,
66054+ .proc_handler = &proc_dointvec,
66055+ },
66056+#endif
66057+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66058+ {
66059+ .procname = "ip_blackhole",
66060+ .data = &grsec_enable_blackhole,
66061+ .maxlen = sizeof(int),
66062+ .mode = 0600,
66063+ .proc_handler = &proc_dointvec,
66064+ },
66065+ {
66066+ .procname = "lastack_retries",
66067+ .data = &grsec_lastack_retries,
66068+ .maxlen = sizeof(int),
66069+ .mode = 0600,
66070+ .proc_handler = &proc_dointvec,
66071+ },
66072+#endif
66073+#ifdef CONFIG_GRKERNSEC_EXECLOG
66074+ {
66075+ .procname = "exec_logging",
66076+ .data = &grsec_enable_execlog,
66077+ .maxlen = sizeof(int),
66078+ .mode = 0600,
66079+ .proc_handler = &proc_dointvec,
66080+ },
66081+#endif
66082+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
66083+ {
66084+ .procname = "rwxmap_logging",
66085+ .data = &grsec_enable_log_rwxmaps,
66086+ .maxlen = sizeof(int),
66087+ .mode = 0600,
66088+ .proc_handler = &proc_dointvec,
66089+ },
66090+#endif
66091+#ifdef CONFIG_GRKERNSEC_SIGNAL
66092+ {
66093+ .procname = "signal_logging",
66094+ .data = &grsec_enable_signal,
66095+ .maxlen = sizeof(int),
66096+ .mode = 0600,
66097+ .proc_handler = &proc_dointvec,
66098+ },
66099+#endif
66100+#ifdef CONFIG_GRKERNSEC_FORKFAIL
66101+ {
66102+ .procname = "forkfail_logging",
66103+ .data = &grsec_enable_forkfail,
66104+ .maxlen = sizeof(int),
66105+ .mode = 0600,
66106+ .proc_handler = &proc_dointvec,
66107+ },
66108+#endif
66109+#ifdef CONFIG_GRKERNSEC_TIME
66110+ {
66111+ .procname = "timechange_logging",
66112+ .data = &grsec_enable_time,
66113+ .maxlen = sizeof(int),
66114+ .mode = 0600,
66115+ .proc_handler = &proc_dointvec,
66116+ },
66117+#endif
66118+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
66119+ {
66120+ .procname = "chroot_deny_shmat",
66121+ .data = &grsec_enable_chroot_shmat,
66122+ .maxlen = sizeof(int),
66123+ .mode = 0600,
66124+ .proc_handler = &proc_dointvec,
66125+ },
66126+#endif
66127+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
66128+ {
66129+ .procname = "chroot_deny_unix",
66130+ .data = &grsec_enable_chroot_unix,
66131+ .maxlen = sizeof(int),
66132+ .mode = 0600,
66133+ .proc_handler = &proc_dointvec,
66134+ },
66135+#endif
66136+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
66137+ {
66138+ .procname = "chroot_deny_mount",
66139+ .data = &grsec_enable_chroot_mount,
66140+ .maxlen = sizeof(int),
66141+ .mode = 0600,
66142+ .proc_handler = &proc_dointvec,
66143+ },
66144+#endif
66145+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
66146+ {
66147+ .procname = "chroot_deny_fchdir",
66148+ .data = &grsec_enable_chroot_fchdir,
66149+ .maxlen = sizeof(int),
66150+ .mode = 0600,
66151+ .proc_handler = &proc_dointvec,
66152+ },
66153+#endif
66154+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
66155+ {
66156+ .procname = "chroot_deny_chroot",
66157+ .data = &grsec_enable_chroot_double,
66158+ .maxlen = sizeof(int),
66159+ .mode = 0600,
66160+ .proc_handler = &proc_dointvec,
66161+ },
66162+#endif
66163+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
66164+ {
66165+ .procname = "chroot_deny_pivot",
66166+ .data = &grsec_enable_chroot_pivot,
66167+ .maxlen = sizeof(int),
66168+ .mode = 0600,
66169+ .proc_handler = &proc_dointvec,
66170+ },
66171+#endif
66172+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
66173+ {
66174+ .procname = "chroot_enforce_chdir",
66175+ .data = &grsec_enable_chroot_chdir,
66176+ .maxlen = sizeof(int),
66177+ .mode = 0600,
66178+ .proc_handler = &proc_dointvec,
66179+ },
66180+#endif
66181+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
66182+ {
66183+ .procname = "chroot_deny_chmod",
66184+ .data = &grsec_enable_chroot_chmod,
66185+ .maxlen = sizeof(int),
66186+ .mode = 0600,
66187+ .proc_handler = &proc_dointvec,
66188+ },
66189+#endif
66190+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
66191+ {
66192+ .procname = "chroot_deny_mknod",
66193+ .data = &grsec_enable_chroot_mknod,
66194+ .maxlen = sizeof(int),
66195+ .mode = 0600,
66196+ .proc_handler = &proc_dointvec,
66197+ },
66198+#endif
66199+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
66200+ {
66201+ .procname = "chroot_restrict_nice",
66202+ .data = &grsec_enable_chroot_nice,
66203+ .maxlen = sizeof(int),
66204+ .mode = 0600,
66205+ .proc_handler = &proc_dointvec,
66206+ },
66207+#endif
66208+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
66209+ {
66210+ .procname = "chroot_execlog",
66211+ .data = &grsec_enable_chroot_execlog,
66212+ .maxlen = sizeof(int),
66213+ .mode = 0600,
66214+ .proc_handler = &proc_dointvec,
66215+ },
66216+#endif
66217+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
66218+ {
66219+ .procname = "chroot_caps",
66220+ .data = &grsec_enable_chroot_caps,
66221+ .maxlen = sizeof(int),
66222+ .mode = 0600,
66223+ .proc_handler = &proc_dointvec,
66224+ },
66225+#endif
66226+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
66227+ {
66228+ .procname = "chroot_deny_sysctl",
66229+ .data = &grsec_enable_chroot_sysctl,
66230+ .maxlen = sizeof(int),
66231+ .mode = 0600,
66232+ .proc_handler = &proc_dointvec,
66233+ },
66234+#endif
66235+#ifdef CONFIG_GRKERNSEC_TPE
66236+ {
66237+ .procname = "tpe",
66238+ .data = &grsec_enable_tpe,
66239+ .maxlen = sizeof(int),
66240+ .mode = 0600,
66241+ .proc_handler = &proc_dointvec,
66242+ },
66243+ {
66244+ .procname = "tpe_gid",
66245+ .data = &grsec_tpe_gid,
66246+ .maxlen = sizeof(int),
66247+ .mode = 0600,
66248+ .proc_handler = &proc_dointvec,
66249+ },
66250+#endif
66251+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
66252+ {
66253+ .procname = "tpe_invert",
66254+ .data = &grsec_enable_tpe_invert,
66255+ .maxlen = sizeof(int),
66256+ .mode = 0600,
66257+ .proc_handler = &proc_dointvec,
66258+ },
66259+#endif
66260+#ifdef CONFIG_GRKERNSEC_TPE_ALL
66261+ {
66262+ .procname = "tpe_restrict_all",
66263+ .data = &grsec_enable_tpe_all,
66264+ .maxlen = sizeof(int),
66265+ .mode = 0600,
66266+ .proc_handler = &proc_dointvec,
66267+ },
66268+#endif
66269+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
66270+ {
66271+ .procname = "socket_all",
66272+ .data = &grsec_enable_socket_all,
66273+ .maxlen = sizeof(int),
66274+ .mode = 0600,
66275+ .proc_handler = &proc_dointvec,
66276+ },
66277+ {
66278+ .procname = "socket_all_gid",
66279+ .data = &grsec_socket_all_gid,
66280+ .maxlen = sizeof(int),
66281+ .mode = 0600,
66282+ .proc_handler = &proc_dointvec,
66283+ },
66284+#endif
66285+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
66286+ {
66287+ .procname = "socket_client",
66288+ .data = &grsec_enable_socket_client,
66289+ .maxlen = sizeof(int),
66290+ .mode = 0600,
66291+ .proc_handler = &proc_dointvec,
66292+ },
66293+ {
66294+ .procname = "socket_client_gid",
66295+ .data = &grsec_socket_client_gid,
66296+ .maxlen = sizeof(int),
66297+ .mode = 0600,
66298+ .proc_handler = &proc_dointvec,
66299+ },
66300+#endif
66301+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
66302+ {
66303+ .procname = "socket_server",
66304+ .data = &grsec_enable_socket_server,
66305+ .maxlen = sizeof(int),
66306+ .mode = 0600,
66307+ .proc_handler = &proc_dointvec,
66308+ },
66309+ {
66310+ .procname = "socket_server_gid",
66311+ .data = &grsec_socket_server_gid,
66312+ .maxlen = sizeof(int),
66313+ .mode = 0600,
66314+ .proc_handler = &proc_dointvec,
66315+ },
66316+#endif
66317+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
66318+ {
66319+ .procname = "audit_group",
66320+ .data = &grsec_enable_group,
66321+ .maxlen = sizeof(int),
66322+ .mode = 0600,
66323+ .proc_handler = &proc_dointvec,
66324+ },
66325+ {
66326+ .procname = "audit_gid",
66327+ .data = &grsec_audit_gid,
66328+ .maxlen = sizeof(int),
66329+ .mode = 0600,
66330+ .proc_handler = &proc_dointvec,
66331+ },
66332+#endif
66333+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
66334+ {
66335+ .procname = "audit_chdir",
66336+ .data = &grsec_enable_chdir,
66337+ .maxlen = sizeof(int),
66338+ .mode = 0600,
66339+ .proc_handler = &proc_dointvec,
66340+ },
66341+#endif
66342+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
66343+ {
66344+ .procname = "audit_mount",
66345+ .data = &grsec_enable_mount,
66346+ .maxlen = sizeof(int),
66347+ .mode = 0600,
66348+ .proc_handler = &proc_dointvec,
66349+ },
66350+#endif
66351+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
66352+ {
66353+ .procname = "audit_textrel",
66354+ .data = &grsec_enable_audit_textrel,
66355+ .maxlen = sizeof(int),
66356+ .mode = 0600,
66357+ .proc_handler = &proc_dointvec,
66358+ },
66359+#endif
66360+#ifdef CONFIG_GRKERNSEC_DMESG
66361+ {
66362+ .procname = "dmesg",
66363+ .data = &grsec_enable_dmesg,
66364+ .maxlen = sizeof(int),
66365+ .mode = 0600,
66366+ .proc_handler = &proc_dointvec,
66367+ },
66368+#endif
66369+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
66370+ {
66371+ .procname = "chroot_findtask",
66372+ .data = &grsec_enable_chroot_findtask,
66373+ .maxlen = sizeof(int),
66374+ .mode = 0600,
66375+ .proc_handler = &proc_dointvec,
66376+ },
66377+#endif
66378+#ifdef CONFIG_GRKERNSEC_RESLOG
66379+ {
66380+ .procname = "resource_logging",
66381+ .data = &grsec_resource_logging,
66382+ .maxlen = sizeof(int),
66383+ .mode = 0600,
66384+ .proc_handler = &proc_dointvec,
66385+ },
66386+#endif
66387+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
66388+ {
66389+ .procname = "audit_ptrace",
66390+ .data = &grsec_enable_audit_ptrace,
66391+ .maxlen = sizeof(int),
66392+ .mode = 0600,
66393+ .proc_handler = &proc_dointvec,
66394+ },
66395+#endif
66396+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
66397+ {
66398+ .procname = "harden_ptrace",
66399+ .data = &grsec_enable_harden_ptrace,
66400+ .maxlen = sizeof(int),
66401+ .mode = 0600,
66402+ .proc_handler = &proc_dointvec,
66403+ },
66404+#endif
66405+ {
66406+ .procname = "grsec_lock",
66407+ .data = &grsec_lock,
66408+ .maxlen = sizeof(int),
66409+ .mode = 0600,
66410+ .proc_handler = &proc_dointvec,
66411+ },
66412+#endif
66413+#ifdef CONFIG_GRKERNSEC_ROFS
66414+ {
66415+ .procname = "romount_protect",
66416+ .data = &grsec_enable_rofs,
66417+ .maxlen = sizeof(int),
66418+ .mode = 0600,
66419+ .proc_handler = &proc_dointvec_minmax,
66420+ .extra1 = &one,
66421+ .extra2 = &one,
66422+ },
66423+#endif
66424+ { }
66425+};
66426+#endif
66427diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
66428new file mode 100644
66429index 0000000..0dc13c3
66430--- /dev/null
66431+++ b/grsecurity/grsec_time.c
66432@@ -0,0 +1,16 @@
66433+#include <linux/kernel.h>
66434+#include <linux/sched.h>
66435+#include <linux/grinternal.h>
66436+#include <linux/module.h>
66437+
66438+void
66439+gr_log_timechange(void)
66440+{
66441+#ifdef CONFIG_GRKERNSEC_TIME
66442+ if (grsec_enable_time)
66443+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
66444+#endif
66445+ return;
66446+}
66447+
66448+EXPORT_SYMBOL(gr_log_timechange);
66449diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
66450new file mode 100644
66451index 0000000..ee57dcf
66452--- /dev/null
66453+++ b/grsecurity/grsec_tpe.c
66454@@ -0,0 +1,73 @@
66455+#include <linux/kernel.h>
66456+#include <linux/sched.h>
66457+#include <linux/file.h>
66458+#include <linux/fs.h>
66459+#include <linux/grinternal.h>
66460+
66461+extern int gr_acl_tpe_check(void);
66462+
66463+int
66464+gr_tpe_allow(const struct file *file)
66465+{
66466+#ifdef CONFIG_GRKERNSEC
66467+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
66468+ const struct cred *cred = current_cred();
66469+ char *msg = NULL;
66470+ char *msg2 = NULL;
66471+
66472+ // never restrict root
66473+ if (gr_is_global_root(cred->uid))
66474+ return 1;
66475+
66476+ if (grsec_enable_tpe) {
66477+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
66478+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
66479+ msg = "not being in trusted group";
66480+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
66481+ msg = "being in untrusted group";
66482+#else
66483+ if (in_group_p(grsec_tpe_gid))
66484+ msg = "being in untrusted group";
66485+#endif
66486+ }
66487+ if (!msg && gr_acl_tpe_check())
66488+ msg = "being in untrusted role";
66489+
66490+ // not in any affected group/role
66491+ if (!msg)
66492+ goto next_check;
66493+
66494+ if (gr_is_global_nonroot(inode->i_uid))
66495+ msg2 = "file in non-root-owned directory";
66496+ else if (inode->i_mode & S_IWOTH)
66497+ msg2 = "file in world-writable directory";
66498+ else if (inode->i_mode & S_IWGRP)
66499+ msg2 = "file in group-writable directory";
66500+
66501+ if (msg && msg2) {
66502+ char fullmsg[70] = {0};
66503+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
66504+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
66505+ return 0;
66506+ }
66507+ msg = NULL;
66508+next_check:
66509+#ifdef CONFIG_GRKERNSEC_TPE_ALL
66510+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
66511+ return 1;
66512+
66513+ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
66514+ msg = "directory not owned by user";
66515+ else if (inode->i_mode & S_IWOTH)
66516+ msg = "file in world-writable directory";
66517+ else if (inode->i_mode & S_IWGRP)
66518+ msg = "file in group-writable directory";
66519+
66520+ if (msg) {
66521+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
66522+ return 0;
66523+ }
66524+#endif
66525+#endif
66526+ return 1;
66527+}
66528diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
66529new file mode 100644
66530index 0000000..9f7b1ac
66531--- /dev/null
66532+++ b/grsecurity/grsum.c
66533@@ -0,0 +1,61 @@
66534+#include <linux/err.h>
66535+#include <linux/kernel.h>
66536+#include <linux/sched.h>
66537+#include <linux/mm.h>
66538+#include <linux/scatterlist.h>
66539+#include <linux/crypto.h>
66540+#include <linux/gracl.h>
66541+
66542+
66543+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
66544+#error "crypto and sha256 must be built into the kernel"
66545+#endif
66546+
66547+int
66548+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
66549+{
66550+ char *p;
66551+ struct crypto_hash *tfm;
66552+ struct hash_desc desc;
66553+ struct scatterlist sg;
66554+ unsigned char temp_sum[GR_SHA_LEN];
66555+ volatile int retval = 0;
66556+ volatile int dummy = 0;
66557+ unsigned int i;
66558+
66559+ sg_init_table(&sg, 1);
66560+
66561+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
66562+ if (IS_ERR(tfm)) {
66563+ /* should never happen, since sha256 should be built in */
66564+ return 1;
66565+ }
66566+
66567+ desc.tfm = tfm;
66568+ desc.flags = 0;
66569+
66570+ crypto_hash_init(&desc);
66571+
66572+ p = salt;
66573+ sg_set_buf(&sg, p, GR_SALT_LEN);
66574+ crypto_hash_update(&desc, &sg, sg.length);
66575+
66576+ p = entry->pw;
66577+ sg_set_buf(&sg, p, strlen(p));
66578+
66579+ crypto_hash_update(&desc, &sg, sg.length);
66580+
66581+ crypto_hash_final(&desc, temp_sum);
66582+
66583+ memset(entry->pw, 0, GR_PW_LEN);
66584+
66585+ for (i = 0; i < GR_SHA_LEN; i++)
66586+ if (sum[i] != temp_sum[i])
66587+ retval = 1;
66588+ else
66589+ dummy = 1; // waste a cycle
66590+
66591+ crypto_free_hash(tfm);
66592+
66593+ return retval;
66594+}
66595diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
66596index 77ff547..181834f 100644
66597--- a/include/asm-generic/4level-fixup.h
66598+++ b/include/asm-generic/4level-fixup.h
66599@@ -13,8 +13,10 @@
66600 #define pmd_alloc(mm, pud, address) \
66601 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
66602 NULL: pmd_offset(pud, address))
66603+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
66604
66605 #define pud_alloc(mm, pgd, address) (pgd)
66606+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
66607 #define pud_offset(pgd, start) (pgd)
66608 #define pud_none(pud) 0
66609 #define pud_bad(pud) 0
66610diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
66611index b7babf0..04ad282 100644
66612--- a/include/asm-generic/atomic-long.h
66613+++ b/include/asm-generic/atomic-long.h
66614@@ -22,6 +22,12 @@
66615
66616 typedef atomic64_t atomic_long_t;
66617
66618+#ifdef CONFIG_PAX_REFCOUNT
66619+typedef atomic64_unchecked_t atomic_long_unchecked_t;
66620+#else
66621+typedef atomic64_t atomic_long_unchecked_t;
66622+#endif
66623+
66624 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
66625
66626 static inline long atomic_long_read(atomic_long_t *l)
66627@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
66628 return (long)atomic64_read(v);
66629 }
66630
66631+#ifdef CONFIG_PAX_REFCOUNT
66632+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
66633+{
66634+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66635+
66636+ return (long)atomic64_read_unchecked(v);
66637+}
66638+#endif
66639+
66640 static inline void atomic_long_set(atomic_long_t *l, long i)
66641 {
66642 atomic64_t *v = (atomic64_t *)l;
66643@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
66644 atomic64_set(v, i);
66645 }
66646
66647+#ifdef CONFIG_PAX_REFCOUNT
66648+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
66649+{
66650+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66651+
66652+ atomic64_set_unchecked(v, i);
66653+}
66654+#endif
66655+
66656 static inline void atomic_long_inc(atomic_long_t *l)
66657 {
66658 atomic64_t *v = (atomic64_t *)l;
66659@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
66660 atomic64_inc(v);
66661 }
66662
66663+#ifdef CONFIG_PAX_REFCOUNT
66664+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
66665+{
66666+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66667+
66668+ atomic64_inc_unchecked(v);
66669+}
66670+#endif
66671+
66672 static inline void atomic_long_dec(atomic_long_t *l)
66673 {
66674 atomic64_t *v = (atomic64_t *)l;
66675@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
66676 atomic64_dec(v);
66677 }
66678
66679+#ifdef CONFIG_PAX_REFCOUNT
66680+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
66681+{
66682+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66683+
66684+ atomic64_dec_unchecked(v);
66685+}
66686+#endif
66687+
66688 static inline void atomic_long_add(long i, atomic_long_t *l)
66689 {
66690 atomic64_t *v = (atomic64_t *)l;
66691@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
66692 atomic64_add(i, v);
66693 }
66694
66695+#ifdef CONFIG_PAX_REFCOUNT
66696+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
66697+{
66698+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66699+
66700+ atomic64_add_unchecked(i, v);
66701+}
66702+#endif
66703+
66704 static inline void atomic_long_sub(long i, atomic_long_t *l)
66705 {
66706 atomic64_t *v = (atomic64_t *)l;
66707@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
66708 atomic64_sub(i, v);
66709 }
66710
66711+#ifdef CONFIG_PAX_REFCOUNT
66712+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
66713+{
66714+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66715+
66716+ atomic64_sub_unchecked(i, v);
66717+}
66718+#endif
66719+
66720 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
66721 {
66722 atomic64_t *v = (atomic64_t *)l;
66723@@ -101,6 +161,15 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
66724 return (long)atomic64_add_return(i, v);
66725 }
66726
66727+#ifdef CONFIG_PAX_REFCOUNT
66728+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
66729+{
66730+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66731+
66732+ return (long)atomic64_add_return_unchecked(i, v);
66733+}
66734+#endif
66735+
66736 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
66737 {
66738 atomic64_t *v = (atomic64_t *)l;
66739@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
66740 return (long)atomic64_inc_return(v);
66741 }
66742
66743+#ifdef CONFIG_PAX_REFCOUNT
66744+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
66745+{
66746+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
66747+
66748+ return (long)atomic64_inc_return_unchecked(v);
66749+}
66750+#endif
66751+
66752 static inline long atomic_long_dec_return(atomic_long_t *l)
66753 {
66754 atomic64_t *v = (atomic64_t *)l;
66755@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
66756
66757 typedef atomic_t atomic_long_t;
66758
66759+#ifdef CONFIG_PAX_REFCOUNT
66760+typedef atomic_unchecked_t atomic_long_unchecked_t;
66761+#else
66762+typedef atomic_t atomic_long_unchecked_t;
66763+#endif
66764+
66765 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
66766 static inline long atomic_long_read(atomic_long_t *l)
66767 {
66768@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
66769 return (long)atomic_read(v);
66770 }
66771
66772+#ifdef CONFIG_PAX_REFCOUNT
66773+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
66774+{
66775+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
66776+
66777+ return (long)atomic_read_unchecked(v);
66778+}
66779+#endif
66780+
66781 static inline void atomic_long_set(atomic_long_t *l, long i)
66782 {
66783 atomic_t *v = (atomic_t *)l;
66784@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
66785 atomic_set(v, i);
66786 }
66787
66788+#ifdef CONFIG_PAX_REFCOUNT
66789+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
66790+{
66791+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
66792+
66793+ atomic_set_unchecked(v, i);
66794+}
66795+#endif
66796+
66797 static inline void atomic_long_inc(atomic_long_t *l)
66798 {
66799 atomic_t *v = (atomic_t *)l;
66800@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
66801 atomic_inc(v);
66802 }
66803
66804+#ifdef CONFIG_PAX_REFCOUNT
66805+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
66806+{
66807+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
66808+
66809+ atomic_inc_unchecked(v);
66810+}
66811+#endif
66812+
66813 static inline void atomic_long_dec(atomic_long_t *l)
66814 {
66815 atomic_t *v = (atomic_t *)l;
66816@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
66817 atomic_dec(v);
66818 }
66819
66820+#ifdef CONFIG_PAX_REFCOUNT
66821+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
66822+{
66823+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
66824+
66825+ atomic_dec_unchecked(v);
66826+}
66827+#endif
66828+
66829 static inline void atomic_long_add(long i, atomic_long_t *l)
66830 {
66831 atomic_t *v = (atomic_t *)l;
66832@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
66833 atomic_add(i, v);
66834 }
66835
66836+#ifdef CONFIG_PAX_REFCOUNT
66837+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
66838+{
66839+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
66840+
66841+ atomic_add_unchecked(i, v);
66842+}
66843+#endif
66844+
66845 static inline void atomic_long_sub(long i, atomic_long_t *l)
66846 {
66847 atomic_t *v = (atomic_t *)l;
66848@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
66849 atomic_sub(i, v);
66850 }
66851
66852+#ifdef CONFIG_PAX_REFCOUNT
66853+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
66854+{
66855+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
66856+
66857+ atomic_sub_unchecked(i, v);
66858+}
66859+#endif
66860+
66861 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
66862 {
66863 atomic_t *v = (atomic_t *)l;
66864@@ -218,6 +356,16 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
66865 return (long)atomic_add_return(i, v);
66866 }
66867
66868+#ifdef CONFIG_PAX_REFCOUNT
66869+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
66870+{
66871+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
66872+
66873+ return (long)atomic_add_return_unchecked(i, v);
66874+}
66875+
66876+#endif
66877+
66878 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
66879 {
66880 atomic_t *v = (atomic_t *)l;
66881@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
66882 return (long)atomic_inc_return(v);
66883 }
66884
66885+#ifdef CONFIG_PAX_REFCOUNT
66886+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
66887+{
66888+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
66889+
66890+ return (long)atomic_inc_return_unchecked(v);
66891+}
66892+#endif
66893+
66894 static inline long atomic_long_dec_return(atomic_long_t *l)
66895 {
66896 atomic_t *v = (atomic_t *)l;
66897@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
66898
66899 #endif /* BITS_PER_LONG == 64 */
66900
66901+#ifdef CONFIG_PAX_REFCOUNT
66902+static inline void pax_refcount_needs_these_functions(void)
66903+{
66904+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
66905+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
66906+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
66907+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
66908+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
66909+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
66910+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
66911+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
66912+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
66913+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
66914+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
66915+#ifdef CONFIG_X86
66916+ atomic_clear_mask_unchecked(0, NULL);
66917+ atomic_set_mask_unchecked(0, NULL);
66918+#endif
66919+
66920+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
66921+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
66922+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
66923+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
66924+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
66925+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
66926+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
66927+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
66928+}
66929+#else
66930+#define atomic_read_unchecked(v) atomic_read(v)
66931+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
66932+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
66933+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
66934+#define atomic_inc_unchecked(v) atomic_inc(v)
66935+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
66936+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
66937+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
66938+#define atomic_dec_unchecked(v) atomic_dec(v)
66939+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
66940+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
66941+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
66942+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
66943+
66944+#define atomic_long_read_unchecked(v) atomic_long_read(v)
66945+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
66946+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
66947+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
66948+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
66949+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
66950+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
66951+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
66952+#endif
66953+
66954 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
66955diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
66956index 1ced641..c896ee8 100644
66957--- a/include/asm-generic/atomic.h
66958+++ b/include/asm-generic/atomic.h
66959@@ -159,7 +159,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
66960 * Atomically clears the bits set in @mask from @v
66961 */
66962 #ifndef atomic_clear_mask
66963-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
66964+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
66965 {
66966 unsigned long flags;
66967
66968diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
66969index b18ce4f..2ee2843 100644
66970--- a/include/asm-generic/atomic64.h
66971+++ b/include/asm-generic/atomic64.h
66972@@ -16,6 +16,8 @@ typedef struct {
66973 long long counter;
66974 } atomic64_t;
66975
66976+typedef atomic64_t atomic64_unchecked_t;
66977+
66978 #define ATOMIC64_INIT(i) { (i) }
66979
66980 extern long long atomic64_read(const atomic64_t *v);
66981@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
66982 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
66983 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
66984
66985+#define atomic64_read_unchecked(v) atomic64_read(v)
66986+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
66987+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
66988+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
66989+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
66990+#define atomic64_inc_unchecked(v) atomic64_inc(v)
66991+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
66992+#define atomic64_dec_unchecked(v) atomic64_dec(v)
66993+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
66994+
66995 #endif /* _ASM_GENERIC_ATOMIC64_H */
66996diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
66997index 1bfcfe5..e04c5c9 100644
66998--- a/include/asm-generic/cache.h
66999+++ b/include/asm-generic/cache.h
67000@@ -6,7 +6,7 @@
67001 * cache lines need to provide their own cache.h.
67002 */
67003
67004-#define L1_CACHE_SHIFT 5
67005-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
67006+#define L1_CACHE_SHIFT 5UL
67007+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
67008
67009 #endif /* __ASM_GENERIC_CACHE_H */
67010diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
67011index 0d68a1e..b74a761 100644
67012--- a/include/asm-generic/emergency-restart.h
67013+++ b/include/asm-generic/emergency-restart.h
67014@@ -1,7 +1,7 @@
67015 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
67016 #define _ASM_GENERIC_EMERGENCY_RESTART_H
67017
67018-static inline void machine_emergency_restart(void)
67019+static inline __noreturn void machine_emergency_restart(void)
67020 {
67021 machine_restart(NULL);
67022 }
67023diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
67024index 90f99c7..00ce236 100644
67025--- a/include/asm-generic/kmap_types.h
67026+++ b/include/asm-generic/kmap_types.h
67027@@ -2,9 +2,9 @@
67028 #define _ASM_GENERIC_KMAP_TYPES_H
67029
67030 #ifdef __WITH_KM_FENCE
67031-# define KM_TYPE_NR 41
67032+# define KM_TYPE_NR 42
67033 #else
67034-# define KM_TYPE_NR 20
67035+# define KM_TYPE_NR 21
67036 #endif
67037
67038 #endif
67039diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
67040index 9ceb03b..62b0b8f 100644
67041--- a/include/asm-generic/local.h
67042+++ b/include/asm-generic/local.h
67043@@ -23,24 +23,37 @@ typedef struct
67044 atomic_long_t a;
67045 } local_t;
67046
67047+typedef struct {
67048+ atomic_long_unchecked_t a;
67049+} local_unchecked_t;
67050+
67051 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
67052
67053 #define local_read(l) atomic_long_read(&(l)->a)
67054+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
67055 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
67056+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
67057 #define local_inc(l) atomic_long_inc(&(l)->a)
67058+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
67059 #define local_dec(l) atomic_long_dec(&(l)->a)
67060+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
67061 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
67062+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
67063 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
67064+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
67065
67066 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
67067 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
67068 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
67069 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
67070 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
67071+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
67072 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
67073 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
67074+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
67075
67076 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
67077+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
67078 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
67079 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
67080 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
67081diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
67082index 725612b..9cc513a 100644
67083--- a/include/asm-generic/pgtable-nopmd.h
67084+++ b/include/asm-generic/pgtable-nopmd.h
67085@@ -1,14 +1,19 @@
67086 #ifndef _PGTABLE_NOPMD_H
67087 #define _PGTABLE_NOPMD_H
67088
67089-#ifndef __ASSEMBLY__
67090-
67091 #include <asm-generic/pgtable-nopud.h>
67092
67093-struct mm_struct;
67094-
67095 #define __PAGETABLE_PMD_FOLDED
67096
67097+#define PMD_SHIFT PUD_SHIFT
67098+#define PTRS_PER_PMD 1
67099+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
67100+#define PMD_MASK (~(PMD_SIZE-1))
67101+
67102+#ifndef __ASSEMBLY__
67103+
67104+struct mm_struct;
67105+
67106 /*
67107 * Having the pmd type consist of a pud gets the size right, and allows
67108 * us to conceptually access the pud entry that this pmd is folded into
67109@@ -16,11 +21,6 @@ struct mm_struct;
67110 */
67111 typedef struct { pud_t pud; } pmd_t;
67112
67113-#define PMD_SHIFT PUD_SHIFT
67114-#define PTRS_PER_PMD 1
67115-#define PMD_SIZE (1UL << PMD_SHIFT)
67116-#define PMD_MASK (~(PMD_SIZE-1))
67117-
67118 /*
67119 * The "pud_xxx()" functions here are trivial for a folded two-level
67120 * setup: the pmd is never bad, and a pmd always exists (as it's folded
67121diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
67122index 810431d..0ec4804f 100644
67123--- a/include/asm-generic/pgtable-nopud.h
67124+++ b/include/asm-generic/pgtable-nopud.h
67125@@ -1,10 +1,15 @@
67126 #ifndef _PGTABLE_NOPUD_H
67127 #define _PGTABLE_NOPUD_H
67128
67129-#ifndef __ASSEMBLY__
67130-
67131 #define __PAGETABLE_PUD_FOLDED
67132
67133+#define PUD_SHIFT PGDIR_SHIFT
67134+#define PTRS_PER_PUD 1
67135+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
67136+#define PUD_MASK (~(PUD_SIZE-1))
67137+
67138+#ifndef __ASSEMBLY__
67139+
67140 /*
67141 * Having the pud type consist of a pgd gets the size right, and allows
67142 * us to conceptually access the pgd entry that this pud is folded into
67143@@ -12,11 +17,6 @@
67144 */
67145 typedef struct { pgd_t pgd; } pud_t;
67146
67147-#define PUD_SHIFT PGDIR_SHIFT
67148-#define PTRS_PER_PUD 1
67149-#define PUD_SIZE (1UL << PUD_SHIFT)
67150-#define PUD_MASK (~(PUD_SIZE-1))
67151-
67152 /*
67153 * The "pgd_xxx()" functions here are trivial for a folded two-level
67154 * setup: the pud is never bad, and a pud always exists (as it's folded
67155@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
67156 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
67157
67158 #define pgd_populate(mm, pgd, pud) do { } while (0)
67159+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
67160 /*
67161 * (puds are folded into pgds so this doesn't get actually called,
67162 * but the define is needed for a generic inline function.)
67163diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
67164index 5cf680a..4b74d62 100644
67165--- a/include/asm-generic/pgtable.h
67166+++ b/include/asm-generic/pgtable.h
67167@@ -688,6 +688,14 @@ static inline pmd_t pmd_mknuma(pmd_t pmd)
67168 }
67169 #endif /* CONFIG_NUMA_BALANCING */
67170
67171+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
67172+static inline unsigned long pax_open_kernel(void) { return 0; }
67173+#endif
67174+
67175+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
67176+static inline unsigned long pax_close_kernel(void) { return 0; }
67177+#endif
67178+
67179 #endif /* CONFIG_MMU */
67180
67181 #endif /* !__ASSEMBLY__ */
67182diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
67183index d1ea7ce..b1ebf2a 100644
67184--- a/include/asm-generic/vmlinux.lds.h
67185+++ b/include/asm-generic/vmlinux.lds.h
67186@@ -218,6 +218,7 @@
67187 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
67188 VMLINUX_SYMBOL(__start_rodata) = .; \
67189 *(.rodata) *(.rodata.*) \
67190+ *(.data..read_only) \
67191 *(__vermagic) /* Kernel version magic */ \
67192 . = ALIGN(8); \
67193 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
67194@@ -725,17 +726,18 @@
67195 * section in the linker script will go there too. @phdr should have
67196 * a leading colon.
67197 *
67198- * Note that this macros defines __per_cpu_load as an absolute symbol.
67199+ * Note that this macros defines per_cpu_load as an absolute symbol.
67200 * If there is no need to put the percpu section at a predetermined
67201 * address, use PERCPU_SECTION.
67202 */
67203 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
67204- VMLINUX_SYMBOL(__per_cpu_load) = .; \
67205- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
67206+ per_cpu_load = .; \
67207+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
67208 - LOAD_OFFSET) { \
67209+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
67210 PERCPU_INPUT(cacheline) \
67211 } phdr \
67212- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
67213+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
67214
67215 /**
67216 * PERCPU_SECTION - define output section for percpu area, simple version
67217diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
67218index 418d270..bfd2794 100644
67219--- a/include/crypto/algapi.h
67220+++ b/include/crypto/algapi.h
67221@@ -34,7 +34,7 @@ struct crypto_type {
67222 unsigned int maskclear;
67223 unsigned int maskset;
67224 unsigned int tfmsize;
67225-};
67226+} __do_const;
67227
67228 struct crypto_instance {
67229 struct crypto_alg alg;
67230diff --git a/include/drm/drmP.h b/include/drm/drmP.h
67231index fad21c9..ab858bc 100644
67232--- a/include/drm/drmP.h
67233+++ b/include/drm/drmP.h
67234@@ -72,6 +72,7 @@
67235 #include <linux/workqueue.h>
67236 #include <linux/poll.h>
67237 #include <asm/pgalloc.h>
67238+#include <asm/local.h>
67239 #include <drm/drm.h>
67240 #include <drm/drm_sarea.h>
67241
67242@@ -293,10 +294,12 @@ do { \
67243 * \param cmd command.
67244 * \param arg argument.
67245 */
67246-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
67247+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
67248+ struct drm_file *file_priv);
67249+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
67250 struct drm_file *file_priv);
67251
67252-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
67253+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
67254 unsigned long arg);
67255
67256 #define DRM_IOCTL_NR(n) _IOC_NR(n)
67257@@ -311,9 +314,9 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
67258 struct drm_ioctl_desc {
67259 unsigned int cmd;
67260 int flags;
67261- drm_ioctl_t *func;
67262+ drm_ioctl_t func;
67263 unsigned int cmd_drv;
67264-};
67265+} __do_const;
67266
67267 /**
67268 * Creates a driver or general drm_ioctl_desc array entry for the given
67269@@ -995,7 +998,7 @@ struct drm_info_list {
67270 int (*show)(struct seq_file*, void*); /** show callback */
67271 u32 driver_features; /**< Required driver features for this entry */
67272 void *data;
67273-};
67274+} __do_const;
67275
67276 /**
67277 * debugfs node structure. This structure represents a debugfs file.
67278@@ -1068,7 +1071,7 @@ struct drm_device {
67279
67280 /** \name Usage Counters */
67281 /*@{ */
67282- int open_count; /**< Outstanding files open */
67283+ local_t open_count; /**< Outstanding files open */
67284 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
67285 atomic_t vma_count; /**< Outstanding vma areas open */
67286 int buf_use; /**< Buffers in use -- cannot alloc */
67287@@ -1079,7 +1082,7 @@ struct drm_device {
67288 /*@{ */
67289 unsigned long counters;
67290 enum drm_stat_type types[15];
67291- atomic_t counts[15];
67292+ atomic_unchecked_t counts[15];
67293 /*@} */
67294
67295 struct list_head filelist;
67296diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
67297index f43d556..94d9343 100644
67298--- a/include/drm/drm_crtc_helper.h
67299+++ b/include/drm/drm_crtc_helper.h
67300@@ -109,7 +109,7 @@ struct drm_encoder_helper_funcs {
67301 struct drm_connector *connector);
67302 /* disable encoder when not in use - more explicit than dpms off */
67303 void (*disable)(struct drm_encoder *encoder);
67304-};
67305+} __no_const;
67306
67307 /**
67308 * drm_connector_helper_funcs - helper operations for connectors
67309diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
67310index 72dcbe8..8db58d7 100644
67311--- a/include/drm/ttm/ttm_memory.h
67312+++ b/include/drm/ttm/ttm_memory.h
67313@@ -48,7 +48,7 @@
67314
67315 struct ttm_mem_shrink {
67316 int (*do_shrink) (struct ttm_mem_shrink *);
67317-};
67318+} __no_const;
67319
67320 /**
67321 * struct ttm_mem_global - Global memory accounting structure.
67322diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
67323index 4b840e8..155d235 100644
67324--- a/include/keys/asymmetric-subtype.h
67325+++ b/include/keys/asymmetric-subtype.h
67326@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
67327 /* Verify the signature on a key of this subtype (optional) */
67328 int (*verify_signature)(const struct key *key,
67329 const struct public_key_signature *sig);
67330-};
67331+} __do_const;
67332
67333 /**
67334 * asymmetric_key_subtype - Get the subtype from an asymmetric key
67335diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
67336index c1da539..1dcec55 100644
67337--- a/include/linux/atmdev.h
67338+++ b/include/linux/atmdev.h
67339@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
67340 #endif
67341
67342 struct k_atm_aal_stats {
67343-#define __HANDLE_ITEM(i) atomic_t i
67344+#define __HANDLE_ITEM(i) atomic_unchecked_t i
67345 __AAL_STAT_ITEMS
67346 #undef __HANDLE_ITEM
67347 };
67348@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
67349 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
67350 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
67351 struct module *owner;
67352-};
67353+} __do_const ;
67354
67355 struct atmphy_ops {
67356 int (*start)(struct atm_dev *dev);
67357diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
67358index 0530b98..96a8ac0 100644
67359--- a/include/linux/binfmts.h
67360+++ b/include/linux/binfmts.h
67361@@ -73,8 +73,9 @@ struct linux_binfmt {
67362 int (*load_binary)(struct linux_binprm *);
67363 int (*load_shlib)(struct file *);
67364 int (*core_dump)(struct coredump_params *cprm);
67365+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
67366 unsigned long min_coredump; /* minimal dump size */
67367-};
67368+} __do_const;
67369
67370 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
67371
67372diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
67373index f94bc83..62b9cfe 100644
67374--- a/include/linux/blkdev.h
67375+++ b/include/linux/blkdev.h
67376@@ -1498,7 +1498,7 @@ struct block_device_operations {
67377 /* this callback is with swap_lock and sometimes page table lock held */
67378 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
67379 struct module *owner;
67380-};
67381+} __do_const;
67382
67383 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
67384 unsigned long);
67385diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
67386index 7c2e030..b72475d 100644
67387--- a/include/linux/blktrace_api.h
67388+++ b/include/linux/blktrace_api.h
67389@@ -23,7 +23,7 @@ struct blk_trace {
67390 struct dentry *dir;
67391 struct dentry *dropped_file;
67392 struct dentry *msg_file;
67393- atomic_t dropped;
67394+ atomic_unchecked_t dropped;
67395 };
67396
67397 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
67398diff --git a/include/linux/cache.h b/include/linux/cache.h
67399index 4c57065..4307975 100644
67400--- a/include/linux/cache.h
67401+++ b/include/linux/cache.h
67402@@ -16,6 +16,10 @@
67403 #define __read_mostly
67404 #endif
67405
67406+#ifndef __read_only
67407+#define __read_only __read_mostly
67408+#endif
67409+
67410 #ifndef ____cacheline_aligned
67411 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
67412 #endif
67413diff --git a/include/linux/capability.h b/include/linux/capability.h
67414index 98503b7..cc36d18 100644
67415--- a/include/linux/capability.h
67416+++ b/include/linux/capability.h
67417@@ -211,8 +211,13 @@ extern bool capable(int cap);
67418 extern bool ns_capable(struct user_namespace *ns, int cap);
67419 extern bool nsown_capable(int cap);
67420 extern bool inode_capable(const struct inode *inode, int cap);
67421+extern bool capable_nolog(int cap);
67422+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
67423+extern bool inode_capable_nolog(const struct inode *inode, int cap);
67424
67425 /* audit system wants to get cap info from files as well */
67426 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
67427
67428+extern int is_privileged_binary(const struct dentry *dentry);
67429+
67430 #endif /* !_LINUX_CAPABILITY_H */
67431diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
67432index 8609d57..86e4d79 100644
67433--- a/include/linux/cdrom.h
67434+++ b/include/linux/cdrom.h
67435@@ -87,7 +87,6 @@ struct cdrom_device_ops {
67436
67437 /* driver specifications */
67438 const int capability; /* capability flags */
67439- int n_minors; /* number of active minor devices */
67440 /* handle uniform packets for scsi type devices (scsi,atapi) */
67441 int (*generic_packet) (struct cdrom_device_info *,
67442 struct packet_command *);
67443diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
67444index 42e55de..1cd0e66 100644
67445--- a/include/linux/cleancache.h
67446+++ b/include/linux/cleancache.h
67447@@ -31,7 +31,7 @@ struct cleancache_ops {
67448 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
67449 void (*invalidate_inode)(int, struct cleancache_filekey);
67450 void (*invalidate_fs)(int);
67451-};
67452+} __no_const;
67453
67454 extern struct cleancache_ops
67455 cleancache_register_ops(struct cleancache_ops *ops);
67456diff --git a/include/linux/compat.h b/include/linux/compat.h
67457index dec7e2d..45db13f 100644
67458--- a/include/linux/compat.h
67459+++ b/include/linux/compat.h
67460@@ -311,14 +311,14 @@ long compat_sys_msgsnd(int first, int second, int third, void __user *uptr);
67461 long compat_sys_msgrcv(int first, int second, int msgtyp, int third,
67462 int version, void __user *uptr);
67463 long compat_sys_shmat(int first, int second, compat_uptr_t third, int version,
67464- void __user *uptr);
67465+ void __user *uptr) __intentional_overflow(0);
67466 #else
67467 long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
67468 long compat_sys_msgsnd(int msqid, struct compat_msgbuf __user *msgp,
67469 compat_ssize_t msgsz, int msgflg);
67470 long compat_sys_msgrcv(int msqid, struct compat_msgbuf __user *msgp,
67471 compat_ssize_t msgsz, long msgtyp, int msgflg);
67472-long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
67473+long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
67474 #endif
67475 long compat_sys_msgctl(int first, int second, void __user *uptr);
67476 long compat_sys_shmctl(int first, int second, void __user *uptr);
67477@@ -414,7 +414,7 @@ extern int compat_ptrace_request(struct task_struct *child,
67478 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
67479 compat_ulong_t addr, compat_ulong_t data);
67480 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
67481- compat_long_t addr, compat_long_t data);
67482+ compat_ulong_t addr, compat_ulong_t data);
67483
67484 /*
67485 * epoll (fs/eventpoll.c) compat bits follow ...
67486diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
67487index 662fd1b..e801992 100644
67488--- a/include/linux/compiler-gcc4.h
67489+++ b/include/linux/compiler-gcc4.h
67490@@ -34,6 +34,21 @@
67491 #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
67492
67493 #if __GNUC_MINOR__ >= 5
67494+
67495+#ifdef CONSTIFY_PLUGIN
67496+#define __no_const __attribute__((no_const))
67497+#define __do_const __attribute__((do_const))
67498+#endif
67499+
67500+#ifdef SIZE_OVERFLOW_PLUGIN
67501+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
67502+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
67503+#endif
67504+
67505+#ifdef LATENT_ENTROPY_PLUGIN
67506+#define __latent_entropy __attribute__((latent_entropy))
67507+#endif
67508+
67509 /*
67510 * Mark a position in code as unreachable. This can be used to
67511 * suppress control flow warnings after asm blocks that transfer
67512@@ -49,6 +64,11 @@
67513 #define __noclone __attribute__((__noclone__))
67514
67515 #endif
67516+
67517+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
67518+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
67519+#define __bos0(ptr) __bos((ptr), 0)
67520+#define __bos1(ptr) __bos((ptr), 1)
67521 #endif
67522
67523 #if __GNUC_MINOR__ >= 6
67524diff --git a/include/linux/compiler.h b/include/linux/compiler.h
67525index dd852b7..1ad5fba 100644
67526--- a/include/linux/compiler.h
67527+++ b/include/linux/compiler.h
67528@@ -5,11 +5,14 @@
67529
67530 #ifdef __CHECKER__
67531 # define __user __attribute__((noderef, address_space(1)))
67532+# define __force_user __force __user
67533 # define __kernel __attribute__((address_space(0)))
67534+# define __force_kernel __force __kernel
67535 # define __safe __attribute__((safe))
67536 # define __force __attribute__((force))
67537 # define __nocast __attribute__((nocast))
67538 # define __iomem __attribute__((noderef, address_space(2)))
67539+# define __force_iomem __force __iomem
67540 # define __must_hold(x) __attribute__((context(x,1,1)))
67541 # define __acquires(x) __attribute__((context(x,0,1)))
67542 # define __releases(x) __attribute__((context(x,1,0)))
67543@@ -17,20 +20,37 @@
67544 # define __release(x) __context__(x,-1)
67545 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
67546 # define __percpu __attribute__((noderef, address_space(3)))
67547+# define __force_percpu __force __percpu
67548 #ifdef CONFIG_SPARSE_RCU_POINTER
67549 # define __rcu __attribute__((noderef, address_space(4)))
67550+# define __force_rcu __force __rcu
67551 #else
67552 # define __rcu
67553+# define __force_rcu
67554 #endif
67555 extern void __chk_user_ptr(const volatile void __user *);
67556 extern void __chk_io_ptr(const volatile void __iomem *);
67557 #else
67558-# define __user
67559-# define __kernel
67560+# ifdef CHECKER_PLUGIN
67561+//# define __user
67562+//# define __force_user
67563+//# define __kernel
67564+//# define __force_kernel
67565+# else
67566+# ifdef STRUCTLEAK_PLUGIN
67567+# define __user __attribute__((user))
67568+# else
67569+# define __user
67570+# endif
67571+# define __force_user
67572+# define __kernel
67573+# define __force_kernel
67574+# endif
67575 # define __safe
67576 # define __force
67577 # define __nocast
67578 # define __iomem
67579+# define __force_iomem
67580 # define __chk_user_ptr(x) (void)0
67581 # define __chk_io_ptr(x) (void)0
67582 # define __builtin_warning(x, y...) (1)
67583@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
67584 # define __release(x) (void)0
67585 # define __cond_lock(x,c) (c)
67586 # define __percpu
67587+# define __force_percpu
67588 # define __rcu
67589+# define __force_rcu
67590 #endif
67591
67592 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
67593@@ -275,6 +297,26 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
67594 # define __attribute_const__ /* unimplemented */
67595 #endif
67596
67597+#ifndef __no_const
67598+# define __no_const
67599+#endif
67600+
67601+#ifndef __do_const
67602+# define __do_const
67603+#endif
67604+
67605+#ifndef __size_overflow
67606+# define __size_overflow(...)
67607+#endif
67608+
67609+#ifndef __intentional_overflow
67610+# define __intentional_overflow(...)
67611+#endif
67612+
67613+#ifndef __latent_entropy
67614+# define __latent_entropy
67615+#endif
67616+
67617 /*
67618 * Tell gcc if a function is cold. The compiler will assume any path
67619 * directly leading to the call is unlikely.
67620@@ -284,6 +326,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
67621 #define __cold
67622 #endif
67623
67624+#ifndef __alloc_size
67625+#define __alloc_size(...)
67626+#endif
67627+
67628+#ifndef __bos
67629+#define __bos(ptr, arg)
67630+#endif
67631+
67632+#ifndef __bos0
67633+#define __bos0(ptr)
67634+#endif
67635+
67636+#ifndef __bos1
67637+#define __bos1(ptr)
67638+#endif
67639+
67640 /* Simple shorthand for a section definition */
67641 #ifndef __section
67642 # define __section(S) __attribute__ ((__section__(#S)))
67643@@ -323,6 +381,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
67644 * use is to mediate communication between process-level code and irq/NMI
67645 * handlers, all running on the same CPU.
67646 */
67647-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
67648+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
67649+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
67650
67651 #endif /* __LINUX_COMPILER_H */
67652diff --git a/include/linux/completion.h b/include/linux/completion.h
67653index 51494e6..0fd1b61 100644
67654--- a/include/linux/completion.h
67655+++ b/include/linux/completion.h
67656@@ -78,13 +78,13 @@ static inline void init_completion(struct completion *x)
67657
67658 extern void wait_for_completion(struct completion *);
67659 extern int wait_for_completion_interruptible(struct completion *x);
67660-extern int wait_for_completion_killable(struct completion *x);
67661+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
67662 extern unsigned long wait_for_completion_timeout(struct completion *x,
67663 unsigned long timeout);
67664 extern long wait_for_completion_interruptible_timeout(
67665- struct completion *x, unsigned long timeout);
67666+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
67667 extern long wait_for_completion_killable_timeout(
67668- struct completion *x, unsigned long timeout);
67669+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
67670 extern bool try_wait_for_completion(struct completion *x);
67671 extern bool completion_done(struct completion *x);
67672
67673diff --git a/include/linux/configfs.h b/include/linux/configfs.h
67674index 34025df..d94bbbc 100644
67675--- a/include/linux/configfs.h
67676+++ b/include/linux/configfs.h
67677@@ -125,7 +125,7 @@ struct configfs_attribute {
67678 const char *ca_name;
67679 struct module *ca_owner;
67680 umode_t ca_mode;
67681-};
67682+} __do_const;
67683
67684 /*
67685 * Users often need to create attribute structures for their configurable
67686diff --git a/include/linux/cpu.h b/include/linux/cpu.h
67687index ce7a074..01ab8ac 100644
67688--- a/include/linux/cpu.h
67689+++ b/include/linux/cpu.h
67690@@ -115,7 +115,7 @@ enum {
67691 /* Need to know about CPUs going up/down? */
67692 #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
67693 #define cpu_notifier(fn, pri) { \
67694- static struct notifier_block fn##_nb __cpuinitdata = \
67695+ static struct notifier_block fn##_nb = \
67696 { .notifier_call = fn, .priority = pri }; \
67697 register_cpu_notifier(&fn##_nb); \
67698 }
67699diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
67700index a55b88e..fba90c5 100644
67701--- a/include/linux/cpufreq.h
67702+++ b/include/linux/cpufreq.h
67703@@ -240,7 +240,7 @@ struct cpufreq_driver {
67704 int (*suspend) (struct cpufreq_policy *policy);
67705 int (*resume) (struct cpufreq_policy *policy);
67706 struct freq_attr **attr;
67707-};
67708+} __do_const;
67709
67710 /* flags */
67711
67712@@ -299,6 +299,7 @@ struct global_attr {
67713 ssize_t (*store)(struct kobject *a, struct attribute *b,
67714 const char *c, size_t count);
67715 };
67716+typedef struct global_attr __no_const global_attr_no_const;
67717
67718 #define define_one_global_ro(_name) \
67719 static struct global_attr _name = \
67720diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
67721index 24cd1037..20a63aae 100644
67722--- a/include/linux/cpuidle.h
67723+++ b/include/linux/cpuidle.h
67724@@ -54,7 +54,8 @@ struct cpuidle_state {
67725 int index);
67726
67727 int (*enter_dead) (struct cpuidle_device *dev, int index);
67728-};
67729+} __do_const;
67730+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
67731
67732 /* Idle State Flags */
67733 #define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
67734@@ -216,7 +217,7 @@ struct cpuidle_governor {
67735 void (*reflect) (struct cpuidle_device *dev, int index);
67736
67737 struct module *owner;
67738-};
67739+} __do_const;
67740
67741 #ifdef CONFIG_CPU_IDLE
67742
67743diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
67744index 0325602..5e9feff 100644
67745--- a/include/linux/cpumask.h
67746+++ b/include/linux/cpumask.h
67747@@ -118,17 +118,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
67748 }
67749
67750 /* Valid inputs for n are -1 and 0. */
67751-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
67752+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
67753 {
67754 return n+1;
67755 }
67756
67757-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
67758+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
67759 {
67760 return n+1;
67761 }
67762
67763-static inline unsigned int cpumask_next_and(int n,
67764+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
67765 const struct cpumask *srcp,
67766 const struct cpumask *andp)
67767 {
67768@@ -167,7 +167,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
67769 *
67770 * Returns >= nr_cpu_ids if no further cpus set.
67771 */
67772-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
67773+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
67774 {
67775 /* -1 is a legal arg here. */
67776 if (n != -1)
67777@@ -182,7 +182,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
67778 *
67779 * Returns >= nr_cpu_ids if no further cpus unset.
67780 */
67781-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
67782+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
67783 {
67784 /* -1 is a legal arg here. */
67785 if (n != -1)
67786@@ -190,7 +190,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
67787 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
67788 }
67789
67790-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
67791+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
67792 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
67793
67794 /**
67795diff --git a/include/linux/cred.h b/include/linux/cred.h
67796index 04421e8..6bce4ef 100644
67797--- a/include/linux/cred.h
67798+++ b/include/linux/cred.h
67799@@ -194,6 +194,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
67800 static inline void validate_process_creds(void)
67801 {
67802 }
67803+static inline void validate_task_creds(struct task_struct *task)
67804+{
67805+}
67806 #endif
67807
67808 /**
67809diff --git a/include/linux/crypto.h b/include/linux/crypto.h
67810index b92eadf..b4ecdc1 100644
67811--- a/include/linux/crypto.h
67812+++ b/include/linux/crypto.h
67813@@ -373,7 +373,7 @@ struct cipher_tfm {
67814 const u8 *key, unsigned int keylen);
67815 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
67816 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
67817-};
67818+} __no_const;
67819
67820 struct hash_tfm {
67821 int (*init)(struct hash_desc *desc);
67822@@ -394,13 +394,13 @@ struct compress_tfm {
67823 int (*cot_decompress)(struct crypto_tfm *tfm,
67824 const u8 *src, unsigned int slen,
67825 u8 *dst, unsigned int *dlen);
67826-};
67827+} __no_const;
67828
67829 struct rng_tfm {
67830 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
67831 unsigned int dlen);
67832 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
67833-};
67834+} __no_const;
67835
67836 #define crt_ablkcipher crt_u.ablkcipher
67837 #define crt_aead crt_u.aead
67838diff --git a/include/linux/ctype.h b/include/linux/ctype.h
67839index 8acfe31..6ffccd63 100644
67840--- a/include/linux/ctype.h
67841+++ b/include/linux/ctype.h
67842@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
67843 * Fast implementation of tolower() for internal usage. Do not use in your
67844 * code.
67845 */
67846-static inline char _tolower(const char c)
67847+static inline unsigned char _tolower(const unsigned char c)
67848 {
67849 return c | 0x20;
67850 }
67851diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
67852index 7925bf0..d5143d2 100644
67853--- a/include/linux/decompress/mm.h
67854+++ b/include/linux/decompress/mm.h
67855@@ -77,7 +77,7 @@ static void free(void *where)
67856 * warnings when not needed (indeed large_malloc / large_free are not
67857 * needed by inflate */
67858
67859-#define malloc(a) kmalloc(a, GFP_KERNEL)
67860+#define malloc(a) kmalloc((a), GFP_KERNEL)
67861 #define free(a) kfree(a)
67862
67863 #define large_malloc(a) vmalloc(a)
67864diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
67865index e83ef39..33e0eb3 100644
67866--- a/include/linux/devfreq.h
67867+++ b/include/linux/devfreq.h
67868@@ -114,7 +114,7 @@ struct devfreq_governor {
67869 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
67870 int (*event_handler)(struct devfreq *devfreq,
67871 unsigned int event, void *data);
67872-};
67873+} __do_const;
67874
67875 /**
67876 * struct devfreq - Device devfreq structure
67877diff --git a/include/linux/device.h b/include/linux/device.h
67878index 43dcda9..7a1fb65 100644
67879--- a/include/linux/device.h
67880+++ b/include/linux/device.h
67881@@ -294,7 +294,7 @@ struct subsys_interface {
67882 struct list_head node;
67883 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
67884 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
67885-};
67886+} __do_const;
67887
67888 int subsys_interface_register(struct subsys_interface *sif);
67889 void subsys_interface_unregister(struct subsys_interface *sif);
67890@@ -474,7 +474,7 @@ struct device_type {
67891 void (*release)(struct device *dev);
67892
67893 const struct dev_pm_ops *pm;
67894-};
67895+} __do_const;
67896
67897 /* interface for exporting device attributes */
67898 struct device_attribute {
67899@@ -484,11 +484,12 @@ struct device_attribute {
67900 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
67901 const char *buf, size_t count);
67902 };
67903+typedef struct device_attribute __no_const device_attribute_no_const;
67904
67905 struct dev_ext_attribute {
67906 struct device_attribute attr;
67907 void *var;
67908-};
67909+} __do_const;
67910
67911 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
67912 char *buf);
67913diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
67914index 94af418..b1ca7a2 100644
67915--- a/include/linux/dma-mapping.h
67916+++ b/include/linux/dma-mapping.h
67917@@ -54,7 +54,7 @@ struct dma_map_ops {
67918 u64 (*get_required_mask)(struct device *dev);
67919 #endif
67920 int is_phys;
67921-};
67922+} __do_const;
67923
67924 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
67925
67926diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
67927index d3201e4..8281e63 100644
67928--- a/include/linux/dmaengine.h
67929+++ b/include/linux/dmaengine.h
67930@@ -1018,9 +1018,9 @@ struct dma_pinned_list {
67931 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
67932 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
67933
67934-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
67935+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
67936 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
67937-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
67938+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
67939 struct dma_pinned_list *pinned_list, struct page *page,
67940 unsigned int offset, size_t len);
67941
67942diff --git a/include/linux/efi.h b/include/linux/efi.h
67943index 7a9498a..155713d 100644
67944--- a/include/linux/efi.h
67945+++ b/include/linux/efi.h
67946@@ -733,6 +733,7 @@ struct efivar_operations {
67947 efi_set_variable_t *set_variable;
67948 efi_query_variable_info_t *query_variable_info;
67949 };
67950+typedef struct efivar_operations __no_const efivar_operations_no_const;
67951
67952 struct efivars {
67953 /*
67954diff --git a/include/linux/elf.h b/include/linux/elf.h
67955index 8c9048e..16a4665 100644
67956--- a/include/linux/elf.h
67957+++ b/include/linux/elf.h
67958@@ -20,6 +20,7 @@ extern Elf32_Dyn _DYNAMIC [];
67959 #define elf_note elf32_note
67960 #define elf_addr_t Elf32_Off
67961 #define Elf_Half Elf32_Half
67962+#define elf_dyn Elf32_Dyn
67963
67964 #else
67965
67966@@ -30,6 +31,7 @@ extern Elf64_Dyn _DYNAMIC [];
67967 #define elf_note elf64_note
67968 #define elf_addr_t Elf64_Off
67969 #define Elf_Half Elf64_Half
67970+#define elf_dyn Elf64_Dyn
67971
67972 #endif
67973
67974diff --git a/include/linux/err.h b/include/linux/err.h
67975index f2edce2..cc2082c 100644
67976--- a/include/linux/err.h
67977+++ b/include/linux/err.h
67978@@ -19,12 +19,12 @@
67979
67980 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
67981
67982-static inline void * __must_check ERR_PTR(long error)
67983+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
67984 {
67985 return (void *) error;
67986 }
67987
67988-static inline long __must_check PTR_ERR(const void *ptr)
67989+static inline long __must_check __intentional_overflow(-1) PTR_ERR(const void *ptr)
67990 {
67991 return (long) ptr;
67992 }
67993diff --git a/include/linux/extcon.h b/include/linux/extcon.h
67994index fcb51c8..bdafcf6 100644
67995--- a/include/linux/extcon.h
67996+++ b/include/linux/extcon.h
67997@@ -134,7 +134,7 @@ struct extcon_dev {
67998 /* /sys/class/extcon/.../mutually_exclusive/... */
67999 struct attribute_group attr_g_muex;
68000 struct attribute **attrs_muex;
68001- struct device_attribute *d_attrs_muex;
68002+ device_attribute_no_const *d_attrs_muex;
68003 };
68004
68005 /**
68006diff --git a/include/linux/fb.h b/include/linux/fb.h
68007index c7a9571..02eeffe 100644
68008--- a/include/linux/fb.h
68009+++ b/include/linux/fb.h
68010@@ -302,7 +302,7 @@ struct fb_ops {
68011 /* called at KDB enter and leave time to prepare the console */
68012 int (*fb_debug_enter)(struct fb_info *info);
68013 int (*fb_debug_leave)(struct fb_info *info);
68014-};
68015+} __do_const;
68016
68017 #ifdef CONFIG_FB_TILEBLITTING
68018 #define FB_TILE_CURSOR_NONE 0
68019diff --git a/include/linux/filter.h b/include/linux/filter.h
68020index c45eabc..baa0be5 100644
68021--- a/include/linux/filter.h
68022+++ b/include/linux/filter.h
68023@@ -20,6 +20,7 @@ struct compat_sock_fprog {
68024
68025 struct sk_buff;
68026 struct sock;
68027+struct bpf_jit_work;
68028
68029 struct sk_filter
68030 {
68031@@ -27,6 +28,9 @@ struct sk_filter
68032 unsigned int len; /* Number of filter blocks */
68033 unsigned int (*bpf_func)(const struct sk_buff *skb,
68034 const struct sock_filter *filter);
68035+#ifdef CONFIG_BPF_JIT
68036+ struct bpf_jit_work *work;
68037+#endif
68038 struct rcu_head rcu;
68039 struct sock_filter insns[0];
68040 };
68041diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
68042index 3044254..9767f41 100644
68043--- a/include/linux/frontswap.h
68044+++ b/include/linux/frontswap.h
68045@@ -11,7 +11,7 @@ struct frontswap_ops {
68046 int (*load)(unsigned, pgoff_t, struct page *);
68047 void (*invalidate_page)(unsigned, pgoff_t);
68048 void (*invalidate_area)(unsigned);
68049-};
68050+} __no_const;
68051
68052 extern bool frontswap_enabled;
68053 extern struct frontswap_ops
68054diff --git a/include/linux/fs.h b/include/linux/fs.h
68055index 7617ee0..b575199 100644
68056--- a/include/linux/fs.h
68057+++ b/include/linux/fs.h
68058@@ -1541,7 +1541,8 @@ struct file_operations {
68059 long (*fallocate)(struct file *file, int mode, loff_t offset,
68060 loff_t len);
68061 int (*show_fdinfo)(struct seq_file *m, struct file *f);
68062-};
68063+} __do_const;
68064+typedef struct file_operations __no_const file_operations_no_const;
68065
68066 struct inode_operations {
68067 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
68068@@ -2665,4 +2666,14 @@ static inline void inode_has_no_xattr(struct inode *inode)
68069 inode->i_flags |= S_NOSEC;
68070 }
68071
68072+static inline bool is_sidechannel_device(const struct inode *inode)
68073+{
68074+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
68075+ umode_t mode = inode->i_mode;
68076+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
68077+#else
68078+ return false;
68079+#endif
68080+}
68081+
68082 #endif /* _LINUX_FS_H */
68083diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
68084index 324f931..f292b65 100644
68085--- a/include/linux/fs_struct.h
68086+++ b/include/linux/fs_struct.h
68087@@ -6,7 +6,7 @@
68088 #include <linux/seqlock.h>
68089
68090 struct fs_struct {
68091- int users;
68092+ atomic_t users;
68093 spinlock_t lock;
68094 seqcount_t seq;
68095 int umask;
68096diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
68097index 5dfa0aa..6acf322 100644
68098--- a/include/linux/fscache-cache.h
68099+++ b/include/linux/fscache-cache.h
68100@@ -112,7 +112,7 @@ struct fscache_operation {
68101 fscache_operation_release_t release;
68102 };
68103
68104-extern atomic_t fscache_op_debug_id;
68105+extern atomic_unchecked_t fscache_op_debug_id;
68106 extern void fscache_op_work_func(struct work_struct *work);
68107
68108 extern void fscache_enqueue_operation(struct fscache_operation *);
68109@@ -134,7 +134,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
68110 INIT_WORK(&op->work, fscache_op_work_func);
68111 atomic_set(&op->usage, 1);
68112 op->state = FSCACHE_OP_ST_INITIALISED;
68113- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
68114+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
68115 op->processor = processor;
68116 op->release = release;
68117 INIT_LIST_HEAD(&op->pend_link);
68118diff --git a/include/linux/fscache.h b/include/linux/fscache.h
68119index 7a08623..4c07b0f 100644
68120--- a/include/linux/fscache.h
68121+++ b/include/linux/fscache.h
68122@@ -152,7 +152,7 @@ struct fscache_cookie_def {
68123 * - this is mandatory for any object that may have data
68124 */
68125 void (*now_uncached)(void *cookie_netfs_data);
68126-};
68127+} __do_const;
68128
68129 /*
68130 * fscache cached network filesystem type
68131diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
68132index 0fbfb46..508eb0d 100644
68133--- a/include/linux/fsnotify.h
68134+++ b/include/linux/fsnotify.h
68135@@ -195,6 +195,9 @@ static inline void fsnotify_access(struct file *file)
68136 struct inode *inode = path->dentry->d_inode;
68137 __u32 mask = FS_ACCESS;
68138
68139+ if (is_sidechannel_device(inode))
68140+ return;
68141+
68142 if (S_ISDIR(inode->i_mode))
68143 mask |= FS_ISDIR;
68144
68145@@ -213,6 +216,9 @@ static inline void fsnotify_modify(struct file *file)
68146 struct inode *inode = path->dentry->d_inode;
68147 __u32 mask = FS_MODIFY;
68148
68149+ if (is_sidechannel_device(inode))
68150+ return;
68151+
68152 if (S_ISDIR(inode->i_mode))
68153 mask |= FS_ISDIR;
68154
68155@@ -315,7 +321,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
68156 */
68157 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
68158 {
68159- return kstrdup(name, GFP_KERNEL);
68160+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
68161 }
68162
68163 /*
68164diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
68165index a3d4895..ddd2a50 100644
68166--- a/include/linux/ftrace_event.h
68167+++ b/include/linux/ftrace_event.h
68168@@ -272,7 +272,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
68169 extern int trace_add_event_call(struct ftrace_event_call *call);
68170 extern void trace_remove_event_call(struct ftrace_event_call *call);
68171
68172-#define is_signed_type(type) (((type)(-1)) < 0)
68173+#define is_signed_type(type) (((type)(-1)) < (type)1)
68174
68175 int trace_set_clr_event(const char *system, const char *event, int set);
68176
68177diff --git a/include/linux/genhd.h b/include/linux/genhd.h
68178index 79b8bba..86b539e 100644
68179--- a/include/linux/genhd.h
68180+++ b/include/linux/genhd.h
68181@@ -194,7 +194,7 @@ struct gendisk {
68182 struct kobject *slave_dir;
68183
68184 struct timer_rand_state *random;
68185- atomic_t sync_io; /* RAID */
68186+ atomic_unchecked_t sync_io; /* RAID */
68187 struct disk_events *ev;
68188 #ifdef CONFIG_BLK_DEV_INTEGRITY
68189 struct blk_integrity *integrity;
68190diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
68191index 023bc34..b02b46a 100644
68192--- a/include/linux/genl_magic_func.h
68193+++ b/include/linux/genl_magic_func.h
68194@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
68195 },
68196
68197 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
68198-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
68199+static struct genl_ops ZZZ_genl_ops[] = {
68200 #include GENL_MAGIC_INCLUDE_FILE
68201 };
68202
68203diff --git a/include/linux/gfp.h b/include/linux/gfp.h
68204index 0f615eb..5c3832f 100644
68205--- a/include/linux/gfp.h
68206+++ b/include/linux/gfp.h
68207@@ -35,6 +35,13 @@ struct vm_area_struct;
68208 #define ___GFP_NO_KSWAPD 0x400000u
68209 #define ___GFP_OTHER_NODE 0x800000u
68210 #define ___GFP_WRITE 0x1000000u
68211+
68212+#ifdef CONFIG_PAX_USERCOPY_SLABS
68213+#define ___GFP_USERCOPY 0x2000000u
68214+#else
68215+#define ___GFP_USERCOPY 0
68216+#endif
68217+
68218 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
68219
68220 /*
68221@@ -92,6 +99,7 @@ struct vm_area_struct;
68222 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
68223 #define __GFP_KMEMCG ((__force gfp_t)___GFP_KMEMCG) /* Allocation comes from a memcg-accounted resource */
68224 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
68225+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
68226
68227 /*
68228 * This may seem redundant, but it's a way of annotating false positives vs.
68229@@ -99,7 +107,7 @@ struct vm_area_struct;
68230 */
68231 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
68232
68233-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
68234+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
68235 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
68236
68237 /* This equals 0, but use constants in case they ever change */
68238@@ -153,6 +161,8 @@ struct vm_area_struct;
68239 /* 4GB DMA on some platforms */
68240 #define GFP_DMA32 __GFP_DMA32
68241
68242+#define GFP_USERCOPY __GFP_USERCOPY
68243+
68244 /* Convert GFP flags to their corresponding migrate type */
68245 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
68246 {
68247diff --git a/include/linux/gracl.h b/include/linux/gracl.h
68248new file mode 100644
68249index 0000000..ebe6d72
68250--- /dev/null
68251+++ b/include/linux/gracl.h
68252@@ -0,0 +1,319 @@
68253+#ifndef GR_ACL_H
68254+#define GR_ACL_H
68255+
68256+#include <linux/grdefs.h>
68257+#include <linux/resource.h>
68258+#include <linux/capability.h>
68259+#include <linux/dcache.h>
68260+#include <asm/resource.h>
68261+
68262+/* Major status information */
68263+
68264+#define GR_VERSION "grsecurity 2.9.1"
68265+#define GRSECURITY_VERSION 0x2901
68266+
68267+enum {
68268+ GR_SHUTDOWN = 0,
68269+ GR_ENABLE = 1,
68270+ GR_SPROLE = 2,
68271+ GR_RELOAD = 3,
68272+ GR_SEGVMOD = 4,
68273+ GR_STATUS = 5,
68274+ GR_UNSPROLE = 6,
68275+ GR_PASSSET = 7,
68276+ GR_SPROLEPAM = 8,
68277+};
68278+
68279+/* Password setup definitions
68280+ * kernel/grhash.c */
68281+enum {
68282+ GR_PW_LEN = 128,
68283+ GR_SALT_LEN = 16,
68284+ GR_SHA_LEN = 32,
68285+};
68286+
68287+enum {
68288+ GR_SPROLE_LEN = 64,
68289+};
68290+
68291+enum {
68292+ GR_NO_GLOB = 0,
68293+ GR_REG_GLOB,
68294+ GR_CREATE_GLOB
68295+};
68296+
68297+#define GR_NLIMITS 32
68298+
68299+/* Begin Data Structures */
68300+
68301+struct sprole_pw {
68302+ unsigned char *rolename;
68303+ unsigned char salt[GR_SALT_LEN];
68304+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
68305+};
68306+
68307+struct name_entry {
68308+ __u32 key;
68309+ ino_t inode;
68310+ dev_t device;
68311+ char *name;
68312+ __u16 len;
68313+ __u8 deleted;
68314+ struct name_entry *prev;
68315+ struct name_entry *next;
68316+};
68317+
68318+struct inodev_entry {
68319+ struct name_entry *nentry;
68320+ struct inodev_entry *prev;
68321+ struct inodev_entry *next;
68322+};
68323+
68324+struct acl_role_db {
68325+ struct acl_role_label **r_hash;
68326+ __u32 r_size;
68327+};
68328+
68329+struct inodev_db {
68330+ struct inodev_entry **i_hash;
68331+ __u32 i_size;
68332+};
68333+
68334+struct name_db {
68335+ struct name_entry **n_hash;
68336+ __u32 n_size;
68337+};
68338+
68339+struct crash_uid {
68340+ uid_t uid;
68341+ unsigned long expires;
68342+};
68343+
68344+struct gr_hash_struct {
68345+ void **table;
68346+ void **nametable;
68347+ void *first;
68348+ __u32 table_size;
68349+ __u32 used_size;
68350+ int type;
68351+};
68352+
68353+/* Userspace Grsecurity ACL data structures */
68354+
68355+struct acl_subject_label {
68356+ char *filename;
68357+ ino_t inode;
68358+ dev_t device;
68359+ __u32 mode;
68360+ kernel_cap_t cap_mask;
68361+ kernel_cap_t cap_lower;
68362+ kernel_cap_t cap_invert_audit;
68363+
68364+ struct rlimit res[GR_NLIMITS];
68365+ __u32 resmask;
68366+
68367+ __u8 user_trans_type;
68368+ __u8 group_trans_type;
68369+ uid_t *user_transitions;
68370+ gid_t *group_transitions;
68371+ __u16 user_trans_num;
68372+ __u16 group_trans_num;
68373+
68374+ __u32 sock_families[2];
68375+ __u32 ip_proto[8];
68376+ __u32 ip_type;
68377+ struct acl_ip_label **ips;
68378+ __u32 ip_num;
68379+ __u32 inaddr_any_override;
68380+
68381+ __u32 crashes;
68382+ unsigned long expires;
68383+
68384+ struct acl_subject_label *parent_subject;
68385+ struct gr_hash_struct *hash;
68386+ struct acl_subject_label *prev;
68387+ struct acl_subject_label *next;
68388+
68389+ struct acl_object_label **obj_hash;
68390+ __u32 obj_hash_size;
68391+ __u16 pax_flags;
68392+};
68393+
68394+struct role_allowed_ip {
68395+ __u32 addr;
68396+ __u32 netmask;
68397+
68398+ struct role_allowed_ip *prev;
68399+ struct role_allowed_ip *next;
68400+};
68401+
68402+struct role_transition {
68403+ char *rolename;
68404+
68405+ struct role_transition *prev;
68406+ struct role_transition *next;
68407+};
68408+
68409+struct acl_role_label {
68410+ char *rolename;
68411+ uid_t uidgid;
68412+ __u16 roletype;
68413+
68414+ __u16 auth_attempts;
68415+ unsigned long expires;
68416+
68417+ struct acl_subject_label *root_label;
68418+ struct gr_hash_struct *hash;
68419+
68420+ struct acl_role_label *prev;
68421+ struct acl_role_label *next;
68422+
68423+ struct role_transition *transitions;
68424+ struct role_allowed_ip *allowed_ips;
68425+ uid_t *domain_children;
68426+ __u16 domain_child_num;
68427+
68428+ umode_t umask;
68429+
68430+ struct acl_subject_label **subj_hash;
68431+ __u32 subj_hash_size;
68432+};
68433+
68434+struct user_acl_role_db {
68435+ struct acl_role_label **r_table;
68436+ __u32 num_pointers; /* Number of allocations to track */
68437+ __u32 num_roles; /* Number of roles */
68438+ __u32 num_domain_children; /* Number of domain children */
68439+ __u32 num_subjects; /* Number of subjects */
68440+ __u32 num_objects; /* Number of objects */
68441+};
68442+
68443+struct acl_object_label {
68444+ char *filename;
68445+ ino_t inode;
68446+ dev_t device;
68447+ __u32 mode;
68448+
68449+ struct acl_subject_label *nested;
68450+ struct acl_object_label *globbed;
68451+
68452+ /* next two structures not used */
68453+
68454+ struct acl_object_label *prev;
68455+ struct acl_object_label *next;
68456+};
68457+
68458+struct acl_ip_label {
68459+ char *iface;
68460+ __u32 addr;
68461+ __u32 netmask;
68462+ __u16 low, high;
68463+ __u8 mode;
68464+ __u32 type;
68465+ __u32 proto[8];
68466+
68467+ /* next two structures not used */
68468+
68469+ struct acl_ip_label *prev;
68470+ struct acl_ip_label *next;
68471+};
68472+
68473+struct gr_arg {
68474+ struct user_acl_role_db role_db;
68475+ unsigned char pw[GR_PW_LEN];
68476+ unsigned char salt[GR_SALT_LEN];
68477+ unsigned char sum[GR_SHA_LEN];
68478+ unsigned char sp_role[GR_SPROLE_LEN];
68479+ struct sprole_pw *sprole_pws;
68480+ dev_t segv_device;
68481+ ino_t segv_inode;
68482+ uid_t segv_uid;
68483+ __u16 num_sprole_pws;
68484+ __u16 mode;
68485+};
68486+
68487+struct gr_arg_wrapper {
68488+ struct gr_arg *arg;
68489+ __u32 version;
68490+ __u32 size;
68491+};
68492+
68493+struct subject_map {
68494+ struct acl_subject_label *user;
68495+ struct acl_subject_label *kernel;
68496+ struct subject_map *prev;
68497+ struct subject_map *next;
68498+};
68499+
68500+struct acl_subj_map_db {
68501+ struct subject_map **s_hash;
68502+ __u32 s_size;
68503+};
68504+
68505+/* End Data Structures Section */
68506+
68507+/* Hash functions generated by empirical testing by Brad Spengler
68508+ Makes good use of the low bits of the inode. Generally 0-1 times
68509+ in loop for successful match. 0-3 for unsuccessful match.
68510+ Shift/add algorithm with modulus of table size and an XOR*/
68511+
68512+static __inline__ unsigned int
68513+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
68514+{
68515+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
68516+}
68517+
68518+ static __inline__ unsigned int
68519+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
68520+{
68521+ return ((const unsigned long)userp % sz);
68522+}
68523+
68524+static __inline__ unsigned int
68525+gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
68526+{
68527+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
68528+}
68529+
68530+static __inline__ unsigned int
68531+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
68532+{
68533+ return full_name_hash((const unsigned char *)name, len) % sz;
68534+}
68535+
68536+#define FOR_EACH_ROLE_START(role) \
68537+ role = role_list; \
68538+ while (role) {
68539+
68540+#define FOR_EACH_ROLE_END(role) \
68541+ role = role->prev; \
68542+ }
68543+
68544+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
68545+ subj = NULL; \
68546+ iter = 0; \
68547+ while (iter < role->subj_hash_size) { \
68548+ if (subj == NULL) \
68549+ subj = role->subj_hash[iter]; \
68550+ if (subj == NULL) { \
68551+ iter++; \
68552+ continue; \
68553+ }
68554+
68555+#define FOR_EACH_SUBJECT_END(subj,iter) \
68556+ subj = subj->next; \
68557+ if (subj == NULL) \
68558+ iter++; \
68559+ }
68560+
68561+
68562+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
68563+ subj = role->hash->first; \
68564+ while (subj != NULL) {
68565+
68566+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
68567+ subj = subj->next; \
68568+ }
68569+
68570+#endif
68571+
68572diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
68573new file mode 100644
68574index 0000000..323ecf2
68575--- /dev/null
68576+++ b/include/linux/gralloc.h
68577@@ -0,0 +1,9 @@
68578+#ifndef __GRALLOC_H
68579+#define __GRALLOC_H
68580+
68581+void acl_free_all(void);
68582+int acl_alloc_stack_init(unsigned long size);
68583+void *acl_alloc(unsigned long len);
68584+void *acl_alloc_num(unsigned long num, unsigned long len);
68585+
68586+#endif
68587diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
68588new file mode 100644
68589index 0000000..be66033
68590--- /dev/null
68591+++ b/include/linux/grdefs.h
68592@@ -0,0 +1,140 @@
68593+#ifndef GRDEFS_H
68594+#define GRDEFS_H
68595+
68596+/* Begin grsecurity status declarations */
68597+
68598+enum {
68599+ GR_READY = 0x01,
68600+ GR_STATUS_INIT = 0x00 // disabled state
68601+};
68602+
68603+/* Begin ACL declarations */
68604+
68605+/* Role flags */
68606+
68607+enum {
68608+ GR_ROLE_USER = 0x0001,
68609+ GR_ROLE_GROUP = 0x0002,
68610+ GR_ROLE_DEFAULT = 0x0004,
68611+ GR_ROLE_SPECIAL = 0x0008,
68612+ GR_ROLE_AUTH = 0x0010,
68613+ GR_ROLE_NOPW = 0x0020,
68614+ GR_ROLE_GOD = 0x0040,
68615+ GR_ROLE_LEARN = 0x0080,
68616+ GR_ROLE_TPE = 0x0100,
68617+ GR_ROLE_DOMAIN = 0x0200,
68618+ GR_ROLE_PAM = 0x0400,
68619+ GR_ROLE_PERSIST = 0x0800
68620+};
68621+
68622+/* ACL Subject and Object mode flags */
68623+enum {
68624+ GR_DELETED = 0x80000000
68625+};
68626+
68627+/* ACL Object-only mode flags */
68628+enum {
68629+ GR_READ = 0x00000001,
68630+ GR_APPEND = 0x00000002,
68631+ GR_WRITE = 0x00000004,
68632+ GR_EXEC = 0x00000008,
68633+ GR_FIND = 0x00000010,
68634+ GR_INHERIT = 0x00000020,
68635+ GR_SETID = 0x00000040,
68636+ GR_CREATE = 0x00000080,
68637+ GR_DELETE = 0x00000100,
68638+ GR_LINK = 0x00000200,
68639+ GR_AUDIT_READ = 0x00000400,
68640+ GR_AUDIT_APPEND = 0x00000800,
68641+ GR_AUDIT_WRITE = 0x00001000,
68642+ GR_AUDIT_EXEC = 0x00002000,
68643+ GR_AUDIT_FIND = 0x00004000,
68644+ GR_AUDIT_INHERIT= 0x00008000,
68645+ GR_AUDIT_SETID = 0x00010000,
68646+ GR_AUDIT_CREATE = 0x00020000,
68647+ GR_AUDIT_DELETE = 0x00040000,
68648+ GR_AUDIT_LINK = 0x00080000,
68649+ GR_PTRACERD = 0x00100000,
68650+ GR_NOPTRACE = 0x00200000,
68651+ GR_SUPPRESS = 0x00400000,
68652+ GR_NOLEARN = 0x00800000,
68653+ GR_INIT_TRANSFER= 0x01000000
68654+};
68655+
68656+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
68657+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
68658+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
68659+
68660+/* ACL subject-only mode flags */
68661+enum {
68662+ GR_KILL = 0x00000001,
68663+ GR_VIEW = 0x00000002,
68664+ GR_PROTECTED = 0x00000004,
68665+ GR_LEARN = 0x00000008,
68666+ GR_OVERRIDE = 0x00000010,
68667+ /* just a placeholder, this mode is only used in userspace */
68668+ GR_DUMMY = 0x00000020,
68669+ GR_PROTSHM = 0x00000040,
68670+ GR_KILLPROC = 0x00000080,
68671+ GR_KILLIPPROC = 0x00000100,
68672+ /* just a placeholder, this mode is only used in userspace */
68673+ GR_NOTROJAN = 0x00000200,
68674+ GR_PROTPROCFD = 0x00000400,
68675+ GR_PROCACCT = 0x00000800,
68676+ GR_RELAXPTRACE = 0x00001000,
68677+ //GR_NESTED = 0x00002000,
68678+ GR_INHERITLEARN = 0x00004000,
68679+ GR_PROCFIND = 0x00008000,
68680+ GR_POVERRIDE = 0x00010000,
68681+ GR_KERNELAUTH = 0x00020000,
68682+ GR_ATSECURE = 0x00040000,
68683+ GR_SHMEXEC = 0x00080000
68684+};
68685+
68686+enum {
68687+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
68688+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
68689+ GR_PAX_ENABLE_MPROTECT = 0x0004,
68690+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
68691+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
68692+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
68693+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
68694+ GR_PAX_DISABLE_MPROTECT = 0x0400,
68695+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
68696+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
68697+};
68698+
68699+enum {
68700+ GR_ID_USER = 0x01,
68701+ GR_ID_GROUP = 0x02,
68702+};
68703+
68704+enum {
68705+ GR_ID_ALLOW = 0x01,
68706+ GR_ID_DENY = 0x02,
68707+};
68708+
68709+#define GR_CRASH_RES 31
68710+#define GR_UIDTABLE_MAX 500
68711+
68712+/* begin resource learning section */
68713+enum {
68714+ GR_RLIM_CPU_BUMP = 60,
68715+ GR_RLIM_FSIZE_BUMP = 50000,
68716+ GR_RLIM_DATA_BUMP = 10000,
68717+ GR_RLIM_STACK_BUMP = 1000,
68718+ GR_RLIM_CORE_BUMP = 10000,
68719+ GR_RLIM_RSS_BUMP = 500000,
68720+ GR_RLIM_NPROC_BUMP = 1,
68721+ GR_RLIM_NOFILE_BUMP = 5,
68722+ GR_RLIM_MEMLOCK_BUMP = 50000,
68723+ GR_RLIM_AS_BUMP = 500000,
68724+ GR_RLIM_LOCKS_BUMP = 2,
68725+ GR_RLIM_SIGPENDING_BUMP = 5,
68726+ GR_RLIM_MSGQUEUE_BUMP = 10000,
68727+ GR_RLIM_NICE_BUMP = 1,
68728+ GR_RLIM_RTPRIO_BUMP = 1,
68729+ GR_RLIM_RTTIME_BUMP = 1000000
68730+};
68731+
68732+#endif
68733diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
68734new file mode 100644
68735index 0000000..9bb6662
68736--- /dev/null
68737+++ b/include/linux/grinternal.h
68738@@ -0,0 +1,215 @@
68739+#ifndef __GRINTERNAL_H
68740+#define __GRINTERNAL_H
68741+
68742+#ifdef CONFIG_GRKERNSEC
68743+
68744+#include <linux/fs.h>
68745+#include <linux/mnt_namespace.h>
68746+#include <linux/nsproxy.h>
68747+#include <linux/gracl.h>
68748+#include <linux/grdefs.h>
68749+#include <linux/grmsg.h>
68750+
68751+void gr_add_learn_entry(const char *fmt, ...)
68752+ __attribute__ ((format (printf, 1, 2)));
68753+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
68754+ const struct vfsmount *mnt);
68755+__u32 gr_check_create(const struct dentry *new_dentry,
68756+ const struct dentry *parent,
68757+ const struct vfsmount *mnt, const __u32 mode);
68758+int gr_check_protected_task(const struct task_struct *task);
68759+__u32 to_gr_audit(const __u32 reqmode);
68760+int gr_set_acls(const int type);
68761+int gr_apply_subject_to_task(struct task_struct *task);
68762+int gr_acl_is_enabled(void);
68763+char gr_roletype_to_char(void);
68764+
68765+void gr_handle_alertkill(struct task_struct *task);
68766+char *gr_to_filename(const struct dentry *dentry,
68767+ const struct vfsmount *mnt);
68768+char *gr_to_filename1(const struct dentry *dentry,
68769+ const struct vfsmount *mnt);
68770+char *gr_to_filename2(const struct dentry *dentry,
68771+ const struct vfsmount *mnt);
68772+char *gr_to_filename3(const struct dentry *dentry,
68773+ const struct vfsmount *mnt);
68774+
68775+extern int grsec_enable_ptrace_readexec;
68776+extern int grsec_enable_harden_ptrace;
68777+extern int grsec_enable_link;
68778+extern int grsec_enable_fifo;
68779+extern int grsec_enable_execve;
68780+extern int grsec_enable_shm;
68781+extern int grsec_enable_execlog;
68782+extern int grsec_enable_signal;
68783+extern int grsec_enable_audit_ptrace;
68784+extern int grsec_enable_forkfail;
68785+extern int grsec_enable_time;
68786+extern int grsec_enable_rofs;
68787+extern int grsec_enable_chroot_shmat;
68788+extern int grsec_enable_chroot_mount;
68789+extern int grsec_enable_chroot_double;
68790+extern int grsec_enable_chroot_pivot;
68791+extern int grsec_enable_chroot_chdir;
68792+extern int grsec_enable_chroot_chmod;
68793+extern int grsec_enable_chroot_mknod;
68794+extern int grsec_enable_chroot_fchdir;
68795+extern int grsec_enable_chroot_nice;
68796+extern int grsec_enable_chroot_execlog;
68797+extern int grsec_enable_chroot_caps;
68798+extern int grsec_enable_chroot_sysctl;
68799+extern int grsec_enable_chroot_unix;
68800+extern int grsec_enable_symlinkown;
68801+extern kgid_t grsec_symlinkown_gid;
68802+extern int grsec_enable_tpe;
68803+extern kgid_t grsec_tpe_gid;
68804+extern int grsec_enable_tpe_all;
68805+extern int grsec_enable_tpe_invert;
68806+extern int grsec_enable_socket_all;
68807+extern kgid_t grsec_socket_all_gid;
68808+extern int grsec_enable_socket_client;
68809+extern kgid_t grsec_socket_client_gid;
68810+extern int grsec_enable_socket_server;
68811+extern kgid_t grsec_socket_server_gid;
68812+extern kgid_t grsec_audit_gid;
68813+extern int grsec_enable_group;
68814+extern int grsec_enable_audit_textrel;
68815+extern int grsec_enable_log_rwxmaps;
68816+extern int grsec_enable_mount;
68817+extern int grsec_enable_chdir;
68818+extern int grsec_resource_logging;
68819+extern int grsec_enable_blackhole;
68820+extern int grsec_lastack_retries;
68821+extern int grsec_enable_brute;
68822+extern int grsec_lock;
68823+
68824+extern spinlock_t grsec_alert_lock;
68825+extern unsigned long grsec_alert_wtime;
68826+extern unsigned long grsec_alert_fyet;
68827+
68828+extern spinlock_t grsec_audit_lock;
68829+
68830+extern rwlock_t grsec_exec_file_lock;
68831+
68832+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
68833+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
68834+ (tsk)->exec_file->f_vfsmnt) : "/")
68835+
68836+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
68837+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
68838+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
68839+
68840+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
68841+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
68842+ (tsk)->exec_file->f_vfsmnt) : "/")
68843+
68844+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
68845+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
68846+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
68847+
68848+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
68849+
68850+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
68851+
68852+#define GR_CHROOT_CAPS {{ \
68853+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
68854+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
68855+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
68856+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
68857+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
68858+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
68859+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
68860+
68861+#define security_learn(normal_msg,args...) \
68862+({ \
68863+ read_lock(&grsec_exec_file_lock); \
68864+ gr_add_learn_entry(normal_msg "\n", ## args); \
68865+ read_unlock(&grsec_exec_file_lock); \
68866+})
68867+
68868+enum {
68869+ GR_DO_AUDIT,
68870+ GR_DONT_AUDIT,
68871+ /* used for non-audit messages that we shouldn't kill the task on */
68872+ GR_DONT_AUDIT_GOOD
68873+};
68874+
68875+enum {
68876+ GR_TTYSNIFF,
68877+ GR_RBAC,
68878+ GR_RBAC_STR,
68879+ GR_STR_RBAC,
68880+ GR_RBAC_MODE2,
68881+ GR_RBAC_MODE3,
68882+ GR_FILENAME,
68883+ GR_SYSCTL_HIDDEN,
68884+ GR_NOARGS,
68885+ GR_ONE_INT,
68886+ GR_ONE_INT_TWO_STR,
68887+ GR_ONE_STR,
68888+ GR_STR_INT,
68889+ GR_TWO_STR_INT,
68890+ GR_TWO_INT,
68891+ GR_TWO_U64,
68892+ GR_THREE_INT,
68893+ GR_FIVE_INT_TWO_STR,
68894+ GR_TWO_STR,
68895+ GR_THREE_STR,
68896+ GR_FOUR_STR,
68897+ GR_STR_FILENAME,
68898+ GR_FILENAME_STR,
68899+ GR_FILENAME_TWO_INT,
68900+ GR_FILENAME_TWO_INT_STR,
68901+ GR_TEXTREL,
68902+ GR_PTRACE,
68903+ GR_RESOURCE,
68904+ GR_CAP,
68905+ GR_SIG,
68906+ GR_SIG2,
68907+ GR_CRASH1,
68908+ GR_CRASH2,
68909+ GR_PSACCT,
68910+ GR_RWXMAP
68911+};
68912+
68913+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
68914+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
68915+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
68916+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
68917+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
68918+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
68919+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
68920+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
68921+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
68922+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
68923+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
68924+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
68925+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
68926+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
68927+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
68928+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
68929+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
68930+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
68931+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
68932+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
68933+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
68934+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
68935+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
68936+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
68937+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
68938+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
68939+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
68940+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
68941+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
68942+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
68943+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
68944+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
68945+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
68946+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
68947+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
68948+
68949+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
68950+
68951+#endif
68952+
68953+#endif
68954diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
68955new file mode 100644
68956index 0000000..2bd4c8d
68957--- /dev/null
68958+++ b/include/linux/grmsg.h
68959@@ -0,0 +1,111 @@
68960+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
68961+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
68962+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
68963+#define GR_STOPMOD_MSG "denied modification of module state by "
68964+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
68965+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
68966+#define GR_IOPERM_MSG "denied use of ioperm() by "
68967+#define GR_IOPL_MSG "denied use of iopl() by "
68968+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
68969+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
68970+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
68971+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
68972+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
68973+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
68974+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
68975+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
68976+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
68977+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
68978+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
68979+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
68980+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
68981+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
68982+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
68983+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
68984+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
68985+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
68986+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
68987+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
68988+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
68989+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
68990+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
68991+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
68992+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
68993+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
68994+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
68995+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
68996+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
68997+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
68998+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
68999+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
69000+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
69001+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
69002+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
69003+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
69004+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
69005+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
69006+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
69007+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
69008+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
69009+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
69010+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
69011+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
69012+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
69013+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
69014+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
69015+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
69016+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
69017+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
69018+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
69019+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
69020+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
69021+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
69022+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
69023+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
69024+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
69025+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
69026+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
69027+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
69028+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
69029+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
69030+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
69031+#define GR_FAILFORK_MSG "failed fork with errno %s by "
69032+#define GR_NICE_CHROOT_MSG "denied priority change by "
69033+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
69034+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
69035+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
69036+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
69037+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
69038+#define GR_TIME_MSG "time set by "
69039+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
69040+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
69041+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
69042+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
69043+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
69044+#define GR_BIND_MSG "denied bind() by "
69045+#define GR_CONNECT_MSG "denied connect() by "
69046+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
69047+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
69048+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
69049+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
69050+#define GR_CAP_ACL_MSG "use of %s denied for "
69051+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
69052+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
69053+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
69054+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
69055+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
69056+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
69057+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
69058+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
69059+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
69060+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
69061+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
69062+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
69063+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
69064+#define GR_VM86_MSG "denied use of vm86 by "
69065+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
69066+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
69067+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
69068+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
69069+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
69070+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
69071diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
69072new file mode 100644
69073index 0000000..8da63a4
69074--- /dev/null
69075+++ b/include/linux/grsecurity.h
69076@@ -0,0 +1,242 @@
69077+#ifndef GR_SECURITY_H
69078+#define GR_SECURITY_H
69079+#include <linux/fs.h>
69080+#include <linux/fs_struct.h>
69081+#include <linux/binfmts.h>
69082+#include <linux/gracl.h>
69083+
69084+/* notify of brain-dead configs */
69085+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
69086+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
69087+#endif
69088+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
69089+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
69090+#endif
69091+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
69092+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
69093+#endif
69094+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
69095+#error "CONFIG_PAX enabled, but no PaX options are enabled."
69096+#endif
69097+
69098+void gr_handle_brute_attach(unsigned long mm_flags);
69099+void gr_handle_brute_check(void);
69100+void gr_handle_kernel_exploit(void);
69101+int gr_process_user_ban(void);
69102+
69103+char gr_roletype_to_char(void);
69104+
69105+int gr_acl_enable_at_secure(void);
69106+
69107+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
69108+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
69109+
69110+void gr_del_task_from_ip_table(struct task_struct *p);
69111+
69112+int gr_pid_is_chrooted(struct task_struct *p);
69113+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
69114+int gr_handle_chroot_nice(void);
69115+int gr_handle_chroot_sysctl(const int op);
69116+int gr_handle_chroot_setpriority(struct task_struct *p,
69117+ const int niceval);
69118+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
69119+int gr_handle_chroot_chroot(const struct dentry *dentry,
69120+ const struct vfsmount *mnt);
69121+void gr_handle_chroot_chdir(struct path *path);
69122+int gr_handle_chroot_chmod(const struct dentry *dentry,
69123+ const struct vfsmount *mnt, const int mode);
69124+int gr_handle_chroot_mknod(const struct dentry *dentry,
69125+ const struct vfsmount *mnt, const int mode);
69126+int gr_handle_chroot_mount(const struct dentry *dentry,
69127+ const struct vfsmount *mnt,
69128+ const char *dev_name);
69129+int gr_handle_chroot_pivot(void);
69130+int gr_handle_chroot_unix(const pid_t pid);
69131+
69132+int gr_handle_rawio(const struct inode *inode);
69133+
69134+void gr_handle_ioperm(void);
69135+void gr_handle_iopl(void);
69136+
69137+umode_t gr_acl_umask(void);
69138+
69139+int gr_tpe_allow(const struct file *file);
69140+
69141+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
69142+void gr_clear_chroot_entries(struct task_struct *task);
69143+
69144+void gr_log_forkfail(const int retval);
69145+void gr_log_timechange(void);
69146+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
69147+void gr_log_chdir(const struct dentry *dentry,
69148+ const struct vfsmount *mnt);
69149+void gr_log_chroot_exec(const struct dentry *dentry,
69150+ const struct vfsmount *mnt);
69151+void gr_log_remount(const char *devname, const int retval);
69152+void gr_log_unmount(const char *devname, const int retval);
69153+void gr_log_mount(const char *from, const char *to, const int retval);
69154+void gr_log_textrel(struct vm_area_struct *vma);
69155+void gr_log_rwxmmap(struct file *file);
69156+void gr_log_rwxmprotect(struct file *file);
69157+
69158+int gr_handle_follow_link(const struct inode *parent,
69159+ const struct inode *inode,
69160+ const struct dentry *dentry,
69161+ const struct vfsmount *mnt);
69162+int gr_handle_fifo(const struct dentry *dentry,
69163+ const struct vfsmount *mnt,
69164+ const struct dentry *dir, const int flag,
69165+ const int acc_mode);
69166+int gr_handle_hardlink(const struct dentry *dentry,
69167+ const struct vfsmount *mnt,
69168+ struct inode *inode,
69169+ const int mode, const struct filename *to);
69170+
69171+int gr_is_capable(const int cap);
69172+int gr_is_capable_nolog(const int cap);
69173+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
69174+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
69175+
69176+void gr_copy_label(struct task_struct *tsk);
69177+void gr_handle_crash(struct task_struct *task, const int sig);
69178+int gr_handle_signal(const struct task_struct *p, const int sig);
69179+int gr_check_crash_uid(const kuid_t uid);
69180+int gr_check_protected_task(const struct task_struct *task);
69181+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
69182+int gr_acl_handle_mmap(const struct file *file,
69183+ const unsigned long prot);
69184+int gr_acl_handle_mprotect(const struct file *file,
69185+ const unsigned long prot);
69186+int gr_check_hidden_task(const struct task_struct *tsk);
69187+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
69188+ const struct vfsmount *mnt);
69189+__u32 gr_acl_handle_utime(const struct dentry *dentry,
69190+ const struct vfsmount *mnt);
69191+__u32 gr_acl_handle_access(const struct dentry *dentry,
69192+ const struct vfsmount *mnt, const int fmode);
69193+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
69194+ const struct vfsmount *mnt, umode_t *mode);
69195+__u32 gr_acl_handle_chown(const struct dentry *dentry,
69196+ const struct vfsmount *mnt);
69197+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
69198+ const struct vfsmount *mnt);
69199+int gr_handle_ptrace(struct task_struct *task, const long request);
69200+int gr_handle_proc_ptrace(struct task_struct *task);
69201+__u32 gr_acl_handle_execve(const struct dentry *dentry,
69202+ const struct vfsmount *mnt);
69203+int gr_check_crash_exec(const struct file *filp);
69204+int gr_acl_is_enabled(void);
69205+void gr_set_kernel_label(struct task_struct *task);
69206+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
69207+ const kgid_t gid);
69208+int gr_set_proc_label(const struct dentry *dentry,
69209+ const struct vfsmount *mnt,
69210+ const int unsafe_flags);
69211+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
69212+ const struct vfsmount *mnt);
69213+__u32 gr_acl_handle_open(const struct dentry *dentry,
69214+ const struct vfsmount *mnt, int acc_mode);
69215+__u32 gr_acl_handle_creat(const struct dentry *dentry,
69216+ const struct dentry *p_dentry,
69217+ const struct vfsmount *p_mnt,
69218+ int open_flags, int acc_mode, const int imode);
69219+void gr_handle_create(const struct dentry *dentry,
69220+ const struct vfsmount *mnt);
69221+void gr_handle_proc_create(const struct dentry *dentry,
69222+ const struct inode *inode);
69223+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
69224+ const struct dentry *parent_dentry,
69225+ const struct vfsmount *parent_mnt,
69226+ const int mode);
69227+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
69228+ const struct dentry *parent_dentry,
69229+ const struct vfsmount *parent_mnt);
69230+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
69231+ const struct vfsmount *mnt);
69232+void gr_handle_delete(const ino_t ino, const dev_t dev);
69233+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
69234+ const struct vfsmount *mnt);
69235+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
69236+ const struct dentry *parent_dentry,
69237+ const struct vfsmount *parent_mnt,
69238+ const struct filename *from);
69239+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
69240+ const struct dentry *parent_dentry,
69241+ const struct vfsmount *parent_mnt,
69242+ const struct dentry *old_dentry,
69243+ const struct vfsmount *old_mnt, const struct filename *to);
69244+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
69245+int gr_acl_handle_rename(struct dentry *new_dentry,
69246+ struct dentry *parent_dentry,
69247+ const struct vfsmount *parent_mnt,
69248+ struct dentry *old_dentry,
69249+ struct inode *old_parent_inode,
69250+ struct vfsmount *old_mnt, const struct filename *newname);
69251+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
69252+ struct dentry *old_dentry,
69253+ struct dentry *new_dentry,
69254+ struct vfsmount *mnt, const __u8 replace);
69255+__u32 gr_check_link(const struct dentry *new_dentry,
69256+ const struct dentry *parent_dentry,
69257+ const struct vfsmount *parent_mnt,
69258+ const struct dentry *old_dentry,
69259+ const struct vfsmount *old_mnt);
69260+int gr_acl_handle_filldir(const struct file *file, const char *name,
69261+ const unsigned int namelen, const ino_t ino);
69262+
69263+__u32 gr_acl_handle_unix(const struct dentry *dentry,
69264+ const struct vfsmount *mnt);
69265+void gr_acl_handle_exit(void);
69266+void gr_acl_handle_psacct(struct task_struct *task, const long code);
69267+int gr_acl_handle_procpidmem(const struct task_struct *task);
69268+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
69269+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
69270+void gr_audit_ptrace(struct task_struct *task);
69271+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
69272+void gr_put_exec_file(struct task_struct *task);
69273+
69274+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
69275+
69276+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
69277+extern void gr_learn_resource(const struct task_struct *task, const int res,
69278+ const unsigned long wanted, const int gt);
69279+#else
69280+static inline void gr_learn_resource(const struct task_struct *task, const int res,
69281+ const unsigned long wanted, const int gt)
69282+{
69283+}
69284+#endif
69285+
69286+#ifdef CONFIG_GRKERNSEC_RESLOG
69287+extern void gr_log_resource(const struct task_struct *task, const int res,
69288+ const unsigned long wanted, const int gt);
69289+#else
69290+static inline void gr_log_resource(const struct task_struct *task, const int res,
69291+ const unsigned long wanted, const int gt)
69292+{
69293+}
69294+#endif
69295+
69296+#ifdef CONFIG_GRKERNSEC
69297+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
69298+void gr_handle_vm86(void);
69299+void gr_handle_mem_readwrite(u64 from, u64 to);
69300+
69301+void gr_log_badprocpid(const char *entry);
69302+
69303+extern int grsec_enable_dmesg;
69304+extern int grsec_disable_privio;
69305+
69306+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
69307+extern kgid_t grsec_proc_gid;
69308+#endif
69309+
69310+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
69311+extern int grsec_enable_chroot_findtask;
69312+#endif
69313+#ifdef CONFIG_GRKERNSEC_SETXID
69314+extern int grsec_enable_setxid;
69315+#endif
69316+#endif
69317+
69318+#endif
69319diff --git a/include/linux/grsock.h b/include/linux/grsock.h
69320new file mode 100644
69321index 0000000..e7ffaaf
69322--- /dev/null
69323+++ b/include/linux/grsock.h
69324@@ -0,0 +1,19 @@
69325+#ifndef __GRSOCK_H
69326+#define __GRSOCK_H
69327+
69328+extern void gr_attach_curr_ip(const struct sock *sk);
69329+extern int gr_handle_sock_all(const int family, const int type,
69330+ const int protocol);
69331+extern int gr_handle_sock_server(const struct sockaddr *sck);
69332+extern int gr_handle_sock_server_other(const struct sock *sck);
69333+extern int gr_handle_sock_client(const struct sockaddr *sck);
69334+extern int gr_search_connect(struct socket * sock,
69335+ struct sockaddr_in * addr);
69336+extern int gr_search_bind(struct socket * sock,
69337+ struct sockaddr_in * addr);
69338+extern int gr_search_listen(struct socket * sock);
69339+extern int gr_search_accept(struct socket * sock);
69340+extern int gr_search_socket(const int domain, const int type,
69341+ const int protocol);
69342+
69343+#endif
69344diff --git a/include/linux/highmem.h b/include/linux/highmem.h
69345index ef788b5..ac41b7b 100644
69346--- a/include/linux/highmem.h
69347+++ b/include/linux/highmem.h
69348@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
69349 kunmap_atomic(kaddr);
69350 }
69351
69352+static inline void sanitize_highpage(struct page *page)
69353+{
69354+ void *kaddr;
69355+ unsigned long flags;
69356+
69357+ local_irq_save(flags);
69358+ kaddr = kmap_atomic(page);
69359+ clear_page(kaddr);
69360+ kunmap_atomic(kaddr);
69361+ local_irq_restore(flags);
69362+}
69363+
69364 static inline void zero_user_segments(struct page *page,
69365 unsigned start1, unsigned end1,
69366 unsigned start2, unsigned end2)
69367diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
69368index 1c7b89a..7f52502 100644
69369--- a/include/linux/hwmon-sysfs.h
69370+++ b/include/linux/hwmon-sysfs.h
69371@@ -25,7 +25,8 @@
69372 struct sensor_device_attribute{
69373 struct device_attribute dev_attr;
69374 int index;
69375-};
69376+} __do_const;
69377+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
69378 #define to_sensor_dev_attr(_dev_attr) \
69379 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
69380
69381@@ -41,7 +42,7 @@ struct sensor_device_attribute_2 {
69382 struct device_attribute dev_attr;
69383 u8 index;
69384 u8 nr;
69385-};
69386+} __do_const;
69387 #define to_sensor_dev_attr_2(_dev_attr) \
69388 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
69389
69390diff --git a/include/linux/i2c.h b/include/linux/i2c.h
69391index d0c4db7..61b3577 100644
69392--- a/include/linux/i2c.h
69393+++ b/include/linux/i2c.h
69394@@ -369,6 +369,7 @@ struct i2c_algorithm {
69395 /* To determine what the adapter supports */
69396 u32 (*functionality) (struct i2c_adapter *);
69397 };
69398+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
69399
69400 /*
69401 * i2c_adapter is the structure used to identify a physical i2c bus along
69402diff --git a/include/linux/i2o.h b/include/linux/i2o.h
69403index d23c3c2..eb63c81 100644
69404--- a/include/linux/i2o.h
69405+++ b/include/linux/i2o.h
69406@@ -565,7 +565,7 @@ struct i2o_controller {
69407 struct i2o_device *exec; /* Executive */
69408 #if BITS_PER_LONG == 64
69409 spinlock_t context_list_lock; /* lock for context_list */
69410- atomic_t context_list_counter; /* needed for unique contexts */
69411+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
69412 struct list_head context_list; /* list of context id's
69413 and pointers */
69414 #endif
69415diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
69416index aff7ad8..3942bbd 100644
69417--- a/include/linux/if_pppox.h
69418+++ b/include/linux/if_pppox.h
69419@@ -76,7 +76,7 @@ struct pppox_proto {
69420 int (*ioctl)(struct socket *sock, unsigned int cmd,
69421 unsigned long arg);
69422 struct module *owner;
69423-};
69424+} __do_const;
69425
69426 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
69427 extern void unregister_pppox_proto(int proto_num);
69428diff --git a/include/linux/init.h b/include/linux/init.h
69429index 10ed4f4..8e8490d 100644
69430--- a/include/linux/init.h
69431+++ b/include/linux/init.h
69432@@ -39,9 +39,36 @@
69433 * Also note, that this data cannot be "const".
69434 */
69435
69436+#ifdef MODULE
69437+#define add_init_latent_entropy
69438+#define add_devinit_latent_entropy
69439+#define add_cpuinit_latent_entropy
69440+#define add_meminit_latent_entropy
69441+#else
69442+#define add_init_latent_entropy __latent_entropy
69443+
69444+#ifdef CONFIG_HOTPLUG
69445+#define add_devinit_latent_entropy
69446+#else
69447+#define add_devinit_latent_entropy __latent_entropy
69448+#endif
69449+
69450+#ifdef CONFIG_HOTPLUG_CPU
69451+#define add_cpuinit_latent_entropy
69452+#else
69453+#define add_cpuinit_latent_entropy __latent_entropy
69454+#endif
69455+
69456+#ifdef CONFIG_MEMORY_HOTPLUG
69457+#define add_meminit_latent_entropy
69458+#else
69459+#define add_meminit_latent_entropy __latent_entropy
69460+#endif
69461+#endif
69462+
69463 /* These are for everybody (although not all archs will actually
69464 discard it in modules) */
69465-#define __init __section(.init.text) __cold notrace
69466+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
69467 #define __initdata __section(.init.data)
69468 #define __initconst __constsection(.init.rodata)
69469 #define __exitdata __section(.exit.data)
69470@@ -94,7 +121,7 @@
69471 #define __exit __section(.exit.text) __exitused __cold notrace
69472
69473 /* Used for HOTPLUG_CPU */
69474-#define __cpuinit __section(.cpuinit.text) __cold notrace
69475+#define __cpuinit __section(.cpuinit.text) __cold notrace add_cpuinit_latent_entropy
69476 #define __cpuinitdata __section(.cpuinit.data)
69477 #define __cpuinitconst __constsection(.cpuinit.rodata)
69478 #define __cpuexit __section(.cpuexit.text) __exitused __cold notrace
69479@@ -102,7 +129,7 @@
69480 #define __cpuexitconst __constsection(.cpuexit.rodata)
69481
69482 /* Used for MEMORY_HOTPLUG */
69483-#define __meminit __section(.meminit.text) __cold notrace
69484+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
69485 #define __meminitdata __section(.meminit.data)
69486 #define __meminitconst __constsection(.meminit.rodata)
69487 #define __memexit __section(.memexit.text) __exitused __cold notrace
69488diff --git a/include/linux/init_task.h b/include/linux/init_task.h
69489index 6d087c5..401cab8 100644
69490--- a/include/linux/init_task.h
69491+++ b/include/linux/init_task.h
69492@@ -143,6 +143,12 @@ extern struct task_group root_task_group;
69493
69494 #define INIT_TASK_COMM "swapper"
69495
69496+#ifdef CONFIG_X86
69497+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
69498+#else
69499+#define INIT_TASK_THREAD_INFO
69500+#endif
69501+
69502 /*
69503 * INIT_TASK is used to set up the first task table, touch at
69504 * your own risk!. Base=0, limit=0x1fffff (=2MB)
69505@@ -182,6 +188,7 @@ extern struct task_group root_task_group;
69506 RCU_POINTER_INITIALIZER(cred, &init_cred), \
69507 .comm = INIT_TASK_COMM, \
69508 .thread = INIT_THREAD, \
69509+ INIT_TASK_THREAD_INFO \
69510 .fs = &init_fs, \
69511 .files = &init_files, \
69512 .signal = &init_signals, \
69513diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
69514index 5fa5afe..ac55b25 100644
69515--- a/include/linux/interrupt.h
69516+++ b/include/linux/interrupt.h
69517@@ -430,7 +430,7 @@ enum
69518 /* map softirq index to softirq name. update 'softirq_to_name' in
69519 * kernel/softirq.c when adding a new softirq.
69520 */
69521-extern char *softirq_to_name[NR_SOFTIRQS];
69522+extern const char * const softirq_to_name[NR_SOFTIRQS];
69523
69524 /* softirq mask and active fields moved to irq_cpustat_t in
69525 * asm/hardirq.h to get better cache usage. KAO
69526@@ -438,12 +438,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
69527
69528 struct softirq_action
69529 {
69530- void (*action)(struct softirq_action *);
69531-};
69532+ void (*action)(void);
69533+} __no_const;
69534
69535 asmlinkage void do_softirq(void);
69536 asmlinkage void __do_softirq(void);
69537-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
69538+extern void open_softirq(int nr, void (*action)(void));
69539 extern void softirq_init(void);
69540 extern void __raise_softirq_irqoff(unsigned int nr);
69541
69542diff --git a/include/linux/iommu.h b/include/linux/iommu.h
69543index f3b99e1..9b73cee 100644
69544--- a/include/linux/iommu.h
69545+++ b/include/linux/iommu.h
69546@@ -101,7 +101,7 @@ struct iommu_ops {
69547 int (*domain_set_attr)(struct iommu_domain *domain,
69548 enum iommu_attr attr, void *data);
69549 unsigned long pgsize_bitmap;
69550-};
69551+} __do_const;
69552
69553 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
69554 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
69555diff --git a/include/linux/irq.h b/include/linux/irq.h
69556index fdf2c4a..5332486 100644
69557--- a/include/linux/irq.h
69558+++ b/include/linux/irq.h
69559@@ -328,7 +328,8 @@ struct irq_chip {
69560 void (*irq_print_chip)(struct irq_data *data, struct seq_file *p);
69561
69562 unsigned long flags;
69563-};
69564+} __do_const;
69565+typedef struct irq_chip __no_const irq_chip_no_const;
69566
69567 /*
69568 * irq_chip specific flags
69569diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
69570index 6883e19..06992b1 100644
69571--- a/include/linux/kallsyms.h
69572+++ b/include/linux/kallsyms.h
69573@@ -15,7 +15,8 @@
69574
69575 struct module;
69576
69577-#ifdef CONFIG_KALLSYMS
69578+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
69579+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
69580 /* Lookup the address for a symbol. Returns 0 if not found. */
69581 unsigned long kallsyms_lookup_name(const char *name);
69582
69583@@ -106,6 +107,17 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
69584 /* Stupid that this does nothing, but I didn't create this mess. */
69585 #define __print_symbol(fmt, addr)
69586 #endif /*CONFIG_KALLSYMS*/
69587+#else /* when included by kallsyms.c, vsnprintf.c, or
69588+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
69589+extern void __print_symbol(const char *fmt, unsigned long address);
69590+extern int sprint_backtrace(char *buffer, unsigned long address);
69591+extern int sprint_symbol(char *buffer, unsigned long address);
69592+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
69593+const char *kallsyms_lookup(unsigned long addr,
69594+ unsigned long *symbolsize,
69595+ unsigned long *offset,
69596+ char **modname, char *namebuf);
69597+#endif
69598
69599 /* This macro allows us to keep printk typechecking */
69600 static __printf(1, 2)
69601diff --git a/include/linux/key-type.h b/include/linux/key-type.h
69602index 518a53a..5e28358 100644
69603--- a/include/linux/key-type.h
69604+++ b/include/linux/key-type.h
69605@@ -125,7 +125,7 @@ struct key_type {
69606 /* internal fields */
69607 struct list_head link; /* link in types list */
69608 struct lock_class_key lock_class; /* key->sem lock class */
69609-};
69610+} __do_const;
69611
69612 extern struct key_type key_type_keyring;
69613
69614diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
69615index 4dff0c6..1ca9b72 100644
69616--- a/include/linux/kgdb.h
69617+++ b/include/linux/kgdb.h
69618@@ -53,7 +53,7 @@ extern int kgdb_connected;
69619 extern int kgdb_io_module_registered;
69620
69621 extern atomic_t kgdb_setting_breakpoint;
69622-extern atomic_t kgdb_cpu_doing_single_step;
69623+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
69624
69625 extern struct task_struct *kgdb_usethread;
69626 extern struct task_struct *kgdb_contthread;
69627@@ -255,7 +255,7 @@ struct kgdb_arch {
69628 void (*correct_hw_break)(void);
69629
69630 void (*enable_nmi)(bool on);
69631-};
69632+} __do_const;
69633
69634 /**
69635 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
69636@@ -280,7 +280,7 @@ struct kgdb_io {
69637 void (*pre_exception) (void);
69638 void (*post_exception) (void);
69639 int is_console;
69640-};
69641+} __do_const;
69642
69643 extern struct kgdb_arch arch_kgdb_ops;
69644
69645diff --git a/include/linux/kmod.h b/include/linux/kmod.h
69646index 5398d58..5883a34 100644
69647--- a/include/linux/kmod.h
69648+++ b/include/linux/kmod.h
69649@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
69650 * usually useless though. */
69651 extern __printf(2, 3)
69652 int __request_module(bool wait, const char *name, ...);
69653+extern __printf(3, 4)
69654+int ___request_module(bool wait, char *param_name, const char *name, ...);
69655 #define request_module(mod...) __request_module(true, mod)
69656 #define request_module_nowait(mod...) __request_module(false, mod)
69657 #define try_then_request_module(x, mod...) \
69658diff --git a/include/linux/kobject.h b/include/linux/kobject.h
69659index 939b112..ed6ed51 100644
69660--- a/include/linux/kobject.h
69661+++ b/include/linux/kobject.h
69662@@ -111,7 +111,7 @@ struct kobj_type {
69663 struct attribute **default_attrs;
69664 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
69665 const void *(*namespace)(struct kobject *kobj);
69666-};
69667+} __do_const;
69668
69669 struct kobj_uevent_env {
69670 char *envp[UEVENT_NUM_ENVP];
69671@@ -134,6 +134,7 @@ struct kobj_attribute {
69672 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
69673 const char *buf, size_t count);
69674 };
69675+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
69676
69677 extern const struct sysfs_ops kobj_sysfs_ops;
69678
69679diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
69680index f66b065..c2c29b4 100644
69681--- a/include/linux/kobject_ns.h
69682+++ b/include/linux/kobject_ns.h
69683@@ -43,7 +43,7 @@ struct kobj_ns_type_operations {
69684 const void *(*netlink_ns)(struct sock *sk);
69685 const void *(*initial_ns)(void);
69686 void (*drop_ns)(void *);
69687-};
69688+} __do_const;
69689
69690 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
69691 int kobj_ns_type_registered(enum kobj_ns_type type);
69692diff --git a/include/linux/kref.h b/include/linux/kref.h
69693index 4972e6e..de4d19b 100644
69694--- a/include/linux/kref.h
69695+++ b/include/linux/kref.h
69696@@ -64,7 +64,7 @@ static inline void kref_get(struct kref *kref)
69697 static inline int kref_sub(struct kref *kref, unsigned int count,
69698 void (*release)(struct kref *kref))
69699 {
69700- WARN_ON(release == NULL);
69701+ BUG_ON(release == NULL);
69702
69703 if (atomic_sub_and_test((int) count, &kref->refcount)) {
69704 release(kref);
69705diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
69706index 2c497ab..afe32f5 100644
69707--- a/include/linux/kvm_host.h
69708+++ b/include/linux/kvm_host.h
69709@@ -418,7 +418,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
69710 int __must_check vcpu_load(struct kvm_vcpu *vcpu);
69711 void vcpu_put(struct kvm_vcpu *vcpu);
69712
69713-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
69714+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
69715 struct module *module);
69716 void kvm_exit(void);
69717
69718@@ -574,7 +574,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
69719 struct kvm_guest_debug *dbg);
69720 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
69721
69722-int kvm_arch_init(void *opaque);
69723+int kvm_arch_init(const void *opaque);
69724 void kvm_arch_exit(void);
69725
69726 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
69727diff --git a/include/linux/libata.h b/include/linux/libata.h
69728index 649e5f8..ead5194 100644
69729--- a/include/linux/libata.h
69730+++ b/include/linux/libata.h
69731@@ -915,7 +915,7 @@ struct ata_port_operations {
69732 * fields must be pointers.
69733 */
69734 const struct ata_port_operations *inherits;
69735-};
69736+} __do_const;
69737
69738 struct ata_port_info {
69739 unsigned long flags;
69740diff --git a/include/linux/list.h b/include/linux/list.h
69741index cc6d2aa..c10ee83 100644
69742--- a/include/linux/list.h
69743+++ b/include/linux/list.h
69744@@ -112,6 +112,19 @@ extern void __list_del_entry(struct list_head *entry);
69745 extern void list_del(struct list_head *entry);
69746 #endif
69747
69748+extern void __pax_list_add(struct list_head *new,
69749+ struct list_head *prev,
69750+ struct list_head *next);
69751+static inline void pax_list_add(struct list_head *new, struct list_head *head)
69752+{
69753+ __pax_list_add(new, head, head->next);
69754+}
69755+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
69756+{
69757+ __pax_list_add(new, head->prev, head);
69758+}
69759+extern void pax_list_del(struct list_head *entry);
69760+
69761 /**
69762 * list_replace - replace old entry by new one
69763 * @old : the element to be replaced
69764@@ -145,6 +158,8 @@ static inline void list_del_init(struct list_head *entry)
69765 INIT_LIST_HEAD(entry);
69766 }
69767
69768+extern void pax_list_del_init(struct list_head *entry);
69769+
69770 /**
69771 * list_move - delete from one list and add as another's head
69772 * @list: the entry to move
69773diff --git a/include/linux/math64.h b/include/linux/math64.h
69774index b8ba855..0148090 100644
69775--- a/include/linux/math64.h
69776+++ b/include/linux/math64.h
69777@@ -14,7 +14,7 @@
69778 * This is commonly provided by 32bit archs to provide an optimized 64bit
69779 * divide.
69780 */
69781-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
69782+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
69783 {
69784 *remainder = dividend % divisor;
69785 return dividend / divisor;
69786@@ -50,7 +50,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
69787 #define div64_long(x,y) div_s64((x),(y))
69788
69789 #ifndef div_u64_rem
69790-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
69791+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
69792 {
69793 *remainder = do_div(dividend, divisor);
69794 return dividend;
69795@@ -79,7 +79,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
69796 * divide.
69797 */
69798 #ifndef div_u64
69799-static inline u64 div_u64(u64 dividend, u32 divisor)
69800+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
69801 {
69802 u32 remainder;
69803 return div_u64_rem(dividend, divisor, &remainder);
69804diff --git a/include/linux/mm.h b/include/linux/mm.h
69805index 66e2f7c..b916b9a 100644
69806--- a/include/linux/mm.h
69807+++ b/include/linux/mm.h
69808@@ -101,6 +101,11 @@ extern unsigned int kobjsize(const void *objp);
69809 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
69810 #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
69811 #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
69812+
69813+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
69814+#define VM_PAGEEXEC 0x02000000 /* vma->vm_page_prot needs special handling */
69815+#endif
69816+
69817 #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
69818
69819 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
69820@@ -200,8 +205,8 @@ struct vm_operations_struct {
69821 /* called by access_process_vm when get_user_pages() fails, typically
69822 * for use by special VMAs that can switch between memory and hardware
69823 */
69824- int (*access)(struct vm_area_struct *vma, unsigned long addr,
69825- void *buf, int len, int write);
69826+ ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
69827+ void *buf, size_t len, int write);
69828 #ifdef CONFIG_NUMA
69829 /*
69830 * set_policy() op must add a reference to any non-NULL @new mempolicy
69831@@ -231,6 +236,7 @@ struct vm_operations_struct {
69832 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
69833 unsigned long size, pgoff_t pgoff);
69834 };
69835+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
69836
69837 struct mmu_gather;
69838 struct inode;
69839@@ -995,8 +1001,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
69840 unsigned long *pfn);
69841 int follow_phys(struct vm_area_struct *vma, unsigned long address,
69842 unsigned int flags, unsigned long *prot, resource_size_t *phys);
69843-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
69844- void *buf, int len, int write);
69845+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
69846+ void *buf, size_t len, int write);
69847
69848 static inline void unmap_shared_mapping_range(struct address_space *mapping,
69849 loff_t const holebegin, loff_t const holelen)
69850@@ -1035,10 +1041,10 @@ static inline int fixup_user_fault(struct task_struct *tsk,
69851 }
69852 #endif
69853
69854-extern int make_pages_present(unsigned long addr, unsigned long end);
69855-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
69856-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
69857- void *buf, int len, int write);
69858+extern ssize_t make_pages_present(unsigned long addr, unsigned long end);
69859+extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
69860+extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
69861+ void *buf, size_t len, int write);
69862
69863 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
69864 unsigned long start, int len, unsigned int foll_flags,
69865@@ -1068,34 +1074,6 @@ int set_page_dirty(struct page *page);
69866 int set_page_dirty_lock(struct page *page);
69867 int clear_page_dirty_for_io(struct page *page);
69868
69869-/* Is the vma a continuation of the stack vma above it? */
69870-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
69871-{
69872- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
69873-}
69874-
69875-static inline int stack_guard_page_start(struct vm_area_struct *vma,
69876- unsigned long addr)
69877-{
69878- return (vma->vm_flags & VM_GROWSDOWN) &&
69879- (vma->vm_start == addr) &&
69880- !vma_growsdown(vma->vm_prev, addr);
69881-}
69882-
69883-/* Is the vma a continuation of the stack vma below it? */
69884-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
69885-{
69886- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
69887-}
69888-
69889-static inline int stack_guard_page_end(struct vm_area_struct *vma,
69890- unsigned long addr)
69891-{
69892- return (vma->vm_flags & VM_GROWSUP) &&
69893- (vma->vm_end == addr) &&
69894- !vma_growsup(vma->vm_next, addr);
69895-}
69896-
69897 extern pid_t
69898 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
69899
69900@@ -1198,6 +1176,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
69901 }
69902 #endif
69903
69904+#ifdef CONFIG_MMU
69905+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
69906+#else
69907+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
69908+{
69909+ return __pgprot(0);
69910+}
69911+#endif
69912+
69913 int vma_wants_writenotify(struct vm_area_struct *vma);
69914
69915 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
69916@@ -1216,8 +1203,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
69917 {
69918 return 0;
69919 }
69920+
69921+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
69922+ unsigned long address)
69923+{
69924+ return 0;
69925+}
69926 #else
69927 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
69928+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
69929 #endif
69930
69931 #ifdef __PAGETABLE_PMD_FOLDED
69932@@ -1226,8 +1220,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
69933 {
69934 return 0;
69935 }
69936+
69937+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
69938+ unsigned long address)
69939+{
69940+ return 0;
69941+}
69942 #else
69943 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
69944+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
69945 #endif
69946
69947 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
69948@@ -1245,11 +1246,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
69949 NULL: pud_offset(pgd, address);
69950 }
69951
69952+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
69953+{
69954+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
69955+ NULL: pud_offset(pgd, address);
69956+}
69957+
69958 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
69959 {
69960 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
69961 NULL: pmd_offset(pud, address);
69962 }
69963+
69964+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
69965+{
69966+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
69967+ NULL: pmd_offset(pud, address);
69968+}
69969 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
69970
69971 #if USE_SPLIT_PTLOCKS
69972@@ -1479,6 +1492,7 @@ extern unsigned long do_mmap_pgoff(struct file *, unsigned long,
69973 unsigned long, unsigned long,
69974 unsigned long, unsigned long);
69975 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
69976+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
69977
69978 /* These take the mm semaphore themselves */
69979 extern unsigned long vm_brk(unsigned long, unsigned long);
69980@@ -1573,6 +1587,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
69981 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
69982 struct vm_area_struct **pprev);
69983
69984+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
69985+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
69986+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
69987+
69988 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
69989 NULL if none. Assume start_addr < end_addr. */
69990 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
69991@@ -1601,15 +1619,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
69992 return vma;
69993 }
69994
69995-#ifdef CONFIG_MMU
69996-pgprot_t vm_get_page_prot(unsigned long vm_flags);
69997-#else
69998-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
69999-{
70000- return __pgprot(0);
70001-}
70002-#endif
70003-
70004 #ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
70005 unsigned long change_prot_numa(struct vm_area_struct *vma,
70006 unsigned long start, unsigned long end);
70007@@ -1649,6 +1658,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
70008 static inline void vm_stat_account(struct mm_struct *mm,
70009 unsigned long flags, struct file *file, long pages)
70010 {
70011+
70012+#ifdef CONFIG_PAX_RANDMMAP
70013+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
70014+#endif
70015+
70016 mm->total_vm += pages;
70017 }
70018 #endif /* CONFIG_PROC_FS */
70019@@ -1721,7 +1735,7 @@ extern int unpoison_memory(unsigned long pfn);
70020 extern int sysctl_memory_failure_early_kill;
70021 extern int sysctl_memory_failure_recovery;
70022 extern void shake_page(struct page *p, int access);
70023-extern atomic_long_t mce_bad_pages;
70024+extern atomic_long_unchecked_t mce_bad_pages;
70025 extern int soft_offline_page(struct page *page, int flags);
70026
70027 extern void dump_page(struct page *page);
70028@@ -1752,5 +1766,11 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
70029 static inline bool page_is_guard(struct page *page) { return false; }
70030 #endif /* CONFIG_DEBUG_PAGEALLOC */
70031
70032+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
70033+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
70034+#else
70035+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
70036+#endif
70037+
70038 #endif /* __KERNEL__ */
70039 #endif /* _LINUX_MM_H */
70040diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
70041index f8f5162..3aaf20f 100644
70042--- a/include/linux/mm_types.h
70043+++ b/include/linux/mm_types.h
70044@@ -288,6 +288,8 @@ struct vm_area_struct {
70045 #ifdef CONFIG_NUMA
70046 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
70047 #endif
70048+
70049+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
70050 };
70051
70052 struct core_thread {
70053@@ -436,6 +438,24 @@ struct mm_struct {
70054 int first_nid;
70055 #endif
70056 struct uprobes_state uprobes_state;
70057+
70058+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
70059+ unsigned long pax_flags;
70060+#endif
70061+
70062+#ifdef CONFIG_PAX_DLRESOLVE
70063+ unsigned long call_dl_resolve;
70064+#endif
70065+
70066+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
70067+ unsigned long call_syscall;
70068+#endif
70069+
70070+#ifdef CONFIG_PAX_ASLR
70071+ unsigned long delta_mmap; /* randomized offset */
70072+ unsigned long delta_stack; /* randomized offset */
70073+#endif
70074+
70075 };
70076
70077 /* first nid will either be a valid NID or one of these values */
70078diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
70079index c5d5278..f0b68c8 100644
70080--- a/include/linux/mmiotrace.h
70081+++ b/include/linux/mmiotrace.h
70082@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
70083 /* Called from ioremap.c */
70084 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
70085 void __iomem *addr);
70086-extern void mmiotrace_iounmap(volatile void __iomem *addr);
70087+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
70088
70089 /* For anyone to insert markers. Remember trailing newline. */
70090 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
70091@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
70092 {
70093 }
70094
70095-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
70096+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
70097 {
70098 }
70099
70100diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
70101index 73b64a3..6562925 100644
70102--- a/include/linux/mmzone.h
70103+++ b/include/linux/mmzone.h
70104@@ -412,7 +412,7 @@ struct zone {
70105 unsigned long flags; /* zone flags, see below */
70106
70107 /* Zone statistics */
70108- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70109+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70110
70111 /*
70112 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
70113diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
70114index fed3def..c933f99 100644
70115--- a/include/linux/mod_devicetable.h
70116+++ b/include/linux/mod_devicetable.h
70117@@ -12,7 +12,7 @@
70118 typedef unsigned long kernel_ulong_t;
70119 #endif
70120
70121-#define PCI_ANY_ID (~0)
70122+#define PCI_ANY_ID ((__u16)~0)
70123
70124 struct pci_device_id {
70125 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
70126@@ -139,7 +139,7 @@ struct usb_device_id {
70127 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
70128 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
70129
70130-#define HID_ANY_ID (~0)
70131+#define HID_ANY_ID (~0U)
70132 #define HID_BUS_ANY 0xffff
70133 #define HID_GROUP_ANY 0x0000
70134
70135@@ -498,7 +498,7 @@ struct dmi_system_id {
70136 const char *ident;
70137 struct dmi_strmatch matches[4];
70138 void *driver_data;
70139-};
70140+} __do_const;
70141 /*
70142 * struct dmi_device_id appears during expansion of
70143 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
70144diff --git a/include/linux/module.h b/include/linux/module.h
70145index 1375ee3..ced8177 100644
70146--- a/include/linux/module.h
70147+++ b/include/linux/module.h
70148@@ -17,9 +17,11 @@
70149 #include <linux/moduleparam.h>
70150 #include <linux/tracepoint.h>
70151 #include <linux/export.h>
70152+#include <linux/fs.h>
70153
70154 #include <linux/percpu.h>
70155 #include <asm/module.h>
70156+#include <asm/pgtable.h>
70157
70158 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
70159 #define MODULE_SIG_STRING "~Module signature appended~\n"
70160@@ -54,12 +56,13 @@ struct module_attribute {
70161 int (*test)(struct module *);
70162 void (*free)(struct module *);
70163 };
70164+typedef struct module_attribute __no_const module_attribute_no_const;
70165
70166 struct module_version_attribute {
70167 struct module_attribute mattr;
70168 const char *module_name;
70169 const char *version;
70170-} __attribute__ ((__aligned__(sizeof(void *))));
70171+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
70172
70173 extern ssize_t __modver_version_show(struct module_attribute *,
70174 struct module_kobject *, char *);
70175@@ -232,7 +235,7 @@ struct module
70176
70177 /* Sysfs stuff. */
70178 struct module_kobject mkobj;
70179- struct module_attribute *modinfo_attrs;
70180+ module_attribute_no_const *modinfo_attrs;
70181 const char *version;
70182 const char *srcversion;
70183 struct kobject *holders_dir;
70184@@ -281,19 +284,16 @@ struct module
70185 int (*init)(void);
70186
70187 /* If this is non-NULL, vfree after init() returns */
70188- void *module_init;
70189+ void *module_init_rx, *module_init_rw;
70190
70191 /* Here is the actual code + data, vfree'd on unload. */
70192- void *module_core;
70193+ void *module_core_rx, *module_core_rw;
70194
70195 /* Here are the sizes of the init and core sections */
70196- unsigned int init_size, core_size;
70197+ unsigned int init_size_rw, core_size_rw;
70198
70199 /* The size of the executable code in each section. */
70200- unsigned int init_text_size, core_text_size;
70201-
70202- /* Size of RO sections of the module (text+rodata) */
70203- unsigned int init_ro_size, core_ro_size;
70204+ unsigned int init_size_rx, core_size_rx;
70205
70206 /* Arch-specific module values */
70207 struct mod_arch_specific arch;
70208@@ -349,6 +349,10 @@ struct module
70209 #ifdef CONFIG_EVENT_TRACING
70210 struct ftrace_event_call **trace_events;
70211 unsigned int num_trace_events;
70212+ struct file_operations trace_id;
70213+ struct file_operations trace_enable;
70214+ struct file_operations trace_format;
70215+ struct file_operations trace_filter;
70216 #endif
70217 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
70218 unsigned int num_ftrace_callsites;
70219@@ -396,16 +400,46 @@ bool is_module_address(unsigned long addr);
70220 bool is_module_percpu_address(unsigned long addr);
70221 bool is_module_text_address(unsigned long addr);
70222
70223+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
70224+{
70225+
70226+#ifdef CONFIG_PAX_KERNEXEC
70227+ if (ktla_ktva(addr) >= (unsigned long)start &&
70228+ ktla_ktva(addr) < (unsigned long)start + size)
70229+ return 1;
70230+#endif
70231+
70232+ return ((void *)addr >= start && (void *)addr < start + size);
70233+}
70234+
70235+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
70236+{
70237+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
70238+}
70239+
70240+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
70241+{
70242+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
70243+}
70244+
70245+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
70246+{
70247+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
70248+}
70249+
70250+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
70251+{
70252+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
70253+}
70254+
70255 static inline int within_module_core(unsigned long addr, struct module *mod)
70256 {
70257- return (unsigned long)mod->module_core <= addr &&
70258- addr < (unsigned long)mod->module_core + mod->core_size;
70259+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
70260 }
70261
70262 static inline int within_module_init(unsigned long addr, struct module *mod)
70263 {
70264- return (unsigned long)mod->module_init <= addr &&
70265- addr < (unsigned long)mod->module_init + mod->init_size;
70266+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
70267 }
70268
70269 /* Search for module by name: must hold module_mutex. */
70270diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
70271index 560ca53..ef621ef 100644
70272--- a/include/linux/moduleloader.h
70273+++ b/include/linux/moduleloader.h
70274@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
70275 sections. Returns NULL on failure. */
70276 void *module_alloc(unsigned long size);
70277
70278+#ifdef CONFIG_PAX_KERNEXEC
70279+void *module_alloc_exec(unsigned long size);
70280+#else
70281+#define module_alloc_exec(x) module_alloc(x)
70282+#endif
70283+
70284 /* Free memory returned from module_alloc. */
70285 void module_free(struct module *mod, void *module_region);
70286
70287+#ifdef CONFIG_PAX_KERNEXEC
70288+void module_free_exec(struct module *mod, void *module_region);
70289+#else
70290+#define module_free_exec(x, y) module_free((x), (y))
70291+#endif
70292+
70293 /*
70294 * Apply the given relocation to the (simplified) ELF. Return -error
70295 * or 0.
70296@@ -45,7 +57,9 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
70297 unsigned int relsec,
70298 struct module *me)
70299 {
70300+#ifdef CONFIG_MODULES
70301 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
70302+#endif
70303 return -ENOEXEC;
70304 }
70305 #endif
70306@@ -67,7 +81,9 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
70307 unsigned int relsec,
70308 struct module *me)
70309 {
70310+#ifdef CONFIG_MODULES
70311 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
70312+#endif
70313 return -ENOEXEC;
70314 }
70315 #endif
70316diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
70317index 137b419..fe663ec 100644
70318--- a/include/linux/moduleparam.h
70319+++ b/include/linux/moduleparam.h
70320@@ -284,7 +284,7 @@ static inline void __kernel_param_unlock(void)
70321 * @len is usually just sizeof(string).
70322 */
70323 #define module_param_string(name, string, len, perm) \
70324- static const struct kparam_string __param_string_##name \
70325+ static const struct kparam_string __param_string_##name __used \
70326 = { len, string }; \
70327 __module_param_call(MODULE_PARAM_PREFIX, name, \
70328 &param_ops_string, \
70329@@ -423,7 +423,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
70330 */
70331 #define module_param_array_named(name, array, type, nump, perm) \
70332 param_check_##type(name, &(array)[0]); \
70333- static const struct kparam_array __param_arr_##name \
70334+ static const struct kparam_array __param_arr_##name __used \
70335 = { .max = ARRAY_SIZE(array), .num = nump, \
70336 .ops = &param_ops_##type, \
70337 .elemsize = sizeof(array[0]), .elem = array }; \
70338diff --git a/include/linux/namei.h b/include/linux/namei.h
70339index 5a5ff57..5ae5070 100644
70340--- a/include/linux/namei.h
70341+++ b/include/linux/namei.h
70342@@ -19,7 +19,7 @@ struct nameidata {
70343 unsigned seq;
70344 int last_type;
70345 unsigned depth;
70346- char *saved_names[MAX_NESTED_LINKS + 1];
70347+ const char *saved_names[MAX_NESTED_LINKS + 1];
70348 };
70349
70350 /*
70351@@ -84,12 +84,12 @@ extern void unlock_rename(struct dentry *, struct dentry *);
70352
70353 extern void nd_jump_link(struct nameidata *nd, struct path *path);
70354
70355-static inline void nd_set_link(struct nameidata *nd, char *path)
70356+static inline void nd_set_link(struct nameidata *nd, const char *path)
70357 {
70358 nd->saved_names[nd->depth] = path;
70359 }
70360
70361-static inline char *nd_get_link(struct nameidata *nd)
70362+static inline const char *nd_get_link(const struct nameidata *nd)
70363 {
70364 return nd->saved_names[nd->depth];
70365 }
70366diff --git a/include/linux/net.h b/include/linux/net.h
70367index aa16731..514b875 100644
70368--- a/include/linux/net.h
70369+++ b/include/linux/net.h
70370@@ -183,7 +183,7 @@ struct net_proto_family {
70371 int (*create)(struct net *net, struct socket *sock,
70372 int protocol, int kern);
70373 struct module *owner;
70374-};
70375+} __do_const;
70376
70377 struct iovec;
70378 struct kvec;
70379diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
70380index 9ef07d0..130a5d9 100644
70381--- a/include/linux/netdevice.h
70382+++ b/include/linux/netdevice.h
70383@@ -1012,6 +1012,7 @@ struct net_device_ops {
70384 u32 pid, u32 seq,
70385 struct net_device *dev);
70386 };
70387+typedef struct net_device_ops __no_const net_device_ops_no_const;
70388
70389 /*
70390 * The DEVICE structure.
70391@@ -1078,7 +1079,7 @@ struct net_device {
70392 int iflink;
70393
70394 struct net_device_stats stats;
70395- atomic_long_t rx_dropped; /* dropped packets by core network
70396+ atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
70397 * Do not use this in drivers.
70398 */
70399
70400diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
70401index ee14284..bc65d63 100644
70402--- a/include/linux/netfilter.h
70403+++ b/include/linux/netfilter.h
70404@@ -82,7 +82,7 @@ struct nf_sockopt_ops {
70405 #endif
70406 /* Use the module struct to lock set/get code in place */
70407 struct module *owner;
70408-};
70409+} __do_const;
70410
70411 /* Function to register/unregister hook points. */
70412 int nf_register_hook(struct nf_hook_ops *reg);
70413diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
70414index 7958e84..ed74d7a 100644
70415--- a/include/linux/netfilter/ipset/ip_set.h
70416+++ b/include/linux/netfilter/ipset/ip_set.h
70417@@ -98,7 +98,7 @@ struct ip_set_type_variant {
70418 /* Return true if "b" set is the same as "a"
70419 * according to the create set parameters */
70420 bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
70421-};
70422+} __do_const;
70423
70424 /* The core set type structure */
70425 struct ip_set_type {
70426diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
70427index 4966dde..7d8ce06 100644
70428--- a/include/linux/netfilter/nfnetlink.h
70429+++ b/include/linux/netfilter/nfnetlink.h
70430@@ -16,7 +16,7 @@ struct nfnl_callback {
70431 const struct nlattr * const cda[]);
70432 const struct nla_policy *policy; /* netlink attribute policy */
70433 const u_int16_t attr_count; /* number of nlattr's */
70434-};
70435+} __do_const;
70436
70437 struct nfnetlink_subsystem {
70438 const char *name;
70439diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
70440new file mode 100644
70441index 0000000..33f4af8
70442--- /dev/null
70443+++ b/include/linux/netfilter/xt_gradm.h
70444@@ -0,0 +1,9 @@
70445+#ifndef _LINUX_NETFILTER_XT_GRADM_H
70446+#define _LINUX_NETFILTER_XT_GRADM_H 1
70447+
70448+struct xt_gradm_mtinfo {
70449+ __u16 flags;
70450+ __u16 invflags;
70451+};
70452+
70453+#endif
70454diff --git a/include/linux/nls.h b/include/linux/nls.h
70455index 5dc635f..35f5e11 100644
70456--- a/include/linux/nls.h
70457+++ b/include/linux/nls.h
70458@@ -31,7 +31,7 @@ struct nls_table {
70459 const unsigned char *charset2upper;
70460 struct module *owner;
70461 struct nls_table *next;
70462-};
70463+} __do_const;
70464
70465 /* this value hold the maximum octet of charset */
70466 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
70467diff --git a/include/linux/notifier.h b/include/linux/notifier.h
70468index d65746e..62e72c2 100644
70469--- a/include/linux/notifier.h
70470+++ b/include/linux/notifier.h
70471@@ -51,7 +51,8 @@ struct notifier_block {
70472 int (*notifier_call)(struct notifier_block *, unsigned long, void *);
70473 struct notifier_block __rcu *next;
70474 int priority;
70475-};
70476+} __do_const;
70477+typedef struct notifier_block __no_const notifier_block_no_const;
70478
70479 struct atomic_notifier_head {
70480 spinlock_t lock;
70481diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
70482index a4c5624..79d6d88 100644
70483--- a/include/linux/oprofile.h
70484+++ b/include/linux/oprofile.h
70485@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
70486 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
70487 char const * name, ulong * val);
70488
70489-/** Create a file for read-only access to an atomic_t. */
70490+/** Create a file for read-only access to an atomic_unchecked_t. */
70491 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
70492- char const * name, atomic_t * val);
70493+ char const * name, atomic_unchecked_t * val);
70494
70495 /** create a directory */
70496 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
70497diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
70498index 45fc162..01a4068 100644
70499--- a/include/linux/pci_hotplug.h
70500+++ b/include/linux/pci_hotplug.h
70501@@ -80,7 +80,8 @@ struct hotplug_slot_ops {
70502 int (*get_attention_status) (struct hotplug_slot *slot, u8 *value);
70503 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
70504 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
70505-};
70506+} __do_const;
70507+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
70508
70509 /**
70510 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
70511diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
70512index a280650..2b67b91 100644
70513--- a/include/linux/perf_event.h
70514+++ b/include/linux/perf_event.h
70515@@ -328,8 +328,8 @@ struct perf_event {
70516
70517 enum perf_event_active_state state;
70518 unsigned int attach_state;
70519- local64_t count;
70520- atomic64_t child_count;
70521+ local64_t count; /* PaX: fix it one day */
70522+ atomic64_unchecked_t child_count;
70523
70524 /*
70525 * These are the total time in nanoseconds that the event
70526@@ -380,8 +380,8 @@ struct perf_event {
70527 * These accumulate total time (in nanoseconds) that children
70528 * events have been enabled and running, respectively.
70529 */
70530- atomic64_t child_total_time_enabled;
70531- atomic64_t child_total_time_running;
70532+ atomic64_unchecked_t child_total_time_enabled;
70533+ atomic64_unchecked_t child_total_time_running;
70534
70535 /*
70536 * Protect attach/detach and child_list:
70537@@ -807,7 +807,7 @@ static inline void perf_restore_debug_store(void) { }
70538 */
70539 #define perf_cpu_notifier(fn) \
70540 do { \
70541- static struct notifier_block fn##_nb __cpuinitdata = \
70542+ static struct notifier_block fn##_nb = \
70543 { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
70544 unsigned long cpu = smp_processor_id(); \
70545 unsigned long flags; \
70546diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
70547index ad1a427..6419649 100644
70548--- a/include/linux/pipe_fs_i.h
70549+++ b/include/linux/pipe_fs_i.h
70550@@ -45,9 +45,9 @@ struct pipe_buffer {
70551 struct pipe_inode_info {
70552 wait_queue_head_t wait;
70553 unsigned int nrbufs, curbuf, buffers;
70554- unsigned int readers;
70555- unsigned int writers;
70556- unsigned int waiting_writers;
70557+ atomic_t readers;
70558+ atomic_t writers;
70559+ atomic_t waiting_writers;
70560 unsigned int r_counter;
70561 unsigned int w_counter;
70562 struct page *tmp_page;
70563diff --git a/include/linux/platform_data/usb-ehci-s5p.h b/include/linux/platform_data/usb-ehci-s5p.h
70564index 5f28cae..3d23723 100644
70565--- a/include/linux/platform_data/usb-ehci-s5p.h
70566+++ b/include/linux/platform_data/usb-ehci-s5p.h
70567@@ -14,7 +14,7 @@
70568 struct s5p_ehci_platdata {
70569 int (*phy_init)(struct platform_device *pdev, int type);
70570 int (*phy_exit)(struct platform_device *pdev, int type);
70571-};
70572+} __no_const;
70573
70574 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
70575
70576diff --git a/include/linux/platform_data/usb-exynos.h b/include/linux/platform_data/usb-exynos.h
70577index c256c59..8ea94c7 100644
70578--- a/include/linux/platform_data/usb-exynos.h
70579+++ b/include/linux/platform_data/usb-exynos.h
70580@@ -14,7 +14,7 @@
70581 struct exynos4_ohci_platdata {
70582 int (*phy_init)(struct platform_device *pdev, int type);
70583 int (*phy_exit)(struct platform_device *pdev, int type);
70584-};
70585+} __no_const;
70586
70587 extern void exynos4_ohci_set_platdata(struct exynos4_ohci_platdata *pd);
70588
70589diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
70590index 7c1d252..c5c773e 100644
70591--- a/include/linux/pm_domain.h
70592+++ b/include/linux/pm_domain.h
70593@@ -48,7 +48,7 @@ struct gpd_dev_ops {
70594
70595 struct gpd_cpu_data {
70596 unsigned int saved_exit_latency;
70597- struct cpuidle_state *idle_state;
70598+ cpuidle_state_no_const *idle_state;
70599 };
70600
70601 struct generic_pm_domain {
70602diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
70603index f271860..6b3bec5 100644
70604--- a/include/linux/pm_runtime.h
70605+++ b/include/linux/pm_runtime.h
70606@@ -97,7 +97,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
70607
70608 static inline void pm_runtime_mark_last_busy(struct device *dev)
70609 {
70610- ACCESS_ONCE(dev->power.last_busy) = jiffies;
70611+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
70612 }
70613
70614 #else /* !CONFIG_PM_RUNTIME */
70615diff --git a/include/linux/pnp.h b/include/linux/pnp.h
70616index 195aafc..49a7bc2 100644
70617--- a/include/linux/pnp.h
70618+++ b/include/linux/pnp.h
70619@@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
70620 struct pnp_fixup {
70621 char id[7];
70622 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
70623-};
70624+} __do_const;
70625
70626 /* config parameters */
70627 #define PNP_CONFIG_NORMAL 0x0001
70628diff --git a/include/linux/poison.h b/include/linux/poison.h
70629index 2110a81..13a11bb 100644
70630--- a/include/linux/poison.h
70631+++ b/include/linux/poison.h
70632@@ -19,8 +19,8 @@
70633 * under normal circumstances, used to verify that nobody uses
70634 * non-initialized list entries.
70635 */
70636-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
70637-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
70638+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
70639+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
70640
70641 /********** include/linux/timer.h **********/
70642 /*
70643diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
70644index c0f44c2..1572583 100644
70645--- a/include/linux/power/smartreflex.h
70646+++ b/include/linux/power/smartreflex.h
70647@@ -238,7 +238,7 @@ struct omap_sr_class_data {
70648 int (*notify)(struct omap_sr *sr, u32 status);
70649 u8 notify_flags;
70650 u8 class_type;
70651-};
70652+} __do_const;
70653
70654 /**
70655 * struct omap_sr_nvalue_table - Smartreflex n-target value info
70656diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
70657index 4ea1d37..80f4b33 100644
70658--- a/include/linux/ppp-comp.h
70659+++ b/include/linux/ppp-comp.h
70660@@ -84,7 +84,7 @@ struct compressor {
70661 struct module *owner;
70662 /* Extra skb space needed by the compressor algorithm */
70663 unsigned int comp_extra;
70664-};
70665+} __do_const;
70666
70667 /*
70668 * The return value from decompress routine is the length of the
70669diff --git a/include/linux/printk.h b/include/linux/printk.h
70670index 9afc01e..92c32e8 100644
70671--- a/include/linux/printk.h
70672+++ b/include/linux/printk.h
70673@@ -101,6 +101,8 @@ void early_printk(const char *fmt, ...);
70674 extern int printk_needs_cpu(int cpu);
70675 extern void printk_tick(void);
70676
70677+extern int kptr_restrict;
70678+
70679 #ifdef CONFIG_PRINTK
70680 asmlinkage __printf(5, 0)
70681 int vprintk_emit(int facility, int level,
70682@@ -135,7 +137,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
70683
70684 extern int printk_delay_msec;
70685 extern int dmesg_restrict;
70686-extern int kptr_restrict;
70687
70688 void log_buf_kexec_setup(void);
70689 void __init setup_log_buf(int early);
70690diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
70691index 32676b3..e46f2c0 100644
70692--- a/include/linux/proc_fs.h
70693+++ b/include/linux/proc_fs.h
70694@@ -159,6 +159,18 @@ static inline struct proc_dir_entry *proc_create(const char *name, umode_t mode,
70695 return proc_create_data(name, mode, parent, proc_fops, NULL);
70696 }
70697
70698+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
70699+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
70700+{
70701+#ifdef CONFIG_GRKERNSEC_PROC_USER
70702+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
70703+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
70704+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
70705+#else
70706+ return proc_create_data(name, mode, parent, proc_fops, NULL);
70707+#endif
70708+}
70709+
70710 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
70711 umode_t mode, struct proc_dir_entry *base,
70712 read_proc_t *read_proc, void * data)
70713@@ -268,7 +280,7 @@ struct proc_ns_operations {
70714 void (*put)(void *ns);
70715 int (*install)(struct nsproxy *nsproxy, void *ns);
70716 unsigned int (*inum)(void *ns);
70717-};
70718+} __do_const;
70719 extern const struct proc_ns_operations netns_operations;
70720 extern const struct proc_ns_operations utsns_operations;
70721 extern const struct proc_ns_operations ipcns_operations;
70722diff --git a/include/linux/random.h b/include/linux/random.h
70723index d984608..d6f0042 100644
70724--- a/include/linux/random.h
70725+++ b/include/linux/random.h
70726@@ -39,6 +39,11 @@ void prandom_seed(u32 seed);
70727 u32 prandom_u32_state(struct rnd_state *);
70728 void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
70729
70730+static inline unsigned long pax_get_random_long(void)
70731+{
70732+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
70733+}
70734+
70735 /*
70736 * Handle minimum values for seeds
70737 */
70738diff --git a/include/linux/rculist.h b/include/linux/rculist.h
70739index c92dd28..08f4eab 100644
70740--- a/include/linux/rculist.h
70741+++ b/include/linux/rculist.h
70742@@ -44,6 +44,9 @@ extern void __list_add_rcu(struct list_head *new,
70743 struct list_head *prev, struct list_head *next);
70744 #endif
70745
70746+extern void __pax_list_add_rcu(struct list_head *new,
70747+ struct list_head *prev, struct list_head *next);
70748+
70749 /**
70750 * list_add_rcu - add a new entry to rcu-protected list
70751 * @new: new entry to be added
70752@@ -65,6 +68,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
70753 __list_add_rcu(new, head, head->next);
70754 }
70755
70756+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
70757+{
70758+ __pax_list_add_rcu(new, head, head->next);
70759+}
70760+
70761 /**
70762 * list_add_tail_rcu - add a new entry to rcu-protected list
70763 * @new: new entry to be added
70764@@ -87,6 +95,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
70765 __list_add_rcu(new, head->prev, head);
70766 }
70767
70768+static inline void pax_list_add_tail_rcu(struct list_head *new,
70769+ struct list_head *head)
70770+{
70771+ __pax_list_add_rcu(new, head->prev, head);
70772+}
70773+
70774 /**
70775 * list_del_rcu - deletes entry from list without re-initialization
70776 * @entry: the element to delete from the list.
70777@@ -117,6 +131,8 @@ static inline void list_del_rcu(struct list_head *entry)
70778 entry->prev = LIST_POISON2;
70779 }
70780
70781+extern void pax_list_del_rcu(struct list_head *entry);
70782+
70783 /**
70784 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
70785 * @n: the element to delete from the hash list.
70786diff --git a/include/linux/reboot.h b/include/linux/reboot.h
70787index 23b3630..e1bc12b 100644
70788--- a/include/linux/reboot.h
70789+++ b/include/linux/reboot.h
70790@@ -18,9 +18,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
70791 * Architecture-specific implementations of sys_reboot commands.
70792 */
70793
70794-extern void machine_restart(char *cmd);
70795-extern void machine_halt(void);
70796-extern void machine_power_off(void);
70797+extern void machine_restart(char *cmd) __noreturn;
70798+extern void machine_halt(void) __noreturn;
70799+extern void machine_power_off(void) __noreturn;
70800
70801 extern void machine_shutdown(void);
70802 struct pt_regs;
70803@@ -31,9 +31,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
70804 */
70805
70806 extern void kernel_restart_prepare(char *cmd);
70807-extern void kernel_restart(char *cmd);
70808-extern void kernel_halt(void);
70809-extern void kernel_power_off(void);
70810+extern void kernel_restart(char *cmd) __noreturn;
70811+extern void kernel_halt(void) __noreturn;
70812+extern void kernel_power_off(void) __noreturn;
70813
70814 extern int C_A_D; /* for sysctl */
70815 void ctrl_alt_del(void);
70816@@ -47,7 +47,7 @@ extern int orderly_poweroff(bool force);
70817 * Emergency restart, callable from an interrupt handler.
70818 */
70819
70820-extern void emergency_restart(void);
70821+extern void emergency_restart(void) __noreturn;
70822 #include <asm/emergency-restart.h>
70823
70824 #endif /* _LINUX_REBOOT_H */
70825diff --git a/include/linux/regset.h b/include/linux/regset.h
70826index 8e0c9fe..ac4d221 100644
70827--- a/include/linux/regset.h
70828+++ b/include/linux/regset.h
70829@@ -161,7 +161,8 @@ struct user_regset {
70830 unsigned int align;
70831 unsigned int bias;
70832 unsigned int core_note_type;
70833-};
70834+} __do_const;
70835+typedef struct user_regset __no_const user_regset_no_const;
70836
70837 /**
70838 * struct user_regset_view - available regsets
70839diff --git a/include/linux/relay.h b/include/linux/relay.h
70840index 91cacc3..b55ff74 100644
70841--- a/include/linux/relay.h
70842+++ b/include/linux/relay.h
70843@@ -160,7 +160,7 @@ struct rchan_callbacks
70844 * The callback should return 0 if successful, negative if not.
70845 */
70846 int (*remove_buf_file)(struct dentry *dentry);
70847-};
70848+} __no_const;
70849
70850 /*
70851 * CONFIG_RELAY kernel API, kernel/relay.c
70852diff --git a/include/linux/rio.h b/include/linux/rio.h
70853index a3e7842..d973ca6 100644
70854--- a/include/linux/rio.h
70855+++ b/include/linux/rio.h
70856@@ -339,7 +339,7 @@ struct rio_ops {
70857 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
70858 u64 rstart, u32 size, u32 flags);
70859 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
70860-};
70861+} __no_const;
70862
70863 #define RIO_RESOURCE_MEM 0x00000100
70864 #define RIO_RESOURCE_DOORBELL 0x00000200
70865diff --git a/include/linux/rmap.h b/include/linux/rmap.h
70866index c20635c..2f5def4 100644
70867--- a/include/linux/rmap.h
70868+++ b/include/linux/rmap.h
70869@@ -145,8 +145,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
70870 void anon_vma_init(void); /* create anon_vma_cachep */
70871 int anon_vma_prepare(struct vm_area_struct *);
70872 void unlink_anon_vmas(struct vm_area_struct *);
70873-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
70874-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
70875+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
70876+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
70877
70878 static inline void anon_vma_merge(struct vm_area_struct *vma,
70879 struct vm_area_struct *next)
70880diff --git a/include/linux/sched.h b/include/linux/sched.h
70881index d211247..eac6c2c 100644
70882--- a/include/linux/sched.h
70883+++ b/include/linux/sched.h
70884@@ -61,6 +61,7 @@ struct bio_list;
70885 struct fs_struct;
70886 struct perf_event_context;
70887 struct blk_plug;
70888+struct linux_binprm;
70889
70890 /*
70891 * List of flags we want to share for kernel threads,
70892@@ -327,7 +328,7 @@ extern char __sched_text_start[], __sched_text_end[];
70893 extern int in_sched_functions(unsigned long addr);
70894
70895 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
70896-extern signed long schedule_timeout(signed long timeout);
70897+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
70898 extern signed long schedule_timeout_interruptible(signed long timeout);
70899 extern signed long schedule_timeout_killable(signed long timeout);
70900 extern signed long schedule_timeout_uninterruptible(signed long timeout);
70901@@ -354,10 +355,23 @@ struct user_namespace;
70902 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
70903
70904 extern int sysctl_max_map_count;
70905+extern unsigned long sysctl_heap_stack_gap;
70906
70907 #include <linux/aio.h>
70908
70909 #ifdef CONFIG_MMU
70910+
70911+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
70912+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
70913+#else
70914+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
70915+{
70916+ return 0;
70917+}
70918+#endif
70919+
70920+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
70921+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
70922 extern void arch_pick_mmap_layout(struct mm_struct *mm);
70923 extern unsigned long
70924 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
70925@@ -639,6 +653,17 @@ struct signal_struct {
70926 #ifdef CONFIG_TASKSTATS
70927 struct taskstats *stats;
70928 #endif
70929+
70930+#ifdef CONFIG_GRKERNSEC
70931+ u32 curr_ip;
70932+ u32 saved_ip;
70933+ u32 gr_saddr;
70934+ u32 gr_daddr;
70935+ u16 gr_sport;
70936+ u16 gr_dport;
70937+ u8 used_accept:1;
70938+#endif
70939+
70940 #ifdef CONFIG_AUDIT
70941 unsigned audit_tty;
70942 struct tty_audit_buf *tty_audit_buf;
70943@@ -717,6 +742,11 @@ struct user_struct {
70944 struct key *session_keyring; /* UID's default session keyring */
70945 #endif
70946
70947+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
70948+ unsigned int banned;
70949+ unsigned long ban_expires;
70950+#endif
70951+
70952 /* Hash table maintenance information */
70953 struct hlist_node uidhash_node;
70954 kuid_t uid;
70955@@ -1116,7 +1146,7 @@ struct sched_class {
70956 #ifdef CONFIG_FAIR_GROUP_SCHED
70957 void (*task_move_group) (struct task_struct *p, int on_rq);
70958 #endif
70959-};
70960+} __do_const;
70961
70962 struct load_weight {
70963 unsigned long weight, inv_weight;
70964@@ -1360,8 +1390,8 @@ struct task_struct {
70965 struct list_head thread_group;
70966
70967 struct completion *vfork_done; /* for vfork() */
70968- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
70969- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
70970+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
70971+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
70972
70973 cputime_t utime, stime, utimescaled, stimescaled;
70974 cputime_t gtime;
70975@@ -1377,11 +1407,6 @@ struct task_struct {
70976 struct task_cputime cputime_expires;
70977 struct list_head cpu_timers[3];
70978
70979-/* process credentials */
70980- const struct cred __rcu *real_cred; /* objective and real subjective task
70981- * credentials (COW) */
70982- const struct cred __rcu *cred; /* effective (overridable) subjective task
70983- * credentials (COW) */
70984 char comm[TASK_COMM_LEN]; /* executable name excluding path
70985 - access with [gs]et_task_comm (which lock
70986 it with task_lock())
70987@@ -1398,6 +1423,10 @@ struct task_struct {
70988 #endif
70989 /* CPU-specific state of this task */
70990 struct thread_struct thread;
70991+/* thread_info moved to task_struct */
70992+#ifdef CONFIG_X86
70993+ struct thread_info tinfo;
70994+#endif
70995 /* filesystem information */
70996 struct fs_struct *fs;
70997 /* open file information */
70998@@ -1471,6 +1500,10 @@ struct task_struct {
70999 gfp_t lockdep_reclaim_gfp;
71000 #endif
71001
71002+/* process credentials */
71003+ const struct cred __rcu *real_cred; /* objective and real subjective task
71004+ * credentials (COW) */
71005+
71006 /* journalling filesystem info */
71007 void *journal_info;
71008
71009@@ -1509,6 +1542,10 @@ struct task_struct {
71010 /* cg_list protected by css_set_lock and tsk->alloc_lock */
71011 struct list_head cg_list;
71012 #endif
71013+
71014+ const struct cred __rcu *cred; /* effective (overridable) subjective task
71015+ * credentials (COW) */
71016+
71017 #ifdef CONFIG_FUTEX
71018 struct robust_list_head __user *robust_list;
71019 #ifdef CONFIG_COMPAT
71020@@ -1605,8 +1642,74 @@ struct task_struct {
71021 #ifdef CONFIG_UPROBES
71022 struct uprobe_task *utask;
71023 #endif
71024+
71025+#ifdef CONFIG_GRKERNSEC
71026+ /* grsecurity */
71027+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71028+ u64 exec_id;
71029+#endif
71030+#ifdef CONFIG_GRKERNSEC_SETXID
71031+ const struct cred *delayed_cred;
71032+#endif
71033+ struct dentry *gr_chroot_dentry;
71034+ struct acl_subject_label *acl;
71035+ struct acl_role_label *role;
71036+ struct file *exec_file;
71037+ unsigned long brute_expires;
71038+ u16 acl_role_id;
71039+ /* is this the task that authenticated to the special role */
71040+ u8 acl_sp_role;
71041+ u8 is_writable;
71042+ u8 brute;
71043+ u8 gr_is_chrooted;
71044+#endif
71045+
71046 };
71047
71048+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
71049+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
71050+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
71051+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
71052+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
71053+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
71054+
71055+#ifdef CONFIG_PAX_SOFTMODE
71056+extern int pax_softmode;
71057+#endif
71058+
71059+extern int pax_check_flags(unsigned long *);
71060+
71061+/* if tsk != current then task_lock must be held on it */
71062+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
71063+static inline unsigned long pax_get_flags(struct task_struct *tsk)
71064+{
71065+ if (likely(tsk->mm))
71066+ return tsk->mm->pax_flags;
71067+ else
71068+ return 0UL;
71069+}
71070+
71071+/* if tsk != current then task_lock must be held on it */
71072+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
71073+{
71074+ if (likely(tsk->mm)) {
71075+ tsk->mm->pax_flags = flags;
71076+ return 0;
71077+ }
71078+ return -EINVAL;
71079+}
71080+#endif
71081+
71082+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
71083+extern void pax_set_initial_flags(struct linux_binprm *bprm);
71084+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
71085+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
71086+#endif
71087+
71088+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
71089+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
71090+extern void pax_report_refcount_overflow(struct pt_regs *regs);
71091+
71092 /* Future-safe accessor for struct task_struct's cpus_allowed. */
71093 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
71094
71095@@ -1696,7 +1799,7 @@ struct pid_namespace;
71096 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
71097 struct pid_namespace *ns);
71098
71099-static inline pid_t task_pid_nr(struct task_struct *tsk)
71100+static inline pid_t task_pid_nr(const struct task_struct *tsk)
71101 {
71102 return tsk->pid;
71103 }
71104@@ -2155,7 +2258,9 @@ void yield(void);
71105 extern struct exec_domain default_exec_domain;
71106
71107 union thread_union {
71108+#ifndef CONFIG_X86
71109 struct thread_info thread_info;
71110+#endif
71111 unsigned long stack[THREAD_SIZE/sizeof(long)];
71112 };
71113
71114@@ -2188,6 +2293,7 @@ extern struct pid_namespace init_pid_ns;
71115 */
71116
71117 extern struct task_struct *find_task_by_vpid(pid_t nr);
71118+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
71119 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
71120 struct pid_namespace *ns);
71121
71122@@ -2344,7 +2450,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
71123 extern void exit_itimers(struct signal_struct *);
71124 extern void flush_itimer_signals(void);
71125
71126-extern void do_group_exit(int);
71127+extern __noreturn void do_group_exit(int);
71128
71129 extern int allow_signal(int);
71130 extern int disallow_signal(int);
71131@@ -2545,9 +2651,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
71132
71133 #endif
71134
71135-static inline int object_is_on_stack(void *obj)
71136+static inline int object_starts_on_stack(void *obj)
71137 {
71138- void *stack = task_stack_page(current);
71139+ const void *stack = task_stack_page(current);
71140
71141 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
71142 }
71143diff --git a/include/linux/security.h b/include/linux/security.h
71144index eee7478..290f7ba 100644
71145--- a/include/linux/security.h
71146+++ b/include/linux/security.h
71147@@ -26,6 +26,7 @@
71148 #include <linux/capability.h>
71149 #include <linux/slab.h>
71150 #include <linux/err.h>
71151+#include <linux/grsecurity.h>
71152
71153 struct linux_binprm;
71154 struct cred;
71155diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
71156index 68a04a3..866e6a1 100644
71157--- a/include/linux/seq_file.h
71158+++ b/include/linux/seq_file.h
71159@@ -26,6 +26,9 @@ struct seq_file {
71160 struct mutex lock;
71161 const struct seq_operations *op;
71162 int poll_event;
71163+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71164+ u64 exec_id;
71165+#endif
71166 #ifdef CONFIG_USER_NS
71167 struct user_namespace *user_ns;
71168 #endif
71169@@ -38,6 +41,7 @@ struct seq_operations {
71170 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
71171 int (*show) (struct seq_file *m, void *v);
71172 };
71173+typedef struct seq_operations __no_const seq_operations_no_const;
71174
71175 #define SEQ_SKIP 1
71176
71177diff --git a/include/linux/shm.h b/include/linux/shm.h
71178index 429c199..4d42e38 100644
71179--- a/include/linux/shm.h
71180+++ b/include/linux/shm.h
71181@@ -21,6 +21,10 @@ struct shmid_kernel /* private to the kernel */
71182
71183 /* The task created the shm object. NULL if the task is dead. */
71184 struct task_struct *shm_creator;
71185+#ifdef CONFIG_GRKERNSEC
71186+ time_t shm_createtime;
71187+ pid_t shm_lapid;
71188+#endif
71189 };
71190
71191 /* shm_mode upper byte flags */
71192diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
71193index 98399e2..7c74c41 100644
71194--- a/include/linux/skbuff.h
71195+++ b/include/linux/skbuff.h
71196@@ -590,7 +590,7 @@ extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
71197 extern struct sk_buff *__alloc_skb(unsigned int size,
71198 gfp_t priority, int flags, int node);
71199 extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
71200-static inline struct sk_buff *alloc_skb(unsigned int size,
71201+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
71202 gfp_t priority)
71203 {
71204 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
71205@@ -700,7 +700,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
71206 */
71207 static inline int skb_queue_empty(const struct sk_buff_head *list)
71208 {
71209- return list->next == (struct sk_buff *)list;
71210+ return list->next == (const struct sk_buff *)list;
71211 }
71212
71213 /**
71214@@ -713,7 +713,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
71215 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
71216 const struct sk_buff *skb)
71217 {
71218- return skb->next == (struct sk_buff *)list;
71219+ return skb->next == (const struct sk_buff *)list;
71220 }
71221
71222 /**
71223@@ -726,7 +726,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
71224 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
71225 const struct sk_buff *skb)
71226 {
71227- return skb->prev == (struct sk_buff *)list;
71228+ return skb->prev == (const struct sk_buff *)list;
71229 }
71230
71231 /**
71232@@ -1727,7 +1727,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
71233 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
71234 */
71235 #ifndef NET_SKB_PAD
71236-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
71237+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
71238 #endif
71239
71240 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
71241@@ -2305,7 +2305,7 @@ extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
71242 int noblock, int *err);
71243 extern unsigned int datagram_poll(struct file *file, struct socket *sock,
71244 struct poll_table_struct *wait);
71245-extern int skb_copy_datagram_iovec(const struct sk_buff *from,
71246+extern int __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from,
71247 int offset, struct iovec *to,
71248 int size);
71249 extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
71250@@ -2595,6 +2595,9 @@ static inline void nf_reset(struct sk_buff *skb)
71251 nf_bridge_put(skb->nf_bridge);
71252 skb->nf_bridge = NULL;
71253 #endif
71254+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
71255+ skb->nf_trace = 0;
71256+#endif
71257 }
71258
71259 /* Note: This doesn't put any conntrack and bridge info in dst. */
71260diff --git a/include/linux/slab.h b/include/linux/slab.h
71261index 5d168d7..720bff3 100644
71262--- a/include/linux/slab.h
71263+++ b/include/linux/slab.h
71264@@ -12,13 +12,20 @@
71265 #include <linux/gfp.h>
71266 #include <linux/types.h>
71267 #include <linux/workqueue.h>
71268-
71269+#include <linux/err.h>
71270
71271 /*
71272 * Flags to pass to kmem_cache_create().
71273 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
71274 */
71275 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
71276+
71277+#ifdef CONFIG_PAX_USERCOPY_SLABS
71278+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
71279+#else
71280+#define SLAB_USERCOPY 0x00000000UL
71281+#endif
71282+
71283 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
71284 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
71285 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
71286@@ -89,10 +96,13 @@
71287 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
71288 * Both make kfree a no-op.
71289 */
71290-#define ZERO_SIZE_PTR ((void *)16)
71291+#define ZERO_SIZE_PTR \
71292+({ \
71293+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
71294+ (void *)(-MAX_ERRNO-1L); \
71295+})
71296
71297-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
71298- (unsigned long)ZERO_SIZE_PTR)
71299+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
71300
71301 /*
71302 * Common fields provided in kmem_cache by all slab allocators
71303@@ -112,7 +122,7 @@ struct kmem_cache {
71304 unsigned int align; /* Alignment as calculated */
71305 unsigned long flags; /* Active flags on the slab */
71306 const char *name; /* Slab name for sysfs */
71307- int refcount; /* Use counter */
71308+ atomic_t refcount; /* Use counter */
71309 void (*ctor)(void *); /* Called on object slot creation */
71310 struct list_head list; /* List of all slab caches on the system */
71311 };
71312@@ -232,6 +242,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
71313 void kfree(const void *);
71314 void kzfree(const void *);
71315 size_t ksize(const void *);
71316+const char *check_heap_object(const void *ptr, unsigned long n);
71317+bool is_usercopy_object(const void *ptr);
71318
71319 /*
71320 * Allocator specific definitions. These are mainly used to establish optimized
71321@@ -311,6 +323,7 @@ size_t ksize(const void *);
71322 * for general use, and so are not documented here. For a full list of
71323 * potential flags, always refer to linux/gfp.h.
71324 */
71325+
71326 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
71327 {
71328 if (size != 0 && n > SIZE_MAX / size)
71329@@ -370,7 +383,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
71330 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
71331 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
71332 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
71333-extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
71334+extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
71335 #define kmalloc_track_caller(size, flags) \
71336 __kmalloc_track_caller(size, flags, _RET_IP_)
71337 #else
71338@@ -390,7 +403,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
71339 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
71340 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
71341 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
71342-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
71343+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
71344 #define kmalloc_node_track_caller(size, flags, node) \
71345 __kmalloc_node_track_caller(size, flags, node, \
71346 _RET_IP_)
71347diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
71348index 8bb6e0e..8eb0dbe 100644
71349--- a/include/linux/slab_def.h
71350+++ b/include/linux/slab_def.h
71351@@ -52,7 +52,7 @@ struct kmem_cache {
71352 /* 4) cache creation/removal */
71353 const char *name;
71354 struct list_head list;
71355- int refcount;
71356+ atomic_t refcount;
71357 int object_size;
71358 int align;
71359
71360@@ -68,10 +68,10 @@ struct kmem_cache {
71361 unsigned long node_allocs;
71362 unsigned long node_frees;
71363 unsigned long node_overflow;
71364- atomic_t allochit;
71365- atomic_t allocmiss;
71366- atomic_t freehit;
71367- atomic_t freemiss;
71368+ atomic_unchecked_t allochit;
71369+ atomic_unchecked_t allocmiss;
71370+ atomic_unchecked_t freehit;
71371+ atomic_unchecked_t freemiss;
71372
71373 /*
71374 * If debugging is enabled, then the allocator can add additional
71375@@ -111,11 +111,16 @@ struct cache_sizes {
71376 #ifdef CONFIG_ZONE_DMA
71377 struct kmem_cache *cs_dmacachep;
71378 #endif
71379+
71380+#ifdef CONFIG_PAX_USERCOPY_SLABS
71381+ struct kmem_cache *cs_usercopycachep;
71382+#endif
71383+
71384 };
71385 extern struct cache_sizes malloc_sizes[];
71386
71387 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
71388-void *__kmalloc(size_t size, gfp_t flags);
71389+void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
71390
71391 #ifdef CONFIG_TRACING
71392 extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
71393@@ -152,6 +157,13 @@ found:
71394 cachep = malloc_sizes[i].cs_dmacachep;
71395 else
71396 #endif
71397+
71398+#ifdef CONFIG_PAX_USERCOPY_SLABS
71399+ if (flags & GFP_USERCOPY)
71400+ cachep = malloc_sizes[i].cs_usercopycachep;
71401+ else
71402+#endif
71403+
71404 cachep = malloc_sizes[i].cs_cachep;
71405
71406 ret = kmem_cache_alloc_trace(cachep, flags, size);
71407@@ -162,7 +174,7 @@ found:
71408 }
71409
71410 #ifdef CONFIG_NUMA
71411-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
71412+extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
71413 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
71414
71415 #ifdef CONFIG_TRACING
71416@@ -205,6 +217,13 @@ found:
71417 cachep = malloc_sizes[i].cs_dmacachep;
71418 else
71419 #endif
71420+
71421+#ifdef CONFIG_PAX_USERCOPY_SLABS
71422+ if (flags & GFP_USERCOPY)
71423+ cachep = malloc_sizes[i].cs_usercopycachep;
71424+ else
71425+#endif
71426+
71427 cachep = malloc_sizes[i].cs_cachep;
71428
71429 return kmem_cache_alloc_node_trace(cachep, flags, node, size);
71430diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
71431index f28e14a..7831211 100644
71432--- a/include/linux/slob_def.h
71433+++ b/include/linux/slob_def.h
71434@@ -11,7 +11,7 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
71435 return kmem_cache_alloc_node(cachep, flags, NUMA_NO_NODE);
71436 }
71437
71438-void *__kmalloc_node(size_t size, gfp_t flags, int node);
71439+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
71440
71441 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
71442 {
71443@@ -31,7 +31,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
71444 return __kmalloc_node(size, flags, NUMA_NO_NODE);
71445 }
71446
71447-static __always_inline void *__kmalloc(size_t size, gfp_t flags)
71448+static __always_inline __size_overflow(1) void *__kmalloc(size_t size, gfp_t flags)
71449 {
71450 return kmalloc(size, flags);
71451 }
71452diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
71453index 9db4825..ed42fb5 100644
71454--- a/include/linux/slub_def.h
71455+++ b/include/linux/slub_def.h
71456@@ -91,7 +91,7 @@ struct kmem_cache {
71457 struct kmem_cache_order_objects max;
71458 struct kmem_cache_order_objects min;
71459 gfp_t allocflags; /* gfp flags to use on each alloc */
71460- int refcount; /* Refcount for slab cache destroy */
71461+ atomic_t refcount; /* Refcount for slab cache destroy */
71462 void (*ctor)(void *);
71463 int inuse; /* Offset to metadata */
71464 int align; /* Alignment */
71465@@ -156,7 +156,7 @@ extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
71466 * Sorry that the following has to be that ugly but some versions of GCC
71467 * have trouble with constant propagation and loops.
71468 */
71469-static __always_inline int kmalloc_index(size_t size)
71470+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
71471 {
71472 if (!size)
71473 return 0;
71474@@ -221,7 +221,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
71475 }
71476
71477 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
71478-void *__kmalloc(size_t size, gfp_t flags);
71479+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
71480
71481 static __always_inline void *
71482 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
71483@@ -265,7 +265,7 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
71484 }
71485 #endif
71486
71487-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
71488+static __always_inline __size_overflow(1) void *kmalloc_large(size_t size, gfp_t flags)
71489 {
71490 unsigned int order = get_order(size);
71491 return kmalloc_order_trace(size, flags, order);
71492@@ -290,7 +290,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
71493 }
71494
71495 #ifdef CONFIG_NUMA
71496-void *__kmalloc_node(size_t size, gfp_t flags, int node);
71497+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
71498 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
71499
71500 #ifdef CONFIG_TRACING
71501diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
71502index e8d702e..0a56eb4 100644
71503--- a/include/linux/sock_diag.h
71504+++ b/include/linux/sock_diag.h
71505@@ -10,7 +10,7 @@ struct sock;
71506 struct sock_diag_handler {
71507 __u8 family;
71508 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
71509-};
71510+} __do_const;
71511
71512 int sock_diag_register(const struct sock_diag_handler *h);
71513 void sock_diag_unregister(const struct sock_diag_handler *h);
71514diff --git a/include/linux/sonet.h b/include/linux/sonet.h
71515index 680f9a3..f13aeb0 100644
71516--- a/include/linux/sonet.h
71517+++ b/include/linux/sonet.h
71518@@ -7,7 +7,7 @@
71519 #include <uapi/linux/sonet.h>
71520
71521 struct k_sonet_stats {
71522-#define __HANDLE_ITEM(i) atomic_t i
71523+#define __HANDLE_ITEM(i) atomic_unchecked_t i
71524 __SONET_ITEMS
71525 #undef __HANDLE_ITEM
71526 };
71527diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
71528index 34206b8..3db7f1c 100644
71529--- a/include/linux/sunrpc/clnt.h
71530+++ b/include/linux/sunrpc/clnt.h
71531@@ -96,7 +96,7 @@ struct rpc_procinfo {
71532 unsigned int p_timer; /* Which RTT timer to use */
71533 u32 p_statidx; /* Which procedure to account */
71534 const char * p_name; /* name of procedure */
71535-};
71536+} __do_const;
71537
71538 #ifdef __KERNEL__
71539
71540@@ -176,9 +176,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
71541 {
71542 switch (sap->sa_family) {
71543 case AF_INET:
71544- return ntohs(((struct sockaddr_in *)sap)->sin_port);
71545+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
71546 case AF_INET6:
71547- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
71548+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
71549 }
71550 return 0;
71551 }
71552@@ -211,7 +211,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
71553 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
71554 const struct sockaddr *src)
71555 {
71556- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
71557+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
71558 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
71559
71560 dsin->sin_family = ssin->sin_family;
71561@@ -314,7 +314,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
71562 if (sa->sa_family != AF_INET6)
71563 return 0;
71564
71565- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
71566+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
71567 }
71568
71569 #endif /* __KERNEL__ */
71570diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
71571index 676ddf5..4c519a1 100644
71572--- a/include/linux/sunrpc/svc.h
71573+++ b/include/linux/sunrpc/svc.h
71574@@ -410,7 +410,7 @@ struct svc_procedure {
71575 unsigned int pc_count; /* call count */
71576 unsigned int pc_cachetype; /* cache info (NFS) */
71577 unsigned int pc_xdrressize; /* maximum size of XDR reply */
71578-};
71579+} __do_const;
71580
71581 /*
71582 * Function prototypes.
71583diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
71584index 0b8e3e6..33e0a01 100644
71585--- a/include/linux/sunrpc/svc_rdma.h
71586+++ b/include/linux/sunrpc/svc_rdma.h
71587@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
71588 extern unsigned int svcrdma_max_requests;
71589 extern unsigned int svcrdma_max_req_size;
71590
71591-extern atomic_t rdma_stat_recv;
71592-extern atomic_t rdma_stat_read;
71593-extern atomic_t rdma_stat_write;
71594-extern atomic_t rdma_stat_sq_starve;
71595-extern atomic_t rdma_stat_rq_starve;
71596-extern atomic_t rdma_stat_rq_poll;
71597-extern atomic_t rdma_stat_rq_prod;
71598-extern atomic_t rdma_stat_sq_poll;
71599-extern atomic_t rdma_stat_sq_prod;
71600+extern atomic_unchecked_t rdma_stat_recv;
71601+extern atomic_unchecked_t rdma_stat_read;
71602+extern atomic_unchecked_t rdma_stat_write;
71603+extern atomic_unchecked_t rdma_stat_sq_starve;
71604+extern atomic_unchecked_t rdma_stat_rq_starve;
71605+extern atomic_unchecked_t rdma_stat_rq_poll;
71606+extern atomic_unchecked_t rdma_stat_rq_prod;
71607+extern atomic_unchecked_t rdma_stat_sq_poll;
71608+extern atomic_unchecked_t rdma_stat_sq_prod;
71609
71610 #define RPCRDMA_VERSION 1
71611
71612diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
71613index dd74084a..7f509d5 100644
71614--- a/include/linux/sunrpc/svcauth.h
71615+++ b/include/linux/sunrpc/svcauth.h
71616@@ -109,7 +109,7 @@ struct auth_ops {
71617 int (*release)(struct svc_rqst *rq);
71618 void (*domain_release)(struct auth_domain *);
71619 int (*set_client)(struct svc_rqst *rq);
71620-};
71621+} __do_const;
71622
71623 #define SVC_GARBAGE 1
71624 #define SVC_SYSERR 2
71625diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
71626index 071d62c..4ccc7ac 100644
71627--- a/include/linux/swiotlb.h
71628+++ b/include/linux/swiotlb.h
71629@@ -59,7 +59,8 @@ extern void
71630
71631 extern void
71632 swiotlb_free_coherent(struct device *hwdev, size_t size,
71633- void *vaddr, dma_addr_t dma_handle);
71634+ void *vaddr, dma_addr_t dma_handle,
71635+ struct dma_attrs *attrs);
71636
71637 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
71638 unsigned long offset, size_t size,
71639diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
71640index 45e2db2..1635156a 100644
71641--- a/include/linux/syscalls.h
71642+++ b/include/linux/syscalls.h
71643@@ -615,7 +615,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
71644 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
71645 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
71646 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
71647- struct sockaddr __user *, int);
71648+ struct sockaddr __user *, int) __intentional_overflow(0);
71649 asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
71650 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
71651 unsigned int vlen, unsigned flags);
71652diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
71653index 27b3b0b..e093dd9 100644
71654--- a/include/linux/syscore_ops.h
71655+++ b/include/linux/syscore_ops.h
71656@@ -16,7 +16,7 @@ struct syscore_ops {
71657 int (*suspend)(void);
71658 void (*resume)(void);
71659 void (*shutdown)(void);
71660-};
71661+} __do_const;
71662
71663 extern void register_syscore_ops(struct syscore_ops *ops);
71664 extern void unregister_syscore_ops(struct syscore_ops *ops);
71665diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
71666index 14a8ff2..af52bad 100644
71667--- a/include/linux/sysctl.h
71668+++ b/include/linux/sysctl.h
71669@@ -34,13 +34,13 @@ struct ctl_table_root;
71670 struct ctl_table_header;
71671 struct ctl_dir;
71672
71673-typedef struct ctl_table ctl_table;
71674-
71675 typedef int proc_handler (struct ctl_table *ctl, int write,
71676 void __user *buffer, size_t *lenp, loff_t *ppos);
71677
71678 extern int proc_dostring(struct ctl_table *, int,
71679 void __user *, size_t *, loff_t *);
71680+extern int proc_dostring_modpriv(struct ctl_table *, int,
71681+ void __user *, size_t *, loff_t *);
71682 extern int proc_dointvec(struct ctl_table *, int,
71683 void __user *, size_t *, loff_t *);
71684 extern int proc_dointvec_minmax(struct ctl_table *, int,
71685@@ -115,7 +115,9 @@ struct ctl_table
71686 struct ctl_table_poll *poll;
71687 void *extra1;
71688 void *extra2;
71689-};
71690+} __do_const;
71691+typedef struct ctl_table __no_const ctl_table_no_const;
71692+typedef struct ctl_table ctl_table;
71693
71694 struct ctl_node {
71695 struct rb_node node;
71696diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
71697index 381f06d..dc16cc7 100644
71698--- a/include/linux/sysfs.h
71699+++ b/include/linux/sysfs.h
71700@@ -31,7 +31,8 @@ struct attribute {
71701 struct lock_class_key *key;
71702 struct lock_class_key skey;
71703 #endif
71704-};
71705+} __do_const;
71706+typedef struct attribute __no_const attribute_no_const;
71707
71708 /**
71709 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
71710@@ -59,8 +60,8 @@ struct attribute_group {
71711 umode_t (*is_visible)(struct kobject *,
71712 struct attribute *, int);
71713 struct attribute **attrs;
71714-};
71715-
71716+} __do_const;
71717+typedef struct attribute_group __no_const attribute_group_no_const;
71718
71719
71720 /**
71721@@ -107,7 +108,8 @@ struct bin_attribute {
71722 char *, loff_t, size_t);
71723 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
71724 struct vm_area_struct *vma);
71725-};
71726+} __do_const;
71727+typedef struct bin_attribute __no_const bin_attribute_no_const;
71728
71729 /**
71730 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
71731diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
71732index 7faf933..9b85a0c 100644
71733--- a/include/linux/sysrq.h
71734+++ b/include/linux/sysrq.h
71735@@ -16,6 +16,7 @@
71736
71737 #include <linux/errno.h>
71738 #include <linux/types.h>
71739+#include <linux/compiler.h>
71740
71741 /* Enable/disable SYSRQ support by default (0==no, 1==yes). */
71742 #define SYSRQ_DEFAULT_ENABLE 1
71743@@ -36,7 +37,7 @@ struct sysrq_key_op {
71744 char *help_msg;
71745 char *action_msg;
71746 int enable_mask;
71747-};
71748+} __do_const;
71749
71750 #ifdef CONFIG_MAGIC_SYSRQ
71751
71752diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
71753index e7e0473..7989295 100644
71754--- a/include/linux/thread_info.h
71755+++ b/include/linux/thread_info.h
71756@@ -148,6 +148,15 @@ static inline bool test_and_clear_restore_sigmask(void)
71757 #error "no set_restore_sigmask() provided and default one won't work"
71758 #endif
71759
71760+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user);
71761+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
71762+{
71763+#ifndef CONFIG_PAX_USERCOPY_DEBUG
71764+ if (!__builtin_constant_p(n))
71765+#endif
71766+ __check_object_size(ptr, n, to_user);
71767+}
71768+
71769 #endif /* __KERNEL__ */
71770
71771 #endif /* _LINUX_THREAD_INFO_H */
71772diff --git a/include/linux/tty.h b/include/linux/tty.h
71773index 8db1b56..c16a040 100644
71774--- a/include/linux/tty.h
71775+++ b/include/linux/tty.h
71776@@ -194,7 +194,7 @@ struct tty_port {
71777 const struct tty_port_operations *ops; /* Port operations */
71778 spinlock_t lock; /* Lock protecting tty field */
71779 int blocked_open; /* Waiting to open */
71780- int count; /* Usage count */
71781+ atomic_t count; /* Usage count */
71782 wait_queue_head_t open_wait; /* Open waiters */
71783 wait_queue_head_t close_wait; /* Close waiters */
71784 wait_queue_head_t delta_msr_wait; /* Modem status change */
71785@@ -490,7 +490,7 @@ extern int tty_port_open(struct tty_port *port,
71786 struct tty_struct *tty, struct file *filp);
71787 static inline int tty_port_users(struct tty_port *port)
71788 {
71789- return port->count + port->blocked_open;
71790+ return atomic_read(&port->count) + port->blocked_open;
71791 }
71792
71793 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
71794diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
71795index dd976cf..e272742 100644
71796--- a/include/linux/tty_driver.h
71797+++ b/include/linux/tty_driver.h
71798@@ -284,7 +284,7 @@ struct tty_operations {
71799 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
71800 #endif
71801 const struct file_operations *proc_fops;
71802-};
71803+} __do_const;
71804
71805 struct tty_driver {
71806 int magic; /* magic number for this structure */
71807diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
71808index fb79dd8d..07d4773 100644
71809--- a/include/linux/tty_ldisc.h
71810+++ b/include/linux/tty_ldisc.h
71811@@ -149,7 +149,7 @@ struct tty_ldisc_ops {
71812
71813 struct module *owner;
71814
71815- int refcount;
71816+ atomic_t refcount;
71817 };
71818
71819 struct tty_ldisc {
71820diff --git a/include/linux/types.h b/include/linux/types.h
71821index 4d118ba..c3ee9bf 100644
71822--- a/include/linux/types.h
71823+++ b/include/linux/types.h
71824@@ -176,10 +176,26 @@ typedef struct {
71825 int counter;
71826 } atomic_t;
71827
71828+#ifdef CONFIG_PAX_REFCOUNT
71829+typedef struct {
71830+ int counter;
71831+} atomic_unchecked_t;
71832+#else
71833+typedef atomic_t atomic_unchecked_t;
71834+#endif
71835+
71836 #ifdef CONFIG_64BIT
71837 typedef struct {
71838 long counter;
71839 } atomic64_t;
71840+
71841+#ifdef CONFIG_PAX_REFCOUNT
71842+typedef struct {
71843+ long counter;
71844+} atomic64_unchecked_t;
71845+#else
71846+typedef atomic64_t atomic64_unchecked_t;
71847+#endif
71848 #endif
71849
71850 struct list_head {
71851diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
71852index 5ca0951..ab496a5 100644
71853--- a/include/linux/uaccess.h
71854+++ b/include/linux/uaccess.h
71855@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
71856 long ret; \
71857 mm_segment_t old_fs = get_fs(); \
71858 \
71859- set_fs(KERNEL_DS); \
71860 pagefault_disable(); \
71861- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
71862- pagefault_enable(); \
71863+ set_fs(KERNEL_DS); \
71864+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
71865 set_fs(old_fs); \
71866+ pagefault_enable(); \
71867 ret; \
71868 })
71869
71870diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
71871index 8e522cbc..aa8572d 100644
71872--- a/include/linux/uidgid.h
71873+++ b/include/linux/uidgid.h
71874@@ -197,4 +197,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
71875
71876 #endif /* CONFIG_USER_NS */
71877
71878+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
71879+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
71880+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
71881+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
71882+
71883 #endif /* _LINUX_UIDGID_H */
71884diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
71885index 99c1b4d..562e6f3 100644
71886--- a/include/linux/unaligned/access_ok.h
71887+++ b/include/linux/unaligned/access_ok.h
71888@@ -4,34 +4,34 @@
71889 #include <linux/kernel.h>
71890 #include <asm/byteorder.h>
71891
71892-static inline u16 get_unaligned_le16(const void *p)
71893+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
71894 {
71895- return le16_to_cpup((__le16 *)p);
71896+ return le16_to_cpup((const __le16 *)p);
71897 }
71898
71899-static inline u32 get_unaligned_le32(const void *p)
71900+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
71901 {
71902- return le32_to_cpup((__le32 *)p);
71903+ return le32_to_cpup((const __le32 *)p);
71904 }
71905
71906-static inline u64 get_unaligned_le64(const void *p)
71907+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
71908 {
71909- return le64_to_cpup((__le64 *)p);
71910+ return le64_to_cpup((const __le64 *)p);
71911 }
71912
71913-static inline u16 get_unaligned_be16(const void *p)
71914+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
71915 {
71916- return be16_to_cpup((__be16 *)p);
71917+ return be16_to_cpup((const __be16 *)p);
71918 }
71919
71920-static inline u32 get_unaligned_be32(const void *p)
71921+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
71922 {
71923- return be32_to_cpup((__be32 *)p);
71924+ return be32_to_cpup((const __be32 *)p);
71925 }
71926
71927-static inline u64 get_unaligned_be64(const void *p)
71928+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
71929 {
71930- return be64_to_cpup((__be64 *)p);
71931+ return be64_to_cpup((const __be64 *)p);
71932 }
71933
71934 static inline void put_unaligned_le16(u16 val, void *p)
71935diff --git a/include/linux/usb.h b/include/linux/usb.h
71936index 4d22d0f..8d0e8f8 100644
71937--- a/include/linux/usb.h
71938+++ b/include/linux/usb.h
71939@@ -554,7 +554,7 @@ struct usb_device {
71940 int maxchild;
71941
71942 u32 quirks;
71943- atomic_t urbnum;
71944+ atomic_unchecked_t urbnum;
71945
71946 unsigned long active_duration;
71947
71948@@ -1604,7 +1604,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
71949
71950 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
71951 __u8 request, __u8 requesttype, __u16 value, __u16 index,
71952- void *data, __u16 size, int timeout);
71953+ void *data, __u16 size, int timeout) __intentional_overflow(-1);
71954 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
71955 void *data, int len, int *actual_length, int timeout);
71956 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
71957diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
71958index c5d36c6..108f4f9 100644
71959--- a/include/linux/usb/renesas_usbhs.h
71960+++ b/include/linux/usb/renesas_usbhs.h
71961@@ -39,7 +39,7 @@ enum {
71962 */
71963 struct renesas_usbhs_driver_callback {
71964 int (*notify_hotplug)(struct platform_device *pdev);
71965-};
71966+} __no_const;
71967
71968 /*
71969 * callback functions for platform
71970diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
71971index 5209cfe..b6b215f 100644
71972--- a/include/linux/user_namespace.h
71973+++ b/include/linux/user_namespace.h
71974@@ -21,7 +21,7 @@ struct user_namespace {
71975 struct uid_gid_map uid_map;
71976 struct uid_gid_map gid_map;
71977 struct uid_gid_map projid_map;
71978- struct kref kref;
71979+ atomic_t count;
71980 struct user_namespace *parent;
71981 kuid_t owner;
71982 kgid_t group;
71983@@ -37,18 +37,18 @@ extern struct user_namespace init_user_ns;
71984 static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
71985 {
71986 if (ns)
71987- kref_get(&ns->kref);
71988+ atomic_inc(&ns->count);
71989 return ns;
71990 }
71991
71992 extern int create_user_ns(struct cred *new);
71993 extern int unshare_userns(unsigned long unshare_flags, struct cred **new_cred);
71994-extern void free_user_ns(struct kref *kref);
71995+extern void free_user_ns(struct user_namespace *ns);
71996
71997 static inline void put_user_ns(struct user_namespace *ns)
71998 {
71999- if (ns)
72000- kref_put(&ns->kref, free_user_ns);
72001+ if (ns && atomic_dec_and_test(&ns->count))
72002+ free_user_ns(ns);
72003 }
72004
72005 struct seq_operations;
72006diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
72007index 6f8fbcf..8259001 100644
72008--- a/include/linux/vermagic.h
72009+++ b/include/linux/vermagic.h
72010@@ -25,9 +25,35 @@
72011 #define MODULE_ARCH_VERMAGIC ""
72012 #endif
72013
72014+#ifdef CONFIG_PAX_REFCOUNT
72015+#define MODULE_PAX_REFCOUNT "REFCOUNT "
72016+#else
72017+#define MODULE_PAX_REFCOUNT ""
72018+#endif
72019+
72020+#ifdef CONSTIFY_PLUGIN
72021+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
72022+#else
72023+#define MODULE_CONSTIFY_PLUGIN ""
72024+#endif
72025+
72026+#ifdef STACKLEAK_PLUGIN
72027+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
72028+#else
72029+#define MODULE_STACKLEAK_PLUGIN ""
72030+#endif
72031+
72032+#ifdef CONFIG_GRKERNSEC
72033+#define MODULE_GRSEC "GRSEC "
72034+#else
72035+#define MODULE_GRSEC ""
72036+#endif
72037+
72038 #define VERMAGIC_STRING \
72039 UTS_RELEASE " " \
72040 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
72041 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
72042- MODULE_ARCH_VERMAGIC
72043+ MODULE_ARCH_VERMAGIC \
72044+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
72045+ MODULE_GRSEC
72046
72047diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
72048index 6071e91..ca6a489 100644
72049--- a/include/linux/vmalloc.h
72050+++ b/include/linux/vmalloc.h
72051@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
72052 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
72053 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
72054 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
72055+
72056+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
72057+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
72058+#endif
72059+
72060 /* bits [20..32] reserved for arch specific ioremap internals */
72061
72062 /*
72063@@ -62,7 +67,7 @@ extern void *vmalloc_32_user(unsigned long size);
72064 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
72065 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
72066 unsigned long start, unsigned long end, gfp_t gfp_mask,
72067- pgprot_t prot, int node, const void *caller);
72068+ pgprot_t prot, int node, const void *caller) __size_overflow(1);
72069 extern void vfree(const void *addr);
72070
72071 extern void *vmap(struct page **pages, unsigned int count,
72072@@ -124,8 +129,8 @@ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
72073 extern void free_vm_area(struct vm_struct *area);
72074
72075 /* for /dev/kmem */
72076-extern long vread(char *buf, char *addr, unsigned long count);
72077-extern long vwrite(char *buf, char *addr, unsigned long count);
72078+extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
72079+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
72080
72081 /*
72082 * Internals. Dont't use..
72083diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
72084index a13291f..af51fa3 100644
72085--- a/include/linux/vmstat.h
72086+++ b/include/linux/vmstat.h
72087@@ -95,18 +95,18 @@ static inline void vm_events_fold_cpu(int cpu)
72088 /*
72089 * Zone based page accounting with per cpu differentials.
72090 */
72091-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
72092+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
72093
72094 static inline void zone_page_state_add(long x, struct zone *zone,
72095 enum zone_stat_item item)
72096 {
72097- atomic_long_add(x, &zone->vm_stat[item]);
72098- atomic_long_add(x, &vm_stat[item]);
72099+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
72100+ atomic_long_add_unchecked(x, &vm_stat[item]);
72101 }
72102
72103 static inline unsigned long global_page_state(enum zone_stat_item item)
72104 {
72105- long x = atomic_long_read(&vm_stat[item]);
72106+ long x = atomic_long_read_unchecked(&vm_stat[item]);
72107 #ifdef CONFIG_SMP
72108 if (x < 0)
72109 x = 0;
72110@@ -117,7 +117,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
72111 static inline unsigned long zone_page_state(struct zone *zone,
72112 enum zone_stat_item item)
72113 {
72114- long x = atomic_long_read(&zone->vm_stat[item]);
72115+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
72116 #ifdef CONFIG_SMP
72117 if (x < 0)
72118 x = 0;
72119@@ -134,7 +134,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
72120 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
72121 enum zone_stat_item item)
72122 {
72123- long x = atomic_long_read(&zone->vm_stat[item]);
72124+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
72125
72126 #ifdef CONFIG_SMP
72127 int cpu;
72128@@ -226,8 +226,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
72129
72130 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
72131 {
72132- atomic_long_inc(&zone->vm_stat[item]);
72133- atomic_long_inc(&vm_stat[item]);
72134+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
72135+ atomic_long_inc_unchecked(&vm_stat[item]);
72136 }
72137
72138 static inline void __inc_zone_page_state(struct page *page,
72139@@ -238,8 +238,8 @@ static inline void __inc_zone_page_state(struct page *page,
72140
72141 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
72142 {
72143- atomic_long_dec(&zone->vm_stat[item]);
72144- atomic_long_dec(&vm_stat[item]);
72145+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
72146+ atomic_long_dec_unchecked(&vm_stat[item]);
72147 }
72148
72149 static inline void __dec_zone_page_state(struct page *page,
72150diff --git a/include/linux/xattr.h b/include/linux/xattr.h
72151index fdbafc6..b7ffd47 100644
72152--- a/include/linux/xattr.h
72153+++ b/include/linux/xattr.h
72154@@ -28,7 +28,7 @@ struct xattr_handler {
72155 size_t size, int handler_flags);
72156 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
72157 size_t size, int flags, int handler_flags);
72158-};
72159+} __do_const;
72160
72161 struct xattr {
72162 char *name;
72163diff --git a/include/linux/zlib.h b/include/linux/zlib.h
72164index 9c5a6b4..09c9438 100644
72165--- a/include/linux/zlib.h
72166+++ b/include/linux/zlib.h
72167@@ -31,6 +31,7 @@
72168 #define _ZLIB_H
72169
72170 #include <linux/zconf.h>
72171+#include <linux/compiler.h>
72172
72173 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
72174 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
72175@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
72176
72177 /* basic functions */
72178
72179-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
72180+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
72181 /*
72182 Returns the number of bytes that needs to be allocated for a per-
72183 stream workspace with the specified parameters. A pointer to this
72184diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
72185index 95d1c91..6798cca 100644
72186--- a/include/media/v4l2-dev.h
72187+++ b/include/media/v4l2-dev.h
72188@@ -76,7 +76,7 @@ struct v4l2_file_operations {
72189 int (*mmap) (struct file *, struct vm_area_struct *);
72190 int (*open) (struct file *);
72191 int (*release) (struct file *);
72192-};
72193+} __do_const;
72194
72195 /*
72196 * Newer version of video_device, handled by videodev2.c
72197diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
72198index 4118ad1..cb7e25f 100644
72199--- a/include/media/v4l2-ioctl.h
72200+++ b/include/media/v4l2-ioctl.h
72201@@ -284,7 +284,6 @@ struct v4l2_ioctl_ops {
72202 bool valid_prio, int cmd, void *arg);
72203 };
72204
72205-
72206 /* v4l debugging and diagnostics */
72207
72208 /* Debug bitmask flags to be used on V4L2 */
72209diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
72210index adcbb20..62c2559 100644
72211--- a/include/net/9p/transport.h
72212+++ b/include/net/9p/transport.h
72213@@ -57,7 +57,7 @@ struct p9_trans_module {
72214 int (*cancel) (struct p9_client *, struct p9_req_t *req);
72215 int (*zc_request)(struct p9_client *, struct p9_req_t *,
72216 char *, char *, int , int, int, int);
72217-};
72218+} __do_const;
72219
72220 void v9fs_register_trans(struct p9_trans_module *m);
72221 void v9fs_unregister_trans(struct p9_trans_module *m);
72222diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
72223index 7588ef4..e62d35f 100644
72224--- a/include/net/bluetooth/l2cap.h
72225+++ b/include/net/bluetooth/l2cap.h
72226@@ -552,7 +552,7 @@ struct l2cap_ops {
72227 void (*defer) (struct l2cap_chan *chan);
72228 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
72229 unsigned long len, int nb);
72230-};
72231+} __do_const;
72232
72233 struct l2cap_conn {
72234 struct hci_conn *hcon;
72235diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
72236index 9e5425b..8136ffc 100644
72237--- a/include/net/caif/cfctrl.h
72238+++ b/include/net/caif/cfctrl.h
72239@@ -52,7 +52,7 @@ struct cfctrl_rsp {
72240 void (*radioset_rsp)(void);
72241 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
72242 struct cflayer *client_layer);
72243-};
72244+} __no_const;
72245
72246 /* Link Setup Parameters for CAIF-Links. */
72247 struct cfctrl_link_param {
72248@@ -101,8 +101,8 @@ struct cfctrl_request_info {
72249 struct cfctrl {
72250 struct cfsrvl serv;
72251 struct cfctrl_rsp res;
72252- atomic_t req_seq_no;
72253- atomic_t rsp_seq_no;
72254+ atomic_unchecked_t req_seq_no;
72255+ atomic_unchecked_t rsp_seq_no;
72256 struct list_head list;
72257 /* Protects from simultaneous access to first_req list */
72258 spinlock_t info_list_lock;
72259diff --git a/include/net/flow.h b/include/net/flow.h
72260index 628e11b..4c475df 100644
72261--- a/include/net/flow.h
72262+++ b/include/net/flow.h
72263@@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
72264
72265 extern void flow_cache_flush(void);
72266 extern void flow_cache_flush_deferred(void);
72267-extern atomic_t flow_cache_genid;
72268+extern atomic_unchecked_t flow_cache_genid;
72269
72270 #endif
72271diff --git a/include/net/genetlink.h b/include/net/genetlink.h
72272index bdfbe68..4402ebe 100644
72273--- a/include/net/genetlink.h
72274+++ b/include/net/genetlink.h
72275@@ -118,7 +118,7 @@ struct genl_ops {
72276 struct netlink_callback *cb);
72277 int (*done)(struct netlink_callback *cb);
72278 struct list_head ops_list;
72279-};
72280+} __do_const;
72281
72282 extern int genl_register_family(struct genl_family *family);
72283 extern int genl_register_family_with_ops(struct genl_family *family,
72284diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
72285index e5062c9..48a9a4b 100644
72286--- a/include/net/gro_cells.h
72287+++ b/include/net/gro_cells.h
72288@@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
72289 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
72290
72291 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
72292- atomic_long_inc(&dev->rx_dropped);
72293+ atomic_long_inc_unchecked(&dev->rx_dropped);
72294 kfree_skb(skb);
72295 return;
72296 }
72297@@ -73,8 +73,8 @@ static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *de
72298 int i;
72299
72300 gcells->gro_cells_mask = roundup_pow_of_two(netif_get_num_default_rss_queues()) - 1;
72301- gcells->cells = kcalloc(sizeof(struct gro_cell),
72302- gcells->gro_cells_mask + 1,
72303+ gcells->cells = kcalloc(gcells->gro_cells_mask + 1,
72304+ sizeof(struct gro_cell),
72305 GFP_KERNEL);
72306 if (!gcells->cells)
72307 return -ENOMEM;
72308diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
72309index 1832927..ce39aea 100644
72310--- a/include/net/inet_connection_sock.h
72311+++ b/include/net/inet_connection_sock.h
72312@@ -62,7 +62,7 @@ struct inet_connection_sock_af_ops {
72313 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
72314 int (*bind_conflict)(const struct sock *sk,
72315 const struct inet_bind_bucket *tb, bool relax);
72316-};
72317+} __do_const;
72318
72319 /** inet_connection_sock - INET connection oriented sock
72320 *
72321diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
72322index 53f464d..ba76aaa 100644
72323--- a/include/net/inetpeer.h
72324+++ b/include/net/inetpeer.h
72325@@ -47,8 +47,8 @@ struct inet_peer {
72326 */
72327 union {
72328 struct {
72329- atomic_t rid; /* Frag reception counter */
72330- atomic_t ip_id_count; /* IP ID for the next packet */
72331+ atomic_unchecked_t rid; /* Frag reception counter */
72332+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
72333 };
72334 struct rcu_head rcu;
72335 struct inet_peer *gc_next;
72336@@ -182,11 +182,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
72337 more++;
72338 inet_peer_refcheck(p);
72339 do {
72340- old = atomic_read(&p->ip_id_count);
72341+ old = atomic_read_unchecked(&p->ip_id_count);
72342 new = old + more;
72343 if (!new)
72344 new = 1;
72345- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
72346+ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
72347 return new;
72348 }
72349
72350diff --git a/include/net/ip.h b/include/net/ip.h
72351index a68f838..74518ab 100644
72352--- a/include/net/ip.h
72353+++ b/include/net/ip.h
72354@@ -202,7 +202,7 @@ extern struct local_ports {
72355 } sysctl_local_ports;
72356 extern void inet_get_local_port_range(int *low, int *high);
72357
72358-extern unsigned long *sysctl_local_reserved_ports;
72359+extern unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
72360 static inline int inet_is_reserved_local_port(int port)
72361 {
72362 return test_bit(port, sysctl_local_reserved_ports);
72363diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
72364index e49db91..76a81de 100644
72365--- a/include/net/ip_fib.h
72366+++ b/include/net/ip_fib.h
72367@@ -167,7 +167,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
72368
72369 #define FIB_RES_SADDR(net, res) \
72370 ((FIB_RES_NH(res).nh_saddr_genid == \
72371- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
72372+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
72373 FIB_RES_NH(res).nh_saddr : \
72374 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
72375 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
72376diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
72377index 68c69d5..bdab192 100644
72378--- a/include/net/ip_vs.h
72379+++ b/include/net/ip_vs.h
72380@@ -599,7 +599,7 @@ struct ip_vs_conn {
72381 struct ip_vs_conn *control; /* Master control connection */
72382 atomic_t n_control; /* Number of controlled ones */
72383 struct ip_vs_dest *dest; /* real server */
72384- atomic_t in_pkts; /* incoming packet counter */
72385+ atomic_unchecked_t in_pkts; /* incoming packet counter */
72386
72387 /* packet transmitter for different forwarding methods. If it
72388 mangles the packet, it must return NF_DROP or better NF_STOLEN,
72389@@ -737,7 +737,7 @@ struct ip_vs_dest {
72390 __be16 port; /* port number of the server */
72391 union nf_inet_addr addr; /* IP address of the server */
72392 volatile unsigned int flags; /* dest status flags */
72393- atomic_t conn_flags; /* flags to copy to conn */
72394+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
72395 atomic_t weight; /* server weight */
72396
72397 atomic_t refcnt; /* reference counter */
72398@@ -980,11 +980,11 @@ struct netns_ipvs {
72399 /* ip_vs_lblc */
72400 int sysctl_lblc_expiration;
72401 struct ctl_table_header *lblc_ctl_header;
72402- struct ctl_table *lblc_ctl_table;
72403+ ctl_table_no_const *lblc_ctl_table;
72404 /* ip_vs_lblcr */
72405 int sysctl_lblcr_expiration;
72406 struct ctl_table_header *lblcr_ctl_header;
72407- struct ctl_table *lblcr_ctl_table;
72408+ ctl_table_no_const *lblcr_ctl_table;
72409 /* ip_vs_est */
72410 struct list_head est_list; /* estimator list */
72411 spinlock_t est_lock;
72412diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
72413index 80ffde3..968b0f4 100644
72414--- a/include/net/irda/ircomm_tty.h
72415+++ b/include/net/irda/ircomm_tty.h
72416@@ -35,6 +35,7 @@
72417 #include <linux/termios.h>
72418 #include <linux/timer.h>
72419 #include <linux/tty.h> /* struct tty_struct */
72420+#include <asm/local.h>
72421
72422 #include <net/irda/irias_object.h>
72423 #include <net/irda/ircomm_core.h>
72424diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
72425index cc7c197..9f2da2a 100644
72426--- a/include/net/iucv/af_iucv.h
72427+++ b/include/net/iucv/af_iucv.h
72428@@ -141,7 +141,7 @@ struct iucv_sock {
72429 struct iucv_sock_list {
72430 struct hlist_head head;
72431 rwlock_t lock;
72432- atomic_t autobind_name;
72433+ atomic_unchecked_t autobind_name;
72434 };
72435
72436 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
72437diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
72438index df83f69..9b640b8 100644
72439--- a/include/net/llc_c_ac.h
72440+++ b/include/net/llc_c_ac.h
72441@@ -87,7 +87,7 @@
72442 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
72443 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
72444
72445-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
72446+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
72447
72448 extern int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
72449 extern int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
72450diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
72451index 6ca3113..f8026dd 100644
72452--- a/include/net/llc_c_ev.h
72453+++ b/include/net/llc_c_ev.h
72454@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
72455 return (struct llc_conn_state_ev *)skb->cb;
72456 }
72457
72458-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
72459-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
72460+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
72461+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
72462
72463 extern int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
72464 extern int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
72465diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
72466index 0e79cfb..f46db31 100644
72467--- a/include/net/llc_c_st.h
72468+++ b/include/net/llc_c_st.h
72469@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
72470 u8 next_state;
72471 llc_conn_ev_qfyr_t *ev_qualifiers;
72472 llc_conn_action_t *ev_actions;
72473-};
72474+} __do_const;
72475
72476 struct llc_conn_state {
72477 u8 current_state;
72478diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
72479index 37a3bbd..55a4241 100644
72480--- a/include/net/llc_s_ac.h
72481+++ b/include/net/llc_s_ac.h
72482@@ -23,7 +23,7 @@
72483 #define SAP_ACT_TEST_IND 9
72484
72485 /* All action functions must look like this */
72486-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
72487+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
72488
72489 extern int llc_sap_action_unitdata_ind(struct llc_sap *sap,
72490 struct sk_buff *skb);
72491diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
72492index 567c681..cd73ac0 100644
72493--- a/include/net/llc_s_st.h
72494+++ b/include/net/llc_s_st.h
72495@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
72496 llc_sap_ev_t ev;
72497 u8 next_state;
72498 llc_sap_action_t *ev_actions;
72499-};
72500+} __do_const;
72501
72502 struct llc_sap_state {
72503 u8 curr_state;
72504diff --git a/include/net/mac80211.h b/include/net/mac80211.h
72505index ee50c5e..1bc3b1a 100644
72506--- a/include/net/mac80211.h
72507+++ b/include/net/mac80211.h
72508@@ -3996,7 +3996,7 @@ struct rate_control_ops {
72509 void (*add_sta_debugfs)(void *priv, void *priv_sta,
72510 struct dentry *dir);
72511 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
72512-};
72513+} __do_const;
72514
72515 static inline int rate_supported(struct ieee80211_sta *sta,
72516 enum ieee80211_band band,
72517diff --git a/include/net/neighbour.h b/include/net/neighbour.h
72518index 0dab173..1b76af0 100644
72519--- a/include/net/neighbour.h
72520+++ b/include/net/neighbour.h
72521@@ -123,7 +123,7 @@ struct neigh_ops {
72522 void (*error_report)(struct neighbour *, struct sk_buff *);
72523 int (*output)(struct neighbour *, struct sk_buff *);
72524 int (*connected_output)(struct neighbour *, struct sk_buff *);
72525-};
72526+} __do_const;
72527
72528 struct pneigh_entry {
72529 struct pneigh_entry *next;
72530diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
72531index de644bc..dfbcc4c 100644
72532--- a/include/net/net_namespace.h
72533+++ b/include/net/net_namespace.h
72534@@ -115,7 +115,7 @@ struct net {
72535 #endif
72536 struct netns_ipvs *ipvs;
72537 struct sock *diag_nlsk;
72538- atomic_t rt_genid;
72539+ atomic_unchecked_t rt_genid;
72540 };
72541
72542 /*
72543@@ -272,7 +272,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
72544 #define __net_init __init
72545 #define __net_exit __exit_refok
72546 #define __net_initdata __initdata
72547+#ifdef CONSTIFY_PLUGIN
72548 #define __net_initconst __initconst
72549+#else
72550+#define __net_initconst __initdata
72551+#endif
72552 #endif
72553
72554 struct pernet_operations {
72555@@ -282,7 +286,7 @@ struct pernet_operations {
72556 void (*exit_batch)(struct list_head *net_exit_list);
72557 int *id;
72558 size_t size;
72559-};
72560+} __do_const;
72561
72562 /*
72563 * Use these carefully. If you implement a network device and it
72564@@ -330,12 +334,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
72565
72566 static inline int rt_genid(struct net *net)
72567 {
72568- return atomic_read(&net->rt_genid);
72569+ return atomic_read_unchecked(&net->rt_genid);
72570 }
72571
72572 static inline void rt_genid_bump(struct net *net)
72573 {
72574- atomic_inc(&net->rt_genid);
72575+ atomic_inc_unchecked(&net->rt_genid);
72576 }
72577
72578 #endif /* __NET_NET_NAMESPACE_H */
72579diff --git a/include/net/netdma.h b/include/net/netdma.h
72580index 8ba8ce2..99b7fff 100644
72581--- a/include/net/netdma.h
72582+++ b/include/net/netdma.h
72583@@ -24,7 +24,7 @@
72584 #include <linux/dmaengine.h>
72585 #include <linux/skbuff.h>
72586
72587-int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
72588+int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan,
72589 struct sk_buff *skb, int offset, struct iovec *to,
72590 size_t len, struct dma_pinned_list *pinned_list);
72591
72592diff --git a/include/net/netlink.h b/include/net/netlink.h
72593index 9690b0f..87aded7 100644
72594--- a/include/net/netlink.h
72595+++ b/include/net/netlink.h
72596@@ -534,7 +534,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
72597 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
72598 {
72599 if (mark)
72600- skb_trim(skb, (unsigned char *) mark - skb->data);
72601+ skb_trim(skb, (const unsigned char *) mark - skb->data);
72602 }
72603
72604 /**
72605diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
72606index 923cb20..deae816 100644
72607--- a/include/net/netns/conntrack.h
72608+++ b/include/net/netns/conntrack.h
72609@@ -12,10 +12,10 @@ struct nf_conntrack_ecache;
72610 struct nf_proto_net {
72611 #ifdef CONFIG_SYSCTL
72612 struct ctl_table_header *ctl_table_header;
72613- struct ctl_table *ctl_table;
72614+ ctl_table_no_const *ctl_table;
72615 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
72616 struct ctl_table_header *ctl_compat_header;
72617- struct ctl_table *ctl_compat_table;
72618+ ctl_table_no_const *ctl_compat_table;
72619 #endif
72620 #endif
72621 unsigned int users;
72622@@ -58,7 +58,7 @@ struct nf_ip_net {
72623 struct nf_icmp_net icmpv6;
72624 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
72625 struct ctl_table_header *ctl_table_header;
72626- struct ctl_table *ctl_table;
72627+ ctl_table_no_const *ctl_table;
72628 #endif
72629 };
72630
72631diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
72632index 2ae2b83..dbdc85e 100644
72633--- a/include/net/netns/ipv4.h
72634+++ b/include/net/netns/ipv4.h
72635@@ -64,7 +64,7 @@ struct netns_ipv4 {
72636 kgid_t sysctl_ping_group_range[2];
72637 long sysctl_tcp_mem[3];
72638
72639- atomic_t dev_addr_genid;
72640+ atomic_unchecked_t dev_addr_genid;
72641
72642 #ifdef CONFIG_IP_MROUTE
72643 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
72644diff --git a/include/net/protocol.h b/include/net/protocol.h
72645index 047c047..b9dad15 100644
72646--- a/include/net/protocol.h
72647+++ b/include/net/protocol.h
72648@@ -44,7 +44,7 @@ struct net_protocol {
72649 void (*err_handler)(struct sk_buff *skb, u32 info);
72650 unsigned int no_policy:1,
72651 netns_ok:1;
72652-};
72653+} __do_const;
72654
72655 #if IS_ENABLED(CONFIG_IPV6)
72656 struct inet6_protocol {
72657@@ -57,7 +57,7 @@ struct inet6_protocol {
72658 u8 type, u8 code, int offset,
72659 __be32 info);
72660 unsigned int flags; /* INET6_PROTO_xxx */
72661-};
72662+} __do_const;
72663
72664 #define INET6_PROTO_NOPOLICY 0x1
72665 #define INET6_PROTO_FINAL 0x2
72666diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
72667index 5a15fab..d799ea7 100644
72668--- a/include/net/rtnetlink.h
72669+++ b/include/net/rtnetlink.h
72670@@ -81,7 +81,7 @@ struct rtnl_link_ops {
72671 const struct net_device *dev);
72672 unsigned int (*get_num_tx_queues)(void);
72673 unsigned int (*get_num_rx_queues)(void);
72674-};
72675+} __do_const;
72676
72677 extern int __rtnl_link_register(struct rtnl_link_ops *ops);
72678 extern void __rtnl_link_unregister(struct rtnl_link_ops *ops);
72679diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
72680index 7fdf298..197e9f7 100644
72681--- a/include/net/sctp/sctp.h
72682+++ b/include/net/sctp/sctp.h
72683@@ -330,9 +330,9 @@ do { \
72684
72685 #else /* SCTP_DEBUG */
72686
72687-#define SCTP_DEBUG_PRINTK(whatever...)
72688-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
72689-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
72690+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
72691+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
72692+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
72693 #define SCTP_ENABLE_DEBUG
72694 #define SCTP_DISABLE_DEBUG
72695 #define SCTP_ASSERT(expr, str, func)
72696diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
72697index 2a82d13..62a31c2 100644
72698--- a/include/net/sctp/sm.h
72699+++ b/include/net/sctp/sm.h
72700@@ -87,7 +87,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
72701 typedef struct {
72702 sctp_state_fn_t *fn;
72703 const char *name;
72704-} sctp_sm_table_entry_t;
72705+} __do_const sctp_sm_table_entry_t;
72706
72707 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
72708 * currently in use.
72709@@ -299,7 +299,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
72710 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
72711
72712 /* Extern declarations for major data structures. */
72713-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
72714+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
72715
72716
72717 /* Get the size of a DATA chunk payload. */
72718diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
72719index fdeb85a..1329d95 100644
72720--- a/include/net/sctp/structs.h
72721+++ b/include/net/sctp/structs.h
72722@@ -517,7 +517,7 @@ struct sctp_pf {
72723 struct sctp_association *asoc);
72724 void (*addr_v4map) (struct sctp_sock *, union sctp_addr *);
72725 struct sctp_af *af;
72726-};
72727+} __do_const;
72728
72729
72730 /* Structure to track chunk fragments that have been acked, but peer
72731diff --git a/include/net/sock.h b/include/net/sock.h
72732index 25afaa0..8bb0070 100644
72733--- a/include/net/sock.h
72734+++ b/include/net/sock.h
72735@@ -322,7 +322,7 @@ struct sock {
72736 #ifdef CONFIG_RPS
72737 __u32 sk_rxhash;
72738 #endif
72739- atomic_t sk_drops;
72740+ atomic_unchecked_t sk_drops;
72741 int sk_rcvbuf;
72742
72743 struct sk_filter __rcu *sk_filter;
72744@@ -1781,7 +1781,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
72745 }
72746
72747 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
72748- char __user *from, char *to,
72749+ char __user *from, unsigned char *to,
72750 int copy, int offset)
72751 {
72752 if (skb->ip_summed == CHECKSUM_NONE) {
72753@@ -2040,7 +2040,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
72754 }
72755 }
72756
72757-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
72758+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
72759
72760 /**
72761 * sk_page_frag - return an appropriate page_frag
72762diff --git a/include/net/tcp.h b/include/net/tcp.h
72763index aed42c7..43890c6 100644
72764--- a/include/net/tcp.h
72765+++ b/include/net/tcp.h
72766@@ -530,7 +530,7 @@ extern void tcp_retransmit_timer(struct sock *sk);
72767 extern void tcp_xmit_retransmit_queue(struct sock *);
72768 extern void tcp_simple_retransmit(struct sock *);
72769 extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
72770-extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
72771+extern int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
72772
72773 extern void tcp_send_probe0(struct sock *);
72774 extern void tcp_send_partial(struct sock *);
72775@@ -701,8 +701,8 @@ struct tcp_skb_cb {
72776 struct inet6_skb_parm h6;
72777 #endif
72778 } header; /* For incoming frames */
72779- __u32 seq; /* Starting sequence number */
72780- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
72781+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
72782+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
72783 __u32 when; /* used to compute rtt's */
72784 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
72785
72786@@ -716,7 +716,7 @@ struct tcp_skb_cb {
72787
72788 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
72789 /* 1 byte hole */
72790- __u32 ack_seq; /* Sequence number ACK'd */
72791+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
72792 };
72793
72794 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
72795diff --git a/include/net/xfrm.h b/include/net/xfrm.h
72796index 63445ed..d6fc34f 100644
72797--- a/include/net/xfrm.h
72798+++ b/include/net/xfrm.h
72799@@ -304,7 +304,7 @@ struct xfrm_policy_afinfo {
72800 struct net_device *dev,
72801 const struct flowi *fl);
72802 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
72803-};
72804+} __do_const;
72805
72806 extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
72807 extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
72808@@ -340,7 +340,7 @@ struct xfrm_state_afinfo {
72809 struct sk_buff *skb);
72810 int (*transport_finish)(struct sk_buff *skb,
72811 int async);
72812-};
72813+} __do_const;
72814
72815 extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
72816 extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
72817@@ -423,7 +423,7 @@ struct xfrm_mode {
72818 struct module *owner;
72819 unsigned int encap;
72820 int flags;
72821-};
72822+} __do_const;
72823
72824 /* Flags for xfrm_mode. */
72825 enum {
72826@@ -514,7 +514,7 @@ struct xfrm_policy {
72827 struct timer_list timer;
72828
72829 struct flow_cache_object flo;
72830- atomic_t genid;
72831+ atomic_unchecked_t genid;
72832 u32 priority;
72833 u32 index;
72834 struct xfrm_mark mark;
72835diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
72836index 1a046b1..ee0bef0 100644
72837--- a/include/rdma/iw_cm.h
72838+++ b/include/rdma/iw_cm.h
72839@@ -122,7 +122,7 @@ struct iw_cm_verbs {
72840 int backlog);
72841
72842 int (*destroy_listen)(struct iw_cm_id *cm_id);
72843-};
72844+} __no_const;
72845
72846 /**
72847 * iw_create_cm_id - Create an IW CM identifier.
72848diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
72849index 399162b..b337f1a 100644
72850--- a/include/scsi/libfc.h
72851+++ b/include/scsi/libfc.h
72852@@ -762,6 +762,7 @@ struct libfc_function_template {
72853 */
72854 void (*disc_stop_final) (struct fc_lport *);
72855 };
72856+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
72857
72858 /**
72859 * struct fc_disc - Discovery context
72860@@ -866,7 +867,7 @@ struct fc_lport {
72861 struct fc_vport *vport;
72862
72863 /* Operational Information */
72864- struct libfc_function_template tt;
72865+ libfc_function_template_no_const tt;
72866 u8 link_up;
72867 u8 qfull;
72868 enum fc_lport_state state;
72869diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
72870index e65c62e..aa2e5a2 100644
72871--- a/include/scsi/scsi_device.h
72872+++ b/include/scsi/scsi_device.h
72873@@ -170,9 +170,9 @@ struct scsi_device {
72874 unsigned int max_device_blocked; /* what device_blocked counts down from */
72875 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
72876
72877- atomic_t iorequest_cnt;
72878- atomic_t iodone_cnt;
72879- atomic_t ioerr_cnt;
72880+ atomic_unchecked_t iorequest_cnt;
72881+ atomic_unchecked_t iodone_cnt;
72882+ atomic_unchecked_t ioerr_cnt;
72883
72884 struct device sdev_gendev,
72885 sdev_dev;
72886diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
72887index b797e8f..8e2c3aa 100644
72888--- a/include/scsi/scsi_transport_fc.h
72889+++ b/include/scsi/scsi_transport_fc.h
72890@@ -751,7 +751,8 @@ struct fc_function_template {
72891 unsigned long show_host_system_hostname:1;
72892
72893 unsigned long disable_target_scan:1;
72894-};
72895+} __do_const;
72896+typedef struct fc_function_template __no_const fc_function_template_no_const;
72897
72898
72899 /**
72900diff --git a/include/sound/soc.h b/include/sound/soc.h
72901index bc56738..a4be132 100644
72902--- a/include/sound/soc.h
72903+++ b/include/sound/soc.h
72904@@ -771,7 +771,7 @@ struct snd_soc_codec_driver {
72905 /* probe ordering - for components with runtime dependencies */
72906 int probe_order;
72907 int remove_order;
72908-};
72909+} __do_const;
72910
72911 /* SoC platform interface */
72912 struct snd_soc_platform_driver {
72913@@ -817,7 +817,7 @@ struct snd_soc_platform_driver {
72914 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
72915 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
72916 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
72917-};
72918+} __do_const;
72919
72920 struct snd_soc_platform {
72921 const char *name;
72922diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
72923index 663e34a..91b306a 100644
72924--- a/include/target/target_core_base.h
72925+++ b/include/target/target_core_base.h
72926@@ -654,7 +654,7 @@ struct se_device {
72927 spinlock_t stats_lock;
72928 /* Active commands on this virtual SE device */
72929 atomic_t simple_cmds;
72930- atomic_t dev_ordered_id;
72931+ atomic_unchecked_t dev_ordered_id;
72932 atomic_t dev_ordered_sync;
72933 atomic_t dev_qf_count;
72934 int export_count;
72935diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
72936new file mode 100644
72937index 0000000..fb634b7
72938--- /dev/null
72939+++ b/include/trace/events/fs.h
72940@@ -0,0 +1,53 @@
72941+#undef TRACE_SYSTEM
72942+#define TRACE_SYSTEM fs
72943+
72944+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
72945+#define _TRACE_FS_H
72946+
72947+#include <linux/fs.h>
72948+#include <linux/tracepoint.h>
72949+
72950+TRACE_EVENT(do_sys_open,
72951+
72952+ TP_PROTO(const char *filename, int flags, int mode),
72953+
72954+ TP_ARGS(filename, flags, mode),
72955+
72956+ TP_STRUCT__entry(
72957+ __string( filename, filename )
72958+ __field( int, flags )
72959+ __field( int, mode )
72960+ ),
72961+
72962+ TP_fast_assign(
72963+ __assign_str(filename, filename);
72964+ __entry->flags = flags;
72965+ __entry->mode = mode;
72966+ ),
72967+
72968+ TP_printk("\"%s\" %x %o",
72969+ __get_str(filename), __entry->flags, __entry->mode)
72970+);
72971+
72972+TRACE_EVENT(open_exec,
72973+
72974+ TP_PROTO(const char *filename),
72975+
72976+ TP_ARGS(filename),
72977+
72978+ TP_STRUCT__entry(
72979+ __string( filename, filename )
72980+ ),
72981+
72982+ TP_fast_assign(
72983+ __assign_str(filename, filename);
72984+ ),
72985+
72986+ TP_printk("\"%s\"",
72987+ __get_str(filename))
72988+);
72989+
72990+#endif /* _TRACE_FS_H */
72991+
72992+/* This part must be outside protection */
72993+#include <trace/define_trace.h>
72994diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
72995index 1c09820..7f5ec79 100644
72996--- a/include/trace/events/irq.h
72997+++ b/include/trace/events/irq.h
72998@@ -36,7 +36,7 @@ struct softirq_action;
72999 */
73000 TRACE_EVENT(irq_handler_entry,
73001
73002- TP_PROTO(int irq, struct irqaction *action),
73003+ TP_PROTO(int irq, const struct irqaction *action),
73004
73005 TP_ARGS(irq, action),
73006
73007@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
73008 */
73009 TRACE_EVENT(irq_handler_exit,
73010
73011- TP_PROTO(int irq, struct irqaction *action, int ret),
73012+ TP_PROTO(int irq, const struct irqaction *action, int ret),
73013
73014 TP_ARGS(irq, action, ret),
73015
73016diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
73017index 7caf44c..23c6f27 100644
73018--- a/include/uapi/linux/a.out.h
73019+++ b/include/uapi/linux/a.out.h
73020@@ -39,6 +39,14 @@ enum machine_type {
73021 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
73022 };
73023
73024+/* Constants for the N_FLAGS field */
73025+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
73026+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
73027+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
73028+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
73029+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
73030+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
73031+
73032 #if !defined (N_MAGIC)
73033 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
73034 #endif
73035diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
73036index d876736..ccce5c0 100644
73037--- a/include/uapi/linux/byteorder/little_endian.h
73038+++ b/include/uapi/linux/byteorder/little_endian.h
73039@@ -42,51 +42,51 @@
73040
73041 static inline __le64 __cpu_to_le64p(const __u64 *p)
73042 {
73043- return (__force __le64)*p;
73044+ return (__force const __le64)*p;
73045 }
73046-static inline __u64 __le64_to_cpup(const __le64 *p)
73047+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
73048 {
73049- return (__force __u64)*p;
73050+ return (__force const __u64)*p;
73051 }
73052 static inline __le32 __cpu_to_le32p(const __u32 *p)
73053 {
73054- return (__force __le32)*p;
73055+ return (__force const __le32)*p;
73056 }
73057 static inline __u32 __le32_to_cpup(const __le32 *p)
73058 {
73059- return (__force __u32)*p;
73060+ return (__force const __u32)*p;
73061 }
73062 static inline __le16 __cpu_to_le16p(const __u16 *p)
73063 {
73064- return (__force __le16)*p;
73065+ return (__force const __le16)*p;
73066 }
73067 static inline __u16 __le16_to_cpup(const __le16 *p)
73068 {
73069- return (__force __u16)*p;
73070+ return (__force const __u16)*p;
73071 }
73072 static inline __be64 __cpu_to_be64p(const __u64 *p)
73073 {
73074- return (__force __be64)__swab64p(p);
73075+ return (__force const __be64)__swab64p(p);
73076 }
73077 static inline __u64 __be64_to_cpup(const __be64 *p)
73078 {
73079- return __swab64p((__u64 *)p);
73080+ return __swab64p((const __u64 *)p);
73081 }
73082 static inline __be32 __cpu_to_be32p(const __u32 *p)
73083 {
73084- return (__force __be32)__swab32p(p);
73085+ return (__force const __be32)__swab32p(p);
73086 }
73087-static inline __u32 __be32_to_cpup(const __be32 *p)
73088+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
73089 {
73090- return __swab32p((__u32 *)p);
73091+ return __swab32p((const __u32 *)p);
73092 }
73093 static inline __be16 __cpu_to_be16p(const __u16 *p)
73094 {
73095- return (__force __be16)__swab16p(p);
73096+ return (__force const __be16)__swab16p(p);
73097 }
73098 static inline __u16 __be16_to_cpup(const __be16 *p)
73099 {
73100- return __swab16p((__u16 *)p);
73101+ return __swab16p((const __u16 *)p);
73102 }
73103 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
73104 #define __le64_to_cpus(x) do { (void)(x); } while (0)
73105diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
73106index 126a817..d522bd1 100644
73107--- a/include/uapi/linux/elf.h
73108+++ b/include/uapi/linux/elf.h
73109@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
73110 #define PT_GNU_EH_FRAME 0x6474e550
73111
73112 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
73113+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
73114+
73115+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
73116+
73117+/* Constants for the e_flags field */
73118+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
73119+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
73120+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
73121+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
73122+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
73123+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
73124
73125 /*
73126 * Extended Numbering
73127@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
73128 #define DT_DEBUG 21
73129 #define DT_TEXTREL 22
73130 #define DT_JMPREL 23
73131+#define DT_FLAGS 30
73132+ #define DF_TEXTREL 0x00000004
73133 #define DT_ENCODING 32
73134 #define OLD_DT_LOOS 0x60000000
73135 #define DT_LOOS 0x6000000d
73136@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
73137 #define PF_W 0x2
73138 #define PF_X 0x1
73139
73140+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
73141+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
73142+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
73143+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
73144+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
73145+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
73146+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
73147+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
73148+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
73149+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
73150+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
73151+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
73152+
73153 typedef struct elf32_phdr{
73154 Elf32_Word p_type;
73155 Elf32_Off p_offset;
73156@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
73157 #define EI_OSABI 7
73158 #define EI_PAD 8
73159
73160+#define EI_PAX 14
73161+
73162 #define ELFMAG0 0x7f /* EI_MAG */
73163 #define ELFMAG1 'E'
73164 #define ELFMAG2 'L'
73165diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
73166index aa169c4..6a2771d 100644
73167--- a/include/uapi/linux/personality.h
73168+++ b/include/uapi/linux/personality.h
73169@@ -30,6 +30,7 @@ enum {
73170 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
73171 ADDR_NO_RANDOMIZE | \
73172 ADDR_COMPAT_LAYOUT | \
73173+ ADDR_LIMIT_3GB | \
73174 MMAP_PAGE_ZERO)
73175
73176 /*
73177diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
73178index 7530e74..e714828 100644
73179--- a/include/uapi/linux/screen_info.h
73180+++ b/include/uapi/linux/screen_info.h
73181@@ -43,7 +43,8 @@ struct screen_info {
73182 __u16 pages; /* 0x32 */
73183 __u16 vesa_attributes; /* 0x34 */
73184 __u32 capabilities; /* 0x36 */
73185- __u8 _reserved[6]; /* 0x3a */
73186+ __u16 vesapm_size; /* 0x3a */
73187+ __u8 _reserved[4]; /* 0x3c */
73188 } __attribute__((packed));
73189
73190 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
73191diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
73192index 0e011eb..82681b1 100644
73193--- a/include/uapi/linux/swab.h
73194+++ b/include/uapi/linux/swab.h
73195@@ -43,7 +43,7 @@
73196 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
73197 */
73198
73199-static inline __attribute_const__ __u16 __fswab16(__u16 val)
73200+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
73201 {
73202 #ifdef __HAVE_BUILTIN_BSWAP16__
73203 return __builtin_bswap16(val);
73204@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
73205 #endif
73206 }
73207
73208-static inline __attribute_const__ __u32 __fswab32(__u32 val)
73209+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
73210 {
73211 #ifdef __HAVE_BUILTIN_BSWAP32__
73212 return __builtin_bswap32(val);
73213@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
73214 #endif
73215 }
73216
73217-static inline __attribute_const__ __u64 __fswab64(__u64 val)
73218+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
73219 {
73220 #ifdef __HAVE_BUILTIN_BSWAP64__
73221 return __builtin_bswap64(val);
73222diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
73223index 6d67213..8dab561 100644
73224--- a/include/uapi/linux/sysctl.h
73225+++ b/include/uapi/linux/sysctl.h
73226@@ -155,7 +155,11 @@ enum
73227 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
73228 };
73229
73230-
73231+#ifdef CONFIG_PAX_SOFTMODE
73232+enum {
73233+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
73234+};
73235+#endif
73236
73237 /* CTL_VM names: */
73238 enum
73239diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
73240index 26607bd..588b65f 100644
73241--- a/include/uapi/linux/xattr.h
73242+++ b/include/uapi/linux/xattr.h
73243@@ -60,5 +60,9 @@
73244 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
73245 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
73246
73247+/* User namespace */
73248+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
73249+#define XATTR_PAX_FLAGS_SUFFIX "flags"
73250+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
73251
73252 #endif /* _UAPI_LINUX_XATTR_H */
73253diff --git a/include/video/udlfb.h b/include/video/udlfb.h
73254index f9466fa..f4e2b81 100644
73255--- a/include/video/udlfb.h
73256+++ b/include/video/udlfb.h
73257@@ -53,10 +53,10 @@ struct dlfb_data {
73258 u32 pseudo_palette[256];
73259 int blank_mode; /*one of FB_BLANK_ */
73260 /* blit-only rendering path metrics, exposed through sysfs */
73261- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
73262- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
73263- atomic_t bytes_sent; /* to usb, after compression including overhead */
73264- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
73265+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
73266+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
73267+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
73268+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
73269 };
73270
73271 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
73272diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
73273index 0993a22..32ba2fe 100644
73274--- a/include/video/uvesafb.h
73275+++ b/include/video/uvesafb.h
73276@@ -177,6 +177,7 @@ struct uvesafb_par {
73277 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
73278 u8 pmi_setpal; /* PMI for palette changes */
73279 u16 *pmi_base; /* protected mode interface location */
73280+ u8 *pmi_code; /* protected mode code location */
73281 void *pmi_start;
73282 void *pmi_pal;
73283 u8 *vbe_state_orig; /*
73284diff --git a/init/Kconfig b/init/Kconfig
73285index be8b7f5..1eeca9b 100644
73286--- a/init/Kconfig
73287+++ b/init/Kconfig
73288@@ -990,6 +990,7 @@ endif # CGROUPS
73289
73290 config CHECKPOINT_RESTORE
73291 bool "Checkpoint/restore support" if EXPERT
73292+ depends on !GRKERNSEC
73293 default n
73294 help
73295 Enables additional kernel features in a sake of checkpoint/restore.
73296@@ -1468,7 +1469,7 @@ config SLUB_DEBUG
73297
73298 config COMPAT_BRK
73299 bool "Disable heap randomization"
73300- default y
73301+ default n
73302 help
73303 Randomizing heap placement makes heap exploits harder, but it
73304 also breaks ancient binaries (including anything libc5 based).
73305@@ -1711,7 +1712,7 @@ config INIT_ALL_POSSIBLE
73306 config STOP_MACHINE
73307 bool
73308 default y
73309- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
73310+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
73311 help
73312 Need stop_machine() primitive.
73313
73314diff --git a/init/Makefile b/init/Makefile
73315index 7bc47ee..6da2dc7 100644
73316--- a/init/Makefile
73317+++ b/init/Makefile
73318@@ -2,6 +2,9 @@
73319 # Makefile for the linux kernel.
73320 #
73321
73322+ccflags-y := $(GCC_PLUGINS_CFLAGS)
73323+asflags-y := $(GCC_PLUGINS_AFLAGS)
73324+
73325 obj-y := main.o version.o mounts.o
73326 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
73327 obj-y += noinitramfs.o
73328diff --git a/init/do_mounts.c b/init/do_mounts.c
73329index 1d1b634..a1c810f 100644
73330--- a/init/do_mounts.c
73331+++ b/init/do_mounts.c
73332@@ -355,11 +355,11 @@ static void __init get_fs_names(char *page)
73333 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
73334 {
73335 struct super_block *s;
73336- int err = sys_mount(name, "/root", fs, flags, data);
73337+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
73338 if (err)
73339 return err;
73340
73341- sys_chdir("/root");
73342+ sys_chdir((const char __force_user *)"/root");
73343 s = current->fs->pwd.dentry->d_sb;
73344 ROOT_DEV = s->s_dev;
73345 printk(KERN_INFO
73346@@ -480,18 +480,18 @@ void __init change_floppy(char *fmt, ...)
73347 va_start(args, fmt);
73348 vsprintf(buf, fmt, args);
73349 va_end(args);
73350- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
73351+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
73352 if (fd >= 0) {
73353 sys_ioctl(fd, FDEJECT, 0);
73354 sys_close(fd);
73355 }
73356 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
73357- fd = sys_open("/dev/console", O_RDWR, 0);
73358+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
73359 if (fd >= 0) {
73360 sys_ioctl(fd, TCGETS, (long)&termios);
73361 termios.c_lflag &= ~ICANON;
73362 sys_ioctl(fd, TCSETSF, (long)&termios);
73363- sys_read(fd, &c, 1);
73364+ sys_read(fd, (char __user *)&c, 1);
73365 termios.c_lflag |= ICANON;
73366 sys_ioctl(fd, TCSETSF, (long)&termios);
73367 sys_close(fd);
73368@@ -585,6 +585,6 @@ void __init prepare_namespace(void)
73369 mount_root();
73370 out:
73371 devtmpfs_mount("dev");
73372- sys_mount(".", "/", NULL, MS_MOVE, NULL);
73373- sys_chroot(".");
73374+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
73375+ sys_chroot((const char __force_user *)".");
73376 }
73377diff --git a/init/do_mounts.h b/init/do_mounts.h
73378index f5b978a..69dbfe8 100644
73379--- a/init/do_mounts.h
73380+++ b/init/do_mounts.h
73381@@ -15,15 +15,15 @@ extern int root_mountflags;
73382
73383 static inline int create_dev(char *name, dev_t dev)
73384 {
73385- sys_unlink(name);
73386- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
73387+ sys_unlink((char __force_user *)name);
73388+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
73389 }
73390
73391 #if BITS_PER_LONG == 32
73392 static inline u32 bstat(char *name)
73393 {
73394 struct stat64 stat;
73395- if (sys_stat64(name, &stat) != 0)
73396+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
73397 return 0;
73398 if (!S_ISBLK(stat.st_mode))
73399 return 0;
73400@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
73401 static inline u32 bstat(char *name)
73402 {
73403 struct stat stat;
73404- if (sys_newstat(name, &stat) != 0)
73405+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
73406 return 0;
73407 if (!S_ISBLK(stat.st_mode))
73408 return 0;
73409diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
73410index f9acf71..1e19144 100644
73411--- a/init/do_mounts_initrd.c
73412+++ b/init/do_mounts_initrd.c
73413@@ -58,8 +58,8 @@ static void __init handle_initrd(void)
73414 create_dev("/dev/root.old", Root_RAM0);
73415 /* mount initrd on rootfs' /root */
73416 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
73417- sys_mkdir("/old", 0700);
73418- sys_chdir("/old");
73419+ sys_mkdir((const char __force_user *)"/old", 0700);
73420+ sys_chdir((const char __force_user *)"/old");
73421
73422 /*
73423 * In case that a resume from disk is carried out by linuxrc or one of
73424@@ -73,31 +73,31 @@ static void __init handle_initrd(void)
73425 current->flags &= ~PF_FREEZER_SKIP;
73426
73427 /* move initrd to rootfs' /old */
73428- sys_mount("..", ".", NULL, MS_MOVE, NULL);
73429+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
73430 /* switch root and cwd back to / of rootfs */
73431- sys_chroot("..");
73432+ sys_chroot((const char __force_user *)"..");
73433
73434 if (new_decode_dev(real_root_dev) == Root_RAM0) {
73435- sys_chdir("/old");
73436+ sys_chdir((const char __force_user *)"/old");
73437 return;
73438 }
73439
73440- sys_chdir("/");
73441+ sys_chdir((const char __force_user *)"/");
73442 ROOT_DEV = new_decode_dev(real_root_dev);
73443 mount_root();
73444
73445 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
73446- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
73447+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
73448 if (!error)
73449 printk("okay\n");
73450 else {
73451- int fd = sys_open("/dev/root.old", O_RDWR, 0);
73452+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
73453 if (error == -ENOENT)
73454 printk("/initrd does not exist. Ignored.\n");
73455 else
73456 printk("failed\n");
73457 printk(KERN_NOTICE "Unmounting old root\n");
73458- sys_umount("/old", MNT_DETACH);
73459+ sys_umount((char __force_user *)"/old", MNT_DETACH);
73460 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
73461 if (fd < 0) {
73462 error = fd;
73463@@ -120,11 +120,11 @@ int __init initrd_load(void)
73464 * mounted in the normal path.
73465 */
73466 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
73467- sys_unlink("/initrd.image");
73468+ sys_unlink((const char __force_user *)"/initrd.image");
73469 handle_initrd();
73470 return 1;
73471 }
73472 }
73473- sys_unlink("/initrd.image");
73474+ sys_unlink((const char __force_user *)"/initrd.image");
73475 return 0;
73476 }
73477diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
73478index 8cb6db5..d729f50 100644
73479--- a/init/do_mounts_md.c
73480+++ b/init/do_mounts_md.c
73481@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
73482 partitioned ? "_d" : "", minor,
73483 md_setup_args[ent].device_names);
73484
73485- fd = sys_open(name, 0, 0);
73486+ fd = sys_open((char __force_user *)name, 0, 0);
73487 if (fd < 0) {
73488 printk(KERN_ERR "md: open failed - cannot start "
73489 "array %s\n", name);
73490@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
73491 * array without it
73492 */
73493 sys_close(fd);
73494- fd = sys_open(name, 0, 0);
73495+ fd = sys_open((char __force_user *)name, 0, 0);
73496 sys_ioctl(fd, BLKRRPART, 0);
73497 }
73498 sys_close(fd);
73499@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
73500
73501 wait_for_device_probe();
73502
73503- fd = sys_open("/dev/md0", 0, 0);
73504+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
73505 if (fd >= 0) {
73506 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
73507 sys_close(fd);
73508diff --git a/init/init_task.c b/init/init_task.c
73509index 8b2f399..f0797c9 100644
73510--- a/init/init_task.c
73511+++ b/init/init_task.c
73512@@ -20,5 +20,9 @@ EXPORT_SYMBOL(init_task);
73513 * Initial thread structure. Alignment of this is handled by a special
73514 * linker map entry.
73515 */
73516+#ifdef CONFIG_X86
73517+union thread_union init_thread_union __init_task_data;
73518+#else
73519 union thread_union init_thread_union __init_task_data =
73520 { INIT_THREAD_INFO(init_task) };
73521+#endif
73522diff --git a/init/initramfs.c b/init/initramfs.c
73523index 84c6bf1..8899338 100644
73524--- a/init/initramfs.c
73525+++ b/init/initramfs.c
73526@@ -84,7 +84,7 @@ static void __init free_hash(void)
73527 }
73528 }
73529
73530-static long __init do_utime(char *filename, time_t mtime)
73531+static long __init do_utime(char __force_user *filename, time_t mtime)
73532 {
73533 struct timespec t[2];
73534
73535@@ -119,7 +119,7 @@ static void __init dir_utime(void)
73536 struct dir_entry *de, *tmp;
73537 list_for_each_entry_safe(de, tmp, &dir_list, list) {
73538 list_del(&de->list);
73539- do_utime(de->name, de->mtime);
73540+ do_utime((char __force_user *)de->name, de->mtime);
73541 kfree(de->name);
73542 kfree(de);
73543 }
73544@@ -281,7 +281,7 @@ static int __init maybe_link(void)
73545 if (nlink >= 2) {
73546 char *old = find_link(major, minor, ino, mode, collected);
73547 if (old)
73548- return (sys_link(old, collected) < 0) ? -1 : 1;
73549+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
73550 }
73551 return 0;
73552 }
73553@@ -290,11 +290,11 @@ static void __init clean_path(char *path, umode_t mode)
73554 {
73555 struct stat st;
73556
73557- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
73558+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
73559 if (S_ISDIR(st.st_mode))
73560- sys_rmdir(path);
73561+ sys_rmdir((char __force_user *)path);
73562 else
73563- sys_unlink(path);
73564+ sys_unlink((char __force_user *)path);
73565 }
73566 }
73567
73568@@ -315,7 +315,7 @@ static int __init do_name(void)
73569 int openflags = O_WRONLY|O_CREAT;
73570 if (ml != 1)
73571 openflags |= O_TRUNC;
73572- wfd = sys_open(collected, openflags, mode);
73573+ wfd = sys_open((char __force_user *)collected, openflags, mode);
73574
73575 if (wfd >= 0) {
73576 sys_fchown(wfd, uid, gid);
73577@@ -327,17 +327,17 @@ static int __init do_name(void)
73578 }
73579 }
73580 } else if (S_ISDIR(mode)) {
73581- sys_mkdir(collected, mode);
73582- sys_chown(collected, uid, gid);
73583- sys_chmod(collected, mode);
73584+ sys_mkdir((char __force_user *)collected, mode);
73585+ sys_chown((char __force_user *)collected, uid, gid);
73586+ sys_chmod((char __force_user *)collected, mode);
73587 dir_add(collected, mtime);
73588 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
73589 S_ISFIFO(mode) || S_ISSOCK(mode)) {
73590 if (maybe_link() == 0) {
73591- sys_mknod(collected, mode, rdev);
73592- sys_chown(collected, uid, gid);
73593- sys_chmod(collected, mode);
73594- do_utime(collected, mtime);
73595+ sys_mknod((char __force_user *)collected, mode, rdev);
73596+ sys_chown((char __force_user *)collected, uid, gid);
73597+ sys_chmod((char __force_user *)collected, mode);
73598+ do_utime((char __force_user *)collected, mtime);
73599 }
73600 }
73601 return 0;
73602@@ -346,15 +346,15 @@ static int __init do_name(void)
73603 static int __init do_copy(void)
73604 {
73605 if (count >= body_len) {
73606- sys_write(wfd, victim, body_len);
73607+ sys_write(wfd, (char __force_user *)victim, body_len);
73608 sys_close(wfd);
73609- do_utime(vcollected, mtime);
73610+ do_utime((char __force_user *)vcollected, mtime);
73611 kfree(vcollected);
73612 eat(body_len);
73613 state = SkipIt;
73614 return 0;
73615 } else {
73616- sys_write(wfd, victim, count);
73617+ sys_write(wfd, (char __force_user *)victim, count);
73618 body_len -= count;
73619 eat(count);
73620 return 1;
73621@@ -365,9 +365,9 @@ static int __init do_symlink(void)
73622 {
73623 collected[N_ALIGN(name_len) + body_len] = '\0';
73624 clean_path(collected, 0);
73625- sys_symlink(collected + N_ALIGN(name_len), collected);
73626- sys_lchown(collected, uid, gid);
73627- do_utime(collected, mtime);
73628+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
73629+ sys_lchown((char __force_user *)collected, uid, gid);
73630+ do_utime((char __force_user *)collected, mtime);
73631 state = SkipIt;
73632 next_state = Reset;
73633 return 0;
73634diff --git a/init/main.c b/init/main.c
73635index cee4b5c..360e10a 100644
73636--- a/init/main.c
73637+++ b/init/main.c
73638@@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
73639 extern void tc_init(void);
73640 #endif
73641
73642+extern void grsecurity_init(void);
73643+
73644 /*
73645 * Debug helper: via this flag we know that we are in 'early bootup code'
73646 * where only the boot processor is running with IRQ disabled. This means
73647@@ -149,6 +151,61 @@ static int __init set_reset_devices(char *str)
73648
73649 __setup("reset_devices", set_reset_devices);
73650
73651+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
73652+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
73653+static int __init setup_grsec_proc_gid(char *str)
73654+{
73655+ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
73656+ return 1;
73657+}
73658+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
73659+#endif
73660+
73661+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
73662+extern char pax_enter_kernel_user[];
73663+extern char pax_exit_kernel_user[];
73664+extern pgdval_t clone_pgd_mask;
73665+#endif
73666+
73667+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
73668+static int __init setup_pax_nouderef(char *str)
73669+{
73670+#ifdef CONFIG_X86_32
73671+ unsigned int cpu;
73672+ struct desc_struct *gdt;
73673+
73674+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
73675+ gdt = get_cpu_gdt_table(cpu);
73676+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
73677+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
73678+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
73679+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
73680+ }
73681+ loadsegment(ds, __KERNEL_DS);
73682+ loadsegment(es, __KERNEL_DS);
73683+ loadsegment(ss, __KERNEL_DS);
73684+#else
73685+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
73686+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
73687+ clone_pgd_mask = ~(pgdval_t)0UL;
73688+#endif
73689+
73690+ return 0;
73691+}
73692+early_param("pax_nouderef", setup_pax_nouderef);
73693+#endif
73694+
73695+#ifdef CONFIG_PAX_SOFTMODE
73696+int pax_softmode;
73697+
73698+static int __init setup_pax_softmode(char *str)
73699+{
73700+ get_option(&str, &pax_softmode);
73701+ return 1;
73702+}
73703+__setup("pax_softmode=", setup_pax_softmode);
73704+#endif
73705+
73706 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
73707 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
73708 static const char *panic_later, *panic_param;
73709@@ -681,6 +738,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
73710 {
73711 int count = preempt_count();
73712 int ret;
73713+ const char *msg1 = "", *msg2 = "";
73714
73715 if (initcall_debug)
73716 ret = do_one_initcall_debug(fn);
73717@@ -693,15 +751,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
73718 sprintf(msgbuf, "error code %d ", ret);
73719
73720 if (preempt_count() != count) {
73721- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
73722+ msg1 = " preemption imbalance";
73723 preempt_count() = count;
73724 }
73725 if (irqs_disabled()) {
73726- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
73727+ msg2 = " disabled interrupts";
73728 local_irq_enable();
73729 }
73730- if (msgbuf[0]) {
73731- printk("initcall %pF returned with %s\n", fn, msgbuf);
73732+ if (msgbuf[0] || *msg1 || *msg2) {
73733+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
73734 }
73735
73736 return ret;
73737@@ -755,8 +813,14 @@ static void __init do_initcall_level(int level)
73738 level, level,
73739 &repair_env_string);
73740
73741- for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++)
73742+ for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++) {
73743 do_one_initcall(*fn);
73744+
73745+#ifdef LATENT_ENTROPY_PLUGIN
73746+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
73747+#endif
73748+
73749+ }
73750 }
73751
73752 static void __init do_initcalls(void)
73753@@ -790,8 +854,14 @@ static void __init do_pre_smp_initcalls(void)
73754 {
73755 initcall_t *fn;
73756
73757- for (fn = __initcall_start; fn < __initcall0_start; fn++)
73758+ for (fn = __initcall_start; fn < __initcall0_start; fn++) {
73759 do_one_initcall(*fn);
73760+
73761+#ifdef LATENT_ENTROPY_PLUGIN
73762+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
73763+#endif
73764+
73765+ }
73766 }
73767
73768 static int run_init_process(const char *init_filename)
73769@@ -877,7 +947,7 @@ static noinline void __init kernel_init_freeable(void)
73770 do_basic_setup();
73771
73772 /* Open the /dev/console on the rootfs, this should never fail */
73773- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
73774+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
73775 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
73776
73777 (void) sys_dup(0);
73778@@ -890,11 +960,13 @@ static noinline void __init kernel_init_freeable(void)
73779 if (!ramdisk_execute_command)
73780 ramdisk_execute_command = "/init";
73781
73782- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
73783+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
73784 ramdisk_execute_command = NULL;
73785 prepare_namespace();
73786 }
73787
73788+ grsecurity_init();
73789+
73790 /*
73791 * Ok, we have completed the initial bootup, and
73792 * we're essentially up and running. Get rid of the
73793diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
73794index 130dfec..cc88451 100644
73795--- a/ipc/ipc_sysctl.c
73796+++ b/ipc/ipc_sysctl.c
73797@@ -30,7 +30,7 @@ static void *get_ipc(ctl_table *table)
73798 static int proc_ipc_dointvec(ctl_table *table, int write,
73799 void __user *buffer, size_t *lenp, loff_t *ppos)
73800 {
73801- struct ctl_table ipc_table;
73802+ ctl_table_no_const ipc_table;
73803
73804 memcpy(&ipc_table, table, sizeof(ipc_table));
73805 ipc_table.data = get_ipc(table);
73806@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(ctl_table *table, int write,
73807 static int proc_ipc_dointvec_minmax(ctl_table *table, int write,
73808 void __user *buffer, size_t *lenp, loff_t *ppos)
73809 {
73810- struct ctl_table ipc_table;
73811+ ctl_table_no_const ipc_table;
73812
73813 memcpy(&ipc_table, table, sizeof(ipc_table));
73814 ipc_table.data = get_ipc(table);
73815@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(ctl_table *table, int write,
73816 static int proc_ipc_callback_dointvec(ctl_table *table, int write,
73817 void __user *buffer, size_t *lenp, loff_t *ppos)
73818 {
73819- struct ctl_table ipc_table;
73820+ ctl_table_no_const ipc_table;
73821 size_t lenp_bef = *lenp;
73822 int rc;
73823
73824@@ -88,7 +88,7 @@ static int proc_ipc_callback_dointvec(ctl_table *table, int write,
73825 static int proc_ipc_doulongvec_minmax(ctl_table *table, int write,
73826 void __user *buffer, size_t *lenp, loff_t *ppos)
73827 {
73828- struct ctl_table ipc_table;
73829+ ctl_table_no_const ipc_table;
73830 memcpy(&ipc_table, table, sizeof(ipc_table));
73831 ipc_table.data = get_ipc(table);
73832
73833@@ -122,7 +122,7 @@ static void ipc_auto_callback(int val)
73834 static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write,
73835 void __user *buffer, size_t *lenp, loff_t *ppos)
73836 {
73837- struct ctl_table ipc_table;
73838+ ctl_table_no_const ipc_table;
73839 size_t lenp_bef = *lenp;
73840 int oldval;
73841 int rc;
73842diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
73843index 383d638..943fdbb 100644
73844--- a/ipc/mq_sysctl.c
73845+++ b/ipc/mq_sysctl.c
73846@@ -25,7 +25,7 @@ static void *get_mq(ctl_table *table)
73847 static int proc_mq_dointvec_minmax(ctl_table *table, int write,
73848 void __user *buffer, size_t *lenp, loff_t *ppos)
73849 {
73850- struct ctl_table mq_table;
73851+ ctl_table_no_const mq_table;
73852 memcpy(&mq_table, table, sizeof(mq_table));
73853 mq_table.data = get_mq(table);
73854
73855diff --git a/ipc/mqueue.c b/ipc/mqueue.c
73856index f3f40dc..ffe5a3a 100644
73857--- a/ipc/mqueue.c
73858+++ b/ipc/mqueue.c
73859@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
73860 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
73861 info->attr.mq_msgsize);
73862
73863+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
73864 spin_lock(&mq_lock);
73865 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
73866 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
73867diff --git a/ipc/msg.c b/ipc/msg.c
73868index 31cd1bf..9778e0f8 100644
73869--- a/ipc/msg.c
73870+++ b/ipc/msg.c
73871@@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
73872 return security_msg_queue_associate(msq, msgflg);
73873 }
73874
73875+static struct ipc_ops msg_ops = {
73876+ .getnew = newque,
73877+ .associate = msg_security,
73878+ .more_checks = NULL
73879+};
73880+
73881 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
73882 {
73883 struct ipc_namespace *ns;
73884- struct ipc_ops msg_ops;
73885 struct ipc_params msg_params;
73886
73887 ns = current->nsproxy->ipc_ns;
73888
73889- msg_ops.getnew = newque;
73890- msg_ops.associate = msg_security;
73891- msg_ops.more_checks = NULL;
73892-
73893 msg_params.key = key;
73894 msg_params.flg = msgflg;
73895
73896@@ -872,6 +873,7 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp,
73897 goto out_unlock;
73898 break;
73899 }
73900+ msg = ERR_PTR(-EAGAIN);
73901 } else
73902 break;
73903 msg_counter++;
73904diff --git a/ipc/sem.c b/ipc/sem.c
73905index 58d31f1..cce7a55 100644
73906--- a/ipc/sem.c
73907+++ b/ipc/sem.c
73908@@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
73909 return 0;
73910 }
73911
73912+static struct ipc_ops sem_ops = {
73913+ .getnew = newary,
73914+ .associate = sem_security,
73915+ .more_checks = sem_more_checks
73916+};
73917+
73918 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
73919 {
73920 struct ipc_namespace *ns;
73921- struct ipc_ops sem_ops;
73922 struct ipc_params sem_params;
73923
73924 ns = current->nsproxy->ipc_ns;
73925@@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
73926 if (nsems < 0 || nsems > ns->sc_semmsl)
73927 return -EINVAL;
73928
73929- sem_ops.getnew = newary;
73930- sem_ops.associate = sem_security;
73931- sem_ops.more_checks = sem_more_checks;
73932-
73933 sem_params.key = key;
73934 sem_params.flg = semflg;
73935 sem_params.u.nsems = nsems;
73936diff --git a/ipc/shm.c b/ipc/shm.c
73937index 4fa6d8f..55cff14 100644
73938--- a/ipc/shm.c
73939+++ b/ipc/shm.c
73940@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
73941 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
73942 #endif
73943
73944+#ifdef CONFIG_GRKERNSEC
73945+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
73946+ const time_t shm_createtime, const kuid_t cuid,
73947+ const int shmid);
73948+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
73949+ const time_t shm_createtime);
73950+#endif
73951+
73952 void shm_init_ns(struct ipc_namespace *ns)
73953 {
73954 ns->shm_ctlmax = SHMMAX;
73955@@ -521,6 +529,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
73956 shp->shm_lprid = 0;
73957 shp->shm_atim = shp->shm_dtim = 0;
73958 shp->shm_ctim = get_seconds();
73959+#ifdef CONFIG_GRKERNSEC
73960+ {
73961+ struct timespec timeval;
73962+ do_posix_clock_monotonic_gettime(&timeval);
73963+
73964+ shp->shm_createtime = timeval.tv_sec;
73965+ }
73966+#endif
73967 shp->shm_segsz = size;
73968 shp->shm_nattch = 0;
73969 shp->shm_file = file;
73970@@ -572,18 +588,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
73971 return 0;
73972 }
73973
73974+static struct ipc_ops shm_ops = {
73975+ .getnew = newseg,
73976+ .associate = shm_security,
73977+ .more_checks = shm_more_checks
73978+};
73979+
73980 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
73981 {
73982 struct ipc_namespace *ns;
73983- struct ipc_ops shm_ops;
73984 struct ipc_params shm_params;
73985
73986 ns = current->nsproxy->ipc_ns;
73987
73988- shm_ops.getnew = newseg;
73989- shm_ops.associate = shm_security;
73990- shm_ops.more_checks = shm_more_checks;
73991-
73992 shm_params.key = key;
73993 shm_params.flg = shmflg;
73994 shm_params.u.size = size;
73995@@ -1004,6 +1021,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
73996 f_mode = FMODE_READ | FMODE_WRITE;
73997 }
73998 if (shmflg & SHM_EXEC) {
73999+
74000+#ifdef CONFIG_PAX_MPROTECT
74001+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
74002+ goto out;
74003+#endif
74004+
74005 prot |= PROT_EXEC;
74006 acc_mode |= S_IXUGO;
74007 }
74008@@ -1027,9 +1050,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
74009 if (err)
74010 goto out_unlock;
74011
74012+#ifdef CONFIG_GRKERNSEC
74013+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
74014+ shp->shm_perm.cuid, shmid) ||
74015+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
74016+ err = -EACCES;
74017+ goto out_unlock;
74018+ }
74019+#endif
74020+
74021 path = shp->shm_file->f_path;
74022 path_get(&path);
74023 shp->shm_nattch++;
74024+#ifdef CONFIG_GRKERNSEC
74025+ shp->shm_lapid = current->pid;
74026+#endif
74027 size = i_size_read(path.dentry->d_inode);
74028 shm_unlock(shp);
74029
74030diff --git a/kernel/acct.c b/kernel/acct.c
74031index 051e071..15e0920 100644
74032--- a/kernel/acct.c
74033+++ b/kernel/acct.c
74034@@ -550,7 +550,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
74035 */
74036 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
74037 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
74038- file->f_op->write(file, (char *)&ac,
74039+ file->f_op->write(file, (char __force_user *)&ac,
74040 sizeof(acct_t), &file->f_pos);
74041 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
74042 set_fs(fs);
74043diff --git a/kernel/audit.c b/kernel/audit.c
74044index d596e53..dbef3c3 100644
74045--- a/kernel/audit.c
74046+++ b/kernel/audit.c
74047@@ -116,7 +116,7 @@ u32 audit_sig_sid = 0;
74048 3) suppressed due to audit_rate_limit
74049 4) suppressed due to audit_backlog_limit
74050 */
74051-static atomic_t audit_lost = ATOMIC_INIT(0);
74052+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
74053
74054 /* The netlink socket. */
74055 static struct sock *audit_sock;
74056@@ -238,7 +238,7 @@ void audit_log_lost(const char *message)
74057 unsigned long now;
74058 int print;
74059
74060- atomic_inc(&audit_lost);
74061+ atomic_inc_unchecked(&audit_lost);
74062
74063 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
74064
74065@@ -257,7 +257,7 @@ void audit_log_lost(const char *message)
74066 printk(KERN_WARNING
74067 "audit: audit_lost=%d audit_rate_limit=%d "
74068 "audit_backlog_limit=%d\n",
74069- atomic_read(&audit_lost),
74070+ atomic_read_unchecked(&audit_lost),
74071 audit_rate_limit,
74072 audit_backlog_limit);
74073 audit_panic(message);
74074@@ -681,7 +681,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
74075 status_set.pid = audit_pid;
74076 status_set.rate_limit = audit_rate_limit;
74077 status_set.backlog_limit = audit_backlog_limit;
74078- status_set.lost = atomic_read(&audit_lost);
74079+ status_set.lost = atomic_read_unchecked(&audit_lost);
74080 status_set.backlog = skb_queue_len(&audit_skb_queue);
74081 audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
74082 &status_set, sizeof(status_set));
74083diff --git a/kernel/auditsc.c b/kernel/auditsc.c
74084index a371f85..da826c1 100644
74085--- a/kernel/auditsc.c
74086+++ b/kernel/auditsc.c
74087@@ -2292,7 +2292,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
74088 }
74089
74090 /* global counter which is incremented every time something logs in */
74091-static atomic_t session_id = ATOMIC_INIT(0);
74092+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
74093
74094 /**
74095 * audit_set_loginuid - set current task's audit_context loginuid
74096@@ -2316,7 +2316,7 @@ int audit_set_loginuid(kuid_t loginuid)
74097 return -EPERM;
74098 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
74099
74100- sessionid = atomic_inc_return(&session_id);
74101+ sessionid = atomic_inc_return_unchecked(&session_id);
74102 if (context && context->in_syscall) {
74103 struct audit_buffer *ab;
74104
74105diff --git a/kernel/capability.c b/kernel/capability.c
74106index 493d972..f87dfbd 100644
74107--- a/kernel/capability.c
74108+++ b/kernel/capability.c
74109@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
74110 * before modification is attempted and the application
74111 * fails.
74112 */
74113+ if (tocopy > ARRAY_SIZE(kdata))
74114+ return -EFAULT;
74115+
74116 if (copy_to_user(dataptr, kdata, tocopy
74117 * sizeof(struct __user_cap_data_struct))) {
74118 return -EFAULT;
74119@@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
74120 int ret;
74121
74122 rcu_read_lock();
74123- ret = security_capable(__task_cred(t), ns, cap);
74124+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
74125+ gr_task_is_capable(t, __task_cred(t), cap);
74126 rcu_read_unlock();
74127
74128- return (ret == 0);
74129+ return ret;
74130 }
74131
74132 /**
74133@@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
74134 int ret;
74135
74136 rcu_read_lock();
74137- ret = security_capable_noaudit(__task_cred(t), ns, cap);
74138+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
74139 rcu_read_unlock();
74140
74141- return (ret == 0);
74142+ return ret;
74143 }
74144
74145 /**
74146@@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
74147 BUG();
74148 }
74149
74150- if (security_capable(current_cred(), ns, cap) == 0) {
74151+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
74152 current->flags |= PF_SUPERPRIV;
74153 return true;
74154 }
74155@@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
74156 }
74157 EXPORT_SYMBOL(ns_capable);
74158
74159+bool ns_capable_nolog(struct user_namespace *ns, int cap)
74160+{
74161+ if (unlikely(!cap_valid(cap))) {
74162+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
74163+ BUG();
74164+ }
74165+
74166+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
74167+ current->flags |= PF_SUPERPRIV;
74168+ return true;
74169+ }
74170+ return false;
74171+}
74172+EXPORT_SYMBOL(ns_capable_nolog);
74173+
74174 /**
74175 * capable - Determine if the current task has a superior capability in effect
74176 * @cap: The capability to be tested for
74177@@ -408,6 +427,12 @@ bool capable(int cap)
74178 }
74179 EXPORT_SYMBOL(capable);
74180
74181+bool capable_nolog(int cap)
74182+{
74183+ return ns_capable_nolog(&init_user_ns, cap);
74184+}
74185+EXPORT_SYMBOL(capable_nolog);
74186+
74187 /**
74188 * nsown_capable - Check superior capability to one's own user_ns
74189 * @cap: The capability in question
74190@@ -440,3 +465,10 @@ bool inode_capable(const struct inode *inode, int cap)
74191
74192 return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
74193 }
74194+
74195+bool inode_capable_nolog(const struct inode *inode, int cap)
74196+{
74197+ struct user_namespace *ns = current_user_ns();
74198+
74199+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
74200+}
74201diff --git a/kernel/cgroup.c b/kernel/cgroup.c
74202index 1e23664..570a83d 100644
74203--- a/kernel/cgroup.c
74204+++ b/kernel/cgroup.c
74205@@ -5543,7 +5543,7 @@ static int cgroup_css_links_read(struct cgroup *cont,
74206 struct css_set *cg = link->cg;
74207 struct task_struct *task;
74208 int count = 0;
74209- seq_printf(seq, "css_set %p\n", cg);
74210+ seq_printf(seq, "css_set %pK\n", cg);
74211 list_for_each_entry(task, &cg->tasks, cg_list) {
74212 if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
74213 seq_puts(seq, " ...\n");
74214diff --git a/kernel/compat.c b/kernel/compat.c
74215index 36700e9..73d770c 100644
74216--- a/kernel/compat.c
74217+++ b/kernel/compat.c
74218@@ -13,6 +13,7 @@
74219
74220 #include <linux/linkage.h>
74221 #include <linux/compat.h>
74222+#include <linux/module.h>
74223 #include <linux/errno.h>
74224 #include <linux/time.h>
74225 #include <linux/signal.h>
74226@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
74227 mm_segment_t oldfs;
74228 long ret;
74229
74230- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
74231+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
74232 oldfs = get_fs();
74233 set_fs(KERNEL_DS);
74234 ret = hrtimer_nanosleep_restart(restart);
74235@@ -252,7 +253,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
74236 oldfs = get_fs();
74237 set_fs(KERNEL_DS);
74238 ret = hrtimer_nanosleep(&tu,
74239- rmtp ? (struct timespec __user *)&rmt : NULL,
74240+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
74241 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
74242 set_fs(oldfs);
74243
74244@@ -361,7 +362,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
74245 mm_segment_t old_fs = get_fs();
74246
74247 set_fs(KERNEL_DS);
74248- ret = sys_sigpending((old_sigset_t __user *) &s);
74249+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
74250 set_fs(old_fs);
74251 if (ret == 0)
74252 ret = put_user(s, set);
74253@@ -451,7 +452,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
74254 mm_segment_t old_fs = get_fs();
74255
74256 set_fs(KERNEL_DS);
74257- ret = sys_old_getrlimit(resource, &r);
74258+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
74259 set_fs(old_fs);
74260
74261 if (!ret) {
74262@@ -523,7 +524,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
74263 mm_segment_t old_fs = get_fs();
74264
74265 set_fs(KERNEL_DS);
74266- ret = sys_getrusage(who, (struct rusage __user *) &r);
74267+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
74268 set_fs(old_fs);
74269
74270 if (ret)
74271@@ -552,8 +553,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
74272 set_fs (KERNEL_DS);
74273 ret = sys_wait4(pid,
74274 (stat_addr ?
74275- (unsigned int __user *) &status : NULL),
74276- options, (struct rusage __user *) &r);
74277+ (unsigned int __force_user *) &status : NULL),
74278+ options, (struct rusage __force_user *) &r);
74279 set_fs (old_fs);
74280
74281 if (ret > 0) {
74282@@ -579,8 +580,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
74283 memset(&info, 0, sizeof(info));
74284
74285 set_fs(KERNEL_DS);
74286- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
74287- uru ? (struct rusage __user *)&ru : NULL);
74288+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
74289+ uru ? (struct rusage __force_user *)&ru : NULL);
74290 set_fs(old_fs);
74291
74292 if ((ret < 0) || (info.si_signo == 0))
74293@@ -714,8 +715,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
74294 oldfs = get_fs();
74295 set_fs(KERNEL_DS);
74296 err = sys_timer_settime(timer_id, flags,
74297- (struct itimerspec __user *) &newts,
74298- (struct itimerspec __user *) &oldts);
74299+ (struct itimerspec __force_user *) &newts,
74300+ (struct itimerspec __force_user *) &oldts);
74301 set_fs(oldfs);
74302 if (!err && old && put_compat_itimerspec(old, &oldts))
74303 return -EFAULT;
74304@@ -732,7 +733,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
74305 oldfs = get_fs();
74306 set_fs(KERNEL_DS);
74307 err = sys_timer_gettime(timer_id,
74308- (struct itimerspec __user *) &ts);
74309+ (struct itimerspec __force_user *) &ts);
74310 set_fs(oldfs);
74311 if (!err && put_compat_itimerspec(setting, &ts))
74312 return -EFAULT;
74313@@ -751,7 +752,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
74314 oldfs = get_fs();
74315 set_fs(KERNEL_DS);
74316 err = sys_clock_settime(which_clock,
74317- (struct timespec __user *) &ts);
74318+ (struct timespec __force_user *) &ts);
74319 set_fs(oldfs);
74320 return err;
74321 }
74322@@ -766,7 +767,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
74323 oldfs = get_fs();
74324 set_fs(KERNEL_DS);
74325 err = sys_clock_gettime(which_clock,
74326- (struct timespec __user *) &ts);
74327+ (struct timespec __force_user *) &ts);
74328 set_fs(oldfs);
74329 if (!err && put_compat_timespec(&ts, tp))
74330 return -EFAULT;
74331@@ -786,7 +787,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
74332
74333 oldfs = get_fs();
74334 set_fs(KERNEL_DS);
74335- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
74336+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
74337 set_fs(oldfs);
74338
74339 err = compat_put_timex(utp, &txc);
74340@@ -806,7 +807,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
74341 oldfs = get_fs();
74342 set_fs(KERNEL_DS);
74343 err = sys_clock_getres(which_clock,
74344- (struct timespec __user *) &ts);
74345+ (struct timespec __force_user *) &ts);
74346 set_fs(oldfs);
74347 if (!err && tp && put_compat_timespec(&ts, tp))
74348 return -EFAULT;
74349@@ -818,9 +819,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
74350 long err;
74351 mm_segment_t oldfs;
74352 struct timespec tu;
74353- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
74354+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
74355
74356- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
74357+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
74358 oldfs = get_fs();
74359 set_fs(KERNEL_DS);
74360 err = clock_nanosleep_restart(restart);
74361@@ -852,8 +853,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
74362 oldfs = get_fs();
74363 set_fs(KERNEL_DS);
74364 err = sys_clock_nanosleep(which_clock, flags,
74365- (struct timespec __user *) &in,
74366- (struct timespec __user *) &out);
74367+ (struct timespec __force_user *) &in,
74368+ (struct timespec __force_user *) &out);
74369 set_fs(oldfs);
74370
74371 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
74372diff --git a/kernel/configs.c b/kernel/configs.c
74373index 42e8fa0..9e7406b 100644
74374--- a/kernel/configs.c
74375+++ b/kernel/configs.c
74376@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
74377 struct proc_dir_entry *entry;
74378
74379 /* create the current config file */
74380+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
74381+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
74382+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
74383+ &ikconfig_file_ops);
74384+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
74385+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
74386+ &ikconfig_file_ops);
74387+#endif
74388+#else
74389 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
74390 &ikconfig_file_ops);
74391+#endif
74392+
74393 if (!entry)
74394 return -ENOMEM;
74395
74396diff --git a/kernel/cred.c b/kernel/cred.c
74397index e0573a4..3874e41 100644
74398--- a/kernel/cred.c
74399+++ b/kernel/cred.c
74400@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
74401 validate_creds(cred);
74402 alter_cred_subscribers(cred, -1);
74403 put_cred(cred);
74404+
74405+#ifdef CONFIG_GRKERNSEC_SETXID
74406+ cred = (struct cred *) tsk->delayed_cred;
74407+ if (cred != NULL) {
74408+ tsk->delayed_cred = NULL;
74409+ validate_creds(cred);
74410+ alter_cred_subscribers(cred, -1);
74411+ put_cred(cred);
74412+ }
74413+#endif
74414 }
74415
74416 /**
74417@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
74418 * Always returns 0 thus allowing this function to be tail-called at the end
74419 * of, say, sys_setgid().
74420 */
74421-int commit_creds(struct cred *new)
74422+static int __commit_creds(struct cred *new)
74423 {
74424 struct task_struct *task = current;
74425 const struct cred *old = task->real_cred;
74426@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
74427
74428 get_cred(new); /* we will require a ref for the subj creds too */
74429
74430+ gr_set_role_label(task, new->uid, new->gid);
74431+
74432 /* dumpability changes */
74433 if (!uid_eq(old->euid, new->euid) ||
74434 !gid_eq(old->egid, new->egid) ||
74435@@ -479,6 +491,102 @@ int commit_creds(struct cred *new)
74436 put_cred(old);
74437 return 0;
74438 }
74439+#ifdef CONFIG_GRKERNSEC_SETXID
74440+extern int set_user(struct cred *new);
74441+
74442+void gr_delayed_cred_worker(void)
74443+{
74444+ const struct cred *new = current->delayed_cred;
74445+ struct cred *ncred;
74446+
74447+ current->delayed_cred = NULL;
74448+
74449+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
74450+ // from doing get_cred on it when queueing this
74451+ put_cred(new);
74452+ return;
74453+ } else if (new == NULL)
74454+ return;
74455+
74456+ ncred = prepare_creds();
74457+ if (!ncred)
74458+ goto die;
74459+ // uids
74460+ ncred->uid = new->uid;
74461+ ncred->euid = new->euid;
74462+ ncred->suid = new->suid;
74463+ ncred->fsuid = new->fsuid;
74464+ // gids
74465+ ncred->gid = new->gid;
74466+ ncred->egid = new->egid;
74467+ ncred->sgid = new->sgid;
74468+ ncred->fsgid = new->fsgid;
74469+ // groups
74470+ if (set_groups(ncred, new->group_info) < 0) {
74471+ abort_creds(ncred);
74472+ goto die;
74473+ }
74474+ // caps
74475+ ncred->securebits = new->securebits;
74476+ ncred->cap_inheritable = new->cap_inheritable;
74477+ ncred->cap_permitted = new->cap_permitted;
74478+ ncred->cap_effective = new->cap_effective;
74479+ ncred->cap_bset = new->cap_bset;
74480+
74481+ if (set_user(ncred)) {
74482+ abort_creds(ncred);
74483+ goto die;
74484+ }
74485+
74486+ // from doing get_cred on it when queueing this
74487+ put_cred(new);
74488+
74489+ __commit_creds(ncred);
74490+ return;
74491+die:
74492+ // from doing get_cred on it when queueing this
74493+ put_cred(new);
74494+ do_group_exit(SIGKILL);
74495+}
74496+#endif
74497+
74498+int commit_creds(struct cred *new)
74499+{
74500+#ifdef CONFIG_GRKERNSEC_SETXID
74501+ int ret;
74502+ int schedule_it = 0;
74503+ struct task_struct *t;
74504+
74505+ /* we won't get called with tasklist_lock held for writing
74506+ and interrupts disabled as the cred struct in that case is
74507+ init_cred
74508+ */
74509+ if (grsec_enable_setxid && !current_is_single_threaded() &&
74510+ uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
74511+ !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
74512+ schedule_it = 1;
74513+ }
74514+ ret = __commit_creds(new);
74515+ if (schedule_it) {
74516+ rcu_read_lock();
74517+ read_lock(&tasklist_lock);
74518+ for (t = next_thread(current); t != current;
74519+ t = next_thread(t)) {
74520+ if (t->delayed_cred == NULL) {
74521+ t->delayed_cred = get_cred(new);
74522+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
74523+ set_tsk_need_resched(t);
74524+ }
74525+ }
74526+ read_unlock(&tasklist_lock);
74527+ rcu_read_unlock();
74528+ }
74529+ return ret;
74530+#else
74531+ return __commit_creds(new);
74532+#endif
74533+}
74534+
74535 EXPORT_SYMBOL(commit_creds);
74536
74537 /**
74538diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
74539index 9a61738..c5c8f3a 100644
74540--- a/kernel/debug/debug_core.c
74541+++ b/kernel/debug/debug_core.c
74542@@ -122,7 +122,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
74543 */
74544 static atomic_t masters_in_kgdb;
74545 static atomic_t slaves_in_kgdb;
74546-static atomic_t kgdb_break_tasklet_var;
74547+static atomic_unchecked_t kgdb_break_tasklet_var;
74548 atomic_t kgdb_setting_breakpoint;
74549
74550 struct task_struct *kgdb_usethread;
74551@@ -132,7 +132,7 @@ int kgdb_single_step;
74552 static pid_t kgdb_sstep_pid;
74553
74554 /* to keep track of the CPU which is doing the single stepping*/
74555-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
74556+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
74557
74558 /*
74559 * If you are debugging a problem where roundup (the collection of
74560@@ -540,7 +540,7 @@ return_normal:
74561 * kernel will only try for the value of sstep_tries before
74562 * giving up and continuing on.
74563 */
74564- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
74565+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
74566 (kgdb_info[cpu].task &&
74567 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
74568 atomic_set(&kgdb_active, -1);
74569@@ -634,8 +634,8 @@ cpu_master_loop:
74570 }
74571
74572 kgdb_restore:
74573- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
74574- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
74575+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
74576+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
74577 if (kgdb_info[sstep_cpu].task)
74578 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
74579 else
74580@@ -887,18 +887,18 @@ static void kgdb_unregister_callbacks(void)
74581 static void kgdb_tasklet_bpt(unsigned long ing)
74582 {
74583 kgdb_breakpoint();
74584- atomic_set(&kgdb_break_tasklet_var, 0);
74585+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
74586 }
74587
74588 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
74589
74590 void kgdb_schedule_breakpoint(void)
74591 {
74592- if (atomic_read(&kgdb_break_tasklet_var) ||
74593+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
74594 atomic_read(&kgdb_active) != -1 ||
74595 atomic_read(&kgdb_setting_breakpoint))
74596 return;
74597- atomic_inc(&kgdb_break_tasklet_var);
74598+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
74599 tasklet_schedule(&kgdb_tasklet_breakpoint);
74600 }
74601 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
74602diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
74603index 8875254..7cf4928 100644
74604--- a/kernel/debug/kdb/kdb_main.c
74605+++ b/kernel/debug/kdb/kdb_main.c
74606@@ -1974,7 +1974,7 @@ static int kdb_lsmod(int argc, const char **argv)
74607 continue;
74608
74609 kdb_printf("%-20s%8u 0x%p ", mod->name,
74610- mod->core_size, (void *)mod);
74611+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
74612 #ifdef CONFIG_MODULE_UNLOAD
74613 kdb_printf("%4ld ", module_refcount(mod));
74614 #endif
74615@@ -1984,7 +1984,7 @@ static int kdb_lsmod(int argc, const char **argv)
74616 kdb_printf(" (Loading)");
74617 else
74618 kdb_printf(" (Live)");
74619- kdb_printf(" 0x%p", mod->module_core);
74620+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
74621
74622 #ifdef CONFIG_MODULE_UNLOAD
74623 {
74624diff --git a/kernel/events/core.c b/kernel/events/core.c
74625index 7b6646a..3cb1135 100644
74626--- a/kernel/events/core.c
74627+++ b/kernel/events/core.c
74628@@ -182,7 +182,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
74629 return 0;
74630 }
74631
74632-static atomic64_t perf_event_id;
74633+static atomic64_unchecked_t perf_event_id;
74634
74635 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
74636 enum event_type_t event_type);
74637@@ -2677,7 +2677,7 @@ static void __perf_event_read(void *info)
74638
74639 static inline u64 perf_event_count(struct perf_event *event)
74640 {
74641- return local64_read(&event->count) + atomic64_read(&event->child_count);
74642+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
74643 }
74644
74645 static u64 perf_event_read(struct perf_event *event)
74646@@ -3007,9 +3007,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
74647 mutex_lock(&event->child_mutex);
74648 total += perf_event_read(event);
74649 *enabled += event->total_time_enabled +
74650- atomic64_read(&event->child_total_time_enabled);
74651+ atomic64_read_unchecked(&event->child_total_time_enabled);
74652 *running += event->total_time_running +
74653- atomic64_read(&event->child_total_time_running);
74654+ atomic64_read_unchecked(&event->child_total_time_running);
74655
74656 list_for_each_entry(child, &event->child_list, child_list) {
74657 total += perf_event_read(child);
74658@@ -3412,10 +3412,10 @@ void perf_event_update_userpage(struct perf_event *event)
74659 userpg->offset -= local64_read(&event->hw.prev_count);
74660
74661 userpg->time_enabled = enabled +
74662- atomic64_read(&event->child_total_time_enabled);
74663+ atomic64_read_unchecked(&event->child_total_time_enabled);
74664
74665 userpg->time_running = running +
74666- atomic64_read(&event->child_total_time_running);
74667+ atomic64_read_unchecked(&event->child_total_time_running);
74668
74669 arch_perf_update_userpage(userpg, now);
74670
74671@@ -3974,11 +3974,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
74672 values[n++] = perf_event_count(event);
74673 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
74674 values[n++] = enabled +
74675- atomic64_read(&event->child_total_time_enabled);
74676+ atomic64_read_unchecked(&event->child_total_time_enabled);
74677 }
74678 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
74679 values[n++] = running +
74680- atomic64_read(&event->child_total_time_running);
74681+ atomic64_read_unchecked(&event->child_total_time_running);
74682 }
74683 if (read_format & PERF_FORMAT_ID)
74684 values[n++] = primary_event_id(event);
74685@@ -4721,12 +4721,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
74686 * need to add enough zero bytes after the string to handle
74687 * the 64bit alignment we do later.
74688 */
74689- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
74690+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
74691 if (!buf) {
74692 name = strncpy(tmp, "//enomem", sizeof(tmp));
74693 goto got_name;
74694 }
74695- name = d_path(&file->f_path, buf, PATH_MAX);
74696+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
74697 if (IS_ERR(name)) {
74698 name = strncpy(tmp, "//toolong", sizeof(tmp));
74699 goto got_name;
74700@@ -6165,7 +6165,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
74701 event->parent = parent_event;
74702
74703 event->ns = get_pid_ns(task_active_pid_ns(current));
74704- event->id = atomic64_inc_return(&perf_event_id);
74705+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
74706
74707 event->state = PERF_EVENT_STATE_INACTIVE;
74708
74709@@ -6790,10 +6790,10 @@ static void sync_child_event(struct perf_event *child_event,
74710 /*
74711 * Add back the child's count to the parent's count:
74712 */
74713- atomic64_add(child_val, &parent_event->child_count);
74714- atomic64_add(child_event->total_time_enabled,
74715+ atomic64_add_unchecked(child_val, &parent_event->child_count);
74716+ atomic64_add_unchecked(child_event->total_time_enabled,
74717 &parent_event->child_total_time_enabled);
74718- atomic64_add(child_event->total_time_running,
74719+ atomic64_add_unchecked(child_event->total_time_running,
74720 &parent_event->child_total_time_running);
74721
74722 /*
74723diff --git a/kernel/exit.c b/kernel/exit.c
74724index b4df219..f13c02d 100644
74725--- a/kernel/exit.c
74726+++ b/kernel/exit.c
74727@@ -170,6 +170,10 @@ void release_task(struct task_struct * p)
74728 struct task_struct *leader;
74729 int zap_leader;
74730 repeat:
74731+#ifdef CONFIG_NET
74732+ gr_del_task_from_ip_table(p);
74733+#endif
74734+
74735 /* don't need to get the RCU readlock here - the process is dead and
74736 * can't be modifying its own credentials. But shut RCU-lockdep up */
74737 rcu_read_lock();
74738@@ -338,7 +342,7 @@ int allow_signal(int sig)
74739 * know it'll be handled, so that they don't get converted to
74740 * SIGKILL or just silently dropped.
74741 */
74742- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
74743+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
74744 recalc_sigpending();
74745 spin_unlock_irq(&current->sighand->siglock);
74746 return 0;
74747@@ -708,6 +712,8 @@ void do_exit(long code)
74748 struct task_struct *tsk = current;
74749 int group_dead;
74750
74751+ set_fs(USER_DS);
74752+
74753 profile_task_exit(tsk);
74754
74755 WARN_ON(blk_needs_flush_plug(tsk));
74756@@ -724,7 +730,6 @@ void do_exit(long code)
74757 * mm_release()->clear_child_tid() from writing to a user-controlled
74758 * kernel address.
74759 */
74760- set_fs(USER_DS);
74761
74762 ptrace_event(PTRACE_EVENT_EXIT, code);
74763
74764@@ -783,6 +788,9 @@ void do_exit(long code)
74765 tsk->exit_code = code;
74766 taskstats_exit(tsk, group_dead);
74767
74768+ gr_acl_handle_psacct(tsk, code);
74769+ gr_acl_handle_exit();
74770+
74771 exit_mm(tsk);
74772
74773 if (group_dead)
74774@@ -903,7 +911,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
74775 * Take down every thread in the group. This is called by fatal signals
74776 * as well as by sys_exit_group (below).
74777 */
74778-void
74779+__noreturn void
74780 do_group_exit(int exit_code)
74781 {
74782 struct signal_struct *sig = current->signal;
74783diff --git a/kernel/fork.c b/kernel/fork.c
74784index 5630e52..0cee608 100644
74785--- a/kernel/fork.c
74786+++ b/kernel/fork.c
74787@@ -318,7 +318,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
74788 *stackend = STACK_END_MAGIC; /* for overflow detection */
74789
74790 #ifdef CONFIG_CC_STACKPROTECTOR
74791- tsk->stack_canary = get_random_int();
74792+ tsk->stack_canary = pax_get_random_long();
74793 #endif
74794
74795 /*
74796@@ -344,13 +344,81 @@ free_tsk:
74797 }
74798
74799 #ifdef CONFIG_MMU
74800+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
74801+{
74802+ struct vm_area_struct *tmp;
74803+ unsigned long charge;
74804+ struct mempolicy *pol;
74805+ struct file *file;
74806+
74807+ charge = 0;
74808+ if (mpnt->vm_flags & VM_ACCOUNT) {
74809+ unsigned long len = vma_pages(mpnt);
74810+
74811+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
74812+ goto fail_nomem;
74813+ charge = len;
74814+ }
74815+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
74816+ if (!tmp)
74817+ goto fail_nomem;
74818+ *tmp = *mpnt;
74819+ tmp->vm_mm = mm;
74820+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
74821+ pol = mpol_dup(vma_policy(mpnt));
74822+ if (IS_ERR(pol))
74823+ goto fail_nomem_policy;
74824+ vma_set_policy(tmp, pol);
74825+ if (anon_vma_fork(tmp, mpnt))
74826+ goto fail_nomem_anon_vma_fork;
74827+ tmp->vm_flags &= ~VM_LOCKED;
74828+ tmp->vm_next = tmp->vm_prev = NULL;
74829+ tmp->vm_mirror = NULL;
74830+ file = tmp->vm_file;
74831+ if (file) {
74832+ struct inode *inode = file->f_path.dentry->d_inode;
74833+ struct address_space *mapping = file->f_mapping;
74834+
74835+ get_file(file);
74836+ if (tmp->vm_flags & VM_DENYWRITE)
74837+ atomic_dec(&inode->i_writecount);
74838+ mutex_lock(&mapping->i_mmap_mutex);
74839+ if (tmp->vm_flags & VM_SHARED)
74840+ mapping->i_mmap_writable++;
74841+ flush_dcache_mmap_lock(mapping);
74842+ /* insert tmp into the share list, just after mpnt */
74843+ if (unlikely(tmp->vm_flags & VM_NONLINEAR))
74844+ vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
74845+ else
74846+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
74847+ flush_dcache_mmap_unlock(mapping);
74848+ mutex_unlock(&mapping->i_mmap_mutex);
74849+ }
74850+
74851+ /*
74852+ * Clear hugetlb-related page reserves for children. This only
74853+ * affects MAP_PRIVATE mappings. Faults generated by the child
74854+ * are not guaranteed to succeed, even if read-only
74855+ */
74856+ if (is_vm_hugetlb_page(tmp))
74857+ reset_vma_resv_huge_pages(tmp);
74858+
74859+ return tmp;
74860+
74861+fail_nomem_anon_vma_fork:
74862+ mpol_put(pol);
74863+fail_nomem_policy:
74864+ kmem_cache_free(vm_area_cachep, tmp);
74865+fail_nomem:
74866+ vm_unacct_memory(charge);
74867+ return NULL;
74868+}
74869+
74870 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
74871 {
74872 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
74873 struct rb_node **rb_link, *rb_parent;
74874 int retval;
74875- unsigned long charge;
74876- struct mempolicy *pol;
74877
74878 uprobe_start_dup_mmap();
74879 down_write(&oldmm->mmap_sem);
74880@@ -364,8 +432,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
74881 mm->locked_vm = 0;
74882 mm->mmap = NULL;
74883 mm->mmap_cache = NULL;
74884- mm->free_area_cache = oldmm->mmap_base;
74885- mm->cached_hole_size = ~0UL;
74886+ mm->free_area_cache = oldmm->free_area_cache;
74887+ mm->cached_hole_size = oldmm->cached_hole_size;
74888 mm->map_count = 0;
74889 cpumask_clear(mm_cpumask(mm));
74890 mm->mm_rb = RB_ROOT;
74891@@ -381,57 +449,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
74892
74893 prev = NULL;
74894 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
74895- struct file *file;
74896-
74897 if (mpnt->vm_flags & VM_DONTCOPY) {
74898 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
74899 -vma_pages(mpnt));
74900 continue;
74901 }
74902- charge = 0;
74903- if (mpnt->vm_flags & VM_ACCOUNT) {
74904- unsigned long len = vma_pages(mpnt);
74905-
74906- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
74907- goto fail_nomem;
74908- charge = len;
74909- }
74910- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
74911- if (!tmp)
74912- goto fail_nomem;
74913- *tmp = *mpnt;
74914- INIT_LIST_HEAD(&tmp->anon_vma_chain);
74915- pol = mpol_dup(vma_policy(mpnt));
74916- retval = PTR_ERR(pol);
74917- if (IS_ERR(pol))
74918- goto fail_nomem_policy;
74919- vma_set_policy(tmp, pol);
74920- tmp->vm_mm = mm;
74921- if (anon_vma_fork(tmp, mpnt))
74922- goto fail_nomem_anon_vma_fork;
74923- tmp->vm_flags &= ~VM_LOCKED;
74924- tmp->vm_next = tmp->vm_prev = NULL;
74925- file = tmp->vm_file;
74926- if (file) {
74927- struct inode *inode = file->f_path.dentry->d_inode;
74928- struct address_space *mapping = file->f_mapping;
74929-
74930- get_file(file);
74931- if (tmp->vm_flags & VM_DENYWRITE)
74932- atomic_dec(&inode->i_writecount);
74933- mutex_lock(&mapping->i_mmap_mutex);
74934- if (tmp->vm_flags & VM_SHARED)
74935- mapping->i_mmap_writable++;
74936- flush_dcache_mmap_lock(mapping);
74937- /* insert tmp into the share list, just after mpnt */
74938- if (unlikely(tmp->vm_flags & VM_NONLINEAR))
74939- vma_nonlinear_insert(tmp,
74940- &mapping->i_mmap_nonlinear);
74941- else
74942- vma_interval_tree_insert_after(tmp, mpnt,
74943- &mapping->i_mmap);
74944- flush_dcache_mmap_unlock(mapping);
74945- mutex_unlock(&mapping->i_mmap_mutex);
74946+ tmp = dup_vma(mm, oldmm, mpnt);
74947+ if (!tmp) {
74948+ retval = -ENOMEM;
74949+ goto out;
74950 }
74951
74952 /*
74953@@ -463,6 +489,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
74954 if (retval)
74955 goto out;
74956 }
74957+
74958+#ifdef CONFIG_PAX_SEGMEXEC
74959+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
74960+ struct vm_area_struct *mpnt_m;
74961+
74962+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
74963+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
74964+
74965+ if (!mpnt->vm_mirror)
74966+ continue;
74967+
74968+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
74969+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
74970+ mpnt->vm_mirror = mpnt_m;
74971+ } else {
74972+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
74973+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
74974+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
74975+ mpnt->vm_mirror->vm_mirror = mpnt;
74976+ }
74977+ }
74978+ BUG_ON(mpnt_m);
74979+ }
74980+#endif
74981+
74982 /* a new mm has just been created */
74983 arch_dup_mmap(oldmm, mm);
74984 retval = 0;
74985@@ -472,14 +523,6 @@ out:
74986 up_write(&oldmm->mmap_sem);
74987 uprobe_end_dup_mmap();
74988 return retval;
74989-fail_nomem_anon_vma_fork:
74990- mpol_put(pol);
74991-fail_nomem_policy:
74992- kmem_cache_free(vm_area_cachep, tmp);
74993-fail_nomem:
74994- retval = -ENOMEM;
74995- vm_unacct_memory(charge);
74996- goto out;
74997 }
74998
74999 static inline int mm_alloc_pgd(struct mm_struct *mm)
75000@@ -694,8 +737,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
75001 return ERR_PTR(err);
75002
75003 mm = get_task_mm(task);
75004- if (mm && mm != current->mm &&
75005- !ptrace_may_access(task, mode)) {
75006+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
75007+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
75008 mmput(mm);
75009 mm = ERR_PTR(-EACCES);
75010 }
75011@@ -917,13 +960,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
75012 spin_unlock(&fs->lock);
75013 return -EAGAIN;
75014 }
75015- fs->users++;
75016+ atomic_inc(&fs->users);
75017 spin_unlock(&fs->lock);
75018 return 0;
75019 }
75020 tsk->fs = copy_fs_struct(fs);
75021 if (!tsk->fs)
75022 return -ENOMEM;
75023+ /* Carry through gr_chroot_dentry and is_chrooted instead
75024+ of recomputing it here. Already copied when the task struct
75025+ is duplicated. This allows pivot_root to not be treated as
75026+ a chroot
75027+ */
75028+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
75029+
75030 return 0;
75031 }
75032
75033@@ -1196,6 +1246,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
75034 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
75035 #endif
75036 retval = -EAGAIN;
75037+
75038+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
75039+
75040 if (atomic_read(&p->real_cred->user->processes) >=
75041 task_rlimit(p, RLIMIT_NPROC)) {
75042 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
75043@@ -1435,6 +1488,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
75044 goto bad_fork_free_pid;
75045 }
75046
75047+ /* synchronizes with gr_set_acls()
75048+ we need to call this past the point of no return for fork()
75049+ */
75050+ gr_copy_label(p);
75051+
75052 if (clone_flags & CLONE_THREAD) {
75053 current->signal->nr_threads++;
75054 atomic_inc(&current->signal->live);
75055@@ -1518,6 +1576,8 @@ bad_fork_cleanup_count:
75056 bad_fork_free:
75057 free_task(p);
75058 fork_out:
75059+ gr_log_forkfail(retval);
75060+
75061 return ERR_PTR(retval);
75062 }
75063
75064@@ -1568,6 +1628,23 @@ long do_fork(unsigned long clone_flags,
75065 return -EINVAL;
75066 }
75067
75068+#ifdef CONFIG_GRKERNSEC
75069+ if (clone_flags & CLONE_NEWUSER) {
75070+ /*
75071+ * This doesn't really inspire confidence:
75072+ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
75073+ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
75074+ * Increases kernel attack surface in areas developers
75075+ * previously cared little about ("low importance due
75076+ * to requiring "root" capability")
75077+ * To be removed when this code receives *proper* review
75078+ */
75079+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
75080+ !capable(CAP_SETGID))
75081+ return -EPERM;
75082+ }
75083+#endif
75084+
75085 /*
75086 * Determine whether and which event to report to ptracer. When
75087 * called from kernel_thread or CLONE_UNTRACED is explicitly
75088@@ -1602,6 +1679,8 @@ long do_fork(unsigned long clone_flags,
75089 if (clone_flags & CLONE_PARENT_SETTID)
75090 put_user(nr, parent_tidptr);
75091
75092+ gr_handle_brute_check();
75093+
75094 if (clone_flags & CLONE_VFORK) {
75095 p->vfork_done = &vfork;
75096 init_completion(&vfork);
75097@@ -1755,7 +1834,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
75098 return 0;
75099
75100 /* don't need lock here; in the worst case we'll do useless copy */
75101- if (fs->users == 1)
75102+ if (atomic_read(&fs->users) == 1)
75103 return 0;
75104
75105 *new_fsp = copy_fs_struct(fs);
75106@@ -1869,7 +1948,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
75107 fs = current->fs;
75108 spin_lock(&fs->lock);
75109 current->fs = new_fs;
75110- if (--fs->users)
75111+ gr_set_chroot_entries(current, &current->fs->root);
75112+ if (atomic_dec_return(&fs->users))
75113 new_fs = NULL;
75114 else
75115 new_fs = fs;
75116diff --git a/kernel/futex.c b/kernel/futex.c
75117index 8879430..31696f1 100644
75118--- a/kernel/futex.c
75119+++ b/kernel/futex.c
75120@@ -54,6 +54,7 @@
75121 #include <linux/mount.h>
75122 #include <linux/pagemap.h>
75123 #include <linux/syscalls.h>
75124+#include <linux/ptrace.h>
75125 #include <linux/signal.h>
75126 #include <linux/export.h>
75127 #include <linux/magic.h>
75128@@ -239,6 +240,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
75129 struct page *page, *page_head;
75130 int err, ro = 0;
75131
75132+#ifdef CONFIG_PAX_SEGMEXEC
75133+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
75134+ return -EFAULT;
75135+#endif
75136+
75137 /*
75138 * The futex address must be "naturally" aligned.
75139 */
75140@@ -2731,6 +2737,7 @@ static int __init futex_init(void)
75141 {
75142 u32 curval;
75143 int i;
75144+ mm_segment_t oldfs;
75145
75146 /*
75147 * This will fail and we want it. Some arch implementations do
75148@@ -2742,8 +2749,11 @@ static int __init futex_init(void)
75149 * implementation, the non-functional ones will return
75150 * -ENOSYS.
75151 */
75152+ oldfs = get_fs();
75153+ set_fs(USER_DS);
75154 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
75155 futex_cmpxchg_enabled = 1;
75156+ set_fs(oldfs);
75157
75158 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
75159 plist_head_init(&futex_queues[i].chain);
75160diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
75161index a9642d5..51eb98c 100644
75162--- a/kernel/futex_compat.c
75163+++ b/kernel/futex_compat.c
75164@@ -31,7 +31,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
75165 return 0;
75166 }
75167
75168-static void __user *futex_uaddr(struct robust_list __user *entry,
75169+static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
75170 compat_long_t futex_offset)
75171 {
75172 compat_uptr_t base = ptr_to_compat(entry);
75173diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
75174index 9b22d03..6295b62 100644
75175--- a/kernel/gcov/base.c
75176+++ b/kernel/gcov/base.c
75177@@ -102,11 +102,6 @@ void gcov_enable_events(void)
75178 }
75179
75180 #ifdef CONFIG_MODULES
75181-static inline int within(void *addr, void *start, unsigned long size)
75182-{
75183- return ((addr >= start) && (addr < start + size));
75184-}
75185-
75186 /* Update list and generate events when modules are unloaded. */
75187 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
75188 void *data)
75189@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
75190 prev = NULL;
75191 /* Remove entries located in module from linked list. */
75192 for (info = gcov_info_head; info; info = info->next) {
75193- if (within(info, mod->module_core, mod->core_size)) {
75194+ if (within_module_core_rw((unsigned long)info, mod)) {
75195 if (prev)
75196 prev->next = info->next;
75197 else
75198diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
75199index cdd5607..c3fc919 100644
75200--- a/kernel/hrtimer.c
75201+++ b/kernel/hrtimer.c
75202@@ -1407,7 +1407,7 @@ void hrtimer_peek_ahead_timers(void)
75203 local_irq_restore(flags);
75204 }
75205
75206-static void run_hrtimer_softirq(struct softirq_action *h)
75207+static void run_hrtimer_softirq(void)
75208 {
75209 struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
75210
75211@@ -1751,7 +1751,7 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
75212 return NOTIFY_OK;
75213 }
75214
75215-static struct notifier_block __cpuinitdata hrtimers_nb = {
75216+static struct notifier_block hrtimers_nb = {
75217 .notifier_call = hrtimer_cpu_notify,
75218 };
75219
75220diff --git a/kernel/jump_label.c b/kernel/jump_label.c
75221index 60f48fa..7f3a770 100644
75222--- a/kernel/jump_label.c
75223+++ b/kernel/jump_label.c
75224@@ -13,6 +13,7 @@
75225 #include <linux/sort.h>
75226 #include <linux/err.h>
75227 #include <linux/static_key.h>
75228+#include <linux/mm.h>
75229
75230 #ifdef HAVE_JUMP_LABEL
75231
75232@@ -50,7 +51,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
75233
75234 size = (((unsigned long)stop - (unsigned long)start)
75235 / sizeof(struct jump_entry));
75236+ pax_open_kernel();
75237 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
75238+ pax_close_kernel();
75239 }
75240
75241 static void jump_label_update(struct static_key *key, int enable);
75242@@ -357,10 +360,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
75243 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
75244 struct jump_entry *iter;
75245
75246+ pax_open_kernel();
75247 for (iter = iter_start; iter < iter_stop; iter++) {
75248 if (within_module_init(iter->code, mod))
75249 iter->code = 0;
75250 }
75251+ pax_close_kernel();
75252 }
75253
75254 static int
75255diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
75256index 2169fee..706ccca 100644
75257--- a/kernel/kallsyms.c
75258+++ b/kernel/kallsyms.c
75259@@ -11,6 +11,9 @@
75260 * Changed the compression method from stem compression to "table lookup"
75261 * compression (see scripts/kallsyms.c for a more complete description)
75262 */
75263+#ifdef CONFIG_GRKERNSEC_HIDESYM
75264+#define __INCLUDED_BY_HIDESYM 1
75265+#endif
75266 #include <linux/kallsyms.h>
75267 #include <linux/module.h>
75268 #include <linux/init.h>
75269@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
75270
75271 static inline int is_kernel_inittext(unsigned long addr)
75272 {
75273+ if (system_state != SYSTEM_BOOTING)
75274+ return 0;
75275+
75276 if (addr >= (unsigned long)_sinittext
75277 && addr <= (unsigned long)_einittext)
75278 return 1;
75279 return 0;
75280 }
75281
75282+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
75283+#ifdef CONFIG_MODULES
75284+static inline int is_module_text(unsigned long addr)
75285+{
75286+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
75287+ return 1;
75288+
75289+ addr = ktla_ktva(addr);
75290+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
75291+}
75292+#else
75293+static inline int is_module_text(unsigned long addr)
75294+{
75295+ return 0;
75296+}
75297+#endif
75298+#endif
75299+
75300 static inline int is_kernel_text(unsigned long addr)
75301 {
75302 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
75303@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
75304
75305 static inline int is_kernel(unsigned long addr)
75306 {
75307+
75308+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
75309+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
75310+ return 1;
75311+
75312+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
75313+#else
75314 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
75315+#endif
75316+
75317 return 1;
75318 return in_gate_area_no_mm(addr);
75319 }
75320
75321 static int is_ksym_addr(unsigned long addr)
75322 {
75323+
75324+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
75325+ if (is_module_text(addr))
75326+ return 0;
75327+#endif
75328+
75329 if (all_var)
75330 return is_kernel(addr);
75331
75332@@ -470,7 +509,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
75333
75334 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
75335 {
75336- iter->name[0] = '\0';
75337 iter->nameoff = get_symbol_offset(new_pos);
75338 iter->pos = new_pos;
75339 }
75340@@ -518,6 +556,11 @@ static int s_show(struct seq_file *m, void *p)
75341 {
75342 struct kallsym_iter *iter = m->private;
75343
75344+#ifdef CONFIG_GRKERNSEC_HIDESYM
75345+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
75346+ return 0;
75347+#endif
75348+
75349 /* Some debugging symbols have no name. Ignore them. */
75350 if (!iter->name[0])
75351 return 0;
75352@@ -531,6 +574,7 @@ static int s_show(struct seq_file *m, void *p)
75353 */
75354 type = iter->exported ? toupper(iter->type) :
75355 tolower(iter->type);
75356+
75357 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
75358 type, iter->name, iter->module_name);
75359 } else
75360@@ -556,7 +600,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
75361 struct kallsym_iter *iter;
75362 int ret;
75363
75364- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
75365+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
75366 if (!iter)
75367 return -ENOMEM;
75368 reset_iter(iter, 0);
75369diff --git a/kernel/kcmp.c b/kernel/kcmp.c
75370index e30ac0f..3528cac 100644
75371--- a/kernel/kcmp.c
75372+++ b/kernel/kcmp.c
75373@@ -99,6 +99,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
75374 struct task_struct *task1, *task2;
75375 int ret;
75376
75377+#ifdef CONFIG_GRKERNSEC
75378+ return -ENOSYS;
75379+#endif
75380+
75381 rcu_read_lock();
75382
75383 /*
75384diff --git a/kernel/kexec.c b/kernel/kexec.c
75385index 5e4bd78..00c5b91 100644
75386--- a/kernel/kexec.c
75387+++ b/kernel/kexec.c
75388@@ -1045,7 +1045,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
75389 unsigned long flags)
75390 {
75391 struct compat_kexec_segment in;
75392- struct kexec_segment out, __user *ksegments;
75393+ struct kexec_segment out;
75394+ struct kexec_segment __user *ksegments;
75395 unsigned long i, result;
75396
75397 /* Don't allow clients that don't understand the native
75398diff --git a/kernel/kmod.c b/kernel/kmod.c
75399index 0023a87..9c0c068 100644
75400--- a/kernel/kmod.c
75401+++ b/kernel/kmod.c
75402@@ -74,7 +74,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
75403 kfree(info->argv);
75404 }
75405
75406-static int call_modprobe(char *module_name, int wait)
75407+static int call_modprobe(char *module_name, char *module_param, int wait)
75408 {
75409 static char *envp[] = {
75410 "HOME=/",
75411@@ -83,7 +83,7 @@ static int call_modprobe(char *module_name, int wait)
75412 NULL
75413 };
75414
75415- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
75416+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
75417 if (!argv)
75418 goto out;
75419
75420@@ -95,7 +95,8 @@ static int call_modprobe(char *module_name, int wait)
75421 argv[1] = "-q";
75422 argv[2] = "--";
75423 argv[3] = module_name; /* check free_modprobe_argv() */
75424- argv[4] = NULL;
75425+ argv[4] = module_param;
75426+ argv[5] = NULL;
75427
75428 return call_usermodehelper_fns(modprobe_path, argv, envp,
75429 wait | UMH_KILLABLE, NULL, free_modprobe_argv, NULL);
75430@@ -120,9 +121,8 @@ out:
75431 * If module auto-loading support is disabled then this function
75432 * becomes a no-operation.
75433 */
75434-int __request_module(bool wait, const char *fmt, ...)
75435+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
75436 {
75437- va_list args;
75438 char module_name[MODULE_NAME_LEN];
75439 unsigned int max_modprobes;
75440 int ret;
75441@@ -130,9 +130,7 @@ int __request_module(bool wait, const char *fmt, ...)
75442 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
75443 static int kmod_loop_msg;
75444
75445- va_start(args, fmt);
75446- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
75447- va_end(args);
75448+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
75449 if (ret >= MODULE_NAME_LEN)
75450 return -ENAMETOOLONG;
75451
75452@@ -140,6 +138,20 @@ int __request_module(bool wait, const char *fmt, ...)
75453 if (ret)
75454 return ret;
75455
75456+#ifdef CONFIG_GRKERNSEC_MODHARDEN
75457+ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
75458+ /* hack to workaround consolekit/udisks stupidity */
75459+ read_lock(&tasklist_lock);
75460+ if (!strcmp(current->comm, "mount") &&
75461+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
75462+ read_unlock(&tasklist_lock);
75463+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
75464+ return -EPERM;
75465+ }
75466+ read_unlock(&tasklist_lock);
75467+ }
75468+#endif
75469+
75470 /* If modprobe needs a service that is in a module, we get a recursive
75471 * loop. Limit the number of running kmod threads to max_threads/2 or
75472 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
75473@@ -168,11 +180,52 @@ int __request_module(bool wait, const char *fmt, ...)
75474
75475 trace_module_request(module_name, wait, _RET_IP_);
75476
75477- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
75478+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
75479
75480 atomic_dec(&kmod_concurrent);
75481 return ret;
75482 }
75483+
75484+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
75485+{
75486+ va_list args;
75487+ int ret;
75488+
75489+ va_start(args, fmt);
75490+ ret = ____request_module(wait, module_param, fmt, args);
75491+ va_end(args);
75492+
75493+ return ret;
75494+}
75495+
75496+int __request_module(bool wait, const char *fmt, ...)
75497+{
75498+ va_list args;
75499+ int ret;
75500+
75501+#ifdef CONFIG_GRKERNSEC_MODHARDEN
75502+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
75503+ char module_param[MODULE_NAME_LEN];
75504+
75505+ memset(module_param, 0, sizeof(module_param));
75506+
75507+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
75508+
75509+ va_start(args, fmt);
75510+ ret = ____request_module(wait, module_param, fmt, args);
75511+ va_end(args);
75512+
75513+ return ret;
75514+ }
75515+#endif
75516+
75517+ va_start(args, fmt);
75518+ ret = ____request_module(wait, NULL, fmt, args);
75519+ va_end(args);
75520+
75521+ return ret;
75522+}
75523+
75524 EXPORT_SYMBOL(__request_module);
75525 #endif /* CONFIG_MODULES */
75526
75527@@ -283,7 +336,7 @@ static int wait_for_helper(void *data)
75528 *
75529 * Thus the __user pointer cast is valid here.
75530 */
75531- sys_wait4(pid, (int __user *)&ret, 0, NULL);
75532+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
75533
75534 /*
75535 * If ret is 0, either ____call_usermodehelper failed and the
75536@@ -635,7 +688,7 @@ EXPORT_SYMBOL(call_usermodehelper_fns);
75537 static int proc_cap_handler(struct ctl_table *table, int write,
75538 void __user *buffer, size_t *lenp, loff_t *ppos)
75539 {
75540- struct ctl_table t;
75541+ ctl_table_no_const t;
75542 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
75543 kernel_cap_t new_cap;
75544 int err, i;
75545diff --git a/kernel/kprobes.c b/kernel/kprobes.c
75546index 098f396..fe85ff1 100644
75547--- a/kernel/kprobes.c
75548+++ b/kernel/kprobes.c
75549@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
75550 * kernel image and loaded module images reside. This is required
75551 * so x86_64 can correctly handle the %rip-relative fixups.
75552 */
75553- kip->insns = module_alloc(PAGE_SIZE);
75554+ kip->insns = module_alloc_exec(PAGE_SIZE);
75555 if (!kip->insns) {
75556 kfree(kip);
75557 return NULL;
75558@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
75559 */
75560 if (!list_is_singular(&kip->list)) {
75561 list_del(&kip->list);
75562- module_free(NULL, kip->insns);
75563+ module_free_exec(NULL, kip->insns);
75564 kfree(kip);
75565 }
75566 return 1;
75567@@ -2063,7 +2063,7 @@ static int __init init_kprobes(void)
75568 {
75569 int i, err = 0;
75570 unsigned long offset = 0, size = 0;
75571- char *modname, namebuf[128];
75572+ char *modname, namebuf[KSYM_NAME_LEN];
75573 const char *symbol_name;
75574 void *addr;
75575 struct kprobe_blackpoint *kb;
75576@@ -2148,11 +2148,11 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
75577 kprobe_type = "k";
75578
75579 if (sym)
75580- seq_printf(pi, "%p %s %s+0x%x %s ",
75581+ seq_printf(pi, "%pK %s %s+0x%x %s ",
75582 p->addr, kprobe_type, sym, offset,
75583 (modname ? modname : " "));
75584 else
75585- seq_printf(pi, "%p %s %p ",
75586+ seq_printf(pi, "%pK %s %pK ",
75587 p->addr, kprobe_type, p->addr);
75588
75589 if (!pp)
75590@@ -2190,7 +2190,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
75591 const char *sym = NULL;
75592 unsigned int i = *(loff_t *) v;
75593 unsigned long offset = 0;
75594- char *modname, namebuf[128];
75595+ char *modname, namebuf[KSYM_NAME_LEN];
75596
75597 head = &kprobe_table[i];
75598 preempt_disable();
75599diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
75600index 6ada93c..dce7d5d 100644
75601--- a/kernel/ksysfs.c
75602+++ b/kernel/ksysfs.c
75603@@ -46,6 +46,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
75604 {
75605 if (count+1 > UEVENT_HELPER_PATH_LEN)
75606 return -ENOENT;
75607+ if (!capable(CAP_SYS_ADMIN))
75608+ return -EPERM;
75609 memcpy(uevent_helper, buf, count);
75610 uevent_helper[count] = '\0';
75611 if (count && uevent_helper[count-1] == '\n')
75612@@ -172,7 +174,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
75613 return count;
75614 }
75615
75616-static struct bin_attribute notes_attr = {
75617+static bin_attribute_no_const notes_attr __read_only = {
75618 .attr = {
75619 .name = "notes",
75620 .mode = S_IRUGO,
75621diff --git a/kernel/lockdep.c b/kernel/lockdep.c
75622index 7981e5b..7f2105c 100644
75623--- a/kernel/lockdep.c
75624+++ b/kernel/lockdep.c
75625@@ -590,6 +590,10 @@ static int static_obj(void *obj)
75626 end = (unsigned long) &_end,
75627 addr = (unsigned long) obj;
75628
75629+#ifdef CONFIG_PAX_KERNEXEC
75630+ start = ktla_ktva(start);
75631+#endif
75632+
75633 /*
75634 * static variable?
75635 */
75636@@ -730,6 +734,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
75637 if (!static_obj(lock->key)) {
75638 debug_locks_off();
75639 printk("INFO: trying to register non-static key.\n");
75640+ printk("lock:%pS key:%pS.\n", lock, lock->key);
75641 printk("the code is fine but needs lockdep annotation.\n");
75642 printk("turning off the locking correctness validator.\n");
75643 dump_stack();
75644@@ -3078,7 +3083,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
75645 if (!class)
75646 return 0;
75647 }
75648- atomic_inc((atomic_t *)&class->ops);
75649+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
75650 if (very_verbose(class)) {
75651 printk("\nacquire class [%p] %s", class->key, class->name);
75652 if (class->name_version > 1)
75653diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
75654index b2c71c5..7b88d63 100644
75655--- a/kernel/lockdep_proc.c
75656+++ b/kernel/lockdep_proc.c
75657@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
75658 return 0;
75659 }
75660
75661- seq_printf(m, "%p", class->key);
75662+ seq_printf(m, "%pK", class->key);
75663 #ifdef CONFIG_DEBUG_LOCKDEP
75664 seq_printf(m, " OPS:%8ld", class->ops);
75665 #endif
75666@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
75667
75668 list_for_each_entry(entry, &class->locks_after, entry) {
75669 if (entry->distance == 1) {
75670- seq_printf(m, " -> [%p] ", entry->class->key);
75671+ seq_printf(m, " -> [%pK] ", entry->class->key);
75672 print_name(m, entry->class);
75673 seq_puts(m, "\n");
75674 }
75675@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
75676 if (!class->key)
75677 continue;
75678
75679- seq_printf(m, "[%p] ", class->key);
75680+ seq_printf(m, "[%pK] ", class->key);
75681 print_name(m, class);
75682 seq_puts(m, "\n");
75683 }
75684@@ -495,7 +495,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
75685 if (!i)
75686 seq_line(m, '-', 40-namelen, namelen);
75687
75688- snprintf(ip, sizeof(ip), "[<%p>]",
75689+ snprintf(ip, sizeof(ip), "[<%pK>]",
75690 (void *)class->contention_point[i]);
75691 seq_printf(m, "%40s %14lu %29s %pS\n",
75692 name, stats->contention_point[i],
75693@@ -510,7 +510,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
75694 if (!i)
75695 seq_line(m, '-', 40-namelen, namelen);
75696
75697- snprintf(ip, sizeof(ip), "[<%p>]",
75698+ snprintf(ip, sizeof(ip), "[<%pK>]",
75699 (void *)class->contending_point[i]);
75700 seq_printf(m, "%40s %14lu %29s %pS\n",
75701 name, stats->contending_point[i],
75702diff --git a/kernel/module.c b/kernel/module.c
75703index eab0827..f488603 100644
75704--- a/kernel/module.c
75705+++ b/kernel/module.c
75706@@ -61,6 +61,7 @@
75707 #include <linux/pfn.h>
75708 #include <linux/bsearch.h>
75709 #include <linux/fips.h>
75710+#include <linux/grsecurity.h>
75711 #include <uapi/linux/module.h>
75712 #include "module-internal.h"
75713
75714@@ -156,7 +157,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
75715
75716 /* Bounds of module allocation, for speeding __module_address.
75717 * Protected by module_mutex. */
75718-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
75719+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
75720+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
75721
75722 int register_module_notifier(struct notifier_block * nb)
75723 {
75724@@ -322,7 +324,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
75725 return true;
75726
75727 list_for_each_entry_rcu(mod, &modules, list) {
75728- struct symsearch arr[] = {
75729+ struct symsearch modarr[] = {
75730 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
75731 NOT_GPL_ONLY, false },
75732 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
75733@@ -347,7 +349,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
75734 if (mod->state == MODULE_STATE_UNFORMED)
75735 continue;
75736
75737- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
75738+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
75739 return true;
75740 }
75741 return false;
75742@@ -484,7 +486,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
75743 static int percpu_modalloc(struct module *mod,
75744 unsigned long size, unsigned long align)
75745 {
75746- if (align > PAGE_SIZE) {
75747+ if (align-1 >= PAGE_SIZE) {
75748 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
75749 mod->name, align, PAGE_SIZE);
75750 align = PAGE_SIZE;
75751@@ -1088,7 +1090,7 @@ struct module_attribute module_uevent =
75752 static ssize_t show_coresize(struct module_attribute *mattr,
75753 struct module_kobject *mk, char *buffer)
75754 {
75755- return sprintf(buffer, "%u\n", mk->mod->core_size);
75756+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
75757 }
75758
75759 static struct module_attribute modinfo_coresize =
75760@@ -1097,7 +1099,7 @@ static struct module_attribute modinfo_coresize =
75761 static ssize_t show_initsize(struct module_attribute *mattr,
75762 struct module_kobject *mk, char *buffer)
75763 {
75764- return sprintf(buffer, "%u\n", mk->mod->init_size);
75765+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
75766 }
75767
75768 static struct module_attribute modinfo_initsize =
75769@@ -1311,7 +1313,7 @@ resolve_symbol_wait(struct module *mod,
75770 */
75771 #ifdef CONFIG_SYSFS
75772
75773-#ifdef CONFIG_KALLSYMS
75774+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
75775 static inline bool sect_empty(const Elf_Shdr *sect)
75776 {
75777 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
75778@@ -1451,7 +1453,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
75779 {
75780 unsigned int notes, loaded, i;
75781 struct module_notes_attrs *notes_attrs;
75782- struct bin_attribute *nattr;
75783+ bin_attribute_no_const *nattr;
75784
75785 /* failed to create section attributes, so can't create notes */
75786 if (!mod->sect_attrs)
75787@@ -1563,7 +1565,7 @@ static void del_usage_links(struct module *mod)
75788 static int module_add_modinfo_attrs(struct module *mod)
75789 {
75790 struct module_attribute *attr;
75791- struct module_attribute *temp_attr;
75792+ module_attribute_no_const *temp_attr;
75793 int error = 0;
75794 int i;
75795
75796@@ -1777,21 +1779,21 @@ static void set_section_ro_nx(void *base,
75797
75798 static void unset_module_core_ro_nx(struct module *mod)
75799 {
75800- set_page_attributes(mod->module_core + mod->core_text_size,
75801- mod->module_core + mod->core_size,
75802+ set_page_attributes(mod->module_core_rw,
75803+ mod->module_core_rw + mod->core_size_rw,
75804 set_memory_x);
75805- set_page_attributes(mod->module_core,
75806- mod->module_core + mod->core_ro_size,
75807+ set_page_attributes(mod->module_core_rx,
75808+ mod->module_core_rx + mod->core_size_rx,
75809 set_memory_rw);
75810 }
75811
75812 static void unset_module_init_ro_nx(struct module *mod)
75813 {
75814- set_page_attributes(mod->module_init + mod->init_text_size,
75815- mod->module_init + mod->init_size,
75816+ set_page_attributes(mod->module_init_rw,
75817+ mod->module_init_rw + mod->init_size_rw,
75818 set_memory_x);
75819- set_page_attributes(mod->module_init,
75820- mod->module_init + mod->init_ro_size,
75821+ set_page_attributes(mod->module_init_rx,
75822+ mod->module_init_rx + mod->init_size_rx,
75823 set_memory_rw);
75824 }
75825
75826@@ -1804,14 +1806,14 @@ void set_all_modules_text_rw(void)
75827 list_for_each_entry_rcu(mod, &modules, list) {
75828 if (mod->state == MODULE_STATE_UNFORMED)
75829 continue;
75830- if ((mod->module_core) && (mod->core_text_size)) {
75831- set_page_attributes(mod->module_core,
75832- mod->module_core + mod->core_text_size,
75833+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
75834+ set_page_attributes(mod->module_core_rx,
75835+ mod->module_core_rx + mod->core_size_rx,
75836 set_memory_rw);
75837 }
75838- if ((mod->module_init) && (mod->init_text_size)) {
75839- set_page_attributes(mod->module_init,
75840- mod->module_init + mod->init_text_size,
75841+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
75842+ set_page_attributes(mod->module_init_rx,
75843+ mod->module_init_rx + mod->init_size_rx,
75844 set_memory_rw);
75845 }
75846 }
75847@@ -1827,14 +1829,14 @@ void set_all_modules_text_ro(void)
75848 list_for_each_entry_rcu(mod, &modules, list) {
75849 if (mod->state == MODULE_STATE_UNFORMED)
75850 continue;
75851- if ((mod->module_core) && (mod->core_text_size)) {
75852- set_page_attributes(mod->module_core,
75853- mod->module_core + mod->core_text_size,
75854+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
75855+ set_page_attributes(mod->module_core_rx,
75856+ mod->module_core_rx + mod->core_size_rx,
75857 set_memory_ro);
75858 }
75859- if ((mod->module_init) && (mod->init_text_size)) {
75860- set_page_attributes(mod->module_init,
75861- mod->module_init + mod->init_text_size,
75862+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
75863+ set_page_attributes(mod->module_init_rx,
75864+ mod->module_init_rx + mod->init_size_rx,
75865 set_memory_ro);
75866 }
75867 }
75868@@ -1880,16 +1882,19 @@ static void free_module(struct module *mod)
75869
75870 /* This may be NULL, but that's OK */
75871 unset_module_init_ro_nx(mod);
75872- module_free(mod, mod->module_init);
75873+ module_free(mod, mod->module_init_rw);
75874+ module_free_exec(mod, mod->module_init_rx);
75875 kfree(mod->args);
75876 percpu_modfree(mod);
75877
75878 /* Free lock-classes: */
75879- lockdep_free_key_range(mod->module_core, mod->core_size);
75880+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
75881+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
75882
75883 /* Finally, free the core (containing the module structure) */
75884 unset_module_core_ro_nx(mod);
75885- module_free(mod, mod->module_core);
75886+ module_free_exec(mod, mod->module_core_rx);
75887+ module_free(mod, mod->module_core_rw);
75888
75889 #ifdef CONFIG_MPU
75890 update_protections(current->mm);
75891@@ -1959,9 +1964,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
75892 int ret = 0;
75893 const struct kernel_symbol *ksym;
75894
75895+#ifdef CONFIG_GRKERNSEC_MODHARDEN
75896+ int is_fs_load = 0;
75897+ int register_filesystem_found = 0;
75898+ char *p;
75899+
75900+ p = strstr(mod->args, "grsec_modharden_fs");
75901+ if (p) {
75902+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
75903+ /* copy \0 as well */
75904+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
75905+ is_fs_load = 1;
75906+ }
75907+#endif
75908+
75909 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
75910 const char *name = info->strtab + sym[i].st_name;
75911
75912+#ifdef CONFIG_GRKERNSEC_MODHARDEN
75913+ /* it's a real shame this will never get ripped and copied
75914+ upstream! ;(
75915+ */
75916+ if (is_fs_load && !strcmp(name, "register_filesystem"))
75917+ register_filesystem_found = 1;
75918+#endif
75919+
75920 switch (sym[i].st_shndx) {
75921 case SHN_COMMON:
75922 /* We compiled with -fno-common. These are not
75923@@ -1982,7 +2009,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
75924 ksym = resolve_symbol_wait(mod, info, name);
75925 /* Ok if resolved. */
75926 if (ksym && !IS_ERR(ksym)) {
75927+ pax_open_kernel();
75928 sym[i].st_value = ksym->value;
75929+ pax_close_kernel();
75930 break;
75931 }
75932
75933@@ -2001,11 +2030,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
75934 secbase = (unsigned long)mod_percpu(mod);
75935 else
75936 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
75937+ pax_open_kernel();
75938 sym[i].st_value += secbase;
75939+ pax_close_kernel();
75940 break;
75941 }
75942 }
75943
75944+#ifdef CONFIG_GRKERNSEC_MODHARDEN
75945+ if (is_fs_load && !register_filesystem_found) {
75946+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
75947+ ret = -EPERM;
75948+ }
75949+#endif
75950+
75951 return ret;
75952 }
75953
75954@@ -2089,22 +2127,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
75955 || s->sh_entsize != ~0UL
75956 || strstarts(sname, ".init"))
75957 continue;
75958- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
75959+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
75960+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
75961+ else
75962+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
75963 pr_debug("\t%s\n", sname);
75964 }
75965- switch (m) {
75966- case 0: /* executable */
75967- mod->core_size = debug_align(mod->core_size);
75968- mod->core_text_size = mod->core_size;
75969- break;
75970- case 1: /* RO: text and ro-data */
75971- mod->core_size = debug_align(mod->core_size);
75972- mod->core_ro_size = mod->core_size;
75973- break;
75974- case 3: /* whole core */
75975- mod->core_size = debug_align(mod->core_size);
75976- break;
75977- }
75978 }
75979
75980 pr_debug("Init section allocation order:\n");
75981@@ -2118,23 +2146,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
75982 || s->sh_entsize != ~0UL
75983 || !strstarts(sname, ".init"))
75984 continue;
75985- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
75986- | INIT_OFFSET_MASK);
75987+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
75988+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
75989+ else
75990+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
75991+ s->sh_entsize |= INIT_OFFSET_MASK;
75992 pr_debug("\t%s\n", sname);
75993 }
75994- switch (m) {
75995- case 0: /* executable */
75996- mod->init_size = debug_align(mod->init_size);
75997- mod->init_text_size = mod->init_size;
75998- break;
75999- case 1: /* RO: text and ro-data */
76000- mod->init_size = debug_align(mod->init_size);
76001- mod->init_ro_size = mod->init_size;
76002- break;
76003- case 3: /* whole init */
76004- mod->init_size = debug_align(mod->init_size);
76005- break;
76006- }
76007 }
76008 }
76009
76010@@ -2306,7 +2324,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
76011
76012 /* Put symbol section at end of init part of module. */
76013 symsect->sh_flags |= SHF_ALLOC;
76014- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
76015+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
76016 info->index.sym) | INIT_OFFSET_MASK;
76017 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
76018
76019@@ -2323,13 +2341,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
76020 }
76021
76022 /* Append room for core symbols at end of core part. */
76023- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
76024- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
76025- mod->core_size += strtab_size;
76026+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
76027+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
76028+ mod->core_size_rx += strtab_size;
76029
76030 /* Put string table section at end of init part of module. */
76031 strsect->sh_flags |= SHF_ALLOC;
76032- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
76033+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
76034 info->index.str) | INIT_OFFSET_MASK;
76035 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
76036 }
76037@@ -2347,12 +2365,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
76038 /* Make sure we get permanent strtab: don't use info->strtab. */
76039 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
76040
76041+ pax_open_kernel();
76042+
76043 /* Set types up while we still have access to sections. */
76044 for (i = 0; i < mod->num_symtab; i++)
76045 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
76046
76047- mod->core_symtab = dst = mod->module_core + info->symoffs;
76048- mod->core_strtab = s = mod->module_core + info->stroffs;
76049+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
76050+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
76051 src = mod->symtab;
76052 for (ndst = i = 0; i < mod->num_symtab; i++) {
76053 if (i == 0 ||
76054@@ -2364,6 +2384,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
76055 }
76056 }
76057 mod->core_num_syms = ndst;
76058+
76059+ pax_close_kernel();
76060 }
76061 #else
76062 static inline void layout_symtab(struct module *mod, struct load_info *info)
76063@@ -2397,17 +2419,33 @@ void * __weak module_alloc(unsigned long size)
76064 return vmalloc_exec(size);
76065 }
76066
76067-static void *module_alloc_update_bounds(unsigned long size)
76068+static void *module_alloc_update_bounds_rw(unsigned long size)
76069 {
76070 void *ret = module_alloc(size);
76071
76072 if (ret) {
76073 mutex_lock(&module_mutex);
76074 /* Update module bounds. */
76075- if ((unsigned long)ret < module_addr_min)
76076- module_addr_min = (unsigned long)ret;
76077- if ((unsigned long)ret + size > module_addr_max)
76078- module_addr_max = (unsigned long)ret + size;
76079+ if ((unsigned long)ret < module_addr_min_rw)
76080+ module_addr_min_rw = (unsigned long)ret;
76081+ if ((unsigned long)ret + size > module_addr_max_rw)
76082+ module_addr_max_rw = (unsigned long)ret + size;
76083+ mutex_unlock(&module_mutex);
76084+ }
76085+ return ret;
76086+}
76087+
76088+static void *module_alloc_update_bounds_rx(unsigned long size)
76089+{
76090+ void *ret = module_alloc_exec(size);
76091+
76092+ if (ret) {
76093+ mutex_lock(&module_mutex);
76094+ /* Update module bounds. */
76095+ if ((unsigned long)ret < module_addr_min_rx)
76096+ module_addr_min_rx = (unsigned long)ret;
76097+ if ((unsigned long)ret + size > module_addr_max_rx)
76098+ module_addr_max_rx = (unsigned long)ret + size;
76099 mutex_unlock(&module_mutex);
76100 }
76101 return ret;
76102@@ -2683,8 +2721,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
76103 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
76104 {
76105 const char *modmagic = get_modinfo(info, "vermagic");
76106+ const char *license = get_modinfo(info, "license");
76107 int err;
76108
76109+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
76110+ if (!license || !license_is_gpl_compatible(license))
76111+ return -ENOEXEC;
76112+#endif
76113+
76114 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
76115 modmagic = NULL;
76116
76117@@ -2710,7 +2754,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
76118 }
76119
76120 /* Set up license info based on the info section */
76121- set_license(mod, get_modinfo(info, "license"));
76122+ set_license(mod, license);
76123
76124 return 0;
76125 }
76126@@ -2804,7 +2848,7 @@ static int move_module(struct module *mod, struct load_info *info)
76127 void *ptr;
76128
76129 /* Do the allocs. */
76130- ptr = module_alloc_update_bounds(mod->core_size);
76131+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
76132 /*
76133 * The pointer to this block is stored in the module structure
76134 * which is inside the block. Just mark it as not being a
76135@@ -2814,11 +2858,11 @@ static int move_module(struct module *mod, struct load_info *info)
76136 if (!ptr)
76137 return -ENOMEM;
76138
76139- memset(ptr, 0, mod->core_size);
76140- mod->module_core = ptr;
76141+ memset(ptr, 0, mod->core_size_rw);
76142+ mod->module_core_rw = ptr;
76143
76144- if (mod->init_size) {
76145- ptr = module_alloc_update_bounds(mod->init_size);
76146+ if (mod->init_size_rw) {
76147+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
76148 /*
76149 * The pointer to this block is stored in the module structure
76150 * which is inside the block. This block doesn't need to be
76151@@ -2827,13 +2871,45 @@ static int move_module(struct module *mod, struct load_info *info)
76152 */
76153 kmemleak_ignore(ptr);
76154 if (!ptr) {
76155- module_free(mod, mod->module_core);
76156+ module_free(mod, mod->module_core_rw);
76157 return -ENOMEM;
76158 }
76159- memset(ptr, 0, mod->init_size);
76160- mod->module_init = ptr;
76161+ memset(ptr, 0, mod->init_size_rw);
76162+ mod->module_init_rw = ptr;
76163 } else
76164- mod->module_init = NULL;
76165+ mod->module_init_rw = NULL;
76166+
76167+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
76168+ kmemleak_not_leak(ptr);
76169+ if (!ptr) {
76170+ if (mod->module_init_rw)
76171+ module_free(mod, mod->module_init_rw);
76172+ module_free(mod, mod->module_core_rw);
76173+ return -ENOMEM;
76174+ }
76175+
76176+ pax_open_kernel();
76177+ memset(ptr, 0, mod->core_size_rx);
76178+ pax_close_kernel();
76179+ mod->module_core_rx = ptr;
76180+
76181+ if (mod->init_size_rx) {
76182+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
76183+ kmemleak_ignore(ptr);
76184+ if (!ptr && mod->init_size_rx) {
76185+ module_free_exec(mod, mod->module_core_rx);
76186+ if (mod->module_init_rw)
76187+ module_free(mod, mod->module_init_rw);
76188+ module_free(mod, mod->module_core_rw);
76189+ return -ENOMEM;
76190+ }
76191+
76192+ pax_open_kernel();
76193+ memset(ptr, 0, mod->init_size_rx);
76194+ pax_close_kernel();
76195+ mod->module_init_rx = ptr;
76196+ } else
76197+ mod->module_init_rx = NULL;
76198
76199 /* Transfer each section which specifies SHF_ALLOC */
76200 pr_debug("final section addresses:\n");
76201@@ -2844,16 +2920,45 @@ static int move_module(struct module *mod, struct load_info *info)
76202 if (!(shdr->sh_flags & SHF_ALLOC))
76203 continue;
76204
76205- if (shdr->sh_entsize & INIT_OFFSET_MASK)
76206- dest = mod->module_init
76207- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
76208- else
76209- dest = mod->module_core + shdr->sh_entsize;
76210+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
76211+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
76212+ dest = mod->module_init_rw
76213+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
76214+ else
76215+ dest = mod->module_init_rx
76216+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
76217+ } else {
76218+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
76219+ dest = mod->module_core_rw + shdr->sh_entsize;
76220+ else
76221+ dest = mod->module_core_rx + shdr->sh_entsize;
76222+ }
76223+
76224+ if (shdr->sh_type != SHT_NOBITS) {
76225+
76226+#ifdef CONFIG_PAX_KERNEXEC
76227+#ifdef CONFIG_X86_64
76228+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
76229+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
76230+#endif
76231+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
76232+ pax_open_kernel();
76233+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
76234+ pax_close_kernel();
76235+ } else
76236+#endif
76237
76238- if (shdr->sh_type != SHT_NOBITS)
76239 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
76240+ }
76241 /* Update sh_addr to point to copy in image. */
76242- shdr->sh_addr = (unsigned long)dest;
76243+
76244+#ifdef CONFIG_PAX_KERNEXEC
76245+ if (shdr->sh_flags & SHF_EXECINSTR)
76246+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
76247+ else
76248+#endif
76249+
76250+ shdr->sh_addr = (unsigned long)dest;
76251 pr_debug("\t0x%lx %s\n",
76252 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
76253 }
76254@@ -2908,12 +3013,12 @@ static void flush_module_icache(const struct module *mod)
76255 * Do it before processing of module parameters, so the module
76256 * can provide parameter accessor functions of its own.
76257 */
76258- if (mod->module_init)
76259- flush_icache_range((unsigned long)mod->module_init,
76260- (unsigned long)mod->module_init
76261- + mod->init_size);
76262- flush_icache_range((unsigned long)mod->module_core,
76263- (unsigned long)mod->module_core + mod->core_size);
76264+ if (mod->module_init_rx)
76265+ flush_icache_range((unsigned long)mod->module_init_rx,
76266+ (unsigned long)mod->module_init_rx
76267+ + mod->init_size_rx);
76268+ flush_icache_range((unsigned long)mod->module_core_rx,
76269+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
76270
76271 set_fs(old_fs);
76272 }
76273@@ -2983,8 +3088,10 @@ out:
76274 static void module_deallocate(struct module *mod, struct load_info *info)
76275 {
76276 percpu_modfree(mod);
76277- module_free(mod, mod->module_init);
76278- module_free(mod, mod->module_core);
76279+ module_free_exec(mod, mod->module_init_rx);
76280+ module_free_exec(mod, mod->module_core_rx);
76281+ module_free(mod, mod->module_init_rw);
76282+ module_free(mod, mod->module_core_rw);
76283 }
76284
76285 int __weak module_finalize(const Elf_Ehdr *hdr,
76286@@ -2997,7 +3104,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
76287 static int post_relocation(struct module *mod, const struct load_info *info)
76288 {
76289 /* Sort exception table now relocations are done. */
76290+ pax_open_kernel();
76291 sort_extable(mod->extable, mod->extable + mod->num_exentries);
76292+ pax_close_kernel();
76293
76294 /* Copy relocated percpu area over. */
76295 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
76296@@ -3051,16 +3160,16 @@ static int do_init_module(struct module *mod)
76297 MODULE_STATE_COMING, mod);
76298
76299 /* Set RO and NX regions for core */
76300- set_section_ro_nx(mod->module_core,
76301- mod->core_text_size,
76302- mod->core_ro_size,
76303- mod->core_size);
76304+ set_section_ro_nx(mod->module_core_rx,
76305+ mod->core_size_rx,
76306+ mod->core_size_rx,
76307+ mod->core_size_rx);
76308
76309 /* Set RO and NX regions for init */
76310- set_section_ro_nx(mod->module_init,
76311- mod->init_text_size,
76312- mod->init_ro_size,
76313- mod->init_size);
76314+ set_section_ro_nx(mod->module_init_rx,
76315+ mod->init_size_rx,
76316+ mod->init_size_rx,
76317+ mod->init_size_rx);
76318
76319 do_mod_ctors(mod);
76320 /* Start the module */
76321@@ -3122,11 +3231,12 @@ static int do_init_module(struct module *mod)
76322 mod->strtab = mod->core_strtab;
76323 #endif
76324 unset_module_init_ro_nx(mod);
76325- module_free(mod, mod->module_init);
76326- mod->module_init = NULL;
76327- mod->init_size = 0;
76328- mod->init_ro_size = 0;
76329- mod->init_text_size = 0;
76330+ module_free(mod, mod->module_init_rw);
76331+ module_free_exec(mod, mod->module_init_rx);
76332+ mod->module_init_rw = NULL;
76333+ mod->module_init_rx = NULL;
76334+ mod->init_size_rw = 0;
76335+ mod->init_size_rx = 0;
76336 mutex_unlock(&module_mutex);
76337 wake_up_all(&module_wq);
76338
76339@@ -3209,9 +3319,38 @@ again:
76340 if (err)
76341 goto free_unload;
76342
76343+ /* Now copy in args */
76344+ mod->args = strndup_user(uargs, ~0UL >> 1);
76345+ if (IS_ERR(mod->args)) {
76346+ err = PTR_ERR(mod->args);
76347+ goto free_unload;
76348+ }
76349+
76350 /* Set up MODINFO_ATTR fields */
76351 setup_modinfo(mod, info);
76352
76353+#ifdef CONFIG_GRKERNSEC_MODHARDEN
76354+ {
76355+ char *p, *p2;
76356+
76357+ if (strstr(mod->args, "grsec_modharden_netdev")) {
76358+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
76359+ err = -EPERM;
76360+ goto free_modinfo;
76361+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
76362+ p += sizeof("grsec_modharden_normal") - 1;
76363+ p2 = strstr(p, "_");
76364+ if (p2) {
76365+ *p2 = '\0';
76366+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
76367+ *p2 = '_';
76368+ }
76369+ err = -EPERM;
76370+ goto free_modinfo;
76371+ }
76372+ }
76373+#endif
76374+
76375 /* Fix up syms, so that st_value is a pointer to location. */
76376 err = simplify_symbols(mod, info);
76377 if (err < 0)
76378@@ -3227,13 +3366,6 @@ again:
76379
76380 flush_module_icache(mod);
76381
76382- /* Now copy in args */
76383- mod->args = strndup_user(uargs, ~0UL >> 1);
76384- if (IS_ERR(mod->args)) {
76385- err = PTR_ERR(mod->args);
76386- goto free_arch_cleanup;
76387- }
76388-
76389 dynamic_debug_setup(info->debug, info->num_debug);
76390
76391 mutex_lock(&module_mutex);
76392@@ -3278,11 +3410,10 @@ again:
76393 mutex_unlock(&module_mutex);
76394 dynamic_debug_remove(info->debug);
76395 synchronize_sched();
76396- kfree(mod->args);
76397- free_arch_cleanup:
76398 module_arch_cleanup(mod);
76399 free_modinfo:
76400 free_modinfo(mod);
76401+ kfree(mod->args);
76402 free_unload:
76403 module_unload_free(mod);
76404 unlink_mod:
76405@@ -3365,10 +3496,16 @@ static const char *get_ksymbol(struct module *mod,
76406 unsigned long nextval;
76407
76408 /* At worse, next value is at end of module */
76409- if (within_module_init(addr, mod))
76410- nextval = (unsigned long)mod->module_init+mod->init_text_size;
76411+ if (within_module_init_rx(addr, mod))
76412+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
76413+ else if (within_module_init_rw(addr, mod))
76414+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
76415+ else if (within_module_core_rx(addr, mod))
76416+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
76417+ else if (within_module_core_rw(addr, mod))
76418+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
76419 else
76420- nextval = (unsigned long)mod->module_core+mod->core_text_size;
76421+ return NULL;
76422
76423 /* Scan for closest preceding symbol, and next symbol. (ELF
76424 starts real symbols at 1). */
76425@@ -3621,7 +3758,7 @@ static int m_show(struct seq_file *m, void *p)
76426 return 0;
76427
76428 seq_printf(m, "%s %u",
76429- mod->name, mod->init_size + mod->core_size);
76430+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
76431 print_unload_info(m, mod);
76432
76433 /* Informative for users. */
76434@@ -3630,7 +3767,7 @@ static int m_show(struct seq_file *m, void *p)
76435 mod->state == MODULE_STATE_COMING ? "Loading":
76436 "Live");
76437 /* Used by oprofile and other similar tools. */
76438- seq_printf(m, " 0x%pK", mod->module_core);
76439+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
76440
76441 /* Taints info */
76442 if (mod->taints)
76443@@ -3666,7 +3803,17 @@ static const struct file_operations proc_modules_operations = {
76444
76445 static int __init proc_modules_init(void)
76446 {
76447+#ifndef CONFIG_GRKERNSEC_HIDESYM
76448+#ifdef CONFIG_GRKERNSEC_PROC_USER
76449+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
76450+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
76451+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
76452+#else
76453 proc_create("modules", 0, NULL, &proc_modules_operations);
76454+#endif
76455+#else
76456+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
76457+#endif
76458 return 0;
76459 }
76460 module_init(proc_modules_init);
76461@@ -3727,14 +3874,14 @@ struct module *__module_address(unsigned long addr)
76462 {
76463 struct module *mod;
76464
76465- if (addr < module_addr_min || addr > module_addr_max)
76466+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
76467+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
76468 return NULL;
76469
76470 list_for_each_entry_rcu(mod, &modules, list) {
76471 if (mod->state == MODULE_STATE_UNFORMED)
76472 continue;
76473- if (within_module_core(addr, mod)
76474- || within_module_init(addr, mod))
76475+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
76476 return mod;
76477 }
76478 return NULL;
76479@@ -3769,11 +3916,20 @@ bool is_module_text_address(unsigned long addr)
76480 */
76481 struct module *__module_text_address(unsigned long addr)
76482 {
76483- struct module *mod = __module_address(addr);
76484+ struct module *mod;
76485+
76486+#ifdef CONFIG_X86_32
76487+ addr = ktla_ktva(addr);
76488+#endif
76489+
76490+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
76491+ return NULL;
76492+
76493+ mod = __module_address(addr);
76494+
76495 if (mod) {
76496 /* Make sure it's within the text section. */
76497- if (!within(addr, mod->module_init, mod->init_text_size)
76498- && !within(addr, mod->module_core, mod->core_text_size))
76499+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
76500 mod = NULL;
76501 }
76502 return mod;
76503diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
76504index 7e3443f..b2a1e6b 100644
76505--- a/kernel/mutex-debug.c
76506+++ b/kernel/mutex-debug.c
76507@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
76508 }
76509
76510 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
76511- struct thread_info *ti)
76512+ struct task_struct *task)
76513 {
76514 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
76515
76516 /* Mark the current thread as blocked on the lock: */
76517- ti->task->blocked_on = waiter;
76518+ task->blocked_on = waiter;
76519 }
76520
76521 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
76522- struct thread_info *ti)
76523+ struct task_struct *task)
76524 {
76525 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
76526- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
76527- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
76528- ti->task->blocked_on = NULL;
76529+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
76530+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
76531+ task->blocked_on = NULL;
76532
76533 list_del_init(&waiter->list);
76534 waiter->task = NULL;
76535diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
76536index 0799fd3..d06ae3b 100644
76537--- a/kernel/mutex-debug.h
76538+++ b/kernel/mutex-debug.h
76539@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
76540 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
76541 extern void debug_mutex_add_waiter(struct mutex *lock,
76542 struct mutex_waiter *waiter,
76543- struct thread_info *ti);
76544+ struct task_struct *task);
76545 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
76546- struct thread_info *ti);
76547+ struct task_struct *task);
76548 extern void debug_mutex_unlock(struct mutex *lock);
76549 extern void debug_mutex_init(struct mutex *lock, const char *name,
76550 struct lock_class_key *key);
76551diff --git a/kernel/mutex.c b/kernel/mutex.c
76552index a307cc9..27fd2e9 100644
76553--- a/kernel/mutex.c
76554+++ b/kernel/mutex.c
76555@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
76556 spin_lock_mutex(&lock->wait_lock, flags);
76557
76558 debug_mutex_lock_common(lock, &waiter);
76559- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
76560+ debug_mutex_add_waiter(lock, &waiter, task);
76561
76562 /* add waiting tasks to the end of the waitqueue (FIFO): */
76563 list_add_tail(&waiter.list, &lock->wait_list);
76564@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
76565 * TASK_UNINTERRUPTIBLE case.)
76566 */
76567 if (unlikely(signal_pending_state(state, task))) {
76568- mutex_remove_waiter(lock, &waiter,
76569- task_thread_info(task));
76570+ mutex_remove_waiter(lock, &waiter, task);
76571 mutex_release(&lock->dep_map, 1, ip);
76572 spin_unlock_mutex(&lock->wait_lock, flags);
76573
76574@@ -247,7 +246,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
76575 done:
76576 lock_acquired(&lock->dep_map, ip);
76577 /* got the lock - rejoice! */
76578- mutex_remove_waiter(lock, &waiter, current_thread_info());
76579+ mutex_remove_waiter(lock, &waiter, task);
76580 mutex_set_owner(lock);
76581
76582 /* set it to 0 if there are no waiters left: */
76583diff --git a/kernel/notifier.c b/kernel/notifier.c
76584index 2d5cc4c..d9ea600 100644
76585--- a/kernel/notifier.c
76586+++ b/kernel/notifier.c
76587@@ -5,6 +5,7 @@
76588 #include <linux/rcupdate.h>
76589 #include <linux/vmalloc.h>
76590 #include <linux/reboot.h>
76591+#include <linux/mm.h>
76592
76593 /*
76594 * Notifier list for kernel code which wants to be called
76595@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
76596 while ((*nl) != NULL) {
76597 if (n->priority > (*nl)->priority)
76598 break;
76599- nl = &((*nl)->next);
76600+ nl = (struct notifier_block **)&((*nl)->next);
76601 }
76602- n->next = *nl;
76603+ pax_open_kernel();
76604+ *(const void **)&n->next = *nl;
76605 rcu_assign_pointer(*nl, n);
76606+ pax_close_kernel();
76607 return 0;
76608 }
76609
76610@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
76611 return 0;
76612 if (n->priority > (*nl)->priority)
76613 break;
76614- nl = &((*nl)->next);
76615+ nl = (struct notifier_block **)&((*nl)->next);
76616 }
76617- n->next = *nl;
76618+ pax_open_kernel();
76619+ *(const void **)&n->next = *nl;
76620 rcu_assign_pointer(*nl, n);
76621+ pax_close_kernel();
76622 return 0;
76623 }
76624
76625@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
76626 {
76627 while ((*nl) != NULL) {
76628 if ((*nl) == n) {
76629+ pax_open_kernel();
76630 rcu_assign_pointer(*nl, n->next);
76631+ pax_close_kernel();
76632 return 0;
76633 }
76634- nl = &((*nl)->next);
76635+ nl = (struct notifier_block **)&((*nl)->next);
76636 }
76637 return -ENOENT;
76638 }
76639diff --git a/kernel/panic.c b/kernel/panic.c
76640index e1b2822..5edc1d9 100644
76641--- a/kernel/panic.c
76642+++ b/kernel/panic.c
76643@@ -410,7 +410,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
76644 const char *board;
76645
76646 printk(KERN_WARNING "------------[ cut here ]------------\n");
76647- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
76648+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
76649 board = dmi_get_system_info(DMI_PRODUCT_NAME);
76650 if (board)
76651 printk(KERN_WARNING "Hardware name: %s\n", board);
76652@@ -465,7 +465,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
76653 */
76654 void __stack_chk_fail(void)
76655 {
76656- panic("stack-protector: Kernel stack is corrupted in: %p\n",
76657+ dump_stack();
76658+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
76659 __builtin_return_address(0));
76660 }
76661 EXPORT_SYMBOL(__stack_chk_fail);
76662diff --git a/kernel/pid.c b/kernel/pid.c
76663index f2c6a68..4922d97 100644
76664--- a/kernel/pid.c
76665+++ b/kernel/pid.c
76666@@ -33,6 +33,7 @@
76667 #include <linux/rculist.h>
76668 #include <linux/bootmem.h>
76669 #include <linux/hash.h>
76670+#include <linux/security.h>
76671 #include <linux/pid_namespace.h>
76672 #include <linux/init_task.h>
76673 #include <linux/syscalls.h>
76674@@ -46,7 +47,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
76675
76676 int pid_max = PID_MAX_DEFAULT;
76677
76678-#define RESERVED_PIDS 300
76679+#define RESERVED_PIDS 500
76680
76681 int pid_max_min = RESERVED_PIDS + 1;
76682 int pid_max_max = PID_MAX_LIMIT;
76683@@ -441,10 +442,18 @@ EXPORT_SYMBOL(pid_task);
76684 */
76685 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
76686 {
76687+ struct task_struct *task;
76688+
76689 rcu_lockdep_assert(rcu_read_lock_held(),
76690 "find_task_by_pid_ns() needs rcu_read_lock()"
76691 " protection");
76692- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
76693+
76694+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
76695+
76696+ if (gr_pid_is_chrooted(task))
76697+ return NULL;
76698+
76699+ return task;
76700 }
76701
76702 struct task_struct *find_task_by_vpid(pid_t vnr)
76703@@ -452,6 +461,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
76704 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
76705 }
76706
76707+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
76708+{
76709+ rcu_lockdep_assert(rcu_read_lock_held(),
76710+ "find_task_by_pid_ns() needs rcu_read_lock()"
76711+ " protection");
76712+ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
76713+}
76714+
76715 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
76716 {
76717 struct pid *pid;
76718diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
76719index bea15bd..789f3d0 100644
76720--- a/kernel/pid_namespace.c
76721+++ b/kernel/pid_namespace.c
76722@@ -249,7 +249,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
76723 void __user *buffer, size_t *lenp, loff_t *ppos)
76724 {
76725 struct pid_namespace *pid_ns = task_active_pid_ns(current);
76726- struct ctl_table tmp = *table;
76727+ ctl_table_no_const tmp = *table;
76728
76729 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
76730 return -EPERM;
76731diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
76732index 942ca27..111e609 100644
76733--- a/kernel/posix-cpu-timers.c
76734+++ b/kernel/posix-cpu-timers.c
76735@@ -1576,14 +1576,14 @@ struct k_clock clock_posix_cpu = {
76736
76737 static __init int init_posix_cpu_timers(void)
76738 {
76739- struct k_clock process = {
76740+ static struct k_clock process = {
76741 .clock_getres = process_cpu_clock_getres,
76742 .clock_get = process_cpu_clock_get,
76743 .timer_create = process_cpu_timer_create,
76744 .nsleep = process_cpu_nsleep,
76745 .nsleep_restart = process_cpu_nsleep_restart,
76746 };
76747- struct k_clock thread = {
76748+ static struct k_clock thread = {
76749 .clock_getres = thread_cpu_clock_getres,
76750 .clock_get = thread_cpu_clock_get,
76751 .timer_create = thread_cpu_timer_create,
76752diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
76753index e885be1..380fe76 100644
76754--- a/kernel/posix-timers.c
76755+++ b/kernel/posix-timers.c
76756@@ -43,6 +43,7 @@
76757 #include <linux/idr.h>
76758 #include <linux/posix-clock.h>
76759 #include <linux/posix-timers.h>
76760+#include <linux/grsecurity.h>
76761 #include <linux/syscalls.h>
76762 #include <linux/wait.h>
76763 #include <linux/workqueue.h>
76764@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
76765 * which we beg off on and pass to do_sys_settimeofday().
76766 */
76767
76768-static struct k_clock posix_clocks[MAX_CLOCKS];
76769+static struct k_clock *posix_clocks[MAX_CLOCKS];
76770
76771 /*
76772 * These ones are defined below.
76773@@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
76774 */
76775 static __init int init_posix_timers(void)
76776 {
76777- struct k_clock clock_realtime = {
76778+ static struct k_clock clock_realtime = {
76779 .clock_getres = hrtimer_get_res,
76780 .clock_get = posix_clock_realtime_get,
76781 .clock_set = posix_clock_realtime_set,
76782@@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
76783 .timer_get = common_timer_get,
76784 .timer_del = common_timer_del,
76785 };
76786- struct k_clock clock_monotonic = {
76787+ static struct k_clock clock_monotonic = {
76788 .clock_getres = hrtimer_get_res,
76789 .clock_get = posix_ktime_get_ts,
76790 .nsleep = common_nsleep,
76791@@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
76792 .timer_get = common_timer_get,
76793 .timer_del = common_timer_del,
76794 };
76795- struct k_clock clock_monotonic_raw = {
76796+ static struct k_clock clock_monotonic_raw = {
76797 .clock_getres = hrtimer_get_res,
76798 .clock_get = posix_get_monotonic_raw,
76799 };
76800- struct k_clock clock_realtime_coarse = {
76801+ static struct k_clock clock_realtime_coarse = {
76802 .clock_getres = posix_get_coarse_res,
76803 .clock_get = posix_get_realtime_coarse,
76804 };
76805- struct k_clock clock_monotonic_coarse = {
76806+ static struct k_clock clock_monotonic_coarse = {
76807 .clock_getres = posix_get_coarse_res,
76808 .clock_get = posix_get_monotonic_coarse,
76809 };
76810- struct k_clock clock_boottime = {
76811+ static struct k_clock clock_boottime = {
76812 .clock_getres = hrtimer_get_res,
76813 .clock_get = posix_get_boottime,
76814 .nsleep = common_nsleep,
76815@@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
76816 return;
76817 }
76818
76819- posix_clocks[clock_id] = *new_clock;
76820+ posix_clocks[clock_id] = new_clock;
76821 }
76822 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
76823
76824@@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
76825 return (id & CLOCKFD_MASK) == CLOCKFD ?
76826 &clock_posix_dynamic : &clock_posix_cpu;
76827
76828- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
76829+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
76830 return NULL;
76831- return &posix_clocks[id];
76832+ return posix_clocks[id];
76833 }
76834
76835 static int common_timer_create(struct k_itimer *new_timer)
76836@@ -966,6 +967,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
76837 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
76838 return -EFAULT;
76839
76840+ /* only the CLOCK_REALTIME clock can be set, all other clocks
76841+ have their clock_set fptr set to a nosettime dummy function
76842+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
76843+ call common_clock_set, which calls do_sys_settimeofday, which
76844+ we hook
76845+ */
76846+
76847 return kc->clock_set(which_clock, &new_tp);
76848 }
76849
76850diff --git a/kernel/power/process.c b/kernel/power/process.c
76851index d5a258b..4271191 100644
76852--- a/kernel/power/process.c
76853+++ b/kernel/power/process.c
76854@@ -33,6 +33,7 @@ static int try_to_freeze_tasks(bool user_only)
76855 u64 elapsed_csecs64;
76856 unsigned int elapsed_csecs;
76857 bool wakeup = false;
76858+ bool timedout = false;
76859
76860 do_gettimeofday(&start);
76861
76862@@ -43,13 +44,20 @@ static int try_to_freeze_tasks(bool user_only)
76863
76864 while (true) {
76865 todo = 0;
76866+ if (time_after(jiffies, end_time))
76867+ timedout = true;
76868 read_lock(&tasklist_lock);
76869 do_each_thread(g, p) {
76870 if (p == current || !freeze_task(p))
76871 continue;
76872
76873- if (!freezer_should_skip(p))
76874+ if (!freezer_should_skip(p)) {
76875 todo++;
76876+ if (timedout) {
76877+ printk(KERN_ERR "Task refusing to freeze:\n");
76878+ sched_show_task(p);
76879+ }
76880+ }
76881 } while_each_thread(g, p);
76882 read_unlock(&tasklist_lock);
76883
76884@@ -58,7 +66,7 @@ static int try_to_freeze_tasks(bool user_only)
76885 todo += wq_busy;
76886 }
76887
76888- if (!todo || time_after(jiffies, end_time))
76889+ if (!todo || timedout)
76890 break;
76891
76892 if (pm_wakeup_pending()) {
76893diff --git a/kernel/printk.c b/kernel/printk.c
76894index 267ce78..2487112 100644
76895--- a/kernel/printk.c
76896+++ b/kernel/printk.c
76897@@ -609,11 +609,17 @@ static unsigned int devkmsg_poll(struct file *file, poll_table *wait)
76898 return ret;
76899 }
76900
76901+static int check_syslog_permissions(int type, bool from_file);
76902+
76903 static int devkmsg_open(struct inode *inode, struct file *file)
76904 {
76905 struct devkmsg_user *user;
76906 int err;
76907
76908+ err = check_syslog_permissions(SYSLOG_ACTION_OPEN, SYSLOG_FROM_FILE);
76909+ if (err)
76910+ return err;
76911+
76912 /* write-only does not need any file context */
76913 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
76914 return 0;
76915@@ -822,7 +828,7 @@ static int syslog_action_restricted(int type)
76916 if (dmesg_restrict)
76917 return 1;
76918 /* Unless restricted, we allow "read all" and "get buffer size" for everybody */
76919- return type != SYSLOG_ACTION_READ_ALL && type != SYSLOG_ACTION_SIZE_BUFFER;
76920+ return type != SYSLOG_ACTION_OPEN && type != SYSLOG_ACTION_READ_ALL && type != SYSLOG_ACTION_SIZE_BUFFER;
76921 }
76922
76923 static int check_syslog_permissions(int type, bool from_file)
76924@@ -834,6 +840,11 @@ static int check_syslog_permissions(int type, bool from_file)
76925 if (from_file && type != SYSLOG_ACTION_OPEN)
76926 return 0;
76927
76928+#ifdef CONFIG_GRKERNSEC_DMESG
76929+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
76930+ return -EPERM;
76931+#endif
76932+
76933 if (syslog_action_restricted(type)) {
76934 if (capable(CAP_SYSLOG))
76935 return 0;
76936diff --git a/kernel/profile.c b/kernel/profile.c
76937index 1f39181..86093471 100644
76938--- a/kernel/profile.c
76939+++ b/kernel/profile.c
76940@@ -40,7 +40,7 @@ struct profile_hit {
76941 /* Oprofile timer tick hook */
76942 static int (*timer_hook)(struct pt_regs *) __read_mostly;
76943
76944-static atomic_t *prof_buffer;
76945+static atomic_unchecked_t *prof_buffer;
76946 static unsigned long prof_len, prof_shift;
76947
76948 int prof_on __read_mostly;
76949@@ -282,7 +282,7 @@ static void profile_flip_buffers(void)
76950 hits[i].pc = 0;
76951 continue;
76952 }
76953- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
76954+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
76955 hits[i].hits = hits[i].pc = 0;
76956 }
76957 }
76958@@ -343,9 +343,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
76959 * Add the current hit(s) and flush the write-queue out
76960 * to the global buffer:
76961 */
76962- atomic_add(nr_hits, &prof_buffer[pc]);
76963+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
76964 for (i = 0; i < NR_PROFILE_HIT; ++i) {
76965- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
76966+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
76967 hits[i].pc = hits[i].hits = 0;
76968 }
76969 out:
76970@@ -420,7 +420,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
76971 {
76972 unsigned long pc;
76973 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
76974- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
76975+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
76976 }
76977 #endif /* !CONFIG_SMP */
76978
76979@@ -518,7 +518,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
76980 return -EFAULT;
76981 buf++; p++; count--; read++;
76982 }
76983- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
76984+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
76985 if (copy_to_user(buf, (void *)pnt, count))
76986 return -EFAULT;
76987 read += count;
76988@@ -549,7 +549,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
76989 }
76990 #endif
76991 profile_discard_flip_buffers();
76992- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
76993+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
76994 return count;
76995 }
76996
76997diff --git a/kernel/ptrace.c b/kernel/ptrace.c
76998index 6cbeaae..cfe7ff0 100644
76999--- a/kernel/ptrace.c
77000+++ b/kernel/ptrace.c
77001@@ -324,7 +324,7 @@ static int ptrace_attach(struct task_struct *task, long request,
77002 if (seize)
77003 flags |= PT_SEIZED;
77004 rcu_read_lock();
77005- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
77006+ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
77007 flags |= PT_PTRACE_CAP;
77008 rcu_read_unlock();
77009 task->ptrace = flags;
77010@@ -535,7 +535,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
77011 break;
77012 return -EIO;
77013 }
77014- if (copy_to_user(dst, buf, retval))
77015+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
77016 return -EFAULT;
77017 copied += retval;
77018 src += retval;
77019@@ -720,7 +720,7 @@ int ptrace_request(struct task_struct *child, long request,
77020 bool seized = child->ptrace & PT_SEIZED;
77021 int ret = -EIO;
77022 siginfo_t siginfo, *si;
77023- void __user *datavp = (void __user *) data;
77024+ void __user *datavp = (__force void __user *) data;
77025 unsigned long __user *datalp = datavp;
77026 unsigned long flags;
77027
77028@@ -922,14 +922,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
77029 goto out;
77030 }
77031
77032+ if (gr_handle_ptrace(child, request)) {
77033+ ret = -EPERM;
77034+ goto out_put_task_struct;
77035+ }
77036+
77037 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
77038 ret = ptrace_attach(child, request, addr, data);
77039 /*
77040 * Some architectures need to do book-keeping after
77041 * a ptrace attach.
77042 */
77043- if (!ret)
77044+ if (!ret) {
77045 arch_ptrace_attach(child);
77046+ gr_audit_ptrace(child);
77047+ }
77048 goto out_put_task_struct;
77049 }
77050
77051@@ -957,7 +964,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
77052 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
77053 if (copied != sizeof(tmp))
77054 return -EIO;
77055- return put_user(tmp, (unsigned long __user *)data);
77056+ return put_user(tmp, (__force unsigned long __user *)data);
77057 }
77058
77059 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
77060@@ -1051,7 +1058,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
77061 }
77062
77063 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
77064- compat_long_t addr, compat_long_t data)
77065+ compat_ulong_t addr, compat_ulong_t data)
77066 {
77067 struct task_struct *child;
77068 long ret;
77069@@ -1067,14 +1074,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
77070 goto out;
77071 }
77072
77073+ if (gr_handle_ptrace(child, request)) {
77074+ ret = -EPERM;
77075+ goto out_put_task_struct;
77076+ }
77077+
77078 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
77079 ret = ptrace_attach(child, request, addr, data);
77080 /*
77081 * Some architectures need to do book-keeping after
77082 * a ptrace attach.
77083 */
77084- if (!ret)
77085+ if (!ret) {
77086 arch_ptrace_attach(child);
77087+ gr_audit_ptrace(child);
77088+ }
77089 goto out_put_task_struct;
77090 }
77091
77092diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
77093index e7dce58..ad0d7b7 100644
77094--- a/kernel/rcutiny.c
77095+++ b/kernel/rcutiny.c
77096@@ -46,7 +46,7 @@
77097 struct rcu_ctrlblk;
77098 static void invoke_rcu_callbacks(void);
77099 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
77100-static void rcu_process_callbacks(struct softirq_action *unused);
77101+static void rcu_process_callbacks(void);
77102 static void __call_rcu(struct rcu_head *head,
77103 void (*func)(struct rcu_head *rcu),
77104 struct rcu_ctrlblk *rcp);
77105@@ -310,7 +310,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
77106 rcu_is_callbacks_kthread()));
77107 }
77108
77109-static void rcu_process_callbacks(struct softirq_action *unused)
77110+static void rcu_process_callbacks(void)
77111 {
77112 __rcu_process_callbacks(&rcu_sched_ctrlblk);
77113 __rcu_process_callbacks(&rcu_bh_ctrlblk);
77114diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
77115index f85016a..91cb03b 100644
77116--- a/kernel/rcutiny_plugin.h
77117+++ b/kernel/rcutiny_plugin.h
77118@@ -896,7 +896,7 @@ static int rcu_kthread(void *arg)
77119 have_rcu_kthread_work = morework;
77120 local_irq_restore(flags);
77121 if (work)
77122- rcu_process_callbacks(NULL);
77123+ rcu_process_callbacks();
77124 schedule_timeout_interruptible(1); /* Leave CPU for others. */
77125 }
77126
77127diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
77128index 31dea01..ad91ffb 100644
77129--- a/kernel/rcutorture.c
77130+++ b/kernel/rcutorture.c
77131@@ -163,12 +163,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
77132 { 0 };
77133 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
77134 { 0 };
77135-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
77136-static atomic_t n_rcu_torture_alloc;
77137-static atomic_t n_rcu_torture_alloc_fail;
77138-static atomic_t n_rcu_torture_free;
77139-static atomic_t n_rcu_torture_mberror;
77140-static atomic_t n_rcu_torture_error;
77141+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
77142+static atomic_unchecked_t n_rcu_torture_alloc;
77143+static atomic_unchecked_t n_rcu_torture_alloc_fail;
77144+static atomic_unchecked_t n_rcu_torture_free;
77145+static atomic_unchecked_t n_rcu_torture_mberror;
77146+static atomic_unchecked_t n_rcu_torture_error;
77147 static long n_rcu_torture_barrier_error;
77148 static long n_rcu_torture_boost_ktrerror;
77149 static long n_rcu_torture_boost_rterror;
77150@@ -272,11 +272,11 @@ rcu_torture_alloc(void)
77151
77152 spin_lock_bh(&rcu_torture_lock);
77153 if (list_empty(&rcu_torture_freelist)) {
77154- atomic_inc(&n_rcu_torture_alloc_fail);
77155+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
77156 spin_unlock_bh(&rcu_torture_lock);
77157 return NULL;
77158 }
77159- atomic_inc(&n_rcu_torture_alloc);
77160+ atomic_inc_unchecked(&n_rcu_torture_alloc);
77161 p = rcu_torture_freelist.next;
77162 list_del_init(p);
77163 spin_unlock_bh(&rcu_torture_lock);
77164@@ -289,7 +289,7 @@ rcu_torture_alloc(void)
77165 static void
77166 rcu_torture_free(struct rcu_torture *p)
77167 {
77168- atomic_inc(&n_rcu_torture_free);
77169+ atomic_inc_unchecked(&n_rcu_torture_free);
77170 spin_lock_bh(&rcu_torture_lock);
77171 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
77172 spin_unlock_bh(&rcu_torture_lock);
77173@@ -409,7 +409,7 @@ rcu_torture_cb(struct rcu_head *p)
77174 i = rp->rtort_pipe_count;
77175 if (i > RCU_TORTURE_PIPE_LEN)
77176 i = RCU_TORTURE_PIPE_LEN;
77177- atomic_inc(&rcu_torture_wcount[i]);
77178+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
77179 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
77180 rp->rtort_mbtest = 0;
77181 rcu_torture_free(rp);
77182@@ -457,7 +457,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
77183 i = rp->rtort_pipe_count;
77184 if (i > RCU_TORTURE_PIPE_LEN)
77185 i = RCU_TORTURE_PIPE_LEN;
77186- atomic_inc(&rcu_torture_wcount[i]);
77187+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
77188 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
77189 rp->rtort_mbtest = 0;
77190 list_del(&rp->rtort_free);
77191@@ -975,7 +975,7 @@ rcu_torture_writer(void *arg)
77192 i = old_rp->rtort_pipe_count;
77193 if (i > RCU_TORTURE_PIPE_LEN)
77194 i = RCU_TORTURE_PIPE_LEN;
77195- atomic_inc(&rcu_torture_wcount[i]);
77196+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
77197 old_rp->rtort_pipe_count++;
77198 cur_ops->deferred_free(old_rp);
77199 }
77200@@ -1060,7 +1060,7 @@ static void rcu_torture_timer(unsigned long unused)
77201 }
77202 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
77203 if (p->rtort_mbtest == 0)
77204- atomic_inc(&n_rcu_torture_mberror);
77205+ atomic_inc_unchecked(&n_rcu_torture_mberror);
77206 spin_lock(&rand_lock);
77207 cur_ops->read_delay(&rand);
77208 n_rcu_torture_timers++;
77209@@ -1124,7 +1124,7 @@ rcu_torture_reader(void *arg)
77210 }
77211 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
77212 if (p->rtort_mbtest == 0)
77213- atomic_inc(&n_rcu_torture_mberror);
77214+ atomic_inc_unchecked(&n_rcu_torture_mberror);
77215 cur_ops->read_delay(&rand);
77216 preempt_disable();
77217 pipe_count = p->rtort_pipe_count;
77218@@ -1183,11 +1183,11 @@ rcu_torture_printk(char *page)
77219 rcu_torture_current,
77220 rcu_torture_current_version,
77221 list_empty(&rcu_torture_freelist),
77222- atomic_read(&n_rcu_torture_alloc),
77223- atomic_read(&n_rcu_torture_alloc_fail),
77224- atomic_read(&n_rcu_torture_free));
77225+ atomic_read_unchecked(&n_rcu_torture_alloc),
77226+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
77227+ atomic_read_unchecked(&n_rcu_torture_free));
77228 cnt += sprintf(&page[cnt], "rtmbe: %d rtbke: %ld rtbre: %ld ",
77229- atomic_read(&n_rcu_torture_mberror),
77230+ atomic_read_unchecked(&n_rcu_torture_mberror),
77231 n_rcu_torture_boost_ktrerror,
77232 n_rcu_torture_boost_rterror);
77233 cnt += sprintf(&page[cnt], "rtbf: %ld rtb: %ld nt: %ld ",
77234@@ -1206,14 +1206,14 @@ rcu_torture_printk(char *page)
77235 n_barrier_attempts,
77236 n_rcu_torture_barrier_error);
77237 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
77238- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
77239+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
77240 n_rcu_torture_barrier_error != 0 ||
77241 n_rcu_torture_boost_ktrerror != 0 ||
77242 n_rcu_torture_boost_rterror != 0 ||
77243 n_rcu_torture_boost_failure != 0 ||
77244 i > 1) {
77245 cnt += sprintf(&page[cnt], "!!! ");
77246- atomic_inc(&n_rcu_torture_error);
77247+ atomic_inc_unchecked(&n_rcu_torture_error);
77248 WARN_ON_ONCE(1);
77249 }
77250 cnt += sprintf(&page[cnt], "Reader Pipe: ");
77251@@ -1227,7 +1227,7 @@ rcu_torture_printk(char *page)
77252 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
77253 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
77254 cnt += sprintf(&page[cnt], " %d",
77255- atomic_read(&rcu_torture_wcount[i]));
77256+ atomic_read_unchecked(&rcu_torture_wcount[i]));
77257 }
77258 cnt += sprintf(&page[cnt], "\n");
77259 if (cur_ops->stats)
77260@@ -1920,7 +1920,7 @@ rcu_torture_cleanup(void)
77261
77262 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
77263
77264- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
77265+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
77266 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
77267 else if (n_online_successes != n_online_attempts ||
77268 n_offline_successes != n_offline_attempts)
77269@@ -1989,18 +1989,18 @@ rcu_torture_init(void)
77270
77271 rcu_torture_current = NULL;
77272 rcu_torture_current_version = 0;
77273- atomic_set(&n_rcu_torture_alloc, 0);
77274- atomic_set(&n_rcu_torture_alloc_fail, 0);
77275- atomic_set(&n_rcu_torture_free, 0);
77276- atomic_set(&n_rcu_torture_mberror, 0);
77277- atomic_set(&n_rcu_torture_error, 0);
77278+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
77279+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
77280+ atomic_set_unchecked(&n_rcu_torture_free, 0);
77281+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
77282+ atomic_set_unchecked(&n_rcu_torture_error, 0);
77283 n_rcu_torture_barrier_error = 0;
77284 n_rcu_torture_boost_ktrerror = 0;
77285 n_rcu_torture_boost_rterror = 0;
77286 n_rcu_torture_boost_failure = 0;
77287 n_rcu_torture_boosts = 0;
77288 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
77289- atomic_set(&rcu_torture_wcount[i], 0);
77290+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
77291 for_each_possible_cpu(cpu) {
77292 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
77293 per_cpu(rcu_torture_count, cpu)[i] = 0;
77294diff --git a/kernel/rcutree.c b/kernel/rcutree.c
77295index e441b77..dd54f17 100644
77296--- a/kernel/rcutree.c
77297+++ b/kernel/rcutree.c
77298@@ -349,9 +349,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
77299 rcu_prepare_for_idle(smp_processor_id());
77300 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
77301 smp_mb__before_atomic_inc(); /* See above. */
77302- atomic_inc(&rdtp->dynticks);
77303+ atomic_inc_unchecked(&rdtp->dynticks);
77304 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
77305- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
77306+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
77307
77308 /*
77309 * It is illegal to enter an extended quiescent state while
77310@@ -487,10 +487,10 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
77311 int user)
77312 {
77313 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
77314- atomic_inc(&rdtp->dynticks);
77315+ atomic_inc_unchecked(&rdtp->dynticks);
77316 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
77317 smp_mb__after_atomic_inc(); /* See above. */
77318- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
77319+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
77320 rcu_cleanup_after_idle(smp_processor_id());
77321 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
77322 if (!user && !is_idle_task(current)) {
77323@@ -629,14 +629,14 @@ void rcu_nmi_enter(void)
77324 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
77325
77326 if (rdtp->dynticks_nmi_nesting == 0 &&
77327- (atomic_read(&rdtp->dynticks) & 0x1))
77328+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
77329 return;
77330 rdtp->dynticks_nmi_nesting++;
77331 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
77332- atomic_inc(&rdtp->dynticks);
77333+ atomic_inc_unchecked(&rdtp->dynticks);
77334 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
77335 smp_mb__after_atomic_inc(); /* See above. */
77336- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
77337+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
77338 }
77339
77340 /**
77341@@ -655,9 +655,9 @@ void rcu_nmi_exit(void)
77342 return;
77343 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
77344 smp_mb__before_atomic_inc(); /* See above. */
77345- atomic_inc(&rdtp->dynticks);
77346+ atomic_inc_unchecked(&rdtp->dynticks);
77347 smp_mb__after_atomic_inc(); /* Force delay to next write. */
77348- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
77349+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
77350 }
77351
77352 /**
77353@@ -671,7 +671,7 @@ int rcu_is_cpu_idle(void)
77354 int ret;
77355
77356 preempt_disable();
77357- ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
77358+ ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
77359 preempt_enable();
77360 return ret;
77361 }
77362@@ -739,7 +739,7 @@ int rcu_is_cpu_rrupt_from_idle(void)
77363 */
77364 static int dyntick_save_progress_counter(struct rcu_data *rdp)
77365 {
77366- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
77367+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
77368 return (rdp->dynticks_snap & 0x1) == 0;
77369 }
77370
77371@@ -754,7 +754,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
77372 unsigned int curr;
77373 unsigned int snap;
77374
77375- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
77376+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
77377 snap = (unsigned int)rdp->dynticks_snap;
77378
77379 /*
77380@@ -802,10 +802,10 @@ static int jiffies_till_stall_check(void)
77381 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
77382 */
77383 if (till_stall_check < 3) {
77384- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
77385+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
77386 till_stall_check = 3;
77387 } else if (till_stall_check > 300) {
77388- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
77389+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
77390 till_stall_check = 300;
77391 }
77392 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
77393@@ -1592,7 +1592,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
77394 rsp->qlen += rdp->qlen;
77395 rdp->n_cbs_orphaned += rdp->qlen;
77396 rdp->qlen_lazy = 0;
77397- ACCESS_ONCE(rdp->qlen) = 0;
77398+ ACCESS_ONCE_RW(rdp->qlen) = 0;
77399 }
77400
77401 /*
77402@@ -1838,7 +1838,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
77403 }
77404 smp_mb(); /* List handling before counting for rcu_barrier(). */
77405 rdp->qlen_lazy -= count_lazy;
77406- ACCESS_ONCE(rdp->qlen) -= count;
77407+ ACCESS_ONCE_RW(rdp->qlen) -= count;
77408 rdp->n_cbs_invoked += count;
77409
77410 /* Reinstate batch limit if we have worked down the excess. */
77411@@ -2031,7 +2031,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
77412 /*
77413 * Do RCU core processing for the current CPU.
77414 */
77415-static void rcu_process_callbacks(struct softirq_action *unused)
77416+static void rcu_process_callbacks(void)
77417 {
77418 struct rcu_state *rsp;
77419
77420@@ -2154,7 +2154,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
77421 local_irq_restore(flags);
77422 return;
77423 }
77424- ACCESS_ONCE(rdp->qlen)++;
77425+ ACCESS_ONCE_RW(rdp->qlen)++;
77426 if (lazy)
77427 rdp->qlen_lazy++;
77428 else
77429@@ -2363,11 +2363,11 @@ void synchronize_sched_expedited(void)
77430 * counter wrap on a 32-bit system. Quite a few more CPUs would of
77431 * course be required on a 64-bit system.
77432 */
77433- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
77434+ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
77435 (ulong)atomic_long_read(&rsp->expedited_done) +
77436 ULONG_MAX / 8)) {
77437 synchronize_sched();
77438- atomic_long_inc(&rsp->expedited_wrap);
77439+ atomic_long_inc_unchecked(&rsp->expedited_wrap);
77440 return;
77441 }
77442
77443@@ -2375,7 +2375,7 @@ void synchronize_sched_expedited(void)
77444 * Take a ticket. Note that atomic_inc_return() implies a
77445 * full memory barrier.
77446 */
77447- snap = atomic_long_inc_return(&rsp->expedited_start);
77448+ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
77449 firstsnap = snap;
77450 get_online_cpus();
77451 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
77452@@ -2388,14 +2388,14 @@ void synchronize_sched_expedited(void)
77453 synchronize_sched_expedited_cpu_stop,
77454 NULL) == -EAGAIN) {
77455 put_online_cpus();
77456- atomic_long_inc(&rsp->expedited_tryfail);
77457+ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
77458
77459 /* Check to see if someone else did our work for us. */
77460 s = atomic_long_read(&rsp->expedited_done);
77461 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
77462 /* ensure test happens before caller kfree */
77463 smp_mb__before_atomic_inc(); /* ^^^ */
77464- atomic_long_inc(&rsp->expedited_workdone1);
77465+ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
77466 return;
77467 }
77468
77469@@ -2404,7 +2404,7 @@ void synchronize_sched_expedited(void)
77470 udelay(trycount * num_online_cpus());
77471 } else {
77472 wait_rcu_gp(call_rcu_sched);
77473- atomic_long_inc(&rsp->expedited_normal);
77474+ atomic_long_inc_unchecked(&rsp->expedited_normal);
77475 return;
77476 }
77477
77478@@ -2413,7 +2413,7 @@ void synchronize_sched_expedited(void)
77479 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
77480 /* ensure test happens before caller kfree */
77481 smp_mb__before_atomic_inc(); /* ^^^ */
77482- atomic_long_inc(&rsp->expedited_workdone2);
77483+ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
77484 return;
77485 }
77486
77487@@ -2425,10 +2425,10 @@ void synchronize_sched_expedited(void)
77488 * period works for us.
77489 */
77490 get_online_cpus();
77491- snap = atomic_long_read(&rsp->expedited_start);
77492+ snap = atomic_long_read_unchecked(&rsp->expedited_start);
77493 smp_mb(); /* ensure read is before try_stop_cpus(). */
77494 }
77495- atomic_long_inc(&rsp->expedited_stoppedcpus);
77496+ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
77497
77498 /*
77499 * Everyone up to our most recent fetch is covered by our grace
77500@@ -2437,16 +2437,16 @@ void synchronize_sched_expedited(void)
77501 * than we did already did their update.
77502 */
77503 do {
77504- atomic_long_inc(&rsp->expedited_done_tries);
77505+ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
77506 s = atomic_long_read(&rsp->expedited_done);
77507 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
77508 /* ensure test happens before caller kfree */
77509 smp_mb__before_atomic_inc(); /* ^^^ */
77510- atomic_long_inc(&rsp->expedited_done_lost);
77511+ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
77512 break;
77513 }
77514 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
77515- atomic_long_inc(&rsp->expedited_done_exit);
77516+ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
77517
77518 put_online_cpus();
77519 }
77520@@ -2620,7 +2620,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
77521 * ACCESS_ONCE() to prevent the compiler from speculating
77522 * the increment to precede the early-exit check.
77523 */
77524- ACCESS_ONCE(rsp->n_barrier_done)++;
77525+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
77526 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
77527 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
77528 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
77529@@ -2670,7 +2670,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
77530
77531 /* Increment ->n_barrier_done to prevent duplicate work. */
77532 smp_mb(); /* Keep increment after above mechanism. */
77533- ACCESS_ONCE(rsp->n_barrier_done)++;
77534+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
77535 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
77536 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
77537 smp_mb(); /* Keep increment before caller's subsequent code. */
77538@@ -2715,10 +2715,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
77539 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
77540 init_callback_list(rdp);
77541 rdp->qlen_lazy = 0;
77542- ACCESS_ONCE(rdp->qlen) = 0;
77543+ ACCESS_ONCE_RW(rdp->qlen) = 0;
77544 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
77545 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
77546- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
77547+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
77548 #ifdef CONFIG_RCU_USER_QS
77549 WARN_ON_ONCE(rdp->dynticks->in_user);
77550 #endif
77551@@ -2754,8 +2754,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
77552 rdp->blimit = blimit;
77553 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
77554 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
77555- atomic_set(&rdp->dynticks->dynticks,
77556- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
77557+ atomic_set_unchecked(&rdp->dynticks->dynticks,
77558+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
77559 rcu_prepare_for_idle_init(cpu);
77560 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
77561
77562diff --git a/kernel/rcutree.h b/kernel/rcutree.h
77563index 4b69291..704c92e 100644
77564--- a/kernel/rcutree.h
77565+++ b/kernel/rcutree.h
77566@@ -86,7 +86,7 @@ struct rcu_dynticks {
77567 long long dynticks_nesting; /* Track irq/process nesting level. */
77568 /* Process level is worth LLONG_MAX/2. */
77569 int dynticks_nmi_nesting; /* Track NMI nesting level. */
77570- atomic_t dynticks; /* Even value for idle, else odd. */
77571+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
77572 #ifdef CONFIG_RCU_FAST_NO_HZ
77573 int dyntick_drain; /* Prepare-for-idle state variable. */
77574 unsigned long dyntick_holdoff;
77575@@ -423,17 +423,17 @@ struct rcu_state {
77576 /* _rcu_barrier(). */
77577 /* End of fields guarded by barrier_mutex. */
77578
77579- atomic_long_t expedited_start; /* Starting ticket. */
77580- atomic_long_t expedited_done; /* Done ticket. */
77581- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
77582- atomic_long_t expedited_tryfail; /* # acquisition failures. */
77583- atomic_long_t expedited_workdone1; /* # done by others #1. */
77584- atomic_long_t expedited_workdone2; /* # done by others #2. */
77585- atomic_long_t expedited_normal; /* # fallbacks to normal. */
77586- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
77587- atomic_long_t expedited_done_tries; /* # tries to update _done. */
77588- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
77589- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
77590+ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
77591+ atomic_long_t expedited_done; /* Done ticket. */
77592+ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
77593+ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
77594+ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
77595+ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
77596+ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
77597+ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
77598+ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
77599+ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
77600+ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
77601
77602 unsigned long jiffies_force_qs; /* Time at which to invoke */
77603 /* force_quiescent_state(). */
77604diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
77605index c1cc7e1..f62e436 100644
77606--- a/kernel/rcutree_plugin.h
77607+++ b/kernel/rcutree_plugin.h
77608@@ -892,7 +892,7 @@ void synchronize_rcu_expedited(void)
77609
77610 /* Clean up and exit. */
77611 smp_mb(); /* ensure expedited GP seen before counter increment. */
77612- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
77613+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
77614 unlock_mb_ret:
77615 mutex_unlock(&sync_rcu_preempt_exp_mutex);
77616 mb_ret:
77617@@ -1440,7 +1440,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
77618 free_cpumask_var(cm);
77619 }
77620
77621-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
77622+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
77623 .store = &rcu_cpu_kthread_task,
77624 .thread_should_run = rcu_cpu_kthread_should_run,
77625 .thread_fn = rcu_cpu_kthread,
77626@@ -2072,7 +2072,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
77627 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
77628 printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d %s\n",
77629 cpu, ticks_value, ticks_title,
77630- atomic_read(&rdtp->dynticks) & 0xfff,
77631+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
77632 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
77633 fast_no_hz);
77634 }
77635@@ -2192,7 +2192,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
77636
77637 /* Enqueue the callback on the nocb list and update counts. */
77638 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
77639- ACCESS_ONCE(*old_rhpp) = rhp;
77640+ ACCESS_ONCE_RW(*old_rhpp) = rhp;
77641 atomic_long_add(rhcount, &rdp->nocb_q_count);
77642 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
77643
77644@@ -2384,12 +2384,12 @@ static int rcu_nocb_kthread(void *arg)
77645 * Extract queued callbacks, update counts, and wait
77646 * for a grace period to elapse.
77647 */
77648- ACCESS_ONCE(rdp->nocb_head) = NULL;
77649+ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
77650 tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
77651 c = atomic_long_xchg(&rdp->nocb_q_count, 0);
77652 cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
77653- ACCESS_ONCE(rdp->nocb_p_count) += c;
77654- ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
77655+ ACCESS_ONCE_RW(rdp->nocb_p_count) += c;
77656+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) += cl;
77657 wait_rcu_gp(rdp->rsp->call_remote);
77658
77659 /* Each pass through the following loop invokes a callback. */
77660@@ -2411,8 +2411,8 @@ static int rcu_nocb_kthread(void *arg)
77661 list = next;
77662 }
77663 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
77664- ACCESS_ONCE(rdp->nocb_p_count) -= c;
77665- ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
77666+ ACCESS_ONCE_RW(rdp->nocb_p_count) -= c;
77667+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) -= cl;
77668 rdp->n_nocbs_invoked += c;
77669 }
77670 return 0;
77671@@ -2438,7 +2438,7 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
77672 rdp = per_cpu_ptr(rsp->rda, cpu);
77673 t = kthread_run(rcu_nocb_kthread, rdp, "rcuo%d", cpu);
77674 BUG_ON(IS_ERR(t));
77675- ACCESS_ONCE(rdp->nocb_kthread) = t;
77676+ ACCESS_ONCE_RW(rdp->nocb_kthread) = t;
77677 }
77678 }
77679
77680diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
77681index 0d095dc..1985b19 100644
77682--- a/kernel/rcutree_trace.c
77683+++ b/kernel/rcutree_trace.c
77684@@ -123,7 +123,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
77685 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
77686 rdp->passed_quiesce, rdp->qs_pending);
77687 seq_printf(m, " dt=%d/%llx/%d df=%lu",
77688- atomic_read(&rdp->dynticks->dynticks),
77689+ atomic_read_unchecked(&rdp->dynticks->dynticks),
77690 rdp->dynticks->dynticks_nesting,
77691 rdp->dynticks->dynticks_nmi_nesting,
77692 rdp->dynticks_fqs);
77693@@ -184,17 +184,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
77694 struct rcu_state *rsp = (struct rcu_state *)m->private;
77695
77696 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
77697- atomic_long_read(&rsp->expedited_start),
77698+ atomic_long_read_unchecked(&rsp->expedited_start),
77699 atomic_long_read(&rsp->expedited_done),
77700- atomic_long_read(&rsp->expedited_wrap),
77701- atomic_long_read(&rsp->expedited_tryfail),
77702- atomic_long_read(&rsp->expedited_workdone1),
77703- atomic_long_read(&rsp->expedited_workdone2),
77704- atomic_long_read(&rsp->expedited_normal),
77705- atomic_long_read(&rsp->expedited_stoppedcpus),
77706- atomic_long_read(&rsp->expedited_done_tries),
77707- atomic_long_read(&rsp->expedited_done_lost),
77708- atomic_long_read(&rsp->expedited_done_exit));
77709+ atomic_long_read_unchecked(&rsp->expedited_wrap),
77710+ atomic_long_read_unchecked(&rsp->expedited_tryfail),
77711+ atomic_long_read_unchecked(&rsp->expedited_workdone1),
77712+ atomic_long_read_unchecked(&rsp->expedited_workdone2),
77713+ atomic_long_read_unchecked(&rsp->expedited_normal),
77714+ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
77715+ atomic_long_read_unchecked(&rsp->expedited_done_tries),
77716+ atomic_long_read_unchecked(&rsp->expedited_done_lost),
77717+ atomic_long_read_unchecked(&rsp->expedited_done_exit));
77718 return 0;
77719 }
77720
77721diff --git a/kernel/resource.c b/kernel/resource.c
77722index 73f35d4..4684fc4 100644
77723--- a/kernel/resource.c
77724+++ b/kernel/resource.c
77725@@ -143,8 +143,18 @@ static const struct file_operations proc_iomem_operations = {
77726
77727 static int __init ioresources_init(void)
77728 {
77729+#ifdef CONFIG_GRKERNSEC_PROC_ADD
77730+#ifdef CONFIG_GRKERNSEC_PROC_USER
77731+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
77732+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
77733+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
77734+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
77735+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
77736+#endif
77737+#else
77738 proc_create("ioports", 0, NULL, &proc_ioports_operations);
77739 proc_create("iomem", 0, NULL, &proc_iomem_operations);
77740+#endif
77741 return 0;
77742 }
77743 __initcall(ioresources_init);
77744diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
77745index 98ec494..4241d6d 100644
77746--- a/kernel/rtmutex-tester.c
77747+++ b/kernel/rtmutex-tester.c
77748@@ -20,7 +20,7 @@
77749 #define MAX_RT_TEST_MUTEXES 8
77750
77751 static spinlock_t rttest_lock;
77752-static atomic_t rttest_event;
77753+static atomic_unchecked_t rttest_event;
77754
77755 struct test_thread_data {
77756 int opcode;
77757@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
77758
77759 case RTTEST_LOCKCONT:
77760 td->mutexes[td->opdata] = 1;
77761- td->event = atomic_add_return(1, &rttest_event);
77762+ td->event = atomic_add_return_unchecked(1, &rttest_event);
77763 return 0;
77764
77765 case RTTEST_RESET:
77766@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
77767 return 0;
77768
77769 case RTTEST_RESETEVENT:
77770- atomic_set(&rttest_event, 0);
77771+ atomic_set_unchecked(&rttest_event, 0);
77772 return 0;
77773
77774 default:
77775@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
77776 return ret;
77777
77778 td->mutexes[id] = 1;
77779- td->event = atomic_add_return(1, &rttest_event);
77780+ td->event = atomic_add_return_unchecked(1, &rttest_event);
77781 rt_mutex_lock(&mutexes[id]);
77782- td->event = atomic_add_return(1, &rttest_event);
77783+ td->event = atomic_add_return_unchecked(1, &rttest_event);
77784 td->mutexes[id] = 4;
77785 return 0;
77786
77787@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
77788 return ret;
77789
77790 td->mutexes[id] = 1;
77791- td->event = atomic_add_return(1, &rttest_event);
77792+ td->event = atomic_add_return_unchecked(1, &rttest_event);
77793 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
77794- td->event = atomic_add_return(1, &rttest_event);
77795+ td->event = atomic_add_return_unchecked(1, &rttest_event);
77796 td->mutexes[id] = ret ? 0 : 4;
77797 return ret ? -EINTR : 0;
77798
77799@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
77800 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
77801 return ret;
77802
77803- td->event = atomic_add_return(1, &rttest_event);
77804+ td->event = atomic_add_return_unchecked(1, &rttest_event);
77805 rt_mutex_unlock(&mutexes[id]);
77806- td->event = atomic_add_return(1, &rttest_event);
77807+ td->event = atomic_add_return_unchecked(1, &rttest_event);
77808 td->mutexes[id] = 0;
77809 return 0;
77810
77811@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
77812 break;
77813
77814 td->mutexes[dat] = 2;
77815- td->event = atomic_add_return(1, &rttest_event);
77816+ td->event = atomic_add_return_unchecked(1, &rttest_event);
77817 break;
77818
77819 default:
77820@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
77821 return;
77822
77823 td->mutexes[dat] = 3;
77824- td->event = atomic_add_return(1, &rttest_event);
77825+ td->event = atomic_add_return_unchecked(1, &rttest_event);
77826 break;
77827
77828 case RTTEST_LOCKNOWAIT:
77829@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
77830 return;
77831
77832 td->mutexes[dat] = 1;
77833- td->event = atomic_add_return(1, &rttest_event);
77834+ td->event = atomic_add_return_unchecked(1, &rttest_event);
77835 return;
77836
77837 default:
77838diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
77839index 0984a21..939f183 100644
77840--- a/kernel/sched/auto_group.c
77841+++ b/kernel/sched/auto_group.c
77842@@ -11,7 +11,7 @@
77843
77844 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
77845 static struct autogroup autogroup_default;
77846-static atomic_t autogroup_seq_nr;
77847+static atomic_unchecked_t autogroup_seq_nr;
77848
77849 void __init autogroup_init(struct task_struct *init_task)
77850 {
77851@@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
77852
77853 kref_init(&ag->kref);
77854 init_rwsem(&ag->lock);
77855- ag->id = atomic_inc_return(&autogroup_seq_nr);
77856+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
77857 ag->tg = tg;
77858 #ifdef CONFIG_RT_GROUP_SCHED
77859 /*
77860diff --git a/kernel/sched/core.c b/kernel/sched/core.c
77861index 26058d0..e315889 100644
77862--- a/kernel/sched/core.c
77863+++ b/kernel/sched/core.c
77864@@ -3367,7 +3367,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
77865 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
77866 * positive (at least 1, or number of jiffies left till timeout) if completed.
77867 */
77868-long __sched
77869+long __sched __intentional_overflow(-1)
77870 wait_for_completion_interruptible_timeout(struct completion *x,
77871 unsigned long timeout)
77872 {
77873@@ -3384,7 +3384,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
77874 *
77875 * The return value is -ERESTARTSYS if interrupted, 0 if completed.
77876 */
77877-int __sched wait_for_completion_killable(struct completion *x)
77878+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
77879 {
77880 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
77881 if (t == -ERESTARTSYS)
77882@@ -3405,7 +3405,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
77883 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
77884 * positive (at least 1, or number of jiffies left till timeout) if completed.
77885 */
77886-long __sched
77887+long __sched __intentional_overflow(-1)
77888 wait_for_completion_killable_timeout(struct completion *x,
77889 unsigned long timeout)
77890 {
77891@@ -3631,6 +3631,8 @@ int can_nice(const struct task_struct *p, const int nice)
77892 /* convert nice value [19,-20] to rlimit style value [1,40] */
77893 int nice_rlim = 20 - nice;
77894
77895+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
77896+
77897 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
77898 capable(CAP_SYS_NICE));
77899 }
77900@@ -3664,7 +3666,8 @@ SYSCALL_DEFINE1(nice, int, increment)
77901 if (nice > 19)
77902 nice = 19;
77903
77904- if (increment < 0 && !can_nice(current, nice))
77905+ if (increment < 0 && (!can_nice(current, nice) ||
77906+ gr_handle_chroot_nice()))
77907 return -EPERM;
77908
77909 retval = security_task_setnice(current, nice);
77910@@ -3818,6 +3821,7 @@ recheck:
77911 unsigned long rlim_rtprio =
77912 task_rlimit(p, RLIMIT_RTPRIO);
77913
77914+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
77915 /* can't set/change the rt policy */
77916 if (policy != p->policy && !rlim_rtprio)
77917 return -EPERM;
77918@@ -4901,7 +4905,7 @@ static void migrate_tasks(unsigned int dead_cpu)
77919
77920 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
77921
77922-static struct ctl_table sd_ctl_dir[] = {
77923+static ctl_table_no_const sd_ctl_dir[] __read_only = {
77924 {
77925 .procname = "sched_domain",
77926 .mode = 0555,
77927@@ -4918,17 +4922,17 @@ static struct ctl_table sd_ctl_root[] = {
77928 {}
77929 };
77930
77931-static struct ctl_table *sd_alloc_ctl_entry(int n)
77932+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
77933 {
77934- struct ctl_table *entry =
77935+ ctl_table_no_const *entry =
77936 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
77937
77938 return entry;
77939 }
77940
77941-static void sd_free_ctl_entry(struct ctl_table **tablep)
77942+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
77943 {
77944- struct ctl_table *entry;
77945+ ctl_table_no_const *entry;
77946
77947 /*
77948 * In the intermediate directories, both the child directory and
77949@@ -4936,22 +4940,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
77950 * will always be set. In the lowest directory the names are
77951 * static strings and all have proc handlers.
77952 */
77953- for (entry = *tablep; entry->mode; entry++) {
77954- if (entry->child)
77955- sd_free_ctl_entry(&entry->child);
77956+ for (entry = tablep; entry->mode; entry++) {
77957+ if (entry->child) {
77958+ sd_free_ctl_entry(entry->child);
77959+ pax_open_kernel();
77960+ entry->child = NULL;
77961+ pax_close_kernel();
77962+ }
77963 if (entry->proc_handler == NULL)
77964 kfree(entry->procname);
77965 }
77966
77967- kfree(*tablep);
77968- *tablep = NULL;
77969+ kfree(tablep);
77970 }
77971
77972 static int min_load_idx = 0;
77973 static int max_load_idx = CPU_LOAD_IDX_MAX;
77974
77975 static void
77976-set_table_entry(struct ctl_table *entry,
77977+set_table_entry(ctl_table_no_const *entry,
77978 const char *procname, void *data, int maxlen,
77979 umode_t mode, proc_handler *proc_handler,
77980 bool load_idx)
77981@@ -4971,7 +4978,7 @@ set_table_entry(struct ctl_table *entry,
77982 static struct ctl_table *
77983 sd_alloc_ctl_domain_table(struct sched_domain *sd)
77984 {
77985- struct ctl_table *table = sd_alloc_ctl_entry(13);
77986+ ctl_table_no_const *table = sd_alloc_ctl_entry(13);
77987
77988 if (table == NULL)
77989 return NULL;
77990@@ -5006,9 +5013,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
77991 return table;
77992 }
77993
77994-static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
77995+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
77996 {
77997- struct ctl_table *entry, *table;
77998+ ctl_table_no_const *entry, *table;
77999 struct sched_domain *sd;
78000 int domain_num = 0, i;
78001 char buf[32];
78002@@ -5035,11 +5042,13 @@ static struct ctl_table_header *sd_sysctl_header;
78003 static void register_sched_domain_sysctl(void)
78004 {
78005 int i, cpu_num = num_possible_cpus();
78006- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
78007+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
78008 char buf[32];
78009
78010 WARN_ON(sd_ctl_dir[0].child);
78011+ pax_open_kernel();
78012 sd_ctl_dir[0].child = entry;
78013+ pax_close_kernel();
78014
78015 if (entry == NULL)
78016 return;
78017@@ -5062,8 +5071,12 @@ static void unregister_sched_domain_sysctl(void)
78018 if (sd_sysctl_header)
78019 unregister_sysctl_table(sd_sysctl_header);
78020 sd_sysctl_header = NULL;
78021- if (sd_ctl_dir[0].child)
78022- sd_free_ctl_entry(&sd_ctl_dir[0].child);
78023+ if (sd_ctl_dir[0].child) {
78024+ sd_free_ctl_entry(sd_ctl_dir[0].child);
78025+ pax_open_kernel();
78026+ sd_ctl_dir[0].child = NULL;
78027+ pax_close_kernel();
78028+ }
78029 }
78030 #else
78031 static void register_sched_domain_sysctl(void)
78032@@ -5162,7 +5175,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
78033 * happens before everything else. This has to be lower priority than
78034 * the notifier in the perf_event subsystem, though.
78035 */
78036-static struct notifier_block __cpuinitdata migration_notifier = {
78037+static struct notifier_block migration_notifier = {
78038 .notifier_call = migration_call,
78039 .priority = CPU_PRI_MIGRATION,
78040 };
78041diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
78042index 81fa536..6ccf96a 100644
78043--- a/kernel/sched/fair.c
78044+++ b/kernel/sched/fair.c
78045@@ -830,7 +830,7 @@ void task_numa_fault(int node, int pages, bool migrated)
78046
78047 static void reset_ptenuma_scan(struct task_struct *p)
78048 {
78049- ACCESS_ONCE(p->mm->numa_scan_seq)++;
78050+ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
78051 p->mm->numa_scan_offset = 0;
78052 }
78053
78054@@ -3254,25 +3254,18 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
78055 */
78056 static int select_idle_sibling(struct task_struct *p, int target)
78057 {
78058- int cpu = smp_processor_id();
78059- int prev_cpu = task_cpu(p);
78060 struct sched_domain *sd;
78061 struct sched_group *sg;
78062- int i;
78063+ int i = task_cpu(p);
78064
78065- /*
78066- * If the task is going to be woken-up on this cpu and if it is
78067- * already idle, then it is the right target.
78068- */
78069- if (target == cpu && idle_cpu(cpu))
78070- return cpu;
78071+ if (idle_cpu(target))
78072+ return target;
78073
78074 /*
78075- * If the task is going to be woken-up on the cpu where it previously
78076- * ran and if it is currently idle, then it the right target.
78077+ * If the prevous cpu is cache affine and idle, don't be stupid.
78078 */
78079- if (target == prev_cpu && idle_cpu(prev_cpu))
78080- return prev_cpu;
78081+ if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
78082+ return i;
78083
78084 /*
78085 * Otherwise, iterate the domains and find an elegible idle cpu.
78086@@ -3286,7 +3279,7 @@ static int select_idle_sibling(struct task_struct *p, int target)
78087 goto next;
78088
78089 for_each_cpu(i, sched_group_cpus(sg)) {
78090- if (!idle_cpu(i))
78091+ if (i == target || !idle_cpu(i))
78092 goto next;
78093 }
78094
78095@@ -5663,7 +5656,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
78096 * run_rebalance_domains is triggered when needed from the scheduler tick.
78097 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
78098 */
78099-static void run_rebalance_domains(struct softirq_action *h)
78100+static void run_rebalance_domains(void)
78101 {
78102 int this_cpu = smp_processor_id();
78103 struct rq *this_rq = cpu_rq(this_cpu);
78104diff --git a/kernel/signal.c b/kernel/signal.c
78105index dec9c30..d1da15b 100644
78106--- a/kernel/signal.c
78107+++ b/kernel/signal.c
78108@@ -50,12 +50,12 @@ static struct kmem_cache *sigqueue_cachep;
78109
78110 int print_fatal_signals __read_mostly;
78111
78112-static void __user *sig_handler(struct task_struct *t, int sig)
78113+static __sighandler_t sig_handler(struct task_struct *t, int sig)
78114 {
78115 return t->sighand->action[sig - 1].sa.sa_handler;
78116 }
78117
78118-static int sig_handler_ignored(void __user *handler, int sig)
78119+static int sig_handler_ignored(__sighandler_t handler, int sig)
78120 {
78121 /* Is it explicitly or implicitly ignored? */
78122 return handler == SIG_IGN ||
78123@@ -64,7 +64,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
78124
78125 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
78126 {
78127- void __user *handler;
78128+ __sighandler_t handler;
78129
78130 handler = sig_handler(t, sig);
78131
78132@@ -368,6 +368,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
78133 atomic_inc(&user->sigpending);
78134 rcu_read_unlock();
78135
78136+ if (!override_rlimit)
78137+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
78138+
78139 if (override_rlimit ||
78140 atomic_read(&user->sigpending) <=
78141 task_rlimit(t, RLIMIT_SIGPENDING)) {
78142@@ -495,7 +498,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
78143
78144 int unhandled_signal(struct task_struct *tsk, int sig)
78145 {
78146- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
78147+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
78148 if (is_global_init(tsk))
78149 return 1;
78150 if (handler != SIG_IGN && handler != SIG_DFL)
78151@@ -815,6 +818,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
78152 }
78153 }
78154
78155+ /* allow glibc communication via tgkill to other threads in our
78156+ thread group */
78157+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
78158+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
78159+ && gr_handle_signal(t, sig))
78160+ return -EPERM;
78161+
78162 return security_task_kill(t, info, sig, 0);
78163 }
78164
78165@@ -1197,7 +1207,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
78166 return send_signal(sig, info, p, 1);
78167 }
78168
78169-static int
78170+int
78171 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
78172 {
78173 return send_signal(sig, info, t, 0);
78174@@ -1234,6 +1244,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
78175 unsigned long int flags;
78176 int ret, blocked, ignored;
78177 struct k_sigaction *action;
78178+ int is_unhandled = 0;
78179
78180 spin_lock_irqsave(&t->sighand->siglock, flags);
78181 action = &t->sighand->action[sig-1];
78182@@ -1248,9 +1259,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
78183 }
78184 if (action->sa.sa_handler == SIG_DFL)
78185 t->signal->flags &= ~SIGNAL_UNKILLABLE;
78186+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
78187+ is_unhandled = 1;
78188 ret = specific_send_sig_info(sig, info, t);
78189 spin_unlock_irqrestore(&t->sighand->siglock, flags);
78190
78191+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
78192+ normal operation */
78193+ if (is_unhandled) {
78194+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
78195+ gr_handle_crash(t, sig);
78196+ }
78197+
78198 return ret;
78199 }
78200
78201@@ -1317,8 +1337,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
78202 ret = check_kill_permission(sig, info, p);
78203 rcu_read_unlock();
78204
78205- if (!ret && sig)
78206+ if (!ret && sig) {
78207 ret = do_send_sig_info(sig, info, p, true);
78208+ if (!ret)
78209+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
78210+ }
78211
78212 return ret;
78213 }
78214@@ -2855,7 +2878,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
78215 int error = -ESRCH;
78216
78217 rcu_read_lock();
78218- p = find_task_by_vpid(pid);
78219+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
78220+ /* allow glibc communication via tgkill to other threads in our
78221+ thread group */
78222+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
78223+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
78224+ p = find_task_by_vpid_unrestricted(pid);
78225+ else
78226+#endif
78227+ p = find_task_by_vpid(pid);
78228 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
78229 error = check_kill_permission(sig, info, p);
78230 /*
78231@@ -3138,8 +3169,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
78232 }
78233 seg = get_fs();
78234 set_fs(KERNEL_DS);
78235- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
78236- (stack_t __force __user *) &uoss,
78237+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
78238+ (stack_t __force_user *) &uoss,
78239 compat_user_stack_pointer());
78240 set_fs(seg);
78241 if (ret >= 0 && uoss_ptr) {
78242diff --git a/kernel/smp.c b/kernel/smp.c
78243index 69f38bd..77bbf12 100644
78244--- a/kernel/smp.c
78245+++ b/kernel/smp.c
78246@@ -77,7 +77,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
78247 return NOTIFY_OK;
78248 }
78249
78250-static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
78251+static struct notifier_block hotplug_cfd_notifier = {
78252 .notifier_call = hotplug_cfd,
78253 };
78254
78255diff --git a/kernel/smpboot.c b/kernel/smpboot.c
78256index d6c5fc0..530560c 100644
78257--- a/kernel/smpboot.c
78258+++ b/kernel/smpboot.c
78259@@ -275,7 +275,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
78260 }
78261 smpboot_unpark_thread(plug_thread, cpu);
78262 }
78263- list_add(&plug_thread->list, &hotplug_threads);
78264+ pax_list_add(&plug_thread->list, &hotplug_threads);
78265 out:
78266 mutex_unlock(&smpboot_threads_lock);
78267 return ret;
78268@@ -292,7 +292,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
78269 {
78270 get_online_cpus();
78271 mutex_lock(&smpboot_threads_lock);
78272- list_del(&plug_thread->list);
78273+ pax_list_del(&plug_thread->list);
78274 smpboot_destroy_threads(plug_thread);
78275 mutex_unlock(&smpboot_threads_lock);
78276 put_online_cpus();
78277diff --git a/kernel/softirq.c b/kernel/softirq.c
78278index ed567ba..e71dabf 100644
78279--- a/kernel/softirq.c
78280+++ b/kernel/softirq.c
78281@@ -53,11 +53,11 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
78282 EXPORT_SYMBOL(irq_stat);
78283 #endif
78284
78285-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
78286+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
78287
78288 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
78289
78290-char *softirq_to_name[NR_SOFTIRQS] = {
78291+const char * const softirq_to_name[NR_SOFTIRQS] = {
78292 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
78293 "TASKLET", "SCHED", "HRTIMER", "RCU"
78294 };
78295@@ -244,7 +244,7 @@ restart:
78296 kstat_incr_softirqs_this_cpu(vec_nr);
78297
78298 trace_softirq_entry(vec_nr);
78299- h->action(h);
78300+ h->action();
78301 trace_softirq_exit(vec_nr);
78302 if (unlikely(prev_count != preempt_count())) {
78303 printk(KERN_ERR "huh, entered softirq %u %s %p"
78304@@ -391,7 +391,7 @@ void __raise_softirq_irqoff(unsigned int nr)
78305 or_softirq_pending(1UL << nr);
78306 }
78307
78308-void open_softirq(int nr, void (*action)(struct softirq_action *))
78309+void __init open_softirq(int nr, void (*action)(void))
78310 {
78311 softirq_vec[nr].action = action;
78312 }
78313@@ -447,7 +447,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
78314
78315 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
78316
78317-static void tasklet_action(struct softirq_action *a)
78318+static void tasklet_action(void)
78319 {
78320 struct tasklet_struct *list;
78321
78322@@ -482,7 +482,7 @@ static void tasklet_action(struct softirq_action *a)
78323 }
78324 }
78325
78326-static void tasklet_hi_action(struct softirq_action *a)
78327+static void tasklet_hi_action(void)
78328 {
78329 struct tasklet_struct *list;
78330
78331@@ -718,7 +718,7 @@ static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
78332 return NOTIFY_OK;
78333 }
78334
78335-static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
78336+static struct notifier_block remote_softirq_cpu_notifier = {
78337 .notifier_call = remote_softirq_cpu_notify,
78338 };
78339
78340@@ -835,11 +835,11 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
78341 return NOTIFY_OK;
78342 }
78343
78344-static struct notifier_block __cpuinitdata cpu_nfb = {
78345+static struct notifier_block cpu_nfb = {
78346 .notifier_call = cpu_callback
78347 };
78348
78349-static struct smp_hotplug_thread softirq_threads = {
78350+static struct smp_hotplug_thread softirq_threads __read_only = {
78351 .store = &ksoftirqd,
78352 .thread_should_run = ksoftirqd_should_run,
78353 .thread_fn = run_ksoftirqd,
78354diff --git a/kernel/srcu.c b/kernel/srcu.c
78355index 2b85982..d52ab26 100644
78356--- a/kernel/srcu.c
78357+++ b/kernel/srcu.c
78358@@ -305,9 +305,9 @@ int __srcu_read_lock(struct srcu_struct *sp)
78359 preempt_disable();
78360 idx = rcu_dereference_index_check(sp->completed,
78361 rcu_read_lock_sched_held()) & 0x1;
78362- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
78363+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
78364 smp_mb(); /* B */ /* Avoid leaking the critical section. */
78365- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
78366+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
78367 preempt_enable();
78368 return idx;
78369 }
78370@@ -323,7 +323,7 @@ void __srcu_read_unlock(struct srcu_struct *sp, int idx)
78371 {
78372 preempt_disable();
78373 smp_mb(); /* C */ /* Avoid leaking the critical section. */
78374- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) -= 1;
78375+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) -= 1;
78376 preempt_enable();
78377 }
78378 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
78379diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
78380index 2f194e9..2c05ea9 100644
78381--- a/kernel/stop_machine.c
78382+++ b/kernel/stop_machine.c
78383@@ -362,7 +362,7 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
78384 * cpu notifiers. It currently shares the same priority as sched
78385 * migration_notifier.
78386 */
78387-static struct notifier_block __cpuinitdata cpu_stop_cpu_notifier = {
78388+static struct notifier_block cpu_stop_cpu_notifier = {
78389 .notifier_call = cpu_stop_cpu_callback,
78390 .priority = 10,
78391 };
78392diff --git a/kernel/sys.c b/kernel/sys.c
78393index 265b376..4e42ef5 100644
78394--- a/kernel/sys.c
78395+++ b/kernel/sys.c
78396@@ -157,6 +157,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
78397 error = -EACCES;
78398 goto out;
78399 }
78400+
78401+ if (gr_handle_chroot_setpriority(p, niceval)) {
78402+ error = -EACCES;
78403+ goto out;
78404+ }
78405+
78406 no_nice = security_task_setnice(p, niceval);
78407 if (no_nice) {
78408 error = no_nice;
78409@@ -595,6 +601,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
78410 goto error;
78411 }
78412
78413+ if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
78414+ goto error;
78415+
78416 if (rgid != (gid_t) -1 ||
78417 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
78418 new->sgid = new->egid;
78419@@ -630,6 +639,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
78420 old = current_cred();
78421
78422 retval = -EPERM;
78423+
78424+ if (gr_check_group_change(kgid, kgid, kgid))
78425+ goto error;
78426+
78427 if (nsown_capable(CAP_SETGID))
78428 new->gid = new->egid = new->sgid = new->fsgid = kgid;
78429 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
78430@@ -647,7 +660,7 @@ error:
78431 /*
78432 * change the user struct in a credentials set to match the new UID
78433 */
78434-static int set_user(struct cred *new)
78435+int set_user(struct cred *new)
78436 {
78437 struct user_struct *new_user;
78438
78439@@ -727,6 +740,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
78440 goto error;
78441 }
78442
78443+ if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
78444+ goto error;
78445+
78446 if (!uid_eq(new->uid, old->uid)) {
78447 retval = set_user(new);
78448 if (retval < 0)
78449@@ -777,6 +793,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
78450 old = current_cred();
78451
78452 retval = -EPERM;
78453+
78454+ if (gr_check_crash_uid(kuid))
78455+ goto error;
78456+ if (gr_check_user_change(kuid, kuid, kuid))
78457+ goto error;
78458+
78459 if (nsown_capable(CAP_SETUID)) {
78460 new->suid = new->uid = kuid;
78461 if (!uid_eq(kuid, old->uid)) {
78462@@ -846,6 +868,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
78463 goto error;
78464 }
78465
78466+ if (gr_check_user_change(kruid, keuid, INVALID_UID))
78467+ goto error;
78468+
78469 if (ruid != (uid_t) -1) {
78470 new->uid = kruid;
78471 if (!uid_eq(kruid, old->uid)) {
78472@@ -928,6 +953,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
78473 goto error;
78474 }
78475
78476+ if (gr_check_group_change(krgid, kegid, INVALID_GID))
78477+ goto error;
78478+
78479 if (rgid != (gid_t) -1)
78480 new->gid = krgid;
78481 if (egid != (gid_t) -1)
78482@@ -981,6 +1009,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
78483 if (!uid_valid(kuid))
78484 return old_fsuid;
78485
78486+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
78487+ goto error;
78488+
78489 new = prepare_creds();
78490 if (!new)
78491 return old_fsuid;
78492@@ -995,6 +1026,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
78493 }
78494 }
78495
78496+error:
78497 abort_creds(new);
78498 return old_fsuid;
78499
78500@@ -1027,12 +1059,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
78501 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
78502 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
78503 nsown_capable(CAP_SETGID)) {
78504+ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
78505+ goto error;
78506+
78507 if (!gid_eq(kgid, old->fsgid)) {
78508 new->fsgid = kgid;
78509 goto change_okay;
78510 }
78511 }
78512
78513+error:
78514 abort_creds(new);
78515 return old_fsgid;
78516
78517@@ -1340,19 +1376,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
78518 return -EFAULT;
78519
78520 down_read(&uts_sem);
78521- error = __copy_to_user(&name->sysname, &utsname()->sysname,
78522+ error = __copy_to_user(name->sysname, &utsname()->sysname,
78523 __OLD_UTS_LEN);
78524 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
78525- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
78526+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
78527 __OLD_UTS_LEN);
78528 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
78529- error |= __copy_to_user(&name->release, &utsname()->release,
78530+ error |= __copy_to_user(name->release, &utsname()->release,
78531 __OLD_UTS_LEN);
78532 error |= __put_user(0, name->release + __OLD_UTS_LEN);
78533- error |= __copy_to_user(&name->version, &utsname()->version,
78534+ error |= __copy_to_user(name->version, &utsname()->version,
78535 __OLD_UTS_LEN);
78536 error |= __put_user(0, name->version + __OLD_UTS_LEN);
78537- error |= __copy_to_user(&name->machine, &utsname()->machine,
78538+ error |= __copy_to_user(name->machine, &utsname()->machine,
78539 __OLD_UTS_LEN);
78540 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
78541 up_read(&uts_sem);
78542@@ -2026,7 +2062,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
78543 error = get_dumpable(me->mm);
78544 break;
78545 case PR_SET_DUMPABLE:
78546- if (arg2 < 0 || arg2 > 1) {
78547+ if (arg2 > 1) {
78548 error = -EINVAL;
78549 break;
78550 }
78551diff --git a/kernel/sysctl.c b/kernel/sysctl.c
78552index c88878d..e4fa5d1 100644
78553--- a/kernel/sysctl.c
78554+++ b/kernel/sysctl.c
78555@@ -92,7 +92,6 @@
78556
78557
78558 #if defined(CONFIG_SYSCTL)
78559-
78560 /* External variables not in a header file. */
78561 extern int sysctl_overcommit_memory;
78562 extern int sysctl_overcommit_ratio;
78563@@ -172,10 +171,8 @@ static int proc_taint(struct ctl_table *table, int write,
78564 void __user *buffer, size_t *lenp, loff_t *ppos);
78565 #endif
78566
78567-#ifdef CONFIG_PRINTK
78568 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
78569 void __user *buffer, size_t *lenp, loff_t *ppos);
78570-#endif
78571
78572 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
78573 void __user *buffer, size_t *lenp, loff_t *ppos);
78574@@ -206,6 +203,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
78575
78576 #endif
78577
78578+extern struct ctl_table grsecurity_table[];
78579+
78580 static struct ctl_table kern_table[];
78581 static struct ctl_table vm_table[];
78582 static struct ctl_table fs_table[];
78583@@ -220,6 +219,20 @@ extern struct ctl_table epoll_table[];
78584 int sysctl_legacy_va_layout;
78585 #endif
78586
78587+#ifdef CONFIG_PAX_SOFTMODE
78588+static ctl_table pax_table[] = {
78589+ {
78590+ .procname = "softmode",
78591+ .data = &pax_softmode,
78592+ .maxlen = sizeof(unsigned int),
78593+ .mode = 0600,
78594+ .proc_handler = &proc_dointvec,
78595+ },
78596+
78597+ { }
78598+};
78599+#endif
78600+
78601 /* The default sysctl tables: */
78602
78603 static struct ctl_table sysctl_base_table[] = {
78604@@ -268,6 +281,22 @@ static int max_extfrag_threshold = 1000;
78605 #endif
78606
78607 static struct ctl_table kern_table[] = {
78608+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
78609+ {
78610+ .procname = "grsecurity",
78611+ .mode = 0500,
78612+ .child = grsecurity_table,
78613+ },
78614+#endif
78615+
78616+#ifdef CONFIG_PAX_SOFTMODE
78617+ {
78618+ .procname = "pax",
78619+ .mode = 0500,
78620+ .child = pax_table,
78621+ },
78622+#endif
78623+
78624 {
78625 .procname = "sched_child_runs_first",
78626 .data = &sysctl_sched_child_runs_first,
78627@@ -593,7 +622,7 @@ static struct ctl_table kern_table[] = {
78628 .data = &modprobe_path,
78629 .maxlen = KMOD_PATH_LEN,
78630 .mode = 0644,
78631- .proc_handler = proc_dostring,
78632+ .proc_handler = proc_dostring_modpriv,
78633 },
78634 {
78635 .procname = "modules_disabled",
78636@@ -760,16 +789,20 @@ static struct ctl_table kern_table[] = {
78637 .extra1 = &zero,
78638 .extra2 = &one,
78639 },
78640+#endif
78641 {
78642 .procname = "kptr_restrict",
78643 .data = &kptr_restrict,
78644 .maxlen = sizeof(int),
78645 .mode = 0644,
78646 .proc_handler = proc_dointvec_minmax_sysadmin,
78647+#ifdef CONFIG_GRKERNSEC_HIDESYM
78648+ .extra1 = &two,
78649+#else
78650 .extra1 = &zero,
78651+#endif
78652 .extra2 = &two,
78653 },
78654-#endif
78655 {
78656 .procname = "ngroups_max",
78657 .data = &ngroups_max,
78658@@ -1266,6 +1299,13 @@ static struct ctl_table vm_table[] = {
78659 .proc_handler = proc_dointvec_minmax,
78660 .extra1 = &zero,
78661 },
78662+ {
78663+ .procname = "heap_stack_gap",
78664+ .data = &sysctl_heap_stack_gap,
78665+ .maxlen = sizeof(sysctl_heap_stack_gap),
78666+ .mode = 0644,
78667+ .proc_handler = proc_doulongvec_minmax,
78668+ },
78669 #else
78670 {
78671 .procname = "nr_trim_pages",
78672@@ -1716,6 +1756,16 @@ int proc_dostring(struct ctl_table *table, int write,
78673 buffer, lenp, ppos);
78674 }
78675
78676+int proc_dostring_modpriv(struct ctl_table *table, int write,
78677+ void __user *buffer, size_t *lenp, loff_t *ppos)
78678+{
78679+ if (write && !capable(CAP_SYS_MODULE))
78680+ return -EPERM;
78681+
78682+ return _proc_do_string(table->data, table->maxlen, write,
78683+ buffer, lenp, ppos);
78684+}
78685+
78686 static size_t proc_skip_spaces(char **buf)
78687 {
78688 size_t ret;
78689@@ -1821,6 +1871,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
78690 len = strlen(tmp);
78691 if (len > *size)
78692 len = *size;
78693+ if (len > sizeof(tmp))
78694+ len = sizeof(tmp);
78695 if (copy_to_user(*buf, tmp, len))
78696 return -EFAULT;
78697 *size -= len;
78698@@ -1985,7 +2037,7 @@ int proc_dointvec(struct ctl_table *table, int write,
78699 static int proc_taint(struct ctl_table *table, int write,
78700 void __user *buffer, size_t *lenp, loff_t *ppos)
78701 {
78702- struct ctl_table t;
78703+ ctl_table_no_const t;
78704 unsigned long tmptaint = get_taint();
78705 int err;
78706
78707@@ -2013,7 +2065,6 @@ static int proc_taint(struct ctl_table *table, int write,
78708 return err;
78709 }
78710
78711-#ifdef CONFIG_PRINTK
78712 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
78713 void __user *buffer, size_t *lenp, loff_t *ppos)
78714 {
78715@@ -2022,7 +2073,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
78716
78717 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
78718 }
78719-#endif
78720
78721 struct do_proc_dointvec_minmax_conv_param {
78722 int *min;
78723@@ -2169,8 +2219,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
78724 *i = val;
78725 } else {
78726 val = convdiv * (*i) / convmul;
78727- if (!first)
78728+ if (!first) {
78729 err = proc_put_char(&buffer, &left, '\t');
78730+ if (err)
78731+ break;
78732+ }
78733 err = proc_put_long(&buffer, &left, val, false);
78734 if (err)
78735 break;
78736@@ -2562,6 +2615,12 @@ int proc_dostring(struct ctl_table *table, int write,
78737 return -ENOSYS;
78738 }
78739
78740+int proc_dostring_modpriv(struct ctl_table *table, int write,
78741+ void __user *buffer, size_t *lenp, loff_t *ppos)
78742+{
78743+ return -ENOSYS;
78744+}
78745+
78746 int proc_dointvec(struct ctl_table *table, int write,
78747 void __user *buffer, size_t *lenp, loff_t *ppos)
78748 {
78749@@ -2618,5 +2677,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
78750 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
78751 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
78752 EXPORT_SYMBOL(proc_dostring);
78753+EXPORT_SYMBOL(proc_dostring_modpriv);
78754 EXPORT_SYMBOL(proc_doulongvec_minmax);
78755 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
78756diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
78757index 0ddf3a0..a199f50 100644
78758--- a/kernel/sysctl_binary.c
78759+++ b/kernel/sysctl_binary.c
78760@@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
78761 int i;
78762
78763 set_fs(KERNEL_DS);
78764- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
78765+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
78766 set_fs(old_fs);
78767 if (result < 0)
78768 goto out_kfree;
78769@@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
78770 }
78771
78772 set_fs(KERNEL_DS);
78773- result = vfs_write(file, buffer, str - buffer, &pos);
78774+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
78775 set_fs(old_fs);
78776 if (result < 0)
78777 goto out_kfree;
78778@@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
78779 int i;
78780
78781 set_fs(KERNEL_DS);
78782- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
78783+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
78784 set_fs(old_fs);
78785 if (result < 0)
78786 goto out_kfree;
78787@@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
78788 }
78789
78790 set_fs(KERNEL_DS);
78791- result = vfs_write(file, buffer, str - buffer, &pos);
78792+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
78793 set_fs(old_fs);
78794 if (result < 0)
78795 goto out_kfree;
78796@@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
78797 int i;
78798
78799 set_fs(KERNEL_DS);
78800- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
78801+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
78802 set_fs(old_fs);
78803 if (result < 0)
78804 goto out;
78805@@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
78806 __le16 dnaddr;
78807
78808 set_fs(KERNEL_DS);
78809- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
78810+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
78811 set_fs(old_fs);
78812 if (result < 0)
78813 goto out;
78814@@ -1234,7 +1234,7 @@ static ssize_t bin_dn_node_address(struct file *file,
78815 le16_to_cpu(dnaddr) & 0x3ff);
78816
78817 set_fs(KERNEL_DS);
78818- result = vfs_write(file, buf, len, &pos);
78819+ result = vfs_write(file, (const char __force_user *)buf, len, &pos);
78820 set_fs(old_fs);
78821 if (result < 0)
78822 goto out;
78823diff --git a/kernel/taskstats.c b/kernel/taskstats.c
78824index 145bb4d..b2aa969 100644
78825--- a/kernel/taskstats.c
78826+++ b/kernel/taskstats.c
78827@@ -28,9 +28,12 @@
78828 #include <linux/fs.h>
78829 #include <linux/file.h>
78830 #include <linux/pid_namespace.h>
78831+#include <linux/grsecurity.h>
78832 #include <net/genetlink.h>
78833 #include <linux/atomic.h>
78834
78835+extern int gr_is_taskstats_denied(int pid);
78836+
78837 /*
78838 * Maximum length of a cpumask that can be specified in
78839 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
78840@@ -570,6 +573,9 @@ err:
78841
78842 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
78843 {
78844+ if (gr_is_taskstats_denied(current->pid))
78845+ return -EACCES;
78846+
78847 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
78848 return cmd_attr_register_cpumask(info);
78849 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
78850diff --git a/kernel/time.c b/kernel/time.c
78851index d226c6a..2f0d217 100644
78852--- a/kernel/time.c
78853+++ b/kernel/time.c
78854@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
78855 return error;
78856
78857 if (tz) {
78858+ /* we log in do_settimeofday called below, so don't log twice
78859+ */
78860+ if (!tv)
78861+ gr_log_timechange();
78862+
78863 sys_tz = *tz;
78864 update_vsyscall_tz();
78865 if (firsttime) {
78866@@ -493,7 +498,7 @@ EXPORT_SYMBOL(usecs_to_jiffies);
78867 * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
78868 * value to a scaled second value.
78869 */
78870-unsigned long
78871+unsigned long __intentional_overflow(-1)
78872 timespec_to_jiffies(const struct timespec *value)
78873 {
78874 unsigned long sec = value->tv_sec;
78875diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
78876index f11d83b..d016d91 100644
78877--- a/kernel/time/alarmtimer.c
78878+++ b/kernel/time/alarmtimer.c
78879@@ -750,7 +750,7 @@ static int __init alarmtimer_init(void)
78880 struct platform_device *pdev;
78881 int error = 0;
78882 int i;
78883- struct k_clock alarm_clock = {
78884+ static struct k_clock alarm_clock = {
78885 .clock_getres = alarm_clock_getres,
78886 .clock_get = alarm_clock_get,
78887 .timer_create = alarm_timer_create,
78888diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
78889index a13987a..36cd791 100644
78890--- a/kernel/time/tick-broadcast.c
78891+++ b/kernel/time/tick-broadcast.c
78892@@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
78893 * then clear the broadcast bit.
78894 */
78895 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
78896- int cpu = smp_processor_id();
78897+ cpu = smp_processor_id();
78898
78899 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
78900 tick_broadcast_clear_oneshot(cpu);
78901diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
78902index cbc6acb..3a77191 100644
78903--- a/kernel/time/timekeeping.c
78904+++ b/kernel/time/timekeeping.c
78905@@ -15,6 +15,7 @@
78906 #include <linux/init.h>
78907 #include <linux/mm.h>
78908 #include <linux/sched.h>
78909+#include <linux/grsecurity.h>
78910 #include <linux/syscore_ops.h>
78911 #include <linux/clocksource.h>
78912 #include <linux/jiffies.h>
78913@@ -412,6 +413,8 @@ int do_settimeofday(const struct timespec *tv)
78914 if (!timespec_valid_strict(tv))
78915 return -EINVAL;
78916
78917+ gr_log_timechange();
78918+
78919 write_seqlock_irqsave(&tk->lock, flags);
78920
78921 timekeeping_forward_now(tk);
78922diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
78923index af5a7e9..715611a 100644
78924--- a/kernel/time/timer_list.c
78925+++ b/kernel/time/timer_list.c
78926@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
78927
78928 static void print_name_offset(struct seq_file *m, void *sym)
78929 {
78930+#ifdef CONFIG_GRKERNSEC_HIDESYM
78931+ SEQ_printf(m, "<%p>", NULL);
78932+#else
78933 char symname[KSYM_NAME_LEN];
78934
78935 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
78936 SEQ_printf(m, "<%pK>", sym);
78937 else
78938 SEQ_printf(m, "%s", symname);
78939+#endif
78940 }
78941
78942 static void
78943@@ -112,7 +116,11 @@ next_one:
78944 static void
78945 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
78946 {
78947+#ifdef CONFIG_GRKERNSEC_HIDESYM
78948+ SEQ_printf(m, " .base: %p\n", NULL);
78949+#else
78950 SEQ_printf(m, " .base: %pK\n", base);
78951+#endif
78952 SEQ_printf(m, " .index: %d\n",
78953 base->index);
78954 SEQ_printf(m, " .resolution: %Lu nsecs\n",
78955@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
78956 {
78957 struct proc_dir_entry *pe;
78958
78959+#ifdef CONFIG_GRKERNSEC_PROC_ADD
78960+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
78961+#else
78962 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
78963+#endif
78964 if (!pe)
78965 return -ENOMEM;
78966 return 0;
78967diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
78968index 0b537f2..40d6c20 100644
78969--- a/kernel/time/timer_stats.c
78970+++ b/kernel/time/timer_stats.c
78971@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
78972 static unsigned long nr_entries;
78973 static struct entry entries[MAX_ENTRIES];
78974
78975-static atomic_t overflow_count;
78976+static atomic_unchecked_t overflow_count;
78977
78978 /*
78979 * The entries are in a hash-table, for fast lookup:
78980@@ -140,7 +140,7 @@ static void reset_entries(void)
78981 nr_entries = 0;
78982 memset(entries, 0, sizeof(entries));
78983 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
78984- atomic_set(&overflow_count, 0);
78985+ atomic_set_unchecked(&overflow_count, 0);
78986 }
78987
78988 static struct entry *alloc_entry(void)
78989@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
78990 if (likely(entry))
78991 entry->count++;
78992 else
78993- atomic_inc(&overflow_count);
78994+ atomic_inc_unchecked(&overflow_count);
78995
78996 out_unlock:
78997 raw_spin_unlock_irqrestore(lock, flags);
78998@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
78999
79000 static void print_name_offset(struct seq_file *m, unsigned long addr)
79001 {
79002+#ifdef CONFIG_GRKERNSEC_HIDESYM
79003+ seq_printf(m, "<%p>", NULL);
79004+#else
79005 char symname[KSYM_NAME_LEN];
79006
79007 if (lookup_symbol_name(addr, symname) < 0)
79008- seq_printf(m, "<%p>", (void *)addr);
79009+ seq_printf(m, "<%pK>", (void *)addr);
79010 else
79011 seq_printf(m, "%s", symname);
79012+#endif
79013 }
79014
79015 static int tstats_show(struct seq_file *m, void *v)
79016@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
79017
79018 seq_puts(m, "Timer Stats Version: v0.2\n");
79019 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
79020- if (atomic_read(&overflow_count))
79021+ if (atomic_read_unchecked(&overflow_count))
79022 seq_printf(m, "Overflow: %d entries\n",
79023- atomic_read(&overflow_count));
79024+ atomic_read_unchecked(&overflow_count));
79025
79026 for (i = 0; i < nr_entries; i++) {
79027 entry = entries + i;
79028@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
79029 {
79030 struct proc_dir_entry *pe;
79031
79032+#ifdef CONFIG_GRKERNSEC_PROC_ADD
79033+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
79034+#else
79035 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
79036+#endif
79037 if (!pe)
79038 return -ENOMEM;
79039 return 0;
79040diff --git a/kernel/timer.c b/kernel/timer.c
79041index 367d008..5dee98f 100644
79042--- a/kernel/timer.c
79043+++ b/kernel/timer.c
79044@@ -1363,7 +1363,7 @@ void update_process_times(int user_tick)
79045 /*
79046 * This function runs timers and the timer-tq in bottom half context.
79047 */
79048-static void run_timer_softirq(struct softirq_action *h)
79049+static void run_timer_softirq(void)
79050 {
79051 struct tvec_base *base = __this_cpu_read(tvec_bases);
79052
79053@@ -1481,7 +1481,7 @@ static void process_timeout(unsigned long __data)
79054 *
79055 * In all cases the return value is guaranteed to be non-negative.
79056 */
79057-signed long __sched schedule_timeout(signed long timeout)
79058+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
79059 {
79060 struct timer_list timer;
79061 unsigned long expire;
79062@@ -1772,7 +1772,7 @@ static int __cpuinit timer_cpu_notify(struct notifier_block *self,
79063 return NOTIFY_OK;
79064 }
79065
79066-static struct notifier_block __cpuinitdata timers_nb = {
79067+static struct notifier_block timers_nb = {
79068 .notifier_call = timer_cpu_notify,
79069 };
79070
79071diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
79072index c0bd030..62a1927 100644
79073--- a/kernel/trace/blktrace.c
79074+++ b/kernel/trace/blktrace.c
79075@@ -317,7 +317,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
79076 struct blk_trace *bt = filp->private_data;
79077 char buf[16];
79078
79079- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
79080+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
79081
79082 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
79083 }
79084@@ -375,7 +375,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
79085 return 1;
79086
79087 bt = buf->chan->private_data;
79088- atomic_inc(&bt->dropped);
79089+ atomic_inc_unchecked(&bt->dropped);
79090 return 0;
79091 }
79092
79093@@ -476,7 +476,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
79094
79095 bt->dir = dir;
79096 bt->dev = dev;
79097- atomic_set(&bt->dropped, 0);
79098+ atomic_set_unchecked(&bt->dropped, 0);
79099
79100 ret = -EIO;
79101 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
79102diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
79103index b27052c..0e1af95 100644
79104--- a/kernel/trace/ftrace.c
79105+++ b/kernel/trace/ftrace.c
79106@@ -1874,12 +1874,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
79107 if (unlikely(ftrace_disabled))
79108 return 0;
79109
79110+ ret = ftrace_arch_code_modify_prepare();
79111+ FTRACE_WARN_ON(ret);
79112+ if (ret)
79113+ return 0;
79114+
79115 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
79116+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
79117 if (ret) {
79118 ftrace_bug(ret, ip);
79119- return 0;
79120 }
79121- return 1;
79122+ return ret ? 0 : 1;
79123 }
79124
79125 /*
79126@@ -2965,7 +2970,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
79127
79128 int
79129 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
79130- void *data)
79131+ void *data)
79132 {
79133 struct ftrace_func_probe *entry;
79134 struct ftrace_page *pg;
79135@@ -3832,8 +3837,10 @@ static int ftrace_process_locs(struct module *mod,
79136 if (!count)
79137 return 0;
79138
79139+ pax_open_kernel();
79140 sort(start, count, sizeof(*start),
79141 ftrace_cmp_ips, ftrace_swap_ips);
79142+ pax_close_kernel();
79143
79144 start_pg = ftrace_allocate_pages(count);
79145 if (!start_pg)
79146@@ -4559,8 +4566,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
79147 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
79148
79149 static int ftrace_graph_active;
79150-static struct notifier_block ftrace_suspend_notifier;
79151-
79152 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
79153 {
79154 return 0;
79155@@ -4704,6 +4709,10 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
79156 return NOTIFY_DONE;
79157 }
79158
79159+static struct notifier_block ftrace_suspend_notifier = {
79160+ .notifier_call = ftrace_suspend_notifier_call
79161+};
79162+
79163 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
79164 trace_func_graph_ent_t entryfunc)
79165 {
79166@@ -4717,7 +4726,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
79167 goto out;
79168 }
79169
79170- ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
79171 register_pm_notifier(&ftrace_suspend_notifier);
79172
79173 ftrace_graph_active++;
79174diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
79175index ce8514f..8233573 100644
79176--- a/kernel/trace/ring_buffer.c
79177+++ b/kernel/trace/ring_buffer.c
79178@@ -346,9 +346,9 @@ struct buffer_data_page {
79179 */
79180 struct buffer_page {
79181 struct list_head list; /* list of buffer pages */
79182- local_t write; /* index for next write */
79183+ local_unchecked_t write; /* index for next write */
79184 unsigned read; /* index for next read */
79185- local_t entries; /* entries on this page */
79186+ local_unchecked_t entries; /* entries on this page */
79187 unsigned long real_end; /* real end of data */
79188 struct buffer_data_page *page; /* Actual data page */
79189 };
79190@@ -461,8 +461,8 @@ struct ring_buffer_per_cpu {
79191 unsigned long last_overrun;
79192 local_t entries_bytes;
79193 local_t entries;
79194- local_t overrun;
79195- local_t commit_overrun;
79196+ local_unchecked_t overrun;
79197+ local_unchecked_t commit_overrun;
79198 local_t dropped_events;
79199 local_t committing;
79200 local_t commits;
79201@@ -861,8 +861,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
79202 *
79203 * We add a counter to the write field to denote this.
79204 */
79205- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
79206- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
79207+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
79208+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
79209
79210 /*
79211 * Just make sure we have seen our old_write and synchronize
79212@@ -890,8 +890,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
79213 * cmpxchg to only update if an interrupt did not already
79214 * do it for us. If the cmpxchg fails, we don't care.
79215 */
79216- (void)local_cmpxchg(&next_page->write, old_write, val);
79217- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
79218+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
79219+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
79220
79221 /*
79222 * No need to worry about races with clearing out the commit.
79223@@ -1250,12 +1250,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
79224
79225 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
79226 {
79227- return local_read(&bpage->entries) & RB_WRITE_MASK;
79228+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
79229 }
79230
79231 static inline unsigned long rb_page_write(struct buffer_page *bpage)
79232 {
79233- return local_read(&bpage->write) & RB_WRITE_MASK;
79234+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
79235 }
79236
79237 static int
79238@@ -1350,7 +1350,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
79239 * bytes consumed in ring buffer from here.
79240 * Increment overrun to account for the lost events.
79241 */
79242- local_add(page_entries, &cpu_buffer->overrun);
79243+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
79244 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
79245 }
79246
79247@@ -1906,7 +1906,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
79248 * it is our responsibility to update
79249 * the counters.
79250 */
79251- local_add(entries, &cpu_buffer->overrun);
79252+ local_add_unchecked(entries, &cpu_buffer->overrun);
79253 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
79254
79255 /*
79256@@ -2056,7 +2056,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
79257 if (tail == BUF_PAGE_SIZE)
79258 tail_page->real_end = 0;
79259
79260- local_sub(length, &tail_page->write);
79261+ local_sub_unchecked(length, &tail_page->write);
79262 return;
79263 }
79264
79265@@ -2091,7 +2091,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
79266 rb_event_set_padding(event);
79267
79268 /* Set the write back to the previous setting */
79269- local_sub(length, &tail_page->write);
79270+ local_sub_unchecked(length, &tail_page->write);
79271 return;
79272 }
79273
79274@@ -2103,7 +2103,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
79275
79276 /* Set write to end of buffer */
79277 length = (tail + length) - BUF_PAGE_SIZE;
79278- local_sub(length, &tail_page->write);
79279+ local_sub_unchecked(length, &tail_page->write);
79280 }
79281
79282 /*
79283@@ -2129,7 +2129,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
79284 * about it.
79285 */
79286 if (unlikely(next_page == commit_page)) {
79287- local_inc(&cpu_buffer->commit_overrun);
79288+ local_inc_unchecked(&cpu_buffer->commit_overrun);
79289 goto out_reset;
79290 }
79291
79292@@ -2185,7 +2185,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
79293 cpu_buffer->tail_page) &&
79294 (cpu_buffer->commit_page ==
79295 cpu_buffer->reader_page))) {
79296- local_inc(&cpu_buffer->commit_overrun);
79297+ local_inc_unchecked(&cpu_buffer->commit_overrun);
79298 goto out_reset;
79299 }
79300 }
79301@@ -2233,7 +2233,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
79302 length += RB_LEN_TIME_EXTEND;
79303
79304 tail_page = cpu_buffer->tail_page;
79305- write = local_add_return(length, &tail_page->write);
79306+ write = local_add_return_unchecked(length, &tail_page->write);
79307
79308 /* set write to only the index of the write */
79309 write &= RB_WRITE_MASK;
79310@@ -2250,7 +2250,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
79311 kmemcheck_annotate_bitfield(event, bitfield);
79312 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
79313
79314- local_inc(&tail_page->entries);
79315+ local_inc_unchecked(&tail_page->entries);
79316
79317 /*
79318 * If this is the first commit on the page, then update
79319@@ -2283,7 +2283,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
79320
79321 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
79322 unsigned long write_mask =
79323- local_read(&bpage->write) & ~RB_WRITE_MASK;
79324+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
79325 unsigned long event_length = rb_event_length(event);
79326 /*
79327 * This is on the tail page. It is possible that
79328@@ -2293,7 +2293,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
79329 */
79330 old_index += write_mask;
79331 new_index += write_mask;
79332- index = local_cmpxchg(&bpage->write, old_index, new_index);
79333+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
79334 if (index == old_index) {
79335 /* update counters */
79336 local_sub(event_length, &cpu_buffer->entries_bytes);
79337@@ -2632,7 +2632,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
79338
79339 /* Do the likely case first */
79340 if (likely(bpage->page == (void *)addr)) {
79341- local_dec(&bpage->entries);
79342+ local_dec_unchecked(&bpage->entries);
79343 return;
79344 }
79345
79346@@ -2644,7 +2644,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
79347 start = bpage;
79348 do {
79349 if (bpage->page == (void *)addr) {
79350- local_dec(&bpage->entries);
79351+ local_dec_unchecked(&bpage->entries);
79352 return;
79353 }
79354 rb_inc_page(cpu_buffer, &bpage);
79355@@ -2926,7 +2926,7 @@ static inline unsigned long
79356 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
79357 {
79358 return local_read(&cpu_buffer->entries) -
79359- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
79360+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
79361 }
79362
79363 /**
79364@@ -3015,7 +3015,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
79365 return 0;
79366
79367 cpu_buffer = buffer->buffers[cpu];
79368- ret = local_read(&cpu_buffer->overrun);
79369+ ret = local_read_unchecked(&cpu_buffer->overrun);
79370
79371 return ret;
79372 }
79373@@ -3038,7 +3038,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
79374 return 0;
79375
79376 cpu_buffer = buffer->buffers[cpu];
79377- ret = local_read(&cpu_buffer->commit_overrun);
79378+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
79379
79380 return ret;
79381 }
79382@@ -3105,7 +3105,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
79383 /* if you care about this being correct, lock the buffer */
79384 for_each_buffer_cpu(buffer, cpu) {
79385 cpu_buffer = buffer->buffers[cpu];
79386- overruns += local_read(&cpu_buffer->overrun);
79387+ overruns += local_read_unchecked(&cpu_buffer->overrun);
79388 }
79389
79390 return overruns;
79391@@ -3281,8 +3281,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
79392 /*
79393 * Reset the reader page to size zero.
79394 */
79395- local_set(&cpu_buffer->reader_page->write, 0);
79396- local_set(&cpu_buffer->reader_page->entries, 0);
79397+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
79398+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
79399 local_set(&cpu_buffer->reader_page->page->commit, 0);
79400 cpu_buffer->reader_page->real_end = 0;
79401
79402@@ -3316,7 +3316,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
79403 * want to compare with the last_overrun.
79404 */
79405 smp_mb();
79406- overwrite = local_read(&(cpu_buffer->overrun));
79407+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
79408
79409 /*
79410 * Here's the tricky part.
79411@@ -3886,8 +3886,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
79412
79413 cpu_buffer->head_page
79414 = list_entry(cpu_buffer->pages, struct buffer_page, list);
79415- local_set(&cpu_buffer->head_page->write, 0);
79416- local_set(&cpu_buffer->head_page->entries, 0);
79417+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
79418+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
79419 local_set(&cpu_buffer->head_page->page->commit, 0);
79420
79421 cpu_buffer->head_page->read = 0;
79422@@ -3897,14 +3897,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
79423
79424 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
79425 INIT_LIST_HEAD(&cpu_buffer->new_pages);
79426- local_set(&cpu_buffer->reader_page->write, 0);
79427- local_set(&cpu_buffer->reader_page->entries, 0);
79428+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
79429+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
79430 local_set(&cpu_buffer->reader_page->page->commit, 0);
79431 cpu_buffer->reader_page->read = 0;
79432
79433 local_set(&cpu_buffer->entries_bytes, 0);
79434- local_set(&cpu_buffer->overrun, 0);
79435- local_set(&cpu_buffer->commit_overrun, 0);
79436+ local_set_unchecked(&cpu_buffer->overrun, 0);
79437+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
79438 local_set(&cpu_buffer->dropped_events, 0);
79439 local_set(&cpu_buffer->entries, 0);
79440 local_set(&cpu_buffer->committing, 0);
79441@@ -4308,8 +4308,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
79442 rb_init_page(bpage);
79443 bpage = reader->page;
79444 reader->page = *data_page;
79445- local_set(&reader->write, 0);
79446- local_set(&reader->entries, 0);
79447+ local_set_unchecked(&reader->write, 0);
79448+ local_set_unchecked(&reader->entries, 0);
79449 reader->read = 0;
79450 *data_page = bpage;
79451
79452diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
79453index fe1d581..43a0f38 100644
79454--- a/kernel/trace/trace.c
79455+++ b/kernel/trace/trace.c
79456@@ -4494,10 +4494,9 @@ static const struct file_operations tracing_dyn_info_fops = {
79457 };
79458 #endif
79459
79460-static struct dentry *d_tracer;
79461-
79462 struct dentry *tracing_init_dentry(void)
79463 {
79464+ static struct dentry *d_tracer;
79465 static int once;
79466
79467 if (d_tracer)
79468@@ -4517,10 +4516,9 @@ struct dentry *tracing_init_dentry(void)
79469 return d_tracer;
79470 }
79471
79472-static struct dentry *d_percpu;
79473-
79474 struct dentry *tracing_dentry_percpu(void)
79475 {
79476+ static struct dentry *d_percpu;
79477 static int once;
79478 struct dentry *d_tracer;
79479
79480diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
79481index 880073d..42db7c3 100644
79482--- a/kernel/trace/trace_events.c
79483+++ b/kernel/trace/trace_events.c
79484@@ -1330,10 +1330,6 @@ static LIST_HEAD(ftrace_module_file_list);
79485 struct ftrace_module_file_ops {
79486 struct list_head list;
79487 struct module *mod;
79488- struct file_operations id;
79489- struct file_operations enable;
79490- struct file_operations format;
79491- struct file_operations filter;
79492 };
79493
79494 static struct ftrace_module_file_ops *
79495@@ -1354,17 +1350,12 @@ trace_create_file_ops(struct module *mod)
79496
79497 file_ops->mod = mod;
79498
79499- file_ops->id = ftrace_event_id_fops;
79500- file_ops->id.owner = mod;
79501-
79502- file_ops->enable = ftrace_enable_fops;
79503- file_ops->enable.owner = mod;
79504-
79505- file_ops->filter = ftrace_event_filter_fops;
79506- file_ops->filter.owner = mod;
79507-
79508- file_ops->format = ftrace_event_format_fops;
79509- file_ops->format.owner = mod;
79510+ pax_open_kernel();
79511+ mod->trace_id.owner = mod;
79512+ mod->trace_enable.owner = mod;
79513+ mod->trace_filter.owner = mod;
79514+ mod->trace_format.owner = mod;
79515+ pax_close_kernel();
79516
79517 list_add(&file_ops->list, &ftrace_module_file_list);
79518
79519@@ -1388,8 +1379,8 @@ static void trace_module_add_events(struct module *mod)
79520
79521 for_each_event(call, start, end) {
79522 __trace_add_event_call(*call, mod,
79523- &file_ops->id, &file_ops->enable,
79524- &file_ops->filter, &file_ops->format);
79525+ &mod->trace_id, &mod->trace_enable,
79526+ &mod->trace_filter, &mod->trace_format);
79527 }
79528 }
79529
79530diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
79531index fd3c8aa..5f324a6 100644
79532--- a/kernel/trace/trace_mmiotrace.c
79533+++ b/kernel/trace/trace_mmiotrace.c
79534@@ -24,7 +24,7 @@ struct header_iter {
79535 static struct trace_array *mmio_trace_array;
79536 static bool overrun_detected;
79537 static unsigned long prev_overruns;
79538-static atomic_t dropped_count;
79539+static atomic_unchecked_t dropped_count;
79540
79541 static void mmio_reset_data(struct trace_array *tr)
79542 {
79543@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
79544
79545 static unsigned long count_overruns(struct trace_iterator *iter)
79546 {
79547- unsigned long cnt = atomic_xchg(&dropped_count, 0);
79548+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
79549 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
79550
79551 if (over > prev_overruns)
79552@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
79553 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
79554 sizeof(*entry), 0, pc);
79555 if (!event) {
79556- atomic_inc(&dropped_count);
79557+ atomic_inc_unchecked(&dropped_count);
79558 return;
79559 }
79560 entry = ring_buffer_event_data(event);
79561@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
79562 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
79563 sizeof(*entry), 0, pc);
79564 if (!event) {
79565- atomic_inc(&dropped_count);
79566+ atomic_inc_unchecked(&dropped_count);
79567 return;
79568 }
79569 entry = ring_buffer_event_data(event);
79570diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
79571index 194d796..76edb8f 100644
79572--- a/kernel/trace/trace_output.c
79573+++ b/kernel/trace/trace_output.c
79574@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
79575
79576 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
79577 if (!IS_ERR(p)) {
79578- p = mangle_path(s->buffer + s->len, p, "\n");
79579+ p = mangle_path(s->buffer + s->len, p, "\n\\");
79580 if (p) {
79581 s->len = p - s->buffer;
79582 return 1;
79583@@ -852,14 +852,16 @@ int register_ftrace_event(struct trace_event *event)
79584 goto out;
79585 }
79586
79587+ pax_open_kernel();
79588 if (event->funcs->trace == NULL)
79589- event->funcs->trace = trace_nop_print;
79590+ *(void **)&event->funcs->trace = trace_nop_print;
79591 if (event->funcs->raw == NULL)
79592- event->funcs->raw = trace_nop_print;
79593+ *(void **)&event->funcs->raw = trace_nop_print;
79594 if (event->funcs->hex == NULL)
79595- event->funcs->hex = trace_nop_print;
79596+ *(void **)&event->funcs->hex = trace_nop_print;
79597 if (event->funcs->binary == NULL)
79598- event->funcs->binary = trace_nop_print;
79599+ *(void **)&event->funcs->binary = trace_nop_print;
79600+ pax_close_kernel();
79601
79602 key = event->type & (EVENT_HASHSIZE - 1);
79603
79604diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
79605index 42ca822..cdcacc6 100644
79606--- a/kernel/trace/trace_stack.c
79607+++ b/kernel/trace/trace_stack.c
79608@@ -52,7 +52,7 @@ static inline void check_stack(void)
79609 return;
79610
79611 /* we do not handle interrupt stacks yet */
79612- if (!object_is_on_stack(&this_size))
79613+ if (!object_starts_on_stack(&this_size))
79614 return;
79615
79616 local_irq_save(flags);
79617diff --git a/kernel/user.c b/kernel/user.c
79618index 7f6ff2b..1ac8f18 100644
79619--- a/kernel/user.c
79620+++ b/kernel/user.c
79621@@ -47,9 +47,7 @@ struct user_namespace init_user_ns = {
79622 .count = 4294967295U,
79623 },
79624 },
79625- .kref = {
79626- .refcount = ATOMIC_INIT(3),
79627- },
79628+ .count = ATOMIC_INIT(3),
79629 .owner = GLOBAL_ROOT_UID,
79630 .group = GLOBAL_ROOT_GID,
79631 .proc_inum = PROC_USER_INIT_INO,
79632diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
79633index f45e128..a5a5fb6 100644
79634--- a/kernel/user_namespace.c
79635+++ b/kernel/user_namespace.c
79636@@ -88,7 +88,7 @@ int create_user_ns(struct cred *new)
79637 return ret;
79638 }
79639
79640- kref_init(&ns->kref);
79641+ atomic_set(&ns->count, 1);
79642 /* Leave the new->user_ns reference with the new user namespace. */
79643 ns->parent = parent_ns;
79644 ns->owner = owner;
79645@@ -116,15 +116,16 @@ int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
79646 return create_user_ns(cred);
79647 }
79648
79649-void free_user_ns(struct kref *kref)
79650+void free_user_ns(struct user_namespace *ns)
79651 {
79652- struct user_namespace *parent, *ns =
79653- container_of(kref, struct user_namespace, kref);
79654+ struct user_namespace *parent;
79655
79656- parent = ns->parent;
79657- proc_free_inum(ns->proc_inum);
79658- kmem_cache_free(user_ns_cachep, ns);
79659- put_user_ns(parent);
79660+ do {
79661+ parent = ns->parent;
79662+ proc_free_inum(ns->proc_inum);
79663+ kmem_cache_free(user_ns_cachep, ns);
79664+ ns = parent;
79665+ } while (atomic_dec_and_test(&parent->count));
79666 }
79667 EXPORT_SYMBOL(free_user_ns);
79668
79669@@ -815,7 +816,7 @@ static int userns_install(struct nsproxy *nsproxy, void *ns)
79670 if (atomic_read(&current->mm->mm_users) > 1)
79671 return -EINVAL;
79672
79673- if (current->fs->users != 1)
79674+ if (atomic_read(&current->fs->users) != 1)
79675 return -EINVAL;
79676
79677 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
79678diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
79679index 63da38c..639904e 100644
79680--- a/kernel/utsname_sysctl.c
79681+++ b/kernel/utsname_sysctl.c
79682@@ -46,7 +46,7 @@ static void put_uts(ctl_table *table, int write, void *which)
79683 static int proc_do_uts_string(ctl_table *table, int write,
79684 void __user *buffer, size_t *lenp, loff_t *ppos)
79685 {
79686- struct ctl_table uts_table;
79687+ ctl_table_no_const uts_table;
79688 int r;
79689 memcpy(&uts_table, table, sizeof(uts_table));
79690 uts_table.data = get_uts(table, write);
79691diff --git a/kernel/watchdog.c b/kernel/watchdog.c
79692index 75a2ab3..5961da7 100644
79693--- a/kernel/watchdog.c
79694+++ b/kernel/watchdog.c
79695@@ -527,7 +527,7 @@ int proc_dowatchdog(struct ctl_table *table, int write,
79696 }
79697 #endif /* CONFIG_SYSCTL */
79698
79699-static struct smp_hotplug_thread watchdog_threads = {
79700+static struct smp_hotplug_thread watchdog_threads __read_only = {
79701 .store = &softlockup_watchdog,
79702 .thread_should_run = watchdog_should_run,
79703 .thread_fn = watchdog,
79704diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
79705index 67604e5..fe94fb1 100644
79706--- a/lib/Kconfig.debug
79707+++ b/lib/Kconfig.debug
79708@@ -550,7 +550,7 @@ config DEBUG_MUTEXES
79709
79710 config DEBUG_LOCK_ALLOC
79711 bool "Lock debugging: detect incorrect freeing of live locks"
79712- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
79713+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
79714 select DEBUG_SPINLOCK
79715 select DEBUG_MUTEXES
79716 select LOCKDEP
79717@@ -564,7 +564,7 @@ config DEBUG_LOCK_ALLOC
79718
79719 config PROVE_LOCKING
79720 bool "Lock debugging: prove locking correctness"
79721- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
79722+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
79723 select LOCKDEP
79724 select DEBUG_SPINLOCK
79725 select DEBUG_MUTEXES
79726@@ -670,7 +670,7 @@ config LOCKDEP
79727
79728 config LOCK_STAT
79729 bool "Lock usage statistics"
79730- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
79731+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
79732 select LOCKDEP
79733 select DEBUG_SPINLOCK
79734 select DEBUG_MUTEXES
79735@@ -1278,6 +1278,7 @@ config LATENCYTOP
79736 depends on DEBUG_KERNEL
79737 depends on STACKTRACE_SUPPORT
79738 depends on PROC_FS
79739+ depends on !GRKERNSEC_HIDESYM
79740 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
79741 select KALLSYMS
79742 select KALLSYMS_ALL
79743@@ -1306,7 +1307,7 @@ config INTERVAL_TREE_TEST
79744
79745 config PROVIDE_OHCI1394_DMA_INIT
79746 bool "Remote debugging over FireWire early on boot"
79747- depends on PCI && X86
79748+ depends on PCI && X86 && !GRKERNSEC
79749 help
79750 If you want to debug problems which hang or crash the kernel early
79751 on boot and the crashing machine has a FireWire port, you can use
79752@@ -1335,7 +1336,7 @@ config PROVIDE_OHCI1394_DMA_INIT
79753
79754 config FIREWIRE_OHCI_REMOTE_DMA
79755 bool "Remote debugging over FireWire with firewire-ohci"
79756- depends on FIREWIRE_OHCI
79757+ depends on FIREWIRE_OHCI && !GRKERNSEC
79758 help
79759 This option lets you use the FireWire bus for remote debugging
79760 with help of the firewire-ohci driver. It enables unfiltered
79761diff --git a/lib/Makefile b/lib/Makefile
79762index 02ed6c0..bd243da 100644
79763--- a/lib/Makefile
79764+++ b/lib/Makefile
79765@@ -47,7 +47,7 @@ obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
79766
79767 obj-$(CONFIG_BTREE) += btree.o
79768 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
79769-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
79770+obj-y += list_debug.o
79771 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
79772
79773 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
79774diff --git a/lib/bitmap.c b/lib/bitmap.c
79775index 06f7e4f..f3cf2b0 100644
79776--- a/lib/bitmap.c
79777+++ b/lib/bitmap.c
79778@@ -422,7 +422,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
79779 {
79780 int c, old_c, totaldigits, ndigits, nchunks, nbits;
79781 u32 chunk;
79782- const char __user __force *ubuf = (const char __user __force *)buf;
79783+ const char __user *ubuf = (const char __force_user *)buf;
79784
79785 bitmap_zero(maskp, nmaskbits);
79786
79787@@ -507,7 +507,7 @@ int bitmap_parse_user(const char __user *ubuf,
79788 {
79789 if (!access_ok(VERIFY_READ, ubuf, ulen))
79790 return -EFAULT;
79791- return __bitmap_parse((const char __force *)ubuf,
79792+ return __bitmap_parse((const char __force_kernel *)ubuf,
79793 ulen, 1, maskp, nmaskbits);
79794
79795 }
79796@@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
79797 {
79798 unsigned a, b;
79799 int c, old_c, totaldigits;
79800- const char __user __force *ubuf = (const char __user __force *)buf;
79801+ const char __user *ubuf = (const char __force_user *)buf;
79802 int exp_digit, in_range;
79803
79804 totaldigits = c = 0;
79805@@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
79806 {
79807 if (!access_ok(VERIFY_READ, ubuf, ulen))
79808 return -EFAULT;
79809- return __bitmap_parselist((const char __force *)ubuf,
79810+ return __bitmap_parselist((const char __force_kernel *)ubuf,
79811 ulen, 1, maskp, nmaskbits);
79812 }
79813 EXPORT_SYMBOL(bitmap_parselist_user);
79814diff --git a/lib/bug.c b/lib/bug.c
79815index d0cdf14..4d07bd2 100644
79816--- a/lib/bug.c
79817+++ b/lib/bug.c
79818@@ -134,6 +134,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
79819 return BUG_TRAP_TYPE_NONE;
79820
79821 bug = find_bug(bugaddr);
79822+ if (!bug)
79823+ return BUG_TRAP_TYPE_NONE;
79824
79825 file = NULL;
79826 line = 0;
79827diff --git a/lib/debugobjects.c b/lib/debugobjects.c
79828index d11808c..dc2d6f8 100644
79829--- a/lib/debugobjects.c
79830+++ b/lib/debugobjects.c
79831@@ -287,7 +287,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
79832 if (limit > 4)
79833 return;
79834
79835- is_on_stack = object_is_on_stack(addr);
79836+ is_on_stack = object_starts_on_stack(addr);
79837 if (is_on_stack == onstack)
79838 return;
79839
79840diff --git a/lib/devres.c b/lib/devres.c
79841index 80b9c76..9e32279 100644
79842--- a/lib/devres.c
79843+++ b/lib/devres.c
79844@@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
79845 void devm_iounmap(struct device *dev, void __iomem *addr)
79846 {
79847 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
79848- (void *)addr));
79849+ (void __force *)addr));
79850 iounmap(addr);
79851 }
79852 EXPORT_SYMBOL(devm_iounmap);
79853@@ -192,7 +192,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
79854 {
79855 ioport_unmap(addr);
79856 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
79857- devm_ioport_map_match, (void *)addr));
79858+ devm_ioport_map_match, (void __force *)addr));
79859 }
79860 EXPORT_SYMBOL(devm_ioport_unmap);
79861
79862diff --git a/lib/div64.c b/lib/div64.c
79863index a163b6c..9618fa5 100644
79864--- a/lib/div64.c
79865+++ b/lib/div64.c
79866@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
79867 EXPORT_SYMBOL(__div64_32);
79868
79869 #ifndef div_s64_rem
79870-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
79871+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
79872 {
79873 u64 quotient;
79874
79875@@ -90,7 +90,7 @@ EXPORT_SYMBOL(div_s64_rem);
79876 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
79877 */
79878 #ifndef div64_u64
79879-u64 div64_u64(u64 dividend, u64 divisor)
79880+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
79881 {
79882 u32 high = divisor >> 32;
79883 u64 quot;
79884diff --git a/lib/dma-debug.c b/lib/dma-debug.c
79885index 5e396ac..58d5de1 100644
79886--- a/lib/dma-debug.c
79887+++ b/lib/dma-debug.c
79888@@ -768,7 +768,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
79889
79890 void dma_debug_add_bus(struct bus_type *bus)
79891 {
79892- struct notifier_block *nb;
79893+ notifier_block_no_const *nb;
79894
79895 if (global_disable)
79896 return;
79897@@ -942,7 +942,7 @@ out:
79898
79899 static void check_for_stack(struct device *dev, void *addr)
79900 {
79901- if (object_is_on_stack(addr))
79902+ if (object_starts_on_stack(addr))
79903 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
79904 "stack [addr=%p]\n", addr);
79905 }
79906diff --git a/lib/inflate.c b/lib/inflate.c
79907index 013a761..c28f3fc 100644
79908--- a/lib/inflate.c
79909+++ b/lib/inflate.c
79910@@ -269,7 +269,7 @@ static void free(void *where)
79911 malloc_ptr = free_mem_ptr;
79912 }
79913 #else
79914-#define malloc(a) kmalloc(a, GFP_KERNEL)
79915+#define malloc(a) kmalloc((a), GFP_KERNEL)
79916 #define free(a) kfree(a)
79917 #endif
79918
79919diff --git a/lib/ioremap.c b/lib/ioremap.c
79920index 0c9216c..863bd89 100644
79921--- a/lib/ioremap.c
79922+++ b/lib/ioremap.c
79923@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
79924 unsigned long next;
79925
79926 phys_addr -= addr;
79927- pmd = pmd_alloc(&init_mm, pud, addr);
79928+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
79929 if (!pmd)
79930 return -ENOMEM;
79931 do {
79932@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
79933 unsigned long next;
79934
79935 phys_addr -= addr;
79936- pud = pud_alloc(&init_mm, pgd, addr);
79937+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
79938 if (!pud)
79939 return -ENOMEM;
79940 do {
79941diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
79942index bd2bea9..6b3c95e 100644
79943--- a/lib/is_single_threaded.c
79944+++ b/lib/is_single_threaded.c
79945@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
79946 struct task_struct *p, *t;
79947 bool ret;
79948
79949+ if (!mm)
79950+ return true;
79951+
79952 if (atomic_read(&task->signal->live) != 1)
79953 return false;
79954
79955diff --git a/lib/kobject.c b/lib/kobject.c
79956index e07ee1f..998489d 100644
79957--- a/lib/kobject.c
79958+++ b/lib/kobject.c
79959@@ -852,9 +852,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
79960
79961
79962 static DEFINE_SPINLOCK(kobj_ns_type_lock);
79963-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
79964+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
79965
79966-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
79967+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
79968 {
79969 enum kobj_ns_type type = ops->type;
79970 int error;
79971diff --git a/lib/list_debug.c b/lib/list_debug.c
79972index c24c2f7..06e070b 100644
79973--- a/lib/list_debug.c
79974+++ b/lib/list_debug.c
79975@@ -11,7 +11,9 @@
79976 #include <linux/bug.h>
79977 #include <linux/kernel.h>
79978 #include <linux/rculist.h>
79979+#include <linux/mm.h>
79980
79981+#ifdef CONFIG_DEBUG_LIST
79982 /*
79983 * Insert a new entry between two known consecutive entries.
79984 *
79985@@ -19,21 +21,32 @@
79986 * the prev/next entries already!
79987 */
79988
79989-void __list_add(struct list_head *new,
79990- struct list_head *prev,
79991- struct list_head *next)
79992+static bool __list_add_debug(struct list_head *new,
79993+ struct list_head *prev,
79994+ struct list_head *next)
79995 {
79996- WARN(next->prev != prev,
79997+ if (WARN(next->prev != prev,
79998 "list_add corruption. next->prev should be "
79999 "prev (%p), but was %p. (next=%p).\n",
80000- prev, next->prev, next);
80001- WARN(prev->next != next,
80002+ prev, next->prev, next) ||
80003+ WARN(prev->next != next,
80004 "list_add corruption. prev->next should be "
80005 "next (%p), but was %p. (prev=%p).\n",
80006- next, prev->next, prev);
80007- WARN(new == prev || new == next,
80008- "list_add double add: new=%p, prev=%p, next=%p.\n",
80009- new, prev, next);
80010+ next, prev->next, prev) ||
80011+ WARN(new == prev || new == next,
80012+ "list_add double add: new=%p, prev=%p, next=%p.\n",
80013+ new, prev, next))
80014+ return false;
80015+ return true;
80016+}
80017+
80018+void __list_add(struct list_head *new,
80019+ struct list_head *prev,
80020+ struct list_head *next)
80021+{
80022+ if (!__list_add_debug(new, prev, next))
80023+ return;
80024+
80025 next->prev = new;
80026 new->next = next;
80027 new->prev = prev;
80028@@ -41,7 +54,7 @@ void __list_add(struct list_head *new,
80029 }
80030 EXPORT_SYMBOL(__list_add);
80031
80032-void __list_del_entry(struct list_head *entry)
80033+static bool __list_del_entry_debug(struct list_head *entry)
80034 {
80035 struct list_head *prev, *next;
80036
80037@@ -60,9 +73,16 @@ void __list_del_entry(struct list_head *entry)
80038 WARN(next->prev != entry,
80039 "list_del corruption. next->prev should be %p, "
80040 "but was %p\n", entry, next->prev))
80041+ return false;
80042+ return true;
80043+}
80044+
80045+void __list_del_entry(struct list_head *entry)
80046+{
80047+ if (!__list_del_entry_debug(entry))
80048 return;
80049
80050- __list_del(prev, next);
80051+ __list_del(entry->prev, entry->next);
80052 }
80053 EXPORT_SYMBOL(__list_del_entry);
80054
80055@@ -86,15 +106,85 @@ EXPORT_SYMBOL(list_del);
80056 void __list_add_rcu(struct list_head *new,
80057 struct list_head *prev, struct list_head *next)
80058 {
80059- WARN(next->prev != prev,
80060- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
80061- prev, next->prev, next);
80062- WARN(prev->next != next,
80063- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
80064- next, prev->next, prev);
80065+ if (!__list_add_debug(new, prev, next))
80066+ return;
80067+
80068 new->next = next;
80069 new->prev = prev;
80070 rcu_assign_pointer(list_next_rcu(prev), new);
80071 next->prev = new;
80072 }
80073 EXPORT_SYMBOL(__list_add_rcu);
80074+#endif
80075+
80076+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
80077+{
80078+#ifdef CONFIG_DEBUG_LIST
80079+ if (!__list_add_debug(new, prev, next))
80080+ return;
80081+#endif
80082+
80083+ pax_open_kernel();
80084+ next->prev = new;
80085+ new->next = next;
80086+ new->prev = prev;
80087+ prev->next = new;
80088+ pax_close_kernel();
80089+}
80090+EXPORT_SYMBOL(__pax_list_add);
80091+
80092+void pax_list_del(struct list_head *entry)
80093+{
80094+#ifdef CONFIG_DEBUG_LIST
80095+ if (!__list_del_entry_debug(entry))
80096+ return;
80097+#endif
80098+
80099+ pax_open_kernel();
80100+ __list_del(entry->prev, entry->next);
80101+ entry->next = LIST_POISON1;
80102+ entry->prev = LIST_POISON2;
80103+ pax_close_kernel();
80104+}
80105+EXPORT_SYMBOL(pax_list_del);
80106+
80107+void pax_list_del_init(struct list_head *entry)
80108+{
80109+ pax_open_kernel();
80110+ __list_del(entry->prev, entry->next);
80111+ INIT_LIST_HEAD(entry);
80112+ pax_close_kernel();
80113+}
80114+EXPORT_SYMBOL(pax_list_del_init);
80115+
80116+void __pax_list_add_rcu(struct list_head *new,
80117+ struct list_head *prev, struct list_head *next)
80118+{
80119+#ifdef CONFIG_DEBUG_LIST
80120+ if (!__list_add_debug(new, prev, next))
80121+ return;
80122+#endif
80123+
80124+ pax_open_kernel();
80125+ new->next = next;
80126+ new->prev = prev;
80127+ rcu_assign_pointer(list_next_rcu(prev), new);
80128+ next->prev = new;
80129+ pax_close_kernel();
80130+}
80131+EXPORT_SYMBOL(__pax_list_add_rcu);
80132+
80133+void pax_list_del_rcu(struct list_head *entry)
80134+{
80135+#ifdef CONFIG_DEBUG_LIST
80136+ if (!__list_del_entry_debug(entry))
80137+ return;
80138+#endif
80139+
80140+ pax_open_kernel();
80141+ __list_del(entry->prev, entry->next);
80142+ entry->next = LIST_POISON1;
80143+ entry->prev = LIST_POISON2;
80144+ pax_close_kernel();
80145+}
80146+EXPORT_SYMBOL(pax_list_del_rcu);
80147diff --git a/lib/radix-tree.c b/lib/radix-tree.c
80148index e796429..6e38f9f 100644
80149--- a/lib/radix-tree.c
80150+++ b/lib/radix-tree.c
80151@@ -92,7 +92,7 @@ struct radix_tree_preload {
80152 int nr;
80153 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
80154 };
80155-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
80156+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
80157
80158 static inline void *ptr_to_indirect(void *ptr)
80159 {
80160diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
80161index bb2b201..46abaf9 100644
80162--- a/lib/strncpy_from_user.c
80163+++ b/lib/strncpy_from_user.c
80164@@ -21,7 +21,7 @@
80165 */
80166 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
80167 {
80168- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
80169+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
80170 long res = 0;
80171
80172 /*
80173diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
80174index a28df52..3d55877 100644
80175--- a/lib/strnlen_user.c
80176+++ b/lib/strnlen_user.c
80177@@ -26,7 +26,7 @@
80178 */
80179 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
80180 {
80181- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
80182+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
80183 long align, res = 0;
80184 unsigned long c;
80185
80186diff --git a/lib/swiotlb.c b/lib/swiotlb.c
80187index 196b069..358f342 100644
80188--- a/lib/swiotlb.c
80189+++ b/lib/swiotlb.c
80190@@ -642,7 +642,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
80191
80192 void
80193 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
80194- dma_addr_t dev_addr)
80195+ dma_addr_t dev_addr, struct dma_attrs *attrs)
80196 {
80197 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
80198
80199diff --git a/lib/vsprintf.c b/lib/vsprintf.c
80200index fab33a9..3b5fe68 100644
80201--- a/lib/vsprintf.c
80202+++ b/lib/vsprintf.c
80203@@ -16,6 +16,9 @@
80204 * - scnprintf and vscnprintf
80205 */
80206
80207+#ifdef CONFIG_GRKERNSEC_HIDESYM
80208+#define __INCLUDED_BY_HIDESYM 1
80209+#endif
80210 #include <stdarg.h>
80211 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
80212 #include <linux/types.h>
80213@@ -541,7 +544,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
80214 char sym[KSYM_SYMBOL_LEN];
80215 if (ext == 'B')
80216 sprint_backtrace(sym, value);
80217- else if (ext != 'f' && ext != 's')
80218+ else if (ext != 'f' && ext != 's' && ext != 'a')
80219 sprint_symbol(sym, value);
80220 else
80221 sprint_symbol_no_offset(sym, value);
80222@@ -974,7 +977,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
80223 return number(buf, end, *(const netdev_features_t *)addr, spec);
80224 }
80225
80226+#ifdef CONFIG_GRKERNSEC_HIDESYM
80227+int kptr_restrict __read_mostly = 2;
80228+#else
80229 int kptr_restrict __read_mostly;
80230+#endif
80231
80232 /*
80233 * Show a '%p' thing. A kernel extension is that the '%p' is followed
80234@@ -988,6 +995,8 @@ int kptr_restrict __read_mostly;
80235 * - 'S' For symbolic direct pointers with offset
80236 * - 's' For symbolic direct pointers without offset
80237 * - 'B' For backtraced symbolic direct pointers with offset
80238+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
80239+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
80240 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
80241 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
80242 * - 'M' For a 6-byte MAC address, it prints the address in the
80243@@ -1043,12 +1052,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
80244
80245 if (!ptr && *fmt != 'K') {
80246 /*
80247- * Print (null) with the same width as a pointer so it makes
80248+ * Print (nil) with the same width as a pointer so it makes
80249 * tabular output look nice.
80250 */
80251 if (spec.field_width == -1)
80252 spec.field_width = default_width;
80253- return string(buf, end, "(null)", spec);
80254+ return string(buf, end, "(nil)", spec);
80255 }
80256
80257 switch (*fmt) {
80258@@ -1058,6 +1067,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
80259 /* Fallthrough */
80260 case 'S':
80261 case 's':
80262+#ifdef CONFIG_GRKERNSEC_HIDESYM
80263+ break;
80264+#else
80265+ return symbol_string(buf, end, ptr, spec, *fmt);
80266+#endif
80267+ case 'A':
80268+ case 'a':
80269 case 'B':
80270 return symbol_string(buf, end, ptr, spec, *fmt);
80271 case 'R':
80272@@ -1098,6 +1114,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
80273 va_end(va);
80274 return buf;
80275 }
80276+ case 'P':
80277+ break;
80278 case 'K':
80279 /*
80280 * %pK cannot be used in IRQ context because its test
80281@@ -1121,6 +1139,21 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
80282 }
80283 break;
80284 }
80285+
80286+#ifdef CONFIG_GRKERNSEC_HIDESYM
80287+ /* 'P' = approved pointers to copy to userland,
80288+ as in the /proc/kallsyms case, as we make it display nothing
80289+ for non-root users, and the real contents for root users
80290+ Also ignore 'K' pointers, since we force their NULLing for non-root users
80291+ above
80292+ */
80293+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'K' && is_usercopy_object(buf)) {
80294+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
80295+ dump_stack();
80296+ ptr = NULL;
80297+ }
80298+#endif
80299+
80300 spec.flags |= SMALL;
80301 if (spec.field_width == -1) {
80302 spec.field_width = default_width;
80303@@ -1842,11 +1875,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
80304 typeof(type) value; \
80305 if (sizeof(type) == 8) { \
80306 args = PTR_ALIGN(args, sizeof(u32)); \
80307- *(u32 *)&value = *(u32 *)args; \
80308- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
80309+ *(u32 *)&value = *(const u32 *)args; \
80310+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
80311 } else { \
80312 args = PTR_ALIGN(args, sizeof(type)); \
80313- value = *(typeof(type) *)args; \
80314+ value = *(const typeof(type) *)args; \
80315 } \
80316 args += sizeof(type); \
80317 value; \
80318@@ -1909,7 +1942,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
80319 case FORMAT_TYPE_STR: {
80320 const char *str_arg = args;
80321 args += strlen(str_arg) + 1;
80322- str = string(str, end, (char *)str_arg, spec);
80323+ str = string(str, end, str_arg, spec);
80324 break;
80325 }
80326
80327diff --git a/localversion-grsec b/localversion-grsec
80328new file mode 100644
80329index 0000000..7cd6065
80330--- /dev/null
80331+++ b/localversion-grsec
80332@@ -0,0 +1 @@
80333+-grsec
80334diff --git a/mm/Kconfig b/mm/Kconfig
80335index 278e3ab..87c384d 100644
80336--- a/mm/Kconfig
80337+++ b/mm/Kconfig
80338@@ -286,10 +286,10 @@ config KSM
80339 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
80340
80341 config DEFAULT_MMAP_MIN_ADDR
80342- int "Low address space to protect from user allocation"
80343+ int "Low address space to protect from user allocation"
80344 depends on MMU
80345- default 4096
80346- help
80347+ default 65536
80348+ help
80349 This is the portion of low virtual memory which should be protected
80350 from userspace allocation. Keeping a user from writing to low pages
80351 can help reduce the impact of kernel NULL pointer bugs.
80352@@ -320,7 +320,7 @@ config MEMORY_FAILURE
80353
80354 config HWPOISON_INJECT
80355 tristate "HWPoison pages injector"
80356- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
80357+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
80358 select PROC_PAGE_MONITOR
80359
80360 config NOMMU_INITIAL_TRIM_EXCESS
80361diff --git a/mm/filemap.c b/mm/filemap.c
80362index 83efee7..3f99381 100644
80363--- a/mm/filemap.c
80364+++ b/mm/filemap.c
80365@@ -1747,7 +1747,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
80366 struct address_space *mapping = file->f_mapping;
80367
80368 if (!mapping->a_ops->readpage)
80369- return -ENOEXEC;
80370+ return -ENODEV;
80371 file_accessed(file);
80372 vma->vm_ops = &generic_file_vm_ops;
80373 return 0;
80374@@ -2087,6 +2087,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
80375 *pos = i_size_read(inode);
80376
80377 if (limit != RLIM_INFINITY) {
80378+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
80379 if (*pos >= limit) {
80380 send_sig(SIGXFSZ, current, 0);
80381 return -EFBIG;
80382diff --git a/mm/fremap.c b/mm/fremap.c
80383index a0aaf0e..20325c3 100644
80384--- a/mm/fremap.c
80385+++ b/mm/fremap.c
80386@@ -157,6 +157,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
80387 retry:
80388 vma = find_vma(mm, start);
80389
80390+#ifdef CONFIG_PAX_SEGMEXEC
80391+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
80392+ goto out;
80393+#endif
80394+
80395 /*
80396 * Make sure the vma is shared, that it supports prefaulting,
80397 * and that the remapped range is valid and fully within
80398diff --git a/mm/highmem.c b/mm/highmem.c
80399index b32b70c..e512eb0 100644
80400--- a/mm/highmem.c
80401+++ b/mm/highmem.c
80402@@ -138,8 +138,9 @@ static void flush_all_zero_pkmaps(void)
80403 * So no dangers, even with speculative execution.
80404 */
80405 page = pte_page(pkmap_page_table[i]);
80406+ pax_open_kernel();
80407 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
80408-
80409+ pax_close_kernel();
80410 set_page_address(page, NULL);
80411 need_flush = 1;
80412 }
80413@@ -198,9 +199,11 @@ start:
80414 }
80415 }
80416 vaddr = PKMAP_ADDR(last_pkmap_nr);
80417+
80418+ pax_open_kernel();
80419 set_pte_at(&init_mm, vaddr,
80420 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
80421-
80422+ pax_close_kernel();
80423 pkmap_count[last_pkmap_nr] = 1;
80424 set_page_address(page, (void *)vaddr);
80425
80426diff --git a/mm/hugetlb.c b/mm/hugetlb.c
80427index d7cec92..b05cc33 100644
80428--- a/mm/hugetlb.c
80429+++ b/mm/hugetlb.c
80430@@ -2008,15 +2008,17 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
80431 struct hstate *h = &default_hstate;
80432 unsigned long tmp;
80433 int ret;
80434+ ctl_table_no_const hugetlb_table;
80435
80436 tmp = h->max_huge_pages;
80437
80438 if (write && h->order >= MAX_ORDER)
80439 return -EINVAL;
80440
80441- table->data = &tmp;
80442- table->maxlen = sizeof(unsigned long);
80443- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
80444+ hugetlb_table = *table;
80445+ hugetlb_table.data = &tmp;
80446+ hugetlb_table.maxlen = sizeof(unsigned long);
80447+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
80448 if (ret)
80449 goto out;
80450
80451@@ -2073,15 +2075,17 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
80452 struct hstate *h = &default_hstate;
80453 unsigned long tmp;
80454 int ret;
80455+ ctl_table_no_const hugetlb_table;
80456
80457 tmp = h->nr_overcommit_huge_pages;
80458
80459 if (write && h->order >= MAX_ORDER)
80460 return -EINVAL;
80461
80462- table->data = &tmp;
80463- table->maxlen = sizeof(unsigned long);
80464- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
80465+ hugetlb_table = *table;
80466+ hugetlb_table.data = &tmp;
80467+ hugetlb_table.maxlen = sizeof(unsigned long);
80468+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
80469 if (ret)
80470 goto out;
80471
80472@@ -2515,6 +2519,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
80473 return 1;
80474 }
80475
80476+#ifdef CONFIG_PAX_SEGMEXEC
80477+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
80478+{
80479+ struct mm_struct *mm = vma->vm_mm;
80480+ struct vm_area_struct *vma_m;
80481+ unsigned long address_m;
80482+ pte_t *ptep_m;
80483+
80484+ vma_m = pax_find_mirror_vma(vma);
80485+ if (!vma_m)
80486+ return;
80487+
80488+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
80489+ address_m = address + SEGMEXEC_TASK_SIZE;
80490+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
80491+ get_page(page_m);
80492+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
80493+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
80494+}
80495+#endif
80496+
80497 /*
80498 * Hugetlb_cow() should be called with page lock of the original hugepage held.
80499 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
80500@@ -2633,6 +2658,11 @@ retry_avoidcopy:
80501 make_huge_pte(vma, new_page, 1));
80502 page_remove_rmap(old_page);
80503 hugepage_add_new_anon_rmap(new_page, vma, address);
80504+
80505+#ifdef CONFIG_PAX_SEGMEXEC
80506+ pax_mirror_huge_pte(vma, address, new_page);
80507+#endif
80508+
80509 /* Make the old page be freed below */
80510 new_page = old_page;
80511 }
80512@@ -2792,6 +2822,10 @@ retry:
80513 && (vma->vm_flags & VM_SHARED)));
80514 set_huge_pte_at(mm, address, ptep, new_pte);
80515
80516+#ifdef CONFIG_PAX_SEGMEXEC
80517+ pax_mirror_huge_pte(vma, address, page);
80518+#endif
80519+
80520 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
80521 /* Optimization, do the COW without a second fault */
80522 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
80523@@ -2821,6 +2855,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
80524 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
80525 struct hstate *h = hstate_vma(vma);
80526
80527+#ifdef CONFIG_PAX_SEGMEXEC
80528+ struct vm_area_struct *vma_m;
80529+#endif
80530+
80531 address &= huge_page_mask(h);
80532
80533 ptep = huge_pte_offset(mm, address);
80534@@ -2834,6 +2872,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
80535 VM_FAULT_SET_HINDEX(hstate_index(h));
80536 }
80537
80538+#ifdef CONFIG_PAX_SEGMEXEC
80539+ vma_m = pax_find_mirror_vma(vma);
80540+ if (vma_m) {
80541+ unsigned long address_m;
80542+
80543+ if (vma->vm_start > vma_m->vm_start) {
80544+ address_m = address;
80545+ address -= SEGMEXEC_TASK_SIZE;
80546+ vma = vma_m;
80547+ h = hstate_vma(vma);
80548+ } else
80549+ address_m = address + SEGMEXEC_TASK_SIZE;
80550+
80551+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
80552+ return VM_FAULT_OOM;
80553+ address_m &= HPAGE_MASK;
80554+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
80555+ }
80556+#endif
80557+
80558 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
80559 if (!ptep)
80560 return VM_FAULT_OOM;
80561diff --git a/mm/internal.h b/mm/internal.h
80562index 9ba2110..eaf0674 100644
80563--- a/mm/internal.h
80564+++ b/mm/internal.h
80565@@ -100,6 +100,7 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
80566 * in mm/page_alloc.c
80567 */
80568 extern void __free_pages_bootmem(struct page *page, unsigned int order);
80569+extern void free_compound_page(struct page *page);
80570 extern void prep_compound_page(struct page *page, unsigned long order);
80571 #ifdef CONFIG_MEMORY_FAILURE
80572 extern bool is_free_buddy_page(struct page *page);
80573diff --git a/mm/kmemleak.c b/mm/kmemleak.c
80574index 752a705..6c3102e 100644
80575--- a/mm/kmemleak.c
80576+++ b/mm/kmemleak.c
80577@@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
80578
80579 for (i = 0; i < object->trace_len; i++) {
80580 void *ptr = (void *)object->trace[i];
80581- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
80582+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
80583 }
80584 }
80585
80586@@ -1853,7 +1853,7 @@ static int __init kmemleak_late_init(void)
80587 return -ENOMEM;
80588 }
80589
80590- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
80591+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
80592 &kmemleak_fops);
80593 if (!dentry)
80594 pr_warning("Failed to create the debugfs kmemleak file\n");
80595diff --git a/mm/maccess.c b/mm/maccess.c
80596index d53adf9..03a24bf 100644
80597--- a/mm/maccess.c
80598+++ b/mm/maccess.c
80599@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
80600 set_fs(KERNEL_DS);
80601 pagefault_disable();
80602 ret = __copy_from_user_inatomic(dst,
80603- (__force const void __user *)src, size);
80604+ (const void __force_user *)src, size);
80605 pagefault_enable();
80606 set_fs(old_fs);
80607
80608@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
80609
80610 set_fs(KERNEL_DS);
80611 pagefault_disable();
80612- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
80613+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
80614 pagefault_enable();
80615 set_fs(old_fs);
80616
80617diff --git a/mm/madvise.c b/mm/madvise.c
80618index 03dfa5c..b032917 100644
80619--- a/mm/madvise.c
80620+++ b/mm/madvise.c
80621@@ -48,6 +48,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
80622 pgoff_t pgoff;
80623 unsigned long new_flags = vma->vm_flags;
80624
80625+#ifdef CONFIG_PAX_SEGMEXEC
80626+ struct vm_area_struct *vma_m;
80627+#endif
80628+
80629 switch (behavior) {
80630 case MADV_NORMAL:
80631 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
80632@@ -123,6 +127,13 @@ success:
80633 /*
80634 * vm_flags is protected by the mmap_sem held in write mode.
80635 */
80636+
80637+#ifdef CONFIG_PAX_SEGMEXEC
80638+ vma_m = pax_find_mirror_vma(vma);
80639+ if (vma_m)
80640+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
80641+#endif
80642+
80643 vma->vm_flags = new_flags;
80644
80645 out:
80646@@ -181,6 +192,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
80647 struct vm_area_struct ** prev,
80648 unsigned long start, unsigned long end)
80649 {
80650+
80651+#ifdef CONFIG_PAX_SEGMEXEC
80652+ struct vm_area_struct *vma_m;
80653+#endif
80654+
80655 *prev = vma;
80656 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
80657 return -EINVAL;
80658@@ -193,6 +209,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
80659 zap_page_range(vma, start, end - start, &details);
80660 } else
80661 zap_page_range(vma, start, end - start, NULL);
80662+
80663+#ifdef CONFIG_PAX_SEGMEXEC
80664+ vma_m = pax_find_mirror_vma(vma);
80665+ if (vma_m) {
80666+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
80667+ struct zap_details details = {
80668+ .nonlinear_vma = vma_m,
80669+ .last_index = ULONG_MAX,
80670+ };
80671+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
80672+ } else
80673+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
80674+ }
80675+#endif
80676+
80677 return 0;
80678 }
80679
80680@@ -397,6 +428,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
80681 if (end < start)
80682 goto out;
80683
80684+#ifdef CONFIG_PAX_SEGMEXEC
80685+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
80686+ if (end > SEGMEXEC_TASK_SIZE)
80687+ goto out;
80688+ } else
80689+#endif
80690+
80691+ if (end > TASK_SIZE)
80692+ goto out;
80693+
80694 error = 0;
80695 if (end == start)
80696 goto out;
80697diff --git a/mm/memory-failure.c b/mm/memory-failure.c
80698index c6e4dd3..1f41988 100644
80699--- a/mm/memory-failure.c
80700+++ b/mm/memory-failure.c
80701@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
80702
80703 int sysctl_memory_failure_recovery __read_mostly = 1;
80704
80705-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
80706+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
80707
80708 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
80709
80710@@ -202,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
80711 pfn, t->comm, t->pid);
80712 si.si_signo = SIGBUS;
80713 si.si_errno = 0;
80714- si.si_addr = (void *)addr;
80715+ si.si_addr = (void __user *)addr;
80716 #ifdef __ARCH_SI_TRAPNO
80717 si.si_trapno = trapno;
80718 #endif
80719@@ -760,7 +760,7 @@ static struct page_state {
80720 unsigned long res;
80721 char *msg;
80722 int (*action)(struct page *p, unsigned long pfn);
80723-} error_states[] = {
80724+} __do_const error_states[] = {
80725 { reserved, reserved, "reserved kernel", me_kernel },
80726 /*
80727 * free pages are specially detected outside this table:
80728@@ -1040,7 +1040,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
80729 }
80730
80731 nr_pages = 1 << compound_trans_order(hpage);
80732- atomic_long_add(nr_pages, &mce_bad_pages);
80733+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
80734
80735 /*
80736 * We need/can do nothing about count=0 pages.
80737@@ -1070,7 +1070,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
80738 if (!PageHWPoison(hpage)
80739 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
80740 || (p != hpage && TestSetPageHWPoison(hpage))) {
80741- atomic_long_sub(nr_pages, &mce_bad_pages);
80742+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
80743 return 0;
80744 }
80745 set_page_hwpoison_huge_page(hpage);
80746@@ -1128,7 +1128,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
80747 }
80748 if (hwpoison_filter(p)) {
80749 if (TestClearPageHWPoison(p))
80750- atomic_long_sub(nr_pages, &mce_bad_pages);
80751+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
80752 unlock_page(hpage);
80753 put_page(hpage);
80754 return 0;
80755@@ -1323,7 +1323,7 @@ int unpoison_memory(unsigned long pfn)
80756 return 0;
80757 }
80758 if (TestClearPageHWPoison(p))
80759- atomic_long_sub(nr_pages, &mce_bad_pages);
80760+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
80761 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
80762 return 0;
80763 }
80764@@ -1337,7 +1337,7 @@ int unpoison_memory(unsigned long pfn)
80765 */
80766 if (TestClearPageHWPoison(page)) {
80767 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
80768- atomic_long_sub(nr_pages, &mce_bad_pages);
80769+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
80770 freeit = 1;
80771 if (PageHuge(page))
80772 clear_page_hwpoison_huge_page(page);
80773@@ -1442,7 +1442,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
80774 }
80775 done:
80776 if (!PageHWPoison(hpage))
80777- atomic_long_add(1 << compound_trans_order(hpage),
80778+ atomic_long_add_unchecked(1 << compound_trans_order(hpage),
80779 &mce_bad_pages);
80780 set_page_hwpoison_huge_page(hpage);
80781 dequeue_hwpoisoned_huge_page(hpage);
80782@@ -1583,7 +1583,7 @@ int soft_offline_page(struct page *page, int flags)
80783 return ret;
80784
80785 done:
80786- atomic_long_add(1, &mce_bad_pages);
80787+ atomic_long_add_unchecked(1, &mce_bad_pages);
80788 SetPageHWPoison(page);
80789 /* keep elevated page count for bad page */
80790 return ret;
80791diff --git a/mm/memory.c b/mm/memory.c
80792index bb1369f..b9631d2 100644
80793--- a/mm/memory.c
80794+++ b/mm/memory.c
80795@@ -433,6 +433,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
80796 free_pte_range(tlb, pmd, addr);
80797 } while (pmd++, addr = next, addr != end);
80798
80799+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
80800 start &= PUD_MASK;
80801 if (start < floor)
80802 return;
80803@@ -447,6 +448,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
80804 pmd = pmd_offset(pud, start);
80805 pud_clear(pud);
80806 pmd_free_tlb(tlb, pmd, start);
80807+#endif
80808+
80809 }
80810
80811 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
80812@@ -466,6 +469,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
80813 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
80814 } while (pud++, addr = next, addr != end);
80815
80816+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
80817 start &= PGDIR_MASK;
80818 if (start < floor)
80819 return;
80820@@ -480,6 +484,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
80821 pud = pud_offset(pgd, start);
80822 pgd_clear(pgd);
80823 pud_free_tlb(tlb, pud, start);
80824+#endif
80825+
80826 }
80827
80828 /*
80829@@ -1618,12 +1624,6 @@ no_page_table:
80830 return page;
80831 }
80832
80833-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
80834-{
80835- return stack_guard_page_start(vma, addr) ||
80836- stack_guard_page_end(vma, addr+PAGE_SIZE);
80837-}
80838-
80839 /**
80840 * __get_user_pages() - pin user pages in memory
80841 * @tsk: task_struct of target task
80842@@ -1709,10 +1709,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
80843
80844 i = 0;
80845
80846- do {
80847+ while (nr_pages) {
80848 struct vm_area_struct *vma;
80849
80850- vma = find_extend_vma(mm, start);
80851+ vma = find_vma(mm, start);
80852 if (!vma && in_gate_area(mm, start)) {
80853 unsigned long pg = start & PAGE_MASK;
80854 pgd_t *pgd;
80855@@ -1760,7 +1760,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
80856 goto next_page;
80857 }
80858
80859- if (!vma ||
80860+ if (!vma || start < vma->vm_start ||
80861 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
80862 !(vm_flags & vma->vm_flags))
80863 return i ? : -EFAULT;
80864@@ -1787,11 +1787,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
80865 int ret;
80866 unsigned int fault_flags = 0;
80867
80868- /* For mlock, just skip the stack guard page. */
80869- if (foll_flags & FOLL_MLOCK) {
80870- if (stack_guard_page(vma, start))
80871- goto next_page;
80872- }
80873 if (foll_flags & FOLL_WRITE)
80874 fault_flags |= FAULT_FLAG_WRITE;
80875 if (nonblocking)
80876@@ -1865,7 +1860,7 @@ next_page:
80877 start += PAGE_SIZE;
80878 nr_pages--;
80879 } while (nr_pages && start < vma->vm_end);
80880- } while (nr_pages);
80881+ }
80882 return i;
80883 }
80884 EXPORT_SYMBOL(__get_user_pages);
80885@@ -2072,6 +2067,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
80886 page_add_file_rmap(page);
80887 set_pte_at(mm, addr, pte, mk_pte(page, prot));
80888
80889+#ifdef CONFIG_PAX_SEGMEXEC
80890+ pax_mirror_file_pte(vma, addr, page, ptl);
80891+#endif
80892+
80893 retval = 0;
80894 pte_unmap_unlock(pte, ptl);
80895 return retval;
80896@@ -2116,9 +2115,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
80897 if (!page_count(page))
80898 return -EINVAL;
80899 if (!(vma->vm_flags & VM_MIXEDMAP)) {
80900+
80901+#ifdef CONFIG_PAX_SEGMEXEC
80902+ struct vm_area_struct *vma_m;
80903+#endif
80904+
80905 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
80906 BUG_ON(vma->vm_flags & VM_PFNMAP);
80907 vma->vm_flags |= VM_MIXEDMAP;
80908+
80909+#ifdef CONFIG_PAX_SEGMEXEC
80910+ vma_m = pax_find_mirror_vma(vma);
80911+ if (vma_m)
80912+ vma_m->vm_flags |= VM_MIXEDMAP;
80913+#endif
80914+
80915 }
80916 return insert_page(vma, addr, page, vma->vm_page_prot);
80917 }
80918@@ -2201,6 +2212,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
80919 unsigned long pfn)
80920 {
80921 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
80922+ BUG_ON(vma->vm_mirror);
80923
80924 if (addr < vma->vm_start || addr >= vma->vm_end)
80925 return -EFAULT;
80926@@ -2401,7 +2413,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
80927
80928 BUG_ON(pud_huge(*pud));
80929
80930- pmd = pmd_alloc(mm, pud, addr);
80931+ pmd = (mm == &init_mm) ?
80932+ pmd_alloc_kernel(mm, pud, addr) :
80933+ pmd_alloc(mm, pud, addr);
80934 if (!pmd)
80935 return -ENOMEM;
80936 do {
80937@@ -2421,7 +2435,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
80938 unsigned long next;
80939 int err;
80940
80941- pud = pud_alloc(mm, pgd, addr);
80942+ pud = (mm == &init_mm) ?
80943+ pud_alloc_kernel(mm, pgd, addr) :
80944+ pud_alloc(mm, pgd, addr);
80945 if (!pud)
80946 return -ENOMEM;
80947 do {
80948@@ -2509,6 +2525,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
80949 copy_user_highpage(dst, src, va, vma);
80950 }
80951
80952+#ifdef CONFIG_PAX_SEGMEXEC
80953+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
80954+{
80955+ struct mm_struct *mm = vma->vm_mm;
80956+ spinlock_t *ptl;
80957+ pte_t *pte, entry;
80958+
80959+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
80960+ entry = *pte;
80961+ if (!pte_present(entry)) {
80962+ if (!pte_none(entry)) {
80963+ BUG_ON(pte_file(entry));
80964+ free_swap_and_cache(pte_to_swp_entry(entry));
80965+ pte_clear_not_present_full(mm, address, pte, 0);
80966+ }
80967+ } else {
80968+ struct page *page;
80969+
80970+ flush_cache_page(vma, address, pte_pfn(entry));
80971+ entry = ptep_clear_flush(vma, address, pte);
80972+ BUG_ON(pte_dirty(entry));
80973+ page = vm_normal_page(vma, address, entry);
80974+ if (page) {
80975+ update_hiwater_rss(mm);
80976+ if (PageAnon(page))
80977+ dec_mm_counter_fast(mm, MM_ANONPAGES);
80978+ else
80979+ dec_mm_counter_fast(mm, MM_FILEPAGES);
80980+ page_remove_rmap(page);
80981+ page_cache_release(page);
80982+ }
80983+ }
80984+ pte_unmap_unlock(pte, ptl);
80985+}
80986+
80987+/* PaX: if vma is mirrored, synchronize the mirror's PTE
80988+ *
80989+ * the ptl of the lower mapped page is held on entry and is not released on exit
80990+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
80991+ */
80992+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
80993+{
80994+ struct mm_struct *mm = vma->vm_mm;
80995+ unsigned long address_m;
80996+ spinlock_t *ptl_m;
80997+ struct vm_area_struct *vma_m;
80998+ pmd_t *pmd_m;
80999+ pte_t *pte_m, entry_m;
81000+
81001+ BUG_ON(!page_m || !PageAnon(page_m));
81002+
81003+ vma_m = pax_find_mirror_vma(vma);
81004+ if (!vma_m)
81005+ return;
81006+
81007+ BUG_ON(!PageLocked(page_m));
81008+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
81009+ address_m = address + SEGMEXEC_TASK_SIZE;
81010+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
81011+ pte_m = pte_offset_map(pmd_m, address_m);
81012+ ptl_m = pte_lockptr(mm, pmd_m);
81013+ if (ptl != ptl_m) {
81014+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
81015+ if (!pte_none(*pte_m))
81016+ goto out;
81017+ }
81018+
81019+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
81020+ page_cache_get(page_m);
81021+ page_add_anon_rmap(page_m, vma_m, address_m);
81022+ inc_mm_counter_fast(mm, MM_ANONPAGES);
81023+ set_pte_at(mm, address_m, pte_m, entry_m);
81024+ update_mmu_cache(vma_m, address_m, entry_m);
81025+out:
81026+ if (ptl != ptl_m)
81027+ spin_unlock(ptl_m);
81028+ pte_unmap(pte_m);
81029+ unlock_page(page_m);
81030+}
81031+
81032+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
81033+{
81034+ struct mm_struct *mm = vma->vm_mm;
81035+ unsigned long address_m;
81036+ spinlock_t *ptl_m;
81037+ struct vm_area_struct *vma_m;
81038+ pmd_t *pmd_m;
81039+ pte_t *pte_m, entry_m;
81040+
81041+ BUG_ON(!page_m || PageAnon(page_m));
81042+
81043+ vma_m = pax_find_mirror_vma(vma);
81044+ if (!vma_m)
81045+ return;
81046+
81047+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
81048+ address_m = address + SEGMEXEC_TASK_SIZE;
81049+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
81050+ pte_m = pte_offset_map(pmd_m, address_m);
81051+ ptl_m = pte_lockptr(mm, pmd_m);
81052+ if (ptl != ptl_m) {
81053+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
81054+ if (!pte_none(*pte_m))
81055+ goto out;
81056+ }
81057+
81058+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
81059+ page_cache_get(page_m);
81060+ page_add_file_rmap(page_m);
81061+ inc_mm_counter_fast(mm, MM_FILEPAGES);
81062+ set_pte_at(mm, address_m, pte_m, entry_m);
81063+ update_mmu_cache(vma_m, address_m, entry_m);
81064+out:
81065+ if (ptl != ptl_m)
81066+ spin_unlock(ptl_m);
81067+ pte_unmap(pte_m);
81068+}
81069+
81070+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
81071+{
81072+ struct mm_struct *mm = vma->vm_mm;
81073+ unsigned long address_m;
81074+ spinlock_t *ptl_m;
81075+ struct vm_area_struct *vma_m;
81076+ pmd_t *pmd_m;
81077+ pte_t *pte_m, entry_m;
81078+
81079+ vma_m = pax_find_mirror_vma(vma);
81080+ if (!vma_m)
81081+ return;
81082+
81083+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
81084+ address_m = address + SEGMEXEC_TASK_SIZE;
81085+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
81086+ pte_m = pte_offset_map(pmd_m, address_m);
81087+ ptl_m = pte_lockptr(mm, pmd_m);
81088+ if (ptl != ptl_m) {
81089+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
81090+ if (!pte_none(*pte_m))
81091+ goto out;
81092+ }
81093+
81094+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
81095+ set_pte_at(mm, address_m, pte_m, entry_m);
81096+out:
81097+ if (ptl != ptl_m)
81098+ spin_unlock(ptl_m);
81099+ pte_unmap(pte_m);
81100+}
81101+
81102+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
81103+{
81104+ struct page *page_m;
81105+ pte_t entry;
81106+
81107+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
81108+ goto out;
81109+
81110+ entry = *pte;
81111+ page_m = vm_normal_page(vma, address, entry);
81112+ if (!page_m)
81113+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
81114+ else if (PageAnon(page_m)) {
81115+ if (pax_find_mirror_vma(vma)) {
81116+ pte_unmap_unlock(pte, ptl);
81117+ lock_page(page_m);
81118+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
81119+ if (pte_same(entry, *pte))
81120+ pax_mirror_anon_pte(vma, address, page_m, ptl);
81121+ else
81122+ unlock_page(page_m);
81123+ }
81124+ } else
81125+ pax_mirror_file_pte(vma, address, page_m, ptl);
81126+
81127+out:
81128+ pte_unmap_unlock(pte, ptl);
81129+}
81130+#endif
81131+
81132 /*
81133 * This routine handles present pages, when users try to write
81134 * to a shared page. It is done by copying the page to a new address
81135@@ -2725,6 +2921,12 @@ gotten:
81136 */
81137 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
81138 if (likely(pte_same(*page_table, orig_pte))) {
81139+
81140+#ifdef CONFIG_PAX_SEGMEXEC
81141+ if (pax_find_mirror_vma(vma))
81142+ BUG_ON(!trylock_page(new_page));
81143+#endif
81144+
81145 if (old_page) {
81146 if (!PageAnon(old_page)) {
81147 dec_mm_counter_fast(mm, MM_FILEPAGES);
81148@@ -2776,6 +2978,10 @@ gotten:
81149 page_remove_rmap(old_page);
81150 }
81151
81152+#ifdef CONFIG_PAX_SEGMEXEC
81153+ pax_mirror_anon_pte(vma, address, new_page, ptl);
81154+#endif
81155+
81156 /* Free the old page.. */
81157 new_page = old_page;
81158 ret |= VM_FAULT_WRITE;
81159@@ -3051,6 +3257,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
81160 swap_free(entry);
81161 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
81162 try_to_free_swap(page);
81163+
81164+#ifdef CONFIG_PAX_SEGMEXEC
81165+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
81166+#endif
81167+
81168 unlock_page(page);
81169 if (swapcache) {
81170 /*
81171@@ -3074,6 +3285,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
81172
81173 /* No need to invalidate - it was non-present before */
81174 update_mmu_cache(vma, address, page_table);
81175+
81176+#ifdef CONFIG_PAX_SEGMEXEC
81177+ pax_mirror_anon_pte(vma, address, page, ptl);
81178+#endif
81179+
81180 unlock:
81181 pte_unmap_unlock(page_table, ptl);
81182 out:
81183@@ -3093,40 +3309,6 @@ out_release:
81184 }
81185
81186 /*
81187- * This is like a special single-page "expand_{down|up}wards()",
81188- * except we must first make sure that 'address{-|+}PAGE_SIZE'
81189- * doesn't hit another vma.
81190- */
81191-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
81192-{
81193- address &= PAGE_MASK;
81194- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
81195- struct vm_area_struct *prev = vma->vm_prev;
81196-
81197- /*
81198- * Is there a mapping abutting this one below?
81199- *
81200- * That's only ok if it's the same stack mapping
81201- * that has gotten split..
81202- */
81203- if (prev && prev->vm_end == address)
81204- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
81205-
81206- expand_downwards(vma, address - PAGE_SIZE);
81207- }
81208- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
81209- struct vm_area_struct *next = vma->vm_next;
81210-
81211- /* As VM_GROWSDOWN but s/below/above/ */
81212- if (next && next->vm_start == address + PAGE_SIZE)
81213- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
81214-
81215- expand_upwards(vma, address + PAGE_SIZE);
81216- }
81217- return 0;
81218-}
81219-
81220-/*
81221 * We enter with non-exclusive mmap_sem (to exclude vma changes,
81222 * but allow concurrent faults), and pte mapped but not yet locked.
81223 * We return with mmap_sem still held, but pte unmapped and unlocked.
81224@@ -3135,27 +3317,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
81225 unsigned long address, pte_t *page_table, pmd_t *pmd,
81226 unsigned int flags)
81227 {
81228- struct page *page;
81229+ struct page *page = NULL;
81230 spinlock_t *ptl;
81231 pte_t entry;
81232
81233- pte_unmap(page_table);
81234-
81235- /* Check if we need to add a guard page to the stack */
81236- if (check_stack_guard_page(vma, address) < 0)
81237- return VM_FAULT_SIGBUS;
81238-
81239- /* Use the zero-page for reads */
81240 if (!(flags & FAULT_FLAG_WRITE)) {
81241 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
81242 vma->vm_page_prot));
81243- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
81244+ ptl = pte_lockptr(mm, pmd);
81245+ spin_lock(ptl);
81246 if (!pte_none(*page_table))
81247 goto unlock;
81248 goto setpte;
81249 }
81250
81251 /* Allocate our own private page. */
81252+ pte_unmap(page_table);
81253+
81254 if (unlikely(anon_vma_prepare(vma)))
81255 goto oom;
81256 page = alloc_zeroed_user_highpage_movable(vma, address);
81257@@ -3174,6 +3352,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
81258 if (!pte_none(*page_table))
81259 goto release;
81260
81261+#ifdef CONFIG_PAX_SEGMEXEC
81262+ if (pax_find_mirror_vma(vma))
81263+ BUG_ON(!trylock_page(page));
81264+#endif
81265+
81266 inc_mm_counter_fast(mm, MM_ANONPAGES);
81267 page_add_new_anon_rmap(page, vma, address);
81268 setpte:
81269@@ -3181,6 +3364,12 @@ setpte:
81270
81271 /* No need to invalidate - it was non-present before */
81272 update_mmu_cache(vma, address, page_table);
81273+
81274+#ifdef CONFIG_PAX_SEGMEXEC
81275+ if (page)
81276+ pax_mirror_anon_pte(vma, address, page, ptl);
81277+#endif
81278+
81279 unlock:
81280 pte_unmap_unlock(page_table, ptl);
81281 return 0;
81282@@ -3324,6 +3513,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
81283 */
81284 /* Only go through if we didn't race with anybody else... */
81285 if (likely(pte_same(*page_table, orig_pte))) {
81286+
81287+#ifdef CONFIG_PAX_SEGMEXEC
81288+ if (anon && pax_find_mirror_vma(vma))
81289+ BUG_ON(!trylock_page(page));
81290+#endif
81291+
81292 flush_icache_page(vma, page);
81293 entry = mk_pte(page, vma->vm_page_prot);
81294 if (flags & FAULT_FLAG_WRITE)
81295@@ -3343,6 +3538,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
81296
81297 /* no need to invalidate: a not-present page won't be cached */
81298 update_mmu_cache(vma, address, page_table);
81299+
81300+#ifdef CONFIG_PAX_SEGMEXEC
81301+ if (anon)
81302+ pax_mirror_anon_pte(vma, address, page, ptl);
81303+ else
81304+ pax_mirror_file_pte(vma, address, page, ptl);
81305+#endif
81306+
81307 } else {
81308 if (cow_page)
81309 mem_cgroup_uncharge_page(cow_page);
81310@@ -3664,6 +3867,12 @@ int handle_pte_fault(struct mm_struct *mm,
81311 if (flags & FAULT_FLAG_WRITE)
81312 flush_tlb_fix_spurious_fault(vma, address);
81313 }
81314+
81315+#ifdef CONFIG_PAX_SEGMEXEC
81316+ pax_mirror_pte(vma, address, pte, pmd, ptl);
81317+ return 0;
81318+#endif
81319+
81320 unlock:
81321 pte_unmap_unlock(pte, ptl);
81322 return 0;
81323@@ -3680,6 +3889,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
81324 pmd_t *pmd;
81325 pte_t *pte;
81326
81327+#ifdef CONFIG_PAX_SEGMEXEC
81328+ struct vm_area_struct *vma_m;
81329+#endif
81330+
81331 __set_current_state(TASK_RUNNING);
81332
81333 count_vm_event(PGFAULT);
81334@@ -3691,6 +3904,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
81335 if (unlikely(is_vm_hugetlb_page(vma)))
81336 return hugetlb_fault(mm, vma, address, flags);
81337
81338+#ifdef CONFIG_PAX_SEGMEXEC
81339+ vma_m = pax_find_mirror_vma(vma);
81340+ if (vma_m) {
81341+ unsigned long address_m;
81342+ pgd_t *pgd_m;
81343+ pud_t *pud_m;
81344+ pmd_t *pmd_m;
81345+
81346+ if (vma->vm_start > vma_m->vm_start) {
81347+ address_m = address;
81348+ address -= SEGMEXEC_TASK_SIZE;
81349+ vma = vma_m;
81350+ } else
81351+ address_m = address + SEGMEXEC_TASK_SIZE;
81352+
81353+ pgd_m = pgd_offset(mm, address_m);
81354+ pud_m = pud_alloc(mm, pgd_m, address_m);
81355+ if (!pud_m)
81356+ return VM_FAULT_OOM;
81357+ pmd_m = pmd_alloc(mm, pud_m, address_m);
81358+ if (!pmd_m)
81359+ return VM_FAULT_OOM;
81360+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
81361+ return VM_FAULT_OOM;
81362+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
81363+ }
81364+#endif
81365+
81366 retry:
81367 pgd = pgd_offset(mm, address);
81368 pud = pud_alloc(mm, pgd, address);
81369@@ -3789,6 +4030,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
81370 spin_unlock(&mm->page_table_lock);
81371 return 0;
81372 }
81373+
81374+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
81375+{
81376+ pud_t *new = pud_alloc_one(mm, address);
81377+ if (!new)
81378+ return -ENOMEM;
81379+
81380+ smp_wmb(); /* See comment in __pte_alloc */
81381+
81382+ spin_lock(&mm->page_table_lock);
81383+ if (pgd_present(*pgd)) /* Another has populated it */
81384+ pud_free(mm, new);
81385+ else
81386+ pgd_populate_kernel(mm, pgd, new);
81387+ spin_unlock(&mm->page_table_lock);
81388+ return 0;
81389+}
81390 #endif /* __PAGETABLE_PUD_FOLDED */
81391
81392 #ifndef __PAGETABLE_PMD_FOLDED
81393@@ -3819,11 +4077,35 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
81394 spin_unlock(&mm->page_table_lock);
81395 return 0;
81396 }
81397+
81398+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
81399+{
81400+ pmd_t *new = pmd_alloc_one(mm, address);
81401+ if (!new)
81402+ return -ENOMEM;
81403+
81404+ smp_wmb(); /* See comment in __pte_alloc */
81405+
81406+ spin_lock(&mm->page_table_lock);
81407+#ifndef __ARCH_HAS_4LEVEL_HACK
81408+ if (pud_present(*pud)) /* Another has populated it */
81409+ pmd_free(mm, new);
81410+ else
81411+ pud_populate_kernel(mm, pud, new);
81412+#else
81413+ if (pgd_present(*pud)) /* Another has populated it */
81414+ pmd_free(mm, new);
81415+ else
81416+ pgd_populate_kernel(mm, pud, new);
81417+#endif /* __ARCH_HAS_4LEVEL_HACK */
81418+ spin_unlock(&mm->page_table_lock);
81419+ return 0;
81420+}
81421 #endif /* __PAGETABLE_PMD_FOLDED */
81422
81423-int make_pages_present(unsigned long addr, unsigned long end)
81424+ssize_t make_pages_present(unsigned long addr, unsigned long end)
81425 {
81426- int ret, len, write;
81427+ ssize_t ret, len, write;
81428 struct vm_area_struct * vma;
81429
81430 vma = find_vma(current->mm, addr);
81431@@ -3856,7 +4138,7 @@ static int __init gate_vma_init(void)
81432 gate_vma.vm_start = FIXADDR_USER_START;
81433 gate_vma.vm_end = FIXADDR_USER_END;
81434 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
81435- gate_vma.vm_page_prot = __P101;
81436+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
81437
81438 return 0;
81439 }
81440@@ -3990,8 +4272,8 @@ out:
81441 return ret;
81442 }
81443
81444-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
81445- void *buf, int len, int write)
81446+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
81447+ void *buf, size_t len, int write)
81448 {
81449 resource_size_t phys_addr;
81450 unsigned long prot = 0;
81451@@ -4016,8 +4298,8 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
81452 * Access another process' address space as given in mm. If non-NULL, use the
81453 * given task for page fault accounting.
81454 */
81455-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
81456- unsigned long addr, void *buf, int len, int write)
81457+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
81458+ unsigned long addr, void *buf, size_t len, int write)
81459 {
81460 struct vm_area_struct *vma;
81461 void *old_buf = buf;
81462@@ -4025,7 +4307,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
81463 down_read(&mm->mmap_sem);
81464 /* ignore errors, just check how much was successfully transferred */
81465 while (len) {
81466- int bytes, ret, offset;
81467+ ssize_t bytes, ret, offset;
81468 void *maddr;
81469 struct page *page = NULL;
81470
81471@@ -4084,8 +4366,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
81472 *
81473 * The caller must hold a reference on @mm.
81474 */
81475-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
81476- void *buf, int len, int write)
81477+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
81478+ void *buf, size_t len, int write)
81479 {
81480 return __access_remote_vm(NULL, mm, addr, buf, len, write);
81481 }
81482@@ -4095,11 +4377,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
81483 * Source/target buffer must be kernel space,
81484 * Do not walk the page table directly, use get_user_pages
81485 */
81486-int access_process_vm(struct task_struct *tsk, unsigned long addr,
81487- void *buf, int len, int write)
81488+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
81489+ void *buf, size_t len, int write)
81490 {
81491 struct mm_struct *mm;
81492- int ret;
81493+ ssize_t ret;
81494
81495 mm = get_task_mm(tsk);
81496 if (!mm)
81497diff --git a/mm/mempolicy.c b/mm/mempolicy.c
81498index 3df6d12..a11056a 100644
81499--- a/mm/mempolicy.c
81500+++ b/mm/mempolicy.c
81501@@ -721,6 +721,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
81502 unsigned long vmstart;
81503 unsigned long vmend;
81504
81505+#ifdef CONFIG_PAX_SEGMEXEC
81506+ struct vm_area_struct *vma_m;
81507+#endif
81508+
81509 vma = find_vma(mm, start);
81510 if (!vma || vma->vm_start > start)
81511 return -EFAULT;
81512@@ -757,9 +761,20 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
81513 if (err)
81514 goto out;
81515 }
81516+
81517 err = vma_replace_policy(vma, new_pol);
81518 if (err)
81519 goto out;
81520+
81521+#ifdef CONFIG_PAX_SEGMEXEC
81522+ vma_m = pax_find_mirror_vma(vma);
81523+ if (vma_m) {
81524+ err = vma_replace_policy(vma_m, new_pol);
81525+ if (err)
81526+ goto out;
81527+ }
81528+#endif
81529+
81530 }
81531
81532 out:
81533@@ -1216,6 +1231,17 @@ static long do_mbind(unsigned long start, unsigned long len,
81534
81535 if (end < start)
81536 return -EINVAL;
81537+
81538+#ifdef CONFIG_PAX_SEGMEXEC
81539+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
81540+ if (end > SEGMEXEC_TASK_SIZE)
81541+ return -EINVAL;
81542+ } else
81543+#endif
81544+
81545+ if (end > TASK_SIZE)
81546+ return -EINVAL;
81547+
81548 if (end == start)
81549 return 0;
81550
81551@@ -1445,8 +1471,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
81552 */
81553 tcred = __task_cred(task);
81554 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
81555- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
81556- !capable(CAP_SYS_NICE)) {
81557+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
81558 rcu_read_unlock();
81559 err = -EPERM;
81560 goto out_put;
81561@@ -1477,6 +1502,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
81562 goto out;
81563 }
81564
81565+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
81566+ if (mm != current->mm &&
81567+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
81568+ mmput(mm);
81569+ err = -EPERM;
81570+ goto out;
81571+ }
81572+#endif
81573+
81574 err = do_migrate_pages(mm, old, new,
81575 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
81576
81577diff --git a/mm/migrate.c b/mm/migrate.c
81578index 2fd8b4a..d70358f 100644
81579--- a/mm/migrate.c
81580+++ b/mm/migrate.c
81581@@ -1401,8 +1401,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
81582 */
81583 tcred = __task_cred(task);
81584 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
81585- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
81586- !capable(CAP_SYS_NICE)) {
81587+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
81588 rcu_read_unlock();
81589 err = -EPERM;
81590 goto out;
81591diff --git a/mm/mlock.c b/mm/mlock.c
81592index c9bd528..da8d069 100644
81593--- a/mm/mlock.c
81594+++ b/mm/mlock.c
81595@@ -13,6 +13,7 @@
81596 #include <linux/pagemap.h>
81597 #include <linux/mempolicy.h>
81598 #include <linux/syscalls.h>
81599+#include <linux/security.h>
81600 #include <linux/sched.h>
81601 #include <linux/export.h>
81602 #include <linux/rmap.h>
81603@@ -369,7 +370,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
81604 {
81605 unsigned long nstart, end, tmp;
81606 struct vm_area_struct * vma, * prev;
81607- int error;
81608+ int error = 0;
81609
81610 VM_BUG_ON(start & ~PAGE_MASK);
81611 VM_BUG_ON(len != PAGE_ALIGN(len));
81612@@ -378,6 +379,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
81613 return -EINVAL;
81614 if (end == start)
81615 return 0;
81616+ if (end > TASK_SIZE)
81617+ return -EINVAL;
81618+
81619 vma = find_vma(current->mm, start);
81620 if (!vma || vma->vm_start > start)
81621 return -ENOMEM;
81622@@ -389,6 +393,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
81623 for (nstart = start ; ; ) {
81624 vm_flags_t newflags;
81625
81626+#ifdef CONFIG_PAX_SEGMEXEC
81627+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
81628+ break;
81629+#endif
81630+
81631 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
81632
81633 newflags = vma->vm_flags | VM_LOCKED;
81634@@ -494,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
81635 lock_limit >>= PAGE_SHIFT;
81636
81637 /* check against resource limits */
81638+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
81639 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
81640 error = do_mlock(start, len, 1);
81641 up_write(&current->mm->mmap_sem);
81642@@ -528,6 +538,12 @@ static int do_mlockall(int flags)
81643 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
81644 vm_flags_t newflags;
81645
81646+#ifdef CONFIG_PAX_SEGMEXEC
81647+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
81648+ break;
81649+#endif
81650+
81651+ BUG_ON(vma->vm_end > TASK_SIZE);
81652 newflags = vma->vm_flags | VM_LOCKED;
81653 if (!(flags & MCL_CURRENT))
81654 newflags &= ~VM_LOCKED;
81655@@ -560,6 +576,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
81656 lock_limit >>= PAGE_SHIFT;
81657
81658 ret = -ENOMEM;
81659+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
81660 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
81661 capable(CAP_IPC_LOCK))
81662 ret = do_mlockall(flags);
81663diff --git a/mm/mmap.c b/mm/mmap.c
81664index 8832b87..04240d1 100644
81665--- a/mm/mmap.c
81666+++ b/mm/mmap.c
81667@@ -32,6 +32,7 @@
81668 #include <linux/khugepaged.h>
81669 #include <linux/uprobes.h>
81670 #include <linux/rbtree_augmented.h>
81671+#include <linux/random.h>
81672
81673 #include <asm/uaccess.h>
81674 #include <asm/cacheflush.h>
81675@@ -48,6 +49,16 @@
81676 #define arch_rebalance_pgtables(addr, len) (addr)
81677 #endif
81678
81679+static inline void verify_mm_writelocked(struct mm_struct *mm)
81680+{
81681+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
81682+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
81683+ up_read(&mm->mmap_sem);
81684+ BUG();
81685+ }
81686+#endif
81687+}
81688+
81689 static void unmap_region(struct mm_struct *mm,
81690 struct vm_area_struct *vma, struct vm_area_struct *prev,
81691 unsigned long start, unsigned long end);
81692@@ -67,22 +78,32 @@ static void unmap_region(struct mm_struct *mm,
81693 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
81694 *
81695 */
81696-pgprot_t protection_map[16] = {
81697+pgprot_t protection_map[16] __read_only = {
81698 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
81699 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
81700 };
81701
81702-pgprot_t vm_get_page_prot(unsigned long vm_flags)
81703+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
81704 {
81705- return __pgprot(pgprot_val(protection_map[vm_flags &
81706+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
81707 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
81708 pgprot_val(arch_vm_get_page_prot(vm_flags)));
81709+
81710+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
81711+ if (!(__supported_pte_mask & _PAGE_NX) &&
81712+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
81713+ (vm_flags & (VM_READ | VM_WRITE)))
81714+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
81715+#endif
81716+
81717+ return prot;
81718 }
81719 EXPORT_SYMBOL(vm_get_page_prot);
81720
81721 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
81722 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
81723 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
81724+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
81725 /*
81726 * Make sure vm_committed_as in one cacheline and not cacheline shared with
81727 * other variables. It can be updated by several CPUs frequently.
81728@@ -238,6 +259,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
81729 struct vm_area_struct *next = vma->vm_next;
81730
81731 might_sleep();
81732+ BUG_ON(vma->vm_mirror);
81733 if (vma->vm_ops && vma->vm_ops->close)
81734 vma->vm_ops->close(vma);
81735 if (vma->vm_file)
81736@@ -281,6 +303,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
81737 * not page aligned -Ram Gupta
81738 */
81739 rlim = rlimit(RLIMIT_DATA);
81740+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
81741 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
81742 (mm->end_data - mm->start_data) > rlim)
81743 goto out;
81744@@ -888,6 +911,12 @@ static int
81745 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
81746 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
81747 {
81748+
81749+#ifdef CONFIG_PAX_SEGMEXEC
81750+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
81751+ return 0;
81752+#endif
81753+
81754 if (is_mergeable_vma(vma, file, vm_flags) &&
81755 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
81756 if (vma->vm_pgoff == vm_pgoff)
81757@@ -907,6 +936,12 @@ static int
81758 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
81759 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
81760 {
81761+
81762+#ifdef CONFIG_PAX_SEGMEXEC
81763+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
81764+ return 0;
81765+#endif
81766+
81767 if (is_mergeable_vma(vma, file, vm_flags) &&
81768 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
81769 pgoff_t vm_pglen;
81770@@ -949,13 +984,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
81771 struct vm_area_struct *vma_merge(struct mm_struct *mm,
81772 struct vm_area_struct *prev, unsigned long addr,
81773 unsigned long end, unsigned long vm_flags,
81774- struct anon_vma *anon_vma, struct file *file,
81775+ struct anon_vma *anon_vma, struct file *file,
81776 pgoff_t pgoff, struct mempolicy *policy)
81777 {
81778 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
81779 struct vm_area_struct *area, *next;
81780 int err;
81781
81782+#ifdef CONFIG_PAX_SEGMEXEC
81783+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
81784+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
81785+
81786+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
81787+#endif
81788+
81789 /*
81790 * We later require that vma->vm_flags == vm_flags,
81791 * so this tests vma->vm_flags & VM_SPECIAL, too.
81792@@ -971,6 +1013,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
81793 if (next && next->vm_end == end) /* cases 6, 7, 8 */
81794 next = next->vm_next;
81795
81796+#ifdef CONFIG_PAX_SEGMEXEC
81797+ if (prev)
81798+ prev_m = pax_find_mirror_vma(prev);
81799+ if (area)
81800+ area_m = pax_find_mirror_vma(area);
81801+ if (next)
81802+ next_m = pax_find_mirror_vma(next);
81803+#endif
81804+
81805 /*
81806 * Can it merge with the predecessor?
81807 */
81808@@ -990,9 +1041,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
81809 /* cases 1, 6 */
81810 err = vma_adjust(prev, prev->vm_start,
81811 next->vm_end, prev->vm_pgoff, NULL);
81812- } else /* cases 2, 5, 7 */
81813+
81814+#ifdef CONFIG_PAX_SEGMEXEC
81815+ if (!err && prev_m)
81816+ err = vma_adjust(prev_m, prev_m->vm_start,
81817+ next_m->vm_end, prev_m->vm_pgoff, NULL);
81818+#endif
81819+
81820+ } else { /* cases 2, 5, 7 */
81821 err = vma_adjust(prev, prev->vm_start,
81822 end, prev->vm_pgoff, NULL);
81823+
81824+#ifdef CONFIG_PAX_SEGMEXEC
81825+ if (!err && prev_m)
81826+ err = vma_adjust(prev_m, prev_m->vm_start,
81827+ end_m, prev_m->vm_pgoff, NULL);
81828+#endif
81829+
81830+ }
81831 if (err)
81832 return NULL;
81833 khugepaged_enter_vma_merge(prev);
81834@@ -1006,12 +1072,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
81835 mpol_equal(policy, vma_policy(next)) &&
81836 can_vma_merge_before(next, vm_flags,
81837 anon_vma, file, pgoff+pglen)) {
81838- if (prev && addr < prev->vm_end) /* case 4 */
81839+ if (prev && addr < prev->vm_end) { /* case 4 */
81840 err = vma_adjust(prev, prev->vm_start,
81841 addr, prev->vm_pgoff, NULL);
81842- else /* cases 3, 8 */
81843+
81844+#ifdef CONFIG_PAX_SEGMEXEC
81845+ if (!err && prev_m)
81846+ err = vma_adjust(prev_m, prev_m->vm_start,
81847+ addr_m, prev_m->vm_pgoff, NULL);
81848+#endif
81849+
81850+ } else { /* cases 3, 8 */
81851 err = vma_adjust(area, addr, next->vm_end,
81852 next->vm_pgoff - pglen, NULL);
81853+
81854+#ifdef CONFIG_PAX_SEGMEXEC
81855+ if (!err && area_m)
81856+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
81857+ next_m->vm_pgoff - pglen, NULL);
81858+#endif
81859+
81860+ }
81861 if (err)
81862 return NULL;
81863 khugepaged_enter_vma_merge(area);
81864@@ -1120,8 +1201,10 @@ none:
81865 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
81866 struct file *file, long pages)
81867 {
81868- const unsigned long stack_flags
81869- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
81870+
81871+#ifdef CONFIG_PAX_RANDMMAP
81872+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
81873+#endif
81874
81875 mm->total_vm += pages;
81876
81877@@ -1129,7 +1212,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
81878 mm->shared_vm += pages;
81879 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
81880 mm->exec_vm += pages;
81881- } else if (flags & stack_flags)
81882+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
81883 mm->stack_vm += pages;
81884 }
81885 #endif /* CONFIG_PROC_FS */
81886@@ -1165,7 +1248,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
81887 * (the exception is when the underlying filesystem is noexec
81888 * mounted, in which case we dont add PROT_EXEC.)
81889 */
81890- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
81891+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
81892 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
81893 prot |= PROT_EXEC;
81894
81895@@ -1191,7 +1274,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
81896 /* Obtain the address to map to. we verify (or select) it and ensure
81897 * that it represents a valid section of the address space.
81898 */
81899- addr = get_unmapped_area(file, addr, len, pgoff, flags);
81900+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
81901 if (addr & ~PAGE_MASK)
81902 return addr;
81903
81904@@ -1202,6 +1285,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
81905 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
81906 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
81907
81908+#ifdef CONFIG_PAX_MPROTECT
81909+ if (mm->pax_flags & MF_PAX_MPROTECT) {
81910+#ifndef CONFIG_PAX_MPROTECT_COMPAT
81911+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
81912+ gr_log_rwxmmap(file);
81913+
81914+#ifdef CONFIG_PAX_EMUPLT
81915+ vm_flags &= ~VM_EXEC;
81916+#else
81917+ return -EPERM;
81918+#endif
81919+
81920+ }
81921+
81922+ if (!(vm_flags & VM_EXEC))
81923+ vm_flags &= ~VM_MAYEXEC;
81924+#else
81925+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
81926+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
81927+#endif
81928+ else
81929+ vm_flags &= ~VM_MAYWRITE;
81930+ }
81931+#endif
81932+
81933+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
81934+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
81935+ vm_flags &= ~VM_PAGEEXEC;
81936+#endif
81937+
81938 if (flags & MAP_LOCKED)
81939 if (!can_do_mlock())
81940 return -EPERM;
81941@@ -1213,6 +1326,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
81942 locked += mm->locked_vm;
81943 lock_limit = rlimit(RLIMIT_MEMLOCK);
81944 lock_limit >>= PAGE_SHIFT;
81945+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
81946 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
81947 return -EAGAIN;
81948 }
81949@@ -1279,6 +1393,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
81950 }
81951 }
81952
81953+ if (!gr_acl_handle_mmap(file, prot))
81954+ return -EACCES;
81955+
81956 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
81957 }
81958
81959@@ -1356,7 +1473,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
81960 vm_flags_t vm_flags = vma->vm_flags;
81961
81962 /* If it was private or non-writable, the write bit is already clear */
81963- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
81964+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
81965 return 0;
81966
81967 /* The backer wishes to know when pages are first written to? */
81968@@ -1405,16 +1522,30 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
81969 unsigned long charged = 0;
81970 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
81971
81972+#ifdef CONFIG_PAX_SEGMEXEC
81973+ struct vm_area_struct *vma_m = NULL;
81974+#endif
81975+
81976+ /*
81977+ * mm->mmap_sem is required to protect against another thread
81978+ * changing the mappings in case we sleep.
81979+ */
81980+ verify_mm_writelocked(mm);
81981+
81982 /* Clear old maps */
81983 error = -ENOMEM;
81984-munmap_back:
81985 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
81986 if (do_munmap(mm, addr, len))
81987 return -ENOMEM;
81988- goto munmap_back;
81989+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
81990 }
81991
81992 /* Check against address space limit. */
81993+
81994+#ifdef CONFIG_PAX_RANDMMAP
81995+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
81996+#endif
81997+
81998 if (!may_expand_vm(mm, len >> PAGE_SHIFT))
81999 return -ENOMEM;
82000
82001@@ -1460,6 +1591,16 @@ munmap_back:
82002 goto unacct_error;
82003 }
82004
82005+#ifdef CONFIG_PAX_SEGMEXEC
82006+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
82007+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
82008+ if (!vma_m) {
82009+ error = -ENOMEM;
82010+ goto free_vma;
82011+ }
82012+ }
82013+#endif
82014+
82015 vma->vm_mm = mm;
82016 vma->vm_start = addr;
82017 vma->vm_end = addr + len;
82018@@ -1484,6 +1625,13 @@ munmap_back:
82019 if (error)
82020 goto unmap_and_free_vma;
82021
82022+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
82023+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
82024+ vma->vm_flags |= VM_PAGEEXEC;
82025+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
82026+ }
82027+#endif
82028+
82029 /* Can addr have changed??
82030 *
82031 * Answer: Yes, several device drivers can do it in their
82032@@ -1522,6 +1670,11 @@ munmap_back:
82033 vma_link(mm, vma, prev, rb_link, rb_parent);
82034 file = vma->vm_file;
82035
82036+#ifdef CONFIG_PAX_SEGMEXEC
82037+ if (vma_m)
82038+ BUG_ON(pax_mirror_vma(vma_m, vma));
82039+#endif
82040+
82041 /* Once vma denies write, undo our temporary denial count */
82042 if (correct_wcount)
82043 atomic_inc(&inode->i_writecount);
82044@@ -1529,6 +1682,7 @@ out:
82045 perf_event_mmap(vma);
82046
82047 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
82048+ track_exec_limit(mm, addr, addr + len, vm_flags);
82049 if (vm_flags & VM_LOCKED) {
82050 if (!mlock_vma_pages_range(vma, addr, addr + len))
82051 mm->locked_vm += (len >> PAGE_SHIFT);
82052@@ -1550,6 +1704,12 @@ unmap_and_free_vma:
82053 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
82054 charged = 0;
82055 free_vma:
82056+
82057+#ifdef CONFIG_PAX_SEGMEXEC
82058+ if (vma_m)
82059+ kmem_cache_free(vm_area_cachep, vma_m);
82060+#endif
82061+
82062 kmem_cache_free(vm_area_cachep, vma);
82063 unacct_error:
82064 if (charged)
82065@@ -1557,6 +1717,62 @@ unacct_error:
82066 return error;
82067 }
82068
82069+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
82070+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
82071+{
82072+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
82073+ return (random32() & 0xFF) << PAGE_SHIFT;
82074+
82075+ return 0;
82076+}
82077+#endif
82078+
82079+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
82080+{
82081+ if (!vma) {
82082+#ifdef CONFIG_STACK_GROWSUP
82083+ if (addr > sysctl_heap_stack_gap)
82084+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
82085+ else
82086+ vma = find_vma(current->mm, 0);
82087+ if (vma && (vma->vm_flags & VM_GROWSUP))
82088+ return false;
82089+#endif
82090+ return true;
82091+ }
82092+
82093+ if (addr + len > vma->vm_start)
82094+ return false;
82095+
82096+ if (vma->vm_flags & VM_GROWSDOWN)
82097+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
82098+#ifdef CONFIG_STACK_GROWSUP
82099+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
82100+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
82101+#endif
82102+ else if (offset)
82103+ return offset <= vma->vm_start - addr - len;
82104+
82105+ return true;
82106+}
82107+
82108+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
82109+{
82110+ if (vma->vm_start < len)
82111+ return -ENOMEM;
82112+
82113+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
82114+ if (offset <= vma->vm_start - len)
82115+ return vma->vm_start - len - offset;
82116+ else
82117+ return -ENOMEM;
82118+ }
82119+
82120+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
82121+ return vma->vm_start - len - sysctl_heap_stack_gap;
82122+ return -ENOMEM;
82123+}
82124+
82125 unsigned long unmapped_area(struct vm_unmapped_area_info *info)
82126 {
82127 /*
82128@@ -1776,6 +1992,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
82129 struct mm_struct *mm = current->mm;
82130 struct vm_area_struct *vma;
82131 struct vm_unmapped_area_info info;
82132+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
82133
82134 if (len > TASK_SIZE)
82135 return -ENOMEM;
82136@@ -1783,17 +2000,26 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
82137 if (flags & MAP_FIXED)
82138 return addr;
82139
82140+#ifdef CONFIG_PAX_RANDMMAP
82141+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
82142+#endif
82143+
82144 if (addr) {
82145 addr = PAGE_ALIGN(addr);
82146 vma = find_vma(mm, addr);
82147- if (TASK_SIZE - len >= addr &&
82148- (!vma || addr + len <= vma->vm_start))
82149+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
82150 return addr;
82151 }
82152
82153 info.flags = 0;
82154 info.length = len;
82155 info.low_limit = TASK_UNMAPPED_BASE;
82156+
82157+#ifdef CONFIG_PAX_RANDMMAP
82158+ if (mm->pax_flags & MF_PAX_RANDMMAP)
82159+ info.low_limit += mm->delta_mmap;
82160+#endif
82161+
82162 info.high_limit = TASK_SIZE;
82163 info.align_mask = 0;
82164 return vm_unmapped_area(&info);
82165@@ -1802,10 +2028,16 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
82166
82167 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
82168 {
82169+
82170+#ifdef CONFIG_PAX_SEGMEXEC
82171+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
82172+ return;
82173+#endif
82174+
82175 /*
82176 * Is this a new hole at the lowest possible address?
82177 */
82178- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
82179+ if (addr >= mm->mmap_base && addr < mm->free_area_cache)
82180 mm->free_area_cache = addr;
82181 }
82182
82183@@ -1823,6 +2055,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
82184 struct mm_struct *mm = current->mm;
82185 unsigned long addr = addr0;
82186 struct vm_unmapped_area_info info;
82187+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
82188
82189 /* requested length too big for entire address space */
82190 if (len > TASK_SIZE)
82191@@ -1831,12 +2064,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
82192 if (flags & MAP_FIXED)
82193 return addr;
82194
82195+#ifdef CONFIG_PAX_RANDMMAP
82196+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
82197+#endif
82198+
82199 /* requesting a specific address */
82200 if (addr) {
82201 addr = PAGE_ALIGN(addr);
82202 vma = find_vma(mm, addr);
82203- if (TASK_SIZE - len >= addr &&
82204- (!vma || addr + len <= vma->vm_start))
82205+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
82206 return addr;
82207 }
82208
82209@@ -1857,6 +2093,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
82210 VM_BUG_ON(addr != -ENOMEM);
82211 info.flags = 0;
82212 info.low_limit = TASK_UNMAPPED_BASE;
82213+
82214+#ifdef CONFIG_PAX_RANDMMAP
82215+ if (mm->pax_flags & MF_PAX_RANDMMAP)
82216+ info.low_limit += mm->delta_mmap;
82217+#endif
82218+
82219 info.high_limit = TASK_SIZE;
82220 addr = vm_unmapped_area(&info);
82221 }
82222@@ -1867,6 +2109,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
82223
82224 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
82225 {
82226+
82227+#ifdef CONFIG_PAX_SEGMEXEC
82228+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
82229+ return;
82230+#endif
82231+
82232 /*
82233 * Is this a new hole at the highest possible address?
82234 */
82235@@ -1874,8 +2122,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
82236 mm->free_area_cache = addr;
82237
82238 /* dont allow allocations above current base */
82239- if (mm->free_area_cache > mm->mmap_base)
82240+ if (mm->free_area_cache > mm->mmap_base) {
82241 mm->free_area_cache = mm->mmap_base;
82242+ mm->cached_hole_size = ~0UL;
82243+ }
82244 }
82245
82246 unsigned long
82247@@ -1922,7 +2172,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
82248
82249 /* Check the cache first. */
82250 /* (Cache hit rate is typically around 35%.) */
82251- vma = mm->mmap_cache;
82252+ vma = ACCESS_ONCE(mm->mmap_cache);
82253 if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
82254 struct rb_node *rb_node;
82255
82256@@ -1974,6 +2224,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
82257 return vma;
82258 }
82259
82260+#ifdef CONFIG_PAX_SEGMEXEC
82261+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
82262+{
82263+ struct vm_area_struct *vma_m;
82264+
82265+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
82266+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
82267+ BUG_ON(vma->vm_mirror);
82268+ return NULL;
82269+ }
82270+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
82271+ vma_m = vma->vm_mirror;
82272+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
82273+ BUG_ON(vma->vm_file != vma_m->vm_file);
82274+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
82275+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
82276+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
82277+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
82278+ return vma_m;
82279+}
82280+#endif
82281+
82282 /*
82283 * Verify that the stack growth is acceptable and
82284 * update accounting. This is shared with both the
82285@@ -1990,6 +2262,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
82286 return -ENOMEM;
82287
82288 /* Stack limit test */
82289+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
82290 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
82291 return -ENOMEM;
82292
82293@@ -2000,6 +2273,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
82294 locked = mm->locked_vm + grow;
82295 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
82296 limit >>= PAGE_SHIFT;
82297+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
82298 if (locked > limit && !capable(CAP_IPC_LOCK))
82299 return -ENOMEM;
82300 }
82301@@ -2029,37 +2303,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
82302 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
82303 * vma is the last one with address > vma->vm_end. Have to extend vma.
82304 */
82305+#ifndef CONFIG_IA64
82306+static
82307+#endif
82308 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
82309 {
82310 int error;
82311+ bool locknext;
82312
82313 if (!(vma->vm_flags & VM_GROWSUP))
82314 return -EFAULT;
82315
82316+ /* Also guard against wrapping around to address 0. */
82317+ if (address < PAGE_ALIGN(address+1))
82318+ address = PAGE_ALIGN(address+1);
82319+ else
82320+ return -ENOMEM;
82321+
82322 /*
82323 * We must make sure the anon_vma is allocated
82324 * so that the anon_vma locking is not a noop.
82325 */
82326 if (unlikely(anon_vma_prepare(vma)))
82327 return -ENOMEM;
82328+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
82329+ if (locknext && anon_vma_prepare(vma->vm_next))
82330+ return -ENOMEM;
82331 vma_lock_anon_vma(vma);
82332+ if (locknext)
82333+ vma_lock_anon_vma(vma->vm_next);
82334
82335 /*
82336 * vma->vm_start/vm_end cannot change under us because the caller
82337 * is required to hold the mmap_sem in read mode. We need the
82338- * anon_vma lock to serialize against concurrent expand_stacks.
82339- * Also guard against wrapping around to address 0.
82340+ * anon_vma locks to serialize against concurrent expand_stacks
82341+ * and expand_upwards.
82342 */
82343- if (address < PAGE_ALIGN(address+4))
82344- address = PAGE_ALIGN(address+4);
82345- else {
82346- vma_unlock_anon_vma(vma);
82347- return -ENOMEM;
82348- }
82349 error = 0;
82350
82351 /* Somebody else might have raced and expanded it already */
82352- if (address > vma->vm_end) {
82353+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
82354+ error = -ENOMEM;
82355+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
82356 unsigned long size, grow;
82357
82358 size = address - vma->vm_start;
82359@@ -2094,6 +2379,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
82360 }
82361 }
82362 }
82363+ if (locknext)
82364+ vma_unlock_anon_vma(vma->vm_next);
82365 vma_unlock_anon_vma(vma);
82366 khugepaged_enter_vma_merge(vma);
82367 validate_mm(vma->vm_mm);
82368@@ -2108,6 +2395,8 @@ int expand_downwards(struct vm_area_struct *vma,
82369 unsigned long address)
82370 {
82371 int error;
82372+ bool lockprev = false;
82373+ struct vm_area_struct *prev;
82374
82375 /*
82376 * We must make sure the anon_vma is allocated
82377@@ -2121,6 +2410,15 @@ int expand_downwards(struct vm_area_struct *vma,
82378 if (error)
82379 return error;
82380
82381+ prev = vma->vm_prev;
82382+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
82383+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
82384+#endif
82385+ if (lockprev && anon_vma_prepare(prev))
82386+ return -ENOMEM;
82387+ if (lockprev)
82388+ vma_lock_anon_vma(prev);
82389+
82390 vma_lock_anon_vma(vma);
82391
82392 /*
82393@@ -2130,9 +2428,17 @@ int expand_downwards(struct vm_area_struct *vma,
82394 */
82395
82396 /* Somebody else might have raced and expanded it already */
82397- if (address < vma->vm_start) {
82398+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
82399+ error = -ENOMEM;
82400+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
82401 unsigned long size, grow;
82402
82403+#ifdef CONFIG_PAX_SEGMEXEC
82404+ struct vm_area_struct *vma_m;
82405+
82406+ vma_m = pax_find_mirror_vma(vma);
82407+#endif
82408+
82409 size = vma->vm_end - address;
82410 grow = (vma->vm_start - address) >> PAGE_SHIFT;
82411
82412@@ -2157,6 +2463,18 @@ int expand_downwards(struct vm_area_struct *vma,
82413 vma->vm_pgoff -= grow;
82414 anon_vma_interval_tree_post_update_vma(vma);
82415 vma_gap_update(vma);
82416+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
82417+
82418+#ifdef CONFIG_PAX_SEGMEXEC
82419+ if (vma_m) {
82420+ anon_vma_interval_tree_pre_update_vma(vma_m);
82421+ vma_m->vm_start -= grow << PAGE_SHIFT;
82422+ vma_m->vm_pgoff -= grow;
82423+ anon_vma_interval_tree_post_update_vma(vma_m);
82424+ vma_gap_update(vma_m);
82425+ }
82426+#endif
82427+
82428 spin_unlock(&vma->vm_mm->page_table_lock);
82429
82430 perf_event_mmap(vma);
82431@@ -2263,6 +2581,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
82432 do {
82433 long nrpages = vma_pages(vma);
82434
82435+#ifdef CONFIG_PAX_SEGMEXEC
82436+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
82437+ vma = remove_vma(vma);
82438+ continue;
82439+ }
82440+#endif
82441+
82442 if (vma->vm_flags & VM_ACCOUNT)
82443 nr_accounted += nrpages;
82444 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
82445@@ -2308,6 +2633,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
82446 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
82447 vma->vm_prev = NULL;
82448 do {
82449+
82450+#ifdef CONFIG_PAX_SEGMEXEC
82451+ if (vma->vm_mirror) {
82452+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
82453+ vma->vm_mirror->vm_mirror = NULL;
82454+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
82455+ vma->vm_mirror = NULL;
82456+ }
82457+#endif
82458+
82459 vma_rb_erase(vma, &mm->mm_rb);
82460 mm->map_count--;
82461 tail_vma = vma;
82462@@ -2339,14 +2674,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
82463 struct vm_area_struct *new;
82464 int err = -ENOMEM;
82465
82466+#ifdef CONFIG_PAX_SEGMEXEC
82467+ struct vm_area_struct *vma_m, *new_m = NULL;
82468+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
82469+#endif
82470+
82471 if (is_vm_hugetlb_page(vma) && (addr &
82472 ~(huge_page_mask(hstate_vma(vma)))))
82473 return -EINVAL;
82474
82475+#ifdef CONFIG_PAX_SEGMEXEC
82476+ vma_m = pax_find_mirror_vma(vma);
82477+#endif
82478+
82479 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
82480 if (!new)
82481 goto out_err;
82482
82483+#ifdef CONFIG_PAX_SEGMEXEC
82484+ if (vma_m) {
82485+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
82486+ if (!new_m) {
82487+ kmem_cache_free(vm_area_cachep, new);
82488+ goto out_err;
82489+ }
82490+ }
82491+#endif
82492+
82493 /* most fields are the same, copy all, and then fixup */
82494 *new = *vma;
82495
82496@@ -2359,6 +2713,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
82497 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
82498 }
82499
82500+#ifdef CONFIG_PAX_SEGMEXEC
82501+ if (vma_m) {
82502+ *new_m = *vma_m;
82503+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
82504+ new_m->vm_mirror = new;
82505+ new->vm_mirror = new_m;
82506+
82507+ if (new_below)
82508+ new_m->vm_end = addr_m;
82509+ else {
82510+ new_m->vm_start = addr_m;
82511+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
82512+ }
82513+ }
82514+#endif
82515+
82516 pol = mpol_dup(vma_policy(vma));
82517 if (IS_ERR(pol)) {
82518 err = PTR_ERR(pol);
82519@@ -2381,6 +2751,36 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
82520 else
82521 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
82522
82523+#ifdef CONFIG_PAX_SEGMEXEC
82524+ if (!err && vma_m) {
82525+ if (anon_vma_clone(new_m, vma_m))
82526+ goto out_free_mpol;
82527+
82528+ mpol_get(pol);
82529+ vma_set_policy(new_m, pol);
82530+
82531+ if (new_m->vm_file)
82532+ get_file(new_m->vm_file);
82533+
82534+ if (new_m->vm_ops && new_m->vm_ops->open)
82535+ new_m->vm_ops->open(new_m);
82536+
82537+ if (new_below)
82538+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
82539+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
82540+ else
82541+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
82542+
82543+ if (err) {
82544+ if (new_m->vm_ops && new_m->vm_ops->close)
82545+ new_m->vm_ops->close(new_m);
82546+ if (new_m->vm_file)
82547+ fput(new_m->vm_file);
82548+ mpol_put(pol);
82549+ }
82550+ }
82551+#endif
82552+
82553 /* Success. */
82554 if (!err)
82555 return 0;
82556@@ -2390,10 +2790,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
82557 new->vm_ops->close(new);
82558 if (new->vm_file)
82559 fput(new->vm_file);
82560- unlink_anon_vmas(new);
82561 out_free_mpol:
82562 mpol_put(pol);
82563 out_free_vma:
82564+
82565+#ifdef CONFIG_PAX_SEGMEXEC
82566+ if (new_m) {
82567+ unlink_anon_vmas(new_m);
82568+ kmem_cache_free(vm_area_cachep, new_m);
82569+ }
82570+#endif
82571+
82572+ unlink_anon_vmas(new);
82573 kmem_cache_free(vm_area_cachep, new);
82574 out_err:
82575 return err;
82576@@ -2406,6 +2814,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
82577 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
82578 unsigned long addr, int new_below)
82579 {
82580+
82581+#ifdef CONFIG_PAX_SEGMEXEC
82582+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
82583+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
82584+ if (mm->map_count >= sysctl_max_map_count-1)
82585+ return -ENOMEM;
82586+ } else
82587+#endif
82588+
82589 if (mm->map_count >= sysctl_max_map_count)
82590 return -ENOMEM;
82591
82592@@ -2417,11 +2834,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
82593 * work. This now handles partial unmappings.
82594 * Jeremy Fitzhardinge <jeremy@goop.org>
82595 */
82596+#ifdef CONFIG_PAX_SEGMEXEC
82597 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
82598 {
82599+ int ret = __do_munmap(mm, start, len);
82600+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
82601+ return ret;
82602+
82603+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
82604+}
82605+
82606+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
82607+#else
82608+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
82609+#endif
82610+{
82611 unsigned long end;
82612 struct vm_area_struct *vma, *prev, *last;
82613
82614+ /*
82615+ * mm->mmap_sem is required to protect against another thread
82616+ * changing the mappings in case we sleep.
82617+ */
82618+ verify_mm_writelocked(mm);
82619+
82620 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
82621 return -EINVAL;
82622
82623@@ -2496,6 +2932,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
82624 /* Fix up all other VM information */
82625 remove_vma_list(mm, vma);
82626
82627+ track_exec_limit(mm, start, end, 0UL);
82628+
82629 return 0;
82630 }
82631
82632@@ -2504,6 +2942,13 @@ int vm_munmap(unsigned long start, size_t len)
82633 int ret;
82634 struct mm_struct *mm = current->mm;
82635
82636+
82637+#ifdef CONFIG_PAX_SEGMEXEC
82638+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
82639+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
82640+ return -EINVAL;
82641+#endif
82642+
82643 down_write(&mm->mmap_sem);
82644 ret = do_munmap(mm, start, len);
82645 up_write(&mm->mmap_sem);
82646@@ -2517,16 +2962,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
82647 return vm_munmap(addr, len);
82648 }
82649
82650-static inline void verify_mm_writelocked(struct mm_struct *mm)
82651-{
82652-#ifdef CONFIG_DEBUG_VM
82653- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
82654- WARN_ON(1);
82655- up_read(&mm->mmap_sem);
82656- }
82657-#endif
82658-}
82659-
82660 /*
82661 * this is really a simplified "do_mmap". it only handles
82662 * anonymous maps. eventually we may be able to do some
82663@@ -2540,6 +2975,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
82664 struct rb_node ** rb_link, * rb_parent;
82665 pgoff_t pgoff = addr >> PAGE_SHIFT;
82666 int error;
82667+ unsigned long charged;
82668
82669 len = PAGE_ALIGN(len);
82670 if (!len)
82671@@ -2547,16 +2983,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
82672
82673 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
82674
82675+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
82676+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
82677+ flags &= ~VM_EXEC;
82678+
82679+#ifdef CONFIG_PAX_MPROTECT
82680+ if (mm->pax_flags & MF_PAX_MPROTECT)
82681+ flags &= ~VM_MAYEXEC;
82682+#endif
82683+
82684+ }
82685+#endif
82686+
82687 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
82688 if (error & ~PAGE_MASK)
82689 return error;
82690
82691+ charged = len >> PAGE_SHIFT;
82692+
82693 /*
82694 * mlock MCL_FUTURE?
82695 */
82696 if (mm->def_flags & VM_LOCKED) {
82697 unsigned long locked, lock_limit;
82698- locked = len >> PAGE_SHIFT;
82699+ locked = charged;
82700 locked += mm->locked_vm;
82701 lock_limit = rlimit(RLIMIT_MEMLOCK);
82702 lock_limit >>= PAGE_SHIFT;
82703@@ -2573,21 +3023,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
82704 /*
82705 * Clear old maps. this also does some error checking for us
82706 */
82707- munmap_back:
82708 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
82709 if (do_munmap(mm, addr, len))
82710 return -ENOMEM;
82711- goto munmap_back;
82712+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
82713 }
82714
82715 /* Check against address space limits *after* clearing old maps... */
82716- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
82717+ if (!may_expand_vm(mm, charged))
82718 return -ENOMEM;
82719
82720 if (mm->map_count > sysctl_max_map_count)
82721 return -ENOMEM;
82722
82723- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
82724+ if (security_vm_enough_memory_mm(mm, charged))
82725 return -ENOMEM;
82726
82727 /* Can we just expand an old private anonymous mapping? */
82728@@ -2601,7 +3050,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
82729 */
82730 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
82731 if (!vma) {
82732- vm_unacct_memory(len >> PAGE_SHIFT);
82733+ vm_unacct_memory(charged);
82734 return -ENOMEM;
82735 }
82736
82737@@ -2615,11 +3064,12 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
82738 vma_link(mm, vma, prev, rb_link, rb_parent);
82739 out:
82740 perf_event_mmap(vma);
82741- mm->total_vm += len >> PAGE_SHIFT;
82742+ mm->total_vm += charged;
82743 if (flags & VM_LOCKED) {
82744 if (!mlock_vma_pages_range(vma, addr, addr + len))
82745- mm->locked_vm += (len >> PAGE_SHIFT);
82746+ mm->locked_vm += charged;
82747 }
82748+ track_exec_limit(mm, addr, addr + len, flags);
82749 return addr;
82750 }
82751
82752@@ -2677,6 +3127,7 @@ void exit_mmap(struct mm_struct *mm)
82753 while (vma) {
82754 if (vma->vm_flags & VM_ACCOUNT)
82755 nr_accounted += vma_pages(vma);
82756+ vma->vm_mirror = NULL;
82757 vma = remove_vma(vma);
82758 }
82759 vm_unacct_memory(nr_accounted);
82760@@ -2693,6 +3144,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
82761 struct vm_area_struct *prev;
82762 struct rb_node **rb_link, *rb_parent;
82763
82764+#ifdef CONFIG_PAX_SEGMEXEC
82765+ struct vm_area_struct *vma_m = NULL;
82766+#endif
82767+
82768+ if (security_mmap_addr(vma->vm_start))
82769+ return -EPERM;
82770+
82771 /*
82772 * The vm_pgoff of a purely anonymous vma should be irrelevant
82773 * until its first write fault, when page's anon_vma and index
82774@@ -2716,7 +3174,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
82775 security_vm_enough_memory_mm(mm, vma_pages(vma)))
82776 return -ENOMEM;
82777
82778+#ifdef CONFIG_PAX_SEGMEXEC
82779+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
82780+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
82781+ if (!vma_m)
82782+ return -ENOMEM;
82783+ }
82784+#endif
82785+
82786 vma_link(mm, vma, prev, rb_link, rb_parent);
82787+
82788+#ifdef CONFIG_PAX_SEGMEXEC
82789+ if (vma_m)
82790+ BUG_ON(pax_mirror_vma(vma_m, vma));
82791+#endif
82792+
82793 return 0;
82794 }
82795
82796@@ -2736,6 +3208,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
82797 struct mempolicy *pol;
82798 bool faulted_in_anon_vma = true;
82799
82800+ BUG_ON(vma->vm_mirror);
82801+
82802 /*
82803 * If anonymous vma has not yet been faulted, update new pgoff
82804 * to match new location, to increase its chance of merging.
82805@@ -2802,6 +3276,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
82806 return NULL;
82807 }
82808
82809+#ifdef CONFIG_PAX_SEGMEXEC
82810+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
82811+{
82812+ struct vm_area_struct *prev_m;
82813+ struct rb_node **rb_link_m, *rb_parent_m;
82814+ struct mempolicy *pol_m;
82815+
82816+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
82817+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
82818+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
82819+ *vma_m = *vma;
82820+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
82821+ if (anon_vma_clone(vma_m, vma))
82822+ return -ENOMEM;
82823+ pol_m = vma_policy(vma_m);
82824+ mpol_get(pol_m);
82825+ vma_set_policy(vma_m, pol_m);
82826+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
82827+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
82828+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
82829+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
82830+ if (vma_m->vm_file)
82831+ get_file(vma_m->vm_file);
82832+ if (vma_m->vm_ops && vma_m->vm_ops->open)
82833+ vma_m->vm_ops->open(vma_m);
82834+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
82835+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
82836+ vma_m->vm_mirror = vma;
82837+ vma->vm_mirror = vma_m;
82838+ return 0;
82839+}
82840+#endif
82841+
82842 /*
82843 * Return true if the calling process may expand its vm space by the passed
82844 * number of pages
82845@@ -2813,6 +3320,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
82846
82847 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
82848
82849+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
82850 if (cur + npages > lim)
82851 return 0;
82852 return 1;
82853@@ -2883,6 +3391,22 @@ int install_special_mapping(struct mm_struct *mm,
82854 vma->vm_start = addr;
82855 vma->vm_end = addr + len;
82856
82857+#ifdef CONFIG_PAX_MPROTECT
82858+ if (mm->pax_flags & MF_PAX_MPROTECT) {
82859+#ifndef CONFIG_PAX_MPROTECT_COMPAT
82860+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
82861+ return -EPERM;
82862+ if (!(vm_flags & VM_EXEC))
82863+ vm_flags &= ~VM_MAYEXEC;
82864+#else
82865+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
82866+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
82867+#endif
82868+ else
82869+ vm_flags &= ~VM_MAYWRITE;
82870+ }
82871+#endif
82872+
82873 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
82874 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
82875
82876diff --git a/mm/mprotect.c b/mm/mprotect.c
82877index 94722a4..9837984 100644
82878--- a/mm/mprotect.c
82879+++ b/mm/mprotect.c
82880@@ -23,10 +23,17 @@
82881 #include <linux/mmu_notifier.h>
82882 #include <linux/migrate.h>
82883 #include <linux/perf_event.h>
82884+
82885+#ifdef CONFIG_PAX_MPROTECT
82886+#include <linux/elf.h>
82887+#include <linux/binfmts.h>
82888+#endif
82889+
82890 #include <asm/uaccess.h>
82891 #include <asm/pgtable.h>
82892 #include <asm/cacheflush.h>
82893 #include <asm/tlbflush.h>
82894+#include <asm/mmu_context.h>
82895
82896 #ifndef pgprot_modify
82897 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
82898@@ -233,6 +240,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
82899 return pages;
82900 }
82901
82902+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
82903+/* called while holding the mmap semaphor for writing except stack expansion */
82904+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
82905+{
82906+ unsigned long oldlimit, newlimit = 0UL;
82907+
82908+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
82909+ return;
82910+
82911+ spin_lock(&mm->page_table_lock);
82912+ oldlimit = mm->context.user_cs_limit;
82913+ if ((prot & VM_EXEC) && oldlimit < end)
82914+ /* USER_CS limit moved up */
82915+ newlimit = end;
82916+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
82917+ /* USER_CS limit moved down */
82918+ newlimit = start;
82919+
82920+ if (newlimit) {
82921+ mm->context.user_cs_limit = newlimit;
82922+
82923+#ifdef CONFIG_SMP
82924+ wmb();
82925+ cpus_clear(mm->context.cpu_user_cs_mask);
82926+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
82927+#endif
82928+
82929+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
82930+ }
82931+ spin_unlock(&mm->page_table_lock);
82932+ if (newlimit == end) {
82933+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
82934+
82935+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
82936+ if (is_vm_hugetlb_page(vma))
82937+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
82938+ else
82939+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
82940+ }
82941+}
82942+#endif
82943+
82944 int
82945 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
82946 unsigned long start, unsigned long end, unsigned long newflags)
82947@@ -245,11 +294,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
82948 int error;
82949 int dirty_accountable = 0;
82950
82951+#ifdef CONFIG_PAX_SEGMEXEC
82952+ struct vm_area_struct *vma_m = NULL;
82953+ unsigned long start_m, end_m;
82954+
82955+ start_m = start + SEGMEXEC_TASK_SIZE;
82956+ end_m = end + SEGMEXEC_TASK_SIZE;
82957+#endif
82958+
82959 if (newflags == oldflags) {
82960 *pprev = vma;
82961 return 0;
82962 }
82963
82964+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
82965+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
82966+
82967+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
82968+ return -ENOMEM;
82969+
82970+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
82971+ return -ENOMEM;
82972+ }
82973+
82974 /*
82975 * If we make a private mapping writable we increase our commit;
82976 * but (without finer accounting) cannot reduce our commit if we
82977@@ -266,6 +333,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
82978 }
82979 }
82980
82981+#ifdef CONFIG_PAX_SEGMEXEC
82982+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
82983+ if (start != vma->vm_start) {
82984+ error = split_vma(mm, vma, start, 1);
82985+ if (error)
82986+ goto fail;
82987+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
82988+ *pprev = (*pprev)->vm_next;
82989+ }
82990+
82991+ if (end != vma->vm_end) {
82992+ error = split_vma(mm, vma, end, 0);
82993+ if (error)
82994+ goto fail;
82995+ }
82996+
82997+ if (pax_find_mirror_vma(vma)) {
82998+ error = __do_munmap(mm, start_m, end_m - start_m);
82999+ if (error)
83000+ goto fail;
83001+ } else {
83002+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
83003+ if (!vma_m) {
83004+ error = -ENOMEM;
83005+ goto fail;
83006+ }
83007+ vma->vm_flags = newflags;
83008+ error = pax_mirror_vma(vma_m, vma);
83009+ if (error) {
83010+ vma->vm_flags = oldflags;
83011+ goto fail;
83012+ }
83013+ }
83014+ }
83015+#endif
83016+
83017 /*
83018 * First try to merge with previous and/or next vma.
83019 */
83020@@ -296,9 +399,21 @@ success:
83021 * vm_flags and vm_page_prot are protected by the mmap_sem
83022 * held in write mode.
83023 */
83024+
83025+#ifdef CONFIG_PAX_SEGMEXEC
83026+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
83027+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
83028+#endif
83029+
83030 vma->vm_flags = newflags;
83031+
83032+#ifdef CONFIG_PAX_MPROTECT
83033+ if (mm->binfmt && mm->binfmt->handle_mprotect)
83034+ mm->binfmt->handle_mprotect(vma, newflags);
83035+#endif
83036+
83037 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
83038- vm_get_page_prot(newflags));
83039+ vm_get_page_prot(vma->vm_flags));
83040
83041 if (vma_wants_writenotify(vma)) {
83042 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
83043@@ -337,6 +452,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
83044 end = start + len;
83045 if (end <= start)
83046 return -ENOMEM;
83047+
83048+#ifdef CONFIG_PAX_SEGMEXEC
83049+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
83050+ if (end > SEGMEXEC_TASK_SIZE)
83051+ return -EINVAL;
83052+ } else
83053+#endif
83054+
83055+ if (end > TASK_SIZE)
83056+ return -EINVAL;
83057+
83058 if (!arch_validate_prot(prot))
83059 return -EINVAL;
83060
83061@@ -344,7 +470,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
83062 /*
83063 * Does the application expect PROT_READ to imply PROT_EXEC:
83064 */
83065- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
83066+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
83067 prot |= PROT_EXEC;
83068
83069 vm_flags = calc_vm_prot_bits(prot);
83070@@ -376,6 +502,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
83071 if (start > vma->vm_start)
83072 prev = vma;
83073
83074+#ifdef CONFIG_PAX_MPROTECT
83075+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
83076+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
83077+#endif
83078+
83079 for (nstart = start ; ; ) {
83080 unsigned long newflags;
83081
83082@@ -386,6 +517,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
83083
83084 /* newflags >> 4 shift VM_MAY% in place of VM_% */
83085 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
83086+ if (prot & (PROT_WRITE | PROT_EXEC))
83087+ gr_log_rwxmprotect(vma->vm_file);
83088+
83089+ error = -EACCES;
83090+ goto out;
83091+ }
83092+
83093+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
83094 error = -EACCES;
83095 goto out;
83096 }
83097@@ -400,6 +539,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
83098 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
83099 if (error)
83100 goto out;
83101+
83102+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
83103+
83104 nstart = tmp;
83105
83106 if (nstart < prev->vm_end)
83107diff --git a/mm/mremap.c b/mm/mremap.c
83108index e1031e1..1f2a0a1 100644
83109--- a/mm/mremap.c
83110+++ b/mm/mremap.c
83111@@ -125,6 +125,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
83112 continue;
83113 pte = ptep_get_and_clear(mm, old_addr, old_pte);
83114 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
83115+
83116+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
83117+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
83118+ pte = pte_exprotect(pte);
83119+#endif
83120+
83121 set_pte_at(mm, new_addr, new_pte, pte);
83122 }
83123
83124@@ -319,6 +325,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
83125 if (is_vm_hugetlb_page(vma))
83126 goto Einval;
83127
83128+#ifdef CONFIG_PAX_SEGMEXEC
83129+ if (pax_find_mirror_vma(vma))
83130+ goto Einval;
83131+#endif
83132+
83133 /* We can't remap across vm area boundaries */
83134 if (old_len > vma->vm_end - addr)
83135 goto Efault;
83136@@ -375,20 +386,25 @@ static unsigned long mremap_to(unsigned long addr,
83137 unsigned long ret = -EINVAL;
83138 unsigned long charged = 0;
83139 unsigned long map_flags;
83140+ unsigned long pax_task_size = TASK_SIZE;
83141
83142 if (new_addr & ~PAGE_MASK)
83143 goto out;
83144
83145- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
83146+#ifdef CONFIG_PAX_SEGMEXEC
83147+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
83148+ pax_task_size = SEGMEXEC_TASK_SIZE;
83149+#endif
83150+
83151+ pax_task_size -= PAGE_SIZE;
83152+
83153+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
83154 goto out;
83155
83156 /* Check if the location we're moving into overlaps the
83157 * old location at all, and fail if it does.
83158 */
83159- if ((new_addr <= addr) && (new_addr+new_len) > addr)
83160- goto out;
83161-
83162- if ((addr <= new_addr) && (addr+old_len) > new_addr)
83163+ if (addr + old_len > new_addr && new_addr + new_len > addr)
83164 goto out;
83165
83166 ret = do_munmap(mm, new_addr, new_len);
83167@@ -456,6 +472,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
83168 struct vm_area_struct *vma;
83169 unsigned long ret = -EINVAL;
83170 unsigned long charged = 0;
83171+ unsigned long pax_task_size = TASK_SIZE;
83172
83173 down_write(&current->mm->mmap_sem);
83174
83175@@ -476,6 +493,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
83176 if (!new_len)
83177 goto out;
83178
83179+#ifdef CONFIG_PAX_SEGMEXEC
83180+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
83181+ pax_task_size = SEGMEXEC_TASK_SIZE;
83182+#endif
83183+
83184+ pax_task_size -= PAGE_SIZE;
83185+
83186+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
83187+ old_len > pax_task_size || addr > pax_task_size-old_len)
83188+ goto out;
83189+
83190 if (flags & MREMAP_FIXED) {
83191 if (flags & MREMAP_MAYMOVE)
83192 ret = mremap_to(addr, old_len, new_addr, new_len);
83193@@ -524,6 +552,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
83194 addr + new_len);
83195 }
83196 ret = addr;
83197+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
83198 goto out;
83199 }
83200 }
83201@@ -547,7 +576,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
83202 goto out;
83203 }
83204
83205+ map_flags = vma->vm_flags;
83206 ret = move_vma(vma, addr, old_len, new_len, new_addr);
83207+ if (!(ret & ~PAGE_MASK)) {
83208+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
83209+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
83210+ }
83211 }
83212 out:
83213 if (ret & ~PAGE_MASK)
83214diff --git a/mm/nommu.c b/mm/nommu.c
83215index 79c3cac..b2601ea 100644
83216--- a/mm/nommu.c
83217+++ b/mm/nommu.c
83218@@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
83219 int sysctl_overcommit_ratio = 50; /* default is 50% */
83220 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
83221 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
83222-int heap_stack_gap = 0;
83223
83224 atomic_long_t mmap_pages_allocated;
83225
83226@@ -819,7 +818,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
83227 struct vm_area_struct *vma;
83228
83229 /* check the cache first */
83230- vma = mm->mmap_cache;
83231+ vma = ACCESS_ONCE(mm->mmap_cache);
83232 if (vma && vma->vm_start <= addr && vma->vm_end > addr)
83233 return vma;
83234
83235@@ -839,15 +838,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
83236 EXPORT_SYMBOL(find_vma);
83237
83238 /*
83239- * find a VMA
83240- * - we don't extend stack VMAs under NOMMU conditions
83241- */
83242-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
83243-{
83244- return find_vma(mm, addr);
83245-}
83246-
83247-/*
83248 * expand a stack to a given address
83249 * - not supported under NOMMU conditions
83250 */
83251@@ -1555,6 +1545,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
83252
83253 /* most fields are the same, copy all, and then fixup */
83254 *new = *vma;
83255+ INIT_LIST_HEAD(&new->anon_vma_chain);
83256 *region = *vma->vm_region;
83257 new->vm_region = region;
83258
83259@@ -1975,8 +1966,8 @@ int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
83260 }
83261 EXPORT_SYMBOL(generic_file_remap_pages);
83262
83263-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
83264- unsigned long addr, void *buf, int len, int write)
83265+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
83266+ unsigned long addr, void *buf, size_t len, int write)
83267 {
83268 struct vm_area_struct *vma;
83269
83270@@ -2017,8 +2008,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
83271 *
83272 * The caller must hold a reference on @mm.
83273 */
83274-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
83275- void *buf, int len, int write)
83276+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
83277+ void *buf, size_t len, int write)
83278 {
83279 return __access_remote_vm(NULL, mm, addr, buf, len, write);
83280 }
83281@@ -2027,7 +2018,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
83282 * Access another process' address space.
83283 * - source/target buffer must be kernel space
83284 */
83285-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
83286+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
83287 {
83288 struct mm_struct *mm;
83289
83290diff --git a/mm/page-writeback.c b/mm/page-writeback.c
83291index 0713bfb..b95bb87 100644
83292--- a/mm/page-writeback.c
83293+++ b/mm/page-writeback.c
83294@@ -655,7 +655,7 @@ unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
83295 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
83296 * - the bdi dirty thresh drops quickly due to change of JBOD workload
83297 */
83298-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
83299+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
83300 unsigned long thresh,
83301 unsigned long bg_thresh,
83302 unsigned long dirty,
83303@@ -1630,7 +1630,7 @@ ratelimit_handler(struct notifier_block *self, unsigned long action,
83304 }
83305 }
83306
83307-static struct notifier_block __cpuinitdata ratelimit_nb = {
83308+static struct notifier_block ratelimit_nb = {
83309 .notifier_call = ratelimit_handler,
83310 .next = NULL,
83311 };
83312diff --git a/mm/page_alloc.c b/mm/page_alloc.c
83313index 6a83cd3..3ab04ef 100644
83314--- a/mm/page_alloc.c
83315+++ b/mm/page_alloc.c
83316@@ -58,6 +58,7 @@
83317 #include <linux/prefetch.h>
83318 #include <linux/migrate.h>
83319 #include <linux/page-debug-flags.h>
83320+#include <linux/random.h>
83321
83322 #include <asm/tlbflush.h>
83323 #include <asm/div64.h>
83324@@ -338,7 +339,7 @@ out:
83325 * This usage means that zero-order pages may not be compound.
83326 */
83327
83328-static void free_compound_page(struct page *page)
83329+void free_compound_page(struct page *page)
83330 {
83331 __free_pages_ok(page, compound_order(page));
83332 }
83333@@ -693,6 +694,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
83334 int i;
83335 int bad = 0;
83336
83337+#ifdef CONFIG_PAX_MEMORY_SANITIZE
83338+ unsigned long index = 1UL << order;
83339+#endif
83340+
83341 trace_mm_page_free(page, order);
83342 kmemcheck_free_shadow(page, order);
83343
83344@@ -708,6 +713,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
83345 debug_check_no_obj_freed(page_address(page),
83346 PAGE_SIZE << order);
83347 }
83348+
83349+#ifdef CONFIG_PAX_MEMORY_SANITIZE
83350+ for (; index; --index)
83351+ sanitize_highpage(page + index - 1);
83352+#endif
83353+
83354 arch_free_page(page, order);
83355 kernel_map_pages(page, 1 << order, 0);
83356
83357@@ -730,6 +741,19 @@ static void __free_pages_ok(struct page *page, unsigned int order)
83358 local_irq_restore(flags);
83359 }
83360
83361+#ifdef CONFIG_PAX_LATENT_ENTROPY
83362+bool __meminitdata extra_latent_entropy;
83363+
83364+static int __init setup_pax_extra_latent_entropy(char *str)
83365+{
83366+ extra_latent_entropy = true;
83367+ return 0;
83368+}
83369+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
83370+
83371+volatile u64 latent_entropy;
83372+#endif
83373+
83374 /*
83375 * Read access to zone->managed_pages is safe because it's unsigned long,
83376 * but we still need to serialize writers. Currently all callers of
83377@@ -752,6 +776,19 @@ void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
83378 set_page_count(p, 0);
83379 }
83380
83381+#ifdef CONFIG_PAX_LATENT_ENTROPY
83382+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
83383+ u64 hash = 0;
83384+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
83385+ const u64 *data = lowmem_page_address(page);
83386+
83387+ for (index = 0; index < end; index++)
83388+ hash ^= hash + data[index];
83389+ latent_entropy ^= hash;
83390+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
83391+ }
83392+#endif
83393+
83394 page_zone(page)->managed_pages += 1 << order;
83395 set_page_refcounted(page);
83396 __free_pages(page, order);
83397@@ -861,8 +898,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
83398 arch_alloc_page(page, order);
83399 kernel_map_pages(page, 1 << order, 1);
83400
83401+#ifndef CONFIG_PAX_MEMORY_SANITIZE
83402 if (gfp_flags & __GFP_ZERO)
83403 prep_zero_page(page, order, gfp_flags);
83404+#endif
83405
83406 if (order && (gfp_flags & __GFP_COMP))
83407 prep_compound_page(page, order);
83408@@ -3752,7 +3791,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
83409 unsigned long pfn;
83410
83411 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
83412+#ifdef CONFIG_X86_32
83413+ /* boot failures in VMware 8 on 32bit vanilla since
83414+ this change */
83415+ if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
83416+#else
83417 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
83418+#endif
83419 return 1;
83420 }
83421 return 0;
83422diff --git a/mm/percpu.c b/mm/percpu.c
83423index 8c8e08f..73a5cda 100644
83424--- a/mm/percpu.c
83425+++ b/mm/percpu.c
83426@@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
83427 static unsigned int pcpu_high_unit_cpu __read_mostly;
83428
83429 /* the address of the first chunk which starts with the kernel static area */
83430-void *pcpu_base_addr __read_mostly;
83431+void *pcpu_base_addr __read_only;
83432 EXPORT_SYMBOL_GPL(pcpu_base_addr);
83433
83434 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
83435diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
83436index fd26d04..0cea1b0 100644
83437--- a/mm/process_vm_access.c
83438+++ b/mm/process_vm_access.c
83439@@ -13,6 +13,7 @@
83440 #include <linux/uio.h>
83441 #include <linux/sched.h>
83442 #include <linux/highmem.h>
83443+#include <linux/security.h>
83444 #include <linux/ptrace.h>
83445 #include <linux/slab.h>
83446 #include <linux/syscalls.h>
83447@@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
83448 size_t iov_l_curr_offset = 0;
83449 ssize_t iov_len;
83450
83451+ return -ENOSYS; // PaX: until properly audited
83452+
83453 /*
83454 * Work out how many pages of struct pages we're going to need
83455 * when eventually calling get_user_pages
83456 */
83457 for (i = 0; i < riovcnt; i++) {
83458 iov_len = rvec[i].iov_len;
83459- if (iov_len > 0) {
83460- nr_pages_iov = ((unsigned long)rvec[i].iov_base
83461- + iov_len)
83462- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
83463- / PAGE_SIZE + 1;
83464- nr_pages = max(nr_pages, nr_pages_iov);
83465- }
83466+ if (iov_len <= 0)
83467+ continue;
83468+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
83469+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
83470+ nr_pages = max(nr_pages, nr_pages_iov);
83471 }
83472
83473 if (nr_pages == 0)
83474@@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
83475 goto free_proc_pages;
83476 }
83477
83478+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
83479+ rc = -EPERM;
83480+ goto put_task_struct;
83481+ }
83482+
83483 mm = mm_access(task, PTRACE_MODE_ATTACH);
83484 if (!mm || IS_ERR(mm)) {
83485 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
83486diff --git a/mm/rmap.c b/mm/rmap.c
83487index 2c78f8c..9e9c624 100644
83488--- a/mm/rmap.c
83489+++ b/mm/rmap.c
83490@@ -163,6 +163,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
83491 struct anon_vma *anon_vma = vma->anon_vma;
83492 struct anon_vma_chain *avc;
83493
83494+#ifdef CONFIG_PAX_SEGMEXEC
83495+ struct anon_vma_chain *avc_m = NULL;
83496+#endif
83497+
83498 might_sleep();
83499 if (unlikely(!anon_vma)) {
83500 struct mm_struct *mm = vma->vm_mm;
83501@@ -172,6 +176,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
83502 if (!avc)
83503 goto out_enomem;
83504
83505+#ifdef CONFIG_PAX_SEGMEXEC
83506+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
83507+ if (!avc_m)
83508+ goto out_enomem_free_avc;
83509+#endif
83510+
83511 anon_vma = find_mergeable_anon_vma(vma);
83512 allocated = NULL;
83513 if (!anon_vma) {
83514@@ -185,6 +195,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
83515 /* page_table_lock to protect against threads */
83516 spin_lock(&mm->page_table_lock);
83517 if (likely(!vma->anon_vma)) {
83518+
83519+#ifdef CONFIG_PAX_SEGMEXEC
83520+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
83521+
83522+ if (vma_m) {
83523+ BUG_ON(vma_m->anon_vma);
83524+ vma_m->anon_vma = anon_vma;
83525+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
83526+ avc_m = NULL;
83527+ }
83528+#endif
83529+
83530 vma->anon_vma = anon_vma;
83531 anon_vma_chain_link(vma, avc, anon_vma);
83532 allocated = NULL;
83533@@ -195,12 +217,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
83534
83535 if (unlikely(allocated))
83536 put_anon_vma(allocated);
83537+
83538+#ifdef CONFIG_PAX_SEGMEXEC
83539+ if (unlikely(avc_m))
83540+ anon_vma_chain_free(avc_m);
83541+#endif
83542+
83543 if (unlikely(avc))
83544 anon_vma_chain_free(avc);
83545 }
83546 return 0;
83547
83548 out_enomem_free_avc:
83549+
83550+#ifdef CONFIG_PAX_SEGMEXEC
83551+ if (avc_m)
83552+ anon_vma_chain_free(avc_m);
83553+#endif
83554+
83555 anon_vma_chain_free(avc);
83556 out_enomem:
83557 return -ENOMEM;
83558@@ -236,7 +270,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
83559 * Attach the anon_vmas from src to dst.
83560 * Returns 0 on success, -ENOMEM on failure.
83561 */
83562-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
83563+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
83564 {
83565 struct anon_vma_chain *avc, *pavc;
83566 struct anon_vma *root = NULL;
83567@@ -269,7 +303,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
83568 * the corresponding VMA in the parent process is attached to.
83569 * Returns 0 on success, non-zero on failure.
83570 */
83571-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
83572+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
83573 {
83574 struct anon_vma_chain *avc;
83575 struct anon_vma *anon_vma;
83576diff --git a/mm/shmem.c b/mm/shmem.c
83577index efd0b3a..994b702 100644
83578--- a/mm/shmem.c
83579+++ b/mm/shmem.c
83580@@ -31,7 +31,7 @@
83581 #include <linux/export.h>
83582 #include <linux/swap.h>
83583
83584-static struct vfsmount *shm_mnt;
83585+struct vfsmount *shm_mnt;
83586
83587 #ifdef CONFIG_SHMEM
83588 /*
83589@@ -75,7 +75,7 @@ static struct vfsmount *shm_mnt;
83590 #define BOGO_DIRENT_SIZE 20
83591
83592 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
83593-#define SHORT_SYMLINK_LEN 128
83594+#define SHORT_SYMLINK_LEN 64
83595
83596 /*
83597 * shmem_fallocate and shmem_writepage communicate via inode->i_private
83598@@ -2202,6 +2202,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
83599 static int shmem_xattr_validate(const char *name)
83600 {
83601 struct { const char *prefix; size_t len; } arr[] = {
83602+
83603+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
83604+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
83605+#endif
83606+
83607 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
83608 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
83609 };
83610@@ -2257,6 +2262,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
83611 if (err)
83612 return err;
83613
83614+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
83615+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
83616+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
83617+ return -EOPNOTSUPP;
83618+ if (size > 8)
83619+ return -EINVAL;
83620+ }
83621+#endif
83622+
83623 return simple_xattr_set(&info->xattrs, name, value, size, flags);
83624 }
83625
83626@@ -2562,8 +2576,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
83627 int err = -ENOMEM;
83628
83629 /* Round up to L1_CACHE_BYTES to resist false sharing */
83630- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
83631- L1_CACHE_BYTES), GFP_KERNEL);
83632+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
83633 if (!sbinfo)
83634 return -ENOMEM;
83635
83636diff --git a/mm/slab.c b/mm/slab.c
83637index e7667a3..a48e73b 100644
83638--- a/mm/slab.c
83639+++ b/mm/slab.c
83640@@ -306,7 +306,7 @@ struct kmem_list3 {
83641 * Need this for bootstrapping a per node allocator.
83642 */
83643 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
83644-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
83645+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
83646 #define CACHE_CACHE 0
83647 #define SIZE_AC MAX_NUMNODES
83648 #define SIZE_L3 (2 * MAX_NUMNODES)
83649@@ -407,10 +407,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
83650 if ((x)->max_freeable < i) \
83651 (x)->max_freeable = i; \
83652 } while (0)
83653-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
83654-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
83655-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
83656-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
83657+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
83658+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
83659+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
83660+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
83661 #else
83662 #define STATS_INC_ACTIVE(x) do { } while (0)
83663 #define STATS_DEC_ACTIVE(x) do { } while (0)
83664@@ -518,7 +518,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
83665 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
83666 */
83667 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
83668- const struct slab *slab, void *obj)
83669+ const struct slab *slab, const void *obj)
83670 {
83671 u32 offset = (obj - slab->s_mem);
83672 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
83673@@ -539,12 +539,13 @@ EXPORT_SYMBOL(malloc_sizes);
83674 struct cache_names {
83675 char *name;
83676 char *name_dma;
83677+ char *name_usercopy;
83678 };
83679
83680 static struct cache_names __initdata cache_names[] = {
83681-#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
83682+#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)", .name_usercopy = "size-" #x "(USERCOPY)" },
83683 #include <linux/kmalloc_sizes.h>
83684- {NULL,}
83685+ {NULL}
83686 #undef CACHE
83687 };
83688
83689@@ -729,6 +730,12 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
83690 if (unlikely(gfpflags & GFP_DMA))
83691 return csizep->cs_dmacachep;
83692 #endif
83693+
83694+#ifdef CONFIG_PAX_USERCOPY_SLABS
83695+ if (unlikely(gfpflags & GFP_USERCOPY))
83696+ return csizep->cs_usercopycachep;
83697+#endif
83698+
83699 return csizep->cs_cachep;
83700 }
83701
83702@@ -1482,7 +1489,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
83703 return notifier_from_errno(err);
83704 }
83705
83706-static struct notifier_block __cpuinitdata cpucache_notifier = {
83707+static struct notifier_block cpucache_notifier = {
83708 &cpuup_callback, NULL, 0
83709 };
83710
83711@@ -1667,12 +1674,12 @@ void __init kmem_cache_init(void)
83712 */
83713
83714 sizes[INDEX_AC].cs_cachep = create_kmalloc_cache(names[INDEX_AC].name,
83715- sizes[INDEX_AC].cs_size, ARCH_KMALLOC_FLAGS);
83716+ sizes[INDEX_AC].cs_size, ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
83717
83718 if (INDEX_AC != INDEX_L3)
83719 sizes[INDEX_L3].cs_cachep =
83720 create_kmalloc_cache(names[INDEX_L3].name,
83721- sizes[INDEX_L3].cs_size, ARCH_KMALLOC_FLAGS);
83722+ sizes[INDEX_L3].cs_size, ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
83723
83724 slab_early_init = 0;
83725
83726@@ -1686,13 +1693,20 @@ void __init kmem_cache_init(void)
83727 */
83728 if (!sizes->cs_cachep)
83729 sizes->cs_cachep = create_kmalloc_cache(names->name,
83730- sizes->cs_size, ARCH_KMALLOC_FLAGS);
83731+ sizes->cs_size, ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
83732
83733 #ifdef CONFIG_ZONE_DMA
83734 sizes->cs_dmacachep = create_kmalloc_cache(
83735 names->name_dma, sizes->cs_size,
83736 SLAB_CACHE_DMA|ARCH_KMALLOC_FLAGS);
83737 #endif
83738+
83739+#ifdef CONFIG_PAX_USERCOPY_SLABS
83740+ sizes->cs_usercopycachep = create_kmalloc_cache(
83741+ names->name_usercopy, sizes->cs_size,
83742+ ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
83743+#endif
83744+
83745 sizes++;
83746 names++;
83747 }
83748@@ -3924,6 +3938,7 @@ void kfree(const void *objp)
83749
83750 if (unlikely(ZERO_OR_NULL_PTR(objp)))
83751 return;
83752+ VM_BUG_ON(!virt_addr_valid(objp));
83753 local_irq_save(flags);
83754 kfree_debugcheck(objp);
83755 c = virt_to_cache(objp);
83756@@ -4365,10 +4380,10 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
83757 }
83758 /* cpu stats */
83759 {
83760- unsigned long allochit = atomic_read(&cachep->allochit);
83761- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
83762- unsigned long freehit = atomic_read(&cachep->freehit);
83763- unsigned long freemiss = atomic_read(&cachep->freemiss);
83764+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
83765+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
83766+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
83767+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
83768
83769 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
83770 allochit, allocmiss, freehit, freemiss);
83771@@ -4600,13 +4615,71 @@ static const struct file_operations proc_slabstats_operations = {
83772 static int __init slab_proc_init(void)
83773 {
83774 #ifdef CONFIG_DEBUG_SLAB_LEAK
83775- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
83776+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
83777 #endif
83778 return 0;
83779 }
83780 module_init(slab_proc_init);
83781 #endif
83782
83783+bool is_usercopy_object(const void *ptr)
83784+{
83785+ struct page *page;
83786+ struct kmem_cache *cachep;
83787+
83788+ if (ZERO_OR_NULL_PTR(ptr))
83789+ return false;
83790+
83791+ if (!slab_is_available())
83792+ return false;
83793+
83794+ if (!virt_addr_valid(ptr))
83795+ return false;
83796+
83797+ page = virt_to_head_page(ptr);
83798+
83799+ if (!PageSlab(page))
83800+ return false;
83801+
83802+ cachep = page->slab_cache;
83803+ return cachep->flags & SLAB_USERCOPY;
83804+}
83805+
83806+#ifdef CONFIG_PAX_USERCOPY
83807+const char *check_heap_object(const void *ptr, unsigned long n)
83808+{
83809+ struct page *page;
83810+ struct kmem_cache *cachep;
83811+ struct slab *slabp;
83812+ unsigned int objnr;
83813+ unsigned long offset;
83814+
83815+ if (ZERO_OR_NULL_PTR(ptr))
83816+ return "<null>";
83817+
83818+ if (!virt_addr_valid(ptr))
83819+ return NULL;
83820+
83821+ page = virt_to_head_page(ptr);
83822+
83823+ if (!PageSlab(page))
83824+ return NULL;
83825+
83826+ cachep = page->slab_cache;
83827+ if (!(cachep->flags & SLAB_USERCOPY))
83828+ return cachep->name;
83829+
83830+ slabp = page->slab_page;
83831+ objnr = obj_to_index(cachep, slabp, ptr);
83832+ BUG_ON(objnr >= cachep->num);
83833+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
83834+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
83835+ return NULL;
83836+
83837+ return cachep->name;
83838+}
83839+#endif
83840+
83841 /**
83842 * ksize - get the actual amount of memory allocated for a given object
83843 * @objp: Pointer to the object
83844diff --git a/mm/slab.h b/mm/slab.h
83845index 34a98d6..73633d1 100644
83846--- a/mm/slab.h
83847+++ b/mm/slab.h
83848@@ -58,7 +58,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
83849
83850 /* Legal flag mask for kmem_cache_create(), for various configurations */
83851 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
83852- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
83853+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | SLAB_USERCOPY)
83854
83855 #if defined(CONFIG_DEBUG_SLAB)
83856 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
83857@@ -220,6 +220,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
83858 return s;
83859
83860 page = virt_to_head_page(x);
83861+
83862+ BUG_ON(!PageSlab(page));
83863+
83864 cachep = page->slab_cache;
83865 if (slab_equal_or_root(cachep, s))
83866 return cachep;
83867diff --git a/mm/slab_common.c b/mm/slab_common.c
83868index 3f3cd97..93b0236 100644
83869--- a/mm/slab_common.c
83870+++ b/mm/slab_common.c
83871@@ -22,7 +22,7 @@
83872
83873 #include "slab.h"
83874
83875-enum slab_state slab_state;
83876+enum slab_state slab_state __read_only;
83877 LIST_HEAD(slab_caches);
83878 DEFINE_MUTEX(slab_mutex);
83879 struct kmem_cache *kmem_cache;
83880@@ -209,7 +209,7 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
83881
83882 err = __kmem_cache_create(s, flags);
83883 if (!err) {
83884- s->refcount = 1;
83885+ atomic_set(&s->refcount, 1);
83886 list_add(&s->list, &slab_caches);
83887 memcg_cache_list_add(memcg, s);
83888 } else {
83889@@ -255,8 +255,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
83890
83891 get_online_cpus();
83892 mutex_lock(&slab_mutex);
83893- s->refcount--;
83894- if (!s->refcount) {
83895+ if (atomic_dec_and_test(&s->refcount)) {
83896 list_del(&s->list);
83897
83898 if (!__kmem_cache_shutdown(s)) {
83899@@ -302,7 +301,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
83900 panic("Creation of kmalloc slab %s size=%zd failed. Reason %d\n",
83901 name, size, err);
83902
83903- s->refcount = -1; /* Exempt from merging for now */
83904+ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
83905 }
83906
83907 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
83908@@ -315,7 +314,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
83909
83910 create_boot_cache(s, name, size, flags);
83911 list_add(&s->list, &slab_caches);
83912- s->refcount = 1;
83913+ atomic_set(&s->refcount, 1);
83914 return s;
83915 }
83916
83917diff --git a/mm/slob.c b/mm/slob.c
83918index a99fdf7..6ee34ec 100644
83919--- a/mm/slob.c
83920+++ b/mm/slob.c
83921@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
83922 /*
83923 * Return the size of a slob block.
83924 */
83925-static slobidx_t slob_units(slob_t *s)
83926+static slobidx_t slob_units(const slob_t *s)
83927 {
83928 if (s->units > 0)
83929 return s->units;
83930@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
83931 /*
83932 * Return the next free slob block pointer after this one.
83933 */
83934-static slob_t *slob_next(slob_t *s)
83935+static slob_t *slob_next(const slob_t *s)
83936 {
83937 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
83938 slobidx_t next;
83939@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
83940 /*
83941 * Returns true if s is the last free block in its page.
83942 */
83943-static int slob_last(slob_t *s)
83944+static int slob_last(const slob_t *s)
83945 {
83946 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
83947 }
83948
83949-static void *slob_new_pages(gfp_t gfp, int order, int node)
83950+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
83951 {
83952- void *page;
83953+ struct page *page;
83954
83955 #ifdef CONFIG_NUMA
83956 if (node != NUMA_NO_NODE)
83957@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
83958 if (!page)
83959 return NULL;
83960
83961- return page_address(page);
83962+ __SetPageSlab(page);
83963+ return page;
83964 }
83965
83966-static void slob_free_pages(void *b, int order)
83967+static void slob_free_pages(struct page *sp, int order)
83968 {
83969 if (current->reclaim_state)
83970 current->reclaim_state->reclaimed_slab += 1 << order;
83971- free_pages((unsigned long)b, order);
83972+ __ClearPageSlab(sp);
83973+ reset_page_mapcount(sp);
83974+ sp->private = 0;
83975+ __free_pages(sp, order);
83976 }
83977
83978 /*
83979@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
83980
83981 /* Not enough space: must allocate a new page */
83982 if (!b) {
83983- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
83984- if (!b)
83985+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
83986+ if (!sp)
83987 return NULL;
83988- sp = virt_to_page(b);
83989- __SetPageSlab(sp);
83990+ b = page_address(sp);
83991
83992 spin_lock_irqsave(&slob_lock, flags);
83993 sp->units = SLOB_UNITS(PAGE_SIZE);
83994 sp->freelist = b;
83995+ sp->private = 0;
83996 INIT_LIST_HEAD(&sp->list);
83997 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
83998 set_slob_page_free(sp, slob_list);
83999@@ -359,9 +363,7 @@ static void slob_free(void *block, int size)
84000 if (slob_page_free(sp))
84001 clear_slob_page_free(sp);
84002 spin_unlock_irqrestore(&slob_lock, flags);
84003- __ClearPageSlab(sp);
84004- reset_page_mapcount(sp);
84005- slob_free_pages(b, 0);
84006+ slob_free_pages(sp, 0);
84007 return;
84008 }
84009
84010@@ -424,11 +426,10 @@ out:
84011 */
84012
84013 static __always_inline void *
84014-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
84015+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
84016 {
84017- unsigned int *m;
84018- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
84019- void *ret;
84020+ slob_t *m;
84021+ void *ret = NULL;
84022
84023 gfp &= gfp_allowed_mask;
84024
84025@@ -442,23 +443,41 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
84026
84027 if (!m)
84028 return NULL;
84029- *m = size;
84030+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
84031+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
84032+ m[0].units = size;
84033+ m[1].units = align;
84034 ret = (void *)m + align;
84035
84036 trace_kmalloc_node(caller, ret,
84037 size, size + align, gfp, node);
84038 } else {
84039 unsigned int order = get_order(size);
84040+ struct page *page;
84041
84042 if (likely(order))
84043 gfp |= __GFP_COMP;
84044- ret = slob_new_pages(gfp, order, node);
84045+ page = slob_new_pages(gfp, order, node);
84046+ if (page) {
84047+ ret = page_address(page);
84048+ page->private = size;
84049+ }
84050
84051 trace_kmalloc_node(caller, ret,
84052 size, PAGE_SIZE << order, gfp, node);
84053 }
84054
84055- kmemleak_alloc(ret, size, 1, gfp);
84056+ return ret;
84057+}
84058+
84059+static __always_inline void *
84060+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
84061+{
84062+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
84063+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
84064+
84065+ if (!ZERO_OR_NULL_PTR(ret))
84066+ kmemleak_alloc(ret, size, 1, gfp);
84067 return ret;
84068 }
84069
84070@@ -493,34 +512,112 @@ void kfree(const void *block)
84071 return;
84072 kmemleak_free(block);
84073
84074+ VM_BUG_ON(!virt_addr_valid(block));
84075 sp = virt_to_page(block);
84076- if (PageSlab(sp)) {
84077+ VM_BUG_ON(!PageSlab(sp));
84078+ if (!sp->private) {
84079 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
84080- unsigned int *m = (unsigned int *)(block - align);
84081- slob_free(m, *m + align);
84082- } else
84083+ slob_t *m = (slob_t *)(block - align);
84084+ slob_free(m, m[0].units + align);
84085+ } else {
84086+ __ClearPageSlab(sp);
84087+ reset_page_mapcount(sp);
84088+ sp->private = 0;
84089 __free_pages(sp, compound_order(sp));
84090+ }
84091 }
84092 EXPORT_SYMBOL(kfree);
84093
84094+bool is_usercopy_object(const void *ptr)
84095+{
84096+ if (!slab_is_available())
84097+ return false;
84098+
84099+ // PAX: TODO
84100+
84101+ return false;
84102+}
84103+
84104+#ifdef CONFIG_PAX_USERCOPY
84105+const char *check_heap_object(const void *ptr, unsigned long n)
84106+{
84107+ struct page *page;
84108+ const slob_t *free;
84109+ const void *base;
84110+ unsigned long flags;
84111+
84112+ if (ZERO_OR_NULL_PTR(ptr))
84113+ return "<null>";
84114+
84115+ if (!virt_addr_valid(ptr))
84116+ return NULL;
84117+
84118+ page = virt_to_head_page(ptr);
84119+ if (!PageSlab(page))
84120+ return NULL;
84121+
84122+ if (page->private) {
84123+ base = page;
84124+ if (base <= ptr && n <= page->private - (ptr - base))
84125+ return NULL;
84126+ return "<slob>";
84127+ }
84128+
84129+ /* some tricky double walking to find the chunk */
84130+ spin_lock_irqsave(&slob_lock, flags);
84131+ base = (void *)((unsigned long)ptr & PAGE_MASK);
84132+ free = page->freelist;
84133+
84134+ while (!slob_last(free) && (void *)free <= ptr) {
84135+ base = free + slob_units(free);
84136+ free = slob_next(free);
84137+ }
84138+
84139+ while (base < (void *)free) {
84140+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
84141+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
84142+ int offset;
84143+
84144+ if (ptr < base + align)
84145+ break;
84146+
84147+ offset = ptr - base - align;
84148+ if (offset >= m) {
84149+ base += size;
84150+ continue;
84151+ }
84152+
84153+ if (n > m - offset)
84154+ break;
84155+
84156+ spin_unlock_irqrestore(&slob_lock, flags);
84157+ return NULL;
84158+ }
84159+
84160+ spin_unlock_irqrestore(&slob_lock, flags);
84161+ return "<slob>";
84162+}
84163+#endif
84164+
84165 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
84166 size_t ksize(const void *block)
84167 {
84168 struct page *sp;
84169 int align;
84170- unsigned int *m;
84171+ slob_t *m;
84172
84173 BUG_ON(!block);
84174 if (unlikely(block == ZERO_SIZE_PTR))
84175 return 0;
84176
84177 sp = virt_to_page(block);
84178- if (unlikely(!PageSlab(sp)))
84179- return PAGE_SIZE << compound_order(sp);
84180+ VM_BUG_ON(!PageSlab(sp));
84181+ if (sp->private)
84182+ return sp->private;
84183
84184 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
84185- m = (unsigned int *)(block - align);
84186- return SLOB_UNITS(*m) * SLOB_UNIT;
84187+ m = (slob_t *)(block - align);
84188+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
84189 }
84190 EXPORT_SYMBOL(ksize);
84191
84192@@ -536,23 +633,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
84193
84194 void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
84195 {
84196- void *b;
84197+ void *b = NULL;
84198
84199 flags &= gfp_allowed_mask;
84200
84201 lockdep_trace_alloc(flags);
84202
84203+#ifdef CONFIG_PAX_USERCOPY_SLABS
84204+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
84205+#else
84206 if (c->size < PAGE_SIZE) {
84207 b = slob_alloc(c->size, flags, c->align, node);
84208 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
84209 SLOB_UNITS(c->size) * SLOB_UNIT,
84210 flags, node);
84211 } else {
84212- b = slob_new_pages(flags, get_order(c->size), node);
84213+ struct page *sp;
84214+
84215+ sp = slob_new_pages(flags, get_order(c->size), node);
84216+ if (sp) {
84217+ b = page_address(sp);
84218+ sp->private = c->size;
84219+ }
84220 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
84221 PAGE_SIZE << get_order(c->size),
84222 flags, node);
84223 }
84224+#endif
84225
84226 if (c->ctor)
84227 c->ctor(b);
84228@@ -564,10 +671,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
84229
84230 static void __kmem_cache_free(void *b, int size)
84231 {
84232- if (size < PAGE_SIZE)
84233+ struct page *sp;
84234+
84235+ sp = virt_to_page(b);
84236+ BUG_ON(!PageSlab(sp));
84237+ if (!sp->private)
84238 slob_free(b, size);
84239 else
84240- slob_free_pages(b, get_order(size));
84241+ slob_free_pages(sp, get_order(size));
84242 }
84243
84244 static void kmem_rcu_free(struct rcu_head *head)
84245@@ -580,17 +691,31 @@ static void kmem_rcu_free(struct rcu_head *head)
84246
84247 void kmem_cache_free(struct kmem_cache *c, void *b)
84248 {
84249+ int size = c->size;
84250+
84251+#ifdef CONFIG_PAX_USERCOPY_SLABS
84252+ if (size + c->align < PAGE_SIZE) {
84253+ size += c->align;
84254+ b -= c->align;
84255+ }
84256+#endif
84257+
84258 kmemleak_free_recursive(b, c->flags);
84259 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
84260 struct slob_rcu *slob_rcu;
84261- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
84262- slob_rcu->size = c->size;
84263+ slob_rcu = b + (size - sizeof(struct slob_rcu));
84264+ slob_rcu->size = size;
84265 call_rcu(&slob_rcu->head, kmem_rcu_free);
84266 } else {
84267- __kmem_cache_free(b, c->size);
84268+ __kmem_cache_free(b, size);
84269 }
84270
84271+#ifdef CONFIG_PAX_USERCOPY_SLABS
84272+ trace_kfree(_RET_IP_, b);
84273+#else
84274 trace_kmem_cache_free(_RET_IP_, b);
84275+#endif
84276+
84277 }
84278 EXPORT_SYMBOL(kmem_cache_free);
84279
84280diff --git a/mm/slub.c b/mm/slub.c
84281index ba2ca53..991c4f7 100644
84282--- a/mm/slub.c
84283+++ b/mm/slub.c
84284@@ -197,7 +197,7 @@ struct track {
84285
84286 enum track_item { TRACK_ALLOC, TRACK_FREE };
84287
84288-#ifdef CONFIG_SYSFS
84289+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
84290 static int sysfs_slab_add(struct kmem_cache *);
84291 static int sysfs_slab_alias(struct kmem_cache *, const char *);
84292 static void sysfs_slab_remove(struct kmem_cache *);
84293@@ -518,7 +518,7 @@ static void print_track(const char *s, struct track *t)
84294 if (!t->addr)
84295 return;
84296
84297- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
84298+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
84299 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
84300 #ifdef CONFIG_STACKTRACE
84301 {
84302@@ -2653,7 +2653,7 @@ static int slub_min_objects;
84303 * Merge control. If this is set then no merging of slab caches will occur.
84304 * (Could be removed. This was introduced to pacify the merge skeptics.)
84305 */
84306-static int slub_nomerge;
84307+static int slub_nomerge = 1;
84308
84309 /*
84310 * Calculate the order of allocation given an slab object size.
84311@@ -3181,6 +3181,10 @@ EXPORT_SYMBOL(kmalloc_caches);
84312 static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
84313 #endif
84314
84315+#ifdef CONFIG_PAX_USERCOPY_SLABS
84316+static struct kmem_cache *kmalloc_usercopy_caches[SLUB_PAGE_SHIFT];
84317+#endif
84318+
84319 static int __init setup_slub_min_order(char *str)
84320 {
84321 get_option(&str, &slub_min_order);
84322@@ -3272,6 +3276,13 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
84323 return kmalloc_dma_caches[index];
84324
84325 #endif
84326+
84327+#ifdef CONFIG_PAX_USERCOPY_SLABS
84328+ if (flags & SLAB_USERCOPY)
84329+ return kmalloc_usercopy_caches[index];
84330+
84331+#endif
84332+
84333 return kmalloc_caches[index];
84334 }
84335
84336@@ -3340,6 +3351,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
84337 EXPORT_SYMBOL(__kmalloc_node);
84338 #endif
84339
84340+bool is_usercopy_object(const void *ptr)
84341+{
84342+ struct page *page;
84343+ struct kmem_cache *s;
84344+
84345+ if (ZERO_OR_NULL_PTR(ptr))
84346+ return false;
84347+
84348+ if (!slab_is_available())
84349+ return false;
84350+
84351+ if (!virt_addr_valid(ptr))
84352+ return false;
84353+
84354+ page = virt_to_head_page(ptr);
84355+
84356+ if (!PageSlab(page))
84357+ return false;
84358+
84359+ s = page->slab_cache;
84360+ return s->flags & SLAB_USERCOPY;
84361+}
84362+
84363+#ifdef CONFIG_PAX_USERCOPY
84364+const char *check_heap_object(const void *ptr, unsigned long n)
84365+{
84366+ struct page *page;
84367+ struct kmem_cache *s;
84368+ unsigned long offset;
84369+
84370+ if (ZERO_OR_NULL_PTR(ptr))
84371+ return "<null>";
84372+
84373+ if (!virt_addr_valid(ptr))
84374+ return NULL;
84375+
84376+ page = virt_to_head_page(ptr);
84377+
84378+ if (!PageSlab(page))
84379+ return NULL;
84380+
84381+ s = page->slab_cache;
84382+ if (!(s->flags & SLAB_USERCOPY))
84383+ return s->name;
84384+
84385+ offset = (ptr - page_address(page)) % s->size;
84386+ if (offset <= s->object_size && n <= s->object_size - offset)
84387+ return NULL;
84388+
84389+ return s->name;
84390+}
84391+#endif
84392+
84393 size_t ksize(const void *object)
84394 {
84395 struct page *page;
84396@@ -3404,6 +3468,7 @@ void kfree(const void *x)
84397 if (unlikely(ZERO_OR_NULL_PTR(x)))
84398 return;
84399
84400+ VM_BUG_ON(!virt_addr_valid(x));
84401 page = virt_to_head_page(x);
84402 if (unlikely(!PageSlab(page))) {
84403 BUG_ON(!PageCompound(page));
84404@@ -3712,17 +3777,17 @@ void __init kmem_cache_init(void)
84405
84406 /* Caches that are not of the two-to-the-power-of size */
84407 if (KMALLOC_MIN_SIZE <= 32) {
84408- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
84409+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
84410 caches++;
84411 }
84412
84413 if (KMALLOC_MIN_SIZE <= 64) {
84414- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
84415+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
84416 caches++;
84417 }
84418
84419 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
84420- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
84421+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
84422 caches++;
84423 }
84424
84425@@ -3764,6 +3829,22 @@ void __init kmem_cache_init(void)
84426 }
84427 }
84428 #endif
84429+
84430+#ifdef CONFIG_PAX_USERCOPY_SLABS
84431+ for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
84432+ struct kmem_cache *s = kmalloc_caches[i];
84433+
84434+ if (s && s->size) {
84435+ char *name = kasprintf(GFP_NOWAIT,
84436+ "usercopy-kmalloc-%d", s->object_size);
84437+
84438+ BUG_ON(!name);
84439+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(name,
84440+ s->object_size, SLAB_USERCOPY);
84441+ }
84442+ }
84443+#endif
84444+
84445 printk(KERN_INFO
84446 "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
84447 " CPUs=%d, Nodes=%d\n",
84448@@ -3790,7 +3871,7 @@ static int slab_unmergeable(struct kmem_cache *s)
84449 /*
84450 * We may have set a slab to be unmergeable during bootstrap.
84451 */
84452- if (s->refcount < 0)
84453+ if (atomic_read(&s->refcount) < 0)
84454 return 1;
84455
84456 return 0;
84457@@ -3848,7 +3929,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
84458
84459 s = find_mergeable(memcg, size, align, flags, name, ctor);
84460 if (s) {
84461- s->refcount++;
84462+ atomic_inc(&s->refcount);
84463 /*
84464 * Adjust the object sizes so that we clear
84465 * the complete object on kzalloc.
84466@@ -3857,7 +3938,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
84467 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
84468
84469 if (sysfs_slab_alias(s, name)) {
84470- s->refcount--;
84471+ atomic_dec(&s->refcount);
84472 s = NULL;
84473 }
84474 }
84475@@ -3919,7 +4000,7 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
84476 return NOTIFY_OK;
84477 }
84478
84479-static struct notifier_block __cpuinitdata slab_notifier = {
84480+static struct notifier_block slab_notifier = {
84481 .notifier_call = slab_cpuup_callback
84482 };
84483
84484@@ -3977,7 +4058,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
84485 }
84486 #endif
84487
84488-#ifdef CONFIG_SYSFS
84489+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
84490 static int count_inuse(struct page *page)
84491 {
84492 return page->inuse;
84493@@ -4364,12 +4445,12 @@ static void resiliency_test(void)
84494 validate_slab_cache(kmalloc_caches[9]);
84495 }
84496 #else
84497-#ifdef CONFIG_SYSFS
84498+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
84499 static void resiliency_test(void) {};
84500 #endif
84501 #endif
84502
84503-#ifdef CONFIG_SYSFS
84504+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
84505 enum slab_stat_type {
84506 SL_ALL, /* All slabs */
84507 SL_PARTIAL, /* Only partially allocated slabs */
84508@@ -4613,7 +4694,7 @@ SLAB_ATTR_RO(ctor);
84509
84510 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
84511 {
84512- return sprintf(buf, "%d\n", s->refcount - 1);
84513+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
84514 }
84515 SLAB_ATTR_RO(aliases);
84516
84517@@ -5266,6 +5347,7 @@ static char *create_unique_id(struct kmem_cache *s)
84518 return name;
84519 }
84520
84521+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
84522 static int sysfs_slab_add(struct kmem_cache *s)
84523 {
84524 int err;
84525@@ -5323,6 +5405,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
84526 kobject_del(&s->kobj);
84527 kobject_put(&s->kobj);
84528 }
84529+#endif
84530
84531 /*
84532 * Need to buffer aliases during bootup until sysfs becomes
84533@@ -5336,6 +5419,7 @@ struct saved_alias {
84534
84535 static struct saved_alias *alias_list;
84536
84537+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
84538 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
84539 {
84540 struct saved_alias *al;
84541@@ -5358,6 +5442,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
84542 alias_list = al;
84543 return 0;
84544 }
84545+#endif
84546
84547 static int __init slab_sysfs_init(void)
84548 {
84549diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
84550index 1b7e22a..3fcd4f3 100644
84551--- a/mm/sparse-vmemmap.c
84552+++ b/mm/sparse-vmemmap.c
84553@@ -128,7 +128,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
84554 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
84555 if (!p)
84556 return NULL;
84557- pud_populate(&init_mm, pud, p);
84558+ pud_populate_kernel(&init_mm, pud, p);
84559 }
84560 return pud;
84561 }
84562@@ -140,7 +140,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
84563 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
84564 if (!p)
84565 return NULL;
84566- pgd_populate(&init_mm, pgd, p);
84567+ pgd_populate_kernel(&init_mm, pgd, p);
84568 }
84569 return pgd;
84570 }
84571diff --git a/mm/sparse.c b/mm/sparse.c
84572index 6b5fb76..db0c190 100644
84573--- a/mm/sparse.c
84574+++ b/mm/sparse.c
84575@@ -782,7 +782,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
84576
84577 for (i = 0; i < PAGES_PER_SECTION; i++) {
84578 if (PageHWPoison(&memmap[i])) {
84579- atomic_long_sub(1, &mce_bad_pages);
84580+ atomic_long_sub_unchecked(1, &mce_bad_pages);
84581 ClearPageHWPoison(&memmap[i]);
84582 }
84583 }
84584diff --git a/mm/swap.c b/mm/swap.c
84585index 6310dc2..3662b3f 100644
84586--- a/mm/swap.c
84587+++ b/mm/swap.c
84588@@ -30,6 +30,7 @@
84589 #include <linux/backing-dev.h>
84590 #include <linux/memcontrol.h>
84591 #include <linux/gfp.h>
84592+#include <linux/hugetlb.h>
84593
84594 #include "internal.h"
84595
84596@@ -72,6 +73,8 @@ static void __put_compound_page(struct page *page)
84597
84598 __page_cache_release(page);
84599 dtor = get_compound_page_dtor(page);
84600+ if (!PageHuge(page))
84601+ BUG_ON(dtor != free_compound_page);
84602 (*dtor)(page);
84603 }
84604
84605diff --git a/mm/swapfile.c b/mm/swapfile.c
84606index e97a0e5..b50e796 100644
84607--- a/mm/swapfile.c
84608+++ b/mm/swapfile.c
84609@@ -64,7 +64,7 @@ static DEFINE_MUTEX(swapon_mutex);
84610
84611 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
84612 /* Activity counter to indicate that a swapon or swapoff has occurred */
84613-static atomic_t proc_poll_event = ATOMIC_INIT(0);
84614+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
84615
84616 static inline unsigned char swap_count(unsigned char ent)
84617 {
84618@@ -1608,7 +1608,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
84619 }
84620 filp_close(swap_file, NULL);
84621 err = 0;
84622- atomic_inc(&proc_poll_event);
84623+ atomic_inc_unchecked(&proc_poll_event);
84624 wake_up_interruptible(&proc_poll_wait);
84625
84626 out_dput:
84627@@ -1625,8 +1625,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
84628
84629 poll_wait(file, &proc_poll_wait, wait);
84630
84631- if (seq->poll_event != atomic_read(&proc_poll_event)) {
84632- seq->poll_event = atomic_read(&proc_poll_event);
84633+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
84634+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
84635 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
84636 }
84637
84638@@ -1724,7 +1724,7 @@ static int swaps_open(struct inode *inode, struct file *file)
84639 return ret;
84640
84641 seq = file->private_data;
84642- seq->poll_event = atomic_read(&proc_poll_event);
84643+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
84644 return 0;
84645 }
84646
84647@@ -2066,7 +2066,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
84648 (frontswap_map) ? "FS" : "");
84649
84650 mutex_unlock(&swapon_mutex);
84651- atomic_inc(&proc_poll_event);
84652+ atomic_inc_unchecked(&proc_poll_event);
84653 wake_up_interruptible(&proc_poll_wait);
84654
84655 if (S_ISREG(inode->i_mode))
84656diff --git a/mm/util.c b/mm/util.c
84657index c55e26b..3f913a9 100644
84658--- a/mm/util.c
84659+++ b/mm/util.c
84660@@ -292,6 +292,12 @@ done:
84661 void arch_pick_mmap_layout(struct mm_struct *mm)
84662 {
84663 mm->mmap_base = TASK_UNMAPPED_BASE;
84664+
84665+#ifdef CONFIG_PAX_RANDMMAP
84666+ if (mm->pax_flags & MF_PAX_RANDMMAP)
84667+ mm->mmap_base += mm->delta_mmap;
84668+#endif
84669+
84670 mm->get_unmapped_area = arch_get_unmapped_area;
84671 mm->unmap_area = arch_unmap_area;
84672 }
84673diff --git a/mm/vmalloc.c b/mm/vmalloc.c
84674index 5123a16..f234a48 100644
84675--- a/mm/vmalloc.c
84676+++ b/mm/vmalloc.c
84677@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
84678
84679 pte = pte_offset_kernel(pmd, addr);
84680 do {
84681- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
84682- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
84683+
84684+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
84685+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
84686+ BUG_ON(!pte_exec(*pte));
84687+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
84688+ continue;
84689+ }
84690+#endif
84691+
84692+ {
84693+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
84694+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
84695+ }
84696 } while (pte++, addr += PAGE_SIZE, addr != end);
84697 }
84698
84699@@ -100,16 +111,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
84700 pte = pte_alloc_kernel(pmd, addr);
84701 if (!pte)
84702 return -ENOMEM;
84703+
84704+ pax_open_kernel();
84705 do {
84706 struct page *page = pages[*nr];
84707
84708- if (WARN_ON(!pte_none(*pte)))
84709+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
84710+ if (pgprot_val(prot) & _PAGE_NX)
84711+#endif
84712+
84713+ if (!pte_none(*pte)) {
84714+ pax_close_kernel();
84715+ WARN_ON(1);
84716 return -EBUSY;
84717- if (WARN_ON(!page))
84718+ }
84719+ if (!page) {
84720+ pax_close_kernel();
84721+ WARN_ON(1);
84722 return -ENOMEM;
84723+ }
84724 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
84725 (*nr)++;
84726 } while (pte++, addr += PAGE_SIZE, addr != end);
84727+ pax_close_kernel();
84728 return 0;
84729 }
84730
84731@@ -119,7 +143,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
84732 pmd_t *pmd;
84733 unsigned long next;
84734
84735- pmd = pmd_alloc(&init_mm, pud, addr);
84736+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
84737 if (!pmd)
84738 return -ENOMEM;
84739 do {
84740@@ -136,7 +160,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
84741 pud_t *pud;
84742 unsigned long next;
84743
84744- pud = pud_alloc(&init_mm, pgd, addr);
84745+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
84746 if (!pud)
84747 return -ENOMEM;
84748 do {
84749@@ -191,11 +215,20 @@ int is_vmalloc_or_module_addr(const void *x)
84750 * and fall back on vmalloc() if that fails. Others
84751 * just put it in the vmalloc space.
84752 */
84753-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
84754+#ifdef CONFIG_MODULES
84755+#ifdef MODULES_VADDR
84756 unsigned long addr = (unsigned long)x;
84757 if (addr >= MODULES_VADDR && addr < MODULES_END)
84758 return 1;
84759 #endif
84760+
84761+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
84762+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
84763+ return 1;
84764+#endif
84765+
84766+#endif
84767+
84768 return is_vmalloc_addr(x);
84769 }
84770
84771@@ -216,8 +249,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
84772
84773 if (!pgd_none(*pgd)) {
84774 pud_t *pud = pud_offset(pgd, addr);
84775+#ifdef CONFIG_X86
84776+ if (!pud_large(*pud))
84777+#endif
84778 if (!pud_none(*pud)) {
84779 pmd_t *pmd = pmd_offset(pud, addr);
84780+#ifdef CONFIG_X86
84781+ if (!pmd_large(*pmd))
84782+#endif
84783 if (!pmd_none(*pmd)) {
84784 pte_t *ptep, pte;
84785
84786@@ -329,7 +368,7 @@ static void purge_vmap_area_lazy(void);
84787 * Allocate a region of KVA of the specified size and alignment, within the
84788 * vstart and vend.
84789 */
84790-static struct vmap_area *alloc_vmap_area(unsigned long size,
84791+static __size_overflow(1) struct vmap_area *alloc_vmap_area(unsigned long size,
84792 unsigned long align,
84793 unsigned long vstart, unsigned long vend,
84794 int node, gfp_t gfp_mask)
84795@@ -1328,6 +1367,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
84796 struct vm_struct *area;
84797
84798 BUG_ON(in_interrupt());
84799+
84800+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
84801+ if (flags & VM_KERNEXEC) {
84802+ if (start != VMALLOC_START || end != VMALLOC_END)
84803+ return NULL;
84804+ start = (unsigned long)MODULES_EXEC_VADDR;
84805+ end = (unsigned long)MODULES_EXEC_END;
84806+ }
84807+#endif
84808+
84809 if (flags & VM_IOREMAP) {
84810 int bit = fls(size);
84811
84812@@ -1568,6 +1617,11 @@ void *vmap(struct page **pages, unsigned int count,
84813 if (count > totalram_pages)
84814 return NULL;
84815
84816+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
84817+ if (!(pgprot_val(prot) & _PAGE_NX))
84818+ flags |= VM_KERNEXEC;
84819+#endif
84820+
84821 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
84822 __builtin_return_address(0));
84823 if (!area)
84824@@ -1669,6 +1723,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
84825 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
84826 goto fail;
84827
84828+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
84829+ if (!(pgprot_val(prot) & _PAGE_NX))
84830+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
84831+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
84832+ else
84833+#endif
84834+
84835 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
84836 start, end, node, gfp_mask, caller);
84837 if (!area)
84838@@ -1842,10 +1903,9 @@ EXPORT_SYMBOL(vzalloc_node);
84839 * For tight control over page level allocator and protection flags
84840 * use __vmalloc() instead.
84841 */
84842-
84843 void *vmalloc_exec(unsigned long size)
84844 {
84845- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
84846+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
84847 -1, __builtin_return_address(0));
84848 }
84849
84850@@ -2136,6 +2196,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
84851 unsigned long uaddr = vma->vm_start;
84852 unsigned long usize = vma->vm_end - vma->vm_start;
84853
84854+ BUG_ON(vma->vm_mirror);
84855+
84856 if ((PAGE_SIZE-1) & (unsigned long)addr)
84857 return -EINVAL;
84858
84859@@ -2575,7 +2637,11 @@ static int s_show(struct seq_file *m, void *p)
84860 v->addr, v->addr + v->size, v->size);
84861
84862 if (v->caller)
84863+#ifdef CONFIG_GRKERNSEC_HIDESYM
84864+ seq_printf(m, " %pK", v->caller);
84865+#else
84866 seq_printf(m, " %pS", v->caller);
84867+#endif
84868
84869 if (v->nr_pages)
84870 seq_printf(m, " pages=%d", v->nr_pages);
84871diff --git a/mm/vmstat.c b/mm/vmstat.c
84872index 9800306..76b4b27 100644
84873--- a/mm/vmstat.c
84874+++ b/mm/vmstat.c
84875@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
84876 *
84877 * vm_stat contains the global counters
84878 */
84879-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
84880+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
84881 EXPORT_SYMBOL(vm_stat);
84882
84883 #ifdef CONFIG_SMP
84884@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
84885 v = p->vm_stat_diff[i];
84886 p->vm_stat_diff[i] = 0;
84887 local_irq_restore(flags);
84888- atomic_long_add(v, &zone->vm_stat[i]);
84889+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
84890 global_diff[i] += v;
84891 #ifdef CONFIG_NUMA
84892 /* 3 seconds idle till flush */
84893@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
84894
84895 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
84896 if (global_diff[i])
84897- atomic_long_add(global_diff[i], &vm_stat[i]);
84898+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
84899 }
84900
84901 void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
84902@@ -503,8 +503,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
84903 if (pset->vm_stat_diff[i]) {
84904 int v = pset->vm_stat_diff[i];
84905 pset->vm_stat_diff[i] = 0;
84906- atomic_long_add(v, &zone->vm_stat[i]);
84907- atomic_long_add(v, &vm_stat[i]);
84908+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
84909+ atomic_long_add_unchecked(v, &vm_stat[i]);
84910 }
84911 }
84912 #endif
84913@@ -1223,7 +1223,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
84914 return NOTIFY_OK;
84915 }
84916
84917-static struct notifier_block __cpuinitdata vmstat_notifier =
84918+static struct notifier_block vmstat_notifier =
84919 { &vmstat_cpuup_callback, NULL, 0 };
84920 #endif
84921
84922@@ -1238,10 +1238,20 @@ static int __init setup_vmstat(void)
84923 start_cpu_timer(cpu);
84924 #endif
84925 #ifdef CONFIG_PROC_FS
84926- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
84927- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
84928- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
84929- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
84930+ {
84931+ mode_t gr_mode = S_IRUGO;
84932+#ifdef CONFIG_GRKERNSEC_PROC_ADD
84933+ gr_mode = S_IRUSR;
84934+#endif
84935+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
84936+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
84937+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
84938+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
84939+#else
84940+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
84941+#endif
84942+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
84943+ }
84944 #endif
84945 return 0;
84946 }
84947diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
84948index acc74ad..be02639 100644
84949--- a/net/8021q/vlan.c
84950+++ b/net/8021q/vlan.c
84951@@ -108,6 +108,13 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
84952 if (vlan_id)
84953 vlan_vid_del(real_dev, vlan_id);
84954
84955+ /* Take it out of our own structures, but be sure to interlock with
84956+ * HW accelerating devices or SW vlan input packet processing if
84957+ * VLAN is not 0 (leave it there for 802.1p).
84958+ */
84959+ if (vlan_id)
84960+ vlan_vid_del(real_dev, vlan_id);
84961+
84962 /* Get rid of the vlan's reference to real_dev */
84963 dev_put(real_dev);
84964 }
84965@@ -485,7 +492,7 @@ out:
84966 return NOTIFY_DONE;
84967 }
84968
84969-static struct notifier_block vlan_notifier_block __read_mostly = {
84970+static struct notifier_block vlan_notifier_block = {
84971 .notifier_call = vlan_device_event,
84972 };
84973
84974@@ -560,8 +567,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
84975 err = -EPERM;
84976 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
84977 break;
84978- if ((args.u.name_type >= 0) &&
84979- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
84980+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
84981 struct vlan_net *vn;
84982
84983 vn = net_generic(net, vlan_net_id);
84984diff --git a/net/9p/mod.c b/net/9p/mod.c
84985index 6ab36ae..6f1841b 100644
84986--- a/net/9p/mod.c
84987+++ b/net/9p/mod.c
84988@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
84989 void v9fs_register_trans(struct p9_trans_module *m)
84990 {
84991 spin_lock(&v9fs_trans_lock);
84992- list_add_tail(&m->list, &v9fs_trans_list);
84993+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
84994 spin_unlock(&v9fs_trans_lock);
84995 }
84996 EXPORT_SYMBOL(v9fs_register_trans);
84997@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
84998 void v9fs_unregister_trans(struct p9_trans_module *m)
84999 {
85000 spin_lock(&v9fs_trans_lock);
85001- list_del_init(&m->list);
85002+ pax_list_del_init((struct list_head *)&m->list);
85003 spin_unlock(&v9fs_trans_lock);
85004 }
85005 EXPORT_SYMBOL(v9fs_unregister_trans);
85006diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
85007index 02efb25..41541a9 100644
85008--- a/net/9p/trans_fd.c
85009+++ b/net/9p/trans_fd.c
85010@@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
85011 oldfs = get_fs();
85012 set_fs(get_ds());
85013 /* The cast to a user pointer is valid due to the set_fs() */
85014- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
85015+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
85016 set_fs(oldfs);
85017
85018 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
85019diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
85020index 876fbe8..8bbea9f 100644
85021--- a/net/atm/atm_misc.c
85022+++ b/net/atm/atm_misc.c
85023@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
85024 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
85025 return 1;
85026 atm_return(vcc, truesize);
85027- atomic_inc(&vcc->stats->rx_drop);
85028+ atomic_inc_unchecked(&vcc->stats->rx_drop);
85029 return 0;
85030 }
85031 EXPORT_SYMBOL(atm_charge);
85032@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
85033 }
85034 }
85035 atm_return(vcc, guess);
85036- atomic_inc(&vcc->stats->rx_drop);
85037+ atomic_inc_unchecked(&vcc->stats->rx_drop);
85038 return NULL;
85039 }
85040 EXPORT_SYMBOL(atm_alloc_charge);
85041@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
85042
85043 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
85044 {
85045-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
85046+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
85047 __SONET_ITEMS
85048 #undef __HANDLE_ITEM
85049 }
85050@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
85051
85052 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
85053 {
85054-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
85055+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
85056 __SONET_ITEMS
85057 #undef __HANDLE_ITEM
85058 }
85059diff --git a/net/atm/lec.h b/net/atm/lec.h
85060index a86aff9..3a0d6f6 100644
85061--- a/net/atm/lec.h
85062+++ b/net/atm/lec.h
85063@@ -48,7 +48,7 @@ struct lane2_ops {
85064 const u8 *tlvs, u32 sizeoftlvs);
85065 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
85066 const u8 *tlvs, u32 sizeoftlvs);
85067-};
85068+} __no_const;
85069
85070 /*
85071 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
85072diff --git a/net/atm/proc.c b/net/atm/proc.c
85073index 0d020de..011c7bb 100644
85074--- a/net/atm/proc.c
85075+++ b/net/atm/proc.c
85076@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
85077 const struct k_atm_aal_stats *stats)
85078 {
85079 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
85080- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
85081- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
85082- atomic_read(&stats->rx_drop));
85083+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
85084+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
85085+ atomic_read_unchecked(&stats->rx_drop));
85086 }
85087
85088 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
85089diff --git a/net/atm/resources.c b/net/atm/resources.c
85090index 0447d5d..3cf4728 100644
85091--- a/net/atm/resources.c
85092+++ b/net/atm/resources.c
85093@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
85094 static void copy_aal_stats(struct k_atm_aal_stats *from,
85095 struct atm_aal_stats *to)
85096 {
85097-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
85098+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
85099 __AAL_STAT_ITEMS
85100 #undef __HANDLE_ITEM
85101 }
85102@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
85103 static void subtract_aal_stats(struct k_atm_aal_stats *from,
85104 struct atm_aal_stats *to)
85105 {
85106-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
85107+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
85108 __AAL_STAT_ITEMS
85109 #undef __HANDLE_ITEM
85110 }
85111diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
85112index d5744b7..506bae3 100644
85113--- a/net/ax25/sysctl_net_ax25.c
85114+++ b/net/ax25/sysctl_net_ax25.c
85115@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
85116 {
85117 char path[sizeof("net/ax25/") + IFNAMSIZ];
85118 int k;
85119- struct ctl_table *table;
85120+ ctl_table_no_const *table;
85121
85122 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
85123 if (!table)
85124diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
85125index 1ee94d0..14beea2 100644
85126--- a/net/batman-adv/bat_iv_ogm.c
85127+++ b/net/batman-adv/bat_iv_ogm.c
85128@@ -63,7 +63,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
85129
85130 /* randomize initial seqno to avoid collision */
85131 get_random_bytes(&random_seqno, sizeof(random_seqno));
85132- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
85133+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
85134
85135 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
85136 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
85137@@ -615,9 +615,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
85138 batadv_ogm_packet = (struct batadv_ogm_packet *)(*ogm_buff);
85139
85140 /* change sequence number to network order */
85141- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
85142+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
85143 batadv_ogm_packet->seqno = htonl(seqno);
85144- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
85145+ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
85146
85147 batadv_ogm_packet->ttvn = atomic_read(&bat_priv->tt.vn);
85148 batadv_ogm_packet->tt_crc = htons(bat_priv->tt.local_crc);
85149@@ -1022,7 +1022,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
85150 return;
85151
85152 /* could be changed by schedule_own_packet() */
85153- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
85154+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
85155
85156 if (batadv_ogm_packet->flags & BATADV_DIRECTLINK)
85157 has_directlink_flag = 1;
85158diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
85159index f1d37cd..4190879 100644
85160--- a/net/batman-adv/hard-interface.c
85161+++ b/net/batman-adv/hard-interface.c
85162@@ -370,7 +370,7 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
85163 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
85164 dev_add_pack(&hard_iface->batman_adv_ptype);
85165
85166- atomic_set(&hard_iface->frag_seqno, 1);
85167+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
85168 batadv_info(hard_iface->soft_iface, "Adding interface: %s\n",
85169 hard_iface->net_dev->name);
85170
85171@@ -493,7 +493,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
85172 /* This can't be called via a bat_priv callback because
85173 * we have no bat_priv yet.
85174 */
85175- atomic_set(&hard_iface->bat_iv.ogm_seqno, 1);
85176+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, 1);
85177 hard_iface->bat_iv.ogm_buff = NULL;
85178
85179 return hard_iface;
85180diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
85181index 6b548fd..fc32c8d 100644
85182--- a/net/batman-adv/soft-interface.c
85183+++ b/net/batman-adv/soft-interface.c
85184@@ -252,7 +252,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
85185 primary_if->net_dev->dev_addr, ETH_ALEN);
85186
85187 /* set broadcast sequence number */
85188- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
85189+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
85190 bcast_packet->seqno = htonl(seqno);
85191
85192 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
85193@@ -497,7 +497,7 @@ struct net_device *batadv_softif_create(const char *name)
85194 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
85195
85196 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
85197- atomic_set(&bat_priv->bcast_seqno, 1);
85198+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
85199 atomic_set(&bat_priv->tt.vn, 0);
85200 atomic_set(&bat_priv->tt.local_changes, 0);
85201 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
85202diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
85203index ae9ac9a..11e0fe7 100644
85204--- a/net/batman-adv/types.h
85205+++ b/net/batman-adv/types.h
85206@@ -48,7 +48,7 @@
85207 struct batadv_hard_iface_bat_iv {
85208 unsigned char *ogm_buff;
85209 int ogm_buff_len;
85210- atomic_t ogm_seqno;
85211+ atomic_unchecked_t ogm_seqno;
85212 };
85213
85214 struct batadv_hard_iface {
85215@@ -56,7 +56,7 @@ struct batadv_hard_iface {
85216 int16_t if_num;
85217 char if_status;
85218 struct net_device *net_dev;
85219- atomic_t frag_seqno;
85220+ atomic_unchecked_t frag_seqno;
85221 struct kobject *hardif_obj;
85222 atomic_t refcount;
85223 struct packet_type batman_adv_ptype;
85224@@ -284,7 +284,7 @@ struct batadv_priv {
85225 atomic_t orig_interval; /* uint */
85226 atomic_t hop_penalty; /* uint */
85227 atomic_t log_level; /* uint */
85228- atomic_t bcast_seqno;
85229+ atomic_unchecked_t bcast_seqno;
85230 atomic_t bcast_queue_left;
85231 atomic_t batman_queue_left;
85232 char num_ifaces;
85233diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
85234index 10aff49..ea8e021 100644
85235--- a/net/batman-adv/unicast.c
85236+++ b/net/batman-adv/unicast.c
85237@@ -272,7 +272,7 @@ int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
85238 frag1->flags = BATADV_UNI_FRAG_HEAD | large_tail;
85239 frag2->flags = large_tail;
85240
85241- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
85242+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
85243 frag1->seqno = htons(seqno - 1);
85244 frag2->seqno = htons(seqno);
85245
85246diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
85247index 07f0739..3c42e34 100644
85248--- a/net/bluetooth/hci_sock.c
85249+++ b/net/bluetooth/hci_sock.c
85250@@ -934,7 +934,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
85251 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
85252 }
85253
85254- len = min_t(unsigned int, len, sizeof(uf));
85255+ len = min((size_t)len, sizeof(uf));
85256 if (copy_from_user(&uf, optval, len)) {
85257 err = -EFAULT;
85258 break;
85259diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
85260index 22e6583..426e2f3 100644
85261--- a/net/bluetooth/l2cap_core.c
85262+++ b/net/bluetooth/l2cap_core.c
85263@@ -3400,8 +3400,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
85264 break;
85265
85266 case L2CAP_CONF_RFC:
85267- if (olen == sizeof(rfc))
85268- memcpy(&rfc, (void *)val, olen);
85269+ if (olen != sizeof(rfc))
85270+ break;
85271+
85272+ memcpy(&rfc, (void *)val, olen);
85273
85274 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
85275 rfc.mode != chan->mode)
85276diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
85277index 1bcfb84..dad9f98 100644
85278--- a/net/bluetooth/l2cap_sock.c
85279+++ b/net/bluetooth/l2cap_sock.c
85280@@ -479,7 +479,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
85281 struct sock *sk = sock->sk;
85282 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
85283 struct l2cap_options opts;
85284- int len, err = 0;
85285+ int err = 0;
85286+ size_t len = optlen;
85287 u32 opt;
85288
85289 BT_DBG("sk %p", sk);
85290@@ -501,7 +502,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
85291 opts.max_tx = chan->max_tx;
85292 opts.txwin_size = chan->tx_win;
85293
85294- len = min_t(unsigned int, sizeof(opts), optlen);
85295+ len = min(sizeof(opts), len);
85296 if (copy_from_user((char *) &opts, optval, len)) {
85297 err = -EFAULT;
85298 break;
85299@@ -581,7 +582,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
85300 struct bt_security sec;
85301 struct bt_power pwr;
85302 struct l2cap_conn *conn;
85303- int len, err = 0;
85304+ int err = 0;
85305+ size_t len = optlen;
85306 u32 opt;
85307
85308 BT_DBG("sk %p", sk);
85309@@ -604,7 +606,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
85310
85311 sec.level = BT_SECURITY_LOW;
85312
85313- len = min_t(unsigned int, sizeof(sec), optlen);
85314+ len = min(sizeof(sec), len);
85315 if (copy_from_user((char *) &sec, optval, len)) {
85316 err = -EFAULT;
85317 break;
85318@@ -701,7 +703,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
85319
85320 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
85321
85322- len = min_t(unsigned int, sizeof(pwr), optlen);
85323+ len = min(sizeof(pwr), len);
85324 if (copy_from_user((char *) &pwr, optval, len)) {
85325 err = -EFAULT;
85326 break;
85327diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
85328index ce3f665..2c7d08f 100644
85329--- a/net/bluetooth/rfcomm/sock.c
85330+++ b/net/bluetooth/rfcomm/sock.c
85331@@ -667,7 +667,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
85332 struct sock *sk = sock->sk;
85333 struct bt_security sec;
85334 int err = 0;
85335- size_t len;
85336+ size_t len = optlen;
85337 u32 opt;
85338
85339 BT_DBG("sk %p", sk);
85340@@ -689,7 +689,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
85341
85342 sec.level = BT_SECURITY_LOW;
85343
85344- len = min_t(unsigned int, sizeof(sec), optlen);
85345+ len = min(sizeof(sec), len);
85346 if (copy_from_user((char *) &sec, optval, len)) {
85347 err = -EFAULT;
85348 break;
85349diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
85350index bd6fd0f..6492cba 100644
85351--- a/net/bluetooth/rfcomm/tty.c
85352+++ b/net/bluetooth/rfcomm/tty.c
85353@@ -309,7 +309,7 @@ static void rfcomm_dev_del(struct rfcomm_dev *dev)
85354 BUG_ON(test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags));
85355
85356 spin_lock_irqsave(&dev->port.lock, flags);
85357- if (dev->port.count > 0) {
85358+ if (atomic_read(&dev->port.count) > 0) {
85359 spin_unlock_irqrestore(&dev->port.lock, flags);
85360 return;
85361 }
85362@@ -664,10 +664,10 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
85363 return -ENODEV;
85364
85365 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
85366- dev->channel, dev->port.count);
85367+ dev->channel, atomic_read(&dev->port.count));
85368
85369 spin_lock_irqsave(&dev->port.lock, flags);
85370- if (++dev->port.count > 1) {
85371+ if (atomic_inc_return(&dev->port.count) > 1) {
85372 spin_unlock_irqrestore(&dev->port.lock, flags);
85373 return 0;
85374 }
85375@@ -732,10 +732,10 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
85376 return;
85377
85378 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
85379- dev->port.count);
85380+ atomic_read(&dev->port.count));
85381
85382 spin_lock_irqsave(&dev->port.lock, flags);
85383- if (!--dev->port.count) {
85384+ if (!atomic_dec_return(&dev->port.count)) {
85385 spin_unlock_irqrestore(&dev->port.lock, flags);
85386 if (dev->tty_dev->parent)
85387 device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST);
85388diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
85389index d9576e6..85f4f4e 100644
85390--- a/net/bridge/br_fdb.c
85391+++ b/net/bridge/br_fdb.c
85392@@ -386,7 +386,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
85393 return 0;
85394 br_warn(br, "adding interface %s with same address "
85395 "as a received packet\n",
85396- source->dev->name);
85397+ source ? source->dev->name : br->dev->name);
85398 fdb_delete(br, fdb);
85399 }
85400
85401diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
85402index 5fe2ff3..121d696 100644
85403--- a/net/bridge/netfilter/ebtables.c
85404+++ b/net/bridge/netfilter/ebtables.c
85405@@ -1523,7 +1523,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
85406 tmp.valid_hooks = t->table->valid_hooks;
85407 }
85408 mutex_unlock(&ebt_mutex);
85409- if (copy_to_user(user, &tmp, *len) != 0){
85410+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
85411 BUGPRINT("c2u Didn't work\n");
85412 ret = -EFAULT;
85413 break;
85414@@ -2327,7 +2327,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
85415 goto out;
85416 tmp.valid_hooks = t->valid_hooks;
85417
85418- if (copy_to_user(user, &tmp, *len) != 0) {
85419+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
85420 ret = -EFAULT;
85421 break;
85422 }
85423@@ -2338,7 +2338,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
85424 tmp.entries_size = t->table->entries_size;
85425 tmp.valid_hooks = t->table->valid_hooks;
85426
85427- if (copy_to_user(user, &tmp, *len) != 0) {
85428+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
85429 ret = -EFAULT;
85430 break;
85431 }
85432diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
85433index a376ec1..1fbd6be 100644
85434--- a/net/caif/cfctrl.c
85435+++ b/net/caif/cfctrl.c
85436@@ -10,6 +10,7 @@
85437 #include <linux/spinlock.h>
85438 #include <linux/slab.h>
85439 #include <linux/pkt_sched.h>
85440+#include <linux/sched.h>
85441 #include <net/caif/caif_layer.h>
85442 #include <net/caif/cfpkt.h>
85443 #include <net/caif/cfctrl.h>
85444@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
85445 memset(&dev_info, 0, sizeof(dev_info));
85446 dev_info.id = 0xff;
85447 cfsrvl_init(&this->serv, 0, &dev_info, false);
85448- atomic_set(&this->req_seq_no, 1);
85449- atomic_set(&this->rsp_seq_no, 1);
85450+ atomic_set_unchecked(&this->req_seq_no, 1);
85451+ atomic_set_unchecked(&this->rsp_seq_no, 1);
85452 this->serv.layer.receive = cfctrl_recv;
85453 sprintf(this->serv.layer.name, "ctrl");
85454 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
85455@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
85456 struct cfctrl_request_info *req)
85457 {
85458 spin_lock_bh(&ctrl->info_list_lock);
85459- atomic_inc(&ctrl->req_seq_no);
85460- req->sequence_no = atomic_read(&ctrl->req_seq_no);
85461+ atomic_inc_unchecked(&ctrl->req_seq_no);
85462+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
85463 list_add_tail(&req->list, &ctrl->list);
85464 spin_unlock_bh(&ctrl->info_list_lock);
85465 }
85466@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
85467 if (p != first)
85468 pr_warn("Requests are not received in order\n");
85469
85470- atomic_set(&ctrl->rsp_seq_no,
85471+ atomic_set_unchecked(&ctrl->rsp_seq_no,
85472 p->sequence_no);
85473 list_del(&p->list);
85474 goto out;
85475diff --git a/net/can/af_can.c b/net/can/af_can.c
85476index ddac1ee..3ee0a78 100644
85477--- a/net/can/af_can.c
85478+++ b/net/can/af_can.c
85479@@ -872,7 +872,7 @@ static const struct net_proto_family can_family_ops = {
85480 };
85481
85482 /* notifier block for netdevice event */
85483-static struct notifier_block can_netdev_notifier __read_mostly = {
85484+static struct notifier_block can_netdev_notifier = {
85485 .notifier_call = can_notifier,
85486 };
85487
85488diff --git a/net/can/gw.c b/net/can/gw.c
85489index 574dda78e..3d2b3da 100644
85490--- a/net/can/gw.c
85491+++ b/net/can/gw.c
85492@@ -67,7 +67,6 @@ MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
85493 MODULE_ALIAS("can-gw");
85494
85495 static HLIST_HEAD(cgw_list);
85496-static struct notifier_block notifier;
85497
85498 static struct kmem_cache *cgw_cache __read_mostly;
85499
85500@@ -893,6 +892,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
85501 return err;
85502 }
85503
85504+static struct notifier_block notifier = {
85505+ .notifier_call = cgw_notifier
85506+};
85507+
85508 static __init int cgw_module_init(void)
85509 {
85510 printk(banner);
85511@@ -904,7 +907,6 @@ static __init int cgw_module_init(void)
85512 return -ENOMEM;
85513
85514 /* set notifier */
85515- notifier.notifier_call = cgw_notifier;
85516 register_netdevice_notifier(&notifier);
85517
85518 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
85519diff --git a/net/compat.c b/net/compat.c
85520index 79ae884..17c5c09 100644
85521--- a/net/compat.c
85522+++ b/net/compat.c
85523@@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
85524 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
85525 __get_user(kmsg->msg_flags, &umsg->msg_flags))
85526 return -EFAULT;
85527- kmsg->msg_name = compat_ptr(tmp1);
85528- kmsg->msg_iov = compat_ptr(tmp2);
85529- kmsg->msg_control = compat_ptr(tmp3);
85530+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
85531+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
85532+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
85533 return 0;
85534 }
85535
85536@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
85537
85538 if (kern_msg->msg_namelen) {
85539 if (mode == VERIFY_READ) {
85540- int err = move_addr_to_kernel(kern_msg->msg_name,
85541+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
85542 kern_msg->msg_namelen,
85543 kern_address);
85544 if (err < 0)
85545@@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
85546 kern_msg->msg_name = NULL;
85547
85548 tot_len = iov_from_user_compat_to_kern(kern_iov,
85549- (struct compat_iovec __user *)kern_msg->msg_iov,
85550+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
85551 kern_msg->msg_iovlen);
85552 if (tot_len >= 0)
85553 kern_msg->msg_iov = kern_iov;
85554@@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
85555
85556 #define CMSG_COMPAT_FIRSTHDR(msg) \
85557 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
85558- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
85559+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
85560 (struct compat_cmsghdr __user *)NULL)
85561
85562 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
85563 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
85564 (ucmlen) <= (unsigned long) \
85565 ((mhdr)->msg_controllen - \
85566- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
85567+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
85568
85569 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
85570 struct compat_cmsghdr __user *cmsg, int cmsg_len)
85571 {
85572 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
85573- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
85574+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
85575 msg->msg_controllen)
85576 return NULL;
85577 return (struct compat_cmsghdr __user *)ptr;
85578@@ -219,7 +219,7 @@ Efault:
85579
85580 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
85581 {
85582- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
85583+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
85584 struct compat_cmsghdr cmhdr;
85585 struct compat_timeval ctv;
85586 struct compat_timespec cts[3];
85587@@ -275,7 +275,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
85588
85589 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
85590 {
85591- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
85592+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
85593 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
85594 int fdnum = scm->fp->count;
85595 struct file **fp = scm->fp->fp;
85596@@ -363,7 +363,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
85597 return -EFAULT;
85598 old_fs = get_fs();
85599 set_fs(KERNEL_DS);
85600- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
85601+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
85602 set_fs(old_fs);
85603
85604 return err;
85605@@ -424,7 +424,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
85606 len = sizeof(ktime);
85607 old_fs = get_fs();
85608 set_fs(KERNEL_DS);
85609- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
85610+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
85611 set_fs(old_fs);
85612
85613 if (!err) {
85614@@ -567,7 +567,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
85615 case MCAST_JOIN_GROUP:
85616 case MCAST_LEAVE_GROUP:
85617 {
85618- struct compat_group_req __user *gr32 = (void *)optval;
85619+ struct compat_group_req __user *gr32 = (void __user *)optval;
85620 struct group_req __user *kgr =
85621 compat_alloc_user_space(sizeof(struct group_req));
85622 u32 interface;
85623@@ -588,7 +588,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
85624 case MCAST_BLOCK_SOURCE:
85625 case MCAST_UNBLOCK_SOURCE:
85626 {
85627- struct compat_group_source_req __user *gsr32 = (void *)optval;
85628+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
85629 struct group_source_req __user *kgsr = compat_alloc_user_space(
85630 sizeof(struct group_source_req));
85631 u32 interface;
85632@@ -609,7 +609,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
85633 }
85634 case MCAST_MSFILTER:
85635 {
85636- struct compat_group_filter __user *gf32 = (void *)optval;
85637+ struct compat_group_filter __user *gf32 = (void __user *)optval;
85638 struct group_filter __user *kgf;
85639 u32 interface, fmode, numsrc;
85640
85641@@ -647,7 +647,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
85642 char __user *optval, int __user *optlen,
85643 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
85644 {
85645- struct compat_group_filter __user *gf32 = (void *)optval;
85646+ struct compat_group_filter __user *gf32 = (void __user *)optval;
85647 struct group_filter __user *kgf;
85648 int __user *koptlen;
85649 u32 interface, fmode, numsrc;
85650@@ -796,7 +796,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
85651
85652 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
85653 return -EINVAL;
85654- if (copy_from_user(a, args, nas[call]))
85655+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
85656 return -EFAULT;
85657 a0 = a[0];
85658 a1 = a[1];
85659diff --git a/net/core/datagram.c b/net/core/datagram.c
85660index 368f9c3..f82d4a3 100644
85661--- a/net/core/datagram.c
85662+++ b/net/core/datagram.c
85663@@ -289,7 +289,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
85664 }
85665
85666 kfree_skb(skb);
85667- atomic_inc(&sk->sk_drops);
85668+ atomic_inc_unchecked(&sk->sk_drops);
85669 sk_mem_reclaim_partial(sk);
85670
85671 return err;
85672diff --git a/net/core/dev.c b/net/core/dev.c
85673index 5d9c43d..b471558 100644
85674--- a/net/core/dev.c
85675+++ b/net/core/dev.c
85676@@ -1250,9 +1250,13 @@ void dev_load(struct net *net, const char *name)
85677 if (no_module && capable(CAP_NET_ADMIN))
85678 no_module = request_module("netdev-%s", name);
85679 if (no_module && capable(CAP_SYS_MODULE)) {
85680+#ifdef CONFIG_GRKERNSEC_MODHARDEN
85681+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
85682+#else
85683 if (!request_module("%s", name))
85684 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
85685 name);
85686+#endif
85687 }
85688 }
85689 EXPORT_SYMBOL(dev_load);
85690@@ -1714,7 +1718,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
85691 {
85692 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
85693 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
85694- atomic_long_inc(&dev->rx_dropped);
85695+ atomic_long_inc_unchecked(&dev->rx_dropped);
85696 kfree_skb(skb);
85697 return NET_RX_DROP;
85698 }
85699@@ -1724,7 +1728,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
85700 nf_reset(skb);
85701
85702 if (unlikely(!is_skb_forwardable(dev, skb))) {
85703- atomic_long_inc(&dev->rx_dropped);
85704+ atomic_long_inc_unchecked(&dev->rx_dropped);
85705 kfree_skb(skb);
85706 return NET_RX_DROP;
85707 }
85708@@ -2179,7 +2183,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
85709
85710 struct dev_gso_cb {
85711 void (*destructor)(struct sk_buff *skb);
85712-};
85713+} __no_const;
85714
85715 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
85716
85717@@ -3052,7 +3056,7 @@ enqueue:
85718
85719 local_irq_restore(flags);
85720
85721- atomic_long_inc(&skb->dev->rx_dropped);
85722+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
85723 kfree_skb(skb);
85724 return NET_RX_DROP;
85725 }
85726@@ -3124,7 +3128,7 @@ int netif_rx_ni(struct sk_buff *skb)
85727 }
85728 EXPORT_SYMBOL(netif_rx_ni);
85729
85730-static void net_tx_action(struct softirq_action *h)
85731+static void net_tx_action(void)
85732 {
85733 struct softnet_data *sd = &__get_cpu_var(softnet_data);
85734
85735@@ -3462,7 +3466,7 @@ ncls:
85736 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
85737 } else {
85738 drop:
85739- atomic_long_inc(&skb->dev->rx_dropped);
85740+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
85741 kfree_skb(skb);
85742 /* Jamal, now you will not able to escape explaining
85743 * me how you were going to use this. :-)
85744@@ -4045,7 +4049,7 @@ void netif_napi_del(struct napi_struct *napi)
85745 }
85746 EXPORT_SYMBOL(netif_napi_del);
85747
85748-static void net_rx_action(struct softirq_action *h)
85749+static void net_rx_action(void)
85750 {
85751 struct softnet_data *sd = &__get_cpu_var(softnet_data);
85752 unsigned long time_limit = jiffies + 2;
85753@@ -4529,8 +4533,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
85754 else
85755 seq_printf(seq, "%04x", ntohs(pt->type));
85756
85757+#ifdef CONFIG_GRKERNSEC_HIDESYM
85758+ seq_printf(seq, " %-8s %p\n",
85759+ pt->dev ? pt->dev->name : "", NULL);
85760+#else
85761 seq_printf(seq, " %-8s %pF\n",
85762 pt->dev ? pt->dev->name : "", pt->func);
85763+#endif
85764 }
85765
85766 return 0;
85767@@ -6102,7 +6111,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
85768 } else {
85769 netdev_stats_to_stats64(storage, &dev->stats);
85770 }
85771- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
85772+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
85773 return storage;
85774 }
85775 EXPORT_SYMBOL(dev_get_stats);
85776diff --git a/net/core/flow.c b/net/core/flow.c
85777index 3bad824..2071a55 100644
85778--- a/net/core/flow.c
85779+++ b/net/core/flow.c
85780@@ -61,7 +61,7 @@ struct flow_cache {
85781 struct timer_list rnd_timer;
85782 };
85783
85784-atomic_t flow_cache_genid = ATOMIC_INIT(0);
85785+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
85786 EXPORT_SYMBOL(flow_cache_genid);
85787 static struct flow_cache flow_cache_global;
85788 static struct kmem_cache *flow_cachep __read_mostly;
85789@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
85790
85791 static int flow_entry_valid(struct flow_cache_entry *fle)
85792 {
85793- if (atomic_read(&flow_cache_genid) != fle->genid)
85794+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
85795 return 0;
85796 if (fle->object && !fle->object->ops->check(fle->object))
85797 return 0;
85798@@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
85799 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
85800 fcp->hash_count++;
85801 }
85802- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
85803+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
85804 flo = fle->object;
85805 if (!flo)
85806 goto ret_object;
85807@@ -280,7 +280,7 @@ nocache:
85808 }
85809 flo = resolver(net, key, family, dir, flo, ctx);
85810 if (fle) {
85811- fle->genid = atomic_read(&flow_cache_genid);
85812+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
85813 if (!IS_ERR(flo))
85814 fle->object = flo;
85815 else
85816diff --git a/net/core/iovec.c b/net/core/iovec.c
85817index 7e7aeb0..2a998cb 100644
85818--- a/net/core/iovec.c
85819+++ b/net/core/iovec.c
85820@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
85821 if (m->msg_namelen) {
85822 if (mode == VERIFY_READ) {
85823 void __user *namep;
85824- namep = (void __user __force *) m->msg_name;
85825+ namep = (void __force_user *) m->msg_name;
85826 err = move_addr_to_kernel(namep, m->msg_namelen,
85827 address);
85828 if (err < 0)
85829@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
85830 }
85831
85832 size = m->msg_iovlen * sizeof(struct iovec);
85833- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
85834+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
85835 return -EFAULT;
85836
85837 m->msg_iov = iov;
85838diff --git a/net/core/neighbour.c b/net/core/neighbour.c
85839index c815f28..e6403f2 100644
85840--- a/net/core/neighbour.c
85841+++ b/net/core/neighbour.c
85842@@ -2776,7 +2776,7 @@ static int proc_unres_qlen(ctl_table *ctl, int write, void __user *buffer,
85843 size_t *lenp, loff_t *ppos)
85844 {
85845 int size, ret;
85846- ctl_table tmp = *ctl;
85847+ ctl_table_no_const tmp = *ctl;
85848
85849 tmp.extra1 = &zero;
85850 tmp.extra2 = &unres_qlen_max;
85851diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
85852index 28c5f5a..7edf2e2 100644
85853--- a/net/core/net-sysfs.c
85854+++ b/net/core/net-sysfs.c
85855@@ -1455,7 +1455,7 @@ void netdev_class_remove_file(struct class_attribute *class_attr)
85856 }
85857 EXPORT_SYMBOL(netdev_class_remove_file);
85858
85859-int netdev_kobject_init(void)
85860+int __init netdev_kobject_init(void)
85861 {
85862 kobj_ns_type_register(&net_ns_type_operations);
85863 return class_register(&net_class);
85864diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
85865index 8acce01..2e306bb 100644
85866--- a/net/core/net_namespace.c
85867+++ b/net/core/net_namespace.c
85868@@ -442,7 +442,7 @@ static int __register_pernet_operations(struct list_head *list,
85869 int error;
85870 LIST_HEAD(net_exit_list);
85871
85872- list_add_tail(&ops->list, list);
85873+ pax_list_add_tail((struct list_head *)&ops->list, list);
85874 if (ops->init || (ops->id && ops->size)) {
85875 for_each_net(net) {
85876 error = ops_init(ops, net);
85877@@ -455,7 +455,7 @@ static int __register_pernet_operations(struct list_head *list,
85878
85879 out_undo:
85880 /* If I have an error cleanup all namespaces I initialized */
85881- list_del(&ops->list);
85882+ pax_list_del((struct list_head *)&ops->list);
85883 ops_exit_list(ops, &net_exit_list);
85884 ops_free_list(ops, &net_exit_list);
85885 return error;
85886@@ -466,7 +466,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
85887 struct net *net;
85888 LIST_HEAD(net_exit_list);
85889
85890- list_del(&ops->list);
85891+ pax_list_del((struct list_head *)&ops->list);
85892 for_each_net(net)
85893 list_add_tail(&net->exit_list, &net_exit_list);
85894 ops_exit_list(ops, &net_exit_list);
85895@@ -600,7 +600,7 @@ int register_pernet_device(struct pernet_operations *ops)
85896 mutex_lock(&net_mutex);
85897 error = register_pernet_operations(&pernet_list, ops);
85898 if (!error && (first_device == &pernet_list))
85899- first_device = &ops->list;
85900+ first_device = (struct list_head *)&ops->list;
85901 mutex_unlock(&net_mutex);
85902 return error;
85903 }
85904diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
85905index 6212ec9..dd4ad3b 100644
85906--- a/net/core/rtnetlink.c
85907+++ b/net/core/rtnetlink.c
85908@@ -58,7 +58,7 @@ struct rtnl_link {
85909 rtnl_doit_func doit;
85910 rtnl_dumpit_func dumpit;
85911 rtnl_calcit_func calcit;
85912-};
85913+} __no_const;
85914
85915 static DEFINE_MUTEX(rtnl_mutex);
85916
85917@@ -299,10 +299,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
85918 if (rtnl_link_ops_get(ops->kind))
85919 return -EEXIST;
85920
85921- if (!ops->dellink)
85922- ops->dellink = unregister_netdevice_queue;
85923+ if (!ops->dellink) {
85924+ pax_open_kernel();
85925+ *(void **)&ops->dellink = unregister_netdevice_queue;
85926+ pax_close_kernel();
85927+ }
85928
85929- list_add_tail(&ops->list, &link_ops);
85930+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
85931 return 0;
85932 }
85933 EXPORT_SYMBOL_GPL(__rtnl_link_register);
85934@@ -349,7 +352,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
85935 for_each_net(net) {
85936 __rtnl_kill_links(net, ops);
85937 }
85938- list_del(&ops->list);
85939+ pax_list_del((struct list_head *)&ops->list);
85940 }
85941 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
85942
85943diff --git a/net/core/scm.c b/net/core/scm.c
85944index 2dc6cda..2159524 100644
85945--- a/net/core/scm.c
85946+++ b/net/core/scm.c
85947@@ -226,7 +226,7 @@ EXPORT_SYMBOL(__scm_send);
85948 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
85949 {
85950 struct cmsghdr __user *cm
85951- = (__force struct cmsghdr __user *)msg->msg_control;
85952+ = (struct cmsghdr __force_user *)msg->msg_control;
85953 struct cmsghdr cmhdr;
85954 int cmlen = CMSG_LEN(len);
85955 int err;
85956@@ -249,7 +249,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
85957 err = -EFAULT;
85958 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
85959 goto out;
85960- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
85961+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
85962 goto out;
85963 cmlen = CMSG_SPACE(len);
85964 if (msg->msg_controllen < cmlen)
85965@@ -265,7 +265,7 @@ EXPORT_SYMBOL(put_cmsg);
85966 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
85967 {
85968 struct cmsghdr __user *cm
85969- = (__force struct cmsghdr __user*)msg->msg_control;
85970+ = (struct cmsghdr __force_user *)msg->msg_control;
85971
85972 int fdmax = 0;
85973 int fdnum = scm->fp->count;
85974@@ -285,7 +285,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
85975 if (fdnum < fdmax)
85976 fdmax = fdnum;
85977
85978- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
85979+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
85980 i++, cmfptr++)
85981 {
85982 struct socket *sock;
85983diff --git a/net/core/sock.c b/net/core/sock.c
85984index bc131d4..029e378 100644
85985--- a/net/core/sock.c
85986+++ b/net/core/sock.c
85987@@ -388,7 +388,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
85988 struct sk_buff_head *list = &sk->sk_receive_queue;
85989
85990 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
85991- atomic_inc(&sk->sk_drops);
85992+ atomic_inc_unchecked(&sk->sk_drops);
85993 trace_sock_rcvqueue_full(sk, skb);
85994 return -ENOMEM;
85995 }
85996@@ -398,7 +398,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
85997 return err;
85998
85999 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
86000- atomic_inc(&sk->sk_drops);
86001+ atomic_inc_unchecked(&sk->sk_drops);
86002 return -ENOBUFS;
86003 }
86004
86005@@ -418,7 +418,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
86006 skb_dst_force(skb);
86007
86008 spin_lock_irqsave(&list->lock, flags);
86009- skb->dropcount = atomic_read(&sk->sk_drops);
86010+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
86011 __skb_queue_tail(list, skb);
86012 spin_unlock_irqrestore(&list->lock, flags);
86013
86014@@ -438,7 +438,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
86015 skb->dev = NULL;
86016
86017 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
86018- atomic_inc(&sk->sk_drops);
86019+ atomic_inc_unchecked(&sk->sk_drops);
86020 goto discard_and_relse;
86021 }
86022 if (nested)
86023@@ -456,7 +456,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
86024 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
86025 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
86026 bh_unlock_sock(sk);
86027- atomic_inc(&sk->sk_drops);
86028+ atomic_inc_unchecked(&sk->sk_drops);
86029 goto discard_and_relse;
86030 }
86031
86032@@ -930,12 +930,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
86033 struct timeval tm;
86034 } v;
86035
86036- int lv = sizeof(int);
86037- int len;
86038+ unsigned int lv = sizeof(int);
86039+ unsigned int len;
86040
86041 if (get_user(len, optlen))
86042 return -EFAULT;
86043- if (len < 0)
86044+ if (len > INT_MAX)
86045 return -EINVAL;
86046
86047 memset(&v, 0, sizeof(v));
86048@@ -1083,11 +1083,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
86049
86050 case SO_PEERNAME:
86051 {
86052- char address[128];
86053+ char address[_K_SS_MAXSIZE];
86054
86055 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
86056 return -ENOTCONN;
86057- if (lv < len)
86058+ if (lv < len || sizeof address < len)
86059 return -EINVAL;
86060 if (copy_to_user(optval, address, len))
86061 return -EFAULT;
86062@@ -1146,7 +1146,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
86063
86064 if (len > lv)
86065 len = lv;
86066- if (copy_to_user(optval, &v, len))
86067+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
86068 return -EFAULT;
86069 lenout:
86070 if (put_user(len, optlen))
86071@@ -2276,7 +2276,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
86072 */
86073 smp_wmb();
86074 atomic_set(&sk->sk_refcnt, 1);
86075- atomic_set(&sk->sk_drops, 0);
86076+ atomic_set_unchecked(&sk->sk_drops, 0);
86077 }
86078 EXPORT_SYMBOL(sock_init_data);
86079
86080diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
86081index 750f44f..922399c 100644
86082--- a/net/core/sock_diag.c
86083+++ b/net/core/sock_diag.c
86084@@ -9,26 +9,33 @@
86085 #include <linux/inet_diag.h>
86086 #include <linux/sock_diag.h>
86087
86088-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
86089+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
86090 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
86091 static DEFINE_MUTEX(sock_diag_table_mutex);
86092
86093 int sock_diag_check_cookie(void *sk, __u32 *cookie)
86094 {
86095+#ifndef CONFIG_GRKERNSEC_HIDESYM
86096 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
86097 cookie[1] != INET_DIAG_NOCOOKIE) &&
86098 ((u32)(unsigned long)sk != cookie[0] ||
86099 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
86100 return -ESTALE;
86101 else
86102+#endif
86103 return 0;
86104 }
86105 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
86106
86107 void sock_diag_save_cookie(void *sk, __u32 *cookie)
86108 {
86109+#ifdef CONFIG_GRKERNSEC_HIDESYM
86110+ cookie[0] = 0;
86111+ cookie[1] = 0;
86112+#else
86113 cookie[0] = (u32)(unsigned long)sk;
86114 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
86115+#endif
86116 }
86117 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
86118
86119@@ -75,8 +82,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
86120 mutex_lock(&sock_diag_table_mutex);
86121 if (sock_diag_handlers[hndl->family])
86122 err = -EBUSY;
86123- else
86124+ else {
86125+ pax_open_kernel();
86126 sock_diag_handlers[hndl->family] = hndl;
86127+ pax_close_kernel();
86128+ }
86129 mutex_unlock(&sock_diag_table_mutex);
86130
86131 return err;
86132@@ -92,26 +102,13 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
86133
86134 mutex_lock(&sock_diag_table_mutex);
86135 BUG_ON(sock_diag_handlers[family] != hnld);
86136+ pax_open_kernel();
86137 sock_diag_handlers[family] = NULL;
86138+ pax_close_kernel();
86139 mutex_unlock(&sock_diag_table_mutex);
86140 }
86141 EXPORT_SYMBOL_GPL(sock_diag_unregister);
86142
86143-static const inline struct sock_diag_handler *sock_diag_lock_handler(int family)
86144-{
86145- if (sock_diag_handlers[family] == NULL)
86146- request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
86147- NETLINK_SOCK_DIAG, family);
86148-
86149- mutex_lock(&sock_diag_table_mutex);
86150- return sock_diag_handlers[family];
86151-}
86152-
86153-static inline void sock_diag_unlock_handler(const struct sock_diag_handler *h)
86154-{
86155- mutex_unlock(&sock_diag_table_mutex);
86156-}
86157-
86158 static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
86159 {
86160 int err;
86161@@ -124,12 +121,17 @@ static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
86162 if (req->sdiag_family >= AF_MAX)
86163 return -EINVAL;
86164
86165- hndl = sock_diag_lock_handler(req->sdiag_family);
86166+ if (sock_diag_handlers[req->sdiag_family] == NULL)
86167+ request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
86168+ NETLINK_SOCK_DIAG, req->sdiag_family);
86169+
86170+ mutex_lock(&sock_diag_table_mutex);
86171+ hndl = sock_diag_handlers[req->sdiag_family];
86172 if (hndl == NULL)
86173 err = -ENOENT;
86174 else
86175 err = hndl->dump(skb, nlh);
86176- sock_diag_unlock_handler(hndl);
86177+ mutex_unlock(&sock_diag_table_mutex);
86178
86179 return err;
86180 }
86181diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
86182index d1b0804..98cf5f7 100644
86183--- a/net/core/sysctl_net_core.c
86184+++ b/net/core/sysctl_net_core.c
86185@@ -26,7 +26,7 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write,
86186 {
86187 unsigned int orig_size, size;
86188 int ret, i;
86189- ctl_table tmp = {
86190+ ctl_table_no_const tmp = {
86191 .data = &size,
86192 .maxlen = sizeof(size),
86193 .mode = table->mode
86194@@ -205,13 +205,12 @@ static struct ctl_table netns_core_table[] = {
86195
86196 static __net_init int sysctl_core_net_init(struct net *net)
86197 {
86198- struct ctl_table *tbl;
86199+ ctl_table_no_const *tbl = NULL;
86200
86201 net->core.sysctl_somaxconn = SOMAXCONN;
86202
86203- tbl = netns_core_table;
86204 if (!net_eq(net, &init_net)) {
86205- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
86206+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
86207 if (tbl == NULL)
86208 goto err_dup;
86209
86210@@ -221,17 +220,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
86211 if (net->user_ns != &init_user_ns) {
86212 tbl[0].procname = NULL;
86213 }
86214- }
86215-
86216- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
86217+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
86218+ } else
86219+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
86220 if (net->core.sysctl_hdr == NULL)
86221 goto err_reg;
86222
86223 return 0;
86224
86225 err_reg:
86226- if (tbl != netns_core_table)
86227- kfree(tbl);
86228+ kfree(tbl);
86229 err_dup:
86230 return -ENOMEM;
86231 }
86232@@ -246,7 +244,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
86233 kfree(tbl);
86234 }
86235
86236-static __net_initdata struct pernet_operations sysctl_core_ops = {
86237+static __net_initconst struct pernet_operations sysctl_core_ops = {
86238 .init = sysctl_core_net_init,
86239 .exit = sysctl_core_net_exit,
86240 };
86241diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
86242index 307c322..78a4c6f 100644
86243--- a/net/decnet/af_decnet.c
86244+++ b/net/decnet/af_decnet.c
86245@@ -468,6 +468,7 @@ static struct proto dn_proto = {
86246 .sysctl_rmem = sysctl_decnet_rmem,
86247 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
86248 .obj_size = sizeof(struct dn_sock),
86249+ .slab_flags = SLAB_USERCOPY,
86250 };
86251
86252 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
86253diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
86254index a55eecc..dd8428c 100644
86255--- a/net/decnet/sysctl_net_decnet.c
86256+++ b/net/decnet/sysctl_net_decnet.c
86257@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
86258
86259 if (len > *lenp) len = *lenp;
86260
86261- if (copy_to_user(buffer, addr, len))
86262+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
86263 return -EFAULT;
86264
86265 *lenp = len;
86266@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
86267
86268 if (len > *lenp) len = *lenp;
86269
86270- if (copy_to_user(buffer, devname, len))
86271+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
86272 return -EFAULT;
86273
86274 *lenp = len;
86275diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
86276index fcf104e..95552d4 100644
86277--- a/net/ipv4/af_inet.c
86278+++ b/net/ipv4/af_inet.c
86279@@ -1717,13 +1717,9 @@ static int __init inet_init(void)
86280
86281 BUILD_BUG_ON(sizeof(struct inet_skb_parm) > sizeof(dummy_skb->cb));
86282
86283- sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
86284- if (!sysctl_local_reserved_ports)
86285- goto out;
86286-
86287 rc = proto_register(&tcp_prot, 1);
86288 if (rc)
86289- goto out_free_reserved_ports;
86290+ goto out;
86291
86292 rc = proto_register(&udp_prot, 1);
86293 if (rc)
86294@@ -1832,8 +1828,6 @@ out_unregister_udp_proto:
86295 proto_unregister(&udp_prot);
86296 out_unregister_tcp_proto:
86297 proto_unregister(&tcp_prot);
86298-out_free_reserved_ports:
86299- kfree(sysctl_local_reserved_ports);
86300 goto out;
86301 }
86302
86303diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
86304index a69b4e4..dbccba5 100644
86305--- a/net/ipv4/ah4.c
86306+++ b/net/ipv4/ah4.c
86307@@ -421,7 +421,7 @@ static void ah4_err(struct sk_buff *skb, u32 info)
86308 return;
86309
86310 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
86311- atomic_inc(&flow_cache_genid);
86312+ atomic_inc_unchecked(&flow_cache_genid);
86313 rt_genid_bump(net);
86314
86315 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0);
86316diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
86317index a8e4f26..25e5f40 100644
86318--- a/net/ipv4/devinet.c
86319+++ b/net/ipv4/devinet.c
86320@@ -1763,7 +1763,7 @@ static int ipv4_doint_and_flush(ctl_table *ctl, int write,
86321 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
86322 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
86323
86324-static struct devinet_sysctl_table {
86325+static const struct devinet_sysctl_table {
86326 struct ctl_table_header *sysctl_header;
86327 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
86328 } devinet_sysctl = {
86329@@ -1881,7 +1881,7 @@ static __net_init int devinet_init_net(struct net *net)
86330 int err;
86331 struct ipv4_devconf *all, *dflt;
86332 #ifdef CONFIG_SYSCTL
86333- struct ctl_table *tbl = ctl_forward_entry;
86334+ ctl_table_no_const *tbl = NULL;
86335 struct ctl_table_header *forw_hdr;
86336 #endif
86337
86338@@ -1899,7 +1899,7 @@ static __net_init int devinet_init_net(struct net *net)
86339 goto err_alloc_dflt;
86340
86341 #ifdef CONFIG_SYSCTL
86342- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
86343+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
86344 if (tbl == NULL)
86345 goto err_alloc_ctl;
86346
86347@@ -1919,7 +1919,10 @@ static __net_init int devinet_init_net(struct net *net)
86348 goto err_reg_dflt;
86349
86350 err = -ENOMEM;
86351- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
86352+ if (!net_eq(net, &init_net))
86353+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
86354+ else
86355+ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
86356 if (forw_hdr == NULL)
86357 goto err_reg_ctl;
86358 net->ipv4.forw_hdr = forw_hdr;
86359@@ -1935,8 +1938,7 @@ err_reg_ctl:
86360 err_reg_dflt:
86361 __devinet_sysctl_unregister(all);
86362 err_reg_all:
86363- if (tbl != ctl_forward_entry)
86364- kfree(tbl);
86365+ kfree(tbl);
86366 err_alloc_ctl:
86367 #endif
86368 if (dflt != &ipv4_devconf_dflt)
86369diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
86370index 3b4f0cd..8cb864c 100644
86371--- a/net/ipv4/esp4.c
86372+++ b/net/ipv4/esp4.c
86373@@ -503,7 +503,7 @@ static void esp4_err(struct sk_buff *skb, u32 info)
86374 return;
86375
86376 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
86377- atomic_inc(&flow_cache_genid);
86378+ atomic_inc_unchecked(&flow_cache_genid);
86379 rt_genid_bump(net);
86380
86381 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
86382diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
86383index 5cd75e2..f57ef39 100644
86384--- a/net/ipv4/fib_frontend.c
86385+++ b/net/ipv4/fib_frontend.c
86386@@ -1020,12 +1020,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
86387 #ifdef CONFIG_IP_ROUTE_MULTIPATH
86388 fib_sync_up(dev);
86389 #endif
86390- atomic_inc(&net->ipv4.dev_addr_genid);
86391+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
86392 rt_cache_flush(dev_net(dev));
86393 break;
86394 case NETDEV_DOWN:
86395 fib_del_ifaddr(ifa, NULL);
86396- atomic_inc(&net->ipv4.dev_addr_genid);
86397+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
86398 if (ifa->ifa_dev->ifa_list == NULL) {
86399 /* Last address was deleted from this interface.
86400 * Disable IP.
86401@@ -1061,7 +1061,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
86402 #ifdef CONFIG_IP_ROUTE_MULTIPATH
86403 fib_sync_up(dev);
86404 #endif
86405- atomic_inc(&net->ipv4.dev_addr_genid);
86406+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
86407 rt_cache_flush(net);
86408 break;
86409 case NETDEV_DOWN:
86410diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
86411index 4797a80..2bd54e9 100644
86412--- a/net/ipv4/fib_semantics.c
86413+++ b/net/ipv4/fib_semantics.c
86414@@ -767,7 +767,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
86415 nh->nh_saddr = inet_select_addr(nh->nh_dev,
86416 nh->nh_gw,
86417 nh->nh_parent->fib_scope);
86418- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
86419+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
86420
86421 return nh->nh_saddr;
86422 }
86423diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
86424index d0670f0..744ac80 100644
86425--- a/net/ipv4/inet_connection_sock.c
86426+++ b/net/ipv4/inet_connection_sock.c
86427@@ -37,7 +37,7 @@ struct local_ports sysctl_local_ports __read_mostly = {
86428 .range = { 32768, 61000 },
86429 };
86430
86431-unsigned long *sysctl_local_reserved_ports;
86432+unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
86433 EXPORT_SYMBOL(sysctl_local_reserved_ports);
86434
86435 void inet_get_local_port_range(int *low, int *high)
86436diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
86437index fa3ae81..0dbe6b8 100644
86438--- a/net/ipv4/inet_hashtables.c
86439+++ b/net/ipv4/inet_hashtables.c
86440@@ -18,12 +18,15 @@
86441 #include <linux/sched.h>
86442 #include <linux/slab.h>
86443 #include <linux/wait.h>
86444+#include <linux/security.h>
86445
86446 #include <net/inet_connection_sock.h>
86447 #include <net/inet_hashtables.h>
86448 #include <net/secure_seq.h>
86449 #include <net/ip.h>
86450
86451+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
86452+
86453 /*
86454 * Allocate and initialize a new local port bind bucket.
86455 * The bindhash mutex for snum's hash chain must be held here.
86456@@ -540,6 +543,8 @@ ok:
86457 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
86458 spin_unlock(&head->lock);
86459
86460+ gr_update_task_in_ip_table(current, inet_sk(sk));
86461+
86462 if (tw) {
86463 inet_twsk_deschedule(tw, death_row);
86464 while (twrefcnt) {
86465diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
86466index 000e3d2..5472da3 100644
86467--- a/net/ipv4/inetpeer.c
86468+++ b/net/ipv4/inetpeer.c
86469@@ -503,8 +503,8 @@ relookup:
86470 if (p) {
86471 p->daddr = *daddr;
86472 atomic_set(&p->refcnt, 1);
86473- atomic_set(&p->rid, 0);
86474- atomic_set(&p->ip_id_count,
86475+ atomic_set_unchecked(&p->rid, 0);
86476+ atomic_set_unchecked(&p->ip_id_count,
86477 (daddr->family == AF_INET) ?
86478 secure_ip_id(daddr->addr.a4) :
86479 secure_ipv6_id(daddr->addr.a6));
86480diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
86481index a8fc332..4ca4ca65 100644
86482--- a/net/ipv4/ip_fragment.c
86483+++ b/net/ipv4/ip_fragment.c
86484@@ -319,7 +319,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
86485 return 0;
86486
86487 start = qp->rid;
86488- end = atomic_inc_return(&peer->rid);
86489+ end = atomic_inc_return_unchecked(&peer->rid);
86490 qp->rid = end;
86491
86492 rc = qp->q.fragments && (end - start) > max;
86493@@ -786,12 +786,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
86494
86495 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
86496 {
86497- struct ctl_table *table;
86498+ ctl_table_no_const *table = NULL;
86499 struct ctl_table_header *hdr;
86500
86501- table = ip4_frags_ns_ctl_table;
86502 if (!net_eq(net, &init_net)) {
86503- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
86504+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
86505 if (table == NULL)
86506 goto err_alloc;
86507
86508@@ -802,9 +801,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
86509 /* Don't export sysctls to unprivileged users */
86510 if (net->user_ns != &init_user_ns)
86511 table[0].procname = NULL;
86512- }
86513+ hdr = register_net_sysctl(net, "net/ipv4", table);
86514+ } else
86515+ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
86516
86517- hdr = register_net_sysctl(net, "net/ipv4", table);
86518 if (hdr == NULL)
86519 goto err_reg;
86520
86521@@ -812,8 +812,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
86522 return 0;
86523
86524 err_reg:
86525- if (!net_eq(net, &init_net))
86526- kfree(table);
86527+ kfree(table);
86528 err_alloc:
86529 return -ENOMEM;
86530 }
86531diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
86532index a85062b..2958a9b 100644
86533--- a/net/ipv4/ip_gre.c
86534+++ b/net/ipv4/ip_gre.c
86535@@ -124,7 +124,7 @@ static bool log_ecn_error = true;
86536 module_param(log_ecn_error, bool, 0644);
86537 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
86538
86539-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
86540+static struct rtnl_link_ops ipgre_link_ops;
86541 static int ipgre_tunnel_init(struct net_device *dev);
86542 static void ipgre_tunnel_setup(struct net_device *dev);
86543 static int ipgre_tunnel_bind_dev(struct net_device *dev);
86544@@ -1753,7 +1753,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
86545 [IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
86546 };
86547
86548-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
86549+static struct rtnl_link_ops ipgre_link_ops = {
86550 .kind = "gre",
86551 .maxtype = IFLA_GRE_MAX,
86552 .policy = ipgre_policy,
86553@@ -1766,7 +1766,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
86554 .fill_info = ipgre_fill_info,
86555 };
86556
86557-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
86558+static struct rtnl_link_ops ipgre_tap_ops = {
86559 .kind = "gretap",
86560 .maxtype = IFLA_GRE_MAX,
86561 .policy = ipgre_policy,
86562diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
86563index d9c4f11..02b82dbc 100644
86564--- a/net/ipv4/ip_sockglue.c
86565+++ b/net/ipv4/ip_sockglue.c
86566@@ -1152,7 +1152,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
86567 len = min_t(unsigned int, len, opt->optlen);
86568 if (put_user(len, optlen))
86569 return -EFAULT;
86570- if (copy_to_user(optval, opt->__data, len))
86571+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
86572+ copy_to_user(optval, opt->__data, len))
86573 return -EFAULT;
86574 return 0;
86575 }
86576@@ -1283,7 +1284,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
86577 if (sk->sk_type != SOCK_STREAM)
86578 return -ENOPROTOOPT;
86579
86580- msg.msg_control = optval;
86581+ msg.msg_control = (void __force_kernel *)optval;
86582 msg.msg_controllen = len;
86583 msg.msg_flags = flags;
86584
86585diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
86586index c3a4233..1412161 100644
86587--- a/net/ipv4/ip_vti.c
86588+++ b/net/ipv4/ip_vti.c
86589@@ -47,7 +47,7 @@
86590 #define HASH_SIZE 16
86591 #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&(HASH_SIZE-1))
86592
86593-static struct rtnl_link_ops vti_link_ops __read_mostly;
86594+static struct rtnl_link_ops vti_link_ops;
86595
86596 static int vti_net_id __read_mostly;
86597 struct vti_net {
86598@@ -886,7 +886,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
86599 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
86600 };
86601
86602-static struct rtnl_link_ops vti_link_ops __read_mostly = {
86603+static struct rtnl_link_ops vti_link_ops = {
86604 .kind = "vti",
86605 .maxtype = IFLA_VTI_MAX,
86606 .policy = vti_policy,
86607diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
86608index 9a46dae..5f793a0 100644
86609--- a/net/ipv4/ipcomp.c
86610+++ b/net/ipv4/ipcomp.c
86611@@ -48,7 +48,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
86612 return;
86613
86614 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
86615- atomic_inc(&flow_cache_genid);
86616+ atomic_inc_unchecked(&flow_cache_genid);
86617 rt_genid_bump(net);
86618
86619 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_COMP, 0);
86620diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
86621index a2e50ae..e152b7c 100644
86622--- a/net/ipv4/ipconfig.c
86623+++ b/net/ipv4/ipconfig.c
86624@@ -323,7 +323,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
86625
86626 mm_segment_t oldfs = get_fs();
86627 set_fs(get_ds());
86628- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
86629+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
86630 set_fs(oldfs);
86631 return res;
86632 }
86633@@ -334,7 +334,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
86634
86635 mm_segment_t oldfs = get_fs();
86636 set_fs(get_ds());
86637- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
86638+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
86639 set_fs(oldfs);
86640 return res;
86641 }
86642@@ -345,7 +345,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
86643
86644 mm_segment_t oldfs = get_fs();
86645 set_fs(get_ds());
86646- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
86647+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
86648 set_fs(oldfs);
86649 return res;
86650 }
86651diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
86652index 191fc24..1b3b804 100644
86653--- a/net/ipv4/ipip.c
86654+++ b/net/ipv4/ipip.c
86655@@ -138,7 +138,7 @@ struct ipip_net {
86656 static int ipip_tunnel_init(struct net_device *dev);
86657 static void ipip_tunnel_setup(struct net_device *dev);
86658 static void ipip_dev_free(struct net_device *dev);
86659-static struct rtnl_link_ops ipip_link_ops __read_mostly;
86660+static struct rtnl_link_ops ipip_link_ops;
86661
86662 static struct rtnl_link_stats64 *ipip_get_stats64(struct net_device *dev,
86663 struct rtnl_link_stats64 *tot)
86664@@ -972,7 +972,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
86665 [IFLA_IPTUN_PMTUDISC] = { .type = NLA_U8 },
86666 };
86667
86668-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
86669+static struct rtnl_link_ops ipip_link_ops = {
86670 .kind = "ipip",
86671 .maxtype = IFLA_IPTUN_MAX,
86672 .policy = ipip_policy,
86673diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
86674index 3ea4127..849297b 100644
86675--- a/net/ipv4/netfilter/arp_tables.c
86676+++ b/net/ipv4/netfilter/arp_tables.c
86677@@ -879,14 +879,14 @@ static int compat_table_info(const struct xt_table_info *info,
86678 #endif
86679
86680 static int get_info(struct net *net, void __user *user,
86681- const int *len, int compat)
86682+ int len, int compat)
86683 {
86684 char name[XT_TABLE_MAXNAMELEN];
86685 struct xt_table *t;
86686 int ret;
86687
86688- if (*len != sizeof(struct arpt_getinfo)) {
86689- duprintf("length %u != %Zu\n", *len,
86690+ if (len != sizeof(struct arpt_getinfo)) {
86691+ duprintf("length %u != %Zu\n", len,
86692 sizeof(struct arpt_getinfo));
86693 return -EINVAL;
86694 }
86695@@ -923,7 +923,7 @@ static int get_info(struct net *net, void __user *user,
86696 info.size = private->size;
86697 strcpy(info.name, name);
86698
86699- if (copy_to_user(user, &info, *len) != 0)
86700+ if (copy_to_user(user, &info, len) != 0)
86701 ret = -EFAULT;
86702 else
86703 ret = 0;
86704@@ -1682,7 +1682,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
86705
86706 switch (cmd) {
86707 case ARPT_SO_GET_INFO:
86708- ret = get_info(sock_net(sk), user, len, 1);
86709+ ret = get_info(sock_net(sk), user, *len, 1);
86710 break;
86711 case ARPT_SO_GET_ENTRIES:
86712 ret = compat_get_entries(sock_net(sk), user, len);
86713@@ -1727,7 +1727,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
86714
86715 switch (cmd) {
86716 case ARPT_SO_GET_INFO:
86717- ret = get_info(sock_net(sk), user, len, 0);
86718+ ret = get_info(sock_net(sk), user, *len, 0);
86719 break;
86720
86721 case ARPT_SO_GET_ENTRIES:
86722diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
86723index 17c5e06..1b91206 100644
86724--- a/net/ipv4/netfilter/ip_tables.c
86725+++ b/net/ipv4/netfilter/ip_tables.c
86726@@ -1068,14 +1068,14 @@ static int compat_table_info(const struct xt_table_info *info,
86727 #endif
86728
86729 static int get_info(struct net *net, void __user *user,
86730- const int *len, int compat)
86731+ int len, int compat)
86732 {
86733 char name[XT_TABLE_MAXNAMELEN];
86734 struct xt_table *t;
86735 int ret;
86736
86737- if (*len != sizeof(struct ipt_getinfo)) {
86738- duprintf("length %u != %zu\n", *len,
86739+ if (len != sizeof(struct ipt_getinfo)) {
86740+ duprintf("length %u != %zu\n", len,
86741 sizeof(struct ipt_getinfo));
86742 return -EINVAL;
86743 }
86744@@ -1112,7 +1112,7 @@ static int get_info(struct net *net, void __user *user,
86745 info.size = private->size;
86746 strcpy(info.name, name);
86747
86748- if (copy_to_user(user, &info, *len) != 0)
86749+ if (copy_to_user(user, &info, len) != 0)
86750 ret = -EFAULT;
86751 else
86752 ret = 0;
86753@@ -1966,7 +1966,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
86754
86755 switch (cmd) {
86756 case IPT_SO_GET_INFO:
86757- ret = get_info(sock_net(sk), user, len, 1);
86758+ ret = get_info(sock_net(sk), user, *len, 1);
86759 break;
86760 case IPT_SO_GET_ENTRIES:
86761 ret = compat_get_entries(sock_net(sk), user, len);
86762@@ -2013,7 +2013,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
86763
86764 switch (cmd) {
86765 case IPT_SO_GET_INFO:
86766- ret = get_info(sock_net(sk), user, len, 0);
86767+ ret = get_info(sock_net(sk), user, *len, 0);
86768 break;
86769
86770 case IPT_SO_GET_ENTRIES:
86771diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
86772index dc454cc..5bb917f 100644
86773--- a/net/ipv4/ping.c
86774+++ b/net/ipv4/ping.c
86775@@ -844,7 +844,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
86776 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
86777 0, sock_i_ino(sp),
86778 atomic_read(&sp->sk_refcnt), sp,
86779- atomic_read(&sp->sk_drops), len);
86780+ atomic_read_unchecked(&sp->sk_drops), len);
86781 }
86782
86783 static int ping_seq_show(struct seq_file *seq, void *v)
86784diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
86785index 6f08991..55867ad 100644
86786--- a/net/ipv4/raw.c
86787+++ b/net/ipv4/raw.c
86788@@ -311,7 +311,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
86789 int raw_rcv(struct sock *sk, struct sk_buff *skb)
86790 {
86791 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
86792- atomic_inc(&sk->sk_drops);
86793+ atomic_inc_unchecked(&sk->sk_drops);
86794 kfree_skb(skb);
86795 return NET_RX_DROP;
86796 }
86797@@ -747,16 +747,20 @@ static int raw_init(struct sock *sk)
86798
86799 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
86800 {
86801+ struct icmp_filter filter;
86802+
86803 if (optlen > sizeof(struct icmp_filter))
86804 optlen = sizeof(struct icmp_filter);
86805- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
86806+ if (copy_from_user(&filter, optval, optlen))
86807 return -EFAULT;
86808+ raw_sk(sk)->filter = filter;
86809 return 0;
86810 }
86811
86812 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
86813 {
86814 int len, ret = -EFAULT;
86815+ struct icmp_filter filter;
86816
86817 if (get_user(len, optlen))
86818 goto out;
86819@@ -766,8 +770,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
86820 if (len > sizeof(struct icmp_filter))
86821 len = sizeof(struct icmp_filter);
86822 ret = -EFAULT;
86823- if (put_user(len, optlen) ||
86824- copy_to_user(optval, &raw_sk(sk)->filter, len))
86825+ filter = raw_sk(sk)->filter;
86826+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
86827 goto out;
86828 ret = 0;
86829 out: return ret;
86830@@ -998,7 +1002,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
86831 0, 0L, 0,
86832 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
86833 0, sock_i_ino(sp),
86834- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
86835+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
86836 }
86837
86838 static int raw_seq_show(struct seq_file *seq, void *v)
86839diff --git a/net/ipv4/route.c b/net/ipv4/route.c
86840index a0fcc47..32e2c89 100644
86841--- a/net/ipv4/route.c
86842+++ b/net/ipv4/route.c
86843@@ -2552,34 +2552,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
86844 .maxlen = sizeof(int),
86845 .mode = 0200,
86846 .proc_handler = ipv4_sysctl_rtcache_flush,
86847+ .extra1 = &init_net,
86848 },
86849 { },
86850 };
86851
86852 static __net_init int sysctl_route_net_init(struct net *net)
86853 {
86854- struct ctl_table *tbl;
86855+ ctl_table_no_const *tbl = NULL;
86856
86857- tbl = ipv4_route_flush_table;
86858 if (!net_eq(net, &init_net)) {
86859- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
86860+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
86861 if (tbl == NULL)
86862 goto err_dup;
86863
86864 /* Don't export sysctls to unprivileged users */
86865 if (net->user_ns != &init_user_ns)
86866 tbl[0].procname = NULL;
86867- }
86868- tbl[0].extra1 = net;
86869+ tbl[0].extra1 = net;
86870+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
86871+ } else
86872+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
86873
86874- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
86875 if (net->ipv4.route_hdr == NULL)
86876 goto err_reg;
86877 return 0;
86878
86879 err_reg:
86880- if (tbl != ipv4_route_flush_table)
86881- kfree(tbl);
86882+ kfree(tbl);
86883 err_dup:
86884 return -ENOMEM;
86885 }
86886@@ -2602,7 +2602,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
86887
86888 static __net_init int rt_genid_init(struct net *net)
86889 {
86890- atomic_set(&net->rt_genid, 0);
86891+ atomic_set_unchecked(&net->rt_genid, 0);
86892 get_random_bytes(&net->ipv4.dev_addr_genid,
86893 sizeof(net->ipv4.dev_addr_genid));
86894 return 0;
86895diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
86896index d84400b..62e066e 100644
86897--- a/net/ipv4/sysctl_net_ipv4.c
86898+++ b/net/ipv4/sysctl_net_ipv4.c
86899@@ -54,7 +54,7 @@ static int ipv4_local_port_range(ctl_table *table, int write,
86900 {
86901 int ret;
86902 int range[2];
86903- ctl_table tmp = {
86904+ ctl_table_no_const tmp = {
86905 .data = &range,
86906 .maxlen = sizeof(range),
86907 .mode = table->mode,
86908@@ -107,7 +107,7 @@ static int ipv4_ping_group_range(ctl_table *table, int write,
86909 int ret;
86910 gid_t urange[2];
86911 kgid_t low, high;
86912- ctl_table tmp = {
86913+ ctl_table_no_const tmp = {
86914 .data = &urange,
86915 .maxlen = sizeof(urange),
86916 .mode = table->mode,
86917@@ -138,7 +138,7 @@ static int proc_tcp_congestion_control(ctl_table *ctl, int write,
86918 void __user *buffer, size_t *lenp, loff_t *ppos)
86919 {
86920 char val[TCP_CA_NAME_MAX];
86921- ctl_table tbl = {
86922+ ctl_table_no_const tbl = {
86923 .data = val,
86924 .maxlen = TCP_CA_NAME_MAX,
86925 };
86926@@ -157,7 +157,7 @@ static int proc_tcp_available_congestion_control(ctl_table *ctl,
86927 void __user *buffer, size_t *lenp,
86928 loff_t *ppos)
86929 {
86930- ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
86931+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
86932 int ret;
86933
86934 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
86935@@ -174,7 +174,7 @@ static int proc_allowed_congestion_control(ctl_table *ctl,
86936 void __user *buffer, size_t *lenp,
86937 loff_t *ppos)
86938 {
86939- ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
86940+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
86941 int ret;
86942
86943 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
86944@@ -200,15 +200,17 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
86945 struct mem_cgroup *memcg;
86946 #endif
86947
86948- ctl_table tmp = {
86949+ ctl_table_no_const tmp = {
86950 .data = &vec,
86951 .maxlen = sizeof(vec),
86952 .mode = ctl->mode,
86953 };
86954
86955 if (!write) {
86956- ctl->data = &net->ipv4.sysctl_tcp_mem;
86957- return proc_doulongvec_minmax(ctl, write, buffer, lenp, ppos);
86958+ ctl_table_no_const tcp_mem = *ctl;
86959+
86960+ tcp_mem.data = &net->ipv4.sysctl_tcp_mem;
86961+ return proc_doulongvec_minmax(&tcp_mem, write, buffer, lenp, ppos);
86962 }
86963
86964 ret = proc_doulongvec_minmax(&tmp, write, buffer, lenp, ppos);
86965@@ -235,7 +237,7 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
86966 int proc_tcp_fastopen_key(ctl_table *ctl, int write, void __user *buffer,
86967 size_t *lenp, loff_t *ppos)
86968 {
86969- ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
86970+ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
86971 struct tcp_fastopen_context *ctxt;
86972 int ret;
86973 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
86974@@ -476,7 +478,7 @@ static struct ctl_table ipv4_table[] = {
86975 },
86976 {
86977 .procname = "ip_local_reserved_ports",
86978- .data = NULL, /* initialized in sysctl_ipv4_init */
86979+ .data = sysctl_local_reserved_ports,
86980 .maxlen = 65536,
86981 .mode = 0644,
86982 .proc_handler = proc_do_large_bitmap,
86983@@ -860,11 +862,10 @@ static struct ctl_table ipv4_net_table[] = {
86984
86985 static __net_init int ipv4_sysctl_init_net(struct net *net)
86986 {
86987- struct ctl_table *table;
86988+ ctl_table_no_const *table = NULL;
86989
86990- table = ipv4_net_table;
86991 if (!net_eq(net, &init_net)) {
86992- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
86993+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
86994 if (table == NULL)
86995 goto err_alloc;
86996
86997@@ -897,15 +898,17 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
86998
86999 tcp_init_mem(net);
87000
87001- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
87002+ if (!net_eq(net, &init_net))
87003+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
87004+ else
87005+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
87006 if (net->ipv4.ipv4_hdr == NULL)
87007 goto err_reg;
87008
87009 return 0;
87010
87011 err_reg:
87012- if (!net_eq(net, &init_net))
87013- kfree(table);
87014+ kfree(table);
87015 err_alloc:
87016 return -ENOMEM;
87017 }
87018@@ -927,16 +930,6 @@ static __net_initdata struct pernet_operations ipv4_sysctl_ops = {
87019 static __init int sysctl_ipv4_init(void)
87020 {
87021 struct ctl_table_header *hdr;
87022- struct ctl_table *i;
87023-
87024- for (i = ipv4_table; i->procname; i++) {
87025- if (strcmp(i->procname, "ip_local_reserved_ports") == 0) {
87026- i->data = sysctl_local_reserved_ports;
87027- break;
87028- }
87029- }
87030- if (!i->procname)
87031- return -EINVAL;
87032
87033 hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table);
87034 if (hdr == NULL)
87035diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
87036index 9841a71..ef60409 100644
87037--- a/net/ipv4/tcp_input.c
87038+++ b/net/ipv4/tcp_input.c
87039@@ -4730,7 +4730,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
87040 * simplifies code)
87041 */
87042 static void
87043-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
87044+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
87045 struct sk_buff *head, struct sk_buff *tail,
87046 u32 start, u32 end)
87047 {
87048@@ -5847,6 +5847,7 @@ discard:
87049 tcp_paws_reject(&tp->rx_opt, 0))
87050 goto discard_and_undo;
87051
87052+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
87053 if (th->syn) {
87054 /* We see SYN without ACK. It is attempt of
87055 * simultaneous connect with crossed SYNs.
87056@@ -5897,6 +5898,7 @@ discard:
87057 goto discard;
87058 #endif
87059 }
87060+#endif
87061 /* "fifth, if neither of the SYN or RST bits is set then
87062 * drop the segment and return."
87063 */
87064@@ -5941,7 +5943,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
87065 goto discard;
87066
87067 if (th->syn) {
87068- if (th->fin)
87069+ if (th->fin || th->urg || th->psh)
87070 goto discard;
87071 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
87072 return 1;
87073diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
87074index d9130a9..00328ff 100644
87075--- a/net/ipv4/tcp_ipv4.c
87076+++ b/net/ipv4/tcp_ipv4.c
87077@@ -90,6 +90,10 @@ int sysctl_tcp_low_latency __read_mostly;
87078 EXPORT_SYMBOL(sysctl_tcp_low_latency);
87079
87080
87081+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87082+extern int grsec_enable_blackhole;
87083+#endif
87084+
87085 #ifdef CONFIG_TCP_MD5SIG
87086 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
87087 __be32 daddr, __be32 saddr, const struct tcphdr *th);
87088@@ -1895,6 +1899,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
87089 return 0;
87090
87091 reset:
87092+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87093+ if (!grsec_enable_blackhole)
87094+#endif
87095 tcp_v4_send_reset(rsk, skb);
87096 discard:
87097 kfree_skb(skb);
87098@@ -1994,12 +2001,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
87099 TCP_SKB_CB(skb)->sacked = 0;
87100
87101 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
87102- if (!sk)
87103+ if (!sk) {
87104+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87105+ ret = 1;
87106+#endif
87107 goto no_tcp_socket;
87108-
87109+ }
87110 process:
87111- if (sk->sk_state == TCP_TIME_WAIT)
87112+ if (sk->sk_state == TCP_TIME_WAIT) {
87113+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87114+ ret = 2;
87115+#endif
87116 goto do_time_wait;
87117+ }
87118
87119 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
87120 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
87121@@ -2050,6 +2064,10 @@ no_tcp_socket:
87122 bad_packet:
87123 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
87124 } else {
87125+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87126+ if (!grsec_enable_blackhole || (ret == 1 &&
87127+ (skb->dev->flags & IFF_LOOPBACK)))
87128+#endif
87129 tcp_v4_send_reset(NULL, skb);
87130 }
87131
87132diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
87133index f35f2df..ccb5ca6 100644
87134--- a/net/ipv4/tcp_minisocks.c
87135+++ b/net/ipv4/tcp_minisocks.c
87136@@ -27,6 +27,10 @@
87137 #include <net/inet_common.h>
87138 #include <net/xfrm.h>
87139
87140+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87141+extern int grsec_enable_blackhole;
87142+#endif
87143+
87144 int sysctl_tcp_syncookies __read_mostly = 1;
87145 EXPORT_SYMBOL(sysctl_tcp_syncookies);
87146
87147@@ -742,7 +746,10 @@ embryonic_reset:
87148 * avoid becoming vulnerable to outside attack aiming at
87149 * resetting legit local connections.
87150 */
87151- req->rsk_ops->send_reset(sk, skb);
87152+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87153+ if (!grsec_enable_blackhole)
87154+#endif
87155+ req->rsk_ops->send_reset(sk, skb);
87156 } else if (fastopen) { /* received a valid RST pkt */
87157 reqsk_fastopen_remove(sk, req, true);
87158 tcp_reset(sk);
87159diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
87160index 4526fe6..1a34e43 100644
87161--- a/net/ipv4/tcp_probe.c
87162+++ b/net/ipv4/tcp_probe.c
87163@@ -204,7 +204,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
87164 if (cnt + width >= len)
87165 break;
87166
87167- if (copy_to_user(buf + cnt, tbuf, width))
87168+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
87169 return -EFAULT;
87170 cnt += width;
87171 }
87172diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
87173index b78aac3..e18230b 100644
87174--- a/net/ipv4/tcp_timer.c
87175+++ b/net/ipv4/tcp_timer.c
87176@@ -22,6 +22,10 @@
87177 #include <linux/gfp.h>
87178 #include <net/tcp.h>
87179
87180+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87181+extern int grsec_lastack_retries;
87182+#endif
87183+
87184 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
87185 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
87186 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
87187@@ -185,6 +189,13 @@ static int tcp_write_timeout(struct sock *sk)
87188 }
87189 }
87190
87191+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87192+ if ((sk->sk_state == TCP_LAST_ACK) &&
87193+ (grsec_lastack_retries > 0) &&
87194+ (grsec_lastack_retries < retry_until))
87195+ retry_until = grsec_lastack_retries;
87196+#endif
87197+
87198 if (retransmits_timed_out(sk, retry_until,
87199 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
87200 /* Has it gone just too far? */
87201diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
87202index 1f4d405..3524677 100644
87203--- a/net/ipv4/udp.c
87204+++ b/net/ipv4/udp.c
87205@@ -87,6 +87,7 @@
87206 #include <linux/types.h>
87207 #include <linux/fcntl.h>
87208 #include <linux/module.h>
87209+#include <linux/security.h>
87210 #include <linux/socket.h>
87211 #include <linux/sockios.h>
87212 #include <linux/igmp.h>
87213@@ -111,6 +112,10 @@
87214 #include <trace/events/skb.h>
87215 #include "udp_impl.h"
87216
87217+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87218+extern int grsec_enable_blackhole;
87219+#endif
87220+
87221 struct udp_table udp_table __read_mostly;
87222 EXPORT_SYMBOL(udp_table);
87223
87224@@ -569,6 +574,9 @@ found:
87225 return s;
87226 }
87227
87228+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
87229+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
87230+
87231 /*
87232 * This routine is called by the ICMP module when it gets some
87233 * sort of error condition. If err < 0 then the socket should
87234@@ -864,9 +872,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
87235 dport = usin->sin_port;
87236 if (dport == 0)
87237 return -EINVAL;
87238+
87239+ err = gr_search_udp_sendmsg(sk, usin);
87240+ if (err)
87241+ return err;
87242 } else {
87243 if (sk->sk_state != TCP_ESTABLISHED)
87244 return -EDESTADDRREQ;
87245+
87246+ err = gr_search_udp_sendmsg(sk, NULL);
87247+ if (err)
87248+ return err;
87249+
87250 daddr = inet->inet_daddr;
87251 dport = inet->inet_dport;
87252 /* Open fast path for connected socket.
87253@@ -1108,7 +1125,7 @@ static unsigned int first_packet_length(struct sock *sk)
87254 udp_lib_checksum_complete(skb)) {
87255 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
87256 IS_UDPLITE(sk));
87257- atomic_inc(&sk->sk_drops);
87258+ atomic_inc_unchecked(&sk->sk_drops);
87259 __skb_unlink(skb, rcvq);
87260 __skb_queue_tail(&list_kill, skb);
87261 }
87262@@ -1194,6 +1211,10 @@ try_again:
87263 if (!skb)
87264 goto out;
87265
87266+ err = gr_search_udp_recvmsg(sk, skb);
87267+ if (err)
87268+ goto out_free;
87269+
87270 ulen = skb->len - sizeof(struct udphdr);
87271 copied = len;
87272 if (copied > ulen)
87273@@ -1227,7 +1248,7 @@ try_again:
87274 if (unlikely(err)) {
87275 trace_kfree_skb(skb, udp_recvmsg);
87276 if (!peeked) {
87277- atomic_inc(&sk->sk_drops);
87278+ atomic_inc_unchecked(&sk->sk_drops);
87279 UDP_INC_STATS_USER(sock_net(sk),
87280 UDP_MIB_INERRORS, is_udplite);
87281 }
87282@@ -1510,7 +1531,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
87283
87284 drop:
87285 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
87286- atomic_inc(&sk->sk_drops);
87287+ atomic_inc_unchecked(&sk->sk_drops);
87288 kfree_skb(skb);
87289 return -1;
87290 }
87291@@ -1529,7 +1550,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
87292 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
87293
87294 if (!skb1) {
87295- atomic_inc(&sk->sk_drops);
87296+ atomic_inc_unchecked(&sk->sk_drops);
87297 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
87298 IS_UDPLITE(sk));
87299 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
87300@@ -1698,6 +1719,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
87301 goto csum_error;
87302
87303 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
87304+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87305+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
87306+#endif
87307 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
87308
87309 /*
87310@@ -2120,7 +2144,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
87311 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
87312 0, sock_i_ino(sp),
87313 atomic_read(&sp->sk_refcnt), sp,
87314- atomic_read(&sp->sk_drops), len);
87315+ atomic_read_unchecked(&sp->sk_drops), len);
87316 }
87317
87318 int udp4_seq_show(struct seq_file *seq, void *v)
87319diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
87320index a36d17e..96d099f 100644
87321--- a/net/ipv6/addrconf.c
87322+++ b/net/ipv6/addrconf.c
87323@@ -2272,7 +2272,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
87324 p.iph.ihl = 5;
87325 p.iph.protocol = IPPROTO_IPV6;
87326 p.iph.ttl = 64;
87327- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
87328+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
87329
87330 if (ops->ndo_do_ioctl) {
87331 mm_segment_t oldfs = get_fs();
87332@@ -4388,7 +4388,7 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write,
87333 int *valp = ctl->data;
87334 int val = *valp;
87335 loff_t pos = *ppos;
87336- ctl_table lctl;
87337+ ctl_table_no_const lctl;
87338 int ret;
87339
87340 /*
87341@@ -4470,7 +4470,7 @@ int addrconf_sysctl_disable(ctl_table *ctl, int write,
87342 int *valp = ctl->data;
87343 int val = *valp;
87344 loff_t pos = *ppos;
87345- ctl_table lctl;
87346+ ctl_table_no_const lctl;
87347 int ret;
87348
87349 /*
87350diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
87351index fff5bdd..15194fb 100644
87352--- a/net/ipv6/icmp.c
87353+++ b/net/ipv6/icmp.c
87354@@ -973,7 +973,7 @@ ctl_table ipv6_icmp_table_template[] = {
87355
87356 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
87357 {
87358- struct ctl_table *table;
87359+ ctl_table_no_const *table;
87360
87361 table = kmemdup(ipv6_icmp_table_template,
87362 sizeof(ipv6_icmp_table_template),
87363diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
87364index 131dd09..f7ed64f 100644
87365--- a/net/ipv6/ip6_gre.c
87366+++ b/net/ipv6/ip6_gre.c
87367@@ -73,7 +73,7 @@ struct ip6gre_net {
87368 struct net_device *fb_tunnel_dev;
87369 };
87370
87371-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
87372+static struct rtnl_link_ops ip6gre_link_ops;
87373 static int ip6gre_tunnel_init(struct net_device *dev);
87374 static void ip6gre_tunnel_setup(struct net_device *dev);
87375 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
87376@@ -1337,7 +1337,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
87377 }
87378
87379
87380-static struct inet6_protocol ip6gre_protocol __read_mostly = {
87381+static struct inet6_protocol ip6gre_protocol = {
87382 .handler = ip6gre_rcv,
87383 .err_handler = ip6gre_err,
87384 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
87385@@ -1671,7 +1671,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
87386 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
87387 };
87388
87389-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
87390+static struct rtnl_link_ops ip6gre_link_ops = {
87391 .kind = "ip6gre",
87392 .maxtype = IFLA_GRE_MAX,
87393 .policy = ip6gre_policy,
87394@@ -1684,7 +1684,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
87395 .fill_info = ip6gre_fill_info,
87396 };
87397
87398-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
87399+static struct rtnl_link_ops ip6gre_tap_ops = {
87400 .kind = "ip6gretap",
87401 .maxtype = IFLA_GRE_MAX,
87402 .policy = ip6gre_policy,
87403diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
87404index a14f28b..b4b8956 100644
87405--- a/net/ipv6/ip6_tunnel.c
87406+++ b/net/ipv6/ip6_tunnel.c
87407@@ -87,7 +87,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
87408
87409 static int ip6_tnl_dev_init(struct net_device *dev);
87410 static void ip6_tnl_dev_setup(struct net_device *dev);
87411-static struct rtnl_link_ops ip6_link_ops __read_mostly;
87412+static struct rtnl_link_ops ip6_link_ops;
87413
87414 static int ip6_tnl_net_id __read_mostly;
87415 struct ip6_tnl_net {
87416@@ -1686,7 +1686,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
87417 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
87418 };
87419
87420-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
87421+static struct rtnl_link_ops ip6_link_ops = {
87422 .kind = "ip6tnl",
87423 .maxtype = IFLA_IPTUN_MAX,
87424 .policy = ip6_tnl_policy,
87425diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
87426index d1e2e8e..51c19ae 100644
87427--- a/net/ipv6/ipv6_sockglue.c
87428+++ b/net/ipv6/ipv6_sockglue.c
87429@@ -991,7 +991,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
87430 if (sk->sk_type != SOCK_STREAM)
87431 return -ENOPROTOOPT;
87432
87433- msg.msg_control = optval;
87434+ msg.msg_control = (void __force_kernel *)optval;
87435 msg.msg_controllen = len;
87436 msg.msg_flags = flags;
87437
87438diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
87439index 125a90d..2a11f36 100644
87440--- a/net/ipv6/netfilter/ip6_tables.c
87441+++ b/net/ipv6/netfilter/ip6_tables.c
87442@@ -1076,14 +1076,14 @@ static int compat_table_info(const struct xt_table_info *info,
87443 #endif
87444
87445 static int get_info(struct net *net, void __user *user,
87446- const int *len, int compat)
87447+ int len, int compat)
87448 {
87449 char name[XT_TABLE_MAXNAMELEN];
87450 struct xt_table *t;
87451 int ret;
87452
87453- if (*len != sizeof(struct ip6t_getinfo)) {
87454- duprintf("length %u != %zu\n", *len,
87455+ if (len != sizeof(struct ip6t_getinfo)) {
87456+ duprintf("length %u != %zu\n", len,
87457 sizeof(struct ip6t_getinfo));
87458 return -EINVAL;
87459 }
87460@@ -1120,7 +1120,7 @@ static int get_info(struct net *net, void __user *user,
87461 info.size = private->size;
87462 strcpy(info.name, name);
87463
87464- if (copy_to_user(user, &info, *len) != 0)
87465+ if (copy_to_user(user, &info, len) != 0)
87466 ret = -EFAULT;
87467 else
87468 ret = 0;
87469@@ -1974,7 +1974,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
87470
87471 switch (cmd) {
87472 case IP6T_SO_GET_INFO:
87473- ret = get_info(sock_net(sk), user, len, 1);
87474+ ret = get_info(sock_net(sk), user, *len, 1);
87475 break;
87476 case IP6T_SO_GET_ENTRIES:
87477 ret = compat_get_entries(sock_net(sk), user, len);
87478@@ -2021,7 +2021,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
87479
87480 switch (cmd) {
87481 case IP6T_SO_GET_INFO:
87482- ret = get_info(sock_net(sk), user, len, 0);
87483+ ret = get_info(sock_net(sk), user, *len, 0);
87484 break;
87485
87486 case IP6T_SO_GET_ENTRIES:
87487diff --git a/net/ipv6/netfilter/ip6t_NPT.c b/net/ipv6/netfilter/ip6t_NPT.c
87488index 83acc14..0ea43c7 100644
87489--- a/net/ipv6/netfilter/ip6t_NPT.c
87490+++ b/net/ipv6/netfilter/ip6t_NPT.c
87491@@ -57,7 +57,7 @@ static bool ip6t_npt_map_pfx(const struct ip6t_npt_tginfo *npt,
87492 if (pfx_len - i >= 32)
87493 mask = 0;
87494 else
87495- mask = htonl(~((1 << (pfx_len - i)) - 1));
87496+ mask = htonl((1 << (i - pfx_len + 32)) - 1);
87497
87498 idx = i / 32;
87499 addr->s6_addr32[idx] &= mask;
87500diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
87501index 2f3a018..8bca195 100644
87502--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
87503+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
87504@@ -89,12 +89,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
87505
87506 static int nf_ct_frag6_sysctl_register(struct net *net)
87507 {
87508- struct ctl_table *table;
87509+ ctl_table_no_const *table = NULL;
87510 struct ctl_table_header *hdr;
87511
87512- table = nf_ct_frag6_sysctl_table;
87513 if (!net_eq(net, &init_net)) {
87514- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
87515+ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
87516 GFP_KERNEL);
87517 if (table == NULL)
87518 goto err_alloc;
87519@@ -102,9 +101,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
87520 table[0].data = &net->ipv6.frags.high_thresh;
87521 table[1].data = &net->ipv6.frags.low_thresh;
87522 table[2].data = &net->ipv6.frags.timeout;
87523- }
87524-
87525- hdr = register_net_sysctl(net, "net/netfilter", table);
87526+ hdr = register_net_sysctl(net, "net/netfilter", table);
87527+ } else
87528+ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
87529 if (hdr == NULL)
87530 goto err_reg;
87531
87532@@ -112,8 +111,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
87533 return 0;
87534
87535 err_reg:
87536- if (!net_eq(net, &init_net))
87537- kfree(table);
87538+ kfree(table);
87539 err_alloc:
87540 return -ENOMEM;
87541 }
87542diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
87543index 70fa814..d70c28c 100644
87544--- a/net/ipv6/raw.c
87545+++ b/net/ipv6/raw.c
87546@@ -379,7 +379,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
87547 {
87548 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
87549 skb_checksum_complete(skb)) {
87550- atomic_inc(&sk->sk_drops);
87551+ atomic_inc_unchecked(&sk->sk_drops);
87552 kfree_skb(skb);
87553 return NET_RX_DROP;
87554 }
87555@@ -407,7 +407,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
87556 struct raw6_sock *rp = raw6_sk(sk);
87557
87558 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
87559- atomic_inc(&sk->sk_drops);
87560+ atomic_inc_unchecked(&sk->sk_drops);
87561 kfree_skb(skb);
87562 return NET_RX_DROP;
87563 }
87564@@ -431,7 +431,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
87565
87566 if (inet->hdrincl) {
87567 if (skb_checksum_complete(skb)) {
87568- atomic_inc(&sk->sk_drops);
87569+ atomic_inc_unchecked(&sk->sk_drops);
87570 kfree_skb(skb);
87571 return NET_RX_DROP;
87572 }
87573@@ -604,7 +604,7 @@ out:
87574 return err;
87575 }
87576
87577-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
87578+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
87579 struct flowi6 *fl6, struct dst_entry **dstp,
87580 unsigned int flags)
87581 {
87582@@ -916,12 +916,15 @@ do_confirm:
87583 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
87584 char __user *optval, int optlen)
87585 {
87586+ struct icmp6_filter filter;
87587+
87588 switch (optname) {
87589 case ICMPV6_FILTER:
87590 if (optlen > sizeof(struct icmp6_filter))
87591 optlen = sizeof(struct icmp6_filter);
87592- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
87593+ if (copy_from_user(&filter, optval, optlen))
87594 return -EFAULT;
87595+ raw6_sk(sk)->filter = filter;
87596 return 0;
87597 default:
87598 return -ENOPROTOOPT;
87599@@ -934,6 +937,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
87600 char __user *optval, int __user *optlen)
87601 {
87602 int len;
87603+ struct icmp6_filter filter;
87604
87605 switch (optname) {
87606 case ICMPV6_FILTER:
87607@@ -945,7 +949,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
87608 len = sizeof(struct icmp6_filter);
87609 if (put_user(len, optlen))
87610 return -EFAULT;
87611- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
87612+ filter = raw6_sk(sk)->filter;
87613+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
87614 return -EFAULT;
87615 return 0;
87616 default:
87617@@ -1253,7 +1258,7 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
87618 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
87619 0,
87620 sock_i_ino(sp),
87621- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
87622+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
87623 }
87624
87625 static int raw6_seq_show(struct seq_file *seq, void *v)
87626diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
87627index d9ba8a2..f3f9e14 100644
87628--- a/net/ipv6/reassembly.c
87629+++ b/net/ipv6/reassembly.c
87630@@ -608,12 +608,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
87631
87632 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
87633 {
87634- struct ctl_table *table;
87635+ ctl_table_no_const *table = NULL;
87636 struct ctl_table_header *hdr;
87637
87638- table = ip6_frags_ns_ctl_table;
87639 if (!net_eq(net, &init_net)) {
87640- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
87641+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
87642 if (table == NULL)
87643 goto err_alloc;
87644
87645@@ -624,9 +623,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
87646 /* Don't export sysctls to unprivileged users */
87647 if (net->user_ns != &init_user_ns)
87648 table[0].procname = NULL;
87649- }
87650+ hdr = register_net_sysctl(net, "net/ipv6", table);
87651+ } else
87652+ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
87653
87654- hdr = register_net_sysctl(net, "net/ipv6", table);
87655 if (hdr == NULL)
87656 goto err_reg;
87657
87658@@ -634,8 +634,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
87659 return 0;
87660
87661 err_reg:
87662- if (!net_eq(net, &init_net))
87663- kfree(table);
87664+ kfree(table);
87665 err_alloc:
87666 return -ENOMEM;
87667 }
87668diff --git a/net/ipv6/route.c b/net/ipv6/route.c
87669index 5845613..3af8fc7 100644
87670--- a/net/ipv6/route.c
87671+++ b/net/ipv6/route.c
87672@@ -2966,7 +2966,7 @@ ctl_table ipv6_route_table_template[] = {
87673
87674 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
87675 {
87676- struct ctl_table *table;
87677+ ctl_table_no_const *table;
87678
87679 table = kmemdup(ipv6_route_table_template,
87680 sizeof(ipv6_route_table_template),
87681diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
87682index cfba99b..20ca511 100644
87683--- a/net/ipv6/sit.c
87684+++ b/net/ipv6/sit.c
87685@@ -72,7 +72,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
87686 static int ipip6_tunnel_init(struct net_device *dev);
87687 static void ipip6_tunnel_setup(struct net_device *dev);
87688 static void ipip6_dev_free(struct net_device *dev);
87689-static struct rtnl_link_ops sit_link_ops __read_mostly;
87690+static struct rtnl_link_ops sit_link_ops;
87691
87692 static int sit_net_id __read_mostly;
87693 struct sit_net {
87694@@ -1463,7 +1463,7 @@ static const struct nla_policy ipip6_policy[IFLA_IPTUN_MAX + 1] = {
87695 #endif
87696 };
87697
87698-static struct rtnl_link_ops sit_link_ops __read_mostly = {
87699+static struct rtnl_link_ops sit_link_ops = {
87700 .kind = "sit",
87701 .maxtype = IFLA_IPTUN_MAX,
87702 .policy = ipip6_policy,
87703diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
87704index e85c48b..b8268d3 100644
87705--- a/net/ipv6/sysctl_net_ipv6.c
87706+++ b/net/ipv6/sysctl_net_ipv6.c
87707@@ -40,7 +40,7 @@ static ctl_table ipv6_rotable[] = {
87708
87709 static int __net_init ipv6_sysctl_net_init(struct net *net)
87710 {
87711- struct ctl_table *ipv6_table;
87712+ ctl_table_no_const *ipv6_table;
87713 struct ctl_table *ipv6_route_table;
87714 struct ctl_table *ipv6_icmp_table;
87715 int err;
87716diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
87717index 8d19346..e47216f 100644
87718--- a/net/ipv6/tcp_ipv6.c
87719+++ b/net/ipv6/tcp_ipv6.c
87720@@ -103,6 +103,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
87721 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
87722 }
87723
87724+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87725+extern int grsec_enable_blackhole;
87726+#endif
87727+
87728 static void tcp_v6_hash(struct sock *sk)
87729 {
87730 if (sk->sk_state != TCP_CLOSE) {
87731@@ -1440,6 +1444,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
87732 return 0;
87733
87734 reset:
87735+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87736+ if (!grsec_enable_blackhole)
87737+#endif
87738 tcp_v6_send_reset(sk, skb);
87739 discard:
87740 if (opt_skb)
87741@@ -1521,12 +1528,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
87742 TCP_SKB_CB(skb)->sacked = 0;
87743
87744 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
87745- if (!sk)
87746+ if (!sk) {
87747+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87748+ ret = 1;
87749+#endif
87750 goto no_tcp_socket;
87751+ }
87752
87753 process:
87754- if (sk->sk_state == TCP_TIME_WAIT)
87755+ if (sk->sk_state == TCP_TIME_WAIT) {
87756+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87757+ ret = 2;
87758+#endif
87759 goto do_time_wait;
87760+ }
87761
87762 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
87763 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
87764@@ -1575,6 +1590,10 @@ no_tcp_socket:
87765 bad_packet:
87766 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
87767 } else {
87768+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87769+ if (!grsec_enable_blackhole || (ret == 1 &&
87770+ (skb->dev->flags & IFF_LOOPBACK)))
87771+#endif
87772 tcp_v6_send_reset(NULL, skb);
87773 }
87774
87775diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
87776index fb08329..2d6919e 100644
87777--- a/net/ipv6/udp.c
87778+++ b/net/ipv6/udp.c
87779@@ -51,6 +51,10 @@
87780 #include <trace/events/skb.h>
87781 #include "udp_impl.h"
87782
87783+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87784+extern int grsec_enable_blackhole;
87785+#endif
87786+
87787 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
87788 {
87789 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
87790@@ -395,7 +399,7 @@ try_again:
87791 if (unlikely(err)) {
87792 trace_kfree_skb(skb, udpv6_recvmsg);
87793 if (!peeked) {
87794- atomic_inc(&sk->sk_drops);
87795+ atomic_inc_unchecked(&sk->sk_drops);
87796 if (is_udp4)
87797 UDP_INC_STATS_USER(sock_net(sk),
87798 UDP_MIB_INERRORS,
87799@@ -633,7 +637,7 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
87800 return rc;
87801 drop:
87802 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
87803- atomic_inc(&sk->sk_drops);
87804+ atomic_inc_unchecked(&sk->sk_drops);
87805 kfree_skb(skb);
87806 return -1;
87807 }
87808@@ -691,7 +695,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
87809 if (likely(skb1 == NULL))
87810 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
87811 if (!skb1) {
87812- atomic_inc(&sk->sk_drops);
87813+ atomic_inc_unchecked(&sk->sk_drops);
87814 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
87815 IS_UDPLITE(sk));
87816 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
87817@@ -862,6 +866,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
87818 goto discard;
87819
87820 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
87821+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87822+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
87823+#endif
87824 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
87825
87826 kfree_skb(skb);
87827@@ -1379,7 +1386,7 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
87828 0,
87829 sock_i_ino(sp),
87830 atomic_read(&sp->sk_refcnt), sp,
87831- atomic_read(&sp->sk_drops));
87832+ atomic_read_unchecked(&sp->sk_drops));
87833 }
87834
87835 int udp6_seq_show(struct seq_file *seq, void *v)
87836diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
87837index a68c88c..d55b0c5 100644
87838--- a/net/irda/ircomm/ircomm_tty.c
87839+++ b/net/irda/ircomm/ircomm_tty.c
87840@@ -312,12 +312,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
87841 add_wait_queue(&port->open_wait, &wait);
87842
87843 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
87844- __FILE__, __LINE__, tty->driver->name, port->count);
87845+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
87846
87847 spin_lock_irqsave(&port->lock, flags);
87848 if (!tty_hung_up_p(filp)) {
87849 extra_count = 1;
87850- port->count--;
87851+ atomic_dec(&port->count);
87852 }
87853 spin_unlock_irqrestore(&port->lock, flags);
87854 port->blocked_open++;
87855@@ -353,7 +353,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
87856 }
87857
87858 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
87859- __FILE__, __LINE__, tty->driver->name, port->count);
87860+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
87861
87862 schedule();
87863 }
87864@@ -364,13 +364,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
87865 if (extra_count) {
87866 /* ++ is not atomic, so this should be protected - Jean II */
87867 spin_lock_irqsave(&port->lock, flags);
87868- port->count++;
87869+ atomic_inc(&port->count);
87870 spin_unlock_irqrestore(&port->lock, flags);
87871 }
87872 port->blocked_open--;
87873
87874 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
87875- __FILE__, __LINE__, tty->driver->name, port->count);
87876+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
87877
87878 if (!retval)
87879 port->flags |= ASYNC_NORMAL_ACTIVE;
87880@@ -444,12 +444,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
87881
87882 /* ++ is not atomic, so this should be protected - Jean II */
87883 spin_lock_irqsave(&self->port.lock, flags);
87884- self->port.count++;
87885+ atomic_inc(&self->port.count);
87886 spin_unlock_irqrestore(&self->port.lock, flags);
87887 tty_port_tty_set(&self->port, tty);
87888
87889 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
87890- self->line, self->port.count);
87891+ self->line, atomic_read(&self->port.count));
87892
87893 /* Not really used by us, but lets do it anyway */
87894 tty->low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
87895@@ -986,7 +986,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
87896 tty_kref_put(port->tty);
87897 }
87898 port->tty = NULL;
87899- port->count = 0;
87900+ atomic_set(&port->count, 0);
87901 spin_unlock_irqrestore(&port->lock, flags);
87902
87903 wake_up_interruptible(&port->open_wait);
87904@@ -1343,7 +1343,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
87905 seq_putc(m, '\n');
87906
87907 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
87908- seq_printf(m, "Open count: %d\n", self->port.count);
87909+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
87910 seq_printf(m, "Max data size: %d\n", self->max_data_size);
87911 seq_printf(m, "Max header size: %d\n", self->max_header_size);
87912
87913diff --git a/net/irda/iriap.c b/net/irda/iriap.c
87914index e71e85b..29340a9 100644
87915--- a/net/irda/iriap.c
87916+++ b/net/irda/iriap.c
87917@@ -495,8 +495,11 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self,
87918 /* case CS_ISO_8859_9: */
87919 /* case CS_UNICODE: */
87920 default:
87921- IRDA_DEBUG(0, "%s(), charset %s, not supported\n",
87922- __func__, ias_charset_types[charset]);
87923+ IRDA_DEBUG(0, "%s(), charset [%d] %s, not supported\n",
87924+ __func__, charset,
87925+ charset < ARRAY_SIZE(ias_charset_types) ?
87926+ ias_charset_types[charset] :
87927+ "(unknown)");
87928
87929 /* Aborting, close connection! */
87930 iriap_disconnect_request(self);
87931diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
87932index cd6f7a9..e63fe89 100644
87933--- a/net/iucv/af_iucv.c
87934+++ b/net/iucv/af_iucv.c
87935@@ -782,10 +782,10 @@ static int iucv_sock_autobind(struct sock *sk)
87936
87937 write_lock_bh(&iucv_sk_list.lock);
87938
87939- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
87940+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
87941 while (__iucv_get_sock_by_name(name)) {
87942 sprintf(name, "%08x",
87943- atomic_inc_return(&iucv_sk_list.autobind_name));
87944+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
87945 }
87946
87947 write_unlock_bh(&iucv_sk_list.lock);
87948diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
87949index df08250..02021fe 100644
87950--- a/net/iucv/iucv.c
87951+++ b/net/iucv/iucv.c
87952@@ -690,7 +690,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
87953 return NOTIFY_OK;
87954 }
87955
87956-static struct notifier_block __refdata iucv_cpu_notifier = {
87957+static struct notifier_block iucv_cpu_notifier = {
87958 .notifier_call = iucv_cpu_notify,
87959 };
87960
87961diff --git a/net/key/af_key.c b/net/key/af_key.c
87962index 5b426a6..970032b 100644
87963--- a/net/key/af_key.c
87964+++ b/net/key/af_key.c
87965@@ -3019,10 +3019,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
87966 static u32 get_acqseq(void)
87967 {
87968 u32 res;
87969- static atomic_t acqseq;
87970+ static atomic_unchecked_t acqseq;
87971
87972 do {
87973- res = atomic_inc_return(&acqseq);
87974+ res = atomic_inc_return_unchecked(&acqseq);
87975 } while (!res);
87976 return res;
87977 }
87978diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
87979index 0479c64..9e72ff4 100644
87980--- a/net/mac80211/cfg.c
87981+++ b/net/mac80211/cfg.c
87982@@ -790,7 +790,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
87983 ret = ieee80211_vif_use_channel(sdata, chandef,
87984 IEEE80211_CHANCTX_EXCLUSIVE);
87985 }
87986- } else if (local->open_count == local->monitors) {
87987+ } else if (local_read(&local->open_count) == local->monitors) {
87988 local->_oper_channel = chandef->chan;
87989 local->_oper_channel_type = cfg80211_get_chandef_type(chandef);
87990 ieee80211_hw_config(local, 0);
87991@@ -2499,7 +2499,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,
87992 list_del(&dep->list);
87993 mutex_unlock(&local->mtx);
87994
87995- ieee80211_roc_notify_destroy(dep);
87996+ ieee80211_roc_notify_destroy(dep, true);
87997 return 0;
87998 }
87999
88000@@ -2539,7 +2539,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,
88001 ieee80211_start_next_roc(local);
88002 mutex_unlock(&local->mtx);
88003
88004- ieee80211_roc_notify_destroy(found);
88005+ ieee80211_roc_notify_destroy(found, true);
88006 } else {
88007 /* work may be pending so use it all the time */
88008 found->abort = true;
88009@@ -2549,6 +2549,8 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,
88010
88011 /* work will clean up etc */
88012 flush_delayed_work(&found->work);
88013+ WARN_ON(!found->to_be_freed);
88014+ kfree(found);
88015 }
88016
88017 return 0;
88018@@ -2716,7 +2718,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
88019 else
88020 local->probe_req_reg--;
88021
88022- if (!local->open_count)
88023+ if (!local_read(&local->open_count))
88024 break;
88025
88026 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
88027diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
88028index 2ed065c..bec0c2b 100644
88029--- a/net/mac80211/ieee80211_i.h
88030+++ b/net/mac80211/ieee80211_i.h
88031@@ -28,6 +28,7 @@
88032 #include <net/ieee80211_radiotap.h>
88033 #include <net/cfg80211.h>
88034 #include <net/mac80211.h>
88035+#include <asm/local.h>
88036 #include "key.h"
88037 #include "sta_info.h"
88038 #include "debug.h"
88039@@ -346,6 +347,7 @@ struct ieee80211_roc_work {
88040 struct ieee80211_channel *chan;
88041
88042 bool started, abort, hw_begun, notified;
88043+ bool to_be_freed;
88044
88045 unsigned long hw_start_time;
88046
88047@@ -909,7 +911,7 @@ struct ieee80211_local {
88048 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
88049 spinlock_t queue_stop_reason_lock;
88050
88051- int open_count;
88052+ local_t open_count;
88053 int monitors, cooked_mntrs;
88054 /* number of interfaces with corresponding FIF_ flags */
88055 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
88056@@ -1363,7 +1365,7 @@ void ieee80211_offchannel_return(struct ieee80211_local *local);
88057 void ieee80211_roc_setup(struct ieee80211_local *local);
88058 void ieee80211_start_next_roc(struct ieee80211_local *local);
88059 void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata);
88060-void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc);
88061+void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc, bool free);
88062 void ieee80211_sw_roc_work(struct work_struct *work);
88063 void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc);
88064
88065diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
88066index 8be854e..ad72a69 100644
88067--- a/net/mac80211/iface.c
88068+++ b/net/mac80211/iface.c
88069@@ -546,7 +546,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
88070 break;
88071 }
88072
88073- if (local->open_count == 0) {
88074+ if (local_read(&local->open_count) == 0) {
88075 res = drv_start(local);
88076 if (res)
88077 goto err_del_bss;
88078@@ -591,7 +591,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
88079 break;
88080 }
88081
88082- if (local->monitors == 0 && local->open_count == 0) {
88083+ if (local->monitors == 0 && local_read(&local->open_count) == 0) {
88084 res = ieee80211_add_virtual_monitor(local);
88085 if (res)
88086 goto err_stop;
88087@@ -699,7 +699,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
88088 mutex_unlock(&local->mtx);
88089
88090 if (coming_up)
88091- local->open_count++;
88092+ local_inc(&local->open_count);
88093
88094 if (hw_reconf_flags)
88095 ieee80211_hw_config(local, hw_reconf_flags);
88096@@ -713,7 +713,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
88097 err_del_interface:
88098 drv_remove_interface(local, sdata);
88099 err_stop:
88100- if (!local->open_count)
88101+ if (!local_read(&local->open_count))
88102 drv_stop(local);
88103 err_del_bss:
88104 sdata->bss = NULL;
88105@@ -827,7 +827,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
88106 }
88107
88108 if (going_down)
88109- local->open_count--;
88110+ local_dec(&local->open_count);
88111
88112 switch (sdata->vif.type) {
88113 case NL80211_IFTYPE_AP_VLAN:
88114@@ -884,7 +884,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
88115
88116 ieee80211_recalc_ps(local, -1);
88117
88118- if (local->open_count == 0) {
88119+ if (local_read(&local->open_count) == 0) {
88120 if (local->ops->napi_poll)
88121 napi_disable(&local->napi);
88122 ieee80211_clear_tx_pending(local);
88123@@ -910,7 +910,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
88124 }
88125 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
88126
88127- if (local->monitors == local->open_count && local->monitors > 0)
88128+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
88129 ieee80211_add_virtual_monitor(local);
88130 }
88131
88132diff --git a/net/mac80211/main.c b/net/mac80211/main.c
88133index 1b087ff..bf600e9 100644
88134--- a/net/mac80211/main.c
88135+++ b/net/mac80211/main.c
88136@@ -181,7 +181,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
88137 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
88138 IEEE80211_CONF_CHANGE_POWER);
88139
88140- if (changed && local->open_count) {
88141+ if (changed && local_read(&local->open_count)) {
88142 ret = drv_config(local, changed);
88143 /*
88144 * Goal:
88145diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
88146index a3ad4c3..7acbdaa 100644
88147--- a/net/mac80211/offchannel.c
88148+++ b/net/mac80211/offchannel.c
88149@@ -299,10 +299,13 @@ void ieee80211_start_next_roc(struct ieee80211_local *local)
88150 }
88151 }
88152
88153-void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc)
88154+void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc, bool free)
88155 {
88156 struct ieee80211_roc_work *dep, *tmp;
88157
88158+ if (WARN_ON(roc->to_be_freed))
88159+ return;
88160+
88161 /* was never transmitted */
88162 if (roc->frame) {
88163 cfg80211_mgmt_tx_status(&roc->sdata->wdev,
88164@@ -318,9 +321,12 @@ void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc)
88165 GFP_KERNEL);
88166
88167 list_for_each_entry_safe(dep, tmp, &roc->dependents, list)
88168- ieee80211_roc_notify_destroy(dep);
88169+ ieee80211_roc_notify_destroy(dep, true);
88170
88171- kfree(roc);
88172+ if (free)
88173+ kfree(roc);
88174+ else
88175+ roc->to_be_freed = true;
88176 }
88177
88178 void ieee80211_sw_roc_work(struct work_struct *work)
88179@@ -333,6 +339,9 @@ void ieee80211_sw_roc_work(struct work_struct *work)
88180
88181 mutex_lock(&local->mtx);
88182
88183+ if (roc->to_be_freed)
88184+ goto out_unlock;
88185+
88186 if (roc->abort)
88187 goto finish;
88188
88189@@ -372,7 +381,7 @@ void ieee80211_sw_roc_work(struct work_struct *work)
88190 finish:
88191 list_del(&roc->list);
88192 started = roc->started;
88193- ieee80211_roc_notify_destroy(roc);
88194+ ieee80211_roc_notify_destroy(roc, !roc->abort);
88195
88196 if (started) {
88197 drv_flush(local, false);
88198@@ -412,7 +421,7 @@ static void ieee80211_hw_roc_done(struct work_struct *work)
88199
88200 list_del(&roc->list);
88201
88202- ieee80211_roc_notify_destroy(roc);
88203+ ieee80211_roc_notify_destroy(roc, true);
88204
88205 /* if there's another roc, start it now */
88206 ieee80211_start_next_roc(local);
88207@@ -462,12 +471,14 @@ void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata)
88208 list_for_each_entry_safe(roc, tmp, &tmp_list, list) {
88209 if (local->ops->remain_on_channel) {
88210 list_del(&roc->list);
88211- ieee80211_roc_notify_destroy(roc);
88212+ ieee80211_roc_notify_destroy(roc, true);
88213 } else {
88214 ieee80211_queue_delayed_work(&local->hw, &roc->work, 0);
88215
88216 /* work will clean up etc */
88217 flush_delayed_work(&roc->work);
88218+ WARN_ON(!roc->to_be_freed);
88219+ kfree(roc);
88220 }
88221 }
88222
88223diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
88224index 79a48f3..5e185c9 100644
88225--- a/net/mac80211/pm.c
88226+++ b/net/mac80211/pm.c
88227@@ -35,7 +35,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
88228 struct sta_info *sta;
88229 struct ieee80211_chanctx *ctx;
88230
88231- if (!local->open_count)
88232+ if (!local_read(&local->open_count))
88233 goto suspend;
88234
88235 ieee80211_scan_cancel(local);
88236@@ -73,7 +73,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
88237 cancel_work_sync(&local->dynamic_ps_enable_work);
88238 del_timer_sync(&local->dynamic_ps_timer);
88239
88240- local->wowlan = wowlan && local->open_count;
88241+ local->wowlan = wowlan && local_read(&local->open_count);
88242 if (local->wowlan) {
88243 int err = drv_suspend(local, wowlan);
88244 if (err < 0) {
88245@@ -187,7 +187,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
88246 mutex_unlock(&local->chanctx_mtx);
88247
88248 /* stop hardware - this must stop RX */
88249- if (local->open_count)
88250+ if (local_read(&local->open_count))
88251 ieee80211_stop_device(local);
88252
88253 suspend:
88254diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
88255index dd88381..eef4dd6 100644
88256--- a/net/mac80211/rate.c
88257+++ b/net/mac80211/rate.c
88258@@ -493,7 +493,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
88259
88260 ASSERT_RTNL();
88261
88262- if (local->open_count)
88263+ if (local_read(&local->open_count))
88264 return -EBUSY;
88265
88266 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
88267diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
88268index c97a065..ff61928 100644
88269--- a/net/mac80211/rc80211_pid_debugfs.c
88270+++ b/net/mac80211/rc80211_pid_debugfs.c
88271@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
88272
88273 spin_unlock_irqrestore(&events->lock, status);
88274
88275- if (copy_to_user(buf, pb, p))
88276+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
88277 return -EFAULT;
88278
88279 return p;
88280diff --git a/net/mac80211/util.c b/net/mac80211/util.c
88281index f11e8c5..08d0013 100644
88282--- a/net/mac80211/util.c
88283+++ b/net/mac80211/util.c
88284@@ -1380,7 +1380,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
88285 }
88286 #endif
88287 /* everything else happens only if HW was up & running */
88288- if (!local->open_count)
88289+ if (!local_read(&local->open_count))
88290 goto wake_up;
88291
88292 /*
88293diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
88294index 49e96df..63a51c3 100644
88295--- a/net/netfilter/Kconfig
88296+++ b/net/netfilter/Kconfig
88297@@ -936,6 +936,16 @@ config NETFILTER_XT_MATCH_ESP
88298
88299 To compile it as a module, choose M here. If unsure, say N.
88300
88301+config NETFILTER_XT_MATCH_GRADM
88302+ tristate '"gradm" match support'
88303+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
88304+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
88305+ ---help---
88306+ The gradm match allows to match on grsecurity RBAC being enabled.
88307+ It is useful when iptables rules are applied early on bootup to
88308+ prevent connections to the machine (except from a trusted host)
88309+ while the RBAC system is disabled.
88310+
88311 config NETFILTER_XT_MATCH_HASHLIMIT
88312 tristate '"hashlimit" match support'
88313 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
88314diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
88315index 3259697..54d5393 100644
88316--- a/net/netfilter/Makefile
88317+++ b/net/netfilter/Makefile
88318@@ -109,6 +109,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
88319 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
88320 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
88321 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
88322+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
88323 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
88324 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
88325 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
88326diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
88327index 6d6d8f2..a676749 100644
88328--- a/net/netfilter/ipset/ip_set_core.c
88329+++ b/net/netfilter/ipset/ip_set_core.c
88330@@ -1800,7 +1800,7 @@ done:
88331 return ret;
88332 }
88333
88334-static struct nf_sockopt_ops so_set __read_mostly = {
88335+static struct nf_sockopt_ops so_set = {
88336 .pf = PF_INET,
88337 .get_optmin = SO_IP_SET,
88338 .get_optmax = SO_IP_SET + 1,
88339diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
88340index 30e764a..c3b6a9d 100644
88341--- a/net/netfilter/ipvs/ip_vs_conn.c
88342+++ b/net/netfilter/ipvs/ip_vs_conn.c
88343@@ -554,7 +554,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
88344 /* Increase the refcnt counter of the dest */
88345 atomic_inc(&dest->refcnt);
88346
88347- conn_flags = atomic_read(&dest->conn_flags);
88348+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
88349 if (cp->protocol != IPPROTO_UDP)
88350 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
88351 flags = cp->flags;
88352@@ -899,7 +899,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
88353 atomic_set(&cp->refcnt, 1);
88354
88355 atomic_set(&cp->n_control, 0);
88356- atomic_set(&cp->in_pkts, 0);
88357+ atomic_set_unchecked(&cp->in_pkts, 0);
88358
88359 atomic_inc(&ipvs->conn_count);
88360 if (flags & IP_VS_CONN_F_NO_CPORT)
88361@@ -1180,7 +1180,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
88362
88363 /* Don't drop the entry if its number of incoming packets is not
88364 located in [0, 8] */
88365- i = atomic_read(&cp->in_pkts);
88366+ i = atomic_read_unchecked(&cp->in_pkts);
88367 if (i > 8 || i < 0) return 0;
88368
88369 if (!todrop_rate[i]) return 0;
88370diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
88371index 47edf5a..235b07d 100644
88372--- a/net/netfilter/ipvs/ip_vs_core.c
88373+++ b/net/netfilter/ipvs/ip_vs_core.c
88374@@ -559,7 +559,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
88375 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
88376 /* do not touch skb anymore */
88377
88378- atomic_inc(&cp->in_pkts);
88379+ atomic_inc_unchecked(&cp->in_pkts);
88380 ip_vs_conn_put(cp);
88381 return ret;
88382 }
88383@@ -1691,7 +1691,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
88384 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
88385 pkts = sysctl_sync_threshold(ipvs);
88386 else
88387- pkts = atomic_add_return(1, &cp->in_pkts);
88388+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
88389
88390 if (ipvs->sync_state & IP_VS_STATE_MASTER)
88391 ip_vs_sync_conn(net, cp, pkts);
88392diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
88393index ec664cb..7f34a77 100644
88394--- a/net/netfilter/ipvs/ip_vs_ctl.c
88395+++ b/net/netfilter/ipvs/ip_vs_ctl.c
88396@@ -787,7 +787,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
88397 ip_vs_rs_hash(ipvs, dest);
88398 write_unlock_bh(&ipvs->rs_lock);
88399 }
88400- atomic_set(&dest->conn_flags, conn_flags);
88401+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
88402
88403 /* bind the service */
88404 if (!dest->svc) {
88405@@ -1688,7 +1688,7 @@ proc_do_sync_ports(ctl_table *table, int write,
88406 * align with netns init in ip_vs_control_net_init()
88407 */
88408
88409-static struct ctl_table vs_vars[] = {
88410+static ctl_table_no_const vs_vars[] __read_only = {
88411 {
88412 .procname = "amemthresh",
88413 .maxlen = sizeof(int),
88414@@ -2081,7 +2081,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
88415 " %-7s %-6d %-10d %-10d\n",
88416 &dest->addr.in6,
88417 ntohs(dest->port),
88418- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
88419+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
88420 atomic_read(&dest->weight),
88421 atomic_read(&dest->activeconns),
88422 atomic_read(&dest->inactconns));
88423@@ -2092,7 +2092,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
88424 "%-7s %-6d %-10d %-10d\n",
88425 ntohl(dest->addr.ip),
88426 ntohs(dest->port),
88427- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
88428+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
88429 atomic_read(&dest->weight),
88430 atomic_read(&dest->activeconns),
88431 atomic_read(&dest->inactconns));
88432@@ -2562,7 +2562,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
88433
88434 entry.addr = dest->addr.ip;
88435 entry.port = dest->port;
88436- entry.conn_flags = atomic_read(&dest->conn_flags);
88437+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
88438 entry.weight = atomic_read(&dest->weight);
88439 entry.u_threshold = dest->u_threshold;
88440 entry.l_threshold = dest->l_threshold;
88441@@ -3098,7 +3098,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
88442 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
88443 nla_put_u16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
88444 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
88445- (atomic_read(&dest->conn_flags) &
88446+ (atomic_read_unchecked(&dest->conn_flags) &
88447 IP_VS_CONN_F_FWD_MASK)) ||
88448 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
88449 atomic_read(&dest->weight)) ||
88450@@ -3688,7 +3688,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
88451 {
88452 int idx;
88453 struct netns_ipvs *ipvs = net_ipvs(net);
88454- struct ctl_table *tbl;
88455+ ctl_table_no_const *tbl;
88456
88457 atomic_set(&ipvs->dropentry, 0);
88458 spin_lock_init(&ipvs->dropentry_lock);
88459diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
88460index fdd89b9..bd96aa9 100644
88461--- a/net/netfilter/ipvs/ip_vs_lblc.c
88462+++ b/net/netfilter/ipvs/ip_vs_lblc.c
88463@@ -115,7 +115,7 @@ struct ip_vs_lblc_table {
88464 * IPVS LBLC sysctl table
88465 */
88466 #ifdef CONFIG_SYSCTL
88467-static ctl_table vs_vars_table[] = {
88468+static ctl_table_no_const vs_vars_table[] __read_only = {
88469 {
88470 .procname = "lblc_expiration",
88471 .data = NULL,
88472diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
88473index c03b6a3..8ce3681 100644
88474--- a/net/netfilter/ipvs/ip_vs_lblcr.c
88475+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
88476@@ -288,7 +288,7 @@ struct ip_vs_lblcr_table {
88477 * IPVS LBLCR sysctl table
88478 */
88479
88480-static ctl_table vs_vars_table[] = {
88481+static ctl_table_no_const vs_vars_table[] __read_only = {
88482 {
88483 .procname = "lblcr_expiration",
88484 .data = NULL,
88485diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
88486index 44fd10c..2a163b3 100644
88487--- a/net/netfilter/ipvs/ip_vs_sync.c
88488+++ b/net/netfilter/ipvs/ip_vs_sync.c
88489@@ -596,7 +596,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
88490 cp = cp->control;
88491 if (cp) {
88492 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
88493- pkts = atomic_add_return(1, &cp->in_pkts);
88494+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
88495 else
88496 pkts = sysctl_sync_threshold(ipvs);
88497 ip_vs_sync_conn(net, cp->control, pkts);
88498@@ -758,7 +758,7 @@ control:
88499 if (!cp)
88500 return;
88501 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
88502- pkts = atomic_add_return(1, &cp->in_pkts);
88503+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
88504 else
88505 pkts = sysctl_sync_threshold(ipvs);
88506 goto sloop;
88507@@ -885,7 +885,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
88508
88509 if (opt)
88510 memcpy(&cp->in_seq, opt, sizeof(*opt));
88511- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
88512+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
88513 cp->state = state;
88514 cp->old_state = cp->state;
88515 /*
88516diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
88517index ee6b7a9..f9a89f6 100644
88518--- a/net/netfilter/ipvs/ip_vs_xmit.c
88519+++ b/net/netfilter/ipvs/ip_vs_xmit.c
88520@@ -1210,7 +1210,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
88521 else
88522 rc = NF_ACCEPT;
88523 /* do not touch skb anymore */
88524- atomic_inc(&cp->in_pkts);
88525+ atomic_inc_unchecked(&cp->in_pkts);
88526 goto out;
88527 }
88528
88529@@ -1332,7 +1332,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
88530 else
88531 rc = NF_ACCEPT;
88532 /* do not touch skb anymore */
88533- atomic_inc(&cp->in_pkts);
88534+ atomic_inc_unchecked(&cp->in_pkts);
88535 goto out;
88536 }
88537
88538diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
88539index 7df424e..a527b02 100644
88540--- a/net/netfilter/nf_conntrack_acct.c
88541+++ b/net/netfilter/nf_conntrack_acct.c
88542@@ -60,7 +60,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
88543 #ifdef CONFIG_SYSCTL
88544 static int nf_conntrack_acct_init_sysctl(struct net *net)
88545 {
88546- struct ctl_table *table;
88547+ ctl_table_no_const *table;
88548
88549 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
88550 GFP_KERNEL);
88551diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
88552index e4a0c4f..c263f28 100644
88553--- a/net/netfilter/nf_conntrack_core.c
88554+++ b/net/netfilter/nf_conntrack_core.c
88555@@ -1529,6 +1529,10 @@ err_extend:
88556 #define DYING_NULLS_VAL ((1<<30)+1)
88557 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
88558
88559+#ifdef CONFIG_GRKERNSEC_HIDESYM
88560+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
88561+#endif
88562+
88563 static int nf_conntrack_init_net(struct net *net)
88564 {
88565 int ret;
88566@@ -1543,7 +1547,11 @@ static int nf_conntrack_init_net(struct net *net)
88567 goto err_stat;
88568 }
88569
88570+#ifdef CONFIG_GRKERNSEC_HIDESYM
88571+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08lx", atomic_inc_return_unchecked(&conntrack_cache_id));
88572+#else
88573 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
88574+#endif
88575 if (!net->ct.slabname) {
88576 ret = -ENOMEM;
88577 goto err_slabname;
88578diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
88579index faa978f..1afb18f 100644
88580--- a/net/netfilter/nf_conntrack_ecache.c
88581+++ b/net/netfilter/nf_conntrack_ecache.c
88582@@ -186,7 +186,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
88583 #ifdef CONFIG_SYSCTL
88584 static int nf_conntrack_event_init_sysctl(struct net *net)
88585 {
88586- struct ctl_table *table;
88587+ ctl_table_no_const *table;
88588
88589 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
88590 GFP_KERNEL);
88591diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
88592index 884f2b3..d53b33a 100644
88593--- a/net/netfilter/nf_conntrack_helper.c
88594+++ b/net/netfilter/nf_conntrack_helper.c
88595@@ -55,7 +55,7 @@ static struct ctl_table helper_sysctl_table[] = {
88596
88597 static int nf_conntrack_helper_init_sysctl(struct net *net)
88598 {
88599- struct ctl_table *table;
88600+ ctl_table_no_const *table;
88601
88602 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
88603 GFP_KERNEL);
88604diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
88605index 51e928d..72a413a 100644
88606--- a/net/netfilter/nf_conntrack_proto.c
88607+++ b/net/netfilter/nf_conntrack_proto.c
88608@@ -51,7 +51,7 @@ nf_ct_register_sysctl(struct net *net,
88609
88610 static void
88611 nf_ct_unregister_sysctl(struct ctl_table_header **header,
88612- struct ctl_table **table,
88613+ ctl_table_no_const **table,
88614 unsigned int users)
88615 {
88616 if (users > 0)
88617diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
88618index e7185c6..4ad6c9c 100644
88619--- a/net/netfilter/nf_conntrack_standalone.c
88620+++ b/net/netfilter/nf_conntrack_standalone.c
88621@@ -470,7 +470,7 @@ static ctl_table nf_ct_netfilter_table[] = {
88622
88623 static int nf_conntrack_standalone_init_sysctl(struct net *net)
88624 {
88625- struct ctl_table *table;
88626+ ctl_table_no_const *table;
88627
88628 if (net_eq(net, &init_net)) {
88629 nf_ct_netfilter_header =
88630diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
88631index 7ea8026..bc9512d 100644
88632--- a/net/netfilter/nf_conntrack_timestamp.c
88633+++ b/net/netfilter/nf_conntrack_timestamp.c
88634@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
88635 #ifdef CONFIG_SYSCTL
88636 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
88637 {
88638- struct ctl_table *table;
88639+ ctl_table_no_const *table;
88640
88641 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
88642 GFP_KERNEL);
88643diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
88644index 9e31269..bc4c1b7 100644
88645--- a/net/netfilter/nf_log.c
88646+++ b/net/netfilter/nf_log.c
88647@@ -215,7 +215,7 @@ static const struct file_operations nflog_file_ops = {
88648
88649 #ifdef CONFIG_SYSCTL
88650 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
88651-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
88652+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
88653 static struct ctl_table_header *nf_log_dir_header;
88654
88655 static int nf_log_proc_dostring(ctl_table *table, int write,
88656@@ -246,14 +246,16 @@ static int nf_log_proc_dostring(ctl_table *table, int write,
88657 rcu_assign_pointer(nf_loggers[tindex], logger);
88658 mutex_unlock(&nf_log_mutex);
88659 } else {
88660+ ctl_table_no_const nf_log_table = *table;
88661+
88662 mutex_lock(&nf_log_mutex);
88663 logger = rcu_dereference_protected(nf_loggers[tindex],
88664 lockdep_is_held(&nf_log_mutex));
88665 if (!logger)
88666- table->data = "NONE";
88667+ nf_log_table.data = "NONE";
88668 else
88669- table->data = logger->name;
88670- r = proc_dostring(table, write, buffer, lenp, ppos);
88671+ nf_log_table.data = logger->name;
88672+ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
88673 mutex_unlock(&nf_log_mutex);
88674 }
88675
88676diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
88677index f042ae5..30ea486 100644
88678--- a/net/netfilter/nf_sockopt.c
88679+++ b/net/netfilter/nf_sockopt.c
88680@@ -45,7 +45,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
88681 }
88682 }
88683
88684- list_add(&reg->list, &nf_sockopts);
88685+ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
88686 out:
88687 mutex_unlock(&nf_sockopt_mutex);
88688 return ret;
88689@@ -55,7 +55,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
88690 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
88691 {
88692 mutex_lock(&nf_sockopt_mutex);
88693- list_del(&reg->list);
88694+ pax_list_del((struct list_head *)&reg->list);
88695 mutex_unlock(&nf_sockopt_mutex);
88696 }
88697 EXPORT_SYMBOL(nf_unregister_sockopt);
88698diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
88699index 589d686..dc3fd5d 100644
88700--- a/net/netfilter/nfnetlink_acct.c
88701+++ b/net/netfilter/nfnetlink_acct.c
88702@@ -49,6 +49,8 @@ nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb,
88703 return -EINVAL;
88704
88705 acct_name = nla_data(tb[NFACCT_NAME]);
88706+ if (strlen(acct_name) == 0)
88707+ return -EINVAL;
88708
88709 list_for_each_entry(nfacct, &nfnl_acct_list, head) {
88710 if (strncmp(nfacct->name, acct_name, NFACCT_NAME_MAX) != 0)
88711diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
88712index 92fd8ec..3f6ea4b 100644
88713--- a/net/netfilter/nfnetlink_log.c
88714+++ b/net/netfilter/nfnetlink_log.c
88715@@ -72,7 +72,7 @@ struct nfulnl_instance {
88716 };
88717
88718 static DEFINE_SPINLOCK(instances_lock);
88719-static atomic_t global_seq;
88720+static atomic_unchecked_t global_seq;
88721
88722 #define INSTANCE_BUCKETS 16
88723 static struct hlist_head instance_table[INSTANCE_BUCKETS];
88724@@ -537,7 +537,7 @@ __build_packet_message(struct nfulnl_instance *inst,
88725 /* global sequence number */
88726 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
88727 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
88728- htonl(atomic_inc_return(&global_seq))))
88729+ htonl(atomic_inc_return_unchecked(&global_seq))))
88730 goto nla_put_failure;
88731
88732 if (data_len) {
88733diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
88734index 3158d87..39006c9 100644
88735--- a/net/netfilter/nfnetlink_queue_core.c
88736+++ b/net/netfilter/nfnetlink_queue_core.c
88737@@ -1064,8 +1064,10 @@ static int __init nfnetlink_queue_init(void)
88738
88739 #ifdef CONFIG_PROC_FS
88740 if (!proc_create("nfnetlink_queue", 0440,
88741- proc_net_netfilter, &nfqnl_file_ops))
88742+ proc_net_netfilter, &nfqnl_file_ops)) {
88743+ status = -ENOMEM;
88744 goto cleanup_subsys;
88745+ }
88746 #endif
88747
88748 register_netdevice_notifier(&nfqnl_dev_notifier);
88749diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
88750new file mode 100644
88751index 0000000..c566332
88752--- /dev/null
88753+++ b/net/netfilter/xt_gradm.c
88754@@ -0,0 +1,51 @@
88755+/*
88756+ * gradm match for netfilter
88757